/* en_rx.c, revision 298775 */
/*
 * Copyright (c) 2007, 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include "opt_inet.h"
#include <linux/mlx4/cq.h>
#include <linux/slab.h>
#include <linux/mlx4/qp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include <linux/mlx4/driver.h>
#ifdef CONFIG_NET_RX_BUSY_POLL
#include <net/busy_poll.h>
#endif

#include "mlx4_en.h"

static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv,
				 struct mlx4_en_rx_ring *ring,
				 int index)
{
	struct mlx4_en_rx_desc *rx_desc = (struct mlx4_en_rx_desc *)
	    (ring->buf + (ring->stride * index));
	int possible_frags;
	int i;

	/* Set size and memtype fields */
	rx_desc->data[0].byte_count = cpu_to_be32(priv->rx_mb_size - MLX4_NET_IP_ALIGN);
	rx_desc->data[0].lkey = cpu_to_be32(priv->mdev->mr.key);

	/*
	 * If the number of used fragments does not fill up the ring
	 * stride, remaining (unused) fragments must be padded with
	 * null address/size and a special memory key:
	 */
	possible_frags = (ring->stride - sizeof(struct mlx4_en_rx_desc)) / DS_SIZE;
	for (i = 1; i < possible_frags; i++) {
		rx_desc->data[i].byte_count = 0;
		rx_desc->data[i].lkey = cpu_to_be32(MLX4_EN_MEMTYPE_PAD);
		rx_desc->data[i].addr = 0;
	}
}

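/*
 * Allocate and DMA-map a replacement mbuf for one RX slot.  A single
 * "spare" mbuf is kept per ring so that, if allocating or mapping a
 * fresh mbuf fails, the spare can be swapped in and the descriptor is
 * never left without a buffer.
 */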
static int
mlx4_en_alloc_buf(struct mlx4_en_rx_ring *ring,
    __be64 *pdma, struct mlx4_en_rx_mbuf *mb_list)
{
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	struct mbuf *mb;
	int nsegs;
	int err;

	/* try to allocate a new spare mbuf */
	if (unlikely(ring->spare.mbuf == NULL)) {
		mb = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, ring->rx_mb_size);
		if (unlikely(mb == NULL))
			return (-ENOMEM);
		/* setup correct length */
		mb->m_pkthdr.len = mb->m_len = ring->rx_mb_size;

		/* make sure IP header gets aligned */
		m_adj(mb, MLX4_NET_IP_ALIGN);

		/* load spare mbuf into BUSDMA */
		err = -bus_dmamap_load_mbuf_sg(ring->dma_tag, ring->spare.dma_map,
		    mb, segs, &nsegs, BUS_DMA_NOWAIT);
		if (unlikely(err != 0)) {
			m_freem(mb);
			return (err);
		}

		/* store spare info */
		ring->spare.mbuf = mb;
		ring->spare.paddr_be = cpu_to_be64(segs[0].ds_addr);

		bus_dmamap_sync(ring->dma_tag, ring->spare.dma_map,
		    BUS_DMASYNC_PREREAD);
	}

	/* synchronize and unload the current mbuf, if any */
	if (likely(mb_list->mbuf != NULL)) {
		bus_dmamap_sync(ring->dma_tag, mb_list->dma_map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(ring->dma_tag, mb_list->dma_map);
	}

	mb = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, ring->rx_mb_size);
	if (unlikely(mb == NULL))
		goto use_spare;

	/* setup correct length */
	mb->m_pkthdr.len = mb->m_len = ring->rx_mb_size;

	/* make sure IP header gets aligned */
	m_adj(mb, MLX4_NET_IP_ALIGN);

	err = -bus_dmamap_load_mbuf_sg(ring->dma_tag, mb_list->dma_map,
	    mb, segs, &nsegs, BUS_DMA_NOWAIT);
	if (unlikely(err != 0)) {
		m_freem(mb);
		goto use_spare;
	}

	*pdma = cpu_to_be64(segs[0].ds_addr);
	mb_list->mbuf = mb;

	bus_dmamap_sync(ring->dma_tag, mb_list->dma_map, BUS_DMASYNC_PREREAD);
	return (0);

use_spare:
	/* swap DMA maps */
	map = mb_list->dma_map;
	mb_list->dma_map = ring->spare.dma_map;
	ring->spare.dma_map = map;

	/* swap MBUFs */
	mb_list->mbuf = ring->spare.mbuf;
	ring->spare.mbuf = NULL;

	/* store physical address */
	*pdma = ring->spare.paddr_be;
	return (0);
}

static void
mlx4_en_free_buf(struct mlx4_en_rx_ring *ring, struct mlx4_en_rx_mbuf *mb_list)
{
	bus_dmamap_t map = mb_list->dma_map;
	bus_dmamap_sync(ring->dma_tag, map, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(ring->dma_tag, map);
	m_freem(mb_list->mbuf);
	mb_list->mbuf = NULL;	/* safety clearing */
}

static int
mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
    struct mlx4_en_rx_ring *ring, int index)
{
	struct mlx4_en_rx_desc *rx_desc = (struct mlx4_en_rx_desc *)
	    (ring->buf + (index * ring->stride));
	struct mlx4_en_rx_mbuf *mb_list = ring->mbuf + index;

	mb_list->mbuf = NULL;

	if (mlx4_en_alloc_buf(ring, &rx_desc->data[0].addr, mb_list)) {
		priv->port_stats.rx_alloc_failed++;
		return (-ENOMEM);
	}
	return (0);
}

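/*
 * Publish the ring's producer index to the doorbell record so the
 * hardware knows how many receive descriptors are available.
 */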
static inline void
mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
{
	*ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
}

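/*
 * Post receive buffers to all RX rings, round-robin, until every ring
 * is full.  If allocation fails part way through, every ring is shrunk
 * to the largest power of two that could actually be populated.
 */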
static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
{
	struct mlx4_en_rx_ring *ring;
	int ring_ind;
	int buf_ind;
	int new_size;
	int err;

	for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) {
		for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
			ring = priv->rx_ring[ring_ind];

			err = mlx4_en_prepare_rx_desc(priv, ring,
						      ring->actual_size);
			if (err) {
				if (ring->actual_size == 0) {
					en_err(priv, "Failed to allocate "
						     "enough rx buffers\n");
					return -ENOMEM;
				} else {
					new_size =
						rounddown_pow_of_two(ring->actual_size);
					en_warn(priv, "Only %d buffers allocated, "
						      "reducing ring size to %d\n",
						ring->actual_size, new_size);
					goto reduce_rings;
				}
			}
			ring->actual_size++;
			ring->prod++;
		}
	}
	return 0;

reduce_rings:
	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];
		while (ring->actual_size > new_size) {
			ring->actual_size--;
			ring->prod--;
			mlx4_en_free_buf(ring,
			    ring->mbuf + ring->actual_size);
		}
	}

	return 0;
}

static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
				struct mlx4_en_rx_ring *ring)
{
	int index;

	en_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n",
	       ring->cons, ring->prod);

	/* Unmap and free Rx buffers */
	BUG_ON((u32) (ring->prod - ring->cons) > ring->actual_size);
	while (ring->cons != ring->prod) {
		index = ring->cons & ring->size_mask;
		en_dbg(DRV, priv, "Processing descriptor:%d\n", index);
		mlx4_en_free_buf(ring, ring->mbuf + index);
		++ring->cons;
	}
}

void mlx4_en_calc_rx_buf(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int eff_mtu = dev->if_mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN +
	    MLX4_NET_IP_ALIGN;

	if (eff_mtu > MJUM16BYTES) {
		en_err(priv, "MTU(%d) is too big\n", (int)dev->if_mtu);
		eff_mtu = MJUM16BYTES;
	} else if (eff_mtu > MJUM9BYTES) {
		eff_mtu = MJUM16BYTES;
	} else if (eff_mtu > MJUMPAGESIZE) {
		eff_mtu = MJUM9BYTES;
	} else if (eff_mtu > MCLBYTES) {
		eff_mtu = MJUMPAGESIZE;
	} else {
		eff_mtu = MCLBYTES;
	}
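	/*
	 * Example (illustrative, assuming the usual ETH_HLEN=14,
	 * VLAN_HLEN=4, ETH_FCS_LEN=4, MLX4_NET_IP_ALIGN=2 and a 4KB
	 * MJUMPAGESIZE): an MTU of 1500 gives eff_mtu = 1524, which fits
	 * a standard MCLBYTES (2048 byte) cluster, while an MTU of 9000
	 * gives 9024 and is carried in an MJUM9BYTES cluster.
	 */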

	priv->rx_mb_size = eff_mtu;

	en_dbg(DRV, priv, "Effective RX MTU: %d bytes\n", eff_mtu);
}

int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
			   struct mlx4_en_rx_ring **pring,
			   u32 size, int node)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rx_ring *ring;
	int err;
	int tmp;
	uint32_t x;

	ring = kzalloc(sizeof(struct mlx4_en_rx_ring), GFP_KERNEL);
	if (!ring) {
		en_err(priv, "Failed to allocate RX ring structure\n");
		return -ENOMEM;
	}

	/* Create DMA descriptor TAG */
	if ((err = -bus_dma_tag_create(
	    bus_get_dma_tag(mdev->pdev->dev.bsddev),
	    1,				/* any alignment */
	    0,				/* no boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MJUM16BYTES,		/* maxsize */
	    1,				/* nsegments */
	    MJUM16BYTES,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &ring->dma_tag))) {
		en_err(priv, "Failed to create DMA tag\n");
		goto err_ring;
	}

	ring->prod = 0;
	ring->cons = 0;
	ring->size = size;
	ring->size_mask = size - 1;
	ring->stride = roundup_pow_of_two(
	    sizeof(struct mlx4_en_rx_desc) + DS_SIZE);
	ring->log_stride = ffs(ring->stride) - 1;
	ring->buf_size = ring->size * ring->stride + TXBB_SIZE;

	tmp = size * sizeof(struct mlx4_en_rx_mbuf);

	ring->mbuf = kzalloc(tmp, GFP_KERNEL);
	if (ring->mbuf == NULL) {
		err = -ENOMEM;
		goto err_dma_tag;
	}

	err = -bus_dmamap_create(ring->dma_tag, 0, &ring->spare.dma_map);
	if (err != 0)
		goto err_info;

	for (x = 0; x != size; x++) {
		err = -bus_dmamap_create(ring->dma_tag, 0,
		    &ring->mbuf[x].dma_map);
		if (err != 0) {
			while (x--)
				bus_dmamap_destroy(ring->dma_tag,
				    ring->mbuf[x].dma_map);
			goto err_info;
		}
	}
	en_dbg(DRV, priv, "Allocated MBUF ring at addr:%p size:%d\n",
	       ring->mbuf, tmp);

	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres,
				 ring->buf_size, 2 * PAGE_SIZE);
	if (err)
		goto err_dma_map;

	err = mlx4_en_map_buffer(&ring->wqres.buf);
	if (err) {
		en_err(priv, "Failed to map RX buffer\n");
		goto err_hwq;
	}
	ring->buf = ring->wqres.buf.direct.buf;
	*pring = ring;
	return 0;

err_hwq:
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
err_dma_map:
	for (x = 0; x != size; x++) {
		bus_dmamap_destroy(ring->dma_tag,
		    ring->mbuf[x].dma_map);
	}
	bus_dmamap_destroy(ring->dma_tag, ring->spare.dma_map);
err_info:
	vfree(ring->mbuf);
err_dma_tag:
	bus_dma_tag_destroy(ring->dma_tag);
err_ring:
	kfree(ring);
	return (err);
}

int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
{
	struct mlx4_en_rx_ring *ring;
	int i;
	int ring_ind;
	int err;
	int stride = roundup_pow_of_two(
	    sizeof(struct mlx4_en_rx_desc) + DS_SIZE);

	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];

		ring->prod = 0;
		ring->cons = 0;
		ring->actual_size = 0;
		ring->cqn = priv->rx_cq[ring_ind]->mcq.cqn;
		ring->rx_alloc_order = priv->rx_alloc_order;
		ring->rx_alloc_size = priv->rx_alloc_size;
		ring->rx_buf_size = priv->rx_buf_size;
		ring->rx_mb_size = priv->rx_mb_size;

		ring->stride = stride;
		if (ring->stride <= TXBB_SIZE)
			ring->buf += TXBB_SIZE;

		ring->log_stride = ffs(ring->stride) - 1;
		ring->buf_size = ring->size * ring->stride;

		memset(ring->buf, 0, ring->buf_size);
		mlx4_en_update_rx_prod_db(ring);

		/* Initialize all descriptors */
		for (i = 0; i < ring->size; i++)
			mlx4_en_init_rx_desc(priv, ring, i);

#ifdef INET
		/* Configure lro mngr */
		if (priv->dev->if_capenable & IFCAP_LRO) {
			if (tcp_lro_init(&ring->lro))
				priv->dev->if_capenable &= ~IFCAP_LRO;
			else
				ring->lro.ifp = priv->dev;
		}
#endif
	}

	err = mlx4_en_fill_rx_buffers(priv);
	if (err)
		goto err_buffers;

	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];

		ring->size_mask = ring->actual_size - 1;
		mlx4_en_update_rx_prod_db(ring);
	}

	return 0;

err_buffers:
	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++)
		mlx4_en_free_rx_buf(priv, priv->rx_ring[ring_ind]);

	ring_ind = priv->rx_ring_num - 1;

	while (ring_ind >= 0) {
		ring = priv->rx_ring[ring_ind];
		if (ring->stride <= TXBB_SIZE)
			ring->buf -= TXBB_SIZE;
		ring_ind--;
	}

	return err;
}

void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_rx_ring **pring,
			     u32 size, u16 stride)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rx_ring *ring = *pring;
	uint32_t x;

	mlx4_en_unmap_buffer(&ring->wqres.buf);
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
	for (x = 0; x != size; x++)
		bus_dmamap_destroy(ring->dma_tag, ring->mbuf[x].dma_map);
	/* free spare mbuf, if any */
	if (ring->spare.mbuf != NULL) {
		bus_dmamap_sync(ring->dma_tag, ring->spare.dma_map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(ring->dma_tag, ring->spare.dma_map);
		m_freem(ring->spare.mbuf);
	}
	bus_dmamap_destroy(ring->dma_tag, ring->spare.dma_map);
	vfree(ring->mbuf);
	bus_dma_tag_destroy(ring->dma_tag);
	kfree(ring);
	*pring = NULL;
#ifdef CONFIG_RFS_ACCEL
	mlx4_en_cleanup_filters(priv, ring);
#endif
}

void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
				struct mlx4_en_rx_ring *ring)
{
#ifdef INET
	tcp_lro_free(&ring->lro);
#endif
	mlx4_en_free_rx_buf(priv, ring);
	if (ring->stride <= TXBB_SIZE)
		ring->buf -= TXBB_SIZE;
}

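/*
 * Check a received frame against the incrementing byte pattern used by
 * the driver's loopback self-test; on a match, flag the test as passed.
 * The mbuf is consumed in either case.
 */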
static void validate_loopback(struct mlx4_en_priv *priv, struct mbuf *mb)
{
	int i;
	int offset = ETHER_HDR_LEN;

	for (i = 0; i < MLX4_LOOPBACK_TEST_PAYLOAD; i++, offset++) {
		if (*(mb->m_data + offset) != (unsigned char) (i & 0xff))
			goto out_loopback;
	}
	/* Loopback found */
	priv->loopback_ok = 1;

out_loopback:
	m_freem(mb);
}

static inline int invalid_cqe(struct mlx4_en_priv *priv,
			      struct mlx4_cqe *cqe)
{
	/* Drop packet on bad receive or bad checksum */
	if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
		     MLX4_CQE_OPCODE_ERROR)) {
		en_err(priv, "CQE completed in error - vendor syndrome:%d syndrome:%d\n",
		       ((struct mlx4_err_cqe *)cqe)->vendor_err_syndrome,
		       ((struct mlx4_err_cqe *)cqe)->syndrome);
		return 1;
	}
	if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
		en_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
		return 1;
	}

	return 0;
}

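/*
 * Detach the filled mbuf from its RX slot and immediately refill the
 * slot with a fresh buffer.  If no replacement can be obtained, the
 * original mbuf stays in place and NULL is returned, so the caller
 * drops the frame.
 */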
static struct mbuf *
mlx4_en_rx_mb(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring,
    struct mlx4_en_rx_desc *rx_desc, struct mlx4_en_rx_mbuf *mb_list,
    int length)
{
	struct mbuf *mb;

	/* get mbuf */
	mb = mb_list->mbuf;

	/* collect used fragment while atomically replacing it */
	if (mlx4_en_alloc_buf(ring, &rx_desc->data[0].addr, mb_list))
		return (NULL);

	/* range check hardware computed value */
	if (unlikely(length > mb->m_len))
		length = mb->m_len;

	/* update total packet length in packet header */
	mb->m_len = mb->m_pkthdr.len = length;
	return (mb);
}

/*
 * For CPU architectures with a 64B cache line, performance is better when
 * the CQE size is 64B.  To enlarge the CQE size from 32B to 64B, 32B of
 * garbage (i.e. 0xccccccc) were added at the beginning of each CQE, with
 * the real data in the remaining 32B.  The calculation below ensures that
 * when factor == 1 we are aligned to 64B and index the real CQE data.
 */
#define CQE_FACTOR_INDEX(index, factor) ((index << factor) + factor)
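/*
 * Example: with 64 byte CQEs (factor == 1) the valid 32 bytes of entry
 * 'index' are at buf[2 * index + 1]; with 32 byte CQEs (factor == 0)
 * the entry is simply buf[index].
 */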
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_cqe *cqe;
	struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring];
	struct mlx4_en_rx_mbuf *mb_list;
	struct mlx4_en_rx_desc *rx_desc;
	struct mbuf *mb;
	struct mlx4_cq *mcq = &cq->mcq;
	struct mlx4_cqe *buf = cq->buf;
#ifdef INET
	struct lro_entry *queued;
#endif
	int index;
	unsigned int length;
	int polled = 0;
	u32 cons_index = mcq->cons_index;
	u32 size_mask = ring->size_mask;
	int size = cq->size;
	int factor = priv->cqe_factor;

	if (!priv->port_up)
		return 0;

	/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
	 * descriptor offset can be deduced from the CQE index instead of
	 * reading 'cqe->index' */
	index = cons_index & size_mask;
	cqe = &buf[CQE_FACTOR_INDEX(index, factor)];

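	/*
	 * A CQE belongs to software when its ownership bit matches the
	 * current pass of the consumer index over the CQ (the bit
	 * alternates on every wrap of the queue), hence the XNOR test
	 * below.
	 */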
	/* Process all completed CQEs */
	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
		    cons_index & size)) {
		mb_list = ring->mbuf + index;
		rx_desc = (struct mlx4_en_rx_desc *)
		    (ring->buf + (index << ring->log_stride));

		/*
		 * make sure we read the CQE after we read the ownership bit
		 */
		rmb();

		if (invalid_cqe(priv, cqe)) {
			goto next;
		}
		/*
		 * Packet is OK - process it.
		 */
		length = be32_to_cpu(cqe->byte_cnt);
		length -= ring->fcs_del;

		mb = mlx4_en_rx_mb(priv, ring, rx_desc, mb_list, length);
		if (unlikely(!mb)) {
			ring->errors++;
			goto next;
		}

		ring->bytes += length;
		ring->packets++;

		if (unlikely(priv->validate_loopback)) {
			validate_loopback(priv, mb);
			goto next;
		}

		/* forward Toeplitz compatible hash value */
		mb->m_pkthdr.flowid = be32_to_cpu(cqe->immed_rss_invalid);
		M_HASHTYPE_SET(mb, M_HASHTYPE_OPAQUE);
		mb->m_pkthdr.rcvif = dev;
		if (be32_to_cpu(cqe->vlan_my_qpn) &
		    MLX4_CQE_VLAN_PRESENT_MASK) {
			mb->m_pkthdr.ether_vtag = be16_to_cpu(cqe->sl_vid);
			mb->m_flags |= M_VLANTAG;
		}
		if (likely(dev->if_capenable &
		    (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) &&
		    (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
		    (cqe->checksum == cpu_to_be16(0xffff))) {
			priv->port_stats.rx_chksum_good++;
			mb->m_pkthdr.csum_flags =
			    CSUM_IP_CHECKED | CSUM_IP_VALID |
			    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
			mb->m_pkthdr.csum_data = htons(0xffff);
			/* This packet is eligible for LRO if it is:
			 * - DIX Ethernet (type interpretation)
			 * - TCP/IP (v4)
			 * - without IP options
			 * - not an IP fragment
			 */
#ifdef INET
			if (mlx4_en_can_lro(cqe->status) &&
			    (dev->if_capenable & IFCAP_LRO)) {
				if (ring->lro.lro_cnt != 0 &&
				    tcp_lro_rx(&ring->lro, mb, 0) == 0)
					goto next;
			}

#endif
			/* LRO not possible, complete processing here */
			INC_PERF_COUNTER(priv->pstats.lro_misses);
		} else {
			mb->m_pkthdr.csum_flags = 0;
			priv->port_stats.rx_chksum_none++;
		}

		/* Push it up the stack */
		dev->if_input(dev, mb);

next:
		++cons_index;
		index = cons_index & size_mask;
		cqe = &buf[CQE_FACTOR_INDEX(index, factor)];
		if (++polled == budget)
			goto out;
	}
	/* Flush all pending IP reassembly sessions */
out:
#ifdef INET
	while ((queued = SLIST_FIRST(&ring->lro.lro_active)) != NULL) {
		SLIST_REMOVE_HEAD(&ring->lro.lro_active, next);
		tcp_lro_flush(&ring->lro, queued);
	}
#endif
	AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
	mcq->cons_index = cons_index;
	mlx4_cq_set_ci(mcq);
	wmb(); /* ensure HW sees CQ consumer before we post new buffers */
	ring->cons = mcq->cons_index;
	ring->prod += polled; /* Polled descriptors were reallocated in place */
	mlx4_en_update_rx_prod_db(ring);
	return polled;
}

/* Rx CQ polling - called from the RX interrupt handler and taskqueue */
static int mlx4_en_poll_rx_cq(struct mlx4_en_cq *cq, int budget)
{
	struct net_device *dev = cq->dev;
	int done;

	done = mlx4_en_process_rx_cq(dev, cq, budget);
	cq->tot_rx += done;

	return done;
}

void mlx4_en_rx_irq(struct mlx4_cq *mcq)
{
	struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
	int done;

	/* Poll once directly from the interrupt context, since there is no
	 * NAPI in FreeBSD; remaining work is deferred to the taskqueue. */
	done = mlx4_en_poll_rx_cq(cq, MLX4_EN_RX_BUDGET);
	if (priv->port_up && (done == MLX4_EN_RX_BUDGET)) {
		cq->curr_poll_rx_cpu_id = curcpu;
		taskqueue_enqueue(cq->tq, &cq->cq_task);
	} else {
		mlx4_en_arm_cq(priv, cq);
	}
}

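/*
 * Taskqueue handler used when a single poll from the interrupt handler
 * did not drain the CQ: bind the servicing thread to the CPU that took
 * the interrupt, keep polling full budgets until the CQ is empty, then
 * re-arm it.
 */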
void mlx4_en_rx_que(void *context, int pending)
{
	struct mlx4_en_cq *cq;
	struct thread *td;

	cq = context;
	td = curthread;

	thread_lock(td);
	sched_bind(td, cq->curr_poll_rx_cpu_id);
	thread_unlock(td);

	while (mlx4_en_poll_rx_cq(cq, MLX4_EN_RX_BUDGET)
	    == MLX4_EN_RX_BUDGET)
		;
	mlx4_en_arm_cq(cq->dev->if_softc, cq);
}


/* RSS related functions */

static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
				 struct mlx4_en_rx_ring *ring,
				 enum mlx4_qp_state *state,
				 struct mlx4_qp *qp)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_qp_context *context;
	int err = 0;

	context = kmalloc(sizeof *context, GFP_KERNEL);
	if (!context) {
		en_err(priv, "Failed to allocate qp context\n");
		return -ENOMEM;
	}

	err = mlx4_qp_alloc(mdev->dev, qpn, qp);
	if (err) {
		en_err(priv, "Failed to allocate qp #%x\n", qpn);
		goto out;
	}
	qp->event = mlx4_en_sqp_event;

	memset(context, 0, sizeof *context);
	mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0,
				qpn, ring->cqn, -1, context);
	context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma);

	/* Cancel FCS removal if FW allows */
	if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP) {
		context->param3 |= cpu_to_be32(1 << 29);
		ring->fcs_del = ETH_FCS_LEN;
	} else
		ring->fcs_del = 0;

	err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, context, qp, state);
	if (err) {
		mlx4_qp_remove(mdev->dev, qp);
		mlx4_qp_free(mdev->dev, qp);
	}
	mlx4_en_update_rx_prod_db(ring);
out:
	kfree(context);
	return err;
}

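/*
 * Reserve and allocate the "drop" QP.  Steering rules may target this
 * QP to silently discard traffic that should not be delivered to any
 * RX ring.
 */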
int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv)
{
	int err;
	u32 qpn;

	err = mlx4_qp_reserve_range(priv->mdev->dev, 1, 1, &qpn, 0);
	if (err) {
		en_err(priv, "Failed reserving drop qpn\n");
		return err;
	}
	err = mlx4_qp_alloc(priv->mdev->dev, qpn, &priv->drop_qp);
	if (err) {
		en_err(priv, "Failed allocating drop qp\n");
		mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
		return err;
	}

	return 0;
}

void mlx4_en_destroy_drop_qp(struct mlx4_en_priv *priv)
{
	u32 qpn;

	qpn = priv->drop_qp.qpn;
	mlx4_qp_remove(priv->mdev->dev, &priv->drop_qp);
	mlx4_qp_free(priv->mdev->dev, &priv->drop_qp);
	mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
}

/* Allocate rx qp's and configure them according to rss map */
int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
	struct mlx4_qp_context context;
	struct mlx4_rss_context *rss_context;
	int rss_rings;
	void *ptr;
	u8 rss_mask = (MLX4_RSS_IPV4 | MLX4_RSS_TCP_IPV4 | MLX4_RSS_IPV6 |
			MLX4_RSS_TCP_IPV6);
	int i;
	int err = 0;
	int good_qps = 0;
	static const u32 rsskey[10] = { 0xD181C62C, 0xF7F4DB5B, 0x1983A2FC,
				0x943E1ADB, 0xD9389E6B, 0xD1039C2C, 0xA74499AD,
				0x593D56D9, 0xF3253C06, 0x2ADC1FFC};

	en_dbg(DRV, priv, "Configuring rss steering\n");
	err = mlx4_qp_reserve_range(mdev->dev, priv->rx_ring_num,
				    priv->rx_ring_num,
				    &rss_map->base_qpn, 0);
	if (err) {
		en_err(priv, "Failed reserving %d qps\n", priv->rx_ring_num);
		return err;
	}

	for (i = 0; i < priv->rx_ring_num; i++) {
		priv->rx_ring[i]->qpn = rss_map->base_qpn + i;
		err = mlx4_en_config_rss_qp(priv, priv->rx_ring[i]->qpn,
					    priv->rx_ring[i],
					    &rss_map->state[i],
					    &rss_map->qps[i]);
		if (err)
			goto rss_err;

		++good_qps;
	}

	/* Configure RSS indirection qp */
	err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp);
	if (err) {
		en_err(priv, "Failed to allocate RSS indirection QP\n");
		goto rss_err;
	}
	rss_map->indir_qp.event = mlx4_en_sqp_event;
	mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
				priv->rx_ring[0]->cqn, -1, &context);

	if (!priv->prof->rss_rings || priv->prof->rss_rings > priv->rx_ring_num)
		rss_rings = priv->rx_ring_num;
	else
		rss_rings = priv->prof->rss_rings;

	ptr = ((u8 *)&context) + offsetof(struct mlx4_qp_context, pri_path) +
	    MLX4_RSS_OFFSET_IN_QPC_PRI_PATH;
	rss_context = ptr;
	rss_context->base_qpn = cpu_to_be32(ilog2(rss_rings) << 24 |
					    (rss_map->base_qpn));
	rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
	if (priv->mdev->profile.udp_rss) {
		rss_mask |= MLX4_RSS_UDP_IPV4 | MLX4_RSS_UDP_IPV6;
		rss_context->base_qpn_udp = rss_context->default_qpn;
	}
	rss_context->flags = rss_mask;
	rss_context->hash_fn = MLX4_RSS_HASH_TOP;
	for (i = 0; i < 10; i++)
		rss_context->rss_key[i] = cpu_to_be32(rsskey[i]);

	err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context,
			       &rss_map->indir_qp, &rss_map->indir_state);
	if (err)
		goto indir_err;

	return 0;

indir_err:
	mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
		       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
	mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
	mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
rss_err:
	for (i = 0; i < good_qps; i++) {
		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
			       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
		mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
		mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
	}
	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
	return err;
}

void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
	int i;

	mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
		       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
	mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
	mlx4_qp_free(mdev->dev, &rss_map->indir_qp);

	for (i = 0; i < priv->rx_ring_num; i++) {
		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
			       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
		mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
		mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
	}
	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
}