1219820Sjeff/*
2272407Shselasky * Copyright (c) 2007, 2014 Mellanox Technologies. All rights reserved.
3219820Sjeff *
4219820Sjeff * This software is available to you under a choice of one of two
5219820Sjeff * licenses.  You may choose to be licensed under the terms of the GNU
6219820Sjeff * General Public License (GPL) Version 2, available from the file
7219820Sjeff * COPYING in the main directory of this source tree, or the
8219820Sjeff * OpenIB.org BSD license below:
9219820Sjeff *
10219820Sjeff *     Redistribution and use in source and binary forms, with or
11219820Sjeff *     without modification, are permitted provided that the following
12219820Sjeff *     conditions are met:
13219820Sjeff *
14219820Sjeff *      - Redistributions of source code must retain the above
15219820Sjeff *        copyright notice, this list of conditions and the following
16219820Sjeff *        disclaimer.
17219820Sjeff *
18219820Sjeff *      - Redistributions in binary form must reproduce the above
19219820Sjeff *        copyright notice, this list of conditions and the following
20219820Sjeff *        disclaimer in the documentation and/or other materials
21219820Sjeff *        provided with the distribution.
22219820Sjeff *
23219820Sjeff * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24219820Sjeff * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25219820Sjeff * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26219820Sjeff * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27219820Sjeff * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28219820Sjeff * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29219820Sjeff * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30219820Sjeff * SOFTWARE.
31219820Sjeff *
32219820Sjeff */
33219820Sjeff
34219820Sjeff#ifndef _MLX4_EN_H_
35219820Sjeff#define _MLX4_EN_H_
36219820Sjeff
37272407Shselasky#include <linux/bitops.h>
38219820Sjeff#include <linux/compiler.h>
39219820Sjeff#include <linux/list.h>
40219820Sjeff#include <linux/mutex.h>
41272407Shselasky#include <linux/kobject.h>
42219820Sjeff#include <linux/netdevice.h>
43272407Shselasky#include <linux/if_vlan.h>
44272407Shselasky#include <linux/if_ether.h>
45272407Shselasky#ifdef CONFIG_MLX4_EN_DCB
46272407Shselasky#include <linux/dcbnl.h>
47272407Shselasky#endif
48219820Sjeff
49219820Sjeff#include <linux/mlx4/device.h>
50219820Sjeff#include <linux/mlx4/qp.h>
51219820Sjeff#include <linux/mlx4/cq.h>
52219820Sjeff#include <linux/mlx4/srq.h>
53219820Sjeff#include <linux/mlx4/doorbell.h>
54219820Sjeff#include <linux/mlx4/cmd.h>
55219820Sjeff
56219820Sjeff#include <netinet/tcp_lro.h>
57219820Sjeff
58219820Sjeff#include "en_port.h"
59272407Shselasky#include "mlx4_stats.h"
60219820Sjeff
#define DRV_NAME	"mlx4_en"
#define DRV_VERSION	"2.1"
/* NOTE(review): __DATE__ makes builds non-reproducible — consider a fixed
 * release date string. */
#define DRV_RELDATE	__DATE__

/* Default netif message level: link and interface-down events only. */
#define MLX4_EN_MSG_LEVEL	(NETIF_MSG_LINK | NETIF_MSG_IFDOWN)

/*
 * Device constants
 */


#define MLX4_EN_PAGE_SHIFT	12
#define MLX4_EN_PAGE_SIZE	(1 << MLX4_EN_PAGE_SHIFT)
/* RX ring count limits (per port). */
#define DEF_RX_RINGS		16
#define MAX_RX_RINGS		128
#define MIN_RX_RINGS		4
/* TX basic block: the hardware consumes TX descriptors in 64-byte units. */
#define TXBB_SIZE		64
#define HEADROOM		(2048 / TXBB_SIZE + 1)
/* Stamping: completed TXBBs are overwritten with STAMP_VAL so ownership
 * can be detected (STAMP_SHIFT selects the ownership bit). */
#define STAMP_STRIDE		64
#define STAMP_DWORDS		(STAMP_STRIDE / 4)
#define STAMP_SHIFT		31
#define STAMP_VAL		0x7fffffff
/* Periodic task intervals, in ticks. */
#define STATS_DELAY		(HZ / 4)
#define SERVICE_TASK_DELAY	(HZ / 4)
#define MAX_NUM_OF_FS_RULES	256

#define MLX4_EN_FILTER_HASH_SHIFT 4
#define MLX4_EN_FILTER_EXPIRY_QUOTA 60

#ifdef CONFIG_NET_RX_BUSY_POLL
#define LL_EXTENDED_STATS
#endif

/* vlan valid range */
#define VLAN_MIN_VALUE		1
#define VLAN_MAX_VALUE		4094

/* Typical TSO descriptor with 16 gather entries is 352 bytes... */
#define MAX_DESC_SIZE		512
#define MAX_DESC_TXBBS		(MAX_DESC_SIZE / TXBB_SIZE)

/*
 * OS related constants and tunables
 */

#define MLX4_EN_WATCHDOG_TIMEOUT	(15 * HZ)

#define MLX4_EN_ALLOC_SIZE     PAGE_ALIGN(PAGE_SIZE)
#define MLX4_EN_ALLOC_ORDER    get_order(MLX4_EN_ALLOC_SIZE)
110219820Sjeff
/* How an RX buffer allocation is being made: a brand new buffer, or a
 * replacement for one handed up the stack. */
enum mlx4_en_alloc_type {
	MLX4_EN_ALLOC_NEW = 0,
	MLX4_EN_ALLOC_REPLACEMENT = 1,
};
115272407Shselasky
116219820Sjeff/* Receive fragment sizes; we use at most 3 fragments (for 9600 byte MTU
117219820Sjeff * and 4K allocations) */
118219820Sjeff#if MJUMPAGESIZE == 4096
119219820Sjeffenum {
120219820Sjeff	FRAG_SZ0 = MCLBYTES,
121219820Sjeff	FRAG_SZ1 = MJUMPAGESIZE,
122219820Sjeff	FRAG_SZ2 = MJUMPAGESIZE,
123219820Sjeff};
124219820Sjeff#define MLX4_EN_MAX_RX_FRAGS	3
125219820Sjeff#elif MJUMPAGESIZE == 8192
126219820Sjeffenum {
127219820Sjeff	FRAG_SZ0 = MCLBYTES,
128219820Sjeff	FRAG_SZ1 = MJUMPAGESIZE,
129219820Sjeff};
130219820Sjeff#define MLX4_EN_MAX_RX_FRAGS	2
131219820Sjeff#elif MJUMPAGESIZE == 8192
132219820Sjeff#else
133219820Sjeff#error	"Unknown PAGE_SIZE"
134219820Sjeff#endif
135219820Sjeff
/* Default TX queue size (the comment here previously said "Maximum ring
 * sizes", duplicating the heading further below). */
#define MLX4_EN_DEF_TX_QUEUE_SIZE       4096

/* Minimum packet number till arming the CQ */
#define MLX4_EN_MIN_RX_ARM	2048
#define MLX4_EN_MIN_TX_ARM	2048

/* Maximum ring sizes */
#define MLX4_EN_MAX_TX_SIZE	8192
#define MLX4_EN_MAX_RX_SIZE	8192

/* Minimum ring sizes */
#define MLX4_EN_MIN_RX_SIZE	(4096 / TXBB_SIZE)
#define MLX4_EN_MIN_TX_SIZE	(4096 / TXBB_SIZE)

#define MLX4_EN_SMALL_PKT_SIZE		64

/* TX rings per user priority, and number of priorities. */
#define MLX4_EN_MAX_TX_RING_P_UP	32
#define MLX4_EN_NUM_UP			1

#define MAX_TX_RINGS			(MLX4_EN_MAX_TX_RING_P_UP * \
					 (MLX4_EN_NUM_UP + 1))

#define MLX4_EN_DEF_TX_RING_SIZE	1024
#define MLX4_EN_DEF_RX_RING_SIZE  	1024

/* Target number of bytes to coalesce with interrupt moderation */
#define MLX4_EN_RX_COAL_TARGET	0x20000
#define MLX4_EN_RX_COAL_TIME	0x10

/* TX interrupt coalescing: packets per interrupt / usecs. */
#define MLX4_EN_TX_COAL_PKTS	64
#define MLX4_EN_TX_COAL_TIME	64

/* Adaptive RX moderation thresholds (packet rates and coalescing times). */
#define MLX4_EN_RX_RATE_LOW		400000
#define MLX4_EN_RX_COAL_TIME_LOW	0
#define MLX4_EN_RX_RATE_HIGH		450000
#define MLX4_EN_RX_COAL_TIME_HIGH	128
#define MLX4_EN_RX_SIZE_THRESH		1024
#define MLX4_EN_RX_RATE_THRESH		(1000000 / MLX4_EN_RX_COAL_TIME_HIGH)
#define MLX4_EN_SAMPLE_INTERVAL		0
#define MLX4_EN_AVG_PKT_SMALL		256

#define MLX4_EN_AUTO_CONF	0xffff

/* Flow control defaults: RX/TX pause enabled. */
#define MLX4_EN_DEF_RX_PAUSE	1
#define MLX4_EN_DEF_TX_PAUSE	1

/* Interval between successive polls in the Tx routine when polling is used
   instead of interrupts (in per-core Tx rings) - should be power of 2 */
#define MLX4_EN_TX_POLL_MODER	16
#define MLX4_EN_TX_POLL_TIMEOUT	(HZ / 4)

#define MLX4_EN_64_ALIGN	(64 - NET_SKB_PAD)
#define SMALL_PACKET_SIZE      (256 - NET_IP_ALIGN)
#define HEADER_COPY_SIZE       (128)
#define MLX4_LOOPBACK_TEST_PAYLOAD (HEADER_COPY_SIZE - ETHER_HDR_LEN)

#define MLX4_EN_MIN_MTU		46
#define ETH_BCAST		0xffffffffffffULL

#define MLX4_EN_LOOPBACK_RETRIES	5
#define MLX4_EN_LOOPBACK_TIMEOUT	100

#ifdef MLX4_EN_PERF_STAT
/* Number of samples to 'average' */
#define AVG_SIZE			128
#define AVG_FACTOR			1024

#define INC_PERF_COUNTER(cnt)		(++(cnt))
#define ADD_PERF_COUNTER(cnt, add)	((cnt) += (add))
/* Exponential moving average, scaled by AVG_FACTOR to keep precision. */
#define AVG_PERF_COUNTER(cnt, sample) \
	((cnt) = ((cnt) * (AVG_SIZE - 1) + (sample) * AVG_FACTOR) / AVG_SIZE)
#define GET_PERF_COUNTER(cnt)		(cnt)
#define GET_AVG_PERF_COUNTER(cnt)	((cnt) / AVG_FACTOR)

#else

/* Perf stats compiled out: counters collapse to no-ops returning 0. */
#define INC_PERF_COUNTER(cnt)		do {} while (0)
#define ADD_PERF_COUNTER(cnt, add)	do {} while (0)
#define AVG_PERF_COUNTER(cnt, sample)	do {} while (0)
#define GET_PERF_COUNTER(cnt)		(0)
#define GET_AVG_PERF_COUNTER(cnt)	(0)
#endif /* MLX4_EN_PERF_STAT */
223219820Sjeff
/* Direction a completion queue serves: receive or transmit. */
enum cq_type {
	RX = 0,
	TX = 1,
};
228219820Sjeff
229219820Sjeff
/*
 * Useful macros
 */
#define ROUNDUP_LOG2(x)		ilog2(roundup_pow_of_two(x))
#define XNOR(x, y)		(!(x) == !(y))
/* True for the two MAC values that may never be assigned: all-ones
 * (broadcast) and all-zeros.  The argument is parenthesized so that
 * compound expressions (e.g. conditional operators) expand correctly;
 * the original `addr == ...` form mis-parsed such arguments. */
#define ILLEGAL_MAC(addr)	((addr) == 0xffffffffffffULL || (addr) == 0x0)
236219820Sjeff
237219820Sjeffstruct mlx4_en_tx_info {
238272407Shselasky        struct mbuf *mb;
239272407Shselasky        u32 nr_txbb;
240272407Shselasky	u32 nr_bytes;
241272407Shselasky        u8 linear;
242272407Shselasky        u8 nr_segs;
243272407Shselasky        u8 data_offset;
244272407Shselasky        u8 inl;
245272407Shselasky#if 0
246272407Shselasky	u8 ts_requested;
247272407Shselasky#endif
248219820Sjeff};
249219820Sjeff
250219820Sjeff
/* Hardware-ownership bit of a TX descriptor. */
#define MLX4_EN_BIT_DESC_OWN	0x80000000
#define CTRL_SIZE	sizeof(struct mlx4_wqe_ctrl_seg)
#define MLX4_EN_MEMTYPE_PAD	0x100
#define DS_SIZE		sizeof(struct mlx4_wqe_data_seg)
255219820Sjeff
256219820Sjeff
/* Layout of a TX WQE: a control segment followed by data, LSO, or
 * inline segments depending on the send type. */
struct mlx4_en_tx_desc {
	struct mlx4_wqe_ctrl_seg ctrl;
	union {
		struct mlx4_wqe_data_seg data; /* at least one data segment */
		struct mlx4_wqe_lso_seg lso;
		struct mlx4_wqe_inline_seg inl;
	};
};
265219820Sjeff
#define MLX4_EN_USE_SRQ		0x01000000

/* Per-pass completion budgets.  The TX budget compensates for the lack
 * of NAPI on FreeBSD and may need tuning.  (Fix: the expansion is now
 * parenthesized — the bare `64*4` mis-expanded in larger expressions.) */
#define MLX4_EN_TX_BUDGET	(64 * 4)
#define MLX4_EN_RX_BUDGET	64

/* PCI device-ID range covering ConnectX-3 adapters. */
#define MLX4_EN_CX3_LOW_ID	0x1000
#define MLX4_EN_CX3_HIGH_ID	0x1005
273272407Shselasky
/* State of one transmit ring: hardware queue resources, producer and
 * consumer indices, the per-slot tx_info array, and statistics.
 * Field order is layout-sensitive; do not reorder. */
struct mlx4_en_tx_ring {
        spinlock_t tx_lock;
	struct mlx4_hwq_resources wqres;
	u32 size ; /* number of TXBBs */
	u32 size_mask;
	u16 stride;
	u16 cqn;	/* index of port CQ associated with this ring */
	u32 prod;	/* producer index (next TXBB to fill) */
	u32 cons;	/* consumer index (next TXBB to complete) */
	u32 buf_size;
	u32 doorbell_qpn;
	void *buf;
	u16 poll_cnt;
	int blocked;
	struct mlx4_en_tx_info *tx_info;	/* one entry per descriptor slot */
	u8 *bounce_buf;	/* staging area for WQEs that wrap the ring end */
	u8 queue_index;
	cpuset_t affinity_mask;
	struct buf_ring *br;	/* drbr software transmit queue */
	u32 last_nr_txbb;
	struct mlx4_qp qp;
	struct mlx4_qp_context context;
	int qpn;
	enum mlx4_qp_state qp_state;
	struct mlx4_srq dummy;
	/* statistics */
	unsigned long bytes;
	unsigned long packets;
	unsigned long tx_csum;
	unsigned long queue_stopped;
	unsigned long wake_queue;
	struct mlx4_bf bf;	/* BlueFlame doorbell region */
	bool bf_enabled;
	struct netdev_queue *tx_queue;
	int hwtstamp_tx_type;
	spinlock_t comp_lock;
	int full_size;
	int inline_thold;	/* inline payloads up to this size */
	u64 watchdog_time;
};
313219820Sjeff
/* An RX WQE is simply a run of scatter entries.  data[0] is a GNU
 * zero-length array (it cannot be a C99 flexible array member because a
 * FAM may not be a struct's only member). */
struct mlx4_en_rx_desc {
	/* actual number of entries depends on rx ring stride */
	struct mlx4_wqe_data_seg data[0];
};
318219820Sjeff
/* One DMA-mapped receive buffer fragment. */
struct mlx4_en_rx_buf {
	dma_addr_t dma;		/* bus address handed to the HCA */
	struct page *page;
	unsigned int page_offset;
};
324272407Shselasky
/* State of one receive ring: queue resources, indices, buffer-sizing
 * parameters, statistics, and the software LRO context.
 * Field order is layout-sensitive; do not reorder. */
struct mlx4_en_rx_ring {
	struct mlx4_hwq_resources wqres;
	u32 size ;	/* number of Rx descs*/
	u32 actual_size;
	u32 size_mask;
	u16 stride;
	u16 log_stride;
	u16 cqn;	/* index of port CQ associated with this ring */
	u32 prod;	/* producer index */
	u32 cons;	/* consumer index */
	u32 buf_size;
	u8  fcs_del;	/* non-zero when HW strips the Ethernet FCS */
	u16 rx_alloc_order;
	u32 rx_alloc_size;
	u32 rx_buf_size;
	u32 rx_mb_size;
	int qpn;
	void *buf;
	void *rx_info;
	/* statistics */
	unsigned long errors;
	unsigned long bytes;
	unsigned long packets;
#ifdef LL_EXTENDED_STATS
	unsigned long yields;
	unsigned long misses;
	unsigned long cleaned;
#endif
	unsigned long csum_ok;
	unsigned long csum_none;
	int hwtstamp_rx_filter;
	int numa_node;
	struct lro_ctrl lro;	/* FreeBSD tcp_lro state for this ring */
};
358219820Sjeff
359219820Sjeffstatic inline int mlx4_en_can_lro(__be16 status)
360219820Sjeff{
361272407Shselasky	static __be16 status_all;
362272407Shselasky	static __be16 status_ipv4_ipok_tcp;
363272407Shselasky	static __be16 status_ipv6_ipok_tcp;
364272407Shselasky
365272407Shselasky	status_all                         = cpu_to_be16(
366272407Shselasky			MLX4_CQE_STATUS_IPV4    |
367272407Shselasky			MLX4_CQE_STATUS_IPV4F   |
368272407Shselasky			MLX4_CQE_STATUS_IPV6    |
369272407Shselasky			MLX4_CQE_STATUS_IPV4OPT |
370272407Shselasky			MLX4_CQE_STATUS_TCP     |
371272407Shselasky			MLX4_CQE_STATUS_UDP     |
372272407Shselasky			MLX4_CQE_STATUS_IPOK);
373272407Shselasky	status_ipv4_ipok_tcp               = cpu_to_be16(
374272407Shselasky			MLX4_CQE_STATUS_IPV4    |
375272407Shselasky			MLX4_CQE_STATUS_IPOK    |
376272407Shselasky			MLX4_CQE_STATUS_TCP);
377272407Shselasky	status_ipv6_ipok_tcp               = cpu_to_be16(
378272407Shselasky			MLX4_CQE_STATUS_IPV6    |
379272407Shselasky			MLX4_CQE_STATUS_IPOK    |
380272407Shselasky			MLX4_CQE_STATUS_TCP);
381272407Shselasky
382272407Shselasky	status &= status_all;
383272407Shselasky	return (status == status_ipv4_ipok_tcp ||
384272407Shselasky			status == status_ipv6_ipok_tcp);
385219820Sjeff}
386219820Sjeff
387272407Shselasky
/* Completion queue wrapper: the mlx4 core CQ plus driver-side polling,
 * moderation, taskqueue, and (optionally) busy-poll locking state. */
struct mlx4_en_cq {
	struct mlx4_cq          mcq;
	struct mlx4_hwq_resources wqres;
	int                     ring;	/* ring index this CQ serves */
	spinlock_t              lock;
	struct net_device      *dev;
        /* Per-core Tx cq processing support */
        struct timer_list timer;
	int size;
	int buf_size;
	unsigned vector;	/* MSI-X vector */
	enum cq_type is_tx;	/* RX or TX (see enum cq_type) */
	u16 moder_time;		/* interrupt moderation time */
	u16 moder_cnt;		/* interrupt moderation packet count */
	struct mlx4_cqe *buf;
	struct task cq_task;
	struct taskqueue *tq;
#define MLX4_EN_OPCODE_ERROR	0x1e
	u32 tot_rx;
	u32 tot_tx;

#ifdef CONFIG_NET_RX_BUSY_POLL
	/* Busy-poll ownership state machine; see the mlx4_en_cq_lock_*
	 * helpers below for the transitions. */
	unsigned int state;
#define MLX4_EN_CQ_STATEIDLE        0
#define MLX4_EN_CQ_STATENAPI     1    /* NAPI owns this CQ */
#define MLX4_EN_CQ_STATEPOLL     2    /* poll owns this CQ */
#define MLX4_CQ_LOCKED (MLX4_EN_CQ_STATENAPI | MLX4_EN_CQ_STATEPOLL)
#define MLX4_EN_CQ_STATENAPI_YIELD  4    /* NAPI yielded this CQ */
#define MLX4_EN_CQ_STATEPOLL_YIELD  8    /* poll yielded this CQ */
#define CQ_YIELD (MLX4_EN_CQ_STATENAPI_YIELD | MLX4_EN_CQ_STATEPOLL_YIELD)
#define CQ_USER_PEND (MLX4_EN_CQ_STATEPOLL | MLX4_EN_CQ_STATEPOLL_YIELD)
	spinlock_t poll_lock; /* protects from LLS/napi conflicts */
#endif  /* CONFIG_NET_RX_BUSY_POLL */
};
422219820Sjeff
/* Per-port configuration chosen at load time (ring counts and sizes,
 * flow control, RSS). */
struct mlx4_en_port_profile {
	u32 flags;
	u32 tx_ring_num;
	u32 rx_ring_num;
	u32 tx_ring_size;
	u32 rx_ring_size;
	u8 rx_pause;	/* RX flow control enabled */
	u8 rx_ppp;	/* RX per-priority pause bitmap */
	u8 tx_pause;	/* TX flow control enabled */
	u8 tx_ppp;	/* TX per-priority pause bitmap */
	int rss_rings;
};
435219820Sjeff
/* Device-wide configuration plus one port profile per physical port
 * (array is 1-based, hence MLX4_MAX_PORTS + 1). */
struct mlx4_en_profile {
	int rss_xor;
	int udp_rss;
	u8 rss_mask;
	u32 active_ports;
	u32 small_pkt_int;
	u8 no_reset;
	u8 num_tx_rings_p_up;
	struct mlx4_en_port_profile prof[MLX4_MAX_PORTS + 1];
};
446219820Sjeff
/* Per-HCA Ethernet state shared by all of its ports. */
struct mlx4_en_dev {
	struct mlx4_dev		*dev;	/* mlx4 core device */
	struct pci_dev		*pdev;
	struct mutex		state_lock;
	struct net_device	*pndev[MLX4_MAX_PORTS + 1];	/* 1-based */
	u32			port_cnt;
	bool			device_up;
	struct mlx4_en_profile	profile;
	u32			LSO_support;
	struct workqueue_struct *workqueue;
	struct device		*dma_device;
	void __iomem		*uar_map;
	struct mlx4_uar		priv_uar;
	struct mlx4_mr		mr;
	u32			priv_pdn;	/* protection domain number */
	spinlock_t		uar_lock;
	u8			mac_removed[MLX4_MAX_PORTS + 1];
	unsigned long		last_overflow_check;
	unsigned long		overflow_period;
};
467219820Sjeff
468219820Sjeff
/* RSS setup: one QP per RX ring plus the indirection QP that spreads
 * incoming flows across them. */
struct mlx4_en_rss_map {
	int base_qpn;
	struct mlx4_qp qps[MAX_RX_RINGS];
	enum mlx4_qp_state state[MAX_RX_RINGS];
	struct mlx4_qp indir_qp;
	enum mlx4_qp_state indir_state;
};
476219820Sjeff
/* Last link status queried from the port. */
struct mlx4_en_port_state {
	int link_state;
	int link_speed;
	int transciver;	/* sic — misspelling of "transceiver"; kept for ABI */
	int autoneg;
};
483219820Sjeff
/* Pending action for a multicast list entry. */
enum mlx4_en_mclist_act {
	MCLIST_NONE,
	MCLIST_REM,	/* remove from hardware filter */
	MCLIST_ADD,	/* attach to hardware filter */
};
489219820Sjeff
/* One tracked multicast address with its pending action and the
 * registration id returned by the attach call. */
struct mlx4_en_mc_list {
	struct list_head	list;
	enum mlx4_en_mclist_act	action;
	u8			addr[ETH_ALEN];
	u64			reg_id;
};
496219820Sjeff
#ifdef CONFIG_MLX4_EN_DCB
/* Minimal TC BW - setting to 0 will block traffic */
#define MLX4_EN_BW_MIN 1
#define MLX4_EN_BW_MAX 100 /* Utilize 100% of the line */

/* Traffic class used for ETS arbitration. */
#define MLX4_EN_TC_ETS 7

#endif
506272407Shselasky
/* Bits for mlx4_en_priv.flags. */
enum {
	MLX4_EN_FLAG_PROMISC		= (1 << 0),
	MLX4_EN_FLAG_MC_PROMISC		= (1 << 1),
	/* whether we need to enable hardware loopback by putting dmac
	 * in Tx WQE
	 */
	MLX4_EN_FLAG_ENABLE_HW_LOOPBACK	= (1 << 2),
	/* whether we need to drop packets that hardware loopback-ed */
	MLX4_EN_FLAG_RX_FILTER_NEEDED	= (1 << 3),
	MLX4_EN_FLAG_FORCE_PROMISC	= (1 << 4),
#ifdef CONFIG_MLX4_EN_DCB
	MLX4_EN_FLAG_DCB_ENABLED	= (1 << 5)
#endif
};
521219820Sjeff
/* Unicast MAC hash table: one bucket per value of the hashed byte.
 * NOTE(review): MLX4_EN_MAC_HASH_IDX is identically re-defined inside
 * struct mlx4_en_priv below — legal, but one definition should go. */
#define MLX4_EN_MAC_HASH_SIZE (1 << BITS_PER_BYTE)
#define MLX4_EN_MAC_HASH_IDX 5
524272407Shselasky
/* sysfs/kobject handle for a (port, vport) pair — used for per-VF
 * statistics exposure. */
struct en_port {
	struct kobject		kobj;
	struct mlx4_dev		*dev;
	u8			port_num;
	u8			vport_num;
};
531219820Sjeff
532272407Shselaskystruct mlx4_en_frag_info {
533272407Shselasky        u16 frag_size;
534272407Shselasky        u16 frag_prefix_size;
535219820Sjeff};
536219820Sjeff
537272407Shselasky
/* Per-port (per-netdevice) driver state: rings, CQs, moderation
 * parameters, statistics, sysctl/ifmedia glue, and VLAN/MAC filters.
 * Field order is relied on by the rest of the driver; do not reorder. */
struct mlx4_en_priv {
	struct mlx4_en_dev *mdev;	/* owning HCA-level state */
	struct mlx4_en_port_profile *prof;
	struct net_device *dev;
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
	struct mlx4_en_port_state port_state;
	spinlock_t stats_lock;
	/* To allow rules removal while port is going down */
	struct list_head ethtool_list;

	/* Adaptive interrupt-moderation bookkeeping (per RX ring). */
	unsigned long last_moder_packets[MAX_RX_RINGS];
	unsigned long last_moder_tx_packets;
	unsigned long last_moder_bytes[MAX_RX_RINGS];
	unsigned long last_moder_jiffies;
	int last_moder_time[MAX_RX_RINGS];
	u16 rx_usecs;
	u16 rx_frames;
	u16 tx_usecs;
	u16 tx_frames;
	u32 pkt_rate_low;
	u16 rx_usecs_low;
	u32 pkt_rate_high;
	u16 rx_usecs_high;
	u16 sample_interval;
	u16 adaptive_rx_coal;
	u32 msg_enable;
	u32 loopback_ok;
	u32 validate_loopback;

	struct mlx4_hwq_resources res;
	int link_state;
	int last_link_state;
	bool port_up;
	int port;	/* physical port number (1-based) */
	int registered;
	int allocated;
	int stride;
	unsigned char current_mac[ETH_ALEN + 2];
        u64 mac;
	int mac_index;
	unsigned max_mtu;
	int base_qpn;
	int cqe_factor;	/* CQE stride factor (64B vs 32B CQEs) */

	struct mlx4_en_rss_map rss_map;
	__be32 ctrl_flags;
	u32 flags;	/* MLX4_EN_FLAG_* bits */
	u8 num_tx_rings_p_up;
	u32 tx_ring_num;
	u32 rx_ring_num;
	u32 rx_mb_size;
        struct mlx4_en_frag_info frag_info[MLX4_EN_MAX_RX_FRAGS];
	u16 rx_alloc_order;
	u32 rx_alloc_size;
	u32 rx_buf_size;
        u16 num_frags;
	u16 log_rx_info;

	struct mlx4_en_tx_ring **tx_ring;
	struct mlx4_en_rx_ring *rx_ring[MAX_RX_RINGS];
	struct mlx4_en_cq **tx_cq;
	struct mlx4_en_cq *rx_cq[MAX_RX_RINGS];
	struct mlx4_qp drop_qp;
	struct work_struct rx_mode_task;
	struct work_struct watchdog_task;
	struct work_struct linkstate_task;
	struct delayed_work stats_task;
	struct delayed_work service_task;
	/* statistics blocks */
	struct mlx4_en_perf_stats pstats;
	struct mlx4_en_pkt_stats pkstats;
	struct mlx4_en_flow_stats flowstats[MLX4_NUM_PRIORITIES];
	struct mlx4_en_port_stats port_stats;
	struct mlx4_en_vport_stats vport_stats;
	struct mlx4_en_vf_stats vf_stats;
	DECLARE_BITMAP(stats_bitmap, NUM_ALL_STATS);
	struct list_head mc_list;	/* tracked multicast addresses */
	struct list_head curr_list;
	u64 broadcast_id;
	struct mlx4_en_stat_out_mbox hw_stats;
	int vids[128];
	bool wol;
	struct device *ddev;
	struct dentry *dev_root;
	u32 counter_index;
	/* FreeBSD integration: VLAN events, watchdog, media, sysctl. */
	eventhandler_tag vlan_attach;
	eventhandler_tag vlan_detach;
	struct callout watchdog_timer;
        struct ifmedia media;
	volatile int blocked;
	struct sysctl_oid *sysctl;
	struct sysctl_ctx_list conf_ctx;
	struct sysctl_ctx_list stat_ctx;
/* NOTE(review): duplicate of the file-scope MLX4_EN_MAC_HASH_IDX above. */
#define MLX4_EN_MAC_HASH_IDX 5
	struct hlist_head mac_hash[MLX4_EN_MAC_HASH_SIZE];

#ifdef CONFIG_MLX4_EN_DCB
	struct ieee_ets ets;
	u16 maxrate[IEEE_8021QAZ_MAX_TCS];
	u8 dcbx_cap;
#endif
#ifdef CONFIG_RFS_ACCEL
	spinlock_t filters_lock;
	int last_filter_id;
	struct list_head filters;
	struct hlist_head filter_hash[1 << MLX4_EN_FILTER_HASH_SHIFT];
#endif
	struct en_port *vf_ports[MLX4_MAX_NUM_VF];
	unsigned long last_ifq_jiffies;
	u64 if_counters_rx_errors;
	u64 if_counters_rx_no_buffer;

};
650219820Sjeff
/* Wake-on-LAN control bits (high bits of the WOL config word). */
enum mlx4_en_wol {
	MLX4_EN_WOL_MAGIC = (1ULL << 61),
	MLX4_EN_WOL_ENABLED = (1ULL << 62),
};
655219820Sjeff
/* Entry of the unicast MAC hash table (mac_hash in mlx4_en_priv). */
struct mlx4_mac_entry {
	struct hlist_node hlist;
	unsigned char mac[ETH_ALEN + 2];
	u64 reg_id;	/* id returned when the MAC was registered */
};
661219820Sjeff
662272407Shselasky#ifdef CONFIG_NET_RX_BUSY_POLL
663272407Shselaskystatic inline void mlx4_en_cq_init_lock(struct mlx4_en_cq *cq)
664272407Shselasky{
665272407Shselasky	spin_lock_init(&cq->poll_lock);
666272407Shselasky	cq->state = MLX4_EN_CQ_STATEIDLE;
667272407Shselasky}
668272407Shselasky
669272407Shselasky/* called from the device poll rutine to get ownership of a cq */
670272407Shselaskystatic inline bool mlx4_en_cq_lock_napi(struct mlx4_en_cq *cq)
671272407Shselasky{
672272407Shselasky	int rc = true;
673272407Shselasky	spin_lock(&cq->poll_lock);
674272407Shselasky	if (cq->state & MLX4_CQ_LOCKED) {
675272407Shselasky		WARN_ON(cq->state & MLX4_EN_CQ_STATENAPI);
676272407Shselasky		cq->state |= MLX4_EN_CQ_STATENAPI_YIELD;
677272407Shselasky		rc = false;
678272407Shselasky	} else
679272407Shselasky		/* we don't care if someone yielded */
680272407Shselasky		cq->state = MLX4_EN_CQ_STATENAPI;
681272407Shselasky	spin_unlock(&cq->poll_lock);
682272407Shselasky	return rc;
683272407Shselasky}
684272407Shselasky
685272407Shselasky/* returns true is someone tried to get the cq while napi had it */
686272407Shselaskystatic inline bool mlx4_en_cq_unlock_napi(struct mlx4_en_cq *cq)
687272407Shselasky{
688272407Shselasky	int rc = false;
689272407Shselasky	spin_lock(&cq->poll_lock);
690272407Shselasky	WARN_ON(cq->state & (MLX4_EN_CQ_STATEPOLL |
691272407Shselasky			     MLX4_EN_CQ_STATENAPI_YIELD));
692272407Shselasky
693272407Shselasky	if (cq->state & MLX4_EN_CQ_STATEPOLL_YIELD)
694272407Shselasky		rc = true;
695272407Shselasky	cq->state = MLX4_EN_CQ_STATEIDLE;
696272407Shselasky	spin_unlock(&cq->poll_lock);
697272407Shselasky	return rc;
698272407Shselasky}
699272407Shselasky
700272407Shselasky/* called from mlx4_en_low_latency_poll() */
701272407Shselaskystatic inline bool mlx4_en_cq_lock_poll(struct mlx4_en_cq *cq)
702272407Shselasky{
703272407Shselasky	int rc = true;
704272407Shselasky	spin_lock_bh(&cq->poll_lock);
705272407Shselasky	if ((cq->state & MLX4_CQ_LOCKED)) {
706272407Shselasky		struct net_device *dev = cq->dev;
707272407Shselasky		struct mlx4_en_priv *priv = netdev_priv(dev);
708272407Shselasky		struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring];
709272407Shselasky
710272407Shselasky		cq->state |= MLX4_EN_CQ_STATEPOLL_YIELD;
711272407Shselasky		rc = false;
712272407Shselasky#ifdef LL_EXTENDED_STATS
713272407Shselasky		rx_ring->yields++;
714272407Shselasky#endif
715272407Shselasky	} else
716272407Shselasky		/* preserve yield marks */
717272407Shselasky		cq->state |= MLX4_EN_CQ_STATEPOLL;
718272407Shselasky	spin_unlock_bh(&cq->poll_lock);
719272407Shselasky	return rc;
720272407Shselasky}
721272407Shselasky
722272407Shselasky/* returns true if someone tried to get the cq while it was locked */
723272407Shselaskystatic inline bool mlx4_en_cq_unlock_poll(struct mlx4_en_cq *cq)
724272407Shselasky{
725272407Shselasky	int rc = false;
726272407Shselasky	spin_lock_bh(&cq->poll_lock);
727272407Shselasky	WARN_ON(cq->state & (MLX4_EN_CQ_STATENAPI));
728272407Shselasky
729272407Shselasky	if (cq->state & MLX4_EN_CQ_STATEPOLL_YIELD)
730272407Shselasky		rc = true;
731272407Shselasky	cq->state = MLX4_EN_CQ_STATEIDLE;
732272407Shselasky	spin_unlock_bh(&cq->poll_lock);
733272407Shselasky	return rc;
734272407Shselasky}
735272407Shselasky
736272407Shselasky/* true if a socket is polling, even if it did not get the lock */
737272407Shselaskystatic inline bool mlx4_en_cq_ll_polling(struct mlx4_en_cq *cq)
738272407Shselasky{
739272407Shselasky	WARN_ON(!(cq->state & MLX4_CQ_LOCKED));
740272407Shselasky	return cq->state & CQ_USER_PEND;
741272407Shselasky}
742272407Shselasky#else
/* Busy-poll support compiled out: all helpers collapse to no-ops. */
static inline void mlx4_en_cq_init_lock(struct mlx4_en_cq *cq)
{
}

static inline bool mlx4_en_cq_lock_napi(struct mlx4_en_cq *cq)
{
	/* NAPI always "owns" the CQ when there are no pollers. */
	return true;
}

static inline bool mlx4_en_cq_unlock_napi(struct mlx4_en_cq *cq)
{
	/* No poller can contend, so never report contention. */
	return false;
}

static inline bool mlx4_en_cq_lock_poll(struct mlx4_en_cq *cq)
{
	/* Sockets never get poll ownership. */
	return false;
}

static inline bool mlx4_en_cq_unlock_poll(struct mlx4_en_cq *cq)
{
	return false;
}

static inline bool mlx4_en_cq_ll_polling(struct mlx4_en_cq *cq)
{
	return false;
}
771272407Shselasky#endif /* CONFIG_NET_RX_BUSY_POLL */
772272407Shselasky
773272407Shselasky#define MLX4_EN_WOL_DO_MODIFY (1ULL << 63)
774272407Shselasky
/* netdev lifecycle: created per physical port from the core driver. */
void mlx4_en_destroy_netdev(struct net_device *dev);
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
			struct mlx4_en_port_profile *prof);

/* Bring the port data path up/down (rings, CQs, QPs). */
int mlx4_en_start_port(struct net_device *dev);
void mlx4_en_stop_port(struct net_device *dev);

void mlx4_en_free_resources(struct mlx4_en_priv *priv);
int mlx4_en_alloc_resources(struct mlx4_en_priv *priv);

/* Completion queue (CQ) management. */
int mlx4_en_pre_config(struct mlx4_en_priv *priv);
int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq,
		      int entries, int ring, enum cq_type mode, int node);
void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq);
int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
			int cq_idx);
void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
794219820Sjeff
/* TX path (note: mbuf/ifnet types -- this is the FreeBSD port). */
void mlx4_en_tx_irq(struct mlx4_cq *mcq);
u16 mlx4_en_select_queue(struct net_device *dev, struct mbuf *mb);

int mlx4_en_transmit(struct ifnet *dev, struct mbuf *m);
int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
			   struct mlx4_en_tx_ring **pring,
			   u32 size, u16 stride, int node, int queue_idx);
void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_tx_ring **pring);
int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_tx_ring *ring,
			     int cq, int user_prio);
void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
				struct mlx4_en_tx_ring *ring);
void mlx4_en_qflush(struct ifnet *dev);

/* RX path. */
int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
			   struct mlx4_en_rx_ring **pring,
			   u32 size, int node);
void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_rx_ring **pring,
			     u32 size, u16 stride);
/* Taskqueue handlers (context = ring, pending = taskqueue count). */
void mlx4_en_tx_que(void *context, int pending);
void mlx4_en_rx_que(void *context, int pending);
int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv);
void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
				struct mlx4_en_rx_ring *ring);
int mlx4_en_process_rx_cq(struct net_device *dev,
			  struct mlx4_en_cq *cq,
			  int budget);
void mlx4_en_poll_tx_cq(unsigned long data);

/* QP / RSS setup helpers. */
void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
		int is_tx, int rss, int qpn, int cqn, int user_prio,
		struct mlx4_qp_context *context);
void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event);
int mlx4_en_map_buffer(struct mlx4_buf *buf);
void mlx4_en_unmap_buffer(struct mlx4_buf *buf);
void mlx4_en_calc_rx_buf(struct net_device *dev);

int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv);
void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv);
int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv);
void mlx4_en_destroy_drop_qp(struct mlx4_en_priv *priv);
int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring);
void mlx4_en_rx_irq(struct mlx4_cq *mcq);
840219820Sjeff
/* Firmware command wrappers (uppercase names mirror the FW command). */
int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode);
int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv);

int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset);
int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port);
int mlx4_en_get_vport_stats(struct mlx4_en_dev *mdev, u8 port);

/* debugfs hooks. */
void mlx4_en_create_debug_files(struct mlx4_en_priv *priv);
void mlx4_en_delete_debug_files(struct mlx4_en_priv *priv);
int mlx4_en_register_debugfs(void);
void mlx4_en_unregister_debugfs(void);

#ifdef CONFIG_MLX4_EN_DCB
/* Data Center Bridging (DCB) netlink operation tables. */
extern const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops;
extern const struct dcbnl_rtnl_ops mlx4_en_dcbnl_pfc_ops;
#endif

int mlx4_en_setup_tc(struct net_device *dev, u8 up);

#ifdef CONFIG_RFS_ACCEL
void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv,
			     struct mlx4_en_rx_ring *rx_ring);
#endif

/* Number of tests reported by the ethtool self-test. */
#define MLX4_EN_NUM_SELF_TEST	5
void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf);
void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev);
867219820Sjeff
/*
 * Functions for time stamping
 */
/* NOTE(review): these values appear to mirror the Linux skb_shared_info
 * tx_flags bits SKBTX_HW_TSTAMP / SKBTX_IN_PROGRESS -- confirm against
 * the corresponding skbuff definitions before changing. */
#define SKBTX_HW_TSTAMP (1 << 0)
#define SKBTX_IN_PROGRESS (1 << 2)

/* Extract the raw hardware timestamp from a completion entry. */
u64 mlx4_en_get_cqe_ts(struct mlx4_cqe *cqe);

/* Functions for caching and restoring statistics */
int mlx4_en_get_sset_count(struct net_device *dev, int sset);
void mlx4_en_restore_ethtool_stats(struct mlx4_en_priv *priv,
				    u64 *data);
880272407Shselasky
881272407Shselasky/*
882219820Sjeff * Globals
883219820Sjeff */
884219820Sjeffextern const struct ethtool_ops mlx4_en_ethtool_ops;
885272407Shselasky
886272407Shselasky/*
887272407Shselasky * Defines for link speed - needed by selftest
888272407Shselasky */
889272407Shselasky#define MLX4_EN_LINK_SPEED_1G	1000
890272407Shselasky#define MLX4_EN_LINK_SPEED_10G	10000
891272407Shselasky#define MLX4_EN_LINK_SPEED_40G	40000
892272407Shselasky
/*
 * Message-level bit flags tested against priv->msg_enable by en_dbg();
 * local equivalents of the Linux NETIF_MSG_* set.
 */
enum {
	NETIF_MSG_DRV		= 0x0001,
	NETIF_MSG_PROBE		= 0x0002,
	NETIF_MSG_LINK		= 0x0004,
	NETIF_MSG_TIMER		= 0x0008,
	NETIF_MSG_IFDOWN	= 0x0010,
	NETIF_MSG_IFUP		= 0x0020,
	NETIF_MSG_RX_ERR	= 0x0040,
	NETIF_MSG_TX_ERR	= 0x0080,
	NETIF_MSG_TX_QUEUED	= 0x0100,
	NETIF_MSG_INTR		= 0x0200,
	NETIF_MSG_TX_DONE	= 0x0400,
	NETIF_MSG_RX_STATUS	= 0x0800,
	NETIF_MSG_PKTDATA	= 0x1000,
	NETIF_MSG_HW		= 0x2000,
	NETIF_MSG_WOL		= 0x4000,
};
910272407Shselasky
911272407Shselasky
912272407Shselasky/*
913272407Shselasky * printk / logging functions
914272407Shselasky */
915272407Shselasky
/*
 * en_print(): core per-port logging macro.
 *
 * Once the netdev is registered, prefix messages with the interface
 * name; before that, fall back to the PCI device name plus port number.
 *
 * Fixes over the previous version:
 *  - wrapped in do { } while (0): a bare { } block followed by the
 *    caller's ';' breaks unbraced if/else chains (dangling-semicolon
 *    hazard) -- en_warn()/en_err()/en_info() expand to en_print()
 *    directly, so callers hit this.
 *  - the macro argument 'priv' is now parenthesized at every use.
 */
#define en_print(level, priv, format, arg...)                   \
do {                                                            \
        if ((priv)->registered)                                 \
                printk(level "%s: %s: " format, DRV_NAME,       \
                        ((priv)->dev)->if_xname, ## arg);       \
        else                                                    \
                printk(level "%s: %s: Port %d: " format,        \
                        DRV_NAME, dev_name(&(priv)->mdev->pdev->dev), \
                        (priv)->port, ## arg);                  \
} while (0)
926272407Shselasky
927272407Shselasky
/* Per-port debug print: emitted only when the NETIF_MSG_<mlevel> bit
 * is set in priv->msg_enable. */
#define en_dbg(mlevel, priv, format, arg...)			\
do {								\
	if (NETIF_MSG_##mlevel & priv->msg_enable)		\
		en_print(KERN_DEBUG, priv, format, ##arg);	\
} while (0)
/* Unconditional per-port prints at the respective kernel log levels. */
#define en_warn(priv, format, arg...)			\
	en_print(KERN_WARNING, priv, format, ##arg)
#define en_err(priv, format, arg...)			\
	en_print(KERN_ERR, priv, format, ##arg)
#define en_info(priv, format, arg...)			\
	en_print(KERN_INFO, priv, format, ## arg)

/* Device-level (not per-port) logging helpers, keyed by the mlx4_en_dev. */
#define mlx4_err(mdev, format, arg...)			\
	pr_err("%s %s: " format, DRV_NAME,		\
	       dev_name(&mdev->pdev->dev), ##arg)
#define mlx4_info(mdev, format, arg...)			\
	pr_info("%s %s: " format, DRV_NAME,		\
		dev_name(&mdev->pdev->dev), ##arg)
#define mlx4_warn(mdev, format, arg...)			\
	pr_warning("%s %s: " format, DRV_NAME,		\
		   dev_name(&mdev->pdev->dev), ##arg)
949272407Shselasky
950219820Sjeff#endif
951