device.h revision 318533
1/*
2 * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses.  You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 *     Redistribution and use in source and binary forms, with or
11 *     without modification, are permitted provided that the following
12 *     conditions are met:
13 *
14 *	- Redistributions of source code must retain the above
15 *	  copyright notice, this list of conditions and the following
16 *	  disclaimer.
17 *
18 *	- Redistributions in binary form must reproduce the above
19 *	  copyright notice, this list of conditions and the following
20 *	  disclaimer in the documentation and/or other materials
21 *	  provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#ifndef MLX4_DEVICE_H
34#define MLX4_DEVICE_H
35
36#include <linux/pci.h>
37#include <linux/completion.h>
38#include <linux/radix-tree.h>
39#include <linux/types.h>
40#include <linux/bitops.h>
41#include <linux/workqueue.h>
42#include <asm/atomic.h>
43
44#include <linux/clocksource.h>
45
46#define MAX_MSIX_P_PORT		17
47#define MAX_MSIX		64
48#define MSIX_LEGACY_SZ		4
49#define MIN_MSIX_P_PORT		5
50
51#define MLX4_ROCE_MAX_GIDS	128
52#define MLX4_ROCE_PF_GIDS	16
53
54#define MLX4_NUM_UP			8
55#define MLX4_NUM_TC			8
56#define MLX4_MAX_100M_UNITS_VAL		255	/*
57						 * workaround: can't set values
58						 * greater than this when using
59						 * 100 Mbps units.
60						 */
61#define MLX4_RATELIMIT_100M_UNITS	3	/* 100 Mbps */
62#define MLX4_RATELIMIT_1G_UNITS		4	/* 1 Gbps */
63#define MLX4_RATELIMIT_DEFAULT		0x00ff
64
65#define CORE_CLOCK_MASK 0xffffffffffffULL
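/*
 * Illustrative sketch (not part of the original header): the HCA core clock
 * is a 48-bit free-running counter, so consumers of raw hardware timestamps
 * typically mask the value before converting it.  "raw_ts" below is a
 * hypothetical 64-bit timestamp read from a CQE or from the clock BAR.
 *
 *	u64 cycles = raw_ts & CORE_CLOCK_MASK;
 */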
66
67enum {
68	MLX4_FLAG_MSI_X		= 1 << 0,
69	MLX4_FLAG_OLD_PORT_CMDS	= 1 << 1,
70	MLX4_FLAG_MASTER	= 1 << 2,
71	MLX4_FLAG_SLAVE		= 1 << 3,
72	MLX4_FLAG_SRIOV		= 1 << 4,
73	MLX4_FLAG_DEV_NUM_STR	= 1 << 5,
74	MLX4_FLAG_OLD_REG_MAC   = 1 << 6,
75};
76
77enum {
78	MLX4_PORT_CAP_IS_SM	= 1 << 1,
79	MLX4_PORT_CAP_DEV_MGMT_SUP = 1 << 19,
80};
81
82enum {
83	MLX4_MAX_PORTS		= 2,
84	MLX4_MAX_PORT_PKEYS	= 128
85};
86
87/* Base qkey for use in SR-IOV tunnel-qp/proxy-qp communication.
88 * These qkeys must not be allowed for general use. This is a 64k range,
89 * and to test for violations we use the mask (protects against future changes).
90 */
91#define MLX4_RESERVED_QKEY_BASE  (0xFFFF0000)
92#define MLX4_RESERVED_QKEY_MASK  (0xFFFF0000)
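/*
 * Illustrative sketch (not part of the original header): because the reserved
 * range is 64k-aligned, a qkey can be tested against the mask instead of
 * comparing it against two bounds.
 *
 *	static inline int qkey_is_reserved(u32 qkey)
 *	{
 *		return (qkey & MLX4_RESERVED_QKEY_MASK) ==
 *			MLX4_RESERVED_QKEY_BASE;
 *	}
 */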
93
94enum {
95	MLX4_BOARD_ID_LEN = 64,
96	MLX4_VSD_LEN = 208
97};
98
99enum {
100	MLX4_MAX_NUM_PF		= 16,
101	MLX4_MAX_NUM_VF		= 64,
102	MLX4_MFUNC_MAX		= 80,
103	MLX4_MAX_EQ_NUM		= 1024,
104	MLX4_MFUNC_EQ_NUM	= 4,
105	MLX4_MFUNC_MAX_EQES     = 8,
106	MLX4_MFUNC_EQE_MASK     = (MLX4_MFUNC_MAX_EQES - 1)
107};
108
109/* The driver supports three different methods of managing traffic steering:
110 *	- Device managed - high level API for IB and Eth flow steering;
111 *			   FW manages the flow steering tables.
112 *	- B0 steering mode - common low level API for IB and (if supported) Eth.
113 *	- A0 steering mode - limited low level API for Eth. For IB,
114 *			     B0 mode is used.
115 */
116enum {
117	MLX4_STEERING_MODE_A0,
118	MLX4_STEERING_MODE_B0,
119	MLX4_STEERING_MODE_DEVICE_MANAGED
120};
121
122static inline const char *mlx4_steering_mode_str(int steering_mode)
123{
124	switch (steering_mode) {
125	case MLX4_STEERING_MODE_A0:
126		return "A0 steering";
127
128	case MLX4_STEERING_MODE_B0:
129		return "B0 steering";
130
131	case MLX4_STEERING_MODE_DEVICE_MANAGED:
132		return "Device managed flow steering";
133
134	default:
135		return "Unrecognized steering mode";
136	}
137}
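/*
 * Illustrative sketch (not part of the original header): the helper above is
 * typically used when logging the steering mode chosen at init time.
 * mlx4_dbg() stands in for the driver's internal logging macro and is assumed
 * here; any printf-style logger works the same way.
 *
 *	mlx4_dbg(dev, "Steering mode is: %s\n",
 *		 mlx4_steering_mode_str(dev->caps.steering_mode));
 */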
138
139enum {
140	MLX4_DEV_CAP_FLAG_RC		= 1LL <<  0,
141	MLX4_DEV_CAP_FLAG_UC		= 1LL <<  1,
142	MLX4_DEV_CAP_FLAG_UD		= 1LL <<  2,
143	MLX4_DEV_CAP_FLAG_XRC		= 1LL <<  3,
144	MLX4_DEV_CAP_FLAG_SRQ		= 1LL <<  6,
145	MLX4_DEV_CAP_FLAG_IPOIB_CSUM	= 1LL <<  7,
146	MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR	= 1LL <<  8,
147	MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR	= 1LL <<  9,
148	MLX4_DEV_CAP_FLAG_DPDP		= 1LL << 12,
149	MLX4_DEV_CAP_FLAG_BLH		= 1LL << 15,
150	MLX4_DEV_CAP_FLAG_MEM_WINDOW	= 1LL << 16,
151	MLX4_DEV_CAP_FLAG_APM		= 1LL << 17,
152	MLX4_DEV_CAP_FLAG_ATOMIC	= 1LL << 18,
153	MLX4_DEV_CAP_FLAG_RAW_MCAST	= 1LL << 19,
154	MLX4_DEV_CAP_FLAG_UD_AV_PORT	= 1LL << 20,
155	MLX4_DEV_CAP_FLAG_UD_MCAST	= 1LL << 21,
156	MLX4_DEV_CAP_FLAG_IBOE		= 1LL << 30,
157	MLX4_DEV_CAP_FLAG_UC_LOOPBACK	= 1LL << 32,
158	MLX4_DEV_CAP_FLAG_FCS_KEEP	= 1LL << 34,
159	MLX4_DEV_CAP_FLAG_WOL_PORT1	= 1LL << 37,
160	MLX4_DEV_CAP_FLAG_WOL_PORT2	= 1LL << 38,
161	MLX4_DEV_CAP_FLAG_UDP_RSS	= 1LL << 40,
162	MLX4_DEV_CAP_FLAG_VEP_UC_STEER	= 1LL << 41,
163	MLX4_DEV_CAP_FLAG_VEP_MC_STEER	= 1LL << 42,
164	MLX4_DEV_CAP_FLAG_CROSS_CHANNEL	= 1LL << 44,
165	MLX4_DEV_CAP_FLAG_COUNTERS	= 1LL << 48,
166	MLX4_DEV_CAP_FLAG_COUNTERS_EXT	= 1LL << 49,
167	MLX4_DEV_CAP_FLAG_SET_PORT_ETH_SCHED = 1LL << 53,
168	MLX4_DEV_CAP_FLAG_SENSE_SUPPORT	= 1LL << 55,
169	MLX4_DEV_CAP_FLAG_FAST_DROP	= 1LL << 57,
170	MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV = 1LL << 59,
171	MLX4_DEV_CAP_FLAG_64B_EQE	= 1LL << 61,
172	MLX4_DEV_CAP_FLAG_64B_CQE	= 1LL << 62
173};
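/*
 * Illustrative sketch (not part of the original header): these capability
 * bits are reported by the firmware in mlx4_caps.flags and are normally
 * tested before enabling an optional feature, for example RoCE support:
 *
 *	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)
 *		... the device supports Ethernet/RoCE operation ...
 */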
174
175enum {
176	MLX4_DEV_CAP_FLAG2_RSS			= 1LL <<  0,
177	MLX4_DEV_CAP_FLAG2_RSS_TOP		= 1LL <<  1,
178	MLX4_DEV_CAP_FLAG2_RSS_XOR		= 1LL <<  2,
179	MLX4_DEV_CAP_FLAG2_FS_EN		= 1LL <<  3,
180	MLX4_DEV_CAP_FLAG2_FSM			= 1LL <<  4,
181	MLX4_DEV_CAP_FLAG2_VLAN_CONTROL		= 1LL <<  5,
182	MLX4_DEV_CAP_FLAG2_UPDATE_QP		= 1LL <<  6,
183	MLX4_DEV_CAP_FLAG2_LB_SRC_CHK		= 1LL <<  7,
184	MLX4_DEV_CAP_FLAG2_DMFS_IPOIB		= 1LL <<  8,
185	MLX4_DEV_CAP_FLAG2_ETS_CFG		= 1LL <<  9,
186	MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP	= 1LL <<  10,
187	MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN		= 1LL <<  11,
188	MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT = 1LL << 12,
189	MLX4_DEV_CAP_FLAG2_TS			= 1LL <<  13,
190	MLX4_DEV_CAP_FLAG2_DRIVER_VERSION_TO_FW	   = 1LL <<  14,
191	MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN	= 1LL <<  15,
192	MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS	= 1LL <<  16,
193	MLX4_DEV_CAP_FLAG2_FS_EN_NCSI		= 1LL <<  17,
194	MLX4_DEV_CAP_FLAG2_80_VFS		= 1LL <<  18,
195	MLX4_DEV_CAP_FLAG2_DMFS_TAG_MODE	= 1LL <<  19,
196	MLX4_DEV_CAP_FLAG2_ROCEV2		= 1LL <<  20,
197	MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL	= 1LL <<  21,
198	MLX4_DEV_CAP_FLAG2_CQE_STRIDE		= 1LL <<  22,
199	MLX4_DEV_CAP_FLAG2_EQE_STRIDE		= 1LL <<  23,
200	MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB = 1LL << 24,
201	MLX4_DEV_CAP_FLAG2_RX_CSUM_MODE		= 1LL <<  25,
202};
203
204/* bit enums for an 8-bit flags field indicating special use
205 * QPs which require special handling in qp_reserve_range.
206 * Currently, this only includes QPs used by the ETH interface,
207 * where we expect to use blueflame.  These QPs must not have
208 * bits 6 and 7 set in their qp number.
209 *
210 * This enum may use only bits 0..7.
211 */
212enum {
213	MLX4_RESERVE_BF_QP	= 1 << 7,
214};
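/*
 * Illustrative sketch (not part of the original header): the flag above is
 * passed in the "flags" argument of mlx4_qp_reserve_range() (declared later
 * in this file) when the caller needs QP numbers usable with BlueFlame:
 *
 *	int base_qpn, err;
 *
 *	err = mlx4_qp_reserve_range(dev, 8, 8, &base_qpn, MLX4_RESERVE_BF_QP);
 *	if (err)
 *		... fall back to a plain reservation without the flag ...
 */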
215
216enum {
217	MLX4_DEV_CAP_CQ_FLAG_IO			= 1 <<  0
218};
219
220enum {
221	MLX4_QUERY_FUNC_FLAGS_BF_RES_QP	= 1LL << 0
222};
223
224/* bit enums for an 8-bit flags field indicating special use
225 * QPs which require special handling in qp_reserve_range.
226 * Currently, this only includes QPs used by the ETH interface,
227 * where we expect to use blueflame.  These QPs must not have
228 * bits 6 and 7 set in their qp number.
229 *
230 * This enum may use only bits 0..7.
231 */
232enum {
233	MLX4_RESERVE_ETH_BF_QP		= 1 << 7,
234};
235
236
237enum {
238	MLX4_DEV_CAP_64B_EQE_ENABLED	= 1LL << 0,
239	MLX4_DEV_CAP_64B_CQE_ENABLED	= 1LL << 1
240};
241
242enum {
243	MLX4_USER_DEV_CAP_64B_CQE	= 1L << 0
244};
245
246enum {
247	MLX4_FUNC_CAP_64B_EQE_CQE	= 1L << 0
248};
249
250
251#define MLX4_ATTR_EXTENDED_PORT_INFO	cpu_to_be16(0xff90)
252
253enum {
254	MLX4_BMME_FLAG_WIN_TYPE_2B	= 1 << 1,
255	MLX4_BMME_FLAG_LOCAL_INV	= 1 <<  6,
256	MLX4_BMME_FLAG_REMOTE_INV	= 1 <<  7,
257	MLX4_BMME_FLAG_TYPE_2_WIN	= 1 <<  9,
258	MLX4_BMME_FLAG_RESERVED_LKEY	= 1 << 10,
259	MLX4_BMME_FLAG_FAST_REG_WR	= 1 << 11,
260};
261
262enum mlx4_event {
263	MLX4_EVENT_TYPE_COMP		   = 0x00,
264	MLX4_EVENT_TYPE_PATH_MIG	   = 0x01,
265	MLX4_EVENT_TYPE_COMM_EST	   = 0x02,
266	MLX4_EVENT_TYPE_SQ_DRAINED	   = 0x03,
267	MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE	   = 0x13,
268	MLX4_EVENT_TYPE_SRQ_LIMIT	   = 0x14,
269	MLX4_EVENT_TYPE_CQ_ERROR	   = 0x04,
270	MLX4_EVENT_TYPE_WQ_CATAS_ERROR	   = 0x05,
271	MLX4_EVENT_TYPE_EEC_CATAS_ERROR	   = 0x06,
272	MLX4_EVENT_TYPE_PATH_MIG_FAILED	   = 0x07,
273	MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10,
274	MLX4_EVENT_TYPE_WQ_ACCESS_ERROR	   = 0x11,
275	MLX4_EVENT_TYPE_SRQ_CATAS_ERROR	   = 0x12,
276	MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR  = 0x08,
277	MLX4_EVENT_TYPE_PORT_CHANGE	   = 0x09,
278	MLX4_EVENT_TYPE_EQ_OVERFLOW	   = 0x0f,
279	MLX4_EVENT_TYPE_ECC_DETECT	   = 0x0e,
280	MLX4_EVENT_TYPE_CMD		   = 0x0a,
281	MLX4_EVENT_TYPE_VEP_UPDATE	   = 0x19,
282	MLX4_EVENT_TYPE_COMM_CHANNEL	   = 0x18,
283	MLX4_EVENT_TYPE_OP_REQUIRED	   = 0x1a,
284	MLX4_EVENT_TYPE_FATAL_WARNING	   = 0x1b,
285	MLX4_EVENT_TYPE_FLR_EVENT	   = 0x1c,
286	MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT = 0x1d,
287	MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT  = 0x3e,
288	MLX4_EVENT_TYPE_NONE		   = 0xff,
289};
290
291enum {
292	MLX4_PORT_CHANGE_SUBTYPE_DOWN	= 1,
293	MLX4_PORT_CHANGE_SUBTYPE_ACTIVE	= 4
294};
295
296enum {
297	MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_BAD_CABLE		= 1,
298	MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_UNSUPPORTED_CABLE	= 2,
299};
300
301enum {
302	MLX4_FATAL_WARNING_SUBTYPE_WARMING = 0,
303};
304
305enum slave_port_state {
306	SLAVE_PORT_DOWN = 0,
307	SLAVE_PENDING_UP,
308	SLAVE_PORT_UP,
309};
310
311enum slave_port_gen_event {
312	SLAVE_PORT_GEN_EVENT_DOWN = 0,
313	SLAVE_PORT_GEN_EVENT_UP,
314	SLAVE_PORT_GEN_EVENT_NONE,
315};
316
317enum slave_port_state_event {
318	MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN,
319	MLX4_PORT_STATE_DEV_EVENT_PORT_UP,
320	MLX4_PORT_STATE_IB_PORT_STATE_EVENT_GID_VALID,
321	MLX4_PORT_STATE_IB_EVENT_GID_INVALID,
322};
323
324enum {
325	MLX4_PERM_LOCAL_READ	= 1 << 10,
326	MLX4_PERM_LOCAL_WRITE	= 1 << 11,
327	MLX4_PERM_REMOTE_READ	= 1 << 12,
328	MLX4_PERM_REMOTE_WRITE	= 1 << 13,
329	MLX4_PERM_ATOMIC	= 1 << 14,
330	MLX4_PERM_BIND_MW	= 1 << 15,
331};
332
333enum {
334	MLX4_OPCODE_NOP			= 0x00,
335	MLX4_OPCODE_SEND_INVAL		= 0x01,
336	MLX4_OPCODE_RDMA_WRITE		= 0x08,
337	MLX4_OPCODE_RDMA_WRITE_IMM	= 0x09,
338	MLX4_OPCODE_SEND		= 0x0a,
339	MLX4_OPCODE_SEND_IMM		= 0x0b,
340	MLX4_OPCODE_LSO			= 0x0e,
341	MLX4_OPCODE_RDMA_READ		= 0x10,
342	MLX4_OPCODE_ATOMIC_CS		= 0x11,
343	MLX4_OPCODE_ATOMIC_FA		= 0x12,
344	MLX4_OPCODE_MASKED_ATOMIC_CS	= 0x14,
345	MLX4_OPCODE_MASKED_ATOMIC_FA	= 0x15,
346	MLX4_OPCODE_BIND_MW		= 0x18,
347	MLX4_OPCODE_FMR			= 0x19,
348	MLX4_OPCODE_LOCAL_INVAL		= 0x1b,
349	MLX4_OPCODE_CONFIG_CMD		= 0x1f,
350
351	MLX4_RECV_OPCODE_RDMA_WRITE_IMM	= 0x00,
352	MLX4_RECV_OPCODE_SEND		= 0x01,
353	MLX4_RECV_OPCODE_SEND_IMM	= 0x02,
354	MLX4_RECV_OPCODE_SEND_INVAL	= 0x03,
355
356	MLX4_CQE_OPCODE_ERROR		= 0x1e,
357	MLX4_CQE_OPCODE_RESIZE		= 0x16,
358};
359
360enum {
361	MLX4_STAT_RATE_OFFSET	= 5
362};
363
364enum mlx4_protocol {
365	MLX4_PROT_IB_IPV6 = 0,
366	MLX4_PROT_ETH,
367	MLX4_PROT_IB_IPV4,
368	MLX4_PROT_FCOE
369};
370
371enum {
372	MLX4_MTT_FLAG_PRESENT		= 1
373};
374
375enum {
376	MLX4_MAX_MTT_SHIFT		= 31
377};
378
379enum mlx4_qp_region {
380	MLX4_QP_REGION_FW = 0,
381	MLX4_QP_REGION_ETH_ADDR,
382	MLX4_QP_REGION_FC_ADDR,
383	MLX4_QP_REGION_FC_EXCH,
384	MLX4_NUM_QP_REGION
385};
386
387enum mlx4_port_type {
388	MLX4_PORT_TYPE_NONE	= 0,
389	MLX4_PORT_TYPE_IB	= 1,
390	MLX4_PORT_TYPE_ETH	= 2,
391	MLX4_PORT_TYPE_AUTO	= 3,
392	MLX4_PORT_TYPE_NA	= 4
393};
394
395enum mlx4_special_vlan_idx {
396	MLX4_NO_VLAN_IDX        = 0,
397	MLX4_VLAN_MISS_IDX,
398	MLX4_VLAN_REGULAR
399};
400
401enum mlx4_steer_type {
402	MLX4_MC_STEER = 0,
403	MLX4_UC_STEER,
404	MLX4_NUM_STEERS
405};
406
407enum {
408	MLX4_NUM_FEXCH          = 64 * 1024,
409};
410
411enum {
412	MLX4_MAX_FAST_REG_PAGES = 511,
413};
414
415enum {
416	MLX4_DEV_PMC_SUBTYPE_GUID_INFO	 = 0x14,
417	MLX4_DEV_PMC_SUBTYPE_PORT_INFO	 = 0x15,
418	MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE	 = 0x16,
419};
420
421/* Port mgmt change event handling */
422enum {
423	MLX4_EQ_PORT_INFO_MSTR_SM_LID_CHANGE_MASK	= 1 << 0,
424	MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK		= 1 << 1,
425	MLX4_EQ_PORT_INFO_LID_CHANGE_MASK		= 1 << 2,
426	MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK		= 1 << 3,
427	MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK	= 1 << 4,
428};
429
430#define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \
431			     MLX4_EQ_PORT_INFO_MSTR_SM_LID_CHANGE_MASK)
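/*
 * Illustrative sketch (not part of the original header): a port management
 * change EQE carries a "changed_attr" bitmap that is tested against the
 * masks above, e.g. inside an event handler:
 *
 *	u32 changed = be32_to_cpu(
 *		eqe->event.port_mgmt_change.params.port_info.changed_attr);
 *
 *	if (changed & MSTR_SM_CHANGE_MASK)
 *		... master SM LID and/or SL changed, refresh cached values ...
 */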
432
433enum mlx4_module_id {
434	MLX4_MODULE_ID_SFP		= 0x3,
435	MLX4_MODULE_ID_QSFP		= 0xC,
436	MLX4_MODULE_ID_QSFP_PLUS	= 0xD,
437	MLX4_MODULE_ID_QSFP28		= 0x11,
438};
439
440static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor)
441{
442	return (major << 32) | (minor << 16) | subminor;
443}
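/*
 * Illustrative sketch (not part of the original header): mlx4_fw_ver() packs
 * a version triplet into the same 64-bit format stored in caps.fw_ver, so
 * feature gating becomes a plain integer comparison.  The threshold 2.11.500
 * below is only an example value, not a requirement taken from this header.
 *
 *	if (dev->caps.fw_ver < mlx4_fw_ver(2, 11, 500))
 *		... firmware too old for some optional feature ...
 */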
444
445struct mlx4_phys_caps {
446	u32			gid_phys_table_len[MLX4_MAX_PORTS + 1];
447	u32			pkey_phys_table_len[MLX4_MAX_PORTS + 1];
448	u32			num_phys_eqs;
449	u32			base_sqpn;
450	u32			base_proxy_sqpn;
451	u32			base_tunnel_sqpn;
452};
453
454struct mlx4_caps {
455	u64			fw_ver;
456	u32			function;
457	int			num_ports;
458	int			vl_cap[MLX4_MAX_PORTS + 1];
459	int			ib_mtu_cap[MLX4_MAX_PORTS + 1];
460	__be32			ib_port_def_cap[MLX4_MAX_PORTS + 1];
461	u64			def_mac[MLX4_MAX_PORTS + 1];
462	int			eth_mtu_cap[MLX4_MAX_PORTS + 1];
463	int			gid_table_len[MLX4_MAX_PORTS + 1];
464	int			pkey_table_len[MLX4_MAX_PORTS + 1];
465	int			trans_type[MLX4_MAX_PORTS + 1];
466	int			vendor_oui[MLX4_MAX_PORTS + 1];
467	int			wavelength[MLX4_MAX_PORTS + 1];
468	u64			trans_code[MLX4_MAX_PORTS + 1];
469	int			local_ca_ack_delay;
470	int			num_uars;
471	u32			uar_page_size;
472	int			bf_reg_size;
473	int			bf_regs_per_page;
474	int			max_sq_sg;
475	int			max_rq_sg;
476	int			num_qps;
477	int			max_wqes;
478	int			max_sq_desc_sz;
479	int			max_rq_desc_sz;
480	int			max_qp_init_rdma;
481	int			max_qp_dest_rdma;
482	u32			*qp0_proxy;
483	u32			*qp1_proxy;
484	u32			*qp0_tunnel;
485	u32			*qp1_tunnel;
486	int			num_srqs;
487	int			max_srq_wqes;
488	int			max_srq_sge;
489	int			reserved_srqs;
490	int			num_cqs;
491	int			max_cqes;
492	int			reserved_cqs;
493	int			num_eqs;
494	int			reserved_eqs;
495	int			num_comp_vectors;
496	int			comp_pool;
497	int			num_mpts;
498	int			max_fmr_maps;
499	u64			num_mtts;
500	int			fmr_reserved_mtts;
501	int			reserved_mtts;
502	int			reserved_mrws;
503	int			reserved_uars;
504	int			num_mgms;
505	int			num_amgms;
506	int			reserved_mcgs;
507	int			num_qp_per_mgm;
508	int			steering_mode;
509	int			num_pds;
510	int			reserved_pds;
511	int			max_xrcds;
512	int			reserved_xrcds;
513	int			mtt_entry_sz;
514	u32			max_msg_sz;
515	u32			page_size_cap;
516	u64			flags;
517	u64			flags2;
518	u32			bmme_flags;
519	u32			reserved_lkey;
520	u16			stat_rate_support;
521	u8			cq_timestamp;
522	u8			port_width_cap[MLX4_MAX_PORTS + 1];
523	int			max_gso_sz;
524	int			max_rss_tbl_sz;
525	int                     reserved_qps_cnt[MLX4_NUM_QP_REGION];
526	int			reserved_qps;
527	int                     reserved_qps_base[MLX4_NUM_QP_REGION];
528	int                     log_num_macs;
529	int                     log_num_vlans;
530	enum mlx4_port_type	port_type[MLX4_MAX_PORTS + 1];
531	u8			supported_type[MLX4_MAX_PORTS + 1];
532	u8                      suggested_type[MLX4_MAX_PORTS + 1];
533	u8                      default_sense[MLX4_MAX_PORTS + 1];
534	u32			port_mask[MLX4_MAX_PORTS + 1];
535	enum mlx4_port_type	possible_type[MLX4_MAX_PORTS + 1];
536	u32			max_counters;
537	u8			port_ib_mtu[MLX4_MAX_PORTS + 1];
538	u16			sqp_demux;
539	u32			sync_qp;
540	u32			cq_flags;
541	u32			eqe_size;
542	u32			cqe_size;
543	u8			eqe_factor;
544	u32			userspace_caps; /* userspace must be aware of these */
545	u32			function_caps;  /* functions must be aware of these */
546	u8			fast_drop;
547	u16			hca_core_clock;
548	u32			max_basic_counters;
549	u32			max_extended_counters;
550	u8			def_counter_index[MLX4_MAX_PORTS + 1];
551	u8			alloc_res_qp_mask;
552};
553
554struct mlx4_buf_list {
555	void		       *buf;
556	dma_addr_t		map;
557};
558
559struct mlx4_buf {
560	struct mlx4_buf_list	direct;
561	struct mlx4_buf_list   *page_list;
562	int			nbufs;
563	int			npages;
564	int			page_shift;
565};
566
567struct mlx4_mtt {
568	u32			offset;
569	int			order;
570	int			page_shift;
571};
572
573enum {
574	MLX4_DB_PER_PAGE = PAGE_SIZE / 4
575};
576
577struct mlx4_db_pgdir {
578	struct list_head	list;
579	DECLARE_BITMAP(order0, MLX4_DB_PER_PAGE);
580	DECLARE_BITMAP(order1, MLX4_DB_PER_PAGE / 2);
581	unsigned long	       *bits[2];
582	__be32		       *db_page;
583	dma_addr_t		db_dma;
584};
585
586struct mlx4_ib_user_db_page;
587
588struct mlx4_db {
589	__be32			*db;
590	union {
591		struct mlx4_db_pgdir		*pgdir;
592		struct mlx4_ib_user_db_page	*user_page;
593	}			u;
594	dma_addr_t		dma;
595	int			index;
596	int			order;
597};
598
599struct mlx4_hwq_resources {
600	struct mlx4_db		db;
601	struct mlx4_mtt		mtt;
602	struct mlx4_buf		buf;
603};
604
605struct mlx4_mr {
606	struct mlx4_mtt		mtt;
607	u64			iova;
608	u64			size;
609	u32			key;
610	u32			pd;
611	u32			access;
612	int			enabled;
613};
614
615enum mlx4_mw_type {
616	MLX4_MW_TYPE_1 = 1,
617	MLX4_MW_TYPE_2 = 2,
618};
619
620struct mlx4_mw {
621	u32			key;
622	u32			pd;
623	enum mlx4_mw_type	type;
624	int			enabled;
625};
626
627struct mlx4_fmr {
628	struct mlx4_mr		mr;
629	struct mlx4_mpt_entry  *mpt;
630	__be64		       *mtts;
631	dma_addr_t		dma_handle;
632	int			max_pages;
633	int			max_maps;
634	int			maps;
635	u8			page_shift;
636};
637
638struct mlx4_uar {
639	unsigned long		pfn;
640	int			index;
641	struct list_head	bf_list;
642	unsigned		free_bf_bmap;
643	void __iomem	       *map;
644	void __iomem	       *bf_map;
645};
646
647struct mlx4_bf {
648	unsigned long		offset;
649	int			buf_size;
650	struct mlx4_uar	       *uar;
651	void __iomem	       *reg;
652};
653
654struct mlx4_cq {
655	void (*comp)		(struct mlx4_cq *);
656	void (*event)		(struct mlx4_cq *, enum mlx4_event);
657
658	struct mlx4_uar	       *uar;
659
660	u32			cons_index;
661
662	__be32		       *set_ci_db;
663	__be32		       *arm_db;
664	int			arm_sn;
665
666	int			cqn;
667	unsigned		vector;
668
669	atomic_t		refcount;
670	struct completion	free;
671	int			eqn;
672	u16			irq;
673};
674
675struct mlx4_qp {
676	void (*event)		(struct mlx4_qp *, enum mlx4_event);
677
678	int			qpn;
679
680	atomic_t		refcount;
681	struct completion	free;
682};
683
684struct mlx4_srq {
685	void (*event)		(struct mlx4_srq *, enum mlx4_event);
686
687	int			srqn;
688	int			max;
689	int			max_gs;
690	int			wqe_shift;
691
692	atomic_t		refcount;
693	struct completion	free;
694};
695
696struct mlx4_av {
697	__be32			port_pd;
698	u8			reserved1;
699	u8			g_slid;
700	__be16			dlid;
701	u8			reserved2;
702	u8			gid_index;
703	u8			stat_rate;
704	u8			hop_limit;
705	__be32			sl_tclass_flowlabel;
706	u8			dgid[16];
707};
708
709struct mlx4_eth_av {
710	__be32		port_pd;
711	u8		reserved1;
712	u8		smac_idx;
713	u16		reserved2;
714	u8		reserved3;
715	u8		gid_index;
716	u8		stat_rate;
717	u8		hop_limit;
718	__be32		sl_tclass_flowlabel;
719	u8		dgid[16];
720	u8		s_mac[6];
721	u8	reserved4[2];
722	__be16		vlan;
723	u8		mac[6];
724};
725
726union mlx4_ext_av {
727	struct mlx4_av		ib;
728	struct mlx4_eth_av	eth;
729};
730
731struct mlx4_if_stat_control {
732	u8 reserved1[3];
733	/* Extended counters enabled */
734	u8 cnt_mode;
735	/* Number of interfaces */
736	__be32 num_of_if;
737	__be32 reserved[2];
738};
739
740struct mlx4_if_stat_basic {
741	struct mlx4_if_stat_control control;
742	struct {
743		__be64 IfRxFrames;
744		__be64 IfRxOctets;
745		__be64 IfTxFrames;
746		__be64 IfTxOctets;
747	} counters[];
748};
749#define MLX4_IF_STAT_BSC_SZ(ports) (sizeof(struct mlx4_if_stat_basic) +\
750				    sizeof(((struct mlx4_if_stat_basic *)0)->\
751				    counters[0]) * ports)
752
753struct mlx4_if_stat_extended {
754	struct mlx4_if_stat_control control;
755	struct {
756		__be64 IfRxUnicastFrames;
757		__be64 IfRxUnicastOctets;
758		__be64 IfRxMulticastFrames;
759		__be64 IfRxMulticastOctets;
760		__be64 IfRxBroadcastFrames;
761		__be64 IfRxBroadcastOctets;
762		__be64 IfRxNoBufferFrames;
763		__be64 IfRxNoBufferOctets;
764		__be64 IfRxErrorFrames;
765		__be64 IfRxErrorOctets;
766		__be32 reserved[39];
767		__be64 IfTxUnicastFrames;
768		__be64 IfTxUnicastOctets;
769		__be64 IfTxMulticastFrames;
770		__be64 IfTxMulticastOctets;
771		__be64 IfTxBroadcastFrames;
772		__be64 IfTxBroadcastOctets;
773		__be64 IfTxDroppedFrames;
774		__be64 IfTxDroppedOctets;
775		__be64 IfTxRequestedFramesSent;
776		__be64 IfTxGeneratedFramesSent;
777		__be64 IfTxTsoOctets;
778	} __packed counters[];
779};
780#define MLX4_IF_STAT_EXT_SZ(ports)   (sizeof(struct mlx4_if_stat_extended) +\
781				      sizeof(((struct mlx4_if_stat_extended *)\
782				      0)->counters[0]) * ports)
783
784union mlx4_counter {
785	struct mlx4_if_stat_control	control;
786	struct mlx4_if_stat_basic	basic;
787	struct mlx4_if_stat_extended	ext;
788};
789#define MLX4_IF_STAT_SZ(ports)		MLX4_IF_STAT_EXT_SZ(ports)
790
791struct mlx4_quotas {
792	int qp;
793	int cq;
794	int srq;
795	int mpt;
796	int mtt;
797	int counter;
798	int xrcd;
799};
800
801struct mlx4_dev {
802	struct pci_dev	       *pdev;
803	unsigned long		flags;
804	unsigned long		num_slaves;
805	struct mlx4_caps	caps;
806	struct mlx4_phys_caps	phys_caps;
807	struct mlx4_quotas	quotas;
808	struct radix_tree_root	qp_table_tree;
809	u8			rev_id;
810	char			board_id[MLX4_BOARD_ID_LEN];
811	u16			vsd_vendor_id;
812	char			vsd[MLX4_VSD_LEN];
813	int			num_vfs;
814	int			numa_node;
815	int			oper_log_mgm_entry_size;
816	u64			regid_promisc_array[MLX4_MAX_PORTS + 1];
817	u64			regid_allmulti_array[MLX4_MAX_PORTS + 1];
818};
819
820struct mlx4_clock_params {
821	u64 offset;
822	u8 bar;
823	u8 size;
824};
825
826struct mlx4_eqe {
827	u8			reserved1;
828	u8			type;
829	u8			reserved2;
830	u8			subtype;
831	union {
832		u32		raw[6];
833		struct {
834			__be32	cqn;
835		} __packed comp;
836		struct {
837			u16	reserved1;
838			__be16	token;
839			u32	reserved2;
840			u8	reserved3[3];
841			u8	status;
842			__be64	out_param;
843		} __packed cmd;
844		struct {
845			__be32	qpn;
846		} __packed qp;
847		struct {
848			__be32	srqn;
849		} __packed srq;
850		struct {
851			__be32	cqn;
852			u32	reserved1;
853			u8	reserved2[3];
854			u8	syndrome;
855		} __packed cq_err;
856		struct {
857			u32	reserved1[2];
858			__be32	port;
859		} __packed port_change;
860		struct {
861			#define COMM_CHANNEL_BIT_ARRAY_SIZE	4
862			u32 reserved;
863			u32 bit_vec[COMM_CHANNEL_BIT_ARRAY_SIZE];
864		} __packed comm_channel_arm;
865		struct {
866			u8	port;
867			u8	reserved[3];
868			__be64	mac;
869		} __packed mac_update;
870		struct {
871			__be32	slave_id;
872		} __packed flr_event;
873		struct {
874			__be16  current_temperature;
875			__be16  warning_threshold;
876		} __packed warming;
877		struct {
878			u8 reserved[3];
879			u8 port;
880			union {
881				struct {
882					__be16 mstr_sm_lid;
883					__be16 port_lid;
884					__be32 changed_attr;
885					u8 reserved[3];
886					u8 mstr_sm_sl;
887					__be64 gid_prefix;
888				} __packed port_info;
889				struct {
890					__be32 block_ptr;
891					__be32 tbl_entries_mask;
892				} __packed tbl_change_info;
893			} params;
894		} __packed port_mgmt_change;
895		struct {
896			u8 reserved[3];
897			u8 port;
898			u32 reserved1[5];
899		} __packed bad_cable;
900	}			event;
901	u8			slave_id;
902	u8			reserved3[2];
903	u8			owner;
904} __packed;
905
906struct mlx4_init_port_param {
907	int			set_guid0;
908	int			set_node_guid;
909	int			set_si_guid;
910	u16			mtu;
911	int			port_width_cap;
912	u16			vl_cap;
913	u16			max_gid;
914	u16			max_pkey;
915	u64			guid0;
916	u64			node_guid;
917	u64			si_guid;
918};
919
920#define MAD_IFC_DATA_SZ 192
921/* MAD IFC Mailbox */
922struct mlx4_mad_ifc {
923	u8      base_version;
924	u8      mgmt_class;
925	u8      class_version;
926	u8      method;
927	__be16  status;
928	__be16  class_specific;
929	__be64  tid;
930	__be16  attr_id;
931	__be16  resv;
932	__be32  attr_mod;
933	__be64  mkey;
934	__be16  dr_slid;
935	__be16  dr_dlid;
936	u8      reserved[28];
937	u8      data[MAD_IFC_DATA_SZ];
938} __packed;
939
940#define mlx4_foreach_port(port, dev, type)				\
941	for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++)	\
942		if ((type) == (dev)->caps.port_mask[(port)])
943
944#define mlx4_foreach_non_ib_transport_port(port, dev)                     \
945	for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++)	  \
946		if (((dev)->caps.port_mask[port] != MLX4_PORT_TYPE_IB))
947
948#define mlx4_foreach_ib_transport_port(port, dev)                         \
949	for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++)	  \
950		if (((dev)->caps.port_mask[port] == MLX4_PORT_TYPE_IB) || \
951			((dev)->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
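/*
 * Illustrative sketch (not part of the original header): the iterators above
 * expand to a for/if pair, so they are used like an ordinary loop statement.
 * setup_ib_port() is a hypothetical per-port init helper.
 *
 *	int port;
 *
 *	mlx4_foreach_ib_transport_port(port, dev)
 *		setup_ib_port(dev, port);
 */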
952
953#define MLX4_INVALID_SLAVE_ID	0xFF
954
955#define MLX4_SINK_COUNTER_INDEX 0xff
956
957void handle_port_mgmt_change_event(struct work_struct *work);
958
959static inline int mlx4_master_func_num(struct mlx4_dev *dev)
960{
961	return dev->caps.function;
962}
963
964static inline int mlx4_is_master(struct mlx4_dev *dev)
965{
966	return dev->flags & MLX4_FLAG_MASTER;
967}
968
969static inline int mlx4_num_reserved_sqps(struct mlx4_dev *dev)
970{
971	return dev->phys_caps.base_sqpn + 8 +
972		16 * MLX4_MFUNC_MAX * !!mlx4_is_master(dev);
973}
974
975static inline int mlx4_is_qp_reserved(struct mlx4_dev *dev, u32 qpn)
976{
977	return (qpn < dev->phys_caps.base_sqpn + 8 +
978		16 * MLX4_MFUNC_MAX * !!mlx4_is_master(dev));
979}
980
981static inline int mlx4_is_guest_proxy(struct mlx4_dev *dev, int slave, u32 qpn)
982{
983	int guest_proxy_base = dev->phys_caps.base_proxy_sqpn + slave * 8;
984
985	if (qpn >= guest_proxy_base && qpn < guest_proxy_base + 8)
986		return 1;
987
988	return 0;
989}
990
991static inline int mlx4_is_mfunc(struct mlx4_dev *dev)
992{
993	return dev->flags & (MLX4_FLAG_SLAVE | MLX4_FLAG_MASTER);
994}
995
996static inline int mlx4_is_slave(struct mlx4_dev *dev)
997{
998	return dev->flags & MLX4_FLAG_SLAVE;
999}
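/*
 * Illustrative sketch (not part of the original header): the helpers above
 * are typically combined to pick a code path per function type:
 *
 *	if (!mlx4_is_mfunc(dev)) {
 *		... single-function device, native path ...
 *	} else if (mlx4_is_master(dev)) {
 *		... PF acting as SR-IOV master: owns resource allocation ...
 *	} else if (mlx4_is_slave(dev)) {
 *		... VF: must go through the comm channel to the master ...
 *	}
 */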
1000
1001int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
1002		   struct mlx4_buf *buf);
1003void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf);
1004static inline void *mlx4_buf_offset(struct mlx4_buf *buf, int offset)
1005{
1006	if (BITS_PER_LONG == 64 || buf->nbufs == 1)
1007		return (u8 *)buf->direct.buf + offset;
1008	else
1009		return (u8 *)buf->page_list[offset >> PAGE_SHIFT].buf +
1010			(offset & (PAGE_SIZE - 1));
1011}
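/*
 * Illustrative sketch (not part of the original header): typical lifetime of
 * a mlx4_buf backing a hardware queue.  "size" is the queue size in bytes; a
 * page-sized max_direct threshold is used purely for illustration.
 *
 *	struct mlx4_buf buf;
 *	void *first_wqe;
 *	int err;
 *
 *	err = mlx4_buf_alloc(dev, size, PAGE_SIZE, &buf);
 *	if (err)
 *		return err;
 *	first_wqe = mlx4_buf_offset(&buf, 0);
 *	...
 *	mlx4_buf_free(dev, size, &buf);
 */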
1012
1013int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn);
1014void mlx4_pd_free(struct mlx4_dev *dev, u32 pdn);
1015int mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn);
1016void mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn);
1017
1018int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar);
1019void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar);
1020int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf, int node);
1021void mlx4_bf_free(struct mlx4_dev *dev, struct mlx4_bf *bf);
1022
1023int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
1024		  struct mlx4_mtt *mtt);
1025void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt);
1026u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt);
1027
1028int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
1029		  int npages, int page_shift, struct mlx4_mr *mr);
1030int mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr);
1031int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr);
1032int mlx4_mw_alloc(struct mlx4_dev *dev, u32 pd, enum mlx4_mw_type type,
1033		  struct mlx4_mw *mw);
1034void mlx4_mw_free(struct mlx4_dev *dev, struct mlx4_mw *mw);
1035int mlx4_mw_enable(struct mlx4_dev *dev, struct mlx4_mw *mw);
1036int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
1037		   int start_index, int npages, u64 *page_list);
1038int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
1039		       struct mlx4_buf *buf);
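/*
 * Illustrative sketch (not part of the original header): the usual memory
 * registration flow with the calls above.  "pdn", "iova", "size", "npages",
 * "page_shift" and "page_list" are assumed to be set up by the caller.
 *
 *	struct mlx4_mr mr;
 *	int err;
 *
 *	err = mlx4_mr_alloc(dev, pdn, iova, size,
 *			    MLX4_PERM_LOCAL_READ | MLX4_PERM_LOCAL_WRITE,
 *			    npages, page_shift, &mr);
 *	if (!err)
 *		err = mlx4_write_mtt(dev, &mr.mtt, 0, npages, page_list);
 *	if (!err)
 *		err = mlx4_mr_enable(dev, &mr);	 (makes the lkey/rkey usable)
 *	...
 *	mlx4_mr_free(dev, &mr);
 */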
1040
1041int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order);
1042void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db);
1043
1044int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
1045		       int size, int max_direct);
1046void mlx4_free_hwq_res(struct mlx4_dev *mdev, struct mlx4_hwq_resources *wqres,
1047		       int size);
1048
1049int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
1050		  struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
1051		  unsigned vector, int collapsed, int timestamp_en);
1052void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq);
1053
1054int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
1055			  int *base, u8 flags);
1056void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt);
1057
1058int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp);
1059void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp);
1060
1061int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcdn,
1062		   struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq);
1063void mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq);
1064int mlx4_srq_arm(struct mlx4_dev *dev, struct mlx4_srq *srq, int limit_watermark);
1065int mlx4_srq_query(struct mlx4_dev *dev, struct mlx4_srq *srq, int *limit_watermark);
1066
1067int mlx4_INIT_PORT(struct mlx4_dev *dev, int port);
1068int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port);
1069
1070int mlx4_unicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
1071			int block_mcast_loopback, enum mlx4_protocol prot);
1072int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
1073			enum mlx4_protocol prot);
1074int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
1075			  u8 port, int block_mcast_loopback,
1076			  enum mlx4_protocol protocol, u64 *reg_id);
1077int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
1078			  enum mlx4_protocol protocol, u64 reg_id);
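/*
 * Illustrative sketch (not part of the original header): attaching a QP to a
 * multicast group returns a registration id that must be kept and passed
 * back on detach (it identifies the steering rule in device-managed mode).
 *
 *	u64 reg_id;
 *	int err;
 *
 *	err = mlx4_multicast_attach(dev, &qp, gid, port, 0,
 *				    MLX4_PROT_ETH, &reg_id);
 *	...
 *	err = mlx4_multicast_detach(dev, &qp, gid, MLX4_PROT_ETH, reg_id);
 */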
1079
1080enum {
1081	MLX4_DOMAIN_UVERBS	= 0x1000,
1082	MLX4_DOMAIN_ETHTOOL     = 0x2000,
1083	MLX4_DOMAIN_RFS         = 0x3000,
1084	MLX4_DOMAIN_NIC    = 0x5000,
1085};
1086
1087enum mlx4_net_trans_rule_id {
1088	MLX4_NET_TRANS_RULE_ID_ETH = 0,
1089	MLX4_NET_TRANS_RULE_ID_IB,
1090	MLX4_NET_TRANS_RULE_ID_IPV6,
1091	MLX4_NET_TRANS_RULE_ID_IPV4,
1092	MLX4_NET_TRANS_RULE_ID_TCP,
1093	MLX4_NET_TRANS_RULE_ID_UDP,
1094	MLX4_NET_TRANS_RULE_NUM, /* should be last */
1095	MLX4_NET_TRANS_RULE_DUMMY = -1,	/* force enum to be signed */
1096};
1097
1098extern const u16 __sw_id_hw[];
1099
1100static inline int map_hw_to_sw_id(u16 header_id)
1101{
1102
1103	int i;
1104	for (i = 0; i < MLX4_NET_TRANS_RULE_NUM; i++) {
1105		if (header_id == __sw_id_hw[i])
1106			return i;
1107	}
1108	return -EINVAL;
1109}
1110
1111enum mlx4_net_trans_promisc_mode {
1112	MLX4_FS_REGULAR		= 1,
1113	MLX4_FS_ALL_DEFAULT,
1114	MLX4_FS_MC_DEFAULT,
1115	MLX4_FS_UC_SNIFFER,
1116	MLX4_FS_MC_SNIFFER,
1117	MLX4_FS_MODE_NUM, /* should be last */
1118	MLX4_FS_MODE_DUMMY = -1,	/* force enum to be signed */
1119};
1120
1121struct mlx4_spec_eth {
1122	u8	dst_mac[6];
1123	u8	dst_mac_msk[6];
1124	u8	src_mac[6];
1125	u8	src_mac_msk[6];
1126	u8	ether_type_enable;
1127	__be16	ether_type;
1128	__be16	vlan_id_msk;
1129	__be16	vlan_id;
1130};
1131
1132struct mlx4_spec_tcp_udp {
1133	__be16 dst_port;
1134	__be16 dst_port_msk;
1135	__be16 src_port;
1136	__be16 src_port_msk;
1137};
1138
1139struct mlx4_spec_ipv4 {
1140	__be32 dst_ip;
1141	__be32 dst_ip_msk;
1142	__be32 src_ip;
1143	__be32 src_ip_msk;
1144};
1145
1146struct mlx4_spec_ib {
1147	__be32 l3_qpn;
1148	__be32 qpn_msk;
1149	u8 dst_gid[16];
1150	u8 dst_gid_msk[16];
1151};
1152
1153struct mlx4_spec_list {
1154	struct	list_head list;
1155	enum	mlx4_net_trans_rule_id id;
1156	union {
1157		struct mlx4_spec_eth eth;
1158		struct mlx4_spec_ib ib;
1159		struct mlx4_spec_ipv4 ipv4;
1160		struct mlx4_spec_tcp_udp tcp_udp;
1161	};
1162};
1163
1164enum mlx4_net_trans_hw_rule_queue {
1165	MLX4_NET_TRANS_Q_FIFO,
1166	MLX4_NET_TRANS_Q_LIFO,
1167};
1168
1169struct mlx4_net_trans_rule {
1170	struct	list_head list;
1171	enum	mlx4_net_trans_hw_rule_queue queue_mode;
1172	bool	exclusive;
1173	bool	allow_loopback;
1174	enum	mlx4_net_trans_promisc_mode promisc_mode;
1175	u8	port;
1176	u16	priority;
1177	u32	qpn;
1178};
1179
1180struct mlx4_net_trans_rule_hw_ctrl {
1181	__be16 prio;
1182	u8 type;
1183	u8 flags;
1184	u8 rsvd1;
1185	u8 funcid;
1186	u8 vep;
1187	u8 port;
1188	__be32 qpn;
1189	__be32 rsvd2;
1190};
1191
1192struct mlx4_net_trans_rule_hw_ib {
1193	u8 size;
1194	u8 rsvd1;
1195	__be16 id;
1196	u32 rsvd2;
1197	__be32 l3_qpn;
1198	__be32 qpn_mask;
1199	u8 dst_gid[16];
1200	u8 dst_gid_msk[16];
1201} __packed;
1202
1203struct mlx4_net_trans_rule_hw_eth {
1204	u8	size;
1205	u8	rsvd;
1206	__be16	id;
1207	u8	rsvd1[6];
1208	u8	dst_mac[6];
1209	u16	rsvd2;
1210	u8	dst_mac_msk[6];
1211	u16	rsvd3;
1212	u8	src_mac[6];
1213	u16	rsvd4;
1214	u8	src_mac_msk[6];
1215	u8      rsvd5;
1216	u8      ether_type_enable;
1217	__be16  ether_type;
1218	__be16  vlan_tag_msk;
1219	__be16  vlan_tag;
1220} __packed;
1221
1222struct mlx4_net_trans_rule_hw_tcp_udp {
1223	u8	size;
1224	u8	rsvd;
1225	__be16	id;
1226	__be16	rsvd1[3];
1227	__be16	dst_port;
1228	__be16	rsvd2;
1229	__be16	dst_port_msk;
1230	__be16	rsvd3;
1231	__be16	src_port;
1232	__be16	rsvd4;
1233	__be16	src_port_msk;
1234} __packed;
1235
1236struct mlx4_net_trans_rule_hw_ipv4 {
1237	u8	size;
1238	u8	rsvd;
1239	__be16	id;
1240	__be32	rsvd1;
1241	__be32	dst_ip;
1242	__be32	dst_ip_msk;
1243	__be32	src_ip;
1244	__be32	src_ip_msk;
1245} __packed;
1246
1247struct _rule_hw {
1248	union {
1249		struct {
1250			u8 size;
1251			u8 rsvd;
1252			__be16 id;
1253		};
1254		struct mlx4_net_trans_rule_hw_eth eth;
1255		struct mlx4_net_trans_rule_hw_ib ib;
1256		struct mlx4_net_trans_rule_hw_ipv4 ipv4;
1257		struct mlx4_net_trans_rule_hw_tcp_udp tcp_udp;
1258	};
1259};
1260
1261int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port, u32 qpn,
1262				enum mlx4_net_trans_promisc_mode mode);
1263int mlx4_flow_steer_promisc_remove(struct mlx4_dev *dev, u8 port,
1264				   enum mlx4_net_trans_promisc_mode mode);
1265int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port);
1266int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port);
1267int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port);
1268int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port);
1269
1270int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac);
1271void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac);
1272int mlx4_get_base_qpn(struct mlx4_dev *dev, u8 port);
1273int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac);
1274void mlx4_set_stats_bitmap(struct mlx4_dev *dev, unsigned long *stats_bitmap);
1275int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
1276			  u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx);
1277int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
1278			   u8 promisc);
1279int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc);
1280int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
1281		u8 *pg, u16 *ratelimit);
1282int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
1283int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
1284void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan);
1285
1286int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
1287		      int npages, u64 iova, u32 *lkey, u32 *rkey);
1288int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
1289		   int max_maps, u8 page_shift, struct mlx4_fmr *fmr);
1290int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
1291void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
1292		    u32 *lkey, u32 *rkey);
1293int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
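/*
 * Illustrative sketch (not part of the original header): FMR lifetime with
 * the calls above.  "pdn", "max_pages", "max_maps", "page_shift",
 * "page_list", "npages" and "iova" are assumed caller-provided; access flags
 * use the MLX4_PERM_* bits defined earlier in this file.
 *
 *	struct mlx4_fmr fmr;
 *	u32 lkey, rkey;
 *	int err;
 *
 *	err = mlx4_fmr_alloc(dev, pdn, MLX4_PERM_LOCAL_WRITE, max_pages,
 *			     max_maps, page_shift, &fmr);
 *	if (!err)
 *		err = mlx4_fmr_enable(dev, &fmr);
 *	if (!err)
 *		err = mlx4_map_phys_fmr(dev, &fmr, page_list, npages,
 *					iova, &lkey, &rkey);
 *	...
 *	mlx4_fmr_unmap(dev, &fmr, &lkey, &rkey);
 *	err = mlx4_fmr_free(dev, &fmr);
 */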
1294int mlx4_SYNC_TPT(struct mlx4_dev *dev);
1295int mlx4_query_diag_counters(struct mlx4_dev *mlx4_dev, int array_length,
1296			     u8 op_modifier, u32 in_offset[],
1297			     u32 counter_out[]);
1298
1299int mlx4_test_interrupts(struct mlx4_dev *dev);
1300int mlx4_assign_eq(struct mlx4_dev *dev, char *name, int *vector);
1301void mlx4_release_eq(struct mlx4_dev *dev, int vec);
1302
1303int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port);
1304int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port);
1305
1306int mlx4_counter_alloc(struct mlx4_dev *dev, u8 port, u32 *idx);
1307void mlx4_counter_free(struct mlx4_dev *dev, u8 port, u32 idx);
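/*
 * Illustrative sketch (not part of the original header): counter indices are
 * allocated per port and attached to QPs/flows; a common fallback when
 * allocation fails is to use MLX4_SINK_COUNTER_INDEX.
 *
 *	u32 counter_idx;
 *
 *	if (mlx4_counter_alloc(dev, port, &counter_idx))
 *		counter_idx = MLX4_SINK_COUNTER_INDEX;
 *	...
 *	if (counter_idx != MLX4_SINK_COUNTER_INDEX)
 *		mlx4_counter_free(dev, port, counter_idx);
 */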
1308
1309int mlx4_flow_attach(struct mlx4_dev *dev,
1310		     struct mlx4_net_trans_rule *rule, u64 *reg_id);
1311int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id);
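/*
 * Illustrative sketch (not part of the original header): building a rule for
 * mlx4_flow_attach().  The rule header is filled in, one or more
 * mlx4_spec_list entries are chained on rule.list, and the returned reg_id
 * is later used to detach.  "mac", "port" and "qpn" are assumed to be
 * caller-provided.
 *
 *	struct mlx4_spec_list spec_eth = { .id = MLX4_NET_TRANS_RULE_ID_ETH };
 *	struct mlx4_net_trans_rule rule = {
 *		.queue_mode	= MLX4_NET_TRANS_Q_FIFO,
 *		.exclusive	= 0,
 *		.allow_loopback	= 1,
 *		.promisc_mode	= MLX4_FS_REGULAR,
 *		.port		= port,
 *		.priority	= MLX4_DOMAIN_NIC,
 *		.qpn		= qpn,
 *	};
 *	u64 reg_id;
 *	int err;
 *
 *	INIT_LIST_HEAD(&rule.list);
 *	memcpy(spec_eth.eth.dst_mac, mac, 6);
 *	memset(spec_eth.eth.dst_mac_msk, 0xff, 6);
 *	list_add_tail(&spec_eth.list, &rule.list);
 *
 *	err = mlx4_flow_attach(dev, &rule, &reg_id);
 *	...
 *	err = mlx4_flow_detach(dev, reg_id);
 */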
1312int map_sw_to_hw_steering_mode(struct mlx4_dev *dev,
1313			       enum mlx4_net_trans_promisc_mode flow_type);
1314int map_sw_to_hw_steering_id(struct mlx4_dev *dev,
1315			     enum mlx4_net_trans_rule_id id);
1316int hw_rule_sz(struct mlx4_dev *dev, enum mlx4_net_trans_rule_id id);
1317
1318void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port,
1319			  int i, int val);
1320
1321int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey);
1322
1323int mlx4_is_slave_active(struct mlx4_dev *dev, int slave);
1324int mlx4_gen_pkey_eqe(struct mlx4_dev *dev, int slave, u8 port);
1325int mlx4_gen_guid_change_eqe(struct mlx4_dev *dev, int slave, u8 port);
1326int mlx4_gen_slaves_port_mgt_ev(struct mlx4_dev *dev, u8 port, int attr, u16 lid, u8 sl);
1327int mlx4_gen_port_state_change_eqe(struct mlx4_dev *dev, int slave, u8 port, u8 port_subtype_change);
1328enum slave_port_state mlx4_get_slave_port_state(struct mlx4_dev *dev, int slave, u8 port);
1329int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave, u8 port, int event, enum slave_port_gen_event *gen_event);
1330
1331void mlx4_put_slave_node_guid(struct mlx4_dev *dev, int slave, __be64 guid);
1332__be64 mlx4_get_slave_node_guid(struct mlx4_dev *dev, int slave);
1333int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid, int *slave_id);
1334int mlx4_get_roce_gid_from_slave(struct mlx4_dev *dev, int port, int slave_id, u8 *gid);
1335
1336int mlx4_FLOW_STEERING_IB_UC_QP_RANGE(struct mlx4_dev *dev, u32 min_range_qpn, u32 max_range_qpn);
1337
1338int mlx4_read_clock(struct mlx4_dev *dev);
1339int mlx4_get_internal_clock_params(struct mlx4_dev *dev,
1340				   struct mlx4_clock_params *params);
1341
1342int mlx4_get_module_info(struct mlx4_dev *dev, u8 port,
1343			u16 offset, u16 size, u8 *data);
1344
1345#endif /* MLX4_DEVICE_H */
1346