main.c revision 325611
1/*
2 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
3 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses.  You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 *     Redistribution and use in source and binary forms, with or
12 *     without modification, are permitted provided that the following
13 *     conditions are met:
14 *
15 *      - Redistributions of source code must retain the above
16 *        copyright notice, this list of conditions and the following
17 *        disclaimer.
18 *
19 *      - Redistributions in binary form must reproduce the above
20 *        copyright notice, this list of conditions and the following
21 *        disclaimer in the documentation and/or other materials
22 *        provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#define	LINUXKPI_PARAM_PREFIX mlx4_
35
36#include <linux/module.h>
37
38#ifdef __linux__
39#include <linux/proc_fs.h>
40#endif
41
42#include <linux/slab.h>
43#include <linux/errno.h>
44#include <linux/netdevice.h>
45#include <linux/inetdevice.h>
46#include <linux/if_vlan.h>
47#include <linux/bitops.h>
48#include <linux/if_ether.h>
49#include <linux/fs.h>
50
51#include <rdma/ib_smi.h>
52#include <rdma/ib_user_verbs.h>
53#include <rdma/ib_addr.h>
54
55#include <linux/mlx4/driver.h>
56#include <linux/mlx4/cmd.h>
57#include <linux/sched.h>
58#include "mlx4_ib.h"
59#include "user.h"
60#include "wc.h"
61
62#define DRV_NAME	MLX4_IB_DRV_NAME
63#define DRV_VERSION	"1.0"
64#define DRV_RELDATE	"April 4, 2008"
65
66#define MLX4_IB_DRIVER_PROC_DIR_NAME "driver/mlx4_ib"
67#define MLX4_IB_MRS_PROC_DIR_NAME "mrs"
68
69MODULE_AUTHOR("Roland Dreier");
70MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
71MODULE_LICENSE("Dual BSD/GPL");
72MODULE_VERSION(DRV_VERSION);
73
74int mlx4_ib_sm_guid_assign = 1;
75
76#ifdef __linux__
77struct proc_dir_entry *mlx4_mrs_dir_entry;
78static struct proc_dir_entry *mlx4_ib_driver_dir_entry;
79#endif
80
81module_param_named(sm_guid_assign, mlx4_ib_sm_guid_assign, int, 0444);
82MODULE_PARM_DESC(sm_guid_assign, "Enable SM alias_GUID assignment if sm_guid_assign > 0 (Default: 1)");
83
84static char dev_assign_str[512];
85//module_param_string(dev_assign_str, dev_assign_str, sizeof(dev_assign_str), 0644);
86MODULE_PARM_DESC(dev_assign_str, "Map all device function numbers to "
87		 "IB device numbers following the  pattern: "
88		 "bb:dd.f-0,bb:dd.f-1,... (all numbers are hexadecimals)."
89		 " Max supported devices - 32");
90
91static const char mlx4_ib_version[] =
92	DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
93	DRV_VERSION " (" DRV_RELDATE ")\n";
94
95struct update_gid_work {
96	struct work_struct	work;
97	union ib_gid		gids[128];
98	struct mlx4_ib_dev     *dev;
99	int			port;
100};
101
102struct dev_rec {
103	int	bus;
104	int	dev;
105	int	func;
106	int	nr;
107};
108
109#define MAX_DR 32
110static struct dev_rec dr[MAX_DR];
111
112static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init);
113
114static struct workqueue_struct *wq;
115
116static void init_query_mad(struct ib_smp *mad)
117{
118	mad->base_version  = 1;
119	mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
120	mad->class_version = 1;
121	mad->method	   = IB_MGMT_METHOD_GET;
122}
123
124static union ib_gid zgid;
125
126static int mlx4_ib_query_device(struct ib_device *ibdev,
127				struct ib_device_attr *props)
128{
129	struct mlx4_ib_dev *dev = to_mdev(ibdev);
130	struct ib_smp *in_mad  = NULL;
131	struct ib_smp *out_mad = NULL;
132	int err = -ENOMEM;
133
134	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
135	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
136	if (!in_mad || !out_mad)
137		goto out;
138
139	init_query_mad(in_mad);
140	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
141
142	err = mlx4_MAD_IFC(to_mdev(ibdev), MLX4_MAD_IFC_IGNORE_KEYS,
143			   1, NULL, NULL, in_mad, out_mad);
144	if (err)
145		goto out;
146
147	memset(props, 0, sizeof *props);
148
149	props->fw_ver = dev->dev->caps.fw_ver;
150	props->device_cap_flags    = IB_DEVICE_CHANGE_PHY_PORT |
151		IB_DEVICE_PORT_ACTIVE_EVENT		|
152		IB_DEVICE_SYS_IMAGE_GUID		|
153		IB_DEVICE_RC_RNR_NAK_GEN		|
154		IB_DEVICE_BLOCK_MULTICAST_LOOPBACK	|
155		IB_DEVICE_SHARED_MR;
156
157	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR)
158		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
159	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
160		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
161	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM)
162		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
163	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
164		props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
165	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
166		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
167	if (dev->dev->caps.max_gso_sz && dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH)
168		props->device_cap_flags |= IB_DEVICE_UD_TSO;
169	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
170		props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
171	if ((dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_LOCAL_INV) &&
172	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_REMOTE_INV) &&
173	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_FAST_REG_WR))
174		props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
175	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)
176		props->device_cap_flags |= IB_DEVICE_XRC;
177
178	props->device_cap_flags |= IB_DEVICE_QPG;
179	if (dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS) {
180		props->device_cap_flags |= IB_DEVICE_UD_RSS;
181		props->max_rss_tbl_sz = dev->dev->caps.max_rss_tbl_sz;
182	}
183	props->vendor_id	   = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
184		0xffffff;
185	props->vendor_part_id	   = dev->dev->pdev->device;
186	props->hw_ver		   = be32_to_cpup((__be32 *) (out_mad->data + 32));
187	memcpy(&props->sys_image_guid, out_mad->data +	4, 8);
188
189	props->max_mr_size	   = ~0ull;
190	props->page_size_cap	   = dev->dev->caps.page_size_cap;
191	props->max_qp		   = dev->dev->quotas.qp;
192	props->max_qp_wr	   = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
193	props->max_sge		   = min(dev->dev->caps.max_sq_sg,
194					 dev->dev->caps.max_rq_sg);
195	props->max_cq		   = dev->dev->quotas.cq;
196	props->max_cqe		   = dev->dev->caps.max_cqes;
197	props->max_mr		   = dev->dev->quotas.mpt;
198	props->max_pd		   = dev->dev->caps.num_pds - dev->dev->caps.reserved_pds;
199	props->max_qp_rd_atom	   = dev->dev->caps.max_qp_dest_rdma;
200	props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma;
201	props->max_res_rd_atom	   = props->max_qp_rd_atom * props->max_qp;
202	props->max_srq		   = dev->dev->quotas.srq;
203	props->max_srq_wr	   = dev->dev->caps.max_srq_wqes - 1;
204	props->max_srq_sge	   = dev->dev->caps.max_srq_sge;
205	props->max_fast_reg_page_list_len = MLX4_MAX_FAST_REG_PAGES;
206	props->local_ca_ack_delay  = dev->dev->caps.local_ca_ack_delay;
207	props->atomic_cap	   = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
208		IB_ATOMIC_HCA : IB_ATOMIC_NONE;
209	props->masked_atomic_cap   = props->atomic_cap;
210	props->max_pkeys	   = dev->dev->caps.pkey_table_len[1];
211	props->max_mcast_grp	   = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms;
212	props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
213	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
214					   props->max_mcast_grp;
215	props->max_map_per_fmr = dev->dev->caps.max_fmr_maps;
216
217out:
218	kfree(in_mad);
219	kfree(out_mad);
220
221	return err;
222}
223
224static enum rdma_link_layer
225mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
226{
227	struct mlx4_dev *dev = to_mdev(device)->dev;
228
229	return dev->caps.port_mask[port_num] == MLX4_PORT_TYPE_IB ?
230		IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
231}
232
233static int ib_link_query_port(struct ib_device *ibdev, u8 port,
234			      struct ib_port_attr *props, int netw_view)
235{
236	struct ib_smp *in_mad  = NULL;
237	struct ib_smp *out_mad = NULL;
238	int ext_active_speed;
239	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
240	int err = -ENOMEM;
241
242	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
243	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
244	if (!in_mad || !out_mad)
245		goto out;
246
247	init_query_mad(in_mad);
248	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
249	in_mad->attr_mod = cpu_to_be32(port);
250
251	if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
252		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
253
254	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
255				in_mad, out_mad);
256	if (err)
257		goto out;
258
259
260	props->lid		= be16_to_cpup((__be16 *) (out_mad->data + 16));
261	props->lmc		= out_mad->data[34] & 0x7;
262	props->sm_lid		= be16_to_cpup((__be16 *) (out_mad->data + 18));
263	props->sm_sl		= out_mad->data[36] & 0xf;
264	props->state		= out_mad->data[32] & 0xf;
265	props->phys_state	= out_mad->data[33] >> 4;
266	props->port_cap_flags	= be32_to_cpup((__be32 *) (out_mad->data + 20));
267	if (netw_view)
268		props->gid_tbl_len = out_mad->data[50];
269	else
270		props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port];
271	props->max_msg_sz	= to_mdev(ibdev)->dev->caps.max_msg_sz;
272	props->pkey_tbl_len	= to_mdev(ibdev)->dev->caps.pkey_table_len[port];
273	props->bad_pkey_cntr	= be16_to_cpup((__be16 *) (out_mad->data + 46));
274	props->qkey_viol_cntr	= be16_to_cpup((__be16 *) (out_mad->data + 48));
275	props->active_width	= out_mad->data[31] & 0xf;
276	props->active_speed	= out_mad->data[35] >> 4;
277	props->max_mtu		= out_mad->data[41] & 0xf;
278	props->active_mtu	= out_mad->data[36] >> 4;
279	props->subnet_timeout	= out_mad->data[51] & 0x1f;
280	props->max_vl_num	= out_mad->data[37] >> 4;
281	props->init_type_reply	= out_mad->data[41] >> 4;
282
283	/* Check if extended speeds (EDR/FDR/...) are supported */
284	if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
285		ext_active_speed = out_mad->data[62] >> 4;
286
287		switch (ext_active_speed) {
288		case 1:
289			props->active_speed = IB_SPEED_FDR;
290			break;
291		case 2:
292			props->active_speed = IB_SPEED_EDR;
293			break;
294		}
295	}
296
297	/* If reported active speed is QDR, check if is FDR-10 */
298	if (props->active_speed == IB_SPEED_QDR) {
299		init_query_mad(in_mad);
300		in_mad->attr_id = MLX4_ATTR_EXTENDED_PORT_INFO;
301		in_mad->attr_mod = cpu_to_be32(port);
302
303		err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port,
304				   NULL, NULL, in_mad, out_mad);
305		if (err)
306			goto out;
307
308		/* Checking LinkSpeedActive for FDR-10 */
309		if (out_mad->data[15] & 0x1)
310			props->active_speed = IB_SPEED_FDR10;
311	}
312
313	/* Avoid wrong speed value returned by FW if the IB link is down. */
314	if (props->state == IB_PORT_DOWN)
315		 props->active_speed = IB_SPEED_SDR;
316
317out:
318	kfree(in_mad);
319	kfree(out_mad);
320	return err;
321}
322
323static u8 state_to_phys_state(enum ib_port_state state)
324{
325	return state == IB_PORT_ACTIVE ? 5 : 3;
326}
327
328static int eth_link_query_port(struct ib_device *ibdev, u8 port,
329			       struct ib_port_attr *props, int netw_view)
330{
331
332	struct mlx4_ib_dev *mdev = to_mdev(ibdev);
333	struct mlx4_ib_iboe *iboe = &mdev->iboe;
334	struct net_device *ndev;
335	enum ib_mtu tmp;
336	struct mlx4_cmd_mailbox *mailbox;
337	int err = 0;
338
339	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
340	if (IS_ERR(mailbox))
341		return PTR_ERR(mailbox);
342
343	err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
344			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
345			   MLX4_CMD_WRAPPED);
346	if (err)
347		goto out;
348
349	props->active_width	=  (((u8 *)mailbox->buf)[5] == 0x40) ?
350						IB_WIDTH_4X : IB_WIDTH_1X;
351	props->active_speed	= IB_SPEED_QDR;
352	props->port_cap_flags	= IB_PORT_CM_SUP;
353	if (netw_view)
354		props->gid_tbl_len = MLX4_ROCE_MAX_GIDS;
355	else
356		props->gid_tbl_len   = mdev->dev->caps.gid_table_len[port];
357
358	props->max_msg_sz	= mdev->dev->caps.max_msg_sz;
359	props->pkey_tbl_len	= 1;
360	props->max_mtu		= IB_MTU_4096;
361	props->max_vl_num	= 2;
362	props->state		= IB_PORT_DOWN;
363	props->phys_state	= state_to_phys_state(props->state);
364	props->active_mtu	= IB_MTU_256;
365	spin_lock(&iboe->lock);
366	ndev = iboe->netdevs[port - 1];
367	if (!ndev)
368		goto out_unlock;
369
370	tmp = iboe_get_mtu(ndev->if_mtu);
371	props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256;
372
373	props->state		= (netif_running(ndev) && netif_carrier_ok(ndev)) ?
374					IB_PORT_ACTIVE : IB_PORT_DOWN;
375	props->phys_state	= state_to_phys_state(props->state);
376out_unlock:
377	spin_unlock(&iboe->lock);
378out:
379	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
380	return err;
381}
382
383int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
384			 struct ib_port_attr *props, int netw_view)
385{
386	int err;
387
388	memset(props, 0, sizeof *props);
389
390	err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ?
391		ib_link_query_port(ibdev, port, props, netw_view) :
392				eth_link_query_port(ibdev, port, props, netw_view);
393
394	return err;
395}
396
397static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
398			      struct ib_port_attr *props)
399{
400	/* returns host view */
401	return __mlx4_ib_query_port(ibdev, port, props, 0);
402}
403
404int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
405			union ib_gid *gid, int netw_view)
406{
407	struct ib_smp *in_mad  = NULL;
408	struct ib_smp *out_mad = NULL;
409	int err = -ENOMEM;
410	struct mlx4_ib_dev *dev = to_mdev(ibdev);
411	int clear = 0;
412	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
413
414	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
415	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
416	if (!in_mad || !out_mad)
417		goto out;
418
419	init_query_mad(in_mad);
420	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
421	in_mad->attr_mod = cpu_to_be32(port);
422
423	if (mlx4_is_mfunc(dev->dev) && netw_view)
424		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
425
426	err = mlx4_MAD_IFC(dev, mad_ifc_flags, port, NULL, NULL, in_mad, out_mad);
427	if (err)
428		goto out;
429
430	memcpy(gid->raw, out_mad->data + 8, 8);
431
432	if (mlx4_is_mfunc(dev->dev) && !netw_view) {
433		if (index) {
434			/* For any index > 0, return the null guid */
435			err = 0;
436			clear = 1;
437			goto out;
438		}
439	}
440
441	init_query_mad(in_mad);
442	in_mad->attr_id  = IB_SMP_ATTR_GUID_INFO;
443	in_mad->attr_mod = cpu_to_be32(index / 8);
444
445	err = mlx4_MAD_IFC(dev, mad_ifc_flags, port,
446			   NULL, NULL, in_mad, out_mad);
447	if (err)
448		goto out;
449
450	memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);
451
452out:
453	if (clear)
454		memset(gid->raw + 8, 0, 8);
455	kfree(in_mad);
456	kfree(out_mad);
457	return err;
458}
459
460static int iboe_query_gid(struct ib_device *ibdev, u8 port, int index,
461			  union ib_gid *gid)
462{
463	struct mlx4_ib_dev *dev = to_mdev(ibdev);
464
465	*gid = dev->iboe.gid_table[port - 1][index];
466
467	return 0;
468}
469
470static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
471			     union ib_gid *gid)
472{
473	if (rdma_port_get_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND)
474		return __mlx4_ib_query_gid(ibdev, port, index, gid, 0);
475	else
476		return iboe_query_gid(ibdev, port, index, gid);
477}
478
479int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
480			 u16 *pkey, int netw_view)
481{
482	struct ib_smp *in_mad  = NULL;
483	struct ib_smp *out_mad = NULL;
484	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
485	int err = -ENOMEM;
486
487	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
488	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
489	if (!in_mad || !out_mad)
490		goto out;
491
492	init_query_mad(in_mad);
493	in_mad->attr_id  = IB_SMP_ATTR_PKEY_TABLE;
494	in_mad->attr_mod = cpu_to_be32(index / 32);
495
496	if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
497		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
498
499	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
500			   in_mad, out_mad);
501	if (err)
502		goto out;
503
504	*pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);
505
506out:
507	kfree(in_mad);
508	kfree(out_mad);
509	return err;
510}
511
512static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
513{
514	return __mlx4_ib_query_pkey(ibdev, port, index, pkey, 0);
515}
516
517static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
518				 struct ib_device_modify *props)
519{
520	struct mlx4_cmd_mailbox *mailbox;
521	unsigned long flags;
522
523	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
524		return -EOPNOTSUPP;
525
526	if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
527		return 0;
528
529	if (mlx4_is_slave(to_mdev(ibdev)->dev))
530		return -EOPNOTSUPP;
531
532	spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags);
533	memcpy(ibdev->node_desc, props->node_desc, 64);
534	spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags);
535
536	/*
537	 * If possible, pass node desc to FW, so it can generate
538	 * a 144 trap.  If cmd fails, just ignore.
539	 */
540	mailbox = mlx4_alloc_cmd_mailbox(to_mdev(ibdev)->dev);
541	if (IS_ERR(mailbox))
542		return 0;
543
544	memset(mailbox->buf, 0, 256);
545	memcpy(mailbox->buf, props->node_desc, 64);
546	mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
547		 MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
548
549	mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox);
550
551	return 0;
552}
553
554static int mlx4_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
555			 u32 cap_mask)
556{
557	struct mlx4_cmd_mailbox *mailbox;
558	int err;
559	u8 is_eth = dev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
560
561	mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
562	if (IS_ERR(mailbox))
563		return PTR_ERR(mailbox);
564
565	memset(mailbox->buf, 0, 256);
566
567	if (dev->dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
568		*(u8 *) mailbox->buf	     = !!reset_qkey_viols << 6;
569		((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask);
570	} else {
571		((u8 *) mailbox->buf)[3]     = !!reset_qkey_viols;
572		((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask);
573	}
574
575	err = mlx4_cmd(dev->dev, mailbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
576		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
577
578	mlx4_free_cmd_mailbox(dev->dev, mailbox);
579	return err;
580}
581
582static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
583			       struct ib_port_modify *props)
584{
585	struct ib_port_attr attr;
586	u32 cap_mask;
587	int err;
588
589	mutex_lock(&to_mdev(ibdev)->cap_mask_mutex);
590
591	err = mlx4_ib_query_port(ibdev, port, &attr);
592	if (err)
593		goto out;
594
595	cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
596		~props->clr_port_cap_mask;
597
598	err = mlx4_SET_PORT(to_mdev(ibdev), port,
599			    !!(mask & IB_PORT_RESET_QKEY_CNTR),
600			    cap_mask);
601
602out:
603	mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
604	return err;
605}
606
607static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
608						  struct ib_udata *udata)
609{
610	struct mlx4_ib_dev *dev = to_mdev(ibdev);
611	struct mlx4_ib_ucontext *context;
612	struct mlx4_ib_alloc_ucontext_resp_v3 resp_v3;
613	struct mlx4_ib_alloc_ucontext_resp resp;
614	int err;
615
616	if (!dev->ib_active)
617		return ERR_PTR(-EAGAIN);
618
619	if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION) {
620		resp_v3.qp_tab_size      = dev->dev->caps.num_qps;
621		if (mlx4_wc_enabled()) {
622			resp_v3.bf_reg_size      = dev->dev->caps.bf_reg_size;
623			resp_v3.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
624		} else {
625			resp_v3.bf_reg_size      = 0;
626			resp_v3.bf_regs_per_page = 0;
627		}
628	} else {
629		resp.dev_caps	      = dev->dev->caps.userspace_caps;
630		resp.qp_tab_size      = dev->dev->caps.num_qps;
631		if (mlx4_wc_enabled()) {
632			resp.bf_reg_size      = dev->dev->caps.bf_reg_size;
633			resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
634		} else {
635			resp.bf_reg_size      = 0;
636			resp.bf_regs_per_page = 0;
637		}
638		resp.cqe_size	      = dev->dev->caps.cqe_size;
639	}
640
641	context = kmalloc(sizeof *context, GFP_KERNEL);
642	if (!context)
643		return ERR_PTR(-ENOMEM);
644
645	err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar);
646	if (err) {
647		kfree(context);
648		return ERR_PTR(err);
649	}
650
651	INIT_LIST_HEAD(&context->db_page_list);
652	mutex_init(&context->db_page_mutex);
653
654	if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION)
655		err = ib_copy_to_udata(udata, &resp_v3, sizeof(resp_v3));
656	else
657		err = ib_copy_to_udata(udata, &resp, sizeof(resp));
658
659	if (err) {
660		mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
661		kfree(context);
662		return ERR_PTR(-EFAULT);
663	}
664
665	return &context->ibucontext;
666}
667
668static int mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
669{
670	struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);
671
672	mlx4_uar_free(to_mdev(ibcontext->device)->dev, &context->uar);
673	kfree(context);
674
675	return 0;
676}
677#ifdef __linux__
678static unsigned long mlx4_ib_get_unmapped_area(struct file *file,
679			unsigned long addr,
680			unsigned long len, unsigned long pgoff,
681			unsigned long flags)
682{
683	struct mm_struct *mm;
684	struct vm_area_struct *vma;
685	unsigned long start_addr;
686	unsigned long page_size_order;
687	unsigned long  command;
688
689	mm = current->mm;
690	if (addr)
691		return current->mm->get_unmapped_area(file, addr, len,
692						pgoff, flags);
693
694	/* Last 8 bits hold the  command others are data per that command */
695	command = pgoff & MLX4_IB_MMAP_CMD_MASK;
696	if (command != MLX4_IB_MMAP_GET_CONTIGUOUS_PAGES)
697		return current->mm->get_unmapped_area(file, addr, len,
698						pgoff, flags);
699
700	page_size_order = pgoff >> MLX4_IB_MMAP_CMD_BITS;
701	/* code is based on the huge-pages get_unmapped_area code */
702	start_addr = mm->free_area_cache;
703
704	if (len <= mm->cached_hole_size)
705		start_addr = TASK_UNMAPPED_BASE;
706
707
708full_search:
709	addr = ALIGN(start_addr, 1 << page_size_order);
710
711	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
712		/* At this point:  (!vma || addr < vma->vm_end). */
713		if (TASK_SIZE - len < addr) {
714			/*
715			 * Start a new search - just in case we missed
716			 * some holes.
717			 */
718			if (start_addr != TASK_UNMAPPED_BASE) {
719				start_addr = TASK_UNMAPPED_BASE;
720				goto full_search;
721			}
722			return -ENOMEM;
723		}
724
725		if (!vma || addr + len <= vma->vm_start)
726			return addr;
727		addr = ALIGN(vma->vm_end, 1 << page_size_order);
728	}
729}
730#endif
731
732static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
733{
734	struct mlx4_ib_dev *dev = to_mdev(context->device);
735	int err;
736
737	/* Last 8 bits hold the  command others are data per that command */
738	unsigned long  command = vma->vm_pgoff & MLX4_IB_MMAP_CMD_MASK;
739
740	if (command < MLX4_IB_MMAP_GET_CONTIGUOUS_PAGES) {
741		/* compatability handling for commands 0 & 1*/
742		if (vma->vm_end - vma->vm_start != PAGE_SIZE)
743			return -EINVAL;
744	}
745	if (command == MLX4_IB_MMAP_UAR_PAGE) {
746		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
747
748		if (io_remap_pfn_range(vma, vma->vm_start,
749				       to_mucontext(context)->uar.pfn,
750				       PAGE_SIZE, vma->vm_page_prot))
751			return -EAGAIN;
752	} else if (command == MLX4_IB_MMAP_BLUE_FLAME_PAGE &&
753			dev->dev->caps.bf_reg_size != 0) {
754		vma->vm_page_prot = pgprot_wc(vma->vm_page_prot);
755
756		if (io_remap_pfn_range(vma, vma->vm_start,
757				       to_mucontext(context)->uar.pfn +
758				       dev->dev->caps.num_uars,
759				       PAGE_SIZE, vma->vm_page_prot))
760			return -EAGAIN;
761	} else if (command == MLX4_IB_MMAP_GET_CONTIGUOUS_PAGES) {
762		/* Getting contiguous physical pages */
763		unsigned long total_size = vma->vm_end - vma->vm_start;
764		unsigned long page_size_order = (vma->vm_pgoff) >>
765						MLX4_IB_MMAP_CMD_BITS;
766		struct ib_cmem *ib_cmem;
767		ib_cmem = ib_cmem_alloc_contiguous_pages(context, total_size,
768							page_size_order);
769		if (IS_ERR(ib_cmem)) {
770			err = PTR_ERR(ib_cmem);
771			return err;
772		}
773
774		err = ib_cmem_map_contiguous_pages_to_vma(ib_cmem, vma);
775		if (err) {
776			ib_cmem_release_contiguous_pages(ib_cmem);
777			return err;
778		}
779		return 0;
780	} else
781		return -EINVAL;
782
783	return 0;
784}
785
786static struct ib_pd *mlx4_ib_alloc_pd(struct ib_device *ibdev,
787				      struct ib_ucontext *context,
788				      struct ib_udata *udata)
789{
790	struct mlx4_ib_pd *pd;
791	int err;
792
793	pd = kmalloc(sizeof *pd, GFP_KERNEL);
794	if (!pd)
795		return ERR_PTR(-ENOMEM);
796
797	err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn);
798	if (err) {
799		kfree(pd);
800		return ERR_PTR(err);
801	}
802
803	if (context)
804		if (ib_copy_to_udata(udata, &pd->pdn, sizeof (__u32))) {
805			mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
806			kfree(pd);
807			return ERR_PTR(-EFAULT);
808		}
809
810	return &pd->ibpd;
811}
812
813static int mlx4_ib_dealloc_pd(struct ib_pd *pd)
814{
815	mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
816	kfree(pd);
817
818	return 0;
819}
820
821static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev,
822					  struct ib_ucontext *context,
823					  struct ib_udata *udata)
824{
825	struct mlx4_ib_xrcd *xrcd;
826	int err;
827
828	if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
829		return ERR_PTR(-ENOSYS);
830
831	xrcd = kmalloc(sizeof *xrcd, GFP_KERNEL);
832	if (!xrcd)
833		return ERR_PTR(-ENOMEM);
834
835	err = mlx4_xrcd_alloc(to_mdev(ibdev)->dev, &xrcd->xrcdn);
836	if (err)
837		goto err1;
838
839	xrcd->pd = ib_alloc_pd(ibdev);
840	if (IS_ERR(xrcd->pd)) {
841		err = PTR_ERR(xrcd->pd);
842		goto err2;
843	}
844
845	xrcd->cq = ib_create_cq(ibdev, NULL, NULL, xrcd, 1, 0);
846	if (IS_ERR(xrcd->cq)) {
847		err = PTR_ERR(xrcd->cq);
848		goto err3;
849	}
850
851	return &xrcd->ibxrcd;
852
853err3:
854	ib_dealloc_pd(xrcd->pd);
855err2:
856	mlx4_xrcd_free(to_mdev(ibdev)->dev, xrcd->xrcdn);
857err1:
858	kfree(xrcd);
859	return ERR_PTR(err);
860}
861
862static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
863{
864	ib_destroy_cq(to_mxrcd(xrcd)->cq);
865	ib_dealloc_pd(to_mxrcd(xrcd)->pd);
866	mlx4_xrcd_free(to_mdev(xrcd->device)->dev, to_mxrcd(xrcd)->xrcdn);
867	kfree(xrcd);
868
869	return 0;
870}
871
872static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
873{
874	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
875	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
876	struct mlx4_ib_gid_entry *ge;
877
878	ge = kzalloc(sizeof *ge, GFP_KERNEL);
879	if (!ge)
880		return -ENOMEM;
881
882	ge->gid = *gid;
883	if (mlx4_ib_add_mc(mdev, mqp, gid)) {
884		ge->port = mqp->port;
885		ge->added = 1;
886	}
887
888	mutex_lock(&mqp->mutex);
889	list_add_tail(&ge->list, &mqp->gid_list);
890	mutex_unlock(&mqp->mutex);
891
892	return 0;
893}
894
895int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
896		   union ib_gid *gid)
897{
898	u8 mac[6];
899	struct net_device *ndev;
900	int ret = 0;
901
902	if (!mqp->port)
903		return 0;
904
905	spin_lock(&mdev->iboe.lock);
906	ndev = mdev->iboe.netdevs[mqp->port - 1];
907	if (ndev)
908		dev_hold(ndev);
909	spin_unlock(&mdev->iboe.lock);
910
911	if (ndev) {
912		rdma_get_mcast_mac((struct in6_addr *)gid, mac);
913		rtnl_lock();
914		dev_mc_add(mdev->iboe.netdevs[mqp->port - 1], mac, 6, 0);
915		ret = 1;
916		rtnl_unlock();
917		dev_put(ndev);
918	}
919
920	return ret;
921}
922
923struct mlx4_ib_steering {
924	struct list_head list;
925	u64 reg_id;
926	union ib_gid gid;
927};
928
929static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
930{
931	int err;
932	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
933	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
934	u64 reg_id;
935	struct mlx4_ib_steering *ib_steering = NULL;
936
937	if (mdev->dev->caps.steering_mode ==
938	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
939		ib_steering = kmalloc(sizeof(*ib_steering), GFP_KERNEL);
940		if (!ib_steering)
941			return -ENOMEM;
942	}
943
944	err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port,
945				    !!(mqp->flags &
946				       MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
947				    MLX4_PROT_IB_IPV6, &reg_id);
948	if (err)
949		goto err_malloc;
950
951	err = add_gid_entry(ibqp, gid);
952	if (err)
953		goto err_add;
954
955	if (ib_steering) {
956		memcpy(ib_steering->gid.raw, gid->raw, 16);
957		ib_steering->reg_id = reg_id;
958		mutex_lock(&mqp->mutex);
959		list_add(&ib_steering->list, &mqp->steering_rules);
960		mutex_unlock(&mqp->mutex);
961	}
962	return 0;
963
964err_add:
965	mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
966			      MLX4_PROT_IB_IPV6, reg_id);
967err_malloc:
968	kfree(ib_steering);
969
970	return err;
971}
972
973enum {
974	IBV_FLOW_L4_NONE = 0,
975	IBV_FLOW_L4_OTHER = 3,
976	IBV_FLOW_L4_UDP = 5,
977	IBV_FLOW_L4_TCP = 6
978};
979
980struct mlx4_cm_steering {
981	struct list_head list;
982	u64 reg_id;
983	struct ib_flow_spec spec;
984};
985
986static int flow_spec_to_net_rule(struct ib_device *dev, struct ib_flow_spec *flow_spec,
987				  struct list_head *rule_list_h)
988{
989	struct mlx4_spec_list *spec_l2, *spec_l3, *spec_l4;
990	u64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
991
992	spec_l2 = kzalloc(sizeof *spec_l2, GFP_KERNEL);
993	if (!spec_l2)
994		return -ENOMEM;
995
996	switch (flow_spec->type) {
997	case IB_FLOW_ETH:
998		spec_l2->id = MLX4_NET_TRANS_RULE_ID_ETH;
999		memcpy(spec_l2->eth.dst_mac, flow_spec->l2_id.eth.mac, ETH_ALEN);
1000		memcpy(spec_l2->eth.dst_mac_msk, &mac_msk, ETH_ALEN);
1001		spec_l2->eth.ether_type = flow_spec->l2_id.eth.ethertype;
1002		if (flow_spec->l2_id.eth.vlan_present) {
1003			spec_l2->eth.vlan_id = flow_spec->l2_id.eth.vlan;
1004			spec_l2->eth.vlan_id_msk = cpu_to_be16(0x0fff);
1005		}
1006		break;
1007	case IB_FLOW_IB_UC:
1008		spec_l2->id = MLX4_NET_TRANS_RULE_ID_IB;
1009		if(flow_spec->l2_id.ib_uc.qpn) {
1010			spec_l2->ib.l3_qpn = cpu_to_be32(flow_spec->l2_id.ib_uc.qpn);
1011			spec_l2->ib.qpn_msk = cpu_to_be32(0xffffff);
1012                    }
1013		break;
1014	case IB_FLOW_IB_MC_IPV4:
1015	case IB_FLOW_IB_MC_IPV6:
1016		spec_l2->id = MLX4_NET_TRANS_RULE_ID_IB;
1017		memcpy(spec_l2->ib.dst_gid, flow_spec->l2_id.ib_mc.mgid, 16);
1018		memset(spec_l2->ib.dst_gid_msk, 0xff, 16);
1019		break;
1020	}
1021
1022
1023	list_add_tail(&spec_l2->list, rule_list_h);
1024
1025	if (flow_spec->l2_id.eth.ethertype == cpu_to_be16(ETH_P_IP) ||
1026	    flow_spec->type != IB_FLOW_ETH) {
1027		spec_l3 = kzalloc(sizeof *spec_l3, GFP_KERNEL);
1028		if (!spec_l3)
1029			return -ENOMEM;
1030
1031		spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4;
1032		spec_l3->ipv4.src_ip = flow_spec->src_ip;
1033		if (flow_spec->type != IB_FLOW_IB_MC_IPV4 &&
1034		    flow_spec->type != IB_FLOW_IB_MC_IPV6)
1035			spec_l3->ipv4.dst_ip = flow_spec->dst_ip;
1036
1037		if (spec_l3->ipv4.src_ip)
1038			spec_l3->ipv4.src_ip_msk = MLX4_BE_WORD_MASK;
1039		if (spec_l3->ipv4.dst_ip)
1040			spec_l3->ipv4.dst_ip_msk = MLX4_BE_WORD_MASK;
1041
1042		list_add_tail(&spec_l3->list, rule_list_h);
1043	}
1044
1045	if (flow_spec->l4_protocol) {
1046		spec_l4 = kzalloc(sizeof(*spec_l4), GFP_KERNEL);
1047		if (!spec_l4)
1048			return -ENOMEM;
1049
1050		spec_l4->tcp_udp.src_port = flow_spec->src_port;
1051		spec_l4->tcp_udp.dst_port = flow_spec->dst_port;
1052		if (spec_l4->tcp_udp.src_port)
1053			spec_l4->tcp_udp.src_port_msk =
1054						MLX4_BE_SHORT_MASK;
1055		if (spec_l4->tcp_udp.dst_port)
1056			spec_l4->tcp_udp.dst_port_msk =
1057						MLX4_BE_SHORT_MASK;
1058
1059		switch (flow_spec->l4_protocol) {
1060		case IBV_FLOW_L4_UDP:
1061			spec_l4->id = MLX4_NET_TRANS_RULE_ID_UDP;
1062			break;
1063		case IBV_FLOW_L4_TCP:
1064			spec_l4->id = MLX4_NET_TRANS_RULE_ID_TCP;
1065			break;
1066		default:
1067			dev_err(dev->dma_device,
1068				"Unsupported l4 protocol.\n");
1069			kfree(spec_l4);
1070			return -EPROTONOSUPPORT;
1071		}
1072		list_add_tail(&spec_l4->list, rule_list_h);
1073	}
1074	return 0;
1075}
1076
1077static int __mlx4_ib_flow_attach(struct mlx4_ib_dev *mdev,
1078				 struct mlx4_ib_qp *mqp,
1079				 struct ib_flow_spec *flow_spec,
1080				 int priority, int lock_qp)
1081{
1082	u64 reg_id = 0;
1083	int err = 0;
1084	struct mlx4_cm_steering *cm_flow;
1085	struct mlx4_spec_list *spec, *tmp_spec;
1086
1087	struct mlx4_net_trans_rule rule =
1088	{	.queue_mode = MLX4_NET_TRANS_Q_FIFO,
1089		.exclusive = 0,
1090	};
1091
1092	rule.promisc_mode = flow_spec->rule_type;
1093	rule.port = mqp->port;
1094	rule.qpn = mqp->mqp.qpn;
1095	INIT_LIST_HEAD(&rule.list);
1096
1097	cm_flow = kmalloc(sizeof(*cm_flow), GFP_KERNEL);
1098	if (!cm_flow)
1099		return -ENOMEM;
1100
1101	if (rule.promisc_mode == MLX4_FS_REGULAR) {
1102		rule.allow_loopback = !flow_spec->block_mc_loopback;
1103		rule.priority = MLX4_DOMAIN_UVERBS | priority;
1104		err = flow_spec_to_net_rule(&mdev->ib_dev, flow_spec,
1105					    &rule.list);
1106		if (err)
1107			goto free_list;
1108	}
1109
1110	err = mlx4_flow_attach(mdev->dev, &rule, &reg_id);
1111	if (err)
1112		goto free_list;
1113
1114	memcpy(&cm_flow->spec, flow_spec, sizeof(*flow_spec));
1115	cm_flow->reg_id = reg_id;
1116
1117	if (lock_qp)
1118		mutex_lock(&mqp->mutex);
1119	list_add(&cm_flow->list, &mqp->rules_list);
1120	if (lock_qp)
1121                mutex_unlock(&mqp->mutex);
1122
1123free_list:
1124	list_for_each_entry_safe(spec, tmp_spec, &rule.list, list) {
1125		list_del(&spec->list);
1126		kfree(spec);
1127	}
1128	if (err) {
1129		kfree(cm_flow);
1130		dev_err(mdev->ib_dev.dma_device,
1131			"Fail to attach flow steering rule\n");
1132	}
1133	return err;
1134}
1135
1136static int __mlx4_ib_flow_detach(struct mlx4_ib_dev *mdev,
1137				 struct mlx4_ib_qp *mqp,
1138				 struct ib_flow_spec *spec, int priority,
1139				 int lock_qp)
1140{
1141	struct mlx4_cm_steering *cm_flow;
1142	int ret;
1143
1144	if (lock_qp)
1145		mutex_lock(&mqp->mutex);
1146	list_for_each_entry(cm_flow, &mqp->rules_list, list) {
1147		if (!memcmp(&cm_flow->spec, spec, sizeof(*spec))) {
1148			list_del(&cm_flow->list);
1149			break;
1150		}
1151	}
1152	if (lock_qp)
1153		mutex_unlock(&mqp->mutex);
1154
1155	if (&cm_flow->list == &mqp->rules_list) {
1156		dev_err(mdev->ib_dev.dma_device, "Couldn't find reg_id for flow spec. "
1157			"Steering rule is left attached\n");
1158		return -EINVAL;
1159	}
1160
1161	ret = mlx4_flow_detach(mdev->dev, cm_flow->reg_id);
1162
1163	kfree(cm_flow);
1164	return ret;
1165}
1166
1167static int mlx4_ib_flow_attach(struct ib_qp *qp, struct ib_flow_spec *flow_spec,
1168			       int priority)
1169{
1170	return __mlx4_ib_flow_attach(to_mdev(qp->device), to_mqp(qp),
1171				     flow_spec, priority, 1);
1172}
1173
1174static int mlx4_ib_flow_detach(struct ib_qp *qp, struct ib_flow_spec *spec,
1175			       int priority)
1176{
1177	return __mlx4_ib_flow_detach(to_mdev(qp->device), to_mqp(qp),
1178				     spec, priority, 1);
1179}
1180
1181static struct mlx4_ib_gid_entry *find_gid_entry(struct mlx4_ib_qp *qp, u8 *raw)
1182{
1183	struct mlx4_ib_gid_entry *ge;
1184	struct mlx4_ib_gid_entry *tmp;
1185	struct mlx4_ib_gid_entry *ret = NULL;
1186
1187	list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
1188		if (!memcmp(raw, ge->gid.raw, 16)) {
1189			ret = ge;
1190			break;
1191		}
1192	}
1193
1194	return ret;
1195}
1196
1197static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
1198{
1199	int err;
1200	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
1201	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
1202	u8 mac[6];
1203	struct net_device *ndev;
1204	struct mlx4_ib_gid_entry *ge;
1205	u64 reg_id = 0;
1206
1207	if (mdev->dev->caps.steering_mode ==
1208	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
1209		struct mlx4_ib_steering *ib_steering;
1210
1211		mutex_lock(&mqp->mutex);
1212		list_for_each_entry(ib_steering, &mqp->steering_rules, list) {
1213			if (!memcmp(ib_steering->gid.raw, gid->raw, 16)) {
1214				list_del(&ib_steering->list);
1215				break;
1216			}
1217		}
1218		mutex_unlock(&mqp->mutex);
1219		if (&ib_steering->list == &mqp->steering_rules) {
1220			pr_err("Couldn't find reg_id for mgid. Steering rule is left attached\n");
1221			return -EINVAL;
1222		}
1223		reg_id = ib_steering->reg_id;
1224		kfree(ib_steering);
1225	}
1226
1227	err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
1228				    MLX4_PROT_IB_IPV6, reg_id);
1229	if (err)
1230		return err;
1231
1232	mutex_lock(&mqp->mutex);
1233	ge = find_gid_entry(mqp, gid->raw);
1234	if (ge) {
1235		spin_lock(&mdev->iboe.lock);
1236		ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
1237		if (ndev)
1238			dev_hold(ndev);
1239		spin_unlock(&mdev->iboe.lock);
1240		rdma_get_mcast_mac((struct in6_addr *)gid, mac);
1241		if (ndev) {
1242			rtnl_lock();
1243			dev_mc_delete(mdev->iboe.netdevs[ge->port - 1], mac, 6, 0);
1244			rtnl_unlock();
1245			dev_put(ndev);
1246		}
1247		list_del(&ge->list);
1248		kfree(ge);
1249	} else
1250		pr_warn("could not find mgid entry\n");
1251
1252	mutex_unlock(&mqp->mutex);
1253
1254	return 0;
1255}
1256
1257static int init_node_data(struct mlx4_ib_dev *dev)
1258{
1259	struct ib_smp *in_mad  = NULL;
1260	struct ib_smp *out_mad = NULL;
1261	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
1262	int err = -ENOMEM;
1263
1264	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
1265	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
1266	if (!in_mad || !out_mad)
1267		goto out;
1268
1269	init_query_mad(in_mad);
1270	in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
1271	if (mlx4_is_master(dev->dev))
1272		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
1273
1274	err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
1275	if (err)
1276		goto out;
1277
1278	memcpy(dev->ib_dev.node_desc, out_mad->data, 64);
1279
1280	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
1281
1282	err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
1283	if (err)
1284		goto out;
1285
1286	dev->dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
1287	memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
1288
1289out:
1290	kfree(in_mad);
1291	kfree(out_mad);
1292	return err;
1293}
1294
1295static ssize_t show_hca(struct device *device, struct device_attribute *attr,
1296			char *buf)
1297{
1298	struct mlx4_ib_dev *dev =
1299		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
1300	return sprintf(buf, "MT%d\n", dev->dev->pdev->device);
1301}
1302
1303static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
1304			   char *buf)
1305{
1306	struct mlx4_ib_dev *dev =
1307		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
1308	return sprintf(buf, "%d.%d.%d\n", (int) (dev->dev->caps.fw_ver >> 32),
1309		       (int) (dev->dev->caps.fw_ver >> 16) & 0xffff,
1310		       (int) dev->dev->caps.fw_ver & 0xffff);
1311}
1312
1313static ssize_t show_rev(struct device *device, struct device_attribute *attr,
1314			char *buf)
1315{
1316	struct mlx4_ib_dev *dev =
1317		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
1318	return sprintf(buf, "%x\n", dev->dev->rev_id);
1319}
1320
1321static ssize_t show_board(struct device *device, struct device_attribute *attr,
1322			  char *buf)
1323{
1324	struct mlx4_ib_dev *dev =
1325		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
1326	return sprintf(buf, "%.*s\n", MLX4_BOARD_ID_LEN,
1327		       dev->dev->board_id);
1328}
1329
1330static DEVICE_ATTR(hw_rev,   S_IRUGO, show_rev,    NULL);
1331static DEVICE_ATTR(fw_ver,   S_IRUGO, show_fw_ver, NULL);
1332static DEVICE_ATTR(hca_type, S_IRUGO, show_hca,    NULL);
1333static DEVICE_ATTR(board_id, S_IRUGO, show_board,  NULL);
1334
1335static struct device_attribute *mlx4_class_attributes[] = {
1336	&dev_attr_hw_rev,
1337	&dev_attr_fw_ver,
1338	&dev_attr_hca_type,
1339	&dev_attr_board_id
1340};
1341
1342static void mlx4_addrconf_ifid_eui48(u8 *eui, u16 vlan_id, struct net_device *dev)
1343{
1344#ifdef __linux__
1345	memcpy(eui, dev->dev_addr, 3);
1346	memcpy(eui + 5, dev->dev_addr + 3, 3);
1347#else
1348        memcpy(eui, IF_LLADDR(dev), 3);
1349        memcpy(eui + 5, IF_LLADDR(dev) + 3, 3);
1350#endif
1351	if (vlan_id < 0x1000) {
1352		eui[3] = vlan_id >> 8;
1353		eui[4] = vlan_id & 0xff;
1354	} else {
1355		eui[3] = 0xff;
1356		eui[4] = 0xfe;
1357	}
1358	eui[0] ^= 2;
1359}
1360
1361static void update_gids_task(struct work_struct *work)
1362{
1363	struct update_gid_work *gw = container_of(work, struct update_gid_work, work);
1364	struct mlx4_cmd_mailbox *mailbox;
1365	union ib_gid *gids;
1366	int err;
1367	struct mlx4_dev	*dev = gw->dev->dev;
1368
1369	mailbox = mlx4_alloc_cmd_mailbox(dev);
1370	if (IS_ERR(mailbox)) {
1371		pr_warn("update gid table failed %ld\n", PTR_ERR(mailbox));
1372		return;
1373	}
1374
1375	gids = mailbox->buf;
1376	memcpy(gids, gw->gids, sizeof gw->gids);
1377
1378	err = mlx4_cmd(dev, mailbox->dma, MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
1379		       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
1380		       MLX4_CMD_WRAPPED);
1381	if (err)
1382		pr_warn("set port command failed\n");
1383	else {
1384		memcpy(gw->dev->iboe.gid_table[gw->port - 1], gw->gids, sizeof gw->gids);
1385		mlx4_ib_dispatch_event(gw->dev, gw->port, IB_EVENT_GID_CHANGE);
1386	}
1387
1388	mlx4_free_cmd_mailbox(dev, mailbox);
1389	kfree(gw);
1390}
1391
1392static int update_ipv6_gids(struct mlx4_ib_dev *dev, int port, int clear)
1393{
1394	struct net_device *ndev = dev->iboe.netdevs[port - 1];
1395	struct update_gid_work *work;
1396	struct net_device *tmp;
1397	int i;
1398	u8 *hits;
1399	union ib_gid gid;
1400	int index_free;
1401	int found;
1402	int need_update = 0;
1403	int max_gids;
1404	u16 vid;
1405
1406	work = kzalloc(sizeof *work, GFP_ATOMIC);
1407	if (!work)
1408		return -ENOMEM;
1409
1410	hits = kzalloc(128, GFP_ATOMIC);
1411	if (!hits) {
1412		kfree(work);
1413		return -ENOMEM;
1414	}
1415
1416	max_gids = dev->dev->caps.gid_table_len[port];
1417
1418#ifdef __linux__
1419	rcu_read_lock();
1420	for_each_netdev_rcu(&init_net, tmp) {
1421#else
1422        IFNET_RLOCK();
1423        TAILQ_FOREACH(tmp, &V_ifnet, if_link) {
1424#endif
1425		if (ndev && (tmp == ndev || rdma_vlan_dev_real_dev(tmp) == ndev)) {
1426			gid.global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
1427			vid = rdma_vlan_dev_vlan_id(tmp);
1428			mlx4_addrconf_ifid_eui48(&gid.raw[8], vid, ndev);
1429			found = 0;
1430			index_free = -1;
1431			for (i = 0; i < max_gids; ++i) {
1432				if (index_free < 0 &&
1433				    !memcmp(&dev->iboe.gid_table[port - 1][i], &zgid, sizeof zgid))
1434					index_free = i;
1435				if (!memcmp(&dev->iboe.gid_table[port - 1][i], &gid, sizeof gid)) {
1436					hits[i] = 1;
1437					found = 1;
1438					break;
1439				}
1440			}
1441
1442			if (!found) {
1443				if (tmp == ndev &&
1444				    (memcmp(&dev->iboe.gid_table[port - 1][0],
1445					    &gid, sizeof gid) ||
1446				     !memcmp(&dev->iboe.gid_table[port - 1][0],
1447					     &zgid, sizeof gid))) {
1448					dev->iboe.gid_table[port - 1][0] = gid;
1449					++need_update;
1450					hits[0] = 1;
1451				} else if (index_free >= 0) {
1452					dev->iboe.gid_table[port - 1][index_free] = gid;
1453					hits[index_free] = 1;
1454					++need_update;
1455				}
1456			}
1457		}
1458#ifdef __linux__
1459        }
1460	rcu_read_unlock();
1461#else
1462        }
1463        IFNET_RUNLOCK();
1464#endif
1465
1466	for (i = 0; i < max_gids; ++i)
1467		if (!hits[i]) {
1468			if (memcmp(&dev->iboe.gid_table[port - 1][i], &zgid, sizeof zgid))
1469				++need_update;
1470			dev->iboe.gid_table[port - 1][i] = zgid;
1471		}
1472
1473	if (need_update) {
1474		memcpy(work->gids, dev->iboe.gid_table[port - 1], sizeof work->gids);
1475		INIT_WORK(&work->work, update_gids_task);
1476		work->port = port;
1477		work->dev = dev;
1478		queue_work(wq, &work->work);
1479	} else
1480		kfree(work);
1481
1482	kfree(hits);
1483	return 0;
1484}
1485
1486static void handle_en_event(struct mlx4_ib_dev *dev, int port, unsigned long event)
1487{
1488	switch (event) {
1489	case NETDEV_UP:
1490#ifdef __linux__
1491	case NETDEV_CHANGEADDR:
1492#endif
1493		update_ipv6_gids(dev, port, 0);
1494		break;
1495
1496	case NETDEV_DOWN:
1497		update_ipv6_gids(dev, port, 1);
1498		dev->iboe.netdevs[port - 1] = NULL;
1499	}
1500}
1501
1502static void netdev_added(struct mlx4_ib_dev *dev, int port)
1503{
1504	update_ipv6_gids(dev, port, 0);
1505}
1506
1507static void netdev_removed(struct mlx4_ib_dev *dev, int port)
1508{
1509	update_ipv6_gids(dev, port, 1);
1510}
1511
1512static int mlx4_ib_netdev_event(struct notifier_block *this, unsigned long event,
1513				void *ptr)
1514{
1515	struct net_device *dev = ptr;
1516	struct mlx4_ib_dev *ibdev;
1517	struct net_device *oldnd;
1518	struct mlx4_ib_iboe *iboe;
1519	int port;
1520
1521#ifdef __linux__
1522	if (!net_eq(dev_net(dev), &init_net))
1523		return NOTIFY_DONE;
1524#endif
1525
1526	ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
1527	iboe = &ibdev->iboe;
1528
1529	spin_lock(&iboe->lock);
1530	mlx4_foreach_ib_transport_port(port, ibdev->dev) {
1531		oldnd = iboe->netdevs[port - 1];
1532		iboe->netdevs[port - 1] =
1533			mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port);
1534		if (oldnd != iboe->netdevs[port - 1]) {
1535			if (iboe->netdevs[port - 1])
1536				netdev_added(ibdev, port);
1537			else
1538				netdev_removed(ibdev, port);
1539		}
1540	}
1541
1542	if (dev == iboe->netdevs[0] ||
1543	    (iboe->netdevs[0] && rdma_vlan_dev_real_dev(dev) == iboe->netdevs[0]))
1544		handle_en_event(ibdev, 1, event);
1545	else if (dev == iboe->netdevs[1]
1546		 || (iboe->netdevs[1] && rdma_vlan_dev_real_dev(dev) == iboe->netdevs[1]))
1547		handle_en_event(ibdev, 2, event);
1548
1549	spin_unlock(&iboe->lock);
1550
1551	return NOTIFY_DONE;
1552}
1553
1554static void init_pkeys(struct mlx4_ib_dev *ibdev)
1555{
1556	int port;
1557	int slave;
1558	int i;
1559
1560	if (mlx4_is_master(ibdev->dev)) {
1561		for (slave = 0; slave <= ibdev->dev->num_vfs; ++slave) {
1562			for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
1563				for (i = 0;
1564				     i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
1565				     ++i) {
1566					ibdev->pkeys.virt2phys_pkey[slave][port - 1][i] =
1567					/* master has the identity virt2phys pkey mapping */
1568						(slave == mlx4_master_func_num(ibdev->dev) || !i) ? i :
1569							ibdev->dev->phys_caps.pkey_phys_table_len[port] - 1;
1570					mlx4_sync_pkey_table(ibdev->dev, slave, port, i,
1571							     ibdev->pkeys.virt2phys_pkey[slave][port - 1][i]);
1572				}
1573			}
1574		}
1575		/* initialize pkey cache */
1576		for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
1577			for (i = 0;
1578			     i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
1579			     ++i)
1580				ibdev->pkeys.phys_pkey_cache[port-1][i] =
1581					(i) ? 0 : 0xFFFF;
1582		}
1583	}
1584}
1585
1586static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
1587{
1588	char name[32];
1589	int eq_per_port = 0;
1590	int added_eqs = 0;
1591	int total_eqs = 0;
1592	int i, j, eq;
1593
1594	/* Legacy mode or comp_pool is not large enough */
1595	if (dev->caps.comp_pool == 0 ||
1596	    dev->caps.num_ports > dev->caps.comp_pool)
1597		return;
1598
1599	eq_per_port = rounddown_pow_of_two(dev->caps.comp_pool/
1600					dev->caps.num_ports);
1601
1602	/* Init eq table */
1603	added_eqs = 0;
1604	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
1605		added_eqs += eq_per_port;
1606
1607	total_eqs = dev->caps.num_comp_vectors + added_eqs;
1608
1609	ibdev->eq_table = kzalloc(total_eqs * sizeof(int), GFP_KERNEL);
1610	if (!ibdev->eq_table)
1611		return;
1612
1613	ibdev->eq_added = added_eqs;
1614
1615	eq = 0;
1616	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) {
1617		for (j = 0; j < eq_per_port; j++) {
1618			snprintf(name, sizeof(name), "mlx4-ib-%d-%d@%d:%d:%d:%d", i, j,
1619			    pci_get_domain(dev->pdev->dev.bsddev),
1620			    pci_get_bus(dev->pdev->dev.bsddev),
1621			    PCI_SLOT(dev->pdev->devfn),
1622			    PCI_FUNC(dev->pdev->devfn));
1623
1624			/* Set IRQ for specific name (per ring) */
1625			if (mlx4_assign_eq(dev, name,
1626					   &ibdev->eq_table[eq])) {
1627				/* Use legacy (same as mlx4_en driver) */
1628				pr_warn("Can't allocate EQ %d; reverting to legacy\n", eq);
1629				ibdev->eq_table[eq] =
1630					(eq % dev->caps.num_comp_vectors);
1631			}
1632			eq++;
1633		}
1634	}
1635
1636	/* Fill the reset of the vector with legacy EQ */
1637	for (i = 0, eq = added_eqs; i < dev->caps.num_comp_vectors; i++)
1638		ibdev->eq_table[eq++] = i;
1639
1640	/* Advertise the new number of EQs to clients */
1641	ibdev->ib_dev.num_comp_vectors = total_eqs;
1642}
1643
1644static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
1645{
1646	int i;
1647
1648	/* no additional eqs were added */
1649	if (!ibdev->eq_table)
1650		return;
1651
1652	/* Reset the advertised EQ number */
1653	ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
1654
1655	/* Free only the added eqs */
1656	for (i = 0; i < ibdev->eq_added; i++) {
1657		/* Don't free legacy eqs if used */
1658		if (ibdev->eq_table[i] <= dev->caps.num_comp_vectors)
1659			continue;
1660		mlx4_release_eq(dev, ibdev->eq_table[i]);
1661	}
1662
1663	kfree(ibdev->eq_table);
1664}
1665
1666/*
1667 * create show function and a device_attribute struct pointing to
1668 * the function for _name
1669 */
1670#define DEVICE_DIAG_RPRT_ATTR(_name, _offset, _op_mod)		\
1671static ssize_t show_rprt_##_name(struct device *dev,		\
1672				 struct device_attribute *attr,	\
1673				 char *buf){			\
1674	return show_diag_rprt(dev, buf, _offset, _op_mod);	\
1675}								\
1676static DEVICE_ATTR(_name, S_IRUGO, show_rprt_##_name, NULL);
1677
1678#define MLX4_DIAG_RPRT_CLEAR_DIAGS 3
1679
1680static size_t show_diag_rprt(struct device *device, char *buf,
1681			     u32 offset, u8 op_modifier)
1682{
1683	size_t ret;
1684	u32 counter_offset = offset;
1685	u32 diag_counter = 0;
1686	struct mlx4_ib_dev *dev = container_of(device, struct mlx4_ib_dev,
1687					       ib_dev.dev);
1688
1689	ret = mlx4_query_diag_counters(dev->dev, 1, op_modifier,
1690				       &counter_offset, &diag_counter);
1691	if (ret)
1692		return ret;
1693
1694	return sprintf(buf, "%d\n", diag_counter);
1695}
1696
1697static ssize_t clear_diag_counters(struct device *device,
1698				   struct device_attribute *attr,
1699				   const char *buf, size_t length)
1700{
1701	size_t ret;
1702	struct mlx4_ib_dev *dev = container_of(device, struct mlx4_ib_dev,
1703					       ib_dev.dev);
1704
1705	ret = mlx4_query_diag_counters(dev->dev, 0, MLX4_DIAG_RPRT_CLEAR_DIAGS,
1706				       NULL, NULL);
1707	if (ret)
1708		return ret;
1709
1710	return length;
1711}
1712
1713DEVICE_DIAG_RPRT_ATTR(rq_num_lle	, 0x00, 2);
1714DEVICE_DIAG_RPRT_ATTR(sq_num_lle	, 0x04, 2);
1715DEVICE_DIAG_RPRT_ATTR(rq_num_lqpoe	, 0x08, 2);
1716DEVICE_DIAG_RPRT_ATTR(sq_num_lqpoe 	, 0x0C, 2);
1717DEVICE_DIAG_RPRT_ATTR(rq_num_lpe	, 0x18, 2);
1718DEVICE_DIAG_RPRT_ATTR(sq_num_lpe	, 0x1C, 2);
1719DEVICE_DIAG_RPRT_ATTR(rq_num_wrfe	, 0x20, 2);
1720DEVICE_DIAG_RPRT_ATTR(sq_num_wrfe	, 0x24, 2);
1721DEVICE_DIAG_RPRT_ATTR(sq_num_mwbe	, 0x2C, 2);
1722DEVICE_DIAG_RPRT_ATTR(sq_num_bre	, 0x34, 2);
1723DEVICE_DIAG_RPRT_ATTR(rq_num_lae	, 0x38, 2);
1724DEVICE_DIAG_RPRT_ATTR(sq_num_rire	, 0x44, 2);
1725DEVICE_DIAG_RPRT_ATTR(rq_num_rire	, 0x48, 2);
1726DEVICE_DIAG_RPRT_ATTR(sq_num_rae	, 0x4C, 2);
1727DEVICE_DIAG_RPRT_ATTR(rq_num_rae	, 0x50, 2);
1728DEVICE_DIAG_RPRT_ATTR(sq_num_roe	, 0x54, 2);
1729DEVICE_DIAG_RPRT_ATTR(sq_num_tree	, 0x5C, 2);
1730DEVICE_DIAG_RPRT_ATTR(sq_num_rree	, 0x64, 2);
1731DEVICE_DIAG_RPRT_ATTR(rq_num_rnr	, 0x68, 2);
1732DEVICE_DIAG_RPRT_ATTR(sq_num_rnr	, 0x6C, 2);
1733DEVICE_DIAG_RPRT_ATTR(rq_num_oos	, 0x100, 2);
1734DEVICE_DIAG_RPRT_ATTR(sq_num_oos	, 0x104, 2);
1735DEVICE_DIAG_RPRT_ATTR(rq_num_mce	, 0x108, 2);
1736DEVICE_DIAG_RPRT_ATTR(rq_num_udsdprd	, 0x118, 2);
1737DEVICE_DIAG_RPRT_ATTR(rq_num_ucsdprd	, 0x120, 2);
1738DEVICE_DIAG_RPRT_ATTR(num_cqovf		, 0x1A0, 2);
1739DEVICE_DIAG_RPRT_ATTR(num_eqovf		, 0x1A4, 2);
1740DEVICE_DIAG_RPRT_ATTR(num_baddb		, 0x1A8, 2);
1741
1742static DEVICE_ATTR(clear_diag, S_IWUSR, NULL, clear_diag_counters);
1743
1744static struct attribute *diag_rprt_attrs[] = {
1745	&dev_attr_rq_num_lle.attr,
1746	&dev_attr_sq_num_lle.attr,
1747	&dev_attr_rq_num_lqpoe.attr,
1748	&dev_attr_sq_num_lqpoe.attr,
1749	&dev_attr_rq_num_lpe.attr,
1750	&dev_attr_sq_num_lpe.attr,
1751	&dev_attr_rq_num_wrfe.attr,
1752	&dev_attr_sq_num_wrfe.attr,
1753	&dev_attr_sq_num_mwbe.attr,
1754	&dev_attr_sq_num_bre.attr,
1755	&dev_attr_rq_num_lae.attr,
1756	&dev_attr_sq_num_rire.attr,
1757	&dev_attr_rq_num_rire.attr,
1758	&dev_attr_sq_num_rae.attr,
1759	&dev_attr_rq_num_rae.attr,
1760	&dev_attr_sq_num_roe.attr,
1761	&dev_attr_sq_num_tree.attr,
1762	&dev_attr_sq_num_rree.attr,
1763	&dev_attr_rq_num_rnr.attr,
1764	&dev_attr_sq_num_rnr.attr,
1765	&dev_attr_rq_num_oos.attr,
1766	&dev_attr_sq_num_oos.attr,
1767	&dev_attr_rq_num_mce.attr,
1768	&dev_attr_rq_num_udsdprd.attr,
1769	&dev_attr_rq_num_ucsdprd.attr,
1770	&dev_attr_num_cqovf.attr,
1771	&dev_attr_num_eqovf.attr,
1772	&dev_attr_num_baddb.attr,
1773	&dev_attr_clear_diag.attr,
1774	NULL
1775};
1776
1777static struct attribute_group diag_counters_group = {
1778	.name  = "diag_counters",
1779	.attrs  = diag_rprt_attrs
1780};
1781
1782#ifdef __linux__
1783static int mlx4_ib_proc_init(void)
1784{
1785	/* Creating procfs directories /proc/drivers/mlx4_ib/ &&
1786	      /proc/drivers/mlx4_ib/mrs for further use by the driver.
1787	*/
1788	int err;
1789
1790        mlx4_ib_driver_dir_entry = proc_mkdir(MLX4_IB_DRIVER_PROC_DIR_NAME,
1791				NULL);
1792	if (!mlx4_ib_driver_dir_entry) {
1793		pr_err("mlx4_ib_proc_init has failed for %s\n",
1794		       MLX4_IB_DRIVER_PROC_DIR_NAME);
1795		err = -ENODEV;
1796		goto error;
1797	}
1798
1799        mlx4_mrs_dir_entry = proc_mkdir(MLX4_IB_MRS_PROC_DIR_NAME,
1800					mlx4_ib_driver_dir_entry);
1801	if (!mlx4_mrs_dir_entry) {
1802		pr_err("mlx4_ib_proc_init has failed for %s\n",
1803		       MLX4_IB_MRS_PROC_DIR_NAME);
1804		err = -ENODEV;
1805		goto remove_entry;
1806	}
1807
1808	return 0;
1809
1810remove_entry:
1811	remove_proc_entry(MLX4_IB_DRIVER_PROC_DIR_NAME,
1812				NULL);
1813error:
1814	return err;
1815}
1816#endif
1817
1818static void init_dev_assign(void)
1819{
1820	int bus, slot, fn, ib_idx;
1821	char *p = dev_assign_str, *t;
1822	char curr_val[32] = {0};
1823	int ret;
1824	int j, i = 0;
1825
1826	memset(dr, 0, sizeof dr);
1827
1828	if (dev_assign_str[0] == 0)
1829		return;
1830
1831	while (strlen(p)) {
1832		ret = sscanf(p, "%02x:%02x.%x-%x", &bus, &slot, &fn, &ib_idx);
1833		if (ret != 4 || ib_idx < 0)
1834			goto err;
1835
1836		for (j = 0; j < i; j++)
1837			if (dr[j].nr == ib_idx)
1838				goto err;
1839
1840		dr[i].bus = bus;
1841		dr[i].dev = slot;
1842		dr[i].func = fn;
1843		dr[i].nr = ib_idx;
1844
1845		t = strchr(p, ',');
1846		sprintf(curr_val, "%02x:%02x.%x-%x", bus, slot, fn, ib_idx);
1847		if ((!t) && strlen(p) == strlen(curr_val))
1848			return;
1849
1850		if (!t || (t + 1) >= dev_assign_str + sizeof dev_assign_str)
1851			goto err;
1852
1853		++i;
1854		if (i >= MAX_DR)
1855			goto err;
1856
1857		p = t + 1;
1858	}
1859
1860	return;
1861err:
1862	memset(dr, 0, sizeof dr);
1863	printk(KERN_WARNING "mlx4_ib: The value of 'dev_assign_str' parameter "
1864			    "is incorrect. The parameter value is discarded!");
1865}
1866
1867static int mlx4_port_immutable(struct ib_device *ibdev, u8 port_num,
1868			       struct ib_port_immutable *immutable)
1869{
1870	struct ib_port_attr attr;
1871	struct mlx4_ib_dev *mdev = to_mdev(ibdev);
1872	int err;
1873
1874	if (mlx4_ib_port_link_layer(ibdev, port_num) == IB_LINK_LAYER_INFINIBAND) {
1875		immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
1876		immutable->max_mad_size = IB_MGMT_MAD_SIZE;
1877	} else {
1878		if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)
1879			immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
1880		if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCEV2)
1881			immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
1882				RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
1883		immutable->core_cap_flags |= RDMA_CORE_PORT_RAW_PACKET;
1884		if (immutable->core_cap_flags & (RDMA_CORE_PORT_IBA_ROCE |
1885		    RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP))
1886			immutable->max_mad_size = IB_MGMT_MAD_SIZE;
1887	}
1888
1889	err = ib_query_port(ibdev, port_num, &attr);
1890	if (err)
1891		return err;
1892
1893	immutable->pkey_tbl_len = attr.pkey_tbl_len;
1894	immutable->gid_tbl_len = attr.gid_tbl_len;
1895
1896	return 0;
1897}
1898
1899static void *mlx4_ib_add(struct mlx4_dev *dev)
1900{
1901	struct mlx4_ib_dev *ibdev;
1902	int num_ports = 0;
1903	int i, j;
1904	int err;
1905	struct mlx4_ib_iboe *iboe;
1906
1907	printk(KERN_INFO "%s", mlx4_ib_version);
1908
1909	mlx4_foreach_ib_transport_port(i, dev)
1910		num_ports++;
1911
1912	/* No point in registering a device with no ports... */
1913	if (num_ports == 0)
1914		return NULL;
1915
1916	ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev);
1917	if (!ibdev) {
1918		dev_err(&dev->pdev->dev, "Device struct alloc failed\n");
1919		return NULL;
1920	}
1921
1922	iboe = &ibdev->iboe;
1923
1924	if (mlx4_pd_alloc(dev, &ibdev->priv_pdn))
1925		goto err_dealloc;
1926
1927	if (mlx4_uar_alloc(dev, &ibdev->priv_uar))
1928		goto err_pd;
1929
1930	ibdev->priv_uar.map = ioremap(ibdev->priv_uar.pfn << PAGE_SHIFT,
1931		PAGE_SIZE);
1932
1933	if (!ibdev->priv_uar.map)
1934		goto err_uar;
1935
1936	MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);
1937
1938	ibdev->dev = dev;
1939
1940	strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
1941	ibdev->ib_dev.owner		= THIS_MODULE;
1942	ibdev->ib_dev.node_type		= RDMA_NODE_IB_CA;
1943	ibdev->ib_dev.local_dma_lkey	= dev->caps.reserved_lkey;
1944	ibdev->num_ports		= num_ports;
1945	ibdev->ib_dev.phys_port_cnt     = ibdev->num_ports;
1946	ibdev->ib_dev.num_comp_vectors	= dev->caps.num_comp_vectors;
1947	ibdev->ib_dev.dma_device	= &dev->pdev->dev;
1948
1949	if (dev->caps.userspace_caps)
1950		ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION;
1951	else
1952		ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION;
1953
1954	ibdev->ib_dev.uverbs_cmd_mask	=
1955		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT)		|
1956		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)	|
1957		(1ull << IB_USER_VERBS_CMD_QUERY_PORT)		|
1958		(1ull << IB_USER_VERBS_CMD_ALLOC_PD)		|
1959		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD)		|
1960		(1ull << IB_USER_VERBS_CMD_REG_MR)		|
1961		(1ull << IB_USER_VERBS_CMD_DEREG_MR)		|
1962		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)	|
1963		(1ull << IB_USER_VERBS_CMD_CREATE_CQ)		|
1964		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ)		|
1965		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ)		|
1966		(1ull << IB_USER_VERBS_CMD_CREATE_QP)		|
1967		(1ull << IB_USER_VERBS_CMD_MODIFY_QP)		|
1968		(1ull << IB_USER_VERBS_CMD_QUERY_QP)		|
1969		(1ull << IB_USER_VERBS_CMD_DESTROY_QP)		|
1970		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)	|
1971		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST)	|
1972		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ)		|
1973		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)		|
1974		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ)		|
1975		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)		|
1976		(1ull << IB_USER_VERBS_CMD_CREATE_XSRQ)		|
1977		(1ull << IB_USER_VERBS_CMD_OPEN_QP)		|
1978		(1ull << IB_USER_VERBS_CMD_ATTACH_FLOW)		|
1979		(1ull << IB_USER_VERBS_CMD_DETACH_FLOW)		|
1980		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ);
1981
1982	ibdev->ib_dev.query_device	= mlx4_ib_query_device;
1983	ibdev->ib_dev.query_port	= mlx4_ib_query_port;
1984	ibdev->ib_dev.get_link_layer	= mlx4_ib_port_link_layer;
1985	ibdev->ib_dev.query_gid		= mlx4_ib_query_gid;
1986	ibdev->ib_dev.query_pkey	= mlx4_ib_query_pkey;
1987	ibdev->ib_dev.modify_device	= mlx4_ib_modify_device;
1988	ibdev->ib_dev.modify_port	= mlx4_ib_modify_port;
1989	ibdev->ib_dev.alloc_ucontext	= mlx4_ib_alloc_ucontext;
1990	ibdev->ib_dev.dealloc_ucontext	= mlx4_ib_dealloc_ucontext;
1991	ibdev->ib_dev.mmap		= mlx4_ib_mmap;
1992#ifdef __linux__
1993	ibdev->ib_dev.get_unmapped_area = mlx4_ib_get_unmapped_area;
1994#endif
1995	ibdev->ib_dev.alloc_pd		= mlx4_ib_alloc_pd;
1996	ibdev->ib_dev.dealloc_pd	= mlx4_ib_dealloc_pd;
1997	ibdev->ib_dev.create_ah		= mlx4_ib_create_ah;
1998	ibdev->ib_dev.query_ah		= mlx4_ib_query_ah;
1999	ibdev->ib_dev.destroy_ah	= mlx4_ib_destroy_ah;
2000	ibdev->ib_dev.create_srq	= mlx4_ib_create_srq;
2001	ibdev->ib_dev.modify_srq	= mlx4_ib_modify_srq;
2002	ibdev->ib_dev.query_srq		= mlx4_ib_query_srq;
2003	ibdev->ib_dev.destroy_srq	= mlx4_ib_destroy_srq;
2004	ibdev->ib_dev.post_srq_recv	= mlx4_ib_post_srq_recv;
2005	ibdev->ib_dev.create_qp		= mlx4_ib_create_qp;
2006	ibdev->ib_dev.modify_qp		= mlx4_ib_modify_qp;
2007	ibdev->ib_dev.query_qp		= mlx4_ib_query_qp;
2008	ibdev->ib_dev.destroy_qp	= mlx4_ib_destroy_qp;
2009	ibdev->ib_dev.post_send		= mlx4_ib_post_send;
2010	ibdev->ib_dev.post_recv		= mlx4_ib_post_recv;
2011	ibdev->ib_dev.create_cq		= mlx4_ib_create_cq;
2012	ibdev->ib_dev.modify_cq		= mlx4_ib_modify_cq;
2013	ibdev->ib_dev.resize_cq		= mlx4_ib_resize_cq;
2014	ibdev->ib_dev.destroy_cq	= mlx4_ib_destroy_cq;
2015	ibdev->ib_dev.poll_cq		= mlx4_ib_poll_cq;
2016	ibdev->ib_dev.req_notify_cq	= mlx4_ib_arm_cq;
2017	ibdev->ib_dev.get_dma_mr	= mlx4_ib_get_dma_mr;
2018	ibdev->ib_dev.reg_user_mr	= mlx4_ib_reg_user_mr;
2019	ibdev->ib_dev.dereg_mr		= mlx4_ib_dereg_mr;
2020	ibdev->ib_dev.alloc_fast_reg_mr = mlx4_ib_alloc_fast_reg_mr;
2021	ibdev->ib_dev.alloc_fast_reg_page_list = mlx4_ib_alloc_fast_reg_page_list;
2022	ibdev->ib_dev.free_fast_reg_page_list  = mlx4_ib_free_fast_reg_page_list;
2023	ibdev->ib_dev.attach_mcast	= mlx4_ib_mcg_attach;
2024	ibdev->ib_dev.detach_mcast	= mlx4_ib_mcg_detach;
2025	ibdev->ib_dev.attach_flow	= mlx4_ib_flow_attach;
2026	ibdev->ib_dev.detach_flow	= mlx4_ib_flow_detach;
2027	ibdev->ib_dev.process_mad	= mlx4_ib_process_mad;
2028	ibdev->ib_dev.get_port_immutable = mlx4_port_immutable;
2029
2030	if (!mlx4_is_slave(ibdev->dev)) {
2031		ibdev->ib_dev.alloc_fmr		= mlx4_ib_fmr_alloc;
2032		ibdev->ib_dev.map_phys_fmr	= mlx4_ib_map_phys_fmr;
2033		ibdev->ib_dev.unmap_fmr		= mlx4_ib_unmap_fmr;
2034		ibdev->ib_dev.dealloc_fmr	= mlx4_ib_fmr_dealloc;
2035	}
2036
2037	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) {
2038		ibdev->ib_dev.alloc_xrcd = mlx4_ib_alloc_xrcd;
2039		ibdev->ib_dev.dealloc_xrcd = mlx4_ib_dealloc_xrcd;
2040		ibdev->ib_dev.uverbs_cmd_mask |=
2041			(1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
2042			(1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
2043	}
2044
2045	mlx4_ib_alloc_eqs(dev, ibdev);
2046
2047	spin_lock_init(&iboe->lock);
2048
2049	if (init_node_data(ibdev))
2050		goto err_map;
2051
2052	for (i = 0; i < ibdev->num_ports; ++i) {
2053		if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
2054						IB_LINK_LAYER_ETHERNET) {
2055			err = mlx4_counter_alloc(ibdev->dev, i + 1, &ibdev->counters[i]);
2056			if (err)
2057				ibdev->counters[i] = -1;
2058		} else
2059				ibdev->counters[i] = -1;
2060	}
2061
2062	spin_lock_init(&ibdev->sm_lock);
2063	mutex_init(&ibdev->cap_mask_mutex);
2064
2065	if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED &&
2066	    !mlx4_is_slave(dev)) {
2067		ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS;
2068		err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count,
2069					    MLX4_IB_UC_STEER_QPN_ALIGN, &ibdev->steer_qpn_base, 0);
2070		if (err)
2071			goto err_counter;
2072
2073		ibdev->ib_uc_qpns_bitmap =
2074			kmalloc(BITS_TO_LONGS(ibdev->steer_qpn_count) *
2075				sizeof(long),
2076				GFP_KERNEL);
2077		if (!ibdev->ib_uc_qpns_bitmap) {
2078			dev_err(&dev->pdev->dev, "bit map alloc failed\n");
2079			goto err_steer_qp_release;
2080		}
2081
2082		bitmap_zero(ibdev->ib_uc_qpns_bitmap, ibdev->steer_qpn_count);
2083
2084		err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE(dev, ibdev->steer_qpn_base,
2085				ibdev->steer_qpn_base + ibdev->steer_qpn_count - 1);
2086		if (err)
2087			goto err_steer_free_bitmap;
2088	}
2089
2090	if (ib_register_device(&ibdev->ib_dev, NULL))
2091		goto err_steer_free_bitmap;
2092
2093	if (mlx4_ib_mad_init(ibdev))
2094		goto err_reg;
2095
2096	if (mlx4_ib_init_sriov(ibdev))
2097		goto err_mad;
2098
2099	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE && !iboe->nb.notifier_call) {
2100		iboe->nb.notifier_call = mlx4_ib_netdev_event;
2101		err = register_netdevice_notifier(&iboe->nb);
2102		if (err)
2103			goto err_sriov;
2104	}
2105
2106	for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
2107		if (device_create_file(&ibdev->ib_dev.dev,
2108				       mlx4_class_attributes[j]))
2109			goto err_notif;
2110	}
2111	if (sysfs_create_group(&ibdev->ib_dev.dev.kobj, &diag_counters_group))
2112		goto err_notif;
2113
2114	ibdev->ib_active = true;
2115
2116	if (mlx4_is_mfunc(ibdev->dev))
2117		init_pkeys(ibdev);
2118
2119	/* create paravirt contexts for any VFs which are active */
2120	if (mlx4_is_master(ibdev->dev)) {
2121		for (j = 0; j < MLX4_MFUNC_MAX; j++) {
2122			if (j == mlx4_master_func_num(ibdev->dev))
2123				continue;
2124			if (mlx4_is_slave_active(ibdev->dev, j))
2125				do_slave_init(ibdev, j, 1);
2126		}
2127	}
2128	return ibdev;
2129
2130err_notif:
2131	if (unregister_netdevice_notifier(&ibdev->iboe.nb))
2132		pr_warn("failure unregistering notifier\n");
2133	flush_workqueue(wq);
2134
2135err_sriov:
2136	mlx4_ib_close_sriov(ibdev);
2137
2138err_mad:
2139	mlx4_ib_mad_cleanup(ibdev);
2140
2141err_reg:
2142	ib_unregister_device(&ibdev->ib_dev);
2143
2144err_steer_free_bitmap:
2145	kfree(ibdev->ib_uc_qpns_bitmap);
2146
2147err_steer_qp_release:
2148	if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED)
2149		mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
2150				ibdev->steer_qpn_count);
2151err_counter:
2152	for (; i; --i)
2153		if (ibdev->counters[i - 1] != -1)
2154			mlx4_counter_free(ibdev->dev, i, ibdev->counters[i - 1]);
2155
2156err_map:
2157	iounmap(ibdev->priv_uar.map);
2158	mlx4_ib_free_eqs(dev, ibdev);
2159
2160err_uar:
2161	mlx4_uar_free(dev, &ibdev->priv_uar);
2162
2163err_pd:
2164	mlx4_pd_free(dev, ibdev->priv_pdn);
2165
2166err_dealloc:
2167	ib_dealloc_device(&ibdev->ib_dev);
2168
2169	return NULL;
2170}
2171
2172int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn)
2173{
2174	int offset;
2175
2176	WARN_ON(!dev->ib_uc_qpns_bitmap);
2177
2178	offset = bitmap_find_free_region(dev->ib_uc_qpns_bitmap,
2179					 dev->steer_qpn_count,
2180					 get_count_order(count));
2181	if (offset < 0)
2182		return offset;
2183
2184	*qpn = dev->steer_qpn_base + offset;
2185	return 0;
2186}
2187
2188void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count)
2189{
2190	if (!qpn ||
2191	    dev->dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED)
2192		return;
2193
2194	BUG_ON(qpn < dev->steer_qpn_base);
2195
2196	bitmap_release_region(dev->ib_uc_qpns_bitmap,
2197			qpn - dev->steer_qpn_base, get_count_order(count));
2198}
2199
2200int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
2201			 int is_attach)
2202{
2203	struct ib_flow_spec spec = {
2204		.type = IB_FLOW_IB_UC,
2205		.l2_id.ib_uc.qpn  = mqp->ibqp.qp_num,
2206	};
2207
2208	return is_attach ?
2209		__mlx4_ib_flow_attach(mdev, mqp, &spec, MLX4_DOMAIN_NIC, 0)
2210                : __mlx4_ib_flow_detach(mdev, mqp, &spec, MLX4_DOMAIN_NIC, 0);
2211}
2212
2213static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
2214{
2215	struct mlx4_ib_dev *ibdev = ibdev_ptr;
2216	int p,j;
2217
2218	mlx4_ib_close_sriov(ibdev);
2219	sysfs_remove_group(&ibdev->ib_dev.dev.kobj, &diag_counters_group);
2220	mlx4_ib_mad_cleanup(ibdev);
2221
2222	for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
2223		device_remove_file(&ibdev->ib_dev.dev, mlx4_class_attributes[j]);
2224	}
2225
2226	ib_unregister_device(&ibdev->ib_dev);
2227
2228	if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
2229		mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
2230				ibdev->steer_qpn_count);
2231		kfree(ibdev->ib_uc_qpns_bitmap);
2232	}
2233
2234	if (ibdev->iboe.nb.notifier_call) {
2235		if (unregister_netdevice_notifier(&ibdev->iboe.nb))
2236			pr_warn("failure unregistering notifier\n");
2237		ibdev->iboe.nb.notifier_call = NULL;
2238	}
2239	iounmap(ibdev->priv_uar.map);
2240	for (p = 0; p < ibdev->num_ports; ++p)
2241		if (ibdev->counters[p] != -1)
2242			mlx4_counter_free(ibdev->dev, p + 1, ibdev->counters[p]);
2243	mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
2244		mlx4_CLOSE_PORT(dev, p);
2245
2246	mlx4_ib_free_eqs(dev, ibdev);
2247
2248	mlx4_uar_free(dev, &ibdev->priv_uar);
2249	mlx4_pd_free(dev, ibdev->priv_pdn);
2250	ib_dealloc_device(&ibdev->ib_dev);
2251}
2252
2253static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
2254{
2255	struct mlx4_ib_demux_work **dm = NULL;
2256	struct mlx4_dev *dev = ibdev->dev;
2257	int i;
2258	unsigned long flags;
2259
2260	if (!mlx4_is_master(dev))
2261		return;
2262
2263	dm = kcalloc(dev->caps.num_ports, sizeof *dm, GFP_ATOMIC);
2264	if (!dm) {
2265		pr_err("failed to allocate memory for tunneling qp update\n");
2266		goto out;
2267	}
2268
2269	for (i = 0; i < dev->caps.num_ports; i++) {
2270		dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC);
2271		if (!dm[i]) {
2272			pr_err("failed to allocate memory for tunneling qp update work struct\n");
2273			for (i = 0; i < dev->caps.num_ports; i++) {
2274				if (dm[i])
2275					kfree(dm[i]);
2276			}
2277			goto out;
2278		}
2279	}
2280	/* initialize or tear down tunnel QPs for the slave */
2281	for (i = 0; i < dev->caps.num_ports; i++) {
2282		INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work);
2283		dm[i]->port = i + 1;
2284		dm[i]->slave = slave;
2285		dm[i]->do_init = do_init;
2286		dm[i]->dev = ibdev;
2287		spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
2288		if (!ibdev->sriov.is_going_down)
2289			queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work);
2290		spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
2291	}
2292out:
2293	if (dm)
2294		kfree(dm);
2295	return;
2296}
2297
2298static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
2299			  enum mlx4_dev_event event, unsigned long param)
2300{
2301	struct ib_event ibev;
2302	struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr);
2303	struct mlx4_eqe *eqe = NULL;
2304	struct ib_event_work *ew;
2305	int p = 0;
2306
2307	if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE)
2308		eqe = (struct mlx4_eqe *)param;
2309	else
2310		p = (int) param;
2311
2312	switch (event) {
2313	case MLX4_DEV_EVENT_PORT_UP:
2314		if (p > ibdev->num_ports)
2315			return;
2316		if (mlx4_is_master(dev) &&
2317		    rdma_port_get_link_layer(&ibdev->ib_dev, p) ==
2318			IB_LINK_LAYER_INFINIBAND) {
2319			mlx4_ib_invalidate_all_guid_record(ibdev, p);
2320		}
2321		mlx4_ib_info((struct ib_device *) ibdev_ptr,
2322			     "Port %d logical link is up\n", p);
2323		ibev.event = IB_EVENT_PORT_ACTIVE;
2324		break;
2325
2326	case MLX4_DEV_EVENT_PORT_DOWN:
2327		if (p > ibdev->num_ports)
2328			return;
2329		mlx4_ib_info((struct ib_device *) ibdev_ptr,
2330			     "Port %d logical link is down\n", p);
2331		ibev.event = IB_EVENT_PORT_ERR;
2332		break;
2333
2334	case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
2335		ibdev->ib_active = false;
2336		ibev.event = IB_EVENT_DEVICE_FATAL;
2337		break;
2338
2339	case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
2340		ew = kmalloc(sizeof *ew, GFP_ATOMIC);
2341		if (!ew) {
2342			pr_err("failed to allocate memory for events work\n");
2343			break;
2344		}
2345
2346		INIT_WORK(&ew->work, handle_port_mgmt_change_event);
2347		memcpy(&ew->ib_eqe, eqe, sizeof *eqe);
2348		ew->ib_dev = ibdev;
2349		/* need to queue only for port owner, which uses GEN_EQE */
2350		if (mlx4_is_master(dev))
2351			queue_work(wq, &ew->work);
2352		else
2353			handle_port_mgmt_change_event(&ew->work);
2354		return;
2355
2356	case MLX4_DEV_EVENT_SLAVE_INIT:
2357		/* here, p is the slave id */
2358		do_slave_init(ibdev, p, 1);
2359		return;
2360
2361	case MLX4_DEV_EVENT_SLAVE_SHUTDOWN:
2362		/* here, p is the slave id */
2363		do_slave_init(ibdev, p, 0);
2364		return;
2365
2366	default:
2367		return;
2368	}
2369
2370	ibev.device	      = ibdev_ptr;
2371	ibev.element.port_num = (u8) p;
2372
2373	ib_dispatch_event(&ibev);
2374}
2375
2376static struct mlx4_interface mlx4_ib_interface = {
2377	.add		= mlx4_ib_add,
2378	.remove		= mlx4_ib_remove,
2379	.event		= mlx4_ib_event,
2380	.protocol	= MLX4_PROT_IB_IPV6
2381};
2382
2383static int __init mlx4_ib_init(void)
2384{
2385	int err;
2386
2387	wq = create_singlethread_workqueue("mlx4_ib");
2388	if (!wq)
2389		return -ENOMEM;
2390
2391#ifdef __linux__
2392	err = mlx4_ib_proc_init();
2393	if (err)
2394		goto clean_wq;
2395#endif
2396
2397	err = mlx4_ib_mcg_init();
2398	if (err)
2399		goto clean_proc;
2400
2401	init_dev_assign();
2402
2403	err = mlx4_register_interface(&mlx4_ib_interface);
2404	if (err)
2405		goto clean_mcg;
2406
2407	return 0;
2408
2409clean_mcg:
2410	mlx4_ib_mcg_destroy();
2411
2412clean_proc:
2413#ifdef __linux__
2414	remove_proc_entry(MLX4_IB_MRS_PROC_DIR_NAME,
2415			  mlx4_ib_driver_dir_entry);
2416	remove_proc_entry(MLX4_IB_DRIVER_PROC_DIR_NAME, NULL);
2417
2418clean_wq:
2419#endif
2420	destroy_workqueue(wq);
2421	return err;
2422}
2423
2424static void __exit mlx4_ib_cleanup(void)
2425{
2426	mlx4_unregister_interface(&mlx4_ib_interface);
2427	mlx4_ib_mcg_destroy();
2428	destroy_workqueue(wq);
2429
2430	/* Remove proc entries */
2431#ifdef __linux__
2432	remove_proc_entry(MLX4_IB_MRS_PROC_DIR_NAME,
2433				mlx4_ib_driver_dir_entry);
2434	remove_proc_entry(MLX4_IB_DRIVER_PROC_DIR_NAME, NULL);
2435#endif
2436
2437}
2438
2439module_init_order(mlx4_ib_init, SI_ORDER_MIDDLE);
2440module_exit(mlx4_ib_cleanup);
2441
2442#undef MODULE_VERSION
2443#include <sys/module.h>
2444static int
2445mlx4ib_evhand(module_t mod, int event, void *arg)
2446{
2447        return (0);
2448}
2449
2450static moduledata_t mlx4ib_mod = {
2451        .name = "mlx4ib",
2452        .evhand = mlx4ib_evhand,
2453};
2454
2455DECLARE_MODULE(mlx4ib, mlx4ib_mod, SI_SUB_OFED_PREINIT, SI_ORDER_ANY);
2456MODULE_DEPEND(mlx4ib, mlx4, 1, 1, 1);
2457MODULE_DEPEND(mlx4ib, ibcore, 1, 1, 1);
2458