mad.c revision 271127
1/*
2 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses.  You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 *     Redistribution and use in source and binary forms, with or
11 *     without modification, are permitted provided that the following
12 *     conditions are met:
13 *
14 *      - Redistributions of source code must retain the above
15 *        copyright notice, this list of conditions and the following
16 *        disclaimer.
17 *
18 *      - Redistributions in binary form must reproduce the above
19 *        copyright notice, this list of conditions and the following
20 *        disclaimer in the documentation and/or other materials
21 *        provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <rdma/ib_mad.h>
34#include <rdma/ib_smi.h>
35#include <rdma/ib_sa.h>
36#include <rdma/ib_cache.h>
37
38#include <linux/random.h>
39#include <linux/mlx4/cmd.h>
40#include <linux/gfp.h>
41#include <rdma/ib_pma.h>
42
43#include "mlx4_ib.h"
44
45enum {
46	MLX4_IB_VENDOR_CLASS1 = 0x9,
47	MLX4_IB_VENDOR_CLASS2 = 0xa
48};
49
50#define MLX4_TUN_SEND_WRID_SHIFT 34
51#define MLX4_TUN_QPN_SHIFT 32
52#define MLX4_TUN_WRID_RECV (((u64) 1) << MLX4_TUN_SEND_WRID_SHIFT)
53#define MLX4_TUN_SET_WRID_QPN(a) (((u64) ((a) & 0x3)) << MLX4_TUN_QPN_SHIFT)
54
55#define MLX4_TUN_IS_RECV(a)  (((a) >>  MLX4_TUN_SEND_WRID_SHIFT) & 0x1)
56#define MLX4_TUN_WRID_QPN(a) (((a) >> MLX4_TUN_QPN_SHIFT) & 0x3)
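
/*
 * Editor's illustrative sketch (not part of the original driver): how a
 * tunnel work request ID is packed with the macros above and unpacked by
 * the completion handlers.  Bits 0..31 carry the ring index, bits 32..33
 * the proxy QP type (0 = SMI, 1 = GSI), and bit 34 flags a receive WR.
 * The helper name below is hypothetical.
 */
static inline u64 mlx4_tun_example_wrid(unsigned int ring_index,
					enum ib_qp_type proxy_qpt, int is_recv)
{
	u64 wrid = ((u64) ring_index) | MLX4_TUN_SET_WRID_QPN(proxy_qpt);

	if (is_recv)
		wrid |= MLX4_TUN_WRID_RECV;

	/*
	 * MLX4_TUN_IS_RECV(wrid) recovers is_recv, MLX4_TUN_WRID_QPN(wrid)
	 * recovers the QP type, and wrid & (MLX4_NUM_TUNNEL_BUFS - 1)
	 * recovers the ring index (MLX4_NUM_TUNNEL_BUFS is a power of two).
	 */
	return wrid;
}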
57
58 /* Port mgmt change event handling */
59
60#define GET_BLK_PTR_FROM_EQE(eqe) be32_to_cpu(eqe->event.port_mgmt_change.params.tbl_change_info.block_ptr)
61#define GET_MASK_FROM_EQE(eqe) be32_to_cpu(eqe->event.port_mgmt_change.params.tbl_change_info.tbl_entries_mask)
62#define NUM_IDX_IN_PKEY_TBL_BLK 32
63#define GUID_TBL_ENTRY_SIZE 8	   /* size in bytes */
64#define GUID_TBL_BLK_NUM_ENTRIES 8
65#define GUID_TBL_BLK_SIZE (GUID_TBL_ENTRY_SIZE * GUID_TBL_BLK_NUM_ENTRIES)
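/*
 * Editor's note: per the macros above, the GUID table is accessed in blocks
 * of GUID_TBL_BLK_NUM_ENTRIES (8) entries of GUID_TBL_ENTRY_SIZE (8) bytes,
 * i.e. 64 bytes per block.  A 32-bit change mask from the EQE therefore
 * spans four such blocks (8 bits per block); see handle_slaves_guid_change().
 */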
66
67struct mlx4_mad_rcv_buf {
68	struct ib_grh grh;
69	u8 payload[256];
70} __packed;
71
72struct mlx4_mad_snd_buf {
73	u8 payload[256];
74} __packed;
75
76struct mlx4_tunnel_mad {
77	struct ib_grh grh;
78	struct mlx4_ib_tunnel_header hdr;
79	struct ib_mad mad;
80} __packed;
81
82struct mlx4_rcv_tunnel_mad {
83	struct mlx4_rcv_tunnel_hdr hdr;
84	struct ib_grh grh;
85	struct ib_mad mad;
86} __packed;
87
88static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u8 port_num);
89static void handle_lid_change_event(struct mlx4_ib_dev *dev, u8 port_num);
90static void __propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
91				int block, u32 change_bitmap);
92
93__be64 mlx4_ib_gen_node_guid(void)
94{
95#define NODE_GUID_HI	((u64) (((u64)IB_OPENIB_OUI) << 40))
96	return cpu_to_be64(NODE_GUID_HI | random());
97}
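/*
 * Editor's note: NODE_GUID_HI places the OpenIB OUI in the top 24 bits of
 * the generated GUID (bits 63..40); the remaining low bits come from
 * random().
 */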
98
99__be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
100{
101	return cpu_to_be64(atomic_inc_return(&ctx->tid)) |
102		cpu_to_be64(0xff00000000000000LL);
103}
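/*
 * Editor's note (an inference from the tunnelling code below): TIDs handed
 * out here are a per-context counter with the most significant byte forced
 * to 0xff.  The MAD de/multiplexers store a slave number in that byte, and
 * 255 is the value reserved for dom0/the master -- see the remapping in
 * mlx4_ib_demux_mad() and mlx4_ib_multiplex_mad().
 */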
104
105int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int mad_ifc_flags,
106		 int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
107		 void *in_mad, void *response_mad)
108{
109	struct mlx4_cmd_mailbox *inmailbox, *outmailbox;
110	void *inbox;
111	int err;
112	u32 in_modifier = port;
113	u8 op_modifier = 0;
114
115	inmailbox = mlx4_alloc_cmd_mailbox(dev->dev);
116	if (IS_ERR(inmailbox))
117		return PTR_ERR(inmailbox);
118	inbox = inmailbox->buf;
119
120	outmailbox = mlx4_alloc_cmd_mailbox(dev->dev);
121	if (IS_ERR(outmailbox)) {
122		mlx4_free_cmd_mailbox(dev->dev, inmailbox);
123		return PTR_ERR(outmailbox);
124	}
125
126	memcpy(inbox, in_mad, 256);
127
128	/*
129	 * Key check traps can't be generated unless we have in_wc to
130	 * tell us where to send the trap.
131	 */
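	/*
	 * Editor's note, inferred from the flag handling below: op_modifier
	 * bit 0x1 is set to ignore the M_Key check, 0x2 to ignore the B_Key
	 * check, 0x4 when the extended work-completion info block is
	 * appended after the MAD in the mailbox, and 0x8 when the network
	 * (unwrapped) view is wanted on a multi-function device.
	 */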
132	if ((mad_ifc_flags & MLX4_MAD_IFC_IGNORE_MKEY) || !in_wc)
133		op_modifier |= 0x1;
134	if ((mad_ifc_flags & MLX4_MAD_IFC_IGNORE_BKEY) || !in_wc)
135		op_modifier |= 0x2;
136	if (mlx4_is_mfunc(dev->dev) &&
137	    (mad_ifc_flags & MLX4_MAD_IFC_NET_VIEW || in_wc))
138		op_modifier |= 0x8;
139
140	if (in_wc) {
141		struct {
142			__be32		my_qpn;
143			u32		reserved1;
144			__be32		rqpn;
145			u8		sl;
146			u8		g_path;
147			u16		reserved2[2];
148			__be16		pkey;
149			u32		reserved3[11];
150			u8		grh[40];
151		} *ext_info;
152
153		memset(inbox + 256, 0, 256);
154		ext_info = inbox + 256;
155
156		ext_info->my_qpn = cpu_to_be32(in_wc->qp->qp_num);
157		ext_info->rqpn   = cpu_to_be32(in_wc->src_qp);
158		ext_info->sl     = in_wc->sl << 4;
159		ext_info->g_path = in_wc->dlid_path_bits |
160			(in_wc->wc_flags & IB_WC_GRH ? 0x80 : 0);
161		ext_info->pkey   = cpu_to_be16(in_wc->pkey_index);
162
163		if (in_grh)
164			memcpy(ext_info->grh, in_grh, 40);
165
166		op_modifier |= 0x4;
167
168		in_modifier |= in_wc->slid << 16;
169	}
170
171	err = mlx4_cmd_box(dev->dev, inmailbox->dma, outmailbox->dma, in_modifier,
172			   mlx4_is_master(dev->dev) ? (op_modifier & ~0x8) : op_modifier,
173			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
174			   (op_modifier & 0x8) ? MLX4_CMD_NATIVE : MLX4_CMD_WRAPPED);
175
176	if (!err)
177		memcpy(response_mad, outmailbox->buf, 256);
178
179	mlx4_free_cmd_mailbox(dev->dev, inmailbox);
180	mlx4_free_cmd_mailbox(dev->dev, outmailbox);
181
182	return err;
183}
184
185static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl)
186{
187	struct ib_ah *new_ah;
188	struct ib_ah_attr ah_attr;
189	unsigned long flags;
190
191	if (!dev->send_agent[port_num - 1][0])
192		return;
193
194	memset(&ah_attr, 0, sizeof ah_attr);
195	ah_attr.dlid     = lid;
196	ah_attr.sl       = sl;
197	ah_attr.port_num = port_num;
198
199	new_ah = ib_create_ah(dev->send_agent[port_num - 1][0]->qp->pd,
200			      &ah_attr);
201	if (IS_ERR(new_ah))
202		return;
203
204	spin_lock_irqsave(&dev->sm_lock, flags);
205	if (dev->sm_ah[port_num - 1])
206		ib_destroy_ah(dev->sm_ah[port_num - 1]);
207	dev->sm_ah[port_num - 1] = new_ah;
208	spin_unlock_irqrestore(&dev->sm_lock, flags);
209}
210
211/*
212 * Snoop SM MADs for port info, GUID info, and P_Key table sets, so we can
213 * synthesize LID change, Client-Rereg, GID change, and P_Key change events.
214 */
215static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad,
216		      u16 prev_lid)
217{
218	struct ib_port_info *pinfo;
219	u16 lid;
220	__be16 *base;
221	u32 bn, pkey_change_bitmap;
222	int i;
223
224
225	struct mlx4_ib_dev *dev = to_mdev(ibdev);
226	if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
227	     mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
228	    mad->mad_hdr.method == IB_MGMT_METHOD_SET)
229		switch (mad->mad_hdr.attr_id) {
230		case IB_SMP_ATTR_PORT_INFO:
231			pinfo = (struct ib_port_info *) ((struct ib_smp *) mad)->data;
232			lid = be16_to_cpu(pinfo->lid);
233
234			update_sm_ah(dev, port_num,
235				     be16_to_cpu(pinfo->sm_lid),
236				     pinfo->neighbormtu_mastersmsl & 0xf);
237
238			if (pinfo->clientrereg_resv_subnetto & 0x80)
239				handle_client_rereg_event(dev, port_num);
240
241			if (prev_lid != lid)
242				handle_lid_change_event(dev, port_num);
243			break;
244
245		case IB_SMP_ATTR_PKEY_TABLE:
246			if (!mlx4_is_mfunc(dev->dev)) {
247				mlx4_ib_dispatch_event(dev, port_num,
248						       IB_EVENT_PKEY_CHANGE);
249				break;
250			}
251
252			/* at this point, we are running in the master.
253			 * Slaves do not receive SMPs.
254			 */
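			/*
			 * Editor's note: the low 16 bits of attr_mod select a
			 * 32-entry P_Key table block; each entry is compared
			 * against the cached physical table to build a
			 * per-block change bitmap.
			 */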
255			bn  = be32_to_cpu(((struct ib_smp *)mad)->attr_mod) & 0xFFFF;
256			base = (__be16 *) &(((struct ib_smp *)mad)->data[0]);
257			pkey_change_bitmap = 0;
258			for (i = 0; i < 32; i++) {
259				pr_debug("PKEY[%d] = x%x\n",
260					 i + bn*32, be16_to_cpu(base[i]));
261				if (be16_to_cpu(base[i]) !=
262				    dev->pkeys.phys_pkey_cache[port_num - 1][i + bn*32]) {
263					pkey_change_bitmap |= (1 << i);
264					dev->pkeys.phys_pkey_cache[port_num - 1][i + bn*32] =
265						be16_to_cpu(base[i]);
266				}
267			}
268			pr_debug("PKEY Change event: port=%d, "
269				 "block=0x%x, change_bitmap=0x%x\n",
270				 port_num, bn, pkey_change_bitmap);
271
272			if (pkey_change_bitmap) {
273				mlx4_ib_dispatch_event(dev, port_num,
274						       IB_EVENT_PKEY_CHANGE);
275				if (!dev->sriov.is_going_down)
276					__propagate_pkey_ev(dev, port_num, bn,
277							    pkey_change_bitmap);
278			}
279			break;
280
281		case IB_SMP_ATTR_GUID_INFO:
282			/* paravirtualized master's guid is guid 0 -- does not change */
283			if (!mlx4_is_master(dev->dev))
284				mlx4_ib_dispatch_event(dev, port_num,
285						       IB_EVENT_GID_CHANGE);
286			/*if master, notify relevant slaves*/
287			if (mlx4_is_master(dev->dev) &&
288			    !dev->sriov.is_going_down) {
289				bn = be32_to_cpu(((struct ib_smp *)mad)->attr_mod);
290				mlx4_ib_update_cache_on_guid_change(dev, bn, port_num,
291								    (u8 *)(&((struct ib_smp *)mad)->data));
292				mlx4_ib_notify_slaves_on_guid_change(dev, bn, port_num,
293								     (u8 *)(&((struct ib_smp *)mad)->data));
294			}
295			break;
296
297		default:
298			break;
299		}
300}
301
302static void __propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
303				int block, u32 change_bitmap)
304{
305	int i, ix, slave, err;
306	int have_event = 0;
307
308	for (slave = 0; slave < dev->dev->caps.sqp_demux; slave++) {
309		if (slave == mlx4_master_func_num(dev->dev))
310			continue;
311		if (!mlx4_is_slave_active(dev->dev, slave))
312			continue;
313
314		have_event = 0;
315		for (i = 0; i < 32; i++) {
316			if (!(change_bitmap & (1 << i)))
317				continue;
318			for (ix = 0;
319			     ix < dev->dev->caps.pkey_table_len[port_num]; ix++) {
320				if (dev->pkeys.virt2phys_pkey[slave][port_num - 1]
321				    [ix] == i + 32 * block) {
322					err = mlx4_gen_pkey_eqe(dev->dev, slave, port_num);
323					pr_debug("propagate_pkey_ev: slave %d,"
324						 " port %d, ix %d (%d)\n",
325						 slave, port_num, ix, err);
326					have_event = 1;
327					break;
328				}
329			}
330			if (have_event)
331				break;
332		}
333	}
334}
335
336static void node_desc_override(struct ib_device *dev,
337			       struct ib_mad *mad)
338{
339	unsigned long flags;
340
341	if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
342	     mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
343	    mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP &&
344	    mad->mad_hdr.attr_id == IB_SMP_ATTR_NODE_DESC) {
345		spin_lock_irqsave(&to_mdev(dev)->sm_lock, flags);
346		memcpy(((struct ib_smp *) mad)->data, dev->node_desc, 64);
347		spin_unlock_irqrestore(&to_mdev(dev)->sm_lock, flags);
348	}
349}
350
351static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, struct ib_mad *mad)
352{
353	int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED;
354	struct ib_mad_send_buf *send_buf;
355	struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn];
356	int ret;
357	unsigned long flags;
358
359	if (agent) {
360		send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
361					      IB_MGMT_MAD_DATA, GFP_ATOMIC);
362		if (IS_ERR(send_buf))
363			return;
364		/*
365		 * We rely here on the fact that MLX QPs don't use the
366		 * address handle after the send is posted (this is
367		 * wrong following the IB spec strictly, but we know
368		 * it's OK for our devices).
369		 */
370		spin_lock_irqsave(&dev->sm_lock, flags);
371		memcpy(send_buf->mad, mad, sizeof *mad);
372		if ((send_buf->ah = dev->sm_ah[port_num - 1]))
373			ret = ib_post_send_mad(send_buf, NULL);
374		else
375			ret = -EINVAL;
376		spin_unlock_irqrestore(&dev->sm_lock, flags);
377
378		if (ret)
379			ib_free_send_mad(send_buf);
380	}
381}
382
383static int mlx4_ib_demux_sa_handler(struct ib_device *ibdev, int port, int slave,
384							     struct ib_sa_mad *sa_mad)
385{
386	int ret = 0;
387
388	/* dispatch to different sa handlers */
389	switch (be16_to_cpu(sa_mad->mad_hdr.attr_id)) {
390	case IB_SA_ATTR_MC_MEMBER_REC:
391		ret = mlx4_ib_mcg_demux_handler(ibdev, port, slave, sa_mad);
392		break;
393	default:
394		break;
395	}
396	return ret;
397}
398
399int mlx4_ib_find_real_gid(struct ib_device *ibdev, u8 port, __be64 guid)
400{
401	struct mlx4_ib_dev *dev = to_mdev(ibdev);
402	int i;
403
404	for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
405		if (dev->sriov.demux[port - 1].guid_cache[i] == guid)
406			return i;
407	}
408	return -1;
409}
410
411
412static int find_slave_port_pkey_ix(struct mlx4_ib_dev *dev, int slave,
413				   u8 port, u16 pkey, u16 *ix)
414{
415	int i, ret;
416	u8 unassigned_pkey_ix, pkey_ix, partial_ix = 0xFF;
417	u16 slot_pkey;
418
419	if (slave == mlx4_master_func_num(dev->dev))
420		return ib_find_cached_pkey(&dev->ib_dev, port, pkey, ix);
421
422	unassigned_pkey_ix = dev->dev->phys_caps.pkey_phys_table_len[port] - 1;
423
424	for (i = 0; i < dev->dev->caps.pkey_table_len[port]; i++) {
425		if (dev->pkeys.virt2phys_pkey[slave][port - 1][i] == unassigned_pkey_ix)
426			continue;
427
428		pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][i];
429
430		ret = ib_get_cached_pkey(&dev->ib_dev, port, pkey_ix, &slot_pkey);
431		if (ret)
432			continue;
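		/*
		 * Editor's note: bit 15 of a P_Key is the full-membership
		 * flag, so only the low 15 bits are compared; a full-member
		 * match is preferred over the first partial (limited) match.
		 */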
433		if ((slot_pkey & 0x7FFF) == (pkey & 0x7FFF)) {
434			if (slot_pkey & 0x8000) {
435				*ix = (u16) pkey_ix;
436				return 0;
437			} else {
438				/* take first partial pkey index found */
439				if (partial_ix == 0xFF)
440					partial_ix = pkey_ix;
441			}
442		}
443	}
444
445	if (partial_ix < 0xFF) {
446		*ix = (u16) partial_ix;
447		return 0;
448	}
449
450	return -EINVAL;
451}
452
453int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
454			  enum ib_qp_type dest_qpt, struct ib_wc *wc,
455			  struct ib_grh *grh, struct ib_mad *mad)
456{
457	struct ib_sge list;
458	struct ib_send_wr wr, *bad_wr;
459	struct mlx4_ib_demux_pv_ctx *tun_ctx;
460	struct mlx4_ib_demux_pv_qp *tun_qp;
461	struct mlx4_rcv_tunnel_mad *tun_mad;
462	struct ib_ah_attr attr;
463	struct ib_ah *ah;
464	struct ib_qp *src_qp = NULL;
465	unsigned tun_tx_ix = 0;
466	int dqpn;
467	int ret = 0;
468	u16 tun_pkey_ix;
469	u16 cached_pkey;
470	u8 is_eth = dev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
471
472	if (dest_qpt > IB_QPT_GSI)
473		return -EINVAL;
474
475	tun_ctx = dev->sriov.demux[port-1].tun[slave];
476
477	/* check if proxy qp created */
478	if (!tun_ctx || tun_ctx->state != DEMUX_PV_STATE_ACTIVE)
479		return -EAGAIN;
480
481	/* QP0 forwarding only for Dom0 */
482	if (!dest_qpt && (mlx4_master_func_num(dev->dev) != slave))
483		return -EINVAL;
484
485	if (!dest_qpt)
486		tun_qp = &tun_ctx->qp[0];
487	else
488		tun_qp = &tun_ctx->qp[1];
489
490	/* compute P_Key index to put in tunnel header for slave */
491	if (dest_qpt) {
492		u16 pkey_ix;
493		ret = ib_get_cached_pkey(&dev->ib_dev, port, wc->pkey_index, &cached_pkey);
494		if (ret)
495			return -EINVAL;
496
497		ret = find_slave_port_pkey_ix(dev, slave, port, cached_pkey, &pkey_ix);
498		if (ret)
499			return -EINVAL;
500		tun_pkey_ix = pkey_ix;
501	} else
502		tun_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][0];
503
504	dqpn = dev->dev->phys_caps.base_proxy_sqpn + 8 * slave + port + (dest_qpt * 2) - 1;
505
506	/* get tunnel tx data buf for slave */
507	src_qp = tun_qp->qp;
508
509	/* create ah. Just need an empty one with the port num for the post send.
510	 * The driver will set the force loopback bit in post_send */
511	memset(&attr, 0, sizeof attr);
512	attr.port_num = port;
513	if (is_eth) {
514		memcpy(&attr.grh.dgid.raw[0], &grh->dgid.raw[0], 16);
515		attr.ah_flags = IB_AH_GRH;
516	}
517	ah = ib_create_ah(tun_ctx->pd, &attr);
518	if (IS_ERR(ah))
519		return -ENOMEM;
520
521	/* allocate a tunnel tx buffer now that the early failure checks have passed */
522	spin_lock(&tun_qp->tx_lock);
523	if (tun_qp->tx_ix_head - tun_qp->tx_ix_tail >=
524	    (MLX4_NUM_TUNNEL_BUFS - 1))
525		ret = -EAGAIN;
526	else
527		tun_tx_ix = (++tun_qp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1);
528	spin_unlock(&tun_qp->tx_lock);
529	if (ret)
530		goto out;
531
532	tun_mad = (struct mlx4_rcv_tunnel_mad *) (tun_qp->tx_ring[tun_tx_ix].buf.addr);
533	if (tun_qp->tx_ring[tun_tx_ix].ah)
534		ib_destroy_ah(tun_qp->tx_ring[tun_tx_ix].ah);
535	tun_qp->tx_ring[tun_tx_ix].ah = ah;
536	ib_dma_sync_single_for_cpu(&dev->ib_dev,
537				   tun_qp->tx_ring[tun_tx_ix].buf.map,
538				   sizeof (struct mlx4_rcv_tunnel_mad),
539				   DMA_TO_DEVICE);
540
541	/* copy over to tunnel buffer */
542	if (grh)
543		memcpy(&tun_mad->grh, grh, sizeof *grh);
544	memcpy(&tun_mad->mad, mad, sizeof *mad);
545
546	/* adjust tunnel data */
547	tun_mad->hdr.pkey_index = cpu_to_be16(tun_pkey_ix);
548	tun_mad->hdr.sl_vid = cpu_to_be16(((u16)(wc->sl)) << 12);
549	tun_mad->hdr.slid_mac_47_32 = cpu_to_be16(wc->slid);
550	tun_mad->hdr.flags_src_qp = cpu_to_be32(wc->src_qp & 0xFFFFFF);
551	tun_mad->hdr.g_ml_path = (grh && (wc->wc_flags & IB_WC_GRH)) ? 0x80 : 0;
552
553	ib_dma_sync_single_for_device(&dev->ib_dev,
554				      tun_qp->tx_ring[tun_tx_ix].buf.map,
555				      sizeof (struct mlx4_rcv_tunnel_mad),
556				      DMA_TO_DEVICE);
557
558	list.addr = tun_qp->tx_ring[tun_tx_ix].buf.map;
559	list.length = sizeof (struct mlx4_rcv_tunnel_mad);
560	list.lkey = tun_ctx->mr->lkey;
561
562	wr.wr.ud.ah = ah;
563	wr.wr.ud.port_num = port;
564	wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
565	wr.wr.ud.remote_qpn = dqpn;
566	wr.next = NULL;
567	wr.wr_id = ((u64) tun_tx_ix) | MLX4_TUN_SET_WRID_QPN(dest_qpt);
568	wr.sg_list = &list;
569	wr.num_sge = 1;
570	wr.opcode = IB_WR_SEND;
571	wr.send_flags = IB_SEND_SIGNALED;
572
573	ret = ib_post_send(src_qp, &wr, &bad_wr);
574out:
575	if (ret)
576		ib_destroy_ah(ah);
577	return ret;
578}
579
580static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
581			struct ib_wc *wc, struct ib_grh *grh,
582			struct ib_mad *mad)
583{
584	struct mlx4_ib_dev *dev = to_mdev(ibdev);
585	int err;
586	int slave;
587	u8 *slave_id;
588	int is_eth = 0;
589
590	if (rdma_port_get_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND)
591		is_eth = 0;
592	else
593		is_eth = 1;
594
595	if (is_eth) {
596		if (!(wc->wc_flags & IB_WC_GRH)) {
597			mlx4_ib_warn(ibdev, "RoCE grh not present.\n");
598			return -EINVAL;
599		}
600		if (mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_CM) {
601			mlx4_ib_warn(ibdev, "RoCE mgmt class is not CM\n");
602			return -EINVAL;
603		}
604		if (mlx4_get_slave_from_roce_gid(dev->dev, port, grh->dgid.raw, &slave)) {
605			mlx4_ib_warn(ibdev, "failed matching grh\n");
606			return -ENOENT;
607		}
608		if (slave >= dev->dev->caps.sqp_demux) {
609			mlx4_ib_warn(ibdev, "slave id: %d is bigger than allowed:%d\n",
610				     slave, dev->dev->caps.sqp_demux);
611			return -ENOENT;
612		}
613
614		if (mlx4_ib_demux_cm_handler(ibdev, port, &slave, mad, is_eth))
615			return 0;
616
617		err = mlx4_ib_send_to_slave(dev, slave, port, wc->qp->qp_type, wc, grh, mad);
618		if (err)
619			pr_debug("failed sending to slave %d via tunnel qp (%d)\n",
620				 slave, err);
621		return 0;
622	}
623
624	/* Initially assume that this mad is for us */
625	slave = mlx4_master_func_num(dev->dev);
626
627	/* See if the slave id is encoded in a response mad */
628	if (mad->mad_hdr.method & 0x80) {
629		slave_id = (u8 *) &mad->mad_hdr.tid;
630		slave = *slave_id;
631		if (slave != 255) /*255 indicates the dom0*/
632			*slave_id = 0; /* remap tid */
633	}
634
635	/* If a grh is present, we demux according to it */
636	if (wc->wc_flags & IB_WC_GRH) {
637		slave = mlx4_ib_find_real_gid(ibdev, port, grh->dgid.global.interface_id);
638		if (slave < 0) {
639			mlx4_ib_warn(ibdev, "failed matching grh\n");
640			return -ENOENT;
641		}
642	}
643	/* Class-specific handling */
644	switch (mad->mad_hdr.mgmt_class) {
645	case IB_MGMT_CLASS_SUBN_ADM:
646		if (mlx4_ib_demux_sa_handler(ibdev, port, slave,
647					     (struct ib_sa_mad *) mad))
648			return 0;
649		break;
650	case IB_MGMT_CLASS_CM:
651		if (mlx4_ib_demux_cm_handler(ibdev, port, &slave, mad, is_eth))
652			return 0;
653		break;
654	case IB_MGMT_CLASS_DEVICE_MGMT:
655		if (mad->mad_hdr.method != IB_MGMT_METHOD_GET_RESP)
656			return 0;
657		break;
658	default:
659		/* Drop unsupported classes for slaves in tunnel mode */
660		if (slave != mlx4_master_func_num(dev->dev)) {
661			pr_debug("dropping unsupported ingress mad from class:%d "
662				 "for slave:%d\n", mad->mad_hdr.mgmt_class, slave);
663			return 0;
664		}
665	}
666	/* reject an out-of-range slave, e.g. an unhandled 255 (dom0) marker */
667	if (slave >= dev->dev->caps.sqp_demux) {
668		mlx4_ib_warn(ibdev, "slave id: %d is bigger than allowed:%d\n",
669			     slave, dev->dev->caps.sqp_demux);
670		return -ENOENT;
671	}
672
673	err = mlx4_ib_send_to_slave(dev, slave, port, wc->qp->qp_type, wc, grh, mad);
674	if (err)
675		pr_debug("failed sending to slave %d via tunnel qp (%d)\n",
676			 slave, err);
677	return 0;
678}
679
680static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
681			struct ib_wc *in_wc, struct ib_grh *in_grh,
682			struct ib_mad *in_mad, struct ib_mad *out_mad)
683{
684	u16 slid, prev_lid = 0;
685	int err;
686	struct ib_port_attr pattr;
687
688	if (in_wc && in_wc->qp->qp_num) {
689		pr_debug("received MAD: slid:%d sqpn:%d "
690			"dlid_bits:%d dqpn:%d wc_flags:0x%x, cls %x, mtd %x, atr %x\n",
691			in_wc->slid, in_wc->src_qp,
692			in_wc->dlid_path_bits,
693			in_wc->qp->qp_num,
694			in_wc->wc_flags,
695			in_mad->mad_hdr.mgmt_class, in_mad->mad_hdr.method,
696			be16_to_cpu(in_mad->mad_hdr.attr_id));
697		if (in_wc->wc_flags & IB_WC_GRH) {
698			pr_debug("sgid_hi:0x%016llx sgid_lo:0x%016llx\n",
699				 (long long)be64_to_cpu(in_grh->sgid.global.subnet_prefix),
700				 (long long)
701				 be64_to_cpu(in_grh->sgid.global.interface_id));
702			pr_debug("dgid_hi:0x%016llx dgid_lo:0x%016llx\n",
703				 (long long)be64_to_cpu(in_grh->dgid.global.subnet_prefix),
704				 (long long)be64_to_cpu(in_grh->dgid.global.interface_id));
705		}
706	}
707
708	slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);
709
710	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0) {
711		forward_trap(to_mdev(ibdev), port_num, in_mad);
712		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
713	}
714
715	if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
716	    in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
717		if (in_mad->mad_hdr.method   != IB_MGMT_METHOD_GET &&
718		    in_mad->mad_hdr.method   != IB_MGMT_METHOD_SET &&
719		    in_mad->mad_hdr.method   != IB_MGMT_METHOD_TRAP_REPRESS)
720			return IB_MAD_RESULT_SUCCESS;
721
722		/*
723		 * Don't process SMInfo queries -- the SMA can't handle them.
724		 */
725		if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO)
726			return IB_MAD_RESULT_SUCCESS;
727	} else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
728		   in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS1   ||
729		   in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS2   ||
730		   in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_CONG_MGMT) {
731		if (in_mad->mad_hdr.method  != IB_MGMT_METHOD_GET &&
732		    in_mad->mad_hdr.method  != IB_MGMT_METHOD_SET)
733			return IB_MAD_RESULT_SUCCESS;
734	} else
735		return IB_MAD_RESULT_SUCCESS;
736
737	if ((in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
738	     in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
739	    in_mad->mad_hdr.method == IB_MGMT_METHOD_SET &&
740	    in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
741	    !ib_query_port(ibdev, port_num, &pattr))
742		prev_lid = pattr.lid;
743
744	err = mlx4_MAD_IFC(to_mdev(ibdev),
745			   (mad_flags & IB_MAD_IGNORE_MKEY ? MLX4_MAD_IFC_IGNORE_MKEY : 0) |
746			   (mad_flags & IB_MAD_IGNORE_BKEY ? MLX4_MAD_IFC_IGNORE_BKEY : 0) |
747			   MLX4_MAD_IFC_NET_VIEW,
748			   port_num, in_wc, in_grh, in_mad, out_mad);
749	if (err)
750		return IB_MAD_RESULT_FAILURE;
751
752	if (!out_mad->mad_hdr.status) {
753		if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV))
754			smp_snoop(ibdev, port_num, in_mad, prev_lid);
755		/* slaves get node desc from FW */
756		if (!mlx4_is_slave(to_mdev(ibdev)->dev))
757			node_desc_override(ibdev, out_mad);
758	}
759
760	/* set return bit in status of directed route responses */
761	if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
762		out_mad->mad_hdr.status |= cpu_to_be16(1 << 15);
763
764	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
765		/* no response for trap repress */
766		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
767
768	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
769}
770
771static void edit_counter_ext(struct mlx4_if_stat_extended *cnt, void *counters,
772			     __be16 attr_id)
773{
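	/*
	 * Editor's note: the IB PMA PortXmitData/PortRcvData counters are
	 * specified in units of 32-bit words, hence the right-shift by two
	 * applied to the byte counters below.
	 */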
774	switch (attr_id) {
775	case IB_PMA_PORT_COUNTERS:
776	{
777		struct ib_pma_portcounters *pma_cnt =
778				(struct ib_pma_portcounters *)counters;
779		pma_cnt->port_xmit_data =
780			cpu_to_be32((be64_to_cpu(cnt->counters[0].
781						 IfTxUnicastOctets) +
782				     be64_to_cpu(cnt->counters[0].
783						 IfTxMulticastOctets) +
784				     be64_to_cpu(cnt->counters[0].
785						 IfTxBroadcastOctets) +
786				     be64_to_cpu(cnt->counters[0].
787						 IfTxDroppedOctets)) >> 2);
788		pma_cnt->port_rcv_data  =
789			cpu_to_be32((be64_to_cpu(cnt->counters[0].
790						 IfRxUnicastOctets) +
791				     be64_to_cpu(cnt->counters[0].
792						 IfRxMulticastOctets) +
793				     be64_to_cpu(cnt->counters[0].
794						 IfRxBroadcastOctets) +
795				     be64_to_cpu(cnt->counters[0].
796						 IfRxNoBufferOctets) +
797				     be64_to_cpu(cnt->counters[0].
798						 IfRxErrorOctets)) >> 2);
799		pma_cnt->port_xmit_packets =
800			cpu_to_be32(be64_to_cpu(cnt->counters[0].
801						IfTxUnicastFrames) +
802				    be64_to_cpu(cnt->counters[0].
803						IfTxMulticastFrames) +
804				    be64_to_cpu(cnt->counters[0].
805						IfTxBroadcastFrames) +
806				    be64_to_cpu(cnt->counters[0].
807						IfTxDroppedFrames));
808		pma_cnt->port_rcv_packets  =
809			cpu_to_be32(be64_to_cpu(cnt->counters[0].
810						IfRxUnicastFrames) +
811				    be64_to_cpu(cnt->counters[0].
812						IfRxMulticastFrames) +
813				    be64_to_cpu(cnt->counters[0].
814						IfRxBroadcastFrames) +
815				    be64_to_cpu(cnt->counters[0].
816						IfRxNoBufferFrames) +
817				    be64_to_cpu(cnt->counters[0].
818						IfRxErrorFrames));
819		pma_cnt->port_rcv_errors = cpu_to_be32(be64_to_cpu(cnt->
820						       counters[0].
821						       IfRxErrorFrames));
822		break;
823	}
824
825	case IB_PMA_PORT_COUNTERS_EXT:
826	{
827		struct ib_pma_portcounters_ext *pma_cnt_ext =
828				(struct ib_pma_portcounters_ext *)counters;
829
830		pma_cnt_ext->port_xmit_data =
831			cpu_to_be64((be64_to_cpu(cnt->counters[0].
832						 IfTxUnicastOctets) +
833				     be64_to_cpu(cnt->counters[0].
834						 IfTxMulticastOctets) +
835				     be64_to_cpu(cnt->counters[0].
836						 IfTxBroadcastOctets) +
837				     be64_to_cpu(cnt->counters[0].
838						 IfTxDroppedOctets)) >> 2);
839		pma_cnt_ext->port_rcv_data  =
840			cpu_to_be64((be64_to_cpu(cnt->counters[0].
841						 IfRxUnicastOctets) +
842				     be64_to_cpu(cnt->counters[0].
843						 IfRxMulticastOctets) +
844				     be64_to_cpu(cnt->counters[0].
845						 IfRxBroadcastOctets) +
846				     be64_to_cpu(cnt->counters[0].
847						 IfRxNoBufferOctets) +
848				     be64_to_cpu(cnt->counters[0].
849						 IfRxErrorOctets)) >> 2);
850		pma_cnt_ext->port_xmit_packets =
851			cpu_to_be64(be64_to_cpu(cnt->counters[0].
852						IfTxUnicastFrames) +
853				    be64_to_cpu(cnt->counters[0].
854						IfTxMulticastFrames) +
855				    be64_to_cpu(cnt->counters[0].
856						IfTxBroadcastFrames) +
857				    be64_to_cpu(cnt->counters[0].
858						IfTxDroppedFrames));
859		pma_cnt_ext->port_rcv_packets  =
860			cpu_to_be64(be64_to_cpu(cnt->counters[0].
861						IfRxUnicastFrames) +
862				    be64_to_cpu(cnt->counters[0].
863						IfRxMulticastFrames) +
864				    be64_to_cpu(cnt->counters[0].
865						IfRxBroadcastFrames) +
866				    be64_to_cpu(cnt->counters[0].
867						IfRxNoBufferFrames) +
868				    be64_to_cpu(cnt->counters[0].
869						IfRxErrorFrames));
870		pma_cnt_ext->port_unicast_xmit_packets = cnt->counters[0].
871						IfTxUnicastFrames;
872		pma_cnt_ext->port_unicast_rcv_packets = cnt->counters[0].
873						IfRxUnicastFrames;
874		pma_cnt_ext->port_multicast_xmit_packets =
875			cpu_to_be64(be64_to_cpu(cnt->counters[0].
876						IfTxMulticastFrames) +
877				    be64_to_cpu(cnt->counters[0].
878						IfTxBroadcastFrames));
879		pma_cnt_ext->port_multicast_rcv_packets =
880			cpu_to_be64(be64_to_cpu(cnt->counters[0].
881						IfRxMulticastFrames) +
882				    be64_to_cpu(cnt->counters[0].
883						IfRxBroadcastFrames));
884
885		break;
886	}
887
888	default:
889		pr_warn("Unsupported attr_id 0x%x\n", attr_id);
890		break;
891	}
892
893}
894
895static void edit_counter(struct mlx4_if_stat_basic *cnt, void *counters,
896			 __be16	attr_id)
897{
898	switch (attr_id) {
899	case IB_PMA_PORT_COUNTERS:
900	{
901		struct ib_pma_portcounters *pma_cnt =
902				(struct ib_pma_portcounters *) counters;
903		pma_cnt->port_xmit_data =
904			cpu_to_be32(be64_to_cpu(
905				    cnt->counters[0].IfTxOctets) >> 2);
906		pma_cnt->port_rcv_data  =
907			cpu_to_be32(be64_to_cpu(
908				    cnt->counters[0].IfRxOctets) >> 2);
909		pma_cnt->port_xmit_packets =
910			cpu_to_be32(be64_to_cpu(cnt->counters[0].IfTxFrames));
911		pma_cnt->port_rcv_packets  =
912			cpu_to_be32(be64_to_cpu(cnt->counters[0].IfRxFrames));
913		break;
914	}
915	case IB_PMA_PORT_COUNTERS_EXT:
916	{
917		struct ib_pma_portcounters_ext *pma_cnt_ext =
918				(struct ib_pma_portcounters_ext *) counters;
919
920		pma_cnt_ext->port_xmit_data =
921			cpu_to_be64((be64_to_cpu(cnt->counters[0].
922						 IfTxOctets) >> 2));
923		pma_cnt_ext->port_rcv_data  =
924			cpu_to_be64((be64_to_cpu(cnt->counters[0].
925						 IfRxOctets) >> 2));
926		pma_cnt_ext->port_xmit_packets = cnt->counters[0].IfTxFrames;
927		pma_cnt_ext->port_rcv_packets  = cnt->counters[0].IfRxFrames;
928		break;
929	}
930	default:
931		pr_warn("Unsupported attr_id 0x%x\n", attr_id);
932		break;
933	}
934}
935
936int mlx4_ib_query_if_stat(struct mlx4_ib_dev *dev, u32 counter_index,
937		       union mlx4_counter *counter, u8 clear)
938{
939	struct mlx4_cmd_mailbox *mailbox;
940	int err;
941	u32 inmod = counter_index | ((clear & 1) << 31);
942
943	mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
944	if (IS_ERR(mailbox))
945		return IB_MAD_RESULT_FAILURE;
946
947	err = mlx4_cmd_box(dev->dev, 0, mailbox->dma, inmod, 0,
948			   MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C,
949			   MLX4_CMD_WRAPPED);
950	if (!err)
951		memcpy(counter, mailbox->buf, MLX4_IF_STAT_SZ(1));
952
953	mlx4_free_cmd_mailbox(dev->dev, mailbox);
954
955	return err;
956}
957
958static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
959			struct ib_wc *in_wc, struct ib_grh *in_grh,
960			struct ib_mad *in_mad, struct ib_mad *out_mad)
961{
962	struct mlx4_ib_dev *dev = to_mdev(ibdev);
963	int err;
964	u32 counter_index = dev->counters[port_num - 1] & 0xffff;
965	u8 mode;
966	char				counter_buf[MLX4_IF_STAT_SZ(1)];
967	union  mlx4_counter		*counter = (union mlx4_counter *)
968						   counter_buf;
969
970	if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
971		return -EINVAL;
972
973	if (mlx4_ib_query_if_stat(dev, counter_index, counter, 0)) {
974		err = IB_MAD_RESULT_FAILURE;
975	} else {
976		memset(out_mad->data, 0, sizeof out_mad->data);
977		mode = counter->control.cnt_mode & 0xFF;
978		err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
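		/*
		 * Editor's note: the low nibble of cnt_mode selects the
		 * counter layout returned by QUERY_IF_STAT -- 0 maps to the
		 * basic counter set, 1 to the extended set.
		 */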
979		switch (mode & 0xf) {
980		case 0:
981			edit_counter((void *)counter,
982				     (void *)(out_mad->data + 40),
983				     in_mad->mad_hdr.attr_id);
984			break;
985		case 1:
986			edit_counter_ext((void *)counter,
987					 (void *)(out_mad->data + 40),
988					 in_mad->mad_hdr.attr_id);
989			break;
990		default:
991			err = IB_MAD_RESULT_FAILURE;
992		}
993	}
994
995
996	return err;
997}
998
999int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
1000			struct ib_wc *in_wc, struct ib_grh *in_grh,
1001			struct ib_mad *in_mad, struct ib_mad *out_mad)
1002{
1003	switch (rdma_port_get_link_layer(ibdev, port_num)) {
1004	case IB_LINK_LAYER_INFINIBAND:
1005		return ib_process_mad(ibdev, mad_flags, port_num, in_wc,
1006				      in_grh, in_mad, out_mad);
1007	case IB_LINK_LAYER_ETHERNET:
1008		return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
1009					  in_grh, in_mad, out_mad);
1010	default:
1011		return -EINVAL;
1012	}
1013}
1014
1015static void send_handler(struct ib_mad_agent *agent,
1016			 struct ib_mad_send_wc *mad_send_wc)
1017{
1018	if (mad_send_wc->send_buf->context[0])
1019		ib_destroy_ah(mad_send_wc->send_buf->context[0]);
1020	ib_free_send_mad(mad_send_wc->send_buf);
1021}
1022
1023int mlx4_ib_mad_init(struct mlx4_ib_dev *dev)
1024{
1025	struct ib_mad_agent *agent;
1026	int p, q;
1027	int ret;
1028	enum rdma_link_layer ll;
1029
1030	for (p = 0; p < dev->num_ports; ++p) {
1031		ll = rdma_port_get_link_layer(&dev->ib_dev, p + 1);
1032		for (q = 0; q <= 1; ++q) {
1033			if (ll == IB_LINK_LAYER_INFINIBAND) {
1034				agent = ib_register_mad_agent(&dev->ib_dev, p + 1,
1035							      q ? IB_QPT_GSI : IB_QPT_SMI,
1036							      NULL, 0, send_handler,
1037							      NULL, NULL);
1038				if (IS_ERR(agent)) {
1039					ret = PTR_ERR(agent);
1040					goto err;
1041				}
1042				dev->send_agent[p][q] = agent;
1043			} else
1044				dev->send_agent[p][q] = NULL;
1045		}
1046	}
1047
1048	return 0;
1049
1050err:
1051	for (p = 0; p < dev->num_ports; ++p)
1052		for (q = 0; q <= 1; ++q)
1053			if (dev->send_agent[p][q])
1054				ib_unregister_mad_agent(dev->send_agent[p][q]);
1055
1056	return ret;
1057}
1058
1059void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev)
1060{
1061	struct ib_mad_agent *agent;
1062	int p, q;
1063
1064	for (p = 0; p < dev->num_ports; ++p) {
1065		for (q = 0; q <= 1; ++q) {
1066			agent = dev->send_agent[p][q];
1067			if (agent) {
1068				dev->send_agent[p][q] = NULL;
1069				ib_unregister_mad_agent(agent);
1070			}
1071		}
1072
1073		if (dev->sm_ah[p])
1074			ib_destroy_ah(dev->sm_ah[p]);
1075	}
1076}
1077
1078static void handle_lid_change_event(struct mlx4_ib_dev *dev, u8 port_num)
1079{
1080	mlx4_ib_dispatch_event(dev, port_num, IB_EVENT_LID_CHANGE);
1081
1082	if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down)
1083		mlx4_gen_slaves_port_mgt_ev(dev->dev, port_num,
1084					    MLX4_EQ_PORT_INFO_LID_CHANGE_MASK);
1085}
1086
1087static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u8 port_num)
1088{
1089	/* re-configure the alias-guid and mcg's */
1090	if (mlx4_is_master(dev->dev)) {
1091		mlx4_ib_invalidate_all_guid_record(dev, port_num);
1092
1093		if (!dev->sriov.is_going_down) {
1094			mlx4_ib_mcg_port_cleanup(&dev->sriov.demux[port_num - 1], 0);
1095			mlx4_gen_slaves_port_mgt_ev(dev->dev, port_num,
1096						    MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK);
1097		}
1098	}
1099	mlx4_ib_dispatch_event(dev, port_num, IB_EVENT_CLIENT_REREGISTER);
1100}
1101
1102static void propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
1103			      struct mlx4_eqe *eqe)
1104{
1105	__propagate_pkey_ev(dev, port_num, GET_BLK_PTR_FROM_EQE(eqe),
1106			    GET_MASK_FROM_EQE(eqe));
1107}
1108
1109static void handle_slaves_guid_change(struct mlx4_ib_dev *dev, u8 port_num,
1110				      u32 guid_tbl_blk_num, u32 change_bitmap)
1111{
1112	struct ib_smp *in_mad  = NULL;
1113	struct ib_smp *out_mad  = NULL;
1114	u16 i;
1115
1116	if (!mlx4_is_mfunc(dev->dev) || !mlx4_is_master(dev->dev))
1117		return;
1118
1119	in_mad  = kmalloc(sizeof *in_mad, GFP_KERNEL);
1120	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
1121	if (!in_mad || !out_mad) {
1122		mlx4_ib_warn(&dev->ib_dev, "failed to allocate memory for guid info mads\n");
1123		goto out;
1124	}
1125
1126	guid_tbl_blk_num  *= 4;
1127
1128	for (i = 0; i < 4; i++) {
1129		if (change_bitmap && (!((change_bitmap >> (8 * i)) & 0xff)))
1130			continue;
1131		memset(in_mad, 0, sizeof *in_mad);
1132		memset(out_mad, 0, sizeof *out_mad);
1133
1134		in_mad->base_version  = 1;
1135		in_mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
1136		in_mad->class_version = 1;
1137		in_mad->method        = IB_MGMT_METHOD_GET;
1138		in_mad->attr_id       = IB_SMP_ATTR_GUID_INFO;
1139		in_mad->attr_mod      = cpu_to_be32(guid_tbl_blk_num + i);
1140
1141		if (mlx4_MAD_IFC(dev,
1142				 MLX4_MAD_IFC_IGNORE_KEYS | MLX4_MAD_IFC_NET_VIEW,
1143				 port_num, NULL, NULL, in_mad, out_mad)) {
1144			mlx4_ib_warn(&dev->ib_dev, "Failed in get GUID INFO MAD_IFC\n");
1145			goto out;
1146		}
1147
1148		mlx4_ib_update_cache_on_guid_change(dev, guid_tbl_blk_num + i,
1149						    port_num,
1150						    (u8 *)(&((struct ib_smp *)out_mad)->data));
1151		mlx4_ib_notify_slaves_on_guid_change(dev, guid_tbl_blk_num + i,
1152						     port_num,
1153						     (u8 *)(&((struct ib_smp *)out_mad)->data));
1154	}
1155
1156out:
1157	kfree(in_mad);
1158	kfree(out_mad);
1159	return;
1160}
1161
1162void handle_port_mgmt_change_event(struct work_struct *work)
1163{
1164	struct ib_event_work *ew = container_of(work, struct ib_event_work, work);
1165	struct mlx4_ib_dev *dev = ew->ib_dev;
1166	struct mlx4_eqe *eqe = &(ew->ib_eqe);
1167	u8 port = eqe->event.port_mgmt_change.port;
1168	u32 changed_attr;
1169	u32 tbl_block;
1170	u32 change_bitmap;
1171
1172	switch (eqe->subtype) {
1173	case MLX4_DEV_PMC_SUBTYPE_PORT_INFO:
1174		changed_attr = be32_to_cpu(eqe->event.port_mgmt_change.params.port_info.changed_attr);
1175
1176		/* Update the SM ah - This should be done before handling
1177		   the other changed attributes so that MADs can be sent to the SM */
1178		if (changed_attr & MSTR_SM_CHANGE_MASK) {
1179			u16 lid = be16_to_cpu(eqe->event.port_mgmt_change.params.port_info.mstr_sm_lid);
1180			u8 sl = eqe->event.port_mgmt_change.params.port_info.mstr_sm_sl & 0xf;
1181			update_sm_ah(dev, port, lid, sl);
1182		}
1183
1184		/* Check if it is a lid change event */
1185		if (changed_attr & MLX4_EQ_PORT_INFO_LID_CHANGE_MASK)
1186			handle_lid_change_event(dev, port);
1187
1188		/* Generate GUID changed event */
1189		if (changed_attr & MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK) {
1190			mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);
1191			/*if master, notify all slaves*/
1192			if (mlx4_is_master(dev->dev))
1193				mlx4_gen_slaves_port_mgt_ev(dev->dev, port,
1194							    MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK);
1195		}
1196
1197		if (changed_attr & MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK)
1198			handle_client_rereg_event(dev, port);
1199		break;
1200
1201	case MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE:
1202		mlx4_ib_dispatch_event(dev, port, IB_EVENT_PKEY_CHANGE);
1203		if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down)
1204			propagate_pkey_ev(dev, port, eqe);
1205		break;
1206	case MLX4_DEV_PMC_SUBTYPE_GUID_INFO:
1207		/* paravirtualized master's guid is guid 0 -- does not change */
1208		if (!mlx4_is_master(dev->dev))
1209			mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);
1210		/*if master, notify relevant slaves*/
1211		else if (!dev->sriov.is_going_down) {
1212			tbl_block = GET_BLK_PTR_FROM_EQE(eqe);
1213			change_bitmap = GET_MASK_FROM_EQE(eqe);
1214			handle_slaves_guid_change(dev, port, tbl_block, change_bitmap);
1215		}
1216		break;
1217	default:
1218		pr_warn("Unsupported subtype 0x%x for "
1219			"Port Management Change event\n", eqe->subtype);
1220	}
1221
1222	kfree(ew);
1223}
1224
1225void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u8 port_num,
1226			    enum ib_event_type type)
1227{
1228	struct ib_event event;
1229
1230	event.device		= &dev->ib_dev;
1231	event.element.port_num	= port_num;
1232	event.event		= type;
1233
1234	ib_dispatch_event(&event);
1235}
1236
1237static void mlx4_ib_tunnel_comp_handler(struct ib_cq *cq, void *arg)
1238{
1239	unsigned long flags;
1240	struct mlx4_ib_demux_pv_ctx *ctx = cq->cq_context;
1241	struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
1242	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
1243	if (!dev->sriov.is_going_down && ctx->state == DEMUX_PV_STATE_ACTIVE)
1244		queue_work(ctx->wq, &ctx->work);
1245	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
1246}
1247
1248static int mlx4_ib_post_pv_qp_buf(struct mlx4_ib_demux_pv_ctx *ctx,
1249				  struct mlx4_ib_demux_pv_qp *tun_qp,
1250				  int index)
1251{
1252	struct ib_sge sg_list;
1253	struct ib_recv_wr recv_wr, *bad_recv_wr;
1254	int size;
1255
1256	size = (tun_qp->qp->qp_type == IB_QPT_UD) ?
1257		sizeof (struct mlx4_tunnel_mad) : sizeof (struct mlx4_mad_rcv_buf);
1258
1259	sg_list.addr = tun_qp->ring[index].map;
1260	sg_list.length = size;
1261	sg_list.lkey = ctx->mr->lkey;
1262
1263	recv_wr.next = NULL;
1264	recv_wr.sg_list = &sg_list;
1265	recv_wr.num_sge = 1;
1266	recv_wr.wr_id = (u64) index | MLX4_TUN_WRID_RECV |
1267		MLX4_TUN_SET_WRID_QPN(tun_qp->proxy_qpt);
1268	ib_dma_sync_single_for_device(ctx->ib_dev, tun_qp->ring[index].map,
1269				      size, DMA_FROM_DEVICE);
1270	return ib_post_recv(tun_qp->qp, &recv_wr, &bad_recv_wr);
1271}
1272
1273static int mlx4_ib_multiplex_sa_handler(struct ib_device *ibdev, int port,
1274		int slave, struct ib_sa_mad *sa_mad)
1275{
1276	int ret = 0;
1277
1278	/* dispatch to different sa handlers */
1279	switch (be16_to_cpu(sa_mad->mad_hdr.attr_id)) {
1280	case IB_SA_ATTR_MC_MEMBER_REC:
1281		ret = mlx4_ib_mcg_multiplex_handler(ibdev, port, slave, sa_mad);
1282		break;
1283	default:
1284		break;
1285	}
1286	return ret;
1287}
1288
1289static int is_proxy_qp0(struct mlx4_ib_dev *dev, int qpn, int slave)
1290{
1291	int proxy_start = dev->dev->phys_caps.base_proxy_sqpn + 8 * slave;
1292
1293	return (qpn >= proxy_start && qpn <= proxy_start + 1);
1294}
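/*
 * Editor's note, inferred from the QPN arithmetic in mlx4_ib_send_to_slave()
 * and the checks in mlx4_ib_multiplex_mad(): each slave owns a block of
 * eight proxy QPNs starting at base_proxy_sqpn + 8 * slave.  Within that
 * block, bit 0 encodes the port (port - 1) and bit 1 distinguishes the QP1
 * proxy from the QP0 proxy; source QPNs with bit 2 set are rejected by the
 * multiplexer.
 */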
1295
1296
1297int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
1298			 enum ib_qp_type dest_qpt, u16 pkey_index, u32 remote_qpn,
1299			 u32 qkey, struct ib_ah_attr *attr, struct ib_mad *mad)
1300{
1301	struct ib_sge list;
1302	struct ib_send_wr wr, *bad_wr;
1303	struct mlx4_ib_demux_pv_ctx *sqp_ctx;
1304	struct mlx4_ib_demux_pv_qp *sqp;
1305	struct mlx4_mad_snd_buf *sqp_mad;
1306	struct ib_ah *ah;
1307	struct ib_qp *send_qp = NULL;
1308	unsigned wire_tx_ix = 0;
1309	int ret = 0;
1310	u16 wire_pkey_ix;
1311	int src_qpnum;
1312	u8 sgid_index;
1313
1314
1315	sqp_ctx = dev->sriov.sqps[port-1];
1316
1317	/* check if proxy qp created */
1318	if (!sqp_ctx || sqp_ctx->state != DEMUX_PV_STATE_ACTIVE)
1319		return -EAGAIN;
1320
1321	/* QP0 forwarding only for Dom0 */
1322	if (dest_qpt == IB_QPT_SMI && (mlx4_master_func_num(dev->dev) != slave))
1323		return -EINVAL;
1324
1325	if (dest_qpt == IB_QPT_SMI) {
1326		src_qpnum = 0;
1327		sqp = &sqp_ctx->qp[0];
1328		wire_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][0];
1329	} else {
1330		src_qpnum = 1;
1331		sqp = &sqp_ctx->qp[1];
1332		wire_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][pkey_index];
1333	}
1334
1335	send_qp = sqp->qp;
1336
1337	/* create ah */
1338	sgid_index = attr->grh.sgid_index;
1339	attr->grh.sgid_index = 0;
1340	ah = ib_create_ah(sqp_ctx->pd, attr);
1341	if (IS_ERR(ah))
1342		return -ENOMEM;
1343	attr->grh.sgid_index = sgid_index;
1344	to_mah(ah)->av.ib.gid_index = sgid_index;
1345	/* get rid of force-loopback bit */
1346	to_mah(ah)->av.ib.port_pd &= cpu_to_be32(0x7FFFFFFF);
1347	spin_lock(&sqp->tx_lock);
1348	if (sqp->tx_ix_head - sqp->tx_ix_tail >=
1349	    (MLX4_NUM_TUNNEL_BUFS - 1))
1350		ret = -EAGAIN;
1351	else
1352		wire_tx_ix = (++sqp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1);
1353	spin_unlock(&sqp->tx_lock);
1354	if (ret)
1355		goto out;
1356
1357	sqp_mad = (struct mlx4_mad_snd_buf *) (sqp->tx_ring[wire_tx_ix].buf.addr);
1358	if (sqp->tx_ring[wire_tx_ix].ah)
1359		ib_destroy_ah(sqp->tx_ring[wire_tx_ix].ah);
1360	sqp->tx_ring[wire_tx_ix].ah = ah;
1361	ib_dma_sync_single_for_cpu(&dev->ib_dev,
1362				   sqp->tx_ring[wire_tx_ix].buf.map,
1363				   sizeof (struct mlx4_mad_snd_buf),
1364				   DMA_TO_DEVICE);
1365
1366	memcpy(&sqp_mad->payload, mad, sizeof *mad);
1367
1368	ib_dma_sync_single_for_device(&dev->ib_dev,
1369				      sqp->tx_ring[wire_tx_ix].buf.map,
1370				      sizeof (struct mlx4_mad_snd_buf),
1371				      DMA_TO_DEVICE);
1372
1373	list.addr = sqp->tx_ring[wire_tx_ix].buf.map;
1374	list.length = sizeof (struct mlx4_mad_snd_buf);
1375	list.lkey = sqp_ctx->mr->lkey;
1376
1377	wr.wr.ud.ah = ah;
1378	wr.wr.ud.port_num = port;
1379	wr.wr.ud.pkey_index = wire_pkey_ix;
1380	wr.wr.ud.remote_qkey = qkey;
1381	wr.wr.ud.remote_qpn = remote_qpn;
1382	wr.next = NULL;
1383	wr.wr_id = ((u64) wire_tx_ix) | MLX4_TUN_SET_WRID_QPN(src_qpnum);
1384	wr.sg_list = &list;
1385	wr.num_sge = 1;
1386	wr.opcode = IB_WR_SEND;
1387	wr.send_flags = IB_SEND_SIGNALED;
1388
1389	ret = ib_post_send(send_qp, &wr, &bad_wr);
1390out:
1391	if (ret)
1392		ib_destroy_ah(ah);
1393	return ret;
1394}
1395
1396static int get_slave_base_gid_ix(struct mlx4_ib_dev *dev, int slave, int port)
1397{
1398	int gids;
1399	int vfs;
1400
1401	if (rdma_port_get_link_layer(&dev->ib_dev, port) == IB_LINK_LAYER_INFINIBAND)
1402		return slave;
1403
1404	gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS;
1405	vfs = dev->dev->num_vfs;
1406
1407	if (slave == 0)
1408		return 0;
1409	if (slave <= gids % vfs)
1410		return MLX4_ROCE_PF_GIDS + ((gids / vfs) + 1) * (slave - 1);
1411
1412	return MLX4_ROCE_PF_GIDS + (gids % vfs) + ((gids / vfs) * (slave - 1));
1413}
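/*
 * Editor's illustrative example (hypothetical numbers): with
 * MLX4_ROCE_MAX_GIDS = 128, MLX4_ROCE_PF_GIDS = 16 and num_vfs = 7, the
 * 112 remaining GIDs divide evenly, so each VF gets 16 and VF n starts at
 * index 16 + 16 * (n - 1).  When the division is not even, the first
 * (gids % vfs) VFs receive one extra GID each, which is what the two
 * return branches above compute.
 */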
1414
1415static int get_real_sgid_index(struct mlx4_ib_dev *dev, int slave, int port,
1416			       struct ib_ah_attr *ah_attr)
1417{
1418	if (rdma_port_get_link_layer(&dev->ib_dev, port) == IB_LINK_LAYER_INFINIBAND) {
1419		ah_attr->grh.sgid_index = slave;
1420		return 0;
1421	}
1422	ah_attr->grh.sgid_index += get_slave_base_gid_ix(dev, slave, port);
1423	return 0;
1424}
1425
1426static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc *wc)
1427{
1428	struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
1429	struct mlx4_ib_demux_pv_qp *tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc->wr_id)];
1430	int wr_ix = wc->wr_id & (MLX4_NUM_TUNNEL_BUFS - 1);
1431	struct mlx4_tunnel_mad *tunnel = tun_qp->ring[wr_ix].addr;
1432	struct mlx4_ib_ah ah;
1433	struct ib_ah_attr ah_attr;
1434	u8 *slave_id;
1435	int slave;
1436
1437	/* Get slave that sent this packet */
1438	if (wc->src_qp < dev->dev->phys_caps.base_proxy_sqpn ||
1439	    wc->src_qp >= dev->dev->phys_caps.base_proxy_sqpn + 8 * MLX4_MFUNC_MAX ||
1440	    (wc->src_qp & 0x1) != ctx->port - 1 ||
1441	    wc->src_qp & 0x4) {
1442		mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d\n", wc->src_qp);
1443		return;
1444	}
1445	slave = ((wc->src_qp & ~0x7) - dev->dev->phys_caps.base_proxy_sqpn) / 8;
1446	if (slave != ctx->slave) {
1447		mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d: "
1448			     "belongs to another slave\n", wc->src_qp);
1449		return;
1450	}
1451	if (slave != mlx4_master_func_num(dev->dev) && !(wc->src_qp & 0x2)) {
1452		mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d: "
1453			     "non-master trying to send QP0 packets\n", wc->src_qp);
1454		return;
1455	}
1456
1457	/* Map transaction ID */
1458	ib_dma_sync_single_for_cpu(ctx->ib_dev, tun_qp->ring[wr_ix].map,
1459				   sizeof (struct mlx4_tunnel_mad),
1460				   DMA_FROM_DEVICE);
1461	switch (tunnel->mad.mad_hdr.method) {
1462	case IB_MGMT_METHOD_SET:
1463	case IB_MGMT_METHOD_GET:
1464	case IB_MGMT_METHOD_REPORT:
1465	case IB_SA_METHOD_GET_TABLE:
1466	case IB_SA_METHOD_DELETE:
1467	case IB_SA_METHOD_GET_MULTI:
1468	case IB_SA_METHOD_GET_TRACE_TBL:
1469		slave_id = (u8 *) &tunnel->mad.mad_hdr.tid;
1470		if (*slave_id) {
1471			mlx4_ib_warn(ctx->ib_dev, "egress mad has non-null tid msb:%d "
1472				     "class:%d slave:%d\n", *slave_id,
1473				     tunnel->mad.mad_hdr.mgmt_class, slave);
1474			return;
1475		} else
1476			*slave_id = slave;
1477	default:
1478		/* nothing */;
1479	}
1480
1481	/* Class-specific handling */
1482	switch (tunnel->mad.mad_hdr.mgmt_class) {
1483	case IB_MGMT_CLASS_SUBN_ADM:
1484		if (mlx4_ib_multiplex_sa_handler(ctx->ib_dev, ctx->port, slave,
1485			      (struct ib_sa_mad *) &tunnel->mad))
1486			return;
1487		break;
1488	case IB_MGMT_CLASS_CM:
1489		if (mlx4_ib_multiplex_cm_handler(ctx->ib_dev, ctx->port, slave,
1490			      (struct ib_mad *) &tunnel->mad))
1491			return;
1492		break;
1493	case IB_MGMT_CLASS_DEVICE_MGMT:
1494		if (tunnel->mad.mad_hdr.method != IB_MGMT_METHOD_GET &&
1495		    tunnel->mad.mad_hdr.method != IB_MGMT_METHOD_SET)
1496			return;
1497		break;
1498	default:
1499		/* Drop unsupported classes for slaves in tunnel mode */
1500		if (slave != mlx4_master_func_num(dev->dev)) {
1501			mlx4_ib_warn(ctx->ib_dev, "dropping unsupported egress mad from class:%d "
1502				     "for slave:%d\n", tunnel->mad.mad_hdr.mgmt_class, slave);
1503			return;
1504		}
1505	}
1506
1507	/* We are using standard ib_core services to send the mad, so generate a
1508	 * standard address handle by decoding the tunnelled mlx4_ah fields */
1509	memcpy(&ah.av, &tunnel->hdr.av, sizeof (struct mlx4_av));
1510	ah.ibah.device = ctx->ib_dev;
1511	mlx4_ib_query_ah(&ah.ibah, &ah_attr);
1512	if (ah_attr.ah_flags & IB_AH_GRH)
1513		if (get_real_sgid_index(dev, slave, ctx->port, &ah_attr))
1514			return;
1515
1516	mlx4_ib_send_to_wire(dev, slave, ctx->port,
1517			     is_proxy_qp0(dev, wc->src_qp, slave) ?
1518			     IB_QPT_SMI : IB_QPT_GSI,
1519			     be16_to_cpu(tunnel->hdr.pkey_index),
1520			     be32_to_cpu(tunnel->hdr.remote_qpn),
1521			     be32_to_cpu(tunnel->hdr.qkey),
1522			     &ah_attr, &tunnel->mad);
1523}
1524
1525static int mlx4_ib_alloc_pv_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
1526				 enum ib_qp_type qp_type, int is_tun)
1527{
1528	int i;
1529	struct mlx4_ib_demux_pv_qp *tun_qp;
1530	int rx_buf_size, tx_buf_size;
1531
1532	if (qp_type > IB_QPT_GSI)
1533		return -EINVAL;
1534
1535	tun_qp = &ctx->qp[qp_type];
1536
1537	tun_qp->ring = kzalloc(sizeof (struct mlx4_ib_buf) * MLX4_NUM_TUNNEL_BUFS,
1538			       GFP_KERNEL);
1539	if (!tun_qp->ring)
1540		return -ENOMEM;
1541
1542	tun_qp->tx_ring = kcalloc(MLX4_NUM_TUNNEL_BUFS,
1543				  sizeof (struct mlx4_ib_tun_tx_buf),
1544				  GFP_KERNEL);
1545	if (!tun_qp->tx_ring) {
1546		kfree(tun_qp->ring);
1547		tun_qp->ring = NULL;
1548		return -ENOMEM;
1549	}
1550
1551	if (is_tun) {
1552		rx_buf_size = sizeof (struct mlx4_tunnel_mad);
1553		tx_buf_size = sizeof (struct mlx4_rcv_tunnel_mad);
1554	} else {
1555		rx_buf_size = sizeof (struct mlx4_mad_rcv_buf);
1556		tx_buf_size = sizeof (struct mlx4_mad_snd_buf);
1557	}
1558
1559	for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
1560		tun_qp->ring[i].addr = kmalloc(rx_buf_size, GFP_KERNEL);
1561		if (!tun_qp->ring[i].addr)
1562			goto err;
1563		tun_qp->ring[i].map = ib_dma_map_single(ctx->ib_dev,
1564							tun_qp->ring[i].addr,
1565							rx_buf_size,
1566							DMA_FROM_DEVICE);
1567	}
1568
1569	for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
1570		tun_qp->tx_ring[i].buf.addr =
1571			kmalloc(tx_buf_size, GFP_KERNEL);
1572		if (!tun_qp->tx_ring[i].buf.addr)
1573			goto tx_err;
1574		tun_qp->tx_ring[i].buf.map =
1575			ib_dma_map_single(ctx->ib_dev,
1576					  tun_qp->tx_ring[i].buf.addr,
1577					  tx_buf_size,
1578					  DMA_TO_DEVICE);
1579		tun_qp->tx_ring[i].ah = NULL;
1580	}
1581	spin_lock_init(&tun_qp->tx_lock);
1582	tun_qp->tx_ix_head = 0;
1583	tun_qp->tx_ix_tail = 0;
1584	tun_qp->proxy_qpt = qp_type;
1585
1586	return 0;
1587
1588tx_err:
1589	while (i > 0) {
1590		--i;
1591		ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map,
1592				    tx_buf_size, DMA_TO_DEVICE);
1593		kfree(tun_qp->tx_ring[i].buf.addr);
1594	}
1595	kfree(tun_qp->tx_ring);
1596	tun_qp->tx_ring = NULL;
1597	i = MLX4_NUM_TUNNEL_BUFS;
1598err:
1599	while (i > 0) {
1600		--i;
1601		ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map,
1602				    rx_buf_size, DMA_FROM_DEVICE);
1603		kfree(tun_qp->ring[i].addr);
1604	}
1605	kfree(tun_qp->ring);
1606	tun_qp->ring = NULL;
1607	return -ENOMEM;
1608}
1609
1610static void mlx4_ib_free_pv_qp_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
1611				     enum ib_qp_type qp_type, int is_tun)
1612{
1613	int i;
1614	struct mlx4_ib_demux_pv_qp *tun_qp;
1615	int rx_buf_size, tx_buf_size;
1616
1617	if (qp_type > IB_QPT_GSI)
1618		return;
1619
1620	tun_qp = &ctx->qp[qp_type];
1621	if (is_tun) {
1622		rx_buf_size = sizeof (struct mlx4_tunnel_mad);
1623		tx_buf_size = sizeof (struct mlx4_rcv_tunnel_mad);
1624	} else {
1625		rx_buf_size = sizeof (struct mlx4_mad_rcv_buf);
1626		tx_buf_size = sizeof (struct mlx4_mad_snd_buf);
1627	}
1628
1629
1630	for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
1631		ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map,
1632				    rx_buf_size, DMA_FROM_DEVICE);
1633		kfree(tun_qp->ring[i].addr);
1634	}
1635
1636	for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
1637		ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map,
1638				    tx_buf_size, DMA_TO_DEVICE);
1639		kfree(tun_qp->tx_ring[i].buf.addr);
1640		if (tun_qp->tx_ring[i].ah)
1641			ib_destroy_ah(tun_qp->tx_ring[i].ah);
1642	}
1643	kfree(tun_qp->tx_ring);
1644	kfree(tun_qp->ring);
1645}
1646
1647static void mlx4_ib_tunnel_comp_worker(struct work_struct *work)
1648{
1649	struct mlx4_ib_demux_pv_ctx *ctx;
1650	struct mlx4_ib_demux_pv_qp *tun_qp;
1651	struct ib_wc wc;
1652	int ret;
1653	ctx = container_of(work, struct mlx4_ib_demux_pv_ctx, work);
1654	ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);
1655
1656	while (ib_poll_cq(ctx->cq, 1, &wc) == 1) {
1657		tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc.wr_id)];
1658		if (wc.status == IB_WC_SUCCESS) {
1659			switch (wc.opcode) {
1660			case IB_WC_RECV:
1661				mlx4_ib_multiplex_mad(ctx, &wc);
1662				ret = mlx4_ib_post_pv_qp_buf(ctx, tun_qp,
1663							     wc.wr_id &
1664							     (MLX4_NUM_TUNNEL_BUFS - 1));
1665				if (ret)
1666					pr_err("Failed reposting tunnel "
1667					       "buf:%lld\n", (long long)wc.wr_id);
1668				break;
1669			case IB_WC_SEND:
1670				pr_debug("received tunnel send completion:"
1671					 "wrid=0x%llx, status=0x%x\n",
1672					 (long long)wc.wr_id, wc.status);
1673				ib_destroy_ah(tun_qp->tx_ring[wc.wr_id &
1674					      (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
1675				tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
1676					= NULL;
1677				spin_lock(&tun_qp->tx_lock);
1678				tun_qp->tx_ix_tail++;
1679				spin_unlock(&tun_qp->tx_lock);
1680
1681				break;
1682			default:
1683				break;
1684			}
1685		} else  {
1686			pr_debug("mlx4_ib: completion error in tunnel: %d."
1687				 " status = %d, wrid = 0x%llx\n",
1688				 ctx->slave, wc.status, (long long)wc.wr_id);
1689			if (!MLX4_TUN_IS_RECV(wc.wr_id)) {
1690				ib_destroy_ah(tun_qp->tx_ring[wc.wr_id &
1691					      (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
1692				tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
1693					= NULL;
1694				spin_lock(&tun_qp->tx_lock);
1695				tun_qp->tx_ix_tail++;
1696				spin_unlock(&tun_qp->tx_lock);
1697			}
1698		}
1699	}
1700}
1701
1702static void pv_qp_event_handler(struct ib_event *event, void *qp_context)
1703{
1704	struct mlx4_ib_demux_pv_ctx *sqp = qp_context;
1705
1706	/* It's worse than that! He's dead, Jim! */
1707	pr_err("Fatal error (%d) on a MAD QP on port %d\n",
1708	       event->event, sqp->port);
1709}
1710
1711static int create_pv_sqp(struct mlx4_ib_demux_pv_ctx *ctx,
1712			    enum ib_qp_type qp_type, int create_tun)
1713{
1714	int i, ret;
1715	struct mlx4_ib_demux_pv_qp *tun_qp;
1716	struct mlx4_ib_qp_tunnel_init_attr qp_init_attr;
1717	struct ib_qp_attr attr;
1718	int qp_attr_mask_INIT;
1719
1720	if (qp_type > IB_QPT_GSI)
1721		return -EINVAL;
1722
1723	tun_qp = &ctx->qp[qp_type];
1724
1725	memset(&qp_init_attr, 0, sizeof qp_init_attr);
1726	qp_init_attr.init_attr.send_cq = ctx->cq;
1727	qp_init_attr.init_attr.recv_cq = ctx->cq;
1728	qp_init_attr.init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
1729	qp_init_attr.init_attr.cap.max_send_wr = MLX4_NUM_TUNNEL_BUFS;
1730	qp_init_attr.init_attr.cap.max_recv_wr = MLX4_NUM_TUNNEL_BUFS;
1731	qp_init_attr.init_attr.cap.max_send_sge = 1;
1732	qp_init_attr.init_attr.cap.max_recv_sge = 1;
1733	if (create_tun) {
1734		qp_init_attr.init_attr.qp_type = IB_QPT_UD;
1735		qp_init_attr.init_attr.create_flags = (enum ib_qp_create_flags)MLX4_IB_SRIOV_TUNNEL_QP;
1736		qp_init_attr.port = ctx->port;
1737		qp_init_attr.slave = ctx->slave;
1738		qp_init_attr.proxy_qp_type = qp_type;
1739		qp_attr_mask_INIT = IB_QP_STATE | IB_QP_PKEY_INDEX |
1740			   IB_QP_QKEY | IB_QP_PORT;
1741	} else {
1742		qp_init_attr.init_attr.qp_type = qp_type;
1743		qp_init_attr.init_attr.create_flags = (enum ib_qp_create_flags)MLX4_IB_SRIOV_SQP;
1744		qp_attr_mask_INIT = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_QKEY;
1745	}
1746	qp_init_attr.init_attr.port_num = ctx->port;
1747	qp_init_attr.init_attr.qp_context = ctx;
1748	qp_init_attr.init_attr.event_handler = pv_qp_event_handler;
1749	tun_qp->qp = ib_create_qp(ctx->pd, &qp_init_attr.init_attr);
1750	if (IS_ERR(tun_qp->qp)) {
1751		ret = PTR_ERR(tun_qp->qp);
1752		tun_qp->qp = NULL;
1753		pr_err("Couldn't create %s QP (%d)\n",
1754		       create_tun ? "tunnel" : "special", ret);
1755		return ret;
1756	}
1757
1758	memset(&attr, 0, sizeof attr);
1759	attr.qp_state = IB_QPS_INIT;
1760	attr.pkey_index =
1761		to_mdev(ctx->ib_dev)->pkeys.virt2phys_pkey[ctx->slave][ctx->port - 1][0];
1762	attr.qkey = IB_QP1_QKEY;
1763	attr.port_num = ctx->port;
1764	ret = ib_modify_qp(tun_qp->qp, &attr, qp_attr_mask_INIT);
1765	if (ret) {
1766		pr_err("Couldn't change %s qp state to INIT (%d)\n",
1767		       create_tun ? "tunnel" : "special", ret);
1768		goto err_qp;
1769	}
1770	attr.qp_state = IB_QPS_RTR;
1771	ret = ib_modify_qp(tun_qp->qp, &attr, IB_QP_STATE);
1772	if (ret) {
1773		pr_err("Couldn't change %s qp state to RTR (%d)\n",
1774		       create_tun ? "tunnel" : "special", ret);
1775		goto err_qp;
1776	}
1777	attr.qp_state = IB_QPS_RTS;
1778	attr.sq_psn = 0;
1779	ret = ib_modify_qp(tun_qp->qp, &attr, IB_QP_STATE | IB_QP_SQ_PSN);
1780	if (ret) {
1781		pr_err("Couldn't change %s qp state to RTS (%d)\n",
1782		       create_tun ? "tunnel" : "special", ret);
1783		goto err_qp;
1784	}
1785
1786	for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
1787		ret = mlx4_ib_post_pv_qp_buf(ctx, tun_qp, i);
1788		if (ret) {
1789			pr_err("mlx4_ib_post_pv_qp_buf error"
1790			       " (err = %d, i = %d)\n", ret, i);
1791			goto err_qp;
1792		}
1793	}
1794	return 0;
1795
1796err_qp:
1797	ib_destroy_qp(tun_qp->qp);
1798	tun_qp->qp = NULL;
1799	return ret;
1800}
1801
1802/*
1803 * IB MAD completion callback for real SQPs
1804 */
1805static void mlx4_ib_sqp_comp_worker(struct work_struct *work)
1806{
1807	struct mlx4_ib_demux_pv_ctx *ctx;
1808	struct mlx4_ib_demux_pv_qp *sqp;
1809	struct ib_wc wc;
1810	struct ib_grh *grh;
1811	struct ib_mad *mad;
1812
1813	ctx = container_of(work, struct mlx4_ib_demux_pv_ctx, work);
1814	ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);
1815
1816	while (mlx4_ib_poll_cq(ctx->cq, 1, &wc) == 1) {
1817		sqp = &ctx->qp[MLX4_TUN_WRID_QPN(wc.wr_id)];
1818		if (wc.status == IB_WC_SUCCESS) {
1819			switch (wc.opcode) {
1820			case IB_WC_SEND:
1821				ib_destroy_ah(sqp->tx_ring[wc.wr_id &
1822					      (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
1823				sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
1824					= NULL;
1825				spin_lock(&sqp->tx_lock);
1826				sqp->tx_ix_tail++;
1827				spin_unlock(&sqp->tx_lock);
1828				break;
1829			case IB_WC_RECV:
1830				mad = (struct ib_mad *) &(((struct mlx4_mad_rcv_buf *)
1831						(sqp->ring[wc.wr_id &
1832						(MLX4_NUM_TUNNEL_BUFS - 1)].addr))->payload);
1833				grh = &(((struct mlx4_mad_rcv_buf *)
1834						(sqp->ring[wc.wr_id &
1835						(MLX4_NUM_TUNNEL_BUFS - 1)].addr))->grh);
1836				mlx4_ib_demux_mad(ctx->ib_dev, ctx->port, &wc, grh, mad);
1837				if (mlx4_ib_post_pv_qp_buf(ctx, sqp, wc.wr_id &
1838							   (MLX4_NUM_TUNNEL_BUFS - 1)))
1839					pr_err("Failed reposting SQP "
1840					       "buf:%lld\n", (long long)wc.wr_id);
1841				break;
1842			default:
1843				BUG_ON(1);
1844				break;
1845			}
1846		} else  {
1847			pr_debug("mlx4_ib: completion error on SQP: %d."
1848				 " status = %d, wrid = 0x%llx\n",
1849				 ctx->slave, wc.status, (long long)wc.wr_id);
1850			if (!MLX4_TUN_IS_RECV(wc.wr_id)) {
1851				ib_destroy_ah(sqp->tx_ring[wc.wr_id &
1852					      (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
1853				sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
1854					= NULL;
1855				spin_lock(&sqp->tx_lock);
1856				sqp->tx_ix_tail++;
1857				spin_unlock(&sqp->tx_lock);
1858			}
1859		}
1860	}
1861}
1862
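/* Allocate and initialize a per-slave, per-port para-virt context. */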
1863static int alloc_pv_object(struct mlx4_ib_dev *dev, int slave, int port,
1864			       struct mlx4_ib_demux_pv_ctx **ret_ctx)
1865{
1866	struct mlx4_ib_demux_pv_ctx *ctx;
1867
1868	*ret_ctx = NULL;
1869	ctx = kzalloc(sizeof (struct mlx4_ib_demux_pv_ctx), GFP_KERNEL);
1870	if (!ctx) {
1871		pr_err("failed allocating pv resource context "
1872		       "for port %d, slave %d\n", port, slave);
1873		return -ENOMEM;
1874	}
1875
1876	ctx->ib_dev = &dev->ib_dev;
1877	ctx->port = port;
1878	ctx->slave = slave;
1879	*ret_ctx = ctx;
1880	return 0;
1881}
1882
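/* Release the tunnel context allocated for the given slave and port. */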
1883static void free_pv_object(struct mlx4_ib_dev *dev, int slave, int port)
1884{
1885	if (dev->sriov.demux[port - 1].tun[slave]) {
1886		kfree(dev->sriov.demux[port - 1].tun[slave]);
1887		dev->sriov.demux[port - 1].tun[slave] = NULL;
1888	}
1889}
1890
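/*
 * Set up the CQ, PD, MR and QP0/QP1 resources for one para-virt
 * context: tunnel QPs for a slave, or the real special QPs on the
 * master (create_tun == 0).
 */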
1891static int create_pv_resources(struct ib_device *ibdev, int slave, int port,
1892			       int create_tun, struct mlx4_ib_demux_pv_ctx *ctx)
1893{
1894	int ret, cq_size;
1895
1896	if (ctx->state != DEMUX_PV_STATE_DOWN)
1897		return -EEXIST;
1898
1899	ctx->state = DEMUX_PV_STATE_STARTING;
1900	/* have QP0 only on port owner, and only if link layer is IB */
1901	if (ctx->slave == mlx4_master_func_num(to_mdev(ctx->ib_dev)->dev) &&
1902	    rdma_port_get_link_layer(ibdev, ctx->port) == IB_LINK_LAYER_INFINIBAND)
1903		ctx->has_smi = 1;
1904
1905	if (ctx->has_smi) {
1906		ret = mlx4_ib_alloc_pv_bufs(ctx, IB_QPT_SMI, create_tun);
1907		if (ret) {
1908			pr_err("Failed allocating qp0 tunnel bufs (%d)\n", ret);
1909			goto err_out;
1910		}
1911	}
1912
1913	ret = mlx4_ib_alloc_pv_bufs(ctx, IB_QPT_GSI, create_tun);
1914	if (ret) {
1915		pr_err("Failed allocating qp1 tunnel bufs (%d)\n", ret);
1916		goto err_out_qp0;
1917	}
1918
1919	cq_size = 2 * MLX4_NUM_TUNNEL_BUFS;
1920	if (ctx->has_smi)
1921		cq_size *= 2;
1922
1923	ctx->cq = ib_create_cq(ctx->ib_dev, mlx4_ib_tunnel_comp_handler,
1924			       NULL, ctx, cq_size, 0);
1925	if (IS_ERR(ctx->cq)) {
1926		ret = PTR_ERR(ctx->cq);
1927		pr_err("Couldn't create tunnel CQ (%d)\n", ret);
1928		goto err_buf;
1929	}
1930
1931	ctx->pd = ib_alloc_pd(ctx->ib_dev);
1932	if (IS_ERR(ctx->pd)) {
1933		ret = PTR_ERR(ctx->pd);
1934		pr_err("Couldn't create tunnel PD (%d)\n", ret);
1935		goto err_cq;
1936	}
1937
1938	ctx->mr = ib_get_dma_mr(ctx->pd, IB_ACCESS_LOCAL_WRITE);
1939	if (IS_ERR(ctx->mr)) {
1940		ret = PTR_ERR(ctx->mr);
1941		pr_err("Couldn't get tunnel DMA MR (%d)\n", ret);
1942		goto err_pd;
1943	}
1944
1945	if (ctx->has_smi) {
1946		ret = create_pv_sqp(ctx, IB_QPT_SMI, create_tun);
1947		if (ret) {
1948			pr_err("Couldn't create %s QP0 (%d)\n",
1949			       create_tun ? "tunnel for" : "",  ret);
1950			goto err_mr;
1951		}
1952	}
1953
1954	ret = create_pv_sqp(ctx, IB_QPT_GSI, create_tun);
1955	if (ret) {
1956		pr_err("Couldn't create %s QP1 (%d)\n",
1957		       create_tun ? "tunnel for" : "",  ret);
1958		goto err_qp0;
1959	}
1960
1961	if (create_tun)
1962		INIT_WORK(&ctx->work, mlx4_ib_tunnel_comp_worker);
1963	else
1964		INIT_WORK(&ctx->work, mlx4_ib_sqp_comp_worker);
1965
1966	ctx->wq = to_mdev(ibdev)->sriov.demux[port - 1].wq;
1967
1968	ret = ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);
1969	if (ret) {
1970		pr_err("Couldn't arm tunnel cq (%d)\n", ret);
1971		goto err_wq;
1972	}
1973	ctx->state = DEMUX_PV_STATE_ACTIVE;
1974	return 0;
1975
1976err_wq:
1977	ctx->wq = NULL;
1978	ib_destroy_qp(ctx->qp[1].qp);
1979	ctx->qp[1].qp = NULL;
1980
1981
1982err_qp0:
1983	if (ctx->has_smi)
1984		ib_destroy_qp(ctx->qp[0].qp);
1985	ctx->qp[0].qp = NULL;
1986
1987err_mr:
1988	ib_dereg_mr(ctx->mr);
1989	ctx->mr = NULL;
1990
1991err_pd:
1992	ib_dealloc_pd(ctx->pd);
1993	ctx->pd = NULL;
1994
1995err_cq:
1996	ib_destroy_cq(ctx->cq);
1997	ctx->cq = NULL;
1998
1999err_buf:
2000	mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_GSI, create_tun);
2001
2002err_out_qp0:
2003	if (ctx->has_smi)
2004		mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_SMI, create_tun);
2005err_out:
2006	ctx->state = DEMUX_PV_STATE_DOWN;
2007	return ret;
2008}
2009
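/* Tear down the QPs, buffers, MR, PD and CQ of a para-virt context. */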
2010static void destroy_pv_resources(struct mlx4_ib_dev *dev, int slave, int port,
2011				 struct mlx4_ib_demux_pv_ctx *ctx, int flush)
2012{
2013	if (!ctx)
2014		return;
2015	if (ctx->state > DEMUX_PV_STATE_DOWN) {
2016		ctx->state = DEMUX_PV_STATE_DOWNING;
2017		if (flush)
2018			flush_workqueue(ctx->wq);
2019		if (ctx->has_smi) {
2020			ib_destroy_qp(ctx->qp[0].qp);
2021			ctx->qp[0].qp = NULL;
2022			mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_SMI, 1);
2023		}
2024		ib_destroy_qp(ctx->qp[1].qp);
2025		ctx->qp[1].qp = NULL;
2026		mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_GSI, 1);
2027		ib_dereg_mr(ctx->mr);
2028		ctx->mr = NULL;
2029		ib_dealloc_pd(ctx->pd);
2030		ctx->pd = NULL;
2031		ib_destroy_cq(ctx->cq);
2032		ctx->cq = NULL;
2033		ctx->state = DEMUX_PV_STATE_DOWN;
2034	}
2035}
2036
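/*
 * Bring a slave's tunnel QP resources (and, for the master, the real
 * special QP resources) up or down on the given port, per do_init.
 */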
2037static int mlx4_ib_tunnels_update(struct mlx4_ib_dev *dev, int slave,
2038				  int port, int do_init)
2039{
2040	int ret = 0;
2041
2042	if (!do_init) {
2043		clean_vf_mcast(&dev->sriov.demux[port - 1], slave);
2044		/* for master, destroy real sqp resources */
2045		if (slave == mlx4_master_func_num(dev->dev))
2046			destroy_pv_resources(dev, slave, port,
2047					     dev->sriov.sqps[port - 1], 1);
2048		/* destroy the tunnel qp resources */
2049		destroy_pv_resources(dev, slave, port,
2050				     dev->sriov.demux[port - 1].tun[slave], 1);
2051		return 0;
2052	}
2053
2054	/* create the tunnel qp resources */
2055	ret = create_pv_resources(&dev->ib_dev, slave, port, 1,
2056				  dev->sriov.demux[port - 1].tun[slave]);
2057
2058	/* for master, create the real sqp resources */
2059	if (!ret && slave == mlx4_master_func_num(dev->dev))
2060		ret = create_pv_resources(&dev->ib_dev, slave, port, 0,
2061					  dev->sriov.sqps[port - 1]);
2062	return ret;
2063}
2064
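/* Deferred-work wrapper around mlx4_ib_tunnels_update(). */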
2065void mlx4_ib_tunnels_update_work(struct work_struct *work)
2066{
2067	struct mlx4_ib_demux_work *dmxw;
2068
2069	dmxw = container_of(work, struct mlx4_ib_demux_work, work);
2070	mlx4_ib_tunnels_update(dmxw->dev, dmxw->slave, (int) dmxw->port,
2071			       dmxw->do_init);
2072	kfree(dmxw);
2073	return;
2074}
2075
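/*
 * Allocate the per-port demux context: one tunnel context per slave,
 * the MCG para-virt state, and the tunnelling and up/down workqueues.
 */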
2076static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
2077				       struct mlx4_ib_demux_ctx *ctx,
2078				       int port)
2079{
2080	char name[12];
2081	int ret = 0;
2082	int i;
2083
2084	ctx->tun = kcalloc(dev->dev->caps.sqp_demux,
2085			   sizeof (struct mlx4_ib_demux_pv_ctx *), GFP_KERNEL);
2086	if (!ctx->tun)
2087		return -ENOMEM;
2088
2089	ctx->dev = dev;
2090	ctx->port = port;
2091	ctx->ib_dev = &dev->ib_dev;
2092
2093	for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
2094		ret = alloc_pv_object(dev, i, port, &ctx->tun[i]);
2095		if (ret) {
2096			ret = -ENOMEM;
2097			goto err_mcg;
2098		}
2099	}
2100
2101	ret = mlx4_ib_mcg_port_init(ctx);
2102	if (ret) {
2103		pr_err("Failed initializing mcg para-virt (%d)\n", ret);
2104		goto err_mcg;
2105	}
2106
2107	snprintf(name, sizeof name, "mlx4_ibt%d", port);
2108	ctx->wq = create_singlethread_workqueue(name);
2109	if (!ctx->wq) {
2110		pr_err("Failed to create tunnelling WQ for port %d\n", port);
2111		ret = -ENOMEM;
2112		goto err_wq;
2113	}
2114
2115	snprintf(name, sizeof name, "mlx4_ibud%d", port);
2116	ctx->ud_wq = create_singlethread_workqueue(name);
2117	if (!ctx->ud_wq) {
2118		pr_err("Failed to create up/down WQ for port %d\n", port);
2119		ret = -ENOMEM;
2120		goto err_udwq;
2121	}
2122
2123	return 0;
2124
2125err_udwq:
2126	destroy_workqueue(ctx->wq);
2127	ctx->wq = NULL;
2128
2129err_wq:
2130	mlx4_ib_mcg_port_cleanup(ctx, 1);
2131err_mcg:
2132	for (i = 0; i < dev->dev->caps.sqp_demux; i++)
2133		free_pv_object(dev, i, port);
2134	kfree(ctx->tun);
2135	ctx->tun = NULL;
2136	return ret;
2137}
2138
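/* Tear down the master's real special-QP context (QP0/QP1, MR, PD, CQ). */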
2139static void mlx4_ib_free_sqp_ctx(struct mlx4_ib_demux_pv_ctx *sqp_ctx)
2140{
2141	if (sqp_ctx->state > DEMUX_PV_STATE_DOWN) {
2142		sqp_ctx->state = DEMUX_PV_STATE_DOWNING;
2143		flush_workqueue(sqp_ctx->wq);
2144		if (sqp_ctx->has_smi) {
2145			ib_destroy_qp(sqp_ctx->qp[0].qp);
2146			sqp_ctx->qp[0].qp = NULL;
2147			mlx4_ib_free_pv_qp_bufs(sqp_ctx, IB_QPT_SMI, 0);
2148		}
2149		ib_destroy_qp(sqp_ctx->qp[1].qp);
2150		sqp_ctx->qp[1].qp = NULL;
2151		mlx4_ib_free_pv_qp_bufs(sqp_ctx, IB_QPT_GSI, 0);
2152		ib_dereg_mr(sqp_ctx->mr);
2153		sqp_ctx->mr = NULL;
2154		ib_dealloc_pd(sqp_ctx->pd);
2155		sqp_ctx->pd = NULL;
2156		ib_destroy_cq(sqp_ctx->cq);
2157		sqp_ctx->cq = NULL;
2158		sqp_ctx->state = DEMUX_PV_STATE_DOWN;
2159	}
2160}
2161
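/*
 * Tear down a port's demux context: all slave tunnel contexts, the
 * MCG para-virt state and both workqueues.
 */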
2162static void mlx4_ib_free_demux_ctx(struct mlx4_ib_demux_ctx *ctx)
2163{
2164	int i;
2165	if (ctx) {
2166		struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
2167		mlx4_ib_mcg_port_cleanup(ctx, 1);
2168		for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
2169			if (!ctx->tun[i])
2170				continue;
2171			if (ctx->tun[i]->state > DEMUX_PV_STATE_DOWN)
2172				ctx->tun[i]->state = DEMUX_PV_STATE_DOWNING;
2173		}
2174		flush_workqueue(ctx->wq);
2175		for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
2176			destroy_pv_resources(dev, i, ctx->port, ctx->tun[i], 0);
2177			free_pv_object(dev, i, ctx->port);
2178		}
2179		kfree(ctx->tun);
2180		destroy_workqueue(ctx->ud_wq);
2181		destroy_workqueue(ctx->wq);
2182	}
2183}
2184
2185static void mlx4_ib_master_tunnels(struct mlx4_ib_dev *dev, int do_init)
2186{
2187	int i;
2188
2189	if (!mlx4_is_master(dev->dev))
2190		return;
2191	/* initialize or tear down tunnel QPs for the master */
2192	for (i = 0; i < dev->dev->caps.num_ports; i++)
2193		mlx4_ib_tunnels_update(dev, mlx4_master_func_num(dev->dev), i + 1, do_init);
2194	return;
2195}
2196
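/*
 * Initialize SR-IOV support on the master: assign node GUIDs to the
 * slaves, start the alias GUID and sysfs services, and create the
 * per-port demux contexts and tunnel QPs.
 */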
2197int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev)
2198{
2199	int i = 0;
2200	int err;
2201
2202	if (!mlx4_is_mfunc(dev->dev))
2203		return 0;
2204
2205	dev->sriov.is_going_down = 0;
2206	spin_lock_init(&dev->sriov.going_down_lock);
2207	mlx4_ib_cm_paravirt_init(dev);
2208
2209	mlx4_ib_warn(&dev->ib_dev, "multi-function enabled\n");
2210
2211	if (mlx4_is_slave(dev->dev)) {
2212		mlx4_ib_warn(&dev->ib_dev, "operating in qp1 tunnel mode\n");
2213		return 0;
2214	}
2215
2216	for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
2217		if (i == mlx4_master_func_num(dev->dev))
2218			mlx4_put_slave_node_guid(dev->dev, i, dev->ib_dev.node_guid);
2219		else
2220			mlx4_put_slave_node_guid(dev->dev, i, mlx4_ib_gen_node_guid());
2221	}
2222
2223	err = mlx4_ib_init_alias_guid_service(dev);
2224	if (err) {
2225		mlx4_ib_warn(&dev->ib_dev, "Failed init alias guid process.\n");
2226		goto paravirt_err;
2227	}
2228	err = mlx4_ib_device_register_sysfs(dev);
2229	if (err) {
2230		mlx4_ib_warn(&dev->ib_dev, "Failed to register sysfs\n");
2231		goto sysfs_err;
2232	}
2233
2234	mlx4_ib_warn(&dev->ib_dev, "initializing demux service for %d qp1 clients\n",
2235		     dev->dev->caps.sqp_demux);
2236	for (i = 0; i < dev->num_ports; i++) {
2237		union ib_gid gid;
2238		err = __mlx4_ib_query_gid(&dev->ib_dev, i + 1, 0, &gid, 1);
2239		if (err)
2240			goto demux_err;
2241		dev->sriov.demux[i].guid_cache[0] = gid.global.interface_id;
2242		err = alloc_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1,
2243				      &dev->sriov.sqps[i]);
2244		if (err)
2245			goto demux_err;
2246		err = mlx4_ib_alloc_demux_ctx(dev, &dev->sriov.demux[i], i + 1);
2247		if (err)
2248			goto demux_err;
2249	}
2250	mlx4_ib_master_tunnels(dev, 1);
2251	return 0;
2252
2253demux_err:
2254	while (i > 0) {
2255		free_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1);
2256		mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]);
2257		--i;
2258	}
2259	mlx4_ib_device_unregister_sysfs(dev);
2260
2261sysfs_err:
2262	mlx4_ib_destroy_alias_guid_service(dev);
2263
2264paravirt_err:
2265	mlx4_ib_cm_paravirt_clean(dev, -1);
2266
2267	return err;
2268}
2269
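/*
 * Shut down SR-IOV support: mark the device as going down and, on the
 * master, release the special-QP and demux contexts and the para-virt
 * services.
 */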
2270void mlx4_ib_close_sriov(struct mlx4_ib_dev *dev)
2271{
2272	int i;
2273	unsigned long flags;
2274
2275	if (!mlx4_is_mfunc(dev->dev))
2276		return;
2277
2278	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
2279	dev->sriov.is_going_down = 1;
2280	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
2281	if (mlx4_is_master(dev->dev)) {
2282		for (i = 0; i < dev->num_ports; i++) {
2283			flush_workqueue(dev->sriov.demux[i].ud_wq);
2284			mlx4_ib_free_sqp_ctx(dev->sriov.sqps[i]);
2285			kfree(dev->sriov.sqps[i]);
2286			dev->sriov.sqps[i] = NULL;
2287			mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]);
2288		}
2289
2290		mlx4_ib_cm_paravirt_clean(dev, -1);
2291		mlx4_ib_destroy_alias_guid_service(dev);
2292		mlx4_ib_device_unregister_sysfs(dev);
2293	}
2294}
2295