/*-
 * SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0
 *
 * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 Intel Corporation.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2009 HNR Consulting. All rights reserved.
 * Copyright (c) 2014 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/ofed/drivers/infiniband/core/ib_mad.c 341866 2018-12-12 10:29:48Z hselasky $");

#define	LINUXKPI_PARAM_PREFIX ibcore_
#define	KBUILD_MODNAME "ibcore"

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <rdma/ib_cache.h>

#include "mad_priv.h"
#include "mad_rmpp.h"
#include "smi.h"
#include "opa_smi.h"
#include "agent.h"
#include "core_priv.h"

static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;

module_param_named(send_queue_size, mad_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");

static struct list_head ib_mad_port_list;
static u32 ib_mad_client_id = 0;

/* Port list lock */
static DEFINE_SPINLOCK(ib_mad_port_list_lock);

/* Forward declarations */
static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req);
static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
static struct ib_mad_agent_private *find_mad_agent(
					struct ib_mad_port_private *port_priv,
					const struct ib_mad_hdr *mad);
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad);
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
static void timeout_sends(struct work_struct *work);
static void local_completions(struct work_struct *work);
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class);
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv);
static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
			      struct ib_wc *wc);
static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc);
/*
 * Returns an ib_mad_port_private structure or NULL for a device/port.
 * Assumes ib_mad_port_list_lock is being held.
 */
static inline struct ib_mad_port_private *
__ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;

	list_for_each_entry(entry, &ib_mad_port_list, port_list) {
		if (entry->device == device && entry->port_num == port_num)
			return entry;
	}
	return NULL;
}

/*
 * Wrapper function to return an ib_mad_port_private structure or NULL
 * for a device/port.
 */
static inline struct ib_mad_port_private *
ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	entry = __ib_get_mad_port(device, port_num);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	return entry;
}

static inline u8 convert_mgmt_class(u8 mgmt_class)
{
	/* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
	return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
		0 : mgmt_class;
}

static int get_spl_qp_index(enum ib_qp_type qp_type)
{
	switch (qp_type)
	{
	case IB_QPT_SMI:
		return 0;
	case IB_QPT_GSI:
		return 1;
	default:
		return -1;
	}
}

static int vendor_class_index(u8 mgmt_class)
{
	return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
}

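/*
 * Classes 0x30-0x4f are the IBA "vendor range 2" management classes.
 * MADs in this range carry a 3-byte OUI and are tracked in per-OUI
 * method tables, separate from the ordinary class table.
 */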
static int is_vendor_class(u8 mgmt_class)
{
	if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
	    (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return 0;
	return 1;
}

static int is_vendor_oui(char *oui)
{
	if (oui[0] || oui[1] || oui[2])
		return 1;
	return 0;
}

static int is_vendor_method_in_use(
		struct ib_mad_mgmt_vendor_class *vendor_class,
		struct ib_mad_reg_req *mad_reg_req)
{
	struct ib_mad_mgmt_method_table *method;
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++) {
		if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
			method = vendor_class->method_table[i];
			if (method) {
				if (method_in_use(&method, mad_reg_req))
					return 1;
				else
					break;
			}
		}
	}
	return 0;
}

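/*
 * A MAD is a response if the response bit is set in the method, if it
 * is a TrapRepress (the "response" to a Trap), or, for the baseboard
 * management class, if the response bit is set in attr_mod.
 */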
int ib_response_mad(const struct ib_mad_hdr *hdr)
{
	return ((hdr->method & IB_MGMT_METHOD_RESP) ||
		(hdr->method == IB_MGMT_METHOD_TRAP_REPRESS) ||
		((hdr->mgmt_class == IB_MGMT_CLASS_BM) &&
		 (hdr->attr_mod & IB_BM_ATTR_MOD_RESP)));
}
EXPORT_SYMBOL(ib_response_mad);

/*
 * ib_register_mad_agent - Register to send/receive MADs
 */
struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   struct ib_mad_reg_req *mad_reg_req,
					   u8 rmpp_version,
					   ib_mad_send_handler send_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context,
					   u32 registration_flags)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_reg_req *reg_req = NULL;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	struct ib_mad_mgmt_method_table *method;
	int ret2, qpn;
	unsigned long flags;
	u8 mgmt_class, vclass;

	/* Validate parameters */
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1) {
		dev_notice(&device->dev,
			   "ib_register_mad_agent: invalid QP Type %d\n",
			   qp_type);
		goto error1;
	}

	if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) {
		dev_notice(&device->dev,
			   "ib_register_mad_agent: invalid RMPP Version %u\n",
			   rmpp_version);
		goto error1;
	}

	/* Validate MAD registration request if supplied */
	if (mad_reg_req) {
		if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) {
			dev_notice(&device->dev,
				   "ib_register_mad_agent: invalid Class Version %u\n",
				   mad_reg_req->mgmt_class_version);
			goto error1;
		}
		if (!recv_handler) {
			dev_notice(&device->dev,
				   "ib_register_mad_agent: no recv_handler\n");
			goto error1;
		}
		if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
			/*
			 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
			 * one in this range currently allowed
			 */
			if (mad_reg_req->mgmt_class !=
			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: Invalid Mgmt Class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		} else if (mad_reg_req->mgmt_class == 0) {
			/*
			 * Class 0 is reserved in IBA and is used for
			 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
			 */
			dev_notice(&device->dev,
				   "ib_register_mad_agent: Invalid Mgmt Class 0\n");
			goto error1;
		} else if (is_vendor_class(mad_reg_req->mgmt_class)) {
			/*
			 * If class is in "new" vendor range,
			 * ensure supplied OUI is not zero
			 */
			if (!is_vendor_oui(mad_reg_req->oui)) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: No OUI specified for class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		}
		/* Make sure class supplied is consistent with RMPP */
		if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
			if (rmpp_version) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: RMPP version for non-RMPP class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		}

		/* Make sure class supplied is consistent with QP type */
		if (qp_type == IB_QPT_SMI) {
			if ((mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
			    (mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: Invalid SM QP type: class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		} else {
			if ((mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
			    (mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: Invalid GS QP type: class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		}
	} else {
		/* No registration request supplied */
		if (!send_handler)
			goto error1;
		if (registration_flags & IB_MAD_USER_RMPP)
			goto error1;
	}

	/* Validate device and port */
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		dev_notice(&device->dev, "ib_register_mad_agent: Invalid port\n");
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}

	/* Verify the QP requested is supported.  For example, Ethernet devices
	 * will not have QP0 */
	if (!port_priv->qp_info[qpn].qp) {
		dev_notice(&device->dev,
			   "ib_register_mad_agent: QP %d not supported\n", qpn);
		ret = ERR_PTR(-EPROTONOSUPPORT);
		goto error1;
	}

	/* Allocate structures */
	mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
	if (!mad_agent_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	if (mad_reg_req) {
		reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL);
		if (!reg_req) {
			ret = ERR_PTR(-ENOMEM);
			goto error3;
		}
	}

	/* Now, fill in the various structures */
	mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
	mad_agent_priv->reg_req = reg_req;
	mad_agent_priv->agent.rmpp_version = rmpp_version;
	mad_agent_priv->agent.device = device;
	mad_agent_priv->agent.recv_handler = recv_handler;
	mad_agent_priv->agent.send_handler = send_handler;
	mad_agent_priv->agent.context = context;
	mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_agent_priv->agent.port_num = port_num;
	mad_agent_priv->agent.flags = registration_flags;
	spin_lock_init(&mad_agent_priv->lock);
	INIT_LIST_HEAD(&mad_agent_priv->send_list);
	INIT_LIST_HEAD(&mad_agent_priv->wait_list);
	INIT_LIST_HEAD(&mad_agent_priv->done_list);
	INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
	INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
	INIT_LIST_HEAD(&mad_agent_priv->local_list);
	INIT_WORK(&mad_agent_priv->local_work, local_completions);
	atomic_set(&mad_agent_priv->refcount, 1);
	init_completion(&mad_agent_priv->comp);

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;

	/*
	 * Make sure the MAD registration (if supplied) does not overlap
	 * with any existing ones
	 */
	if (mad_reg_req) {
		mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
		if (!is_vendor_class(mgmt_class)) {
			class = port_priv->version[mad_reg_req->
						   mgmt_class_version].class;
			if (class) {
				method = class->method_table[mgmt_class];
				if (method) {
					if (method_in_use(&method,
							   mad_reg_req))
						goto error4;
				}
			}
			ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
						  mgmt_class);
		} else {
			/* "New" vendor class range */
			vendor = port_priv->version[mad_reg_req->
						    mgmt_class_version].vendor;
			if (vendor) {
				vclass = vendor_class_index(mgmt_class);
				vendor_class = vendor->vendor_class[vclass];
				if (vendor_class) {
					if (is_vendor_method_in_use(
							vendor_class,
							mad_reg_req))
						goto error4;
				}
			}
			ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
		}
		if (ret2) {
			ret = ERR_PTR(ret2);
			goto error4;
		}
	}

	/* Add mad agent into port's agent list */
	list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	return &mad_agent_priv->agent;

error4:
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
	kfree(reg_req);
error3:
	kfree(mad_agent_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_agent);
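
/*
 * Illustrative usage sketch (assumed names, not part of this file's
 * logic): registering a GSI agent to receive Performance Management
 * Get/Set requests.
 *
 *	struct ib_mad_reg_req reg_req = { };
 *	struct ib_mad_agent *agent;
 *
 *	reg_req.mgmt_class = IB_MGMT_CLASS_PERF_MGMT;
 *	reg_req.mgmt_class_version = 1;
 *	set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
 *	set_bit(IB_MGMT_METHOD_SET, reg_req.method_mask);
 *	agent = ib_register_mad_agent(device, port_num, IB_QPT_GSI,
 *				      &reg_req, 0, my_send_handler,
 *				      my_recv_handler, my_context, 0);
 *	if (IS_ERR(agent))
 *		return PTR_ERR(agent);
 */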

static inline int is_snooping_sends(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(/*IB_MAD_SNOOP_POSTED_SENDS |
		 IB_MAD_SNOOP_RMPP_SENDS |*/
		 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
		 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
}

static inline int is_snooping_recvs(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(IB_MAD_SNOOP_RECVS /*|
		 IB_MAD_SNOOP_RMPP_RECVS*/));
}

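/*
 * Add the snoop agent to the QP's snoop table, growing the table by one
 * slot when no free entry exists.  Returns the table index on success or
 * -ENOMEM on allocation failure.
 */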
static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
				struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_snoop_private **new_snoop_table;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	/* Check for empty slot in array. */
	for (i = 0; i < qp_info->snoop_table_size; i++)
		if (!qp_info->snoop_table[i])
			break;

	if (i == qp_info->snoop_table_size) {
		/* Grow table. */
		new_snoop_table = krealloc(qp_info->snoop_table,
					   sizeof mad_snoop_priv *
					   (qp_info->snoop_table_size + 1),
					   GFP_ATOMIC);
		if (!new_snoop_table) {
			i = -ENOMEM;
			goto out;
		}

		qp_info->snoop_table = new_snoop_table;
		qp_info->snoop_table_size++;
	}
	qp_info->snoop_table[i] = mad_snoop_priv;
	atomic_inc(&qp_info->snoop_count);
out:
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
	return i;
}

struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   int mad_snoop_flags,
					   ib_mad_snoop_handler snoop_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret;
	struct ib_mad_snoop_private *mad_snoop_priv;
	int qpn;

	/* Validate parameters */
	if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
	    (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}
	/* Allocate structures */
	mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
	if (!mad_snoop_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	/* Now, fill in the various structures */
	mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
	mad_snoop_priv->agent.device = device;
	mad_snoop_priv->agent.recv_handler = recv_handler;
	mad_snoop_priv->agent.snoop_handler = snoop_handler;
	mad_snoop_priv->agent.context = context;
	mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_snoop_priv->agent.port_num = port_num;
	mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
	init_completion(&mad_snoop_priv->comp);
	mad_snoop_priv->snoop_index = register_snoop_agent(
						&port_priv->qp_info[qpn],
						mad_snoop_priv);
	if (mad_snoop_priv->snoop_index < 0) {
		ret = ERR_PTR(mad_snoop_priv->snoop_index);
		goto error2;
	}

	atomic_set(&mad_snoop_priv->refcount, 1);
	return &mad_snoop_priv->agent;

error2:
	kfree(mad_snoop_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_snoop);

static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	if (atomic_dec_and_test(&mad_agent_priv->refcount))
		complete(&mad_agent_priv->comp);
}

static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
{
	if (atomic_dec_and_test(&mad_snoop_priv->refcount))
		complete(&mad_snoop_priv->comp);
}

static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_port_private *port_priv;
	unsigned long flags;

	/* Note that we could still be handling received MADs */

	/*
	 * Canceling all sends results in dropping received response
	 * MADs, preventing us from queuing additional work
	 */
	cancel_mads(mad_agent_priv);
	port_priv = mad_agent_priv->qp_info->port_priv;
	cancel_delayed_work_sync(&mad_agent_priv->timed_work);

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	remove_mad_reg_req(mad_agent_priv);
	list_del(&mad_agent_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	flush_workqueue(port_priv->wq);
	ib_cancel_rmpp_recvs(mad_agent_priv);

	deref_mad_agent(mad_agent_priv);
	wait_for_completion(&mad_agent_priv->comp);

	kfree(mad_agent_priv->reg_req);
	kfree(mad_agent_priv);
}

static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_qp_info *qp_info;
	unsigned long flags;

	qp_info = mad_snoop_priv->qp_info;
	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
	atomic_dec(&qp_info->snoop_count);
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);

	deref_snoop_agent(mad_snoop_priv);
	wait_for_completion(&mad_snoop_priv->comp);

	kfree(mad_snoop_priv);
}

/*
 * ib_unregister_mad_agent - Unregisters a client from using MAD services
 */
int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_snoop_private *mad_snoop_priv;

	/* If the TID is zero, the agent can only snoop. */
	if (mad_agent->hi_tid) {
		mad_agent_priv = container_of(mad_agent,
					      struct ib_mad_agent_private,
					      agent);
		unregister_mad_agent(mad_agent_priv);
	} else {
		mad_snoop_priv = container_of(mad_agent,
					      struct ib_mad_snoop_private,
					      agent);
		unregister_mad_snoop(mad_snoop_priv);
	}
	return 0;
}
EXPORT_SYMBOL(ib_unregister_mad_agent);

static void dequeue_mad(struct ib_mad_list_head *mad_list)
{
	struct ib_mad_queue *mad_queue;
	unsigned long flags;

	BUG_ON(!mad_list->mad_queue);
	mad_queue = mad_list->mad_queue;
	spin_lock_irqsave(&mad_queue->lock, flags);
	list_del(&mad_list->list);
	mad_queue->count--;
	spin_unlock_irqrestore(&mad_queue->lock, flags);
}

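/*
 * Deliver a send completion to every snoop agent that asked for these
 * flags.  The snoop lock is dropped around each callback; a reference
 * on the snoop agent keeps it alive while its handler runs.
 */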
static void snoop_send(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_send_buf *send_buf,
		       struct ib_mad_send_wc *mad_send_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
						    send_buf, mad_send_wc);
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

static void snoop_recv(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_recv_wc *mad_recv_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent, NULL,
						   mad_recv_wc);
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

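/*
 * Build a synthetic receive work completion for a directed-route SMP
 * that is handled locally instead of being posted to the wire.
 */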
static void build_smp_wc(struct ib_qp *qp, struct ib_cqe *cqe, u16 slid,
		u16 pkey_index, u8 port_num, struct ib_wc *wc)
{
	memset(wc, 0, sizeof *wc);
	wc->wr_cqe = cqe;
	wc->status = IB_WC_SUCCESS;
	wc->opcode = IB_WC_RECV;
	wc->pkey_index = pkey_index;
	wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
	wc->src_qp = IB_QP0;
	wc->qp = qp;
	wc->slid = slid;
	wc->sl = 0;
	wc->dlid_path_bits = 0;
	wc->port_num = port_num;
}

static size_t mad_priv_size(const struct ib_mad_private *mp)
{
	return sizeof(struct ib_mad_private) + mp->mad_size;
}

static struct ib_mad_private *alloc_mad_private(size_t mad_size, gfp_t flags)
{
	size_t size = sizeof(struct ib_mad_private) + mad_size;
	struct ib_mad_private *ret = kzalloc(size, flags);

	if (ret)
		ret->mad_size = mad_size;

	return ret;
}

static size_t port_mad_size(const struct ib_mad_port_private *port_priv)
{
	return rdma_max_mad_size(port_priv->device, port_priv->port_num);
}

static size_t mad_priv_dma_size(const struct ib_mad_private *mp)
{
	return sizeof(struct ib_grh) + mp->mad_size;
}

/*
 * Return 0 if SMP is to be sent
 * Return 1 if SMP was consumed locally (whether or not solicited)
 * Return < 0 if error
 */
static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
				  struct ib_mad_send_wr_private *mad_send_wr)
{
	int ret = 0;
	struct ib_smp *smp = mad_send_wr->send_buf.mad;
	struct opa_smp *opa_smp = (struct opa_smp *)smp;
	unsigned long flags;
	struct ib_mad_local_private *local;
	struct ib_mad_private *mad_priv;
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent_private *recv_mad_agent = NULL;
	struct ib_device *device = mad_agent_priv->agent.device;
	u8 port_num;
	struct ib_wc mad_wc;
	struct ib_ud_wr *send_wr = &mad_send_wr->send_wr;
	size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv);
	u16 out_mad_pkey_index = 0;
	u16 drslid;
	bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
				    mad_agent_priv->qp_info->port_priv->port_num);

	if (rdma_cap_ib_switch(device) &&
	    smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		port_num = send_wr->port_num;
	else
		port_num = mad_agent_priv->agent.port_num;

	/*
	 * Directed route handling starts if the initial LID routed part of
	 * a request or the ending LID routed part of a response is empty.
	 * If we are at the start of the LID routed part, don't update the
	 * hop_ptr or hop_cnt.  See section 14.2.2, Vol 1 IB spec.
	 */
	if (opa && smp->class_version == OPA_SMP_CLASS_VERSION) {
		u32 opa_drslid;

		if ((opa_get_smp_direction(opa_smp)
		     ? opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) ==
		     OPA_LID_PERMISSIVE &&
		     opa_smi_handle_dr_smp_send(opa_smp,
						rdma_cap_ib_switch(device),
						port_num) == IB_SMI_DISCARD) {
			ret = -EINVAL;
			dev_err(&device->dev, "OPA Invalid directed route\n");
			goto out;
		}
		opa_drslid = be32_to_cpu(opa_smp->route.dr.dr_slid);
		if (opa_drslid != be32_to_cpu(OPA_LID_PERMISSIVE) &&
		    opa_drslid & 0xffff0000) {
			ret = -EINVAL;
			dev_err(&device->dev, "OPA Invalid dr_slid 0x%x\n",
			       opa_drslid);
			goto out;
		}
		drslid = (u16)(opa_drslid & 0x0000ffff);

		/* Check to post send on QP or process locally */
		if (opa_smi_check_local_smp(opa_smp, device) == IB_SMI_DISCARD &&
		    opa_smi_check_local_returning_smp(opa_smp, device) == IB_SMI_DISCARD)
			goto out;
	} else {
		if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
		     IB_LID_PERMISSIVE &&
		     smi_handle_dr_smp_send(smp, rdma_cap_ib_switch(device), port_num) ==
		     IB_SMI_DISCARD) {
			ret = -EINVAL;
			dev_err(&device->dev, "Invalid directed route\n");
			goto out;
		}
		drslid = be16_to_cpu(smp->dr_slid);

		/* Check to post send on QP or process locally */
		if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
		    smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
			goto out;
	}

	local = kmalloc(sizeof *local, GFP_ATOMIC);
	if (!local) {
		ret = -ENOMEM;
		dev_err(&device->dev, "No memory for ib_mad_local_private\n");
		goto out;
	}
	local->mad_priv = NULL;
	local->recv_mad_agent = NULL;
	mad_priv = alloc_mad_private(mad_size, GFP_ATOMIC);
	if (!mad_priv) {
		ret = -ENOMEM;
		dev_err(&device->dev, "No memory for local response MAD\n");
		kfree(local);
		goto out;
	}

	build_smp_wc(mad_agent_priv->agent.qp,
		     send_wr->wr.wr_cqe, drslid,
		     send_wr->pkey_index,
		     send_wr->port_num, &mad_wc);

	if (opa && smp->base_version == OPA_MGMT_BASE_VERSION) {
		mad_wc.byte_len = mad_send_wr->send_buf.hdr_len
					+ mad_send_wr->send_buf.data_len
					+ sizeof(struct ib_grh);
	}

	/* No GRH for DR SMP */
	ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
				  (const struct ib_mad_hdr *)smp, mad_size,
				  (struct ib_mad_hdr *)mad_priv->mad,
				  &mad_size, &out_mad_pkey_index);
	switch (ret)
	{
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
		if (ib_response_mad((const struct ib_mad_hdr *)mad_priv->mad) &&
		    mad_agent_priv->agent.recv_handler) {
			local->mad_priv = mad_priv;
			local->recv_mad_agent = mad_agent_priv;
			/*
			 * Reference MAD agent until receive
			 * side of local completion handled
			 */
			atomic_inc(&mad_agent_priv->refcount);
		} else
			kfree(mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
		kfree(mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS:
		/* Treat like an incoming receive MAD */
		port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
					    mad_agent_priv->agent.port_num);
		if (port_priv) {
			memcpy(mad_priv->mad, smp, mad_priv->mad_size);
			recv_mad_agent = find_mad_agent(port_priv,
						        (const struct ib_mad_hdr *)mad_priv->mad);
		}
		if (!port_priv || !recv_mad_agent) {
			/*
			 * No receiving agent so drop packet and
			 * generate send completion.
			 */
			kfree(mad_priv);
			break;
		}
		local->mad_priv = mad_priv;
		local->recv_mad_agent = recv_mad_agent;
		break;
	default:
		kfree(mad_priv);
		kfree(local);
		ret = -EINVAL;
		goto out;
	}

	local->mad_send_wr = mad_send_wr;
	if (opa) {
		local->mad_send_wr->send_wr.pkey_index = out_mad_pkey_index;
		local->return_wc_byte_len = mad_size;
	}
	/* Reference MAD agent until send side of local completion handled */
	atomic_inc(&mad_agent_priv->refcount);
	/* Queue local completion to local list */
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	queue_work(mad_agent_priv->qp_info->port_priv->wq,
		   &mad_agent_priv->local_work);
	ret = 1;
out:
	return ret;
}

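/*
 * RMPP payloads are carried in segments of (mad_size - hdr_len) bytes,
 * and the last segment is zero-padded to a full segment.  For example
 * (illustrative numbers): with 256-byte IB MADs and a 56-byte SA header,
 * seg_size is 200, so 450 bytes of data need 200 - (450 % 200) = 150
 * bytes of padding in the final segment.
 */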
static int get_pad_size(int hdr_len, int data_len, size_t mad_size)
{
	int seg_size, pad;

	seg_size = mad_size - hdr_len;
	if (data_len && seg_size) {
		pad = seg_size - data_len % seg_size;
		return pad == seg_size ? 0 : pad;
	} else
		return seg_size;
}

static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_segment *s, *t;

	list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
		list_del(&s->list);
		kfree(s);
	}
}

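/*
 * Allocate one ib_rmpp_segment per seg_size chunk of payload (data plus
 * pad), zero the tail padding, and mark the RMPP header as an active
 * DATA transfer.
 */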
static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
				size_t mad_size, gfp_t gfp_mask)
{
	struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
	struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
	struct ib_rmpp_segment *seg = NULL;
	int left, seg_size, pad;

	send_buf->seg_size = mad_size - send_buf->hdr_len;
	send_buf->seg_rmpp_size = mad_size - IB_MGMT_RMPP_HDR;
	seg_size = send_buf->seg_size;
	pad = send_wr->pad;

	/* Allocate data segments. */
	for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
		seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
		if (!seg) {
			dev_err(&send_buf->mad_agent->device->dev,
				"alloc_send_rmpp_segs: RMPP mem alloc failed for len %zd, gfp %#x\n",
				sizeof (*seg) + seg_size, gfp_mask);
			free_send_rmpp_list(send_wr);
			return -ENOMEM;
		}
		seg->num = ++send_buf->seg_count;
		list_add_tail(&seg->list, &send_wr->rmpp_list);
	}

	/* Zero any padding */
	if (pad)
		memset(seg->data + seg_size - pad, 0, pad);

	rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
					  agent.rmpp_version;
	rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);

	send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
					struct ib_rmpp_segment, list);
	send_wr->last_ack_seg = send_wr->cur_seg;
	return 0;
}

int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent)
{
	return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP);
}
EXPORT_SYMBOL(ib_mad_kernel_rmpp_agent);

struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
					    u32 remote_qpn, u16 pkey_index,
					    int rmpp_active,
					    int hdr_len, int data_len,
					    gfp_t gfp_mask,
					    u8 base_version)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	int pad, message_size, ret, size;
	void *buf;
	size_t mad_size;
	bool opa;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
				      agent);

	opa = rdma_cap_opa_mad(mad_agent->device, mad_agent->port_num);

	if (opa && base_version == OPA_MGMT_BASE_VERSION)
		mad_size = sizeof(struct opa_mad);
	else
		mad_size = sizeof(struct ib_mad);

	pad = get_pad_size(hdr_len, data_len, mad_size);
	message_size = hdr_len + data_len + pad;

	if (ib_mad_kernel_rmpp_agent(mad_agent)) {
		if (!rmpp_active && message_size > mad_size)
			return ERR_PTR(-EINVAL);
	} else
		if (rmpp_active || message_size > mad_size)
			return ERR_PTR(-EINVAL);

	size = rmpp_active ? hdr_len : mad_size;
	buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	mad_send_wr = (struct ib_mad_send_wr_private *)((char *)buf + size);
	INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
	mad_send_wr->send_buf.mad = buf;
	mad_send_wr->send_buf.hdr_len = hdr_len;
	mad_send_wr->send_buf.data_len = data_len;
	mad_send_wr->pad = pad;

	mad_send_wr->mad_agent_priv = mad_agent_priv;
	mad_send_wr->sg_list[0].length = hdr_len;
	mad_send_wr->sg_list[0].lkey = mad_agent->qp->pd->local_dma_lkey;

	/* OPA MADs don't have to be the full 2048 bytes */
	if (opa && base_version == OPA_MGMT_BASE_VERSION &&
	    data_len < mad_size - hdr_len)
		mad_send_wr->sg_list[1].length = data_len;
	else
		mad_send_wr->sg_list[1].length = mad_size - hdr_len;

	mad_send_wr->sg_list[1].lkey = mad_agent->qp->pd->local_dma_lkey;

	mad_send_wr->mad_list.cqe.done = ib_mad_send_done;

	mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;
	mad_send_wr->send_wr.wr.sg_list = mad_send_wr->sg_list;
	mad_send_wr->send_wr.wr.num_sge = 2;
	mad_send_wr->send_wr.wr.opcode = IB_WR_SEND;
	mad_send_wr->send_wr.wr.send_flags = IB_SEND_SIGNALED;
	mad_send_wr->send_wr.remote_qpn = remote_qpn;
	mad_send_wr->send_wr.remote_qkey = IB_QP_SET_QKEY;
	mad_send_wr->send_wr.pkey_index = pkey_index;

	if (rmpp_active) {
		ret = alloc_send_rmpp_list(mad_send_wr, mad_size, gfp_mask);
		if (ret) {
			kfree(buf);
			return ERR_PTR(ret);
		}
	}

	mad_send_wr->send_buf.mad_agent = mad_agent;
	atomic_inc(&mad_agent_priv->refcount);
	return &mad_send_wr->send_buf;
}
EXPORT_SYMBOL(ib_create_send_mad);
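
/*
 * Illustrative usage sketch (assumed names, not part of this file's
 * logic): building and posting a single non-RMPP MAD on the agent's QP.
 *
 *	struct ib_mad_send_buf *msg;
 *
 *	msg = ib_create_send_mad(agent, remote_qpn, pkey_index, 0,
 *				 IB_MGMT_MAD_HDR,
 *				 sizeof(struct ib_mad) - IB_MGMT_MAD_HDR,
 *				 GFP_KERNEL, IB_MGMT_BASE_VERSION);
 *	if (IS_ERR(msg))
 *		return PTR_ERR(msg);
 *	msg->ah = ah;			(address handle to the destination)
 *	msg->timeout_ms = 1000;
 *	msg->retries = 3;
 *	(fill in msg->mad here)
 *	if (ib_post_send_mad(msg, NULL))
 *		ib_free_send_mad(msg);
 */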

int ib_get_mad_data_offset(u8 mgmt_class)
{
	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
		return IB_MGMT_SA_HDR;
	else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
		 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
		 (mgmt_class == IB_MGMT_CLASS_BIS))
		return IB_MGMT_DEVICE_HDR;
	else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
		 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return IB_MGMT_VENDOR_HDR;
	else
		return IB_MGMT_MAD_HDR;
}
EXPORT_SYMBOL(ib_get_mad_data_offset);

int ib_is_mad_class_rmpp(u8 mgmt_class)
{
	if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_BIS) ||
	    ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
	     (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
		return 1;
	return 0;
}
EXPORT_SYMBOL(ib_is_mad_class_rmpp);

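/*
 * Walk the RMPP segment list forward or backward from the cached
 * cur_seg to the requested segment number and return its payload.
 */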
void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct list_head *list;

	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
				   send_buf);
	list = &mad_send_wr->cur_seg->list;

	if (mad_send_wr->cur_seg->num < seg_num) {
		list_for_each_entry(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
				break;
	} else if (mad_send_wr->cur_seg->num > seg_num) {
		list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
				break;
	}
	return mad_send_wr->cur_seg->data;
}
EXPORT_SYMBOL(ib_get_rmpp_segment);

static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
{
	if (mad_send_wr->send_buf.seg_count)
		return ib_get_rmpp_segment(&mad_send_wr->send_buf,
					   mad_send_wr->seg_num);
	else
		return (char *)mad_send_wr->send_buf.mad +
		       mad_send_wr->send_buf.hdr_len;
}

void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;

	mad_agent_priv = container_of(send_buf->mad_agent,
				      struct ib_mad_agent_private, agent);
	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
				   send_buf);

	free_send_rmpp_list(mad_send_wr);
	kfree(send_buf->mad);
	deref_mad_agent(mad_agent_priv);
}
EXPORT_SYMBOL(ib_free_send_mad);

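/*
 * DMA-map the MAD header and payload and post the send work request.
 * If the send queue is already at max_active, the request is parked on
 * the overflow list and posted later from the send completion path.
 */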
1149320592Shselaskyint ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
1150320592Shselasky{
1151320592Shselasky	struct ib_mad_qp_info *qp_info;
1152320592Shselasky	struct list_head *list;
1153320592Shselasky	struct ib_send_wr *bad_send_wr;
1154320592Shselasky	struct ib_mad_agent *mad_agent;
1155320592Shselasky	struct ib_sge *sge;
1156320592Shselasky	unsigned long flags;
1157320592Shselasky	int ret;
1158320592Shselasky
1159320592Shselasky	/* Set WR ID to find mad_send_wr upon completion */
	qp_info = mad_send_wr->mad_agent_priv->qp_info;
	mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;
	mad_send_wr->mad_list.cqe.done = ib_mad_send_done;
	mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;

	mad_agent = mad_send_wr->send_buf.mad_agent;
	sge = mad_send_wr->sg_list;
	sge[0].addr = ib_dma_map_single(mad_agent->device,
					mad_send_wr->send_buf.mad,
					sge[0].length,
					DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[0].addr)))
		return -ENOMEM;

	mad_send_wr->header_mapping = sge[0].addr;

	sge[1].addr = ib_dma_map_single(mad_agent->device,
					ib_get_payload(mad_send_wr),
					sge[1].length,
					DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[1].addr))) {
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->header_mapping,
				    sge[0].length, DMA_TO_DEVICE);
		return -ENOMEM;
	}
	mad_send_wr->payload_mapping = sge[1].addr;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
		ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr.wr,
				   &bad_send_wr);
		list = &qp_info->send_queue.list;
	} else {
		ret = 0;
		list = &qp_info->overflow_list;
	}

	if (!ret) {
		qp_info->send_queue.count++;
		list_add_tail(&mad_send_wr->mad_list.list, list);
	}
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
	if (ret) {
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->header_mapping,
				    sge[0].length, DMA_TO_DEVICE);
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->payload_mapping,
				    sge[1].length, DMA_TO_DEVICE);
	}
	return ret;
}

/*
 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
 *  with the registered client
 */
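/*
 * Usage sketch (illustrative, not taken from this file): a client would
 * typically build the buffer with ib_create_send_mad() and post it with:
 *
 *	struct ib_mad_send_buf *bad_buf;
 *	int ret;
 *
 *	ret = ib_post_send_mad(send_buf, &bad_buf);
 *	if (ret)
 *		bad_buf now points at the first buffer that was not posted
 */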
int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
		     struct ib_mad_send_buf **bad_send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_buf *next_send_buf;
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long flags;
	int ret = -EINVAL;

	/* Walk list of send WRs and post each on send list */
	for (; send_buf; send_buf = next_send_buf) {

		mad_send_wr = container_of(send_buf,
					   struct ib_mad_send_wr_private,
					   send_buf);
		mad_agent_priv = mad_send_wr->mad_agent_priv;

		if (!send_buf->mad_agent->send_handler ||
		    (send_buf->timeout_ms &&
		     !send_buf->mad_agent->recv_handler)) {
			ret = -EINVAL;
			goto error;
		}

		if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
			if (mad_agent_priv->agent.rmpp_version) {
				ret = -EINVAL;
				goto error;
			}
		}

		/*
		 * Save pointer to next work request to post in case the
		 * current one completes, and the user modifies the work
		 * request associated with the completion
		 */
		next_send_buf = send_buf->next;
		mad_send_wr->send_wr.ah = send_buf->ah;

		if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
		    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
			ret = handle_outgoing_dr_smp(mad_agent_priv,
						     mad_send_wr);
			if (ret < 0)		/* error */
				goto error;
			else if (ret == 1)	/* locally consumed */
				continue;
		}

		mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
		/* Timeout will be updated after send completes */
		mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
		mad_send_wr->max_retries = send_buf->retries;
		mad_send_wr->retries_left = send_buf->retries;
		send_buf->retries = 0;
		/* Reference for work request to QP + response */
		mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
		mad_send_wr->status = IB_WC_SUCCESS;

		/* Reference MAD agent until send completes */
		atomic_inc(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_agent_priv->send_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
			ret = ib_send_rmpp_mad(mad_send_wr);
			if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
				ret = ib_send_mad(mad_send_wr);
		} else
			ret = ib_send_mad(mad_send_wr);
		if (ret < 0) {
			/* Fail send request */
			spin_lock_irqsave(&mad_agent_priv->lock, flags);
			list_del(&mad_send_wr->agent_list);
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			atomic_dec(&mad_agent_priv->refcount);
			goto error;
		}
	}
	return 0;
error:
	if (bad_send_buf)
		*bad_send_buf = send_buf;
	return ret;
}
EXPORT_SYMBOL(ib_post_send_mad);

/*
 * ib_free_recv_mad - Returns data buffers used to receive
 *  a MAD to the access layer
 */
void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *priv;
	struct list_head free_list;

	INIT_LIST_HEAD(&free_list);
	list_splice_init(&mad_recv_wc->rmpp_list, &free_list);

	list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
					&free_list, list) {
		mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
					   recv_buf);
		mad_priv_hdr = container_of(mad_recv_wc,
					    struct ib_mad_private_header,
					    recv_wc);
		priv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);
		kfree(priv);
	}
}
EXPORT_SYMBOL(ib_free_recv_mad);

struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
					u8 rmpp_version,
					ib_mad_send_handler send_handler,
					ib_mad_recv_handler recv_handler,
					void *context)
{
	return ERR_PTR(-EINVAL);	/* XXX: for now */
}
EXPORT_SYMBOL(ib_redirect_mad_qp);

int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
		      struct ib_wc *wc)
{
	dev_err(&mad_agent->device->dev,
		"ib_process_mad_wc() not implemented yet\n");
	return 0;
}
EXPORT_SYMBOL(ib_process_mad_wc);

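/*
 * Check whether any of the methods in the registration request are
 * already claimed by another agent in this method table
 */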
static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req)
{
	int i;

	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) {
		if ((*method)->agent[i]) {
			pr_err("Method %d already in use\n", i);
			return -EINVAL;
		}
	}
	return 0;
}

static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
{
	/* Allocate management method table */
	*method = kzalloc(sizeof **method, GFP_ATOMIC);
	if (!*method) {
		pr_err("No memory for ib_mad_mgmt_method_table\n");
		return -ENOMEM;
	}

	return 0;
}

/*
 * Check to see if there are any methods still in use
 */
static int check_method_table(struct ib_mad_mgmt_method_table *method)
{
	int i;

	for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
		if (method->agent[i])
			return 1;
	return 0;
}

/*
 * Check to see if there are any method tables for this class still in use
 */
static int check_class_table(struct ib_mad_mgmt_class_table *class)
{
	int i;

	for (i = 0; i < MAX_MGMT_CLASS; i++)
		if (class->method_table[i])
			return 1;
	return 0;
}

static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
{
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++)
		if (vendor_class->method_table[i])
			return 1;
	return 0;
}

static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
			   const char *oui)
{
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++)
		/* Is there a matching OUI for this vendor class? */
		if (!memcmp(vendor_class->oui[i], oui, 3))
			return i;

	return -1;
}

static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
{
	int i;

	for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
		if (vendor->vendor_class[i])
			return 1;

	return 0;
}

static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
				     struct ib_mad_agent_private *agent)
{
	int i;

	/* Remove any methods for this mad agent */
	for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
		if (method->agent[i] == agent) {
			method->agent[i] = NULL;
		}
	}
}

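/*
 * Register the requested methods for a non-vendor (no OUI) management
 * class, allocating the per-version class table and the per-class
 * method table on first use
 */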
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_class_table **class;
	struct ib_mad_mgmt_method_table **method;
	int i, ret;

	port_priv = agent_priv->qp_info->port_priv;
	class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
	if (!*class) {
		/* Allocate management class table for "new" class version */
		*class = kzalloc(sizeof **class, GFP_ATOMIC);
		if (!*class) {
			dev_err(&agent_priv->agent.device->dev,
				"No memory for ib_mad_mgmt_class_table\n");
			ret = -ENOMEM;
			goto error1;
		}

		/* Allocate method table for this management class */
		method = &(*class)->method_table[mgmt_class];
		if ((ret = allocate_method_table(method)))
			goto error2;
	} else {
		method = &(*class)->method_table[mgmt_class];
		if (!*method) {
			/* Allocate method table for this management class */
			if ((ret = allocate_method_table(method)))
				goto error1;
		}
	}

	/* Now, make sure methods are not already in use */
	if (method_in_use(method, mad_reg_req))
		goto error3;

	/* Finally, add in methods being registered */
	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
		(*method)->agent[i] = agent_priv;

	return 0;

error3:
	/* Remove any methods for this mad agent */
	remove_methods_mad_agent(*method, agent_priv);
	/* Now, check to see if there are any methods in use */
	if (!check_method_table(*method)) {
		/* If not, release management method table */
		kfree(*method);
		*method = NULL;
	}
	ret = -EINVAL;
	goto error1;
error2:
	kfree(*class);
	*class = NULL;
error1:
	return ret;
}

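/*
 * Register the requested methods for a vendor management class with OUI,
 * locating an existing OUI slot or claiming a free one before filling in
 * the method table
 */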
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_vendor_class_table **vendor_table;
	struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
	struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
	struct ib_mad_mgmt_method_table **method;
	int i, ret = -ENOMEM;
	u8 vclass;

	/* "New" vendor (with OUI) class */
	vclass = vendor_class_index(mad_reg_req->mgmt_class);
	port_priv = agent_priv->qp_info->port_priv;
	vendor_table = &port_priv->version[
				mad_reg_req->mgmt_class_version].vendor;
	if (!*vendor_table) {
		/* Allocate mgmt vendor class table for "new" class version */
		vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
		if (!vendor) {
			dev_err(&agent_priv->agent.device->dev,
				"No memory for ib_mad_mgmt_vendor_class_table\n");
			goto error1;
		}

		*vendor_table = vendor;
	}
	if (!(*vendor_table)->vendor_class[vclass]) {
		/* Allocate table for this management vendor class */
		vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
		if (!vendor_class) {
			dev_err(&agent_priv->agent.device->dev,
				"No memory for ib_mad_mgmt_vendor_class\n");
			goto error2;
		}

		(*vendor_table)->vendor_class[vclass] = vendor_class;
	}
	for (i = 0; i < MAX_MGMT_OUI; i++) {
		/* Is there a matching OUI for this vendor class? */
		if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
			    mad_reg_req->oui, 3)) {
			method = &(*vendor_table)->vendor_class[
						vclass]->method_table[i];
			BUG_ON(!*method);
			goto check_in_use;
		}
	}
	for (i = 0; i < MAX_MGMT_OUI; i++) {
		/* Is an OUI slot available? */
		if (!is_vendor_oui((*vendor_table)->vendor_class[
				vclass]->oui[i])) {
			method = &(*vendor_table)->vendor_class[
				vclass]->method_table[i];
			BUG_ON(*method);
			/* Allocate method table for this OUI */
			if ((ret = allocate_method_table(method)))
				goto error3;
			memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
			       mad_reg_req->oui, 3);
			goto check_in_use;
		}
	}
	dev_err(&agent_priv->agent.device->dev, "All OUI slots in use\n");
	goto error3;

check_in_use:
	/* Now, make sure methods are not already in use */
	if (method_in_use(method, mad_reg_req))
		goto error4;

	/* Finally, add in methods being registered */
	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
		(*method)->agent[i] = agent_priv;

	return 0;

error4:
	/* Remove any methods for this mad agent */
	remove_methods_mad_agent(*method, agent_priv);
	/* Now, check to see if there are any methods in use */
	if (!check_method_table(*method)) {
		/* If not, release management method table */
		kfree(*method);
		*method = NULL;
	}
	ret = -EINVAL;
error3:
	if (vendor_class) {
		(*vendor_table)->vendor_class[vclass] = NULL;
		kfree(vendor_class);
	}
error2:
	if (vendor) {
		*vendor_table = NULL;
		kfree(vendor);
	}
error1:
	return ret;
}

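/*
 * Remove an agent's method registrations, releasing any method, class,
 * and vendor tables that become empty as a result
 */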
static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_method_table *method;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	int index;
	u8 mgmt_class;

	/*
	 * Was a MAD registration request supplied
	 * with the original registration?
	 */
	if (!agent_priv->reg_req)
		goto out;

	port_priv = agent_priv->qp_info->port_priv;
	mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
	class = port_priv->version[
			agent_priv->reg_req->mgmt_class_version].class;
	if (!class)
		goto vendor_check;

	method = class->method_table[mgmt_class];
	if (method) {
		/* Remove any methods for this mad agent */
		remove_methods_mad_agent(method, agent_priv);
		/* Now, check to see if there are any methods still in use */
		if (!check_method_table(method)) {
			/* If not, release management method table */
			kfree(method);
			class->method_table[mgmt_class] = NULL;
			/* Any management classes left? */
			if (!check_class_table(class)) {
				/* If not, release management class table */
				kfree(class);
				port_priv->version[
					agent_priv->reg_req->
					mgmt_class_version].class = NULL;
			}
		}
	}

vendor_check:
	if (!is_vendor_class(mgmt_class))
		goto out;

	/* normalize mgmt_class to vendor range 2 */
	mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
	vendor = port_priv->version[
			agent_priv->reg_req->mgmt_class_version].vendor;

	if (!vendor)
		goto out;

	vendor_class = vendor->vendor_class[mgmt_class];
	if (vendor_class) {
		index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
		if (index < 0)
			goto out;
		method = vendor_class->method_table[index];
		if (method) {
			/* Remove any methods for this mad agent */
			remove_methods_mad_agent(method, agent_priv);
			/*
			 * Now, check to see if there are
			 * any methods still in use
			 */
			if (!check_method_table(method)) {
				/* If not, release management method table */
				kfree(method);
				vendor_class->method_table[index] = NULL;
				memset(vendor_class->oui[index], 0, 3);
				/* Any OUIs left? */
				if (!check_vendor_class(vendor_class)) {
					/* If not, release vendor class table */
					kfree(vendor_class);
					vendor->vendor_class[mgmt_class] = NULL;
					/* Any other vendor classes left? */
					if (!check_vendor_table(vendor)) {
						kfree(vendor);
						port_priv->version[
							agent_priv->reg_req->
							mgmt_class_version].
							vendor = NULL;
					}
				}
			}
		}
	}

out:
	return;
}

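/*
 * Route a received MAD to the matching agent: responses are matched on
 * the high 32 bits of the TID, requests on class version, management
 * class, method and, for vendor classes, OUI
 */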
static struct ib_mad_agent_private *
find_mad_agent(struct ib_mad_port_private *port_priv,
	       const struct ib_mad_hdr *mad_hdr)
{
	struct ib_mad_agent_private *mad_agent = NULL;
	unsigned long flags;

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	if (ib_response_mad(mad_hdr)) {
		u32 hi_tid;
		struct ib_mad_agent_private *entry;

		/*
		 * Routing is based on high 32 bits of transaction ID
		 * of MAD.
		 */
		hi_tid = be64_to_cpu(mad_hdr->tid) >> 32;
		list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
			if (entry->agent.hi_tid == hi_tid) {
				mad_agent = entry;
				break;
			}
		}
	} else {
		struct ib_mad_mgmt_class_table *class;
		struct ib_mad_mgmt_method_table *method;
		struct ib_mad_mgmt_vendor_class_table *vendor;
		struct ib_mad_mgmt_vendor_class *vendor_class;
		const struct ib_vendor_mad *vendor_mad;
		int index;

		/*
		 * Routing is based on version, class, and method
		 * For "newer" vendor MADs, also based on OUI
		 */
		if (mad_hdr->class_version >= MAX_MGMT_VERSION)
			goto out;
		if (!is_vendor_class(mad_hdr->mgmt_class)) {
			class = port_priv->version[
					mad_hdr->class_version].class;
			if (!class)
				goto out;
			if (convert_mgmt_class(mad_hdr->mgmt_class) >=
			    ARRAY_SIZE(class->method_table))
				goto out;
			method = class->method_table[convert_mgmt_class(
							mad_hdr->mgmt_class)];
			if (method)
				mad_agent = method->agent[mad_hdr->method &
							  ~IB_MGMT_METHOD_RESP];
		} else {
			vendor = port_priv->version[
					mad_hdr->class_version].vendor;
			if (!vendor)
				goto out;
			vendor_class = vendor->vendor_class[vendor_class_index(
						mad_hdr->mgmt_class)];
			if (!vendor_class)
				goto out;
			/* Find matching OUI */
			vendor_mad = (const struct ib_vendor_mad *)mad_hdr;
			index = find_vendor_oui(vendor_class, vendor_mad->oui);
			if (index == -1)
				goto out;
			method = vendor_class->method_table[index];
			if (method) {
				mad_agent = method->agent[mad_hdr->method &
							  ~IB_MGMT_METHOD_RESP];
			}
		}
	}

	if (mad_agent) {
		if (mad_agent->agent.recv_handler)
			atomic_inc(&mad_agent->refcount);
		else {
			dev_notice(&port_priv->device->dev,
				   "No receive handler for client %p on port %d\n",
				   &mad_agent->agent, port_priv->port_num);
			mad_agent = NULL;
		}
	}
out:
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	return mad_agent;
}

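/*
 * Sanity-check a received MAD: the base version must be understood,
 * SMP classes are only accepted on QP0, and all other classes only on
 * QPs other than QP0
 */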
static int validate_mad(const struct ib_mad_hdr *mad_hdr,
			const struct ib_mad_qp_info *qp_info,
			bool opa)
{
	int valid = 0;
	u32 qp_num = qp_info->qp->qp_num;

	/* Make sure MAD base version is understood */
	if (mad_hdr->base_version != IB_MGMT_BASE_VERSION &&
	    (!opa || mad_hdr->base_version != OPA_MGMT_BASE_VERSION)) {
		pr_err("MAD received with unsupported base version %d %s\n",
		       mad_hdr->base_version, opa ? "(opa)" : "");
		goto out;
	}

	/* Filter SMI packets sent to other than QP0 */
	if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
	    (mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
		if (qp_num == 0)
			valid = 1;
	} else {
		/* CM attributes other than ClassPortInfo only use Send method */
		if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_CM) &&
		    (mad_hdr->attr_id != IB_MGMT_CLASSPORTINFO_ATTR_ID) &&
		    (mad_hdr->method != IB_MGMT_METHOD_SEND))
			goto out;
		/* Filter GSI packets sent to QP0 */
		if (qp_num != 0)
			valid = 1;
	}

out:
	return valid;
}

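/*
 * Returns true unless this is a kernel-RMPP agent handling an active
 * RMPP segment of a type other than DATA (ACK/STOP/ABORT), which does
 * not carry a data payload
 */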
static int is_rmpp_data_mad(const struct ib_mad_agent_private *mad_agent_priv,
			    const struct ib_mad_hdr *mad_hdr)
{
	const struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (const struct ib_rmpp_mad *)mad_hdr;
	return !mad_agent_priv->agent.rmpp_version ||
		!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) ||
		!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
				    IB_MGMT_RMPP_FLAG_ACTIVE) ||
		(rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
}

static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr,
				     const struct ib_mad_recv_wc *rwc)
{
	return ((struct ib_mad_hdr *)(wr->send_buf.mad))->mgmt_class ==
		rwc->recv_buf.mad->mad_hdr.mgmt_class;
}

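/*
 * Check whether a received response came from the address the request
 * was sent to, comparing LID/path bits when no GRH is present and GIDs
 * when one is
 */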
static inline int rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv,
				   const struct ib_mad_send_wr_private *wr,
				   const struct ib_mad_recv_wc *rwc)
{
	struct ib_ah_attr attr;
	u8 send_resp, rcv_resp;
	union ib_gid sgid;
	struct ib_device *device = mad_agent_priv->agent.device;
	u8 port_num = mad_agent_priv->agent.port_num;
	u8 lmc;

	send_resp = ib_response_mad((struct ib_mad_hdr *)wr->send_buf.mad);
	rcv_resp = ib_response_mad(&rwc->recv_buf.mad->mad_hdr);

	if (send_resp == rcv_resp)
		/* both requests or both responses; treat GIDs as different */
		return 0;

	if (ib_query_ah(wr->send_buf.ah, &attr))
		/* Assume not equal, to avoid false positives. */
		return 0;

	if (!!(attr.ah_flags & IB_AH_GRH) !=
	    !!(rwc->wc->wc_flags & IB_WC_GRH))
		/* one has a GRH, the other does not; assume different */
		return 0;

	if (!send_resp && rcv_resp) {
		/* this is a request/response pair */
		if (!(attr.ah_flags & IB_AH_GRH)) {
			if (ib_get_cached_lmc(device, port_num, &lmc))
				return 0;
			return (!lmc || !((attr.src_path_bits ^
					   rwc->wc->dlid_path_bits) &
					  ((1 << lmc) - 1)));
		} else {
			if (ib_get_cached_gid(device, port_num,
					      attr.grh.sgid_index, &sgid, NULL))
				return 0;
			return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw,
				       16);
		}
	}

	if (!(attr.ah_flags & IB_AH_GRH))
		return attr.dlid == rwc->wc->slid;
	else
		return !memcmp(attr.grh.dgid.raw, rwc->recv_buf.grh->sgid.raw,
			       16);
}

static inline int is_direct(u8 class)
{
	return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE);
}

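/*
 * Find the send work request a received response corresponds to,
 * matching on TID, management class and (for non-directed-route MADs)
 * the address the request was sent to
 */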
struct ib_mad_send_wr_private *
ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv,
		 const struct ib_mad_recv_wc *wc)
{
	struct ib_mad_send_wr_private *wr;
	const struct ib_mad_hdr *mad_hdr;

	mad_hdr = &wc->recv_buf.mad->mad_hdr;

	list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) {
		if ((wr->tid == mad_hdr->tid) &&
		    rcv_has_same_class(wr, wc) &&
		    /*
		     * Don't check GID for direct routed MADs.
		     * These might have permissive LIDs.
		     */
		    (is_direct(mad_hdr->mgmt_class) ||
		     rcv_has_same_gid(mad_agent_priv, wr, wc)))
			return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
	}

	/*
	 * It's possible to receive the response before we've
	 * been notified that the send has completed
	 */
	list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
		if (is_rmpp_data_mad(mad_agent_priv, wr->send_buf.mad) &&
		    wr->tid == mad_hdr->tid &&
		    wr->timeout &&
		    rcv_has_same_class(wr, wc) &&
		    /*
		     * Don't check GID for direct routed MADs.
		     * These might have permissive LIDs.
		     */
		    (is_direct(mad_hdr->mgmt_class) ||
		     rcv_has_same_gid(mad_agent_priv, wr, wc)))
			/* Verify request has not been canceled */
			return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
	}
	return NULL;
}

void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
{
	mad_send_wr->timeout = 0;
	if (mad_send_wr->refcount == 1)
		list_move_tail(&mad_send_wr->agent_list,
			      &mad_send_wr->mad_agent_priv->done_list);
}

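/*
 * Deliver a received MAD to the agent: responses complete the matching
 * send first, while unsolicited MADs go straight to the recv_handler.
 * Ownership of mad_recv_wc passes to this function.
 */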
static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
				 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags;

	INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
	list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
	if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
		mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
						      mad_recv_wc);
		if (!mad_recv_wc) {
			deref_mad_agent(mad_agent_priv);
			return;
		}
	}

	/* Complete corresponding request */
	if (ib_response_mad(&mad_recv_wc->recv_buf.mad->mad_hdr)) {
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
		if (!mad_send_wr) {
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			if (!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)
			   && ib_is_mad_class_rmpp(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class)
			   && (ib_get_rmpp_flags(&((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr)
					& IB_MGMT_RMPP_FLAG_ACTIVE)) {
				/* user rmpp is in effect
				 * and this is an active RMPP MAD
				 */
				mad_agent_priv->agent.recv_handler(
						&mad_agent_priv->agent, NULL,
						mad_recv_wc);
				atomic_dec(&mad_agent_priv->refcount);
			} else {
				/* not user rmpp, revert to normal behavior and
				 * drop the mad */
				ib_free_recv_mad(mad_recv_wc);
				deref_mad_agent(mad_agent_priv);
				return;
			}
		} else {
			ib_mark_mad_done(mad_send_wr);
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

			/* Defined behavior is to complete response before request */
			mad_agent_priv->agent.recv_handler(
					&mad_agent_priv->agent,
					&mad_send_wr->send_buf,
					mad_recv_wc);
			atomic_dec(&mad_agent_priv->refcount);

			mad_send_wc.status = IB_WC_SUCCESS;
			mad_send_wc.vendor_err = 0;
			mad_send_wc.send_buf = &mad_send_wr->send_buf;
			ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
		}
	} else {
		mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, NULL,
						   mad_recv_wc);
		deref_mad_agent(mad_agent_priv);
	}
}

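/*
 * Handle a directed-route SMP received on this port: consume it
 * locally, discard it, or (when acting as a switch) forward it out the
 * egress port
 */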
static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv,
				     const struct ib_mad_qp_info *qp_info,
				     const struct ib_wc *wc,
				     int port_num,
				     struct ib_mad_private *recv,
				     struct ib_mad_private *response)
{
	enum smi_forward_action retsmi;
	struct ib_smp *smp = (struct ib_smp *)recv->mad;

	if (smi_handle_dr_smp_recv(smp,
				   rdma_cap_ib_switch(port_priv->device),
				   port_num,
				   port_priv->device->phys_port_cnt) ==
				   IB_SMI_DISCARD)
		return IB_SMI_DISCARD;

	retsmi = smi_check_forward_dr_smp(smp);
	if (retsmi == IB_SMI_LOCAL)
		return IB_SMI_HANDLE;

	if (retsmi == IB_SMI_SEND) { /* don't forward */
		if (smi_handle_dr_smp_send(smp,
					   rdma_cap_ib_switch(port_priv->device),
					   port_num) == IB_SMI_DISCARD)
			return IB_SMI_DISCARD;

		if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD)
			return IB_SMI_DISCARD;
	} else if (rdma_cap_ib_switch(port_priv->device)) {
		/* forward case for switches */
		memcpy(response, recv, mad_priv_size(response));
		response->header.recv_wc.wc = &response->header.wc;
		response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
		response->header.recv_wc.recv_buf.grh = &response->grh;

		agent_send_response((const struct ib_mad_hdr *)response->mad,
				    &response->grh, wc,
				    port_priv->device,
				    smi_get_fwd_port(smp),
				    qp_info->qp->qp_num,
				    response->mad_size,
				    false);

		return IB_SMI_DISCARD;
	}
	return IB_SMI_HANDLE;
}

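/*
 * Build the "unsupported method/attribute" response sent back when a
 * Get or Set request matches no registered agent
 */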
static bool generate_unmatched_resp(const struct ib_mad_private *recv,
				    struct ib_mad_private *response,
				    size_t *resp_len, bool opa)
{
	const struct ib_mad_hdr *recv_hdr = (const struct ib_mad_hdr *)recv->mad;
	struct ib_mad_hdr *resp_hdr = (struct ib_mad_hdr *)response->mad;

	if (recv_hdr->method == IB_MGMT_METHOD_GET ||
	    recv_hdr->method == IB_MGMT_METHOD_SET) {
		memcpy(response, recv, mad_priv_size(response));
		response->header.recv_wc.wc = &response->header.wc;
		response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
		response->header.recv_wc.recv_buf.grh = &response->grh;
		resp_hdr->method = IB_MGMT_METHOD_GET_RESP;
		resp_hdr->status = cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB);
		if (recv_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
			resp_hdr->status |= IB_SMP_DIRECTION;

		if (opa && recv_hdr->base_version == OPA_MGMT_BASE_VERSION) {
			if (recv_hdr->mgmt_class ==
			    IB_MGMT_CLASS_SUBN_LID_ROUTED ||
			    recv_hdr->mgmt_class ==
			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
				*resp_len = opa_get_smp_header_size(
							(const struct opa_smp *)recv->mad);
			else
				*resp_len = sizeof(struct ib_mad_hdr);
		}

		return true;
	} else {
		return false;
	}
}

static enum smi_action
handle_opa_smi(struct ib_mad_port_private *port_priv,
	       struct ib_mad_qp_info *qp_info,
	       struct ib_wc *wc,
	       int port_num,
	       struct ib_mad_private *recv,
	       struct ib_mad_private *response)
{
	enum smi_forward_action retsmi;
	struct opa_smp *smp = (struct opa_smp *)recv->mad;

	if (opa_smi_handle_dr_smp_recv(smp,
				   rdma_cap_ib_switch(port_priv->device),
				   port_num,
				   port_priv->device->phys_port_cnt) ==
				   IB_SMI_DISCARD)
		return IB_SMI_DISCARD;

	retsmi = opa_smi_check_forward_dr_smp(smp);
	if (retsmi == IB_SMI_LOCAL)
		return IB_SMI_HANDLE;

	if (retsmi == IB_SMI_SEND) { /* don't forward */
		if (opa_smi_handle_dr_smp_send(smp,
					   rdma_cap_ib_switch(port_priv->device),
					   port_num) == IB_SMI_DISCARD)
			return IB_SMI_DISCARD;

		if (opa_smi_check_local_smp(smp, port_priv->device) ==
		    IB_SMI_DISCARD)
			return IB_SMI_DISCARD;

	} else if (rdma_cap_ib_switch(port_priv->device)) {
		/* forward case for switches */
		memcpy(response, recv, mad_priv_size(response));
		response->header.recv_wc.wc = &response->header.wc;
		response->header.recv_wc.recv_buf.opa_mad =
				(struct opa_mad *)response->mad;
		response->header.recv_wc.recv_buf.grh = &response->grh;

		agent_send_response((const struct ib_mad_hdr *)response->mad,
				    &response->grh, wc,
				    port_priv->device,
				    opa_smi_get_fwd_port(smp),
				    qp_info->qp->qp_num,
				    recv->header.wc.byte_len,
				    true);

		return IB_SMI_DISCARD;
	}

	return IB_SMI_HANDLE;
}

static enum smi_action
handle_smi(struct ib_mad_port_private *port_priv,
	   struct ib_mad_qp_info *qp_info,
	   struct ib_wc *wc,
	   int port_num,
	   struct ib_mad_private *recv,
	   struct ib_mad_private *response,
	   bool opa)
{
	struct ib_mad_hdr *mad_hdr = (struct ib_mad_hdr *)recv->mad;

	if (opa && mad_hdr->base_version == OPA_MGMT_BASE_VERSION &&
	    mad_hdr->class_version == OPA_SMI_CLASS_VERSION)
		return handle_opa_smi(port_priv, qp_info, wc, port_num, recv,
				      response);

	return handle_ib_smi(port_priv, qp_info, wc, port_num, recv, response);
}

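/*
 * Receive completion handler: unmap and validate the incoming MAD, give
 * the driver's process_mad() first refusal, route it to the matching
 * agent, and repost a receive buffer
 */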
static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_mad_port_private *port_priv = cq->cq_context;
	struct ib_mad_list_head *mad_list =
		container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *recv, *response = NULL;
	struct ib_mad_agent_private *mad_agent;
	int port_num;
	int ret = IB_MAD_RESULT_SUCCESS;
	size_t mad_size;
	u16 resp_mad_pkey_index = 0;
	bool opa;

	if (list_empty_careful(&port_priv->port_list))
		return;

	if (wc->status != IB_WC_SUCCESS) {
		/*
		 * Receive errors indicate that the QP has entered the error
		 * state - error handling/shutdown code will cleanup
		 */
		return;
	}

	qp_info = mad_list->mad_queue->qp_info;
	dequeue_mad(mad_list);

	opa = rdma_cap_opa_mad(qp_info->port_priv->device,
			       qp_info->port_priv->port_num);

	mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
				    mad_list);
	recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
	ib_dma_unmap_single(port_priv->device,
			    recv->header.mapping,
			    mad_priv_dma_size(recv),
			    DMA_FROM_DEVICE);

	/* Setup MAD receive work completion from "normal" work completion */
	recv->header.wc = *wc;
	recv->header.recv_wc.wc = &recv->header.wc;

	if (opa && ((struct ib_mad_hdr *)(recv->mad))->base_version == OPA_MGMT_BASE_VERSION) {
		recv->header.recv_wc.mad_len = wc->byte_len - sizeof(struct ib_grh);
		recv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
	} else {
		recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
		recv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
	}

	recv->header.recv_wc.recv_buf.mad = (struct ib_mad *)recv->mad;
	recv->header.recv_wc.recv_buf.grh = &recv->grh;

	if (atomic_read(&qp_info->snoop_count))
		snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);

	/* Validate MAD */
	if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info, opa))
		goto out;

	mad_size = recv->mad_size;
	response = alloc_mad_private(mad_size, GFP_KERNEL);
	if (!response) {
		dev_err(&port_priv->device->dev,
			"%s: no memory for response buffer\n", __func__);
		goto out;
	}

	if (rdma_cap_ib_switch(port_priv->device))
		port_num = wc->port_num;
	else
		port_num = port_priv->port_num;

	if (((struct ib_mad_hdr *)recv->mad)->mgmt_class ==
	    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		if (handle_smi(port_priv, qp_info, wc, port_num, recv,
			       response, opa)
		    == IB_SMI_DISCARD)
			goto out;
	}

	/* Give driver "right of first refusal" on incoming MAD */
	if (port_priv->device->process_mad) {
		ret = port_priv->device->process_mad(port_priv->device, 0,
						     port_priv->port_num,
						     wc, &recv->grh,
						     (const struct ib_mad_hdr *)recv->mad,
						     recv->mad_size,
						     (struct ib_mad_hdr *)response->mad,
						     &mad_size, &resp_mad_pkey_index);

		if (opa)
			wc->pkey_index = resp_mad_pkey_index;

		if (ret & IB_MAD_RESULT_SUCCESS) {
			if (ret & IB_MAD_RESULT_CONSUMED)
				goto out;
			if (ret & IB_MAD_RESULT_REPLY) {
				agent_send_response((const struct ib_mad_hdr *)response->mad,
						    &recv->grh, wc,
						    port_priv->device,
						    port_num,
						    qp_info->qp->qp_num,
						    mad_size, opa);
				goto out;
			}
		}
	}

	mad_agent = find_mad_agent(port_priv, (const struct ib_mad_hdr *)recv->mad);
	if (mad_agent) {
		ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
		/*
		 * recv is consumed: ib_mad_complete_recv() frees it on
		 * error or hands it to the client's recv_handler
		 */
		recv = NULL;
	} else if ((ret & IB_MAD_RESULT_SUCCESS) &&
		   generate_unmatched_resp(recv, response, &mad_size, opa)) {
		agent_send_response((const struct ib_mad_hdr *)response->mad, &recv->grh, wc,
				    port_priv->device, port_num,
				    qp_info->qp->qp_num, mad_size, opa);
	}

out:
	/* Post another receive request for this QP */
	if (response) {
		ib_mad_post_receive_mads(qp_info, response);
		kfree(recv);
	} else
		ib_mad_post_receive_mads(qp_info, recv);
}

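/*
 * Re-arm the timeout work for the first entry on the wait list, or
 * cancel it when the list is empty
 */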
static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long delay;

	if (list_empty(&mad_agent_priv->wait_list)) {
		cancel_delayed_work(&mad_agent_priv->timed_work);
	} else {
		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
					 struct ib_mad_send_wr_private,
					 agent_list);

		if (time_after(mad_agent_priv->timeout,
			       mad_send_wr->timeout)) {
			mad_agent_priv->timeout = mad_send_wr->timeout;
			delay = mad_send_wr->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
					 &mad_agent_priv->timed_work, delay);
		}
	}
}

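/*
 * Move a send work request onto the wait list, keeping the list sorted
 * by absolute timeout, and reschedule the timeout work if this entry
 * now expires first
 */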
2343320592Shselaskystatic void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
2344320592Shselasky{
2345320592Shselasky	struct ib_mad_agent_private *mad_agent_priv;
2346320592Shselasky	struct ib_mad_send_wr_private *temp_mad_send_wr;
2347320592Shselasky	struct list_head *list_item;
2348320592Shselasky	unsigned long delay;
2349320592Shselasky
2350320592Shselasky	mad_agent_priv = mad_send_wr->mad_agent_priv;
2351320592Shselasky	list_del(&mad_send_wr->agent_list);
2352320592Shselasky
2353320592Shselasky	delay = mad_send_wr->timeout;
2354320592Shselasky	mad_send_wr->timeout += jiffies;
2355320592Shselasky
2356320592Shselasky	if (delay) {
2357320592Shselasky		list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
2358320592Shselasky			temp_mad_send_wr = list_entry(list_item,
2359320592Shselasky						struct ib_mad_send_wr_private,
2360320592Shselasky						agent_list);
2361320592Shselasky			if (time_after(mad_send_wr->timeout,
2362320592Shselasky				       temp_mad_send_wr->timeout))
2363320592Shselasky				break;
2364320592Shselasky		}
2365320592Shselasky	}
2366320592Shselasky	else
2367320592Shselasky		list_item = &mad_agent_priv->wait_list;
2368320592Shselasky	list_add(&mad_send_wr->agent_list, list_item);
2369320592Shselasky
2370320592Shselasky	/* Reschedule a work item if we have a shorter timeout */
2371320592Shselasky	if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list)
2372320592Shselasky		mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2373320592Shselasky				 &mad_agent_priv->timed_work, delay);
2374320592Shselasky}
2375320592Shselasky
void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
			  int timeout_ms)
{
	mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
	wait_for_response(mad_send_wr);
}

/*
 * Process a send work completion
 */
void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
			     struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_mad_agent_private	*mad_agent_priv;
	unsigned long			flags;
	int				ret;

	mad_agent_priv = mad_send_wr->mad_agent_priv;
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
		ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
		if (ret == IB_RMPP_RESULT_CONSUMED)
			goto done;
	} else
		ret = IB_RMPP_RESULT_UNHANDLED;

	if (mad_send_wc->status != IB_WC_SUCCESS &&
	    mad_send_wr->status == IB_WC_SUCCESS) {
		mad_send_wr->status = mad_send_wc->status;
		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
	}

	if (--mad_send_wr->refcount > 0) {
		if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
		    mad_send_wr->status == IB_WC_SUCCESS) {
			wait_for_response(mad_send_wr);
		}
		goto done;
	}

	/* Remove send from MAD agent and notify client of completion */
	list_del(&mad_send_wr->agent_list);
	adjust_timeout(mad_agent_priv);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

	if (mad_send_wr->status != IB_WC_SUCCESS)
		mad_send_wc->status = mad_send_wr->status;
	if (ret == IB_RMPP_RESULT_INTERNAL)
		ib_rmpp_send_handler(mad_send_wc);
	else
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   mad_send_wc);

	/* Release reference on agent taken when sending */
	deref_mad_agent(mad_agent_priv);
	return;
done:
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}

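/*
 * Completion handler for MAD send work requests: tear down the DMA
 * mappings for header and payload, move a send parked on the overflow
 * list (if any) onto the hardware send queue and repost it, then hand
 * the completion to ib_mad_complete_send_wr().  A failed repost is
 * turned into a local QP error completion and run through the same
 * path.
 */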
static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_mad_port_private *port_priv = cq->cq_context;
	struct ib_mad_list_head *mad_list =
		container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
	struct ib_mad_send_wr_private	*mad_send_wr, *queued_send_wr;
	struct ib_mad_qp_info		*qp_info;
	struct ib_mad_queue		*send_queue;
	struct ib_send_wr		*bad_send_wr;
	struct ib_mad_send_wc		mad_send_wc;
	unsigned long flags;
	int ret;

	if (list_empty_careful(&port_priv->port_list))
		return;

	if (wc->status != IB_WC_SUCCESS) {
		if (!ib_mad_send_error(port_priv, wc))
			return;
	}

	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
				   mad_list);
	send_queue = mad_list->mad_queue;
	qp_info = send_queue->qp_info;

retry:
	ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
			    mad_send_wr->header_mapping,
			    mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
	ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
			    mad_send_wr->payload_mapping,
			    mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
	queued_send_wr = NULL;
	spin_lock_irqsave(&send_queue->lock, flags);
	list_del(&mad_list->list);

	/* Move queued send to the send queue */
	if (send_queue->count-- > send_queue->max_active) {
		mad_list = container_of(qp_info->overflow_list.next,
					struct ib_mad_list_head, list);
		queued_send_wr = container_of(mad_list,
					struct ib_mad_send_wr_private,
					mad_list);
		list_move_tail(&mad_list->list, &send_queue->list);
	}
	spin_unlock_irqrestore(&send_queue->lock, flags);

	mad_send_wc.send_buf = &mad_send_wr->send_buf;
	mad_send_wc.status = wc->status;
	mad_send_wc.vendor_err = wc->vendor_err;
	if (atomic_read(&qp_info->snoop_count))
		snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
			   IB_MAD_SNOOP_SEND_COMPLETIONS);
	ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);

	if (queued_send_wr) {
		ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr.wr,
				   &bad_send_wr);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"ib_post_send failed: %d\n", ret);
			mad_send_wr = queued_send_wr;
			wc->status = IB_WC_LOC_QP_OP_ERR;
			goto retry;
		}
	}
}

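/*
 * Flag every send still outstanding on this QP so that its flushed
 * completion is reposted by ib_mad_send_error() instead of being
 * reported to the client as a failure.
 */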
static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_list_head *mad_list;
	unsigned long flags;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
		mad_send_wr = container_of(mad_list,
					   struct ib_mad_send_wr_private,
					   mad_list);
		mad_send_wr->retry = 1;
	}
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
}

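/*
 * Returns true if the error completion must be processed as a failed
 * send, false if the work request was successfully reposted and the
 * completion can be ignored.
 */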
static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
		struct ib_wc *wc)
{
	struct ib_mad_list_head *mad_list =
		container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
	struct ib_mad_qp_info *qp_info = mad_list->mad_queue->qp_info;
	struct ib_mad_send_wr_private *mad_send_wr;
	int ret;

	/*
	 * Send errors will transition the QP to SQE - move
	 * QP to RTS and repost flushed work requests
	 */
	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
				   mad_list);
	if (wc->status == IB_WC_WR_FLUSH_ERR) {
		if (mad_send_wr->retry) {
			/* Repost send */
			struct ib_send_wr *bad_send_wr;

			mad_send_wr->retry = 0;
			ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr.wr,
					&bad_send_wr);
			if (!ret)
				return false;
		}
	} else {
		struct ib_qp_attr *attr;

		/* Transition QP to RTS and fail offending send */
		attr = kmalloc(sizeof *attr, GFP_KERNEL);
		if (attr) {
			attr->qp_state = IB_QPS_RTS;
			attr->cur_qp_state = IB_QPS_SQE;
			ret = ib_modify_qp(qp_info->qp, attr,
					   IB_QP_STATE | IB_QP_CUR_STATE);
			kfree(attr);
			if (ret)
				dev_err(&port_priv->device->dev,
					"%s - ib_modify_qp to RTS: %d\n",
					__func__, ret);
			else
				mark_sends_for_retry(qp_info);
		}
	}

	return true;
}

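/*
 * Flush all outstanding sends for an agent that is going away: mark
 * active sends as flushed in place, splice the wait list onto a
 * private cancel list so receives can no longer match a request, and
 * then report each cancelled send to the client outside the agent
 * lock.
 */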
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
{
	unsigned long flags;
	struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	struct list_head cancel_list;

	INIT_LIST_HEAD(&cancel_list);

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
				 &mad_agent_priv->send_list, agent_list) {
		if (mad_send_wr->status == IB_WC_SUCCESS) {
			mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
			mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
		}
	}

	/* Empty wait list to prevent receives from finding a request */
	list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

	/* Report all cancelled requests */
	mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
	mad_send_wc.vendor_err = 0;

	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
				 &cancel_list, agent_list) {
		mad_send_wc.send_buf = &mad_send_wr->send_buf;
		list_del(&mad_send_wr->agent_list);
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);
		atomic_dec(&mad_agent_priv->refcount);
	}
}

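/*
 * Find the send WR backing a send buffer.  The wait list is searched
 * first; entries still on the send list are matched only for RMPP data
 * MADs.
 */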
static struct ib_mad_send_wr_private*
find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
	     struct ib_mad_send_buf *send_buf)
{
	struct ib_mad_send_wr_private *mad_send_wr;

	list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
			    agent_list) {
		if (&mad_send_wr->send_buf == send_buf)
			return mad_send_wr;
	}

	list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
			    agent_list) {
		if (is_rmpp_data_mad(mad_agent_priv,
				     mad_send_wr->send_buf.mad) &&
		    &mad_send_wr->send_buf == send_buf)
			return mad_send_wr;
	}
	return NULL;
}

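/*
 * Illustrative usage (not taken from this file): a client wanting to
 * give an outstanding request five more seconds could call
 *
 *	ib_modify_mad(mad_agent, send_buf, 5000);
 *
 * while a timeout_ms of 0 cancels the request outright, which is
 * exactly how ib_cancel_mad() below is implemented.
 */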
int ib_modify_mad(struct ib_mad_agent *mad_agent,
		  struct ib_mad_send_buf *send_buf, u32 timeout_ms)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long flags;
	int active;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
				      agent);
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
	if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		return -EINVAL;
	}

	active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
	if (!timeout_ms) {
		mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
	}

	mad_send_wr->send_buf.timeout_ms = timeout_ms;
	if (active)
		mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
	else
		ib_reset_mad_timeout(mad_send_wr, timeout_ms);

	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	return 0;
}
EXPORT_SYMBOL(ib_modify_mad);

void ib_cancel_mad(struct ib_mad_agent *mad_agent,
		   struct ib_mad_send_buf *send_buf)
{
	ib_modify_mad(mad_agent, send_buf, 0);
}
EXPORT_SYMBOL(ib_cancel_mad);

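/*
 * Work handler completing MADs that were "sent" to the local port
 * without going out on the wire.  A synthetic work completion is built
 * for each queued MAD and the receive handler of the destination agent
 * runs first; the send completion is reported afterwards, matching the
 * response-before-request ordering noted in the comment below.
 */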
static void local_completions(struct work_struct *work)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_local_private *local;
	struct ib_mad_agent_private *recv_mad_agent;
	unsigned long flags;
	int free_mad;
	struct ib_wc wc;
	struct ib_mad_send_wc mad_send_wc;
	bool opa;

	mad_agent_priv =
		container_of(work, struct ib_mad_agent_private, local_work);

	opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
			       mad_agent_priv->qp_info->port_priv->port_num);

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	while (!list_empty(&mad_agent_priv->local_list)) {
		local = list_entry(mad_agent_priv->local_list.next,
				   struct ib_mad_local_private,
				   completion_list);
		list_del(&local->completion_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		free_mad = 0;
		if (local->mad_priv) {
			u8 base_version;
			recv_mad_agent = local->recv_mad_agent;
			if (!recv_mad_agent) {
				dev_err(&mad_agent_priv->agent.device->dev,
					"No receive MAD agent for local completion\n");
				free_mad = 1;
				goto local_send_completion;
			}

			/*
			 * Defined behavior is to complete response
			 * before request
			 */
			build_smp_wc(recv_mad_agent->agent.qp,
				     local->mad_send_wr->send_wr.wr.wr_cqe,
				     be16_to_cpu(IB_LID_PERMISSIVE),
				     local->mad_send_wr->send_wr.pkey_index,
				     recv_mad_agent->agent.port_num, &wc);

			local->mad_priv->header.recv_wc.wc = &wc;

			base_version = ((struct ib_mad_hdr *)(local->mad_priv->mad))->base_version;
			if (opa && base_version == OPA_MGMT_BASE_VERSION) {
				local->mad_priv->header.recv_wc.mad_len = local->return_wc_byte_len;
				local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
			} else {
				local->mad_priv->header.recv_wc.mad_len = sizeof(struct ib_mad);
				local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
			}

			INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
			list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
				 &local->mad_priv->header.recv_wc.rmpp_list);
			local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
			local->mad_priv->header.recv_wc.recv_buf.mad =
						(struct ib_mad *)local->mad_priv->mad;
			if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
				snoop_recv(recv_mad_agent->qp_info,
					   &local->mad_priv->header.recv_wc,
					   IB_MAD_SNOOP_RECVS);
			recv_mad_agent->agent.recv_handler(
						&recv_mad_agent->agent,
						&local->mad_send_wr->send_buf,
						&local->mad_priv->header.recv_wc);
			spin_lock_irqsave(&recv_mad_agent->lock, flags);
			atomic_dec(&recv_mad_agent->refcount);
			spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
		}

local_send_completion:
		/* Complete send */
		mad_send_wc.status = IB_WC_SUCCESS;
		mad_send_wc.vendor_err = 0;
		mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
		if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
			snoop_send(mad_agent_priv->qp_info,
				   &local->mad_send_wr->send_buf,
				   &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		atomic_dec(&mad_agent_priv->refcount);
		if (free_mad)
			kfree(local->mad_priv);
		kfree(local);
	}
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}

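/*
 * Consume one retry for a timed-out send: re-arm the relative timeout
 * and repost the MAD, letting the RMPP layer take over for kernel RMPP
 * agents.  Returns -ETIMEDOUT once retries_left is exhausted; on
 * success the WR goes back on the send list with an extra reference.
 */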
static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
{
	int ret;

	if (!mad_send_wr->retries_left)
		return -ETIMEDOUT;

	mad_send_wr->retries_left--;
	mad_send_wr->send_buf.retries++;

	mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);

	if (ib_mad_kernel_rmpp_agent(&mad_send_wr->mad_agent_priv->agent)) {
		ret = ib_retry_rmpp(mad_send_wr);
		switch (ret) {
		case IB_RMPP_RESULT_UNHANDLED:
			ret = ib_send_mad(mad_send_wr);
			break;
		case IB_RMPP_RESULT_CONSUMED:
			ret = 0;
			break;
		default:
			ret = -ECOMM;
			break;
		}
	} else
		ret = ib_send_mad(mad_send_wr);

	if (!ret) {
		mad_send_wr->refcount++;
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_send_wr->mad_agent_priv->send_list);
	}
	return ret;
}

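/*
 * Delayed work handler for an agent's response timeouts.  Expired
 * requests are retried first; only when no retries remain (or the send
 * had already failed) is a completion reported, using
 * IB_WC_RESP_TIMEOUT_ERR for requests that simply never got a
 * response.  The work item re-arms itself for the first entry that has
 * not yet expired.
 */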
static void timeout_sends(struct work_struct *work)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags, delay;

	mad_agent_priv = container_of(work, struct ib_mad_agent_private,
				      timed_work.work);
	mad_send_wc.vendor_err = 0;

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	while (!list_empty(&mad_agent_priv->wait_list)) {
		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
					 struct ib_mad_send_wr_private,
					 agent_list);

		if (time_after(mad_send_wr->timeout, jiffies)) {
			delay = mad_send_wr->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			queue_delayed_work(mad_agent_priv->qp_info->
					   port_priv->wq,
					   &mad_agent_priv->timed_work, delay);
			break;
		}

		list_del(&mad_send_wr->agent_list);
		if (mad_send_wr->status == IB_WC_SUCCESS &&
		    !retry_send(mad_send_wr))
			continue;

		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		if (mad_send_wr->status == IB_WC_SUCCESS)
			mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
		else
			mad_send_wc.status = mad_send_wr->status;
		mad_send_wc.send_buf = &mad_send_wr->send_buf;
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		atomic_dec(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
	}
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}

/*
 * Allocate receive MADs and post receive WRs for them.  A caller may
 * pass in one already allocated buffer ("mad") to be reused for the
 * first post; subsequent buffers are allocated GFP_ATOMIC, and posting
 * continues until the receive queue reaches max_active.
 */
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad)
{
	unsigned long flags;
	int post, ret;
	struct ib_mad_private *mad_priv;
	struct ib_sge sg_list;
	struct ib_recv_wr recv_wr, *bad_recv_wr;
	struct ib_mad_queue *recv_queue = &qp_info->recv_queue;

	/* Initialize common scatter list fields */
	sg_list.lkey = qp_info->port_priv->pd->local_dma_lkey;

	/* Initialize common receive WR fields */
	recv_wr.next = NULL;
	recv_wr.sg_list = &sg_list;
	recv_wr.num_sge = 1;

	do {
		/* Allocate and map receive buffer */
		if (mad) {
			mad_priv = mad;
			mad = NULL;
		} else {
			mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv),
						     GFP_ATOMIC);
			if (!mad_priv) {
				dev_err(&qp_info->port_priv->device->dev,
					"No memory for receive buffer\n");
				ret = -ENOMEM;
				break;
			}
		}
		sg_list.length = mad_priv_dma_size(mad_priv);
		sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
						 &mad_priv->grh,
						 mad_priv_dma_size(mad_priv),
						 DMA_FROM_DEVICE);
		if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
						  sg_list.addr))) {
			ret = -ENOMEM;
			break;
		}
		mad_priv->header.mapping = sg_list.addr;
		mad_priv->header.mad_list.mad_queue = recv_queue;
		mad_priv->header.mad_list.cqe.done = ib_mad_recv_done;
		recv_wr.wr_cqe = &mad_priv->header.mad_list.cqe;

		/* Post receive WR */
		spin_lock_irqsave(&recv_queue->lock, flags);
		post = (++recv_queue->count < recv_queue->max_active);
		list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
		spin_unlock_irqrestore(&recv_queue->lock, flags);
		ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
		if (ret) {
			spin_lock_irqsave(&recv_queue->lock, flags);
			list_del(&mad_priv->header.mad_list.list);
			recv_queue->count--;
			spin_unlock_irqrestore(&recv_queue->lock, flags);
			ib_dma_unmap_single(qp_info->port_priv->device,
					    mad_priv->header.mapping,
					    mad_priv_dma_size(mad_priv),
					    DMA_FROM_DEVICE);
			kfree(mad_priv);
			dev_err(&qp_info->port_priv->device->dev,
				"ib_post_recv failed: %d\n", ret);
			break;
		}
	} while (post);

	return ret;
}

/*
 * Return all the posted receive MADs
 */
static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
{
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *recv;
	struct ib_mad_list_head *mad_list;

	if (!qp_info->qp)
		return;

	while (!list_empty(&qp_info->recv_queue.list)) {
		mad_list = list_entry(qp_info->recv_queue.list.next,
				      struct ib_mad_list_head, list);
		mad_priv_hdr = container_of(mad_list,
					    struct ib_mad_private_header,
					    mad_list);
		recv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);

		/* Remove from posted receive MAD list */
		list_del(&mad_list->list);

		ib_dma_unmap_single(qp_info->port_priv->device,
				    recv->header.mapping,
				    mad_priv_dma_size(recv),
				    DMA_FROM_DEVICE);
		kfree(recv);
	}

	qp_info->recv_queue.count = 0;
}

/*
 * Start the port: move each MAD QP through the INIT, RTR and RTS
 * states, request CQ notification, and post the initial receive WRs.
 */
static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
{
	int ret, i;
	struct ib_qp_attr *attr;
	struct ib_qp *qp;
	u16 pkey_index;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr) {
		dev_err(&port_priv->device->dev,
			"Couldn't kmalloc ib_qp_attr\n");
		return -ENOMEM;
	}

	ret = ib_find_pkey(port_priv->device, port_priv->port_num,
			   IB_DEFAULT_PKEY_FULL, &pkey_index);
	if (ret)
		pkey_index = 0;

	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
		qp = port_priv->qp_info[i].qp;
		if (!qp)
			continue;

		/*
		 * PKey index for QP1 is irrelevant but
		 * one is needed for the Reset to Init transition
		 */
		attr->qp_state = IB_QPS_INIT;
		attr->pkey_index = pkey_index;
		attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE |
					     IB_QP_PKEY_INDEX | IB_QP_QKEY);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't change QP%d state to INIT: %d\n",
				i, ret);
			goto out;
		}

		attr->qp_state = IB_QPS_RTR;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't change QP%d state to RTR: %d\n",
				i, ret);
			goto out;
		}

		attr->qp_state = IB_QPS_RTS;
		attr->sq_psn = IB_MAD_SEND_Q_PSN;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't change QP%d state to RTS: %d\n",
				i, ret);
			goto out;
		}
	}

	ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
	if (ret) {
		dev_err(&port_priv->device->dev,
			"Failed to request completion notification: %d\n",
			ret);
		goto out;
	}

	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
		if (!port_priv->qp_info[i].qp)
			continue;

		ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't post receive WRs\n");
			goto out;
		}
	}
out:
	kfree(attr);
	return ret;
}

static void qp_event_handler(struct ib_event *event, void *qp_context)
{
	struct ib_mad_qp_info	*qp_info = qp_context;

	/* It's worse than that! He's dead, Jim! */
	dev_err(&qp_info->port_priv->device->dev,
		"Fatal error (%d) on MAD QP (%d)\n",
		event->event, qp_info->qp->qp_num);
}

static void init_mad_queue(struct ib_mad_qp_info *qp_info,
			   struct ib_mad_queue *mad_queue)
{
	mad_queue->qp_info = qp_info;
	mad_queue->count = 0;
	spin_lock_init(&mad_queue->lock);
	INIT_LIST_HEAD(&mad_queue->list);
}

static void init_mad_qp(struct ib_mad_port_private *port_priv,
			struct ib_mad_qp_info *qp_info)
{
	qp_info->port_priv = port_priv;
	init_mad_queue(qp_info, &qp_info->send_queue);
	init_mad_queue(qp_info, &qp_info->recv_queue);
	INIT_LIST_HEAD(&qp_info->overflow_list);
	spin_lock_init(&qp_info->snoop_lock);
	qp_info->snoop_table = NULL;
	qp_info->snoop_table_size = 0;
	atomic_set(&qp_info->snoop_count, 0);
}

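/*
 * Create one of the special MAD QPs (SMI or GSI).  Both QPs share the
 * port CQ, and the queue depths come from the send_queue_size and
 * recv_queue_size module parameters.
 */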
static int create_mad_qp(struct ib_mad_qp_info *qp_info,
			 enum ib_qp_type qp_type)
{
	struct ib_qp_init_attr	qp_init_attr;
	int ret;

	memset(&qp_init_attr, 0, sizeof qp_init_attr);
	qp_init_attr.send_cq = qp_info->port_priv->cq;
	qp_init_attr.recv_cq = qp_info->port_priv->cq;
	qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	qp_init_attr.cap.max_send_wr = mad_sendq_size;
	qp_init_attr.cap.max_recv_wr = mad_recvq_size;
	qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
	qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
	qp_init_attr.qp_type = qp_type;
	qp_init_attr.port_num = qp_info->port_priv->port_num;
	qp_init_attr.qp_context = qp_info;
	qp_init_attr.event_handler = qp_event_handler;
	qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
	if (IS_ERR(qp_info->qp)) {
		dev_err(&qp_info->port_priv->device->dev,
			"Couldn't create ib_mad QP%d\n",
			get_spl_qp_index(qp_type));
		ret = PTR_ERR(qp_info->qp);
		goto error;
	}
	/* Use minimum queue sizes unless the CQ is resized */
	qp_info->send_queue.max_active = mad_sendq_size;
	qp_info->recv_queue.max_active = mad_recvq_size;
	return 0;

error:
	return ret;
}

static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
{
	if (!qp_info->qp)
		return;

	ib_destroy_qp(qp_info->qp);
	kfree(qp_info->snoop_table);
}

/*
 * Open the port
 * Create the CQ, PD, QPs and work queue if needed
 */
static int ib_mad_port_open(struct ib_device *device,
			    int port_num)
{
	int ret, cq_size;
	struct ib_mad_port_private *port_priv;
	unsigned long flags;
	char name[sizeof "ib_mad123"];
	int has_smi;

	if (WARN_ON(rdma_max_mad_size(device, port_num) < IB_MGMT_MAD_SIZE))
		return -EFAULT;

	if (WARN_ON(rdma_cap_opa_mad(device, port_num) &&
		    rdma_max_mad_size(device, port_num) < OPA_MGMT_MAD_SIZE))
		return -EFAULT;

	/* Create new device info */
	port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
	if (!port_priv) {
		dev_err(&device->dev, "No memory for ib_mad_port_private\n");
		return -ENOMEM;
	}

	port_priv->device = device;
	port_priv->port_num = port_num;
	spin_lock_init(&port_priv->reg_lock);
	INIT_LIST_HEAD(&port_priv->agent_list);
	init_mad_qp(port_priv, &port_priv->qp_info[0]);
	init_mad_qp(port_priv, &port_priv->qp_info[1]);

	cq_size = mad_sendq_size + mad_recvq_size;
	has_smi = rdma_cap_ib_smi(device, port_num);
	if (has_smi)
		cq_size *= 2;

	port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0,
			IB_POLL_WORKQUEUE);
	if (IS_ERR(port_priv->cq)) {
		dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
		ret = PTR_ERR(port_priv->cq);
		goto error3;
	}

	port_priv->pd = ib_alloc_pd(device, 0);
	if (IS_ERR(port_priv->pd)) {
		dev_err(&device->dev, "Couldn't create ib_mad PD\n");
		ret = PTR_ERR(port_priv->pd);
		goto error4;
	}

	if (has_smi) {
		ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
		if (ret)
			goto error6;
	}
	ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
	if (ret)
		goto error7;

	snprintf(name, sizeof name, "ib_mad%d", port_num);
	port_priv->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
	if (!port_priv->wq) {
		ret = -ENOMEM;
		goto error8;
	}

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	list_add_tail(&port_priv->port_list, &ib_mad_port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	ret = ib_mad_port_start(port_priv);
	if (ret) {
		dev_err(&device->dev, "Couldn't start port\n");
		goto error9;
	}

	return 0;

error9:
	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	list_del_init(&port_priv->port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	destroy_workqueue(port_priv->wq);
error8:
	destroy_mad_qp(&port_priv->qp_info[1]);
error7:
	destroy_mad_qp(&port_priv->qp_info[0]);
error6:
	ib_dealloc_pd(port_priv->pd);
error4:
	ib_free_cq(port_priv->cq);
	cleanup_recv_queue(&port_priv->qp_info[1]);
	cleanup_recv_queue(&port_priv->qp_info[0]);
error3:
	kfree(port_priv);

	return ret;
}

/*
 * Close the port
 * If there are no classes using the port, free the port
 * resources (work queue, QPs, PD, CQ) and remove the port's
 * info structure
 */
static int ib_mad_port_close(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *port_priv;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	port_priv = __ib_get_mad_port(device, port_num);
	if (port_priv == NULL) {
		spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
		dev_err(&device->dev, "Port %d not found\n", port_num);
		return -ENODEV;
	}
	list_del_init(&port_priv->port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	destroy_workqueue(port_priv->wq);
	destroy_mad_qp(&port_priv->qp_info[1]);
	destroy_mad_qp(&port_priv->qp_info[0]);
	ib_dealloc_pd(port_priv->pd);
	ib_free_cq(port_priv->cq);
	cleanup_recv_queue(&port_priv->qp_info[1]);
	cleanup_recv_queue(&port_priv->qp_info[0]);
	/* XXX: Handle deallocation of MAD registration tables */

	kfree(port_priv);

	return 0;
}

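/*
 * Client "add" callback: open the MAD and agent services on every port
 * that supports MADs, tearing down any already opened ports if one of
 * them fails.
 */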
static void ib_mad_init_device(struct ib_device *device)
{
	int start, i;

	start = rdma_start_port(device);

	for (i = start; i <= rdma_end_port(device); i++) {
		if (!rdma_cap_ib_mad(device, i))
			continue;

		if (ib_mad_port_open(device, i)) {
			dev_err(&device->dev, "Couldn't open port %d\n", i);
			goto error;
		}
		if (ib_agent_port_open(device, i)) {
			dev_err(&device->dev,
				"Couldn't open port %d for agents\n", i);
			goto error_agent;
		}
	}
	return;

error_agent:
	if (ib_mad_port_close(device, i))
		dev_err(&device->dev, "Couldn't close port %d\n", i);

error:
	while (--i >= start) {
		if (!rdma_cap_ib_mad(device, i))
			continue;

		if (ib_agent_port_close(device, i))
			dev_err(&device->dev,
				"Couldn't close port %d for agents\n", i);
		if (ib_mad_port_close(device, i))
			dev_err(&device->dev, "Couldn't close port %d\n", i);
	}
}

static void ib_mad_remove_device(struct ib_device *device, void *client_data)
{
	int i;

	for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
		if (!rdma_cap_ib_mad(device, i))
			continue;

		if (ib_agent_port_close(device, i))
			dev_err(&device->dev,
				"Couldn't close port %d for agents\n", i);
		if (ib_mad_port_close(device, i))
			dev_err(&device->dev, "Couldn't close port %d\n", i);
	}
}

static struct ib_client mad_client = {
	.name   = "mad",
	.add = ib_mad_init_device,
	.remove = ib_mad_remove_device
};

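/*
 * Module init: clamp the queue size parameters to the supported range
 * before any QP is created, then register as an ib_client so that
 * ports are opened as devices appear.
 */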
int ib_mad_init(void)
{
	mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
	mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);

	mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE);
	mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);

	INIT_LIST_HEAD(&ib_mad_port_list);

	if (ib_register_client(&mad_client)) {
		pr_err("Couldn't register ib_mad client\n");
		return -EINVAL;
	}

	return 0;
}

void ib_mad_cleanup(void)
{
	ib_unregister_client(&mad_client);
}