/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008, 2014 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/errno.h>

#include <linux/mlx4/cmd.h>
#include <linux/mlx4/device.h>
#include <linux/semaphore.h>
#include <rdma/ib_smi.h>

#include <asm/io.h>
#include <linux/ktime.h>

#include "mlx4.h"
#include "fw.h"

#define CMD_POLL_TOKEN 0xffff
#define INBOX_MASK	0xffffffffffffff00ULL

#define CMD_CHAN_VER 1
#define CMD_CHAN_IF_REV 1

enum {
	/* command completed successfully: */
	CMD_STAT_OK		= 0x00,
	/* Internal error (such as a bus error) occurred while processing command: */
	CMD_STAT_INTERNAL_ERR	= 0x01,
	/* Operation/command not supported or opcode modifier not supported: */
	CMD_STAT_BAD_OP		= 0x02,
	/* Parameter not supported or parameter out of range: */
	CMD_STAT_BAD_PARAM	= 0x03,
	/* System not enabled or bad system state: */
	CMD_STAT_BAD_SYS_STATE	= 0x04,
	/* Attempt to access reserved or unallocated resource: */
	CMD_STAT_BAD_RESOURCE	= 0x05,
	/* Requested resource is currently executing a command, or is otherwise busy: */
	CMD_STAT_RESOURCE_BUSY	= 0x06,
	/* Required capability exceeds device limits: */
	CMD_STAT_EXCEED_LIM	= 0x08,
	/* Resource is not in the appropriate state or ownership: */
	CMD_STAT_BAD_RES_STATE	= 0x09,
	/* Index out of range: */
	CMD_STAT_BAD_INDEX	= 0x0a,
	/* FW image corrupted: */
	CMD_STAT_BAD_NVMEM	= 0x0b,
	/* Error in ICM mapping (e.g. not enough auxiliary ICM pages to execute command): */
	CMD_STAT_ICM_ERROR	= 0x0c,
	/* Attempt to modify a QP/EE which is not in the presumed state: */
	CMD_STAT_BAD_QP_STATE   = 0x10,
	/* Bad segment parameters (Address/Size): */
	CMD_STAT_BAD_SEG_PARAM	= 0x20,
	/* Memory Region has Memory Windows bound to: */
	CMD_STAT_REG_BOUND	= 0x21,
	/* HCA local attached memory not present: */
	CMD_STAT_LAM_NOT_PRE	= 0x22,
	/* Bad management packet (silently discarded): */
	CMD_STAT_BAD_PKT	= 0x30,
	/* More outstanding CQEs in CQ than new CQ size: */
	CMD_STAT_BAD_SIZE	= 0x40,
	/* Multi Function device support required: */
	CMD_STAT_MULTI_FUNC_REQ	= 0x50,
};

enum {
	HCR_IN_PARAM_OFFSET	= 0x00,
	HCR_IN_MODIFIER_OFFSET	= 0x08,
	HCR_OUT_PARAM_OFFSET	= 0x0c,
	HCR_TOKEN_OFFSET	= 0x14,
	HCR_STATUS_OFFSET	= 0x18,

	HCR_OPMOD_SHIFT		= 12,
	HCR_T_BIT		= 21,
	HCR_E_BIT		= 22,
	HCR_GO_BIT		= 23
};

enum {
	GO_BIT_TIMEOUT_MSECS	= 10000
};

enum mlx4_vlan_transition {
	MLX4_VLAN_TRANSITION_VST_VST = 0,
	MLX4_VLAN_TRANSITION_VST_VGT = 1,
	MLX4_VLAN_TRANSITION_VGT_VST = 2,
	MLX4_VLAN_TRANSITION_VGT_VGT = 3,
};


struct mlx4_cmd_context {
	struct completion	done;
	int			result;
	int			next;
	u64			out_param;
	u16			token;
	u8			fw_status;
};

static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
				    struct mlx4_vhcr_cmd *in_vhcr);

static int mlx4_status_to_errno(u8 status)
{
	static const int trans_table[] = {
		[CMD_STAT_INTERNAL_ERR]	  = -EIO,
		[CMD_STAT_BAD_OP]	  = -EPERM,
		[CMD_STAT_BAD_PARAM]	  = -EINVAL,
		[CMD_STAT_BAD_SYS_STATE]  = -ENXIO,
		[CMD_STAT_BAD_RESOURCE]	  = -EBADF,
		[CMD_STAT_RESOURCE_BUSY]  = -EBUSY,
		[CMD_STAT_EXCEED_LIM]	  = -ENOMEM,
		[CMD_STAT_BAD_RES_STATE]  = -EBADF,
		[CMD_STAT_BAD_INDEX]	  = -EBADF,
		[CMD_STAT_BAD_NVMEM]	  = -EFAULT,
		[CMD_STAT_ICM_ERROR]	  = -ENFILE,
		[CMD_STAT_BAD_QP_STATE]   = -EINVAL,
		[CMD_STAT_BAD_SEG_PARAM]  = -EFAULT,
		[CMD_STAT_REG_BOUND]	  = -EBUSY,
		[CMD_STAT_LAM_NOT_PRE]	  = -EAGAIN,
		[CMD_STAT_BAD_PKT]	  = -EINVAL,
		[CMD_STAT_BAD_SIZE]	  = -ENOMEM,
		[CMD_STAT_MULTI_FUNC_REQ] = -EACCES,
	};

	if (status >= ARRAY_SIZE(trans_table) ||
	    (status != CMD_STAT_OK && trans_table[status] == 0))
		return -EIO;

	return trans_table[status];
}

static const char *cmd_to_str(u16 cmd)
{
	switch (cmd) {
	case MLX4_CMD_SYS_EN:		return "SYS_EN";
	case MLX4_CMD_SYS_DIS:		return "SYS_DIS";
	case MLX4_CMD_MAP_FA:		return "MAP_FA";
	case MLX4_CMD_UNMAP_FA:		return "UNMAP_FA";
	case MLX4_CMD_RUN_FW:		return "RUN_FW";
	case MLX4_CMD_MOD_STAT_CFG:	return "MOD_STAT_CFG";
	case MLX4_CMD_QUERY_DEV_CAP:	return "QUERY_DEV_CAP";
	case MLX4_CMD_QUERY_FW:		return "QUERY_FW";
	case MLX4_CMD_ENABLE_LAM:	return "ENABLE_LAM";
	case MLX4_CMD_DISABLE_LAM:	return "DISABLE_LAM";
	case MLX4_CMD_QUERY_DDR:	return "QUERY_DDR";
	case MLX4_CMD_QUERY_ADAPTER:	return "QUERY_ADAPTER";
	case MLX4_CMD_INIT_HCA:		return "INIT_HCA";
	case MLX4_CMD_CLOSE_HCA:	return "CLOSE_HCA";
	case MLX4_CMD_INIT_PORT:	return "INIT_PORT";
	case MLX4_CMD_CLOSE_PORT:	return "CLOSE_PORT";
	case MLX4_CMD_QUERY_HCA:	return "QUERY_HCA";
	case MLX4_CMD_QUERY_PORT:	return "QUERY_PORT";
	case MLX4_CMD_SENSE_PORT:	return "SENSE_PORT";
	case MLX4_CMD_HW_HEALTH_CHECK:  return "HW_HEALTH_CHECK";
	case MLX4_CMD_SET_PORT:		return "SET_PORT";
	case MLX4_CMD_SET_NODE:		return "SET_NODE";
	case MLX4_CMD_QUERY_FUNC:	return "QUERY_FUNC";
	case MLX4_CMD_MAP_ICM:		return "MAP_ICM";
	case MLX4_CMD_UNMAP_ICM:	return "UNMAP_ICM";
	case MLX4_CMD_MAP_ICM_AUX:	return "MAP_ICM_AUX";
	case MLX4_CMD_UNMAP_ICM_AUX:	return "UNMAP_ICM_AUX";
	case MLX4_CMD_SET_ICM_SIZE:	return "SET_ICM_SIZE";
		/*master notify fw on finish for slave's flr*/
	case MLX4_CMD_INFORM_FLR_DONE:	return "INFORM_FLR_DONE";
	case MLX4_CMD_GET_OP_REQ:	return "GET_OP_REQ";

		/* TPT commands */
	case MLX4_CMD_SW2HW_MPT:	return "SW2HW_MPT";
	case MLX4_CMD_QUERY_MPT:	return "QUERY_MPT";
	case MLX4_CMD_HW2SW_MPT:	return "HW2SW_MPT";
	case MLX4_CMD_READ_MTT:		return "READ_MTT";
	case MLX4_CMD_WRITE_MTT:	return "WRITE_MTT";
	case MLX4_CMD_SYNC_TPT:		return "SYNC_TPT";

		/* EQ commands */
	case MLX4_CMD_MAP_EQ:		return "MAP_EQ";
	case MLX4_CMD_SW2HW_EQ:		return "SW2HW_EQ";
	case MLX4_CMD_HW2SW_EQ:		return "HW2SW_EQ";
	case MLX4_CMD_QUERY_EQ:		return "QUERY_EQ";

		/* CQ commands */
	case MLX4_CMD_SW2HW_CQ:		return "SW2HW_CQ";
	case MLX4_CMD_HW2SW_CQ:		return "HW2SW_CQ";
	case MLX4_CMD_QUERY_CQ:		return "QUERY_CQ";
	case MLX4_CMD_MODIFY_CQ:	return "MODIFY_CQ";

		/* SRQ commands */
	case MLX4_CMD_SW2HW_SRQ:	return "SW2HW_SRQ";
	case MLX4_CMD_HW2SW_SRQ:	return "HW2SW_SRQ";
	case MLX4_CMD_QUERY_SRQ:	return "QUERY_SRQ";
	case MLX4_CMD_ARM_SRQ:		return "ARM_SRQ";

		/* QP/EE commands */
	case MLX4_CMD_RST2INIT_QP:	return "RST2INIT_QP";
	case MLX4_CMD_INIT2RTR_QP:	return "INIT2RTR_QP";
	case MLX4_CMD_RTR2RTS_QP:	return "RTR2RTS_QP";
	case MLX4_CMD_RTS2RTS_QP:	return "RTS2RTS_QP";
	case MLX4_CMD_SQERR2RTS_QP:	return "SQERR2RTS_QP";
	case MLX4_CMD_2ERR_QP:		return "2ERR_QP";
	case MLX4_CMD_RTS2SQD_QP:	return "RTS2SQD_QP";
	case MLX4_CMD_SQD2SQD_QP:	return "SQD2SQD_QP";
	case MLX4_CMD_SQD2RTS_QP:	return "SQD2RTS_QP";
	case MLX4_CMD_2RST_QP:		return "2RST_QP";
	case MLX4_CMD_QUERY_QP:		return "QUERY_QP";
	case MLX4_CMD_INIT2INIT_QP:	return "INIT2INIT_QP";
	case MLX4_CMD_SUSPEND_QP:	return "SUSPEND_QP";
	case MLX4_CMD_UNSUSPEND_QP:	return "UNSUSPEND_QP";
		/* special QP and management commands */
	case MLX4_CMD_CONF_SPECIAL_QP:	return "CONF_SPECIAL_QP";
	case MLX4_CMD_MAD_IFC:		return "MAD_IFC";

		/* multicast commands */
	case MLX4_CMD_READ_MCG:		return "READ_MCG";
	case MLX4_CMD_WRITE_MCG:	return "WRITE_MCG";
	case MLX4_CMD_MGID_HASH:	return "MGID_HASH";

		/* miscellaneous commands */
	case MLX4_CMD_DIAG_RPRT:	return "DIAG_RPRT";
	case MLX4_CMD_NOP:		return "NOP";
	case MLX4_CMD_ACCESS_MEM:	return "ACCESS_MEM";
	case MLX4_CMD_SET_VEP:		return "SET_VEP";

		/* Ethernet specific commands */
	case MLX4_CMD_SET_VLAN_FLTR:	return "SET_VLAN_FLTR";
	case MLX4_CMD_SET_MCAST_FLTR:	return "SET_MCAST_FLTR";
	case MLX4_CMD_DUMP_ETH_STATS:	return "DUMP_ETH_STATS";

		/* Communication channel commands */
	case MLX4_CMD_ARM_COMM_CHANNEL:	return "ARM_COMM_CHANNEL";
	case MLX4_CMD_GEN_EQE:		return "GEN_EQE";

		/* virtual commands */
	case MLX4_CMD_ALLOC_RES:	return "ALLOC_RES";
	case MLX4_CMD_FREE_RES:		return "FREE_RES";
	case MLX4_CMD_MCAST_ATTACH:	return "MCAST_ATTACH";
	case MLX4_CMD_UCAST_ATTACH:	return "UCAST_ATTACH";
	case MLX4_CMD_PROMISC:		return "PROMISC";
	case MLX4_CMD_QUERY_FUNC_CAP:	return "QUERY_FUNC_CAP";
	case MLX4_CMD_QP_ATTACH:	return "QP_ATTACH";

		/* debug commands */
	case MLX4_CMD_QUERY_DEBUG_MSG:	return "QUERY_DEBUG_MSG";
	case MLX4_CMD_SET_DEBUG_MSG:	return "SET_DEBUG_MSG";

		/* statistics commands */
	case MLX4_CMD_QUERY_IF_STAT:	return "QUERY_IF_STAT";
	case MLX4_CMD_SET_IF_STAT:	return "SET_IF_STAT";

		/* register/delete flow steering network rules */
	case MLX4_QP_FLOW_STEERING_ATTACH:	return "QP_FLOW_STEERING_ATTACH";
	case MLX4_QP_FLOW_STEERING_DETACH:	return "QP_FLOW_STEERING_DETACH";
	case MLX4_FLOW_STEERING_IB_UC_QP_RANGE:	return "FLOW_STEERING_IB_UC_QP_RANGE";
	default: return "OTHER";
	}
}

static u8 mlx4_errno_to_status(int errno)
{
	switch (errno) {
	case -EPERM:
		return CMD_STAT_BAD_OP;
	case -EINVAL:
		return CMD_STAT_BAD_PARAM;
	case -ENXIO:
		return CMD_STAT_BAD_SYS_STATE;
	case -EBUSY:
		return CMD_STAT_RESOURCE_BUSY;
	case -ENOMEM:
		return CMD_STAT_EXCEED_LIM;
	case -ENFILE:
		return CMD_STAT_ICM_ERROR;
	default:
		return CMD_STAT_INTERNAL_ERR;
	}
}

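/*
 * The slave <-> master communication channel is serialized by a single
 * toggle bit: a posted command is still pending as long as the toggle
 * echoed in the slave_read word differs from the toggle we last wrote.
 */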
static int comm_pending(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u32 status = readl(&priv->mfunc.comm->slave_read);

	return (swab32(status) >> 31) != priv->cmd.comm_toggle;
}

static void mlx4_comm_cmd_post(struct mlx4_dev *dev, u8 cmd, u16 param)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u32 val;

	priv->cmd.comm_toggle ^= 1;
	val = param | (cmd << 16) | (priv->cmd.comm_toggle << 31);
	__raw_writel((__force u32) cpu_to_be32(val),
		     &priv->mfunc.comm->slave_write);
	mmiowb();
}

static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
		       unsigned long timeout)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	unsigned long end;
	int err = 0;
	int ret_from_pending = 0;

	/* First, verify that the master reports correct status */
	if (comm_pending(dev)) {
		mlx4_warn(dev, "Communication channel is not idle. "
			  "My toggle is %d (cmd:0x%x)\n",
			  priv->cmd.comm_toggle, cmd);
		return -EAGAIN;
	}

	/* Write command */
	down(&priv->cmd.poll_sem);
	mlx4_comm_cmd_post(dev, cmd, param);

	end = msecs_to_jiffies(timeout) + jiffies;
	while (comm_pending(dev) && time_before(jiffies, end))
		cond_resched();
	ret_from_pending = comm_pending(dev);
	if (ret_from_pending) {
		/* Check whether the slave is trying to boot in the middle of
		 * the FLR process. The only non-zero result for the RESET
		 * command is MLX4_DELAY_RESET_SLAVE. */
		if (cmd == MLX4_COMM_CMD_RESET) {
			mlx4_warn(dev, "Got slave FLRed from Communication"
				  " channel (ret:0x%x)\n", ret_from_pending);
			err = MLX4_DELAY_RESET_SLAVE;
		} else {
			mlx4_warn(dev, "Communication channel timed out\n");
			err = -ETIMEDOUT;
		}
	}

	up(&priv->cmd.poll_sem);
	return err;
}

static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 op,
			      u16 param, unsigned long timeout)
{
	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
	struct mlx4_cmd_context *context;
	unsigned long end;
	int err = 0;

	down(&cmd->event_sem);

	end = msecs_to_jiffies(timeout) + jiffies;
	while (comm_pending(dev) && time_before(jiffies, end))
		cond_resched();
	if (comm_pending(dev)) {
		mlx4_warn(dev, "mlx4_comm_cmd_wait: Comm channel "
			  "is not idle. My toggle is %d (op: 0x%x)\n",
			  mlx4_priv(dev)->cmd.comm_toggle, op);
		up(&cmd->event_sem);
		return -EAGAIN;
	}

	spin_lock(&cmd->context_lock);
	BUG_ON(cmd->free_head < 0);
	context = &cmd->context[cmd->free_head];
	context->token += cmd->token_mask + 1;
	cmd->free_head = context->next;
	spin_unlock(&cmd->context_lock);

	init_completion(&context->done);

	mlx4_comm_cmd_post(dev, op, param);

	/* In slave, wait unconditionally for completion */
	wait_for_completion(&context->done);

	err = context->result;
	if (err && context->fw_status != CMD_STAT_MULTI_FUNC_REQ) {
		mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
			 op, context->fw_status);
		goto out;
	}

out:
	/* Wait for the comm channel to become ready again; this prevents
	 * a race when switching between event and polling mode.
	 */
	end = msecs_to_jiffies(timeout) + jiffies;
	while (comm_pending(dev) && time_before(jiffies, end))
		cond_resched();

	spin_lock(&cmd->context_lock);
	context->next = cmd->free_head;
	cmd->free_head = context - cmd->context;
	spin_unlock(&cmd->context_lock);

	up(&cmd->event_sem);
	return err;
}

int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
		  unsigned long timeout)
{
	if (mlx4_priv(dev)->cmd.use_events)
		return mlx4_comm_cmd_wait(dev, cmd, param, timeout);
	return mlx4_comm_cmd_poll(dev, cmd, param, timeout);
}

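/*
 * cmd_pending() reports whether the HCR still belongs to a previous
 * command, judged from the go bit and the ownership toggle in the HCR
 * status word; a new command may only be posted once it returns false.
 */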
static int cmd_pending(struct mlx4_dev *dev)
{
	u32 status;

	if (pci_channel_offline(dev->pdev))
		return -EIO;

	status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);

	return (status & swab32(1 << HCR_GO_BIT)) ||
		(mlx4_priv(dev)->cmd.toggle ==
		 !!(status & swab32(1 << HCR_T_BIT)));
}

static int get_status(struct mlx4_dev *dev, u32 *status, int *go_bit,
		      int *t_bit)
{
	if (pci_channel_offline(dev->pdev))
		return -EIO;

	*status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);
	*t_bit = !!(*status & swab32(1 << HCR_T_BIT));
	*go_bit = !!(*status & swab32(1 << HCR_GO_BIT));

	return 0;
}

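/*
 * Post one command to the HCR: wait for the go bit to clear, write the
 * six parameter words, then write the opcode word with the go bit set.
 * A non-zero return means the command was never handed to firmware.
 */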
static int mlx4_cmd_post(struct mlx4_dev *dev, struct timespec *ts1,
			 u64 in_param, u64 out_param, u32 in_modifier,
			 u8 op_modifier, u16 op, u16 token, int event)
{
	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
	u32 __iomem *hcr = cmd->hcr;
	int ret = -EAGAIN;
	unsigned long end;
	int err, go_bit = 0, t_bit = 0;
	u32 status = 0;

	mutex_lock(&cmd->hcr_mutex);

	if (pci_channel_offline(dev->pdev)) {
		/*
		 * Device is going through error recovery
		 * and cannot accept commands.
		 */
		ret = -EIO;
		goto out;
	}

	end = jiffies;
	if (event)
		end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS);

	while (cmd_pending(dev)) {
		if (pci_channel_offline(dev->pdev)) {
			/*
			 * Device is going through error recovery
			 * and cannot accept commands.
			 */
			ret = -EIO;
			goto out;
		}

		if (time_after_eq(jiffies, end)) {
			mlx4_err(dev, "%s:cmd_pending failed\n", __func__);
			goto out;
		}
		cond_resched();
	}

	/*
	 * We use writel (instead of something like memcpy_toio)
	 * because writes of less than 32 bits to the HCR don't work
	 * (and some architectures such as ia64 implement memcpy_toio
	 * in terms of writeb).
	 */
	__raw_writel((__force u32) cpu_to_be32(in_param >> 32),		  hcr + 0);
	__raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful),  hcr + 1);
	__raw_writel((__force u32) cpu_to_be32(in_modifier),		  hcr + 2);
	__raw_writel((__force u32) cpu_to_be32(out_param >> 32),	  hcr + 3);
	__raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), hcr + 4);
	__raw_writel((__force u32) cpu_to_be32(token << 16),		  hcr + 5);

	if (ts1)
		ktime_get_ts(ts1);

	/* __raw_writel may not order writes. */
	wmb();

	__raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT)		|
					       (cmd->toggle << HCR_T_BIT)	|
					       (event ? (1 << HCR_E_BIT) : 0)	|
					       (op_modifier << HCR_OPMOD_SHIFT) |
					       op), hcr + 6);

	/*
	 * Make sure that our HCR writes don't get mixed in with
	 * writes from another CPU starting a FW command.
	 */
	mmiowb();

	cmd->toggle = cmd->toggle ^ 1;

	ret = 0;

out:
	if (ret) {
		err = get_status(dev, &status, &go_bit, &t_bit);
		mlx4_warn(dev, "Could not post command %s (0x%x): ret=%d, "
			  "in_param=0x%llx, in_mod=0x%x, op_mod=0x%x, "
			  "get_status err=%d, status_reg=0x%x, go_bit=%d, "
			  "t_bit=%d, toggle=0x%x\n", cmd_to_str(op), op, ret,
			  (unsigned long long) in_param, in_modifier, op_modifier, err, status,
			  go_bit, t_bit, cmd->toggle);
	}
	mutex_unlock(&cmd->hcr_mutex);
	return ret;
}

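/*
 * Command path for virtual functions (and for wrapped commands on the
 * master): fill the virtual HCR and either process it locally when we
 * are the master, or post VHCR_POST on the comm channel otherwise.
 */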
static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
			  int out_is_imm, u32 in_modifier, u8 op_modifier,
			  u16 op, unsigned long timeout)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_vhcr_cmd *vhcr = priv->mfunc.vhcr;
	int ret;

	mutex_lock(&priv->cmd.slave_cmd_mutex);

	vhcr->in_param = cpu_to_be64(in_param);
	vhcr->out_param = out_param ? cpu_to_be64(*out_param) : 0;
	vhcr->in_modifier = cpu_to_be32(in_modifier);
	vhcr->opcode = cpu_to_be16((((u16) op_modifier) << 12) | (op & 0xfff));
	vhcr->token = cpu_to_be16(CMD_POLL_TOKEN);
	vhcr->status = 0;
	vhcr->flags = !!(priv->cmd.use_events) << 6;

	if (mlx4_is_master(dev)) {
		ret = mlx4_master_process_vhcr(dev, dev->caps.function, vhcr);
		if (!ret) {
			if (out_is_imm) {
				if (out_param)
					*out_param =
						be64_to_cpu(vhcr->out_param);
				else {
					mlx4_err(dev, "response expected while "
						 "output mailbox is NULL for "
						 "command 0x%x\n", op);
					vhcr->status = CMD_STAT_BAD_PARAM;
				}
			}
			ret = mlx4_status_to_errno(vhcr->status);
		}
	} else {
		ret = mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_POST, 0,
				    MLX4_COMM_TIME + timeout);
		if (!ret) {
			if (out_is_imm) {
				if (out_param)
					*out_param =
						be64_to_cpu(vhcr->out_param);
				else {
					mlx4_err(dev, "response expected while "
						 "output mailbox is NULL for "
						 "command 0x%x\n", op);
					vhcr->status = CMD_STAT_BAD_PARAM;
				}
			}
			ret = mlx4_status_to_errno(vhcr->status);
		} else
			mlx4_err(dev, "failed execution of VHCR_POST command "
				 "opcode %s (0x%x)\n", cmd_to_str(op), op);
	}

	mutex_unlock(&priv->cmd.slave_cmd_mutex);
	return ret;
}

static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
			 int out_is_imm, u32 in_modifier, u8 op_modifier,
			 u16 op, unsigned long timeout)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	void __iomem *hcr = priv->cmd.hcr;
	int err = 0;
	unsigned long end;
	u32 stat;

	down(&priv->cmd.poll_sem);

	if (pci_channel_offline(dev->pdev)) {
		/*
		 * Device is going through error recovery
		 * and cannot accept commands.
		 */
		err = -EIO;
		goto out;
	}

	err = mlx4_cmd_post(dev, NULL, in_param, out_param ? *out_param : 0,
			    in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0);
	if (err)
		goto out;

	end = msecs_to_jiffies(timeout) + jiffies;
	while (cmd_pending(dev) && time_before(jiffies, end)) {
		if (pci_channel_offline(dev->pdev)) {
			/*
			 * Device is going through error recovery
			 * and cannot accept commands.
			 */
			err = -EIO;
			goto out;
		}

		cond_resched();
	}

	if (cmd_pending(dev)) {
		mlx4_warn(dev, "command %s (0x%x) timed out (go bit not cleared)\n",
			  cmd_to_str(op), op);
		err = -ETIMEDOUT;
		goto out;
	}

	if (out_is_imm)
		*out_param =
			(u64) be32_to_cpu((__force __be32)
					  __raw_readl(hcr + HCR_OUT_PARAM_OFFSET)) << 32 |
			(u64) be32_to_cpu((__force __be32)
					  __raw_readl(hcr + HCR_OUT_PARAM_OFFSET + 4));
	stat = be32_to_cpu((__force __be32)
			   __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24;
	err = mlx4_status_to_errno(stat);
	if (err)
		mlx4_err(dev, "command %s (0x%x) failed: fw status = 0x%x\n",
			 cmd_to_str(op), op, stat);

out:
	up(&priv->cmd.poll_sem);
	return err;
}

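/*
 * Completion callback for commands issued in event mode: look up the
 * waiter by token, record the firmware status and immediate out_param,
 * and wake it up.  Stale tokens from timed-out commands are ignored.
 */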
685{
686	struct mlx4_priv *priv = mlx4_priv(dev);
687	struct mlx4_cmd_context *context =
688		&priv->cmd.context[token & priv->cmd.token_mask];
689
690	/* previously timed out command completing at long last */
691	if (token != context->token)
692		return;
693
694	context->fw_status = status;
695	context->result    = mlx4_status_to_errno(status);
696	context->out_param = out_param;
697
698	complete(&context->done);
699}
700
701static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
702			 int out_is_imm, u32 in_modifier, u8 op_modifier,
703			 u16 op, unsigned long timeout)
704{
705	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
706	struct mlx4_cmd_context *context;
707	int err = 0;
708	int go_bit = 0, t_bit = 0, stat_err;
709	u32 status = 0;
710	struct timespec	ts1, ts2;
711	ktime_t t1, t2, delta;
712	s64 ds;
713
714	if (out_is_imm && !out_param)
715		return -EINVAL;
716
717	down(&cmd->event_sem);
718
719	spin_lock(&cmd->context_lock);
720	BUG_ON(cmd->free_head < 0);
721	context = &cmd->context[cmd->free_head];
722	context->token += cmd->token_mask + 1;
723	cmd->free_head = context->next;
724	spin_unlock(&cmd->context_lock);
725
726	init_completion(&context->done);
727
728	err = mlx4_cmd_post(dev, &ts1, in_param, out_param ? *out_param : 0,
729			    in_modifier, op_modifier, op, context->token, 1);
730	if (err)
731		goto out;
732
733	if (!wait_for_completion_timeout(&context->done,
734					 msecs_to_jiffies(timeout))) {
735		stat_err = get_status(dev, &status, &go_bit, &t_bit);
736		mlx4_warn(dev, "command %s (0x%x) timed out: in_param=0x%llx, "
737			  "in_mod=0x%x, op_mod=0x%x, get_status err=%d, "
738			  "status_reg=0x%x, go_bit=%d, t_bit=%d, toggle=0x%x\n"
739			  , cmd_to_str(op), op, (unsigned long long) in_param, in_modifier,
740			  op_modifier, stat_err, status, go_bit, t_bit,
741			  mlx4_priv(dev)->cmd.toggle);
742		err = -EBUSY;
743		goto out;
744	}
745	if (mlx4_debug_level & MLX4_DEBUG_MASK_CMD_TIME) {
746		ktime_get_ts(&ts2);
747		t1 = timespec_to_ktime(ts1);
748		t2 = timespec_to_ktime(ts2);
749		delta = ktime_sub(t2, t1);
750		ds = ktime_to_ns(delta);
751		pr_info("mlx4: fw exec time for %s is %lld nsec\n", cmd_to_str(op), (long long) ds);
752	}
753
754	err = context->result;
755	if (err) {
756		mlx4_err(dev, "command %s (0x%x) failed: in_param=0x%llx, "
757			 "in_mod=0x%x, op_mod=0x%x, fw status = 0x%x\n",
758			 cmd_to_str(op), op, (unsigned long long) in_param, in_modifier,
759			 op_modifier, context->fw_status);
760		goto out;
761	}
762
763	if (out_is_imm)
764		*out_param = context->out_param;
765
766out:
767	spin_lock(&cmd->context_lock);
768	context->next = cmd->free_head;
769	cmd->free_head = context - cmd->context;
770	spin_unlock(&cmd->context_lock);
771
772	up(&cmd->event_sem);
773	return err;
774}
775
776int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
777	       int out_is_imm, u32 in_modifier, u8 op_modifier,
778	       u16 op, unsigned long timeout, int native)
779{
780	if (pci_channel_offline(dev->pdev))
781		return -EIO;
782
783	if (!mlx4_is_mfunc(dev) || (native && mlx4_is_master(dev))) {
784		if (mlx4_priv(dev)->cmd.use_events)
785			return mlx4_cmd_wait(dev, in_param, out_param,
786					     out_is_imm, in_modifier,
787					     op_modifier, op, timeout);
788		else
789			return mlx4_cmd_poll(dev, in_param, out_param,
790					     out_is_imm, in_modifier,
791					     op_modifier, op, timeout);
792	}
793	return mlx4_slave_cmd(dev, in_param, out_param, out_is_imm,
794			      in_modifier, op_modifier, op, timeout);
795}
796EXPORT_SYMBOL_GPL(__mlx4_cmd);
797
798
799static int mlx4_ARM_COMM_CHANNEL(struct mlx4_dev *dev)
800{
801	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_ARM_COMM_CHANNEL,
802			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
803}
804
805static int mlx4_ACCESS_MEM(struct mlx4_dev *dev, u64 master_addr,
806			   int slave, u64 slave_addr,
807			   int size, int is_read)
808{
809	u64 in_param;
810	u64 out_param;
811
812	if ((slave_addr & 0xfff) | (master_addr & 0xfff) |
813	    (slave & ~0x7f) | (size & 0xff)) {
814		mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx "
815			      "master_addr:0x%llx slave_id:%d size:%d\n",
816			      (unsigned long long) slave_addr, (unsigned long long) master_addr, slave, size);
817		return -EINVAL;
818	}
819
820	if (is_read) {
821		in_param = (u64) slave | slave_addr;
822		out_param = (u64) dev->caps.function | master_addr;
823	} else {
824		in_param = (u64) dev->caps.function | master_addr;
825		out_param = (u64) slave | slave_addr;
826	}
827
828	return mlx4_cmd_imm(dev, in_param, &out_param, size, 0,
829			    MLX4_CMD_ACCESS_MEM,
830			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
831}
832
833static int query_pkey_block(struct mlx4_dev *dev, u8 port, u16 index, u16 *pkey,
834			       struct mlx4_cmd_mailbox *inbox,
835			       struct mlx4_cmd_mailbox *outbox)
836{
837	struct ib_smp *in_mad = (struct ib_smp *)(inbox->buf);
838	struct ib_smp *out_mad = (struct ib_smp *)(outbox->buf);
839	int err;
840	int i;
841
842	if (index & 0x1f)
843		return -EINVAL;
844
845	in_mad->attr_mod = cpu_to_be32(index / 32);
846
847	err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3,
848			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
849			   MLX4_CMD_NATIVE);
850	if (err)
851		return err;
852
853	for (i = 0; i < 32; ++i)
854		pkey[i] = be16_to_cpu(((__be16 *) out_mad->data)[i]);
855
856	return err;
857}
858
859static int get_full_pkey_table(struct mlx4_dev *dev, u8 port, u16 *table,
860			       struct mlx4_cmd_mailbox *inbox,
861			       struct mlx4_cmd_mailbox *outbox)
862{
863	int i;
864	int err;
865
866	for (i = 0; i < dev->caps.pkey_table_len[port]; i += 32) {
867		err = query_pkey_block(dev, port, i, table + i, inbox, outbox);
868		if (err)
869			return err;
870	}
871
872	return 0;
873}
874#define PORT_CAPABILITY_LOCATION_IN_SMP 20
875#define PORT_STATE_OFFSET 32
876
877static enum ib_port_state vf_port_state(struct mlx4_dev *dev, int port, int vf)
878{
879	if (mlx4_get_slave_port_state(dev, vf, port) == SLAVE_PORT_UP)
880		return IB_PORT_ACTIVE;
881	else
882		return IB_PORT_DOWN;
883}
884
885static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
886				struct mlx4_vhcr *vhcr,
887				struct mlx4_cmd_mailbox *inbox,
888				struct mlx4_cmd_mailbox *outbox,
889				struct mlx4_cmd_info *cmd)
890{
891	struct ib_smp *smp = inbox->buf;
892	u32 index;
893	u8 port;
894	u16 *table;
895	int err;
896	int vidx, pidx;
897	struct mlx4_priv *priv = mlx4_priv(dev);
898	struct ib_smp *outsmp = outbox->buf;
899	__be16 *outtab = (__be16 *)(outsmp->data);
900	__be32 slave_cap_mask;
901	__be64 slave_node_guid;
902	port = vhcr->in_modifier;
903
904	if (smp->base_version == 1 &&
905	    smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
906	    smp->class_version == 1) {
907		if (smp->method	== IB_MGMT_METHOD_GET) {
908			if (smp->attr_id == IB_SMP_ATTR_PKEY_TABLE) {
909				index = be32_to_cpu(smp->attr_mod);
910				if (port < 1 || port > dev->caps.num_ports)
911					return -EINVAL;
912				table = kcalloc(dev->caps.pkey_table_len[port], sizeof *table, GFP_KERNEL);
913				if (!table)
914					return -ENOMEM;
915				/* need to get the full pkey table because the paravirtualized
916				 * pkeys may be scattered among several pkey blocks.
917				 */
918				err = get_full_pkey_table(dev, port, table, inbox, outbox);
919				if (!err) {
920					for (vidx = index * 32; vidx < (index + 1) * 32; ++vidx) {
921						pidx = priv->virt2phys_pkey[slave][port - 1][vidx];
922						outtab[vidx % 32] = cpu_to_be16(table[pidx]);
923					}
924				}
925				kfree(table);
926				return err;
927			}
928			if (smp->attr_id == IB_SMP_ATTR_PORT_INFO) {
929				/*get the slave specific caps:*/
930				/*do the command */
931				err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
932					    vhcr->in_modifier, vhcr->op_modifier,
933					    vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
934				/* modify the response for slaves */
935				if (!err && slave != mlx4_master_func_num(dev)) {
936					u8 *state = outsmp->data + PORT_STATE_OFFSET;
937
938					*state = (*state & 0xf0) | vf_port_state(dev, port, slave);
939					slave_cap_mask = priv->mfunc.master.slave_state[slave].ib_cap_mask[port];
940					memcpy(outsmp->data + PORT_CAPABILITY_LOCATION_IN_SMP, &slave_cap_mask, 4);
941				}
942				return err;
943			}
944			if (smp->attr_id == IB_SMP_ATTR_GUID_INFO) {
945				/* compute slave's gid block */
946				smp->attr_mod = cpu_to_be32(slave / 8);
947				/* execute cmd */
948				err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
949					     vhcr->in_modifier, vhcr->op_modifier,
950					     vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
951				if (!err) {
952					/* if needed, move slave gid to index 0 */
953					if (slave % 8)
954						memcpy(outsmp->data,
955						       outsmp->data + (slave % 8) * 8, 8);
956					/* delete all other gids */
957					memset(outsmp->data + 8, 0, 56);
958				}
959				return err;
960			}
961			if (smp->attr_id == IB_SMP_ATTR_NODE_INFO) {
962				err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
963					     vhcr->in_modifier, vhcr->op_modifier,
964					     vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
965				if (!err) {
966					slave_node_guid =  mlx4_get_slave_node_guid(dev, slave);
967					memcpy(outsmp->data + 12, &slave_node_guid, 8);
968				}
969				return err;
970			}
971		}
972	}
973	if (slave != mlx4_master_func_num(dev) &&
974	    ((smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) ||
975	     (smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
976	      smp->method == IB_MGMT_METHOD_SET))) {
977		mlx4_err(dev, "slave %d is trying to execute a Subnet MGMT MAD, "
		mlx4_err(dev, "slave %d is trying to execute a Subnet MGMT MAD, "
			 "class 0x%x, method 0x%x for attr 0x%x. Rejecting\n",
			 slave, smp->mgmt_class, smp->method,
			 be16_to_cpu(smp->attr_id));
		return -EPERM;
	}
	/*default:*/
	return mlx4_cmd_box(dev, inbox->dma, outbox->dma,
				    vhcr->in_modifier, vhcr->op_modifier,
				    vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
}

static int MLX4_CMD_DIAG_RPRT_wrapper(struct mlx4_dev *dev, int slave,
		     struct mlx4_vhcr *vhcr,
		     struct mlx4_cmd_mailbox *inbox,
		     struct mlx4_cmd_mailbox *outbox,
		     struct mlx4_cmd_info *cmd)
{
	return -EPERM;
}

static int MLX4_CMD_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
		     struct mlx4_vhcr *vhcr,
		     struct mlx4_cmd_mailbox *inbox,
		     struct mlx4_cmd_mailbox *outbox,
		     struct mlx4_cmd_info *cmd)
{
	return -EPERM;
}

int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave,
		     struct mlx4_vhcr *vhcr,
		     struct mlx4_cmd_mailbox *inbox,
		     struct mlx4_cmd_mailbox *outbox,
		     struct mlx4_cmd_info *cmd)
{
	u64 in_param;
	u64 out_param;
	int err;

	in_param = cmd->has_inbox ? (u64) inbox->dma : vhcr->in_param;
	out_param = cmd->has_outbox ? (u64) outbox->dma : vhcr->out_param;
	if (cmd->encode_slave_id) {
		in_param &= 0xffffffffffffff00ll;
		in_param |= slave;
	}

	err = __mlx4_cmd(dev, in_param, &out_param, cmd->out_is_imm,
			 vhcr->in_modifier, vhcr->op_modifier, vhcr->op,
			 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

	if (cmd->out_is_imm)
		vhcr->out_param = out_param;

	return err;
}

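/*
 * Dispatch table for commands received from slaves: each entry states
 * which mailboxes the opcode uses and which wrapper (if any)
 * paravirtualizes it.  Opcodes missing from this table are rejected by
 * mlx4_master_process_vhcr().
 */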
static struct mlx4_cmd_info cmd_info[] = {
	{
		.opcode = MLX4_CMD_QUERY_FW,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QUERY_FW_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_HCA,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = NULL
	},
	{
		.opcode = MLX4_CMD_QUERY_DEV_CAP,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QUERY_DEV_CAP_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_FUNC_CAP,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QUERY_FUNC_CAP_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_ADAPTER,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = NULL
	},
	{
		.opcode = MLX4_CMD_INIT_PORT,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_INIT_PORT_wrapper
	},
	{
		.opcode = MLX4_CMD_CLOSE_PORT,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm  = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_CLOSE_PORT_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_PORT,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QUERY_PORT_wrapper
	},
	{
		.opcode = MLX4_CMD_SET_PORT,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_SET_PORT_wrapper
	},
	{
		.opcode = MLX4_CMD_MAP_EQ,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_MAP_EQ_wrapper
	},
	{
		.opcode = MLX4_CMD_SW2HW_EQ,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = true,
		.verify = NULL,
		.wrapper = mlx4_SW2HW_EQ_wrapper
	},
	{
		.opcode = MLX4_CMD_HW_HEALTH_CHECK,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = NULL
	},
	{
		.opcode = MLX4_CMD_DIAG_RPRT,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.skip_err_print = true,
		.verify = NULL,
		.wrapper = MLX4_CMD_DIAG_RPRT_wrapper
	},
	{
		.opcode = MLX4_CMD_NOP,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = NULL
	},
	{
		.opcode = MLX4_CMD_ALLOC_RES,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = true,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_ALLOC_RES_wrapper
	},
	{
		.opcode = MLX4_CMD_FREE_RES,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_FREE_RES_wrapper
	},
	{
		.opcode = MLX4_CMD_SW2HW_MPT,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = true,
		.verify = NULL,
		.wrapper = mlx4_SW2HW_MPT_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_MPT,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QUERY_MPT_wrapper
	},
	{
		.opcode = MLX4_CMD_HW2SW_MPT,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_HW2SW_MPT_wrapper
	},
	{
		.opcode = MLX4_CMD_READ_MTT,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = NULL
	},
	{
		.opcode = MLX4_CMD_WRITE_MTT,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_WRITE_MTT_wrapper
	},
	{
		.opcode = MLX4_CMD_SYNC_TPT,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = NULL
	},
	{
		.opcode = MLX4_CMD_HW2SW_EQ,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = true,
		.verify = NULL,
		.wrapper = mlx4_HW2SW_EQ_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_EQ,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = true,
		.verify = NULL,
		.wrapper = mlx4_QUERY_EQ_wrapper
	},
	{
		.opcode = MLX4_CMD_SW2HW_CQ,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = true,
		.verify = NULL,
		.wrapper = mlx4_SW2HW_CQ_wrapper
	},
	{
		.opcode = MLX4_CMD_HW2SW_CQ,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_HW2SW_CQ_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_CQ,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QUERY_CQ_wrapper
	},
	{
		.opcode = MLX4_CMD_MODIFY_CQ,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = true,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_MODIFY_CQ_wrapper
	},
	{
		.opcode = MLX4_CMD_SW2HW_SRQ,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = true,
		.verify = NULL,
		.wrapper = mlx4_SW2HW_SRQ_wrapper
	},
	{
		.opcode = MLX4_CMD_HW2SW_SRQ,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_HW2SW_SRQ_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_SRQ,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QUERY_SRQ_wrapper
	},
	{
		.opcode = MLX4_CMD_ARM_SRQ,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_ARM_SRQ_wrapper
	},
	{
		.opcode = MLX4_CMD_RST2INIT_QP,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = true,
		.verify = NULL,
		.wrapper = mlx4_RST2INIT_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_INIT2INIT_QP,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_INIT2INIT_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_INIT2RTR_QP,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_INIT2RTR_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_RTR2RTS_QP,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_RTR2RTS_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_RTS2RTS_QP,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_RTS2RTS_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_SQERR2RTS_QP,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_SQERR2RTS_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_2ERR_QP,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_GEN_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_RTS2SQD_QP,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_GEN_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_SQD2SQD_QP,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_SQD2SQD_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_SQD2RTS_QP,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_SQD2RTS_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_2RST_QP,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_2RST_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_QP,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_GEN_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_SUSPEND_QP,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_GEN_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_UNSUSPEND_QP,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_GEN_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_UPDATE_QP,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.skip_err_print = true,
		.verify = NULL,
		.wrapper = MLX4_CMD_UPDATE_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_CONF_SPECIAL_QP,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL, /* XXX verify: only demux can do this */
		.wrapper = NULL
	},
	{
		.opcode = MLX4_CMD_MAD_IFC,
		.has_inbox = true,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_MAD_IFC_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_IF_STAT,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QUERY_IF_STAT_wrapper
	},
	/* Native multicast commands are not available for guests */
	{
		.opcode = MLX4_CMD_QP_ATTACH,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QP_ATTACH_wrapper
	},
	{
		.opcode = MLX4_CMD_PROMISC,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_PROMISC_wrapper
	},
	/* Ethernet specific commands */
	{
		.opcode = MLX4_CMD_SET_VLAN_FLTR,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_SET_VLAN_FLTR_wrapper
	},
	{
		.opcode = MLX4_CMD_SET_MCAST_FLTR,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_SET_MCAST_FLTR_wrapper
	},
	{
		.opcode = MLX4_CMD_DUMP_ETH_STATS,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_DUMP_ETH_STATS_wrapper
	},
	{
		.opcode = MLX4_CMD_INFORM_FLR_DONE,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = NULL
	},
	/* flow steering commands */
	{
		.opcode = MLX4_QP_FLOW_STEERING_ATTACH,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = true,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QP_FLOW_STEERING_ATTACH_wrapper
	},
	{
		.opcode = MLX4_QP_FLOW_STEERING_DETACH,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QP_FLOW_STEERING_DETACH_wrapper
	},
	/* wol commands */
	{
		.opcode = MLX4_CMD_MOD_STAT_CFG,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.skip_err_print = true,
		.verify = NULL,
		.wrapper = mlx4_MOD_STAT_CFG_wrapper
	},
};

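/*
 * Executed on the master for every vHCR a slave posts: DMA the slave's
 * vHCR in, look the opcode up in cmd_info[], fetch the inbox, run the
 * verifier and wrapper (or the command itself), then DMA the outbox and
 * status back and, if the slave asked for it, generate a completion EQE.
 */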
static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
				    struct mlx4_vhcr_cmd *in_vhcr)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_info *cmd = NULL;
	struct mlx4_vhcr_cmd *vhcr_cmd = in_vhcr ? in_vhcr : priv->mfunc.vhcr;
	struct mlx4_vhcr *vhcr;
	struct mlx4_cmd_mailbox *inbox = NULL;
	struct mlx4_cmd_mailbox *outbox = NULL;
	u64 in_param;
	u64 out_param;
	int ret = 0;
	int i;
	int err = 0;

	/* Create sw representation of Virtual HCR */
	vhcr = kzalloc(sizeof(struct mlx4_vhcr), GFP_KERNEL);
	if (!vhcr)
		return -ENOMEM;

	/* DMA in the vHCR */
	if (!in_vhcr) {
		ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
				      priv->mfunc.master.slave_state[slave].vhcr_dma,
				      ALIGN(sizeof(struct mlx4_vhcr_cmd),
					    MLX4_ACCESS_MEM_ALIGN), 1);
		if (ret) {
			mlx4_err(dev, "%s: Failed reading vhcr "
				 "ret: 0x%x\n", __func__, ret);
			kfree(vhcr);
			return ret;
		}
	}

	/* Fill SW VHCR fields */
	vhcr->in_param = be64_to_cpu(vhcr_cmd->in_param);
	vhcr->out_param = be64_to_cpu(vhcr_cmd->out_param);
	vhcr->in_modifier = be32_to_cpu(vhcr_cmd->in_modifier);
	vhcr->token = be16_to_cpu(vhcr_cmd->token);
	vhcr->op = be16_to_cpu(vhcr_cmd->opcode) & 0xfff;
	vhcr->op_modifier = (u8) (be16_to_cpu(vhcr_cmd->opcode) >> 12);
	vhcr->e_bit = vhcr_cmd->flags & (1 << 6);

	/* Lookup command */
	for (i = 0; i < ARRAY_SIZE(cmd_info); ++i) {
		if (vhcr->op == cmd_info[i].opcode) {
			cmd = &cmd_info[i];
			break;
		}
	}
	if (!cmd) {
		mlx4_err(dev, "unparavirt command: %s (0x%x) accepted from slave:%d\n",
			 cmd_to_str(vhcr->op), vhcr->op, slave);
		vhcr_cmd->status = CMD_STAT_BAD_PARAM;
		goto out_status;
	}

	/* Read inbox */
	if (cmd->has_inbox) {
		vhcr->in_param &= INBOX_MASK;
		inbox = mlx4_alloc_cmd_mailbox(dev);
		if (IS_ERR(inbox)) {
			vhcr_cmd->status = CMD_STAT_BAD_SIZE;
			inbox = NULL;
			goto out_status;
		}

		if (mlx4_ACCESS_MEM(dev, inbox->dma, slave,
				    vhcr->in_param,
				    MLX4_MAILBOX_SIZE, 1)) {
			mlx4_err(dev, "%s: Failed reading inbox for cmd %s (0x%x)\n",
				 __func__, cmd_to_str(cmd->opcode), cmd->opcode);
			vhcr_cmd->status = CMD_STAT_INTERNAL_ERR;
			goto out_status;
		}
	}

	/* Apply permission and bound checks if applicable */
	if (cmd->verify && cmd->verify(dev, slave, vhcr, inbox)) {
		mlx4_warn(dev, "Command %s (0x%x) from slave: %d failed protection "
			  "checks for resource_id: %d\n", cmd_to_str(vhcr->op),
			  vhcr->op, slave, vhcr->in_modifier);
		vhcr_cmd->status = CMD_STAT_BAD_OP;
		goto out_status;
	}

	/* Allocate outbox */
	if (cmd->has_outbox) {
		outbox = mlx4_alloc_cmd_mailbox(dev);
		if (IS_ERR(outbox)) {
			vhcr_cmd->status = CMD_STAT_BAD_SIZE;
			outbox = NULL;
			goto out_status;
		}
	}

	/* Execute the command! */
	if (cmd->wrapper) {
		err = cmd->wrapper(dev, slave, vhcr, inbox, outbox,
				   cmd);
		if (cmd->out_is_imm)
			vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
	} else {
		in_param = cmd->has_inbox ? (u64) inbox->dma :
			vhcr->in_param;
		out_param = cmd->has_outbox ? (u64) outbox->dma :
			vhcr->out_param;
		err = __mlx4_cmd(dev, in_param, &out_param,
				 cmd->out_is_imm, vhcr->in_modifier,
				 vhcr->op_modifier, vhcr->op,
				 MLX4_CMD_TIME_CLASS_A,
				 MLX4_CMD_NATIVE);

		if (cmd->out_is_imm) {
			vhcr->out_param = out_param;
			vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
		}
	}

	if (err) {
		if (!cmd->skip_err_print)
			mlx4_warn(dev, "vhcr command %s (0x%x) slave:%d "
				  "in_param 0x%llx in_mod=0x%x, op_mod=0x%x "
				  "failed with error:%d, status %d\n",
				  cmd_to_str(vhcr->op), vhcr->op, slave,
				  (unsigned long long) vhcr->in_param, vhcr->in_modifier,
				  vhcr->op_modifier, vhcr->errno, err);
		vhcr_cmd->status = mlx4_errno_to_status(err);
		goto out_status;
	}


	/* Write outbox if command completed successfully */
	if (cmd->has_outbox && !vhcr_cmd->status) {
		ret = mlx4_ACCESS_MEM(dev, outbox->dma, slave,
				      vhcr->out_param,
				      MLX4_MAILBOX_SIZE, MLX4_CMD_WRAPPED);
		if (ret) {
			/* If we failed to write back the outbox after the
			 * command was successfully executed, we must fail this
			 * slave, as it is now in an undefined state */
			mlx4_err(dev, "%s: Failed writing outbox\n", __func__);
			goto out;
		}
	}

out_status:
	/* DMA back vhcr result */
	if (!in_vhcr) {
		ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
				      priv->mfunc.master.slave_state[slave].vhcr_dma,
				      ALIGN(sizeof(struct mlx4_vhcr),
					    MLX4_ACCESS_MEM_ALIGN),
				      MLX4_CMD_WRAPPED);
		if (ret)
			mlx4_err(dev, "%s:Failed writing vhcr result\n",
				 __func__);
		else if (vhcr->e_bit &&
			 mlx4_GEN_EQE(dev, slave, &priv->mfunc.master.cmd_eqe))
				mlx4_warn(dev, "Failed to generate command completion "
					  "eqe for slave %d\n", slave);
	}

out:
	kfree(vhcr);
	mlx4_free_cmd_mailbox(dev, inbox);
	mlx4_free_cmd_mailbox(dev, outbox);
	return ret;
}

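/*
 * Apply a new admin VLAN/QoS setting to a VF that is already running:
 * reserve the new VLAN index if needed, remember the old one, and queue
 * a work item that updates the slave's QPs via UPDATE_QP.
 */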
1745static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
1746					    int slave, int port)
1747{
1748	struct mlx4_vport_oper_state *vp_oper;
1749	struct mlx4_vport_state *vp_admin;
1750	struct mlx4_vf_immed_vlan_work *work;
1751	int err;
1752	int admin_vlan_ix = NO_INDX;
1753
1754	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
1755	vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
1756
1757	if (vp_oper->state.default_vlan == vp_admin->default_vlan &&
1758	    vp_oper->state.default_qos == vp_admin->default_qos)
1759		return 0;
1760
1761	work = kzalloc(sizeof(*work), GFP_KERNEL);
1762	if (!work)
1763		return -ENOMEM;
1764
1765	if (vp_oper->state.default_vlan != vp_admin->default_vlan) {
1766		if (MLX4_VGT != vp_admin->default_vlan) {
1767			err = __mlx4_register_vlan(&priv->dev, port,
1768						   vp_admin->default_vlan,
1769						   &admin_vlan_ix);
1770			if (err) {
1771				mlx4_warn((&priv->dev),
1772					  "No vlan resources slave %d, port %d\n",
1773					  slave, port);
1774				return err;
1775			}
1776		} else {
1777			admin_vlan_ix = NO_INDX;
1778		}
1779		work->flags |= MLX4_VF_IMMED_VLAN_FLAG_VLAN;
1780		mlx4_dbg((&(priv->dev)),
1781			 "alloc vlan %d idx  %d slave %d port %d\n",
1782			 (int)(vp_admin->default_vlan),
1783			 admin_vlan_ix, slave, port);
1784	}
1785
1786	/* save original vlan ix and vlan id */
1787	work->orig_vlan_id = vp_oper->state.default_vlan;
1788	work->orig_vlan_ix = vp_oper->vlan_idx;
1789
1790	/* handle new qos */
1791	if (vp_oper->state.default_qos != vp_admin->default_qos)
1792		work->flags |= MLX4_VF_IMMED_VLAN_FLAG_QOS;
1793
1794	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN)
1795		vp_oper->vlan_idx = admin_vlan_ix;
1796
1797	vp_oper->state.default_vlan = vp_admin->default_vlan;
1798	vp_oper->state.default_qos = vp_admin->default_qos;
1799
1800	/* iterate over QPs owned by this slave, using UPDATE_QP */
1801	work->port = port;
1802	work->slave = slave;
1803	work->qos = vp_oper->state.default_qos;
1804	work->vlan_id = vp_oper->state.default_vlan;
1805	work->vlan_ix = vp_oper->vlan_idx;
1806	work->priv = priv;
1807	INIT_WORK(&work->work, mlx4_vf_immed_vlan_work_handler);
1808	queue_work(priv->mfunc.master.comm_wq, &work->work);
1809
1810	return 0;
1811}
1812
1813
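/*
 * When a slave comes up, copy its per-port admin vport state into the
 * operational state: register the VST default VLAN (unless VGT) and,
 * if spoof checking is enabled, register the admin MAC.  Returns a
 * negative errno if any resource registration fails.
 */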
1814static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
1815{
1816	int port, err;
1817	struct mlx4_vport_state *vp_admin;
1818	struct mlx4_vport_oper_state *vp_oper;
1819
1820	for (port = 1; port <= MLX4_MAX_PORTS; port++) {
1821		vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
1822		vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
1823		vp_oper->state = *vp_admin;
1824		if (MLX4_VGT != vp_admin->default_vlan) {
1825			err = __mlx4_register_vlan(&priv->dev, port,
1826						 vp_admin->default_vlan, &(vp_oper->vlan_idx));
1827			if (err) {
1828				vp_oper->vlan_idx = NO_INDX;
1829				mlx4_warn((&priv->dev),
1830					  "No vlan resources slave %d, port %d\n",
1831					  slave, port);
1832				return err;
1833			}
1834			mlx4_dbg((&(priv->dev)), "alloc vlan %d idx  %d slave %d port %d\n",
1835				 (int)(vp_oper->state.default_vlan),
1836				 vp_oper->vlan_idx, slave, port);
1837		}
1838		if (vp_admin->spoofchk) {
1839			vp_oper->mac_idx = __mlx4_register_mac(&priv->dev,
1840							       port,
1841							       vp_admin->mac);
1842			if (0 > vp_oper->mac_idx) {
1843				err = vp_oper->mac_idx;
1844				vp_oper->mac_idx = NO_INDX;
1845				mlx4_warn((&priv->dev),
1846					  "No mac resources slave %d, port %d\n",
1847					  slave, port);
1848				return err;
1849			}
1850			mlx4_dbg((&(priv->dev)), "alloc mac %llx idx  %d slave %d port %d\n",
1851				 (unsigned long long) vp_oper->state.mac, vp_oper->mac_idx, slave, port);
1852		}
1853	}
1854	return 0;
1855}
1856
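/*
 * Release the per-port VLAN and MAC resources that were registered for
 * a slave by mlx4_master_activate_admin_state().
 */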
1857static void mlx4_master_deactivate_admin_state(struct mlx4_priv *priv, int slave)
1858{
1859	int port;
1860	struct mlx4_vport_oper_state *vp_oper;
1861
1862	for (port = 1; port <= MLX4_MAX_PORTS; port++) {
1863		vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
1864		if (NO_INDX != vp_oper->vlan_idx) {
1865			__mlx4_unregister_vlan(&priv->dev,
1866					       port, vp_oper->state.default_vlan);
1867			vp_oper->vlan_idx = NO_INDX;
1868		}
1869		if (NO_INDX != vp_oper->mac_idx) {
1870			__mlx4_unregister_mac(&priv->dev, port, vp_oper->state.mac);
1871			vp_oper->mac_idx = NO_INDX;
1872		}
1873	}
1874	return;
1875}
1876
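/*
 * Handle one comm channel command from a slave.  The toggle bit is
 * tracked per slave; RESET tears the slave down, VHCR0..VHCR_EN build
 * up the slave's VHCR DMA address 16 bits at a time, and VHCR_POST
 * processes the posted VHCR.  Any protocol violation resets the slave.
 */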
1877static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
1878			       u16 param, u8 toggle)
1879{
1880	struct mlx4_priv *priv = mlx4_priv(dev);
1881	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
1882	u32 reply;
1883	u8 is_going_down = 0;
1884	int i;
1885	unsigned long flags;
1886
1887	slave_state[slave].comm_toggle ^= 1;
1888	reply = (u32) slave_state[slave].comm_toggle << 31;
1889	if (toggle != slave_state[slave].comm_toggle) {
1890		mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER "
1891			  "STATE COMPROMISED ***\n", toggle, slave);
1892		goto reset_slave;
1893	}
1894	if (cmd == MLX4_COMM_CMD_RESET) {
1895		mlx4_warn(dev, "Received reset from slave:%d\n", slave);
1896		slave_state[slave].active = false;
1897		slave_state[slave].old_vlan_api = false;
1898		mlx4_master_deactivate_admin_state(priv, slave);
1899		for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i) {
1900			slave_state[slave].event_eq[i].eqn = -1;
1901			slave_state[slave].event_eq[i].token = 0;
1902		}
1903		/* check if we are in the middle of the FLR process;
1904		 * if so, return "retry" status to the slave */
1905		if (MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd)
1906			goto inform_slave_state;
1907
1908		mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_SHUTDOWN, slave);
1909
1910		/* write the version in the event field */
1911		reply |= mlx4_comm_get_version();
1912
1913		goto reset_slave;
1914	}
1915	/* command from slave in the middle of FLR */
1916	if (cmd != MLX4_COMM_CMD_RESET &&
1917	    MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
1918		mlx4_warn(dev, "slave:%d is trying to run cmd (0x%x) "
1919			  "in the middle of FLR\n", slave, cmd);
1920		return;
1921	}
1922
1923	switch (cmd) {
1924	case MLX4_COMM_CMD_VHCR0:
1925		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_RESET)
1926			goto reset_slave;
1927		slave_state[slave].vhcr_dma = ((u64) param) << 48;
1928		priv->mfunc.master.slave_state[slave].cookie = 0;
1929		break;
1930	case MLX4_COMM_CMD_VHCR1:
1931		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR0)
1932			goto reset_slave;
1933		slave_state[slave].vhcr_dma |= ((u64) param) << 32;
1934		break;
1935	case MLX4_COMM_CMD_VHCR2:
1936		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR1)
1937			goto reset_slave;
1938		slave_state[slave].vhcr_dma |= ((u64) param) << 16;
1939		break;
1940	case MLX4_COMM_CMD_VHCR_EN:
1941		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR2)
1942			goto reset_slave;
1943		slave_state[slave].vhcr_dma |= param;
1944		if (mlx4_master_activate_admin_state(priv, slave))
1945			goto reset_slave;
1946		slave_state[slave].active = true;
1947		mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_INIT, slave);
1948		break;
1949	case MLX4_COMM_CMD_VHCR_POST:
1950		if ((slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_EN) &&
1951		    (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_POST))
1952			goto reset_slave;
1953
1954		mutex_lock(&priv->cmd.slave_cmd_mutex);
1955		if (mlx4_master_process_vhcr(dev, slave, NULL)) {
1956			mlx4_err(dev, "Failed processing vhcr for slave: %d,"
1957				 " resetting slave.\n", slave);
1958			mutex_unlock(&priv->cmd.slave_cmd_mutex);
1959			goto reset_slave;
1960		}
1961		mutex_unlock(&priv->cmd.slave_cmd_mutex);
1962		break;
1963	default:
1964		mlx4_warn(dev, "Bad comm cmd: %d from slave: %d\n", cmd, slave);
1965		goto reset_slave;
1966	}
1967	spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
1968	if (!slave_state[slave].is_slave_going_down)
1969		slave_state[slave].last_cmd = cmd;
1970	else
1971		is_going_down = 1;
1972	spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
1973	if (is_going_down) {
1974		mlx4_warn(dev, "Slave is going down, aborting command (%d)"
1975			  " executing from slave: %d\n",
1976			  cmd, slave);
1977		return;
1978	}
1979	__raw_writel((__force u32) cpu_to_be32(reply),
1980		     &priv->mfunc.comm[slave].slave_read);
1981	mmiowb();
1982
1983	return;
1984
1985reset_slave:
1986	/* cleanup any slave resources */
1987	mlx4_delete_all_resources_for_slave(dev, slave);
1988	spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
1989	if (!slave_state[slave].is_slave_going_down)
1990		slave_state[slave].last_cmd = MLX4_COMM_CMD_RESET;
1991	spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
1992	/* with the slave in the middle of FLR, no need to clean resources again */
1993inform_slave_state:
1994	__raw_writel((__force u32) cpu_to_be32(reply),
1995		     &priv->mfunc.comm[slave].slave_read);
1996	wmb();
1997}
1998
1999/*
 * Master command processing: scan the comm channel arm bit vector and,
 * for every slave whose write toggle differs from its read toggle,
 * dispatch the pending command to mlx4_master_do_cmd().
 */
2000void mlx4_master_comm_channel(struct work_struct *work)
2001{
2002	struct mlx4_mfunc_master_ctx *master =
2003		container_of(work,
2004			     struct mlx4_mfunc_master_ctx,
2005			     comm_work);
2006	struct mlx4_mfunc *mfunc =
2007		container_of(master, struct mlx4_mfunc, master);
2008	struct mlx4_priv *priv =
2009		container_of(mfunc, struct mlx4_priv, mfunc);
2010	struct mlx4_dev *dev = &priv->dev;
2011	__be32 *bit_vec;
2012	u32 comm_cmd;
2013	u32 vec;
2014	int i, j, slave;
2015	int toggle;
2016	int served = 0;
2017	int reported = 0;
2018	u32 slt;
2019
2020	bit_vec = master->comm_arm_bit_vector;
2021	for (i = 0; i < COMM_CHANNEL_BIT_ARRAY_SIZE; i++) {
2022		vec = be32_to_cpu(bit_vec[i]);
2023		for (j = 0; j < 32; j++) {
2024			if (!(vec & (1 << j)))
2025				continue;
2026			++reported;
2027			slave = (i * 32) + j;
2028			comm_cmd = swab32(readl(
2029					  &mfunc->comm[slave].slave_write));
2030			slt = swab32(readl(&mfunc->comm[slave].slave_read))
2031				     >> 31;
2032			toggle = comm_cmd >> 31;
2033			if (toggle != slt) {
2034				if (master->slave_state[slave].comm_toggle
2035				    != slt) {
2036					mlx4_info(dev, "slave %d out of sync."
2037						  " read toggle %d, state toggle %d. "
2038						  "Resyncing.\n", slave, slt,
2039						  master->slave_state[slave].comm_toggle);
2040					master->slave_state[slave].comm_toggle =
2041						slt;
2042				}
2043				mlx4_master_do_cmd(dev, slave,
2044						   comm_cmd >> 16 & 0xff,
2045						   comm_cmd & 0xffff, toggle);
2046				++served;
2047			} else
2048				mlx4_err(dev, "slave %d out of sync."
2049				  " read toggle %d, write toggle %d.\n", slave, slt,
2050				  toggle);
2051		}
2052	}
2053
2054	if (reported && reported != served)
2055		mlx4_warn(dev, "Got command event with bitmask from %d slaves"
2056			  " but %d were served\n",
2057			  reported, served);
2058}
2059/* arm the comm channel so that further slave command events are generated */
2060void mlx4_master_arm_comm_channel(struct work_struct *work)
2061{
2062	struct mlx4_mfunc_master_ctx *master =
2063		container_of(work,
2064			     struct mlx4_mfunc_master_ctx,
2065			     arm_comm_work);
2066	struct mlx4_mfunc *mfunc =
2067		container_of(master, struct mlx4_mfunc, master);
2068	struct mlx4_priv *priv =
2069		container_of(mfunc, struct mlx4_priv, mfunc);
2070	struct mlx4_dev *dev = &priv->dev;
2071
2072	if (mlx4_ARM_COMM_CHANNEL(dev))
2073		mlx4_warn(dev, "Failed to arm comm channel events\n");
2074}
2075
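/*
 * Slave-side comm channel toggle synchronization: wait up to 5 seconds
 * for the read toggle to match the write toggle.  If they never
 * converge (e.g. a previous VM left the channel unsynced), zero both
 * words and restart from toggle 0.
 */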
2076static int sync_toggles(struct mlx4_dev *dev)
2077{
2078	struct mlx4_priv *priv = mlx4_priv(dev);
2079	int wr_toggle;
2080	int rd_toggle;
2081	unsigned long end;
2082
2083	wr_toggle = swab32(readl(&priv->mfunc.comm->slave_write)) >> 31;
2084	end = jiffies + msecs_to_jiffies(5000);
2085
2086	while (time_before(jiffies, end)) {
2087		rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read)) >> 31;
2088		if (rd_toggle == wr_toggle) {
2089			priv->cmd.comm_toggle = rd_toggle;
2090			return 0;
2091		}
2092
2093		cond_resched();
2094	}
2095
2096	/*
2097	 * we could reach here if for example the previous VM using this
2098	 * function misbehaved and left the channel with unsynced state. We
2099	 * should fix this here and give this VM a chance to use a properly
2100	 * synced channel
2101	 */
2102	mlx4_warn(dev, "recovering from a previously misbehaving VM\n");
2103	__raw_writel((__force u32) 0, &priv->mfunc.comm->slave_read);
2104	__raw_writel((__force u32) 0, &priv->mfunc.comm->slave_write);
2105	priv->cmd.comm_toggle = 0;
2106
2107	return 0;
2108}
2109
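/*
 * Set up the multi-function command infrastructure: map the comm
 * channel and, on the master, allocate per-slave state, initialize the
 * per-port defaults (VGT, no VLAN/MAC index), create the comm workqueue
 * and resource tracker, and arm the comm channel.  A slave only has to
 * sync the channel toggles.
 */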
2110int mlx4_multi_func_init(struct mlx4_dev *dev)
2111{
2112	struct mlx4_priv *priv = mlx4_priv(dev);
2113	struct mlx4_slave_state *s_state;
2114	int i, j, err, port;
2115
2116	if (mlx4_is_master(dev))
2117		priv->mfunc.comm =
2118		ioremap(pci_resource_start(dev->pdev, priv->fw.comm_bar) +
2119			priv->fw.comm_base, MLX4_COMM_PAGESIZE);
2120	else
2121		priv->mfunc.comm =
2122		ioremap(pci_resource_start(dev->pdev, 2) +
2123			MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE);
2124	if (!priv->mfunc.comm) {
2125		mlx4_err(dev, "Couldn't map communication vector.\n");
2126		goto err_vhcr;
2127	}
2128
2129	if (mlx4_is_master(dev)) {
2130		priv->mfunc.master.slave_state =
2131			kzalloc(dev->num_slaves *
2132				sizeof(struct mlx4_slave_state), GFP_KERNEL);
2133		if (!priv->mfunc.master.slave_state)
2134			goto err_comm;
2135
2136		priv->mfunc.master.vf_admin =
2137			kzalloc(dev->num_slaves *
2138				sizeof(struct mlx4_vf_admin_state), GFP_KERNEL);
2139		if (!priv->mfunc.master.vf_admin)
2140			goto err_comm_admin;
2141
2142		priv->mfunc.master.vf_oper =
2143			kzalloc(dev->num_slaves *
2144				sizeof(struct mlx4_vf_oper_state), GFP_KERNEL);
2145		if (!priv->mfunc.master.vf_oper)
2146			goto err_comm_oper;
2147
2148		for (i = 0; i < dev->num_slaves; ++i) {
2149			s_state = &priv->mfunc.master.slave_state[i];
2150			s_state->last_cmd = MLX4_COMM_CMD_RESET;
2151			mutex_init(&priv->mfunc.master.gen_eqe_mutex[i]);
2152			for (j = 0; j < MLX4_EVENT_TYPES_NUM; ++j)
2153				s_state->event_eq[j].eqn = -1;
2154			__raw_writel((__force u32) 0,
2155				     &priv->mfunc.comm[i].slave_write);
2156			__raw_writel((__force u32) 0,
2157				     &priv->mfunc.comm[i].slave_read);
2158			mmiowb();
2159			for (port = 1; port <= MLX4_MAX_PORTS; port++) {
2160				s_state->vlan_filter[port] =
2161					kzalloc(sizeof(struct mlx4_vlan_fltr),
2162						GFP_KERNEL);
2163				if (!s_state->vlan_filter[port]) {
2164					if (--port)
2165						kfree(s_state->vlan_filter[port]);
2166					goto err_slaves;
2167				}
2168				INIT_LIST_HEAD(&s_state->mcast_filters[port]);
2169				priv->mfunc.master.vf_admin[i].vport[port].default_vlan = MLX4_VGT;
2170				priv->mfunc.master.vf_oper[i].vport[port].state.default_vlan = MLX4_VGT;
2171				priv->mfunc.master.vf_oper[i].vport[port].vlan_idx = NO_INDX;
2172				priv->mfunc.master.vf_oper[i].vport[port].mac_idx = NO_INDX;
2173			}
2174			spin_lock_init(&s_state->lock);
2175		}
2176
2177		memset(&priv->mfunc.master.cmd_eqe, 0, dev->caps.eqe_size);
2178		priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD;
2179		INIT_WORK(&priv->mfunc.master.comm_work,
2180			  mlx4_master_comm_channel);
2181		INIT_WORK(&priv->mfunc.master.arm_comm_work,
2182			  mlx4_master_arm_comm_channel);
2183		INIT_WORK(&priv->mfunc.master.slave_event_work,
2184			  mlx4_gen_slave_eqe);
2185		INIT_WORK(&priv->mfunc.master.slave_flr_event_work,
2186			  mlx4_master_handle_slave_flr);
2187		spin_lock_init(&priv->mfunc.master.slave_state_lock);
2188		spin_lock_init(&priv->mfunc.master.slave_eq.event_lock);
2189		priv->mfunc.master.comm_wq =
2190			create_singlethread_workqueue("mlx4_comm");
2191		if (!priv->mfunc.master.comm_wq)
2192			goto err_slaves;
2193
2194		if (mlx4_init_resource_tracker(dev))
2195			goto err_thread;
2196
2197		err = mlx4_ARM_COMM_CHANNEL(dev);
2198		if (err) {
2199			mlx4_err(dev, "Failed to arm comm channel eq: %x\n",
2200				 err);
2201			goto err_resource;
2202		}
2203
2204	} else {
2205		err = sync_toggles(dev);
2206		if (err) {
2207			mlx4_err(dev, "Couldn't sync toggles\n");
2208			goto err_comm;
2209		}
2210	}
2211	return 0;
2212
2213err_resource:
2214	mlx4_free_resource_tracker(dev, RES_TR_FREE_ALL);
2215err_thread:
2216	flush_workqueue(priv->mfunc.master.comm_wq);
2217	destroy_workqueue(priv->mfunc.master.comm_wq);
2218err_slaves:
2219	while (--i) {
2220		for (port = 1; port <= MLX4_MAX_PORTS; port++)
2221			kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
2222	}
2223	kfree(priv->mfunc.master.vf_oper);
2224err_comm_oper:
2225	kfree(priv->mfunc.master.vf_admin);
2226err_comm_admin:
2227	kfree(priv->mfunc.master.slave_state);
2228err_comm:
2229	iounmap(priv->mfunc.comm);
2230err_vhcr:
2231	dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
2232					     priv->mfunc.vhcr,
2233					     priv->mfunc.vhcr_dma);
2234	priv->mfunc.vhcr = NULL;
2235	return -ENOMEM;
2236}
2237
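/*
 * One-time command interface setup: map the HCR (unless running as a
 * slave), allocate the VHCR page on multi-function devices, and create
 * the DMA pool used for command mailboxes.
 */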
2238int mlx4_cmd_init(struct mlx4_dev *dev)
2239{
2240	struct mlx4_priv *priv = mlx4_priv(dev);
2241
2242	mutex_init(&priv->cmd.hcr_mutex);
2243	mutex_init(&priv->cmd.slave_cmd_mutex);
2244	sema_init(&priv->cmd.poll_sem, 1);
2245	priv->cmd.use_events = 0;
2246	priv->cmd.toggle     = 1;
2247
2248	priv->cmd.hcr = NULL;
2249	priv->mfunc.vhcr = NULL;
2250
2251	if (!mlx4_is_slave(dev)) {
2252		priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) +
2253					MLX4_HCR_BASE, MLX4_HCR_SIZE);
2254		if (!priv->cmd.hcr) {
2255			mlx4_err(dev, "Couldn't map command register.\n");
2256			return -ENOMEM;
2257		}
2258	}
2259
2260	if (mlx4_is_mfunc(dev)) {
2261		priv->mfunc.vhcr = dma_alloc_coherent(&(dev->pdev->dev), PAGE_SIZE,
2262						      &priv->mfunc.vhcr_dma,
2263						      GFP_KERNEL);
2264		if (!priv->mfunc.vhcr) {
2265			mlx4_err(dev, "Couldn't allocate VHCR.\n");
2266			goto err_hcr;
2267		}
2268	}
2269
2270	priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev,
2271					 MLX4_MAILBOX_SIZE,
2272					 MLX4_MAILBOX_SIZE, 0);
2273	if (!priv->cmd.pool)
2274		goto err_vhcr;
2275
2276	return 0;
2277
2278err_vhcr:
2279	if (mlx4_is_mfunc(dev))
2280		dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
2281				  priv->mfunc.vhcr, priv->mfunc.vhcr_dma);
2282	priv->mfunc.vhcr = NULL;
2283
2284err_hcr:
2285	if (!mlx4_is_slave(dev))
2286		iounmap(priv->cmd.hcr);
2287	return -ENOMEM;
2288}
2289
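/*
 * Undo mlx4_multi_func_init(): flush and destroy the master comm
 * workqueue, free the per-slave state (including the per-port VLAN
 * filters) and unmap the comm channel.
 */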
2290void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
2291{
2292	struct mlx4_priv *priv = mlx4_priv(dev);
2293	int i, port;
2294
2295	if (mlx4_is_master(dev)) {
2296		flush_workqueue(priv->mfunc.master.comm_wq);
2297		destroy_workqueue(priv->mfunc.master.comm_wq);
2298		for (i = 0; i < dev->num_slaves; i++) {
2299			for (port = 1; port <= MLX4_MAX_PORTS; port++)
2300				kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
2301		}
2302		kfree(priv->mfunc.master.slave_state);
2303		kfree(priv->mfunc.master.vf_admin);
2304		kfree(priv->mfunc.master.vf_oper);
2305	}
2306
2307	iounmap(priv->mfunc.comm);
2308}
2309
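/*
 * Undo mlx4_cmd_init(): destroy the mailbox pool, unmap the HCR and
 * free the VHCR page.
 */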
2310void mlx4_cmd_cleanup(struct mlx4_dev *dev)
2311{
2312	struct mlx4_priv *priv = mlx4_priv(dev);
2313
2314	pci_pool_destroy(priv->cmd.pool);
2315
2316	if (!mlx4_is_slave(dev))
2317		iounmap(priv->cmd.hcr);
2318	if (mlx4_is_mfunc(dev))
2319		dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
2320				  priv->mfunc.vhcr, priv->mfunc.vhcr_dma);
2321	priv->mfunc.vhcr = NULL;
2322}
2323
2324/*
2325 * Switch to using events to issue FW commands (can only be called
2326 * after event queue for command events has been initialized).
2327 */
2328int mlx4_cmd_use_events(struct mlx4_dev *dev)
2329{
2330	struct mlx4_priv *priv = mlx4_priv(dev);
2331	int i;
2332	int err = 0;
2333
2334	priv->cmd.context = kmalloc(priv->cmd.max_cmds *
2335				   sizeof (struct mlx4_cmd_context),
2336				   GFP_KERNEL);
2337	if (!priv->cmd.context)
2338		return -ENOMEM;
2339
2340	for (i = 0; i < priv->cmd.max_cmds; ++i) {
2341		priv->cmd.context[i].token = i;
2342		priv->cmd.context[i].next  = i + 1;
2343	}
2344
2345	priv->cmd.context[priv->cmd.max_cmds - 1].next = -1;
2346	priv->cmd.free_head = 0;
2347
2348	sema_init(&priv->cmd.event_sem, priv->cmd.max_cmds);
2349	spin_lock_init(&priv->cmd.context_lock);
2350
2351	for (priv->cmd.token_mask = 1;
2352	     priv->cmd.token_mask < priv->cmd.max_cmds;
2353	     priv->cmd.token_mask <<= 1)
2354		; /* nothing */
2355	--priv->cmd.token_mask;
2356
2357	down(&priv->cmd.poll_sem);
2358	priv->cmd.use_events = 1;
2359
2360	return err;
2361}
2362
2363/*
2364 * Switch back to polling (used when shutting down the device)
2365 */
2366void mlx4_cmd_use_polling(struct mlx4_dev *dev)
2367{
2368	struct mlx4_priv *priv = mlx4_priv(dev);
2369	int i;
2370
2371	priv->cmd.use_events = 0;
2372
2373	for (i = 0; i < priv->cmd.max_cmds; ++i)
2374		down(&priv->cmd.event_sem);
2375
2376	kfree(priv->cmd.context);
2377
2378	up(&priv->cmd.poll_sem);
2379}
2380
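/*
 * Allocate a zeroed MLX4_MAILBOX_SIZE command mailbox from the DMA
 * pool.  Rough usage sketch (opcode and modifiers are placeholders):
 *
 *	mailbox = mlx4_alloc_cmd_mailbox(dev);
 *	if (IS_ERR(mailbox))
 *		return PTR_ERR(mailbox);
 *	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, opcode,
 *			   MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
 *	mlx4_free_cmd_mailbox(dev, mailbox);
 */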
2381struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
2382{
2383	struct mlx4_cmd_mailbox *mailbox;
2384
2385	mailbox = kmalloc(sizeof *mailbox, GFP_KERNEL);
2386	if (!mailbox)
2387		return ERR_PTR(-ENOMEM);
2388
2389	mailbox->buf = pci_pool_alloc(mlx4_priv(dev)->cmd.pool, GFP_KERNEL,
2390				      &mailbox->dma);
2391	if (!mailbox->buf) {
2392		kfree(mailbox);
2393		return ERR_PTR(-ENOMEM);
2394	}
2395
2396	memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);
2397
2398	return mailbox;
2399}
2400EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox);
2401
2402void mlx4_free_cmd_mailbox(struct mlx4_dev *dev,
2403			   struct mlx4_cmd_mailbox *mailbox)
2404{
2405	if (!mailbox)
2406		return;
2407
2408	pci_pool_free(mlx4_priv(dev)->cmd.pool, mailbox->buf, mailbox->dma);
2409	kfree(mailbox);
2410}
2411EXPORT_SYMBOL_GPL(mlx4_free_cmd_mailbox);
2412
2413u32 mlx4_comm_get_version(void)
2414{
2415	return ((u32) CMD_CHAN_IF_REV << 8) | (u32) CMD_CHAN_VER;
2416}
2417
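/*
 * Translate a VF number into its slave index.  Slave 0 is the PF
 * itself, so VF n maps to slave n + 1; out-of-range VF numbers yield
 * -EINVAL.
 */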
2418static int mlx4_get_slave_indx(struct mlx4_dev *dev, int vf)
2419{
2420	if ((vf < 0) || (vf >= dev->num_vfs)) {
2421		mlx4_err(dev, "Bad vf number:%d (number of activated vf: %d)\n", vf, dev->num_vfs);
2422		return -EINVAL;
2423	}
2424	return (vf+1);
2425}
2426
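/*
 * PF-side helper: store a new administrative MAC for a VF port.  The
 * value becomes operational only the next time the VF is started.
 * A hypothetical caller would be a PF netdev ndo_set_vf_mac handler:
 *
 *	err = mlx4_set_vf_mac(mdev->dev, port, vf_index, mac);
 */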
2427int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u8 *mac)
2428{
2429	struct mlx4_priv *priv = mlx4_priv(dev);
2430	struct mlx4_vport_state *s_info;
2431	int slave;
2432
2433	if (!mlx4_is_master(dev))
2434		return -EPROTONOSUPPORT;
2435
2436	slave = mlx4_get_slave_indx(dev, vf);
2437	if (slave < 0)
2438		return -EINVAL;
2439
2440	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
2441	s_info->mac = mlx4_mac_to_u64(mac);
2442	mlx4_info(dev, "default mac on vf %d port %d set to %llX will take effect only after vf restart\n",
2443		  vf, port, (unsigned long long) s_info->mac);
2444	return 0;
2445}
2446EXPORT_SYMBOL_GPL(mlx4_set_vf_mac);
2447
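/*
 * PF-side helper: set the VST default VLAN and QoS for a VF port.
 * vlan == 0 together with qos == 0 returns the port to VGT
 * (guest-controlled tagging).  If the slave is already active and the
 * device supports UPDATE_QP, the change is applied immediately.
 */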
2448int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
2449{
2450	struct mlx4_priv *priv = mlx4_priv(dev);
2451	struct mlx4_vport_oper_state *vf_oper;
2452	struct mlx4_vport_state *vf_admin;
2453	int slave;
2454
2455	if ((!mlx4_is_master(dev)) ||
2456	    !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VLAN_CONTROL))
2457		return -EPROTONOSUPPORT;
2458
2459	if ((vlan > 4095) || (qos > 7))
2460		return -EINVAL;
2461
2462	slave = mlx4_get_slave_indx(dev, vf);
2463	if (slave < 0)
2464		return -EINVAL;
2465
2466	vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
2467	vf_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
2468
2469	if ((0 == vlan) && (0 == qos))
2470		vf_admin->default_vlan = MLX4_VGT;
2471	else
2472		vf_admin->default_vlan = vlan;
2473	vf_admin->default_qos = qos;
2474
2475	if (priv->mfunc.master.slave_state[slave].active &&
2476	    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
2477		mlx4_info(dev, "updating vf %d port %d config params immediately\n",
2478			  vf, port);
2479		mlx4_master_immediate_activate_vlan_qos(priv, slave, port);
2480	}
2481	return 0;
2482}
2483EXPORT_SYMBOL_GPL(mlx4_set_vf_vlan);
2484
2485/* mlx4_get_slave_default_vlan -
2486 * return true if the slave port is in VST mode (has a default vlan);
2487 * if so, fill in vlan & qos (if not NULL) */
2488bool mlx4_get_slave_default_vlan(struct mlx4_dev *dev, int port, int slave, u16 *vlan, u8 *qos)
2489{
2490	struct mlx4_vport_oper_state *vp_oper;
2491	struct mlx4_priv *priv;
2492
2493	priv = mlx4_priv(dev);
2494	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
2495
2496	if (MLX4_VGT != vp_oper->state.default_vlan) {
2497		if (vlan)
2498			*vlan = vp_oper->state.default_vlan;
2499		if (qos)
2500			*qos = vp_oper->state.default_qos;
2501		return true;
2502	}
2503	return false;
2504}
2505EXPORT_SYMBOL_GPL(mlx4_get_slave_default_vlan);
2506
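/*
 * PF-side helper: enable or disable MAC spoof checking for a VF port.
 * Only the admin state is written here, so the setting takes effect
 * when the slave's admin state is next activated.
 */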
2507int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting)
2508{
2509	struct mlx4_priv *priv = mlx4_priv(dev);
2510	struct mlx4_vport_state *s_info;
2511	int slave;
2512
2513	if ((!mlx4_is_master(dev)) ||
2514	    !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FSM))
2515		return -EPROTONOSUPPORT;
2516
2517	slave = mlx4_get_slave_indx(dev, vf);
2518	if (slave < 0)
2519		return -EINVAL;
2520
2521	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
2522	s_info->spoofchk = setting;
2523
2524	return 0;
2525}
2526EXPORT_SYMBOL_GPL(mlx4_set_vf_spoofchk);
2527
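/*
 * Force the link state reported to a VF port.  AUTO reports the port
 * as active unless port sensing is in progress; ENABLE/DISABLE force
 * it up or down.  Both admin and operational state are updated and a
 * port state change event is generated towards the slave.
 */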
2528int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state)
2529{
2530	struct mlx4_priv *priv = mlx4_priv(dev);
2531	struct mlx4_vport_state *s_info;
2532	struct mlx4_vport_oper_state *vp_oper;
2533	int slave;
2534	u8 link_stat_event;
2535
2536	slave = mlx4_get_slave_indx(dev, vf);
2537	if (slave < 0)
2538		return -EINVAL;
2539
2540	switch (link_state) {
2541	case IFLA_VF_LINK_STATE_AUTO:
2542		/* get current link state */
2543		if (!priv->sense.do_sense_port[port])
2544			link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_ACTIVE;
2545		else
2546			link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_DOWN;
2547		break;
2548
2549	case IFLA_VF_LINK_STATE_ENABLE:
2550		link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_ACTIVE;
2551		break;
2552
2553	case IFLA_VF_LINK_STATE_DISABLE:
2554		link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_DOWN;
2555		break;
2556
2557	default:
2558		mlx4_warn(dev, "unknown value for link_state %02x on slave %d port %d\n",
2559			  link_state, slave, port);
2560		return -EINVAL;
2561	}
2562	/* update the admin & oper state on the link state */
2563	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
2564	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
2565	s_info->link_state = link_state;
2566	vp_oper->state.link_state = link_state;
2567
2568	/* send event */
2569	mlx4_gen_port_state_change_eqe(dev, slave, port, link_stat_event);
2570	return 0;
2571}
2572EXPORT_SYMBOL_GPL(mlx4_set_vf_link_state);
2573
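/* Return the administrative link state last set for a VF port. */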
2574int mlx4_get_vf_link_state(struct mlx4_dev *dev, int port, int vf)
2575{
2576	struct mlx4_priv *priv = mlx4_priv(dev);
2577	struct mlx4_vport_state *s_info;
2578	int slave;
2579
2580	if (!mlx4_is_master(dev))
2581		return -EPROTONOSUPPORT;
2582
2583	slave = mlx4_get_slave_indx(dev, vf);
2584	if (slave < 0)
2585		return -EINVAL;
2586
2587	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
2588
2589	return s_info->link_state;
2590}
2591EXPORT_SYMBOL_GPL(mlx4_get_vf_link_state);
2592
2593