1/*
2 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
3 * Copyright (c) 2005, 2006, 2007, 2008, 2014 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses.  You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 *     Redistribution and use in source and binary forms, with or
13 *     without modification, are permitted provided that the following
14 *     conditions are met:
15 *
16 *      - Redistributions of source code must retain the above
17 *        copyright notice, this list of conditions and the following
18 *        disclaimer.
19 *
20 *      - Redistributions in binary form must reproduce the above
21 *        copyright notice, this list of conditions and the following
22 *        disclaimer in the documentation and/or other materials
23 *        provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/module.h>
38#include <linux/pci.h>
39#include <linux/errno.h>
40
41#include <linux/mlx4/cmd.h>
42#include <linux/mlx4/device.h>
43#include <linux/semaphore.h>
44#include <rdma/ib_smi.h>
45
46#include <asm/io.h>
47#include <linux/ktime.h>
48
49#include "mlx4.h"
50#include "fw.h"
51
52#define CMD_POLL_TOKEN 0xffff
53#define INBOX_MASK	0xffffffffffffff00ULL
54
55#define CMD_CHAN_VER 1
56#define CMD_CHAN_IF_REV 1
57
58enum {
59	/* command completed successfully: */
60	CMD_STAT_OK		= 0x00,
61	/* Internal error (such as a bus error) occurred while processing command: */
62	CMD_STAT_INTERNAL_ERR	= 0x01,
63	/* Operation/command not supported or opcode modifier not supported: */
64	CMD_STAT_BAD_OP		= 0x02,
65	/* Parameter not supported or parameter out of range: */
66	CMD_STAT_BAD_PARAM	= 0x03,
67	/* System not enabled or bad system state: */
68	CMD_STAT_BAD_SYS_STATE	= 0x04,
69	/* Attempt to access reserved or unallocated resource: */
70	CMD_STAT_BAD_RESOURCE	= 0x05,
71	/* Requested resource is currently executing a command, or is otherwise busy: */
72	CMD_STAT_RESOURCE_BUSY	= 0x06,
73	/* Required capability exceeds device limits: */
74	CMD_STAT_EXCEED_LIM	= 0x08,
75	/* Resource is not in the appropriate state or ownership: */
76	CMD_STAT_BAD_RES_STATE	= 0x09,
77	/* Index out of range: */
78	CMD_STAT_BAD_INDEX	= 0x0a,
79	/* FW image corrupted: */
80	CMD_STAT_BAD_NVMEM	= 0x0b,
81	/* Error in ICM mapping (e.g. not enough auxiliary ICM pages to execute command): */
82	CMD_STAT_ICM_ERROR	= 0x0c,
83	/* Attempt to modify a QP/EE which is not in the presumed state: */
84	CMD_STAT_BAD_QP_STATE   = 0x10,
85	/* Bad segment parameters (Address/Size): */
86	CMD_STAT_BAD_SEG_PARAM	= 0x20,
87	/* Memory Region has Memory Windows bound to it: */
88	CMD_STAT_REG_BOUND	= 0x21,
89	/* HCA local attached memory not present: */
90	CMD_STAT_LAM_NOT_PRE	= 0x22,
91	/* Bad management packet (silently discarded): */
92	CMD_STAT_BAD_PKT	= 0x30,
93	/* More outstanding CQEs in CQ than new CQ size: */
94	CMD_STAT_BAD_SIZE	= 0x40,
95	/* Multi Function device support required: */
96	CMD_STAT_MULTI_FUNC_REQ	= 0x50,
97};
98
99enum {
100	HCR_IN_PARAM_OFFSET	= 0x00,
101	HCR_IN_MODIFIER_OFFSET	= 0x08,
102	HCR_OUT_PARAM_OFFSET	= 0x0c,
103	HCR_TOKEN_OFFSET	= 0x14,
104	HCR_STATUS_OFFSET	= 0x18,
105
106	HCR_OPMOD_SHIFT		= 12,
107	HCR_T_BIT		= 21,
108	HCR_E_BIT		= 22,
109	HCR_GO_BIT		= 23
110};
111
112enum {
113	GO_BIT_TIMEOUT_MSECS	= 10000
114};
115
116enum mlx4_vlan_transition {
117	MLX4_VLAN_TRANSITION_VST_VST = 0,
118	MLX4_VLAN_TRANSITION_VST_VGT = 1,
119	MLX4_VLAN_TRANSITION_VGT_VST = 2,
120	MLX4_VLAN_TRANSITION_VGT_VGT = 3,
121};
122
123
124struct mlx4_cmd_context {
125	struct completion	done;
126	int			result;
127	int			next;
128	u64			out_param;
129	u16			token;
130	u8			fw_status;
131};
132
133static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
134				    struct mlx4_vhcr_cmd *in_vhcr);
135
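/*
 * Translate a firmware command status (CMD_STAT_*) into a negative errno.
 * Unknown or unmapped non-zero statuses are reported as -EIO.
 */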
136static int mlx4_status_to_errno(u8 status)
137{
138	static const int trans_table[] = {
139		[CMD_STAT_INTERNAL_ERR]	  = -EIO,
140		[CMD_STAT_BAD_OP]	  = -EPERM,
141		[CMD_STAT_BAD_PARAM]	  = -EINVAL,
142		[CMD_STAT_BAD_SYS_STATE]  = -ENXIO,
143		[CMD_STAT_BAD_RESOURCE]	  = -EBADF,
144		[CMD_STAT_RESOURCE_BUSY]  = -EBUSY,
145		[CMD_STAT_EXCEED_LIM]	  = -ENOMEM,
146		[CMD_STAT_BAD_RES_STATE]  = -EBADF,
147		[CMD_STAT_BAD_INDEX]	  = -EBADF,
148		[CMD_STAT_BAD_NVMEM]	  = -EFAULT,
149		[CMD_STAT_ICM_ERROR]	  = -ENFILE,
150		[CMD_STAT_BAD_QP_STATE]   = -EINVAL,
151		[CMD_STAT_BAD_SEG_PARAM]  = -EFAULT,
152		[CMD_STAT_REG_BOUND]	  = -EBUSY,
153		[CMD_STAT_LAM_NOT_PRE]	  = -EAGAIN,
154		[CMD_STAT_BAD_PKT]	  = -EINVAL,
155		[CMD_STAT_BAD_SIZE]	  = -ENOMEM,
156		[CMD_STAT_MULTI_FUNC_REQ] = -EACCES,
157	};
158
159	if (status >= ARRAY_SIZE(trans_table) ||
160	    (status != CMD_STAT_OK && trans_table[status] == 0))
161		return -EIO;
162
163	return trans_table[status];
164}
165
166static const char *cmd_to_str(u16 cmd)
167{
168	switch (cmd) {
169	case MLX4_CMD_SYS_EN:		return "SYS_EN";
170	case MLX4_CMD_SYS_DIS:		return "SYS_DIS";
171	case MLX4_CMD_MAP_FA:		return "MAP_FA";
172	case MLX4_CMD_UNMAP_FA:		return "UNMAP_FA";
173	case MLX4_CMD_RUN_FW:		return "RUN_FW";
174	case MLX4_CMD_MOD_STAT_CFG:	return "MOD_STAT_CFG";
175	case MLX4_CMD_QUERY_DEV_CAP:	return "QUERY_DEV_CAP";
176	case MLX4_CMD_QUERY_FW:		return "QUERY_FW";
177	case MLX4_CMD_ENABLE_LAM:	return "ENABLE_LAM";
178	case MLX4_CMD_DISABLE_LAM:	return "DISABLE_LAM";
179	case MLX4_CMD_QUERY_DDR:	return "QUERY_DDR";
180	case MLX4_CMD_QUERY_ADAPTER:	return "QUERY_ADAPTER";
181	case MLX4_CMD_INIT_HCA:		return "INIT_HCA";
182	case MLX4_CMD_CLOSE_HCA:	return "CLOSE_HCA";
183	case MLX4_CMD_INIT_PORT:	return "INIT_PORT";
184	case MLX4_CMD_CLOSE_PORT:	return "CLOSE_PORT";
185	case MLX4_CMD_QUERY_HCA:	return "QUERY_HCA";
186	case MLX4_CMD_QUERY_PORT:	return "QUERY_PORT";
187	case MLX4_CMD_SENSE_PORT:	return "SENSE_PORT";
188	case MLX4_CMD_HW_HEALTH_CHECK:  return "HW_HEALTH_CHECK";
189	case MLX4_CMD_SET_PORT:		return "SET_PORT";
190	case MLX4_CMD_SET_NODE:		return "SET_NODE";
191	case MLX4_CMD_QUERY_FUNC:	return "QUERY_FUNC";
192	case MLX4_CMD_MAP_ICM:		return "MAP_ICM";
193	case MLX4_CMD_UNMAP_ICM:	return "UNMAP_ICM";
194	case MLX4_CMD_MAP_ICM_AUX:	return "MAP_ICM_AUX";
195	case MLX4_CMD_UNMAP_ICM_AUX:	return "UNMAP_ICM_AUX";
196	case MLX4_CMD_SET_ICM_SIZE:	return "SET_ICM_SIZE";
197		/* master notifies fw when the slave's FLR has finished */
198	case MLX4_CMD_INFORM_FLR_DONE:	return "INFORM_FLR_DONE";
199	case MLX4_CMD_GET_OP_REQ:	return "GET_OP_REQ";
200
201		/* TPT commands */
202	case MLX4_CMD_SW2HW_MPT:	return "SW2HW_MPT";
203	case MLX4_CMD_QUERY_MPT:	return "QUERY_MPT";
204	case MLX4_CMD_HW2SW_MPT:	return "HW2SW_MPT";
205	case MLX4_CMD_READ_MTT:		return "READ_MTT";
206	case MLX4_CMD_WRITE_MTT:	return "WRITE_MTT";
207	case MLX4_CMD_SYNC_TPT:		return "SYNC_TPT";
208
209		/* EQ commands */
210	case MLX4_CMD_MAP_EQ:		return "MAP_EQ";
211	case MLX4_CMD_SW2HW_EQ:		return "SW2HW_EQ";
212	case MLX4_CMD_HW2SW_EQ:		return "HW2SW_EQ";
213	case MLX4_CMD_QUERY_EQ:		return "QUERY_EQ";
214
215		/* CQ commands */
216	case MLX4_CMD_SW2HW_CQ:		return "SW2HW_CQ";
217	case MLX4_CMD_HW2SW_CQ:		return "HW2SW_CQ";
218	case MLX4_CMD_QUERY_CQ:		return "QUERY_CQ";
219	case MLX4_CMD_MODIFY_CQ:	return "MODIFY_CQ";
220
221		/* SRQ commands */
222	case MLX4_CMD_SW2HW_SRQ:	return "SW2HW_SRQ";
223	case MLX4_CMD_HW2SW_SRQ:	return "HW2SW_SRQ";
224	case MLX4_CMD_QUERY_SRQ:	return "QUERY_SRQ";
225	case MLX4_CMD_ARM_SRQ:		return "ARM_SRQ";
226
227		/* QP/EE commands */
228	case MLX4_CMD_RST2INIT_QP:	return "RST2INIT_QP";
229	case MLX4_CMD_INIT2RTR_QP:	return "INIT2RTR_QP";
230	case MLX4_CMD_RTR2RTS_QP:	return "RTR2RTS_QP";
231	case MLX4_CMD_RTS2RTS_QP:	return "RTS2RTS_QP";
232	case MLX4_CMD_SQERR2RTS_QP:	return "SQERR2RTS_QP";
233	case MLX4_CMD_2ERR_QP:		return "2ERR_QP";
234	case MLX4_CMD_RTS2SQD_QP:	return "RTS2SQD_QP";
235	case MLX4_CMD_SQD2SQD_QP:	return "SQD2SQD_QP";
236	case MLX4_CMD_SQD2RTS_QP:	return "SQD2RTS_QP";
237	case MLX4_CMD_2RST_QP:		return "2RST_QP";
238	case MLX4_CMD_QUERY_QP:		return "QUERY_QP";
239	case MLX4_CMD_INIT2INIT_QP:	return "INIT2INIT_QP";
240	case MLX4_CMD_SUSPEND_QP:	return "SUSPEND_QP";
241	case MLX4_CMD_UNSUSPEND_QP:	return "UNSUSPEND_QP";
242		/* special QP and management commands */
243	case MLX4_CMD_CONF_SPECIAL_QP:	return "CONF_SPECIAL_QP";
244	case MLX4_CMD_MAD_IFC:		return "MAD_IFC";
245
246		/* multicast commands */
247	case MLX4_CMD_READ_MCG:		return "READ_MCG";
248	case MLX4_CMD_WRITE_MCG:	return "WRITE_MCG";
249	case MLX4_CMD_MGID_HASH:	return "MGID_HASH";
250
251		/* miscellaneous commands */
252	case MLX4_CMD_DIAG_RPRT:	return "DIAG_RPRT";
253	case MLX4_CMD_NOP:		return "NOP";
254	case MLX4_CMD_ACCESS_MEM:	return "ACCESS_MEM";
255	case MLX4_CMD_SET_VEP:		return "SET_VEP";
256
257		/* Ethernet specific commands */
258	case MLX4_CMD_SET_VLAN_FLTR:	return "SET_VLAN_FLTR";
259	case MLX4_CMD_SET_MCAST_FLTR:	return "SET_MCAST_FLTR";
260	case MLX4_CMD_DUMP_ETH_STATS:	return "DUMP_ETH_STATS";
261
262		/* Communication channel commands */
263	case MLX4_CMD_ARM_COMM_CHANNEL:	return "ARM_COMM_CHANNEL";
264	case MLX4_CMD_GEN_EQE:		return "GEN_EQE";
265
266		/* virtual commands */
267	case MLX4_CMD_ALLOC_RES:	return "ALLOC_RES";
268	case MLX4_CMD_FREE_RES:		return "FREE_RES";
269	case MLX4_CMD_MCAST_ATTACH:	return "MCAST_ATTACH";
270	case MLX4_CMD_UCAST_ATTACH:	return "UCAST_ATTACH";
271	case MLX4_CMD_PROMISC:		return "PROMISC";
272	case MLX4_CMD_QUERY_FUNC_CAP:	return "QUERY_FUNC_CAP";
273	case MLX4_CMD_QP_ATTACH:	return "QP_ATTACH";
274
275		/* debug commands */
276	case MLX4_CMD_QUERY_DEBUG_MSG:	return "QUERY_DEBUG_MSG";
277	case MLX4_CMD_SET_DEBUG_MSG:	return "SET_DEBUG_MSG";
278
279		/* statistics commands */
280	case MLX4_CMD_QUERY_IF_STAT:	return "QUERY_IF_STAT";
281	case MLX4_CMD_SET_IF_STAT:	return "SET_IF_STAT";
282
283		/* register/delete flow steering network rules */
284	case MLX4_QP_FLOW_STEERING_ATTACH:	return "QP_FLOW_STEERING_ATTACH";
285	case MLX4_QP_FLOW_STEERING_DETACH:	return "QP_FLOW_STEERING_DETACH";
286	case MLX4_FLOW_STEERING_IB_UC_QP_RANGE:	return "FLOW_STEERING_IB_UC_QP_RANGE";
287	default: return "OTHER";
288	}
289}
290
291static u8 mlx4_errno_to_status(int errno)
292{
293	switch (errno) {
294	case -EPERM:
295		return CMD_STAT_BAD_OP;
296	case -EINVAL:
297		return CMD_STAT_BAD_PARAM;
298	case -ENXIO:
299		return CMD_STAT_BAD_SYS_STATE;
300	case -EBUSY:
301		return CMD_STAT_RESOURCE_BUSY;
302	case -ENOMEM:
303		return CMD_STAT_EXCEED_LIM;
304	case -ENFILE:
305		return CMD_STAT_ICM_ERROR;
306	default:
307		return CMD_STAT_INTERNAL_ERR;
308	}
309}
310
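/*
 * A comm-channel command is still pending as long as the toggle bit
 * (bit 31) of slave_read has not caught up with our comm_toggle.
 */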
311static int comm_pending(struct mlx4_dev *dev)
312{
313	struct mlx4_priv *priv = mlx4_priv(dev);
314	u32 status = readl(&priv->mfunc.comm->slave_read);
315
316	return (swab32(status) >> 31) != priv->cmd.comm_toggle;
317}
318
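/*
 * Post a command on the slave->master communication channel: flip our
 * toggle and write param, cmd and the new toggle as a single big-endian
 * 32-bit word to the slave_write register.
 */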
319static void mlx4_comm_cmd_post(struct mlx4_dev *dev, u8 cmd, u16 param)
320{
321	struct mlx4_priv *priv = mlx4_priv(dev);
322	u32 val;
323
324	priv->cmd.comm_toggle ^= 1;
325	val = param | (cmd << 16) | (priv->cmd.comm_toggle << 31);
326	__raw_writel((__force u32) cpu_to_be32(val),
327		     &priv->mfunc.comm->slave_write);
328	mmiowb();
329}
330
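/*
 * Polling-mode comm-channel command: post the command and busy-wait
 * (with cond_resched) until the master acknowledges it or the timeout
 * expires.
 */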
331static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
332		       unsigned long timeout)
333{
334	struct mlx4_priv *priv = mlx4_priv(dev);
335	unsigned long end;
336	int err = 0;
337	int ret_from_pending = 0;
338
339	/* First, verify that the master reports correct status */
340	if (comm_pending(dev)) {
341		mlx4_warn(dev, "Communication channel is not idle. "
342			  "My toggle is %d (cmd:0x%x)\n",
343			  priv->cmd.comm_toggle, cmd);
344		return -EAGAIN;
345	}
346
347	/* Write command */
348	down(&priv->cmd.poll_sem);
349	mlx4_comm_cmd_post(dev, cmd, param);
350
351	end = msecs_to_jiffies(timeout) + jiffies;
352	while (comm_pending(dev) && time_before(jiffies, end))
353		cond_resched();
354	ret_from_pending = comm_pending(dev);
355	if (ret_from_pending) {
356		/* check if the slave is trying to boot in the middle of
357		 * FLR process. The only non-zero result in the RESET command
358		 * is MLX4_DELAY_RESET_SLAVE. */
359		if (cmd == MLX4_COMM_CMD_RESET) {
360			mlx4_warn(dev, "Got slave FLRed from Communication"
361				  " channel (ret:0x%x)\n", ret_from_pending);
362			err = MLX4_DELAY_RESET_SLAVE;
363		} else {
364			mlx4_warn(dev, "Communication channel timed out\n");
365			err = -ETIMEDOUT;
366		}
367	}
368
369	up(&priv->cmd.poll_sem);
370	return err;
371}
372
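/*
 * Event-mode comm-channel command: take a command context, post the
 * command and sleep until mlx4_cmd_event() completes it.
 */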
373static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 op,
374			      u16 param, unsigned long timeout)
375{
376	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
377	struct mlx4_cmd_context *context;
378	unsigned long end;
379	int err = 0;
380
381	down(&cmd->event_sem);
382
383	end = msecs_to_jiffies(timeout) + jiffies;
384	while (comm_pending(dev) && time_before(jiffies, end))
385		cond_resched();
386	if (comm_pending(dev)) {
387		mlx4_warn(dev, "mlx4_comm_cmd_wait: Comm channel "
388			  "is not idle. My toggle is %d (op: 0x%x)\n",
389			  mlx4_priv(dev)->cmd.comm_toggle, op);
390		up(&cmd->event_sem);
391		return -EAGAIN;
392	}
393
394	spin_lock(&cmd->context_lock);
395	BUG_ON(cmd->free_head < 0);
396	context = &cmd->context[cmd->free_head];
397	context->token += cmd->token_mask + 1;
398	cmd->free_head = context->next;
399	spin_unlock(&cmd->context_lock);
400
401	init_completion(&context->done);
402
403	mlx4_comm_cmd_post(dev, op, param);
404
405	/* In slave, wait unconditionally for completion */
406	wait_for_completion(&context->done);
407
408	err = context->result;
409	if (err && context->fw_status != CMD_STAT_MULTI_FUNC_REQ) {
410		mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
411			 op, context->fw_status);
412		goto out;
413	}
414
415out:
416	/* wait for comm channel ready;
417	 * this is necessary to prevent the race
418	 * when switching between event and polling mode
419	 */
420	end = msecs_to_jiffies(timeout) + jiffies;
421	while (comm_pending(dev) && time_before(jiffies, end))
422		cond_resched();
423
424	spin_lock(&cmd->context_lock);
425	context->next = cmd->free_head;
426	cmd->free_head = context - cmd->context;
427	spin_unlock(&cmd->context_lock);
428
429	up(&cmd->event_sem);
430	return err;
431}
432
433int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
434		  unsigned long timeout)
435{
436	if (mlx4_priv(dev)->cmd.use_events)
437		return mlx4_comm_cmd_wait(dev, cmd, param, timeout);
438	return mlx4_comm_cmd_poll(dev, cmd, param, timeout);
439}
440
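/*
 * The HCR is still busy while the GO bit is set or the HCR toggle bit
 * still matches the driver's toggle for the previous command.
 */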
441static int cmd_pending(struct mlx4_dev *dev)
442{
443	u32 status;
444
445	if (pci_channel_offline(dev->pdev))
446		return -EIO;
447
448	status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);
449
450	return (status & swab32(1 << HCR_GO_BIT)) ||
451		(mlx4_priv(dev)->cmd.toggle ==
452		 !!(status & swab32(1 << HCR_T_BIT)));
453}
454
455static int get_status(struct mlx4_dev *dev, u32 *status, int *go_bit,
456		      int *t_bit)
457{
458	if (pci_channel_offline(dev->pdev))
459		return -EIO;
460
461	*status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);
462	*t_bit = !!(*status & swab32(1 << HCR_T_BIT));
463	*go_bit = !!(*status & swab32(1 << HCR_GO_BIT));
464
465	return 0;
466}
467
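/*
 * Write one command to the HCR: wait for the previous command to clear,
 * write the six parameter words, then set the GO bit together with the
 * opcode, opcode modifier, toggle and (for event mode) the E bit.
 */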
468static int mlx4_cmd_post(struct mlx4_dev *dev, struct timespec *ts1,
469			 u64 in_param, u64 out_param, u32 in_modifier,
470			 u8 op_modifier, u16 op, u16 token, int event)
471{
472	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
473	u32 __iomem *hcr = cmd->hcr;
474	int ret = -EAGAIN;
475	unsigned long end;
476	int err, go_bit = 0, t_bit = 0;
477	u32 status = 0;
478
479	mutex_lock(&cmd->hcr_mutex);
480
481	if (pci_channel_offline(dev->pdev)) {
482		/*
483		 * Device is going through error recovery
484		 * and cannot accept commands.
485		 */
486		ret = -EIO;
487		goto out;
488	}
489
490	end = jiffies;
491	if (event)
492		end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS);
493
494	while (cmd_pending(dev)) {
495		if (pci_channel_offline(dev->pdev)) {
496			/*
497			 * Device is going through error recovery
498			 * and cannot accept commands.
499			 */
500			ret = -EIO;
501			goto out;
502		}
503
504		if (time_after_eq(jiffies, end)) {
505			mlx4_err(dev, "%s: cmd_pending failed\n", __func__);
506			goto out;
507		}
508		cond_resched();
509	}
510
511	/*
512	 * We use writel (instead of something like memcpy_toio)
513	 * because writes of less than 32 bits to the HCR don't work
514	 * (and some architectures such as ia64 implement memcpy_toio
515	 * in terms of writeb).
516	 */
517	__raw_writel((__force u32) cpu_to_be32(in_param >> 32),		  hcr + 0);
518	__raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful),  hcr + 1);
519	__raw_writel((__force u32) cpu_to_be32(in_modifier),		  hcr + 2);
520	__raw_writel((__force u32) cpu_to_be32(out_param >> 32),	  hcr + 3);
521	__raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), hcr + 4);
522	__raw_writel((__force u32) cpu_to_be32(token << 16),		  hcr + 5);
523
524	if (ts1)
525		ktime_get_ts(ts1);
526
527	/* __raw_writel may not order writes. */
528	wmb();
529
530	__raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT)		|
531					       (cmd->toggle << HCR_T_BIT)	|
532					       (event ? (1 << HCR_E_BIT) : 0)	|
533					       (op_modifier << HCR_OPMOD_SHIFT) |
534					       op), hcr + 6);
535
536	/*
537	 * Make sure that our HCR writes don't get mixed in with
538	 * writes from another CPU starting a FW command.
539	 */
540	mmiowb();
541
542	cmd->toggle = cmd->toggle ^ 1;
543
544	ret = 0;
545
546out:
547	if (ret) {
548		err = get_status(dev, &status, &go_bit, &t_bit);
549		mlx4_warn(dev, "Could not post command %s (0x%x): ret=%d, "
550			  "in_param=0x%llx, in_mod=0x%x, op_mod=0x%x, "
551			  "get_status err=%d, status_reg=0x%x, go_bit=%d, "
552			  "t_bit=%d, toggle=0x%x\n", cmd_to_str(op), op, ret,
553			  (unsigned long long) in_param, in_modifier, op_modifier, err, status,
554			  go_bit, t_bit, cmd->toggle);
555	}
556	mutex_unlock(&cmd->hcr_mutex);
557	return ret;
558}
559
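/*
 * Execute a command through the virtual HCR: fill the VHCR in host
 * memory, then either process it directly (on the master) or signal
 * the master over the comm channel (on a slave).
 */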
560static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
561			  int out_is_imm, u32 in_modifier, u8 op_modifier,
562			  u16 op, unsigned long timeout)
563{
564	struct mlx4_priv *priv = mlx4_priv(dev);
565	struct mlx4_vhcr_cmd *vhcr = priv->mfunc.vhcr;
566	int ret;
567
568	mutex_lock(&priv->cmd.slave_cmd_mutex);
569
570	vhcr->in_param = cpu_to_be64(in_param);
571	vhcr->out_param = out_param ? cpu_to_be64(*out_param) : 0;
572	vhcr->in_modifier = cpu_to_be32(in_modifier);
573	vhcr->opcode = cpu_to_be16((((u16) op_modifier) << 12) | (op & 0xfff));
574	vhcr->token = cpu_to_be16(CMD_POLL_TOKEN);
575	vhcr->status = 0;
576	vhcr->flags = !!(priv->cmd.use_events) << 6;
577
578	if (mlx4_is_master(dev)) {
579		ret = mlx4_master_process_vhcr(dev, dev->caps.function, vhcr);
580		if (!ret) {
581			if (out_is_imm) {
582				if (out_param)
583					*out_param =
584						be64_to_cpu(vhcr->out_param);
585				else {
586					mlx4_err(dev, "response expected while "
587						 "output mailbox is NULL for "
588						 "command 0x%x\n", op);
589					vhcr->status = CMD_STAT_BAD_PARAM;
590				}
591			}
592			ret = mlx4_status_to_errno(vhcr->status);
593		}
594	} else {
595		ret = mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_POST, 0,
596				    MLX4_COMM_TIME + timeout);
597		if (!ret) {
598			if (out_is_imm) {
599				if (out_param)
600					*out_param =
601						be64_to_cpu(vhcr->out_param);
602				else {
603					mlx4_err(dev, "response expected while "
604						 "output mailbox is NULL for "
605						 "command 0x%x\n", op);
606					vhcr->status = CMD_STAT_BAD_PARAM;
607				}
608			}
609			ret = mlx4_status_to_errno(vhcr->status);
610		} else
611			mlx4_err(dev, "failed execution of VHCR_POST command "
612				 "opcode %s (0x%x)\n", cmd_to_str(op), op);
613	}
614
615	mutex_unlock(&priv->cmd.slave_cmd_mutex);
616	return ret;
617}
618
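/*
 * Polling-mode HCR command: post the command with the poll token and
 * spin until the GO/toggle bits clear, then read the status and the
 * immediate out parameter (if any) back from the HCR.
 */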
619static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
620			 int out_is_imm, u32 in_modifier, u8 op_modifier,
621			 u16 op, unsigned long timeout)
622{
623	struct mlx4_priv *priv = mlx4_priv(dev);
624	void __iomem *hcr = priv->cmd.hcr;
625	int err = 0;
626	unsigned long end;
627	u32 stat;
628
629	down(&priv->cmd.poll_sem);
630
631	if (pci_channel_offline(dev->pdev)) {
632		/*
633		 * Device is going through error recovery
634		 * and cannot accept commands.
635		 */
636		err = -EIO;
637		goto out;
638	}
639
640	err = mlx4_cmd_post(dev, NULL, in_param, out_param ? *out_param : 0,
641			    in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0);
642	if (err)
643		goto out;
644
645	end = msecs_to_jiffies(timeout) + jiffies;
646	while (cmd_pending(dev) && time_before(jiffies, end)) {
647		if (pci_channel_offline(dev->pdev)) {
648			/*
649			 * Device is going through error recovery
650			 * and cannot accept commands.
651			 */
652			err = -EIO;
653			goto out;
654		}
655
656		cond_resched();
657	}
658
659	if (cmd_pending(dev)) {
660		mlx4_warn(dev, "command %s (0x%x) timed out (go bit not cleared)\n",
661			  cmd_to_str(op), op);
662		err = -ETIMEDOUT;
663		goto out;
664	}
665
666	if (out_is_imm)
667		*out_param =
668			(u64) be32_to_cpu((__force __be32)
669					  __raw_readl(hcr + HCR_OUT_PARAM_OFFSET)) << 32 |
670			(u64) be32_to_cpu((__force __be32)
671					  __raw_readl(hcr + HCR_OUT_PARAM_OFFSET + 4));
672	stat = be32_to_cpu((__force __be32)
673			   __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24;
674	err = mlx4_status_to_errno(stat);
675	if (err)
676		mlx4_err(dev, "command %s (0x%x) failed: fw status = 0x%x\n",
677			 cmd_to_str(op), op, stat);
678
679out:
680	up(&priv->cmd.poll_sem);
681	return err;
682}
683
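/*
 * Command completion handler, called from the command EQ: look up the
 * context by token, record status and out_param, and wake the waiter.
 */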
684void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param)
685{
686	struct mlx4_priv *priv = mlx4_priv(dev);
687	struct mlx4_cmd_context *context =
688		&priv->cmd.context[token & priv->cmd.token_mask];
689
690	/* previously timed out command completing at long last */
691	if (token != context->token)
692		return;
693
694	context->fw_status = status;
695	context->result    = mlx4_status_to_errno(status);
696	context->out_param = out_param;
697
698	complete(&context->done);
699}
700
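/*
 * Event-mode HCR command: allocate a command context, post the command
 * with the E bit set and sleep until mlx4_cmd_event() completes it or
 * the timeout expires.
 */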
701static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
702			 int out_is_imm, u32 in_modifier, u8 op_modifier,
703			 u16 op, unsigned long timeout)
704{
705	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
706	struct mlx4_cmd_context *context;
707	int err = 0;
708	int go_bit = 0, t_bit = 0, stat_err;
709	u32 status = 0;
710	struct timespec	ts1, ts2;
711	ktime_t t1, t2, delta;
712	s64 ds;
713
714	if (out_is_imm && !out_param)
715		return -EINVAL;
716
717	down(&cmd->event_sem);
718
719	spin_lock(&cmd->context_lock);
720	BUG_ON(cmd->free_head < 0);
721	context = &cmd->context[cmd->free_head];
722	context->token += cmd->token_mask + 1;
723	cmd->free_head = context->next;
724	spin_unlock(&cmd->context_lock);
725
726	init_completion(&context->done);
727
728	err = mlx4_cmd_post(dev, &ts1, in_param, out_param ? *out_param : 0,
729			    in_modifier, op_modifier, op, context->token, 1);
730	if (err)
731		goto out;
732
733	if (!wait_for_completion_timeout(&context->done,
734					 msecs_to_jiffies(timeout))) {
735		stat_err = get_status(dev, &status, &go_bit, &t_bit);
736		mlx4_warn(dev, "command %s (0x%x) timed out: in_param=0x%llx, "
737			  "in_mod=0x%x, op_mod=0x%x, get_status err=%d, "
738			  "status_reg=0x%x, go_bit=%d, t_bit=%d, toggle=0x%x\n",
739			  cmd_to_str(op), op, (unsigned long long) in_param, in_modifier,
740			  op_modifier, stat_err, status, go_bit, t_bit,
741			  mlx4_priv(dev)->cmd.toggle);
742		err = -EBUSY;
743		goto out;
744	}
745	if (mlx4_debug_level & MLX4_DEBUG_MASK_CMD_TIME) {
746		ktime_get_ts(&ts2);
747		t1 = timespec_to_ktime(ts1);
748		t2 = timespec_to_ktime(ts2);
749		delta = ktime_sub(t2, t1);
750		ds = ktime_to_ns(delta);
751		pr_info("mlx4: fw exec time for %s is %lld nsec\n", cmd_to_str(op), (long long) ds);
752	}
753
754	err = context->result;
755	if (err) {
756		mlx4_err(dev, "command %s (0x%x) failed: in_param=0x%llx, "
757			 "in_mod=0x%x, op_mod=0x%x, fw status = 0x%x\n",
758			 cmd_to_str(op), op, (unsigned long long) in_param, in_modifier,
759			 op_modifier, context->fw_status);
760
761		switch (context->fw_status) {
762		case CMD_STAT_BAD_PARAM:
763			mlx4_err(dev, "Parameter is not supported "
764			    "or is out of range\n");
765			break;
766		case CMD_STAT_EXCEED_LIM:
767			mlx4_err(dev, "Required capability exceeded "
768			    "device limits\n");
769			break;
770		default:
771			break;
772		}
773		goto out;
774	}
775
776	if (out_is_imm)
777		*out_param = context->out_param;
778
779out:
780	spin_lock(&cmd->context_lock);
781	context->next = cmd->free_head;
782	cmd->free_head = context - cmd->context;
783	spin_unlock(&cmd->context_lock);
784
785	up(&cmd->event_sem);
786	return err;
787}
788
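/*
 * Top-level command dispatcher: native commands on the master (or any
 * command on a single-function device) go straight to the HCR in event
 * or polling mode; everything else is tunneled through the VHCR.
 */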
789int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
790	       int out_is_imm, u32 in_modifier, u8 op_modifier,
791	       u16 op, unsigned long timeout, int native)
792{
793	if (pci_channel_offline(dev->pdev))
794		return -EIO;
795
796	if (!mlx4_is_mfunc(dev) || (native && mlx4_is_master(dev))) {
797		if (mlx4_priv(dev)->cmd.use_events)
798			return mlx4_cmd_wait(dev, in_param, out_param,
799					     out_is_imm, in_modifier,
800					     op_modifier, op, timeout);
801		else
802			return mlx4_cmd_poll(dev, in_param, out_param,
803					     out_is_imm, in_modifier,
804					     op_modifier, op, timeout);
805	}
806	return mlx4_slave_cmd(dev, in_param, out_param, out_is_imm,
807			      in_modifier, op_modifier, op, timeout);
808}
809EXPORT_SYMBOL_GPL(__mlx4_cmd);
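/*
 * Callers normally reach __mlx4_cmd() through the mlx4_cmd(),
 * mlx4_cmd_box() and mlx4_cmd_imm() wrappers from <linux/mlx4/cmd.h>.
 * As an illustrative example, bringing a port up looks roughly like:
 *	mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
 *		 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
 */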
810
811
812static int mlx4_ARM_COMM_CHANNEL(struct mlx4_dev *dev)
813{
814	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_ARM_COMM_CHANNEL,
815			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
816}
817
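/*
 * DMA a buffer between master and slave address spaces.  Both addresses
 * must be 4KB aligned, the slave id below 128 and the size a multiple
 * of 256 bytes, as enforced by the check below.
 */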
818static int mlx4_ACCESS_MEM(struct mlx4_dev *dev, u64 master_addr,
819			   int slave, u64 slave_addr,
820			   int size, int is_read)
821{
822	u64 in_param;
823	u64 out_param;
824
825	if ((slave_addr & 0xfff) | (master_addr & 0xfff) |
826	    (slave & ~0x7f) | (size & 0xff)) {
827		mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx "
828			      "master_addr:0x%llx slave_id:%d size:%d\n",
829			      (unsigned long long) slave_addr, (unsigned long long) master_addr, slave, size);
830		return -EINVAL;
831	}
832
833	if (is_read) {
834		in_param = (u64) slave | slave_addr;
835		out_param = (u64) dev->caps.function | master_addr;
836	} else {
837		in_param = (u64) dev->caps.function | master_addr;
838		out_param = (u64) slave | slave_addr;
839	}
840
841	return mlx4_cmd_imm(dev, in_param, &out_param, size, 0,
842			    MLX4_CMD_ACCESS_MEM,
843			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
844}
845
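/*
 * Helpers for pkey paravirtualization: read the physical pkey table,
 * one 32-entry block at a time, via MAD_IFC.
 */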
846static int query_pkey_block(struct mlx4_dev *dev, u8 port, u16 index, u16 *pkey,
847			       struct mlx4_cmd_mailbox *inbox,
848			       struct mlx4_cmd_mailbox *outbox)
849{
850	struct ib_smp *in_mad = (struct ib_smp *)(inbox->buf);
851	struct ib_smp *out_mad = (struct ib_smp *)(outbox->buf);
852	int err;
853	int i;
854
855	if (index & 0x1f)
856		return -EINVAL;
857
858	in_mad->attr_mod = cpu_to_be32(index / 32);
859
860	err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3,
861			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
862			   MLX4_CMD_NATIVE);
863	if (err)
864		return err;
865
866	for (i = 0; i < 32; ++i)
867		pkey[i] = be16_to_cpu(((__be16 *) out_mad->data)[i]);
868
869	return err;
870}
871
872static int get_full_pkey_table(struct mlx4_dev *dev, u8 port, u16 *table,
873			       struct mlx4_cmd_mailbox *inbox,
874			       struct mlx4_cmd_mailbox *outbox)
875{
876	int i;
877	int err;
878
879	for (i = 0; i < dev->caps.pkey_table_len[port]; i += 32) {
880		err = query_pkey_block(dev, port, i, table + i, inbox, outbox);
881		if (err)
882			return err;
883	}
884
885	return 0;
886}
887#define PORT_CAPABILITY_LOCATION_IN_SMP 20
888#define PORT_STATE_OFFSET 32
889
890static enum ib_port_state vf_port_state(struct mlx4_dev *dev, int port, int vf)
891{
892	if (mlx4_get_slave_port_state(dev, vf, port) == SLAVE_PORT_UP)
893		return IB_PORT_ACTIVE;
894	else
895		return IB_PORT_DOWN;
896}
897
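/*
 * Paravirtualize MAD_IFC for slaves: remap pkey and GUID queries to the
 * slave's view, patch PortInfo with the slave's port state and cap mask,
 * and reject subnet-management SETs from non-master functions.
 */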
898static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
899				struct mlx4_vhcr *vhcr,
900				struct mlx4_cmd_mailbox *inbox,
901				struct mlx4_cmd_mailbox *outbox,
902				struct mlx4_cmd_info *cmd)
903{
904	struct ib_smp *smp = inbox->buf;
905	u32 index;
906	u8 port;
907	u16 *table;
908	int err;
909	int vidx, pidx;
910	struct mlx4_priv *priv = mlx4_priv(dev);
911	struct ib_smp *outsmp = outbox->buf;
912	__be16 *outtab = (__be16 *)(outsmp->data);
913	__be32 slave_cap_mask;
914	__be64 slave_node_guid;
915	port = vhcr->in_modifier;
916
917	if (smp->base_version == 1 &&
918	    smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
919	    smp->class_version == 1) {
920		if (smp->method	== IB_MGMT_METHOD_GET) {
921			if (smp->attr_id == IB_SMP_ATTR_PKEY_TABLE) {
922				index = be32_to_cpu(smp->attr_mod);
923				if (port < 1 || port > dev->caps.num_ports)
924					return -EINVAL;
925				table = kcalloc(dev->caps.pkey_table_len[port], sizeof *table, GFP_KERNEL);
926				if (!table)
927					return -ENOMEM;
928				/* need to get the full pkey table because the paravirtualized
929				 * pkeys may be scattered among several pkey blocks.
930				 */
931				err = get_full_pkey_table(dev, port, table, inbox, outbox);
932				if (!err) {
933					for (vidx = index * 32; vidx < (index + 1) * 32; ++vidx) {
934						pidx = priv->virt2phys_pkey[slave][port - 1][vidx];
935						outtab[vidx % 32] = cpu_to_be16(table[pidx]);
936					}
937				}
938				kfree(table);
939				return err;
940			}
941			if (smp->attr_id == IB_SMP_ATTR_PORT_INFO) {
942				/* get the slave specific caps */
943				/* do the command */
944				err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
945					    vhcr->in_modifier, vhcr->op_modifier,
946					    vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
947				/* modify the response for slaves */
948				if (!err && slave != mlx4_master_func_num(dev)) {
949					u8 *state = outsmp->data + PORT_STATE_OFFSET;
950
951					*state = (*state & 0xf0) | vf_port_state(dev, port, slave);
952					slave_cap_mask = priv->mfunc.master.slave_state[slave].ib_cap_mask[port];
953					memcpy(outsmp->data + PORT_CAPABILITY_LOCATION_IN_SMP, &slave_cap_mask, 4);
954				}
955				return err;
956			}
957			if (smp->attr_id == IB_SMP_ATTR_GUID_INFO) {
958				/* compute slave's gid block */
959				smp->attr_mod = cpu_to_be32(slave / 8);
960				/* execute cmd */
961				err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
962					     vhcr->in_modifier, vhcr->op_modifier,
963					     vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
964				if (!err) {
965					/* if needed, move slave gid to index 0 */
966					if (slave % 8)
967						memcpy(outsmp->data,
968						       outsmp->data + (slave % 8) * 8, 8);
969					/* delete all other gids */
970					memset(outsmp->data + 8, 0, 56);
971				}
972				return err;
973			}
974			if (smp->attr_id == IB_SMP_ATTR_NODE_INFO) {
975				err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
976					     vhcr->in_modifier, vhcr->op_modifier,
977					     vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
978				if (!err) {
979					slave_node_guid =  mlx4_get_slave_node_guid(dev, slave);
980					memcpy(outsmp->data + 12, &slave_node_guid, 8);
981				}
982				return err;
983			}
984		}
985	}
986	if (slave != mlx4_master_func_num(dev) &&
987	    ((smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) ||
988	     (smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
989	      smp->method == IB_MGMT_METHOD_SET))) {
990		mlx4_err(dev, "slave %d is trying to execute a Subnet MGMT MAD, "
991			 "class 0x%x, method 0x%x for attr 0x%x. Rejecting\n",
992			 slave, smp->mgmt_class, smp->method,
993			 be16_to_cpu(smp->attr_id));
994		return -EPERM;
995	}
996	/*default:*/
997	return mlx4_cmd_box(dev, inbox->dma, outbox->dma,
998				    vhcr->in_modifier, vhcr->op_modifier,
999				    vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
1000}
1001
1002static int MLX4_CMD_DIAG_RPRT_wrapper(struct mlx4_dev *dev, int slave,
1003		     struct mlx4_vhcr *vhcr,
1004		     struct mlx4_cmd_mailbox *inbox,
1005		     struct mlx4_cmd_mailbox *outbox,
1006		     struct mlx4_cmd_info *cmd)
1007{
1008	return -EPERM;
1009}
1010
1011static int MLX4_CMD_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
1012		     struct mlx4_vhcr *vhcr,
1013		     struct mlx4_cmd_mailbox *inbox,
1014		     struct mlx4_cmd_mailbox *outbox,
1015		     struct mlx4_cmd_info *cmd)
1016{
1017	return -EPERM;
1018}
1019
1020int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave,
1021		     struct mlx4_vhcr *vhcr,
1022		     struct mlx4_cmd_mailbox *inbox,
1023		     struct mlx4_cmd_mailbox *outbox,
1024		     struct mlx4_cmd_info *cmd)
1025{
1026	u64 in_param;
1027	u64 out_param;
1028	int err;
1029
1030	in_param = cmd->has_inbox ? (u64) inbox->dma : vhcr->in_param;
1031	out_param = cmd->has_outbox ? (u64) outbox->dma : vhcr->out_param;
1032	if (cmd->encode_slave_id) {
1033		in_param &= 0xffffffffffffff00ll;
1034		in_param |= slave;
1035	}
1036
1037	err = __mlx4_cmd(dev, in_param, &out_param, cmd->out_is_imm,
1038			 vhcr->in_modifier, vhcr->op_modifier, vhcr->op,
1039			 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1040
1041	if (cmd->out_is_imm)
1042		vhcr->out_param = out_param;
1043
1044	return err;
1045}
1046
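/*
 * Per-opcode dispatch table for commands arriving from slaves: mailbox
 * layout, optional verifier and the wrapper that paravirtualizes the
 * command.  Opcodes not listed here are rejected by
 * mlx4_master_process_vhcr().
 */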
1047static struct mlx4_cmd_info cmd_info[] = {
1048	{
1049		.opcode = MLX4_CMD_QUERY_FW,
1050		.has_inbox = false,
1051		.has_outbox = true,
1052		.out_is_imm = false,
1053		.encode_slave_id = false,
1054		.verify = NULL,
1055		.wrapper = mlx4_QUERY_FW_wrapper
1056	},
1057	{
1058		.opcode = MLX4_CMD_QUERY_HCA,
1059		.has_inbox = false,
1060		.has_outbox = true,
1061		.out_is_imm = false,
1062		.encode_slave_id = false,
1063		.verify = NULL,
1064		.wrapper = NULL
1065	},
1066	{
1067		.opcode = MLX4_CMD_QUERY_DEV_CAP,
1068		.has_inbox = false,
1069		.has_outbox = true,
1070		.out_is_imm = false,
1071		.encode_slave_id = false,
1072		.verify = NULL,
1073		.wrapper = mlx4_QUERY_DEV_CAP_wrapper
1074	},
1075	{
1076		.opcode = MLX4_CMD_QUERY_FUNC_CAP,
1077		.has_inbox = false,
1078		.has_outbox = true,
1079		.out_is_imm = false,
1080		.encode_slave_id = false,
1081		.verify = NULL,
1082		.wrapper = mlx4_QUERY_FUNC_CAP_wrapper
1083	},
1084	{
1085		.opcode = MLX4_CMD_QUERY_ADAPTER,
1086		.has_inbox = false,
1087		.has_outbox = true,
1088		.out_is_imm = false,
1089		.encode_slave_id = false,
1090		.verify = NULL,
1091		.wrapper = NULL
1092	},
1093	{
1094		.opcode = MLX4_CMD_INIT_PORT,
1095		.has_inbox = false,
1096		.has_outbox = false,
1097		.out_is_imm = false,
1098		.encode_slave_id = false,
1099		.verify = NULL,
1100		.wrapper = mlx4_INIT_PORT_wrapper
1101	},
1102	{
1103		.opcode = MLX4_CMD_CLOSE_PORT,
1104		.has_inbox = false,
1105		.has_outbox = false,
1106		.out_is_imm  = false,
1107		.encode_slave_id = false,
1108		.verify = NULL,
1109		.wrapper = mlx4_CLOSE_PORT_wrapper
1110	},
1111	{
1112		.opcode = MLX4_CMD_QUERY_PORT,
1113		.has_inbox = false,
1114		.has_outbox = true,
1115		.out_is_imm = false,
1116		.encode_slave_id = false,
1117		.verify = NULL,
1118		.wrapper = mlx4_QUERY_PORT_wrapper
1119	},
1120	{
1121		.opcode = MLX4_CMD_SET_PORT,
1122		.has_inbox = true,
1123		.has_outbox = false,
1124		.out_is_imm = false,
1125		.encode_slave_id = false,
1126		.verify = NULL,
1127		.wrapper = mlx4_SET_PORT_wrapper
1128	},
1129	{
1130		.opcode = MLX4_CMD_MAP_EQ,
1131		.has_inbox = false,
1132		.has_outbox = false,
1133		.out_is_imm = false,
1134		.encode_slave_id = false,
1135		.verify = NULL,
1136		.wrapper = mlx4_MAP_EQ_wrapper
1137	},
1138	{
1139		.opcode = MLX4_CMD_SW2HW_EQ,
1140		.has_inbox = true,
1141		.has_outbox = false,
1142		.out_is_imm = false,
1143		.encode_slave_id = true,
1144		.verify = NULL,
1145		.wrapper = mlx4_SW2HW_EQ_wrapper
1146	},
1147	{
1148		.opcode = MLX4_CMD_HW_HEALTH_CHECK,
1149		.has_inbox = false,
1150		.has_outbox = false,
1151		.out_is_imm = false,
1152		.encode_slave_id = false,
1153		.verify = NULL,
1154		.wrapper = NULL
1155	},
1156	{
1157		.opcode = MLX4_CMD_DIAG_RPRT,
1158		.has_inbox = false,
1159		.has_outbox = false,
1160		.out_is_imm = false,
1161		.encode_slave_id = false,
1162		.skip_err_print = true,
1163		.verify = NULL,
1164		.wrapper = MLX4_CMD_DIAG_RPRT_wrapper
1165	},
1166	{
1167		.opcode = MLX4_CMD_NOP,
1168		.has_inbox = false,
1169		.has_outbox = false,
1170		.out_is_imm = false,
1171		.encode_slave_id = false,
1172		.verify = NULL,
1173		.wrapper = NULL
1174	},
1175	{
1176		.opcode = MLX4_CMD_ALLOC_RES,
1177		.has_inbox = false,
1178		.has_outbox = false,
1179		.out_is_imm = true,
1180		.encode_slave_id = false,
1181		.verify = NULL,
1182		.wrapper = mlx4_ALLOC_RES_wrapper
1183	},
1184	{
1185		.opcode = MLX4_CMD_FREE_RES,
1186		.has_inbox = false,
1187		.has_outbox = false,
1188		.out_is_imm = false,
1189		.encode_slave_id = false,
1190		.verify = NULL,
1191		.wrapper = mlx4_FREE_RES_wrapper
1192	},
1193	{
1194		.opcode = MLX4_CMD_SW2HW_MPT,
1195		.has_inbox = true,
1196		.has_outbox = false,
1197		.out_is_imm = false,
1198		.encode_slave_id = true,
1199		.verify = NULL,
1200		.wrapper = mlx4_SW2HW_MPT_wrapper
1201	},
1202	{
1203		.opcode = MLX4_CMD_QUERY_MPT,
1204		.has_inbox = false,
1205		.has_outbox = true,
1206		.out_is_imm = false,
1207		.encode_slave_id = false,
1208		.verify = NULL,
1209		.wrapper = mlx4_QUERY_MPT_wrapper
1210	},
1211	{
1212		.opcode = MLX4_CMD_HW2SW_MPT,
1213		.has_inbox = false,
1214		.has_outbox = false,
1215		.out_is_imm = false,
1216		.encode_slave_id = false,
1217		.verify = NULL,
1218		.wrapper = mlx4_HW2SW_MPT_wrapper
1219	},
1220	{
1221		.opcode = MLX4_CMD_READ_MTT,
1222		.has_inbox = false,
1223		.has_outbox = true,
1224		.out_is_imm = false,
1225		.encode_slave_id = false,
1226		.verify = NULL,
1227		.wrapper = NULL
1228	},
1229	{
1230		.opcode = MLX4_CMD_WRITE_MTT,
1231		.has_inbox = true,
1232		.has_outbox = false,
1233		.out_is_imm = false,
1234		.encode_slave_id = false,
1235		.verify = NULL,
1236		.wrapper = mlx4_WRITE_MTT_wrapper
1237	},
1238	{
1239		.opcode = MLX4_CMD_SYNC_TPT,
1240		.has_inbox = true,
1241		.has_outbox = false,
1242		.out_is_imm = false,
1243		.encode_slave_id = false,
1244		.verify = NULL,
1245		.wrapper = NULL
1246	},
1247	{
1248		.opcode = MLX4_CMD_HW2SW_EQ,
1249		.has_inbox = false,
1250		.has_outbox = true,
1251		.out_is_imm = false,
1252		.encode_slave_id = true,
1253		.verify = NULL,
1254		.wrapper = mlx4_HW2SW_EQ_wrapper
1255	},
1256	{
1257		.opcode = MLX4_CMD_QUERY_EQ,
1258		.has_inbox = false,
1259		.has_outbox = true,
1260		.out_is_imm = false,
1261		.encode_slave_id = true,
1262		.verify = NULL,
1263		.wrapper = mlx4_QUERY_EQ_wrapper
1264	},
1265	{
1266		.opcode = MLX4_CMD_SW2HW_CQ,
1267		.has_inbox = true,
1268		.has_outbox = false,
1269		.out_is_imm = false,
1270		.encode_slave_id = true,
1271		.verify = NULL,
1272		.wrapper = mlx4_SW2HW_CQ_wrapper
1273	},
1274	{
1275		.opcode = MLX4_CMD_HW2SW_CQ,
1276		.has_inbox = false,
1277		.has_outbox = false,
1278		.out_is_imm = false,
1279		.encode_slave_id = false,
1280		.verify = NULL,
1281		.wrapper = mlx4_HW2SW_CQ_wrapper
1282	},
1283	{
1284		.opcode = MLX4_CMD_QUERY_CQ,
1285		.has_inbox = false,
1286		.has_outbox = true,
1287		.out_is_imm = false,
1288		.encode_slave_id = false,
1289		.verify = NULL,
1290		.wrapper = mlx4_QUERY_CQ_wrapper
1291	},
1292	{
1293		.opcode = MLX4_CMD_MODIFY_CQ,
1294		.has_inbox = true,
1295		.has_outbox = false,
1296		.out_is_imm = true,
1297		.encode_slave_id = false,
1298		.verify = NULL,
1299		.wrapper = mlx4_MODIFY_CQ_wrapper
1300	},
1301	{
1302		.opcode = MLX4_CMD_SW2HW_SRQ,
1303		.has_inbox = true,
1304		.has_outbox = false,
1305		.out_is_imm = false,
1306		.encode_slave_id = true,
1307		.verify = NULL,
1308		.wrapper = mlx4_SW2HW_SRQ_wrapper
1309	},
1310	{
1311		.opcode = MLX4_CMD_HW2SW_SRQ,
1312		.has_inbox = false,
1313		.has_outbox = false,
1314		.out_is_imm = false,
1315		.encode_slave_id = false,
1316		.verify = NULL,
1317		.wrapper = mlx4_HW2SW_SRQ_wrapper
1318	},
1319	{
1320		.opcode = MLX4_CMD_QUERY_SRQ,
1321		.has_inbox = false,
1322		.has_outbox = true,
1323		.out_is_imm = false,
1324		.encode_slave_id = false,
1325		.verify = NULL,
1326		.wrapper = mlx4_QUERY_SRQ_wrapper
1327	},
1328	{
1329		.opcode = MLX4_CMD_ARM_SRQ,
1330		.has_inbox = false,
1331		.has_outbox = false,
1332		.out_is_imm = false,
1333		.encode_slave_id = false,
1334		.verify = NULL,
1335		.wrapper = mlx4_ARM_SRQ_wrapper
1336	},
1337	{
1338		.opcode = MLX4_CMD_RST2INIT_QP,
1339		.has_inbox = true,
1340		.has_outbox = false,
1341		.out_is_imm = false,
1342		.encode_slave_id = true,
1343		.verify = NULL,
1344		.wrapper = mlx4_RST2INIT_QP_wrapper
1345	},
1346	{
1347		.opcode = MLX4_CMD_INIT2INIT_QP,
1348		.has_inbox = true,
1349		.has_outbox = false,
1350		.out_is_imm = false,
1351		.encode_slave_id = false,
1352		.verify = NULL,
1353		.wrapper = mlx4_INIT2INIT_QP_wrapper
1354	},
1355	{
1356		.opcode = MLX4_CMD_INIT2RTR_QP,
1357		.has_inbox = true,
1358		.has_outbox = false,
1359		.out_is_imm = false,
1360		.encode_slave_id = false,
1361		.verify = NULL,
1362		.wrapper = mlx4_INIT2RTR_QP_wrapper
1363	},
1364	{
1365		.opcode = MLX4_CMD_RTR2RTS_QP,
1366		.has_inbox = true,
1367		.has_outbox = false,
1368		.out_is_imm = false,
1369		.encode_slave_id = false,
1370		.verify = NULL,
1371		.wrapper = mlx4_RTR2RTS_QP_wrapper
1372	},
1373	{
1374		.opcode = MLX4_CMD_RTS2RTS_QP,
1375		.has_inbox = true,
1376		.has_outbox = false,
1377		.out_is_imm = false,
1378		.encode_slave_id = false,
1379		.verify = NULL,
1380		.wrapper = mlx4_RTS2RTS_QP_wrapper
1381	},
1382	{
1383		.opcode = MLX4_CMD_SQERR2RTS_QP,
1384		.has_inbox = true,
1385		.has_outbox = false,
1386		.out_is_imm = false,
1387		.encode_slave_id = false,
1388		.verify = NULL,
1389		.wrapper = mlx4_SQERR2RTS_QP_wrapper
1390	},
1391	{
1392		.opcode = MLX4_CMD_2ERR_QP,
1393		.has_inbox = false,
1394		.has_outbox = false,
1395		.out_is_imm = false,
1396		.encode_slave_id = false,
1397		.verify = NULL,
1398		.wrapper = mlx4_GEN_QP_wrapper
1399	},
1400	{
1401		.opcode = MLX4_CMD_RTS2SQD_QP,
1402		.has_inbox = false,
1403		.has_outbox = false,
1404		.out_is_imm = false,
1405		.encode_slave_id = false,
1406		.verify = NULL,
1407		.wrapper = mlx4_GEN_QP_wrapper
1408	},
1409	{
1410		.opcode = MLX4_CMD_SQD2SQD_QP,
1411		.has_inbox = true,
1412		.has_outbox = false,
1413		.out_is_imm = false,
1414		.encode_slave_id = false,
1415		.verify = NULL,
1416		.wrapper = mlx4_SQD2SQD_QP_wrapper
1417	},
1418	{
1419		.opcode = MLX4_CMD_SQD2RTS_QP,
1420		.has_inbox = true,
1421		.has_outbox = false,
1422		.out_is_imm = false,
1423		.encode_slave_id = false,
1424		.verify = NULL,
1425		.wrapper = mlx4_SQD2RTS_QP_wrapper
1426	},
1427	{
1428		.opcode = MLX4_CMD_2RST_QP,
1429		.has_inbox = false,
1430		.has_outbox = false,
1431		.out_is_imm = false,
1432		.encode_slave_id = false,
1433		.verify = NULL,
1434		.wrapper = mlx4_2RST_QP_wrapper
1435	},
1436	{
1437		.opcode = MLX4_CMD_QUERY_QP,
1438		.has_inbox = false,
1439		.has_outbox = true,
1440		.out_is_imm = false,
1441		.encode_slave_id = false,
1442		.verify = NULL,
1443		.wrapper = mlx4_GEN_QP_wrapper
1444	},
1445	{
1446		.opcode = MLX4_CMD_SUSPEND_QP,
1447		.has_inbox = false,
1448		.has_outbox = false,
1449		.out_is_imm = false,
1450		.encode_slave_id = false,
1451		.verify = NULL,
1452		.wrapper = mlx4_GEN_QP_wrapper
1453	},
1454	{
1455		.opcode = MLX4_CMD_UNSUSPEND_QP,
1456		.has_inbox = false,
1457		.has_outbox = false,
1458		.out_is_imm = false,
1459		.encode_slave_id = false,
1460		.verify = NULL,
1461		.wrapper = mlx4_GEN_QP_wrapper
1462	},
1463	{
1464		.opcode = MLX4_CMD_UPDATE_QP,
1465		.has_inbox = false,
1466		.has_outbox = false,
1467		.out_is_imm = false,
1468		.encode_slave_id = false,
1469		.skip_err_print = true,
1470		.verify = NULL,
1471		.wrapper = MLX4_CMD_UPDATE_QP_wrapper
1472	},
1473	{
1474		.opcode = MLX4_CMD_CONF_SPECIAL_QP,
1475		.has_inbox = false,
1476		.has_outbox = false,
1477		.out_is_imm = false,
1478		.encode_slave_id = false,
1479		.verify = NULL, /* XXX verify: only demux can do this */
1480		.wrapper = NULL
1481	},
1482	{
1483		.opcode = MLX4_CMD_MAD_IFC,
1484		.has_inbox = true,
1485		.has_outbox = true,
1486		.out_is_imm = false,
1487		.encode_slave_id = false,
1488		.verify = NULL,
1489		.wrapper = mlx4_MAD_IFC_wrapper
1490	},
1491	{
1492		.opcode = MLX4_CMD_QUERY_IF_STAT,
1493		.has_inbox = false,
1494		.has_outbox = true,
1495		.out_is_imm = false,
1496		.encode_slave_id = false,
1497		.verify = NULL,
1498		.wrapper = mlx4_QUERY_IF_STAT_wrapper
1499	},
1500	/* Native multicast commands are not available for guests */
1501	{
1502		.opcode = MLX4_CMD_QP_ATTACH,
1503		.has_inbox = true,
1504		.has_outbox = false,
1505		.out_is_imm = false,
1506		.encode_slave_id = false,
1507		.verify = NULL,
1508		.wrapper = mlx4_QP_ATTACH_wrapper
1509	},
1510	{
1511		.opcode = MLX4_CMD_PROMISC,
1512		.has_inbox = false,
1513		.has_outbox = false,
1514		.out_is_imm = false,
1515		.encode_slave_id = false,
1516		.verify = NULL,
1517		.wrapper = mlx4_PROMISC_wrapper
1518	},
1519	/* Ethernet specific commands */
1520	{
1521		.opcode = MLX4_CMD_SET_VLAN_FLTR,
1522		.has_inbox = true,
1523		.has_outbox = false,
1524		.out_is_imm = false,
1525		.encode_slave_id = false,
1526		.verify = NULL,
1527		.wrapper = mlx4_SET_VLAN_FLTR_wrapper
1528	},
1529	{
1530		.opcode = MLX4_CMD_SET_MCAST_FLTR,
1531		.has_inbox = false,
1532		.has_outbox = false,
1533		.out_is_imm = false,
1534		.encode_slave_id = false,
1535		.verify = NULL,
1536		.wrapper = mlx4_SET_MCAST_FLTR_wrapper
1537	},
1538	{
1539		.opcode = MLX4_CMD_DUMP_ETH_STATS,
1540		.has_inbox = false,
1541		.has_outbox = true,
1542		.out_is_imm = false,
1543		.encode_slave_id = false,
1544		.verify = NULL,
1545		.wrapper = mlx4_DUMP_ETH_STATS_wrapper
1546	},
1547	{
1548		.opcode = MLX4_CMD_INFORM_FLR_DONE,
1549		.has_inbox = false,
1550		.has_outbox = false,
1551		.out_is_imm = false,
1552		.encode_slave_id = false,
1553		.verify = NULL,
1554		.wrapper = NULL
1555	},
1556	/* flow steering commands */
1557	{
1558		.opcode = MLX4_QP_FLOW_STEERING_ATTACH,
1559		.has_inbox = true,
1560		.has_outbox = false,
1561		.out_is_imm = true,
1562		.encode_slave_id = false,
1563		.verify = NULL,
1564		.wrapper = mlx4_QP_FLOW_STEERING_ATTACH_wrapper
1565	},
1566	{
1567		.opcode = MLX4_QP_FLOW_STEERING_DETACH,
1568		.has_inbox = false,
1569		.has_outbox = false,
1570		.out_is_imm = false,
1571		.encode_slave_id = false,
1572		.verify = NULL,
1573		.wrapper = mlx4_QP_FLOW_STEERING_DETACH_wrapper
1574	},
1575	/* wol commands */
1576	{
1577		.opcode = MLX4_CMD_MOD_STAT_CFG,
1578		.has_inbox = false,
1579		.has_outbox = false,
1580		.out_is_imm = false,
1581		.encode_slave_id = false,
1582		.skip_err_print = true,
1583		.verify = NULL,
1584		.wrapper = mlx4_MOD_STAT_CFG_wrapper
1585	},
1586};
1587
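/*
 * Master-side execution of a slave's VHCR: DMA in the VHCR and inbox,
 * look up the opcode in cmd_info[] and run its wrapper (or execute the
 * command natively), then DMA the outbox and status back to the slave.
 */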
1588static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
1589				    struct mlx4_vhcr_cmd *in_vhcr)
1590{
1591	struct mlx4_priv *priv = mlx4_priv(dev);
1592	struct mlx4_cmd_info *cmd = NULL;
1593	struct mlx4_vhcr_cmd *vhcr_cmd = in_vhcr ? in_vhcr : priv->mfunc.vhcr;
1594	struct mlx4_vhcr *vhcr;
1595	struct mlx4_cmd_mailbox *inbox = NULL;
1596	struct mlx4_cmd_mailbox *outbox = NULL;
1597	u64 in_param;
1598	u64 out_param;
1599	int ret = 0;
1600	int i;
1601	int err = 0;
1602
1603	/* Create sw representation of Virtual HCR */
1604	vhcr = kzalloc(sizeof(struct mlx4_vhcr), GFP_KERNEL);
1605	if (!vhcr)
1606		return -ENOMEM;
1607
1608	/* DMA in the vHCR */
1609	if (!in_vhcr) {
1610		ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
1611				      priv->mfunc.master.slave_state[slave].vhcr_dma,
1612				      ALIGN(sizeof(struct mlx4_vhcr_cmd),
1613					    MLX4_ACCESS_MEM_ALIGN), 1);
1614		if (ret) {
1615			mlx4_err(dev, "%s: Failed reading vhcr, "
1616				 "ret: 0x%x\n", __func__, ret);
1617			kfree(vhcr);
1618			return ret;
1619		}
1620	}
1621
1622	/* Fill SW VHCR fields */
1623	vhcr->in_param = be64_to_cpu(vhcr_cmd->in_param);
1624	vhcr->out_param = be64_to_cpu(vhcr_cmd->out_param);
1625	vhcr->in_modifier = be32_to_cpu(vhcr_cmd->in_modifier);
1626	vhcr->token = be16_to_cpu(vhcr_cmd->token);
1627	vhcr->op = be16_to_cpu(vhcr_cmd->opcode) & 0xfff;
1628	vhcr->op_modifier = (u8) (be16_to_cpu(vhcr_cmd->opcode) >> 12);
1629	vhcr->e_bit = vhcr_cmd->flags & (1 << 6);
1630
1631	/* Lookup command */
1632	for (i = 0; i < ARRAY_SIZE(cmd_info); ++i) {
1633		if (vhcr->op == cmd_info[i].opcode) {
1634			cmd = &cmd_info[i];
1635			break;
1636		}
1637	}
1638	if (!cmd) {
1639		mlx4_err(dev, "unparavirt command: %s (0x%x) received from slave:%d\n",
1640			 cmd_to_str(vhcr->op), vhcr->op, slave);
1641		vhcr_cmd->status = CMD_STAT_BAD_PARAM;
1642		goto out_status;
1643	}
1644
1645	/* Read inbox */
1646	if (cmd->has_inbox) {
1647		vhcr->in_param &= INBOX_MASK;
1648		inbox = mlx4_alloc_cmd_mailbox(dev);
1649		if (IS_ERR(inbox)) {
1650			vhcr_cmd->status = CMD_STAT_BAD_SIZE;
1651			inbox = NULL;
1652			goto out_status;
1653		}
1654
1655		if (mlx4_ACCESS_MEM(dev, inbox->dma, slave,
1656				    vhcr->in_param,
1657				    MLX4_MAILBOX_SIZE, 1)) {
1658			mlx4_err(dev, "%s: Failed reading inbox for cmd %s (0x%x)\n",
1659				 __func__, cmd_to_str(cmd->opcode), cmd->opcode);
1660			vhcr_cmd->status = CMD_STAT_INTERNAL_ERR;
1661			goto out_status;
1662		}
1663	}
1664
1665	/* Apply permission and bound checks if applicable */
1666	if (cmd->verify && cmd->verify(dev, slave, vhcr, inbox)) {
1667		mlx4_warn(dev, "Command %s (0x%x) from slave: %d failed protection "
1668			  "checks for resource_id: %d\n", cmd_to_str(vhcr->op),
1669			  vhcr->op, slave, vhcr->in_modifier);
1670		vhcr_cmd->status = CMD_STAT_BAD_OP;
1671		goto out_status;
1672	}
1673
1674	/* Allocate outbox */
1675	if (cmd->has_outbox) {
1676		outbox = mlx4_alloc_cmd_mailbox(dev);
1677		if (IS_ERR(outbox)) {
1678			vhcr_cmd->status = CMD_STAT_BAD_SIZE;
1679			outbox = NULL;
1680			goto out_status;
1681		}
1682	}
1683
1684	/* Execute the command! */
1685	if (cmd->wrapper) {
1686		err = cmd->wrapper(dev, slave, vhcr, inbox, outbox,
1687				   cmd);
1688		if (cmd->out_is_imm)
1689			vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
1690	} else {
1691		in_param = cmd->has_inbox ? (u64) inbox->dma :
1692			vhcr->in_param;
1693		out_param = cmd->has_outbox ? (u64) outbox->dma :
1694			vhcr->out_param;
1695		err = __mlx4_cmd(dev, in_param, &out_param,
1696				 cmd->out_is_imm, vhcr->in_modifier,
1697				 vhcr->op_modifier, vhcr->op,
1698				 MLX4_CMD_TIME_CLASS_A,
1699				 MLX4_CMD_NATIVE);
1700
1701		if (cmd->out_is_imm) {
1702			vhcr->out_param = out_param;
1703			vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
1704		}
1705	}
1706
1707	if (err) {
1708		if (!cmd->skip_err_print)
1709			mlx4_warn(dev, "vhcr command %s (0x%x) slave:%d "
1710				  "in_param 0x%llx in_mod=0x%x, op_mod=0x%x "
1711				  "failed with error:%d, status %d\n",
1712				  cmd_to_str(vhcr->op), vhcr->op, slave,
1713				  (unsigned long long) vhcr->in_param, vhcr->in_modifier,
1714				  vhcr->op_modifier, vhcr->errno, err);
1715		vhcr_cmd->status = mlx4_errno_to_status(err);
1716		goto out_status;
1717	}
1718
1719
1720	/* Write outbox if command completed successfully */
1721	if (cmd->has_outbox && !vhcr_cmd->status) {
1722		ret = mlx4_ACCESS_MEM(dev, outbox->dma, slave,
1723				      vhcr->out_param,
1724				      MLX4_MAILBOX_SIZE, MLX4_CMD_WRAPPED);
1725		if (ret) {
1726			/* If we failed to write back the outbox after the
1727			 * command was successfully executed, we must fail this
1728			 * slave, as it is now in an undefined state */
1729			mlx4_err(dev, "%s: Failed writing outbox\n", __func__);
1730			goto out;
1731		}
1732	}
1733
1734out_status:
1735	/* DMA back vhcr result */
1736	if (!in_vhcr) {
1737		ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
1738				      priv->mfunc.master.slave_state[slave].vhcr_dma,
1739				      ALIGN(sizeof(struct mlx4_vhcr),
1740					    MLX4_ACCESS_MEM_ALIGN),
1741				      MLX4_CMD_WRAPPED);
1742		if (ret)
1743			mlx4_err(dev, "%s: Failed writing vhcr result\n",
1744				 __func__);
1745		else if (vhcr->e_bit &&
1746			 mlx4_GEN_EQE(dev, slave, &priv->mfunc.master.cmd_eqe))
1747				mlx4_warn(dev, "Failed to generate command completion "
1748					  "eqe for slave %d\n", slave);
1749	}
1750
1751out:
1752	kfree(vhcr);
1753	mlx4_free_cmd_mailbox(dev, inbox);
1754	mlx4_free_cmd_mailbox(dev, outbox);
1755	return ret;
1756}
1757
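/*
 * Apply a new admin VLAN/QoS setting to a running VF by queueing a work
 * item that updates the VF's QPs with UPDATE_QP.
 */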
1758static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
1759					    int slave, int port)
1760{
1761	struct mlx4_vport_oper_state *vp_oper;
1762	struct mlx4_vport_state *vp_admin;
1763	struct mlx4_vf_immed_vlan_work *work;
1764	int err;
1765	int admin_vlan_ix = NO_INDX;
1766
1767	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
1768	vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
1769
1770	if (vp_oper->state.default_vlan == vp_admin->default_vlan &&
1771	    vp_oper->state.default_qos == vp_admin->default_qos)
1772		return 0;
1773
1774	work = kzalloc(sizeof(*work), GFP_KERNEL);
1775	if (!work)
1776		return -ENOMEM;
1777
1778	if (vp_oper->state.default_vlan != vp_admin->default_vlan) {
1779		if (MLX4_VGT != vp_admin->default_vlan) {
1780			err = __mlx4_register_vlan(&priv->dev, port,
1781						   vp_admin->default_vlan,
1782						   &admin_vlan_ix);
1783			if (err) {
1784				mlx4_warn((&priv->dev),
1785					  "No vlan resources slave %d, port %d\n",
1786					  slave, port);
1787				kfree(work);
1788				return err;
1789			}
1790		} else {
1791			admin_vlan_ix = NO_INDX;
1792		}
1793		work->flags |= MLX4_VF_IMMED_VLAN_FLAG_VLAN;
1794		mlx4_dbg((&(priv->dev)),
1795			 "alloc vlan %d idx  %d slave %d port %d\n",
1796			 (int)(vp_admin->default_vlan),
1797			 admin_vlan_ix, slave, port);
1798	}
1799
1800	/* save original vlan ix and vlan id */
1801	work->orig_vlan_id = vp_oper->state.default_vlan;
1802	work->orig_vlan_ix = vp_oper->vlan_idx;
1803
1804	/* handle new qos */
1805	if (vp_oper->state.default_qos != vp_admin->default_qos)
1806		work->flags |= MLX4_VF_IMMED_VLAN_FLAG_QOS;
1807
1808	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN)
1809		vp_oper->vlan_idx = admin_vlan_ix;
1810
1811	vp_oper->state.default_vlan = vp_admin->default_vlan;
1812	vp_oper->state.default_qos = vp_admin->default_qos;
1813
1814	/* iterate over QPs owned by this slave, using UPDATE_QP */
1815	work->port = port;
1816	work->slave = slave;
1817	work->qos = vp_oper->state.default_qos;
1818	work->vlan_id = vp_oper->state.default_vlan;
1819	work->vlan_ix = vp_oper->vlan_idx;
1820	work->priv = priv;
1821	INIT_WORK(&work->work, mlx4_vf_immed_vlan_work_handler);
1822	queue_work(priv->mfunc.master.comm_wq, &work->work);
1823
1824	return 0;
1825}
1826
1827
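/*
 * Copy each port's admin vport state to its operational state when a
 * slave comes up, registering the default VLAN and, if spoof checking
 * is enabled, the MAC address.
 */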
1828static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
1829{
1830	int port, err;
1831	struct mlx4_vport_state *vp_admin;
1832	struct mlx4_vport_oper_state *vp_oper;
1833
1834	for (port = 1; port <= MLX4_MAX_PORTS; port++) {
1835		vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
1836		vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
1837		vp_oper->state = *vp_admin;
1838		if (MLX4_VGT != vp_admin->default_vlan) {
1839			err = __mlx4_register_vlan(&priv->dev, port,
1840						 vp_admin->default_vlan, &(vp_oper->vlan_idx));
1841			if (err) {
1842				vp_oper->vlan_idx = NO_INDX;
1843				mlx4_warn((&priv->dev),
1844					  "No vlan resources slave %d, port %d\n",
1845					  slave, port);
1846				return err;
1847			}
1848			mlx4_dbg((&(priv->dev)), "alloc vlan %d idx  %d slave %d port %d\n",
1849				 (int)(vp_oper->state.default_vlan),
1850				 vp_oper->vlan_idx, slave, port);
1851		}
1852		if (vp_admin->spoofchk) {
1853			vp_oper->mac_idx = __mlx4_register_mac(&priv->dev,
1854							       port,
1855							       vp_admin->mac);
1856			if (0 > vp_oper->mac_idx) {
1857				err = vp_oper->mac_idx;
1858				vp_oper->mac_idx = NO_INDX;
1859				mlx4_warn((&priv->dev),
1860					  "No mac resources slave %d, port %d\n",
1861					  slave, port);
1862				return err;
1863			}
1864			mlx4_dbg((&(priv->dev)), "alloc mac %llx idx  %d slave %d port %d\n",
1865				 (unsigned long long) vp_oper->state.mac, vp_oper->mac_idx, slave, port);
1866		}
1867	}
1868	return 0;
1869}
1870
1871static void mlx4_master_deactivate_admin_state(struct mlx4_priv *priv, int slave)
1872{
1873	int port;
1874	struct mlx4_vport_oper_state *vp_oper;
1875
1876	for (port = 1; port <= MLX4_MAX_PORTS; port++) {
1877		vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
1878		if (NO_INDX != vp_oper->vlan_idx) {
1879			__mlx4_unregister_vlan(&priv->dev,
1880					       port, vp_oper->state.default_vlan);
1881			vp_oper->vlan_idx = NO_INDX;
1882		}
1883		if (NO_INDX != vp_oper->mac_idx) {
1884			__mlx4_unregister_mac(&priv->dev, port, vp_oper->state.mac);
1885			vp_oper->mac_idx = NO_INDX;
1886		}
1887	}
1888	return;
1889}
1890
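/*
 * Comm-channel handshake, as enforced by the last_cmd checks below:
 * after RESET a slave posts VHCR0, VHCR1, VHCR2 and VHCR_EN, handing
 * the master its VHCR DMA address in 16-bit pieces (most significant
 * word first), and may then post VHCR_POST to execute commands.  For a
 * purely hypothetical VHCR address 0x0000123456789000 the sequence
 * would be:
 *
 *	VHCR0:   param = 0x0000  ->  vhcr_dma = 0x0000000000000000
 *	VHCR1:   param = 0x1234  ->  vhcr_dma = 0x0000123400000000
 *	VHCR2:   param = 0x5678  ->  vhcr_dma = 0x0000123456780000
 *	VHCR_EN: param = 0x9000  ->  vhcr_dma = 0x0000123456789000
 *
 * VHCR_EN also activates the slave's admin state and reports
 * MLX4_DEV_EVENT_SLAVE_INIT.
 */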
1891static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
1892			       u16 param, u8 toggle)
1893{
1894	struct mlx4_priv *priv = mlx4_priv(dev);
1895	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
1896	u32 reply;
1897	u8 is_going_down = 0;
1898	int i;
1899	unsigned long flags;
1900
1901	slave_state[slave].comm_toggle ^= 1;
1902	reply = (u32) slave_state[slave].comm_toggle << 31;
1903	if (toggle != slave_state[slave].comm_toggle) {
1904		mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER "
1905			  "STATE COMPROMISED ***\n", toggle, slave);
1906		goto reset_slave;
1907	}
1908	if (cmd == MLX4_COMM_CMD_RESET) {
1909		mlx4_warn(dev, "Received reset from slave:%d\n", slave);
1910		slave_state[slave].active = false;
1911		slave_state[slave].old_vlan_api = false;
1912		mlx4_master_deactivate_admin_state(priv, slave);
1913		for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i) {
1914				slave_state[slave].event_eq[i].eqn = -1;
1915				slave_state[slave].event_eq[i].token = 0;
1916		}
1917		/* check if we are in the middle of FLR process;
1918		 * if so, return "retry" status to the slave */
1919		if (MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd)
1920			goto inform_slave_state;
1921
1922		mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_SHUTDOWN, slave);
1923
1924		/* write the version in the event field */
1925		reply |= mlx4_comm_get_version();
1926
1927		goto reset_slave;
1928	}
1929	/* command from slave in the middle of FLR */
1930	if (cmd != MLX4_COMM_CMD_RESET &&
1931	    MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
1932		mlx4_warn(dev, "slave:%d is trying to run cmd (0x%x) "
1933			  "in the middle of FLR\n", slave, cmd);
1934		return;
1935	}
1936
1937	switch (cmd) {
1938	case MLX4_COMM_CMD_VHCR0:
1939		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_RESET)
1940			goto reset_slave;
1941		slave_state[slave].vhcr_dma = ((u64) param) << 48;
1942		priv->mfunc.master.slave_state[slave].cookie = 0;
1943		break;
1944	case MLX4_COMM_CMD_VHCR1:
1945		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR0)
1946			goto reset_slave;
1947		slave_state[slave].vhcr_dma |= ((u64) param) << 32;
1948		break;
1949	case MLX4_COMM_CMD_VHCR2:
1950		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR1)
1951			goto reset_slave;
1952		slave_state[slave].vhcr_dma |= ((u64) param) << 16;
1953		break;
1954	case MLX4_COMM_CMD_VHCR_EN:
1955		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR2)
1956			goto reset_slave;
1957		slave_state[slave].vhcr_dma |= param;
1958		if (mlx4_master_activate_admin_state(priv, slave))
1959				goto reset_slave;
1960		slave_state[slave].active = true;
1961		mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_INIT, slave);
1962		break;
1963	case MLX4_COMM_CMD_VHCR_POST:
1964		if ((slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_EN) &&
1965		    (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_POST))
1966			goto reset_slave;
1967
1968		mutex_lock(&priv->cmd.slave_cmd_mutex);
1969		if (mlx4_master_process_vhcr(dev, slave, NULL)) {
1970			mlx4_err(dev, "Failed processing vhcr for slave: %d,"
1971				 " resetting slave.\n", slave);
1972			mutex_unlock(&priv->cmd.slave_cmd_mutex);
1973			goto reset_slave;
1974		}
1975		mutex_unlock(&priv->cmd.slave_cmd_mutex);
1976		break;
1977	default:
1978		mlx4_warn(dev, "Bad comm cmd: %d from slave: %d\n", cmd, slave);
1979		goto reset_slave;
1980	}
1981	spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
1982	if (!slave_state[slave].is_slave_going_down)
1983		slave_state[slave].last_cmd = cmd;
1984	else
1985		is_going_down = 1;
1986	spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
1987	if (is_going_down) {
1988		mlx4_warn(dev, "Slave is going down, aborting command (%d)"
1989			  " executing from slave: %d\n",
1990			  cmd, slave);
1991		return;
1992	}
1993	__raw_writel((__force u32) cpu_to_be32(reply),
1994		     &priv->mfunc.comm[slave].slave_read);
1995	mmiowb();
1996
1997	return;
1998
1999reset_slave:
2000	/* cleanup any slave resources */
2001	mlx4_delete_all_resources_for_slave(dev, slave);
2002	spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
2003	if (!slave_state[slave].is_slave_going_down)
2004		slave_state[slave].last_cmd = MLX4_COMM_CMD_RESET;
2005	spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
2006	/* with slave in the middle of FLR, no need to clean resources again */
2007inform_slave_state:
2008	__raw_writel((__force u32) cpu_to_be32(reply),
2009		     &priv->mfunc.comm[slave].slave_read);
2010	wmb();
2011}
2012
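/*
 * Each comm-channel word carries the toggle in bit 31, the command in
 * bits 23:16 and a 16-bit parameter in bits 15:0.  A slave posts a
 * command by flipping its toggle in slave_write; the master, in
 * mlx4_master_do_cmd() above, acknowledges by mirroring the new toggle
 * into slave_read.  The handler below scans the armed bit vector and
 * treats a toggle mismatch between slave_write and slave_read as a
 * pending command.
 */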
2013/* master command processing */
2014void mlx4_master_comm_channel(struct work_struct *work)
2015{
2016	struct mlx4_mfunc_master_ctx *master =
2017		container_of(work,
2018			     struct mlx4_mfunc_master_ctx,
2019			     comm_work);
2020	struct mlx4_mfunc *mfunc =
2021		container_of(master, struct mlx4_mfunc, master);
2022	struct mlx4_priv *priv =
2023		container_of(mfunc, struct mlx4_priv, mfunc);
2024	struct mlx4_dev *dev = &priv->dev;
2025	__be32 *bit_vec;
2026	u32 comm_cmd;
2027	u32 vec;
2028	int i, j, slave;
2029	int toggle;
2030	int served = 0;
2031	int reported = 0;
2032	u32 slt;
2033
2034	bit_vec = master->comm_arm_bit_vector;
2035	for (i = 0; i < COMM_CHANNEL_BIT_ARRAY_SIZE; i++) {
2036		vec = be32_to_cpu(bit_vec[i]);
2037		for (j = 0; j < 32; j++) {
2038			if (!(vec & (1 << j)))
2039				continue;
2040			++reported;
2041			slave = (i * 32) + j;
2042			comm_cmd = swab32(readl(
2043					  &mfunc->comm[slave].slave_write));
2044			slt = swab32(readl(&mfunc->comm[slave].slave_read))
2045				     >> 31;
2046			toggle = comm_cmd >> 31;
2047			if (toggle != slt) {
2048				if (master->slave_state[slave].comm_toggle
2049				    != slt) {
2050					mlx4_info(dev, "slave %d out of sync."
2051						  " read toggle %d, state toggle %d. "
2052						  "Resyncing.\n", slave, slt,
2053						  master->slave_state[slave].comm_toggle);
2054					master->slave_state[slave].comm_toggle =
2055						slt;
2056				}
2057				mlx4_master_do_cmd(dev, slave,
2058						   comm_cmd >> 16 & 0xff,
2059						   comm_cmd & 0xffff, toggle);
2060				++served;
2061			} else
2062				mlx4_err(dev, "slave %d out of sync."
2063				  " read toggle %d, write toggle %d.\n", slave, slt,
2064				  toggle);
2065		}
2066	}
2067
2068	if (reported && reported != served)
2069		mlx4_warn(dev, "Got command event with bitmask from %d slaves"
2070			  " but %d were served\n",
2071			  reported, served);
2072}
2073/* master comm channel arming */
2074void mlx4_master_arm_comm_channel(struct work_struct *work)
2075{
2076	struct mlx4_mfunc_master_ctx *master =
2077		container_of(work,
2078			     struct mlx4_mfunc_master_ctx,
2079			     arm_comm_work);
2080	struct mlx4_mfunc *mfunc =
2081		container_of(master, struct mlx4_mfunc, master);
2082	struct mlx4_priv *priv =
2083		container_of(mfunc, struct mlx4_priv, mfunc);
2084	struct mlx4_dev *dev = &priv->dev;
2085
2086	if (mlx4_ARM_COMM_CHANNEL(dev))
2087		mlx4_warn(dev, "Failed to arm comm channel events\n");
2088}
2089
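/*
 * Slave-side startup: poll (for up to 5 seconds) until the toggle the
 * master last wrote to slave_read matches the one in slave_write, and
 * adopt it as the local comm_toggle.  If the channel was left unsynced
 * by a previous owner, both words are zeroed and the toggle restarts
 * from 0.
 */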
2090static int sync_toggles(struct mlx4_dev *dev)
2091{
2092	struct mlx4_priv *priv = mlx4_priv(dev);
2093	int wr_toggle;
2094	int rd_toggle;
2095	unsigned long end;
2096
2097	wr_toggle = swab32(readl(&priv->mfunc.comm->slave_write)) >> 31;
2098	end = jiffies + msecs_to_jiffies(5000);
2099
2100	while (time_before(jiffies, end)) {
2101		rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read)) >> 31;
2102		if (rd_toggle == wr_toggle) {
2103			priv->cmd.comm_toggle = rd_toggle;
2104			return 0;
2105		}
2106
2107		cond_resched();
2108	}
2109
2110	/*
2111	 * We can reach here if, for example, the previous VM using this
2112	 * function misbehaved and left the channel in an unsynced state.
2113	 * Fix it here and give this VM a chance to use a properly synced
2114	 * channel.
2115	 */
2116	mlx4_warn(dev, "recovering from a previously misbehaved VM\n");
2117	__raw_writel((__force u32) 0, &priv->mfunc.comm->slave_read);
2118	__raw_writel((__force u32) 0, &priv->mfunc.comm->slave_write);
2119	priv->cmd.comm_toggle = 0;
2120
2121	return 0;
2122}
2123
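/*
 * Multi-function (SR-IOV) setup.  Both master and slave map the comm
 * channel pages; the master additionally allocates per-slave state
 * (command state, vport admin/oper state, vlan filters), initializes
 * the comm/FLR work items, creates the "mlx4_comm" workqueue, starts
 * the resource tracker and arms the comm channel EQ, while a slave
 * only synchronizes its toggle with the master.
 */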
2124int mlx4_multi_func_init(struct mlx4_dev *dev)
2125{
2126	struct mlx4_priv *priv = mlx4_priv(dev);
2127	struct mlx4_slave_state *s_state;
2128	int i, j, err, port;
2129
2130	if (mlx4_is_master(dev))
2131		priv->mfunc.comm =
2132		ioremap(pci_resource_start(dev->pdev, priv->fw.comm_bar) +
2133			priv->fw.comm_base, MLX4_COMM_PAGESIZE);
2134	else
2135		priv->mfunc.comm =
2136		ioremap(pci_resource_start(dev->pdev, 2) +
2137			MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE);
2138	if (!priv->mfunc.comm) {
2139		mlx4_err(dev, "Couldn't map communication vector.\n");
2140		goto err_vhcr;
2141	}
2142
2143	if (mlx4_is_master(dev)) {
2144		priv->mfunc.master.slave_state =
2145			kzalloc(dev->num_slaves *
2146				sizeof(struct mlx4_slave_state), GFP_KERNEL);
2147		if (!priv->mfunc.master.slave_state)
2148			goto err_comm;
2149
2150		priv->mfunc.master.vf_admin =
2151			kzalloc(dev->num_slaves *
2152				sizeof(struct mlx4_vf_admin_state), GFP_KERNEL);
2153		if (!priv->mfunc.master.vf_admin)
2154			goto err_comm_admin;
2155
2156		priv->mfunc.master.vf_oper =
2157			kzalloc(dev->num_slaves *
2158				sizeof(struct mlx4_vf_oper_state), GFP_KERNEL);
2159		if (!priv->mfunc.master.vf_oper)
2160			goto err_comm_oper;
2161
2162		for (i = 0; i < dev->num_slaves; ++i) {
2163			s_state = &priv->mfunc.master.slave_state[i];
2164			s_state->last_cmd = MLX4_COMM_CMD_RESET;
2165			mutex_init(&priv->mfunc.master.gen_eqe_mutex[i]);
2166			for (j = 0; j < MLX4_EVENT_TYPES_NUM; ++j)
2167				s_state->event_eq[j].eqn = -1;
2168			__raw_writel((__force u32) 0,
2169				     &priv->mfunc.comm[i].slave_write);
2170			__raw_writel((__force u32) 0,
2171				     &priv->mfunc.comm[i].slave_read);
2172			mmiowb();
2173			for (port = 1; port <= MLX4_MAX_PORTS; port++) {
2174				s_state->vlan_filter[port] =
2175					kzalloc(sizeof(struct mlx4_vlan_fltr),
2176						GFP_KERNEL);
2177				if (!s_state->vlan_filter[port]) {
2178					if (--port)
2179						kfree(s_state->vlan_filter[port]);
2180					goto err_slaves;
2181				}
2182				INIT_LIST_HEAD(&s_state->mcast_filters[port]);
2183				priv->mfunc.master.vf_admin[i].vport[port].default_vlan = MLX4_VGT;
2184				priv->mfunc.master.vf_oper[i].vport[port].state.default_vlan = MLX4_VGT;
2185				priv->mfunc.master.vf_oper[i].vport[port].vlan_idx = NO_INDX;
2186				priv->mfunc.master.vf_oper[i].vport[port].mac_idx = NO_INDX;
2187			}
2188			spin_lock_init(&s_state->lock);
2189		}
2190
2191		memset(&priv->mfunc.master.cmd_eqe, 0, dev->caps.eqe_size);
2192		priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD;
2193		INIT_WORK(&priv->mfunc.master.comm_work,
2194			  mlx4_master_comm_channel);
2195		INIT_WORK(&priv->mfunc.master.arm_comm_work,
2196			  mlx4_master_arm_comm_channel);
2197		INIT_WORK(&priv->mfunc.master.slave_event_work,
2198			  mlx4_gen_slave_eqe);
2199		INIT_WORK(&priv->mfunc.master.slave_flr_event_work,
2200			  mlx4_master_handle_slave_flr);
2201		spin_lock_init(&priv->mfunc.master.slave_state_lock);
2202		spin_lock_init(&priv->mfunc.master.slave_eq.event_lock);
2203		priv->mfunc.master.comm_wq =
2204			create_singlethread_workqueue("mlx4_comm");
2205		if (!priv->mfunc.master.comm_wq)
2206			goto err_slaves;
2207
2208		if (mlx4_init_resource_tracker(dev))
2209			goto err_thread;
2210
2211		err = mlx4_ARM_COMM_CHANNEL(dev);
2212		if (err) {
2213			mlx4_err(dev, "Failed to arm comm channel eq: %x\n",
2214				 err);
2215			goto err_resource;
2216		}
2217
2218	} else {
2219		err = sync_toggles(dev);
2220		if (err) {
2221			mlx4_err(dev, "Couldn't sync toggles\n");
2222			goto err_comm;
2223		}
2224	}
2225	return 0;
2226
2227err_resource:
2228	mlx4_free_resource_tracker(dev, RES_TR_FREE_ALL);
2229err_thread:
2230	flush_workqueue(priv->mfunc.master.comm_wq);
2231	destroy_workqueue(priv->mfunc.master.comm_wq);
2232err_slaves:
2233	while (--i) {
2234		for (port = 1; port <= MLX4_MAX_PORTS; port++)
2235			kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
2236	}
2237	kfree(priv->mfunc.master.vf_oper);
2238err_comm_oper:
2239	kfree(priv->mfunc.master.vf_admin);
2240err_comm_admin:
2241	kfree(priv->mfunc.master.slave_state);
2242err_comm:
2243	iounmap(priv->mfunc.comm);
2244err_vhcr:
2245	dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
2246					     priv->mfunc.vhcr,
2247					     priv->mfunc.vhcr_dma);
2248	priv->mfunc.vhcr = NULL;
2249	return -ENOMEM;
2250}
2251
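/*
 * Basic command interface setup: non-slaves map the HCR registers,
 * multi-function devices allocate one DMA-coherent page for the
 * virtual HCR (VHCR), and a PCI pool is created for command mailboxes.
 */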
2252int mlx4_cmd_init(struct mlx4_dev *dev)
2253{
2254	struct mlx4_priv *priv = mlx4_priv(dev);
2255
2256	mutex_init(&priv->cmd.hcr_mutex);
2257	mutex_init(&priv->cmd.slave_cmd_mutex);
2258	sema_init(&priv->cmd.poll_sem, 1);
2259	priv->cmd.use_events = 0;
2260	priv->cmd.toggle     = 1;
2261
2262	priv->cmd.hcr = NULL;
2263	priv->mfunc.vhcr = NULL;
2264
2265	if (!mlx4_is_slave(dev)) {
2266		priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) +
2267					MLX4_HCR_BASE, MLX4_HCR_SIZE);
2268		if (!priv->cmd.hcr) {
2269			mlx4_err(dev, "Couldn't map command register.\n");
2270			return -ENOMEM;
2271		}
2272	}
2273
2274	if (mlx4_is_mfunc(dev)) {
2275		priv->mfunc.vhcr = dma_alloc_coherent(&(dev->pdev->dev), PAGE_SIZE,
2276						      &priv->mfunc.vhcr_dma,
2277						      GFP_KERNEL);
2278		if (!priv->mfunc.vhcr) {
2279			mlx4_err(dev, "Couldn't allocate VHCR.\n");
2280			goto err_hcr;
2281		}
2282	}
2283
2284	priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev,
2285					 MLX4_MAILBOX_SIZE,
2286					 MLX4_MAILBOX_SIZE, 0);
2287	if (!priv->cmd.pool)
2288		goto err_vhcr;
2289
2290	return 0;
2291
2292err_vhcr:
2293	if (mlx4_is_mfunc(dev))
2294		dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
2295				  priv->mfunc.vhcr, priv->mfunc.vhcr_dma);
2296	priv->mfunc.vhcr = NULL;
2297
2298err_hcr:
2299	if (!mlx4_is_slave(dev))
2300		iounmap(priv->cmd.hcr);
2301	return -ENOMEM;
2302}
2303
2304void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
2305{
2306	struct mlx4_priv *priv = mlx4_priv(dev);
2307	int i, port;
2308
2309	if (mlx4_is_master(dev)) {
2310		flush_workqueue(priv->mfunc.master.comm_wq);
2311		destroy_workqueue(priv->mfunc.master.comm_wq);
2312		for (i = 0; i < dev->num_slaves; i++) {
2313			for (port = 1; port <= MLX4_MAX_PORTS; port++)
2314				kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
2315		}
2316		kfree(priv->mfunc.master.slave_state);
2317		kfree(priv->mfunc.master.vf_admin);
2318		kfree(priv->mfunc.master.vf_oper);
2319	}
2320
2321	iounmap(priv->mfunc.comm);
2322}
2323
2324void mlx4_cmd_cleanup(struct mlx4_dev *dev)
2325{
2326	struct mlx4_priv *priv = mlx4_priv(dev);
2327
2328	pci_pool_destroy(priv->cmd.pool);
2329
2330	if (!mlx4_is_slave(dev))
2331		iounmap(priv->cmd.hcr);
2332	if (mlx4_is_mfunc(dev))
2333		dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
2334				  priv->mfunc.vhcr, priv->mfunc.vhcr_dma);
2335	priv->mfunc.vhcr = NULL;
2336}
2337
2338/*
2339 * Switch to using events to issue FW commands (can only be called
2340 * after event queue for command events has been initialized).
2341 */
2342int mlx4_cmd_use_events(struct mlx4_dev *dev)
2343{
2344	struct mlx4_priv *priv = mlx4_priv(dev);
2345	int i;
2346	int err = 0;
2347
2348	priv->cmd.context = kmalloc(priv->cmd.max_cmds *
2349				   sizeof (struct mlx4_cmd_context),
2350				   GFP_KERNEL);
2351	if (!priv->cmd.context)
2352		return -ENOMEM;
2353
2354	for (i = 0; i < priv->cmd.max_cmds; ++i) {
2355		priv->cmd.context[i].token = i;
2356		priv->cmd.context[i].next  = i + 1;
2357	}
2358
2359	priv->cmd.context[priv->cmd.max_cmds - 1].next = -1;
2360	priv->cmd.free_head = 0;
2361
2362	sema_init(&priv->cmd.event_sem, priv->cmd.max_cmds);
2363	spin_lock_init(&priv->cmd.context_lock);
2364
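	/*
	 * Round max_cmds up to a power of two and subtract one to form
	 * the token mask; e.g. if max_cmds were 10 the loop would stop
	 * at 16 and token_mask would end up as 0xf.
	 */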
2365	for (priv->cmd.token_mask = 1;
2366	     priv->cmd.token_mask < priv->cmd.max_cmds;
2367	     priv->cmd.token_mask <<= 1)
2368		; /* nothing */
2369	--priv->cmd.token_mask;
2370
2371	down(&priv->cmd.poll_sem);
2372	priv->cmd.use_events = 1;
2373
2374	return err;
2375}
2376
2377/*
2378 * Switch back to polling (used when shutting down the device)
2379 */
2380void mlx4_cmd_use_polling(struct mlx4_dev *dev)
2381{
2382	struct mlx4_priv *priv = mlx4_priv(dev);
2383	int i;
2384
2385	priv->cmd.use_events = 0;
2386
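	/* Take every event-mode slot so no command can still be in flight
	 * before the context array is freed below. */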
2387	for (i = 0; i < priv->cmd.max_cmds; ++i)
2388		down(&priv->cmd.event_sem);
2389
2390	kfree(priv->cmd.context);
2391
2392	up(&priv->cmd.poll_sem);
2393}
2394
2395struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
2396{
2397	struct mlx4_cmd_mailbox *mailbox;
2398
2399	mailbox = kmalloc(sizeof *mailbox, GFP_KERNEL);
2400	if (!mailbox)
2401		return ERR_PTR(-ENOMEM);
2402
2403	mailbox->buf = pci_pool_alloc(mlx4_priv(dev)->cmd.pool, GFP_KERNEL,
2404				      &mailbox->dma);
2405	if (!mailbox->buf) {
2406		kfree(mailbox);
2407		return ERR_PTR(-ENOMEM);
2408	}
2409
2410	memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);
2411
2412	return mailbox;
2413}
2414EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox);
2415
2416void mlx4_free_cmd_mailbox(struct mlx4_dev *dev,
2417			   struct mlx4_cmd_mailbox *mailbox)
2418{
2419	if (!mailbox)
2420		return;
2421
2422	pci_pool_free(mlx4_priv(dev)->cmd.pool, mailbox->buf, mailbox->dma);
2423	kfree(mailbox);
2424}
2425EXPORT_SYMBOL_GPL(mlx4_free_cmd_mailbox);
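
/*
 * Typical mailbox usage by a caller (an illustrative sketch only;
 * QUERY_FOO_OP is a placeholder opcode and the time class/flags depend
 * on the actual command):
 *
 *	struct mlx4_cmd_mailbox *mailbox;
 *	int err;
 *
 *	mailbox = mlx4_alloc_cmd_mailbox(dev);
 *	if (IS_ERR(mailbox))
 *		return PTR_ERR(mailbox);
 *	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, QUERY_FOO_OP,
 *			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
 *	if (!err)
 *		... parse mailbox->buf (command-specific) ...
 *	mlx4_free_cmd_mailbox(dev, mailbox);
 */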
2426
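/*
 * The comm channel version word packs CMD_CHAN_IF_REV into bits 15:8
 * and CMD_CHAN_VER into bits 7:0; it is the value written back into
 * the reply event field when a slave resets (see mlx4_master_do_cmd).
 */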
2427u32 mlx4_comm_get_version(void)
2428{
2429	 return ((u32) CMD_CHAN_IF_REV << 8) | (u32) CMD_CHAN_VER;
2430}
2431
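/* VF numbers are zero-based from the caller's point of view, while
 * comm-channel slave index 0 is used by the PF itself, hence the +1
 * translation. */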
2432static int mlx4_get_slave_indx(struct mlx4_dev *dev, int vf)
2433{
2434	if ((vf < 0) || (vf >= dev->num_vfs)) {
2435		mlx4_err(dev, "Bad vf number: %d (number of activated vfs: %d)\n", vf, dev->num_vfs);
2436		return -EINVAL;
2437	}
2438	return (vf+1);
2439}
2440
2441int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u8 *mac)
2442{
2443	struct mlx4_priv *priv = mlx4_priv(dev);
2444	struct mlx4_vport_state *s_info;
2445	int slave;
2446
2447	if (!mlx4_is_master(dev))
2448		return -EPROTONOSUPPORT;
2449
2450	slave = mlx4_get_slave_indx(dev, vf);
2451	if (slave < 0)
2452		return -EINVAL;
2453
2454	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
2455	s_info->mac = mlx4_mac_to_u64(mac);
2456	mlx4_info(dev, "default mac on vf %d port %d set to %llX; will take effect only after vf restart\n",
2457		  vf, port, (unsigned long long) s_info->mac);
2458	return 0;
2459}
2460EXPORT_SYMBOL_GPL(mlx4_set_vf_mac);
2461
2462int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
2463{
2464	struct mlx4_priv *priv = mlx4_priv(dev);
2465	struct mlx4_vport_oper_state *vf_oper;
2466	struct mlx4_vport_state *vf_admin;
2467	int slave;
2468
2469	if ((!mlx4_is_master(dev)) ||
2470	    !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VLAN_CONTROL))
2471		return -EPROTONOSUPPORT;
2472
2473	if ((vlan > 4095) || (qos > 7))
2474		return -EINVAL;
2475
2476	slave = mlx4_get_slave_indx(dev, vf);
2477	if (slave < 0)
2478		return -EINVAL;
2479
2480	vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
2481	vf_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
2482
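	/* vlan 0 with qos 0 means "no VST configuration": fall back to
	 * VGT so the guest keeps control of its own tagging; anything
	 * else programs a VST default vlan/qos pair. */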
2483	if ((0 == vlan) && (0 == qos))
2484		vf_admin->default_vlan = MLX4_VGT;
2485	else
2486		vf_admin->default_vlan = vlan;
2487	vf_admin->default_qos = qos;
2488
2489	if (priv->mfunc.master.slave_state[slave].active &&
2490	    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
2491		mlx4_info(dev, "updating vf %d port %d config params immediately\n",
2492			  vf, port);
2493		mlx4_master_immediate_activate_vlan_qos(priv, slave, port);
2494	}
2495	return 0;
2496}
2497EXPORT_SYMBOL_GPL(mlx4_set_vf_vlan);
2498
2499/* mlx4_get_slave_default_vlan -
2500 * returns true if the slave is in VST mode (i.e. has a default vlan);
2501 * if so, fills in vlan and qos (when not NULL) */
2502bool mlx4_get_slave_default_vlan(struct mlx4_dev *dev, int port, int slave, u16 *vlan, u8 *qos)
2503{
2504	struct mlx4_vport_oper_state *vp_oper;
2505	struct mlx4_priv *priv;
2506
2507	priv = mlx4_priv(dev);
2508	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
2509
2510	if (MLX4_VGT != vp_oper->state.default_vlan) {
2511		if (vlan)
2512			*vlan = vp_oper->state.default_vlan;
2513		if (qos)
2514			*qos = vp_oper->state.default_qos;
2515		return true;
2516	}
2517	return false;
2518}
2519EXPORT_SYMBOL_GPL(mlx4_get_slave_default_vlan);
2520
2521int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting)
2522{
2523	struct mlx4_priv *priv = mlx4_priv(dev);
2524	struct mlx4_vport_state *s_info;
2525	int slave;
2526
2527	if ((!mlx4_is_master(dev)) ||
2528	    !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FSM))
2529		return -EPROTONOSUPPORT;
2530
2531	slave = mlx4_get_slave_indx(dev, vf);
2532	if (slave < 0)
2533		return -EINVAL;
2534
2535	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
2536	s_info->spoofchk = setting;
2537
2538	return 0;
2539}
2540EXPORT_SYMBOL_GPL(mlx4_set_vf_spoofchk);
2541
2542int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state)
2543{
2544	struct mlx4_priv *priv = mlx4_priv(dev);
2545	struct mlx4_vport_state *s_info;
2546	struct mlx4_vport_oper_state *vp_oper;
2547	int slave;
2548	u8 link_stat_event;
2549
2550	slave = mlx4_get_slave_indx(dev, vf);
2551	if (slave < 0)
2552		return -EINVAL;
2553
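	/* Map the requested netlink link state onto the port-change event
	 * subtype reported to the VF: AUTO reports active unless port
	 * sensing is still enabled for this port, ENABLE/DISABLE force
	 * active/down. */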
2554	switch (link_state) {
2555	case IFLA_VF_LINK_STATE_AUTO:
2556		/* get link current state */
2557		if (!priv->sense.do_sense_port[port])
2558			link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_ACTIVE;
2559		else
2560			link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_DOWN;
2561	    break;
2562
2563	case IFLA_VF_LINK_STATE_ENABLE:
2564		link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_ACTIVE;
2565	    break;
2566
2567	case IFLA_VF_LINK_STATE_DISABLE:
2568		link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_DOWN;
2569	    break;
2570
2571	default:
2572		mlx4_warn(dev, "unknown value for link_state %02x on slave %d port %d\n",
2573			  link_state, slave, port);
2574		return -EINVAL;
2575	}
2576	/* update the admin & oper state on the link state */
2577	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
2578	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
2579	s_info->link_state = link_state;
2580	vp_oper->state.link_state = link_state;
2581
2582	/* send event */
2583	mlx4_gen_port_state_change_eqe(dev, slave, port, link_stat_event);
2584	return 0;
2585}
2586EXPORT_SYMBOL_GPL(mlx4_set_vf_link_state);
2587
2588int mlx4_get_vf_link_state(struct mlx4_dev *dev, int port, int vf)
2589{
2590	struct mlx4_priv *priv = mlx4_priv(dev);
2591	struct mlx4_vport_state *s_info;
2592	int slave;
2593
2594	if (!mlx4_is_master(dev))
2595		return -EPROTONOSUPPORT;
2596
2597	slave = mlx4_get_slave_indx(dev, vf);
2598	if (slave < 0)
2599		return -EINVAL;
2600
2601	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
2602
2603	return s_info->link_state;
2604}
2605EXPORT_SYMBOL_GPL(mlx4_get_vf_link_state);
2606
2607