1/*
2 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
3 * Copyright (c) 2005, 2006, 2007, 2008, 2014 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses.  You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 *     Redistribution and use in source and binary forms, with or
13 *     without modification, are permitted provided that the following
14 *     conditions are met:
15 *
16 *      - Redistributions of source code must retain the above
17 *        copyright notice, this list of conditions and the following
18 *        disclaimer.
19 *
20 *      - Redistributions in binary form must reproduce the above
21 *        copyright notice, this list of conditions and the following
22 *        disclaimer in the documentation and/or other materials
23 *        provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/module.h>
38#include <linux/pci.h>
39#include <linux/errno.h>
40
41#include <linux/mlx4/cmd.h>
42#include <linux/mlx4/device.h>
43#include <linux/semaphore.h>
44#include <rdma/ib_smi.h>
45
46#include <asm/io.h>
47#include <linux/ktime.h>
48
49#include "mlx4.h"
50#include "fw.h"
51
52#define CMD_POLL_TOKEN 0xffff
53#define INBOX_MASK	0xffffffffffffff00ULL
54
55#define CMD_CHAN_VER 1
56#define CMD_CHAN_IF_REV 1
57
58enum {
59	/* command completed successfully: */
60	CMD_STAT_OK		= 0x00,
61	/* Internal error (such as a bus error) occurred while processing command: */
62	CMD_STAT_INTERNAL_ERR	= 0x01,
63	/* Operation/command not supported or opcode modifier not supported: */
64	CMD_STAT_BAD_OP		= 0x02,
65	/* Parameter not supported or parameter out of range: */
66	CMD_STAT_BAD_PARAM	= 0x03,
67	/* System not enabled or bad system state: */
68	CMD_STAT_BAD_SYS_STATE	= 0x04,
69	/* Attempt to access a reserved or unallocated resource: */
70	CMD_STAT_BAD_RESOURCE	= 0x05,
71	/* Requested resource is currently executing a command, or is otherwise busy: */
72	CMD_STAT_RESOURCE_BUSY	= 0x06,
73	/* Required capability exceeds device limits: */
74	CMD_STAT_EXCEED_LIM	= 0x08,
75	/* Resource is not in the appropriate state or ownership: */
76	CMD_STAT_BAD_RES_STATE	= 0x09,
77	/* Index out of range: */
78	CMD_STAT_BAD_INDEX	= 0x0a,
79	/* FW image corrupted: */
80	CMD_STAT_BAD_NVMEM	= 0x0b,
81	/* Error in ICM mapping (e.g. not enough auxiliary ICM pages to execute command): */
82	CMD_STAT_ICM_ERROR	= 0x0c,
83	/* Attempt to modify a QP/EE which is not in the presumed state: */
84	CMD_STAT_BAD_QP_STATE   = 0x10,
85	/* Bad segment parameters (Address/Size): */
86	CMD_STAT_BAD_SEG_PARAM	= 0x20,
87	/* Memory Region has Memory Windows bound to it: */
88	CMD_STAT_REG_BOUND	= 0x21,
89	/* HCA local attached memory not present: */
90	CMD_STAT_LAM_NOT_PRE	= 0x22,
91	/* Bad management packet (silently discarded): */
92	CMD_STAT_BAD_PKT	= 0x30,
93	/* More outstanding CQEs in CQ than new CQ size: */
94	CMD_STAT_BAD_SIZE	= 0x40,
95	/* Multi Function device support required: */
96	CMD_STAT_MULTI_FUNC_REQ	= 0x50,
97};
98
99enum {
100	HCR_IN_PARAM_OFFSET	= 0x00,
101	HCR_IN_MODIFIER_OFFSET	= 0x08,
102	HCR_OUT_PARAM_OFFSET	= 0x0c,
103	HCR_TOKEN_OFFSET	= 0x14,
104	HCR_STATUS_OFFSET	= 0x18,
105
106	HCR_OPMOD_SHIFT		= 12,
107	HCR_T_BIT		= 21,
108	HCR_E_BIT		= 22,
109	HCR_GO_BIT		= 23
110};
111
112enum {
113	GO_BIT_TIMEOUT_MSECS	= 10000
114};
115
116enum mlx4_vlan_transition {
117	MLX4_VLAN_TRANSITION_VST_VST = 0,
118	MLX4_VLAN_TRANSITION_VST_VGT = 1,
119	MLX4_VLAN_TRANSITION_VGT_VST = 2,
120	MLX4_VLAN_TRANSITION_VGT_VGT = 3,
121};
122
123
124struct mlx4_cmd_context {
125	struct completion	done;
126	int			result;
127	int			next;
128	u64			out_param;
129	u16			token;
130	u8			fw_status;
131};
132
133static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
134				    struct mlx4_vhcr_cmd *in_vhcr);
135
136static int mlx4_status_to_errno(u8 status)
137{
138	static const int trans_table[] = {
139		[CMD_STAT_INTERNAL_ERR]	  = -EIO,
140		[CMD_STAT_BAD_OP]	  = -EPERM,
141		[CMD_STAT_BAD_PARAM]	  = -EINVAL,
142		[CMD_STAT_BAD_SYS_STATE]  = -ENXIO,
143		[CMD_STAT_BAD_RESOURCE]	  = -EBADF,
144		[CMD_STAT_RESOURCE_BUSY]  = -EBUSY,
145		[CMD_STAT_EXCEED_LIM]	  = -ENOMEM,
146		[CMD_STAT_BAD_RES_STATE]  = -EBADF,
147		[CMD_STAT_BAD_INDEX]	  = -EBADF,
148		[CMD_STAT_BAD_NVMEM]	  = -EFAULT,
149		[CMD_STAT_ICM_ERROR]	  = -ENFILE,
150		[CMD_STAT_BAD_QP_STATE]   = -EINVAL,
151		[CMD_STAT_BAD_SEG_PARAM]  = -EFAULT,
152		[CMD_STAT_REG_BOUND]	  = -EBUSY,
153		[CMD_STAT_LAM_NOT_PRE]	  = -EAGAIN,
154		[CMD_STAT_BAD_PKT]	  = -EINVAL,
155		[CMD_STAT_BAD_SIZE]	  = -ENOMEM,
156		[CMD_STAT_MULTI_FUNC_REQ] = -EACCES,
157	};
158
159	if (status >= ARRAY_SIZE(trans_table) ||
160	    (status != CMD_STAT_OK && trans_table[status] == 0))
161		return -EIO;
162
163	return trans_table[status];
164}
165
166static const char *cmd_to_str(u16 cmd)
167{
168	switch (cmd) {
169	case MLX4_CMD_SYS_EN:		return "SYS_EN";
170	case MLX4_CMD_SYS_DIS:		return "SYS_DIS";
171	case MLX4_CMD_MAP_FA:		return "MAP_FA";
172	case MLX4_CMD_UNMAP_FA:		return "UNMAP_FA";
173	case MLX4_CMD_RUN_FW:		return "RUN_FW";
174	case MLX4_CMD_MOD_STAT_CFG:	return "MOD_STAT_CFG";
175	case MLX4_CMD_QUERY_DEV_CAP:	return "QUERY_DEV_CAP";
176	case MLX4_CMD_QUERY_FW:		return "QUERY_FW";
177	case MLX4_CMD_ENABLE_LAM:	return "ENABLE_LAM";
178	case MLX4_CMD_DISABLE_LAM:	return "DISABLE_LAM";
179	case MLX4_CMD_QUERY_DDR:	return "QUERY_DDR";
180	case MLX4_CMD_QUERY_ADAPTER:	return "QUERY_ADAPTER";
181	case MLX4_CMD_INIT_HCA:		return "INIT_HCA";
182	case MLX4_CMD_CLOSE_HCA:	return "CLOSE_HCA";
183	case MLX4_CMD_INIT_PORT:	return "INIT_PORT";
184	case MLX4_CMD_CLOSE_PORT:	return "CLOSE_PORT";
185	case MLX4_CMD_QUERY_HCA:	return "QUERY_HCA";
186	case MLX4_CMD_QUERY_PORT:	return "QUERY_PORT";
187	case MLX4_CMD_SENSE_PORT:	return "SENSE_PORT";
188	case MLX4_CMD_HW_HEALTH_CHECK:  return "HW_HEALTH_CHECK";
189	case MLX4_CMD_SET_PORT:		return "SET_PORT";
190	case MLX4_CMD_SET_NODE:		return "SET_NODE";
191	case MLX4_CMD_QUERY_FUNC:	return "QUERY_FUNC";
192	case MLX4_CMD_MAP_ICM:		return "MAP_ICM";
193	case MLX4_CMD_UNMAP_ICM:	return "UNMAP_ICM";
194	case MLX4_CMD_MAP_ICM_AUX:	return "MAP_ICM_AUX";
195	case MLX4_CMD_UNMAP_ICM_AUX:	return "UNMAP_ICM_AUX";
196	case MLX4_CMD_SET_ICM_SIZE:	return "SET_ICM_SIZE";
197		/* master notifies FW when a slave's FLR is finished */
198	case MLX4_CMD_INFORM_FLR_DONE:	return "INFORM_FLR_DONE";
199	case MLX4_CMD_GET_OP_REQ:	return "GET_OP_REQ";
200
201		/* TPT commands */
202	case MLX4_CMD_SW2HW_MPT:	return "SW2HW_MPT";
203	case MLX4_CMD_QUERY_MPT:	return "QUERY_MPT";
204	case MLX4_CMD_HW2SW_MPT:	return "HW2SW_MPT";
205	case MLX4_CMD_READ_MTT:		return "READ_MTT";
206	case MLX4_CMD_WRITE_MTT:	return "WRITE_MTT";
207	case MLX4_CMD_SYNC_TPT:		return "SYNC_TPT";
208
209		/* EQ commands */
210	case MLX4_CMD_MAP_EQ:		return "MAP_EQ";
211	case MLX4_CMD_SW2HW_EQ:		return "SW2HW_EQ";
212	case MLX4_CMD_HW2SW_EQ:		return "HW2SW_EQ";
213	case MLX4_CMD_QUERY_EQ:		return "QUERY_EQ";
214
215		/* CQ commands */
216	case MLX4_CMD_SW2HW_CQ:		return "SW2HW_CQ";
217	case MLX4_CMD_HW2SW_CQ:		return "HW2SW_CQ";
218	case MLX4_CMD_QUERY_CQ:		return "QUERY_CQ";
219	case MLX4_CMD_MODIFY_CQ:	return "MODIFY_CQ";
220
221		/* SRQ commands */
222	case MLX4_CMD_SW2HW_SRQ:	return "SW2HW_SRQ";
223	case MLX4_CMD_HW2SW_SRQ:	return "HW2SW_SRQ";
224	case MLX4_CMD_QUERY_SRQ:	return "QUERY_SRQ";
225	case MLX4_CMD_ARM_SRQ:		return "ARM_SRQ";
226
227		/* QP/EE commands */
228	case MLX4_CMD_RST2INIT_QP:	return "RST2INIT_QP";
229	case MLX4_CMD_INIT2RTR_QP:	return "INIT2RTR_QP";
230	case MLX4_CMD_RTR2RTS_QP:	return "RTR2RTS_QP";
231	case MLX4_CMD_RTS2RTS_QP:	return "RTS2RTS_QP";
232	case MLX4_CMD_SQERR2RTS_QP:	return "SQERR2RTS_QP";
233	case MLX4_CMD_2ERR_QP:		return "2ERR_QP";
234	case MLX4_CMD_RTS2SQD_QP:	return "RTS2SQD_QP";
235	case MLX4_CMD_SQD2SQD_QP:	return "SQD2SQD_QP";
236	case MLX4_CMD_SQD2RTS_QP:	return "SQD2RTS_QP";
237	case MLX4_CMD_2RST_QP:		return "2RST_QP";
238	case MLX4_CMD_QUERY_QP:		return "QUERY_QP";
239	case MLX4_CMD_INIT2INIT_QP:	return "INIT2INIT_QP";
240	case MLX4_CMD_SUSPEND_QP:	return "SUSPEND_QP";
241	case MLX4_CMD_UNSUSPEND_QP:	return "UNSUSPEND_QP";
242		/* special QP and management commands */
243	case MLX4_CMD_CONF_SPECIAL_QP:	return "CONF_SPECIAL_QP";
244	case MLX4_CMD_MAD_IFC:		return "MAD_IFC";
245
246		/* multicast commands */
247	case MLX4_CMD_READ_MCG:		return "READ_MCG";
248	case MLX4_CMD_WRITE_MCG:	return "WRITE_MCG";
249	case MLX4_CMD_MGID_HASH:	return "MGID_HASH";
250
251		/* miscellaneous commands */
252	case MLX4_CMD_DIAG_RPRT:	return "DIAG_RPRT";
253	case MLX4_CMD_NOP:		return "NOP";
254	case MLX4_CMD_ACCESS_MEM:	return "ACCESS_MEM";
255	case MLX4_CMD_SET_VEP:		return "SET_VEP";
256
257		/* Ethernet specific commands */
258	case MLX4_CMD_SET_VLAN_FLTR:	return "SET_VLAN_FLTR";
259	case MLX4_CMD_SET_MCAST_FLTR:	return "SET_MCAST_FLTR";
260	case MLX4_CMD_DUMP_ETH_STATS:	return "DUMP_ETH_STATS";
261
262		/* Communication channel commands */
263	case MLX4_CMD_ARM_COMM_CHANNEL:	return "ARM_COMM_CHANNEL";
264	case MLX4_CMD_GEN_EQE:		return "GEN_EQE";
265
266		/* virtual commands */
267	case MLX4_CMD_ALLOC_RES:	return "ALLOC_RES";
268	case MLX4_CMD_FREE_RES:		return "FREE_RES";
269	case MLX4_CMD_MCAST_ATTACH:	return "MCAST_ATTACH";
270	case MLX4_CMD_UCAST_ATTACH:	return "UCAST_ATTACH";
271	case MLX4_CMD_PROMISC:		return "PROMISC";
272	case MLX4_CMD_QUERY_FUNC_CAP:	return "QUERY_FUNC_CAP";
273	case MLX4_CMD_QP_ATTACH:	return "QP_ATTACH";
274
275		/* debug commands */
276	case MLX4_CMD_QUERY_DEBUG_MSG:	return "QUERY_DEBUG_MSG";
277	case MLX4_CMD_SET_DEBUG_MSG:	return "SET_DEBUG_MSG";
278
279		/* statistics commands */
280	case MLX4_CMD_QUERY_IF_STAT:	return "QUERY_IF_STAT";
281	case MLX4_CMD_SET_IF_STAT:	return "SET_IF_STAT";
282
283		/* register/delete flow steering network rules */
284	case MLX4_QP_FLOW_STEERING_ATTACH:	return "QP_FLOW_STEERING_ATTACH";
285	case MLX4_QP_FLOW_STEERING_DETACH:	return "QP_FLOW_STEERING_DETACH";
286	case MLX4_FLOW_STEERING_IB_UC_QP_RANGE:	return "FLOW_STEERING_IB_UC_QP_RANGE";
287	default: return "OTHER";
288	}
289}
290
291static u8 mlx4_errno_to_status(int errno)
292{
293	switch (errno) {
294	case -EPERM:
295		return CMD_STAT_BAD_OP;
296	case -EINVAL:
297		return CMD_STAT_BAD_PARAM;
298	case -ENXIO:
299		return CMD_STAT_BAD_SYS_STATE;
300	case -EBUSY:
301		return CMD_STAT_RESOURCE_BUSY;
302	case -ENOMEM:
303		return CMD_STAT_EXCEED_LIM;
304	case -ENFILE:
305		return CMD_STAT_ICM_ERROR;
306	default:
307		return CMD_STAT_INTERNAL_ERR;
308	}
309}
310
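/*
 * A comm channel command is still outstanding as long as the toggle
 * bit (bit 31) of the byte-swapped slave_read word has not caught up
 * with the toggle we used when posting the command.
 */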
311static int comm_pending(struct mlx4_dev *dev)
312{
313	struct mlx4_priv *priv = mlx4_priv(dev);
314	u32 status = readl(&priv->mfunc.comm->slave_read);
315
316	return (swab32(status) >> 31) != priv->cmd.comm_toggle;
317}
318
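/*
 * Ring the comm channel doorbell: flip our toggle and write it, together
 * with the command opcode and parameter, into the slave_write word.
 */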
319static void mlx4_comm_cmd_post(struct mlx4_dev *dev, u8 cmd, u16 param)
320{
321	struct mlx4_priv *priv = mlx4_priv(dev);
322	u32 val;
323
324	priv->cmd.comm_toggle ^= 1;
325	val = param | (cmd << 16) | (priv->cmd.comm_toggle << 31);
326	__raw_writel((__force u32) cpu_to_be32(val),
327		     &priv->mfunc.comm->slave_write);
328	mmiowb();
329}
330
331static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
332		       unsigned long timeout)
333{
334	struct mlx4_priv *priv = mlx4_priv(dev);
335	unsigned long end;
336	int err = 0;
337	int ret_from_pending = 0;
338
339	/* First, verify that the master reports correct status */
340	if (comm_pending(dev)) {
341		mlx4_warn(dev, "Communication channel is not idle. "
342			  "My toggle is %d (cmd: 0x%x)\n",
343			  priv->cmd.comm_toggle, cmd);
344		return -EAGAIN;
345	}
346
347	/* Write command */
348	down(&priv->cmd.poll_sem);
349	mlx4_comm_cmd_post(dev, cmd, param);
350
351	end = msecs_to_jiffies(timeout) + jiffies;
352	while (comm_pending(dev) && time_before(jiffies, end))
353		cond_resched();
354	ret_from_pending = comm_pending(dev);
355	if (ret_from_pending) {
356		/* Check if the slave is trying to boot in the middle of the
357		 * FLR process. The only non-zero result in the RESET command
358		 * is MLX4_DELAY_RESET_SLAVE. */
359		if (cmd == MLX4_COMM_CMD_RESET) {
360			mlx4_warn(dev, "Got slave FLRed from Communication"
361				  " channel (ret:0x%x)\n", ret_from_pending);
362			err = MLX4_DELAY_RESET_SLAVE;
363		} else {
364			mlx4_warn(dev, "Communication channel timed out\n");
365			err = -ETIMEDOUT;
366		}
367	}
368
369	up(&priv->cmd.poll_sem);
370	return err;
371}
372
373static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 op,
374			      u16 param, unsigned long timeout)
375{
376	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
377	struct mlx4_cmd_context *context;
378	unsigned long end;
379	int err = 0;
380
381	down(&cmd->event_sem);
382
383	end = msecs_to_jiffies(timeout) + jiffies;
384	while (comm_pending(dev) && time_before(jiffies, end))
385		cond_resched();
386	if (comm_pending(dev)) {
387		mlx4_warn(dev, "mlx4_comm_cmd_wait: Comm channel "
388			  "is not idle. My toggle is %d (op: 0x%x)\n",
389			  mlx4_priv(dev)->cmd.comm_toggle, op);
390		up(&cmd->event_sem);
391		return -EAGAIN;
392	}
393
394	spin_lock(&cmd->context_lock);
395	BUG_ON(cmd->free_head < 0);
396	context = &cmd->context[cmd->free_head];
397	context->token += cmd->token_mask + 1;
398	cmd->free_head = context->next;
399	spin_unlock(&cmd->context_lock);
400
401	init_completion(&context->done);
402
403	mlx4_comm_cmd_post(dev, op, param);
404
405	/* In slave, wait unconditionally for completion */
406	wait_for_completion(&context->done);
407
408	err = context->result;
409	if (err && context->fw_status != CMD_STAT_MULTI_FUNC_REQ) {
410		mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
411			 op, context->fw_status);
412		goto out;
413	}
414
415out:
416	/* Wait for the comm channel to become ready.
417	 * This is necessary to prevent a race when switching
418	 * between event and polling modes.
419	 */
420	end = msecs_to_jiffies(timeout) + jiffies;
421	while (comm_pending(dev) && time_before(jiffies, end))
422		cond_resched();
423
424	spin_lock(&cmd->context_lock);
425	context->next = cmd->free_head;
426	cmd->free_head = context - cmd->context;
427	spin_unlock(&cmd->context_lock);
428
429	up(&cmd->event_sem);
430	return err;
431}
432
433int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
434		  unsigned long timeout)
435{
436	if (mlx4_priv(dev)->cmd.use_events)
437		return mlx4_comm_cmd_wait(dev, cmd, param, timeout);
438	return mlx4_comm_cmd_poll(dev, cmd, param, timeout);
439}
440
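/*
 * The HCR is treated as busy while the 'go' bit is still set, or while
 * the toggle bit read back from the status register still matches the
 * driver's current toggle value.
 */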
441static int cmd_pending(struct mlx4_dev *dev)
442{
443	u32 status;
444
445	if (pci_channel_offline(dev->pdev))
446		return -EIO;
447
448	status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);
449
450	return (status & swab32(1 << HCR_GO_BIT)) ||
451		(mlx4_priv(dev)->cmd.toggle ==
452		 !!(status & swab32(1 << HCR_T_BIT)));
453}
454
455static int get_status(struct mlx4_dev *dev, u32 *status, int *go_bit,
456		      int *t_bit)
457{
458	if (pci_channel_offline(dev->pdev))
459		return -EIO;
460
461	*status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);
462	*t_bit = !!(*status & swab32(1 << HCR_T_BIT));
463	*go_bit = !!(*status & swab32(1 << HCR_GO_BIT));
464
465	return 0;
466}
467
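/*
 * Post a command to the HCR: wait for the 'go' bit to clear, write the
 * six parameter dwords, then write the opcode dword with the 'go' bit,
 * toggle, event bit and opcode modifier set to hand the command to FW.
 */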
468static int mlx4_cmd_post(struct mlx4_dev *dev, struct timespec *ts1,
469			 u64 in_param, u64 out_param, u32 in_modifier,
470			 u8 op_modifier, u16 op, u16 token, int event)
471{
472	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
473	u32 __iomem *hcr = cmd->hcr;
474	int ret = -EAGAIN;
475	unsigned long end;
476	int err, go_bit = 0, t_bit = 0;
477	u32 status = 0;
478
479	mutex_lock(&cmd->hcr_mutex);
480
481	if (pci_channel_offline(dev->pdev)) {
482		/*
483		 * Device is going through error recovery
484		 * and cannot accept commands.
485		 */
486		ret = -EIO;
487		goto out;
488	}
489
490	end = jiffies;
491	if (event)
492		end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS);
493
494	while (cmd_pending(dev)) {
495		if (pci_channel_offline(dev->pdev)) {
496			/*
497			 * Device is going through error recovery
498			 * and cannot accept commands.
499			 */
500			ret = -EIO;
501			goto out;
502		}
503
504		if (time_after_eq(jiffies, end)) {
505			mlx4_err(dev, "%s: cmd_pending failed\n", __func__);
506			goto out;
507		}
508		cond_resched();
509	}
510
511	/*
512	 * We use writel (instead of something like memcpy_toio)
513	 * because writes of less than 32 bits to the HCR don't work
514	 * (and some architectures such as ia64 implement memcpy_toio
515	 * in terms of writeb).
516	 */
517	__raw_writel((__force u32) cpu_to_be32(in_param >> 32),		  hcr + 0);
518	__raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful),  hcr + 1);
519	__raw_writel((__force u32) cpu_to_be32(in_modifier),		  hcr + 2);
520	__raw_writel((__force u32) cpu_to_be32(out_param >> 32),	  hcr + 3);
521	__raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), hcr + 4);
522	__raw_writel((__force u32) cpu_to_be32(token << 16),		  hcr + 5);
523
524	if (ts1)
525		ktime_get_ts(ts1);
526
527	/* __raw_writel may not order writes. */
528	wmb();
529
530	__raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT)		|
531					       (cmd->toggle << HCR_T_BIT)	|
532					       (event ? (1 << HCR_E_BIT) : 0)	|
533					       (op_modifier << HCR_OPMOD_SHIFT) |
534					       op), hcr + 6);
535
536	/*
537	 * Make sure that our HCR writes don't get mixed in with
538	 * writes from another CPU starting a FW command.
539	 */
540	mmiowb();
541
542	cmd->toggle = cmd->toggle ^ 1;
543
544	ret = 0;
545
546out:
547	if (ret) {
548		err = get_status(dev, &status, &go_bit, &t_bit);
549		mlx4_warn(dev, "Could not post command %s (0x%x): ret=%d, "
550			  "in_param=0x%llx, in_mod=0x%x, op_mod=0x%x, "
551			  "get_status err=%d, status_reg=0x%x, go_bit=%d, "
552			  "t_bit=%d, toggle=0x%x\n", cmd_to_str(op), op, ret,
553			  (unsigned long long) in_param, in_modifier, op_modifier, err, status,
554			  go_bit, t_bit, cmd->toggle);
555	}
556	mutex_unlock(&cmd->hcr_mutex);
557	return ret;
558}
559
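/*
 * Issue a command through the virtual HCR (VHCR).  The master processes
 * the VHCR directly; a slave posts it to the master over the comm
 * channel with MLX4_COMM_CMD_VHCR_POST and waits for the result.
 */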
560static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
561			  int out_is_imm, u32 in_modifier, u8 op_modifier,
562			  u16 op, unsigned long timeout)
563{
564	struct mlx4_priv *priv = mlx4_priv(dev);
565	struct mlx4_vhcr_cmd *vhcr = priv->mfunc.vhcr;
566	int ret;
567
568	mutex_lock(&priv->cmd.slave_cmd_mutex);
569
570	vhcr->in_param = cpu_to_be64(in_param);
571	vhcr->out_param = out_param ? cpu_to_be64(*out_param) : 0;
572	vhcr->in_modifier = cpu_to_be32(in_modifier);
573	vhcr->opcode = cpu_to_be16((((u16) op_modifier) << 12) | (op & 0xfff));
574	vhcr->token = cpu_to_be16(CMD_POLL_TOKEN);
575	vhcr->status = 0;
576	vhcr->flags = !!(priv->cmd.use_events) << 6;
577
578	if (mlx4_is_master(dev)) {
579		ret = mlx4_master_process_vhcr(dev, dev->caps.function, vhcr);
580		if (!ret) {
581			if (out_is_imm) {
582				if (out_param)
583					*out_param =
584						be64_to_cpu(vhcr->out_param);
585				else {
586					mlx4_err(dev, "response expected while "
587						 "output mailbox is NULL for "
588						 "command 0x%x\n", op);
589					vhcr->status = CMD_STAT_BAD_PARAM;
590				}
591			}
592			ret = mlx4_status_to_errno(vhcr->status);
593		}
594	} else {
595		ret = mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_POST, 0,
596				    MLX4_COMM_TIME + timeout);
597		if (!ret) {
598			if (out_is_imm) {
599				if (out_param)
600					*out_param =
601						be64_to_cpu(vhcr->out_param);
602				else {
603					mlx4_err(dev, "response expected while "
604						 "output mailbox is NULL for "
605						 "command 0x%x\n", op);
606					vhcr->status = CMD_STAT_BAD_PARAM;
607				}
608			}
609			ret = mlx4_status_to_errno(vhcr->status);
610		} else
611			mlx4_err(dev, "failed execution of VHCR_POST command, "
612				 "opcode %s (0x%x)\n", cmd_to_str(op), op);
613	}
614
615	mutex_unlock(&priv->cmd.slave_cmd_mutex);
616	return ret;
617}
618
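/*
 * Execute a command in polling mode: post it to the HCR and busy-wait
 * (with cond_resched) until the 'go' bit clears or the timeout expires,
 * then read the immediate output and status straight from the HCR.
 */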
619static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
620			 int out_is_imm, u32 in_modifier, u8 op_modifier,
621			 u16 op, unsigned long timeout)
622{
623	struct mlx4_priv *priv = mlx4_priv(dev);
624	void __iomem *hcr = priv->cmd.hcr;
625	int err = 0;
626	unsigned long end;
627	u32 stat;
628
629	down(&priv->cmd.poll_sem);
630
631	if (pci_channel_offline(dev->pdev)) {
632		/*
633		 * Device is going through error recovery
634		 * and cannot accept commands.
635		 */
636		err = -EIO;
637		goto out;
638	}
639
640	err = mlx4_cmd_post(dev, NULL, in_param, out_param ? *out_param : 0,
641			    in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0);
642	if (err)
643		goto out;
644
645	end = msecs_to_jiffies(timeout) + jiffies;
646	while (cmd_pending(dev) && time_before(jiffies, end)) {
647		if (pci_channel_offline(dev->pdev)) {
648			/*
649			 * Device is going through error recovery
650			 * and cannot accept commands.
651			 */
652			err = -EIO;
653			goto out;
654		}
655
656		cond_resched();
657	}
658
659	if (cmd_pending(dev)) {
660		mlx4_warn(dev, "command %s (0x%x) timed out (go bit not cleared)\n",
661			  cmd_to_str(op), op);
662		err = -ETIMEDOUT;
663		goto out;
664	}
665
666	if (out_is_imm)
667		*out_param =
668			(u64) be32_to_cpu((__force __be32)
669					  __raw_readl(hcr + HCR_OUT_PARAM_OFFSET)) << 32 |
670			(u64) be32_to_cpu((__force __be32)
671					  __raw_readl(hcr + HCR_OUT_PARAM_OFFSET + 4));
672	stat = be32_to_cpu((__force __be32)
673			   __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24;
674	err = mlx4_status_to_errno(stat);
675	if (err)
676		mlx4_err(dev, "command %s (0x%x) failed: fw status = 0x%x\n",
677			 cmd_to_str(op), op, stat);
678
679out:
680	up(&priv->cmd.poll_sem);
681	return err;
682}
683
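/*
 * Completion handler for event-mode commands: matches the token reported
 * by firmware to the waiting command context and wakes up the caller
 * sleeping in mlx4_cmd_wait().
 */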
684void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param)
685{
686	struct mlx4_priv *priv = mlx4_priv(dev);
687	struct mlx4_cmd_context *context =
688		&priv->cmd.context[token & priv->cmd.token_mask];
689
690	/* previously timed out command completing at long last */
691	if (token != context->token)
692		return;
693
694	context->fw_status = status;
695	context->result    = mlx4_status_to_errno(status);
696	context->out_param = out_param;
697
698	complete(&context->done);
699}
700
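/*
 * Execute a command in event mode: grab a free command context, post the
 * command with the event bit set, and sleep until mlx4_cmd_event()
 * completes the context or the timeout expires.
 */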
701static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
702			 int out_is_imm, u32 in_modifier, u8 op_modifier,
703			 u16 op, unsigned long timeout)
704{
705	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
706	struct mlx4_cmd_context *context;
707	int err = 0;
708	int go_bit = 0, t_bit = 0, stat_err;
709	u32 status = 0;
710	struct timespec	ts1, ts2;
711	ktime_t t1, t2, delta;
712	s64 ds;
713
714	if (out_is_imm && !out_param)
715		return -EINVAL;
716
717	down(&cmd->event_sem);
718
719	spin_lock(&cmd->context_lock);
720	BUG_ON(cmd->free_head < 0);
721	context = &cmd->context[cmd->free_head];
722	context->token += cmd->token_mask + 1;
723	cmd->free_head = context->next;
724	spin_unlock(&cmd->context_lock);
725
726	init_completion(&context->done);
727
728	err = mlx4_cmd_post(dev, &ts1, in_param, out_param ? *out_param : 0,
729			    in_modifier, op_modifier, op, context->token, 1);
730	if (err)
731		goto out;
732
733	if (!wait_for_completion_timeout(&context->done,
734					 msecs_to_jiffies(timeout))) {
735		stat_err = get_status(dev, &status, &go_bit, &t_bit);
736		mlx4_warn(dev, "command %s (0x%x) timed out: in_param=0x%llx, "
737			  "in_mod=0x%x, op_mod=0x%x, get_status err=%d, "
738			  "status_reg=0x%x, go_bit=%d, t_bit=%d, toggle=0x%x\n"
739			  , cmd_to_str(op), op, (unsigned long long) in_param, in_modifier,
740			  op_modifier, stat_err, status, go_bit, t_bit,
741			  mlx4_priv(dev)->cmd.toggle);
742		err = -EBUSY;
743		goto out;
744	}
745	if (mlx4_debug_level & MLX4_DEBUG_MASK_CMD_TIME) {
746		ktime_get_ts(&ts2);
747		t1 = timespec_to_ktime(ts1);
748		t2 = timespec_to_ktime(ts2);
749		delta = ktime_sub(t2, t1);
750		ds = ktime_to_ns(delta);
751		pr_info("mlx4: fw exec time for %s is %lld nsec\n", cmd_to_str(op), (long long) ds);
752	}
753
754	err = context->result;
755	if (err) {
756		mlx4_err(dev, "command %s (0x%x) failed: in_param=0x%llx, "
757			 "in_mod=0x%x, op_mod=0x%x, fw status = 0x%x\n",
758			 cmd_to_str(op), op, (unsigned long long) in_param, in_modifier,
759			 op_modifier, context->fw_status);
760
761		switch (context->fw_status) {
762		case CMD_STAT_BAD_PARAM:
763			mlx4_err(dev, "Parameter is not supported or is "
764			    "out of range\n");
765			break;
766		case CMD_STAT_EXCEED_LIM:
767			mlx4_err(dev, "Required capability exceeded "
768			    "device limits\n");
769			break;
770		default:
771			break;
772		}
773		goto out;
774	}
775
776	if (out_is_imm)
777		*out_param = context->out_param;
778
779out:
780	spin_lock(&cmd->context_lock);
781	context->next = cmd->free_head;
782	cmd->free_head = context - cmd->context;
783	spin_unlock(&cmd->context_lock);
784
785	up(&cmd->event_sem);
786	return err;
787}
788
789int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
790	       int out_is_imm, u32 in_modifier, u8 op_modifier,
791	       u16 op, unsigned long timeout, int native)
792{
793	if (pci_channel_offline(dev->pdev))
794		return -EIO;
795
796	if (!mlx4_is_mfunc(dev) || (native && mlx4_is_master(dev))) {
797		int ret;
798
799		down_read(&mlx4_priv(dev)->cmd.switch_sem);
800		if (mlx4_priv(dev)->cmd.use_events)
801			ret = mlx4_cmd_wait(dev, in_param, out_param,
802					    out_is_imm, in_modifier,
803					    op_modifier, op, timeout);
804		else
805			ret = mlx4_cmd_poll(dev, in_param, out_param,
806					    out_is_imm, in_modifier,
807					    op_modifier, op, timeout);
808		up_read(&mlx4_priv(dev)->cmd.switch_sem);
809		return ret;
810	}
811	return mlx4_slave_cmd(dev, in_param, out_param, out_is_imm,
812			      in_modifier, op_modifier, op, timeout);
813}
814EXPORT_SYMBOL_GPL(__mlx4_cmd);
815
816
817static int mlx4_ARM_COMM_CHANNEL(struct mlx4_dev *dev)
818{
819	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_ARM_COMM_CHANNEL,
820			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
821}
822
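/*
 * Copy a buffer between master and slave memory with the ACCESS_MEM
 * command; addresses must be 4KB aligned and the size a multiple of 256
 * bytes.  'is_read' selects the slave-to-master direction.
 */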
823static int mlx4_ACCESS_MEM(struct mlx4_dev *dev, u64 master_addr,
824			   int slave, u64 slave_addr,
825			   int size, int is_read)
826{
827	u64 in_param;
828	u64 out_param;
829
830	if ((slave_addr & 0xfff) | (master_addr & 0xfff) |
831	    (slave & ~0x7f) | (size & 0xff)) {
832		mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx "
833			      "master_addr:0x%llx slave_id:%d size:%d\n",
834			      (unsigned long long) slave_addr, (unsigned long long) master_addr, slave, size);
835		return -EINVAL;
836	}
837
838	if (is_read) {
839		in_param = (u64) slave | slave_addr;
840		out_param = (u64) dev->caps.function | master_addr;
841	} else {
842		in_param = (u64) dev->caps.function | master_addr;
843		out_param = (u64) slave | slave_addr;
844	}
845
846	return mlx4_cmd_imm(dev, in_param, &out_param, size, 0,
847			    MLX4_CMD_ACCESS_MEM,
848			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
849}
850
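/*
 * Read one 32-entry block of the pkey table through a MAD_IFC query and
 * convert the returned entries to host byte order.
 */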
851static int query_pkey_block(struct mlx4_dev *dev, u8 port, u16 index, u16 *pkey,
852			       struct mlx4_cmd_mailbox *inbox,
853			       struct mlx4_cmd_mailbox *outbox)
854{
855	struct ib_smp *in_mad = (struct ib_smp *)(inbox->buf);
856	struct ib_smp *out_mad = (struct ib_smp *)(outbox->buf);
857	int err;
858	int i;
859
860	if (index & 0x1f)
861		return -EINVAL;
862
863	in_mad->attr_mod = cpu_to_be32(index / 32);
864
865	err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3,
866			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
867			   MLX4_CMD_NATIVE);
868	if (err)
869		return err;
870
871	for (i = 0; i < 32; ++i)
872		pkey[i] = be16_to_cpu(((__be16 *) out_mad->data)[i]);
873
874	return err;
875}
876
877static int get_full_pkey_table(struct mlx4_dev *dev, u8 port, u16 *table,
878			       struct mlx4_cmd_mailbox *inbox,
879			       struct mlx4_cmd_mailbox *outbox)
880{
881	int i;
882	int err;
883
884	for (i = 0; i < dev->caps.pkey_table_len[port]; i += 32) {
885		err = query_pkey_block(dev, port, i, table + i, inbox, outbox);
886		if (err)
887			return err;
888	}
889
890	return 0;
891}
892#define PORT_CAPABILITY_LOCATION_IN_SMP 20
893#define PORT_STATE_OFFSET 32
894
895static enum ib_port_state vf_port_state(struct mlx4_dev *dev, int port, int vf)
896{
897	if (mlx4_get_slave_port_state(dev, vf, port) == SLAVE_PORT_UP)
898		return IB_PORT_ACTIVE;
899	else
900		return IB_PORT_DOWN;
901}
902
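/*
 * Paravirtualize MAD_IFC for slaves: remap pkey and GUID queries to the
 * slave's own view, override port state and capability mask in PortInfo
 * responses, and reject subnet-management SET MADs from non-master
 * functions.
 */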
903static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
904				struct mlx4_vhcr *vhcr,
905				struct mlx4_cmd_mailbox *inbox,
906				struct mlx4_cmd_mailbox *outbox,
907				struct mlx4_cmd_info *cmd)
908{
909	struct ib_smp *smp = inbox->buf;
910	u32 index;
911	u8 port;
912	u16 *table;
913	int err;
914	int vidx, pidx;
915	struct mlx4_priv *priv = mlx4_priv(dev);
916	struct ib_smp *outsmp = outbox->buf;
917	__be16 *outtab = (__be16 *)(outsmp->data);
918	__be32 slave_cap_mask;
919	__be64 slave_node_guid;
920	port = vhcr->in_modifier;
921
922	if (smp->base_version == 1 &&
923	    smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
924	    smp->class_version == 1) {
925		if (smp->method	== IB_MGMT_METHOD_GET) {
926			if (smp->attr_id == IB_SMP_ATTR_PKEY_TABLE) {
927				index = be32_to_cpu(smp->attr_mod);
928				if (port < 1 || port > dev->caps.num_ports)
929					return -EINVAL;
930				table = kcalloc(dev->caps.pkey_table_len[port], sizeof *table, GFP_KERNEL);
931				if (!table)
932					return -ENOMEM;
933				/* need to get the full pkey table because the paravirtualized
934				 * pkeys may be scattered among several pkey blocks.
935				 */
936				err = get_full_pkey_table(dev, port, table, inbox, outbox);
937				if (!err) {
938					for (vidx = index * 32; vidx < (index + 1) * 32; ++vidx) {
939						pidx = priv->virt2phys_pkey[slave][port - 1][vidx];
940						outtab[vidx % 32] = cpu_to_be16(table[pidx]);
941					}
942				}
943				kfree(table);
944				return err;
945			}
946			if (smp->attr_id == IB_SMP_ATTR_PORT_INFO) {
947				/* get the slave specific caps: */
948				/* do the command */
949				err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
950					    vhcr->in_modifier, vhcr->op_modifier,
951					    vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
952				/* modify the response for slaves */
953				if (!err && slave != mlx4_master_func_num(dev)) {
954					u8 *state = outsmp->data + PORT_STATE_OFFSET;
955
956					*state = (*state & 0xf0) | vf_port_state(dev, port, slave);
957					slave_cap_mask = priv->mfunc.master.slave_state[slave].ib_cap_mask[port];
958					memcpy(outsmp->data + PORT_CAPABILITY_LOCATION_IN_SMP, &slave_cap_mask, 4);
959				}
960				return err;
961			}
962			if (smp->attr_id == IB_SMP_ATTR_GUID_INFO) {
963				/* compute slave's gid block */
964				smp->attr_mod = cpu_to_be32(slave / 8);
965				/* execute cmd */
966				err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
967					     vhcr->in_modifier, vhcr->op_modifier,
968					     vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
969				if (!err) {
970					/* if needed, move slave gid to index 0 */
971					if (slave % 8)
972						memcpy(outsmp->data,
973						       outsmp->data + (slave % 8) * 8, 8);
974					/* delete all other gids */
975					memset(outsmp->data + 8, 0, 56);
976				}
977				return err;
978			}
979			if (smp->attr_id == IB_SMP_ATTR_NODE_INFO) {
980				err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
981					     vhcr->in_modifier, vhcr->op_modifier,
982					     vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
983				if (!err) {
984					slave_node_guid =  mlx4_get_slave_node_guid(dev, slave);
985					memcpy(outsmp->data + 12, &slave_node_guid, 8);
986				}
987				return err;
988			}
989		}
990	}
991	if (slave != mlx4_master_func_num(dev) &&
992	    ((smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) ||
993	     (smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
994	      smp->method == IB_MGMT_METHOD_SET))) {
995		mlx4_err(dev, "slave %d is trying to execute a Subnet MGMT MAD, "
996			 "class 0x%x, method 0x%x for attr 0x%x. Rejecting\n",
997			 slave, smp->mgmt_class, smp->method,
998			 be16_to_cpu(smp->attr_id));
999		return -EPERM;
1000	}
1001	/*default:*/
1002	return mlx4_cmd_box(dev, inbox->dma, outbox->dma,
1003				    vhcr->in_modifier, vhcr->op_modifier,
1004				    vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
1005}
1006
1007static int MLX4_CMD_DIAG_RPRT_wrapper(struct mlx4_dev *dev, int slave,
1008		     struct mlx4_vhcr *vhcr,
1009		     struct mlx4_cmd_mailbox *inbox,
1010		     struct mlx4_cmd_mailbox *outbox,
1011		     struct mlx4_cmd_info *cmd)
1012{
1013	return -EPERM;
1014}
1015
1016static int MLX4_CMD_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
1017		     struct mlx4_vhcr *vhcr,
1018		     struct mlx4_cmd_mailbox *inbox,
1019		     struct mlx4_cmd_mailbox *outbox,
1020		     struct mlx4_cmd_info *cmd)
1021{
1022	return -EPERM;
1023}
1024
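/*
 * Generic wrapper that re-issues a slave's command natively on the
 * master, substituting the master-side mailbox DMA addresses and, when
 * requested, encoding the slave id into the input parameter.
 */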
1025int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave,
1026		     struct mlx4_vhcr *vhcr,
1027		     struct mlx4_cmd_mailbox *inbox,
1028		     struct mlx4_cmd_mailbox *outbox,
1029		     struct mlx4_cmd_info *cmd)
1030{
1031	u64 in_param;
1032	u64 out_param;
1033	int err;
1034
1035	in_param = cmd->has_inbox ? (u64) inbox->dma : vhcr->in_param;
1036	out_param = cmd->has_outbox ? (u64) outbox->dma : vhcr->out_param;
1037	if (cmd->encode_slave_id) {
1038		in_param &= 0xffffffffffffff00ll;
1039		in_param |= slave;
1040	}
1041
1042	err = __mlx4_cmd(dev, in_param, &out_param, cmd->out_is_imm,
1043			 vhcr->in_modifier, vhcr->op_modifier, vhcr->op,
1044			 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1045
1046	if (cmd->out_is_imm)
1047		vhcr->out_param = out_param;
1048
1049	return err;
1050}
1051
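/*
 * Table of commands the master is willing to process on behalf of
 * slaves.  Each entry describes the mailbox layout, whether the result
 * is returned as an immediate value, and optional verify/wrapper hooks.
 */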
1052static struct mlx4_cmd_info cmd_info[] = {
1053	{
1054		.opcode = MLX4_CMD_QUERY_FW,
1055		.has_inbox = false,
1056		.has_outbox = true,
1057		.out_is_imm = false,
1058		.encode_slave_id = false,
1059		.verify = NULL,
1060		.wrapper = mlx4_QUERY_FW_wrapper
1061	},
1062	{
1063		.opcode = MLX4_CMD_QUERY_HCA,
1064		.has_inbox = false,
1065		.has_outbox = true,
1066		.out_is_imm = false,
1067		.encode_slave_id = false,
1068		.verify = NULL,
1069		.wrapper = NULL
1070	},
1071	{
1072		.opcode = MLX4_CMD_QUERY_DEV_CAP,
1073		.has_inbox = false,
1074		.has_outbox = true,
1075		.out_is_imm = false,
1076		.encode_slave_id = false,
1077		.verify = NULL,
1078		.wrapper = mlx4_QUERY_DEV_CAP_wrapper
1079	},
1080	{
1081		.opcode = MLX4_CMD_QUERY_FUNC_CAP,
1082		.has_inbox = false,
1083		.has_outbox = true,
1084		.out_is_imm = false,
1085		.encode_slave_id = false,
1086		.verify = NULL,
1087		.wrapper = mlx4_QUERY_FUNC_CAP_wrapper
1088	},
1089	{
1090		.opcode = MLX4_CMD_QUERY_ADAPTER,
1091		.has_inbox = false,
1092		.has_outbox = true,
1093		.out_is_imm = false,
1094		.encode_slave_id = false,
1095		.verify = NULL,
1096		.wrapper = NULL
1097	},
1098	{
1099		.opcode = MLX4_CMD_INIT_PORT,
1100		.has_inbox = false,
1101		.has_outbox = false,
1102		.out_is_imm = false,
1103		.encode_slave_id = false,
1104		.verify = NULL,
1105		.wrapper = mlx4_INIT_PORT_wrapper
1106	},
1107	{
1108		.opcode = MLX4_CMD_CLOSE_PORT,
1109		.has_inbox = false,
1110		.has_outbox = false,
1111		.out_is_imm  = false,
1112		.encode_slave_id = false,
1113		.verify = NULL,
1114		.wrapper = mlx4_CLOSE_PORT_wrapper
1115	},
1116	{
1117		.opcode = MLX4_CMD_QUERY_PORT,
1118		.has_inbox = false,
1119		.has_outbox = true,
1120		.out_is_imm = false,
1121		.encode_slave_id = false,
1122		.verify = NULL,
1123		.wrapper = mlx4_QUERY_PORT_wrapper
1124	},
1125	{
1126		.opcode = MLX4_CMD_SET_PORT,
1127		.has_inbox = true,
1128		.has_outbox = false,
1129		.out_is_imm = false,
1130		.encode_slave_id = false,
1131		.verify = NULL,
1132		.wrapper = mlx4_SET_PORT_wrapper
1133	},
1134	{
1135		.opcode = MLX4_CMD_MAP_EQ,
1136		.has_inbox = false,
1137		.has_outbox = false,
1138		.out_is_imm = false,
1139		.encode_slave_id = false,
1140		.verify = NULL,
1141		.wrapper = mlx4_MAP_EQ_wrapper
1142	},
1143	{
1144		.opcode = MLX4_CMD_SW2HW_EQ,
1145		.has_inbox = true,
1146		.has_outbox = false,
1147		.out_is_imm = false,
1148		.encode_slave_id = true,
1149		.verify = NULL,
1150		.wrapper = mlx4_SW2HW_EQ_wrapper
1151	},
1152	{
1153		.opcode = MLX4_CMD_HW_HEALTH_CHECK,
1154		.has_inbox = false,
1155		.has_outbox = false,
1156		.out_is_imm = false,
1157		.encode_slave_id = false,
1158		.verify = NULL,
1159		.wrapper = NULL
1160	},
1161	{
1162		.opcode = MLX4_CMD_DIAG_RPRT,
1163		.has_inbox = false,
1164		.has_outbox = false,
1165		.out_is_imm = false,
1166		.encode_slave_id = false,
1167		.skip_err_print = true,
1168		.verify = NULL,
1169		.wrapper = MLX4_CMD_DIAG_RPRT_wrapper
1170	},
1171	{
1172		.opcode = MLX4_CMD_NOP,
1173		.has_inbox = false,
1174		.has_outbox = false,
1175		.out_is_imm = false,
1176		.encode_slave_id = false,
1177		.verify = NULL,
1178		.wrapper = NULL
1179	},
1180	{
1181		.opcode = MLX4_CMD_ALLOC_RES,
1182		.has_inbox = false,
1183		.has_outbox = false,
1184		.out_is_imm = true,
1185		.encode_slave_id = false,
1186		.verify = NULL,
1187		.wrapper = mlx4_ALLOC_RES_wrapper
1188	},
1189	{
1190		.opcode = MLX4_CMD_FREE_RES,
1191		.has_inbox = false,
1192		.has_outbox = false,
1193		.out_is_imm = false,
1194		.encode_slave_id = false,
1195		.verify = NULL,
1196		.wrapper = mlx4_FREE_RES_wrapper
1197	},
1198	{
1199		.opcode = MLX4_CMD_SW2HW_MPT,
1200		.has_inbox = true,
1201		.has_outbox = false,
1202		.out_is_imm = false,
1203		.encode_slave_id = true,
1204		.verify = NULL,
1205		.wrapper = mlx4_SW2HW_MPT_wrapper
1206	},
1207	{
1208		.opcode = MLX4_CMD_QUERY_MPT,
1209		.has_inbox = false,
1210		.has_outbox = true,
1211		.out_is_imm = false,
1212		.encode_slave_id = false,
1213		.verify = NULL,
1214		.wrapper = mlx4_QUERY_MPT_wrapper
1215	},
1216	{
1217		.opcode = MLX4_CMD_HW2SW_MPT,
1218		.has_inbox = false,
1219		.has_outbox = false,
1220		.out_is_imm = false,
1221		.encode_slave_id = false,
1222		.verify = NULL,
1223		.wrapper = mlx4_HW2SW_MPT_wrapper
1224	},
1225	{
1226		.opcode = MLX4_CMD_READ_MTT,
1227		.has_inbox = false,
1228		.has_outbox = true,
1229		.out_is_imm = false,
1230		.encode_slave_id = false,
1231		.verify = NULL,
1232		.wrapper = NULL
1233	},
1234	{
1235		.opcode = MLX4_CMD_WRITE_MTT,
1236		.has_inbox = true,
1237		.has_outbox = false,
1238		.out_is_imm = false,
1239		.encode_slave_id = false,
1240		.verify = NULL,
1241		.wrapper = mlx4_WRITE_MTT_wrapper
1242	},
1243	{
1244		.opcode = MLX4_CMD_SYNC_TPT,
1245		.has_inbox = true,
1246		.has_outbox = false,
1247		.out_is_imm = false,
1248		.encode_slave_id = false,
1249		.verify = NULL,
1250		.wrapper = NULL
1251	},
1252	{
1253		.opcode = MLX4_CMD_HW2SW_EQ,
1254		.has_inbox = false,
1255		.has_outbox = true,
1256		.out_is_imm = false,
1257		.encode_slave_id = true,
1258		.verify = NULL,
1259		.wrapper = mlx4_HW2SW_EQ_wrapper
1260	},
1261	{
1262		.opcode = MLX4_CMD_QUERY_EQ,
1263		.has_inbox = false,
1264		.has_outbox = true,
1265		.out_is_imm = false,
1266		.encode_slave_id = true,
1267		.verify = NULL,
1268		.wrapper = mlx4_QUERY_EQ_wrapper
1269	},
1270	{
1271		.opcode = MLX4_CMD_SW2HW_CQ,
1272		.has_inbox = true,
1273		.has_outbox = false,
1274		.out_is_imm = false,
1275		.encode_slave_id = true,
1276		.verify = NULL,
1277		.wrapper = mlx4_SW2HW_CQ_wrapper
1278	},
1279	{
1280		.opcode = MLX4_CMD_HW2SW_CQ,
1281		.has_inbox = false,
1282		.has_outbox = false,
1283		.out_is_imm = false,
1284		.encode_slave_id = false,
1285		.verify = NULL,
1286		.wrapper = mlx4_HW2SW_CQ_wrapper
1287	},
1288	{
1289		.opcode = MLX4_CMD_QUERY_CQ,
1290		.has_inbox = false,
1291		.has_outbox = true,
1292		.out_is_imm = false,
1293		.encode_slave_id = false,
1294		.verify = NULL,
1295		.wrapper = mlx4_QUERY_CQ_wrapper
1296	},
1297	{
1298		.opcode = MLX4_CMD_MODIFY_CQ,
1299		.has_inbox = true,
1300		.has_outbox = false,
1301		.out_is_imm = true,
1302		.encode_slave_id = false,
1303		.verify = NULL,
1304		.wrapper = mlx4_MODIFY_CQ_wrapper
1305	},
1306	{
1307		.opcode = MLX4_CMD_SW2HW_SRQ,
1308		.has_inbox = true,
1309		.has_outbox = false,
1310		.out_is_imm = false,
1311		.encode_slave_id = true,
1312		.verify = NULL,
1313		.wrapper = mlx4_SW2HW_SRQ_wrapper
1314	},
1315	{
1316		.opcode = MLX4_CMD_HW2SW_SRQ,
1317		.has_inbox = false,
1318		.has_outbox = false,
1319		.out_is_imm = false,
1320		.encode_slave_id = false,
1321		.verify = NULL,
1322		.wrapper = mlx4_HW2SW_SRQ_wrapper
1323	},
1324	{
1325		.opcode = MLX4_CMD_QUERY_SRQ,
1326		.has_inbox = false,
1327		.has_outbox = true,
1328		.out_is_imm = false,
1329		.encode_slave_id = false,
1330		.verify = NULL,
1331		.wrapper = mlx4_QUERY_SRQ_wrapper
1332	},
1333	{
1334		.opcode = MLX4_CMD_ARM_SRQ,
1335		.has_inbox = false,
1336		.has_outbox = false,
1337		.out_is_imm = false,
1338		.encode_slave_id = false,
1339		.verify = NULL,
1340		.wrapper = mlx4_ARM_SRQ_wrapper
1341	},
1342	{
1343		.opcode = MLX4_CMD_RST2INIT_QP,
1344		.has_inbox = true,
1345		.has_outbox = false,
1346		.out_is_imm = false,
1347		.encode_slave_id = true,
1348		.verify = NULL,
1349		.wrapper = mlx4_RST2INIT_QP_wrapper
1350	},
1351	{
1352		.opcode = MLX4_CMD_INIT2INIT_QP,
1353		.has_inbox = true,
1354		.has_outbox = false,
1355		.out_is_imm = false,
1356		.encode_slave_id = false,
1357		.verify = NULL,
1358		.wrapper = mlx4_INIT2INIT_QP_wrapper
1359	},
1360	{
1361		.opcode = MLX4_CMD_INIT2RTR_QP,
1362		.has_inbox = true,
1363		.has_outbox = false,
1364		.out_is_imm = false,
1365		.encode_slave_id = false,
1366		.verify = NULL,
1367		.wrapper = mlx4_INIT2RTR_QP_wrapper
1368	},
1369	{
1370		.opcode = MLX4_CMD_RTR2RTS_QP,
1371		.has_inbox = true,
1372		.has_outbox = false,
1373		.out_is_imm = false,
1374		.encode_slave_id = false,
1375		.verify = NULL,
1376		.wrapper = mlx4_RTR2RTS_QP_wrapper
1377	},
1378	{
1379		.opcode = MLX4_CMD_RTS2RTS_QP,
1380		.has_inbox = true,
1381		.has_outbox = false,
1382		.out_is_imm = false,
1383		.encode_slave_id = false,
1384		.verify = NULL,
1385		.wrapper = mlx4_RTS2RTS_QP_wrapper
1386	},
1387	{
1388		.opcode = MLX4_CMD_SQERR2RTS_QP,
1389		.has_inbox = true,
1390		.has_outbox = false,
1391		.out_is_imm = false,
1392		.encode_slave_id = false,
1393		.verify = NULL,
1394		.wrapper = mlx4_SQERR2RTS_QP_wrapper
1395	},
1396	{
1397		.opcode = MLX4_CMD_2ERR_QP,
1398		.has_inbox = false,
1399		.has_outbox = false,
1400		.out_is_imm = false,
1401		.encode_slave_id = false,
1402		.verify = NULL,
1403		.wrapper = mlx4_GEN_QP_wrapper
1404	},
1405	{
1406		.opcode = MLX4_CMD_RTS2SQD_QP,
1407		.has_inbox = false,
1408		.has_outbox = false,
1409		.out_is_imm = false,
1410		.encode_slave_id = false,
1411		.verify = NULL,
1412		.wrapper = mlx4_GEN_QP_wrapper
1413	},
1414	{
1415		.opcode = MLX4_CMD_SQD2SQD_QP,
1416		.has_inbox = true,
1417		.has_outbox = false,
1418		.out_is_imm = false,
1419		.encode_slave_id = false,
1420		.verify = NULL,
1421		.wrapper = mlx4_SQD2SQD_QP_wrapper
1422	},
1423	{
1424		.opcode = MLX4_CMD_SQD2RTS_QP,
1425		.has_inbox = true,
1426		.has_outbox = false,
1427		.out_is_imm = false,
1428		.encode_slave_id = false,
1429		.verify = NULL,
1430		.wrapper = mlx4_SQD2RTS_QP_wrapper
1431	},
1432	{
1433		.opcode = MLX4_CMD_2RST_QP,
1434		.has_inbox = false,
1435		.has_outbox = false,
1436		.out_is_imm = false,
1437		.encode_slave_id = false,
1438		.verify = NULL,
1439		.wrapper = mlx4_2RST_QP_wrapper
1440	},
1441	{
1442		.opcode = MLX4_CMD_QUERY_QP,
1443		.has_inbox = false,
1444		.has_outbox = true,
1445		.out_is_imm = false,
1446		.encode_slave_id = false,
1447		.verify = NULL,
1448		.wrapper = mlx4_GEN_QP_wrapper
1449	},
1450	{
1451		.opcode = MLX4_CMD_SUSPEND_QP,
1452		.has_inbox = false,
1453		.has_outbox = false,
1454		.out_is_imm = false,
1455		.encode_slave_id = false,
1456		.verify = NULL,
1457		.wrapper = mlx4_GEN_QP_wrapper
1458	},
1459	{
1460		.opcode = MLX4_CMD_UNSUSPEND_QP,
1461		.has_inbox = false,
1462		.has_outbox = false,
1463		.out_is_imm = false,
1464		.encode_slave_id = false,
1465		.verify = NULL,
1466		.wrapper = mlx4_GEN_QP_wrapper
1467	},
1468	{
1469		.opcode = MLX4_CMD_UPDATE_QP,
1470		.has_inbox = false,
1471		.has_outbox = false,
1472		.out_is_imm = false,
1473		.encode_slave_id = false,
1474		.skip_err_print = true,
1475		.verify = NULL,
1476		.wrapper = MLX4_CMD_UPDATE_QP_wrapper
1477	},
1478	{
1479		.opcode = MLX4_CMD_CONF_SPECIAL_QP,
1480		.has_inbox = false,
1481		.has_outbox = false,
1482		.out_is_imm = false,
1483		.encode_slave_id = false,
1484		.verify = NULL, /* XXX verify: only demux can do this */
1485		.wrapper = NULL
1486	},
1487	{
1488		.opcode = MLX4_CMD_MAD_IFC,
1489		.has_inbox = true,
1490		.has_outbox = true,
1491		.out_is_imm = false,
1492		.encode_slave_id = false,
1493		.verify = NULL,
1494		.wrapper = mlx4_MAD_IFC_wrapper
1495	},
1496	{
1497		.opcode = MLX4_CMD_QUERY_IF_STAT,
1498		.has_inbox = false,
1499		.has_outbox = true,
1500		.out_is_imm = false,
1501		.encode_slave_id = false,
1502		.verify = NULL,
1503		.wrapper = mlx4_QUERY_IF_STAT_wrapper
1504	},
1505	/* Native multicast commands are not available for guests */
1506	{
1507		.opcode = MLX4_CMD_QP_ATTACH,
1508		.has_inbox = true,
1509		.has_outbox = false,
1510		.out_is_imm = false,
1511		.encode_slave_id = false,
1512		.verify = NULL,
1513		.wrapper = mlx4_QP_ATTACH_wrapper
1514	},
1515	{
1516		.opcode = MLX4_CMD_PROMISC,
1517		.has_inbox = false,
1518		.has_outbox = false,
1519		.out_is_imm = false,
1520		.encode_slave_id = false,
1521		.verify = NULL,
1522		.wrapper = mlx4_PROMISC_wrapper
1523	},
1524	/* Ethernet specific commands */
1525	{
1526		.opcode = MLX4_CMD_SET_VLAN_FLTR,
1527		.has_inbox = true,
1528		.has_outbox = false,
1529		.out_is_imm = false,
1530		.encode_slave_id = false,
1531		.verify = NULL,
1532		.wrapper = mlx4_SET_VLAN_FLTR_wrapper
1533	},
1534	{
1535		.opcode = MLX4_CMD_SET_MCAST_FLTR,
1536		.has_inbox = false,
1537		.has_outbox = false,
1538		.out_is_imm = false,
1539		.encode_slave_id = false,
1540		.verify = NULL,
1541		.wrapper = mlx4_SET_MCAST_FLTR_wrapper
1542	},
1543	{
1544		.opcode = MLX4_CMD_DUMP_ETH_STATS,
1545		.has_inbox = false,
1546		.has_outbox = true,
1547		.out_is_imm = false,
1548		.encode_slave_id = false,
1549		.verify = NULL,
1550		.wrapper = mlx4_DUMP_ETH_STATS_wrapper
1551	},
1552	{
1553		.opcode = MLX4_CMD_INFORM_FLR_DONE,
1554		.has_inbox = false,
1555		.has_outbox = false,
1556		.out_is_imm = false,
1557		.encode_slave_id = false,
1558		.verify = NULL,
1559		.wrapper = NULL
1560	},
1561	/* flow steering commands */
1562	{
1563		.opcode = MLX4_QP_FLOW_STEERING_ATTACH,
1564		.has_inbox = true,
1565		.has_outbox = false,
1566		.out_is_imm = true,
1567		.encode_slave_id = false,
1568		.verify = NULL,
1569		.wrapper = mlx4_QP_FLOW_STEERING_ATTACH_wrapper
1570	},
1571	{
1572		.opcode = MLX4_QP_FLOW_STEERING_DETACH,
1573		.has_inbox = false,
1574		.has_outbox = false,
1575		.out_is_imm = false,
1576		.encode_slave_id = false,
1577		.verify = NULL,
1578		.wrapper = mlx4_QP_FLOW_STEERING_DETACH_wrapper
1579	},
1580	/* wol commands */
1581	{
1582		.opcode = MLX4_CMD_MOD_STAT_CFG,
1583		.has_inbox = false,
1584		.has_outbox = false,
1585		.out_is_imm = false,
1586		.encode_slave_id = false,
1587		.skip_err_print = true,
1588		.verify = NULL,
1589		.wrapper = mlx4_MOD_STAT_CFG_wrapper
1590	},
1591};
1592
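/*
 * Process one virtual HCR command from a slave: DMA in the VHCR and
 * inbox, look the opcode up in cmd_info[], run the verify and wrapper
 * hooks (or execute natively), then DMA the outbox and status back.
 */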
1593static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
1594				    struct mlx4_vhcr_cmd *in_vhcr)
1595{
1596	struct mlx4_priv *priv = mlx4_priv(dev);
1597	struct mlx4_cmd_info *cmd = NULL;
1598	struct mlx4_vhcr_cmd *vhcr_cmd = in_vhcr ? in_vhcr : priv->mfunc.vhcr;
1599	struct mlx4_vhcr *vhcr;
1600	struct mlx4_cmd_mailbox *inbox = NULL;
1601	struct mlx4_cmd_mailbox *outbox = NULL;
1602	u64 in_param;
1603	u64 out_param;
1604	int ret = 0;
1605	int i;
1606	int err = 0;
1607
1608	/* Create sw representation of Virtual HCR */
1609	vhcr = kzalloc(sizeof(struct mlx4_vhcr), GFP_KERNEL);
1610	if (!vhcr)
1611		return -ENOMEM;
1612
1613	/* DMA in the vHCR */
1614	if (!in_vhcr) {
1615		ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
1616				      priv->mfunc.master.slave_state[slave].vhcr_dma,
1617				      ALIGN(sizeof(struct mlx4_vhcr_cmd),
1618					    MLX4_ACCESS_MEM_ALIGN), 1);
1619		if (ret) {
1620			mlx4_err(dev, "%s: Failed reading vhcr, "
1621				 "ret: 0x%x\n", __func__, ret);
1622			kfree(vhcr);
1623			return ret;
1624		}
1625	}
1626
1627	/* Fill SW VHCR fields */
1628	vhcr->in_param = be64_to_cpu(vhcr_cmd->in_param);
1629	vhcr->out_param = be64_to_cpu(vhcr_cmd->out_param);
1630	vhcr->in_modifier = be32_to_cpu(vhcr_cmd->in_modifier);
1631	vhcr->token = be16_to_cpu(vhcr_cmd->token);
1632	vhcr->op = be16_to_cpu(vhcr_cmd->opcode) & 0xfff;
1633	vhcr->op_modifier = (u8) (be16_to_cpu(vhcr_cmd->opcode) >> 12);
1634	vhcr->e_bit = vhcr_cmd->flags & (1 << 6);
1635
1636	/* Lookup command */
1637	for (i = 0; i < ARRAY_SIZE(cmd_info); ++i) {
1638		if (vhcr->op == cmd_info[i].opcode) {
1639			cmd = &cmd_info[i];
1640			break;
1641		}
1642	}
1643	if (!cmd) {
1644		mlx4_err(dev, "unparavirtualized command %s (0x%x) received from slave %d\n",
1645			 cmd_to_str(vhcr->op), vhcr->op, slave);
1646		vhcr_cmd->status = CMD_STAT_BAD_PARAM;
1647		goto out_status;
1648	}
1649
1650	/* Read inbox */
1651	if (cmd->has_inbox) {
1652		vhcr->in_param &= INBOX_MASK;
1653		inbox = mlx4_alloc_cmd_mailbox(dev);
1654		if (IS_ERR(inbox)) {
1655			vhcr_cmd->status = CMD_STAT_BAD_SIZE;
1656			inbox = NULL;
1657			goto out_status;
1658		}
1659
1660		if (mlx4_ACCESS_MEM(dev, inbox->dma, slave,
1661				    vhcr->in_param,
1662				    MLX4_MAILBOX_SIZE, 1)) {
1663			mlx4_err(dev, "%s: Failed reading inbox for cmd %s (0x%x)\n",
1664				 __func__, cmd_to_str(cmd->opcode), cmd->opcode);
1665			vhcr_cmd->status = CMD_STAT_INTERNAL_ERR;
1666			goto out_status;
1667		}
1668	}
1669
1670	/* Apply permission and bound checks if applicable */
1671	if (cmd->verify && cmd->verify(dev, slave, vhcr, inbox)) {
1672		mlx4_warn(dev, "Command %s (0x%x) from slave: %d failed protection "
1673			  "checks for resource_id: %d\n", cmd_to_str(vhcr->op),
1674			  vhcr->op, slave, vhcr->in_modifier);
1675		vhcr_cmd->status = CMD_STAT_BAD_OP;
1676		goto out_status;
1677	}
1678
1679	/* Allocate outbox */
1680	if (cmd->has_outbox) {
1681		outbox = mlx4_alloc_cmd_mailbox(dev);
1682		if (IS_ERR(outbox)) {
1683			vhcr_cmd->status = CMD_STAT_BAD_SIZE;
1684			outbox = NULL;
1685			goto out_status;
1686		}
1687	}
1688
1689	/* Execute the command! */
1690	if (cmd->wrapper) {
1691		err = cmd->wrapper(dev, slave, vhcr, inbox, outbox,
1692				   cmd);
1693		if (cmd->out_is_imm)
1694			vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
1695	} else {
1696		in_param = cmd->has_inbox ? (u64) inbox->dma :
1697			vhcr->in_param;
1698		out_param = cmd->has_outbox ? (u64) outbox->dma :
1699			vhcr->out_param;
1700		err = __mlx4_cmd(dev, in_param, &out_param,
1701				 cmd->out_is_imm, vhcr->in_modifier,
1702				 vhcr->op_modifier, vhcr->op,
1703				 MLX4_CMD_TIME_CLASS_A,
1704				 MLX4_CMD_NATIVE);
1705
1706		if (cmd->out_is_imm) {
1707			vhcr->out_param = out_param;
1708			vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
1709		}
1710	}
1711
1712	if (err) {
1713		if (!cmd->skip_err_print)
1714			mlx4_warn(dev, "vhcr command %s (0x%x) slave:%d "
1715				  "in_param 0x%llx in_mod=0x%x, op_mod=0x%x "
1716				  "failed with error:%d, status %d\n",
1717				  cmd_to_str(vhcr->op), vhcr->op, slave,
1718				  (unsigned long long) vhcr->in_param, vhcr->in_modifier,
1719				  vhcr->op_modifier, vhcr->errno, err);
1720		vhcr_cmd->status = mlx4_errno_to_status(err);
1721		goto out_status;
1722	}
1723
1724
1725	/* Write outbox if command completed successfully */
1726	if (cmd->has_outbox && !vhcr_cmd->status) {
1727		ret = mlx4_ACCESS_MEM(dev, outbox->dma, slave,
1728				      vhcr->out_param,
1729				      MLX4_MAILBOX_SIZE, MLX4_CMD_WRAPPED);
1730		if (ret) {
1731			/* If we failed to write back the outbox after the
1732			 * command was successfully executed, we must fail this
1733			 * slave, as it is now in an undefined state */
1734			mlx4_err(dev, "%s: Failed writing outbox\n", __func__);
1735			goto out;
1736		}
1737	}
1738
1739out_status:
1740	/* DMA back vhcr result */
1741	if (!in_vhcr) {
1742		ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
1743				      priv->mfunc.master.slave_state[slave].vhcr_dma,
1744				      ALIGN(sizeof(struct mlx4_vhcr),
1745					    MLX4_ACCESS_MEM_ALIGN),
1746				      MLX4_CMD_WRAPPED);
1747		if (ret)
1748			mlx4_err(dev, "%s: Failed writing vhcr result\n",
1749				 __func__);
1750		else if (vhcr->e_bit &&
1751			 mlx4_GEN_EQE(dev, slave, &priv->mfunc.master.cmd_eqe))
1752				mlx4_warn(dev, "Failed to generate command completion "
1753					  "eqe for slave %d\n", slave);
1754	}
1755
1756out:
1757	kfree(vhcr);
1758	mlx4_free_cmd_mailbox(dev, inbox);
1759	mlx4_free_cmd_mailbox(dev, outbox);
1760	return ret;
1761}
1762
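/*
 * Apply an updated default VLAN/QoS admin setting to a running VF:
 * register the new VLAN if needed and queue a work item that updates
 * the slave's QPs so the change takes effect immediately.
 */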
1763static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
1764					    int slave, int port)
1765{
1766	struct mlx4_vport_oper_state *vp_oper;
1767	struct mlx4_vport_state *vp_admin;
1768	struct mlx4_vf_immed_vlan_work *work;
1769	int err;
1770	int admin_vlan_ix = NO_INDX;
1771
1772	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
1773	vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
1774
1775	if (vp_oper->state.default_vlan == vp_admin->default_vlan &&
1776	    vp_oper->state.default_qos == vp_admin->default_qos)
1777		return 0;
1778
1779	work = kzalloc(sizeof(*work), GFP_KERNEL);
1780	if (!work)
1781		return -ENOMEM;
1782
1783	if (vp_oper->state.default_vlan != vp_admin->default_vlan) {
1784		if (MLX4_VGT != vp_admin->default_vlan) {
1785			err = __mlx4_register_vlan(&priv->dev, port,
1786						   vp_admin->default_vlan,
1787						   &admin_vlan_ix);
1788			if (err) {
1789				mlx4_warn((&priv->dev),
1790					  "No vlan resources slave %d, port %d\n",
1791					  slave, port);
1792				kfree(work);
1793				return err;
1794			}
1795		} else {
1796			admin_vlan_ix = NO_INDX;
1797		}
1798		work->flags |= MLX4_VF_IMMED_VLAN_FLAG_VLAN;
1799		mlx4_dbg((&(priv->dev)),
1800			 "alloc vlan %d idx  %d slave %d port %d\n",
1801			 (int)(vp_admin->default_vlan),
1802			 admin_vlan_ix, slave, port);
1803	}
1804
1805	/* save original vlan ix and vlan id */
1806	work->orig_vlan_id = vp_oper->state.default_vlan;
1807	work->orig_vlan_ix = vp_oper->vlan_idx;
1808
1809	/* handle new qos */
1810	if (vp_oper->state.default_qos != vp_admin->default_qos)
1811		work->flags |= MLX4_VF_IMMED_VLAN_FLAG_QOS;
1812
1813	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN)
1814		vp_oper->vlan_idx = admin_vlan_ix;
1815
1816	vp_oper->state.default_vlan = vp_admin->default_vlan;
1817	vp_oper->state.default_qos = vp_admin->default_qos;
1818
1819	/* iterate over QPs owned by this slave, using UPDATE_QP */
1820	work->port = port;
1821	work->slave = slave;
1822	work->qos = vp_oper->state.default_qos;
1823	work->vlan_id = vp_oper->state.default_vlan;
1824	work->vlan_ix = vp_oper->vlan_idx;
1825	work->priv = priv;
1826	INIT_WORK(&work->work, mlx4_vf_immed_vlan_work_handler);
1827	queue_work(priv->mfunc.master.comm_wq, &work->work);
1828
1829	return 0;
1830}
1831
1832
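/*
 * Copy the admin (host-configured) vport state into the operational
 * state for every port of a slave that is starting up, registering VLAN
 * and (when spoof checking is enabled) MAC resources as required.
 */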
1833static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
1834{
1835	int port, err;
1836	struct mlx4_vport_state *vp_admin;
1837	struct mlx4_vport_oper_state *vp_oper;
1838
1839	for (port = 1; port <= MLX4_MAX_PORTS; port++) {
1840		vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
1841		vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
1842		vp_oper->state = *vp_admin;
1843		if (MLX4_VGT != vp_admin->default_vlan) {
1844			err = __mlx4_register_vlan(&priv->dev, port,
1845						 vp_admin->default_vlan, &(vp_oper->vlan_idx));
1846			if (err) {
1847				vp_oper->vlan_idx = NO_INDX;
1848				mlx4_warn((&priv->dev),
1849					  "No vlan resources slave %d, port %d\n",
1850					  slave, port);
1851				return err;
1852			}
1853			mlx4_dbg((&(priv->dev)), "alloc vlan %d idx  %d slave %d port %d\n",
1854				 (int)(vp_oper->state.default_vlan),
1855				 vp_oper->vlan_idx, slave, port);
1856		}
1857		if (vp_admin->spoofchk) {
1858			vp_oper->mac_idx = __mlx4_register_mac(&priv->dev,
1859							       port,
1860							       vp_admin->mac);
1861			if (0 > vp_oper->mac_idx) {
1862				err = vp_oper->mac_idx;
1863				vp_oper->mac_idx = NO_INDX;
				mlx4_warn(&priv->dev,
					  "No mac resources for slave %d, port %d\n",
					  slave, port);
1867				return err;
1868			}
			mlx4_dbg(&priv->dev, "alloc mac %llx idx %d slave %d port %d\n",
				 (unsigned long long) vp_oper->state.mac, vp_oper->mac_idx, slave, port);
1871		}
1872	}
1873	return 0;
1874}
1875
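/*
 * Release the per-port VLAN and MAC resources that were registered by
 * mlx4_master_activate_admin_state() for this slave.
 */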
1876static void mlx4_master_deactivate_admin_state(struct mlx4_priv *priv, int slave)
1877{
1878	int port;
1879	struct mlx4_vport_oper_state *vp_oper;
1880
1881	for (port = 1; port <= MLX4_MAX_PORTS; port++) {
1882		vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
1883		if (NO_INDX != vp_oper->vlan_idx) {
1884			__mlx4_unregister_vlan(&priv->dev,
1885					       port, vp_oper->state.default_vlan);
1886			vp_oper->vlan_idx = NO_INDX;
1887		}
1888		if (NO_INDX != vp_oper->mac_idx) {
1889			__mlx4_unregister_mac(&priv->dev, port, vp_oper->state.mac);
1890			vp_oper->mac_idx = NO_INDX;
1891		}
1892	}
1894}
1895
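/*
 * Handle a single command that a slave posted on the comm channel. The
 * slave hands over its VHCR DMA address 16 bits at a time (VHCR0, VHCR1,
 * VHCR2, then VHCR_EN, which also activates the slave), after which every
 * VHCR_POST asks the master to execute the command described in that VHCR.
 * The master acknowledges by flipping the toggle bit (bit 31) that it
 * writes back to slave_read.
 */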
1896static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
1897			       u16 param, u8 toggle)
1898{
1899	struct mlx4_priv *priv = mlx4_priv(dev);
1900	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
1901	u32 reply;
1902	u8 is_going_down = 0;
1903	int i;
1904	unsigned long flags;
1905
1906	slave_state[slave].comm_toggle ^= 1;
1907	reply = (u32) slave_state[slave].comm_toggle << 31;
1908	if (toggle != slave_state[slave].comm_toggle) {
		mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER STATE COMPROMISED ***\n",
			  toggle, slave);
1911		goto reset_slave;
1912	}
1913	if (cmd == MLX4_COMM_CMD_RESET) {
1914		mlx4_warn(dev, "Received reset from slave:%d\n", slave);
1915		slave_state[slave].active = false;
1916		slave_state[slave].old_vlan_api = false;
1917		mlx4_master_deactivate_admin_state(priv, slave);
1918		for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i) {
			slave_state[slave].event_eq[i].eqn = -1;
			slave_state[slave].event_eq[i].token = 0;
1921		}
		/* Check if we are in the middle of the FLR process; if so,
		 * return "retry" status to the slave.
		 */
1924		if (MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd)
1925			goto inform_slave_state;
1926
1927		mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_SHUTDOWN, slave);
1928
1929		/* write the version in the event field */
1930		reply |= mlx4_comm_get_version();
1931
1932		goto reset_slave;
1933	}
	/* Command from slave in the middle of FLR */
1935	if (cmd != MLX4_COMM_CMD_RESET &&
1936	    MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
		mlx4_warn(dev, "slave %d is trying to run cmd (0x%x) in the middle of FLR\n",
			  slave, cmd);
1939		return;
1940	}
1941
1942	switch (cmd) {
1943	case MLX4_COMM_CMD_VHCR0:
1944		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_RESET)
1945			goto reset_slave;
1946		slave_state[slave].vhcr_dma = ((u64) param) << 48;
1947		priv->mfunc.master.slave_state[slave].cookie = 0;
1948		break;
1949	case MLX4_COMM_CMD_VHCR1:
1950		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR0)
1951			goto reset_slave;
1952		slave_state[slave].vhcr_dma |= ((u64) param) << 32;
1953		break;
1954	case MLX4_COMM_CMD_VHCR2:
1955		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR1)
1956			goto reset_slave;
1957		slave_state[slave].vhcr_dma |= ((u64) param) << 16;
1958		break;
1959	case MLX4_COMM_CMD_VHCR_EN:
1960		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR2)
1961			goto reset_slave;
1962		slave_state[slave].vhcr_dma |= param;
		if (mlx4_master_activate_admin_state(priv, slave))
			goto reset_slave;
1965		slave_state[slave].active = true;
1966		mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_INIT, slave);
1967		break;
1968	case MLX4_COMM_CMD_VHCR_POST:
1969		if ((slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_EN) &&
1970		    (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_POST))
1971			goto reset_slave;
1972
1973		mutex_lock(&priv->cmd.slave_cmd_mutex);
1974		if (mlx4_master_process_vhcr(dev, slave, NULL)) {
			mlx4_err(dev, "Failed processing vhcr for slave %d, resetting slave\n",
				 slave);
1977			mutex_unlock(&priv->cmd.slave_cmd_mutex);
1978			goto reset_slave;
1979		}
1980		mutex_unlock(&priv->cmd.slave_cmd_mutex);
1981		break;
1982	default:
1983		mlx4_warn(dev, "Bad comm cmd: %d from slave: %d\n", cmd, slave);
1984		goto reset_slave;
1985	}
1986	spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
1987	if (!slave_state[slave].is_slave_going_down)
1988		slave_state[slave].last_cmd = cmd;
1989	else
1990		is_going_down = 1;
1991	spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
1992	if (is_going_down) {
		mlx4_warn(dev, "Slave is going down, aborting command (%d) executing from slave %d\n",
			  cmd, slave);
1996		return;
1997	}
1998	__raw_writel((__force u32) cpu_to_be32(reply),
1999		     &priv->mfunc.comm[slave].slave_read);
2000	mmiowb();
2001
2002	return;
2003
2004reset_slave:
2005	/* cleanup any slave resources */
2006	mlx4_delete_all_resources_for_slave(dev, slave);
2007	spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
2008	if (!slave_state[slave].is_slave_going_down)
2009		slave_state[slave].last_cmd = MLX4_COMM_CMD_RESET;
2010	spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
	/* With the slave in the middle of FLR, there is no need to clean resources again */
2012inform_slave_state:
2013	__raw_writel((__force u32) cpu_to_be32(reply),
2014		     &priv->mfunc.comm[slave].slave_read);
2015	wmb();
2016}
2017
/*
 * Master command processing: scan the comm-channel arm bit vector and, for
 * every slave whose bit is set, read the command word the slave posted in
 * slave_write and hand it to mlx4_master_do_cmd().
 */
2019void mlx4_master_comm_channel(struct work_struct *work)
2020{
2021	struct mlx4_mfunc_master_ctx *master =
2022		container_of(work,
2023			     struct mlx4_mfunc_master_ctx,
2024			     comm_work);
2025	struct mlx4_mfunc *mfunc =
2026		container_of(master, struct mlx4_mfunc, master);
2027	struct mlx4_priv *priv =
2028		container_of(mfunc, struct mlx4_priv, mfunc);
2029	struct mlx4_dev *dev = &priv->dev;
2030	__be32 *bit_vec;
2031	u32 comm_cmd;
2032	u32 vec;
2033	int i, j, slave;
2034	int toggle;
2035	int served = 0;
2036	int reported = 0;
2037	u32 slt;
2038
2039	bit_vec = master->comm_arm_bit_vector;
2040	for (i = 0; i < COMM_CHANNEL_BIT_ARRAY_SIZE; i++) {
2041		vec = be32_to_cpu(bit_vec[i]);
2042		for (j = 0; j < 32; j++) {
2043			if (!(vec & (1 << j)))
2044				continue;
2045			++reported;
2046			slave = (i * 32) + j;
2047			comm_cmd = swab32(readl(
2048					  &mfunc->comm[slave].slave_write));
2049			slt = swab32(readl(&mfunc->comm[slave].slave_read))
2050				     >> 31;
2051			toggle = comm_cmd >> 31;
2052			if (toggle != slt) {
2053				if (master->slave_state[slave].comm_toggle
2054				    != slt) {
					mlx4_info(dev, "slave %d out of sync. read toggle %d, state toggle %d. Resynching\n",
						  slave, slt,
						  master->slave_state[slave].comm_toggle);
2059					master->slave_state[slave].comm_toggle =
2060						slt;
2061				}
2062				mlx4_master_do_cmd(dev, slave,
2063						   comm_cmd >> 16 & 0xff,
2064						   comm_cmd & 0xffff, toggle);
2065				++served;
			} else {
				mlx4_err(dev, "slave %d out of sync. read toggle %d, write toggle %d\n",
					 slave, slt, toggle);
			}
2070		}
2071	}
2072
2073	if (reported && reported != served)
		mlx4_warn(dev, "Got command event with bitmask from %d slaves but %d were served\n",
			  reported, served);
2077}

/* re-arm the comm channel so further slave command events are delivered */
2079void mlx4_master_arm_comm_channel(struct work_struct *work)
2080{
2081	struct mlx4_mfunc_master_ctx *master =
2082		container_of(work,
2083			     struct mlx4_mfunc_master_ctx,
2084			     arm_comm_work);
2085	struct mlx4_mfunc *mfunc =
2086		container_of(master, struct mlx4_mfunc, master);
2087	struct mlx4_priv *priv =
2088		container_of(mfunc, struct mlx4_priv, mfunc);
2089	struct mlx4_dev *dev = &priv->dev;
2090
2091	if (mlx4_ARM_COMM_CHANNEL(dev))
2092		mlx4_warn(dev, "Failed to arm comm channel events\n");
2093}
2094
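/*
 * Slave side: make sure our notion of the comm channel toggle matches what
 * the master last acknowledged (bit 31 of slave_read vs. slave_write) before
 * issuing any commands; give up after 5 seconds and reset both words.
 */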
2095static int sync_toggles(struct mlx4_dev *dev)
2096{
2097	struct mlx4_priv *priv = mlx4_priv(dev);
2098	int wr_toggle;
2099	int rd_toggle;
2100	unsigned long end;
2101
2102	wr_toggle = swab32(readl(&priv->mfunc.comm->slave_write)) >> 31;
2103	end = jiffies + msecs_to_jiffies(5000);
2104
2105	while (time_before(jiffies, end)) {
2106		rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read)) >> 31;
2107		if (rd_toggle == wr_toggle) {
2108			priv->cmd.comm_toggle = rd_toggle;
2109			return 0;
2110		}
2111
2112		cond_resched();
2113	}
2114
	/*
	 * We could reach here if, for example, the previous VM using this
	 * channel misbehaved and left it in an unsynced state. Fix that here
	 * and give the current VM a chance to use a properly synced channel.
	 */
	mlx4_warn(dev, "recovering from a previously misbehaved VM\n");
2122	__raw_writel((__force u32) 0, &priv->mfunc.comm->slave_read);
2123	__raw_writel((__force u32) 0, &priv->mfunc.comm->slave_write);
2124	priv->cmd.comm_toggle = 0;
2125
2126	return 0;
2127}
2128
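/*
 * Set up the multi-function (SR-IOV) command infrastructure: map the comm
 * channel, and on the master also allocate per-slave state, initialize the
 * comm/FLR work items and the resource tracker, and arm the comm channel.
 * A slave only needs to synchronize its comm channel toggle with the master.
 */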
2129int mlx4_multi_func_init(struct mlx4_dev *dev)
2130{
2131	struct mlx4_priv *priv = mlx4_priv(dev);
2132	struct mlx4_slave_state *s_state;
2133	int i, j, err, port;
2134
2135	if (mlx4_is_master(dev))
2136		priv->mfunc.comm =
2137		ioremap(pci_resource_start(dev->pdev, priv->fw.comm_bar) +
2138			priv->fw.comm_base, MLX4_COMM_PAGESIZE);
2139	else
2140		priv->mfunc.comm =
2141		ioremap(pci_resource_start(dev->pdev, 2) +
2142			MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE);
2143	if (!priv->mfunc.comm) {
2144		mlx4_err(dev, "Couldn't map communication vector.\n");
2145		goto err_vhcr;
2146	}
2147
2148	if (mlx4_is_master(dev)) {
		priv->mfunc.master.slave_state =
			kcalloc(dev->num_slaves,
				sizeof(struct mlx4_slave_state), GFP_KERNEL);
2152		if (!priv->mfunc.master.slave_state)
2153			goto err_comm;
2154
		priv->mfunc.master.vf_admin =
			kcalloc(dev->num_slaves,
				sizeof(struct mlx4_vf_admin_state), GFP_KERNEL);
2158		if (!priv->mfunc.master.vf_admin)
2159			goto err_comm_admin;
2160
		priv->mfunc.master.vf_oper =
			kcalloc(dev->num_slaves,
				sizeof(struct mlx4_vf_oper_state), GFP_KERNEL);
2164		if (!priv->mfunc.master.vf_oper)
2165			goto err_comm_oper;
2166
2167		for (i = 0; i < dev->num_slaves; ++i) {
2168			s_state = &priv->mfunc.master.slave_state[i];
2169			s_state->last_cmd = MLX4_COMM_CMD_RESET;
2170			mutex_init(&priv->mfunc.master.gen_eqe_mutex[i]);
2171			for (j = 0; j < MLX4_EVENT_TYPES_NUM; ++j)
2172				s_state->event_eq[j].eqn = -1;
2173			__raw_writel((__force u32) 0,
2174				     &priv->mfunc.comm[i].slave_write);
2175			__raw_writel((__force u32) 0,
2176				     &priv->mfunc.comm[i].slave_read);
2177			mmiowb();
2178			for (port = 1; port <= MLX4_MAX_PORTS; port++) {
2179				s_state->vlan_filter[port] =
2180					kzalloc(sizeof(struct mlx4_vlan_fltr),
2181						GFP_KERNEL);
				if (!s_state->vlan_filter[port]) {
					/* free the filters allocated so far for this slave */
					while (--port)
						kfree(s_state->vlan_filter[port]);
					goto err_slaves;
				}
2187				INIT_LIST_HEAD(&s_state->mcast_filters[port]);
2188				priv->mfunc.master.vf_admin[i].vport[port].default_vlan = MLX4_VGT;
2189				priv->mfunc.master.vf_oper[i].vport[port].state.default_vlan = MLX4_VGT;
2190				priv->mfunc.master.vf_oper[i].vport[port].vlan_idx = NO_INDX;
2191				priv->mfunc.master.vf_oper[i].vport[port].mac_idx = NO_INDX;
2192			}
2193			spin_lock_init(&s_state->lock);
2194		}
2195
2196		memset(&priv->mfunc.master.cmd_eqe, 0, dev->caps.eqe_size);
2197		priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD;
2198		INIT_WORK(&priv->mfunc.master.comm_work,
2199			  mlx4_master_comm_channel);
2200		INIT_WORK(&priv->mfunc.master.arm_comm_work,
2201			  mlx4_master_arm_comm_channel);
2202		INIT_WORK(&priv->mfunc.master.slave_event_work,
2203			  mlx4_gen_slave_eqe);
2204		INIT_WORK(&priv->mfunc.master.slave_flr_event_work,
2205			  mlx4_master_handle_slave_flr);
2206		spin_lock_init(&priv->mfunc.master.slave_state_lock);
2207		spin_lock_init(&priv->mfunc.master.slave_eq.event_lock);
2208		priv->mfunc.master.comm_wq =
2209			create_singlethread_workqueue("mlx4_comm");
2210		if (!priv->mfunc.master.comm_wq)
2211			goto err_slaves;
2212
2213		if (mlx4_init_resource_tracker(dev))
2214			goto err_thread;
2215
2216		err = mlx4_ARM_COMM_CHANNEL(dev);
2217		if (err) {
			mlx4_err(dev, "Failed to arm comm channel eq: %x\n",
				 err);
2220			goto err_resource;
2221		}
2222
2223	} else {
2224		err = sync_toggles(dev);
2225		if (err) {
2226			mlx4_err(dev, "Couldn't sync toggles\n");
2227			goto err_comm;
2228		}
2229	}
2230	return 0;
2231
2232err_resource:
2233	mlx4_free_resource_tracker(dev, RES_TR_FREE_ALL);
2234err_thread:
2235	flush_workqueue(priv->mfunc.master.comm_wq);
2236	destroy_workqueue(priv->mfunc.master.comm_wq);
2237err_slaves:
	while (i--) {
2239		for (port = 1; port <= MLX4_MAX_PORTS; port++)
2240			kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
2241	}
2242	kfree(priv->mfunc.master.vf_oper);
2243err_comm_oper:
2244	kfree(priv->mfunc.master.vf_admin);
2245err_comm_admin:
2246	kfree(priv->mfunc.master.slave_state);
2247err_comm:
2248	iounmap(priv->mfunc.comm);
2249err_vhcr:
	dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
			  priv->mfunc.vhcr,
			  priv->mfunc.vhcr_dma);
2253	priv->mfunc.vhcr = NULL;
2254	return -ENOMEM;
2255}
2256
2257int mlx4_cmd_init(struct mlx4_dev *dev)
2258{
2259	struct mlx4_priv *priv = mlx4_priv(dev);
2260
2261	init_rwsem(&priv->cmd.switch_sem);
2262	mutex_init(&priv->cmd.hcr_mutex);
2263	mutex_init(&priv->cmd.slave_cmd_mutex);
2264	sema_init(&priv->cmd.poll_sem, 1);
2265	priv->cmd.use_events = 0;
2266	priv->cmd.toggle     = 1;
2267
2268	priv->cmd.hcr = NULL;
2269	priv->mfunc.vhcr = NULL;
2270
2271	if (!mlx4_is_slave(dev)) {
2272		priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) +
2273					MLX4_HCR_BASE, MLX4_HCR_SIZE);
2274		if (!priv->cmd.hcr) {
2275			mlx4_err(dev, "Couldn't map command register.\n");
2276			return -ENOMEM;
2277		}
2278	}
2279
2280	if (mlx4_is_mfunc(dev)) {
2281		priv->mfunc.vhcr = dma_alloc_coherent(&(dev->pdev->dev), PAGE_SIZE,
2282						      &priv->mfunc.vhcr_dma,
2283						      GFP_KERNEL);
2284		if (!priv->mfunc.vhcr) {
2285			mlx4_err(dev, "Couldn't allocate VHCR.\n");
2286			goto err_hcr;
2287		}
2288	}
2289
2290	priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev,
2291					 MLX4_MAILBOX_SIZE,
2292					 MLX4_MAILBOX_SIZE, 0);
2293	if (!priv->cmd.pool)
2294		goto err_vhcr;
2295
2296	return 0;
2297
2298err_vhcr:
2299	if (mlx4_is_mfunc(dev))
2300		dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
2301				  priv->mfunc.vhcr, priv->mfunc.vhcr_dma);
2302	priv->mfunc.vhcr = NULL;
2303
2304err_hcr:
2305	if (!mlx4_is_slave(dev))
2306		iounmap(priv->cmd.hcr);
2307	return -ENOMEM;
2308}
2309
2310void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
2311{
2312	struct mlx4_priv *priv = mlx4_priv(dev);
2313	int i, port;
2314
2315	if (mlx4_is_master(dev)) {
2316		flush_workqueue(priv->mfunc.master.comm_wq);
2317		destroy_workqueue(priv->mfunc.master.comm_wq);
2318		for (i = 0; i < dev->num_slaves; i++) {
2319			for (port = 1; port <= MLX4_MAX_PORTS; port++)
2320				kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
2321		}
2322		kfree(priv->mfunc.master.slave_state);
2323		kfree(priv->mfunc.master.vf_admin);
2324		kfree(priv->mfunc.master.vf_oper);
2325	}
2326
2327	iounmap(priv->mfunc.comm);
2328}
2329
2330void mlx4_cmd_cleanup(struct mlx4_dev *dev)
2331{
2332	struct mlx4_priv *priv = mlx4_priv(dev);
2333
2334	pci_pool_destroy(priv->cmd.pool);
2335
2336	if (!mlx4_is_slave(dev))
2337		iounmap(priv->cmd.hcr);
2338	if (mlx4_is_mfunc(dev))
2339		dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
2340				  priv->mfunc.vhcr, priv->mfunc.vhcr_dma);
2341	priv->mfunc.vhcr = NULL;
2342}
2343
2344/*
2345 * Switch to using events to issue FW commands (can only be called
2346 * after event queue for command events has been initialized).
2347 */
2348int mlx4_cmd_use_events(struct mlx4_dev *dev)
2349{
2350	struct mlx4_priv *priv = mlx4_priv(dev);
2351	int i;
2352	int err = 0;
2353
2354	priv->cmd.context = kmalloc(priv->cmd.max_cmds *
2355				   sizeof (struct mlx4_cmd_context),
2356				   GFP_KERNEL);
2357	if (!priv->cmd.context)
2358		return -ENOMEM;
2359
2360	down_write(&priv->cmd.switch_sem);
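	/* chain the command contexts into a free list terminated by -1 */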
2361	for (i = 0; i < priv->cmd.max_cmds; ++i) {
2362		priv->cmd.context[i].token = i;
2363		priv->cmd.context[i].next  = i + 1;
2364	}
2365
2366	priv->cmd.context[priv->cmd.max_cmds - 1].next = -1;
2367	priv->cmd.free_head = 0;
2368
2369	sema_init(&priv->cmd.event_sem, priv->cmd.max_cmds);
2370	spin_lock_init(&priv->cmd.context_lock);
2371
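	/*
	 * token_mask becomes the smallest power of two that is >= max_cmds,
	 * minus one, so that tokens can be cheaply wrapped with a bit mask.
	 */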
2372	for (priv->cmd.token_mask = 1;
2373	     priv->cmd.token_mask < priv->cmd.max_cmds;
2374	     priv->cmd.token_mask <<= 1)
2375		; /* nothing */
2376	--priv->cmd.token_mask;
2377
2378	down(&priv->cmd.poll_sem);
2379	priv->cmd.use_events = 1;
2380	up_write(&priv->cmd.switch_sem);
2381
2382	return err;
2383}
2384
2385/*
2386 * Switch back to polling (used when shutting down the device)
2387 */
2388void mlx4_cmd_use_polling(struct mlx4_dev *dev)
2389{
2390	struct mlx4_priv *priv = mlx4_priv(dev);
2391	int i;
2392
2393	down_write(&priv->cmd.switch_sem);
2394	priv->cmd.use_events = 0;
2395
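	/*
	 * Acquire every event_sem slot so that all commands issued in event
	 * mode have completed before the context array is freed.
	 */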
2396	for (i = 0; i < priv->cmd.max_cmds; ++i)
2397		down(&priv->cmd.event_sem);
2398
2399	kfree(priv->cmd.context);
2400
2401	up(&priv->cmd.poll_sem);
2402	up_write(&priv->cmd.switch_sem);
2403}
2404
2405struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
2406{
2407	struct mlx4_cmd_mailbox *mailbox;
2408
2409	mailbox = kmalloc(sizeof *mailbox, GFP_KERNEL);
2410	if (!mailbox)
2411		return ERR_PTR(-ENOMEM);
2412
2413	mailbox->buf = pci_pool_alloc(mlx4_priv(dev)->cmd.pool, GFP_KERNEL,
2414				      &mailbox->dma);
2415	if (!mailbox->buf) {
2416		kfree(mailbox);
2417		return ERR_PTR(-ENOMEM);
2418	}
2419
2420	memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);
2421
2422	return mailbox;
2423}
2424EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox);
2425
2426void mlx4_free_cmd_mailbox(struct mlx4_dev *dev,
2427			   struct mlx4_cmd_mailbox *mailbox)
2428{
2429	if (!mailbox)
2430		return;
2431
2432	pci_pool_free(mlx4_priv(dev)->cmd.pool, mailbox->buf, mailbox->dma);
2433	kfree(mailbox);
2434}
2435EXPORT_SYMBOL_GPL(mlx4_free_cmd_mailbox);
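
/*
 * A minimal sketch of the usual mailbox life cycle around a firmware command
 * (illustrative only; QUERY_ADAPTER is just an example of a command that
 * returns its result through an output mailbox):
 *
 *	struct mlx4_cmd_mailbox *mailbox;
 *	int err;
 *
 *	mailbox = mlx4_alloc_cmd_mailbox(dev);
 *	if (IS_ERR(mailbox))
 *		return PTR_ERR(mailbox);
 *
 *	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_ADAPTER,
 *			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
 *
 *	... on success, parse mailbox->buf ...
 *
 *	mlx4_free_cmd_mailbox(dev, mailbox);
 *	return err;
 */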
2436
2437u32 mlx4_comm_get_version(void)
2438{
	return ((u32) CMD_CHAN_IF_REV << 8) | (u32) CMD_CHAN_VER;
2440}
2441
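/*
 * VF numbers are zero based from the caller's point of view, while slave 0
 * is the PF itself, so VF n maps to slave index n + 1.
 */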
2442static int mlx4_get_slave_indx(struct mlx4_dev *dev, int vf)
2443{
	if ((vf < 0) || (vf >= dev->num_vfs)) {
		mlx4_err(dev, "Bad vf number: %d (number of activated vfs: %d)\n",
			 vf, dev->num_vfs);
		return -EINVAL;
	}
	return vf + 1;
2449}
2450
2451int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u8 *mac)
2452{
2453	struct mlx4_priv *priv = mlx4_priv(dev);
2454	struct mlx4_vport_state *s_info;
2455	int slave;
2456
2457	if (!mlx4_is_master(dev))
2458		return -EPROTONOSUPPORT;
2459
2460	slave = mlx4_get_slave_indx(dev, vf);
2461	if (slave < 0)
2462		return -EINVAL;
2463
2464	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
2465	s_info->mac = mlx4_mac_to_u64(mac);
	mlx4_info(dev, "default mac on vf %d port %d set to %llX; it will take effect only after vf restart\n",
		  vf, port, (unsigned long long) s_info->mac);
2468	return 0;
2469}
2470EXPORT_SYMBOL_GPL(mlx4_set_vf_mac);
2471
2472int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
2473{
2474	struct mlx4_priv *priv = mlx4_priv(dev);
2475	struct mlx4_vport_oper_state *vf_oper;
2476	struct mlx4_vport_state *vf_admin;
2477	int slave;
2478
2479	if ((!mlx4_is_master(dev)) ||
2480	    !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VLAN_CONTROL))
2481		return -EPROTONOSUPPORT;
2482
2483	if ((vlan > 4095) || (qos > 7))
2484		return -EINVAL;
2485
2486	slave = mlx4_get_slave_indx(dev, vf);
2487	if (slave < 0)
2488		return -EINVAL;
2489
2490	vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
2491	vf_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
2492
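	/* vlan 0 together with qos 0 means "no VST": revert to VGT mode */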
2493	if ((0 == vlan) && (0 == qos))
2494		vf_admin->default_vlan = MLX4_VGT;
2495	else
2496		vf_admin->default_vlan = vlan;
2497	vf_admin->default_qos = qos;
2498
2499	if (priv->mfunc.master.slave_state[slave].active &&
2500	    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
2501		mlx4_info(dev, "updating vf %d port %d config params immediately\n",
2502			  vf, port);
2503		mlx4_master_immediate_activate_vlan_qos(priv, slave, port);
2504	}
2505	return 0;
2506}
2507EXPORT_SYMBOL_GPL(mlx4_set_vf_vlan);
2508
/* mlx4_get_slave_default_vlan -
 * Return true if the slave is in VST mode (i.e. has a default vlan).
 * If so, fill in vlan and qos (when the pointers are not NULL).
 */
2512bool mlx4_get_slave_default_vlan(struct mlx4_dev *dev, int port, int slave, u16 *vlan, u8 *qos)
2513{
2514	struct mlx4_vport_oper_state *vp_oper;
2515	struct mlx4_priv *priv;
2516
2517	priv = mlx4_priv(dev);
2518	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
2519
2520	if (MLX4_VGT != vp_oper->state.default_vlan) {
2521		if (vlan)
2522			*vlan = vp_oper->state.default_vlan;
2523		if (qos)
2524			*qos = vp_oper->state.default_qos;
2525		return true;
2526	}
2527	return false;
2528}
2529EXPORT_SYMBOL_GPL(mlx4_get_slave_default_vlan);
2530
2531int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting)
2532{
2533	struct mlx4_priv *priv = mlx4_priv(dev);
2534	struct mlx4_vport_state *s_info;
2535	int slave;
2536
2537	if ((!mlx4_is_master(dev)) ||
2538	    !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FSM))
2539		return -EPROTONOSUPPORT;
2540
2541	slave = mlx4_get_slave_indx(dev, vf);
2542	if (slave < 0)
2543		return -EINVAL;
2544
2545	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
2546	s_info->spoofchk = setting;
2547
2548	return 0;
2549}
2550EXPORT_SYMBOL_GPL(mlx4_set_vf_spoofchk);
2551
2552int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state)
2553{
2554	struct mlx4_priv *priv = mlx4_priv(dev);
2555	struct mlx4_vport_state *s_info;
2556	struct mlx4_vport_oper_state *vp_oper;
2557	int slave;
2558	u8 link_stat_event;
2559
2560	slave = mlx4_get_slave_indx(dev, vf);
2561	if (slave < 0)
2562		return -EINVAL;
2563
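	/*
	 * Translate the requested administrative link state into the port
	 * state change event subtype that will be reported to the VF.
	 */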
	switch (link_state) {
	case IFLA_VF_LINK_STATE_AUTO:
		/* get current link state */
		if (!priv->sense.do_sense_port[port])
			link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_ACTIVE;
		else
			link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_DOWN;
		break;

	case IFLA_VF_LINK_STATE_ENABLE:
		link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_ACTIVE;
		break;

	case IFLA_VF_LINK_STATE_DISABLE:
		link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_DOWN;
		break;

	default:
		mlx4_warn(dev, "unknown value for link_state %02x on slave %d port %d\n",
			  link_state, slave, port);
		return -EINVAL;
	}
2586	/* update the admin & oper state on the link state */
2587	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
2588	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
2589	s_info->link_state = link_state;
2590	vp_oper->state.link_state = link_state;
2591
2592	/* send event */
2593	mlx4_gen_port_state_change_eqe(dev, slave, port, link_stat_event);
2594	return 0;
2595}
2596EXPORT_SYMBOL_GPL(mlx4_set_vf_link_state);
2597
2598int mlx4_get_vf_link_state(struct mlx4_dev *dev, int port, int vf)
2599{
2600	struct mlx4_priv *priv = mlx4_priv(dev);
2601	struct mlx4_vport_state *s_info;
2602	int slave;
2603
2604	if (!mlx4_is_master(dev))
2605		return -EPROTONOSUPPORT;
2606
2607	slave = mlx4_get_slave_indx(dev, vf);
2608	if (slave < 0)
2609		return -EINVAL;
2610
2611	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
2612
2613	return s_info->link_state;
2614}
2615EXPORT_SYMBOL_GPL(mlx4_get_vf_link_state);
2616
2617