/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008, 2014 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/module.h>

#include <dev/mlx4/cmd.h>
#include <dev/mlx4/qp.h>

#include "mlx4.h"
#include "icm.h"

/* A QP that supports BlueFlame (BF) must have bits 6 and 7 of its
 * QP number cleared.
 */
#define MLX4_BF_QP_SKIP_MASK	0xc0
#define MLX4_MAX_BF_QP_RANGE	0x40

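/*
 * Dispatch an asynchronous event to the QP that raised it.  The QP is
 * looked up under qp_table->lock and pinned with a reference so that it
 * cannot be freed while its event handler runs; mlx4_qp_free() waits on
 * qp->free until the last such reference is dropped.
 */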
void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type)
{
	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
	struct mlx4_qp *qp;

	spin_lock(&qp_table->lock);

	qp = __mlx4_qp_lookup(dev, qpn);
	if (qp)
		atomic_inc(&qp->refcount);

	spin_unlock(&qp_table->lock);

	if (!qp) {
		mlx4_dbg(dev, "Async event for nonexistent QP %08x\n", qpn);
		return;
	}

	qp->event(qp, event_type);

	if (atomic_dec_and_test(&qp->refcount))
		complete(&qp->free);
}

/* Used for the INIT/CLOSE port logic. */
static int is_master_qp0(struct mlx4_dev *dev, struct mlx4_qp *qp, int *real_qp0, int *proxy_qp0)
{
	/* This procedure is called only after we know we are on the master.
	 * qp0 is either the proxy qp0 or the real qp0.
	 */
	u32 pf_proxy_offset = dev->phys_caps.base_proxy_sqpn + 8 * mlx4_master_func_num(dev);

	*proxy_qp0 = qp->qpn >= pf_proxy_offset && qp->qpn <= pf_proxy_offset + 1;

	*real_qp0 = qp->qpn >= dev->phys_caps.base_sqpn &&
		qp->qpn <= dev->phys_caps.base_sqpn + 1;

	return *real_qp0 || *proxy_qp0;
}

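/*
 * The op[][] table below maps each legal (current state, next state)
 * pair to the firmware command that performs that transition; a zero
 * entry means the transition is not supported and is rejected with
 * -EINVAL.
 */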
static int __mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		     enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
		     struct mlx4_qp_context *context,
		     enum mlx4_qp_optpar optpar,
		     int sqd_event, struct mlx4_qp *qp, int native)
{
	static const u16 op[MLX4_QP_NUM_STATE][MLX4_QP_NUM_STATE] = {
		[MLX4_QP_STATE_RST] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_INIT]	= MLX4_CMD_RST2INIT_QP,
		},
		[MLX4_QP_STATE_INIT]  = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_INIT]	= MLX4_CMD_INIT2INIT_QP,
			[MLX4_QP_STATE_RTR]	= MLX4_CMD_INIT2RTR_QP,
		},
		[MLX4_QP_STATE_RTR]   = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_RTS]	= MLX4_CMD_RTR2RTS_QP,
		},
		[MLX4_QP_STATE_RTS]   = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_RTS]	= MLX4_CMD_RTS2RTS_QP,
			[MLX4_QP_STATE_SQD]	= MLX4_CMD_RTS2SQD_QP,
		},
		[MLX4_QP_STATE_SQD] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_RTS]	= MLX4_CMD_SQD2RTS_QP,
			[MLX4_QP_STATE_SQD]	= MLX4_CMD_SQD2SQD_QP,
		},
		[MLX4_QP_STATE_SQER] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_RTS]	= MLX4_CMD_SQERR2RTS_QP,
		},
		[MLX4_QP_STATE_ERR] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
		}
	};

	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	int ret = 0;
	int real_qp0 = 0;
	int proxy_qp0 = 0;
	u8 port;

	if (cur_state >= MLX4_QP_NUM_STATE || new_state >= MLX4_QP_NUM_STATE ||
	    !op[cur_state][new_state])
		return -EINVAL;

	if (op[cur_state][new_state] == MLX4_CMD_2RST_QP) {
		ret = mlx4_cmd(dev, 0, qp->qpn, 2,
			MLX4_CMD_2RST_QP, MLX4_CMD_TIME_CLASS_A, native);
		if (mlx4_is_master(dev) && cur_state != MLX4_QP_STATE_ERR &&
		    cur_state != MLX4_QP_STATE_RST &&
		    is_master_qp0(dev, qp, &real_qp0, &proxy_qp0)) {
			port = (qp->qpn & 1) + 1;
			if (proxy_qp0)
				priv->mfunc.master.qp0_state[port].proxy_qp0_active = 0;
			else
				priv->mfunc.master.qp0_state[port].qp0_active = 0;
		}
		return ret;
	}

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	if (cur_state == MLX4_QP_STATE_RST && new_state == MLX4_QP_STATE_INIT) {
		u64 mtt_addr = mlx4_mtt_addr(dev, mtt);
		context->mtt_base_addr_h = mtt_addr >> 32;
		context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
		context->log_page_size   = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
	}

	if ((cur_state == MLX4_QP_STATE_RTR) &&
	    (new_state == MLX4_QP_STATE_RTS) &&
	    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
		context->roce_entropy =
			cpu_to_be16(mlx4_qp_roce_entropy(dev, qp->qpn));

	*(__be32 *) mailbox->buf = cpu_to_be32(optpar);
	memcpy(mailbox->buf + 8, context, sizeof(*context));

	((struct mlx4_qp_context *) (mailbox->buf + 8))->local_qpn =
		cpu_to_be32(qp->qpn);

	ret = mlx4_cmd(dev, mailbox->dma,
		       qp->qpn | (!!sqd_event << 31),
		       new_state == MLX4_QP_STATE_RST ? 2 : 0,
		       op[cur_state][new_state], MLX4_CMD_TIME_CLASS_C, native);

	if (mlx4_is_master(dev) && is_master_qp0(dev, qp, &real_qp0, &proxy_qp0)) {
		port = (qp->qpn & 1) + 1;
		if (cur_state != MLX4_QP_STATE_ERR &&
		    cur_state != MLX4_QP_STATE_RST &&
		    new_state == MLX4_QP_STATE_ERR) {
			if (proxy_qp0)
				priv->mfunc.master.qp0_state[port].proxy_qp0_active = 0;
			else
				priv->mfunc.master.qp0_state[port].qp0_active = 0;
		} else if (new_state == MLX4_QP_STATE_RTR) {
			if (proxy_qp0)
				priv->mfunc.master.qp0_state[port].proxy_qp0_active = 1;
			else
				priv->mfunc.master.qp0_state[port].qp0_active = 1;
		}
	}

	mlx4_free_cmd_mailbox(dev, mailbox);
	return ret;
}

int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		   enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
		   struct mlx4_qp_context *context,
		   enum mlx4_qp_optpar optpar,
		   int sqd_event, struct mlx4_qp *qp)
{
	return __mlx4_qp_modify(dev, mtt, cur_state, new_state, context,
				optpar, sqd_event, qp, 0);
}
EXPORT_SYMBOL_GPL(mlx4_qp_modify);

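/*
 * Reserve a range of "cnt" consecutive QP numbers.  BlueFlame-capable
 * QPs must avoid numbers with bits 6 or 7 set, so for those the zone
 * allocator is told to skip MLX4_BF_QP_SKIP_MASK; the zone itself
 * (general, RSS or raw-Ethernet A0) is selected from the flags.
 */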
int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
			    int *base, u8 flags)
{
	u32 uid;
	int bf_qp = !!(flags & (u8)MLX4_RESERVE_ETH_BF_QP);

	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_qp_table *qp_table = &priv->qp_table;

	if (cnt > MLX4_MAX_BF_QP_RANGE && bf_qp)
		return -ENOMEM;

	uid = MLX4_QP_TABLE_ZONE_GENERAL;
	if (flags & (u8)MLX4_RESERVE_A0_QP) {
		if (bf_qp)
			uid = MLX4_QP_TABLE_ZONE_RAW_ETH;
		else
			uid = MLX4_QP_TABLE_ZONE_RSS;
	}

	*base = mlx4_zone_alloc_entries(qp_table->zones, uid, cnt, align,
					bf_qp ? MLX4_BF_QP_SKIP_MASK : 0, NULL);
	if (*base == -1)
		return -ENOMEM;

	return 0;
}

int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
			  int *base, u8 flags)
{
	u64 in_param = 0;
	u64 out_param;
	int err;

	/* Turn off all unsupported QP allocation flags. */
	flags &= dev->caps.alloc_res_qp_mask;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, (((u32)flags) << 24) | (u32)cnt);
		set_param_h(&in_param, align);
		err = mlx4_cmd_imm(dev, in_param, &out_param,
				   RES_QP, RES_OP_RESERVE,
				   MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (err)
			return err;

		*base = get_param_l(&out_param);
		return 0;
	}
	return __mlx4_qp_reserve_range(dev, cnt, align, base, flags);
}
EXPORT_SYMBOL_GPL(mlx4_qp_reserve_range);

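/*
 * A typical reserve/release pairing might look like the sketch below
 * (error handling of the work done in between is elided):
 *
 *	int base_qpn;
 *
 *	err = mlx4_qp_reserve_range(dev, 8, 8, &base_qpn, 0);
 *	if (err)
 *		return err;
 *	...
 *	mlx4_qp_release_range(dev, base_qpn, 8);
 */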
void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_qp_table *qp_table = &priv->qp_table;

	if (mlx4_is_qp_reserved(dev, (u32) base_qpn))
		return;
	mlx4_zone_free_entries_unique(qp_table->zones, base_qpn, cnt);
}

void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
{
	u64 in_param = 0;
	int err;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, base_qpn);
		set_param_h(&in_param, cnt);
		err = mlx4_cmd(dev, in_param, RES_QP, RES_OP_RESERVE,
			       MLX4_CMD_FREE_RES,
			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (err) {
			mlx4_warn(dev, "Failed to release qp range base:%d cnt:%d\n",
				  base_qpn, cnt);
		}
	} else
		__mlx4_qp_release_range(dev, base_qpn, cnt);
}
EXPORT_SYMBOL_GPL(mlx4_qp_release_range);

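/*
 * A QP consumes an entry in five ICM tables: the QP context table, the
 * auxiliary context table, the alternate context table, the RDMA
 * responder (RDMARC) table and the cMPT table.  Take a reference on
 * each in turn, unwinding in reverse order on failure.
 */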
int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_qp_table *qp_table = &priv->qp_table;
	int err;

	err = mlx4_table_get(dev, &qp_table->qp_table, qpn, gfp);
	if (err)
		goto err_out;

	err = mlx4_table_get(dev, &qp_table->auxc_table, qpn, gfp);
	if (err)
		goto err_put_qp;

	err = mlx4_table_get(dev, &qp_table->altc_table, qpn, gfp);
	if (err)
		goto err_put_auxc;

	err = mlx4_table_get(dev, &qp_table->rdmarc_table, qpn, gfp);
	if (err)
		goto err_put_altc;

	err = mlx4_table_get(dev, &qp_table->cmpt_table, qpn, gfp);
	if (err)
		goto err_put_rdmarc;

	return 0;

err_put_rdmarc:
	mlx4_table_put(dev, &qp_table->rdmarc_table, qpn);

err_put_altc:
	mlx4_table_put(dev, &qp_table->altc_table, qpn);

err_put_auxc:
	mlx4_table_put(dev, &qp_table->auxc_table, qpn);

err_put_qp:
	mlx4_table_put(dev, &qp_table->qp_table, qpn);

err_out:
	return err;
}

static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp)
{
	u64 param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&param, qpn);
		return mlx4_cmd_imm(dev, param, &param, RES_QP, RES_OP_MAP_ICM,
				    MLX4_CMD_ALLOC_RES, MLX4_CMD_TIME_CLASS_A,
				    MLX4_CMD_WRAPPED);
	}
	return __mlx4_qp_alloc_icm(dev, qpn, gfp);
}

void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_qp_table *qp_table = &priv->qp_table;

	mlx4_table_put(dev, &qp_table->cmpt_table, qpn);
	mlx4_table_put(dev, &qp_table->rdmarc_table, qpn);
	mlx4_table_put(dev, &qp_table->altc_table, qpn);
	mlx4_table_put(dev, &qp_table->auxc_table, qpn);
	mlx4_table_put(dev, &qp_table->qp_table, qpn);
}

static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
{
	u64 in_param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, qpn);
		if (mlx4_cmd(dev, in_param, RES_QP, RES_OP_MAP_ICM,
			     MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
			     MLX4_CMD_WRAPPED))
			mlx4_warn(dev, "Failed to free ICM of qp:%d\n", qpn);
	} else
		__mlx4_qp_free_icm(dev, qpn);
}

int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, gfp_t gfp)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_qp_table *qp_table = &priv->qp_table;
	int err;

	if (!qpn)
		return -EINVAL;

	qp->qpn = qpn;

	err = mlx4_qp_alloc_icm(dev, qpn, gfp);
	if (err)
		return err;

	spin_lock_irq(&qp_table->lock);
	err = radix_tree_insert(&dev->qp_table_tree, qp->qpn &
				(dev->caps.num_qps - 1), qp);
	spin_unlock_irq(&qp_table->lock);
	if (err)
		goto err_icm;

	atomic_set(&qp->refcount, 1);
	init_completion(&qp->free);

	return 0;

err_icm:
	mlx4_qp_free_icm(dev, qpn);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_qp_alloc);

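/*
 * Update selected fields of an existing QP context via the UPDATE_QP
 * firmware command.  A minimal usage sketch (the SMAC index value here
 * is hypothetical):
 *
 *	struct mlx4_update_qp_params params = { .smac_index = 3 };
 *
 *	err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_SMAC, &params);
 *
 * Only the fields selected by the attribute mask are touched; the
 * corresponding primary-path and QP masks are assembled below.
 */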
int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
		   enum mlx4_update_qp_attr attr,
		   struct mlx4_update_qp_params *params)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_update_qp_context *cmd;
	u64 pri_addr_path_mask = 0;
	u64 qp_mask = 0;
	int err = 0;

	if (!attr || (attr & ~MLX4_UPDATE_QP_SUPPORTED_ATTRS))
		return -EINVAL;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	cmd = (struct mlx4_update_qp_context *)mailbox->buf;

	if (attr & MLX4_UPDATE_QP_SMAC) {
		pri_addr_path_mask |= 1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX;
		cmd->qp_context.pri_path.grh_mylmc = params->smac_index;
	}

	if (attr & MLX4_UPDATE_QP_ETH_SRC_CHECK_MC_LB) {
		if (!(dev->caps.flags2
		      & MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB)) {
			mlx4_warn(dev,
				  "Trying to set src check LB, but it isn't supported\n");
			err = -ENOTSUPP;
			goto out;
		}
		pri_addr_path_mask |=
			1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB;
		if (params->flags &
		    MLX4_UPDATE_QP_PARAMS_FLAGS_ETH_CHECK_MC_LB) {
			cmd->qp_context.pri_path.fl |=
				MLX4_FL_ETH_SRC_CHECK_MC_LB;
		}
	}

	if (attr & MLX4_UPDATE_QP_VSD) {
		qp_mask |= 1ULL << MLX4_UPD_QP_MASK_VSD;
		if (params->flags & MLX4_UPDATE_QP_PARAMS_FLAGS_VSD_ENABLE)
			cmd->qp_context.param3 |= cpu_to_be32(MLX4_STRIP_VLAN);
	}

	if (attr & MLX4_UPDATE_QP_RATE_LIMIT) {
		qp_mask |= 1ULL << MLX4_UPD_QP_MASK_RATE_LIMIT;
		cmd->qp_context.rate_limit_params = cpu_to_be16((params->rate_unit << 14) | params->rate_val);
	}

	if (attr & MLX4_UPDATE_QP_QOS_VPORT) {
		qp_mask |= 1ULL << MLX4_UPD_QP_MASK_QOS_VPP;
		cmd->qp_context.qos_vport = params->qos_vport;
	}

	cmd->primary_addr_path_mask = cpu_to_be64(pri_addr_path_mask);
	cmd->qp_mask = cpu_to_be64(qp_mask);

	err = mlx4_cmd(dev, mailbox->dma, qpn & 0xffffff, 0,
		       MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
		       MLX4_CMD_NATIVE);
out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_update_qp);

void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp)
{
	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
	unsigned long flags;

	spin_lock_irqsave(&qp_table->lock, flags);
	radix_tree_delete(&dev->qp_table_tree, qp->qpn & (dev->caps.num_qps - 1));
	spin_unlock_irqrestore(&qp_table->lock, flags);
}
EXPORT_SYMBOL_GPL(mlx4_qp_remove);

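/*
 * Drop the initial reference taken in mlx4_qp_alloc() and wait for any
 * in-flight event handlers to drop theirs before freeing the QP's ICM
 * entries.  The caller must already have unpublished the QP from the
 * radix tree with mlx4_qp_remove().
 */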
void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp)
{
	if (atomic_dec_and_test(&qp->refcount))
		complete(&qp->free);
	wait_for_completion(&qp->free);

	mlx4_qp_free_icm(dev, qp->qpn);
}
EXPORT_SYMBOL_GPL(mlx4_qp_free);

static int mlx4_CONF_SPECIAL_QP(struct mlx4_dev *dev, u32 base_qpn)
{
	return mlx4_cmd(dev, 0, base_qpn, 0, MLX4_CMD_CONF_SPECIAL_QP,
			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}

#define MLX4_QP_TABLE_RSS_ETH_PRIORITY 2
#define MLX4_QP_TABLE_RAW_ETH_PRIORITY 1
#define MLX4_QP_TABLE_RAW_ETH_SIZE     256

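/*
 * Carve the QP number space into zones: one general zone for ordinary
 * QPs, an RSS zone (QP numbers that may have bit 6 and/or 7 set) and
 * raw-Ethernet A0 zones whose QP numbers keep bits 6 and 7 clear so
 * they remain usable for BlueFlame.
 */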
static int mlx4_create_zones(struct mlx4_dev *dev,
			     u32 reserved_bottom_general,
			     u32 reserved_top_general,
			     u32 reserved_bottom_rss,
			     u32 start_offset_rss,
			     u32 max_table_offset)
{
	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
	struct mlx4_bitmap (*bitmap)[MLX4_QP_TABLE_ZONE_NUM] = NULL;
	int bitmap_initialized = 0;
	u32 last_offset;
	int k;
	int err;

	qp_table->zones = mlx4_zone_allocator_create(MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP);

	if (NULL == qp_table->zones)
		return -ENOMEM;

	bitmap = kmalloc(sizeof(*bitmap), GFP_KERNEL);

	if (NULL == bitmap) {
		err = -ENOMEM;
		goto free_zone;
	}

	err = mlx4_bitmap_init(*bitmap + MLX4_QP_TABLE_ZONE_GENERAL, dev->caps.num_qps,
			       (1 << 23) - 1, reserved_bottom_general,
			       reserved_top_general);

	if (err)
		goto free_bitmap;

	++bitmap_initialized;

	err = mlx4_zone_add_one(qp_table->zones, *bitmap + MLX4_QP_TABLE_ZONE_GENERAL,
				MLX4_ZONE_FALLBACK_TO_HIGHER_PRIO |
				MLX4_ZONE_USE_RR, 0,
				0, qp_table->zones_uids + MLX4_QP_TABLE_ZONE_GENERAL);

	if (err)
		goto free_bitmap;

	err = mlx4_bitmap_init(*bitmap + MLX4_QP_TABLE_ZONE_RSS,
			       reserved_bottom_rss,
			       reserved_bottom_rss - 1,
			       dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
			       reserved_bottom_rss - start_offset_rss);

	if (err)
		goto free_bitmap;

	++bitmap_initialized;

	err = mlx4_zone_add_one(qp_table->zones, *bitmap + MLX4_QP_TABLE_ZONE_RSS,
				MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO |
				MLX4_ZONE_ALLOW_ALLOC_FROM_EQ_PRIO |
				MLX4_ZONE_USE_RR, MLX4_QP_TABLE_RSS_ETH_PRIORITY,
				0, qp_table->zones_uids + MLX4_QP_TABLE_ZONE_RSS);

	if (err)
		goto free_bitmap;

	last_offset = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
	/* We have a single zone for the A0 steering QPs area of the FW. This
	 * area needs to be split into subareas. One set of subareas is for
	 * RSS QPs (in which QP number bits 6 and/or 7 are set); the other set
	 * of subareas is for RAW_ETH QPs, which require that both bits 6 and
	 * 7 are zero. Currently, the values returned by the FW (A0 steering
	 * area starting QP number and A0 steering area size) are such that
	 * there are only two subareas -- one for RSS and one for RAW_ETH.
	 */
	for (k = MLX4_QP_TABLE_ZONE_RSS + 1; k < sizeof(*bitmap)/sizeof((*bitmap)[0]);
	     k++) {
		int size;
		u32 offset = start_offset_rss;
		u32 bf_mask;
		u32 requested_size;

		/* Assuming MLX4_BF_QP_SKIP_MASK is a run of consecutive ones,
		 * this calculates a mask of all LSBs below (and not including)
		 * its first set bit. For example, if MLX4_BF_QP_SKIP_MASK is
		 * 0xc0, bf_mask will be 0x3f.
		 */
		bf_mask = (MLX4_BF_QP_SKIP_MASK & ~(MLX4_BF_QP_SKIP_MASK - 1)) - 1;
		requested_size = min((u32)MLX4_QP_TABLE_RAW_ETH_SIZE, bf_mask + 1);

		if (((last_offset & MLX4_BF_QP_SKIP_MASK) &&
		     ((int)(max_table_offset - last_offset)) >=
		     roundup_pow_of_two(MLX4_BF_QP_SKIP_MASK)) ||
		    (!(last_offset & MLX4_BF_QP_SKIP_MASK) &&
		     !((last_offset + requested_size - 1) &
		       MLX4_BF_QP_SKIP_MASK)))
			size = requested_size;
		else {
			u32 candidate_offset =
				(last_offset | MLX4_BF_QP_SKIP_MASK | bf_mask) + 1;

			if (last_offset & MLX4_BF_QP_SKIP_MASK)
				last_offset = candidate_offset;

			/* From this point on, the BF bits are 0 */

			if (last_offset > max_table_offset) {
				/* need to skip */
				size = -1;
			} else {
				size = min3(max_table_offset - last_offset,
					    bf_mask - (last_offset & bf_mask),
					    requested_size);
				if (size < requested_size) {
					int candidate_size;

					candidate_size = min3(
						max_table_offset - candidate_offset,
						bf_mask - (last_offset & bf_mask),
						requested_size);

					/* We will not take this path if last_offset
					 * was already set above to candidate_offset.
					 */
					if (candidate_size > size) {
						last_offset = candidate_offset;
						size = candidate_size;
					}
				}
			}
		}

		if (size > 0) {
			/* mlx4_bitmap_alloc_range will find a contiguous range
			 * of "size" QPs in which both bits 6 and 7 are zero,
			 * because we pass it MLX4_BF_QP_SKIP_MASK as the skip
			 * mask.
			 */
			offset = mlx4_bitmap_alloc_range(
					*bitmap + MLX4_QP_TABLE_ZONE_RSS,
					size, 1,
					MLX4_BF_QP_SKIP_MASK);

			if (offset == (u32)-1) {
				err = -ENOMEM;
				break;
			}

			last_offset = offset + size;

			err = mlx4_bitmap_init(*bitmap + k, roundup_pow_of_two(size),
					       roundup_pow_of_two(size) - 1, 0,
					       roundup_pow_of_two(size) - size);
		} else {
			/* Add an empty bitmap; we'll allocate from different
			 * zones (since at least one is reserved).
			 */
			err = mlx4_bitmap_init(*bitmap + k, 1,
					       MLX4_QP_TABLE_RAW_ETH_SIZE - 1, 0,
					       0);
			mlx4_bitmap_alloc_range(*bitmap + k, 1, 1, 0);
		}

		if (err)
			break;

		++bitmap_initialized;

		err = mlx4_zone_add_one(qp_table->zones, *bitmap + k,
					MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO |
					MLX4_ZONE_ALLOW_ALLOC_FROM_EQ_PRIO |
					MLX4_ZONE_USE_RR, MLX4_QP_TABLE_RAW_ETH_PRIORITY,
					offset, qp_table->zones_uids + k);

		if (err)
			break;
	}

	if (err)
		goto free_bitmap;

	qp_table->bitmap_gen = *bitmap;

	return err;

free_bitmap:
	for (k = 0; k < bitmap_initialized; k++)
		mlx4_bitmap_cleanup(*bitmap + k);
	kfree(bitmap);
free_zone:
	mlx4_zone_allocator_destroy(qp_table->zones);
	return err;
}

static void mlx4_cleanup_qp_zones(struct mlx4_dev *dev)
{
	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;

	if (qp_table->zones) {
		int i;

		for (i = 0;
		     i < sizeof(qp_table->zones_uids)/sizeof(qp_table->zones_uids[0]);
		     i++) {
			struct mlx4_bitmap *bitmap =
				mlx4_zone_get_bitmap(qp_table->zones,
						     qp_table->zones_uids[i]);

			mlx4_zone_remove_one(qp_table->zones, qp_table->zones_uids[i]);
			if (NULL == bitmap)
				continue;

			mlx4_bitmap_cleanup(bitmap);
		}
		mlx4_zone_allocator_destroy(qp_table->zones);
		kfree(qp_table->bitmap_gen);
		qp_table->bitmap_gen = NULL;
		qp_table->zones = NULL;
	}
}

int mlx4_init_qp_table(struct mlx4_dev *dev)
{
	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
	int err;
	int reserved_from_top = 0;
	int reserved_from_bot;
	int k;
	int fixed_reserved_from_bot_rv = 0;
	int bottom_reserved_for_rss_bitmap;
	u32 max_table_offset = dev->caps.dmfs_high_rate_qpn_base +
			dev->caps.dmfs_high_rate_qpn_range;

	spin_lock_init(&qp_table->lock);
	INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC);
	if (mlx4_is_slave(dev))
		return 0;

	/* We reserve 2 extra QPs per port for the special QPs.  The
	 * block of special QPs must be aligned to a multiple of 8, so
	 * round up.
	 *
	 * We also reserve the MSB of the 24-bit QP number to indicate
	 * that a QP is an XRC QP.
	 */
	for (k = 0; k <= MLX4_QP_REGION_BOTTOM; k++)
		fixed_reserved_from_bot_rv += dev->caps.reserved_qps_cnt[k];

	if (fixed_reserved_from_bot_rv < max_table_offset)
		fixed_reserved_from_bot_rv = max_table_offset;

	/* We reserve at least 1 extra QP for bitmaps that we don't have
	 * enough space for.
	 */
	bottom_reserved_for_rss_bitmap =
		roundup_pow_of_two(fixed_reserved_from_bot_rv + 1);
	dev->phys_caps.base_sqpn = ALIGN(bottom_reserved_for_rss_bitmap, 8);

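	/* Lay out the reserved regions at the top of the QP number space.
	 * A simple exchange sort orders the regions by size (largest
	 * first), then each region's base is assigned working downward
	 * from dev->caps.num_qps, so the largest region sits highest.
	 */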
	{
		int sort[MLX4_NUM_QP_REGION];
		int i, j;
		int last_base = dev->caps.num_qps;

		for (i = 1; i < MLX4_NUM_QP_REGION; ++i)
			sort[i] = i;

		for (i = MLX4_NUM_QP_REGION; i > MLX4_QP_REGION_BOTTOM; --i) {
			for (j = MLX4_QP_REGION_BOTTOM + 2; j < i; ++j) {
				if (dev->caps.reserved_qps_cnt[sort[j]] >
				    dev->caps.reserved_qps_cnt[sort[j - 1]])
					swap(sort[j], sort[j - 1]);
			}
		}

		for (i = MLX4_QP_REGION_BOTTOM + 1; i < MLX4_NUM_QP_REGION; ++i) {
			last_base -= dev->caps.reserved_qps_cnt[sort[i]];
			dev->caps.reserved_qps_base[sort[i]] = last_base;
			reserved_from_top +=
				dev->caps.reserved_qps_cnt[sort[i]];
		}
	}

	/* Reserve 8 real SQPs in both native and SRIOV modes.
	 * In addition, in SRIOV mode, reserve 8 proxy SQPs per function
	 * (for all PFs and VFs), and 8 corresponding tunnel QPs.
	 * Each proxy SQP works opposite its own tunnel QP.
	 *
	 * The QPs are arranged as follows:
	 * a. 8 real SQPs
	 * b. All the proxy SQPs (8 per function)
	 * c. All the tunnel QPs (8 per function)
	 */
	reserved_from_bot = mlx4_num_reserved_sqps(dev);
	if (reserved_from_bot + reserved_from_top > dev->caps.num_qps) {
		mlx4_err(dev, "Number of reserved QPs is higher than number of QPs\n");
		return -EINVAL;
	}

	err = mlx4_create_zones(dev, reserved_from_bot, reserved_from_bot,
				bottom_reserved_for_rss_bitmap,
				fixed_reserved_from_bot_rv,
				max_table_offset);

	if (err)
		return err;

	if (mlx4_is_mfunc(dev)) {
		/* for PPF use */
		dev->phys_caps.base_proxy_sqpn = dev->phys_caps.base_sqpn + 8;
		dev->phys_caps.base_tunnel_sqpn = dev->phys_caps.base_sqpn + 8 + 8 * MLX4_MFUNC_MAX;

		/* In mfunc, calculate the proxy and tunnel QP offsets for the
		 * PF here, since the PF does not call mlx4_slave_caps.
		 */
		dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
		dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
		dev->caps.qp1_tunnel = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
		dev->caps.qp1_proxy = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);

		if (!dev->caps.qp0_tunnel || !dev->caps.qp0_proxy ||
		    !dev->caps.qp1_tunnel || !dev->caps.qp1_proxy) {
			err = -ENOMEM;
			goto err_mem;
		}

		for (k = 0; k < dev->caps.num_ports; k++) {
			dev->caps.qp0_proxy[k] = dev->phys_caps.base_proxy_sqpn +
				8 * mlx4_master_func_num(dev) + k;
			dev->caps.qp0_tunnel[k] = dev->caps.qp0_proxy[k] + 8 * MLX4_MFUNC_MAX;
			dev->caps.qp1_proxy[k] = dev->phys_caps.base_proxy_sqpn +
				8 * mlx4_master_func_num(dev) + MLX4_MAX_PORTS + k;
			dev->caps.qp1_tunnel[k] = dev->caps.qp1_proxy[k] + 8 * MLX4_MFUNC_MAX;
		}
	}

	err = mlx4_CONF_SPECIAL_QP(dev, dev->phys_caps.base_sqpn);
	if (err)
		goto err_mem;

	return err;

err_mem:
	kfree(dev->caps.qp0_tunnel);
	kfree(dev->caps.qp0_proxy);
	kfree(dev->caps.qp1_tunnel);
	kfree(dev->caps.qp1_proxy);
	dev->caps.qp0_tunnel = dev->caps.qp0_proxy =
		dev->caps.qp1_tunnel = dev->caps.qp1_proxy = NULL;
	mlx4_cleanup_qp_zones(dev);
	return err;
}

void mlx4_cleanup_qp_table(struct mlx4_dev *dev)
{
	if (mlx4_is_slave(dev))
		return;

	mlx4_CONF_SPECIAL_QP(dev, 0);

	mlx4_cleanup_qp_zones(dev);
}

int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp,
		  struct mlx4_qp_context *context)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mlx4_cmd_box(dev, 0, mailbox->dma, qp->qpn, 0,
			   MLX4_CMD_QUERY_QP, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_WRAPPED);
	if (!err)
		memcpy(context, mailbox->buf + 8, sizeof(*context));

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_qp_query);

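/*
 * Drive a freshly reset QP through RST -> INIT -> RTR -> RTS, issuing
 * one mlx4_qp_modify() call per transition.  A minimal calling sketch
 * (QP context setup elided):
 *
 *	enum mlx4_qp_state state;
 *
 *	err = mlx4_qp_to_ready(dev, &mtt, &context, &qp, &state);
 *
 * On success, state is MLX4_QP_STATE_RTS; on failure, *qp_state is
 * updated only for the transitions that succeeded.
 */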
int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		     struct mlx4_qp_context *context,
		     struct mlx4_qp *qp, enum mlx4_qp_state *qp_state)
{
	int err;
	int i;
	enum mlx4_qp_state states[] = {
		MLX4_QP_STATE_RST,
		MLX4_QP_STATE_INIT,
		MLX4_QP_STATE_RTR,
		MLX4_QP_STATE_RTS
	};

	for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
		context->flags &= cpu_to_be32(~(0xf << 28));
		context->flags |= cpu_to_be32(states[i + 1] << 28);
		if (states[i + 1] != MLX4_QP_STATE_RTR)
			context->params2 &= ~MLX4_QP_BIT_FPP;
		err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1],
				     context, 0, 0, qp);
		if (err) {
			mlx4_err(dev, "Failed to bring QP to state: %d with error: %d\n",
				 states[i + 1], err);
			return err;
		}

		*qp_state = states[i + 1];
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_qp_to_ready);

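/*
 * Compute a 16-bit RoCE "entropy" value for a QP by folding the local
 * and remote QP numbers together (with the top two bits forced on);
 * 0xdead is returned if the QP context cannot be queried.
 */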
u16 mlx4_qp_roce_entropy(struct mlx4_dev *dev, u32 qpn)
{
	struct mlx4_qp_context context;
	struct mlx4_qp qp;
	int err;

	qp.qpn = qpn;
	err = mlx4_qp_query(dev, &qp, &context);
	if (!err) {
		u32 dest_qpn = be32_to_cpu(context.remote_qpn) & 0xffffff;
		u16 folded_dst = folded_qp(dest_qpn);
		u16 folded_src = folded_qp(qpn);

		return (dest_qpn != qpn) ?
			((folded_dst ^ folded_src) | 0xC000) :
			folded_src | 0xC000;
	}
	return 0xdead;
}