/* mlx5_eswitch_vacl.c — FreeBSD stable/10, revision 292196 */
1/*-
2 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 *    notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 *    notice, this list of conditions and the following disclaimer in the
11 *    documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23 * SUCH DAMAGE.
24 *
25 * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_core/mlx5_eswitch_vacl.c 292196 2015-12-14 10:31:03Z hselasky $
26 */
27
28#include <linux/if_ether.h>
29#include <linux/etherdevice.h>
30#include <dev/mlx5/driver.h>
31#include <dev/mlx5/flow_table.h>
32#include <dev/mlx5/eswitch_vacl.h>
33#include "mlx5_core.h"
34
/*
 * Flow group indices inside the vport ACL flow table.  For egress ACLs the
 * loopback group occupies slot 0; for ingress ACLs no loopback group is
 * created and the remaining groups are shifted down by one (see
 * mlx5_vacl_table_create_ft()).
 */
enum {
	MLX5_ACL_LOOPBACK_GROUP_IDX	= 0,
	MLX5_ACL_UNTAGGED_GROUP_IDX	= 1,
	MLX5_ACL_VLAN_GROUP_IDX		= 2,
	MLX5_ACL_UNKNOWN_VLAN_GROUP_IDX	= 3,
	MLX5_ACL_DEFAULT_GROUP_IDX	= 4,
	MLX5_ACL_GROUPS_NUM,
};
43
/* Bookkeeping for a single ACL filter rule. */
struct mlx_vacl_fr {
	bool			applied;	/* rule currently installed in the flow table */
	u32			fi;		/* flow index returned by mlx5_add_flow_table_entry() */
	u16			action;		/* MLX5_FLOW_CONTEXT_ACTION_* to apply */
};
49
/*
 * Per-vport ACL state: the flow table handle, the four fixed filter rules,
 * and the VLAN allow-list with its per-VLAN flow indices.
 */
struct mlx5_vacl_table {
	struct mlx5_core_dev	*dev;
	u16			vport;
	void			*ft;		/* opaque flow table handle; NULL when not created */
	int			max_ft_size;	/* log2 of max table size from device caps */
	int			acl_type;	/* MLX5_FLOW_TABLE_TYPE_{EGRESS,INGRESS}_ACL */

	struct mlx_vacl_fr	loopback_fr;	/* egress-only loopback suppression rule */
	struct mlx_vacl_fr	untagged_fr;	/* rule for frames without a VLAN tag */
	struct mlx_vacl_fr	unknown_vlan_fr;	/* tagged frames not in the allow-list */
	struct mlx_vacl_fr	default_fr;	/* catch-all rule (spoofchk mode only) */

	bool			vlan_filter_enabled;	/* filtering requested by caller */
	bool			vlan_filter_applied;	/* allow-list rules installed in HW */
	unsigned long		*vlan_allowed_bitmap;	/* 4096-bit VLAN allow map */
	u32			vlan_fi_table[4096];	/* flow index per allowed VLAN id */

	bool			spoofchk_enabled;
	u8			smac[ETH_ALEN];	/* vport MAC matched when spoofchk is on */
};
70
71static int mlx5_vacl_table_allow_vlan(void *acl_t, u16 vlan)
72{
73	struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
74	u32 *flow_context = NULL;
75	void *in_match_criteria = NULL;
76	void *in_match_value = NULL;
77	u8 *smac;
78	int vlan_mc_enable = MLX5_MATCH_OUTER_HEADERS;
79	int err = 0;
80
81	if (!test_bit(vlan, acl_table->vlan_allowed_bitmap))
82		return -EINVAL;
83
84	flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context));
85	if (!flow_context) {
86		err = -ENOMEM;
87		goto out;
88	}
89
90	in_match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
91	if (!in_match_criteria) {
92		err = -ENOMEM;
93		goto out;
94	}
95
96	/* Apply vlan rule */
97	MLX5_SET(flow_context, flow_context, action,
98		 MLX5_FLOW_CONTEXT_ACTION_ALLOW);
99	in_match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
100	MLX5_SET(fte_match_param, in_match_value, outer_headers.vlan_tag, 1);
101	MLX5_SET(fte_match_param, in_match_value, outer_headers.first_vid,
102		 vlan);
103	MLX5_SET(fte_match_param, in_match_criteria, outer_headers.vlan_tag, 1);
104	MLX5_SET(fte_match_param, in_match_criteria, outer_headers.first_vid,
105		 0xfff);
106	if (acl_table->spoofchk_enabled) {
107		smac = MLX5_ADDR_OF(fte_match_param,
108				    in_match_value,
109				    outer_headers.smac_47_16);
110		ether_addr_copy(smac, acl_table->smac);
111		smac = MLX5_ADDR_OF(fte_match_param,
112				    in_match_criteria,
113				    outer_headers.smac_47_16);
114		memset(smac, 0xff, ETH_ALEN);
115	}
116	err = mlx5_add_flow_table_entry(acl_table->ft, vlan_mc_enable,
117					in_match_criteria, flow_context,
118					&acl_table->vlan_fi_table[vlan]);
119out:
120	if (flow_context)
121		vfree(flow_context);
122	if (in_match_criteria)
123		vfree(in_match_criteria);
124	return err;
125}
126
127static int mlx5_vacl_table_apply_loopback_filter(void *acl_t, u16 new_action)
128{
129	struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
130	u8 loopback_mc_enable = MLX5_MATCH_MISC_PARAMETERS;
131	u32 *flow_context = NULL;
132	void *in_match_criteria = NULL;
133	void *in_match_value = NULL;
134	void *mv_misc = NULL;
135	void *mc_misc = NULL;
136	int err = 0;
137
138	flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context));
139	if (!flow_context) {
140		err = -ENOMEM;
141		goto out;
142	}
143
144	in_match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
145	if (!in_match_criteria) {
146		err = -ENOMEM;
147		goto out;
148	}
149
150	if (acl_table->loopback_fr.applied)
151		mlx5_del_flow_table_entry(acl_table->ft,
152					  acl_table->loopback_fr.fi);
153
154	/* Apply new loopback rule */
155	MLX5_SET(flow_context, flow_context, action, new_action);
156	in_match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
157	mv_misc  = MLX5_ADDR_OF(fte_match_param, in_match_value,
158				misc_parameters);
159	mc_misc  = MLX5_ADDR_OF(fte_match_param, in_match_criteria,
160				misc_parameters);
161	MLX5_SET(fte_match_set_misc, mv_misc, source_port, acl_table->vport);
162
163	MLX5_SET_TO_ONES(fte_match_set_misc, mc_misc, source_port);
164
165	err = mlx5_add_flow_table_entry(acl_table->ft, loopback_mc_enable,
166					in_match_criteria, flow_context,
167					&acl_table->loopback_fr.fi);
168	if (err) {
169		acl_table->loopback_fr.applied = false;
170	} else {
171		acl_table->loopback_fr.applied = true;
172		acl_table->loopback_fr.action  = new_action;
173	}
174
175out:
176	if (flow_context)
177		vfree(flow_context);
178	if (in_match_criteria)
179		vfree(in_match_criteria);
180	return err;
181}
182
183static int mlx5_vacl_table_apply_default(void *acl_t, u16 new_action)
184{
185	struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
186	u8 default_mc_enable = 0;
187	u32 *flow_context = NULL;
188	void *in_match_criteria = NULL;
189	int err = 0;
190
191	if (!acl_table->spoofchk_enabled)
192		return -EINVAL;
193
194	flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context));
195	if (!flow_context) {
196		err = -ENOMEM;
197		goto out;
198	}
199
200	in_match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
201	if (!in_match_criteria) {
202		err = -ENOMEM;
203		goto out;
204	}
205
206	if (acl_table->default_fr.applied)
207		mlx5_del_flow_table_entry(acl_table->ft,
208					  acl_table->default_fr.fi);
209
210	/* Apply new default rule */
211	MLX5_SET(flow_context, flow_context, action, new_action);
212	err = mlx5_add_flow_table_entry(acl_table->ft, default_mc_enable,
213					in_match_criteria, flow_context,
214					&acl_table->default_fr.fi);
215	if (err) {
216		acl_table->default_fr.applied = false;
217	} else {
218		acl_table->default_fr.applied = true;
219		acl_table->default_fr.action  = new_action;
220	}
221
222out:
223	if (flow_context)
224		vfree(flow_context);
225	if (in_match_criteria)
226		vfree(in_match_criteria);
227	return err;
228}
229
230static int mlx5_vacl_table_apply_untagged(void *acl_t, u16 new_action)
231{
232	struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
233	u8 untagged_mc_enable = MLX5_MATCH_OUTER_HEADERS;
234	u8 *smac;
235	u32 *flow_context = NULL;
236	void *in_match_criteria = NULL;
237	void *in_match_value = NULL;
238	int err = 0;
239
240	flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context));
241	if (!flow_context) {
242		err = -ENOMEM;
243		goto out;
244	}
245
246	in_match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
247	if (!in_match_criteria) {
248		err = -ENOMEM;
249		goto out;
250	}
251
252	if (acl_table->untagged_fr.applied)
253		mlx5_del_flow_table_entry(acl_table->ft,
254					  acl_table->untagged_fr.fi);
255
256	/* Apply new untagged rule */
257	MLX5_SET(flow_context, flow_context, action, new_action);
258	in_match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
259	MLX5_SET(fte_match_param, in_match_value, outer_headers.vlan_tag, 0);
260	MLX5_SET(fte_match_param, in_match_criteria, outer_headers.vlan_tag, 1);
261	if (acl_table->spoofchk_enabled) {
262		smac = MLX5_ADDR_OF(fte_match_param,
263				    in_match_value,
264				    outer_headers.smac_47_16);
265		ether_addr_copy(smac, acl_table->smac);
266		smac = MLX5_ADDR_OF(fte_match_param,
267				    in_match_criteria,
268				    outer_headers.smac_47_16);
269		memset(smac, 0xff, ETH_ALEN);
270	}
271	err = mlx5_add_flow_table_entry(acl_table->ft, untagged_mc_enable,
272					in_match_criteria, flow_context,
273					&acl_table->untagged_fr.fi);
274	if (err) {
275		acl_table->untagged_fr.applied = false;
276	} else {
277		acl_table->untagged_fr.applied = true;
278		acl_table->untagged_fr.action  = new_action;
279	}
280
281out:
282	if (flow_context)
283		vfree(flow_context);
284	if (in_match_criteria)
285		vfree(in_match_criteria);
286	return err;
287}
288
289static int mlx5_vacl_table_apply_unknown_vlan(void *acl_t, u16 new_action)
290{
291	struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
292	u8 default_mc_enable = (!acl_table->spoofchk_enabled) ? 0 :
293				MLX5_MATCH_OUTER_HEADERS;
294	u32 *flow_context = NULL;
295	void *in_match_criteria = NULL;
296	void *in_match_value = NULL;
297	u8 *smac;
298	int err = 0;
299
300	flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context));
301	if (!flow_context) {
302		err = -ENOMEM;
303		goto out;
304	}
305
306	in_match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
307	if (!in_match_criteria) {
308		err = -ENOMEM;
309		goto out;
310	}
311
312	if (acl_table->unknown_vlan_fr.applied)
313		mlx5_del_flow_table_entry(acl_table->ft,
314					  acl_table->unknown_vlan_fr.fi);
315
316	/* Apply new unknown vlan rule */
317	MLX5_SET(flow_context, flow_context, action, new_action);
318	if (acl_table->spoofchk_enabled) {
319		in_match_value = MLX5_ADDR_OF(flow_context, flow_context,
320					      match_value);
321		smac = MLX5_ADDR_OF(fte_match_param,
322				    in_match_value,
323				    outer_headers.smac_47_16);
324		ether_addr_copy(smac, acl_table->smac);
325		smac = MLX5_ADDR_OF(fte_match_param,
326				    in_match_criteria,
327				    outer_headers.smac_47_16);
328		memset(smac, 0xff, ETH_ALEN);
329	}
330	err = mlx5_add_flow_table_entry(acl_table->ft, default_mc_enable,
331					in_match_criteria, flow_context,
332					&acl_table->unknown_vlan_fr.fi);
333	if (err) {
334		acl_table->unknown_vlan_fr.applied = false;
335	} else {
336		acl_table->unknown_vlan_fr.applied = true;
337		acl_table->unknown_vlan_fr.action  = new_action;
338	}
339
340out:
341	if (flow_context)
342		vfree(flow_context);
343	if (in_match_criteria)
344		vfree(in_match_criteria);
345	return err;
346}
347
/*
 * Install one ALLOW rule per VLAN id set in vlan_allowed_bitmap.  On any
 * failure, every rule installed so far is removed again and the error is
 * returned.  Note the "++index" handed to find_next_bit(): it advances the
 * search offset past the bit that was just processed.
 */
static int mlx5_vacl_table_apply_vlan_filter(void *acl_t)
{
	struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
	int index = 0;
	int err_index = 0;
	int err = 0;

	/* Already in effect - nothing to do. */
	if (acl_table->vlan_filter_applied)
		return 0;

	for (index = find_first_bit(acl_table->vlan_allowed_bitmap, 4096);
		index < 4096;
		index = find_next_bit(acl_table->vlan_allowed_bitmap,
				      4096, ++index)) {
		err = mlx5_vacl_table_allow_vlan(acl_t, index);
		if (err)
			goto err_disable_vlans;
	}

	acl_table->vlan_filter_applied = true;
	return 0;

err_disable_vlans:
	/* Roll back: delete the rules for all allowed bits before "index". */
	for (err_index = find_first_bit(acl_table->vlan_allowed_bitmap, 4096);
		err_index < index;
		err_index = find_next_bit(acl_table->vlan_allowed_bitmap, 4096,
					  ++err_index)) {
		mlx5_del_flow_table_entry(acl_table->ft,
					  acl_table->vlan_fi_table[err_index]);
	}
	return err;
}
380
381static void mlx5_vacl_table_disapply_vlan_filter(void *acl_t)
382{
383	struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
384	int index = 0;
385
386	if (!acl_table->vlan_filter_applied)
387		return;
388
389	for (index = find_first_bit(acl_table->vlan_allowed_bitmap, 4096);
390		index < 4096;
391		index = find_next_bit(acl_table->vlan_allowed_bitmap, 4096,
392				      ++index)) {
393		mlx5_del_flow_table_entry(acl_table->ft,
394					  acl_table->vlan_fi_table[index]);
395	}
396
397	acl_table->vlan_filter_applied = false;
398}
399
400static void mlx5_vacl_table_disapply_all_filters(void *acl_t)
401{
402	struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
403
404	if (acl_table->default_fr.applied) {
405		mlx5_del_flow_table_entry(acl_table->ft,
406					  acl_table->default_fr.fi);
407		acl_table->default_fr.applied = false;
408	}
409	if (acl_table->unknown_vlan_fr.applied) {
410		mlx5_del_flow_table_entry(acl_table->ft,
411					  acl_table->unknown_vlan_fr.fi);
412		acl_table->unknown_vlan_fr.applied = false;
413	}
414	if (acl_table->loopback_fr.applied) {
415		mlx5_del_flow_table_entry(acl_table->ft,
416					  acl_table->loopback_fr.fi);
417		acl_table->loopback_fr.applied = false;
418	}
419	if (acl_table->untagged_fr.applied) {
420		mlx5_del_flow_table_entry(acl_table->ft,
421					  acl_table->untagged_fr.fi);
422		acl_table->untagged_fr.applied = false;
423	}
424	if (acl_table->vlan_filter_applied) {
425		mlx5_vacl_table_disapply_vlan_filter(acl_t);
426		acl_table->vlan_filter_applied = false;
427	}
428}
429
430static int mlx5_vacl_table_apply_all_filters(void *acl_t)
431{
432	struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
433	int err = 0;
434
435	if (!acl_table->default_fr.applied && acl_table->spoofchk_enabled) {
436		err = mlx5_vacl_table_apply_default(acl_table,
437						    acl_table->default_fr.action);
438		if (err)
439			goto err_disapply_all;
440	}
441
442	if (!acl_table->unknown_vlan_fr.applied) {
443		err = mlx5_vacl_table_apply_unknown_vlan(acl_table,
444							 acl_table->unknown_vlan_fr.action);
445		if (err)
446			goto err_disapply_all;
447	}
448
449	if (!acl_table->loopback_fr.applied &&
450	    acl_table->acl_type == MLX5_FLOW_TABLE_TYPE_EGRESS_ACL) {
451		err = mlx5_vacl_table_apply_loopback_filter(
452						acl_table,
453						acl_table->loopback_fr.action);
454		if (err)
455			goto err_disapply_all;
456	}
457
458	if (!acl_table->untagged_fr.applied) {
459		err = mlx5_vacl_table_apply_untagged(acl_table,
460						     acl_table->untagged_fr.action);
461		if (err)
462			goto err_disapply_all;
463	}
464
465	if (!acl_table->vlan_filter_applied && acl_table->vlan_filter_enabled) {
466		err = mlx5_vacl_table_apply_vlan_filter(acl_t);
467		if (err)
468			goto err_disapply_all;
469	}
470
471	goto out;
472
473err_disapply_all:
474	mlx5_vacl_table_disapply_all_filters(acl_t);
475
476out:
477	return err;
478}
479
480static void mlx5_vacl_table_destroy_ft(void *acl_t)
481{
482	struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
483
484	mlx5_vacl_table_disapply_all_filters(acl_t);
485	if (acl_table->ft)
486		mlx5_destroy_flow_table(acl_table->ft);
487	acl_table->ft = NULL;
488}
489
490static int mlx5_vacl_table_create_ft(void *acl_t, bool spoofchk)
491{
492	struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
493	int log_acl_ft_size;
494	int err = 0;
495	int groups_num = MLX5_ACL_GROUPS_NUM - 1;
496	int shift_idx = MLX5_ACL_UNTAGGED_GROUP_IDX;
497	u8 *smac;
498	struct mlx5_flow_table_group *g;
499
500	if (acl_table->ft)
501		return -EINVAL;
502
503	g = kcalloc(MLX5_ACL_GROUPS_NUM, sizeof(*g), GFP_KERNEL);
504	if (!g)
505		goto out;
506
507	acl_table->spoofchk_enabled = spoofchk;
508
509	/*
510	 * for vlan group
511	 */
512	log_acl_ft_size = 4096;
513	/*
514	 * for loopback filter rule
515	 */
516	log_acl_ft_size += 1;
517	/*
518	 * for untagged rule
519	 */
520	log_acl_ft_size += 1;
521	/*
522	 * for unknown vlan rule
523	 */
524	log_acl_ft_size += 1;
525	/*
526	 * for default rule
527	 */
528	log_acl_ft_size += 1;
529
530	log_acl_ft_size = order_base_2(log_acl_ft_size);
531	log_acl_ft_size = min_t(int, log_acl_ft_size, acl_table->max_ft_size);
532
533	if (log_acl_ft_size < 2)
534		goto out;
535
536	if (acl_table->acl_type == MLX5_FLOW_TABLE_TYPE_EGRESS_ACL) {
537		/* Loopback filter group */
538		g[MLX5_ACL_LOOPBACK_GROUP_IDX].log_sz = 0;
539		g[MLX5_ACL_LOOPBACK_GROUP_IDX].match_criteria_enable =
540				MLX5_MATCH_MISC_PARAMETERS;
541		MLX5_SET_TO_ONES(fte_match_param,
542				 g[MLX5_ACL_LOOPBACK_GROUP_IDX].match_criteria,
543				 misc_parameters.source_port);
544		groups_num++;
545		shift_idx = MLX5_ACL_LOOPBACK_GROUP_IDX;
546	}
547	/* Untagged traffic group */
548	g[MLX5_ACL_UNTAGGED_GROUP_IDX - shift_idx].log_sz = 0;
549	g[MLX5_ACL_UNTAGGED_GROUP_IDX - shift_idx].match_criteria_enable =
550			MLX5_MATCH_OUTER_HEADERS;
551	MLX5_SET(fte_match_param,
552		 g[MLX5_ACL_UNTAGGED_GROUP_IDX - shift_idx].match_criteria,
553		 outer_headers.vlan_tag, 1);
554	if (spoofchk) {
555		smac = MLX5_ADDR_OF(fte_match_param,
556				    g[MLX5_ACL_UNTAGGED_GROUP_IDX - shift_idx]
557				      .match_criteria,
558				    outer_headers.smac_47_16);
559		memset(smac, 0xff, ETH_ALEN);
560	}
561
562	/* Allowed vlans group */
563	g[MLX5_ACL_VLAN_GROUP_IDX - shift_idx].log_sz = log_acl_ft_size - 1;
564	g[MLX5_ACL_VLAN_GROUP_IDX - shift_idx].match_criteria_enable =
565			MLX5_MATCH_OUTER_HEADERS;
566	MLX5_SET(fte_match_param,
567		 g[MLX5_ACL_VLAN_GROUP_IDX - shift_idx].match_criteria,
568		 outer_headers.vlan_tag, 1);
569	MLX5_SET(fte_match_param,
570		 g[MLX5_ACL_VLAN_GROUP_IDX - shift_idx].match_criteria,
571		 outer_headers.first_vid, 0xfff);
572	if (spoofchk) {
573		smac = MLX5_ADDR_OF(fte_match_param,
574				    g[MLX5_ACL_VLAN_GROUP_IDX - shift_idx]
575				      .match_criteria,
576				    outer_headers.smac_47_16);
577		memset(smac, 0xff, ETH_ALEN);
578	}
579
580	/* Unknown vlan traffic group */
581	g[MLX5_ACL_UNKNOWN_VLAN_GROUP_IDX - shift_idx].log_sz = 0;
582	g[MLX5_ACL_UNKNOWN_VLAN_GROUP_IDX - shift_idx].match_criteria_enable =
583			(spoofchk ? MLX5_MATCH_OUTER_HEADERS : 0);
584	if (spoofchk) {
585		smac = MLX5_ADDR_OF(
586				fte_match_param,
587				g[MLX5_ACL_UNKNOWN_VLAN_GROUP_IDX - shift_idx]
588				  .match_criteria,
589				outer_headers.smac_47_16);
590		memset(smac, 0xff, ETH_ALEN);
591	}
592
593	/*
594	 * Default group - for spoofchk only.
595	 */
596	g[MLX5_ACL_DEFAULT_GROUP_IDX - shift_idx].log_sz = 0;
597	g[MLX5_ACL_DEFAULT_GROUP_IDX - shift_idx].match_criteria_enable = 0;
598
599	acl_table->ft = mlx5_create_flow_table(acl_table->dev,
600					       0,
601					       acl_table->acl_type,
602					       acl_table->vport,
603					       groups_num,
604					       g);
605	if (!acl_table->ft) {
606		err = -ENOMEM;
607		goto out;
608	}
609
610	err = mlx5_vacl_table_apply_all_filters(acl_t);
611	if (err)
612		goto err_destroy_ft;
613
614	goto out;
615
616err_destroy_ft:
617	mlx5_vacl_table_destroy_ft(acl_table->ft);
618	acl_table->ft = NULL;
619
620out:
621	kfree(g);
622	return err;
623}
624
625void *mlx5_vacl_table_create(struct mlx5_core_dev *dev,
626			     u16 vport, bool is_egress)
627{
628	struct mlx5_vacl_table *acl_table;
629	int err = 0;
630
631	if (is_egress && !MLX5_CAP_ESW_FLOWTABLE_EGRESS_ACL(dev, ft_support))
632		return NULL;
633
634	if (!is_egress && !MLX5_CAP_ESW_FLOWTABLE_INGRESS_ACL(dev, ft_support))
635		return NULL;
636
637	acl_table = kzalloc(sizeof(*acl_table), GFP_KERNEL);
638	if (!acl_table)
639		return NULL;
640
641	acl_table->acl_type = is_egress ? MLX5_FLOW_TABLE_TYPE_EGRESS_ACL :
642					  MLX5_FLOW_TABLE_TYPE_INGRESS_ACL;
643	acl_table->max_ft_size = (is_egress ?
644					MLX5_CAP_ESW_FLOWTABLE_EGRESS_ACL(dev,
645									  log_max_ft_size) :
646					MLX5_CAP_ESW_FLOWTABLE_INGRESS_ACL(dev,
647									   log_max_ft_size));
648	acl_table->dev = dev;
649	acl_table->vport = vport;
650
651	/*
652	 * default behavior : Allow and if spoofchk drop the default
653	 */
654	acl_table->default_fr.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
655	acl_table->loopback_fr.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
656	acl_table->unknown_vlan_fr.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
657	acl_table->untagged_fr.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
658	err = mlx5_vacl_table_create_ft(acl_table, false);
659	if (err)
660		goto err_free_acl_table;
661
662	acl_table->vlan_allowed_bitmap = kcalloc(BITS_TO_LONGS(4096),
663						 sizeof(uintptr_t),
664						 GFP_KERNEL);
665	if (!acl_table->vlan_allowed_bitmap)
666		goto err_destroy_ft;
667
668	goto out;
669
670err_destroy_ft:
671	mlx5_vacl_table_destroy_ft(acl_table->ft);
672	acl_table->ft = NULL;
673
674err_free_acl_table:
675	kfree(acl_table);
676	acl_table = NULL;
677
678out:
679	return (void *)acl_table;
680}
681EXPORT_SYMBOL(mlx5_vacl_table_create);
682
/*
 * Destroy the ACL flow table and free all memory behind the handle.
 * The handle must not be used after this call.
 */
void mlx5_vacl_table_cleanup(void *acl_t)
{
	struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;

	mlx5_vacl_table_destroy_ft(acl_t);
	kfree(acl_table->vlan_allowed_bitmap);
	kfree(acl_table);
}
EXPORT_SYMBOL(mlx5_vacl_table_cleanup);
692
693int mlx5_vacl_table_add_vlan(void *acl_t, u16 vlan)
694{
695	struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
696	int err = 0;
697
698	if (test_bit(vlan, acl_table->vlan_allowed_bitmap))
699		return 0;
700	__set_bit(vlan, acl_table->vlan_allowed_bitmap);
701	if (!acl_table->vlan_filter_applied)
702		return 0;
703
704	err = mlx5_vacl_table_allow_vlan(acl_t, vlan);
705	if (err)
706		goto err_clear_vbit;
707
708	goto out;
709
710err_clear_vbit:
711	__clear_bit(vlan, acl_table->vlan_allowed_bitmap);
712
713out:
714	return err;
715}
716EXPORT_SYMBOL(mlx5_vacl_table_add_vlan);
717
718void mlx5_vacl_table_del_vlan(void *acl_t, u16 vlan)
719{
720	struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
721
722	if (!test_bit(vlan, acl_table->vlan_allowed_bitmap))
723		return;
724
725	__clear_bit(vlan, acl_table->vlan_allowed_bitmap);
726
727	if (!acl_table->vlan_filter_applied)
728		return;
729
730	mlx5_del_flow_table_entry(acl_table->ft,
731				  acl_table->vlan_fi_table[vlan]);
732}
733EXPORT_SYMBOL(mlx5_vacl_table_del_vlan);
734
/*
 * Enable VLAN filtering and push the allow-list rules to hardware.
 * Returns 0 on success or -errno from rule installation.
 */
int mlx5_vacl_table_enable_vlan_filter(void *acl_t)
{
	struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;

	acl_table->vlan_filter_enabled = true;
	return mlx5_vacl_table_apply_vlan_filter(acl_t);
}
EXPORT_SYMBOL(mlx5_vacl_table_enable_vlan_filter);
743
/* Disable VLAN filtering and remove the allow-list rules from hardware. */
void mlx5_vacl_table_disable_vlan_filter(void *acl_t)
{
	struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;

	acl_table->vlan_filter_enabled = false;
	mlx5_vacl_table_disapply_vlan_filter(acl_t);
}
EXPORT_SYMBOL(mlx5_vacl_table_disable_vlan_filter);
752
/* Set the untagged-traffic rule to DROP. */
int mlx5_vacl_table_drop_untagged(void *acl_t)
{
	return mlx5_vacl_table_apply_untagged(acl_t,
			MLX5_FLOW_CONTEXT_ACTION_DROP);
}
EXPORT_SYMBOL(mlx5_vacl_table_drop_untagged);
759
/* Set the untagged-traffic rule to ALLOW. */
int mlx5_vacl_table_allow_untagged(void *acl_t)
{
	return mlx5_vacl_table_apply_untagged(acl_t,
			MLX5_FLOW_CONTEXT_ACTION_ALLOW);
}
EXPORT_SYMBOL(mlx5_vacl_table_allow_untagged);
766
/* Set the unknown-VLAN rule to DROP. */
int mlx5_vacl_table_drop_unknown_vlan(void *acl_t)
{
	return mlx5_vacl_table_apply_unknown_vlan(acl_t,
			MLX5_FLOW_CONTEXT_ACTION_DROP);
}
EXPORT_SYMBOL(mlx5_vacl_table_drop_unknown_vlan);
773
/* Set the unknown-VLAN rule to ALLOW. */
int mlx5_vacl_table_allow_unknown_vlan(void *acl_t)
{
	return mlx5_vacl_table_apply_unknown_vlan(acl_t,
			MLX5_FLOW_CONTEXT_ACTION_ALLOW);
}
EXPORT_SYMBOL(mlx5_vacl_table_allow_unknown_vlan);
780
781int mlx5_vacl_table_set_spoofchk(void *acl_t, bool spoofchk, u8 *vport_mac)
782{
783	struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
784	int err = 0;
785
786	if (spoofchk == acl_table->spoofchk_enabled) {
787		if (!spoofchk ||
788		    (spoofchk && !memcmp(acl_table->smac, vport_mac, ETH_ALEN)))
789			return 0;
790	}
791
792	ether_addr_copy(acl_table->smac, vport_mac);
793	if (spoofchk != acl_table->spoofchk_enabled) {
794		mlx5_vacl_table_destroy_ft(acl_t);
795		err = mlx5_vacl_table_create_ft(acl_t, spoofchk);
796	} else {
797		mlx5_vacl_table_disapply_all_filters(acl_t);
798		err = mlx5_vacl_table_apply_all_filters(acl_t);
799	}
800
801	return err;
802}
803EXPORT_SYMBOL(mlx5_vacl_table_set_spoofchk);
804
805