/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: stable/10/sys/dev/qlnx/qlnxe/ecore_l2_api.h 337519 2018-08-09 01:39:47Z davidcs $
 *
 */

#ifndef __ECORE_L2_API_H__
#define __ECORE_L2_API_H__

#include "ecore_status.h"
#include "ecore_sp_api.h"
#include "ecore_int_api.h"

#ifndef __EXTRACT__LINUX__
enum ecore_rss_caps {
	ECORE_RSS_IPV4		= 0x1,
	ECORE_RSS_IPV6		= 0x2,
	ECORE_RSS_IPV4_TCP	= 0x4,
	ECORE_RSS_IPV6_TCP	= 0x8,
	ECORE_RSS_IPV4_UDP	= 0x10,
	ECORE_RSS_IPV6_UDP	= 0x20,
};

/* Should be the same as ETH_RSS_IND_TABLE_ENTRIES_NUM */
#define ECORE_RSS_IND_TABLE_SIZE 128
#define ECORE_RSS_KEY_SIZE 10 /* size in 32b chunks */

#define ECORE_MAX_PHC_DRIFT_PPB	291666666

enum ecore_ptp_filter_type {
	ECORE_PTP_FILTER_NONE,
	ECORE_PTP_FILTER_ALL,
	ECORE_PTP_FILTER_V1_L4_EVENT,
	ECORE_PTP_FILTER_V1_L4_GEN,
	ECORE_PTP_FILTER_V2_L4_EVENT,
	ECORE_PTP_FILTER_V2_L4_GEN,
	ECORE_PTP_FILTER_V2_L2_EVENT,
	ECORE_PTP_FILTER_V2_L2_GEN,
	ECORE_PTP_FILTER_V2_EVENT,
	ECORE_PTP_FILTER_V2_GEN
};

enum ecore_ptp_hwtstamp_tx_type {
	ECORE_PTP_HWTSTAMP_TX_OFF,
	ECORE_PTP_HWTSTAMP_TX_ON,
};
#endif

#ifndef __EXTRACT__LINUX__
struct ecore_queue_start_common_params {
	/* Should always be relative to entity sending this. */
	u8 vport_id;
	u16 queue_id;

	/* Relative, but relevant only for PFs */
	u8 stats_id;

	struct ecore_sb_info *p_sb;
	u8 sb_idx;

	u8 tc;
};

struct ecore_rxq_start_ret_params {
	void OSAL_IOMEM *p_prod;
	void *p_handle;
};

struct ecore_txq_start_ret_params {
	void OSAL_IOMEM *p_doorbell;
	void *p_handle;
};
#endif

struct ecore_rss_params {
	u8 update_rss_config;
	u8 rss_enable;
	u8 rss_eng_id;
	u8 update_rss_capabilities;
	u8 update_rss_ind_table;
	u8 update_rss_key;
	u8 rss_caps;
	u8 rss_table_size_log; /* The table size is 2 ^ rss_table_size_log */

	/* Indirection table consists of rx queue handles */
	void *rss_ind_table[ECORE_RSS_IND_TABLE_SIZE];
	u32 rss_key[ECORE_RSS_KEY_SIZE];
};
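
/* Example (illustrative sketch, not part of the original API description):
 * filling an ecore_rss_params instance for a later ecore_sp_vport_update()
 * call. The rx-queue handles are the p_handle values returned through
 * ecore_rxq_start_ret_params; `rxq_handles` and `num_rxqs` are assumed
 * driver-maintained values.
 *
 *	struct ecore_rss_params rss = { 0 };
 *	int i;
 *
 *	rss.update_rss_config = 1;
 *	rss.rss_enable = 1;
 *	rss.update_rss_capabilities = 1;
 *	rss.update_rss_ind_table = 1;
 *	rss.update_rss_key = 1;
 *	rss.rss_caps = ECORE_RSS_IPV4 | ECORE_RSS_IPV6 |
 *		       ECORE_RSS_IPV4_TCP | ECORE_RSS_IPV6_TCP;
 *	rss.rss_table_size_log = 7;	// 2 ^ 7 == ECORE_RSS_IND_TABLE_SIZE
 *	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++)
 *		rss.rss_ind_table[i] = rxq_handles[i % num_rxqs];
 *	// rss.rss_key[0..ECORE_RSS_KEY_SIZE - 1] would be filled with a
 *	// random hash key before use.
 */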

struct ecore_sge_tpa_params {
	u8 max_buffers_per_cqe;

	u8 update_tpa_en_flg;
	u8 tpa_ipv4_en_flg;
	u8 tpa_ipv6_en_flg;
	u8 tpa_ipv4_tunn_en_flg;
	u8 tpa_ipv6_tunn_en_flg;

	u8 update_tpa_param_flg;
	u8 tpa_pkt_split_flg;
	u8 tpa_hdr_data_split_flg;
	u8 tpa_gro_consistent_flg;
	u8 tpa_max_aggs_num;
	u16 tpa_max_size;
	u16 tpa_min_size_to_start;
	u16 tpa_min_size_to_cont;
};

enum ecore_filter_opcode {
	ECORE_FILTER_ADD,
	ECORE_FILTER_REMOVE,
	ECORE_FILTER_MOVE,
	ECORE_FILTER_REPLACE, /* Delete all MACs and add new one instead */
	ECORE_FILTER_FLUSH, /* Removes all filters */
};

enum ecore_filter_ucast_type {
	ECORE_FILTER_MAC,
	ECORE_FILTER_VLAN,
	ECORE_FILTER_MAC_VLAN,
	ECORE_FILTER_INNER_MAC,
	ECORE_FILTER_INNER_VLAN,
	ECORE_FILTER_INNER_PAIR,
	ECORE_FILTER_INNER_MAC_VNI_PAIR,
	ECORE_FILTER_MAC_VNI_PAIR,
	ECORE_FILTER_VNI,
};

struct ecore_filter_ucast {
	enum ecore_filter_opcode opcode;
	enum ecore_filter_ucast_type type;
	u8 is_rx_filter;
	u8 is_tx_filter;
	u8 vport_to_add_to;
	u8 vport_to_remove_from;
	unsigned char mac[ETH_ALEN];
	u8 assert_on_error;
	u16 vlan;
	u32 vni;
};

struct ecore_filter_mcast {
	/* MOVE is not supported for multicast */
	enum ecore_filter_opcode opcode;
	u8 vport_to_add_to;
	u8 vport_to_remove_from;
	u8 num_mc_addrs;
#define ECORE_MAX_MC_ADDRS	64
	unsigned char mac[ECORE_MAX_MC_ADDRS][ETH_ALEN];
};

struct ecore_filter_accept_flags {
	u8 update_rx_mode_config;
	u8 update_tx_mode_config;
	u8 rx_accept_filter;
	u8 tx_accept_filter;
#define ECORE_ACCEPT_NONE		0x01
#define ECORE_ACCEPT_UCAST_MATCHED	0x02
#define ECORE_ACCEPT_UCAST_UNMATCHED	0x04
#define ECORE_ACCEPT_MCAST_MATCHED	0x08
#define ECORE_ACCEPT_MCAST_UNMATCHED	0x10
#define ECORE_ACCEPT_BCAST		0x20
};

#ifndef __EXTRACT__LINUX__
enum ecore_filter_config_mode {
	ECORE_FILTER_CONFIG_MODE_DISABLE,
	ECORE_FILTER_CONFIG_MODE_5_TUPLE,
	ECORE_FILTER_CONFIG_MODE_L4_PORT,
	ECORE_FILTER_CONFIG_MODE_IP_DEST,
};
#endif

struct ecore_arfs_config_params {
	bool tcp;
	bool udp;
	bool ipv4;
	bool ipv6;
	enum ecore_filter_config_mode mode;
};

/* Add / remove / move / remove-all unicast MAC-VLAN filters.
 * FW will assert in the following cases, so the driver should take care to
 * avoid them:
 * 1. Adding a filter to a full table.
 * 2. Adding a filter which already exists on that vport.
 * 3. Removing a filter which doesn't exist.
 */

enum _ecore_status_t
ecore_filter_ucast_cmd(struct ecore_dev *p_dev,
		       struct ecore_filter_ucast *p_filter_cmd,
		       enum spq_mode comp_mode,
		       struct ecore_spq_comp_cb *p_comp_data);
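
/* Example (illustrative sketch): adding a unicast MAC filter on vport 0 for
 * both Rx and Tx with a blocking completion. ECORE_SPQ_MODE_EBLOCK,
 * OSAL_MEMCPY() and OSAL_NULL are assumed to be provided by the SPQ/OSAL
 * headers; `p_dev` and `mac_addr` are driver-side assumptions.
 *
 *	struct ecore_filter_ucast ucast = { 0 };
 *	enum _ecore_status_t rc;
 *
 *	ucast.opcode = ECORE_FILTER_ADD;
 *	ucast.type = ECORE_FILTER_MAC;
 *	ucast.is_rx_filter = 1;
 *	ucast.is_tx_filter = 1;
 *	ucast.vport_to_add_to = 0;
 *	OSAL_MEMCPY(ucast.mac, mac_addr, ETH_ALEN);
 *
 *	rc = ecore_filter_ucast_cmd(p_dev, &ucast, ECORE_SPQ_MODE_EBLOCK,
 *				    OSAL_NULL);
 */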

/* Add / remove / move multicast MAC filters. */
enum _ecore_status_t
ecore_filter_mcast_cmd(struct ecore_dev *p_dev,
		       struct ecore_filter_mcast *p_filter_cmd,
		       enum spq_mode comp_mode,
		       struct ecore_spq_comp_cb *p_comp_data);
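
/* Example (illustrative sketch): programming a multicast MAC list on vport 0.
 * `mc_list` and `mc_count` (<= ECORE_MAX_MC_ADDRS) are assumed driver-side
 * values; ECORE_SPQ_MODE_CB with a NULL callback lets the ramrod complete
 * asynchronously.
 *
 *	struct ecore_filter_mcast mcast = { 0 };
 *	enum _ecore_status_t rc;
 *	int i;
 *
 *	mcast.opcode = ECORE_FILTER_ADD;
 *	mcast.vport_to_add_to = 0;
 *	mcast.num_mc_addrs = mc_count;
 *	for (i = 0; i < mc_count; i++)
 *		OSAL_MEMCPY(mcast.mac[i], mc_list[i], ETH_ALEN);
 *
 *	rc = ecore_filter_mcast_cmd(p_dev, &mcast, ECORE_SPQ_MODE_CB,
 *				    OSAL_NULL);
 */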

/* Set "accept" filters */
enum _ecore_status_t
ecore_filter_accept_cmd(
	struct ecore_dev		 *p_dev,
	u8				 vport,
	struct ecore_filter_accept_flags accept_flags,
	u8				 update_accept_any_vlan,
	u8				 accept_any_vlan,
	enum spq_mode			 comp_mode,
	struct ecore_spq_comp_cb	 *p_comp_data);
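
/* Example (illustrative sketch): putting vport 0 into promiscuous Rx mode.
 * Only the Rx side is touched, since update_tx_mode_config is left clear;
 * the two u8 arguments after `flags` leave accept_any_vlan unchanged.
 * ECORE_SPQ_MODE_CB and OSAL_NULL are assumed from the SPQ/OSAL headers.
 *
 *	struct ecore_filter_accept_flags flags = { 0 };
 *	enum _ecore_status_t rc;
 *
 *	flags.update_rx_mode_config = 1;
 *	flags.rx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
 *				 ECORE_ACCEPT_UCAST_UNMATCHED |
 *				 ECORE_ACCEPT_MCAST_MATCHED |
 *				 ECORE_ACCEPT_MCAST_UNMATCHED |
 *				 ECORE_ACCEPT_BCAST;
 *
 *	rc = ecore_filter_accept_cmd(p_dev, 0, flags, 0, 0,
 *				     ECORE_SPQ_MODE_CB, OSAL_NULL);
 */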

/**
 * @brief ecore_eth_rx_queue_start - RX Queue Start Ramrod
 *
 * This ramrod initializes an RX Queue for a VPort. An Assert is generated if
 * the VPort ID is not currently initialized.
 *
 * @param p_hwfn
 * @param opaque_fid
 * @param p_params		Inputs; Relative for PF [SB being an exception]
 * @param bd_max_bytes		Maximum bytes that can be placed on a BD
 * @param bd_chain_phys_addr	Physical address of BDs for receive.
 * @param cqe_pbl_addr		Physical address of the CQE PBL Table.
 * @param cqe_pbl_size		Size of the CQE PBL Table
 * @param p_ret_params		Pointer to a struct to be filled with outputs.
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
			 u16 opaque_fid,
			 struct ecore_queue_start_common_params *p_params,
			 u16 bd_max_bytes,
			 dma_addr_t bd_chain_phys_addr,
			 dma_addr_t cqe_pbl_addr,
			 u16 cqe_pbl_size,
			 struct ecore_rxq_start_ret_params *p_ret_params);
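
/* Example (illustrative sketch): starting Rx queue 0 on vport 0 of a PF.
 * `sb_info`, `rx_buf_size`, `bd_chain_phys`, `cqe_pbl_phys` and
 * `cqe_pbl_size` are assumed driver-allocated resources, and the opaque FID
 * is assumed to live in p_hwfn->hw_info. The returned p_prod and p_handle
 * are kept for producer updates and for ecore_eth_rx_queue_stop().
 *
 *	struct ecore_queue_start_common_params qparams = { 0 };
 *	struct ecore_rxq_start_ret_params ret = { 0 };
 *	enum _ecore_status_t rc;
 *
 *	qparams.vport_id = 0;
 *	qparams.queue_id = 0;
 *	qparams.stats_id = 0;
 *	qparams.p_sb = sb_info;
 *	qparams.sb_idx = 0;
 *
 *	rc = ecore_eth_rx_queue_start(p_hwfn, p_hwfn->hw_info.opaque_fid,
 *				      &qparams, rx_buf_size, bd_chain_phys,
 *				      cqe_pbl_phys, cqe_pbl_size, &ret);
 */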

/**
 * @brief ecore_eth_rx_queue_stop - This ramrod closes an Rx queue
 *
 * @param p_hwfn
 * @param p_rxq			Handler of the queue to close
 * @param eq_completion_only	If True, completion will be on EQe; if False,
 *				completion will be on EQe if the p_hwfn opaque
 *				differs from the RXQ opaque, otherwise on CQe.
 * @param cqe_completion	If True, completion will be received on CQe.
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
			void *p_rxq,
			bool eq_completion_only,
			bool cqe_completion);

/**
 * @brief - TX Queue Start Ramrod
 *
 * This ramrod initializes a TX Queue for a VPort. An Assert is generated if
 * the VPort is not currently initialized.
 *
 * @param p_hwfn
 * @param opaque_fid
 * @param p_params
 * @param tc			traffic class to use with this L2 txq
 * @param pbl_addr		address of the pbl array
 * @param pbl_size		number of entries in pbl
 * @param p_ret_params		Pointer to fill the return parameters in.
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,
			 u16 opaque_fid,
			 struct ecore_queue_start_common_params *p_params,
			 u8 tc,
			 dma_addr_t pbl_addr,
			 u16 pbl_size,
			 struct ecore_txq_start_ret_params *p_ret_params);
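
/* Example (illustrative sketch): starting Tx queue 0 on vport 0, TC 0, and
 * keeping the returned doorbell address. `sb_info`, `tx_pbl_phys` and
 * `tx_pbl_size` are assumed driver-allocated resources.
 *
 *	struct ecore_queue_start_common_params qparams = { 0 };
 *	struct ecore_txq_start_ret_params ret = { 0 };
 *	enum _ecore_status_t rc;
 *
 *	qparams.vport_id = 0;
 *	qparams.queue_id = 0;
 *	qparams.stats_id = 0;
 *	qparams.p_sb = sb_info;
 *	qparams.sb_idx = 1;	// a different SB index than the Rx queue
 *
 *	rc = ecore_eth_tx_queue_start(p_hwfn, p_hwfn->hw_info.opaque_fid,
 *				      &qparams, 0, tx_pbl_phys, tx_pbl_size,
 *				      &ret);
 *	// ret.p_doorbell is the address to ring when Tx BDs are posted.
 */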

/**
 * @brief ecore_eth_tx_queue_stop - closes a Tx queue
 *
 * @param p_hwfn
 * @param p_txq - handle of the Tx queue to be closed
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn,
					     void *p_txq);

enum ecore_tpa_mode {
	ECORE_TPA_MODE_NONE,
	ECORE_TPA_MODE_RSC,
	ECORE_TPA_MODE_GRO,
	ECORE_TPA_MODE_MAX
};

struct ecore_sp_vport_start_params {
	enum ecore_tpa_mode tpa_mode;
	bool remove_inner_vlan;	/* Inner VLAN removal is enabled */
	bool tx_switching;	/* Vport supports tx-switching */
	bool handle_ptp_pkts;	/* Handle PTP packets */
	bool only_untagged;	/* Untagged pkt control */
	bool drop_ttl0;		/* Drop packets with TTL = 0 */
	u8 max_buffers_per_cqe;
	u32 concrete_fid;
	u16 opaque_fid;
	u8 vport_id;		/* VPORT ID */
	u16 mtu;		/* VPORT MTU */
	bool zero_placement_offset;
	bool check_mac;
	bool check_ethtype;

	/* Strict behavior on transmission errors */
	bool b_err_illegal_vlan_mode;
	bool b_err_illegal_inband_mode;
	bool b_err_vlan_insert_with_inband;
	bool b_err_small_pkt;
	bool b_err_big_pkt;
	bool b_err_anti_spoof;
	bool b_err_ctrl_frame;
};

/**
 * @brief ecore_sp_vport_start -
 *
 * This ramrod initializes a VPort. An Assert is generated if the Function ID
 * of the VPort is not enabled.
 *
 * @param p_hwfn
 * @param p_params		VPORT start params
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_sp_vport_start(struct ecore_hwfn *p_hwfn,
		     struct ecore_sp_vport_start_params *p_params);
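
/* Example (illustrative sketch): creating vport 0 with a 1500-byte MTU and
 * GRO aggregation. The FIDs are assumed to live in p_hwfn->hw_info.
 *
 *	struct ecore_sp_vport_start_params start = { 0 };
 *	enum _ecore_status_t rc;
 *
 *	start.tpa_mode = ECORE_TPA_MODE_GRO;
 *	start.remove_inner_vlan = false;
 *	start.drop_ttl0 = false;
 *	start.only_untagged = true;
 *	start.max_buffers_per_cqe = 1;
 *	start.concrete_fid = p_hwfn->hw_info.concrete_fid;
 *	start.opaque_fid = p_hwfn->hw_info.opaque_fid;
 *	start.vport_id = 0;
 *	start.mtu = 1500;
 *
 *	rc = ecore_sp_vport_start(p_hwfn, &start);
 */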

struct ecore_sp_vport_update_params {
	u16			opaque_fid;
	u8			vport_id;
	u8			update_vport_active_rx_flg;
	u8			vport_active_rx_flg;
	u8			update_vport_active_tx_flg;
	u8			vport_active_tx_flg;
	u8			update_inner_vlan_removal_flg;
	u8			inner_vlan_removal_flg;
	u8			silent_vlan_removal_flg;
	u8			update_default_vlan_enable_flg;
	u8			default_vlan_enable_flg;
	u8			update_default_vlan_flg;
	u16			default_vlan;
	u8			update_tx_switching_flg;
	u8			tx_switching_flg;
	u8			update_approx_mcast_flg;
	u8			update_anti_spoofing_en_flg;
	u8			anti_spoofing_en;
	u8			update_accept_any_vlan_flg;
	u8			accept_any_vlan;
	u32			bins[8];
	struct ecore_rss_params	*rss_params;
	struct ecore_filter_accept_flags accept_flags;
	struct ecore_sge_tpa_params *sge_tpa_params;
};

/**
 * @brief ecore_sp_vport_update -
 *
 * This ramrod updates the parameters of the VPort. Every field can be updated
 * independently, according to flags.
 *
 * This ramrod is also used to set the VPort state to active after creation.
 * An Assert is generated if the VPort does not contain an RX queue.
 *
 * @param p_hwfn
 * @param p_params
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_sp_vport_update(struct ecore_hwfn *p_hwfn,
		      struct ecore_sp_vport_update_params *p_params,
		      enum spq_mode comp_mode,
		      struct ecore_spq_comp_cb *p_comp_data);
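
/* Example (illustrative sketch): activating vport 0 for Rx and Tx after its
 * queues have been started, optionally attaching the RSS configuration from
 * the ecore_rss_params sketch above (`rss`). ECORE_SPQ_MODE_EBLOCK and
 * OSAL_NULL are assumed to come from the SPQ/OSAL headers.
 *
 *	struct ecore_sp_vport_update_params update = { 0 };
 *	enum _ecore_status_t rc;
 *
 *	update.opaque_fid = p_hwfn->hw_info.opaque_fid;
 *	update.vport_id = 0;
 *	update.update_vport_active_rx_flg = 1;
 *	update.vport_active_rx_flg = 1;
 *	update.update_vport_active_tx_flg = 1;
 *	update.vport_active_tx_flg = 1;
 *	update.rss_params = &rss;	// or OSAL_NULL to leave RSS untouched
 *
 *	rc = ecore_sp_vport_update(p_hwfn, &update, ECORE_SPQ_MODE_EBLOCK,
 *				   OSAL_NULL);
 */
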
/**
 * @brief ecore_sp_vport_stop -
 *
 * This ramrod closes a VPort after all its RX and TX queues are terminated.
 * An Assert is generated if any queues are left open.
 *
 * @param p_hwfn
 * @param opaque_fid
 * @param vport_id VPort ID
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_sp_vport_stop(struct ecore_hwfn *p_hwfn,
					 u16 opaque_fid,
					 u8 vport_id);

enum _ecore_status_t
ecore_sp_eth_filter_ucast(struct ecore_hwfn *p_hwfn,
			  u16 opaque_fid,
			  struct ecore_filter_ucast *p_filter_cmd,
			  enum spq_mode comp_mode,
			  struct ecore_spq_comp_cb *p_comp_data);

/**
 * @brief ecore_sp_eth_rx_queues_update -
 *
 * This ramrod updates an RX queue. It is used for setting the active state
 * of the queue and updating the TPA and SGE parameters.
 *
 * @note Final phase API.
 *
 * @param p_hwfn
 * @param pp_rxq_handlers	An array of queue handlers to be updated.
 * @param num_rxqs		Number of queues to update.
 * @param complete_cqe_flg	Post completion to the CQE Ring if set
 * @param complete_event_flg	Post completion to the Event Ring if set
 * @param comp_mode
 * @param p_comp_data
 *
 * @return enum _ecore_status_t
 */

enum _ecore_status_t
ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn,
			      void **pp_rxq_handlers,
			      u8 num_rxqs,
			      u8 complete_cqe_flg,
			      u8 complete_event_flg,
			      enum spq_mode comp_mode,
			      struct ecore_spq_comp_cb *p_comp_data);

/**
 * @brief ecore_sp_eth_rx_queues_set_default -
 *
 * This ramrod sets an RSS RX queue as the default one.
 *
 * @note Final phase API.
 *
 * @param p_hwfn
 * @param p_rxq_handler		Queue handler to be set as default.
 * @param comp_mode
 * @param p_comp_data
 *
 * @return enum _ecore_status_t
 */

enum _ecore_status_t
ecore_sp_eth_rx_queues_set_default(struct ecore_hwfn *p_hwfn,
				   void *p_rxq_handler,
				   enum spq_mode comp_mode,
				   struct ecore_spq_comp_cb *p_comp_data);

void __ecore_get_vport_stats(struct ecore_hwfn *p_hwfn,
			     struct ecore_ptt *p_ptt,
			     struct ecore_eth_stats *stats,
			     u16 statistics_bin, bool b_get_port_stats);

void ecore_get_vport_stats(struct ecore_dev *p_dev,
			   struct ecore_eth_stats *stats);

void ecore_reset_vport_stats(struct ecore_dev *p_dev);
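
/* Example (illustrative sketch): snapshotting and then clearing the
 * device-wide L2 statistics.
 *
 *	struct ecore_eth_stats stats;
 *
 *	ecore_get_vport_stats(p_dev, &stats);
 *	// ... consume the counters in `stats` ...
 *	ecore_reset_vport_stats(p_dev);
 */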

/**
 * @brief ecore_arfs_mode_configure -
 *
 * Enable or disable RFS mode. To enable it, at least one of tcp or udp must
 * be true and at least one of ipv4 or ipv6 must be true.
 *
 * @param p_hwfn
 * @param p_ptt
 * @param p_cfg_params		arfs mode configuration parameters.
 *
 */
void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt,
			       struct ecore_arfs_config_params *p_cfg_params);
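
/* Example (illustrative sketch): enabling aRFS in 5-tuple mode for TCP and
 * UDP over both IPv4 and IPv6. `p_ptt` is assumed to be an already-acquired
 * PTT window.
 *
 *	struct ecore_arfs_config_params arfs = { 0 };
 *
 *	arfs.tcp = true;
 *	arfs.udp = true;
 *	arfs.ipv4 = true;
 *	arfs.ipv6 = true;
 *	arfs.mode = ECORE_FILTER_CONFIG_MODE_5_TUPLE;
 *
 *	ecore_arfs_mode_configure(p_hwfn, p_ptt, &arfs);
 */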

#ifndef __EXTRACT__LINUX__
struct ecore_ntuple_filter_params {
	/* Physically mapped address containing header of buffer to be used
	 * as filter.
	 */
	dma_addr_t addr;

	/* Length of header in bytes */
	u16 length;

	/* Relative queue-id to receive classified packet */
#define ECORE_RFS_NTUPLE_QID_RSS ((u16)-1)
	u16 qid;

	/* Identifier can either be according to vport-id or vfid */
	bool b_is_vf;
	u8 vport_id;
	u8 vf_id;

	/* true iff this filter is to be added. Else to be removed */
	bool b_is_add;
};
#endif

/**
 * @brief - ecore_configure_rfs_ntuple_filter
 *
 * This ramrod should be used to add or remove an aRFS HW filter.
 *
 * @param p_hwfn
 * @param p_cb		Used for ECORE_SPQ_MODE_CB, where the client would
 *			initialize it with a cookie and callback function
 *			address; if not using this mode, the client must pass
 *			NULL.
 * @param p_params
 */
enum _ecore_status_t
ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn,
				  struct ecore_spq_comp_cb *p_cb,
				  struct ecore_ntuple_filter_params *p_params);
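
/* Example (illustrative sketch): adding an aRFS 5-tuple filter that steers
 * matching traffic to relative Rx queue 3 of PF vport 0. `hdr_phys` and
 * `hdr_len` describe an assumed DMA-mapped buffer holding the header used as
 * the match template; `my_arfs_cb`/`my_cookie` are assumed callback values,
 * and the `function`/`cookie` member names of ecore_spq_comp_cb are assumed
 * from the SPQ API.
 *
 *	struct ecore_ntuple_filter_params ntuple = { 0 };
 *	struct ecore_spq_comp_cb cb;
 *	enum _ecore_status_t rc;
 *
 *	cb.function = my_arfs_cb;
 *	cb.cookie = my_cookie;
 *
 *	ntuple.addr = hdr_phys;
 *	ntuple.length = hdr_len;
 *	ntuple.qid = 3;		// or ECORE_RFS_NTUPLE_QID_RSS for RSS
 *	ntuple.b_is_vf = false;
 *	ntuple.vport_id = 0;
 *	ntuple.b_is_add = true;
 *
 *	rc = ecore_configure_rfs_ntuple_filter(p_hwfn, &cb, &ntuple);
 */
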
#endif