/******************************************************************************

  Copyright (c) 2013-2014, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: stable/10/sys/dev/ixl/ixlvc.c 274360 2014-11-10 23:56:06Z jfv $*/
/*
**	Virtual Channel support
**		These are support functions for communication
**		between the VF and PF drivers.
*/

#include "ixl.h"
#include "ixlv.h"
#include "i40e_prototype.h"


/* busy wait delay in msec */
#define IXLV_BUSY_WAIT_DELAY 10
#define IXLV_BUSY_WAIT_COUNT 50
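/* i.e. a busy wait gives up after 50 * 10ms = 500ms */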

static void	ixl_vc_process_resp(struct ixl_vc_mgr *, uint32_t,
		    enum i40e_status_code);
static void	ixl_vc_process_next(struct ixl_vc_mgr *mgr);
static void	ixl_vc_schedule_retry(struct ixl_vc_mgr *mgr);
static void	ixl_vc_send_current(struct ixl_vc_mgr *mgr);

#ifdef IXL_DEBUG
/*
** Validate VF messages
*/
static int ixl_vc_validate_vf_msg(struct ixlv_sc *sc, u32 v_opcode,
    u8 *msg, u16 msglen)
{
	bool err_msg_format = false;
	int valid_len;

	/* Validate message length. */
	switch (v_opcode) {
	case I40E_VIRTCHNL_OP_VERSION:
		valid_len = sizeof(struct i40e_virtchnl_version_info);
		break;
	case I40E_VIRTCHNL_OP_RESET_VF:
	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
		valid_len = 0;
		break;
	case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
		valid_len = sizeof(struct i40e_virtchnl_txq_info);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
		valid_len = sizeof(struct i40e_virtchnl_rxq_info);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		valid_len = sizeof(struct i40e_virtchnl_vsi_queue_config_info);
		if (msglen >= valid_len) {
			struct i40e_virtchnl_vsi_queue_config_info *vqc =
			    (struct i40e_virtchnl_vsi_queue_config_info *)msg;
			valid_len += (vqc->num_queue_pairs *
			    sizeof(struct i40e_virtchnl_queue_pair_info));
			if (vqc->num_queue_pairs == 0)
				err_msg_format = true;
		}
		break;
	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
		valid_len = sizeof(struct i40e_virtchnl_irq_map_info);
		if (msglen >= valid_len) {
			struct i40e_virtchnl_irq_map_info *vimi =
			    (struct i40e_virtchnl_irq_map_info *)msg;
			valid_len += (vimi->num_vectors *
			    sizeof(struct i40e_virtchnl_vector_map));
			if (vimi->num_vectors == 0)
				err_msg_format = true;
		}
		break;
	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
		valid_len = sizeof(struct i40e_virtchnl_queue_select);
		break;
	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
		valid_len = sizeof(struct i40e_virtchnl_ether_addr_list);
		if (msglen >= valid_len) {
			struct i40e_virtchnl_ether_addr_list *veal =
			    (struct i40e_virtchnl_ether_addr_list *)msg;
			valid_len += veal->num_elements *
			    sizeof(struct i40e_virtchnl_ether_addr);
			if (veal->num_elements == 0)
				err_msg_format = true;
		}
		break;
	case I40E_VIRTCHNL_OP_ADD_VLAN:
	case I40E_VIRTCHNL_OP_DEL_VLAN:
		valid_len = sizeof(struct i40e_virtchnl_vlan_filter_list);
		if (msglen >= valid_len) {
			struct i40e_virtchnl_vlan_filter_list *vfl =
			    (struct i40e_virtchnl_vlan_filter_list *)msg;
			valid_len += vfl->num_elements * sizeof(u16);
			if (vfl->num_elements == 0)
				err_msg_format = true;
		}
		break;
	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		valid_len = sizeof(struct i40e_virtchnl_promisc_info);
		break;
	case I40E_VIRTCHNL_OP_GET_STATS:
		valid_len = sizeof(struct i40e_virtchnl_queue_select);
		break;
	/* These are always errors coming from the VF. */
	case I40E_VIRTCHNL_OP_EVENT:
	case I40E_VIRTCHNL_OP_UNKNOWN:
	default:
		return EPERM;
	}
	/* A few more checks */
	if ((valid_len != msglen) || (err_msg_format))
		return EINVAL;
	else
		return 0;
}
#endif

/*
** ixlv_send_pf_msg
**
** Send a message to the PF and print an error message on failure.
*/
static int
ixlv_send_pf_msg(struct ixlv_sc *sc,
	enum i40e_virtchnl_ops op, u8 *msg, u16 len)
{
	struct i40e_hw	*hw = &sc->hw;
	device_t	dev = sc->dev;
	i40e_status	err;

#ifdef IXL_DEBUG
	/*
	** Pre-validate messages to the PF
	*/
	int val_err;
	val_err = ixl_vc_validate_vf_msg(sc, op, msg, len);
	if (val_err)
		device_printf(dev, "Error validating msg to PF for op %d,"
		    " msglen %d: error %d\n", op, len, val_err);
#endif

	err = i40e_aq_send_msg_to_pf(hw, op, I40E_SUCCESS, msg, len, NULL);
	if (err)
		device_printf(dev, "Unable to send opcode %d to PF, "
		    "error %d, aq status %d\n", op, err, hw->aq.asq_last_status);
	return err;
}


/*
** ixlv_send_api_ver
**
** Send API version admin queue message to the PF. The reply is not checked
** in this function. Returns 0 if the message was successfully
** sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
*/
int
ixlv_send_api_ver(struct ixlv_sc *sc)
{
	struct i40e_virtchnl_version_info vvi;

	vvi.major = I40E_VIRTCHNL_VERSION_MAJOR;
	vvi.minor = I40E_VIRTCHNL_VERSION_MINOR;

	return ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_VERSION,
	    (u8 *)&vvi, sizeof(vvi));
}

/*
** ixlv_verify_api_ver
**
** Compare API versions with the PF. Must be called after the admin queue is
** initialized. Returns 0 if the API versions match, EIO if
** they do not, or I40E_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty.
*/
int
ixlv_verify_api_ver(struct ixlv_sc *sc)
{
	struct i40e_virtchnl_version_info *pf_vvi;
	struct i40e_hw *hw = &sc->hw;
	struct i40e_arq_event_info event;
	i40e_status err;
	int retries = 0;

	event.buf_len = IXL_AQ_BUFSZ;
	event.msg_buf = malloc(event.buf_len, M_DEVBUF, M_NOWAIT);
	if (!event.msg_buf) {
		err = ENOMEM;
		goto out;
	}

	do {
		if (++retries > IXLV_AQ_MAX_ERR)
			goto out_alloc;

		/*
		** NOTE: the initial delay is necessary to give the
		** PF time to post its reply to the admin queue.
		*/
		i40e_msec_delay(100);
		err = i40e_clean_arq_element(hw, &event, NULL);
	} while (err == I40E_ERR_ADMIN_QUEUE_NO_WORK);
	if (err)
		goto out_alloc;

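	/*
	** The PF returns its status in the descriptor's cookie_low
	** field and echoes the request opcode in cookie_high.
	*/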
	err = (i40e_status)le32toh(event.desc.cookie_low);
	if (err) {
		err = EIO;
		goto out_alloc;
	}

	if ((enum i40e_virtchnl_ops)le32toh(event.desc.cookie_high) !=
	    I40E_VIRTCHNL_OP_VERSION) {
		DDPRINTF(sc->dev, "Received unexpected op response: %d\n",
		    le32toh(event.desc.cookie_high));
		err = EIO;
		goto out_alloc;
	}

	pf_vvi = (struct i40e_virtchnl_version_info *)event.msg_buf;
	if ((pf_vvi->major != I40E_VIRTCHNL_VERSION_MAJOR) ||
	    (pf_vvi->minor != I40E_VIRTCHNL_VERSION_MINOR))
		err = EIO;

out_alloc:
	free(event.msg_buf, M_DEVBUF);
out:
	return err;
}

/*
** ixlv_send_vf_config_msg
**
** Send VF configuration request admin queue message to the PF. The reply
** is not checked in this function. Returns 0 if the message was
** successfully sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
*/
int
ixlv_send_vf_config_msg(struct ixlv_sc *sc)
{
	return ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
				  NULL, 0);
}

/*
** ixlv_get_vf_config
**
** Get VF configuration from PF and populate hw structure. Must be called after
** admin queue is initialized. Busy waits until response is received from PF,
** with maximum timeout. Response from PF is returned in the buffer for further
** processing by the caller.
*/
int
ixlv_get_vf_config(struct ixlv_sc *sc)
{
	struct i40e_hw	*hw = &sc->hw;
	device_t	dev = sc->dev;
	struct i40e_arq_event_info event;
	u16 len;
	i40e_status err = 0;
	u32 retries = 0;

	/* Note this assumes a single VSI */
	len = sizeof(struct i40e_virtchnl_vf_resource) +
	    sizeof(struct i40e_virtchnl_vsi_resource);
	event.buf_len = len;
	event.msg_buf = malloc(event.buf_len, M_DEVBUF, M_NOWAIT);
	if (!event.msg_buf) {
		err = ENOMEM;
		goto out;
	}

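	/*
	** Poll the admin receive queue: delay and retry while it is
	** empty, skip unrelated messages, and stop once the
	** GET_VF_RESOURCES response arrives or the retry limit is hit.
	*/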
	for (;;) {
		err = i40e_clean_arq_element(hw, &event, NULL);
		if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
			if (++retries <= IXLV_AQ_MAX_ERR)
				i40e_msec_delay(10);
		} else if ((enum i40e_virtchnl_ops)le32toh(event.desc.cookie_high) !=
		    I40E_VIRTCHNL_OP_GET_VF_RESOURCES) {
			DDPRINTF(dev, "Received a response from PF,"
			    " opcode %d, error %d",
			    le32toh(event.desc.cookie_high),
			    le32toh(event.desc.cookie_low));
			retries++;
			continue;
		} else {
			err = (i40e_status)le32toh(event.desc.cookie_low);
			if (err) {
				device_printf(dev, "%s: Error returned from PF,"
				    " opcode %d, error %d\n", __func__,
				    le32toh(event.desc.cookie_high),
				    le32toh(event.desc.cookie_low));
				err = EIO;
				goto out_alloc;
			}
			/* We retrieved the config message, with no errors */
			break;
		}

		if (retries > IXLV_AQ_MAX_ERR) {
			INIT_DBG_DEV(dev, "Did not receive response after %d tries.",
			    retries);
			err = ETIMEDOUT;
			goto out_alloc;
		}
	}

	memcpy(sc->vf_res, event.msg_buf, min(event.msg_len, len));
	i40e_vf_parse_hw_config(hw, sc->vf_res);

out_alloc:
	free(event.msg_buf, M_DEVBUF);
out:
	return err;
}

/*
** ixlv_configure_queues
**
** Request that the PF set up our queues.
*/
void
ixlv_configure_queues(struct ixlv_sc *sc)
{
	device_t		dev = sc->dev;
	struct ixl_vsi		*vsi = &sc->vsi;
	struct ixl_queue	*que = vsi->queues;
	struct tx_ring		*txr;
	struct rx_ring		*rxr;
	int			len, pairs;

	struct i40e_virtchnl_vsi_queue_config_info *vqci;
	struct i40e_virtchnl_queue_pair_info *vqpi;

	pairs = vsi->num_queues;
	len = sizeof(struct i40e_virtchnl_vsi_queue_config_info) +
	    (sizeof(struct i40e_virtchnl_queue_pair_info) * pairs);
	vqci = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!vqci) {
		device_printf(dev, "%s: unable to allocate memory\n", __func__);
		ixl_vc_schedule_retry(&sc->vc_mgr);
		return;
	}
	vqci->vsi_id = sc->vsi_res->vsi_id;
	vqci->num_queue_pairs = pairs;
	vqpi = vqci->qpair;
	/* Size check is not needed here - HW max is 16 queue pairs, and we
	 * can fit info for 31 of them into the AQ buffer before it overflows.
	 */
	for (int i = 0; i < pairs; i++, que++, vqpi++) {
		txr = &que->txr;
		rxr = &que->rxr;
		vqpi->txq.vsi_id = vqci->vsi_id;
		vqpi->txq.queue_id = i;
		vqpi->txq.ring_len = que->num_desc;
		vqpi->txq.dma_ring_addr = txr->dma.pa;
		/*
		** Enable head writeback; its slot is placed just past
		** the last descriptor in the ring's DMA allocation.
		*/
		vqpi->txq.headwb_enabled = 1;
		vqpi->txq.dma_headwb_addr = txr->dma.pa +
		    (que->num_desc * sizeof(struct i40e_tx_desc));

		vqpi->rxq.vsi_id = vqci->vsi_id;
		vqpi->rxq.queue_id = i;
		vqpi->rxq.ring_len = que->num_desc;
		vqpi->rxq.dma_ring_addr = rxr->dma.pa;
		vqpi->rxq.max_pkt_size = vsi->max_frame_size;
		vqpi->rxq.databuffer_size = rxr->mbuf_sz;
		vqpi->rxq.splithdr_enabled = 0;
	}

	ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
			   (u8 *)vqci, len);
	free(vqci, M_DEVBUF);
}

/*
** ixlv_enable_queues
**
** Request that the PF enable all of our queues.
*/
void
ixlv_enable_queues(struct ixlv_sc *sc)
{
	struct i40e_virtchnl_queue_select vqs;

	vqs.vsi_id = sc->vsi_res->vsi_id;
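	/* Select every queue pair: a contiguous mask with bits 0..n-1 set */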
	vqs.tx_queues = (1 << sc->vsi_res->num_queue_pairs) - 1;
	vqs.rx_queues = vqs.tx_queues;
	ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
			   (u8 *)&vqs, sizeof(vqs));
}

/*
** ixlv_disable_queues
**
** Request that the PF disable all of our queues.
*/
void
ixlv_disable_queues(struct ixlv_sc *sc)
{
	struct i40e_virtchnl_queue_select vqs;

	vqs.vsi_id = sc->vsi_res->vsi_id;
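	/* Same all-queues bitmask as in ixlv_enable_queues() */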
	vqs.tx_queues = (1 << sc->vsi_res->num_queue_pairs) - 1;
	vqs.rx_queues = vqs.tx_queues;
	ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
			   (u8 *)&vqs, sizeof(vqs));
}

/*
** ixlv_map_queues
**
** Request that the PF map queues to interrupt vectors. Misc causes, including
** admin queue, are always mapped to vector 0.
*/
void
ixlv_map_queues(struct ixlv_sc *sc)
{
	struct i40e_virtchnl_irq_map_info *vm;
	int			i, q, len;
	struct ixl_vsi		*vsi = &sc->vsi;
	struct ixl_queue	*que = vsi->queues;

	/* Number of queue vectors; the adminq uses one vector itself */
	q = sc->msix - 1;

	len = sizeof(struct i40e_virtchnl_irq_map_info) +
	      (sc->msix * sizeof(struct i40e_virtchnl_vector_map));
	vm = malloc(len, M_DEVBUF, M_NOWAIT);
	if (!vm) {
		device_printf(sc->dev, "%s: unable to allocate memory\n",
		    __func__);
		ixl_vc_schedule_retry(&sc->vc_mgr);
		return;
	}

	vm->num_vectors = sc->msix;
	/* Queue vectors first */
	for (i = 0; i < q; i++, que++) {
		vm->vecmap[i].vsi_id = sc->vsi_res->vsi_id;
		vm->vecmap[i].vector_id = i + 1; /* first is adminq */
		vm->vecmap[i].txq_map = (1 << que->me);
		vm->vecmap[i].rxq_map = (1 << que->me);
		vm->vecmap[i].rxitr_idx = 0;
		vm->vecmap[i].txitr_idx = 0;
	}

	/* Misc vector last - this is only for AdminQ messages */
	vm->vecmap[i].vsi_id = sc->vsi_res->vsi_id;
	vm->vecmap[i].vector_id = 0;
	vm->vecmap[i].txq_map = 0;
	vm->vecmap[i].rxq_map = 0;
	vm->vecmap[i].rxitr_idx = 0;
	vm->vecmap[i].txitr_idx = 0;

	ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
	    (u8 *)vm, len);
	free(vm, M_DEVBUF);
}

/*
** Scan the filter list looking for vlans that need
** to be added, then create the data to hand to the AQ
** for handling.
*/
void
ixlv_add_vlans(struct ixlv_sc *sc)
{
	struct i40e_virtchnl_vlan_filter_list	*v;
	struct ixlv_vlan_filter *f, *ftmp;
	device_t	dev = sc->dev;
	int		len, i = 0, cnt = 0;

	/* Get count of VLAN filters to add */
	SLIST_FOREACH(f, sc->vlan_filters, next) {
		if (f->flags & IXL_FILTER_ADD)
			cnt++;
	}

	if (!cnt) {  /* no work... */
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_VLAN_FILTER,
		    I40E_SUCCESS);
		return;
	}

	len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
	      (cnt * sizeof(u16));

	if (len > IXL_AQ_BUF_SZ) {
		device_printf(dev, "%s: Exceeded Max AQ Buf size\n",
			__func__);
		ixl_vc_schedule_retry(&sc->vc_mgr);
		return;
	}

	v = malloc(len, M_DEVBUF, M_NOWAIT);
	if (!v) {
		device_printf(dev, "%s: unable to allocate memory\n",
			__func__);
		ixl_vc_schedule_retry(&sc->vc_mgr);
		return;
	}

	v->vsi_id = sc->vsi_res->vsi_id;
	v->num_elements = cnt;

	/* Scan the filter list, copying each VLAN flagged for addition */
	SLIST_FOREACH_SAFE(f, sc->vlan_filters, next, ftmp) {
		if (f->flags & IXL_FILTER_ADD) {
			bcopy(&f->vlan, &v->vlan_id[i], sizeof(u16));
			f->flags = IXL_FILTER_USED;
			i++;
		}
		if (i == cnt)
			break;
	}
	/* ERJ: Should this be taken out? */
	if (i == 0) { /* Should not happen... */
		device_printf(dev, "%s: i == 0?\n", __func__);
		free(v, M_DEVBUF);
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_VLAN_FILTER,
		    I40E_SUCCESS);
		return;
	}

	ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_ADD_VLAN, (u8 *)v, len);
	free(v, M_DEVBUF);
	/* add stats? */
}

/*
** Scan the filter list looking for vlans that need
** to be removed, then create the data to hand to the AQ
** for handling.
*/
void
ixlv_del_vlans(struct ixlv_sc *sc)
{
	device_t	dev = sc->dev;
	struct i40e_virtchnl_vlan_filter_list *v;
	struct ixlv_vlan_filter *f, *ftmp;
	int len, i = 0, cnt = 0;

	/* Get count of VLAN filters to delete */
	SLIST_FOREACH(f, sc->vlan_filters, next) {
		if (f->flags & IXL_FILTER_DEL)
			cnt++;
	}

	if (!cnt) {  /* no work... */
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_VLAN_FILTER,
		    I40E_SUCCESS);
		return;
	}

	len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
	      (cnt * sizeof(u16));

	if (len > IXL_AQ_BUF_SZ) {
		device_printf(dev, "%s: Exceeded Max AQ Buf size\n",
			__func__);
		ixl_vc_schedule_retry(&sc->vc_mgr);
		return;
	}

	v = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!v) {
		device_printf(dev, "%s: unable to allocate memory\n",
			__func__);
		ixl_vc_schedule_retry(&sc->vc_mgr);
		return;
	}

	v->vsi_id = sc->vsi_res->vsi_id;
	v->num_elements = cnt;

	/* Scan the filter list, copying and freeing each VLAN filter
	   flagged for deletion */
	SLIST_FOREACH_SAFE(f, sc->vlan_filters, next, ftmp) {
		if (f->flags & IXL_FILTER_DEL) {
			bcopy(&f->vlan, &v->vlan_id[i], sizeof(u16));
			i++;
			SLIST_REMOVE(sc->vlan_filters, f, ixlv_vlan_filter, next);
			free(f, M_DEVBUF);
		}
		if (i == cnt)
			break;
	}
	/* ERJ: Take this out? */
	if (i == 0) { /* Should not happen... */
		device_printf(dev, "%s: i == 0?\n", __func__);
		free(v, M_DEVBUF);
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_VLAN_FILTER,
		    I40E_SUCCESS);
		return;
	}

	ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_DEL_VLAN, (u8 *)v, len);
	free(v, M_DEVBUF);
	/* add stats? */
}


/*
** This routine takes additions to the vsi filter
** table and creates an Admin Queue call to create
** the filters in the hardware.
*/
void
ixlv_add_ether_filters(struct ixlv_sc *sc)
{
	struct i40e_virtchnl_ether_addr_list *a;
	struct ixlv_mac_filter	*f;
	device_t		dev = sc->dev;
	int			len, j = 0, cnt = 0;

	/* Get count of MAC addresses to add */
	SLIST_FOREACH(f, sc->mac_filters, next) {
		if (f->flags & IXL_FILTER_ADD)
			cnt++;
	}
	if (cnt == 0) { /* Should not happen... */
		DDPRINTF(dev, "cnt == 0, exiting...");
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_MAC_FILTER,
		    I40E_SUCCESS);
		return;
	}

	len = sizeof(struct i40e_virtchnl_ether_addr_list) +
	    (cnt * sizeof(struct i40e_virtchnl_ether_addr));

	a = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (a == NULL) {
		device_printf(dev, "%s: Failed to get memory for "
		    "virtchnl_ether_addr_list\n", __func__);
		ixl_vc_schedule_retry(&sc->vc_mgr);
		return;
	}
	a->vsi_id = sc->vsi.id;
	a->num_elements = cnt;

	/* Scan the filter list */
	SLIST_FOREACH(f, sc->mac_filters, next) {
		if (f->flags & IXL_FILTER_ADD) {
			bcopy(f->macaddr, a->list[j].addr, ETHER_ADDR_LEN);
			f->flags &= ~IXL_FILTER_ADD;
			j++;

			DDPRINTF(dev, "ADD: " MAC_FORMAT,
			    MAC_FORMAT_ARGS(f->macaddr));
		}
		if (j == cnt)
			break;
	}
	DDPRINTF(dev, "len %d, j %d, cnt %d", len, j, cnt);
	ixlv_send_pf_msg(sc,
	    I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, (u8 *)a, len);
	/* add stats? */
	free(a, M_DEVBUF);
}

/*
** This routine takes filters flagged for deletion in the
** sc MAC filter list and creates an Admin Queue call
** to delete those filters in the hardware.
*/
void
ixlv_del_ether_filters(struct ixlv_sc *sc)
{
	struct i40e_virtchnl_ether_addr_list *d;
	device_t		dev = sc->dev;
	struct ixlv_mac_filter	*f, *f_temp;
	int			len, j = 0, cnt = 0;

	/* Get count of MAC addresses to delete */
	SLIST_FOREACH(f, sc->mac_filters, next) {
		if (f->flags & IXL_FILTER_DEL)
			cnt++;
	}
	if (cnt == 0) {
		DDPRINTF(dev, "cnt == 0, exiting...");
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_MAC_FILTER,
		    I40E_SUCCESS);
		return;
	}

	len = sizeof(struct i40e_virtchnl_ether_addr_list) +
	    (cnt * sizeof(struct i40e_virtchnl_ether_addr));

	d = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (d == NULL) {
		device_printf(dev, "%s: Failed to get memory for "
		    "virtchnl_ether_addr_list\n", __func__);
		ixl_vc_schedule_retry(&sc->vc_mgr);
		return;
	}
	d->vsi_id = sc->vsi.id;
	d->num_elements = cnt;

	/* Scan the filter list */
	SLIST_FOREACH_SAFE(f, sc->mac_filters, next, f_temp) {
		if (f->flags & IXL_FILTER_DEL) {
			bcopy(f->macaddr, d->list[j].addr, ETHER_ADDR_LEN);
			DDPRINTF(dev, "DEL: " MAC_FORMAT,
			    MAC_FORMAT_ARGS(f->macaddr));
			j++;
			SLIST_REMOVE(sc->mac_filters, f, ixlv_mac_filter, next);
			free(f, M_DEVBUF);
		}
		if (j == cnt)
			break;
	}
	ixlv_send_pf_msg(sc,
	    I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, (u8 *)d, len);
	/* add stats? */
	free(d, M_DEVBUF);
}

/*
** ixlv_request_reset
** Request that the PF reset this VF. No response is expected.
*/
void
ixlv_request_reset(struct ixlv_sc *sc)
{
	/*
	** Set the reset status to "in progress" before
	** the request; this avoids any possibility of
	** a mistaken early detection of completion.
	*/
	wr32(&sc->hw, I40E_VFGEN_RSTAT, I40E_VFR_INPROGRESS);
	ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_RESET_VF, NULL, 0);
}

/*
** ixlv_request_stats
** Request the statistics for this VF's VSI from the PF.
*/
void
ixlv_request_stats(struct ixlv_sc *sc)
{
	struct i40e_virtchnl_queue_select vqs;

	vqs.vsi_id = sc->vsi_res->vsi_id;
	/* Low priority; we don't need to error check */
	ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_GET_STATS,
	    (u8 *)&vqs, sizeof(vqs));
}

/*
** Updates the driver's stats counters with the VSI stats returned by the PF.
*/
void
ixlv_update_stats_counters(struct ixlv_sc *sc, struct i40e_eth_stats *es)
{
	struct ixl_vsi *vsi;
	uint64_t tx_discards;
	int i;

	vsi = &sc->vsi;

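	/* Fold software buf_ring drops into the reported tx_discards */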
	tx_discards = es->tx_discards;
	for (i = 0; i < sc->vsi.num_queues; i++)
		tx_discards += sc->vsi.queues[i].txr.br->br_drops;

	/* Update ifnet stats */
	IXL_SET_IPACKETS(vsi, es->rx_unicast +
	    es->rx_multicast +
	    es->rx_broadcast);
	IXL_SET_OPACKETS(vsi, es->tx_unicast +
	    es->tx_multicast +
	    es->tx_broadcast);
	IXL_SET_IBYTES(vsi, es->rx_bytes);
	IXL_SET_OBYTES(vsi, es->tx_bytes);
	IXL_SET_IMCASTS(vsi, es->rx_multicast);
	IXL_SET_OMCASTS(vsi, es->tx_multicast);

	IXL_SET_OERRORS(vsi, es->tx_errors);
	IXL_SET_IQDROPS(vsi, es->rx_discards);
	IXL_SET_OQDROPS(vsi, tx_discards);
	IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
	IXL_SET_COLLISIONS(vsi, 0);

	sc->vsi.eth_stats = *es;
}

/*
** ixlv_vc_completion
**
** Asynchronous completion function for admin queue messages. Rather than busy
** wait, we fire off our requests and assume that no errors will be returned.
** This function handles the reply messages.
*/
void
ixlv_vc_completion(struct ixlv_sc *sc,
    enum i40e_virtchnl_ops v_opcode,
    i40e_status v_retval, u8 *msg, u16 msglen)
{
	device_t	dev = sc->dev;
	struct ixl_vsi	*vsi = &sc->vsi;

	if (v_opcode == I40E_VIRTCHNL_OP_EVENT) {
		struct i40e_virtchnl_pf_event *vpe =
			(struct i40e_virtchnl_pf_event *)msg;

		switch (vpe->event) {
		case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
#ifdef IXL_DEBUG
			device_printf(dev, "Link change: status %d, speed %d\n",
			    vpe->event_data.link_event.link_status,
			    vpe->event_data.link_event.link_speed);
#endif
			vsi->link_up =
				vpe->event_data.link_event.link_status;
			vsi->link_speed =
				vpe->event_data.link_event.link_speed;
			ixlv_update_link_status(sc);
			break;
		case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
			device_printf(dev, "PF initiated reset!\n");
			sc->init_state = IXLV_RESET_PENDING;
			ixlv_init(sc);
			break;
		default:
			device_printf(dev, "%s: Unknown event %d from AQ\n",
				__func__, vpe->event);
			break;
		}

		return;
	}

	/* Catch-all error response */
	if (v_retval) {
		device_printf(dev,
		    "%s: AQ returned error %d to our request %d!\n",
		    __func__, v_retval, v_opcode);
	}

#ifdef IXL_DEBUG
	if (v_opcode != I40E_VIRTCHNL_OP_GET_STATS)
		DDPRINTF(dev, "opcode %d", v_opcode);
#endif

	switch (v_opcode) {
	case I40E_VIRTCHNL_OP_GET_STATS:
		ixlv_update_stats_counters(sc, (struct i40e_eth_stats *)msg);
		break;
	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_MAC_FILTER,
		    v_retval);
		if (v_retval) {
			device_printf(dev, "WARNING: Error adding VF mac filter!\n");
			device_printf(dev, "WARNING: Device may not receive traffic!\n");
		}
		break;
	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_MAC_FILTER,
		    v_retval);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_CONFIGURE_PROMISC,
		    v_retval);
		break;
	case I40E_VIRTCHNL_OP_ADD_VLAN:
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_VLAN_FILTER,
		    v_retval);
		break;
	case I40E_VIRTCHNL_OP_DEL_VLAN:
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_VLAN_FILTER,
		    v_retval);
		break;
	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ENABLE_QUEUES,
		    v_retval);
		if (v_retval == 0) {
			/* Update link status */
			ixlv_update_link_status(sc);
			/* Turn on all interrupts */
			ixlv_enable_intr(vsi);
			/* And inform the stack we're ready */
			vsi->ifp->if_drv_flags |= IFF_DRV_RUNNING;
			vsi->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		}
		break;
	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DISABLE_QUEUES,
		    v_retval);
		if (v_retval == 0) {
			/* Turn off all interrupts */
			ixlv_disable_intr(vsi);
			/* Tell the stack that the interface is no longer active */
			vsi->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
		}
		break;
	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_CONFIGURE_QUEUES,
		    v_retval);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_MAP_VECTORS,
		    v_retval);
		break;
	default:
		device_printf(dev,
		    "%s: Received unexpected message %d from PF.\n",
		    __func__, v_opcode);
		break;
	}
}

static void
ixl_vc_send_cmd(struct ixlv_sc *sc, uint32_t request)
{

	switch (request) {
	case IXLV_FLAG_AQ_MAP_VECTORS:
		ixlv_map_queues(sc);
		break;

	case IXLV_FLAG_AQ_ADD_MAC_FILTER:
		ixlv_add_ether_filters(sc);
		break;

	case IXLV_FLAG_AQ_ADD_VLAN_FILTER:
		ixlv_add_vlans(sc);
		break;

	case IXLV_FLAG_AQ_DEL_MAC_FILTER:
		ixlv_del_ether_filters(sc);
		break;

	case IXLV_FLAG_AQ_DEL_VLAN_FILTER:
		ixlv_del_vlans(sc);
		break;

	case IXLV_FLAG_AQ_CONFIGURE_QUEUES:
		ixlv_configure_queues(sc);
		break;

	case IXLV_FLAG_AQ_DISABLE_QUEUES:
		ixlv_disable_queues(sc);
		break;

	case IXLV_FLAG_AQ_ENABLE_QUEUES:
		ixlv_enable_queues(sc);
		break;
	}
}

void
ixl_vc_init_mgr(struct ixlv_sc *sc, struct ixl_vc_mgr *mgr)
{
	mgr->sc = sc;
	mgr->current = NULL;
	TAILQ_INIT(&mgr->pending);
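	/*
	** The callout shares the core mutex, so the timeout and retry
	** handlers run with that lock held.
	*/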
	callout_init_mtx(&mgr->callout, &sc->mtx, 0);
}

static void
ixl_vc_process_completion(struct ixl_vc_mgr *mgr, enum i40e_status_code err)
{
	struct ixl_vc_cmd *cmd;

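	/*
	** Detach the command before invoking its callback so that
	** the callback may safely re-enqueue it.
	*/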
	cmd = mgr->current;
	mgr->current = NULL;
	cmd->flags &= ~IXLV_VC_CMD_FLAG_BUSY;

	cmd->callback(cmd, cmd->arg, err);
	ixl_vc_process_next(mgr);
}

static void
ixl_vc_process_resp(struct ixl_vc_mgr *mgr, uint32_t request,
    enum i40e_status_code err)
{
	struct ixl_vc_cmd *cmd;

	cmd = mgr->current;
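	/*
	** Ignore stale or unsolicited responses that do not match
	** the command currently in flight.
	*/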
	if (cmd == NULL || cmd->request != request)
		return;

	callout_stop(&mgr->callout);
	ixl_vc_process_completion(mgr, err);
}

static void
ixl_vc_cmd_timeout(void *arg)
{
	struct ixl_vc_mgr *mgr = (struct ixl_vc_mgr *)arg;

	IXLV_CORE_LOCK_ASSERT(mgr->sc);
	ixl_vc_process_completion(mgr, I40E_ERR_TIMEOUT);
}

static void
ixl_vc_cmd_retry(void *arg)
{
	struct ixl_vc_mgr *mgr = (struct ixl_vc_mgr *)arg;

	IXLV_CORE_LOCK_ASSERT(mgr->sc);
	ixl_vc_send_current(mgr);
}

static void
ixl_vc_send_current(struct ixl_vc_mgr *mgr)
{
	struct ixl_vc_cmd *cmd;

	cmd = mgr->current;
	ixl_vc_send_cmd(mgr->sc, cmd->request);
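	/* Arm the timeout; ixl_vc_process_resp() cancels it on reply */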
	callout_reset(&mgr->callout, IXLV_VC_TIMEOUT, ixl_vc_cmd_timeout, mgr);
}

static void
ixl_vc_process_next(struct ixl_vc_mgr *mgr)
{
	struct ixl_vc_cmd *cmd;

	if (mgr->current != NULL)
		return;

	if (TAILQ_EMPTY(&mgr->pending))
		return;

	cmd = TAILQ_FIRST(&mgr->pending);
	TAILQ_REMOVE(&mgr->pending, cmd, next);

	mgr->current = cmd;
	ixl_vc_send_current(mgr);
}

static void
ixl_vc_schedule_retry(struct ixl_vc_mgr *mgr)
{

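	/* Retry the current command in ~10ms (hz / 100 ticks) */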
	callout_reset(&mgr->callout, howmany(hz, 100), ixl_vc_cmd_retry, mgr);
}

void
ixl_vc_enqueue(struct ixl_vc_mgr *mgr, struct ixl_vc_cmd *cmd,
	    uint32_t req, ixl_vc_callback_t *callback, void *arg)
{
	IXLV_CORE_LOCK_ASSERT(mgr->sc);

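	/*
	** If the command is already queued or in flight, pull it out
	** first; it will be re-issued from the tail of the queue.
	*/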
	if (cmd->flags & IXLV_VC_CMD_FLAG_BUSY) {
		if (mgr->current == cmd)
			mgr->current = NULL;
		else
			TAILQ_REMOVE(&mgr->pending, cmd, next);
	}

	cmd->request = req;
	cmd->callback = callback;
	cmd->arg = arg;
	cmd->flags |= IXLV_VC_CMD_FLAG_BUSY;
	TAILQ_INSERT_TAIL(&mgr->pending, cmd, next);

	ixl_vc_process_next(mgr);
}

void
ixl_vc_flush(struct ixl_vc_mgr *mgr)
{
	struct ixl_vc_cmd *cmd;

	IXLV_CORE_LOCK_ASSERT(mgr->sc);
	KASSERT(TAILQ_EMPTY(&mgr->pending) || mgr->current != NULL,
	    ("ixlv: pending commands waiting but no command in progress"));

	cmd = mgr->current;
	if (cmd != NULL) {
		mgr->current = NULL;
		cmd->flags &= ~IXLV_VC_CMD_FLAG_BUSY;
		cmd->callback(cmd, cmd->arg, I40E_ERR_ADAPTER_STOPPED);
	}

	while ((cmd = TAILQ_FIRST(&mgr->pending)) != NULL) {
		TAILQ_REMOVE(&mgr->pending, cmd, next);
		cmd->flags &= ~IXLV_VC_CMD_FLAG_BUSY;
		cmd->callback(cmd, cmd->arg, I40E_ERR_ADAPTER_STOPPED);
	}

	callout_stop(&mgr->callout);
}