/*
 * Copyright (c) 2011-2013 Qlogic Corporation
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: qla_isr.c
 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/dev/qlxgb/qla_isr.c 331722 2018-03-29 02:50:57Z eadler $");

#include "qla_os.h"
#include "qla_reg.h"
#include "qla_hw.h"
#include "qla_def.h"
#include "qla_inline.h"
#include "qla_ver.h"
#include "qla_glbl.h"
#include "qla_dbg.h"

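/*
 * Receive path overview: qla_isr() runs at interrupt time and calls
 * qla_rcv_isr() to process up to rcv_pkt_thres status descriptors; if
 * more work remains, the per-vector task qla_rcv() is queued to keep
 * draining the ring before interrupts are re-enabled.
 */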
static void qla_replenish_normal_rx(qla_host_t *ha, qla_sds_t *sdsp);
static void qla_replenish_jumbo_rx(qla_host_t *ha, qla_sds_t *sdsp);

/*
 * Name: qla_rx_intr
 * Function: Handles ethernet frames received on the normal and jumbo rings
 */
static void
qla_rx_intr(qla_host_t *ha, uint64_t data, uint32_t sds_idx,
	struct lro_ctrl *lro)
{
	uint32_t idx, length, status, ring;
	qla_rx_buf_t *rxb;
	struct mbuf *mp;
	struct ifnet *ifp = ha->ifp;
	qla_sds_t *sdsp;
	struct ether_vlan_header *eh;

	sdsp = &ha->hw.sds[sds_idx];

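	/*
	 * The 64-bit status descriptor encodes the receive ring (normal or
	 * jumbo), the buffer handle, the frame length and the checksum
	 * status; pull the fields apart before validating them.
	 */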
	ring = (uint32_t)Q8_STAT_DESC_TYPE(data);
	idx = (uint32_t)Q8_STAT_DESC_HANDLE(data);
	length = (uint32_t)Q8_STAT_DESC_TOTAL_LENGTH(data);
	status = (uint32_t)Q8_STAT_DESC_STATUS(data);

	if (ring == 0) {
		if ((idx >= NUM_RX_DESCRIPTORS) || (length > MCLBYTES)) {
			device_printf(ha->pci_dev, "%s: ring[%d] index[0x%08x]"
				" len[0x%08x] invalid\n",
				__func__, ring, idx, length);
			return;
		}
	} else {
		if ((idx >= NUM_RX_JUMBO_DESCRIPTORS) ||
			(length > MJUM9BYTES)) {
			device_printf(ha->pci_dev, "%s: ring[%d] index[0x%08x]"
				" len[0x%08x] invalid\n",
				__func__, ring, idx, length);
			return;
		}
	}

	if (ring == 0)
		rxb = &ha->rx_buf[idx];
	else
		rxb = &ha->rx_jbuf[idx];

	QL_ASSERT((rxb != NULL),\
		("%s: [r, i, sds_idx]=[%d, 0x%x, %d] rxb != NULL\n",\
		 __func__, ring, idx, sds_idx));

	mp = rxb->m_head;

	QL_ASSERT((mp != NULL),\
		("%s: [r,i,rxb, sds_idx]=[%d, 0x%x, %p, %d] mp != NULL\n",\
		 __func__, ring, idx, rxb, sds_idx));

	bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_POSTREAD);

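	/*
	 * Detach the mbuf from the receive buffer and park the buffer on
	 * the per-SDS free list; it is handed back to the hardware later
	 * by the replenish routines.
	 */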
	if (ring == 0) {
		rxb->m_head = NULL;
		rxb->next = sdsp->rxb_free;
		sdsp->rxb_free = rxb;
		sdsp->rx_free++;
	} else {
		rxb->m_head = NULL;
		rxb->next = sdsp->rxjb_free;
		sdsp->rxjb_free = rxb;
		sdsp->rxj_free++;
	}

	mp->m_len = length;
	mp->m_pkthdr.len = length;
	mp->m_pkthdr.rcvif = ifp;

	eh = mtod(mp, struct ether_vlan_header *);

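	/*
	 * For VLAN-tagged frames, record the tag in the mbuf header and
	 * strip it from the payload: shift the first 12 bytes (the MAC
	 * addresses) forward by 4 bytes, overwriting the encapsulation,
	 * then trim the now-duplicated 4 bytes from the front.
	 */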
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		uint32_t *data32 = (uint32_t *)eh;

		mp->m_pkthdr.ether_vtag = ntohs(eh->evl_tag);
		mp->m_flags |= M_VLANTAG;

		*(data32 + 3) = *(data32 + 2);
		*(data32 + 2) = *(data32 + 1);
		*(data32 + 1) = *data32;

		m_adj(mp, ETHER_VLAN_ENCAP_LEN);
	}

	if (status == Q8_STAT_DESC_STATUS_CHKSUM_OK) {
		mp->m_pkthdr.csum_flags = (CSUM_IP_CHECKED | CSUM_IP_VALID);
	} else {
		mp->m_pkthdr.csum_flags = 0;
	}

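	/*
	 * Try to queue the frame for LRO first; anything LRO does not
	 * accept is passed straight up the stack via if_input.
	 */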
	if (lro->lro_cnt && (tcp_lro_rx(lro, mp, 0) == 0)) {
		/* LRO packet has been successfully queued */
	} else {
		(*ifp->if_input)(ifp, mp);
	}

	if (sdsp->rx_free > std_replenish)
		qla_replenish_normal_rx(ha, sdsp);

	if (sdsp->rxj_free > jumbo_replenish)
		qla_replenish_jumbo_rx(ha, sdsp);

	return;
}

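/*
 * Name: qla_replenish_jumbo_rx
 * Function: Posts free jumbo receive buffers back to the hardware
 */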
static void
qla_replenish_jumbo_rx(qla_host_t *ha, qla_sds_t *sdsp)
{
	qla_rx_buf_t *rxb;
	int count = jumbo_replenish;
	uint32_t rxj_next;

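	/*
	 * Use a trylock so the receive path never blocks here; if another
	 * context already holds rxj_lock it is replenishing the ring, and
	 * this call can simply be skipped.
	 */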
	if (!mtx_trylock(&ha->rxj_lock))
		return;

	rxj_next = ha->hw.rxj_next;

	while (count--) {
		rxb = sdsp->rxjb_free;

		if (rxb == NULL)
			break;

		sdsp->rxjb_free = rxb->next;
		sdsp->rxj_free--;

		if (qla_get_mbuf(ha, rxb, NULL, RDS_RING_INDEX_JUMBO) == 0) {
			qla_set_hw_rcv_desc(ha, RDS_RING_INDEX_JUMBO,
				ha->hw.rxj_in, rxb->handle, rxb->paddr,
				(rxb->m_head)->m_pkthdr.len);
			ha->hw.rxj_in++;
			if (ha->hw.rxj_in == NUM_RX_JUMBO_DESCRIPTORS)
				ha->hw.rxj_in = 0;
			ha->hw.rxj_next++;
			if (ha->hw.rxj_next == NUM_RX_JUMBO_DESCRIPTORS)
				ha->hw.rxj_next = 0;
		} else {
			device_printf(ha->pci_dev,
				"%s: qla_get_mbuf [1,(%d),(%d)] failed\n",
				__func__, ha->hw.rxj_in, rxb->handle);

			rxb->m_head = NULL;
			rxb->next = sdsp->rxjb_free;
			sdsp->rxjb_free = rxb;
			sdsp->rxj_free++;

			break;
		}
	}

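	/*
	 * Only touch the producer-index register if at least one buffer
	 * was actually posted to the ring.
	 */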
	if (rxj_next != ha->hw.rxj_next) {
		QL_UPDATE_RDS_PRODUCER_INDEX(ha, 1, ha->hw.rxj_next);
	}
	mtx_unlock(&ha->rxj_lock);
}

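/*
 * Name: qla_replenish_normal_rx
 * Function: Posts free normal receive buffers back to the hardware
 */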
static void
qla_replenish_normal_rx(qla_host_t *ha, qla_sds_t *sdsp)
{
	qla_rx_buf_t *rxb;
	int count = std_replenish;
	uint32_t rx_next;

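	/*
	 * Mirrors qla_replenish_jumbo_rx() above, but for the standard
	 * (MCLBYTES) receive ring.
	 */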
	if (!mtx_trylock(&ha->rx_lock))
		return;

	rx_next = ha->hw.rx_next;

	while (count--) {
		rxb = sdsp->rxb_free;

		if (rxb == NULL)
			break;

		sdsp->rxb_free = rxb->next;
		sdsp->rx_free--;

		if (qla_get_mbuf(ha, rxb, NULL, RDS_RING_INDEX_NORMAL) == 0) {
			qla_set_hw_rcv_desc(ha, RDS_RING_INDEX_NORMAL,
				ha->hw.rx_in, rxb->handle, rxb->paddr,
				(rxb->m_head)->m_pkthdr.len);
			ha->hw.rx_in++;
			if (ha->hw.rx_in == NUM_RX_DESCRIPTORS)
				ha->hw.rx_in = 0;
			ha->hw.rx_next++;
			if (ha->hw.rx_next == NUM_RX_DESCRIPTORS)
				ha->hw.rx_next = 0;
		} else {
			device_printf(ha->pci_dev,
				"%s: qla_get_mbuf [0,(%d),(%d)] failed\n",
				__func__, ha->hw.rx_in, rxb->handle);

			rxb->m_head = NULL;
			rxb->next = sdsp->rxb_free;
			sdsp->rxb_free = rxb;
			sdsp->rx_free++;

			break;
		}
	}

	if (rx_next != ha->hw.rx_next) {
		QL_UPDATE_RDS_PRODUCER_INDEX(ha, 0, ha->hw.rx_next);
	}
	mtx_unlock(&ha->rx_lock);
}

/*
 * Name: qla_rcv_isr
 * Function: Processes received frames on one status descriptor ring
 */
static uint32_t
qla_rcv_isr(qla_host_t *ha, uint32_t sds_idx, uint32_t count)
{
	device_t dev;
	qla_hw_t *hw;
	uint32_t comp_idx, desc_count;
	q80_stat_desc_t *sdesc;
	struct lro_ctrl *lro;
	uint32_t ret = 0;

	dev = ha->pci_dev;
	hw = &ha->hw;

	hw->sds[sds_idx].rcv_active = 1;
	if (ha->flags.stop_rcv) {
		hw->sds[sds_idx].rcv_active = 0;
		return 0;
	}

	QL_DPRINT2((dev, "%s: [%d]enter\n", __func__, sds_idx));

	/*
	 * receive interrupts
	 */
	comp_idx = hw->sds[sds_idx].sdsr_next;
	lro = &hw->sds[sds_idx].lro;

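	/*
	 * Walk the status ring while descriptors are owned by the host;
	 * a descriptor still owned by the firmware means there is no more
	 * completed work to process.
	 */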
	while (count--) {

		sdesc = (q80_stat_desc_t *)
				&hw->sds[sds_idx].sds_ring_base[comp_idx];

		if (Q8_STAT_DESC_OWNER((sdesc->data[0])) !=
			Q8_STAT_DESC_OWNER_HOST) {
			QL_DPRINT2((dev, "%s: data %p sdsr_next 0x%08x\n",
				__func__, (void *)sdesc->data[0], comp_idx));
			break;
		}

		desc_count = Q8_STAT_DESC_COUNT((sdesc->data[0]));

		switch (Q8_STAT_DESC_OPCODE((sdesc->data[0]))) {

		case Q8_STAT_DESC_OPCODE_RCV_PKT:
		case Q8_STAT_DESC_OPCODE_SYN_OFFLOAD:
			qla_rx_intr(ha, (sdesc->data[0]), sds_idx, lro);

			break;

		default:
			device_printf(dev, "%s: default 0x%llx!\n", __func__,
					(unsigned long long)sdesc->data[0]);
			break;
		}

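		/*
		 * Hand every descriptor consumed by this completion back
		 * to the firmware and advance the completion index, which
		 * wraps at NUM_STATUS_DESCRIPTORS (a power of two).
		 */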
		while (desc_count--) {
			sdesc->data[0] =
				Q8_STAT_DESC_SET_OWNER(Q8_STAT_DESC_OWNER_FW);
			comp_idx = (comp_idx + 1) & (NUM_STATUS_DESCRIPTORS-1);
			sdesc = (q80_stat_desc_t *)
				&hw->sds[sds_idx].sds_ring_base[comp_idx];
		}
	}

	tcp_lro_flush_all(lro);

	if (hw->sds[sds_idx].sdsr_next != comp_idx) {
		QL_UPDATE_SDS_CONSUMER_INDEX(ha, sds_idx, comp_idx);
	}
	hw->sds[sds_idx].sdsr_next = comp_idx;

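	/*
	 * If the next descriptor on ring 0 is already host-owned, more
	 * work arrived during processing; return nonzero so the caller
	 * reschedules the receive task instead of re-enabling interrupts.
	 */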
	sdesc = (q80_stat_desc_t *)&hw->sds[sds_idx].sds_ring_base[comp_idx];
	if ((sds_idx == 0) && (Q8_STAT_DESC_OWNER((sdesc->data[0])) ==
					Q8_STAT_DESC_OWNER_HOST)) {
		ret = -1;
	}

	hw->sds[sds_idx].rcv_active = 0;
	return (ret);
}

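/*
 * Name: qla_isr
 * Function: Main Interrupt Service Routine
 */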
void
qla_isr(void *arg)
{
	qla_ivec_t *ivec = arg;
	qla_host_t *ha;
	uint32_t sds_idx;
	uint32_t ret;

	ha = ivec->ha;
	sds_idx = ivec->irq_rid - 1;

	if (sds_idx >= ha->hw.num_sds_rings) {
		device_printf(ha->pci_dev, "%s: bogus sds_idx 0x%x\n", __func__,
			sds_idx);

		return;
	}

	if (sds_idx == 0)
		taskqueue_enqueue(ha->tx_tq, &ha->tx_task);

	ret = qla_rcv_isr(ha, sds_idx, rcv_pkt_thres);

	if (sds_idx == 0)
		taskqueue_enqueue(ha->tx_tq, &ha->tx_task);

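	/*
	 * If qla_rcv_isr() hit the packet threshold with work remaining,
	 * defer the rest to the receive taskqueue; otherwise re-enable
	 * interrupts on this vector now.
	 */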
	if (ret) {
		taskqueue_enqueue(ha->irq_vec[sds_idx].rcv_tq,
			&ha->irq_vec[sds_idx].rcv_task);
	} else {
		QL_ENABLE_INTERRUPTS(ha, sds_idx);
	}
}

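/*
 * Name: qla_rcv
 * Function: Taskqueue handler that continues receive processing
 */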
void
qla_rcv(void *context, int pending)
{
	qla_ivec_t *ivec = context;
	qla_host_t *ha;
	device_t dev;
	qla_hw_t *hw;
	uint32_t sds_idx;
	uint32_t ret;
	struct ifnet *ifp;

	ha = ivec->ha;
	dev = ha->pci_dev;
	hw = &ha->hw;
	sds_idx = ivec->irq_rid - 1;
	ifp = ha->ifp;

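	/*
	 * Taskqueue context: keep draining the status ring (kicking the
	 * transmit task for ring 0 when completions or queued frames are
	 * pending) until qla_rcv_isr() reports no further work, then
	 * re-enable interrupts.
	 */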
	do {
		if (sds_idx == 0) {
			if (qla_le32_to_host(*(hw->tx_cons)) != hw->txr_comp) {
				taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
			} else if ((ifp->if_snd.ifq_head != NULL) &&
					QL_RUNNING(ifp)) {
				taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
			}
		}
		ret = qla_rcv_isr(ha, sds_idx, rcv_pkt_thres_d);
	} while (ret);

	if (sds_idx == 0)
		taskqueue_enqueue(ha->tx_tq, &ha->tx_task);

	QL_ENABLE_INTERRUPTS(ha, sds_idx);
}