/*
 * Copyright (c) 2013-2014 Qlogic Corporation
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: qls_os.c
 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/dev/qlxge/qls_os.c 281955 2015-04-24 23:26:44Z hiren $");


#include "qls_os.h"
#include "qls_hw.h"
#include "qls_def.h"
#include "qls_inline.h"
#include "qls_ver.h"
#include "qls_glbl.h"
#include "qls_dbg.h"
#include <sys/smp.h>

/*
 * Some PCI Configuration Space Related Defines
 */

#ifndef PCI_VENDOR_QLOGIC
#define PCI_VENDOR_QLOGIC	0x1077
#endif

#ifndef PCI_DEVICE_QLOGIC_8000
#define PCI_DEVICE_QLOGIC_8000	0x8000
#endif

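/*
 * 32-bit (device id << 16 | vendor id) identifier; qls_pci_probe() builds
 * the same packed value from pci_get_device()/pci_get_vendor() to match it.
 */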
#define PCI_QLOGIC_DEV8000 \
	((PCI_DEVICE_QLOGIC_8000 << 16) | PCI_VENDOR_QLOGIC)

/*
 * static functions
 */
static int qls_alloc_parent_dma_tag(qla_host_t *ha);
static void qls_free_parent_dma_tag(qla_host_t *ha);

static void qls_flush_xmt_bufs(qla_host_t *ha);

static int qls_alloc_rcv_bufs(qla_host_t *ha);
static void qls_free_rcv_bufs(qla_host_t *ha);

static void qls_init_ifnet(device_t dev, qla_host_t *ha);
static void qls_release(qla_host_t *ha);
static void qls_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs,
		int error);
static void qls_stop(qla_host_t *ha);
static int qls_send(qla_host_t *ha, struct mbuf **m_headp);
static void qls_tx_done(void *context, int pending);

static int qls_config_lro(qla_host_t *ha);
static void qls_free_lro(qla_host_t *ha);

static void qls_error_recovery(void *context, int pending);

/*
 * Hooks to the Operating Systems
 */
static int qls_pci_probe (device_t);
static int qls_pci_attach (device_t);
static int qls_pci_detach (device_t);

static void qls_start(struct ifnet *ifp);
static void qls_init(void *arg);
static int qls_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static int qls_media_change(struct ifnet *ifp);
static void qls_media_status(struct ifnet *ifp, struct ifmediareq *ifmr);

static device_method_t qla_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, qls_pci_probe),
	DEVMETHOD(device_attach, qls_pci_attach),
	DEVMETHOD(device_detach, qls_pci_detach),
	{ 0, 0 }
};

static driver_t qla_pci_driver = {
	"ql", qla_pci_methods, sizeof (qla_host_t),
};

static devclass_t qla8000_devclass;

DRIVER_MODULE(qla8000, pci, qla_pci_driver, qla8000_devclass, 0, 0);

MODULE_DEPEND(qla8000, pci, 1, 1, 1);
MODULE_DEPEND(qla8000, ether, 1, 1, 1);

MALLOC_DEFINE(M_QLA8000BUF, "qla8000buf", "Buffers for qla8000 driver");

static char dev_str[64];
static char ver_str[64];

/*
 * Name:	qls_pci_probe
 * Function:	Validate the PCI device to be a QLA80XX device
 */
static int
qls_pci_probe(device_t dev)
{
        switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
        case PCI_QLOGIC_DEV8000:
		snprintf(dev_str, sizeof(dev_str), "%s v%d.%d.%d",
			"Qlogic ISP 8000 PCI CNA Adapter-Ethernet Function",
			QLA_VERSION_MAJOR, QLA_VERSION_MINOR,
			QLA_VERSION_BUILD);
		snprintf(ver_str, sizeof(ver_str), "v%d.%d.%d",
			QLA_VERSION_MAJOR, QLA_VERSION_MINOR,
			QLA_VERSION_BUILD);
                device_set_desc(dev, dev_str);
                break;
        default:
                return (ENXIO);
        }

        if (bootverbose)
                printf("%s: %s\n", __func__, dev_str);

        return (BUS_PROBE_DEFAULT);
}

static int
qls_sysctl_get_drvr_stats(SYSCTL_HANDLER_ARGS)
{
        int err = 0, ret;
        qla_host_t *ha;
        uint32_t i;

        err = sysctl_handle_int(oidp, &ret, 0, req);

        if (err || !req->newptr)
                return (err);

        if (ret == 1) {

                ha = (qla_host_t *)arg1;

                for (i = 0; i < ha->num_tx_rings; i++) {

                        device_printf(ha->pci_dev,
                                "%s: tx_ring[%d].tx_frames= %p\n",
				__func__, i,
                                (void *)ha->tx_ring[i].tx_frames);

                        device_printf(ha->pci_dev,
                                "%s: tx_ring[%d].tx_tso_frames= %p\n",
				__func__, i,
                                (void *)ha->tx_ring[i].tx_tso_frames);

                        device_printf(ha->pci_dev,
                                "%s: tx_ring[%d].tx_vlan_frames= %p\n",
				__func__, i,
                                (void *)ha->tx_ring[i].tx_vlan_frames);

                        device_printf(ha->pci_dev,
                                "%s: tx_ring[%d].txr_free= 0x%08x\n",
				__func__, i,
                                ha->tx_ring[i].txr_free);

                        device_printf(ha->pci_dev,
                                "%s: tx_ring[%d].txr_next= 0x%08x\n",
				__func__, i,
                                ha->tx_ring[i].txr_next);

                        device_printf(ha->pci_dev,
                                "%s: tx_ring[%d].txr_done= 0x%08x\n",
				__func__, i,
                                ha->tx_ring[i].txr_done);

                        device_printf(ha->pci_dev,
                                "%s: tx_ring[%d].txr_cons_idx= 0x%08x\n",
				__func__, i,
                                *(ha->tx_ring[i].txr_cons_vaddr));
		}

                for (i = 0; i < ha->num_rx_rings; i++) {

                        device_printf(ha->pci_dev,
                                "%s: rx_ring[%d].rx_int= %p\n",
				__func__, i,
                                (void *)ha->rx_ring[i].rx_int);

                        device_printf(ha->pci_dev,
                                "%s: rx_ring[%d].rss_int= %p\n",
				__func__, i,
                                (void *)ha->rx_ring[i].rss_int);

                        device_printf(ha->pci_dev,
                                "%s: rx_ring[%d].lbq_next= 0x%08x\n",
				__func__, i,
                                ha->rx_ring[i].lbq_next);

                        device_printf(ha->pci_dev,
                                "%s: rx_ring[%d].lbq_free= 0x%08x\n",
				__func__, i,
                                ha->rx_ring[i].lbq_free);

                        device_printf(ha->pci_dev,
                                "%s: rx_ring[%d].lbq_in= 0x%08x\n",
				__func__, i,
                                ha->rx_ring[i].lbq_in);

                        device_printf(ha->pci_dev,
                                "%s: rx_ring[%d].sbq_next= 0x%08x\n",
				__func__, i,
                                ha->rx_ring[i].sbq_next);

                        device_printf(ha->pci_dev,
                                "%s: rx_ring[%d].sbq_free= 0x%08x\n",
				__func__, i,
                                ha->rx_ring[i].sbq_free);

                        device_printf(ha->pci_dev,
                                "%s: rx_ring[%d].sbq_in= 0x%08x\n",
				__func__, i,
                                ha->rx_ring[i].sbq_in);
		}

		device_printf(ha->pci_dev, "%s: err_m_getcl = 0x%08x\n",
				__func__, ha->err_m_getcl);
		device_printf(ha->pci_dev, "%s: err_m_getjcl = 0x%08x\n",
				__func__, ha->err_m_getjcl);
		device_printf(ha->pci_dev,
				"%s: err_tx_dmamap_create = 0x%08x\n",
				__func__, ha->err_tx_dmamap_create);
		device_printf(ha->pci_dev,
				"%s: err_tx_dmamap_load = 0x%08x\n",
				__func__, ha->err_tx_dmamap_load);
		device_printf(ha->pci_dev,
				"%s: err_tx_defrag = 0x%08x\n",
				__func__, ha->err_tx_defrag);
        }
        return (err);
}

static void
qls_add_sysctls(qla_host_t *ha)
{
        device_t dev = ha->pci_dev;

	SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "version", CTLFLAG_RD,
		ver_str, 0, "Driver Version");

	qls_dbg_level = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "debug", CTLFLAG_RW,
                &qls_dbg_level, qls_dbg_level, "Debug Level");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "drvr_stats", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qls_sysctl_get_drvr_stats, "I", "Driver Maintained Statistics");

        return;
}
static void
qls_watchdog(void *arg)
{
	qla_host_t *ha = arg;
	struct ifnet *ifp;

	ifp = ha->ifp;

        if (ha->flags.qla_watchdog_exit) {
		ha->qla_watchdog_exited = 1;
		return;
	}
	ha->qla_watchdog_exited = 0;

	if (!ha->flags.qla_watchdog_pause) {

		if (ha->qla_initiate_recovery) {

			ha->qla_watchdog_paused = 1;
			ha->qla_initiate_recovery = 0;
			ha->err_inject = 0;
			taskqueue_enqueue(ha->err_tq, &ha->err_task);

		} else if ((ifp->if_snd.ifq_head != NULL) && QL_RUNNING(ifp)) {

			taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
		}

		ha->qla_watchdog_paused = 0;
	} else {
		ha->qla_watchdog_paused = 1;
	}

	ha->watchdog_ticks = (ha->watchdog_ticks + 1) % 1000;
	callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
		qls_watchdog, ha);

	return;
}

/*
 * Name:	qls_pci_attach
 * Function:	attaches the device to the operating system
 */
static int
qls_pci_attach(device_t dev)
{
	qla_host_t *ha = NULL;
	int i;

	QL_DPRINT2((dev, "%s: enter\n", __func__));

        if ((ha = device_get_softc(dev)) == NULL) {
                device_printf(dev, "cannot get softc\n");
                return (ENOMEM);
        }

        memset(ha, 0, sizeof (qla_host_t));

        if (pci_get_device(dev) != PCI_DEVICE_QLOGIC_8000) {
                device_printf(dev, "device is not QLE8000\n");
                return (ENXIO);
	}

        ha->pci_func = pci_get_function(dev);

        ha->pci_dev = dev;

	pci_enable_busmaster(dev);

	ha->reg_rid = PCIR_BAR(1);
	ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid,
				RF_ACTIVE);

        if (ha->pci_reg == NULL) {
                device_printf(dev, "unable to map any ports\n");
                goto qls_pci_attach_err;
        }

	ha->reg_rid1 = PCIR_BAR(3);
	ha->pci_reg1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
			&ha->reg_rid1, RF_ACTIVE);

        if (ha->pci_reg1 == NULL) {
                device_printf(dev, "unable to map any ports\n");
                goto qls_pci_attach_err;
        }

	mtx_init(&ha->hw_lock, "qla80xx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF);
	mtx_init(&ha->tx_lock, "qla80xx_tx_lock", MTX_NETWORK_LOCK, MTX_DEF);

	qls_add_sysctls(ha);
	qls_hw_add_sysctls(ha);

	ha->flags.lock_init = 1;

	ha->msix_count = pci_msix_count(dev);

	if (ha->msix_count < qls_get_msix_count(ha)) {
		device_printf(dev, "%s: msix_count[%d] not enough\n", __func__,
			ha->msix_count);
		goto qls_pci_attach_err;
	}

	ha->msix_count = qls_get_msix_count(ha);

	device_printf(dev, "\n%s: ha %p pci_func 0x%x  msix_count 0x%x"
		" pci_reg %p pci_reg1 %p\n", __func__, ha,
		ha->pci_func, ha->msix_count, ha->pci_reg, ha->pci_reg1);

	if (pci_alloc_msix(dev, &ha->msix_count)) {
		device_printf(dev, "%s: pci_alloc_msi[%d] failed\n", __func__,
			ha->msix_count);
		ha->msix_count = 0;
		goto qls_pci_attach_err;
	}

        for (i = 0; i < ha->num_rx_rings; i++) {
                ha->irq_vec[i].cq_idx = i;
                ha->irq_vec[i].ha = ha;
                ha->irq_vec[i].irq_rid = 1 + i;

                ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
                                &ha->irq_vec[i].irq_rid,
                                (RF_ACTIVE | RF_SHAREABLE));

                if (ha->irq_vec[i].irq == NULL) {
                        device_printf(dev, "could not allocate interrupt\n");
                        goto qls_pci_attach_err;
                }

		if (bus_setup_intr(dev, ha->irq_vec[i].irq,
			(INTR_TYPE_NET | INTR_MPSAFE), NULL, qls_isr,
			&ha->irq_vec[i], &ha->irq_vec[i].handle)) {
				device_printf(dev,
					"could not setup interrupt\n");
			goto qls_pci_attach_err;
		}
        }

	qls_rd_nic_params(ha);

	/* allocate parent dma tag */
	if (qls_alloc_parent_dma_tag(ha)) {
		device_printf(dev, "%s: qls_alloc_parent_dma_tag failed\n",
			__func__);
		goto qls_pci_attach_err;
	}

	/* alloc all dma buffers */
	if (qls_alloc_dma(ha)) {
		device_printf(dev, "%s: qls_alloc_dma failed\n", __func__);
		goto qls_pci_attach_err;
	}

	/* create the o.s ethernet interface */
	qls_init_ifnet(dev, ha);

	ha->flags.qla_watchdog_active = 1;
	ha->flags.qla_watchdog_pause = 1;

	TASK_INIT(&ha->tx_task, 0, qls_tx_done, ha);
	ha->tx_tq = taskqueue_create_fast("qla_txq", M_NOWAIT,
			taskqueue_thread_enqueue, &ha->tx_tq);
	taskqueue_start_threads(&ha->tx_tq, 1, PI_NET, "%s txq",
		device_get_nameunit(ha->pci_dev));

	callout_init(&ha->tx_callout, TRUE);
	ha->flags.qla_callout_init = 1;

        /* create ioctl device interface */
        if (qls_make_cdev(ha)) {
                device_printf(dev, "%s: qls_make_cdev failed\n", __func__);
                goto qls_pci_attach_err;
        }

	callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
		qls_watchdog, ha);

        TASK_INIT(&ha->err_task, 0, qls_error_recovery, ha);
        ha->err_tq = taskqueue_create_fast("qla_errq", M_NOWAIT,
                        taskqueue_thread_enqueue, &ha->err_tq);
        taskqueue_start_threads(&ha->err_tq, 1, PI_NET, "%s errq",
                device_get_nameunit(ha->pci_dev));

	QL_DPRINT2((dev, "%s: exit 0\n", __func__));
        return (0);

qls_pci_attach_err:

	qls_release(ha);

	QL_DPRINT2((dev, "%s: exit ENXIO\n", __func__));
        return (ENXIO);
}

/*
 * Name:	qls_pci_detach
 * Function:	Unhooks the device from the operating system
 */
static int
qls_pci_detach(device_t dev)
{
	qla_host_t *ha = NULL;
	struct ifnet *ifp;

	QL_DPRINT2((dev, "%s: enter\n", __func__));

        if ((ha = device_get_softc(dev)) == NULL) {
                device_printf(dev, "cannot get softc\n");
                return (ENOMEM);
        }

	ifp = ha->ifp;

	(void)QLA_LOCK(ha, __func__, 0);
	qls_stop(ha);
	QLA_UNLOCK(ha, __func__);

	qls_release(ha);

	QL_DPRINT2((dev, "%s: exit\n", __func__));

        return (0);
}

/*
 * Name:	qls_release
 * Function:	Releases the resources allocated for the device
 */
static void
qls_release(qla_host_t *ha)
{
	device_t dev;
	int i;

	dev = ha->pci_dev;

	if (ha->err_tq) {
		taskqueue_drain(ha->err_tq, &ha->err_task);
		taskqueue_free(ha->err_tq);
	}

	if (ha->tx_tq) {
		taskqueue_drain(ha->tx_tq, &ha->tx_task);
		taskqueue_free(ha->tx_tq);
	}

	qls_del_cdev(ha);

	if (ha->flags.qla_watchdog_active) {
		ha->flags.qla_watchdog_exit = 1;

		while (ha->qla_watchdog_exited == 0)
			qls_mdelay(__func__, 1);
	}

	if (ha->flags.qla_callout_init)
		callout_stop(&ha->tx_callout);

	if (ha->ifp != NULL)
		ether_ifdetach(ha->ifp);

	qls_free_dma(ha);
	qls_free_parent_dma_tag(ha);

        for (i = 0; i < ha->num_rx_rings; i++) {

                if (ha->irq_vec[i].handle) {
                        (void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
                                        ha->irq_vec[i].handle);
                }

                if (ha->irq_vec[i].irq) {
                        (void)bus_release_resource(dev, SYS_RES_IRQ,
                                ha->irq_vec[i].irq_rid,
                                ha->irq_vec[i].irq);
                }
        }

	if (ha->msix_count)
		pci_release_msi(dev);

	if (ha->flags.lock_init) {
		mtx_destroy(&ha->tx_lock);
		mtx_destroy(&ha->hw_lock);
	}

        if (ha->pci_reg)
                (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid,
				ha->pci_reg);

        if (ha->pci_reg1)
                (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid1,
				ha->pci_reg1);
}

/*
 * DMA Related Functions
 */

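/*
 * Name:	qls_dmamap_callback
 * Function:	Returns the bus address of a single-segment DMA mapping to
 *		the caller of bus_dmamap_load(); stores 0 on error.
 */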
static void
qls_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
        *((bus_addr_t *)arg) = 0;

        if (error) {
                printf("%s: bus_dmamap_load failed (%d)\n", __func__, error);
                return;
	}

        *((bus_addr_t *)arg) = segs[0].ds_addr;

	return;
}

int
qls_alloc_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
{
        int             ret = 0;
        device_t        dev;
        bus_addr_t      b_addr;

        dev = ha->pci_dev;

        QL_DPRINT2((dev, "%s: enter\n", __func__));

        ret = bus_dma_tag_create(
                        ha->parent_tag,/* parent */
                        dma_buf->alignment,
                        ((bus_size_t)(1ULL << 32)),/* boundary */
                        BUS_SPACE_MAXADDR,      /* lowaddr */
                        BUS_SPACE_MAXADDR,      /* highaddr */
                        NULL, NULL,             /* filter, filterarg */
                        dma_buf->size,          /* maxsize */
                        1,                      /* nsegments */
                        dma_buf->size,          /* maxsegsize */
                        0,                      /* flags */
                        NULL, NULL,             /* lockfunc, lockarg */
                        &dma_buf->dma_tag);

        if (ret) {
                device_printf(dev, "%s: could not create dma tag\n", __func__);
                goto qls_alloc_dmabuf_exit;
        }
        ret = bus_dmamem_alloc(dma_buf->dma_tag,
                        (void **)&dma_buf->dma_b,
                        (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT),
                        &dma_buf->dma_map);
        if (ret) {
                bus_dma_tag_destroy(dma_buf->dma_tag);
                device_printf(dev, "%s: bus_dmamem_alloc failed\n", __func__);
                goto qls_alloc_dmabuf_exit;
        }

        ret = bus_dmamap_load(dma_buf->dma_tag,
                        dma_buf->dma_map,
                        dma_buf->dma_b,
                        dma_buf->size,
                        qls_dmamap_callback,
                        &b_addr, BUS_DMA_NOWAIT);

        if (ret || !b_addr) {
                bus_dma_tag_destroy(dma_buf->dma_tag);
                bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b,
                        dma_buf->dma_map);
                ret = -1;
                goto qls_alloc_dmabuf_exit;
        }

        dma_buf->dma_addr = b_addr;

qls_alloc_dmabuf_exit:
        QL_DPRINT2((dev, "%s: exit ret 0x%08x tag %p map %p b %p sz 0x%x\n",
                __func__, ret, (void *)dma_buf->dma_tag,
                (void *)dma_buf->dma_map, (void *)dma_buf->dma_b,
		dma_buf->size));

        return ret;
}

void
qls_free_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
{
        bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map);
        bus_dma_tag_destroy(dma_buf->dma_tag);
}

static int
qls_alloc_parent_dma_tag(qla_host_t *ha)
{
	int		ret;
	device_t	dev;

	dev = ha->pci_dev;

        /*
         * Allocate parent DMA Tag
         */
        ret = bus_dma_tag_create(
                        bus_get_dma_tag(dev),   /* parent */
                        1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */
                        BUS_SPACE_MAXADDR,      /* lowaddr */
                        BUS_SPACE_MAXADDR,      /* highaddr */
                        NULL, NULL,             /* filter, filterarg */
                        BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
                        0,                      /* nsegments */
                        BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
                        0,                      /* flags */
                        NULL, NULL,             /* lockfunc, lockarg */
                        &ha->parent_tag);

        if (ret) {
                device_printf(dev, "%s: could not create parent dma tag\n",
                        __func__);
		return (-1);
        }

        ha->flags.parent_tag = 1;

	return (0);
}

static void
qls_free_parent_dma_tag(qla_host_t *ha)
{
        if (ha->flags.parent_tag) {
                bus_dma_tag_destroy(ha->parent_tag);
                ha->flags.parent_tag = 0;
        }
}

/*
 * Name: qls_init_ifnet
 * Function: Creates the Network Device Interface and Registers it with the O.S
 */

static void
qls_init_ifnet(device_t dev, qla_host_t *ha)
{
	struct ifnet *ifp;

	QL_DPRINT2((dev, "%s: enter\n", __func__));

	ifp = ha->ifp = if_alloc(IFT_ETHER);

	if (ifp == NULL)
		panic("%s: cannot if_alloc()\n", device_get_nameunit(dev));

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

#if __FreeBSD_version >= 1000000
	if_initbaudrate(ifp, IF_Gbps(10));
#else
	ifp->if_baudrate = 1 * 1000 * 1000 * 1000;
#endif /* #if (__FreeBSD_version >= 1000000) */
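	/*
	 * Note: before if_initbaudrate() (FreeBSD 10), if_baudrate was a
	 * u_long, which cannot hold 10Gbps on 32-bit platforms; hence the
	 * 1Gbps fallback above.
	 */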

	ifp->if_init = qls_init;
	ifp->if_softc = ha;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = qls_ioctl;
	ifp->if_start = qls_start;

	IFQ_SET_MAXLEN(&ifp->if_snd, qls_get_ifq_snd_maxlen(ha));
	ifp->if_snd.ifq_drv_maxlen = qls_get_ifq_snd_maxlen(ha);
	IFQ_SET_READY(&ifp->if_snd);

	ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
	if (ha->max_frame_size <= MCLBYTES) {
		ha->msize = MCLBYTES;
	} else if (ha->max_frame_size <= MJUMPAGESIZE) {
		ha->msize = MJUMPAGESIZE;
	} else
		ha->msize = MJUM9BYTES;

	ether_ifattach(ifp, qls_get_mac_addr(ha));

	ifp->if_capabilities = IFCAP_JUMBO_MTU;

	ifp->if_capabilities |= IFCAP_HWCSUM;
	ifp->if_capabilities |= IFCAP_VLAN_MTU;

	ifp->if_capabilities |= IFCAP_TSO4;
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
	ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
	ifp->if_capabilities |= IFCAP_LINKSTATE;

	ifp->if_capenable = ifp->if_capabilities;

	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	ifmedia_init(&ha->media, IFM_IMASK, qls_media_change, qls_media_status);

	ifmedia_add(&ha->media, (IFM_ETHER | qls_get_optics(ha) | IFM_FDX), 0,
		NULL);
	ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL);

	ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO));

	QL_DPRINT2((dev, "%s: exit\n", __func__));

	return;
}

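/*
 * Name:	qls_init_locked
 * Function:	(Re)initializes the interface; expects QLA_LOCK to be held
 *		by the caller.
 */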
static void
qls_init_locked(qla_host_t *ha)
{
	struct ifnet *ifp = ha->ifp;

	qls_stop(ha);

	qls_flush_xmt_bufs(ha);

	if (qls_alloc_rcv_bufs(ha) != 0)
		return;

	if (qls_config_lro(ha))
		return;

	bcopy(IF_LLADDR(ha->ifp), ha->mac_addr, ETHER_ADDR_LEN);

	ifp->if_hwassist = CSUM_IP;
	ifp->if_hwassist |= CSUM_TCP;
	ifp->if_hwassist |= CSUM_UDP;
	ifp->if_hwassist |= CSUM_TSO;

	if (qls_init_hw_if(ha) == 0) {
		ifp = ha->ifp;
		ifp->if_drv_flags |= IFF_DRV_RUNNING;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		ha->flags.qla_watchdog_pause = 0;
	}

	return;
}

static void
qls_init(void *arg)
{
	qla_host_t *ha;

	ha = (qla_host_t *)arg;

	QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));

	(void)QLA_LOCK(ha, __func__, 0);
	qls_init_locked(ha);
	QLA_UNLOCK(ha, __func__);

	QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__));
}

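/*
 * Name:	qls_set_multi
 * Function:	Collects up to Q8_MAX_NUM_MULTICAST_ADDRS link-level
 *		multicast addresses from the interface and programs them
 *		into the hardware.
 */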
static void
qls_set_multi(qla_host_t *ha, uint32_t add_multi)
{
	uint8_t mta[Q8_MAX_NUM_MULTICAST_ADDRS * Q8_MAC_ADDR_LEN];
	struct ifmultiaddr *ifma;
	int mcnt = 0;
	struct ifnet *ifp = ha->ifp;

	if_maddr_rlock(ifp);

	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {

		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		if (mcnt == Q8_MAX_NUM_MULTICAST_ADDRS)
			break;

		bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
			&mta[mcnt * Q8_MAC_ADDR_LEN], Q8_MAC_ADDR_LEN);

		mcnt++;
	}

	if_maddr_runlock(ifp);

	if (QLA_LOCK(ha, __func__, 1) == 0) {
		qls_hw_set_multi(ha, mta, mcnt, add_multi);
		QLA_UNLOCK(ha, __func__);
	}

	return;
}

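/*
 * Name:	qls_ioctl
 * Function:	Handles the interface ioctls (address, MTU, flags,
 *		multicast, media and capability changes).
 */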
static int
qls_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	int ret = 0;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	qla_host_t *ha;

	ha = (qla_host_t *)ifp->if_softc;

	switch (cmd) {
	case SIOCSIFADDR:
		QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFADDR (0x%lx)\n",
			__func__, cmd));

		if (ifa->ifa_addr->sa_family == AF_INET) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				(void)QLA_LOCK(ha, __func__, 0);
				qls_init_locked(ha);
				QLA_UNLOCK(ha, __func__);
			}
			QL_DPRINT4((ha->pci_dev,
				"%s: SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n",
				__func__, cmd,
				ntohl(IA_SIN(ifa)->sin_addr.s_addr)));

			arp_ifinit(ifp, ifa);
		} else {
			ether_ioctl(ifp, cmd, data);
		}
		break;

	case SIOCSIFMTU:
		QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFMTU (0x%lx)\n",
			__func__, cmd));

		if (ifr->ifr_mtu > QLA_MAX_MTU) {
			ret = EINVAL;
		} else {
			(void) QLA_LOCK(ha, __func__, 0);

			ifp->if_mtu = ifr->ifr_mtu;
			ha->max_frame_size =
				ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

			QLA_UNLOCK(ha, __func__);

			if (ret)
				ret = EINVAL;
		}

		break;

	case SIOCSIFFLAGS:
		QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFFLAGS (0x%lx)\n",
			__func__, cmd));

		(void)QLA_LOCK(ha, __func__, 0);

		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				if ((ifp->if_flags ^ ha->if_flags) &
					IFF_PROMISC) {
					ret = qls_set_promisc(ha);
				} else if ((ifp->if_flags ^ ha->if_flags) &
					IFF_ALLMULTI) {
					ret = qls_set_allmulti(ha);
				}
			} else {
				ha->max_frame_size = ifp->if_mtu +
					ETHER_HDR_LEN + ETHER_CRC_LEN;
				qls_init_locked(ha);
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				qls_stop(ha);
			ha->if_flags = ifp->if_flags;
		}

		QLA_UNLOCK(ha, __func__);
		break;

	case SIOCADDMULTI:
		QL_DPRINT4((ha->pci_dev,
			"%s: %s (0x%lx)\n", __func__, "SIOCADDMULTI", cmd));

		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			qls_set_multi(ha, 1);
		}
		break;

	case SIOCDELMULTI:
		QL_DPRINT4((ha->pci_dev,
			"%s: %s (0x%lx)\n", __func__, "SIOCDELMULTI", cmd));

		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			qls_set_multi(ha, 0);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		QL_DPRINT4((ha->pci_dev,
			"%s: SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n",
			__func__, cmd));
		ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
		break;

	case SIOCSIFCAP:
	{
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFCAP (0x%lx)\n",
			__func__, cmd));

		if (mask & IFCAP_HWCSUM)
			ifp->if_capenable ^= IFCAP_HWCSUM;
		if (mask & IFCAP_TSO4)
			ifp->if_capenable ^= IFCAP_TSO4;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;

		if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
			qls_init(ha);

		VLAN_CAPABILITIES(ifp);
		break;
	}

	default:
		QL_DPRINT4((ha->pci_dev, "%s: default (0x%lx)\n",
			__func__, cmd));
		ret = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (ret);
}

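/*
 * Name:	qls_media_change
 * Function:	ifmedia change handler; only IFM_ETHER media are accepted.
 */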
static int
qls_media_change(struct ifnet *ifp)
{
	qla_host_t *ha;
	struct ifmedia *ifm;
	int ret = 0;

	ha = (qla_host_t *)ifp->if_softc;

	QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));

	ifm = &ha->media;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		ret = EINVAL;

	QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__));

	return (ret);
}

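/*
 * Name:	qls_media_status
 * Function:	ifmedia status handler; reports the current link state.
 */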
static void
qls_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	qla_host_t *ha;

	ha = (qla_host_t *)ifp->if_softc;

	QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	qls_update_link_state(ha);
	if (ha->link_up) {
		ifmr->ifm_status |= IFM_ACTIVE;
		ifmr->ifm_active |= (IFM_FDX | qls_get_optics(ha));
	}

	QL_DPRINT2((ha->pci_dev, "%s: exit (%s)\n", __func__,
		(ha->link_up ? "link_up" : "link_down")));

	return;
}

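/*
 * Name:	qls_start
 * Function:	if_start handler; drains the interface send queue and hands
 *		each frame to qls_send() while the link is up.
 */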
static void
qls_start(struct ifnet *ifp)
{
	int		i, ret = 0;
	struct mbuf	*m_head;
	qla_host_t	*ha = (qla_host_t *)ifp->if_softc;

	QL_DPRINT8((ha->pci_dev, "%s: enter\n", __func__));

	if (!mtx_trylock(&ha->tx_lock)) {
		QL_DPRINT8((ha->pci_dev,
			"%s: mtx_trylock(&ha->tx_lock) failed\n", __func__));
		return;
	}

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) ==
		IFF_DRV_RUNNING) {

		for (i = 0; i < ha->num_tx_rings; i++) {
			ret |= qls_hw_tx_done(ha, i);
		}

		if (ret == 0)
			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	}

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
		IFF_DRV_RUNNING) {
		QL_DPRINT8((ha->pci_dev, "%s: !IFF_DRV_RUNNING\n", __func__));
		QLA_TX_UNLOCK(ha);
		return;
	}

	if (!ha->link_up) {
		qls_update_link_state(ha);
		if (!ha->link_up) {
			QL_DPRINT8((ha->pci_dev, "%s: link down\n", __func__));
			QLA_TX_UNLOCK(ha);
			return;
		}
	}

	while (ifp->if_snd.ifq_head != NULL) {

		IF_DEQUEUE(&ifp->if_snd, m_head);

		if (m_head == NULL) {
			QL_DPRINT8((ha->pci_dev, "%s: m_head == NULL\n",
				__func__));
			break;
		}

		if (qls_send(ha, &m_head)) {
			if (m_head == NULL)
				break;
			QL_DPRINT8((ha->pci_dev, "%s: PREPEND\n", __func__));
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			IF_PREPEND(&ifp->if_snd, m_head);
			break;
		}
		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	QLA_TX_UNLOCK(ha);
	QL_DPRINT8((ha->pci_dev, "%s: exit\n", __func__));
	return;
}

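/*
 * Name:	qls_send
 * Function:	DMA-maps one mbuf chain (defragmenting it on EFBIG) and
 *		queues it to the transmit ring selected by the flowid.
 */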
static int
qls_send(qla_host_t *ha, struct mbuf **m_headp)
{
	bus_dma_segment_t	segs[QLA_MAX_SEGMENTS];
	bus_dmamap_t		map;
	int			nsegs;
	int			ret = -1;
	uint32_t		tx_idx;
	struct mbuf		*m_head = *m_headp;
	uint32_t		txr_idx = 0;

	QL_DPRINT8((ha->pci_dev, "%s: enter\n", __func__));

	/* check if flowid is set */
	if (M_HASHTYPE_GET(m_head) != M_HASHTYPE_NONE)
		txr_idx = m_head->m_pkthdr.flowid & (ha->num_tx_rings - 1);

	tx_idx = ha->tx_ring[txr_idx].txr_next;

	map = ha->tx_ring[txr_idx].tx_buf[tx_idx].map;

	ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs,
			BUS_DMA_NOWAIT);

	if (ret == EFBIG) {

		struct mbuf *m;

		QL_DPRINT8((ha->pci_dev, "%s: EFBIG [%d]\n", __func__,
			m_head->m_pkthdr.len));

		m = m_defrag(m_head, M_DONTWAIT);
		if (m == NULL) {
			ha->err_tx_defrag++;
			m_freem(m_head);
			*m_headp = NULL;
			device_printf(ha->pci_dev,
				"%s: m_defrag() = NULL [%d]\n",
				__func__, ret);
			return (ENOBUFS);
		}
		m_head = m;
		*m_headp = m_head;

		if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head,
					segs, &nsegs, BUS_DMA_NOWAIT))) {

			ha->err_tx_dmamap_load++;

			device_printf(ha->pci_dev,
				"%s: bus_dmamap_load_mbuf_sg failed0[%d, %d]\n",
				__func__, ret, m_head->m_pkthdr.len);

			if (ret != ENOMEM) {
				m_freem(m_head);
				*m_headp = NULL;
			}
			return (ret);
		}

	} else if (ret) {

		ha->err_tx_dmamap_load++;

		device_printf(ha->pci_dev,
			"%s: bus_dmamap_load_mbuf_sg failed1[%d, %d]\n",
			__func__, ret, m_head->m_pkthdr.len);

		if (ret != ENOMEM) {
			m_freem(m_head);
			*m_headp = NULL;
		}
		return (ret);
	}

	QL_ASSERT(ha, (nsegs != 0), ("qls_send: empty packet"));

	bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE);

        if (!(ret = qls_hw_send(ha, segs, nsegs, tx_idx, m_head, txr_idx))) {

		ha->tx_ring[txr_idx].count++;
		ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head = m_head;
		ha->tx_ring[txr_idx].tx_buf[tx_idx].map = map;
	} else {
		if (ret == EINVAL) {
			if (m_head)
				m_freem(m_head);
			*m_headp = NULL;
		}
	}

	QL_DPRINT8((ha->pci_dev, "%s: exit\n", __func__));
	return (ret);
}

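/*
 * Name:	qls_stop
 * Function:	Stops the interface, pauses the watchdog and releases the
 *		transmit and receive buffers.
 */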
static void
qls_stop(qla_host_t *ha)
{
	struct ifnet *ifp = ha->ifp;
	device_t	dev;

	dev = ha->pci_dev;

	ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);

	ha->flags.qla_watchdog_pause = 1;

	while (!ha->qla_watchdog_paused)
		qls_mdelay(__func__, 1);

	qls_del_hw_if(ha);

	qls_free_lro(ha);

	qls_flush_xmt_bufs(ha);
	qls_free_rcv_bufs(ha);

	return;
}

/*
 * Buffer Management Functions for Transmit and Receive Rings
 */
/*
 * Release an mbuf after it has been sent on the wire
 */
static void
qls_flush_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb)
{
	QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));

	if (txb->m_head) {

		bus_dmamap_unload(ha->tx_tag, txb->map);

		m_freem(txb->m_head);
		txb->m_head = NULL;
	}

	QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__));
}

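/*
 * Name:	qls_flush_xmt_bufs
 * Function:	Releases any mbufs still attached to the transmit rings.
 */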
static void
qls_flush_xmt_bufs(qla_host_t *ha)
{
	int		i, j;

	for (j = 0; j < ha->num_tx_rings; j++) {
		for (i = 0; i < NUM_TX_DESCRIPTORS; i++)
			qls_flush_tx_buf(ha, &ha->tx_ring[j].tx_buf[i]);
	}

	return;
}

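/*
 * Name:	qls_alloc_rcv_mbufs
 * Function:	Creates the DMA maps and mbufs for receive ring 'r' and
 *		writes each buffer's physical address into the ring's small
 *		buffer queue entries.
 */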
static int
qls_alloc_rcv_mbufs(qla_host_t *ha, int r)
{
	int			i, j, ret = 0;
	qla_rx_buf_t		*rxb;
	qla_rx_ring_t		*rx_ring;
	volatile q81_bq_addr_e_t *sbq_e;

	rx_ring = &ha->rx_ring[r];

	for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {

		rxb = &rx_ring->rx_buf[i];

		ret = bus_dmamap_create(ha->rx_tag, BUS_DMA_NOWAIT, &rxb->map);

		if (ret) {
			device_printf(ha->pci_dev,
				"%s: dmamap[%d, %d] failed\n", __func__, r, i);

			for (j = 0; j < i; j++) {
				rxb = &rx_ring->rx_buf[j];
				bus_dmamap_destroy(ha->rx_tag, rxb->map);
			}
			goto qls_alloc_rcv_mbufs_err;
		}
	}

	rx_ring = &ha->rx_ring[r];

	sbq_e = rx_ring->sbq_vaddr;

	rxb = &rx_ring->rx_buf[0];

	for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {

		if (!(ret = qls_get_mbuf(ha, rxb, NULL))) {

			/*
			 * set the physical address in the
			 * corresponding descriptor entry in the
			 * receive ring/queue for the hba
			 */

			sbq_e->addr_lo = rxb->paddr & 0xFFFFFFFF;
			sbq_e->addr_hi = (rxb->paddr >> 32) & 0xFFFFFFFF;

		} else {
			device_printf(ha->pci_dev,
				"%s: qls_get_mbuf [%d, %d] failed\n",
					__func__, r, i);
			bus_dmamap_destroy(ha->rx_tag, rxb->map);
			goto qls_alloc_rcv_mbufs_err;
		}

		rxb++;
		sbq_e++;
	}
	return 0;

qls_alloc_rcv_mbufs_err:
	return (-1);
}

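/*
 * Name:	qls_free_rcv_bufs
 * Function:	Unloads and destroys the DMA maps and frees the mbufs for
 *		all receive rings.
 */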
static void
qls_free_rcv_bufs(qla_host_t *ha)
{
	int		i, r;
	qla_rx_buf_t	*rxb;
	qla_rx_ring_t	*rxr;

	for (r = 0; r < ha->num_rx_rings; r++) {

		rxr = &ha->rx_ring[r];

		for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {

			rxb = &rxr->rx_buf[i];

			if (rxb->m_head != NULL) {
				bus_dmamap_unload(ha->rx_tag, rxb->map);
				bus_dmamap_destroy(ha->rx_tag, rxb->map);
				m_freem(rxb->m_head);
			}
		}
		bzero(rxr->rx_buf, (sizeof(qla_rx_buf_t) * NUM_RX_DESCRIPTORS));
	}
	return;
}

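/*
 * Name:	qls_alloc_rcv_bufs
 * Function:	Allocates receive buffers for every receive ring; on
 *		failure all receive buffers are freed and an error returned.
 */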
static int
qls_alloc_rcv_bufs(qla_host_t *ha)
{
	int		r, ret = 0;
	qla_rx_ring_t	*rxr;

	for (r = 0; r < ha->num_rx_rings; r++) {
		rxr = &ha->rx_ring[r];
		bzero(rxr->rx_buf, (sizeof(qla_rx_buf_t) * NUM_RX_DESCRIPTORS));
	}

	for (r = 0; r < ha->num_rx_rings; r++) {

		ret = qls_alloc_rcv_mbufs(ha, r);

		if (ret) {
			/* unwind every ring and stop so the error is returned */
			qls_free_rcv_bufs(ha);
			break;
		}
	}

	return (ret);
}

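/*
 * Name:	qls_get_mbuf
 * Function:	Allocates (or reinitializes) a receive mbuf, aligns its data
 *		area to an 8 byte boundary and DMA-maps it for the hardware.
 */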
int
qls_get_mbuf(qla_host_t *ha, qla_rx_buf_t *rxb, struct mbuf *nmp)
{
	register struct mbuf *mp = nmp;
	struct ifnet   		*ifp;
	int            		ret = 0;
	uint32_t		offset;
	bus_dma_segment_t	segs[1];
	int			nsegs;

	QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));

	ifp = ha->ifp;

	if (mp == NULL) {

		mp = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, ha->msize);

		if (mp == NULL) {

			if (ha->msize == MCLBYTES)
				ha->err_m_getcl++;
			else
				ha->err_m_getjcl++;

			ret = ENOBUFS;
			device_printf(ha->pci_dev,
					"%s: m_getcl failed\n", __func__);
			goto exit_qls_get_mbuf;
		}
		mp->m_len = mp->m_pkthdr.len = ha->msize;
	} else {
		mp->m_len = mp->m_pkthdr.len = ha->msize;
		mp->m_data = mp->m_ext.ext_buf;
		mp->m_next = NULL;
	}

	/* align the receive buffers to 8 byte boundary */
	offset = (uint32_t)((unsigned long long)mp->m_data & 0x7ULL);
	if (offset) {
		offset = 8 - offset;
		m_adj(mp, offset);
	}

	/*
	 * Using memory from the mbuf cluster pool, invoke the bus_dma
	 * machinery to arrange the memory mapping.
	 */
	ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, rxb->map,
			mp, segs, &nsegs, BUS_DMA_NOWAIT);
	rxb->paddr = segs[0].ds_addr;

	if (ret || !rxb->paddr || (nsegs != 1)) {
		m_freem(mp);
		rxb->m_head = NULL;
		device_printf(ha->pci_dev,
			"%s: bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
			__func__, ret, (long long unsigned int)rxb->paddr,
			nsegs);
		ret = -1;
		goto exit_qls_get_mbuf;
	}
	rxb->m_head = mp;
	bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_PREREAD);

exit_qls_get_mbuf:
	QL_DPRINT2((ha->pci_dev, "%s: exit ret = 0x%08x\n", __func__, ret));
	return (ret);
}

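/*
 * Name:	qls_tx_done
 * Function:	Transmit taskqueue handler; restarts transmits via
 *		qls_start() while the interface is running.
 */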
static void
qls_tx_done(void *context, int pending)
{
	qla_host_t *ha = context;
	struct ifnet   *ifp;

	ifp = ha->ifp;

	if (!ifp)
		return;

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		QL_DPRINT8((ha->pci_dev, "%s: !IFF_DRV_RUNNING\n", __func__));
		return;
	}

	qls_start(ha->ifp);
	return;
}

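/*
 * Name:	qls_config_lro
 * Function:	Initializes software LRO for every receive ring.
 */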
static int
qls_config_lro(qla_host_t *ha)
{
        int i;
        struct lro_ctrl *lro;

        for (i = 0; i < ha->num_rx_rings; i++) {
                lro = &ha->rx_ring[i].lro;
                if (tcp_lro_init(lro)) {
                        device_printf(ha->pci_dev, "%s: tcp_lro_init failed\n",
                                __func__);
                        return (-1);
                }
                lro->ifp = ha->ifp;
        }
        ha->flags.lro_init = 1;

        QL_DPRINT2((ha->pci_dev, "%s: LRO initialized\n", __func__));
        return (0);
}

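/*
 * Name:	qls_free_lro
 * Function:	Frees the per-ring LRO state if it was initialized.
 */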
static void
qls_free_lro(qla_host_t *ha)
{
        int i;
        struct lro_ctrl *lro;

        if (!ha->flags.lro_init)
                return;

        for (i = 0; i < ha->num_rx_rings; i++) {
                lro = &ha->rx_ring[i].lro;
                tcp_lro_free(lro);
        }
        ha->flags.lro_init = 0;
}

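/*
 * Name:	qls_error_recovery
 * Function:	Error recovery taskqueue handler; reinitializes the
 *		interface after the watchdog requests recovery.
 */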
static void
qls_error_recovery(void *context, int pending)
{
        qla_host_t *ha = context;

	qls_init(ha);

	return;
}