/*-
 * Copyright (c) 2013-2014 Kevin Lo
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * ASIX Electronics AX88178A/AX88179 USB 2.0/3.0 gigabit ethernet driver.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/condvar.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>

#include <net/if.h>
#include <net/if_var.h>

#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdi_util.h>
#include "usbdevs.h"

#define	USB_DEBUG_VAR	axge_debug
#include <dev/usb/usb_debug.h>
#include <dev/usb/usb_process.h>

#include <dev/usb/net/usb_ethernet.h>
#include <dev/usb/net/if_axgereg.h>

/*
 * Various supported device vendors/products.
 */

static const STRUCT_USB_HOST_ID axge_devs[] = {
#define	AXGE_DEV(v,p) { USB_VP(USB_VENDOR_##v, USB_PRODUCT_##v##_##p) }
	AXGE_DEV(ASIX, AX88178A),
	AXGE_DEV(ASIX, AX88179),
	AXGE_DEV(DLINK, DUB1312),
	AXGE_DEV(SITECOMEU, LN032),
#undef AXGE_DEV
};

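/*
 * Bulk-in transfer parameter sets (ctrl, timer, size, ifg) written to
 * AXGE_RX_BULKIN_QCTRL; axge_miibus_statchg() selects one of these rows
 * based on the negotiated link speed and the USB bus speed.
 */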
static const struct {
	uint8_t	ctrl;
	uint8_t	timer_l;
	uint8_t	timer_h;
	uint8_t	size;
	uint8_t	ifg;
} __packed axge_bulk_size[] = {
	{ 7, 0x4f, 0x00, 0x12, 0xff },
	{ 7, 0x20, 0x03, 0x16, 0xff },
	{ 7, 0xae, 0x07, 0x18, 0xff },
	{ 7, 0xcc, 0x4c, 0x18, 0x08 }
};

/* prototypes */

static device_probe_t axge_probe;
static device_attach_t axge_attach;
static device_detach_t axge_detach;

static usb_callback_t axge_bulk_read_callback;
static usb_callback_t axge_bulk_write_callback;

static miibus_readreg_t axge_miibus_readreg;
static miibus_writereg_t axge_miibus_writereg;
static miibus_statchg_t axge_miibus_statchg;

static uether_fn_t axge_attach_post;
static uether_fn_t axge_init;
static uether_fn_t axge_stop;
static uether_fn_t axge_start;
static uether_fn_t axge_tick;
static uether_fn_t axge_setmulti;
static uether_fn_t axge_setpromisc;

static int	axge_read_mem(struct axge_softc *, uint8_t, uint16_t,
		    uint16_t, void *, int);
static void	axge_write_mem(struct axge_softc *, uint8_t, uint16_t,
		    uint16_t, void *, int);
static uint8_t	axge_read_cmd_1(struct axge_softc *, uint8_t, uint16_t);
static uint16_t	axge_read_cmd_2(struct axge_softc *, uint8_t, uint16_t,
		    uint16_t);
static void	axge_write_cmd_1(struct axge_softc *, uint8_t, uint16_t,
		    uint8_t);
static void	axge_write_cmd_2(struct axge_softc *, uint8_t, uint16_t,
		    uint16_t, uint16_t);
static void	axge_chip_init(struct axge_softc *);
static void	axge_reset(struct axge_softc *);

static int	axge_attach_post_sub(struct usb_ether *);
static int	axge_ifmedia_upd(struct ifnet *);
static void	axge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static int	axge_ioctl(struct ifnet *, u_long, caddr_t);
static void	axge_rx_frame(struct usb_ether *, struct usb_page_cache *, int);
static void	axge_rxeof(struct usb_ether *, struct usb_page_cache *,
		    unsigned int, unsigned int, uint32_t);
static void	axge_csum_cfg(struct usb_ether *);

#define	AXGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

#ifdef USB_DEBUG
static int axge_debug = 0;

static SYSCTL_NODE(_hw_usb, OID_AUTO, axge, CTLFLAG_RW, 0, "USB axge");
SYSCTL_INT(_hw_usb_axge, OID_AUTO, debug, CTLFLAG_RW, &axge_debug, 0,
    "Debug level");
#endif

static const struct usb_config axge_config[AXGE_N_TRANSFER] = {
	[AXGE_BULK_DT_WR] = {
		.type = UE_BULK,
		.endpoint = UE_ADDR_ANY,
		.direction = UE_DIR_OUT,
		.frames = 16,
		.bufsize = 16 * MCLBYTES,
		.flags = {.pipe_bof = 1,.force_short_xfer = 1,},
		.callback = axge_bulk_write_callback,
		.timeout = 10000,	/* 10 seconds */
	},
	[AXGE_BULK_DT_RD] = {
		.type = UE_BULK,
		.endpoint = UE_ADDR_ANY,
		.direction = UE_DIR_IN,
		.bufsize = 65536,
		.flags = {.pipe_bof = 1,.short_xfer_ok = 1,},
		.callback = axge_bulk_read_callback,
		.timeout = 0,		/* no timeout */
	},
};

static device_method_t axge_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe,		axge_probe),
	DEVMETHOD(device_attach,	axge_attach),
	DEVMETHOD(device_detach,	axge_detach),

	/* MII interface. */
	DEVMETHOD(miibus_readreg,	axge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	axge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	axge_miibus_statchg),

	DEVMETHOD_END
};

static driver_t axge_driver = {
	.name = "axge",
	.methods = axge_methods,
	.size = sizeof(struct axge_softc),
};

static devclass_t axge_devclass;

DRIVER_MODULE(axge, uhub, axge_driver, axge_devclass, NULL, NULL);
DRIVER_MODULE(miibus, axge, miibus_driver, miibus_devclass, NULL, NULL);
MODULE_DEPEND(axge, uether, 1, 1, 1);
MODULE_DEPEND(axge, usb, 1, 1, 1);
MODULE_DEPEND(axge, ether, 1, 1, 1);
MODULE_DEPEND(axge, miibus, 1, 1, 1);
MODULE_VERSION(axge, 1);

static const struct usb_ether_methods axge_ue_methods = {
	.ue_attach_post = axge_attach_post,
	.ue_attach_post_sub = axge_attach_post_sub,
	.ue_start = axge_start,
	.ue_init = axge_init,
	.ue_stop = axge_stop,
	.ue_tick = axge_tick,
	.ue_setmulti = axge_setmulti,
	.ue_setpromisc = axge_setpromisc,
	.ue_mii_upd = axge_ifmedia_upd,
	.ue_mii_sts = axge_ifmedia_sts,
};

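/*
 * Register access helpers.  All chip access goes through USB vendor
 * requests: 'cmd' selects the access type (e.g. AXGE_ACCESS_MAC or
 * AXGE_ACCESS_PHY), 'val' is placed in wValue and 'index' in wIndex.
 * MAC register callers pass the access width in 'index' and the register
 * offset in 'val', while PHY callers pass the PHY address in 'val' and
 * the MII register in 'index'.
 */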
static int
axge_read_mem(struct axge_softc *sc, uint8_t cmd, uint16_t index,
    uint16_t val, void *buf, int len)
{
	struct usb_device_request req;

	AXGE_LOCK_ASSERT(sc, MA_OWNED);

	req.bmRequestType = UT_READ_VENDOR_DEVICE;
	req.bRequest = cmd;
	USETW(req.wValue, val);
	USETW(req.wIndex, index);
	USETW(req.wLength, len);

	return (uether_do_request(&sc->sc_ue, &req, buf, 1000));
}

static void
axge_write_mem(struct axge_softc *sc, uint8_t cmd, uint16_t index,
    uint16_t val, void *buf, int len)
{
	struct usb_device_request req;

	AXGE_LOCK_ASSERT(sc, MA_OWNED);

	req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
	req.bRequest = cmd;
	USETW(req.wValue, val);
	USETW(req.wIndex, index);
	USETW(req.wLength, len);

	if (uether_do_request(&sc->sc_ue, &req, buf, 1000)) {
		/* Error ignored. */
	}
}

static uint8_t
axge_read_cmd_1(struct axge_softc *sc, uint8_t cmd, uint16_t reg)
{
	uint8_t val;

	axge_read_mem(sc, cmd, 1, reg, &val, 1);
	return (val);
}

static uint16_t
axge_read_cmd_2(struct axge_softc *sc, uint8_t cmd, uint16_t index,
    uint16_t reg)
{
	uint8_t val[2];

	axge_read_mem(sc, cmd, index, reg, &val, 2);
	return (UGETW(val));
}

static void
axge_write_cmd_1(struct axge_softc *sc, uint8_t cmd, uint16_t reg, uint8_t val)
{
	axge_write_mem(sc, cmd, 1, reg, &val, 1);
}

static void
axge_write_cmd_2(struct axge_softc *sc, uint8_t cmd, uint16_t index,
    uint16_t reg, uint16_t val)
{
	uint8_t temp[2];

	USETW(temp, val);
	axge_write_mem(sc, cmd, index, reg, &temp, 2);
}

static int
axge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct axge_softc *sc;
	uint16_t val;
	int locked;

	sc = device_get_softc(dev);
	locked = mtx_owned(&sc->sc_mtx);
	if (!locked)
		AXGE_LOCK(sc);

	val = axge_read_cmd_2(sc, AXGE_ACCESS_PHY, reg, phy);

	if (!locked)
		AXGE_UNLOCK(sc);

	return (val);
}

static int
axge_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct axge_softc *sc;
	int locked;

	sc = device_get_softc(dev);
	if (sc->sc_phyno != phy)
		return (0);
	locked = mtx_owned(&sc->sc_mtx);
	if (!locked)
		AXGE_LOCK(sc);

	axge_write_cmd_2(sc, AXGE_ACCESS_PHY, reg, phy, val);

	if (!locked)
		AXGE_UNLOCK(sc);

	return (0);
}

static void
axge_miibus_statchg(device_t dev)
{
	struct axge_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;
	uint8_t link_status, tmp[5];
	uint16_t val;
	int locked;

	sc = device_get_softc(dev);
	mii = GET_MII(sc);
	locked = mtx_owned(&sc->sc_mtx);
	if (!locked)
		AXGE_LOCK(sc);

	ifp = uether_getifp(&sc->sc_ue);
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		goto done;

	sc->sc_flags &= ~AXGE_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
		case IFM_1000_T:
			sc->sc_flags |= AXGE_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/* Lost link, do nothing. */
	if ((sc->sc_flags & AXGE_FLAG_LINK) == 0)
		goto done;

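	/*
	 * With a link established, program the medium status register and
	 * pick the bulk-in transfer parameters that match the negotiated
	 * speed and the USB bus speed reported by the PHY link status
	 * register.
	 */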
	link_status = axge_read_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_PLSR);

	val = 0;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		val |= MSR_FD;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			val |= MSR_TFC;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			val |= MSR_RFC;
	}
	val |= MSR_RE;
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:
		val |= MSR_GM | MSR_EN_125MHZ;
		if (link_status & PLSR_USB_SS)
			memcpy(tmp, &axge_bulk_size[0], 5);
		else if (link_status & PLSR_USB_HS)
			memcpy(tmp, &axge_bulk_size[1], 5);
		else
			memcpy(tmp, &axge_bulk_size[3], 5);
		break;
	case IFM_100_TX:
		val |= MSR_PS;
		if (link_status & (PLSR_USB_SS | PLSR_USB_HS))
			memcpy(tmp, &axge_bulk_size[2], 5);
		else
			memcpy(tmp, &axge_bulk_size[3], 5);
		break;
	case IFM_10_T:
		memcpy(tmp, &axge_bulk_size[3], 5);
		break;
	}
	/* Rx bulk configuration. */
	axge_write_mem(sc, AXGE_ACCESS_MAC, 5, AXGE_RX_BULKIN_QCTRL, tmp, 5);
	axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_MSR, val);
done:
	if (!locked)
		AXGE_UNLOCK(sc);
}

static void
axge_chip_init(struct axge_softc *sc)
{
	/* Power up ethernet PHY. */
	axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_EPPRCR, 0);
	axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_EPPRCR, EPPRCR_IPRL);
	uether_pause(&sc->sc_ue, hz / 4);
	axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_CLK_SELECT,
	    AXGE_CLK_SELECT_ACS | AXGE_CLK_SELECT_BCS);
	uether_pause(&sc->sc_ue, hz / 10);
}

static void
axge_reset(struct axge_softc *sc)
{
	struct usb_config_descriptor *cd;
	usb_error_t err;

	cd = usbd_get_config_descriptor(sc->sc_ue.ue_udev);

	err = usbd_req_set_config(sc->sc_ue.ue_udev, &sc->sc_mtx,
	    cd->bConfigurationValue);
	if (err)
		DPRINTF("reset failed (ignored)\n");

	/* Wait a little while for the chip to get its brains in order. */
	uether_pause(&sc->sc_ue, hz / 100);

	/* Reinitialize controller to achieve full reset. */
	axge_chip_init(sc);
}

static void
axge_attach_post(struct usb_ether *ue)
{
	struct axge_softc *sc;

	sc = uether_getsc(ue);
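	/* The internal PHY of the AX88178A/AX88179 sits at address 3. */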
	sc->sc_phyno = 3;

	/* Initialize controller and get station address. */
	axge_chip_init(sc);
	axge_read_mem(sc, AXGE_ACCESS_MAC, ETHER_ADDR_LEN, AXGE_NIDR,
	    ue->ue_eaddr, ETHER_ADDR_LEN);
}

static int
axge_attach_post_sub(struct usb_ether *ue)
{
	struct axge_softc *sc;
	struct ifnet *ifp;
	int error;

	sc = uether_getsc(ue);
	ifp = ue->ue_ifp;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = uether_start;
	ifp->if_ioctl = axge_ioctl;
	ifp->if_init = uether_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_TXCSUM | IFCAP_RXCSUM;
	ifp->if_hwassist = AXGE_CSUM_FEATURES;
	ifp->if_capenable = ifp->if_capabilities;

	mtx_lock(&Giant);
	error = mii_attach(ue->ue_dev, &ue->ue_miibus, ifp,
	    uether_ifmedia_upd, ue->ue_methods->ue_mii_sts,
	    BMSR_DEFCAPMASK, sc->sc_phyno, MII_OFFSET_ANY, MIIF_DOPAUSE);
	mtx_unlock(&Giant);

	return (error);
}

/*
 * Set media options.
 */
static int
axge_ifmedia_upd(struct ifnet *ifp)
{
	struct axge_softc *sc;
	struct mii_data *mii;
	struct mii_softc *miisc;
	int error;

	sc = ifp->if_softc;
	mii = GET_MII(sc);
	AXGE_LOCK_ASSERT(sc, MA_OWNED);

	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
	    PHY_RESET(miisc);
	error = mii_mediachg(mii);

	return (error);
}

/*
 * Report current media status.
 */
static void
axge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct axge_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	mii = GET_MII(sc);
	AXGE_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	AXGE_UNLOCK(sc);
}

/*
 * Probe for an AX88178A/AX88179 chip.
 */
static int
axge_probe(device_t dev)
{
	struct usb_attach_arg *uaa;

	uaa = device_get_ivars(dev);
	if (uaa->usb_mode != USB_MODE_HOST)
		return (ENXIO);
	if (uaa->info.bConfigIndex != AXGE_CONFIG_IDX)
		return (ENXIO);
	if (uaa->info.bIfaceIndex != AXGE_IFACE_IDX)
		return (ENXIO);

	return (usbd_lookup_id_by_uaa(axge_devs, sizeof(axge_devs), uaa));
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
axge_attach(device_t dev)
{
	struct usb_attach_arg *uaa;
	struct axge_softc *sc;
	struct usb_ether *ue;
	uint8_t iface_index;
	int error;

	uaa = device_get_ivars(dev);
	sc = device_get_softc(dev);
	ue = &sc->sc_ue;

	device_set_usb_desc(dev);
	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), NULL, MTX_DEF);

	iface_index = AXGE_IFACE_IDX;
	error = usbd_transfer_setup(uaa->device, &iface_index,
	    sc->sc_xfer, axge_config, AXGE_N_TRANSFER, sc, &sc->sc_mtx);
	if (error) {
		device_printf(dev, "allocating USB transfers failed\n");
		goto detach;
	}

	ue->ue_sc = sc;
	ue->ue_dev = dev;
	ue->ue_udev = uaa->device;
	ue->ue_mtx = &sc->sc_mtx;
	ue->ue_methods = &axge_ue_methods;

	error = uether_ifattach(ue);
	if (error) {
		device_printf(dev, "could not attach interface\n");
		goto detach;
	}
	return (0);			/* success */

detach:
	axge_detach(dev);
	return (ENXIO);			/* failure */
}

static int
axge_detach(device_t dev)
{
	struct axge_softc *sc;
	struct usb_ether *ue;

	sc = device_get_softc(dev);
	ue = &sc->sc_ue;
	usbd_transfer_unsetup(sc->sc_xfer, AXGE_N_TRANSFER);
	uether_ifdetach(ue);
	mtx_destroy(&sc->sc_mtx);

	return (0);
}

static void
axge_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct axge_softc *sc;
	struct usb_ether *ue;
	struct usb_page_cache *pc;
	int actlen;

	sc = usbd_xfer_softc(xfer);
	ue = &sc->sc_ue;
	usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		pc = usbd_xfer_get_frame(xfer, 0);
		axge_rx_frame(ue, pc, actlen);

		/* FALLTHROUGH */
	case USB_ST_SETUP:
tr_setup:
		usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
		usbd_transfer_submit(xfer);
		uether_rxflush(ue);
		break;

	default:
		if (error != USB_ERR_CANCELLED) {
			usbd_xfer_set_stall(xfer);
			goto tr_setup;
		}
		break;
	}
}

static void
axge_bulk_write_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct axge_softc *sc;
	struct ifnet *ifp;
	struct usb_page_cache *pc;
	struct mbuf *m;
	uint32_t txhdr;
	int nframes, pos;

	sc = usbd_xfer_softc(xfer);
	ifp = uether_getifp(&sc->sc_ue);

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		/* FALLTHROUGH */
	case USB_ST_SETUP:
tr_setup:
		if ((sc->sc_flags & AXGE_FLAG_LINK) == 0 ||
		    (ifp->if_drv_flags & IFF_DRV_OACTIVE) != 0) {
			/*
			 * Don't send anything if there is no link or
			 * the controller is busy.
			 */
			return;
		}

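		/*
		 * Queue up to 16 frames per transfer.  Each frame gets an
		 * 8-byte header in front of the packet data: the first
		 * little-endian 32-bit word holds the packet length, the
		 * second holds the flag bits.
		 */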
		for (nframes = 0; nframes < 16 &&
		    !IFQ_DRV_IS_EMPTY(&ifp->if_snd); nframes++) {
			IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
			if (m == NULL)
				break;
			usbd_xfer_set_frame_offset(xfer, nframes * MCLBYTES,
				nframes);
			pos = 0;
			pc = usbd_xfer_get_frame(xfer, nframes);
			txhdr = htole32(m->m_pkthdr.len);
			usbd_copy_in(pc, 0, &txhdr, sizeof(txhdr));
			/*
			 * The second header word carries the flag bits.
			 * Mark frames whose total length (8-byte header
			 * plus packet) is an exact multiple of the bulk
			 * endpoint's maximum packet size before the word
			 * is copied into the transfer buffer.
			 */
			txhdr = 0;
			if (((m->m_pkthdr.len + 8) %
			    usbd_xfer_max_framelen(xfer)) == 0)
				txhdr |= 0x80008000;
			txhdr = htole32(txhdr);
			usbd_copy_in(pc, 4, &txhdr, sizeof(txhdr));
			pos += 8;
			usbd_m_copy_in(pc, pos, m, 0, m->m_pkthdr.len);
			pos += m->m_pkthdr.len;

			/*
			 * XXX
			 * Update the TX packet counter here.  This is not
			 * the correct place to do it, but there is no way
			 * to know how many packets were sent at the end of
			 * the transfer because the controller combines
			 * multiple writes into a single one when there is
			 * room in its TX buffer.
			 */
			ifp->if_opackets++;

			/*
			 * If there's a BPF listener, bounce a copy
			 * of this frame to it.
			 */
			BPF_MTAP(ifp, m);

			m_freem(m);

			/* Set frame length. */
			usbd_xfer_set_frame_len(xfer, nframes, pos);
		}
		if (nframes != 0) {
			usbd_xfer_set_frames(xfer, nframes);
			usbd_transfer_submit(xfer);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
		}
		return;
		/* NOTREACHED */
	default:
		ifp->if_oerrors++;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

		if (error != USB_ERR_CANCELLED) {
			usbd_xfer_set_stall(xfer);
			goto tr_setup;
		}
		return;
	}
}

static void
axge_tick(struct usb_ether *ue)
{
	struct axge_softc *sc;
	struct mii_data *mii;

	sc = uether_getsc(ue);
	mii = GET_MII(sc);
	AXGE_LOCK_ASSERT(sc, MA_OWNED);

	mii_tick(mii);
	if ((sc->sc_flags & AXGE_FLAG_LINK) == 0) {
		axge_miibus_statchg(ue->ue_dev);
		if ((sc->sc_flags & AXGE_FLAG_LINK) != 0)
			axge_start(ue);
	}
}

static void
axge_setmulti(struct usb_ether *ue)
{
	struct axge_softc *sc;
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t h;
	uint16_t rxmode;
	uint8_t hashtbl[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };

	sc = uether_getsc(ue);
	ifp = uether_getifp(ue);
	h = 0;
	AXGE_LOCK_ASSERT(sc, MA_OWNED);

	rxmode = axge_read_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_RCR);
	if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) {
		rxmode |= RCR_AMALL;
		axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_RCR, rxmode);
		return;
	}
	rxmode &= ~RCR_AMALL;

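	/*
	 * Build the 64-bit multicast hash filter: the upper 6 bits of the
	 * big-endian CRC of each multicast address select a bit in the
	 * 8-byte table written to AXGE_MFA.
	 */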
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
		hashtbl[h / 8] |= 1 << (h % 8);
	}
	if_maddr_runlock(ifp);

	axge_write_mem(sc, AXGE_ACCESS_MAC, 8, AXGE_MFA, (void *)&hashtbl, 8);
	axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_RCR, rxmode);
}

static void
axge_setpromisc(struct usb_ether *ue)
{
	struct axge_softc *sc;
	struct ifnet *ifp;
	uint16_t rxmode;

	sc = uether_getsc(ue);
	ifp = uether_getifp(ue);
	rxmode = axge_read_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_RCR);

	if (ifp->if_flags & IFF_PROMISC)
		rxmode |= RCR_PRO;
	else
		rxmode &= ~RCR_PRO;

	axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_RCR, rxmode);
	axge_setmulti(ue);
}

static void
axge_start(struct usb_ether *ue)
{
	struct axge_softc *sc;

	sc = uether_getsc(ue);
	/*
	 * Start the USB transfers, if not already started.
	 */
	usbd_transfer_start(sc->sc_xfer[AXGE_BULK_DT_RD]);
	usbd_transfer_start(sc->sc_xfer[AXGE_BULK_DT_WR]);
}

static void
axge_init(struct usb_ether *ue)
{
	struct axge_softc *sc;
	struct ifnet *ifp;
	uint16_t rxmode;

	sc = uether_getsc(ue);
	ifp = uether_getifp(ue);
	AXGE_LOCK_ASSERT(sc, MA_OWNED);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	axge_stop(ue);

	axge_reset(sc);

	/* Set MAC address. */
	axge_write_mem(sc, AXGE_ACCESS_MAC, ETHER_ADDR_LEN, AXGE_NIDR,
	    IF_LLADDR(ifp), ETHER_ADDR_LEN);

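	/*
	 * Set the pause water level registers; going by the register names
	 * these are the low/high RX FIFO watermarks used for flow control.
	 */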
	axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_PWLLR, 0x34);
	axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_PWLHR, 0x52);

	/* Configure TX/RX checksum offloading. */
	axge_csum_cfg(ue);

	/* Configure RX settings. */
	rxmode = (RCR_AM | RCR_SO | RCR_DROP_CRCE);
	if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
		rxmode |= RCR_IPE;

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		rxmode |= RCR_PRO;

	if (ifp->if_flags & IFF_BROADCAST)
		rxmode |= RCR_AB;

	axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_RCR, rxmode);

	axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_MMSR,
	    MMSR_PME_TYPE | MMSR_PME_POL | MMSR_RWMP);

	/* Load the multicast filter. */
	axge_setmulti(ue);

	usbd_xfer_set_stall(sc->sc_xfer[AXGE_BULK_DT_WR]);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	/* Switch to selected media. */
	axge_ifmedia_upd(ifp);
}

static void
axge_stop(struct usb_ether *ue)
{
	struct axge_softc *sc;
	struct ifnet *ifp;

	sc = uether_getsc(ue);
	ifp = uether_getifp(ue);

	AXGE_LOCK_ASSERT(sc, MA_OWNED);

	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->sc_flags &= ~AXGE_FLAG_LINK;

	/*
	 * Stop all the transfers, if not already stopped:
	 */
	usbd_transfer_stop(sc->sc_xfer[AXGE_BULK_DT_WR]);
	usbd_transfer_stop(sc->sc_xfer[AXGE_BULK_DT_RD]);
}

static int
axge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct usb_ether *ue;
	struct axge_softc *sc;
	struct ifreq *ifr;
	int error, mask, reinit;

	ue = ifp->if_softc;
	sc = uether_getsc(ue);
	ifr = (struct ifreq *)data;
	error = 0;
	reinit = 0;
	if (cmd == SIOCSIFCAP) {
		AXGE_LOCK(sc);
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
				ifp->if_hwassist |= AXGE_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~AXGE_CSUM_FEATURES;
			reinit++;
		}
		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_RXCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
			reinit++;
		}
		if (reinit > 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		else
			reinit = 0;
		AXGE_UNLOCK(sc);
		if (reinit > 0)
			uether_init(ue);
	} else
		error = uether_ioctl(ifp, cmd, data);

	return (error);
}

static void
axge_rx_frame(struct usb_ether *ue, struct usb_page_cache *pc, int actlen)
{
	uint32_t pos;
	uint32_t pkt_cnt;
	uint32_t rxhdr;
	uint32_t pkt_hdr;
	uint32_t hdr_off;
	uint32_t pktlen;

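	/*
	 * The controller places a 32-bit trailer at the end of every
	 * bulk-in transfer: the low 16 bits hold the packet count and the
	 * high 16 bits the offset of the per-packet header array.  Each
	 * 32-bit packet header carries error/checksum flags and, in bits
	 * 16-28, the packet length; packets are padded to 8-byte
	 * boundaries within the buffer.
	 */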
	/* Verify we have enough data. */
	if (actlen < (int)sizeof(rxhdr))
		return;

	pos = 0;

	usbd_copy_out(pc, actlen - sizeof(rxhdr), &rxhdr, sizeof(rxhdr));
	rxhdr = le32toh(rxhdr);

	pkt_cnt = (uint16_t)rxhdr;
	hdr_off = (uint16_t)(rxhdr >> 16);

	while (pkt_cnt--) {
		/* Verify the header offset. */
		if ((int)(hdr_off + sizeof(pkt_hdr)) > actlen) {
			DPRINTF("End of packet headers\n");
			break;
		}
		if ((int)pos >= actlen) {
			DPRINTF("Data position reached end\n");
			break;
		}
		usbd_copy_out(pc, hdr_off, &pkt_hdr, sizeof(pkt_hdr));

		pkt_hdr = le32toh(pkt_hdr);
		pktlen = (pkt_hdr >> 16) & 0x1fff;
		if (pkt_hdr & (AXGE_RXHDR_CRC_ERR | AXGE_RXHDR_DROP_ERR)) {
			DPRINTF("Dropped a packet\n");
			ue->ue_ifp->if_ierrors++;
		}
		if (pktlen >= 6 && (int)(pos + pktlen) <= actlen) {
			axge_rxeof(ue, pc, pos + 2, pktlen - 6, pkt_hdr);
		} else {
			DPRINTF("Invalid packet pos=%d len=%d\n",
			    (int)pos, (int)pktlen);
		}
		pos += (pktlen + 7) & ~7;
		hdr_off += sizeof(pkt_hdr);
	}
}

static void
axge_rxeof(struct usb_ether *ue, struct usb_page_cache *pc,
    unsigned int offset, unsigned int len, uint32_t pkt_hdr)
{
	struct ifnet *ifp;
	struct mbuf *m;

	ifp = ue->ue_ifp;
	if (len < ETHER_HDR_LEN || len > MCLBYTES - ETHER_ALIGN) {
		ifp->if_ierrors++;
		return;
	}

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL) {
		ifp->if_iqdrops++;
		return;
	}
	m->m_pkthdr.rcvif = ifp;
	m->m_len = m->m_pkthdr.len = len + ETHER_ALIGN;
	m_adj(m, ETHER_ALIGN);

	usbd_copy_out(pc, offset, mtod(m, uint8_t *), len);

	ifp->if_ipackets++;

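	/*
	 * The RX header carries L3/L4 checksum error bits; when neither is
	 * set, mark TCP and UDP packets as having valid checksums.
	 */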
	if ((pkt_hdr & (AXGE_RXHDR_L4CSUM_ERR | AXGE_RXHDR_L3CSUM_ERR)) == 0) {
		if ((pkt_hdr & AXGE_RXHDR_L4_TYPE_MASK) ==
		    AXGE_RXHDR_L4_TYPE_TCP ||
		    (pkt_hdr & AXGE_RXHDR_L4_TYPE_MASK) ==
		    AXGE_RXHDR_L4_TYPE_UDP) {
			m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
			    CSUM_PSEUDO_HDR | CSUM_IP_CHECKED | CSUM_IP_VALID;
			m->m_pkthdr.csum_data = 0xffff;
		}
	}

	_IF_ENQUEUE(&ue->ue_rxq, m);
}

static void
axge_csum_cfg(struct usb_ether *ue)
{
	struct axge_softc *sc;
	struct ifnet *ifp;
	uint8_t csum;

	sc = uether_getsc(ue);
	AXGE_LOCK_ASSERT(sc, MA_OWNED);
	ifp = uether_getifp(ue);

	csum = 0;
	if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
		csum |= CTCR_IP | CTCR_TCP | CTCR_UDP;
	axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_CTCR, csum);

	csum = 0;
	if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
		csum |= CRCR_IP | CRCR_TCP | CRCR_UDP;
	axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_CRCR, csum);
}