1/*	$OpenBSD: if_sk.c,v 2.33 2003/08/12 05:23:06 nate Exp $	*/
2
3/*-
4 * Copyright (c) 1997, 1998, 1999, 2000
5 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in the
14 *    documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 *    must display the following acknowledgement:
17 *	This product includes software developed by Bill Paul.
18 * 4. Neither the name of the author nor the names of any co-contributors
19 *    may be used to endorse or promote products derived from this software
20 *    without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
33 */
34/*-
35 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
36 *
37 * Permission to use, copy, modify, and distribute this software for any
38 * purpose with or without fee is hereby granted, provided that the above
39 * copyright notice and this permission notice appear in all copies.
40 *
41 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
42 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
43 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
44 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
45 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
46 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
47 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
48 */
49
50#include <sys/cdefs.h>
51__FBSDID("$FreeBSD$");
52
53/*
54 * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports
55 * the SK-984x series adapters, both single port and dual port.
56 * References:
57 * 	The XaQti XMAC II datasheet,
58 *  http://www.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
59 *	The SysKonnect GEnesis manual, http://www.syskonnect.com
60 *
61 * Note: XaQti has been acquired by Vitesse, and Vitesse does not have the
62 * XMAC II datasheet online. I have put my copy at people.freebsd.org as a
63 * convenience to others until Vitesse corrects this problem:
64 *
65 * http://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
66 *
67 * Written by Bill Paul <wpaul@ee.columbia.edu>
68 * Department of Electrical Engineering
69 * Columbia University, New York City
70 */
71/*
72 * The SysKonnect gigabit ethernet adapters consist of two main
73 * components: the SysKonnect GEnesis controller chip and the XaQti Corp.
74 * XMAC II gigabit ethernet MAC. The XMAC provides all of the MAC
75 * components and a PHY while the GEnesis controller provides a PCI
76 * interface with DMA support. Each card may have between 512K and
77 * 2MB of SRAM on board depending on the configuration.
78 *
79 * The SysKonnect GEnesis controller can have either one or two XMAC
80 * chips connected to it, allowing single or dual port NIC configurations.
81 * SysKonnect has the distinction of being the only vendor on the market
82 * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs,
83 * dual DMA queues, packet/MAC/transmit arbiters and direct access to the
84 * XMAC registers. This driver takes advantage of these features to allow
85 * both XMACs to operate as independent interfaces.
86 */
87
88#include <sys/param.h>
89#include <sys/systm.h>
90#include <sys/bus.h>
91#include <sys/endian.h>
92#include <sys/mbuf.h>
93#include <sys/malloc.h>
94#include <sys/kernel.h>
95#include <sys/module.h>
96#include <sys/socket.h>
97#include <sys/sockio.h>
98#include <sys/queue.h>
99#include <sys/sysctl.h>
100
101#include <net/bpf.h>
102#include <net/ethernet.h>
103#include <net/if.h>
104#include <net/if_arp.h>
105#include <net/if_dl.h>
106#include <net/if_media.h>
107#include <net/if_types.h>
108#include <net/if_vlan_var.h>
109
110#include <netinet/in.h>
111#include <netinet/in_systm.h>
112#include <netinet/ip.h>
113
114#include <machine/bus.h>
115#include <machine/in_cksum.h>
116#include <machine/resource.h>
117#include <sys/rman.h>
118
119#include <dev/mii/mii.h>
120#include <dev/mii/miivar.h>
121#include <dev/mii/brgphyreg.h>
122
123#include <dev/pci/pcireg.h>
124#include <dev/pci/pcivar.h>
125
126#if 0
127#define SK_USEIOSPACE
128#endif
129
130#include <dev/sk/if_skreg.h>
131#include <dev/sk/xmaciireg.h>
132#include <dev/sk/yukonreg.h>
133
134MODULE_DEPEND(sk, pci, 1, 1, 1);
135MODULE_DEPEND(sk, ether, 1, 1, 1);
136MODULE_DEPEND(sk, miibus, 1, 1, 1);
137
138/* "device miibus" required.  See GENERIC if you get errors here. */
139#include "miibus_if.h"
140
141#ifndef lint
142static const char rcsid[] =
143  "$FreeBSD$";
144#endif
145
146static const struct sk_type sk_devs[] = {
147	{
148		VENDORID_SK,
149		DEVICEID_SK_V1,
150		"SysKonnect Gigabit Ethernet (V1.0)"
151	},
152	{
153		VENDORID_SK,
154		DEVICEID_SK_V2,
155		"SysKonnect Gigabit Ethernet (V2.0)"
156	},
157	{
158		VENDORID_MARVELL,
159		DEVICEID_SK_V2,
160		"Marvell Gigabit Ethernet"
161	},
162	{
163		VENDORID_MARVELL,
164		DEVICEID_BELKIN_5005,
165		"Belkin F5D5005 Gigabit Ethernet"
166	},
167	{
168		VENDORID_3COM,
169		DEVICEID_3COM_3C940,
170		"3Com 3C940 Gigabit Ethernet"
171	},
172	{
173		VENDORID_LINKSYS,
174		DEVICEID_LINKSYS_EG1032,
175		"Linksys EG1032 Gigabit Ethernet"
176	},
177	{
178		VENDORID_DLINK,
179		DEVICEID_DLINK_DGE530T_A1,
180		"D-Link DGE-530T Gigabit Ethernet"
181	},
182	{
183		VENDORID_DLINK,
184		DEVICEID_DLINK_DGE530T_B1,
185		"D-Link DGE-530T Gigabit Ethernet"
186	},
187	{ 0, 0, NULL }
188};
189
190static int skc_probe(device_t);
191static int skc_attach(device_t);
192static int skc_detach(device_t);
193static int skc_shutdown(device_t);
194static int skc_suspend(device_t);
195static int skc_resume(device_t);
196static bus_dma_tag_t skc_get_dma_tag(device_t, device_t);
197static int sk_detach(device_t);
198static int sk_probe(device_t);
199static int sk_attach(device_t);
200static void sk_tick(void *);
201static void sk_yukon_tick(void *);
202static void sk_intr(void *);
203static void sk_intr_xmac(struct sk_if_softc *);
204static void sk_intr_bcom(struct sk_if_softc *);
205static void sk_intr_yukon(struct sk_if_softc *);
206static __inline void sk_rxcksum(struct ifnet *, struct mbuf *, u_int32_t);
207static __inline int sk_rxvalid(struct sk_softc *, u_int32_t, u_int32_t);
208static void sk_rxeof(struct sk_if_softc *);
209static void sk_jumbo_rxeof(struct sk_if_softc *);
210static void sk_txeof(struct sk_if_softc *);
211static void sk_txcksum(struct ifnet *, struct mbuf *, struct sk_tx_desc *);
212static int sk_encap(struct sk_if_softc *, struct mbuf **);
213static void sk_start(struct ifnet *);
214static void sk_start_locked(struct ifnet *);
215static int sk_ioctl(struct ifnet *, u_long, caddr_t);
216static void sk_init(void *);
217static void sk_init_locked(struct sk_if_softc *);
218static void sk_init_xmac(struct sk_if_softc *);
219static void sk_init_yukon(struct sk_if_softc *);
220static void sk_stop(struct sk_if_softc *);
221static void sk_watchdog(void *);
222static int sk_ifmedia_upd(struct ifnet *);
223static void sk_ifmedia_sts(struct ifnet *, struct ifmediareq *);
224static void sk_reset(struct sk_softc *);
225static __inline void sk_discard_rxbuf(struct sk_if_softc *, int);
226static __inline void sk_discard_jumbo_rxbuf(struct sk_if_softc *, int);
227static int sk_newbuf(struct sk_if_softc *, int);
228static int sk_jumbo_newbuf(struct sk_if_softc *, int);
229static void sk_dmamap_cb(void *, bus_dma_segment_t *, int, int);
230static int sk_dma_alloc(struct sk_if_softc *);
231static int sk_dma_jumbo_alloc(struct sk_if_softc *);
232static void sk_dma_free(struct sk_if_softc *);
233static void sk_dma_jumbo_free(struct sk_if_softc *);
234static int sk_init_rx_ring(struct sk_if_softc *);
235static int sk_init_jumbo_rx_ring(struct sk_if_softc *);
236static void sk_init_tx_ring(struct sk_if_softc *);
237static u_int32_t sk_win_read_4(struct sk_softc *, int);
238static u_int16_t sk_win_read_2(struct sk_softc *, int);
239static u_int8_t sk_win_read_1(struct sk_softc *, int);
240static void sk_win_write_4(struct sk_softc *, int, u_int32_t);
241static void sk_win_write_2(struct sk_softc *, int, u_int32_t);
242static void sk_win_write_1(struct sk_softc *, int, u_int32_t);
243
244static int sk_miibus_readreg(device_t, int, int);
245static int sk_miibus_writereg(device_t, int, int, int);
246static void sk_miibus_statchg(device_t);
247
248static int sk_xmac_miibus_readreg(struct sk_if_softc *, int, int);
249static int sk_xmac_miibus_writereg(struct sk_if_softc *, int, int,
250						int);
251static void sk_xmac_miibus_statchg(struct sk_if_softc *);
252
253static int sk_marv_miibus_readreg(struct sk_if_softc *, int, int);
254static int sk_marv_miibus_writereg(struct sk_if_softc *, int, int,
255						int);
256static void sk_marv_miibus_statchg(struct sk_if_softc *);
257
258static uint32_t sk_xmchash(const uint8_t *);
259static void sk_setfilt(struct sk_if_softc *, u_int16_t *, int);
260static void sk_rxfilter(struct sk_if_softc *);
261static void sk_rxfilter_genesis(struct sk_if_softc *);
262static void sk_rxfilter_yukon(struct sk_if_softc *);
263
264static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high);
265static int sysctl_hw_sk_int_mod(SYSCTL_HANDLER_ARGS);
266
267/* Tunables. */
268static int jumbo_disable = 0;
269TUNABLE_INT("hw.skc.jumbo_disable", &jumbo_disable);
270
271/*
272 * It seems that SK-NET GENESIS supports very simple checksum offload
273 * capability for Tx and I believe it can generate 0 checksum value for
274 * UDP packets in Tx as the hardware can't differenciate UDP packets from
275 * TCP packets. 0 chcecksum value for UDP packet is an invalid one as it
276 * means sender didn't perforam checksum computation. For the safety I
277 * disabled UDP checksum offload capability at the moment. Alternatively
278 * we can intrduce a LINK0/LINK1 flag as hme(4) did in its Tx checksum
279 * offload routine.
280 */
281#define SK_CSUM_FEATURES	(CSUM_TCP)
282
283/*
284 * Note that we have newbus methods for both the GEnesis controller
285 * itself and the XMAC(s). The XMACs are children of the GEnesis, and
286 * the miibus code is a child of the XMACs. We need to do it this way
287 * so that the miibus drivers can access the PHY registers on the
288 * right PHY. It's not quite what I had in mind, but it's the only
289 * design that achieves the desired effect.
290 */
291static device_method_t skc_methods[] = {
292	/* Device interface */
293	DEVMETHOD(device_probe,		skc_probe),
294	DEVMETHOD(device_attach,	skc_attach),
295	DEVMETHOD(device_detach,	skc_detach),
296	DEVMETHOD(device_suspend,	skc_suspend),
297	DEVMETHOD(device_resume,	skc_resume),
298	DEVMETHOD(device_shutdown,	skc_shutdown),
299
300	DEVMETHOD(bus_get_dma_tag,	skc_get_dma_tag),
301
302	DEVMETHOD_END
303};
304
305static driver_t skc_driver = {
306	"skc",
307	skc_methods,
308	sizeof(struct sk_softc)
309};
310
311static devclass_t skc_devclass;
312
313static device_method_t sk_methods[] = {
314	/* Device interface */
315	DEVMETHOD(device_probe,		sk_probe),
316	DEVMETHOD(device_attach,	sk_attach),
317	DEVMETHOD(device_detach,	sk_detach),
318	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
319
320	/* MII interface */
321	DEVMETHOD(miibus_readreg,	sk_miibus_readreg),
322	DEVMETHOD(miibus_writereg,	sk_miibus_writereg),
323	DEVMETHOD(miibus_statchg,	sk_miibus_statchg),
324
325	DEVMETHOD_END
326};
327
328static driver_t sk_driver = {
329	"sk",
330	sk_methods,
331	sizeof(struct sk_if_softc)
332};
333
334static devclass_t sk_devclass;
335
336DRIVER_MODULE(skc, pci, skc_driver, skc_devclass, NULL, NULL);
337DRIVER_MODULE(sk, skc, sk_driver, sk_devclass, NULL, NULL);
338DRIVER_MODULE(miibus, sk, miibus_driver, miibus_devclass, NULL, NULL);
339
340static struct resource_spec sk_res_spec_io[] = {
341	{ SYS_RES_IOPORT,	PCIR_BAR(1),	RF_ACTIVE },
342	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE },
343	{ -1,			0,		0 }
344};
345
346static struct resource_spec sk_res_spec_mem[] = {
347	{ SYS_RES_MEMORY,	PCIR_BAR(0),	RF_ACTIVE },
348	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE },
349	{ -1,			0,		0 }
350};
351
352#define SK_SETBIT(sc, reg, x)		\
353	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | x)
354
355#define SK_CLRBIT(sc, reg, x)		\
356	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~x)
357
358#define SK_WIN_SETBIT_4(sc, reg, x)	\
359	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) | x)
360
361#define SK_WIN_CLRBIT_4(sc, reg, x)	\
362	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) & ~x)
363
364#define SK_WIN_SETBIT_2(sc, reg, x)	\
365	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) | x)
366
367#define SK_WIN_CLRBIT_2(sc, reg, x)	\
368	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) & ~x)
369
370static u_int32_t
371sk_win_read_4(sc, reg)
372	struct sk_softc		*sc;
373	int			reg;
374{
375#ifdef SK_USEIOSPACE
376	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
377	return(CSR_READ_4(sc, SK_WIN_BASE + SK_REG(reg)));
378#else
379	return(CSR_READ_4(sc, reg));
380#endif
381}
382
383static u_int16_t
384sk_win_read_2(sc, reg)
385	struct sk_softc		*sc;
386	int			reg;
387{
388#ifdef SK_USEIOSPACE
389	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
390	return(CSR_READ_2(sc, SK_WIN_BASE + SK_REG(reg)));
391#else
392	return(CSR_READ_2(sc, reg));
393#endif
394}
395
396static u_int8_t
397sk_win_read_1(sc, reg)
398	struct sk_softc		*sc;
399	int			reg;
400{
401#ifdef SK_USEIOSPACE
402	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
403	return(CSR_READ_1(sc, SK_WIN_BASE + SK_REG(reg)));
404#else
405	return(CSR_READ_1(sc, reg));
406#endif
407}
408
409static void
410sk_win_write_4(sc, reg, val)
411	struct sk_softc		*sc;
412	int			reg;
413	u_int32_t		val;
414{
415#ifdef SK_USEIOSPACE
416	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
417	CSR_WRITE_4(sc, SK_WIN_BASE + SK_REG(reg), val);
418#else
419	CSR_WRITE_4(sc, reg, val);
420#endif
421	return;
422}
423
424static void
425sk_win_write_2(sc, reg, val)
426	struct sk_softc		*sc;
427	int			reg;
428	u_int32_t		val;
429{
430#ifdef SK_USEIOSPACE
431	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
432	CSR_WRITE_2(sc, SK_WIN_BASE + SK_REG(reg), val);
433#else
434	CSR_WRITE_2(sc, reg, val);
435#endif
436	return;
437}
438
439static void
440sk_win_write_1(sc, reg, val)
441	struct sk_softc		*sc;
442	int			reg;
443	u_int32_t		val;
444{
445#ifdef SK_USEIOSPACE
446	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
447	CSR_WRITE_1(sc, SK_WIN_BASE + SK_REG(reg), val);
448#else
449	CSR_WRITE_1(sc, reg, val);
450#endif
451	return;
452}
453
454static int
455sk_miibus_readreg(dev, phy, reg)
456	device_t		dev;
457	int			phy, reg;
458{
459	struct sk_if_softc	*sc_if;
460	int			v;
461
462	sc_if = device_get_softc(dev);
463
464	SK_IF_MII_LOCK(sc_if);
465	switch(sc_if->sk_softc->sk_type) {
466	case SK_GENESIS:
467		v = sk_xmac_miibus_readreg(sc_if, phy, reg);
468		break;
469	case SK_YUKON:
470	case SK_YUKON_LITE:
471	case SK_YUKON_LP:
472		v = sk_marv_miibus_readreg(sc_if, phy, reg);
473		break;
474	default:
475		v = 0;
476		break;
477	}
478	SK_IF_MII_UNLOCK(sc_if);
479
480	return (v);
481}
482
483static int
484sk_miibus_writereg(dev, phy, reg, val)
485	device_t		dev;
486	int			phy, reg, val;
487{
488	struct sk_if_softc	*sc_if;
489	int			v;
490
491	sc_if = device_get_softc(dev);
492
493	SK_IF_MII_LOCK(sc_if);
494	switch(sc_if->sk_softc->sk_type) {
495	case SK_GENESIS:
496		v = sk_xmac_miibus_writereg(sc_if, phy, reg, val);
497		break;
498	case SK_YUKON:
499	case SK_YUKON_LITE:
500	case SK_YUKON_LP:
501		v = sk_marv_miibus_writereg(sc_if, phy, reg, val);
502		break;
503	default:
504		v = 0;
505		break;
506	}
507	SK_IF_MII_UNLOCK(sc_if);
508
509	return (v);
510}
511
512static void
513sk_miibus_statchg(dev)
514	device_t		dev;
515{
516	struct sk_if_softc	*sc_if;
517
518	sc_if = device_get_softc(dev);
519
520	SK_IF_MII_LOCK(sc_if);
521	switch(sc_if->sk_softc->sk_type) {
522	case SK_GENESIS:
523		sk_xmac_miibus_statchg(sc_if);
524		break;
525	case SK_YUKON:
526	case SK_YUKON_LITE:
527	case SK_YUKON_LP:
528		sk_marv_miibus_statchg(sc_if);
529		break;
530	}
531	SK_IF_MII_UNLOCK(sc_if);
532
533	return;
534}
535
536static int
537sk_xmac_miibus_readreg(sc_if, phy, reg)
538	struct sk_if_softc	*sc_if;
539	int			phy, reg;
540{
541	int			i;
542
543	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
544	SK_XM_READ_2(sc_if, XM_PHY_DATA);
545	if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
546		for (i = 0; i < SK_TIMEOUT; i++) {
547			DELAY(1);
548			if (SK_XM_READ_2(sc_if, XM_MMUCMD) &
549			    XM_MMUCMD_PHYDATARDY)
550				break;
551		}
552
553		if (i == SK_TIMEOUT) {
554			if_printf(sc_if->sk_ifp, "phy failed to come ready\n");
555			return(0);
556		}
557	}
558	DELAY(1);
559	i = SK_XM_READ_2(sc_if, XM_PHY_DATA);
560
561	return(i);
562}
563
564static int
565sk_xmac_miibus_writereg(sc_if, phy, reg, val)
566	struct sk_if_softc	*sc_if;
567	int			phy, reg, val;
568{
569	int			i;
570
571	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
572	for (i = 0; i < SK_TIMEOUT; i++) {
573		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
574			break;
575	}
576
577	if (i == SK_TIMEOUT) {
578		if_printf(sc_if->sk_ifp, "phy failed to come ready\n");
579		return (ETIMEDOUT);
580	}
581
582	SK_XM_WRITE_2(sc_if, XM_PHY_DATA, val);
583	for (i = 0; i < SK_TIMEOUT; i++) {
584		DELAY(1);
585		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
586			break;
587	}
588	if (i == SK_TIMEOUT)
589		if_printf(sc_if->sk_ifp, "phy write timed out\n");
590
591	return(0);
592}
593
594static void
595sk_xmac_miibus_statchg(sc_if)
596	struct sk_if_softc	*sc_if;
597{
598	struct mii_data		*mii;
599
600	mii = device_get_softc(sc_if->sk_miibus);
601
602	/*
603	 * If this is a GMII PHY, manually set the XMAC's
604	 * duplex mode accordingly.
605	 */
606	if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
607		if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
608			SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
609		} else {
610			SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
611		}
612	}
613}
614
615static int
616sk_marv_miibus_readreg(sc_if, phy, reg)
617	struct sk_if_softc	*sc_if;
618	int			phy, reg;
619{
620	u_int16_t		val;
621	int			i;
622
623	if (sc_if->sk_phytype != SK_PHYTYPE_MARV_COPPER &&
624	    sc_if->sk_phytype != SK_PHYTYPE_MARV_FIBER) {
625		return(0);
626	}
627
628        SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
629		      YU_SMICR_REGAD(reg) | YU_SMICR_OP_READ);
630
631	for (i = 0; i < SK_TIMEOUT; i++) {
632		DELAY(1);
633		val = SK_YU_READ_2(sc_if, YUKON_SMICR);
634		if (val & YU_SMICR_READ_VALID)
635			break;
636	}
637
638	if (i == SK_TIMEOUT) {
639		if_printf(sc_if->sk_ifp, "phy failed to come ready\n");
640		return(0);
641	}
642
643	val = SK_YU_READ_2(sc_if, YUKON_SMIDR);
644
645	return(val);
646}
647
648static int
649sk_marv_miibus_writereg(sc_if, phy, reg, val)
650	struct sk_if_softc	*sc_if;
651	int			phy, reg, val;
652{
653	int			i;
654
655	SK_YU_WRITE_2(sc_if, YUKON_SMIDR, val);
656	SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
657		      YU_SMICR_REGAD(reg) | YU_SMICR_OP_WRITE);
658
659	for (i = 0; i < SK_TIMEOUT; i++) {
660		DELAY(1);
661		if ((SK_YU_READ_2(sc_if, YUKON_SMICR) & YU_SMICR_BUSY) == 0)
662			break;
663	}
664	if (i == SK_TIMEOUT)
665		if_printf(sc_if->sk_ifp, "phy write timeout\n");
666
667	return(0);
668}
669
670static void
671sk_marv_miibus_statchg(sc_if)
672	struct sk_if_softc	*sc_if;
673{
674	return;
675}
676
677#define HASH_BITS		6
678
679static u_int32_t
680sk_xmchash(addr)
681	const uint8_t *addr;
682{
683	uint32_t crc;
684
685	/* Compute CRC for the address value. */
686	crc = ether_crc32_le(addr, ETHER_ADDR_LEN);
687
688	return (~crc & ((1 << HASH_BITS) - 1));
689}
690
691static void
692sk_setfilt(sc_if, addr, slot)
693	struct sk_if_softc	*sc_if;
694	u_int16_t		*addr;
695	int			slot;
696{
697	int			base;
698
699	base = XM_RXFILT_ENTRY(slot);
700
701	SK_XM_WRITE_2(sc_if, base, addr[0]);
702	SK_XM_WRITE_2(sc_if, base + 2, addr[1]);
703	SK_XM_WRITE_2(sc_if, base + 4, addr[2]);
704
705	return;
706}
707
708static void
709sk_rxfilter(sc_if)
710	struct sk_if_softc	*sc_if;
711{
712	struct sk_softc		*sc;
713
714	SK_IF_LOCK_ASSERT(sc_if);
715
716	sc = sc_if->sk_softc;
717	if (sc->sk_type == SK_GENESIS)
718		sk_rxfilter_genesis(sc_if);
719	else
720		sk_rxfilter_yukon(sc_if);
721}
722
723static void
724sk_rxfilter_genesis(sc_if)
725	struct sk_if_softc	*sc_if;
726{
727	struct ifnet		*ifp = sc_if->sk_ifp;
728	u_int32_t		hashes[2] = { 0, 0 }, mode;
729	int			h = 0, i;
730	struct ifmultiaddr	*ifma;
731	u_int16_t		dummy[] = { 0, 0, 0 };
732	u_int16_t		maddr[(ETHER_ADDR_LEN+1)/2];
733
734	SK_IF_LOCK_ASSERT(sc_if);
735
736	mode = SK_XM_READ_4(sc_if, XM_MODE);
737	mode &= ~(XM_MODE_RX_PROMISC | XM_MODE_RX_USE_HASH |
738	    XM_MODE_RX_USE_PERFECT);
739	/* First, zot all the existing perfect filters. */
740	for (i = 1; i < XM_RXFILT_MAX; i++)
741		sk_setfilt(sc_if, dummy, i);
742
743	/* Now program new ones. */
744	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
745		if (ifp->if_flags & IFF_ALLMULTI)
746			mode |= XM_MODE_RX_USE_HASH;
747		if (ifp->if_flags & IFF_PROMISC)
748			mode |= XM_MODE_RX_PROMISC;
749		hashes[0] = 0xFFFFFFFF;
750		hashes[1] = 0xFFFFFFFF;
751	} else {
752		i = 1;
753		if_maddr_rlock(ifp);
754		TAILQ_FOREACH_REVERSE(ifma, &ifp->if_multiaddrs, ifmultihead,
755		    ifma_link) {
756			if (ifma->ifma_addr->sa_family != AF_LINK)
757				continue;
758			/*
759			 * Program the first XM_RXFILT_MAX multicast groups
760			 * into the perfect filter.
761			 */
762			bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
763			    maddr, ETHER_ADDR_LEN);
764			if (i < XM_RXFILT_MAX) {
765				sk_setfilt(sc_if, maddr, i);
766				mode |= XM_MODE_RX_USE_PERFECT;
767				i++;
768				continue;
769			}
770			h = sk_xmchash((const uint8_t *)maddr);
771			if (h < 32)
772				hashes[0] |= (1 << h);
773			else
774				hashes[1] |= (1 << (h - 32));
775			mode |= XM_MODE_RX_USE_HASH;
776		}
777		if_maddr_runlock(ifp);
778	}
779
780	SK_XM_WRITE_4(sc_if, XM_MODE, mode);
781	SK_XM_WRITE_4(sc_if, XM_MAR0, hashes[0]);
782	SK_XM_WRITE_4(sc_if, XM_MAR2, hashes[1]);
783}
784
785static void
786sk_rxfilter_yukon(sc_if)
787	struct sk_if_softc	*sc_if;
788{
789	struct ifnet		*ifp;
790	u_int32_t		crc, hashes[2] = { 0, 0 }, mode;
791	struct ifmultiaddr	*ifma;
792
793	SK_IF_LOCK_ASSERT(sc_if);
794
795	ifp = sc_if->sk_ifp;
796	mode = SK_YU_READ_2(sc_if, YUKON_RCR);
797	if (ifp->if_flags & IFF_PROMISC)
798		mode &= ~(YU_RCR_UFLEN | YU_RCR_MUFLEN);
799	else if (ifp->if_flags & IFF_ALLMULTI) {
800		mode |= YU_RCR_UFLEN | YU_RCR_MUFLEN;
801		hashes[0] = 0xFFFFFFFF;
802		hashes[1] = 0xFFFFFFFF;
803	} else {
804		mode |= YU_RCR_UFLEN;
805		if_maddr_rlock(ifp);
806		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
807			if (ifma->ifma_addr->sa_family != AF_LINK)
808				continue;
809			crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
810			    ifma->ifma_addr), ETHER_ADDR_LEN);
811			/* Just want the 6 least significant bits. */
812			crc &= 0x3f;
813			/* Set the corresponding bit in the hash table. */
814			hashes[crc >> 5] |= 1 << (crc & 0x1f);
815		}
816		if_maddr_runlock(ifp);
817		if (hashes[0] != 0 || hashes[1] != 0)
818			mode |= YU_RCR_MUFLEN;
819	}
820
821	SK_YU_WRITE_2(sc_if, YUKON_MCAH1, hashes[0] & 0xffff);
822	SK_YU_WRITE_2(sc_if, YUKON_MCAH2, (hashes[0] >> 16) & 0xffff);
823	SK_YU_WRITE_2(sc_if, YUKON_MCAH3, hashes[1] & 0xffff);
824	SK_YU_WRITE_2(sc_if, YUKON_MCAH4, (hashes[1] >> 16) & 0xffff);
825	SK_YU_WRITE_2(sc_if, YUKON_RCR, mode);
826}
827
828static int
829sk_init_rx_ring(sc_if)
830	struct sk_if_softc	*sc_if;
831{
832	struct sk_ring_data	*rd;
833	bus_addr_t		addr;
834	u_int32_t		csum_start;
835	int			i;
836
837	sc_if->sk_cdata.sk_rx_cons = 0;
838
839	csum_start = (ETHER_HDR_LEN + sizeof(struct ip))  << 16 |
840	    ETHER_HDR_LEN;
841	rd = &sc_if->sk_rdata;
842	bzero(rd->sk_rx_ring, sizeof(struct sk_rx_desc) * SK_RX_RING_CNT);
843	for (i = 0; i < SK_RX_RING_CNT; i++) {
844		if (sk_newbuf(sc_if, i) != 0)
845			return (ENOBUFS);
846		if (i == (SK_RX_RING_CNT - 1))
847			addr = SK_RX_RING_ADDR(sc_if, 0);
848		else
849			addr = SK_RX_RING_ADDR(sc_if, i + 1);
850		rd->sk_rx_ring[i].sk_next = htole32(SK_ADDR_LO(addr));
851		rd->sk_rx_ring[i].sk_csum_start = htole32(csum_start);
852	}
853
854	bus_dmamap_sync(sc_if->sk_cdata.sk_rx_ring_tag,
855	    sc_if->sk_cdata.sk_rx_ring_map,
856	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
857
858	return(0);
859}
860
861static int
862sk_init_jumbo_rx_ring(sc_if)
863	struct sk_if_softc	*sc_if;
864{
865	struct sk_ring_data	*rd;
866	bus_addr_t		addr;
867	u_int32_t		csum_start;
868	int			i;
869
870	sc_if->sk_cdata.sk_jumbo_rx_cons = 0;
871
872	csum_start = ((ETHER_HDR_LEN + sizeof(struct ip)) << 16) |
873	    ETHER_HDR_LEN;
874	rd = &sc_if->sk_rdata;
875	bzero(rd->sk_jumbo_rx_ring,
876	    sizeof(struct sk_rx_desc) * SK_JUMBO_RX_RING_CNT);
877	for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) {
878		if (sk_jumbo_newbuf(sc_if, i) != 0)
879			return (ENOBUFS);
880		if (i == (SK_JUMBO_RX_RING_CNT - 1))
881			addr = SK_JUMBO_RX_RING_ADDR(sc_if, 0);
882		else
883			addr = SK_JUMBO_RX_RING_ADDR(sc_if, i + 1);
884		rd->sk_jumbo_rx_ring[i].sk_next = htole32(SK_ADDR_LO(addr));
885		rd->sk_jumbo_rx_ring[i].sk_csum_start = htole32(csum_start);
886	}
887
888	bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
889	    sc_if->sk_cdata.sk_jumbo_rx_ring_map,
890	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
891
892	return (0);
893}
894
895static void
896sk_init_tx_ring(sc_if)
897	struct sk_if_softc	*sc_if;
898{
899	struct sk_ring_data	*rd;
900	struct sk_txdesc	*txd;
901	bus_addr_t		addr;
902	int			i;
903
904	STAILQ_INIT(&sc_if->sk_cdata.sk_txfreeq);
905	STAILQ_INIT(&sc_if->sk_cdata.sk_txbusyq);
906
907	sc_if->sk_cdata.sk_tx_prod = 0;
908	sc_if->sk_cdata.sk_tx_cons = 0;
909	sc_if->sk_cdata.sk_tx_cnt = 0;
910
911	rd = &sc_if->sk_rdata;
912	bzero(rd->sk_tx_ring, sizeof(struct sk_tx_desc) * SK_TX_RING_CNT);
913	for (i = 0; i < SK_TX_RING_CNT; i++) {
914		if (i == (SK_TX_RING_CNT - 1))
915			addr = SK_TX_RING_ADDR(sc_if, 0);
916		else
917			addr = SK_TX_RING_ADDR(sc_if, i + 1);
918		rd->sk_tx_ring[i].sk_next = htole32(SK_ADDR_LO(addr));
919		txd = &sc_if->sk_cdata.sk_txdesc[i];
920		STAILQ_INSERT_TAIL(&sc_if->sk_cdata.sk_txfreeq, txd, tx_q);
921	}
922
923	bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag,
924	    sc_if->sk_cdata.sk_tx_ring_map,
925	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
926}
927
928static __inline void
929sk_discard_rxbuf(sc_if, idx)
930	struct sk_if_softc	*sc_if;
931	int			idx;
932{
933	struct sk_rx_desc	*r;
934	struct sk_rxdesc	*rxd;
935	struct mbuf		*m;
936
937
938	r = &sc_if->sk_rdata.sk_rx_ring[idx];
939	rxd = &sc_if->sk_cdata.sk_rxdesc[idx];
940	m = rxd->rx_m;
941	r->sk_ctl = htole32(m->m_len | SK_RXSTAT | SK_OPCODE_CSUM);
942}
943
944static __inline void
945sk_discard_jumbo_rxbuf(sc_if, idx)
946	struct sk_if_softc	*sc_if;
947	int			idx;
948{
949	struct sk_rx_desc	*r;
950	struct sk_rxdesc	*rxd;
951	struct mbuf		*m;
952
953	r = &sc_if->sk_rdata.sk_jumbo_rx_ring[idx];
954	rxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[idx];
955	m = rxd->rx_m;
956	r->sk_ctl = htole32(m->m_len | SK_RXSTAT | SK_OPCODE_CSUM);
957}
958
959static int
960sk_newbuf(sc_if, idx)
961	struct sk_if_softc	*sc_if;
962	int 			idx;
963{
964	struct sk_rx_desc	*r;
965	struct sk_rxdesc	*rxd;
966	struct mbuf		*m;
967	bus_dma_segment_t	segs[1];
968	bus_dmamap_t		map;
969	int			nsegs;
970
971	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
972	if (m == NULL)
973		return (ENOBUFS);
974	m->m_len = m->m_pkthdr.len = MCLBYTES;
975	m_adj(m, ETHER_ALIGN);
976
977	if (bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_rx_tag,
978	    sc_if->sk_cdata.sk_rx_sparemap, m, segs, &nsegs, 0) != 0) {
979		m_freem(m);
980		return (ENOBUFS);
981	}
982	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
983
984	rxd = &sc_if->sk_cdata.sk_rxdesc[idx];
985	if (rxd->rx_m != NULL) {
986		bus_dmamap_sync(sc_if->sk_cdata.sk_rx_tag, rxd->rx_dmamap,
987		    BUS_DMASYNC_POSTREAD);
988		bus_dmamap_unload(sc_if->sk_cdata.sk_rx_tag, rxd->rx_dmamap);
989	}
990	map = rxd->rx_dmamap;
991	rxd->rx_dmamap = sc_if->sk_cdata.sk_rx_sparemap;
992	sc_if->sk_cdata.sk_rx_sparemap = map;
993	bus_dmamap_sync(sc_if->sk_cdata.sk_rx_tag, rxd->rx_dmamap,
994	    BUS_DMASYNC_PREREAD);
995	rxd->rx_m = m;
996	r = &sc_if->sk_rdata.sk_rx_ring[idx];
997	r->sk_data_lo = htole32(SK_ADDR_LO(segs[0].ds_addr));
998	r->sk_data_hi = htole32(SK_ADDR_HI(segs[0].ds_addr));
999	r->sk_ctl = htole32(segs[0].ds_len | SK_RXSTAT | SK_OPCODE_CSUM);
1000
1001	return (0);
1002}
1003
1004static int
1005sk_jumbo_newbuf(sc_if, idx)
1006	struct sk_if_softc	*sc_if;
1007	int			idx;
1008{
1009	struct sk_rx_desc	*r;
1010	struct sk_rxdesc	*rxd;
1011	struct mbuf		*m;
1012	bus_dma_segment_t	segs[1];
1013	bus_dmamap_t		map;
1014	int			nsegs;
1015
1016	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
1017	if (m == NULL)
1018		return (ENOBUFS);
1019	if ((m->m_flags & M_EXT) == 0) {
1020		m_freem(m);
1021		return (ENOBUFS);
1022	}
1023	m->m_pkthdr.len = m->m_len = MJUM9BYTES;
1024	/*
1025	 * Adjust alignment so packet payload begins on a
1026	 * longword boundary. Mandatory for Alpha, useful on
1027	 * x86 too.
1028	 */
1029	m_adj(m, ETHER_ALIGN);
1030
1031	if (bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_jumbo_rx_tag,
1032	    sc_if->sk_cdata.sk_jumbo_rx_sparemap, m, segs, &nsegs, 0) != 0) {
1033		m_freem(m);
1034		return (ENOBUFS);
1035	}
1036	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1037
1038	rxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[idx];
1039	if (rxd->rx_m != NULL) {
1040		bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_tag, rxd->rx_dmamap,
1041		    BUS_DMASYNC_POSTREAD);
1042		bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_rx_tag,
1043		    rxd->rx_dmamap);
1044	}
1045	map = rxd->rx_dmamap;
1046	rxd->rx_dmamap = sc_if->sk_cdata.sk_jumbo_rx_sparemap;
1047	sc_if->sk_cdata.sk_jumbo_rx_sparemap = map;
1048	bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_tag, rxd->rx_dmamap,
1049	    BUS_DMASYNC_PREREAD);
1050	rxd->rx_m = m;
1051	r = &sc_if->sk_rdata.sk_jumbo_rx_ring[idx];
1052	r->sk_data_lo = htole32(SK_ADDR_LO(segs[0].ds_addr));
1053	r->sk_data_hi = htole32(SK_ADDR_HI(segs[0].ds_addr));
1054	r->sk_ctl = htole32(segs[0].ds_len | SK_RXSTAT | SK_OPCODE_CSUM);
1055
1056	return (0);
1057}
1058
1059/*
1060 * Set media options.
1061 */
1062static int
1063sk_ifmedia_upd(ifp)
1064	struct ifnet		*ifp;
1065{
1066	struct sk_if_softc	*sc_if = ifp->if_softc;
1067	struct mii_data		*mii;
1068
1069	mii = device_get_softc(sc_if->sk_miibus);
1070	sk_init(sc_if);
1071	mii_mediachg(mii);
1072
1073	return(0);
1074}
1075
1076/*
1077 * Report current media status.
1078 */
1079static void
1080sk_ifmedia_sts(ifp, ifmr)
1081	struct ifnet		*ifp;
1082	struct ifmediareq	*ifmr;
1083{
1084	struct sk_if_softc	*sc_if;
1085	struct mii_data		*mii;
1086
1087	sc_if = ifp->if_softc;
1088	mii = device_get_softc(sc_if->sk_miibus);
1089
1090	mii_pollstat(mii);
1091	ifmr->ifm_active = mii->mii_media_active;
1092	ifmr->ifm_status = mii->mii_media_status;
1093
1094	return;
1095}
1096
1097static int
1098sk_ioctl(ifp, command, data)
1099	struct ifnet		*ifp;
1100	u_long			command;
1101	caddr_t			data;
1102{
1103	struct sk_if_softc	*sc_if = ifp->if_softc;
1104	struct ifreq		*ifr = (struct ifreq *) data;
1105	int			error, mask;
1106	struct mii_data		*mii;
1107
1108	error = 0;
1109	switch(command) {
1110	case SIOCSIFMTU:
1111		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > SK_JUMBO_MTU)
1112			error = EINVAL;
1113		else if (ifp->if_mtu != ifr->ifr_mtu) {
1114			if (sc_if->sk_jumbo_disable != 0 &&
1115			    ifr->ifr_mtu > SK_MAX_FRAMELEN)
1116				error = EINVAL;
1117			else {
1118				SK_IF_LOCK(sc_if);
1119				ifp->if_mtu = ifr->ifr_mtu;
1120				if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1121					ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1122					sk_init_locked(sc_if);
1123				}
1124				SK_IF_UNLOCK(sc_if);
1125			}
1126		}
1127		break;
1128	case SIOCSIFFLAGS:
1129		SK_IF_LOCK(sc_if);
1130		if (ifp->if_flags & IFF_UP) {
1131			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1132				if ((ifp->if_flags ^ sc_if->sk_if_flags)
1133				    & (IFF_PROMISC | IFF_ALLMULTI))
1134					sk_rxfilter(sc_if);
1135			} else
1136				sk_init_locked(sc_if);
1137		} else {
1138			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1139				sk_stop(sc_if);
1140		}
1141		sc_if->sk_if_flags = ifp->if_flags;
1142		SK_IF_UNLOCK(sc_if);
1143		break;
1144	case SIOCADDMULTI:
1145	case SIOCDELMULTI:
1146		SK_IF_LOCK(sc_if);
1147		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1148			sk_rxfilter(sc_if);
1149		SK_IF_UNLOCK(sc_if);
1150		break;
1151	case SIOCGIFMEDIA:
1152	case SIOCSIFMEDIA:
1153		mii = device_get_softc(sc_if->sk_miibus);
1154		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
1155		break;
1156	case SIOCSIFCAP:
1157		SK_IF_LOCK(sc_if);
1158		if (sc_if->sk_softc->sk_type == SK_GENESIS) {
1159			SK_IF_UNLOCK(sc_if);
1160			break;
1161		}
1162		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1163		if ((mask & IFCAP_TXCSUM) != 0 &&
1164		    (IFCAP_TXCSUM & ifp->if_capabilities) != 0) {
1165			ifp->if_capenable ^= IFCAP_TXCSUM;
1166			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
1167				ifp->if_hwassist |= SK_CSUM_FEATURES;
1168			else
1169				ifp->if_hwassist &= ~SK_CSUM_FEATURES;
1170		}
1171		if ((mask & IFCAP_RXCSUM) != 0 &&
1172		    (IFCAP_RXCSUM & ifp->if_capabilities) != 0)
1173			ifp->if_capenable ^= IFCAP_RXCSUM;
1174		SK_IF_UNLOCK(sc_if);
1175		break;
1176	default:
1177		error = ether_ioctl(ifp, command, data);
1178		break;
1179	}
1180
1181	return (error);
1182}
1183
1184/*
1185 * Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device
1186 * IDs against our list and return a device name if we find a match.
1187 */
1188static int
1189skc_probe(dev)
1190	device_t		dev;
1191{
1192	const struct sk_type	*t = sk_devs;
1193
1194	while(t->sk_name != NULL) {
1195		if ((pci_get_vendor(dev) == t->sk_vid) &&
1196		    (pci_get_device(dev) == t->sk_did)) {
1197			/*
1198			 * Only attach to rev. 2 of the Linksys EG1032 adapter.
1199			 * Rev. 3 is supported by re(4).
1200			 */
1201			if ((t->sk_vid == VENDORID_LINKSYS) &&
1202				(t->sk_did == DEVICEID_LINKSYS_EG1032) &&
1203				(pci_get_subdevice(dev) !=
1204				 SUBDEVICEID_LINKSYS_EG1032_REV2)) {
1205				t++;
1206				continue;
1207			}
1208			device_set_desc(dev, t->sk_name);
1209			return (BUS_PROBE_DEFAULT);
1210		}
1211		t++;
1212	}
1213
1214	return(ENXIO);
1215}
1216
1217/*
1218 * Force the GEnesis into reset, then bring it out of reset.
1219 */
1220static void
1221sk_reset(sc)
1222	struct sk_softc		*sc;
1223{
1224
1225	CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_RESET);
1226	CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_RESET);
1227	if (SK_YUKON_FAMILY(sc->sk_type))
1228		CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_SET);
1229
1230	DELAY(1000);
1231	CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_UNRESET);
1232	DELAY(2);
1233	CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_UNRESET);
1234	if (SK_YUKON_FAMILY(sc->sk_type))
1235		CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_CLEAR);
1236
1237	if (sc->sk_type == SK_GENESIS) {
1238		/* Configure packet arbiter */
1239		sk_win_write_2(sc, SK_PKTARB_CTL, SK_PKTARBCTL_UNRESET);
1240		sk_win_write_2(sc, SK_RXPA1_TINIT, SK_PKTARB_TIMEOUT);
1241		sk_win_write_2(sc, SK_TXPA1_TINIT, SK_PKTARB_TIMEOUT);
1242		sk_win_write_2(sc, SK_RXPA2_TINIT, SK_PKTARB_TIMEOUT);
1243		sk_win_write_2(sc, SK_TXPA2_TINIT, SK_PKTARB_TIMEOUT);
1244	}
1245
1246	/* Enable RAM interface */
1247	sk_win_write_4(sc, SK_RAMCTL, SK_RAMCTL_UNRESET);
1248
1249	/*
1250         * Configure interrupt moderation. The moderation timer
1251	 * defers interrupts specified in the interrupt moderation
1252	 * timer mask based on the timeout specified in the interrupt
1253	 * moderation timer init register. Each bit in the timer
1254	 * register represents one tick, so to specify a timeout in
1255	 * microseconds, we have to multiply by the correct number of
1256	 * ticks-per-microsecond.
1257	 */
1258	switch (sc->sk_type) {
1259	case SK_GENESIS:
1260		sc->sk_int_ticks = SK_IMTIMER_TICKS_GENESIS;
1261		break;
1262	default:
1263		sc->sk_int_ticks = SK_IMTIMER_TICKS_YUKON;
1264		break;
1265	}
1266	if (bootverbose)
1267		device_printf(sc->sk_dev, "interrupt moderation is %d us\n",
1268		    sc->sk_int_mod);
1269	sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(sc->sk_int_mod,
1270	    sc->sk_int_ticks));
1271	sk_win_write_4(sc, SK_IMMR, SK_ISR_TX1_S_EOF|SK_ISR_TX2_S_EOF|
1272	    SK_ISR_RX1_EOF|SK_ISR_RX2_EOF);
1273	sk_win_write_1(sc, SK_IMTIMERCTL, SK_IMCTL_START);
1274
1275	return;
1276}
1277
1278static int
1279sk_probe(dev)
1280	device_t		dev;
1281{
1282	struct sk_softc		*sc;
1283
1284	sc = device_get_softc(device_get_parent(dev));
1285
1286	/*
1287	 * Not much to do here. We always know there will be
1288	 * at least one XMAC present, and if there are two,
1289	 * skc_attach() will create a second device instance
1290	 * for us.
1291	 */
1292	switch (sc->sk_type) {
1293	case SK_GENESIS:
1294		device_set_desc(dev, "XaQti Corp. XMAC II");
1295		break;
1296	case SK_YUKON:
1297	case SK_YUKON_LITE:
1298	case SK_YUKON_LP:
1299		device_set_desc(dev, "Marvell Semiconductor, Inc. Yukon");
1300		break;
1301	}
1302
1303	return (BUS_PROBE_DEFAULT);
1304}
1305
1306/*
1307 * Each XMAC chip is attached as a separate logical IP interface.
1308 * Single port cards will have only one logical interface of course.
1309 */
1310static int
1311sk_attach(dev)
1312	device_t		dev;
1313{
1314	struct sk_softc		*sc;
1315	struct sk_if_softc	*sc_if;
1316	struct ifnet		*ifp;
1317	u_int32_t		r;
1318	int			error, i, phy, port;
1319	u_char			eaddr[6];
1320	u_char			inv_mac[] = {0, 0, 0, 0, 0, 0};
1321
1322	if (dev == NULL)
1323		return(EINVAL);
1324
1325	error = 0;
1326	sc_if = device_get_softc(dev);
1327	sc = device_get_softc(device_get_parent(dev));
1328	port = *(int *)device_get_ivars(dev);
1329
1330	sc_if->sk_if_dev = dev;
1331	sc_if->sk_port = port;
1332	sc_if->sk_softc = sc;
1333	sc->sk_if[port] = sc_if;
1334	if (port == SK_PORT_A)
1335		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR0;
1336	if (port == SK_PORT_B)
1337		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR1;
1338
1339	callout_init_mtx(&sc_if->sk_tick_ch, &sc_if->sk_softc->sk_mtx, 0);
1340	callout_init_mtx(&sc_if->sk_watchdog_ch, &sc_if->sk_softc->sk_mtx, 0);
1341
1342	if (sk_dma_alloc(sc_if) != 0) {
1343		error = ENOMEM;
1344		goto fail;
1345	}
1346	sk_dma_jumbo_alloc(sc_if);
1347
1348	ifp = sc_if->sk_ifp = if_alloc(IFT_ETHER);
1349	if (ifp == NULL) {
1350		device_printf(sc_if->sk_if_dev, "can not if_alloc()\n");
1351		error = ENOSPC;
1352		goto fail;
1353	}
1354	ifp->if_softc = sc_if;
1355	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1356	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1357	/*
1358	 * SK_GENESIS has a bug in checksum offload - From linux.
1359	 */
1360	if (sc_if->sk_softc->sk_type != SK_GENESIS) {
1361		ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_RXCSUM;
1362		ifp->if_hwassist = 0;
1363	} else {
1364		ifp->if_capabilities = 0;
1365		ifp->if_hwassist = 0;
1366	}
1367	ifp->if_capenable = ifp->if_capabilities;
1368	/*
1369	 * Some revision of Yukon controller generates corrupted
1370	 * frame when TX checksum offloading is enabled.  The
1371	 * frame has a valid checksum value so payload might be
1372	 * modified during TX checksum calculation. Disable TX
1373	 * checksum offloading but give users chance to enable it
1374	 * when they know their controller works without problems
1375	 * with TX checksum offloading.
1376	 */
1377	ifp->if_capenable &= ~IFCAP_TXCSUM;
1378	ifp->if_ioctl = sk_ioctl;
1379	ifp->if_start = sk_start;
1380	ifp->if_init = sk_init;
1381	IFQ_SET_MAXLEN(&ifp->if_snd, SK_TX_RING_CNT - 1);
1382	ifp->if_snd.ifq_drv_maxlen = SK_TX_RING_CNT - 1;
1383	IFQ_SET_READY(&ifp->if_snd);
1384
1385	/*
1386	 * Get station address for this interface. Note that
1387	 * dual port cards actually come with three station
1388	 * addresses: one for each port, plus an extra. The
1389	 * extra one is used by the SysKonnect driver software
1390	 * as a 'virtual' station address for when both ports
1391	 * are operating in failover mode. Currently we don't
1392	 * use this extra address.
1393	 */
1394	SK_IF_LOCK(sc_if);
1395	for (i = 0; i < ETHER_ADDR_LEN; i++)
1396		eaddr[i] =
1397		    sk_win_read_1(sc, SK_MAC0_0 + (port * 8) + i);
1398
1399	/* Verify whether the station address is invalid or not. */
1400	if (bcmp(eaddr, inv_mac, sizeof(inv_mac)) == 0) {
1401		device_printf(sc_if->sk_if_dev,
1402		    "Generating random ethernet address\n");
1403		r = arc4random();
1404		/*
1405		 * Set OUI to convenient locally assigned address.  'b'
1406		 * is 0x62, which has the locally assigned bit set, and
1407		 * the broadcast/multicast bit clear.
1408		 */
1409		eaddr[0] = 'b';
1410		eaddr[1] = 's';
1411		eaddr[2] = 'd';
1412		eaddr[3] = (r >> 16) & 0xff;
1413		eaddr[4] = (r >>  8) & 0xff;
1414		eaddr[5] = (r >>  0) & 0xff;
1415	}
1416	/*
1417	 * Set up RAM buffer addresses. The NIC will have a certain
1418	 * amount of SRAM on it, somewhere between 512K and 2MB. We
1419	 * need to divide this up a) between the transmitter and
1420 	 * receiver and b) between the two XMACs, if this is a
1421	 * dual port NIC. Our algotithm is to divide up the memory
1422	 * evenly so that everyone gets a fair share.
1423	 *
1424	 * Just to be contrary, Yukon2 appears to have separate memory
1425	 * for each MAC.
1426	 */
1427	if (sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC) {
1428		u_int32_t		chunk, val;
1429
1430		chunk = sc->sk_ramsize / 2;
1431		val = sc->sk_rboff / sizeof(u_int64_t);
1432		sc_if->sk_rx_ramstart = val;
1433		val += (chunk / sizeof(u_int64_t));
1434		sc_if->sk_rx_ramend = val - 1;
1435		sc_if->sk_tx_ramstart = val;
1436		val += (chunk / sizeof(u_int64_t));
1437		sc_if->sk_tx_ramend = val - 1;
1438	} else {
1439		u_int32_t		chunk, val;
1440
1441		chunk = sc->sk_ramsize / 4;
1442		val = (sc->sk_rboff + (chunk * 2 * sc_if->sk_port)) /
1443		    sizeof(u_int64_t);
1444		sc_if->sk_rx_ramstart = val;
1445		val += (chunk / sizeof(u_int64_t));
1446		sc_if->sk_rx_ramend = val - 1;
1447		sc_if->sk_tx_ramstart = val;
1448		val += (chunk / sizeof(u_int64_t));
1449		sc_if->sk_tx_ramend = val - 1;
1450	}
1451
1452	/* Read and save PHY type and set PHY address */
1453	sc_if->sk_phytype = sk_win_read_1(sc, SK_EPROM1) & 0xF;
1454	if (!SK_YUKON_FAMILY(sc->sk_type)) {
1455		switch(sc_if->sk_phytype) {
1456		case SK_PHYTYPE_XMAC:
1457			sc_if->sk_phyaddr = SK_PHYADDR_XMAC;
1458			break;
1459		case SK_PHYTYPE_BCOM:
1460			sc_if->sk_phyaddr = SK_PHYADDR_BCOM;
1461			break;
1462		default:
1463			device_printf(sc->sk_dev, "unsupported PHY type: %d\n",
1464			    sc_if->sk_phytype);
1465			error = ENODEV;
1466			SK_IF_UNLOCK(sc_if);
1467			goto fail;
1468		}
1469	} else {
1470		if (sc_if->sk_phytype < SK_PHYTYPE_MARV_COPPER &&
1471		    sc->sk_pmd != 'S') {
1472			/* not initialized, punt */
1473			sc_if->sk_phytype = SK_PHYTYPE_MARV_COPPER;
1474			sc->sk_coppertype = 1;
1475		}
1476
1477		sc_if->sk_phyaddr = SK_PHYADDR_MARV;
1478
1479		if (!(sc->sk_coppertype))
1480			sc_if->sk_phytype = SK_PHYTYPE_MARV_FIBER;
1481	}
1482
1483	/*
1484	 * Call MI attach routine.  Can't hold locks when calling into ether_*.
1485	 */
1486	SK_IF_UNLOCK(sc_if);
1487	ether_ifattach(ifp, eaddr);
1488	SK_IF_LOCK(sc_if);
1489
1490	/*
1491	 * The hardware should be ready for VLAN_MTU by default:
1492	 * XMAC II has 0x8100 in VLAN Tag Level 1 register initially;
1493	 * YU_SMR_MFL_VLAN is set by this driver in Yukon.
1494	 *
1495	 */
1496        ifp->if_capabilities |= IFCAP_VLAN_MTU;
1497        ifp->if_capenable |= IFCAP_VLAN_MTU;
1498	/*
1499	 * Tell the upper layer(s) we support long frames.
1500	 * Must appear after the call to ether_ifattach() because
1501	 * ether_ifattach() sets ifi_hdrlen to the default value.
1502	 */
1503        ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1504
1505	/*
1506	 * Do miibus setup.
1507	 */
1508	phy = MII_PHY_ANY;
1509	switch (sc->sk_type) {
1510	case SK_GENESIS:
1511		sk_init_xmac(sc_if);
1512		if (sc_if->sk_phytype == SK_PHYTYPE_XMAC)
1513			phy = 0;
1514		break;
1515	case SK_YUKON:
1516	case SK_YUKON_LITE:
1517	case SK_YUKON_LP:
1518		sk_init_yukon(sc_if);
1519		phy = 0;
1520		break;
1521	}
1522
1523	SK_IF_UNLOCK(sc_if);
1524	error = mii_attach(dev, &sc_if->sk_miibus, ifp, sk_ifmedia_upd,
1525	    sk_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
1526	if (error != 0) {
1527		device_printf(sc_if->sk_if_dev, "attaching PHYs failed\n");
1528		ether_ifdetach(ifp);
1529		goto fail;
1530	}
1531
1532fail:
1533	if (error) {
1534		/* Access should be ok even though lock has been dropped */
1535		sc->sk_if[port] = NULL;
1536		sk_detach(dev);
1537	}
1538
1539	return(error);
1540}
1541
1542/*
1543 * Attach the interface. Allocate softc structures, do ifmedia
1544 * setup and ethernet/BPF attach.
1545 */
1546static int
1547skc_attach(dev)
1548	device_t		dev;
1549{
1550	struct sk_softc		*sc;
1551	int			error = 0, *port;
1552	uint8_t			skrs;
1553	const char		*pname = NULL;
1554	char			*revstr;
1555
1556	sc = device_get_softc(dev);
1557	sc->sk_dev = dev;
1558
1559	mtx_init(&sc->sk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
1560	    MTX_DEF);
1561	mtx_init(&sc->sk_mii_mtx, "sk_mii_mutex", NULL, MTX_DEF);
1562	/*
1563	 * Map control/status registers.
1564	 */
1565	pci_enable_busmaster(dev);
1566
1567	/* Allocate resources */
1568#ifdef SK_USEIOSPACE
1569	sc->sk_res_spec = sk_res_spec_io;
1570#else
1571	sc->sk_res_spec = sk_res_spec_mem;
1572#endif
1573	error = bus_alloc_resources(dev, sc->sk_res_spec, sc->sk_res);
1574	if (error) {
1575		if (sc->sk_res_spec == sk_res_spec_mem)
1576			sc->sk_res_spec = sk_res_spec_io;
1577		else
1578			sc->sk_res_spec = sk_res_spec_mem;
1579		error = bus_alloc_resources(dev, sc->sk_res_spec, sc->sk_res);
1580		if (error) {
1581			device_printf(dev, "couldn't allocate %s resources\n",
1582			    sc->sk_res_spec == sk_res_spec_mem ? "memory" :
1583			    "I/O");
1584			goto fail;
1585		}
1586	}
1587
1588	sc->sk_type = sk_win_read_1(sc, SK_CHIPVER);
1589	sc->sk_rev = (sk_win_read_1(sc, SK_CONFIG) >> 4) & 0xf;
1590
1591	/* Bail out if chip is not recognized. */
1592	if (sc->sk_type != SK_GENESIS && !SK_YUKON_FAMILY(sc->sk_type)) {
1593		device_printf(dev, "unknown device: chipver=%02x, rev=%x\n",
1594		    sc->sk_type, sc->sk_rev);
1595		error = ENXIO;
1596		goto fail;
1597	}
1598
1599	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
1600		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
1601		OID_AUTO, "int_mod", CTLTYPE_INT|CTLFLAG_RW,
1602		&sc->sk_int_mod, 0, sysctl_hw_sk_int_mod, "I",
1603		"SK interrupt moderation");
1604
1605	/* Pull in device tunables. */
1606	sc->sk_int_mod = SK_IM_DEFAULT;
1607	error = resource_int_value(device_get_name(dev), device_get_unit(dev),
1608		"int_mod", &sc->sk_int_mod);
1609	if (error == 0) {
1610		if (sc->sk_int_mod < SK_IM_MIN ||
1611		    sc->sk_int_mod > SK_IM_MAX) {
1612			device_printf(dev, "int_mod value out of range; "
1613			    "using default: %d\n", SK_IM_DEFAULT);
1614			sc->sk_int_mod = SK_IM_DEFAULT;
1615		}
1616	}
1617
1618	/* Reset the adapter. */
1619	sk_reset(sc);
1620
1621	skrs = sk_win_read_1(sc, SK_EPROM0);
1622	if (sc->sk_type == SK_GENESIS) {
1623		/* Read and save RAM size and RAMbuffer offset */
1624		switch(skrs) {
1625		case SK_RAMSIZE_512K_64:
1626			sc->sk_ramsize = 0x80000;
1627			sc->sk_rboff = SK_RBOFF_0;
1628			break;
1629		case SK_RAMSIZE_1024K_64:
1630			sc->sk_ramsize = 0x100000;
1631			sc->sk_rboff = SK_RBOFF_80000;
1632			break;
1633		case SK_RAMSIZE_1024K_128:
1634			sc->sk_ramsize = 0x100000;
1635			sc->sk_rboff = SK_RBOFF_0;
1636			break;
1637		case SK_RAMSIZE_2048K_128:
1638			sc->sk_ramsize = 0x200000;
1639			sc->sk_rboff = SK_RBOFF_0;
1640			break;
1641		default:
1642			device_printf(dev, "unknown ram size: %d\n", skrs);
1643			error = ENXIO;
1644			goto fail;
1645		}
1646	} else { /* SK_YUKON_FAMILY */
1647		if (skrs == 0x00)
1648			sc->sk_ramsize = 0x20000;
1649		else
1650			sc->sk_ramsize = skrs * (1<<12);
1651		sc->sk_rboff = SK_RBOFF_0;
1652	}
1653
1654	/* Read and save physical media type */
1655	 sc->sk_pmd = sk_win_read_1(sc, SK_PMDTYPE);
1656
1657	 if (sc->sk_pmd == 'T' || sc->sk_pmd == '1')
1658		 sc->sk_coppertype = 1;
1659	 else
1660		 sc->sk_coppertype = 0;
1661
1662	/* Determine whether to name it with VPD PN or just make it up.
1663	 * Marvell Yukon VPD PN seems to freqently be bogus. */
1664	switch (pci_get_device(dev)) {
1665	case DEVICEID_SK_V1:
1666	case DEVICEID_BELKIN_5005:
1667	case DEVICEID_3COM_3C940:
1668	case DEVICEID_LINKSYS_EG1032:
1669	case DEVICEID_DLINK_DGE530T_A1:
1670	case DEVICEID_DLINK_DGE530T_B1:
1671		/* Stay with VPD PN. */
1672		(void) pci_get_vpd_ident(dev, &pname);
1673		break;
1674	case DEVICEID_SK_V2:
1675		/* YUKON VPD PN might bear no resemblance to reality. */
1676		switch (sc->sk_type) {
1677		case SK_GENESIS:
1678			/* Stay with VPD PN. */
1679			(void) pci_get_vpd_ident(dev, &pname);
1680			break;
1681		case SK_YUKON:
1682			pname = "Marvell Yukon Gigabit Ethernet";
1683			break;
1684		case SK_YUKON_LITE:
1685			pname = "Marvell Yukon Lite Gigabit Ethernet";
1686			break;
1687		case SK_YUKON_LP:
1688			pname = "Marvell Yukon LP Gigabit Ethernet";
1689			break;
1690		default:
1691			pname = "Marvell Yukon (Unknown) Gigabit Ethernet";
1692			break;
1693		}
1694
1695		/* Yukon Lite Rev. A0 needs special test. */
1696		if (sc->sk_type == SK_YUKON || sc->sk_type == SK_YUKON_LP) {
1697			u_int32_t far;
1698			u_int8_t testbyte;
1699
1700			/* Save flash address register before testing. */
1701			far = sk_win_read_4(sc, SK_EP_ADDR);
1702
1703			sk_win_write_1(sc, SK_EP_ADDR+0x03, 0xff);
1704			testbyte = sk_win_read_1(sc, SK_EP_ADDR+0x03);
1705
1706			if (testbyte != 0x00) {
1707				/* Yukon Lite Rev. A0 detected. */
1708				sc->sk_type = SK_YUKON_LITE;
1709				sc->sk_rev = SK_YUKON_LITE_REV_A0;
1710				/* Restore flash address register. */
1711				sk_win_write_4(sc, SK_EP_ADDR, far);
1712			}
1713		}
1714		break;
1715	default:
1716		device_printf(dev, "unknown device: vendor=%04x, device=%04x, "
1717			"chipver=%02x, rev=%x\n",
1718			pci_get_vendor(dev), pci_get_device(dev),
1719			sc->sk_type, sc->sk_rev);
1720		error = ENXIO;
1721		goto fail;
1722	}
1723
1724	if (sc->sk_type == SK_YUKON_LITE) {
1725		switch (sc->sk_rev) {
1726		case SK_YUKON_LITE_REV_A0:
1727			revstr = "A0";
1728			break;
1729		case SK_YUKON_LITE_REV_A1:
1730			revstr = "A1";
1731			break;
1732		case SK_YUKON_LITE_REV_A3:
1733			revstr = "A3";
1734			break;
1735		default:
1736			revstr = "";
1737			break;
1738		}
1739	} else {
1740		revstr = "";
1741	}
1742
1743	/* Announce the product name and more VPD data if there. */
1744	if (pname != NULL)
1745		device_printf(dev, "%s rev. %s(0x%x)\n",
1746			pname, revstr, sc->sk_rev);
1747
1748	if (bootverbose) {
1749		device_printf(dev, "chip ver  = 0x%02x\n", sc->sk_type);
1750		device_printf(dev, "chip rev  = 0x%02x\n", sc->sk_rev);
1751		device_printf(dev, "SK_EPROM0 = 0x%02x\n", skrs);
1752		device_printf(dev, "SRAM size = 0x%06x\n", sc->sk_ramsize);
1753	}
1754
1755	sc->sk_devs[SK_PORT_A] = device_add_child(dev, "sk", -1);
1756	if (sc->sk_devs[SK_PORT_A] == NULL) {
1757		device_printf(dev, "failed to add child for PORT_A\n");
1758		error = ENXIO;
1759		goto fail;
1760	}
1761	port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
1762	if (port == NULL) {
1763		device_printf(dev, "failed to allocate memory for "
1764		    "ivars of PORT_A\n");
1765		error = ENXIO;
1766		goto fail;
1767	}
1768	*port = SK_PORT_A;
1769	device_set_ivars(sc->sk_devs[SK_PORT_A], port);
1770
1771	if (!(sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC)) {
1772		sc->sk_devs[SK_PORT_B] = device_add_child(dev, "sk", -1);
1773		if (sc->sk_devs[SK_PORT_B] == NULL) {
1774			device_printf(dev, "failed to add child for PORT_B\n");
1775			error = ENXIO;
1776			goto fail;
1777		}
1778		port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
1779		if (port == NULL) {
1780			device_printf(dev, "failed to allocate memory for "
1781			    "ivars of PORT_B\n");
1782			error = ENXIO;
1783			goto fail;
1784		}
1785		*port = SK_PORT_B;
1786		device_set_ivars(sc->sk_devs[SK_PORT_B], port);
1787	}
1788
1789	/* Turn on the 'driver is loaded' LED. */
1790	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON);
1791
1792	error = bus_generic_attach(dev);
1793	if (error) {
1794		device_printf(dev, "failed to attach port(s)\n");
1795		goto fail;
1796	}
1797
1798	/* Hook interrupt last to avoid having to lock softc */
1799	error = bus_setup_intr(dev, sc->sk_res[1], INTR_TYPE_NET|INTR_MPSAFE,
1800	    NULL, sk_intr, sc, &sc->sk_intrhand);
1801
1802	if (error) {
1803		device_printf(dev, "couldn't set up irq\n");
1804		goto fail;
1805	}
1806
1807fail:
1808	if (error)
1809		skc_detach(dev);
1810
1811	return(error);
1812}
1813
1814/*
1815 * Shutdown hardware and free up resources. This can be called any
1816 * time after the mutex has been initialized. It is called in both
1817 * the error case in attach and the normal detach case so it needs
1818 * to be careful about only freeing resources that have actually been
1819 * allocated.
1820 */
1821static int
1822sk_detach(dev)
1823	device_t		dev;
1824{
1825	struct sk_if_softc	*sc_if;
1826	struct ifnet		*ifp;
1827
1828	sc_if = device_get_softc(dev);
1829	KASSERT(mtx_initialized(&sc_if->sk_softc->sk_mtx),
1830	    ("sk mutex not initialized in sk_detach"));
1831	SK_IF_LOCK(sc_if);
1832
1833	ifp = sc_if->sk_ifp;
1834	/* These should only be active if attach_xmac succeeded */
1835	if (device_is_attached(dev)) {
1836		sk_stop(sc_if);
1837		/* Can't hold locks while calling detach */
1838		SK_IF_UNLOCK(sc_if);
1839		callout_drain(&sc_if->sk_tick_ch);
1840		callout_drain(&sc_if->sk_watchdog_ch);
1841		ether_ifdetach(ifp);
1842		SK_IF_LOCK(sc_if);
1843	}
1844	if (ifp)
1845		if_free(ifp);
1846	/*
1847	 * We're generally called from skc_detach() which is using
1848	 * device_delete_child() to get to here. It's already trashed
1849	 * miibus for us, so don't do it here or we'll panic.
1850	 */
1851	/*
1852	if (sc_if->sk_miibus != NULL)
1853		device_delete_child(dev, sc_if->sk_miibus);
1854	*/
1855	bus_generic_detach(dev);
1856	sk_dma_jumbo_free(sc_if);
1857	sk_dma_free(sc_if);
1858	SK_IF_UNLOCK(sc_if);
1859
1860	return(0);
1861}
1862
1863static int
1864skc_detach(dev)
1865	device_t		dev;
1866{
1867	struct sk_softc		*sc;
1868
1869	sc = device_get_softc(dev);
1870	KASSERT(mtx_initialized(&sc->sk_mtx), ("sk mutex not initialized"));
1871
1872	if (device_is_alive(dev)) {
1873		if (sc->sk_devs[SK_PORT_A] != NULL) {
1874			free(device_get_ivars(sc->sk_devs[SK_PORT_A]), M_DEVBUF);
1875			device_delete_child(dev, sc->sk_devs[SK_PORT_A]);
1876		}
1877		if (sc->sk_devs[SK_PORT_B] != NULL) {
1878			free(device_get_ivars(sc->sk_devs[SK_PORT_B]), M_DEVBUF);
1879			device_delete_child(dev, sc->sk_devs[SK_PORT_B]);
1880		}
1881		bus_generic_detach(dev);
1882	}
1883
1884	if (sc->sk_intrhand)
1885		bus_teardown_intr(dev, sc->sk_res[1], sc->sk_intrhand);
1886	bus_release_resources(dev, sc->sk_res_spec, sc->sk_res);
1887
1888	mtx_destroy(&sc->sk_mii_mtx);
1889	mtx_destroy(&sc->sk_mtx);
1890
1891	return(0);
1892}
1893
1894static bus_dma_tag_t
1895skc_get_dma_tag(device_t bus, device_t child __unused)
1896{
1897
1898	return (bus_get_dma_tag(bus));
1899}
1900
1901struct sk_dmamap_arg {
1902	bus_addr_t	sk_busaddr;
1903};
1904
1905static void
1906sk_dmamap_cb(arg, segs, nseg, error)
1907	void			*arg;
1908	bus_dma_segment_t	*segs;
1909	int			nseg;
1910	int			error;
1911{
1912	struct sk_dmamap_arg	*ctx;
1913
1914	if (error != 0)
1915		return;
1916
1917	ctx = arg;
1918	ctx->sk_busaddr = segs[0].ds_addr;
1919}
1920
1921/*
1922 * Allocate jumbo buffer storage. The SysKonnect adapters support
1923 * "jumbograms" (9K frames), although SysKonnect doesn't currently
1924 * use them in their drivers. In order for us to use them, we need
1925 * large 9K receive buffers, however standard mbuf clusters are only
1926 * 2048 bytes in size. Consequently, we need to allocate and manage
1927 * our own jumbo buffer pool. Fortunately, this does not require an
1928 * excessive amount of additional code.
1929 */
1930static int
1931sk_dma_alloc(sc_if)
1932	struct sk_if_softc	*sc_if;
1933{
1934	struct sk_dmamap_arg	ctx;
1935	struct sk_txdesc	*txd;
1936	struct sk_rxdesc	*rxd;
1937	int			error, i;
1938
1939	/* create parent tag */
1940	/*
1941	 * XXX
1942	 * This driver should use BUS_SPACE_MAXADDR for lowaddr argument
1943	 * in bus_dma_tag_create(9) as the NIC would support DAC mode.
1944	 * However bz@ reported that it does not work on amd64 with > 4GB
1945	 * RAM. Until we have more clues of the breakage, disable DAC mode
1946	 * by limiting DMA address to be in 32bit address space.
1947	 */
1948	error = bus_dma_tag_create(
1949		    bus_get_dma_tag(sc_if->sk_if_dev),/* parent */
1950		    1, 0,			/* algnmnt, boundary */
1951		    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
1952		    BUS_SPACE_MAXADDR,		/* highaddr */
1953		    NULL, NULL,			/* filter, filterarg */
1954		    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
1955		    0,				/* nsegments */
1956		    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
1957		    0,				/* flags */
1958		    NULL, NULL,			/* lockfunc, lockarg */
1959		    &sc_if->sk_cdata.sk_parent_tag);
1960	if (error != 0) {
1961		device_printf(sc_if->sk_if_dev,
1962		    "failed to create parent DMA tag\n");
1963		goto fail;
1964	}
1965
1966	/* create tag for Tx ring */
1967	error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
1968		    SK_RING_ALIGN, 0,		/* algnmnt, boundary */
1969		    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
1970		    BUS_SPACE_MAXADDR,		/* highaddr */
1971		    NULL, NULL,			/* filter, filterarg */
1972		    SK_TX_RING_SZ,		/* maxsize */
1973		    1,				/* nsegments */
1974		    SK_TX_RING_SZ,		/* maxsegsize */
1975		    0,				/* flags */
1976		    NULL, NULL,			/* lockfunc, lockarg */
1977		    &sc_if->sk_cdata.sk_tx_ring_tag);
1978	if (error != 0) {
1979		device_printf(sc_if->sk_if_dev,
1980		    "failed to allocate Tx ring DMA tag\n");
1981		goto fail;
1982	}
1983
1984	/* create tag for Rx ring */
1985	error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
1986		    SK_RING_ALIGN, 0,		/* algnmnt, boundary */
1987		    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
1988		    BUS_SPACE_MAXADDR,		/* highaddr */
1989		    NULL, NULL,			/* filter, filterarg */
1990		    SK_RX_RING_SZ,		/* maxsize */
1991		    1,				/* nsegments */
1992		    SK_RX_RING_SZ,		/* maxsegsize */
1993		    0,				/* flags */
1994		    NULL, NULL,			/* lockfunc, lockarg */
1995		    &sc_if->sk_cdata.sk_rx_ring_tag);
1996	if (error != 0) {
1997		device_printf(sc_if->sk_if_dev,
1998		    "failed to allocate Rx ring DMA tag\n");
1999		goto fail;
2000	}
2001
2002	/* create tag for Tx buffers */
2003	error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
2004		    1, 0,			/* algnmnt, boundary */
2005		    BUS_SPACE_MAXADDR,		/* lowaddr */
2006		    BUS_SPACE_MAXADDR,		/* highaddr */
2007		    NULL, NULL,			/* filter, filterarg */
2008		    MCLBYTES * SK_MAXTXSEGS,	/* maxsize */
2009		    SK_MAXTXSEGS,		/* nsegments */
2010		    MCLBYTES,			/* maxsegsize */
2011		    0,				/* flags */
2012		    NULL, NULL,			/* lockfunc, lockarg */
2013		    &sc_if->sk_cdata.sk_tx_tag);
2014	if (error != 0) {
2015		device_printf(sc_if->sk_if_dev,
2016		    "failed to allocate Tx DMA tag\n");
2017		goto fail;
2018	}
2019
2020	/* create tag for Rx buffers */
2021	error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
2022		    1, 0,			/* algnmnt, boundary */
2023		    BUS_SPACE_MAXADDR,		/* lowaddr */
2024		    BUS_SPACE_MAXADDR,		/* highaddr */
2025		    NULL, NULL,			/* filter, filterarg */
2026		    MCLBYTES,			/* maxsize */
2027		    1,				/* nsegments */
2028		    MCLBYTES,			/* maxsegsize */
2029		    0,				/* flags */
2030		    NULL, NULL,			/* lockfunc, lockarg */
2031		    &sc_if->sk_cdata.sk_rx_tag);
2032	if (error != 0) {
2033		device_printf(sc_if->sk_if_dev,
2034		    "failed to allocate Rx DMA tag\n");
2035		goto fail;
2036	}
2037
2038	/* allocate DMA'able memory and load the DMA map for Tx ring */
2039	error = bus_dmamem_alloc(sc_if->sk_cdata.sk_tx_ring_tag,
2040	    (void **)&sc_if->sk_rdata.sk_tx_ring, BUS_DMA_NOWAIT |
2041	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->sk_cdata.sk_tx_ring_map);
2042	if (error != 0) {
2043		device_printf(sc_if->sk_if_dev,
2044		    "failed to allocate DMA'able memory for Tx ring\n");
2045		goto fail;
2046	}
2047
2048	ctx.sk_busaddr = 0;
2049	error = bus_dmamap_load(sc_if->sk_cdata.sk_tx_ring_tag,
2050	    sc_if->sk_cdata.sk_tx_ring_map, sc_if->sk_rdata.sk_tx_ring,
2051	    SK_TX_RING_SZ, sk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
2052	if (error != 0) {
2053		device_printf(sc_if->sk_if_dev,
2054		    "failed to load DMA'able memory for Tx ring\n");
2055		goto fail;
2056	}
2057	sc_if->sk_rdata.sk_tx_ring_paddr = ctx.sk_busaddr;
2058
2059	/* allocate DMA'able memory and load the DMA map for Rx ring */
2060	error = bus_dmamem_alloc(sc_if->sk_cdata.sk_rx_ring_tag,
2061	    (void **)&sc_if->sk_rdata.sk_rx_ring, BUS_DMA_NOWAIT |
2062	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->sk_cdata.sk_rx_ring_map);
2063	if (error != 0) {
2064		device_printf(sc_if->sk_if_dev,
2065		    "failed to allocate DMA'able memory for Rx ring\n");
2066		goto fail;
2067	}
2068
2069	ctx.sk_busaddr = 0;
2070	error = bus_dmamap_load(sc_if->sk_cdata.sk_rx_ring_tag,
2071	    sc_if->sk_cdata.sk_rx_ring_map, sc_if->sk_rdata.sk_rx_ring,
2072	    SK_RX_RING_SZ, sk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
2073	if (error != 0) {
2074		device_printf(sc_if->sk_if_dev,
2075		    "failed to load DMA'able memory for Rx ring\n");
2076		goto fail;
2077	}
2078	sc_if->sk_rdata.sk_rx_ring_paddr = ctx.sk_busaddr;
2079
2080	/* create DMA maps for Tx buffers */
2081	for (i = 0; i < SK_TX_RING_CNT; i++) {
2082		txd = &sc_if->sk_cdata.sk_txdesc[i];
2083		txd->tx_m = NULL;
2084		txd->tx_dmamap = NULL;
2085		error = bus_dmamap_create(sc_if->sk_cdata.sk_tx_tag, 0,
2086		    &txd->tx_dmamap);
2087		if (error != 0) {
2088			device_printf(sc_if->sk_if_dev,
2089			    "failed to create Tx dmamap\n");
2090			goto fail;
2091		}
2092	}
2093
2094	/* create DMA maps for Rx buffers */
2095	if ((error = bus_dmamap_create(sc_if->sk_cdata.sk_rx_tag, 0,
2096	    &sc_if->sk_cdata.sk_rx_sparemap)) != 0) {
2097		device_printf(sc_if->sk_if_dev,
2098		    "failed to create spare Rx dmamap\n");
2099		goto fail;
2100	}
2101	for (i = 0; i < SK_RX_RING_CNT; i++) {
2102		rxd = &sc_if->sk_cdata.sk_rxdesc[i];
2103		rxd->rx_m = NULL;
2104		rxd->rx_dmamap = NULL;
2105		error = bus_dmamap_create(sc_if->sk_cdata.sk_rx_tag, 0,
2106		    &rxd->rx_dmamap);
2107		if (error != 0) {
2108			device_printf(sc_if->sk_if_dev,
2109			    "failed to create Rx dmamap\n");
2110			goto fail;
2111		}
2112	}
2113
2114fail:
2115	return (error);
2116}
2117
2118static int
2119sk_dma_jumbo_alloc(sc_if)
2120	struct sk_if_softc	*sc_if;
2121{
2122	struct sk_dmamap_arg	ctx;
2123	struct sk_rxdesc	*jrxd;
2124	int			error, i;
2125
2126	if (jumbo_disable != 0) {
2127		device_printf(sc_if->sk_if_dev, "disabling jumbo frame support\n");
2128		sc_if->sk_jumbo_disable = 1;
2129		return (0);
2130	}
2131	/* create tag for jumbo Rx ring */
2132	error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
2133		    SK_RING_ALIGN, 0,		/* algnmnt, boundary */
2134		    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
2135		    BUS_SPACE_MAXADDR,		/* highaddr */
2136		    NULL, NULL,			/* filter, filterarg */
2137		    SK_JUMBO_RX_RING_SZ,	/* maxsize */
2138		    1,				/* nsegments */
2139		    SK_JUMBO_RX_RING_SZ,	/* maxsegsize */
2140		    0,				/* flags */
2141		    NULL, NULL,			/* lockfunc, lockarg */
2142		    &sc_if->sk_cdata.sk_jumbo_rx_ring_tag);
2143	if (error != 0) {
2144		device_printf(sc_if->sk_if_dev,
2145		    "failed to allocate jumbo Rx ring DMA tag\n");
2146		goto jumbo_fail;
2147	}
2148
2149	/* create tag for jumbo Rx buffers */
2150	error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
2151		    1, 0,			/* algnmnt, boundary */
2152		    BUS_SPACE_MAXADDR,		/* lowaddr */
2153		    BUS_SPACE_MAXADDR,		/* highaddr */
2154		    NULL, NULL,			/* filter, filterarg */
2155		    MJUM9BYTES,			/* maxsize */
2156		    1,				/* nsegments */
2157		    MJUM9BYTES,			/* maxsegsize */
2158		    0,				/* flags */
2159		    NULL, NULL,			/* lockfunc, lockarg */
2160		    &sc_if->sk_cdata.sk_jumbo_rx_tag);
2161	if (error != 0) {
2162		device_printf(sc_if->sk_if_dev,
2163		    "failed to allocate jumbo Rx DMA tag\n");
2164		goto jumbo_fail;
2165	}
2166
2167	/* allocate DMA'able memory and load the DMA map for jumbo Rx ring */
2168	error = bus_dmamem_alloc(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
2169	    (void **)&sc_if->sk_rdata.sk_jumbo_rx_ring, BUS_DMA_NOWAIT |
2170	    BUS_DMA_COHERENT | BUS_DMA_ZERO,
2171	    &sc_if->sk_cdata.sk_jumbo_rx_ring_map);
2172	if (error != 0) {
2173		device_printf(sc_if->sk_if_dev,
2174		    "failed to allocate DMA'able memory for jumbo Rx ring\n");
2175		goto jumbo_fail;
2176	}
2177
2178	ctx.sk_busaddr = 0;
2179	error = bus_dmamap_load(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
2180	    sc_if->sk_cdata.sk_jumbo_rx_ring_map,
2181	    sc_if->sk_rdata.sk_jumbo_rx_ring, SK_JUMBO_RX_RING_SZ, sk_dmamap_cb,
2182	    &ctx, BUS_DMA_NOWAIT);
2183	if (error != 0) {
2184		device_printf(sc_if->sk_if_dev,
2185		    "failed to load DMA'able memory for jumbo Rx ring\n");
2186		goto jumbo_fail;
2187	}
2188	sc_if->sk_rdata.sk_jumbo_rx_ring_paddr = ctx.sk_busaddr;
2189
2190	/* create DMA maps for jumbo Rx buffers */
2191	if ((error = bus_dmamap_create(sc_if->sk_cdata.sk_jumbo_rx_tag, 0,
2192	    &sc_if->sk_cdata.sk_jumbo_rx_sparemap)) != 0) {
2193		device_printf(sc_if->sk_if_dev,
2194		    "failed to create spare jumbo Rx dmamap\n");
2195		goto jumbo_fail;
2196	}
2197	for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) {
2198		jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[i];
2199		jrxd->rx_m = NULL;
2200		jrxd->rx_dmamap = NULL;
2201		error = bus_dmamap_create(sc_if->sk_cdata.sk_jumbo_rx_tag, 0,
2202		    &jrxd->rx_dmamap);
2203		if (error != 0) {
2204			device_printf(sc_if->sk_if_dev,
2205			    "failed to create jumbo Rx dmamap\n");
2206			goto jumbo_fail;
2207		}
2208	}
2209
2210	return (0);
2211
2212jumbo_fail:
2213	sk_dma_jumbo_free(sc_if);
2214	device_printf(sc_if->sk_if_dev, "disabling jumbo frame support due to "
2215	    "resource shortage\n");
2216	sc_if->sk_jumbo_disable = 1;
2217	return (0);
2218}
2219
2220static void
2221sk_dma_free(sc_if)
2222	struct sk_if_softc	*sc_if;
2223{
2224	struct sk_txdesc	*txd;
2225	struct sk_rxdesc	*rxd;
2226	int			i;
2227
2228	/* Tx ring */
2229	if (sc_if->sk_cdata.sk_tx_ring_tag) {
2230		if (sc_if->sk_cdata.sk_tx_ring_map)
2231			bus_dmamap_unload(sc_if->sk_cdata.sk_tx_ring_tag,
2232			    sc_if->sk_cdata.sk_tx_ring_map);
2233		if (sc_if->sk_cdata.sk_tx_ring_map &&
2234		    sc_if->sk_rdata.sk_tx_ring)
2235			bus_dmamem_free(sc_if->sk_cdata.sk_tx_ring_tag,
2236			    sc_if->sk_rdata.sk_tx_ring,
2237			    sc_if->sk_cdata.sk_tx_ring_map);
2238		sc_if->sk_rdata.sk_tx_ring = NULL;
2239		sc_if->sk_cdata.sk_tx_ring_map = NULL;
2240		bus_dma_tag_destroy(sc_if->sk_cdata.sk_tx_ring_tag);
2241		sc_if->sk_cdata.sk_tx_ring_tag = NULL;
2242	}
2243	/* Rx ring */
2244	if (sc_if->sk_cdata.sk_rx_ring_tag) {
2245		if (sc_if->sk_cdata.sk_rx_ring_map)
2246			bus_dmamap_unload(sc_if->sk_cdata.sk_rx_ring_tag,
2247			    sc_if->sk_cdata.sk_rx_ring_map);
2248		if (sc_if->sk_cdata.sk_rx_ring_map &&
2249		    sc_if->sk_rdata.sk_rx_ring)
2250			bus_dmamem_free(sc_if->sk_cdata.sk_rx_ring_tag,
2251			    sc_if->sk_rdata.sk_rx_ring,
2252			    sc_if->sk_cdata.sk_rx_ring_map);
2253		sc_if->sk_rdata.sk_rx_ring = NULL;
2254		sc_if->sk_cdata.sk_rx_ring_map = NULL;
2255		bus_dma_tag_destroy(sc_if->sk_cdata.sk_rx_ring_tag);
2256		sc_if->sk_cdata.sk_rx_ring_tag = NULL;
2257	}
2258	/* Tx buffers */
2259	if (sc_if->sk_cdata.sk_tx_tag) {
2260		for (i = 0; i < SK_TX_RING_CNT; i++) {
2261			txd = &sc_if->sk_cdata.sk_txdesc[i];
2262			if (txd->tx_dmamap) {
2263				bus_dmamap_destroy(sc_if->sk_cdata.sk_tx_tag,
2264				    txd->tx_dmamap);
2265				txd->tx_dmamap = NULL;
2266			}
2267		}
2268		bus_dma_tag_destroy(sc_if->sk_cdata.sk_tx_tag);
2269		sc_if->sk_cdata.sk_tx_tag = NULL;
2270	}
2271	/* Rx buffers */
2272	if (sc_if->sk_cdata.sk_rx_tag) {
2273		for (i = 0; i < SK_RX_RING_CNT; i++) {
2274			rxd = &sc_if->sk_cdata.sk_rxdesc[i];
2275			if (rxd->rx_dmamap) {
2276				bus_dmamap_destroy(sc_if->sk_cdata.sk_rx_tag,
2277				    rxd->rx_dmamap);
2278				rxd->rx_dmamap = NULL;
2279			}
2280		}
2281		if (sc_if->sk_cdata.sk_rx_sparemap) {
2282			bus_dmamap_destroy(sc_if->sk_cdata.sk_rx_tag,
2283			    sc_if->sk_cdata.sk_rx_sparemap);
2284			sc_if->sk_cdata.sk_rx_sparemap = NULL;
2285		}
2286		bus_dma_tag_destroy(sc_if->sk_cdata.sk_rx_tag);
2287		sc_if->sk_cdata.sk_rx_tag = NULL;
2288	}
2289
2290	if (sc_if->sk_cdata.sk_parent_tag) {
2291		bus_dma_tag_destroy(sc_if->sk_cdata.sk_parent_tag);
2292		sc_if->sk_cdata.sk_parent_tag = NULL;
2293	}
2294}
2295
2296static void
2297sk_dma_jumbo_free(sc_if)
2298	struct sk_if_softc	*sc_if;
2299{
2300	struct sk_rxdesc	*jrxd;
2301	int			i;
2302
2303	/* jumbo Rx ring */
2304	if (sc_if->sk_cdata.sk_jumbo_rx_ring_tag) {
2305		if (sc_if->sk_cdata.sk_jumbo_rx_ring_map)
2306			bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
2307			    sc_if->sk_cdata.sk_jumbo_rx_ring_map);
2308		if (sc_if->sk_cdata.sk_jumbo_rx_ring_map &&
2309		    sc_if->sk_rdata.sk_jumbo_rx_ring)
2310			bus_dmamem_free(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
2311			    sc_if->sk_rdata.sk_jumbo_rx_ring,
2312			    sc_if->sk_cdata.sk_jumbo_rx_ring_map);
2313		sc_if->sk_rdata.sk_jumbo_rx_ring = NULL;
2314		sc_if->sk_cdata.sk_jumbo_rx_ring_map = NULL;
2315		bus_dma_tag_destroy(sc_if->sk_cdata.sk_jumbo_rx_ring_tag);
2316		sc_if->sk_cdata.sk_jumbo_rx_ring_tag = NULL;
2317	}
2318
2319	/* jumbo Rx buffers */
2320	if (sc_if->sk_cdata.sk_jumbo_rx_tag) {
2321		for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) {
2322			jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[i];
2323			if (jrxd->rx_dmamap) {
2324				bus_dmamap_destroy(
2325				    sc_if->sk_cdata.sk_jumbo_rx_tag,
2326				    jrxd->rx_dmamap);
2327				jrxd->rx_dmamap = NULL;
2328			}
2329		}
2330		if (sc_if->sk_cdata.sk_jumbo_rx_sparemap) {
2331			bus_dmamap_destroy(sc_if->sk_cdata.sk_jumbo_rx_tag,
2332			    sc_if->sk_cdata.sk_jumbo_rx_sparemap);
2333			sc_if->sk_cdata.sk_jumbo_rx_sparemap = NULL;
2334		}
2335		bus_dma_tag_destroy(sc_if->sk_cdata.sk_jumbo_rx_tag);
2336		sc_if->sk_cdata.sk_jumbo_rx_tag = NULL;
2337	}
2338}
2339
2340static void
2341sk_txcksum(ifp, m, f)
2342	struct ifnet		*ifp;
2343	struct mbuf		*m;
2344	struct sk_tx_desc	*f;
2345{
2346	struct ip		*ip;
2347	u_int16_t		offset;
2348	u_int8_t 		*p;
2349
2350	offset = sizeof(struct ip) + ETHER_HDR_LEN;
2351	for(; m && m->m_len == 0; m = m->m_next)
2352		;
2353	if (m == NULL || m->m_len < ETHER_HDR_LEN) {
2354		if_printf(ifp, "%s: m_len < ETHER_HDR_LEN\n", __func__);
2355		/* checksum may be corrupted */
2356		goto sendit;
2357	}
2358	if (m->m_len < ETHER_HDR_LEN + sizeof(u_int32_t)) {
2359		if (m->m_len != ETHER_HDR_LEN) {
2360			if_printf(ifp, "%s: m_len != ETHER_HDR_LEN\n",
2361			    __func__);
2362			/* checksum may be corrupted */
2363			goto sendit;
2364		}
2365		for(m = m->m_next; m && m->m_len == 0; m = m->m_next)
2366			;
2367		if (m == NULL) {
2368			offset = sizeof(struct ip) + ETHER_HDR_LEN;
2369			/* checksum may be corrupted */
2370			goto sendit;
2371		}
2372		ip = mtod(m, struct ip *);
2373	} else {
2374		p = mtod(m, u_int8_t *);
2375		p += ETHER_HDR_LEN;
2376		ip = (struct ip *)p;
2377	}
2378	offset = (ip->ip_hl << 2) + ETHER_HDR_LEN;
2379
2380sendit:
2381	f->sk_csum_startval = 0;
2382	f->sk_csum_start = htole32(((offset + m->m_pkthdr.csum_data) & 0xffff) |
2383	    (offset << 16));
2384}
2385
2386static int
2387sk_encap(sc_if, m_head)
2388        struct sk_if_softc	*sc_if;
2389        struct mbuf		**m_head;
2390{
2391	struct sk_txdesc	*txd;
2392	struct sk_tx_desc	*f = NULL;
2393	struct mbuf		*m;
2394	bus_dma_segment_t	txsegs[SK_MAXTXSEGS];
2395	u_int32_t		cflags, frag, si, sk_ctl;
2396	int			error, i, nseg;
2397
2398	SK_IF_LOCK_ASSERT(sc_if);
2399
2400	if ((txd = STAILQ_FIRST(&sc_if->sk_cdata.sk_txfreeq)) == NULL)
2401		return (ENOBUFS);
2402
2403	error = bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_tx_tag,
2404	    txd->tx_dmamap, *m_head, txsegs, &nseg, 0);
2405	if (error == EFBIG) {
2406		m = m_defrag(*m_head, M_NOWAIT);
2407		if (m == NULL) {
2408			m_freem(*m_head);
2409			*m_head = NULL;
2410			return (ENOMEM);
2411		}
2412		*m_head = m;
2413		error = bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_tx_tag,
2414		    txd->tx_dmamap, *m_head, txsegs, &nseg, 0);
2415		if (error != 0) {
2416			m_freem(*m_head);
2417			*m_head = NULL;
2418			return (error);
2419		}
2420	} else if (error != 0)
2421		return (error);
2422	if (nseg == 0) {
2423		m_freem(*m_head);
2424		*m_head = NULL;
2425		return (EIO);
2426	}
2427	if (sc_if->sk_cdata.sk_tx_cnt + nseg >= SK_TX_RING_CNT) {
2428		bus_dmamap_unload(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap);
2429		return (ENOBUFS);
2430	}
2431
2432	m = *m_head;
2433	if ((m->m_pkthdr.csum_flags & sc_if->sk_ifp->if_hwassist) != 0)
2434		cflags = SK_OPCODE_CSUM;
2435	else
2436		cflags = SK_OPCODE_DEFAULT;
2437	si = frag = sc_if->sk_cdata.sk_tx_prod;
2438	for (i = 0; i < nseg; i++) {
2439		f = &sc_if->sk_rdata.sk_tx_ring[frag];
2440		f->sk_data_lo = htole32(SK_ADDR_LO(txsegs[i].ds_addr));
2441		f->sk_data_hi = htole32(SK_ADDR_HI(txsegs[i].ds_addr));
2442		sk_ctl = txsegs[i].ds_len | cflags;
2443		if (i == 0) {
2444			if (cflags == SK_OPCODE_CSUM)
2445				sk_txcksum(sc_if->sk_ifp, m, f);
2446			sk_ctl |= SK_TXCTL_FIRSTFRAG;
2447		} else
2448			sk_ctl |= SK_TXCTL_OWN;
2449		f->sk_ctl = htole32(sk_ctl);
2450		sc_if->sk_cdata.sk_tx_cnt++;
2451		SK_INC(frag, SK_TX_RING_CNT);
2452	}
2453	sc_if->sk_cdata.sk_tx_prod = frag;
2454
2455	/* set EOF on the last desciptor */
2456	frag = (frag + SK_TX_RING_CNT - 1) % SK_TX_RING_CNT;
2457	f = &sc_if->sk_rdata.sk_tx_ring[frag];
2458	f->sk_ctl |= htole32(SK_TXCTL_LASTFRAG | SK_TXCTL_EOF_INTR);
2459
2460	/* turn the first descriptor ownership to NIC */
2461	f = &sc_if->sk_rdata.sk_tx_ring[si];
2462	f->sk_ctl |= htole32(SK_TXCTL_OWN);
2463
2464	STAILQ_REMOVE_HEAD(&sc_if->sk_cdata.sk_txfreeq, tx_q);
2465	STAILQ_INSERT_TAIL(&sc_if->sk_cdata.sk_txbusyq, txd, tx_q);
2466	txd->tx_m = m;
2467
2468	/* sync descriptors */
2469	bus_dmamap_sync(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap,
2470	    BUS_DMASYNC_PREWRITE);
2471	bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag,
2472	    sc_if->sk_cdata.sk_tx_ring_map,
2473	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2474
2475	return (0);
2476}
2477
2478static void
2479sk_start(ifp)
2480	struct ifnet		*ifp;
2481{
2482	struct sk_if_softc *sc_if;
2483
2484	sc_if = ifp->if_softc;
2485
2486	SK_IF_LOCK(sc_if);
2487	sk_start_locked(ifp);
2488	SK_IF_UNLOCK(sc_if);
2489
2490	return;
2491}
2492
2493static void
2494sk_start_locked(ifp)
2495	struct ifnet		*ifp;
2496{
2497        struct sk_softc		*sc;
2498        struct sk_if_softc	*sc_if;
2499        struct mbuf		*m_head;
2500	int			enq;
2501
2502	sc_if = ifp->if_softc;
2503	sc = sc_if->sk_softc;
2504
2505	SK_IF_LOCK_ASSERT(sc_if);
2506
2507	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
2508	    sc_if->sk_cdata.sk_tx_cnt < SK_TX_RING_CNT - 1; ) {
2509		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2510		if (m_head == NULL)
2511			break;
2512
2513		/*
2514		 * Pack the data into the transmit ring. If we
2515		 * don't have room, set the OACTIVE flag and wait
2516		 * for the NIC to drain the ring.
2517		 */
2518		if (sk_encap(sc_if, &m_head)) {
2519			if (m_head == NULL)
2520				break;
2521			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2522			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2523			break;
2524		}
2525
2526		enq++;
2527		/*
2528		 * If there's a BPF listener, bounce a copy of this frame
2529		 * to him.
2530		 */
2531		BPF_MTAP(ifp, m_head);
2532	}
2533
2534	if (enq > 0) {
2535		/* Transmit */
2536		CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);
2537
2538		/* Set a timeout in case the chip goes out to lunch. */
2539		sc_if->sk_watchdog_timer = 5;
2540	}
2541}
2542
2543
2544static void
2545sk_watchdog(arg)
2546	void			*arg;
2547{
2548	struct sk_if_softc	*sc_if;
2549	struct ifnet		*ifp;
2550
2551	ifp = arg;
2552	sc_if = ifp->if_softc;
2553
2554	SK_IF_LOCK_ASSERT(sc_if);
2555
2556	if (sc_if->sk_watchdog_timer == 0 || --sc_if->sk_watchdog_timer)
2557		goto done;
2558
2559	/*
2560	 * Reclaim first as there is a possibility of losing Tx completion
2561	 * interrupts.
2562	 */
2563	sk_txeof(sc_if);
2564	if (sc_if->sk_cdata.sk_tx_cnt != 0) {
2565		if_printf(sc_if->sk_ifp, "watchdog timeout\n");
2566		ifp->if_oerrors++;
2567		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2568		sk_init_locked(sc_if);
2569	}
2570
2571done:
2572	callout_reset(&sc_if->sk_watchdog_ch, hz, sk_watchdog, ifp);
2573
2574	return;
2575}
2576
2577static int
2578skc_shutdown(dev)
2579	device_t		dev;
2580{
2581	struct sk_softc		*sc;
2582
2583	sc = device_get_softc(dev);
2584	SK_LOCK(sc);
2585
2586	/* Turn off the 'driver is loaded' LED. */
2587	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF);
2588
2589	/*
2590	 * Reset the GEnesis controller. Doing this should also
2591	 * assert the resets on the attached XMAC(s).
2592	 */
2593	sk_reset(sc);
2594	SK_UNLOCK(sc);
2595
2596	return (0);
2597}
2598
2599static int
2600skc_suspend(dev)
2601	device_t		dev;
2602{
2603	struct sk_softc		*sc;
2604	struct sk_if_softc	*sc_if0, *sc_if1;
2605	struct ifnet		*ifp0 = NULL, *ifp1 = NULL;
2606
2607	sc = device_get_softc(dev);
2608
2609	SK_LOCK(sc);
2610
2611	sc_if0 = sc->sk_if[SK_PORT_A];
2612	sc_if1 = sc->sk_if[SK_PORT_B];
2613	if (sc_if0 != NULL)
2614		ifp0 = sc_if0->sk_ifp;
2615	if (sc_if1 != NULL)
2616		ifp1 = sc_if1->sk_ifp;
2617	if (ifp0 != NULL)
2618		sk_stop(sc_if0);
2619	if (ifp1 != NULL)
2620		sk_stop(sc_if1);
2621	sc->sk_suspended = 1;
2622
2623	SK_UNLOCK(sc);
2624
2625	return (0);
2626}
2627
2628static int
2629skc_resume(dev)
2630	device_t		dev;
2631{
2632	struct sk_softc		*sc;
2633	struct sk_if_softc	*sc_if0, *sc_if1;
2634	struct ifnet		*ifp0 = NULL, *ifp1 = NULL;
2635
2636	sc = device_get_softc(dev);
2637
2638	SK_LOCK(sc);
2639
2640	sc_if0 = sc->sk_if[SK_PORT_A];
2641	sc_if1 = sc->sk_if[SK_PORT_B];
2642	if (sc_if0 != NULL)
2643		ifp0 = sc_if0->sk_ifp;
2644	if (sc_if1 != NULL)
2645		ifp1 = sc_if1->sk_ifp;
2646	if (ifp0 != NULL && ifp0->if_flags & IFF_UP)
2647		sk_init_locked(sc_if0);
2648	if (ifp1 != NULL && ifp1->if_flags & IFF_UP)
2649		sk_init_locked(sc_if1);
2650	sc->sk_suspended = 0;
2651
2652	SK_UNLOCK(sc);
2653
2654	return (0);
2655}
2656
2657/*
2658 * According to the data sheet from SK-NET GENESIS the hardware can compute
2659 * two Rx checksums at the same time(Each checksum start position is
2660 * programmed in Rx descriptors). However it seems that TCP/UDP checksum
2661 * does not work at least on my Yukon hardware. I tried every possible ways
2662 * to get correct checksum value but couldn't get correct one. So TCP/UDP
2663 * checksum offload was disabled at the moment and only IP checksum offload
2664 * was enabled.
2665 * As nomral IP header size is 20 bytes I can't expect it would give an
2666 * increase in throughput. However it seems it doesn't hurt performance in
2667 * my testing. If there is a more detailed information for checksum secret
2668 * of the hardware in question please contact yongari@FreeBSD.org to add
2669 * TCP/UDP checksum offload support.
2670 */
2671static __inline void
2672sk_rxcksum(ifp, m, csum)
2673	struct ifnet		*ifp;
2674	struct mbuf		*m;
2675	u_int32_t		csum;
2676{
2677	struct ether_header	*eh;
2678	struct ip		*ip;
2679	int32_t			hlen, len, pktlen;
2680	u_int16_t		csum1, csum2, ipcsum;
2681
2682	pktlen = m->m_pkthdr.len;
2683	if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
2684		return;
2685	eh = mtod(m, struct ether_header *);
2686	if (eh->ether_type != htons(ETHERTYPE_IP))
2687		return;
2688	ip = (struct ip *)(eh + 1);
2689	if (ip->ip_v != IPVERSION)
2690		return;
2691	hlen = ip->ip_hl << 2;
2692	pktlen -= sizeof(struct ether_header);
2693	if (hlen < sizeof(struct ip))
2694		return;
2695	if (ntohs(ip->ip_len) < hlen)
2696		return;
2697	if (ntohs(ip->ip_len) != pktlen)
2698		return;
2699
2700	csum1 = htons(csum & 0xffff);
2701	csum2 = htons((csum >> 16) & 0xffff);
2702	ipcsum = in_addword(csum1, ~csum2 & 0xffff);
2703	/* checksum fixup for IP options */
2704	len = hlen - sizeof(struct ip);
2705	if (len > 0) {
2706		/*
2707		 * If the second checksum value is correct we can compute IP
2708		 * checksum with simple math. Unfortunately the second checksum
2709		 * value is wrong so we can't verify the checksum from the
2710		 * value(It seems there is some magic here to get correct
2711		 * value). If the second checksum value is correct it also
2712		 * means we can get TCP/UDP checksum) here. However, it still
2713		 * needs pseudo header checksum calculation due to hardware
2714		 * limitations.
2715		 */
2716		return;
2717	}
2718	m->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
2719	if (ipcsum == 0xffff)
2720		m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2721}
2722
2723static __inline int
2724sk_rxvalid(sc, stat, len)
2725	struct sk_softc		*sc;
2726	u_int32_t		stat, len;
2727{
2728
2729	if (sc->sk_type == SK_GENESIS) {
2730		if ((stat & XM_RXSTAT_ERRFRAME) == XM_RXSTAT_ERRFRAME ||
2731		    XM_RXSTAT_BYTES(stat) != len)
2732			return (0);
2733	} else {
2734		if ((stat & (YU_RXSTAT_CRCERR | YU_RXSTAT_LONGERR |
2735		    YU_RXSTAT_MIIERR | YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC |
2736		    YU_RXSTAT_JABBER)) != 0 ||
2737		    (stat & YU_RXSTAT_RXOK) != YU_RXSTAT_RXOK ||
2738		    YU_RXSTAT_BYTES(stat) != len)
2739			return (0);
2740	}
2741
2742	return (1);
2743}
2744
2745static void
2746sk_rxeof(sc_if)
2747	struct sk_if_softc	*sc_if;
2748{
2749	struct sk_softc		*sc;
2750	struct mbuf		*m;
2751	struct ifnet		*ifp;
2752	struct sk_rx_desc	*cur_rx;
2753	struct sk_rxdesc	*rxd;
2754	int			cons, prog;
2755	u_int32_t		csum, rxstat, sk_ctl;
2756
2757	sc = sc_if->sk_softc;
2758	ifp = sc_if->sk_ifp;
2759
2760	SK_IF_LOCK_ASSERT(sc_if);
2761
2762	bus_dmamap_sync(sc_if->sk_cdata.sk_rx_ring_tag,
2763	    sc_if->sk_cdata.sk_rx_ring_map, BUS_DMASYNC_POSTREAD);
2764
2765	prog = 0;
2766	for (cons = sc_if->sk_cdata.sk_rx_cons; prog < SK_RX_RING_CNT;
2767	    prog++, SK_INC(cons, SK_RX_RING_CNT)) {
2768		cur_rx = &sc_if->sk_rdata.sk_rx_ring[cons];
2769		sk_ctl = le32toh(cur_rx->sk_ctl);
2770		if ((sk_ctl & SK_RXCTL_OWN) != 0)
2771			break;
2772		rxd = &sc_if->sk_cdata.sk_rxdesc[cons];
2773		rxstat = le32toh(cur_rx->sk_xmac_rxstat);
2774
2775		if ((sk_ctl & (SK_RXCTL_STATUS_VALID | SK_RXCTL_FIRSTFRAG |
2776		    SK_RXCTL_LASTFRAG)) != (SK_RXCTL_STATUS_VALID |
2777		    SK_RXCTL_FIRSTFRAG | SK_RXCTL_LASTFRAG) ||
2778		    SK_RXBYTES(sk_ctl) < SK_MIN_FRAMELEN ||
2779		    SK_RXBYTES(sk_ctl) > SK_MAX_FRAMELEN ||
2780		    sk_rxvalid(sc, rxstat, SK_RXBYTES(sk_ctl)) == 0) {
2781			ifp->if_ierrors++;
2782			sk_discard_rxbuf(sc_if, cons);
2783			continue;
2784		}
2785
2786		m = rxd->rx_m;
2787		csum = le32toh(cur_rx->sk_csum);
2788		if (sk_newbuf(sc_if, cons) != 0) {
2789			ifp->if_iqdrops++;
2790			/* reuse old buffer */
2791			sk_discard_rxbuf(sc_if, cons);
2792			continue;
2793		}
2794		m->m_pkthdr.rcvif = ifp;
2795		m->m_pkthdr.len = m->m_len = SK_RXBYTES(sk_ctl);
2796		ifp->if_ipackets++;
2797		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
2798			sk_rxcksum(ifp, m, csum);
2799		SK_IF_UNLOCK(sc_if);
2800		(*ifp->if_input)(ifp, m);
2801		SK_IF_LOCK(sc_if);
2802	}
2803
2804	if (prog > 0) {
2805		sc_if->sk_cdata.sk_rx_cons = cons;
2806		bus_dmamap_sync(sc_if->sk_cdata.sk_rx_ring_tag,
2807		    sc_if->sk_cdata.sk_rx_ring_map,
2808		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2809	}
2810}
2811
2812static void
2813sk_jumbo_rxeof(sc_if)
2814	struct sk_if_softc	*sc_if;
2815{
2816	struct sk_softc		*sc;
2817	struct mbuf		*m;
2818	struct ifnet		*ifp;
2819	struct sk_rx_desc	*cur_rx;
2820	struct sk_rxdesc	*jrxd;
2821	int			cons, prog;
2822	u_int32_t		csum, rxstat, sk_ctl;
2823
2824	sc = sc_if->sk_softc;
2825	ifp = sc_if->sk_ifp;
2826
2827	SK_IF_LOCK_ASSERT(sc_if);
2828
2829	bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
2830	    sc_if->sk_cdata.sk_jumbo_rx_ring_map, BUS_DMASYNC_POSTREAD);
2831
2832	prog = 0;
2833	for (cons = sc_if->sk_cdata.sk_jumbo_rx_cons;
2834	    prog < SK_JUMBO_RX_RING_CNT;
2835	    prog++, SK_INC(cons, SK_JUMBO_RX_RING_CNT)) {
2836		cur_rx = &sc_if->sk_rdata.sk_jumbo_rx_ring[cons];
2837		sk_ctl = le32toh(cur_rx->sk_ctl);
2838		if ((sk_ctl & SK_RXCTL_OWN) != 0)
2839			break;
2840		jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[cons];
2841		rxstat = le32toh(cur_rx->sk_xmac_rxstat);
2842
2843		if ((sk_ctl & (SK_RXCTL_STATUS_VALID | SK_RXCTL_FIRSTFRAG |
2844		    SK_RXCTL_LASTFRAG)) != (SK_RXCTL_STATUS_VALID |
2845		    SK_RXCTL_FIRSTFRAG | SK_RXCTL_LASTFRAG) ||
2846		    SK_RXBYTES(sk_ctl) < SK_MIN_FRAMELEN ||
2847		    SK_RXBYTES(sk_ctl) > SK_JUMBO_FRAMELEN ||
2848		    sk_rxvalid(sc, rxstat, SK_RXBYTES(sk_ctl)) == 0) {
2849			ifp->if_ierrors++;
2850			sk_discard_jumbo_rxbuf(sc_if, cons);
2851			continue;
2852		}
2853
2854		m = jrxd->rx_m;
2855		csum = le32toh(cur_rx->sk_csum);
2856		if (sk_jumbo_newbuf(sc_if, cons) != 0) {
2857			ifp->if_iqdrops++;
2858			/* reuse old buffer */
2859			sk_discard_jumbo_rxbuf(sc_if, cons);
2860			continue;
2861		}
2862		m->m_pkthdr.rcvif = ifp;
2863		m->m_pkthdr.len = m->m_len = SK_RXBYTES(sk_ctl);
2864		ifp->if_ipackets++;
2865		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
2866			sk_rxcksum(ifp, m, csum);
2867		SK_IF_UNLOCK(sc_if);
2868		(*ifp->if_input)(ifp, m);
2869		SK_IF_LOCK(sc_if);
2870	}
2871
2872	if (prog > 0) {
2873		sc_if->sk_cdata.sk_jumbo_rx_cons = cons;
2874		bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
2875		    sc_if->sk_cdata.sk_jumbo_rx_ring_map,
2876		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2877	}
2878}
2879
2880static void
2881sk_txeof(sc_if)
2882	struct sk_if_softc	*sc_if;
2883{
2884	struct sk_softc		*sc;
2885	struct sk_txdesc	*txd;
2886	struct sk_tx_desc	*cur_tx;
2887	struct ifnet		*ifp;
2888	u_int32_t		idx, sk_ctl;
2889
2890	sc = sc_if->sk_softc;
2891	ifp = sc_if->sk_ifp;
2892
2893	txd = STAILQ_FIRST(&sc_if->sk_cdata.sk_txbusyq);
2894	if (txd == NULL)
2895		return;
2896	bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag,
2897	    sc_if->sk_cdata.sk_tx_ring_map, BUS_DMASYNC_POSTREAD);
2898	/*
2899	 * Go through our tx ring and free mbufs for those
2900	 * frames that have been sent.
2901	 */
2902	for (idx = sc_if->sk_cdata.sk_tx_cons;; SK_INC(idx, SK_TX_RING_CNT)) {
2903		if (sc_if->sk_cdata.sk_tx_cnt <= 0)
2904			break;
2905		cur_tx = &sc_if->sk_rdata.sk_tx_ring[idx];
2906		sk_ctl = le32toh(cur_tx->sk_ctl);
2907		if (sk_ctl & SK_TXCTL_OWN)
2908			break;
2909		sc_if->sk_cdata.sk_tx_cnt--;
2910		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2911		if ((sk_ctl & SK_TXCTL_LASTFRAG) == 0)
2912			continue;
2913		bus_dmamap_sync(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap,
2914		    BUS_DMASYNC_POSTWRITE);
2915		bus_dmamap_unload(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap);
2916
2917		ifp->if_opackets++;
2918		m_freem(txd->tx_m);
2919		txd->tx_m = NULL;
2920		STAILQ_REMOVE_HEAD(&sc_if->sk_cdata.sk_txbusyq, tx_q);
2921		STAILQ_INSERT_TAIL(&sc_if->sk_cdata.sk_txfreeq, txd, tx_q);
2922		txd = STAILQ_FIRST(&sc_if->sk_cdata.sk_txbusyq);
2923	}
2924	sc_if->sk_cdata.sk_tx_cons = idx;
2925	sc_if->sk_watchdog_timer = sc_if->sk_cdata.sk_tx_cnt > 0 ? 5 : 0;
2926
2927	bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag,
2928	    sc_if->sk_cdata.sk_tx_ring_map,
2929	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2930}
2931
2932static void
2933sk_tick(xsc_if)
2934	void			*xsc_if;
2935{
2936	struct sk_if_softc	*sc_if;
2937	struct mii_data		*mii;
2938	struct ifnet		*ifp;
2939	int			i;
2940
2941	sc_if = xsc_if;
2942	ifp = sc_if->sk_ifp;
2943	mii = device_get_softc(sc_if->sk_miibus);
2944
2945	if (!(ifp->if_flags & IFF_UP))
2946		return;
2947
2948	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
2949		sk_intr_bcom(sc_if);
2950		return;
2951	}
2952
2953	/*
2954	 * According to SysKonnect, the correct way to verify that
2955	 * the link has come back up is to poll bit 0 of the GPIO
2956	 * register three times. This pin has the signal from the
2957	 * link_sync pin connected to it; if we read the same link
2958	 * state 3 times in a row, we know the link is up.
2959	 */
2960	for (i = 0; i < 3; i++) {
2961		if (SK_XM_READ_2(sc_if, XM_GPIO) & XM_GPIO_GP0_SET)
2962			break;
2963	}
2964
2965	if (i != 3) {
2966		callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if);
2967		return;
2968	}
2969
2970	/* Turn the GP0 interrupt back on. */
2971	SK_XM_CLRBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
2972	SK_XM_READ_2(sc_if, XM_ISR);
2973	mii_tick(mii);
2974	callout_stop(&sc_if->sk_tick_ch);
2975}
2976
2977static void
2978sk_yukon_tick(xsc_if)
2979	void			*xsc_if;
2980{
2981	struct sk_if_softc	*sc_if;
2982	struct mii_data		*mii;
2983
2984	sc_if = xsc_if;
2985	mii = device_get_softc(sc_if->sk_miibus);
2986
2987	mii_tick(mii);
2988	callout_reset(&sc_if->sk_tick_ch, hz, sk_yukon_tick, sc_if);
2989}
2990
2991static void
2992sk_intr_bcom(sc_if)
2993	struct sk_if_softc	*sc_if;
2994{
2995	struct mii_data		*mii;
2996	struct ifnet		*ifp;
2997	int			status;
2998	mii = device_get_softc(sc_if->sk_miibus);
2999	ifp = sc_if->sk_ifp;
3000
3001	SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
3002
3003	/*
3004	 * Read the PHY interrupt register to make sure
3005	 * we clear any pending interrupts.
3006	 */
3007	status = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, BRGPHY_MII_ISR);
3008
3009	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3010		sk_init_xmac(sc_if);
3011		return;
3012	}
3013
3014	if (status & (BRGPHY_ISR_LNK_CHG|BRGPHY_ISR_AN_PR)) {
3015		int			lstat;
3016		lstat = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM,
3017		    BRGPHY_MII_AUXSTS);
3018
3019		if (!(lstat & BRGPHY_AUXSTS_LINK) && sc_if->sk_link) {
3020			mii_mediachg(mii);
3021			/* Turn off the link LED. */
3022			SK_IF_WRITE_1(sc_if, 0,
3023			    SK_LINKLED1_CTL, SK_LINKLED_OFF);
3024			sc_if->sk_link = 0;
3025		} else if (status & BRGPHY_ISR_LNK_CHG) {
3026			sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
3027	    		    BRGPHY_MII_IMR, 0xFF00);
3028			mii_tick(mii);
3029			sc_if->sk_link = 1;
3030			/* Turn on the link LED. */
3031			SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
3032			    SK_LINKLED_ON|SK_LINKLED_LINKSYNC_OFF|
3033			    SK_LINKLED_BLINK_OFF);
3034		} else {
3035			mii_tick(mii);
3036			callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if);
3037		}
3038	}
3039
3040	SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
3041
3042	return;
3043}
3044
3045static void
3046sk_intr_xmac(sc_if)
3047	struct sk_if_softc	*sc_if;
3048{
3049	struct sk_softc		*sc;
3050	u_int16_t		status;
3051
3052	sc = sc_if->sk_softc;
3053	status = SK_XM_READ_2(sc_if, XM_ISR);
3054
3055	/*
3056	 * Link has gone down. Start MII tick timeout to
3057	 * watch for link resync.
3058	 */
3059	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) {
3060		if (status & XM_ISR_GP0_SET) {
3061			SK_XM_SETBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
3062			callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if);
3063		}
3064
3065		if (status & XM_ISR_AUTONEG_DONE) {
3066			callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if);
3067		}
3068	}
3069
3070	if (status & XM_IMR_TX_UNDERRUN)
3071		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_TXFIFO);
3072
3073	if (status & XM_IMR_RX_OVERRUN)
3074		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_RXFIFO);
3075
3076	status = SK_XM_READ_2(sc_if, XM_ISR);
3077
3078	return;
3079}
3080
3081static void
3082sk_intr_yukon(sc_if)
3083	struct sk_if_softc	*sc_if;
3084{
3085	u_int8_t status;
3086
3087	status = SK_IF_READ_1(sc_if, 0, SK_GMAC_ISR);
3088	/* RX overrun */
3089	if ((status & SK_GMAC_INT_RX_OVER) != 0) {
3090		SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST,
3091		    SK_RFCTL_RX_FIFO_OVER);
3092	}
3093	/* TX underrun */
3094	if ((status & SK_GMAC_INT_TX_UNDER) != 0) {
3095		SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST,
3096		    SK_TFCTL_TX_FIFO_UNDER);
3097	}
3098}
3099
3100static void
3101sk_intr(xsc)
3102	void			*xsc;
3103{
3104	struct sk_softc		*sc = xsc;
3105	struct sk_if_softc	*sc_if0, *sc_if1;
3106	struct ifnet		*ifp0 = NULL, *ifp1 = NULL;
3107	u_int32_t		status;
3108
3109	SK_LOCK(sc);
3110
3111	status = CSR_READ_4(sc, SK_ISSR);
3112	if (status == 0 || status == 0xffffffff || sc->sk_suspended)
3113		goto done_locked;
3114
3115	sc_if0 = sc->sk_if[SK_PORT_A];
3116	sc_if1 = sc->sk_if[SK_PORT_B];
3117
3118	if (sc_if0 != NULL)
3119		ifp0 = sc_if0->sk_ifp;
3120	if (sc_if1 != NULL)
3121		ifp1 = sc_if1->sk_ifp;
3122
3123	for (; (status &= sc->sk_intrmask) != 0;) {
3124		/* Handle receive interrupts first. */
3125		if (status & SK_ISR_RX1_EOF) {
3126			if (ifp0->if_mtu > SK_MAX_FRAMELEN)
3127				sk_jumbo_rxeof(sc_if0);
3128			else
3129				sk_rxeof(sc_if0);
3130			CSR_WRITE_4(sc, SK_BMU_RX_CSR0,
3131			    SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
3132		}
3133		if (status & SK_ISR_RX2_EOF) {
3134			if (ifp1->if_mtu > SK_MAX_FRAMELEN)
3135				sk_jumbo_rxeof(sc_if1);
3136			else
3137				sk_rxeof(sc_if1);
3138			CSR_WRITE_4(sc, SK_BMU_RX_CSR1,
3139			    SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
3140		}
3141
3142		/* Then transmit interrupts. */
3143		if (status & SK_ISR_TX1_S_EOF) {
3144			sk_txeof(sc_if0);
3145			CSR_WRITE_4(sc, SK_BMU_TXS_CSR0, SK_TXBMU_CLR_IRQ_EOF);
3146		}
3147		if (status & SK_ISR_TX2_S_EOF) {
3148			sk_txeof(sc_if1);
3149			CSR_WRITE_4(sc, SK_BMU_TXS_CSR1, SK_TXBMU_CLR_IRQ_EOF);
3150		}
3151
3152		/* Then MAC interrupts. */
3153		if (status & SK_ISR_MAC1 &&
3154		    ifp0->if_drv_flags & IFF_DRV_RUNNING) {
3155			if (sc->sk_type == SK_GENESIS)
3156				sk_intr_xmac(sc_if0);
3157			else
3158				sk_intr_yukon(sc_if0);
3159		}
3160
3161		if (status & SK_ISR_MAC2 &&
3162		    ifp1->if_drv_flags & IFF_DRV_RUNNING) {
3163			if (sc->sk_type == SK_GENESIS)
3164				sk_intr_xmac(sc_if1);
3165			else
3166				sk_intr_yukon(sc_if1);
3167		}
3168
3169		if (status & SK_ISR_EXTERNAL_REG) {
3170			if (ifp0 != NULL &&
3171			    sc_if0->sk_phytype == SK_PHYTYPE_BCOM)
3172				sk_intr_bcom(sc_if0);
3173			if (ifp1 != NULL &&
3174			    sc_if1->sk_phytype == SK_PHYTYPE_BCOM)
3175				sk_intr_bcom(sc_if1);
3176		}
3177		status = CSR_READ_4(sc, SK_ISSR);
3178	}
3179
3180	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
3181
3182	if (ifp0 != NULL && !IFQ_DRV_IS_EMPTY(&ifp0->if_snd))
3183		sk_start_locked(ifp0);
3184	if (ifp1 != NULL && !IFQ_DRV_IS_EMPTY(&ifp1->if_snd))
3185		sk_start_locked(ifp1);
3186
3187done_locked:
3188	SK_UNLOCK(sc);
3189}
3190
3191static void
3192sk_init_xmac(sc_if)
3193	struct sk_if_softc	*sc_if;
3194{
3195	struct sk_softc		*sc;
3196	struct ifnet		*ifp;
3197	u_int16_t		eaddr[(ETHER_ADDR_LEN+1)/2];
3198	static const struct sk_bcom_hack bhack[] = {
3199	{ 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 },
3200	{ 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 },
3201	{ 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
3202	{ 0, 0 } };
3203
3204	SK_IF_LOCK_ASSERT(sc_if);
3205
3206	sc = sc_if->sk_softc;
3207	ifp = sc_if->sk_ifp;
3208
3209	/* Unreset the XMAC. */
3210	SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_UNRESET);
3211	DELAY(1000);
3212
3213	/* Reset the XMAC's internal state. */
3214	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
3215
3216	/* Save the XMAC II revision */
3217	sc_if->sk_xmac_rev = XM_XMAC_REV(SK_XM_READ_4(sc_if, XM_DEVID));
3218
3219	/*
3220	 * Perform additional initialization for external PHYs,
3221	 * namely for the 1000baseTX cards that use the XMAC's
3222	 * GMII mode.
3223	 */
3224	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
3225		int			i = 0;
3226		u_int32_t		val;
3227
3228		/* Take PHY out of reset. */
3229		val = sk_win_read_4(sc, SK_GPIO);
3230		if (sc_if->sk_port == SK_PORT_A)
3231			val |= SK_GPIO_DIR0|SK_GPIO_DAT0;
3232		else
3233			val |= SK_GPIO_DIR2|SK_GPIO_DAT2;
3234		sk_win_write_4(sc, SK_GPIO, val);
3235
3236		/* Enable GMII mode on the XMAC. */
3237		SK_XM_SETBIT_2(sc_if, XM_HWCFG, XM_HWCFG_GMIIMODE);
3238
3239		sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
3240		    BRGPHY_MII_BMCR, BRGPHY_BMCR_RESET);
3241		DELAY(10000);
3242		sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
3243		    BRGPHY_MII_IMR, 0xFFF0);
3244
3245		/*
3246		 * Early versions of the BCM5400 apparently have
3247		 * a bug that requires them to have their reserved
3248		 * registers initialized to some magic values. I don't
3249		 * know what the numbers do, I'm just the messenger.
3250		 */
3251		if (sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, 0x03)
3252		    == 0x6041) {
3253			while(bhack[i].reg) {
3254				sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
3255				    bhack[i].reg, bhack[i].val);
3256				i++;
3257			}
3258		}
3259	}
3260
3261	/* Set station address */
3262	bcopy(IF_LLADDR(sc_if->sk_ifp), eaddr, ETHER_ADDR_LEN);
3263	SK_XM_WRITE_2(sc_if, XM_PAR0, eaddr[0]);
3264	SK_XM_WRITE_2(sc_if, XM_PAR1, eaddr[1]);
3265	SK_XM_WRITE_2(sc_if, XM_PAR2, eaddr[2]);
3266	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_STATION);
3267
3268	if (ifp->if_flags & IFF_BROADCAST) {
3269		SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
3270	} else {
3271		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
3272	}
3273
3274	/* We don't need the FCS appended to the packet. */
3275	SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_STRIPFCS);
3276
3277	/* We want short frames padded to 60 bytes. */
3278	SK_XM_SETBIT_2(sc_if, XM_TXCMD, XM_TXCMD_AUTOPAD);
3279
3280	/*
3281	 * Enable the reception of all error frames. This is is
3282	 * a necessary evil due to the design of the XMAC. The
3283	 * XMAC's receive FIFO is only 8K in size, however jumbo
3284	 * frames can be up to 9000 bytes in length. When bad
3285	 * frame filtering is enabled, the XMAC's RX FIFO operates
3286	 * in 'store and forward' mode. For this to work, the
3287	 * entire frame has to fit into the FIFO, but that means
3288	 * that jumbo frames larger than 8192 bytes will be
3289	 * truncated. Disabling all bad frame filtering causes
3290	 * the RX FIFO to operate in streaming mode, in which
3291	 * case the XMAC will start transfering frames out of the
3292	 * RX FIFO as soon as the FIFO threshold is reached.
3293	 */
3294	if (ifp->if_mtu > SK_MAX_FRAMELEN) {
3295		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_BADFRAMES|
3296		    XM_MODE_RX_GIANTS|XM_MODE_RX_RUNTS|XM_MODE_RX_CRCERRS|
3297		    XM_MODE_RX_INRANGELEN);
3298		SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
3299	} else
3300		SK_XM_CLRBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
3301
3302	/*
3303	 * Bump up the transmit threshold. This helps hold off transmit
3304	 * underruns when we're blasting traffic from both ports at once.
3305	 */
3306	SK_XM_WRITE_2(sc_if, XM_TX_REQTHRESH, SK_XM_TX_FIFOTHRESH);
3307
3308	/* Set Rx filter */
3309	sk_rxfilter_genesis(sc_if);
3310
3311	/* Clear and enable interrupts */
3312	SK_XM_READ_2(sc_if, XM_ISR);
3313	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC)
3314		SK_XM_WRITE_2(sc_if, XM_IMR, XM_INTRS);
3315	else
3316		SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);
3317
3318	/* Configure MAC arbiter */
3319	switch(sc_if->sk_xmac_rev) {
3320	case XM_XMAC_REV_B2:
3321		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_B2);
3322		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_B2);
3323		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_B2);
3324		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_B2);
3325		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_B2);
3326		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_B2);
3327		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_B2);
3328		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_B2);
3329		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
3330		break;
3331	case XM_XMAC_REV_C1:
3332		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_C1);
3333		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_C1);
3334		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_C1);
3335		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_C1);
3336		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_C1);
3337		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_C1);
3338		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_C1);
3339		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_C1);
3340		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
3341		break;
3342	default:
3343		break;
3344	}
3345	sk_win_write_2(sc, SK_MACARB_CTL,
3346	    SK_MACARBCTL_UNRESET|SK_MACARBCTL_FASTOE_OFF);
3347
3348	sc_if->sk_link = 1;
3349
3350	return;
3351}
3352
3353static void
3354sk_init_yukon(sc_if)
3355	struct sk_if_softc	*sc_if;
3356{
3357	u_int32_t		phy, v;
3358	u_int16_t		reg;
3359	struct sk_softc		*sc;
3360	struct ifnet		*ifp;
3361	u_int8_t		*eaddr;
3362	int			i;
3363
3364	SK_IF_LOCK_ASSERT(sc_if);
3365
3366	sc = sc_if->sk_softc;
3367	ifp = sc_if->sk_ifp;
3368
3369	if (sc->sk_type == SK_YUKON_LITE &&
3370	    sc->sk_rev >= SK_YUKON_LITE_REV_A3) {
3371		/*
3372		 * Workaround code for COMA mode, set PHY reset.
3373		 * Otherwise it will not correctly take chip out of
3374		 * powerdown (coma)
3375		 */
3376		v = sk_win_read_4(sc, SK_GPIO);
3377		v |= SK_GPIO_DIR9 | SK_GPIO_DAT9;
3378		sk_win_write_4(sc, SK_GPIO, v);
3379	}
3380
3381	/* GMAC and GPHY Reset */
3382	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, SK_GPHY_RESET_SET);
3383	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
3384	DELAY(1000);
3385
3386	if (sc->sk_type == SK_YUKON_LITE &&
3387	    sc->sk_rev >= SK_YUKON_LITE_REV_A3) {
3388		/*
3389		 * Workaround code for COMA mode, clear PHY reset
3390		 */
3391		v = sk_win_read_4(sc, SK_GPIO);
3392		v |= SK_GPIO_DIR9;
3393		v &= ~SK_GPIO_DAT9;
3394		sk_win_write_4(sc, SK_GPIO, v);
3395	}
3396
3397	phy = SK_GPHY_INT_POL_HI | SK_GPHY_DIS_FC | SK_GPHY_DIS_SLEEP |
3398		SK_GPHY_ENA_XC | SK_GPHY_ANEG_ALL | SK_GPHY_ENA_PAUSE;
3399
3400	if (sc->sk_coppertype)
3401		phy |= SK_GPHY_COPPER;
3402	else
3403		phy |= SK_GPHY_FIBER;
3404
3405	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_SET);
3406	DELAY(1000);
3407	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_CLEAR);
3408	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_LOOP_OFF |
3409		      SK_GMAC_PAUSE_ON | SK_GMAC_RESET_CLEAR);
3410
3411	/* unused read of the interrupt source register */
3412	SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR);
3413
3414	reg = SK_YU_READ_2(sc_if, YUKON_PAR);
3415
3416	/* MIB Counter Clear Mode set */
3417	reg |= YU_PAR_MIB_CLR;
3418	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);
3419
3420	/* MIB Counter Clear Mode clear */
3421	reg &= ~YU_PAR_MIB_CLR;
3422	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);
3423
3424	/* receive control reg */
3425	SK_YU_WRITE_2(sc_if, YUKON_RCR, YU_RCR_CRCR);
3426
3427	/* transmit parameter register */
3428	SK_YU_WRITE_2(sc_if, YUKON_TPR, YU_TPR_JAM_LEN(0x3) |
3429		      YU_TPR_JAM_IPG(0xb) | YU_TPR_JAM2DATA_IPG(0x1a) );
3430
3431	/* serial mode register */
3432	reg = YU_SMR_DATA_BLIND(0x1c) | YU_SMR_MFL_VLAN | YU_SMR_IPG_DATA(0x1e);
3433	if (ifp->if_mtu > SK_MAX_FRAMELEN)
3434		reg |= YU_SMR_MFL_JUMBO;
3435	SK_YU_WRITE_2(sc_if, YUKON_SMR, reg);
3436
3437	/* Setup Yukon's station address */
3438	eaddr = IF_LLADDR(sc_if->sk_ifp);
3439	for (i = 0; i < 3; i++)
3440		SK_YU_WRITE_2(sc_if, SK_MAC0_0 + i * 4,
3441		    eaddr[i * 2] | eaddr[i * 2 + 1] << 8);
3442	/* Set GMAC source address of flow control. */
3443	for (i = 0; i < 3; i++)
3444		SK_YU_WRITE_2(sc_if, YUKON_SAL1 + i * 4,
3445		    eaddr[i * 2] | eaddr[i * 2 + 1] << 8);
3446	/* Set GMAC virtual address. */
3447	for (i = 0; i < 3; i++)
3448		SK_YU_WRITE_2(sc_if, YUKON_SAL2 + i * 4,
3449		    eaddr[i * 2] | eaddr[i * 2 + 1] << 8);
3450
3451	/* Set Rx filter */
3452	sk_rxfilter_yukon(sc_if);
3453
3454	/* enable interrupt mask for counter overflows */
3455	SK_YU_WRITE_2(sc_if, YUKON_TIMR, 0);
3456	SK_YU_WRITE_2(sc_if, YUKON_RIMR, 0);
3457	SK_YU_WRITE_2(sc_if, YUKON_TRIMR, 0);
3458
3459	/* Configure RX MAC FIFO Flush Mask */
3460	v = YU_RXSTAT_FOFL | YU_RXSTAT_CRCERR | YU_RXSTAT_MIIERR |
3461	    YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC | YU_RXSTAT_RUNT |
3462	    YU_RXSTAT_JABBER;
3463	SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_MASK, v);
3464
3465	/* Disable RX MAC FIFO Flush for YUKON-Lite Rev. A0 only */
3466	if (sc->sk_type == SK_YUKON_LITE && sc->sk_rev == SK_YUKON_LITE_REV_A0)
3467		v = SK_TFCTL_OPERATION_ON;
3468	else
3469		v = SK_TFCTL_OPERATION_ON | SK_RFCTL_FIFO_FLUSH_ON;
3470	/* Configure RX MAC FIFO */
3471	SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_CLEAR);
3472	SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_CTRL_TEST, v);
3473
3474	/* Increase flush threshould to 64 bytes */
3475	SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_THRESHOLD,
3476	    SK_RFCTL_FIFO_THRESHOLD + 1);
3477
3478	/* Configure TX MAC FIFO */
3479	SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_CLEAR);
3480	SK_IF_WRITE_2(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_OPERATION_ON);
3481}
3482
3483/*
3484 * Note that to properly initialize any part of the GEnesis chip,
3485 * you first have to take it out of reset mode.
3486 */
3487static void
3488sk_init(xsc)
3489	void			*xsc;
3490{
3491	struct sk_if_softc	*sc_if = xsc;
3492
3493	SK_IF_LOCK(sc_if);
3494	sk_init_locked(sc_if);
3495	SK_IF_UNLOCK(sc_if);
3496
3497	return;
3498}
3499
3500static void
3501sk_init_locked(sc_if)
3502	struct sk_if_softc	*sc_if;
3503{
3504	struct sk_softc		*sc;
3505	struct ifnet		*ifp;
3506	struct mii_data		*mii;
3507	u_int16_t		reg;
3508	u_int32_t		imr;
3509	int			error;
3510
3511	SK_IF_LOCK_ASSERT(sc_if);
3512
3513	ifp = sc_if->sk_ifp;
3514	sc = sc_if->sk_softc;
3515	mii = device_get_softc(sc_if->sk_miibus);
3516
3517	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3518		return;
3519
3520	/* Cancel pending I/O and free all RX/TX buffers. */
3521	sk_stop(sc_if);
3522
3523	if (sc->sk_type == SK_GENESIS) {
3524		/* Configure LINK_SYNC LED */
3525		SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_ON);
3526		SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
3527			SK_LINKLED_LINKSYNC_ON);
3528
3529		/* Configure RX LED */
3530		SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL,
3531			SK_RXLEDCTL_COUNTER_START);
3532
3533		/* Configure TX LED */
3534		SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL,
3535			SK_TXLEDCTL_COUNTER_START);
3536	}
3537
3538	/*
3539	 * Configure descriptor poll timer
3540	 *
3541	 * SK-NET GENESIS data sheet says that possibility of losing Start
3542	 * transmit command due to CPU/cache related interim storage problems
3543	 * under certain conditions. The document recommends a polling
3544	 * mechanism to send a Start transmit command to initiate transfer
3545	 * of ready descriptors regulary. To cope with this issue sk(4) now
3546	 * enables descriptor poll timer to initiate descriptor processing
3547	 * periodically as defined by SK_DPT_TIMER_MAX. However sk(4) still
3548	 * issue SK_TXBMU_TX_START to Tx BMU to get fast execution of Tx
3549	 * command instead of waiting for next descriptor polling time.
3550	 * The same rule may apply to Rx side too but it seems that is not
3551	 * needed at the moment.
3552	 * Since sk(4) uses descriptor polling as a last resort there is no
3553	 * need to set smaller polling time than maximum allowable one.
3554	 */
3555	SK_IF_WRITE_4(sc_if, 0, SK_DPT_INIT, SK_DPT_TIMER_MAX);
3556
3557	/* Configure I2C registers */
3558
3559	/* Configure XMAC(s) */
3560	switch (sc->sk_type) {
3561	case SK_GENESIS:
3562		sk_init_xmac(sc_if);
3563		break;
3564	case SK_YUKON:
3565	case SK_YUKON_LITE:
3566	case SK_YUKON_LP:
3567		sk_init_yukon(sc_if);
3568		break;
3569	}
3570	mii_mediachg(mii);
3571
3572	if (sc->sk_type == SK_GENESIS) {
3573		/* Configure MAC FIFOs */
3574		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_UNRESET);
3575		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_END, SK_FIFO_END);
3576		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_ON);
3577
3578		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_UNRESET);
3579		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_END, SK_FIFO_END);
3580		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_ON);
3581	}
3582
3583	/* Configure transmit arbiter(s) */
3584	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL,
3585	    SK_TXARCTL_ON|SK_TXARCTL_FSYNC_ON);
3586
3587	/* Configure RAMbuffers */
3588	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET);
3589	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart);
3590	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart);
3591	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart);
3592	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend);
3593	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON);
3594
3595	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_UNRESET);
3596	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_STORENFWD_ON);
3597	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_START, sc_if->sk_tx_ramstart);
3598	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_WR_PTR, sc_if->sk_tx_ramstart);
3599	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_RD_PTR, sc_if->sk_tx_ramstart);
3600	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_END, sc_if->sk_tx_ramend);
3601	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_ON);
3602
3603	/* Configure BMUs */
3604	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_ONLINE);
3605	if (ifp->if_mtu > SK_MAX_FRAMELEN) {
3606		SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
3607		    SK_ADDR_LO(SK_JUMBO_RX_RING_ADDR(sc_if, 0)));
3608		SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI,
3609		    SK_ADDR_HI(SK_JUMBO_RX_RING_ADDR(sc_if, 0)));
3610	} else {
3611		SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
3612		    SK_ADDR_LO(SK_RX_RING_ADDR(sc_if, 0)));
3613		SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI,
3614		    SK_ADDR_HI(SK_RX_RING_ADDR(sc_if, 0)));
3615	}
3616
3617	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_ONLINE);
3618	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_LO,
3619	    SK_ADDR_LO(SK_TX_RING_ADDR(sc_if, 0)));
3620	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_HI,
3621	    SK_ADDR_HI(SK_TX_RING_ADDR(sc_if, 0)));
3622
3623	/* Init descriptors */
3624	if (ifp->if_mtu > SK_MAX_FRAMELEN)
3625		error = sk_init_jumbo_rx_ring(sc_if);
3626	else
3627		error = sk_init_rx_ring(sc_if);
3628	if (error != 0) {
3629		device_printf(sc_if->sk_if_dev,
3630		    "initialization failed: no memory for rx buffers\n");
3631		sk_stop(sc_if);
3632		return;
3633	}
3634	sk_init_tx_ring(sc_if);
3635
3636	/* Set interrupt moderation if changed via sysctl. */
3637	imr = sk_win_read_4(sc, SK_IMTIMERINIT);
3638	if (imr != SK_IM_USECS(sc->sk_int_mod, sc->sk_int_ticks)) {
3639		sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(sc->sk_int_mod,
3640		    sc->sk_int_ticks));
3641		if (bootverbose)
3642			device_printf(sc_if->sk_if_dev,
3643			    "interrupt moderation is %d us.\n",
3644			    sc->sk_int_mod);
3645	}
3646
3647	/* Configure interrupt handling */
3648	CSR_READ_4(sc, SK_ISSR);
3649	if (sc_if->sk_port == SK_PORT_A)
3650		sc->sk_intrmask |= SK_INTRS1;
3651	else
3652		sc->sk_intrmask |= SK_INTRS2;
3653
3654	sc->sk_intrmask |= SK_ISR_EXTERNAL_REG;
3655
3656	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
3657
3658	/* Start BMUs. */
3659	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_START);
3660
3661	switch(sc->sk_type) {
3662	case SK_GENESIS:
3663		/* Enable XMACs TX and RX state machines */
3664		SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_IGNPAUSE);
3665		SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
3666		break;
3667	case SK_YUKON:
3668	case SK_YUKON_LITE:
3669	case SK_YUKON_LP:
3670		reg = SK_YU_READ_2(sc_if, YUKON_GPCR);
3671		reg |= YU_GPCR_TXEN | YU_GPCR_RXEN;
3672#if 0
3673		/* XXX disable 100Mbps and full duplex mode? */
3674		reg &= ~(YU_GPCR_SPEED | YU_GPCR_DPLX_DIS);
3675#endif
3676		SK_YU_WRITE_2(sc_if, YUKON_GPCR, reg);
3677	}
3678
3679	/* Activate descriptor polling timer */
3680	SK_IF_WRITE_4(sc_if, 0, SK_DPT_TIMER_CTRL, SK_DPT_TCTL_START);
3681	/* start transfer of Tx descriptors */
3682	CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);
3683
3684	ifp->if_drv_flags |= IFF_DRV_RUNNING;
3685	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3686
3687	switch (sc->sk_type) {
3688	case SK_YUKON:
3689	case SK_YUKON_LITE:
3690	case SK_YUKON_LP:
3691		callout_reset(&sc_if->sk_tick_ch, hz, sk_yukon_tick, sc_if);
3692		break;
3693	}
3694
3695	callout_reset(&sc_if->sk_watchdog_ch, hz, sk_watchdog, ifp);
3696
3697	return;
3698}
3699
3700static void
3701sk_stop(sc_if)
3702	struct sk_if_softc	*sc_if;
3703{
3704	int			i;
3705	struct sk_softc		*sc;
3706	struct sk_txdesc	*txd;
3707	struct sk_rxdesc	*rxd;
3708	struct sk_rxdesc	*jrxd;
3709	struct ifnet		*ifp;
3710	u_int32_t		val;
3711
3712	SK_IF_LOCK_ASSERT(sc_if);
3713	sc = sc_if->sk_softc;
3714	ifp = sc_if->sk_ifp;
3715
3716	callout_stop(&sc_if->sk_tick_ch);
3717	callout_stop(&sc_if->sk_watchdog_ch);
3718
3719	/* stop Tx descriptor polling timer */
3720	SK_IF_WRITE_4(sc_if, 0, SK_DPT_TIMER_CTRL, SK_DPT_TCTL_STOP);
3721	/* stop transfer of Tx descriptors */
3722	CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_STOP);
3723	for (i = 0; i < SK_TIMEOUT; i++) {
3724		val = CSR_READ_4(sc, sc_if->sk_tx_bmu);
3725		if ((val & SK_TXBMU_TX_STOP) == 0)
3726			break;
3727		DELAY(1);
3728	}
3729	if (i == SK_TIMEOUT)
3730		device_printf(sc_if->sk_if_dev,
3731		    "can not stop transfer of Tx descriptor\n");
3732	/* stop transfer of Rx descriptors */
3733	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_STOP);
3734	for (i = 0; i < SK_TIMEOUT; i++) {
3735		val = SK_IF_READ_4(sc_if, 0, SK_RXQ1_BMU_CSR);
3736		if ((val & SK_RXBMU_RX_STOP) == 0)
3737			break;
3738		DELAY(1);
3739	}
3740	if (i == SK_TIMEOUT)
3741		device_printf(sc_if->sk_if_dev,
3742		    "can not stop transfer of Rx descriptor\n");
3743
3744	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
3745		/* Put PHY back into reset. */
3746		val = sk_win_read_4(sc, SK_GPIO);
3747		if (sc_if->sk_port == SK_PORT_A) {
3748			val |= SK_GPIO_DIR0;
3749			val &= ~SK_GPIO_DAT0;
3750		} else {
3751			val |= SK_GPIO_DIR2;
3752			val &= ~SK_GPIO_DAT2;
3753		}
3754		sk_win_write_4(sc, SK_GPIO, val);
3755	}
3756
3757	/* Turn off various components of this interface. */
3758	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
3759	switch (sc->sk_type) {
3760	case SK_GENESIS:
3761		SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_RESET);
3762		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_RESET);
3763		break;
3764	case SK_YUKON:
3765	case SK_YUKON_LITE:
3766	case SK_YUKON_LP:
3767		SK_IF_WRITE_1(sc_if,0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_SET);
3768		SK_IF_WRITE_1(sc_if,0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_SET);
3769		break;
3770	}
3771	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE);
3772	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
3773	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_OFFLINE);
3774	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
3775	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF);
3776	SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
3777	SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
3778	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF);
3779	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF);
3780
3781	/* Disable interrupts */
3782	if (sc_if->sk_port == SK_PORT_A)
3783		sc->sk_intrmask &= ~SK_INTRS1;
3784	else
3785		sc->sk_intrmask &= ~SK_INTRS2;
3786	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
3787
3788	SK_XM_READ_2(sc_if, XM_ISR);
3789	SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);
3790
3791	/* Free RX and TX mbufs still in the queues. */
3792	for (i = 0; i < SK_RX_RING_CNT; i++) {
3793		rxd = &sc_if->sk_cdata.sk_rxdesc[i];
3794		if (rxd->rx_m != NULL) {
3795			bus_dmamap_sync(sc_if->sk_cdata.sk_rx_tag,
3796			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
3797			bus_dmamap_unload(sc_if->sk_cdata.sk_rx_tag,
3798			    rxd->rx_dmamap);
3799			m_freem(rxd->rx_m);
3800			rxd->rx_m = NULL;
3801		}
3802	}
3803	for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) {
3804		jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[i];
3805		if (jrxd->rx_m != NULL) {
3806			bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_tag,
3807			    jrxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
3808			bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_rx_tag,
3809			    jrxd->rx_dmamap);
3810			m_freem(jrxd->rx_m);
3811			jrxd->rx_m = NULL;
3812		}
3813	}
3814	for (i = 0; i < SK_TX_RING_CNT; i++) {
3815		txd = &sc_if->sk_cdata.sk_txdesc[i];
3816		if (txd->tx_m != NULL) {
3817			bus_dmamap_sync(sc_if->sk_cdata.sk_tx_tag,
3818			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
3819			bus_dmamap_unload(sc_if->sk_cdata.sk_tx_tag,
3820			    txd->tx_dmamap);
3821			m_freem(txd->tx_m);
3822			txd->tx_m = NULL;
3823		}
3824	}
3825
3826	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING|IFF_DRV_OACTIVE);
3827
3828	return;
3829}
3830
3831static int
3832sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
3833{
3834	int error, value;
3835
3836	if (!arg1)
3837		return (EINVAL);
3838	value = *(int *)arg1;
3839	error = sysctl_handle_int(oidp, &value, 0, req);
3840	if (error || !req->newptr)
3841		return (error);
3842	if (value < low || value > high)
3843		return (EINVAL);
3844	*(int *)arg1 = value;
3845	return (0);
3846}
3847
3848static int
3849sysctl_hw_sk_int_mod(SYSCTL_HANDLER_ARGS)
3850{
3851	return (sysctl_int_range(oidp, arg1, arg2, req, SK_IM_MIN, SK_IM_MAX));
3852}
3853