1/*	$OpenBSD: xl.c,v 1.139 2023/11/10 15:51:20 bluhm Exp $	*/
2
3/*
4 * Copyright (c) 1997, 1998, 1999
5 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in the
14 *    documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 *    must display the following acknowledgement:
17 *	This product includes software developed by Bill Paul.
18 * 4. Neither the name of the author nor the names of any co-contributors
19 *    may be used to endorse or promote products derived from this software
20 *    without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 * $FreeBSD: if_xl.c,v 1.77 2000/08/28 20:40:03 wpaul Exp $
35 */
36
37/*
38 * 3Com 3c90x Etherlink XL PCI NIC driver
39 *
40 * Supports the 3Com "boomerang", "cyclone", and "hurricane" PCI
41 * bus-master chips (3c90x cards and embedded controllers) including
42 * the following:
43 *
44 * 3Com 3c900-TPO	10Mbps/RJ-45
45 * 3Com 3c900-COMBO	10Mbps/RJ-45,AUI,BNC
46 * 3Com 3c905-TX	10/100Mbps/RJ-45
47 * 3Com 3c905-T4	10/100Mbps/RJ-45
48 * 3Com 3c900B-TPO	10Mbps/RJ-45
49 * 3Com 3c900B-COMBO	10Mbps/RJ-45,AUI,BNC
50 * 3Com 3c900B-TPC	10Mbps/RJ-45,BNC
51 * 3Com 3c900B-FL	10Mbps/Fiber-optic
52 * 3Com 3c905B-COMBO	10/100Mbps/RJ-45,AUI,BNC
53 * 3Com 3c905B-TX	10/100Mbps/RJ-45
54 * 3Com 3c905B-FL/FX	10/100Mbps/Fiber-optic
55 * 3Com 3c905C-TX	10/100Mbps/RJ-45 (Tornado ASIC)
56 * 3Com 3c980-TX	10/100Mbps server adapter (Hurricane ASIC)
57 * 3Com 3c980C-TX	10/100Mbps server adapter (Tornado ASIC)
58 * 3Com 3cSOHO100-TX	10/100Mbps/RJ-45 (Hurricane ASIC)
59 * 3Com 3c450-TX	10/100Mbps/RJ-45 (Tornado ASIC)
60 * 3Com 3c555		10/100Mbps/RJ-45 (MiniPCI, Laptop Hurricane)
61 * 3Com 3c556		10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC)
62 * 3Com 3c556B		10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC)
63 * 3Com 3c575TX		10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
64 * 3Com 3c575B		10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
65 * 3Com 3c575C		10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
66 * 3Com 3cxfem656	10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
67 * 3Com 3cxfem656b	10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
68 * 3Com 3cxfem656c	10/100Mbps/RJ-45 (Cardbus, Tornado ASIC)
69 * Dell Optiplex GX1 on-board 3c918 10/100Mbps/RJ-45
70 * Dell on-board 3c920 10/100Mbps/RJ-45
71 * Dell Precision on-board 3c905B 10/100Mbps/RJ-45
72 * Dell Latitude laptop docking station embedded 3c905-TX
73 *
74 * Written by Bill Paul <wpaul@ctr.columbia.edu>
75 * Electrical Engineering Department
76 * Columbia University, New York City
77 */
78
79/*
80 * The 3c90x series chips use a bus-master DMA interface for transferring
81 * packets to and from the controller chip. Some of the "vortex" cards
82 * (3c59x) also supported a bus master mode, however for those chips
83 * you could only DMA packets to/from a contiguous memory buffer. For
84 * transmission this would mean copying the contents of the queued mbuf
85 * chain into an mbuf cluster and then DMAing the cluster. This extra
86 * copy would sort of defeat the purpose of the bus master support for
87 * any packet that doesn't fit into a single mbuf.
88 *
89 * By contrast, the 3c90x cards support a fragment-based bus master
90 * mode where mbuf chains can be encapsulated using TX descriptors.
91 * This is similar to other PCI chips such as the Texas Instruments
92 * ThunderLAN and the Intel 82557/82558.
93 *
94 * The "vortex" driver (if_vx.c) happens to work for the "boomerang"
95 * bus master chips because they maintain the old PIO interface for
96 * backwards compatibility, but starting with the 3c905B and the
97 * "cyclone" chips, the compatibility interface has been dropped.
98 * Since using bus master DMA is a big win, we use this driver to
99 * support the PCI "boomerang" chips even though they work with the
100 * "vortex" driver in order to obtain better performance.
101 */
102
103#include "bpfilter.h"
104
105#include <sys/param.h>
106#include <sys/systm.h>
107#include <sys/mbuf.h>
108#include <sys/socket.h>
109#include <sys/ioctl.h>
110#include <sys/errno.h>
111#include <sys/malloc.h>
112#include <sys/kernel.h>
113#include <sys/device.h>
114
115#include <net/if.h>
116#include <net/if_media.h>
117
118#include <netinet/in.h>
119#include <netinet/if_ether.h>
120
121#include <dev/mii/miivar.h>
122
123#include <machine/bus.h>
124
125#if NBPFILTER > 0
126#include <net/bpf.h>
127#endif
128
129#include <dev/ic/xlreg.h>
130
131/*
132 * TX Checksumming is disabled by default for two reasons:
133 * - TX Checksumming will occasionally produce corrupt packets
134 * - TX Checksumming seems to reduce performance
135 *
136 * Only 905B/C cards were reported to have this problem, it is possible
137 * that later chips _may_ be immune.
138 */
139#define	XL905B_TXCSUM_BROKEN	1
140
141int xl_newbuf(struct xl_softc *, struct xl_chain_onefrag *);
142void xl_stats_update(void *);
143int xl_encap(struct xl_softc *, struct xl_chain *,
144    struct mbuf * );
145void xl_rxeof(struct xl_softc *);
146void xl_txeof(struct xl_softc *);
147void xl_txeof_90xB(struct xl_softc *);
148void xl_txeoc(struct xl_softc *);
149int xl_intr(void *);
150void xl_start(struct ifnet *);
151void xl_start_90xB(struct ifnet *);
152int xl_ioctl(struct ifnet *, u_long, caddr_t);
153void xl_freetxrx(struct xl_softc *);
154void xl_watchdog(struct ifnet *);
155int xl_ifmedia_upd(struct ifnet *);
156void xl_ifmedia_sts(struct ifnet *, struct ifmediareq *);
157
158int xl_eeprom_wait(struct xl_softc *);
159int xl_read_eeprom(struct xl_softc *, caddr_t, int, int, int);
160void xl_mii_sync(struct xl_softc *);
161void xl_mii_send(struct xl_softc *, u_int32_t, int);
162int xl_mii_readreg(struct xl_softc *, struct xl_mii_frame *);
163int xl_mii_writereg(struct xl_softc *, struct xl_mii_frame *);
164
165void xl_setcfg(struct xl_softc *);
166void xl_setmode(struct xl_softc *, uint64_t);
167void xl_iff(struct xl_softc *);
168void xl_iff_90x(struct xl_softc *);
169void xl_iff_905b(struct xl_softc *);
170int xl_list_rx_init(struct xl_softc *);
171void xl_fill_rx_ring(struct xl_softc *);
172int xl_list_tx_init(struct xl_softc *);
173int xl_list_tx_init_90xB(struct xl_softc *);
174void xl_wait(struct xl_softc *);
175void xl_mediacheck(struct xl_softc *);
176void xl_choose_xcvr(struct xl_softc *, int);
177
178int xl_miibus_readreg(struct device *, int, int);
179void xl_miibus_writereg(struct device *, int, int, int);
180void xl_miibus_statchg(struct device *);
181#ifndef SMALL_KERNEL
182int xl_wol(struct ifnet *, int);
183void xl_wol_power(struct xl_softc *);
184#endif
185
186int
187xl_activate(struct device *self, int act)
188{
189	struct xl_softc *sc = (struct xl_softc *)self;
190	struct ifnet	*ifp = &sc->sc_arpcom.ac_if;
191	int rv = 0;
192
193	switch (act) {
194	case DVACT_SUSPEND:
195		if (ifp->if_flags & IFF_RUNNING)
196			xl_stop(sc);
197		rv = config_activate_children(self, act);
198		break;
199	case DVACT_RESUME:
200		if (ifp->if_flags & IFF_UP)
201			xl_init(sc);
202		break;
203	case DVACT_POWERDOWN:
204		rv = config_activate_children(self, act);
205#ifndef SMALL_KERNEL
206		xl_wol_power(sc);
207#endif
208		break;
209	default:
210		rv = config_activate_children(self, act);
211		break;
212	}
213	return (rv);
214}
215
216/*
217 * Murphy's law says that it's possible the chip can wedge and
218 * the 'command in progress' bit may never clear. Hence, we wait
219 * only a finite amount of time to avoid getting caught in an
220 * infinite loop. Normally this delay routine would be a macro,
221 * but it isn't called during normal operation so we can afford
222 * to make it a function.
223 */
224void
225xl_wait(struct xl_softc *sc)
226{
227	int	i;
228
229	for (i = 0; i < XL_TIMEOUT; i++) {
230		if (!(CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY))
231			break;
232	}
233
234	if (i == XL_TIMEOUT)
235		printf("%s: command never completed!\n", sc->sc_dev.dv_xname);
236}
237
238/*
239 * MII access routines are provided for adapters with external
240 * PHYs (3c905-TX, 3c905-T4, 3c905B-T4) and those with built-in
241 * autoneg logic that's faked up to look like a PHY (3c905B-TX).
242 * Note: if you don't perform the MDIO operations just right,
243 * it's possible to end up with code that works correctly with
244 * some chips/CPUs/processor speeds/bus speeds/etc but not
245 * with others.
246 */
247#define MII_SET(x)					\
248	CSR_WRITE_2(sc, XL_W4_PHY_MGMT,			\
249		CSR_READ_2(sc, XL_W4_PHY_MGMT) | (x))
250
251#define MII_CLR(x)					\
252	CSR_WRITE_2(sc, XL_W4_PHY_MGMT,			\
253		CSR_READ_2(sc, XL_W4_PHY_MGMT) & ~(x))
254
255/*
256 * Sync the PHYs by setting data bit and strobing the clock 32 times.
257 */
258void
259xl_mii_sync(struct xl_softc *sc)
260{
261	int	i;
262
263	XL_SEL_WIN(4);
264	MII_SET(XL_MII_DIR|XL_MII_DATA);
265
266	for (i = 0; i < 32; i++) {
267		MII_SET(XL_MII_CLK);
268		MII_SET(XL_MII_DATA);
269		MII_SET(XL_MII_DATA);
270		MII_CLR(XL_MII_CLK);
271		MII_SET(XL_MII_DATA);
272		MII_SET(XL_MII_DATA);
273	}
274}
275
276/*
277 * Clock a series of bits through the MII.
278 */
279void
280xl_mii_send(struct xl_softc *sc, u_int32_t bits, int cnt)
281{
282	int	i;
283
284	XL_SEL_WIN(4);
285	MII_CLR(XL_MII_CLK);
286
287	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
288                if (bits & i) {
289			MII_SET(XL_MII_DATA);
290                } else {
291			MII_CLR(XL_MII_DATA);
292                }
293		MII_CLR(XL_MII_CLK);
294		MII_SET(XL_MII_CLK);
295	}
296}
297
298/*
299 * Read an PHY register through the MII.
300 */
301int
302xl_mii_readreg(struct xl_softc *sc, struct xl_mii_frame *frame)
303{
304	int	i, ack, s;
305
306	s = splnet();
307
308	/*
309	 * Set up frame for RX.
310	 */
311	frame->mii_stdelim = XL_MII_STARTDELIM;
312	frame->mii_opcode = XL_MII_READOP;
313	frame->mii_turnaround = 0;
314	frame->mii_data = 0;
315
316	/*
317	 * Select register window 4.
318	 */
319
320	XL_SEL_WIN(4);
321
322	CSR_WRITE_2(sc, XL_W4_PHY_MGMT, 0);
323	/*
324 	 * Turn on data xmit.
325	 */
326	MII_SET(XL_MII_DIR);
327
328	xl_mii_sync(sc);
329
330	/*
331	 * Send command/address info.
332	 */
333	xl_mii_send(sc, frame->mii_stdelim, 2);
334	xl_mii_send(sc, frame->mii_opcode, 2);
335	xl_mii_send(sc, frame->mii_phyaddr, 5);
336	xl_mii_send(sc, frame->mii_regaddr, 5);
337
338	/* Idle bit */
339	MII_CLR((XL_MII_CLK|XL_MII_DATA));
340	MII_SET(XL_MII_CLK);
341
342	/* Turn off xmit. */
343	MII_CLR(XL_MII_DIR);
344
345	/* Check for ack */
346	MII_CLR(XL_MII_CLK);
347	ack = CSR_READ_2(sc, XL_W4_PHY_MGMT) & XL_MII_DATA;
348	MII_SET(XL_MII_CLK);
349
350	/*
351	 * Now try reading data bits. If the ack failed, we still
352	 * need to clock through 16 cycles to keep the PHY(s) in sync.
353	 */
354	if (ack) {
355		for(i = 0; i < 16; i++) {
356			MII_CLR(XL_MII_CLK);
357			MII_SET(XL_MII_CLK);
358		}
359		goto fail;
360	}
361
362	for (i = 0x8000; i; i >>= 1) {
363		MII_CLR(XL_MII_CLK);
364		if (!ack) {
365			if (CSR_READ_2(sc, XL_W4_PHY_MGMT) & XL_MII_DATA)
366				frame->mii_data |= i;
367		}
368		MII_SET(XL_MII_CLK);
369	}
370
371fail:
372
373	MII_CLR(XL_MII_CLK);
374	MII_SET(XL_MII_CLK);
375
376	splx(s);
377
378	if (ack)
379		return (1);
380	return (0);
381}
382
383/*
384 * Write to a PHY register through the MII.
385 */
386int
387xl_mii_writereg(struct xl_softc *sc, struct xl_mii_frame *frame)
388{
389	int	s;
390
391	s = splnet();
392
393	/*
394	 * Set up frame for TX.
395	 */
396
397	frame->mii_stdelim = XL_MII_STARTDELIM;
398	frame->mii_opcode = XL_MII_WRITEOP;
399	frame->mii_turnaround = XL_MII_TURNAROUND;
400
401	/*
402	 * Select the window 4.
403	 */
404	XL_SEL_WIN(4);
405
406	/*
407 	 * Turn on data output.
408	 */
409	MII_SET(XL_MII_DIR);
410
411	xl_mii_sync(sc);
412
413	xl_mii_send(sc, frame->mii_stdelim, 2);
414	xl_mii_send(sc, frame->mii_opcode, 2);
415	xl_mii_send(sc, frame->mii_phyaddr, 5);
416	xl_mii_send(sc, frame->mii_regaddr, 5);
417	xl_mii_send(sc, frame->mii_turnaround, 2);
418	xl_mii_send(sc, frame->mii_data, 16);
419
420	/* Idle bit. */
421	MII_SET(XL_MII_CLK);
422	MII_CLR(XL_MII_CLK);
423
424	/*
425	 * Turn off xmit.
426	 */
427	MII_CLR(XL_MII_DIR);
428
429	splx(s);
430
431	return (0);
432}
433
434int
435xl_miibus_readreg(struct device *self, int phy, int reg)
436{
437	struct xl_softc *sc = (struct xl_softc *)self;
438	struct xl_mii_frame	frame;
439
440	if (!(sc->xl_flags & XL_FLAG_PHYOK) && phy != 24)
441		return (0);
442
443	bzero(&frame, sizeof(frame));
444
445	frame.mii_phyaddr = phy;
446	frame.mii_regaddr = reg;
447	xl_mii_readreg(sc, &frame);
448
449	return (frame.mii_data);
450}
451
452void
453xl_miibus_writereg(struct device *self, int phy, int reg, int data)
454{
455	struct xl_softc *sc = (struct xl_softc *)self;
456	struct xl_mii_frame	frame;
457
458	if (!(sc->xl_flags & XL_FLAG_PHYOK) && phy != 24)
459		return;
460
461	bzero(&frame, sizeof(frame));
462
463	frame.mii_phyaddr = phy;
464	frame.mii_regaddr = reg;
465	frame.mii_data = data;
466
467	xl_mii_writereg(sc, &frame);
468}
469
470void
471xl_miibus_statchg(struct device *self)
472{
473	struct xl_softc *sc = (struct xl_softc *)self;
474
475	xl_setcfg(sc);
476
477	/* Set ASIC's duplex mode to match the PHY. */
478	XL_SEL_WIN(3);
479	if ((sc->sc_mii.mii_media_active & IFM_GMASK) == IFM_FDX)
480		CSR_WRITE_1(sc, XL_W3_MAC_CTRL, XL_MACCTRL_DUPLEX);
481	else
482		CSR_WRITE_1(sc, XL_W3_MAC_CTRL,
483		    (CSR_READ_1(sc, XL_W3_MAC_CTRL) & ~XL_MACCTRL_DUPLEX));
484}
485
486/*
487 * The EEPROM is slow: give it time to come ready after issuing
488 * it a command.
489 */
490int
491xl_eeprom_wait(struct xl_softc *sc)
492{
493	int	i;
494
495	for (i = 0; i < 100; i++) {
496		if (CSR_READ_2(sc, XL_W0_EE_CMD) & XL_EE_BUSY)
497			DELAY(162);
498		else
499			break;
500	}
501
502	if (i == 100) {
503		printf("%s: eeprom failed to come ready\n", sc->sc_dev.dv_xname);
504		return (1);
505	}
506
507	return (0);
508}
509
510/*
511 * Read a sequence of words from the EEPROM. Note that ethernet address
512 * data is stored in the EEPROM in network byte order.
513 */
514int
515xl_read_eeprom(struct xl_softc *sc, caddr_t dest, int off, int cnt, int swap)
516{
517	int		err = 0, i;
518	u_int16_t	word = 0, *ptr;
519#define EEPROM_5BIT_OFFSET(A) ((((A) << 2) & 0x7F00) | ((A) & 0x003F))
520#define EEPROM_8BIT_OFFSET(A) ((A) & 0x003F)
521	/* WARNING! DANGER!
522	 * It's easy to accidentally overwrite the rom content!
523	 * Note: the 3c575 uses 8bit EEPROM offsets.
524	 */
525	XL_SEL_WIN(0);
526
527	if (xl_eeprom_wait(sc))
528		return (1);
529
530	if (sc->xl_flags & XL_FLAG_EEPROM_OFFSET_30)
531		off += 0x30;
532
533	for (i = 0; i < cnt; i++) {
534		if (sc->xl_flags & XL_FLAG_8BITROM)
535			CSR_WRITE_2(sc, XL_W0_EE_CMD,
536			    XL_EE_8BIT_READ | EEPROM_8BIT_OFFSET(off + i));
537		else
538			CSR_WRITE_2(sc, XL_W0_EE_CMD,
539			    XL_EE_READ | EEPROM_5BIT_OFFSET(off + i));
540		err = xl_eeprom_wait(sc);
541		if (err)
542			break;
543		word = CSR_READ_2(sc, XL_W0_EE_DATA);
544		ptr = (u_int16_t *)(dest + (i * 2));
545		if (swap)
546			*ptr = ntohs(word);
547		else
548			*ptr = word;
549	}
550
551	return (err ? 1 : 0);
552}
553
554void
555xl_iff(struct xl_softc *sc)
556{
557	if (sc->xl_type == XL_TYPE_905B)
558		xl_iff_905b(sc);
559	else
560		xl_iff_90x(sc);
561}
562
563/*
564 * NICs older than the 3c905B have only one multicast option, which
565 * is to enable reception of all multicast frames.
566 */
567void
568xl_iff_90x(struct xl_softc *sc)
569{
570	struct ifnet	*ifp = &sc->sc_arpcom.ac_if;
571	struct arpcom	*ac = &sc->sc_arpcom;
572	u_int8_t	rxfilt;
573
574	XL_SEL_WIN(5);
575
576	rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
577	rxfilt &= ~(XL_RXFILTER_ALLFRAMES | XL_RXFILTER_ALLMULTI |
578	    XL_RXFILTER_BROADCAST | XL_RXFILTER_INDIVIDUAL);
579	ifp->if_flags &= ~IFF_ALLMULTI;
580
581	/*
582	 * Always accept broadcast frames.
583	 * Always accept frames destined to our station address.
584	 */
585	rxfilt |= XL_RXFILTER_BROADCAST | XL_RXFILTER_INDIVIDUAL;
586
587	if (ifp->if_flags & IFF_PROMISC || ac->ac_multicnt > 0) {
588		ifp->if_flags |= IFF_ALLMULTI;
589		if (ifp->if_flags & IFF_PROMISC)
590			rxfilt |= XL_RXFILTER_ALLFRAMES;
591		else
592			rxfilt |= XL_RXFILTER_ALLMULTI;
593	}
594
595	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT | rxfilt);
596
597	XL_SEL_WIN(7);
598}
599
600/*
601 * 3c905B adapters have a hash filter that we can program.
602 */
603void
604xl_iff_905b(struct xl_softc *sc)
605{
606	struct ifnet	*ifp = &sc->sc_arpcom.ac_if;
607	struct arpcom	*ac = &sc->sc_arpcom;
608	int		h = 0, i;
609	struct ether_multi *enm;
610	struct ether_multistep step;
611	u_int8_t	rxfilt;
612
613	XL_SEL_WIN(5);
614
615	rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
616	rxfilt &= ~(XL_RXFILTER_ALLFRAMES | XL_RXFILTER_ALLMULTI |
617	    XL_RXFILTER_BROADCAST | XL_RXFILTER_INDIVIDUAL |
618	    XL_RXFILTER_MULTIHASH);
619	ifp->if_flags &= ~IFF_ALLMULTI;
620
621	/*
622	 * Always accept broadcast frames.
623	 * Always accept frames destined to our station address.
624	 */
625	rxfilt |= XL_RXFILTER_BROADCAST | XL_RXFILTER_INDIVIDUAL;
626
627	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
628		ifp->if_flags |= IFF_ALLMULTI;
629		if (ifp->if_flags & IFF_PROMISC)
630			rxfilt |= XL_RXFILTER_ALLFRAMES;
631		else
632			rxfilt |= XL_RXFILTER_ALLMULTI;
633	} else {
634		rxfilt |= XL_RXFILTER_MULTIHASH;
635
636		/* first, zot all the existing hash bits */
637		for (i = 0; i < XL_HASHFILT_SIZE; i++)
638			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_HASH|i);
639
640		/* now program new ones */
641		ETHER_FIRST_MULTI(step, ac, enm);
642		while (enm != NULL) {
643			h = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) &
644			    0x000000FF;
645			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_HASH |
646			    XL_HASH_SET | h);
647
648			ETHER_NEXT_MULTI(step, enm);
649		}
650	}
651
652	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT | rxfilt);
653
654	XL_SEL_WIN(7);
655}
656
657void
658xl_setcfg(struct xl_softc *sc)
659{
660	u_int32_t icfg;
661
662	XL_SEL_WIN(3);
663	icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG);
664	icfg &= ~XL_ICFG_CONNECTOR_MASK;
665	if (sc->xl_media & XL_MEDIAOPT_MII ||
666		sc->xl_media & XL_MEDIAOPT_BT4)
667		icfg |= (XL_XCVR_MII << XL_ICFG_CONNECTOR_BITS);
668	if (sc->xl_media & XL_MEDIAOPT_BTX)
669		icfg |= (XL_XCVR_AUTO << XL_ICFG_CONNECTOR_BITS);
670
671	CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg);
672	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
673}
674
675void
676xl_setmode(struct xl_softc *sc, uint64_t media)
677{
678	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
679	u_int32_t icfg;
680	u_int16_t mediastat;
681
682	XL_SEL_WIN(4);
683	mediastat = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
684	XL_SEL_WIN(3);
685	icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG);
686
687	if (sc->xl_media & XL_MEDIAOPT_BT) {
688		if (IFM_SUBTYPE(media) == IFM_10_T) {
689			ifp->if_baudrate = IF_Mbps(10);
690			sc->xl_xcvr = XL_XCVR_10BT;
691			icfg &= ~XL_ICFG_CONNECTOR_MASK;
692			icfg |= (XL_XCVR_10BT << XL_ICFG_CONNECTOR_BITS);
693			mediastat |= XL_MEDIASTAT_LINKBEAT|
694					XL_MEDIASTAT_JABGUARD;
695			mediastat &= ~XL_MEDIASTAT_SQEENB;
696		}
697	}
698
699	if (sc->xl_media & XL_MEDIAOPT_BFX) {
700		if (IFM_SUBTYPE(media) == IFM_100_FX) {
701			ifp->if_baudrate = IF_Mbps(100);
702			sc->xl_xcvr = XL_XCVR_100BFX;
703			icfg &= ~XL_ICFG_CONNECTOR_MASK;
704			icfg |= (XL_XCVR_100BFX << XL_ICFG_CONNECTOR_BITS);
705			mediastat |= XL_MEDIASTAT_LINKBEAT;
706			mediastat &= ~XL_MEDIASTAT_SQEENB;
707		}
708	}
709
710	if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) {
711		if (IFM_SUBTYPE(media) == IFM_10_5) {
712			ifp->if_baudrate = IF_Mbps(10);
713			sc->xl_xcvr = XL_XCVR_AUI;
714			icfg &= ~XL_ICFG_CONNECTOR_MASK;
715			icfg |= (XL_XCVR_AUI << XL_ICFG_CONNECTOR_BITS);
716			mediastat &= ~(XL_MEDIASTAT_LINKBEAT|
717					XL_MEDIASTAT_JABGUARD);
718			mediastat |= ~XL_MEDIASTAT_SQEENB;
719		}
720		if (IFM_SUBTYPE(media) == IFM_10_FL) {
721			ifp->if_baudrate = IF_Mbps(10);
722			sc->xl_xcvr = XL_XCVR_AUI;
723			icfg &= ~XL_ICFG_CONNECTOR_MASK;
724			icfg |= (XL_XCVR_AUI << XL_ICFG_CONNECTOR_BITS);
725			mediastat &= ~(XL_MEDIASTAT_LINKBEAT|
726					XL_MEDIASTAT_JABGUARD);
727			mediastat |= ~XL_MEDIASTAT_SQEENB;
728		}
729	}
730
731	if (sc->xl_media & XL_MEDIAOPT_BNC) {
732		if (IFM_SUBTYPE(media) == IFM_10_2) {
733			ifp->if_baudrate = IF_Mbps(10);
734			sc->xl_xcvr = XL_XCVR_COAX;
735			icfg &= ~XL_ICFG_CONNECTOR_MASK;
736			icfg |= (XL_XCVR_COAX << XL_ICFG_CONNECTOR_BITS);
737			mediastat &= ~(XL_MEDIASTAT_LINKBEAT|
738					XL_MEDIASTAT_JABGUARD|
739					XL_MEDIASTAT_SQEENB);
740		}
741	}
742
743	if ((media & IFM_GMASK) == IFM_FDX ||
744			IFM_SUBTYPE(media) == IFM_100_FX) {
745		XL_SEL_WIN(3);
746		CSR_WRITE_1(sc, XL_W3_MAC_CTRL, XL_MACCTRL_DUPLEX);
747	} else {
748		XL_SEL_WIN(3);
749		CSR_WRITE_1(sc, XL_W3_MAC_CTRL,
750			(CSR_READ_1(sc, XL_W3_MAC_CTRL) & ~XL_MACCTRL_DUPLEX));
751	}
752
753	if (IFM_SUBTYPE(media) == IFM_10_2)
754		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START);
755	else
756		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
757	CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg);
758	XL_SEL_WIN(4);
759	CSR_WRITE_2(sc, XL_W4_MEDIA_STATUS, mediastat);
760	DELAY(800);
761	XL_SEL_WIN(7);
762}
763
764void
765xl_reset(struct xl_softc *sc)
766{
767	int	i;
768
769	XL_SEL_WIN(0);
770	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RESET |
771		    ((sc->xl_flags & XL_FLAG_WEIRDRESET) ?
772		     XL_RESETOPT_DISADVFD:0));
773
774	/*
775	 * Pause briefly after issuing the reset command before trying
776	 * to access any other registers. With my 3c575C cardbus card,
777	 * failing to do this results in the system locking up while
778	 * trying to poll the command busy bit in the status register.
779	 */
780	DELAY(100000);
781
782	for (i = 0; i < XL_TIMEOUT; i++) {
783		DELAY(10);
784		if (!(CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY))
785			break;
786	}
787
788	if (i == XL_TIMEOUT)
789		printf("%s: reset didn't complete\n", sc->sc_dev.dv_xname);
790
791	/* Note: the RX reset takes an absurd amount of time
792	 * on newer versions of the Tornado chips such as those
793	 * on the 3c905CX and newer 3c908C cards. We wait an
794	 * extra amount of time so that xl_wait() doesn't complain
795	 * and annoy the users.
796	 */
797	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
798	DELAY(100000);
799	xl_wait(sc);
800	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
801	xl_wait(sc);
802
803	if (sc->xl_flags & XL_FLAG_INVERT_LED_PWR ||
804	    sc->xl_flags & XL_FLAG_INVERT_MII_PWR) {
805		XL_SEL_WIN(2);
806		CSR_WRITE_2(sc, XL_W2_RESET_OPTIONS, CSR_READ_2(sc,
807		    XL_W2_RESET_OPTIONS)
808		    | ((sc->xl_flags & XL_FLAG_INVERT_LED_PWR)?XL_RESETOPT_INVERT_LED:0)
809		    | ((sc->xl_flags & XL_FLAG_INVERT_MII_PWR)?XL_RESETOPT_INVERT_MII:0)
810		    );
811	}
812
813	/* Wait a little while for the chip to get its brains in order. */
814	DELAY(100000);
815}
816
817/*
818 * This routine is a kludge to work around possible hardware faults
819 * or manufacturing defects that can cause the media options register
820 * (or reset options register, as it's called for the first generation
821 * 3c90x adapters) to return an incorrect result. I have encountered
822 * one Dell Latitude laptop docking station with an integrated 3c905-TX
823 * which doesn't have any of the 'mediaopt' bits set. This screws up
824 * the attach routine pretty badly because it doesn't know what media
825 * to look for. If we find ourselves in this predicament, this routine
826 * will try to guess the media options values and warn the user of a
827 * possible manufacturing defect with his adapter/system/whatever.
828 */
829void
830xl_mediacheck(struct xl_softc *sc)
831{
832	/*
833	 * If some of the media options bits are set, assume they are
834	 * correct. If not, try to figure it out down below.
835	 * XXX I should check for 10baseFL, but I don't have an adapter
836	 * to test with.
837	 */
838	if (sc->xl_media & (XL_MEDIAOPT_MASK & ~XL_MEDIAOPT_VCO)) {
839		/*
840	 	 * Check the XCVR value. If it's not in the normal range
841	 	 * of values, we need to fake it up here.
842	 	 */
843		if (sc->xl_xcvr <= XL_XCVR_AUTO)
844			return;
845		else {
846			printf("%s: bogus xcvr value "
847			"in EEPROM (%x)\n", sc->sc_dev.dv_xname, sc->xl_xcvr);
848			printf("%s: choosing new default based "
849				"on card type\n", sc->sc_dev.dv_xname);
850		}
851	} else {
852		if (sc->xl_type == XL_TYPE_905B &&
853		    sc->xl_media & XL_MEDIAOPT_10FL)
854			return;
855		printf("%s: WARNING: no media options bits set in "
856			"the media options register!!\n", sc->sc_dev.dv_xname);
857		printf("%s: this could be a manufacturing defect in "
858			"your adapter or system\n", sc->sc_dev.dv_xname);
859		printf("%s: attempting to guess media type; you "
860			"should probably consult your vendor\n", sc->sc_dev.dv_xname);
861	}
862
863	xl_choose_xcvr(sc, 1);
864}
865
866void
867xl_choose_xcvr(struct xl_softc *sc, int verbose)
868{
869	u_int16_t devid;
870
871	/*
872	 * Read the device ID from the EEPROM.
873	 * This is what's loaded into the PCI device ID register, so it has
874	 * to be correct otherwise we wouldn't have gotten this far.
875	 */
876	xl_read_eeprom(sc, (caddr_t)&devid, XL_EE_PRODID, 1, 0);
877
878	switch(devid) {
879	case TC_DEVICEID_BOOMERANG_10BT:	/* 3c900-TPO */
880	case TC_DEVICEID_KRAKATOA_10BT:		/* 3c900B-TPO */
881		sc->xl_media = XL_MEDIAOPT_BT;
882		sc->xl_xcvr = XL_XCVR_10BT;
883		if (verbose)
884			printf("%s: guessing 10BaseT transceiver\n",
885			    sc->sc_dev.dv_xname);
886		break;
887	case TC_DEVICEID_BOOMERANG_10BT_COMBO:	/* 3c900-COMBO */
888	case TC_DEVICEID_KRAKATOA_10BT_COMBO:	/* 3c900B-COMBO */
889		sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI;
890		sc->xl_xcvr = XL_XCVR_10BT;
891		if (verbose)
892			printf("%s: guessing COMBO (AUI/BNC/TP)\n",
893			    sc->sc_dev.dv_xname);
894		break;
895	case TC_DEVICEID_KRAKATOA_10BT_TPC:	/* 3c900B-TPC */
896		sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC;
897		sc->xl_xcvr = XL_XCVR_10BT;
898		if (verbose)
899			printf("%s: guessing TPC (BNC/TP)\n", sc->sc_dev.dv_xname);
900		break;
901	case TC_DEVICEID_CYCLONE_10FL:		/* 3c900B-FL */
902		sc->xl_media = XL_MEDIAOPT_10FL;
903		sc->xl_xcvr = XL_XCVR_AUI;
904		if (verbose)
905			printf("%s: guessing 10baseFL\n", sc->sc_dev.dv_xname);
906		break;
907	case TC_DEVICEID_BOOMERANG_10_100BT:	/* 3c905-TX */
908	case TC_DEVICEID_HURRICANE_555:		/* 3c555 */
909	case TC_DEVICEID_HURRICANE_556:		/* 3c556 */
910	case TC_DEVICEID_HURRICANE_556B:	/* 3c556B */
911	case TC_DEVICEID_HURRICANE_575A:	/* 3c575TX */
912	case TC_DEVICEID_HURRICANE_575B:	/* 3c575B */
913	case TC_DEVICEID_HURRICANE_575C:	/* 3c575C */
914	case TC_DEVICEID_HURRICANE_656:		/* 3c656 */
915	case TC_DEVICEID_HURRICANE_656B:	/* 3c656B */
916	case TC_DEVICEID_TORNADO_656C:		/* 3c656C */
917	case TC_DEVICEID_TORNADO_10_100BT_920B: /* 3c920B-EMB */
918		sc->xl_media = XL_MEDIAOPT_MII;
919		sc->xl_xcvr = XL_XCVR_MII;
920		if (verbose)
921			printf("%s: guessing MII\n", sc->sc_dev.dv_xname);
922		break;
923	case TC_DEVICEID_BOOMERANG_100BT4:	/* 3c905-T4 */
924	case TC_DEVICEID_CYCLONE_10_100BT4:	/* 3c905B-T4 */
925		sc->xl_media = XL_MEDIAOPT_BT4;
926		sc->xl_xcvr = XL_XCVR_MII;
927		if (verbose)
928			printf("%s: guessing 100BaseT4/MII\n", sc->sc_dev.dv_xname);
929		break;
930	case TC_DEVICEID_HURRICANE_10_100BT:	/* 3c905B-TX */
931	case TC_DEVICEID_HURRICANE_10_100BT_SERV:/* 3c980-TX */
932	case TC_DEVICEID_TORNADO_10_100BT_SERV:	/* 3c980C-TX */
933	case TC_DEVICEID_HURRICANE_SOHO100TX:	/* 3cSOHO100-TX */
934	case TC_DEVICEID_TORNADO_10_100BT:	/* 3c905C-TX */
935	case TC_DEVICEID_TORNADO_HOMECONNECT:	/* 3c450-TX */
936		sc->xl_media = XL_MEDIAOPT_BTX;
937		sc->xl_xcvr = XL_XCVR_AUTO;
938		if (verbose)
939			printf("%s: guessing 10/100 internal\n",
940			    sc->sc_dev.dv_xname);
941		break;
942	case TC_DEVICEID_CYCLONE_10_100_COMBO:	/* 3c905B-COMBO */
943		sc->xl_media = XL_MEDIAOPT_BTX|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI;
944		sc->xl_xcvr = XL_XCVR_AUTO;
945		if (verbose)
946			printf("%s: guessing 10/100 plus BNC/AUI\n",
947			    sc->sc_dev.dv_xname);
948		break;
949	default:
950		printf("%s: unknown device ID: %x -- "
951			"defaulting to 10baseT\n", sc->sc_dev.dv_xname, devid);
952		sc->xl_media = XL_MEDIAOPT_BT;
953		break;
954	}
955}
956
957/*
958 * Initialize the transmit descriptors.
959 */
960int
961xl_list_tx_init(struct xl_softc *sc)
962{
963	struct xl_chain_data	*cd;
964	struct xl_list_data	*ld;
965	int			i;
966
967	cd = &sc->xl_cdata;
968	ld = sc->xl_ldata;
969	for (i = 0; i < XL_TX_LIST_CNT; i++) {
970		cd->xl_tx_chain[i].xl_ptr = &ld->xl_tx_list[i];
971		if (i == (XL_TX_LIST_CNT - 1))
972			cd->xl_tx_chain[i].xl_next = NULL;
973		else
974			cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[i + 1];
975	}
976
977	cd->xl_tx_free = &cd->xl_tx_chain[0];
978	cd->xl_tx_tail = cd->xl_tx_head = NULL;
979
980	return (0);
981}
982
983/*
984 * Initialize the transmit descriptors.
985 */
986int
987xl_list_tx_init_90xB(struct xl_softc *sc)
988{
989	struct xl_chain_data	*cd;
990	struct xl_list_data	*ld;
991	int			i, next, prev;
992
993	cd = &sc->xl_cdata;
994	ld = sc->xl_ldata;
995	for (i = 0; i < XL_TX_LIST_CNT; i++) {
996		if (i == (XL_TX_LIST_CNT - 1))
997			next = 0;
998		else
999			next = i + 1;
1000		if (i == 0)
1001			prev = XL_TX_LIST_CNT - 1;
1002		else
1003			prev = i - 1;
1004		cd->xl_tx_chain[i].xl_ptr = &ld->xl_tx_list[i];
1005		cd->xl_tx_chain[i].xl_phys =
1006		    sc->sc_listmap->dm_segs[0].ds_addr +
1007		    offsetof(struct xl_list_data, xl_tx_list[i]);
1008		cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[next];
1009		cd->xl_tx_chain[i].xl_prev = &cd->xl_tx_chain[prev];
1010	}
1011
1012	bzero(ld->xl_tx_list, sizeof(struct xl_list) * XL_TX_LIST_CNT);
1013	ld->xl_tx_list[0].xl_status = htole32(XL_TXSTAT_EMPTY);
1014
1015	cd->xl_tx_prod = 1;
1016	cd->xl_tx_cons = 1;
1017	cd->xl_tx_cnt = 0;
1018
1019	return (0);
1020}
1021
1022/*
1023 * Initialize the RX descriptors and allocate mbufs for them. Note that
1024 * we arrange the descriptors in a closed ring, so that the last descriptor
1025 * points back to the first.
1026 */
1027int
1028xl_list_rx_init(struct xl_softc *sc)
1029{
1030	struct xl_chain_data	*cd;
1031	struct xl_list_data	*ld;
1032	int			i, n;
1033	bus_addr_t		next;
1034
1035	cd = &sc->xl_cdata;
1036	ld = sc->xl_ldata;
1037
1038	for (i = 0; i < XL_RX_LIST_CNT; i++) {
1039		cd->xl_rx_chain[i].xl_ptr =
1040			(struct xl_list_onefrag *)&ld->xl_rx_list[i];
1041		if (i == (XL_RX_LIST_CNT - 1))
1042			n = 0;
1043		else
1044			n = i + 1;
1045		cd->xl_rx_chain[i].xl_next = &cd->xl_rx_chain[n];
1046		next = sc->sc_listmap->dm_segs[0].ds_addr +
1047		       offsetof(struct xl_list_data, xl_rx_list[n]);
1048		ld->xl_rx_list[i].xl_next = htole32(next);
1049	}
1050
1051	cd->xl_rx_prod = cd->xl_rx_cons = &cd->xl_rx_chain[0];
1052	if_rxr_init(&cd->xl_rx_ring, 2, XL_RX_LIST_CNT - 1);
1053	xl_fill_rx_ring(sc);
1054	return (0);
1055}
1056
1057void
1058xl_fill_rx_ring(struct xl_softc *sc)
1059{
1060	struct xl_chain_data    *cd;
1061	u_int			slots;
1062
1063	cd = &sc->xl_cdata;
1064
1065	for (slots = if_rxr_get(&cd->xl_rx_ring, XL_RX_LIST_CNT);
1066	     slots > 0; slots--) {
1067		if (xl_newbuf(sc, cd->xl_rx_prod) == ENOBUFS)
1068			break;
1069		cd->xl_rx_prod = cd->xl_rx_prod->xl_next;
1070	}
1071	if_rxr_put(&cd->xl_rx_ring, slots);
1072}
1073
1074/*
1075 * Initialize an RX descriptor and attach an MBUF cluster.
1076 */
1077int
1078xl_newbuf(struct xl_softc *sc, struct xl_chain_onefrag *c)
1079{
1080	struct mbuf	*m_new = NULL;
1081	bus_dmamap_t	map;
1082
1083	m_new = MCLGETL(NULL, M_DONTWAIT, MCLBYTES);
1084	if (!m_new)
1085		return (ENOBUFS);
1086
1087	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
1088	if (bus_dmamap_load(sc->sc_dmat, sc->sc_rx_sparemap,
1089	    mtod(m_new, caddr_t), MCLBYTES, NULL, BUS_DMA_NOWAIT) != 0) {
1090		m_freem(m_new);
1091		return (ENOBUFS);
1092	}
1093
1094	/* sync the old map, and unload it (if necessary) */
1095	if (c->map->dm_nsegs != 0) {
1096		bus_dmamap_sync(sc->sc_dmat, c->map,
1097		    0, c->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1098		bus_dmamap_unload(sc->sc_dmat, c->map);
1099	}
1100
1101	map = c->map;
1102	c->map = sc->sc_rx_sparemap;
1103	sc->sc_rx_sparemap = map;
1104
1105	/* Force longword alignment for packet payload. */
1106	m_adj(m_new, ETHER_ALIGN);
1107
1108	bus_dmamap_sync(sc->sc_dmat, c->map, 0, c->map->dm_mapsize,
1109	    BUS_DMASYNC_PREREAD);
1110
1111	c->xl_mbuf = m_new;
1112	c->xl_ptr->xl_frag.xl_addr =
1113	    htole32(c->map->dm_segs[0].ds_addr + ETHER_ALIGN);
1114	c->xl_ptr->xl_frag.xl_len =
1115	    htole32(c->map->dm_segs[0].ds_len | XL_LAST_FRAG);
1116	c->xl_ptr->xl_status = htole32(0);
1117
1118	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
1119	    ((caddr_t)c->xl_ptr - sc->sc_listkva), sizeof(struct xl_list),
1120	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1121
1122	return (0);
1123}
1124
1125/*
1126 * A frame has been uploaded: pass the resulting mbuf chain up to
1127 * the higher level protocols.
1128 */
1129void
1130xl_rxeof(struct xl_softc *sc)
1131{
1132	struct mbuf_list	ml = MBUF_LIST_INITIALIZER();
1133        struct mbuf		*m;
1134        struct ifnet		*ifp;
1135	struct xl_chain_onefrag	*cur_rx;
1136	int			total_len = 0;
1137	u_int32_t		rxstat;
1138	u_int16_t		sumflags = 0;
1139
1140	ifp = &sc->sc_arpcom.ac_if;
1141
1142again:
1143
1144	while (if_rxr_inuse(&sc->xl_cdata.xl_rx_ring) > 0) {
1145		cur_rx = sc->xl_cdata.xl_rx_cons;
1146		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
1147		    ((caddr_t)cur_rx->xl_ptr - sc->sc_listkva),
1148		    sizeof(struct xl_list),
1149		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1150		if ((rxstat = letoh32(sc->xl_cdata.xl_rx_cons->xl_ptr->xl_status)) == 0)
1151			break;
1152		m = cur_rx->xl_mbuf;
1153		cur_rx->xl_mbuf = NULL;
1154		sc->xl_cdata.xl_rx_cons = cur_rx->xl_next;
1155		if_rxr_put(&sc->xl_cdata.xl_rx_ring, 1);
1156		total_len = rxstat & XL_RXSTAT_LENMASK;
1157
1158		/*
1159		 * Since we have told the chip to allow large frames,
1160		 * we need to trap giant frame errors in software. We allow
1161		 * a little more than the normal frame size to account for
1162		 * frames with VLAN tags.
1163		 */
1164		if (total_len > XL_MAX_FRAMELEN)
1165			rxstat |= (XL_RXSTAT_UP_ERROR|XL_RXSTAT_OVERSIZE);
1166
1167		/*
1168		 * If an error occurs, update stats, clear the
1169		 * status word and leave the mbuf cluster in place:
1170		 * it should simply get re-used next time this descriptor
1171	 	 * comes up in the ring.
1172		 */
1173		if (rxstat & XL_RXSTAT_UP_ERROR) {
1174			ifp->if_ierrors++;
1175			cur_rx->xl_ptr->xl_status = htole32(0);
1176			m_freem(m);
1177			continue;
1178		}
1179
1180		/*
1181		 * If the error bit was not set, the upload complete
1182		 * bit should be set which means we have a valid packet.
1183		 * If not, something truly strange has happened.
1184		 */
1185		if (!(rxstat & XL_RXSTAT_UP_CMPLT)) {
1186			printf("%s: bad receive status -- "
1187			    "packet dropped\n", sc->sc_dev.dv_xname);
1188			ifp->if_ierrors++;
1189			cur_rx->xl_ptr->xl_status = htole32(0);
1190			m_freem(m);
1191			continue;
1192		}
1193
1194		m->m_pkthdr.len = m->m_len = total_len;
1195
1196		if (sc->xl_type == XL_TYPE_905B) {
1197			if (!(rxstat & XL_RXSTAT_IPCKERR) &&
1198			    (rxstat & XL_RXSTAT_IPCKOK))
1199				sumflags |= M_IPV4_CSUM_IN_OK;
1200
1201			if (!(rxstat & XL_RXSTAT_TCPCKERR) &&
1202			    (rxstat & XL_RXSTAT_TCPCKOK))
1203				sumflags |= M_TCP_CSUM_IN_OK;
1204
1205			if (!(rxstat & XL_RXSTAT_UDPCKERR) &&
1206			    (rxstat & XL_RXSTAT_UDPCKOK))
1207				sumflags |= M_UDP_CSUM_IN_OK;
1208
1209			m->m_pkthdr.csum_flags = sumflags;
1210		}
1211
1212		ml_enqueue(&ml, m);
1213	}
1214
1215	if (ifiq_input(&ifp->if_rcv, &ml))
1216		if_rxr_livelocked(&sc->xl_cdata.xl_rx_ring);
1217
1218	xl_fill_rx_ring(sc);
1219
1220	/*
1221	 * Handle the 'end of channel' condition. When the upload
1222	 * engine hits the end of the RX ring, it will stall. This
1223	 * is our cue to flush the RX ring, reload the uplist pointer
1224	 * register and unstall the engine.
1225	 * XXX This is actually a little goofy. With the ThunderLAN
1226	 * chip, you get an interrupt when the receiver hits the end
1227	 * of the receive ring, which tells you exactly when you
1228	 * you need to reload the ring pointer. Here we have to
1229	 * fake it. I'm mad at myself for not being clever enough
1230	 * to avoid the use of a goto here.
1231	 */
1232	if (CSR_READ_4(sc, XL_UPLIST_PTR) == 0 ||
1233		CSR_READ_4(sc, XL_UPLIST_STATUS) & XL_PKTSTAT_UP_STALLED) {
1234		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL);
1235		xl_wait(sc);
1236		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL);
1237		xl_fill_rx_ring(sc);
1238		goto again;
1239	}
1240}
1241
1242/*
1243 * A frame was downloaded to the chip. It's safe for us to clean up
1244 * the list buffers.
1245 */
1246void
1247xl_txeof(struct xl_softc *sc)
1248{
1249	struct xl_chain		*cur_tx;
1250	struct ifnet		*ifp;
1251
1252	ifp = &sc->sc_arpcom.ac_if;
1253
1254	/*
1255	 * Go through our tx list and free mbufs for those
1256	 * frames that have been uploaded. Note: the 3c905B
1257	 * sets a special bit in the status word to let us
1258	 * know that a frame has been downloaded, but the
1259	 * original 3c900/3c905 adapters don't do that.
1260	 * Consequently, we have to use a different test if
1261	 * xl_type != XL_TYPE_905B.
1262	 */
1263	while (sc->xl_cdata.xl_tx_head != NULL) {
1264		cur_tx = sc->xl_cdata.xl_tx_head;
1265
1266		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
1267		    ((caddr_t)cur_tx->xl_ptr - sc->sc_listkva),
1268		    sizeof(struct xl_list),
1269		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1270
1271		if (CSR_READ_4(sc, XL_DOWNLIST_PTR))
1272			break;
1273
1274		sc->xl_cdata.xl_tx_head = cur_tx->xl_next;
1275		if (cur_tx->map->dm_nsegs != 0) {
1276			bus_dmamap_t map = cur_tx->map;
1277
1278			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1279			    BUS_DMASYNC_POSTWRITE);
1280			bus_dmamap_unload(sc->sc_dmat, map);
1281		}
1282		if (cur_tx->xl_mbuf != NULL) {
1283			m_freem(cur_tx->xl_mbuf);
1284			cur_tx->xl_mbuf = NULL;
1285		}
1286		cur_tx->xl_next = sc->xl_cdata.xl_tx_free;
1287		sc->xl_cdata.xl_tx_free = cur_tx;
1288	}
1289
1290	if (sc->xl_cdata.xl_tx_head == NULL) {
1291		ifq_clr_oactive(&ifp->if_snd);
1292		/* Clear the timeout timer. */
1293		ifp->if_timer = 0;
1294		sc->xl_cdata.xl_tx_tail = NULL;
1295	} else {
1296		if (CSR_READ_4(sc, XL_DMACTL) & XL_DMACTL_DOWN_STALLED ||
1297			!CSR_READ_4(sc, XL_DOWNLIST_PTR)) {
1298			CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
1299			    sc->sc_listmap->dm_segs[0].ds_addr +
1300			    ((caddr_t)sc->xl_cdata.xl_tx_head->xl_ptr -
1301			    sc->sc_listkva));
1302			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
1303		}
1304	}
1305}
1306
1307void
1308xl_txeof_90xB(struct xl_softc *sc)
1309{
1310	struct xl_chain *cur_tx = NULL;
1311	struct ifnet *ifp;
1312	int idx;
1313
1314	ifp = &sc->sc_arpcom.ac_if;
1315
1316	idx = sc->xl_cdata.xl_tx_cons;
1317	while (idx != sc->xl_cdata.xl_tx_prod) {
1318
1319		cur_tx = &sc->xl_cdata.xl_tx_chain[idx];
1320
1321		if ((cur_tx->xl_ptr->xl_status &
1322		    htole32(XL_TXSTAT_DL_COMPLETE)) == 0)
1323			break;
1324
1325		if (cur_tx->xl_mbuf != NULL) {
1326			m_freem(cur_tx->xl_mbuf);
1327			cur_tx->xl_mbuf = NULL;
1328		}
1329
1330		if (cur_tx->map->dm_nsegs != 0) {
1331			bus_dmamap_sync(sc->sc_dmat, cur_tx->map,
1332			    0, cur_tx->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1333			bus_dmamap_unload(sc->sc_dmat, cur_tx->map);
1334		}
1335
1336		sc->xl_cdata.xl_tx_cnt--;
1337		XL_INC(idx, XL_TX_LIST_CNT);
1338	}
1339
1340	sc->xl_cdata.xl_tx_cons = idx;
1341
1342	if (cur_tx != NULL)
1343		ifq_clr_oactive(&ifp->if_snd);
1344	if (sc->xl_cdata.xl_tx_cnt == 0)
1345		ifp->if_timer = 0;
1346}
1347
1348/*
1349 * TX 'end of channel' interrupt handler. Actually, we should
1350 * only get a 'TX complete' interrupt if there's a transmit error,
1351 * so this is really TX error handler.
1352 */
1353void
1354xl_txeoc(struct xl_softc *sc)
1355{
1356	u_int8_t	txstat;
1357
1358	while ((txstat = CSR_READ_1(sc, XL_TX_STATUS))) {
1359		if (txstat & XL_TXSTATUS_UNDERRUN ||
1360			txstat & XL_TXSTATUS_JABBER ||
1361			txstat & XL_TXSTATUS_RECLAIM) {
1362			if (txstat != 0x90) {
1363				printf("%s: transmission error: %x\n",
1364				    sc->sc_dev.dv_xname, txstat);
1365			}
1366			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
1367			xl_wait(sc);
1368			if (sc->xl_type == XL_TYPE_905B) {
1369				if (sc->xl_cdata.xl_tx_cnt) {
1370					int i;
1371					struct xl_chain *c;
1372
1373					i = sc->xl_cdata.xl_tx_cons;
1374					c = &sc->xl_cdata.xl_tx_chain[i];
1375					CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
1376					    c->xl_phys);
1377					CSR_WRITE_1(sc, XL_DOWN_POLL, 64);
1378				}
1379			} else {
1380				if (sc->xl_cdata.xl_tx_head != NULL)
1381					CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
1382					    sc->sc_listmap->dm_segs[0].ds_addr +
1383					    ((caddr_t)sc->xl_cdata.xl_tx_head->xl_ptr -
1384					    sc->sc_listkva));
1385			}
1386			/*
1387			 * Remember to set this for the
1388			 * first generation 3c90X chips.
1389			 */
1390			CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8);
1391			if (txstat & XL_TXSTATUS_UNDERRUN &&
1392			    sc->xl_tx_thresh < XL_PACKET_SIZE) {
1393				sc->xl_tx_thresh += XL_MIN_FRAMELEN;
1394#ifdef notdef
1395				printf("%s: tx underrun, increasing tx start"
1396				    " threshold to %d\n", sc->sc_dev.dv_xname,
1397				    sc->xl_tx_thresh);
1398#endif
1399			}
1400			CSR_WRITE_2(sc, XL_COMMAND,
1401			    XL_CMD_TX_SET_START|sc->xl_tx_thresh);
1402			if (sc->xl_type == XL_TYPE_905B) {
1403				CSR_WRITE_2(sc, XL_COMMAND,
1404				XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4));
1405			}
1406			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
1407			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
1408		} else {
1409			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
1410			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
1411		}
1412		/*
1413		 * Write an arbitrary byte to the TX_STATUS register
1414	 	 * to clear this interrupt/error and advance to the next.
1415		 */
1416		CSR_WRITE_1(sc, XL_TX_STATUS, 0x01);
1417	}
1418}
1419
1420int
1421xl_intr(void *arg)
1422{
1423	struct xl_softc		*sc;
1424	struct ifnet		*ifp;
1425	u_int16_t		status;
1426	int			claimed = 0;
1427
1428	sc = arg;
1429	ifp = &sc->sc_arpcom.ac_if;
1430
1431	while ((status = CSR_READ_2(sc, XL_STATUS)) & XL_INTRS && status != 0xFFFF) {
1432
1433		claimed = 1;
1434
1435		CSR_WRITE_2(sc, XL_COMMAND,
1436		    XL_CMD_INTR_ACK|(status & XL_INTRS));
1437
1438		if (sc->intr_ack)
1439			(*sc->intr_ack)(sc);
1440
1441		if (!(ifp->if_flags & IFF_RUNNING))
1442			return (claimed);
1443
1444		if (status & XL_STAT_UP_COMPLETE)
1445			xl_rxeof(sc);
1446
1447		if (status & XL_STAT_DOWN_COMPLETE) {
1448			if (sc->xl_type == XL_TYPE_905B)
1449				xl_txeof_90xB(sc);
1450			else
1451				xl_txeof(sc);
1452		}
1453
1454		if (status & XL_STAT_TX_COMPLETE) {
1455			ifp->if_oerrors++;
1456			xl_txeoc(sc);
1457		}
1458
1459		if (status & XL_STAT_ADFAIL)
1460			xl_init(sc);
1461
1462		if (status & XL_STAT_STATSOFLOW) {
1463			sc->xl_stats_no_timeout = 1;
1464			xl_stats_update(sc);
1465			sc->xl_stats_no_timeout = 0;
1466		}
1467	}
1468
1469	if (!ifq_empty(&ifp->if_snd))
1470		(*ifp->if_start)(ifp);
1471
1472	return (claimed);
1473}
1474
1475void
1476xl_stats_update(void *xsc)
1477{
1478	struct xl_softc		*sc;
1479	struct ifnet		*ifp;
1480	struct xl_stats		xl_stats;
1481	u_int8_t		*p;
1482	int			i;
1483	struct mii_data		*mii = NULL;
1484
1485	bzero(&xl_stats, sizeof(struct xl_stats));
1486
1487	sc = xsc;
1488	ifp = &sc->sc_arpcom.ac_if;
1489	if (sc->xl_hasmii)
1490		mii = &sc->sc_mii;
1491
1492	p = (u_int8_t *)&xl_stats;
1493
1494	/* Read all the stats registers. */
1495	XL_SEL_WIN(6);
1496
1497	for (i = 0; i < 16; i++)
1498		*p++ = CSR_READ_1(sc, XL_W6_CARRIER_LOST + i);
1499
1500	ifp->if_ierrors += xl_stats.xl_rx_overrun;
1501
1502	ifp->if_collisions += xl_stats.xl_tx_multi_collision +
1503				xl_stats.xl_tx_single_collision +
1504				xl_stats.xl_tx_late_collision;
1505
1506	/*
1507	 * Boomerang and cyclone chips have an extra stats counter
1508	 * in window 4 (BadSSD). We have to read this too in order
1509	 * to clear out all the stats registers and avoid a statsoflow
1510	 * interrupt.
1511	 */
1512	XL_SEL_WIN(4);
1513	CSR_READ_1(sc, XL_W4_BADSSD);
1514
1515	if (mii != NULL && (!sc->xl_stats_no_timeout))
1516		mii_tick(mii);
1517
1518	XL_SEL_WIN(7);
1519
1520	if (!sc->xl_stats_no_timeout)
1521		timeout_add_sec(&sc->xl_stsup_tmo, 1);
1522}
1523
1524/*
1525 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1526 * pointers to the fragment pointers.
1527 */
1528int
1529xl_encap(struct xl_softc *sc, struct xl_chain *c, struct mbuf *m_head)
1530{
1531	int		error, frag, total_len;
1532	u_int32_t	status;
1533	bus_dmamap_t	map;
1534
1535	map = sc->sc_tx_sparemap;
1536
1537reload:
1538	error = bus_dmamap_load_mbuf(sc->sc_dmat, map,
1539	    m_head, BUS_DMA_NOWAIT);
1540
1541	if (error && error != EFBIG) {
1542		m_freem(m_head);
1543		return (1);
1544	}
1545
1546	/*
1547 	 * Start packing the mbufs in this chain into
1548	 * the fragment pointers. Stop when we run out
1549 	 * of fragments or hit the end of the mbuf chain.
1550	 */
1551	for (frag = 0, total_len = 0; frag < map->dm_nsegs; frag++) {
1552		if (frag == XL_MAXFRAGS)
1553			break;
1554		total_len += map->dm_segs[frag].ds_len;
1555		c->xl_ptr->xl_frag[frag].xl_addr =
1556		    htole32(map->dm_segs[frag].ds_addr);
1557		c->xl_ptr->xl_frag[frag].xl_len =
1558		    htole32(map->dm_segs[frag].ds_len);
1559	}
1560
1561	/*
1562	 * Handle special case: we used up all 63 fragments,
1563	 * but we have more mbufs left in the chain. Copy the
1564	 * data into an mbuf cluster. Note that we don't
1565	 * bother clearing the values in the other fragment
1566	 * pointers/counters; it wouldn't gain us anything,
1567	 * and would waste cycles.
1568	 */
1569	if (error) {
1570		struct mbuf	*m_new = NULL;
1571
1572		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1573		if (m_new == NULL) {
1574			m_freem(m_head);
1575			return (1);
1576		}
1577		if (m_head->m_pkthdr.len > MHLEN) {
1578			MCLGET(m_new, M_DONTWAIT);
1579			if (!(m_new->m_flags & M_EXT)) {
1580				m_freem(m_new);
1581				m_freem(m_head);
1582				return (1);
1583			}
1584		}
1585		m_copydata(m_head, 0, m_head->m_pkthdr.len,
1586		    mtod(m_new, caddr_t));
1587		m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
1588		m_freem(m_head);
1589		m_head = m_new;
1590		goto reload;
1591	}
1592
1593	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1594	    BUS_DMASYNC_PREWRITE);
1595
1596	if (c->map->dm_nsegs != 0) {
1597		bus_dmamap_sync(sc->sc_dmat, c->map,
1598		    0, c->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1599		bus_dmamap_unload(sc->sc_dmat, c->map);
1600	}
1601
1602	c->xl_mbuf = m_head;
1603	sc->sc_tx_sparemap = c->map;
1604	c->map = map;
1605	c->xl_ptr->xl_frag[frag - 1].xl_len |= htole32(XL_LAST_FRAG);
1606	c->xl_ptr->xl_status = htole32(total_len);
1607	c->xl_ptr->xl_next = 0;
1608
1609	if (sc->xl_type == XL_TYPE_905B) {
1610		status = XL_TXSTAT_RND_DEFEAT;
1611
1612#ifndef XL905B_TXCSUM_BROKEN
1613		if (m_head->m_pkthdr.csum_flags) {
1614			if (m_head->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
1615				status |= XL_TXSTAT_IPCKSUM;
1616			if (m_head->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
1617				status |= XL_TXSTAT_TCPCKSUM;
1618			if (m_head->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
1619				status |= XL_TXSTAT_UDPCKSUM;
1620		}
1621#endif
1622		c->xl_ptr->xl_status = htole32(status);
1623	}
1624
1625	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
1626	    offsetof(struct xl_list_data, xl_tx_list[0]),
1627	    sizeof(struct xl_list) * XL_TX_LIST_CNT,
1628	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1629
1630	return (0);
1631}
1632
1633/*
1634 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1635 * to the mbuf data regions directly in the transmit lists. We also save a
1636 * copy of the pointers since the transmit list fragment pointers are
1637 * physical addresses.
1638 */
1639void
1640xl_start(struct ifnet *ifp)
1641{
1642	struct xl_softc		*sc;
1643	struct mbuf		*m_head = NULL;
1644	struct xl_chain		*prev = NULL, *cur_tx = NULL, *start_tx;
1645	struct xl_chain		*prev_tx;
1646	int			error;
1647
1648	sc = ifp->if_softc;
1649
1650	/*
1651	 * Check for an available queue slot. If there are none,
1652	 * punt.
1653	 */
1654	if (sc->xl_cdata.xl_tx_free == NULL) {
1655		xl_txeoc(sc);
1656		xl_txeof(sc);
1657		if (sc->xl_cdata.xl_tx_free == NULL) {
1658			ifq_set_oactive(&ifp->if_snd);
1659			return;
1660		}
1661	}
1662
1663	start_tx = sc->xl_cdata.xl_tx_free;
1664
1665	while (sc->xl_cdata.xl_tx_free != NULL) {
1666		m_head = ifq_dequeue(&ifp->if_snd);
1667		if (m_head == NULL)
1668			break;
1669
1670		/* Pick a descriptor off the free list. */
1671		prev_tx = cur_tx;
1672		cur_tx = sc->xl_cdata.xl_tx_free;
1673
1674		/* Pack the data into the descriptor. */
1675		error = xl_encap(sc, cur_tx, m_head);
1676		if (error) {
1677			cur_tx = prev_tx;
1678			continue;
1679		}
1680
1681		sc->xl_cdata.xl_tx_free = cur_tx->xl_next;
1682		cur_tx->xl_next = NULL;
1683
1684		/* Chain it together. */
1685		if (prev != NULL) {
1686			prev->xl_next = cur_tx;
1687			prev->xl_ptr->xl_next =
1688			    sc->sc_listmap->dm_segs[0].ds_addr +
1689			    ((caddr_t)cur_tx->xl_ptr - sc->sc_listkva);
1690
1691		}
1692		prev = cur_tx;
1693
1694#if NBPFILTER > 0
1695		/*
1696		 * If there's a BPF listener, bounce a copy of this frame
1697		 * to him.
1698		 */
1699		if (ifp->if_bpf)
1700			bpf_mtap(ifp->if_bpf, cur_tx->xl_mbuf,
1701			    BPF_DIRECTION_OUT);
1702#endif
1703	}
1704
1705	/*
1706	 * If there are no packets queued, bail.
1707	 */
1708	if (cur_tx == NULL)
1709		return;
1710
1711	/*
1712	 * Place the request for the upload interrupt
1713	 * in the last descriptor in the chain. This way, if
1714	 * we're chaining several packets at once, we'll only
1715	 * get an interrupt once for the whole chain rather than
1716	 * once for each packet.
1717	 */
1718	cur_tx->xl_ptr->xl_status |= htole32(XL_TXSTAT_DL_INTR);
1719
1720	/*
1721	 * Queue the packets. If the TX channel is clear, update
1722	 * the downlist pointer register.
1723	 */
1724	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_STALL);
1725	xl_wait(sc);
1726
1727	if (sc->xl_cdata.xl_tx_head != NULL) {
1728		sc->xl_cdata.xl_tx_tail->xl_next = start_tx;
1729		sc->xl_cdata.xl_tx_tail->xl_ptr->xl_next =
1730		    sc->sc_listmap->dm_segs[0].ds_addr +
1731		    ((caddr_t)start_tx->xl_ptr - sc->sc_listkva);
1732		sc->xl_cdata.xl_tx_tail->xl_ptr->xl_status &=
1733		    htole32(~XL_TXSTAT_DL_INTR);
1734		sc->xl_cdata.xl_tx_tail = cur_tx;
1735	} else {
1736		sc->xl_cdata.xl_tx_head = start_tx;
1737		sc->xl_cdata.xl_tx_tail = cur_tx;
1738	}
1739	if (!CSR_READ_4(sc, XL_DOWNLIST_PTR))
1740		CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
1741		    sc->sc_listmap->dm_segs[0].ds_addr +
1742		    ((caddr_t)start_tx->xl_ptr - sc->sc_listkva));
1743
1744	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
1745
1746	XL_SEL_WIN(7);
1747
1748	/*
1749	 * Set a timeout in case the chip goes out to lunch.
1750	 */
1751	ifp->if_timer = 5;
1752
1753	/*
1754	 * XXX Under certain conditions, usually on slower machines
1755	 * where interrupts may be dropped, it's possible for the
1756	 * adapter to chew up all the buffers in the receive ring
1757	 * and stall, without us being able to do anything about it.
1758	 * To guard against this, we need to make a pass over the
1759	 * RX queue to make sure there aren't any packets pending.
1760	 * Doing it here means we can flush the receive ring at the
1761	 * same time the chip is DMAing the transmit descriptors we
1762	 * just gave it.
1763 	 *
1764	 * 3Com goes to some lengths to emphasize the Parallel Tasking (tm)
1765	 * nature of their chips in all their marketing literature;
1766	 * we may as well take advantage of it. :)
1767	 */
1768	xl_rxeof(sc);
1769}
1770
1771void
1772xl_start_90xB(struct ifnet *ifp)
1773{
1774	struct xl_softc	*sc;
1775	struct mbuf	*m_head = NULL;
1776	struct xl_chain	*prev = NULL, *cur_tx = NULL, *start_tx;
1777	struct xl_chain	*prev_tx;
1778	int		error, idx;
1779
1780	sc = ifp->if_softc;
1781
1782	if (ifq_is_oactive(&ifp->if_snd))
1783		return;
1784
1785	idx = sc->xl_cdata.xl_tx_prod;
1786	start_tx = &sc->xl_cdata.xl_tx_chain[idx];
1787
1788	while (sc->xl_cdata.xl_tx_chain[idx].xl_mbuf == NULL) {
1789
1790		if ((XL_TX_LIST_CNT - sc->xl_cdata.xl_tx_cnt) < 3) {
1791			ifq_set_oactive(&ifp->if_snd);
1792			break;
1793		}
1794
1795		m_head = ifq_dequeue(&ifp->if_snd);
1796		if (m_head == NULL)
1797			break;
1798
1799		prev_tx = cur_tx;
1800		cur_tx = &sc->xl_cdata.xl_tx_chain[idx];
1801
1802		/* Pack the data into the descriptor. */
1803		error = xl_encap(sc, cur_tx, m_head);
1804		if (error) {
1805			cur_tx = prev_tx;
1806			continue;
1807		}
1808
1809		/* Chain it together. */
1810		if (prev != NULL)
1811			prev->xl_ptr->xl_next = htole32(cur_tx->xl_phys);
1812		prev = cur_tx;
1813
1814#if NBPFILTER > 0
1815		/*
1816		 * If there's a BPF listener, bounce a copy of this frame
1817		 * to him.
1818		 */
1819		if (ifp->if_bpf)
1820			bpf_mtap(ifp->if_bpf, cur_tx->xl_mbuf,
1821			    BPF_DIRECTION_OUT);
1822#endif
1823
1824		XL_INC(idx, XL_TX_LIST_CNT);
1825		sc->xl_cdata.xl_tx_cnt++;
1826	}
1827
1828	/*
1829	 * If there are no packets queued, bail.
1830	 */
1831	if (cur_tx == NULL)
1832		return;
1833
1834	/*
1835	 * Place the request for the upload interrupt
1836	 * in the last descriptor in the chain. This way, if
1837	 * we're chaining several packets at once, we'll only
1838	 * get an interrupt once for the whole chain rather than
1839	 * once for each packet.
1840	 */
1841	cur_tx->xl_ptr->xl_status |= htole32(XL_TXSTAT_DL_INTR);
1842
1843	/* Start transmission */
1844	sc->xl_cdata.xl_tx_prod = idx;
1845	start_tx->xl_prev->xl_ptr->xl_next = htole32(start_tx->xl_phys);
1846
1847	/*
1848	 * Set a timeout in case the chip goes out to lunch.
1849	 */
1850	ifp->if_timer = 5;
1851}
1852
1853void
1854xl_init(void *xsc)
1855{
1856	struct xl_softc		*sc = xsc;
1857	struct ifnet		*ifp = &sc->sc_arpcom.ac_if;
1858	int			s, i;
1859	struct mii_data		*mii = NULL;
1860
1861	s = splnet();
1862
1863	/*
1864	 * Cancel pending I/O and free all RX/TX buffers.
1865	 */
1866	xl_stop(sc);
1867
1868	/* Reset the chip to a known state. */
1869	xl_reset(sc);
1870
1871	if (sc->xl_hasmii)
1872		mii = &sc->sc_mii;
1873
1874	if (mii == NULL) {
1875		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
1876		xl_wait(sc);
1877	}
1878	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
1879	xl_wait(sc);
1880	DELAY(10000);
1881
1882	/* Init our MAC address */
1883	XL_SEL_WIN(2);
1884	for (i = 0; i < ETHER_ADDR_LEN; i++) {
1885		CSR_WRITE_1(sc, XL_W2_STATION_ADDR_LO + i,
1886				sc->sc_arpcom.ac_enaddr[i]);
1887	}
1888
1889	/* Clear the station mask. */
1890	for (i = 0; i < 3; i++)
1891		CSR_WRITE_2(sc, XL_W2_STATION_MASK_LO + (i * 2), 0);
1892#ifdef notdef
1893	/* Reset TX and RX. */
1894	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
1895	xl_wait(sc);
1896	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
1897	xl_wait(sc);
1898#endif
1899	/* Init circular RX list. */
1900	if (xl_list_rx_init(sc) == ENOBUFS) {
1901		printf("%s: initialization failed: no "
1902			"memory for rx buffers\n", sc->sc_dev.dv_xname);
1903		xl_stop(sc);
1904		splx(s);
1905		return;
1906	}
1907
1908	/* Init TX descriptors. */
1909	if (sc->xl_type == XL_TYPE_905B)
1910		xl_list_tx_init_90xB(sc);
1911	else
1912		xl_list_tx_init(sc);
1913
1914	/*
1915	 * Set the TX freethresh value.
1916	 * Note that this has no effect on 3c905B "cyclone"
1917	 * cards but is required for 3c900/3c905 "boomerang"
1918	 * cards in order to enable the download engine.
1919	 */
1920	CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8);
1921
1922	/* Set the TX start threshold for best performance. */
1923	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_SET_START|sc->xl_tx_thresh);
1924
1925	/*
1926	 * If this is a 3c905B, also set the tx reclaim threshold.
1927	 * This helps cut down on the number of tx reclaim errors
1928	 * that could happen on a busy network. The chip multiplies
1929	 * the register value by 16 to obtain the actual threshold
1930	 * in bytes, so we divide by 16 when setting the value here.
1931	 * The existing threshold value can be examined by reading
1932	 * the register at offset 9 in window 5.
1933	 */
1934	if (sc->xl_type == XL_TYPE_905B) {
1935		CSR_WRITE_2(sc, XL_COMMAND,
1936		    XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4));
1937	}
1938
1939	/* Program promiscuous mode and multicast filters. */
1940	xl_iff(sc);
1941
1942	/*
1943	 * Load the address of the RX list. We have to
1944	 * stall the upload engine before we can manipulate
1945	 * the uplist pointer register, then unstall it when
1946	 * we're finished. We also have to wait for the
1947	 * stall command to complete before proceeding.
1948	 * Note that we have to do this after any RX resets
1949	 * have completed since the uplist register is cleared
1950	 * by a reset.
1951	 */
1952	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL);
1953	xl_wait(sc);
1954	CSR_WRITE_4(sc, XL_UPLIST_PTR, sc->sc_listmap->dm_segs[0].ds_addr +
1955	    offsetof(struct xl_list_data, xl_rx_list[0]));
1956	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL);
1957	xl_wait(sc);
1958
1959	if (sc->xl_type == XL_TYPE_905B) {
1960		/* Set polling interval */
1961		CSR_WRITE_1(sc, XL_DOWN_POLL, 64);
1962		/* Load the address of the TX list */
1963		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_STALL);
1964		xl_wait(sc);
1965		CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
1966		    sc->sc_listmap->dm_segs[0].ds_addr +
1967		    offsetof(struct xl_list_data, xl_tx_list[0]));
1968		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
1969		xl_wait(sc);
1970	}
1971
1972	/*
1973	 * If the coax transceiver is on, make sure to enable
1974	 * the DC-DC converter.
1975 	 */
1976	XL_SEL_WIN(3);
1977	if (sc->xl_xcvr == XL_XCVR_COAX)
1978		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START);
1979	else
1980		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
1981
1982	/*
1983	 * increase packet size to allow reception of 802.1q or ISL packets.
1984	 * For the 3c90x chip, set the 'allow large packets' bit in the MAC
1985	 * control register. For 3c90xB/C chips, use the RX packet size
1986	 * register.
1987	 */
1988
1989	if (sc->xl_type == XL_TYPE_905B)
1990		CSR_WRITE_2(sc, XL_W3_MAXPKTSIZE, XL_PACKET_SIZE);
1991	else {
1992		u_int8_t macctl;
1993		macctl = CSR_READ_1(sc, XL_W3_MAC_CTRL);
1994		macctl |= XL_MACCTRL_ALLOW_LARGE_PACK;
1995		CSR_WRITE_1(sc, XL_W3_MAC_CTRL, macctl);
1996	}
1997
1998	/* Clear out the stats counters. */
1999	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE);
2000	sc->xl_stats_no_timeout = 1;
2001	xl_stats_update(sc);
2002	sc->xl_stats_no_timeout = 0;
2003	XL_SEL_WIN(4);
2004	CSR_WRITE_2(sc, XL_W4_NET_DIAG, XL_NETDIAG_UPPER_BYTES_ENABLE);
2005	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_ENABLE);
2006
2007	/*
2008	 * Enable interrupts.
2009	 */
2010	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|0xFF);
2011	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|XL_INTRS);
2012	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|XL_INTRS);
2013
2014	if (sc->intr_ack)
2015		(*sc->intr_ack)(sc);
2016
2017	/* Set the RX early threshold */
2018	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_THRESH|(XL_PACKET_SIZE >>2));
2019	CSR_WRITE_4(sc, XL_DMACTL, XL_DMACTL_UP_RX_EARLY);
2020
2021	/* Enable receiver and transmitter. */
2022	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
2023	xl_wait(sc);
2024	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_ENABLE);
2025	xl_wait(sc);
2026
2027	/* Restore state of BMCR */
2028	if (mii != NULL)
2029		mii_mediachg(mii);
2030
2031	/* Select window 7 for normal operations. */
2032	XL_SEL_WIN(7);
2033
2034	ifp->if_flags |= IFF_RUNNING;
2035	ifq_clr_oactive(&ifp->if_snd);
2036
2037	splx(s);
2038
2039	timeout_add_sec(&sc->xl_stsup_tmo, 1);
2040}
2041
2042/*
2043 * Set media options.
2044 */
2045int
2046xl_ifmedia_upd(struct ifnet *ifp)
2047{
2048	struct xl_softc		*sc;
2049	struct ifmedia		*ifm = NULL;
2050	struct mii_data		*mii = NULL;
2051
2052	sc = ifp->if_softc;
2053
2054	if (sc->xl_hasmii)
2055		mii = &sc->sc_mii;
2056	if (mii == NULL)
2057		ifm = &sc->ifmedia;
2058	else
2059		ifm = &mii->mii_media;
2060
2061	switch(IFM_SUBTYPE(ifm->ifm_media)) {
2062	case IFM_100_FX:
2063	case IFM_10_FL:
2064	case IFM_10_2:
2065	case IFM_10_5:
2066		xl_setmode(sc, ifm->ifm_media);
2067		return (0);
2068		break;
2069	default:
2070		break;
2071	}
2072
2073	if (sc->xl_media & XL_MEDIAOPT_MII || sc->xl_media & XL_MEDIAOPT_BTX
2074		|| sc->xl_media & XL_MEDIAOPT_BT4) {
2075		xl_init(sc);
2076	} else {
2077		xl_setmode(sc, ifm->ifm_media);
2078	}
2079
2080	return (0);
2081}
2082
2083/*
2084 * Report current media status.
2085 */
2086void
2087xl_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2088{
2089	struct xl_softc		*sc;
2090	u_int32_t		icfg;
2091	u_int16_t		status = 0;
2092	struct mii_data		*mii = NULL;
2093
2094	sc = ifp->if_softc;
2095	if (sc->xl_hasmii != 0)
2096		mii = &sc->sc_mii;
2097
2098	XL_SEL_WIN(4);
2099	status = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
2100
2101	XL_SEL_WIN(3);
2102	icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG) & XL_ICFG_CONNECTOR_MASK;
2103	icfg >>= XL_ICFG_CONNECTOR_BITS;
2104
2105	ifmr->ifm_active = IFM_ETHER;
2106	ifmr->ifm_status = IFM_AVALID;
2107
2108	if ((status & XL_MEDIASTAT_CARRIER) == 0)
2109		ifmr->ifm_status |= IFM_ACTIVE;
2110
2111	switch(icfg) {
2112	case XL_XCVR_10BT:
2113		ifmr->ifm_active = IFM_ETHER|IFM_10_T;
2114		if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX)
2115			ifmr->ifm_active |= IFM_FDX;
2116		else
2117			ifmr->ifm_active |= IFM_HDX;
2118		break;
2119	case XL_XCVR_AUI:
2120		if (sc->xl_type == XL_TYPE_905B &&
2121		    sc->xl_media == XL_MEDIAOPT_10FL) {
2122			ifmr->ifm_active = IFM_ETHER|IFM_10_FL;
2123			if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX)
2124				ifmr->ifm_active |= IFM_FDX;
2125			else
2126				ifmr->ifm_active |= IFM_HDX;
2127		} else
2128			ifmr->ifm_active = IFM_ETHER|IFM_10_5;
2129		break;
2130	case XL_XCVR_COAX:
2131		ifmr->ifm_active = IFM_ETHER|IFM_10_2;
2132		break;
2133	/*
2134	 * XXX MII and BTX/AUTO should be separate cases.
2135	 */
2136
2137	case XL_XCVR_100BTX:
2138	case XL_XCVR_AUTO:
2139	case XL_XCVR_MII:
2140		if (mii != NULL) {
2141			mii_pollstat(mii);
2142			ifmr->ifm_active = mii->mii_media_active;
2143			ifmr->ifm_status = mii->mii_media_status;
2144		}
2145		break;
2146	case XL_XCVR_100BFX:
2147		ifmr->ifm_active = IFM_ETHER|IFM_100_FX;
2148		break;
2149	default:
2150		printf("%s: unknown XCVR type: %d\n", sc->sc_dev.dv_xname, icfg);
2151		break;
2152	}
2153}
2154
2155int
2156xl_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
2157{
2158	struct xl_softc *sc = ifp->if_softc;
2159	struct ifreq *ifr = (struct ifreq *)data;
2160	int s, error = 0;
2161	struct mii_data *mii = NULL;
2162
2163	s = splnet();
2164
2165	switch(command) {
2166	case SIOCSIFADDR:
2167		ifp->if_flags |= IFF_UP;
2168		if (!(ifp->if_flags & IFF_RUNNING))
2169			xl_init(sc);
2170		break;
2171
2172	case SIOCSIFFLAGS:
2173		if (ifp->if_flags & IFF_UP) {
2174			if (ifp->if_flags & IFF_RUNNING)
2175				error = ENETRESET;
2176			else
2177				xl_init(sc);
2178		} else {
2179			if (ifp->if_flags & IFF_RUNNING)
2180				xl_stop(sc);
2181		}
2182		break;
2183
2184	case SIOCGIFMEDIA:
2185	case SIOCSIFMEDIA:
2186		if (sc->xl_hasmii != 0)
2187			mii = &sc->sc_mii;
2188		if (mii == NULL)
2189			error = ifmedia_ioctl(ifp, ifr,
2190			    &sc->ifmedia, command);
2191		else
2192			error = ifmedia_ioctl(ifp, ifr,
2193			    &mii->mii_media, command);
2194		break;
2195
2196	case SIOCGIFRXR:
2197		error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
2198		    NULL, MCLBYTES, &sc->xl_cdata.xl_rx_ring);
2199		break;
2200
2201	default:
2202		error = ether_ioctl(ifp, &sc->sc_arpcom, command, data);
2203	}
2204
2205	if (error == ENETRESET) {
2206		if (ifp->if_flags & IFF_RUNNING)
2207			xl_iff(sc);
2208		error = 0;
2209	}
2210
2211	splx(s);
2212	return (error);
2213}
2214
2215void
2216xl_watchdog(struct ifnet *ifp)
2217{
2218	struct xl_softc		*sc;
2219	u_int16_t		status = 0;
2220
2221	sc = ifp->if_softc;
2222
2223	ifp->if_oerrors++;
2224	XL_SEL_WIN(4);
2225	status = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
2226	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
2227
2228	if (status & XL_MEDIASTAT_CARRIER)
2229		printf("%s: no carrier - transceiver cable problem?\n",
2230								sc->sc_dev.dv_xname);
2231	xl_txeoc(sc);
2232	xl_txeof(sc);
2233	xl_rxeof(sc);
2234	xl_init(sc);
2235
2236	if (!ifq_empty(&ifp->if_snd))
2237		(*ifp->if_start)(ifp);
2238}
2239
2240void
2241xl_freetxrx(struct xl_softc *sc)
2242{
2243	bus_dmamap_t	map;
2244	int		i;
2245
2246	/*
2247	 * Free data in the RX lists.
2248	 */
2249	for (i = 0; i < XL_RX_LIST_CNT; i++) {
2250		if (sc->xl_cdata.xl_rx_chain[i].map->dm_nsegs != 0) {
2251			map = sc->xl_cdata.xl_rx_chain[i].map;
2252
2253			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2254			    BUS_DMASYNC_POSTREAD);
2255			bus_dmamap_unload(sc->sc_dmat, map);
2256		}
2257		if (sc->xl_cdata.xl_rx_chain[i].xl_mbuf != NULL) {
2258			m_freem(sc->xl_cdata.xl_rx_chain[i].xl_mbuf);
2259			sc->xl_cdata.xl_rx_chain[i].xl_mbuf = NULL;
2260		}
2261	}
2262	bzero(&sc->xl_ldata->xl_rx_list, sizeof(sc->xl_ldata->xl_rx_list));
2263	/*
2264	 * Free the TX list buffers.
2265	 */
2266	for (i = 0; i < XL_TX_LIST_CNT; i++) {
2267		if (sc->xl_cdata.xl_tx_chain[i].map->dm_nsegs != 0) {
2268			map = sc->xl_cdata.xl_tx_chain[i].map;
2269
2270			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2271			    BUS_DMASYNC_POSTWRITE);
2272			bus_dmamap_unload(sc->sc_dmat, map);
2273		}
2274		if (sc->xl_cdata.xl_tx_chain[i].xl_mbuf != NULL) {
2275			m_freem(sc->xl_cdata.xl_tx_chain[i].xl_mbuf);
2276			sc->xl_cdata.xl_tx_chain[i].xl_mbuf = NULL;
2277		}
2278	}
2279	bzero(&sc->xl_ldata->xl_tx_list, sizeof(sc->xl_ldata->xl_tx_list));
2280}
2281
2282/*
2283 * Stop the adapter and free any mbufs allocated to the
2284 * RX and TX lists.
2285 */
2286void
2287xl_stop(struct xl_softc *sc)
2288{
2289	struct ifnet *ifp;
2290
2291	/* Stop the stats updater. */
2292	timeout_del(&sc->xl_stsup_tmo);
2293
2294	ifp = &sc->sc_arpcom.ac_if;
2295
2296	ifp->if_flags &= ~IFF_RUNNING;
2297	ifq_clr_oactive(&ifp->if_snd);
2298	ifp->if_timer = 0;
2299
2300	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISABLE);
2301	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE);
2302	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB);
2303	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISCARD);
2304	xl_wait(sc);
2305	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_DISABLE);
2306	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
2307	DELAY(800);
2308
2309#ifdef foo
2310	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
2311	xl_wait(sc);
2312	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
2313	xl_wait(sc);
2314#endif
2315
2316	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|XL_STAT_INTLATCH);
2317	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|0);
2318	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|0);
2319
2320	if (sc->intr_ack)
2321		(*sc->intr_ack)(sc);
2322
2323	xl_freetxrx(sc);
2324}
2325
2326#ifndef SMALL_KERNEL
2327void
2328xl_wol_power(struct xl_softc *sc)
2329{
2330	/* Re-enable RX and call upper layer WOL power routine
2331	 * if WOL is enabled. */
2332	if ((sc->xl_flags & XL_FLAG_WOL) && sc->wol_power) {
2333		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_ENABLE);
2334		sc->wol_power(sc->wol_power_arg);
2335	}
2336}
2337#endif
2338
2339void
2340xl_attach(struct xl_softc *sc)
2341{
2342	u_int8_t enaddr[ETHER_ADDR_LEN];
2343	u_int16_t		xcvr[2];
2344	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2345	int i;
2346	uint64_t media = IFM_ETHER|IFM_100_TX|IFM_FDX;
2347	struct ifmedia *ifm;
2348
2349	i = splnet();
2350	xl_reset(sc);
2351	splx(i);
2352
2353	/*
2354	 * Get station address from the EEPROM.
2355	 */
2356	if (xl_read_eeprom(sc, (caddr_t)&enaddr, XL_EE_OEM_ADR0, 3, 1)) {
2357		printf("\n%s: failed to read station address\n",
2358		    sc->sc_dev.dv_xname);
2359		return;
2360	}
2361	memcpy(&sc->sc_arpcom.ac_enaddr, enaddr, ETHER_ADDR_LEN);
2362
2363	if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct xl_list_data),
2364	    PAGE_SIZE, 0, sc->sc_listseg, 1, &sc->sc_listnseg,
2365	    BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0) {
2366		printf(": can't alloc list mem\n");
2367		return;
2368	}
2369	if (bus_dmamem_map(sc->sc_dmat, sc->sc_listseg, sc->sc_listnseg,
2370	    sizeof(struct xl_list_data), &sc->sc_listkva,
2371	    BUS_DMA_NOWAIT) != 0) {
2372		printf(": can't map list mem\n");
2373		return;
2374	}
2375	if (bus_dmamap_create(sc->sc_dmat, sizeof(struct xl_list_data), 1,
2376	    sizeof(struct xl_list_data), 0, BUS_DMA_NOWAIT,
2377	    &sc->sc_listmap) != 0) {
2378		printf(": can't alloc list map\n");
2379		return;
2380	}
2381	if (bus_dmamap_load(sc->sc_dmat, sc->sc_listmap, sc->sc_listkva,
2382	    sizeof(struct xl_list_data), NULL, BUS_DMA_NOWAIT) != 0) {
2383		printf(": can't load list map\n");
2384		return;
2385	}
2386	sc->xl_ldata = (struct xl_list_data *)sc->sc_listkva;
2387
2388	for (i = 0; i < XL_RX_LIST_CNT; i++) {
2389		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
2390		    0, BUS_DMA_NOWAIT,
2391		    &sc->xl_cdata.xl_rx_chain[i].map) != 0) {
2392			printf(": can't create rx map\n");
2393			return;
2394		}
2395	}
2396	if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
2397	    BUS_DMA_NOWAIT, &sc->sc_rx_sparemap) != 0) {
2398		printf(": can't create rx spare map\n");
2399		return;
2400	}
2401
2402	for (i = 0; i < XL_TX_LIST_CNT; i++) {
2403		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES,
2404		    XL_TX_LIST_CNT - 3, MCLBYTES, 0, BUS_DMA_NOWAIT,
2405		    &sc->xl_cdata.xl_tx_chain[i].map) != 0) {
2406			printf(": can't create tx map\n");
2407			return;
2408		}
2409	}
2410	if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, XL_TX_LIST_CNT - 3,
2411	    MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_tx_sparemap) != 0) {
2412		printf(": can't create tx spare map\n");
2413		return;
2414	}
2415
2416	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));
2417
2418	if (sc->xl_flags & (XL_FLAG_INVERT_LED_PWR|XL_FLAG_INVERT_MII_PWR)) {
2419		u_int16_t n;
2420
2421		XL_SEL_WIN(2);
2422		n = CSR_READ_2(sc, 12);
2423
2424		if (sc->xl_flags & XL_FLAG_INVERT_LED_PWR)
2425			n |= 0x0010;
2426
2427		if (sc->xl_flags & XL_FLAG_INVERT_MII_PWR)
2428			n |= 0x4000;
2429
2430		CSR_WRITE_2(sc, 12, n);
2431	}
2432
2433	/*
2434	 * Figure out the card type. 3c905B adapters have the
2435	 * 'supportsNoTxLength' bit set in the capabilities
2436	 * word in the EEPROM.
2437	 * Note: my 3c575C cardbus card lies. It returns a value
2438	 * of 0x1578 for its capabilities word, which is somewhat
2439	 * nonsensical. Another way to distinguish a 3c90x chip
2440	 * from a 3c90xB/C chip is to check for the 'supportsLargePackets'
2441	 * bit. This will only be set for 3c90x boomerang chips.
2442	 */
2443	xl_read_eeprom(sc, (caddr_t)&sc->xl_caps, XL_EE_CAPS, 1, 0);
2444	if (sc->xl_caps & XL_CAPS_NO_TXLENGTH ||
2445	    !(sc->xl_caps & XL_CAPS_LARGE_PKTS))
2446		sc->xl_type = XL_TYPE_905B;
2447	else
2448		sc->xl_type = XL_TYPE_90X;
2449
2450	/* Set the TX start threshold for best performance. */
2451	sc->xl_tx_thresh = XL_MIN_FRAMELEN;
2452
2453	timeout_set(&sc->xl_stsup_tmo, xl_stats_update, sc);
2454
2455	ifp->if_softc = sc;
2456	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2457	ifp->if_ioctl = xl_ioctl;
2458	if (sc->xl_type == XL_TYPE_905B)
2459		ifp->if_start = xl_start_90xB;
2460	else
2461		ifp->if_start = xl_start;
2462	ifp->if_watchdog = xl_watchdog;
2463	ifp->if_baudrate = 10000000;
2464	ifq_init_maxlen(&ifp->if_snd, XL_TX_LIST_CNT - 1);
2465	memcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
2466
2467	ifp->if_capabilities = IFCAP_VLAN_MTU;
2468
2469#ifndef XL905B_TXCSUM_BROKEN
2470	ifp->if_capabilities |= IFCAP_CSUM_IPv4|IFCAP_CSUM_TCPv4|
2471				IFCAP_CSUM_UDPv4;
2472#endif
2473
2474	XL_SEL_WIN(3);
2475	sc->xl_media = CSR_READ_2(sc, XL_W3_MEDIA_OPT);
2476
2477	xl_read_eeprom(sc, (char *)&xcvr, XL_EE_ICFG_0, 2, 0);
2478	sc->xl_xcvr = xcvr[0] | xcvr[1] << 16;
2479	sc->xl_xcvr &= XL_ICFG_CONNECTOR_MASK;
2480	sc->xl_xcvr >>= XL_ICFG_CONNECTOR_BITS;
2481
2482	xl_mediacheck(sc);
2483
2484	if (sc->xl_media & XL_MEDIAOPT_MII || sc->xl_media & XL_MEDIAOPT_BTX
2485	    || sc->xl_media & XL_MEDIAOPT_BT4) {
2486		ifmedia_init(&sc->sc_mii.mii_media, 0,
2487		    xl_ifmedia_upd, xl_ifmedia_sts);
2488		sc->xl_hasmii = 1;
2489		sc->sc_mii.mii_ifp = ifp;
2490		sc->sc_mii.mii_readreg = xl_miibus_readreg;
2491		sc->sc_mii.mii_writereg = xl_miibus_writereg;
2492		sc->sc_mii.mii_statchg = xl_miibus_statchg;
2493		xl_setcfg(sc);
2494		mii_attach((struct device *)sc, &sc->sc_mii, 0xffffffff,
2495		    MII_PHY_ANY, MII_OFFSET_ANY, 0);
2496
2497		if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
2498			ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE,
2499			    0, NULL);
2500			ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
2501		}
2502		else {
2503			ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
2504		}
2505		ifm = &sc->sc_mii.mii_media;
2506	}
2507	else {
2508		ifmedia_init(&sc->ifmedia, 0, xl_ifmedia_upd, xl_ifmedia_sts);
2509		sc->xl_hasmii = 0;
2510		ifm = &sc->ifmedia;
2511	}
2512
2513	/*
2514	 * Sanity check. If the user has selected "auto" and this isn't
2515	 * a 10/100 card of some kind, we need to force the transceiver
2516	 * type to something sane.
2517	 */
2518	if (sc->xl_xcvr == XL_XCVR_AUTO)
2519		xl_choose_xcvr(sc, 0);
2520
2521	if (sc->xl_media & XL_MEDIAOPT_BT) {
2522		ifmedia_add(ifm, IFM_ETHER|IFM_10_T, 0, NULL);
2523		ifmedia_add(ifm, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
2524		if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
2525			ifmedia_add(ifm, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
2526	}
2527
2528	if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) {
2529		/*
2530		 * Check for a 10baseFL board in disguise.
2531		 */
2532		if (sc->xl_type == XL_TYPE_905B &&
2533		    sc->xl_media == XL_MEDIAOPT_10FL) {
2534			ifmedia_add(ifm, IFM_ETHER|IFM_10_FL, 0, NULL);
2535			ifmedia_add(ifm, IFM_ETHER|IFM_10_FL|IFM_HDX,
2536			    0, NULL);
2537			if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
2538				ifmedia_add(ifm,
2539				    IFM_ETHER|IFM_10_FL|IFM_FDX, 0, NULL);
2540		} else {
2541			ifmedia_add(ifm, IFM_ETHER|IFM_10_5, 0, NULL);
2542		}
2543	}
2544
2545	if (sc->xl_media & XL_MEDIAOPT_BNC) {
2546		ifmedia_add(ifm, IFM_ETHER|IFM_10_2, 0, NULL);
2547	}
2548
2549	if (sc->xl_media & XL_MEDIAOPT_BFX) {
2550		ifp->if_baudrate = 100000000;
2551		ifmedia_add(ifm, IFM_ETHER|IFM_100_FX, 0, NULL);
2552	}
2553
2554	/* Choose a default media. */
2555	switch(sc->xl_xcvr) {
2556	case XL_XCVR_10BT:
2557		media = IFM_ETHER|IFM_10_T;
2558		xl_setmode(sc, media);
2559		break;
2560	case XL_XCVR_AUI:
2561		if (sc->xl_type == XL_TYPE_905B &&
2562		    sc->xl_media == XL_MEDIAOPT_10FL) {
2563			media = IFM_ETHER|IFM_10_FL;
2564			xl_setmode(sc, media);
2565		} else {
2566			media = IFM_ETHER|IFM_10_5;
2567			xl_setmode(sc, media);
2568		}
2569		break;
2570	case XL_XCVR_COAX:
2571		media = IFM_ETHER|IFM_10_2;
2572		xl_setmode(sc, media);
2573		break;
2574	case XL_XCVR_AUTO:
2575	case XL_XCVR_100BTX:
2576	case XL_XCVR_MII:
2577		/* Chosen by miibus */
2578		break;
2579	case XL_XCVR_100BFX:
2580		media = IFM_ETHER|IFM_100_FX;
2581		xl_setmode(sc, media);
2582		break;
2583	default:
2584		printf("%s: unknown XCVR type: %d\n", sc->sc_dev.dv_xname,
2585							sc->xl_xcvr);
2586		/*
2587		 * This will probably be wrong, but it prevents
2588		 * the ifmedia code from panicking.
2589		 */
2590		media = IFM_ETHER | IFM_10_T;
2591		break;
2592	}
2593
2594	if (sc->xl_hasmii == 0)
2595		ifmedia_set(&sc->ifmedia, media);
2596
2597	if (sc->xl_flags & XL_FLAG_NO_XCVR_PWR) {
2598		XL_SEL_WIN(0);
2599		CSR_WRITE_2(sc, XL_W0_MFG_ID, XL_NO_XCVR_PWR_MAGICBITS);
2600	}
2601
2602#ifndef SMALL_KERNEL
2603	/* Check availability of WOL. */
2604	if ((sc->xl_caps & XL_CAPS_PWRMGMT) != 0) {
2605		ifp->if_capabilities |= IFCAP_WOL;
2606		ifp->if_wol = xl_wol;
2607		xl_wol(ifp, 0);
2608	}
2609#endif
2610
2611	/*
2612	 * Call MI attach routines.
2613	 */
2614	if_attach(ifp);
2615	ether_ifattach(ifp);
2616}
2617
2618int
2619xl_detach(struct xl_softc *sc)
2620{
2621	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2622	extern void xl_freetxrx(struct xl_softc *);
2623
2624	/* Unhook our tick handler. */
2625	timeout_del(&sc->xl_stsup_tmo);
2626
2627	xl_freetxrx(sc);
2628
2629	/* Detach all PHYs */
2630	if (sc->xl_hasmii)
2631		mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2632
2633	/* Delete all remaining media. */
2634	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2635
2636	ether_ifdetach(ifp);
2637	if_detach(ifp);
2638
2639	return (0);
2640}
2641
2642#ifndef SMALL_KERNEL
2643int
2644xl_wol(struct ifnet *ifp, int enable)
2645{
2646	struct xl_softc		*sc = ifp->if_softc;
2647
2648	XL_SEL_WIN(7);
2649	if (enable) {
2650		if (!(ifp->if_flags & IFF_RUNNING))
2651			xl_init(sc);
2652		CSR_WRITE_2(sc, XL_W7_BM_PME, XL_BM_PME_MAGIC);
2653		sc->xl_flags |= XL_FLAG_WOL;
2654	} else {
2655		CSR_WRITE_2(sc, XL_W7_BM_PME, 0);
2656		sc->xl_flags &= ~XL_FLAG_WOL;
2657	}
2658	return (0);
2659}
2660#endif
2661
2662struct cfdriver xl_cd = {
2663	0, "xl", DV_IFNET
2664};
2665