if_lpe.c revision 261077
1/*-
2 * Copyright (c) 2011 Jakub Wojciech Klama <jceel@FreeBSD.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 */
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: stable/10/sys/arm/lpc/if_lpe.c 261077 2014-01-23 12:02:04Z loos $");
29
30#include <sys/param.h>
31#include <sys/endian.h>
32#include <sys/systm.h>
33#include <sys/sockio.h>
34#include <sys/mbuf.h>
35#include <sys/malloc.h>
36#include <sys/kernel.h>
37#include <sys/module.h>
38#include <sys/lock.h>
39#include <sys/mutex.h>
40#include <sys/rman.h>
41#include <sys/bus.h>
42#include <sys/socket.h>
43#include <machine/bus.h>
44#include <machine/intr.h>
45
46#include <net/if.h>
47#include <net/if_arp.h>
48#include <net/ethernet.h>
49#include <net/if_dl.h>
50#include <net/if_media.h>
51#include <net/if_types.h>
52#include <net/if_var.h>
53
54#include <net/bpf.h>
55
56#include <dev/ofw/ofw_bus.h>
57#include <dev/ofw/ofw_bus_subr.h>
58
59#include <dev/mii/mii.h>
60#include <dev/mii/miivar.h>
61
62#include <arm/lpc/lpcreg.h>
63#include <arm/lpc/lpcvar.h>
64#include <arm/lpc/if_lpereg.h>
65
66#include "miibus_if.h"
67
/* Debug printf wrapper: prefixes the calling function's name (DEBUG builds only). */
#ifdef DEBUG
#define debugf(fmt, args...) do { printf("%s(): ", __func__);   \
    printf(fmt,##args); } while (0)
#else
#define debugf(fmt, args...)
#endif
74
/* Callback argument for lpe_dmamap_cb(): receives the loaded bus address. */
struct lpe_dmamap_arg {
	bus_addr_t		lpe_dma_busaddr;	/* set by lpe_dmamap_cb() on success */
};
78
/* Per-slot Rx software state paired with a hardware descriptor. */
struct lpe_rxdesc {
	struct mbuf *		lpe_rxdesc_mbuf;	/* cluster attached to this slot */
	bus_dmamap_t		lpe_rxdesc_dmamap;	/* DMA map for the cluster */
};
83
/* Per-slot Tx software state paired with a hardware descriptor. */
struct lpe_txdesc {
	int			lpe_txdesc_first;	/* 1 on the first segment of a chain */
	struct mbuf *		lpe_txdesc_mbuf;	/* mbuf owned by this chain */
	bus_dmamap_t		lpe_txdesc_dmamap;	/* DMA map for the mbuf */
};
89
/* Software DMA state: tags, maps and per-slot descriptor bookkeeping. */
struct lpe_chain_data {
	bus_dma_tag_t		lpe_parent_tag;
	bus_dma_tag_t		lpe_tx_ring_tag;
	bus_dmamap_t		lpe_tx_ring_map;
	bus_dma_tag_t		lpe_tx_status_tag;
	bus_dmamap_t		lpe_tx_status_map;
	bus_dma_tag_t		lpe_tx_buf_tag;
	bus_dma_tag_t		lpe_rx_ring_tag;
	bus_dmamap_t		lpe_rx_ring_map;
	bus_dma_tag_t		lpe_rx_status_tag;
	bus_dmamap_t		lpe_rx_status_map;
	bus_dma_tag_t		lpe_rx_buf_tag;
	struct lpe_rxdesc	lpe_rx_desc[LPE_RXDESC_NUM];
	struct lpe_txdesc	lpe_tx_desc[LPE_TXDESC_NUM];
	int			lpe_tx_prod;	/* next Tx slot to fill */
	int			lpe_tx_last;	/* oldest un-reclaimed Tx slot */
	int			lpe_tx_used;	/* Tx descriptors in flight */
};
108
/* KVA pointers and bus addresses of the hardware descriptor/status rings. */
struct lpe_ring_data {
	struct lpe_hwdesc *	lpe_rx_ring;
	struct lpe_hwstatus *	lpe_rx_status;
	bus_addr_t		lpe_rx_ring_phys;
	bus_addr_t		lpe_rx_status_phys;
	struct lpe_hwdesc *	lpe_tx_ring;
	struct lpe_hwstatus *	lpe_tx_status;
	bus_addr_t		lpe_tx_ring_phys;
	bus_addr_t		lpe_tx_status_phys;
};
119
/* Per-device software context; lpe_mtx protects all mutable state. */
struct lpe_softc {
	struct ifnet *		lpe_ifp;	/* network interface */
	struct mtx		lpe_mtx;	/* driver lock */
	phandle_t		lpe_ofw;	/* FDT node handle */
	device_t		lpe_dev;
	device_t		lpe_miibus;
	uint8_t			lpe_enaddr[6];	/* station (MAC) address */
	struct resource	*	lpe_mem_res;	/* register window */
	struct resource *	lpe_irq_res;	/* interrupt line */
	void *			lpe_intrhand;
	bus_space_tag_t		lpe_bst;
	bus_space_handle_t	lpe_bsh;
#define	LPE_FLAG_LINK		(1 << 0)	/* PHY reports active, valid link */
	uint32_t		lpe_flags;
	int			lpe_watchdog_timer;	/* ticks until Tx timeout; 0 = disarmed */
	struct callout		lpe_tick;	/* 1 Hz MII poll + watchdog callout */
	struct lpe_chain_data	lpe_cdata;
	struct lpe_ring_data	lpe_rdata;
};
139
140static int lpe_probe(device_t);
141static int lpe_attach(device_t);
142static int lpe_detach(device_t);
143static int lpe_miibus_readreg(device_t, int, int);
144static int lpe_miibus_writereg(device_t, int, int, int);
145static void lpe_miibus_statchg(device_t);
146
147static void lpe_reset(struct lpe_softc *);
148static void lpe_init(void *);
149static void lpe_init_locked(struct lpe_softc *);
150static void lpe_start(struct ifnet *);
151static void lpe_start_locked(struct ifnet *);
152static void lpe_stop(struct lpe_softc *);
153static void lpe_stop_locked(struct lpe_softc *);
154static int lpe_ioctl(struct ifnet *, u_long, caddr_t);
155static void lpe_set_rxmode(struct lpe_softc *);
156static void lpe_set_rxfilter(struct lpe_softc *);
157static void lpe_intr(void *);
158static void lpe_rxintr(struct lpe_softc *);
159static void lpe_txintr(struct lpe_softc *);
160static void lpe_tick(void *);
161static void lpe_watchdog(struct lpe_softc *);
162static int lpe_encap(struct lpe_softc *, struct mbuf **);
163static int lpe_dma_alloc(struct lpe_softc *);
164static int lpe_dma_alloc_rx(struct lpe_softc *);
165static int lpe_dma_alloc_tx(struct lpe_softc *);
166static int lpe_init_rx(struct lpe_softc *);
167static int lpe_init_rxbuf(struct lpe_softc *, int);
168static void lpe_discard_rxbuf(struct lpe_softc *, int);
169static void lpe_dmamap_cb(void *, bus_dma_segment_t *, int, int);
170static int lpe_ifmedia_upd(struct ifnet *);
171static void lpe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
172
#define	lpe_lock(_sc)		mtx_lock(&(_sc)->lpe_mtx)
#define	lpe_unlock(_sc)		mtx_unlock(&(_sc)->lpe_mtx)
/*
 * Fixed: the parameter was named "sc" while the body referenced "_sc".
 * That only compiled because mtx_assert() expands to nothing without
 * INVARIANTS; with INVARIANTS enabled every use site would break.
 */
#define	lpe_lock_assert(_sc)	mtx_assert(&(_sc)->lpe_mtx, MA_OWNED)

#define	lpe_read_4(_sc, _reg)		\
    bus_space_read_4((_sc)->lpe_bst, (_sc)->lpe_bsh, (_reg))
#define	lpe_write_4(_sc, _reg, _val)	\
    bus_space_write_4((_sc)->lpe_bst, (_sc)->lpe_bsh, (_reg), (_val))

/* Rx status bits that count as a receive error. */
#define	LPE_HWDESC_RXERRS	(LPE_HWDESC_CRCERROR | LPE_HWDESC_SYMBOLERROR | \
    LPE_HWDESC_LENGTHERROR | LPE_HWDESC_ALIGNERROR | LPE_HWDESC_OVERRUN | \
    LPE_HWDESC_RXNODESCR)

/* Tx status bits that count as a transmit error. */
#define	LPE_HWDESC_TXERRS	(LPE_HWDESC_EXCDEFER | LPE_HWDESC_EXCCOLL | \
    LPE_HWDESC_LATECOLL | LPE_HWDESC_UNDERRUN | LPE_HWDESC_TXNODESCR)
188
189static int
190lpe_probe(device_t dev)
191{
192
193	if (!ofw_bus_is_compatible(dev, "lpc,ethernet"))
194		return (ENXIO);
195
196	device_set_desc(dev, "LPC32x0 10/100 Ethernet");
197	return (BUS_PROBE_DEFAULT);
198}
199
/*
 * Attach: allocate bus resources, set up the ifnet, interrupt handler,
 * MII bus and DMA state for the EMAC instance described by the FDT node.
 *
 * Returns 0 on success, ENXIO on any failure.
 */
static int
lpe_attach(device_t dev)
{
	struct lpe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp;
	int rid, i;
	uint32_t val;

	sc->lpe_dev = dev;
	sc->lpe_ofw = ofw_bus_get_node(dev);

	/* Use the FDT-provided MAC address, or a fixed fallback. */
	i = OF_getprop(sc->lpe_ofw, "local-mac-address", (void *)&sc->lpe_enaddr, 6);
	if (i != 6) {
		sc->lpe_enaddr[0] = 0x00;
		sc->lpe_enaddr[1] = 0x11;
		sc->lpe_enaddr[2] = 0x22;
		sc->lpe_enaddr[3] = 0x33;
		sc->lpe_enaddr[4] = 0x44;
		sc->lpe_enaddr[5] = 0x55;
	}

	mtx_init(&sc->lpe_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);

	callout_init_mtx(&sc->lpe_tick, &sc->lpe_mtx, 0);

	rid = 0;
	sc->lpe_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (!sc->lpe_mem_res) {
		device_printf(dev, "cannot allocate memory window\n");
		goto fail;
	}

	sc->lpe_bst = rman_get_bustag(sc->lpe_mem_res);
	sc->lpe_bsh = rman_get_bushandle(sc->lpe_mem_res);

	rid = 0;
	sc->lpe_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE);
	if (!sc->lpe_irq_res) {
		device_printf(dev, "cannot allocate interrupt\n");
		goto fail;
	}

	sc->lpe_ifp = if_alloc(IFT_ETHER);
	if (!sc->lpe_ifp) {
		device_printf(dev, "cannot allocated ifnet\n");
		goto fail;
	}

	ifp = sc->lpe_ifp;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = lpe_start;
	ifp->if_ioctl = lpe_ioctl;
	ifp->if_init = lpe_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN;
	IFQ_SET_READY(&ifp->if_snd);

	ether_ifattach(ifp, sc->lpe_enaddr);

	if (bus_setup_intr(dev, sc->lpe_irq_res, INTR_TYPE_NET, NULL,
	    lpe_intr, sc, &sc->lpe_intrhand)) {
		device_printf(dev, "cannot establish interrupt handler\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	/* Enable Ethernet clock */
	lpc_pwr_write(dev, LPC_CLKPWR_MACCLK_CTRL,
	    LPC_CLKPWR_MACCLK_CTRL_REG |
	    LPC_CLKPWR_MACCLK_CTRL_SLAVE |
	    LPC_CLKPWR_MACCLK_CTRL_MASTER |
	    LPC_CLKPWR_MACCLK_CTRL_HDWINF(3));

	/* Reset chip */
	lpe_reset(sc);

	/* Initialize MII */
	val = lpe_read_4(sc, LPE_COMMAND);
	lpe_write_4(sc, LPE_COMMAND, val | LPE_COMMAND_RMII);

	/*
	 * NOTE(review): if mii_attach() fails here, the fail path below
	 * frees the ifnet without a matching ether_ifdetach(), even though
	 * ether_ifattach() already ran above — verify and fix separately.
	 */
	if (mii_attach(dev, &sc->lpe_miibus, ifp, lpe_ifmedia_upd,
	    lpe_ifmedia_sts, BMSR_DEFCAPMASK, 0x01,
	    MII_OFFSET_ANY, 0)) {
		device_printf(dev, "cannot find PHY\n");
		goto fail;
	}

	/* NOTE(review): the return value of lpe_dma_alloc() is ignored. */
	lpe_dma_alloc(sc);

	return (0);

fail:
	/* NOTE(review): the mutex and callout initialized above are not
	 * torn down on this path — confirm against mtx_destroy(9). */
	if (sc->lpe_ifp)
		if_free(sc->lpe_ifp);
	if (sc->lpe_intrhand)
		bus_teardown_intr(dev, sc->lpe_irq_res, sc->lpe_intrhand);
	if (sc->lpe_irq_res)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->lpe_irq_res);
	if (sc->lpe_mem_res)
		bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->lpe_mem_res);
	return (ENXIO);
}
308
309static int
310lpe_detach(device_t dev)
311{
312	struct lpe_softc *sc = device_get_softc(dev);
313
314	lpe_stop(sc);
315
316	if_free(sc->lpe_ifp);
317	bus_teardown_intr(dev, sc->lpe_irq_res, sc->lpe_intrhand);
318	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->lpe_irq_res);
319	bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->lpe_mem_res);
320
321	return (0);
322}
323
324static int
325lpe_miibus_readreg(device_t dev, int phy, int reg)
326{
327	struct lpe_softc *sc = device_get_softc(dev);
328	uint32_t val;
329	int result;
330
331	lpe_write_4(sc, LPE_MCMD, LPE_MCMD_READ);
332	lpe_write_4(sc, LPE_MADR,
333	    (reg & LPE_MADR_REGMASK) << LPE_MADR_REGSHIFT |
334	    (phy & LPE_MADR_PHYMASK) << LPE_MADR_PHYSHIFT);
335
336	val = lpe_read_4(sc, LPE_MIND);
337
338	/* Wait until request is completed */
339	while (val & LPE_MIND_BUSY) {
340		val = lpe_read_4(sc, LPE_MIND);
341		DELAY(10);
342	}
343
344	if (val & LPE_MIND_INVALID)
345		return (0);
346
347	lpe_write_4(sc, LPE_MCMD, 0);
348	result = (lpe_read_4(sc, LPE_MRDD) & LPE_MRDD_DATAMASK);
349	debugf("phy=%d reg=%d result=0x%04x\n", phy, reg, result);
350
351	return (result);
352}
353
354static int
355lpe_miibus_writereg(device_t dev, int phy, int reg, int data)
356{
357	struct lpe_softc *sc = device_get_softc(dev);
358	uint32_t val;
359
360	debugf("phy=%d reg=%d data=0x%04x\n", phy, reg, data);
361
362	lpe_write_4(sc, LPE_MCMD, LPE_MCMD_WRITE);
363	lpe_write_4(sc, LPE_MADR,
364	    (reg & LPE_MADR_REGMASK) << LPE_MADR_REGSHIFT |
365	    (phy & LPE_MADR_PHYMASK) << LPE_MADR_PHYSHIFT);
366
367	lpe_write_4(sc, LPE_MWTD, (data & LPE_MWTD_DATAMASK));
368
369	val = lpe_read_4(sc, LPE_MIND);
370
371	/* Wait until request is completed */
372	while (val & LPE_MIND_BUSY) {
373		val = lpe_read_4(sc, LPE_MIND);
374		DELAY(10);
375	}
376
377	return (0);
378}
379
380static void
381lpe_miibus_statchg(device_t dev)
382{
383	struct lpe_softc *sc = device_get_softc(dev);
384	struct mii_data *mii = device_get_softc(sc->lpe_miibus);
385
386	lpe_lock(sc);
387
388	if ((mii->mii_media_status & IFM_ACTIVE) &&
389	    (mii->mii_media_status & IFM_AVALID))
390		sc->lpe_flags |= LPE_FLAG_LINK;
391	else
392		sc->lpe_flags &= ~LPE_FLAG_LINK;
393
394	lpe_unlock(sc);
395}
396
397static void
398lpe_reset(struct lpe_softc *sc)
399{
400	uint32_t mac1;
401
402	/* Enter soft reset mode */
403	mac1 = lpe_read_4(sc, LPE_MAC1);
404	lpe_write_4(sc, LPE_MAC1, mac1 | LPE_MAC1_SOFTRESET | LPE_MAC1_RESETTX |
405	    LPE_MAC1_RESETMCSTX | LPE_MAC1_RESETRX | LPE_MAC1_RESETMCSRX);
406
407	/* Reset registers, Tx path and Rx path */
408	lpe_write_4(sc, LPE_COMMAND, LPE_COMMAND_REGRESET |
409	    LPE_COMMAND_TXRESET | LPE_COMMAND_RXRESET);
410
411	/* Set station address */
412	lpe_write_4(sc, LPE_SA2, sc->lpe_enaddr[1] << 8 | sc->lpe_enaddr[0]);
413	lpe_write_4(sc, LPE_SA1, sc->lpe_enaddr[3] << 8 | sc->lpe_enaddr[2]);
414	lpe_write_4(sc, LPE_SA0, sc->lpe_enaddr[5] << 8 | sc->lpe_enaddr[4]);
415
416	/* Leave soft reset mode */
417	mac1 = lpe_read_4(sc, LPE_MAC1);
418	lpe_write_4(sc, LPE_MAC1, mac1 & ~(LPE_MAC1_SOFTRESET | LPE_MAC1_RESETTX |
419	    LPE_MAC1_RESETMCSTX | LPE_MAC1_RESETRX | LPE_MAC1_RESETMCSRX));
420}
421
/*
 * if_init entry point: take the driver lock and run the real init.
 */
static void
lpe_init(void *arg)
{
	struct lpe_softc *sc;

	sc = (struct lpe_softc *)arg;
	lpe_lock(sc);
	lpe_init_locked(sc);
	lpe_unlock(sc);
}
431
/*
 * Bring the interface up: enable the MAC Tx/Rx paths, program the
 * interrupt mask and hand the descriptor rings to the hardware.
 * Must be called with the driver lock held; no-op if already running.
 */
static void
lpe_init_locked(struct lpe_softc *sc)
{
	struct ifnet *ifp = sc->lpe_ifp;
	uint32_t cmd, mac1;

	lpe_lock_assert(sc);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return;

	/* Enable Tx and Rx */
	cmd = lpe_read_4(sc, LPE_COMMAND);
	lpe_write_4(sc, LPE_COMMAND, cmd | LPE_COMMAND_RXENABLE |
	    LPE_COMMAND_TXENABLE | LPE_COMMAND_PASSRUNTFRAME);

	/*
	 * Enable receive.  NOTE(review): the read-back value is read but
	 * deliberately not OR-ed back in (see commented-out "mac1 |"), so
	 * MAC1 is overwritten rather than updated — confirm intent.
	 */
	mac1 = lpe_read_4(sc, LPE_MAC1);
	lpe_write_4(sc, LPE_MAC1, /*mac1 |*/ LPE_MAC1_RXENABLE | LPE_MAC1_PASSALL);

	/* NOTE(review): full duplex is forced here regardless of the
	 * negotiated PHY duplex — verify against lpe_miibus_statchg(). */
	lpe_write_4(sc, LPE_MAC2, LPE_MAC2_CRCENABLE | LPE_MAC2_PADCRCENABLE |
	    LPE_MAC2_FULLDUPLEX);

	lpe_write_4(sc, LPE_MCFG, LPE_MCFG_CLKSEL(7));

	/* Set up Rx filter */
	lpe_set_rxmode(sc);

	/* Enable interrupts */
	lpe_write_4(sc, LPE_INTENABLE, LPE_INT_RXOVERRUN | LPE_INT_RXERROR |
	    LPE_INT_RXFINISH | LPE_INT_RXDONE | LPE_INT_TXUNDERRUN |
	    LPE_INT_TXERROR | LPE_INT_TXFINISH | LPE_INT_TXDONE);

	/* Reset software Tx ring state. */
	sc->lpe_cdata.lpe_tx_prod = 0;
	sc->lpe_cdata.lpe_tx_last = 0;
	sc->lpe_cdata.lpe_tx_used = 0;

	lpe_init_rx(sc);

	/* Initialize Rx packet and status descriptor heads */
	lpe_write_4(sc, LPE_RXDESC, sc->lpe_rdata.lpe_rx_ring_phys);
	lpe_write_4(sc, LPE_RXSTATUS, sc->lpe_rdata.lpe_rx_status_phys);
	lpe_write_4(sc, LPE_RXDESC_NUMBER, LPE_RXDESC_NUM - 1);
	lpe_write_4(sc, LPE_RXDESC_CONS, 0);

	/* Initialize Tx packet and status descriptor heads */
	lpe_write_4(sc, LPE_TXDESC, sc->lpe_rdata.lpe_tx_ring_phys);
	lpe_write_4(sc, LPE_TXSTATUS, sc->lpe_rdata.lpe_tx_status_phys);
	lpe_write_4(sc, LPE_TXDESC_NUMBER, LPE_TXDESC_NUM - 1);
	lpe_write_4(sc, LPE_TXDESC_PROD, 0);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	/* Start the 1 Hz tick that drives MII polling and the watchdog. */
	callout_reset(&sc->lpe_tick, hz, lpe_tick, sc);
}
488
489static void
490lpe_start(struct ifnet *ifp)
491{
492	struct lpe_softc *sc = (struct lpe_softc *)ifp->if_softc;
493
494	lpe_lock(sc);
495	lpe_start_locked(ifp);
496	lpe_unlock(sc);
497}
498
499static void
500lpe_start_locked(struct ifnet *ifp)
501{
502	struct lpe_softc *sc = (struct lpe_softc *)ifp->if_softc;
503	struct mbuf *m_head;
504	int encap = 0;
505
506	lpe_lock_assert(sc);
507
508	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
509		if (lpe_read_4(sc, LPE_TXDESC_PROD) ==
510		    lpe_read_4(sc, LPE_TXDESC_CONS) - 5)
511			break;
512
513		/* Dequeue first packet */
514		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
515		if (!m_head)
516			break;
517
518		lpe_encap(sc, &m_head);
519
520		encap++;
521	}
522
523	/* Submit new descriptor list */
524	if (encap) {
525		lpe_write_4(sc, LPE_TXDESC_PROD, sc->lpe_cdata.lpe_tx_prod);
526		sc->lpe_watchdog_timer = 5;
527	}
528
529}
530
531static int
532lpe_encap(struct lpe_softc *sc, struct mbuf **m_head)
533{
534	struct lpe_txdesc *txd;
535	struct lpe_hwdesc *hwd;
536	bus_dma_segment_t segs[LPE_MAXFRAGS];
537	int i, err, nsegs, prod;
538
539	lpe_lock_assert(sc);
540	M_ASSERTPKTHDR((*m_head));
541
542	prod = sc->lpe_cdata.lpe_tx_prod;
543	txd = &sc->lpe_cdata.lpe_tx_desc[prod];
544
545	debugf("starting with prod=%d\n", prod);
546
547	err = bus_dmamap_load_mbuf_sg(sc->lpe_cdata.lpe_tx_buf_tag,
548	    txd->lpe_txdesc_dmamap, *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
549
550	if (err)
551		return (err);
552
553	if (nsegs == 0) {
554		m_freem(*m_head);
555		*m_head = NULL;
556		return (EIO);
557	}
558
559        bus_dmamap_sync(sc->lpe_cdata.lpe_tx_buf_tag, txd->lpe_txdesc_dmamap,
560          BUS_DMASYNC_PREREAD);
561        bus_dmamap_sync(sc->lpe_cdata.lpe_tx_ring_tag, sc->lpe_cdata.lpe_tx_ring_map,
562            BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
563
564	txd->lpe_txdesc_first = 1;
565	txd->lpe_txdesc_mbuf = *m_head;
566
567	for (i = 0; i < nsegs; i++) {
568		hwd = &sc->lpe_rdata.lpe_tx_ring[prod];
569		hwd->lhr_data = segs[i].ds_addr;
570		hwd->lhr_control = segs[i].ds_len - 1;
571
572		if (i == nsegs - 1) {
573			hwd->lhr_control |= LPE_HWDESC_LASTFLAG;
574			hwd->lhr_control |= LPE_HWDESC_INTERRUPT;
575			hwd->lhr_control |= LPE_HWDESC_CRC;
576			hwd->lhr_control |= LPE_HWDESC_PAD;
577		}
578
579		LPE_INC(prod, LPE_TXDESC_NUM);
580	}
581
582	bus_dmamap_sync(sc->lpe_cdata.lpe_tx_ring_tag, sc->lpe_cdata.lpe_tx_ring_map,
583	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
584
585	sc->lpe_cdata.lpe_tx_used += nsegs;
586	sc->lpe_cdata.lpe_tx_prod = prod;
587
588	return (0);
589}
590
/*
 * Locked wrapper around lpe_stop_locked().
 */
static void
lpe_stop(struct lpe_softc *sc)
{

	lpe_lock(sc);
	lpe_stop_locked(sc);
	lpe_unlock(sc);
}
598
599static void
600lpe_stop_locked(struct lpe_softc *sc)
601{
602	lpe_lock_assert(sc);
603
604	callout_stop(&sc->lpe_tick);
605
606	/* Disable interrupts */
607	lpe_write_4(sc, LPE_INTCLEAR, 0xffffffff);
608
609	/* Stop EMAC */
610	lpe_write_4(sc, LPE_MAC1, 0);
611	lpe_write_4(sc, LPE_MAC2, 0);
612	lpe_write_4(sc, LPE_COMMAND, 0);
613
614	sc->lpe_ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
615	sc->lpe_ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
616}
617
618static int
619lpe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
620{
621	struct lpe_softc *sc = ifp->if_softc;
622	struct mii_data *mii = device_get_softc(sc->lpe_miibus);
623	struct ifreq *ifr = (struct ifreq *)data;
624	int err = 0;
625
626	switch (cmd) {
627	case SIOCSIFFLAGS:
628		lpe_lock(sc);
629		if (ifp->if_flags & IFF_UP) {
630			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
631				lpe_set_rxmode(sc);
632				lpe_set_rxfilter(sc);
633			} else
634				lpe_init_locked(sc);
635		} else
636			lpe_stop(sc);
637		lpe_unlock(sc);
638		break;
639	case SIOCADDMULTI:
640	case SIOCDELMULTI:
641		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
642			lpe_lock(sc);
643			lpe_set_rxfilter(sc);
644			lpe_unlock(sc);
645		}
646		break;
647	case SIOCGIFMEDIA:
648	case SIOCSIFMEDIA:
649		err = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
650		break;
651	default:
652		err = ether_ioctl(ifp, cmd, data);
653		break;
654	}
655
656	return (err);
657}
658
659static void lpe_set_rxmode(struct lpe_softc *sc)
660{
661	struct ifnet *ifp = sc->lpe_ifp;
662	uint32_t rxfilt;
663
664	rxfilt = LPE_RXFILTER_UNIHASH | LPE_RXFILTER_MULTIHASH | LPE_RXFILTER_PERFECT;
665
666	if (ifp->if_flags & IFF_BROADCAST)
667		rxfilt |= LPE_RXFILTER_BROADCAST;
668
669	if (ifp->if_flags & IFF_PROMISC)
670		rxfilt |= LPE_RXFILTER_UNICAST | LPE_RXFILTER_MULTICAST;
671
672	if (ifp->if_flags & IFF_ALLMULTI)
673		rxfilt |= LPE_RXFILTER_MULTICAST;
674
675	lpe_write_4(sc, LPE_RXFILTER_CTRL, rxfilt);
676}
677
678static void lpe_set_rxfilter(struct lpe_softc *sc)
679{
680	struct ifnet *ifp = sc->lpe_ifp;
681	struct ifmultiaddr *ifma;
682	int index;
683	uint32_t hashl, hashh;
684
685	hashl = 0;
686	hashh = 0;
687
688	if_maddr_rlock(ifp);
689	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
690		if (ifma->ifma_addr->sa_family != AF_LINK)
691			continue;
692
693		index = ether_crc32_be(LLADDR((struct sockaddr_dl *)
694		    ifma->ifma_addr), ETHER_ADDR_LEN) >> 23 & 0x3f;
695
696		if (index > 31)
697			hashh |= (1 << (index - 32));
698		else
699			hashl |= (1 << index);
700	}
701	if_maddr_runlock(ifp);
702
703	/* Program new hash filter */
704	lpe_write_4(sc, LPE_HASHFILTER_L, hashl);
705	lpe_write_4(sc, LPE_HASHFILTER_H, hashh);
706}
707
708static void
709lpe_intr(void *arg)
710{
711	struct lpe_softc *sc = (struct lpe_softc *)arg;
712	uint32_t intstatus;
713
714	debugf("status=0x%08x\n", lpe_read_4(sc, LPE_INTSTATUS));
715
716	lpe_lock(sc);
717
718	while ((intstatus = lpe_read_4(sc, LPE_INTSTATUS))) {
719		if (intstatus & LPE_INT_RXDONE)
720			lpe_rxintr(sc);
721
722		if (intstatus & LPE_INT_TXDONE)
723			lpe_txintr(sc);
724
725		lpe_write_4(sc, LPE_INTCLEAR, 0xffff);
726	}
727
728	lpe_unlock(sc);
729}
730
731static void
732lpe_rxintr(struct lpe_softc *sc)
733{
734	struct ifnet *ifp = sc->lpe_ifp;
735	struct lpe_hwdesc *hwd;
736	struct lpe_hwstatus *hws;
737	struct lpe_rxdesc *rxd;
738	struct mbuf *m;
739	int prod, cons;
740
741	for (;;) {
742		prod = lpe_read_4(sc, LPE_RXDESC_PROD);
743		cons = lpe_read_4(sc, LPE_RXDESC_CONS);
744
745		if (prod == cons)
746			break;
747
748		rxd = &sc->lpe_cdata.lpe_rx_desc[cons];
749		hwd = &sc->lpe_rdata.lpe_rx_ring[cons];
750		hws = &sc->lpe_rdata.lpe_rx_status[cons];
751
752		/* Check received frame for errors */
753		if (hws->lhs_info & LPE_HWDESC_RXERRS) {
754			ifp->if_ierrors++;
755			lpe_discard_rxbuf(sc, cons);
756			lpe_init_rxbuf(sc, cons);
757			goto skip;
758		}
759
760		m = rxd->lpe_rxdesc_mbuf;
761		m->m_pkthdr.rcvif = ifp;
762		m->m_data += 2;
763
764		ifp->if_ipackets++;
765
766		lpe_unlock(sc);
767		(*ifp->if_input)(ifp, m);
768		lpe_lock(sc);
769
770		lpe_init_rxbuf(sc, cons);
771skip:
772		LPE_INC(cons, LPE_RXDESC_NUM);
773		lpe_write_4(sc, LPE_RXDESC_CONS, cons);
774	}
775}
776
777static void
778lpe_txintr(struct lpe_softc *sc)
779{
780	struct ifnet *ifp = sc->lpe_ifp;
781	struct lpe_hwdesc *hwd;
782	struct lpe_hwstatus *hws;
783	struct lpe_txdesc *txd;
784	int cons, last;
785
786	for (;;) {
787		cons = lpe_read_4(sc, LPE_TXDESC_CONS);
788		last = sc->lpe_cdata.lpe_tx_last;
789
790		if (cons == last)
791			break;
792
793		txd = &sc->lpe_cdata.lpe_tx_desc[last];
794		hwd = &sc->lpe_rdata.lpe_tx_ring[last];
795		hws = &sc->lpe_rdata.lpe_tx_status[last];
796
797		bus_dmamap_sync(sc->lpe_cdata.lpe_tx_buf_tag,
798		    txd->lpe_txdesc_dmamap, BUS_DMASYNC_POSTWRITE);
799
800		ifp->if_collisions += LPE_HWDESC_COLLISIONS(hws->lhs_info);
801
802		if (hws->lhs_info & LPE_HWDESC_TXERRS)
803			ifp->if_oerrors++;
804		else
805			ifp->if_opackets++;
806
807		if (txd->lpe_txdesc_first) {
808			bus_dmamap_unload(sc->lpe_cdata.lpe_tx_buf_tag,
809			    txd->lpe_txdesc_dmamap);
810
811			m_freem(txd->lpe_txdesc_mbuf);
812			txd->lpe_txdesc_mbuf = NULL;
813			txd->lpe_txdesc_first = 0;
814		}
815
816		sc->lpe_cdata.lpe_tx_used--;
817		LPE_INC(sc->lpe_cdata.lpe_tx_last, LPE_TXDESC_NUM);
818	}
819
820	if (!sc->lpe_cdata.lpe_tx_used)
821		sc->lpe_watchdog_timer = 0;
822}
823
824static void
825lpe_tick(void *arg)
826{
827	struct lpe_softc *sc = (struct lpe_softc *)arg;
828	struct mii_data *mii = device_get_softc(sc->lpe_miibus);
829
830	lpe_lock_assert(sc);
831
832	mii_tick(mii);
833	lpe_watchdog(sc);
834
835	callout_reset(&sc->lpe_tick, hz, lpe_tick, sc);
836}
837
838static void
839lpe_watchdog(struct lpe_softc *sc)
840{
841	struct ifnet *ifp = sc->lpe_ifp;
842
843	lpe_lock_assert(sc);
844
845	if (sc->lpe_watchdog_timer == 0 || sc->lpe_watchdog_timer--)
846		return;
847
848	/* Chip has stopped responding */
849	device_printf(sc->lpe_dev, "WARNING: chip hangup, restarting...\n");
850	lpe_stop_locked(sc);
851	lpe_init_locked(sc);
852
853	/* Try to resend packets */
854	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
855		lpe_start_locked(ifp);
856}
857
858static int
859lpe_dma_alloc(struct lpe_softc *sc)
860{
861	int err;
862
863	/* Create parent DMA tag */
864	err = bus_dma_tag_create(
865	    bus_get_dma_tag(sc->lpe_dev),
866	    1, 0,			/* alignment, boundary */
867	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
868	    BUS_SPACE_MAXADDR,		/* highaddr */
869	    NULL, NULL,			/* filter, filterarg */
870	    BUS_SPACE_MAXSIZE_32BIT, 0,	/* maxsize, nsegments */
871	    BUS_SPACE_MAXSIZE_32BIT, 0,	/* maxsegsize, flags */
872	    NULL, NULL,			/* lockfunc, lockarg */
873	    &sc->lpe_cdata.lpe_parent_tag);
874
875	if (err) {
876		device_printf(sc->lpe_dev, "cannot create parent DMA tag\n");
877		return (err);
878	}
879
880	err = lpe_dma_alloc_rx(sc);
881	if (err)
882		return (err);
883
884	err = lpe_dma_alloc_tx(sc);
885	if (err)
886		return (err);
887
888	return (0);
889}
890
891static int
892lpe_dma_alloc_rx(struct lpe_softc *sc)
893{
894	struct lpe_rxdesc *rxd;
895	struct lpe_dmamap_arg ctx;
896	int err, i;
897
898	/* Create tag for Rx ring */
899	err = bus_dma_tag_create(
900	    sc->lpe_cdata.lpe_parent_tag,
901	    LPE_DESC_ALIGN, 0,		/* alignment, boundary */
902	    BUS_SPACE_MAXADDR,		/* lowaddr */
903	    BUS_SPACE_MAXADDR,		/* highaddr */
904	    NULL, NULL,			/* filter, filterarg */
905	    LPE_RXDESC_SIZE, 1,		/* maxsize, nsegments */
906	    LPE_RXDESC_SIZE, 0,		/* maxsegsize, flags */
907	    NULL, NULL,			/* lockfunc, lockarg */
908	    &sc->lpe_cdata.lpe_rx_ring_tag);
909
910	if (err) {
911		device_printf(sc->lpe_dev, "cannot create Rx ring DMA tag\n");
912		goto fail;
913	}
914
915	/* Create tag for Rx status ring */
916	err = bus_dma_tag_create(
917	    sc->lpe_cdata.lpe_parent_tag,
918	    LPE_DESC_ALIGN, 0,		/* alignment, boundary */
919	    BUS_SPACE_MAXADDR,		/* lowaddr */
920	    BUS_SPACE_MAXADDR,		/* highaddr */
921	    NULL, NULL,			/* filter, filterarg */
922	    LPE_RXSTATUS_SIZE, 1,	/* maxsize, nsegments */
923	    LPE_RXSTATUS_SIZE, 0,	/* maxsegsize, flags */
924	    NULL, NULL,			/* lockfunc, lockarg */
925	    &sc->lpe_cdata.lpe_rx_status_tag);
926
927	if (err) {
928		device_printf(sc->lpe_dev, "cannot create Rx status ring DMA tag\n");
929		goto fail;
930	}
931
932	/* Create tag for Rx buffers */
933	err = bus_dma_tag_create(
934	    sc->lpe_cdata.lpe_parent_tag,
935	    LPE_DESC_ALIGN, 0,		/* alignment, boundary */
936	    BUS_SPACE_MAXADDR,		/* lowaddr */
937	    BUS_SPACE_MAXADDR,		/* highaddr */
938	    NULL, NULL,			/* filter, filterarg */
939	    MCLBYTES * LPE_RXDESC_NUM,	/* maxsize */
940	    LPE_RXDESC_NUM,		/* segments */
941	    MCLBYTES, 0,		/* maxsegsize, flags */
942	    NULL, NULL,			/* lockfunc, lockarg */
943	    &sc->lpe_cdata.lpe_rx_buf_tag);
944
945	if (err) {
946		device_printf(sc->lpe_dev, "cannot create Rx buffers DMA tag\n");
947		goto fail;
948	}
949
950	/* Allocate Rx DMA ring */
951	err = bus_dmamem_alloc(sc->lpe_cdata.lpe_rx_ring_tag,
952	    (void **)&sc->lpe_rdata.lpe_rx_ring, BUS_DMA_WAITOK | BUS_DMA_COHERENT |
953	    BUS_DMA_ZERO, &sc->lpe_cdata.lpe_rx_ring_map);
954
955	err = bus_dmamap_load(sc->lpe_cdata.lpe_rx_ring_tag,
956	    sc->lpe_cdata.lpe_rx_ring_map, sc->lpe_rdata.lpe_rx_ring,
957	    LPE_RXDESC_SIZE, lpe_dmamap_cb, &ctx, 0);
958
959	sc->lpe_rdata.lpe_rx_ring_phys = ctx.lpe_dma_busaddr;
960
961	/* Allocate Rx status ring */
962	err = bus_dmamem_alloc(sc->lpe_cdata.lpe_rx_status_tag,
963	    (void **)&sc->lpe_rdata.lpe_rx_status, BUS_DMA_WAITOK | BUS_DMA_COHERENT |
964	    BUS_DMA_ZERO, &sc->lpe_cdata.lpe_rx_status_map);
965
966	err = bus_dmamap_load(sc->lpe_cdata.lpe_rx_status_tag,
967	    sc->lpe_cdata.lpe_rx_status_map, sc->lpe_rdata.lpe_rx_status,
968	    LPE_RXDESC_SIZE, lpe_dmamap_cb, &ctx, 0);
969
970	sc->lpe_rdata.lpe_rx_status_phys = ctx.lpe_dma_busaddr;
971
972
973	/* Create Rx buffers DMA map */
974	for (i = 0; i < LPE_RXDESC_NUM; i++) {
975		rxd = &sc->lpe_cdata.lpe_rx_desc[i];
976		rxd->lpe_rxdesc_mbuf = NULL;
977		rxd->lpe_rxdesc_dmamap = NULL;
978
979		err = bus_dmamap_create(sc->lpe_cdata.lpe_rx_buf_tag, 0,
980		    &rxd->lpe_rxdesc_dmamap);
981
982		if (err) {
983			device_printf(sc->lpe_dev, "cannot create Rx DMA map\n");
984			return (err);
985		}
986	}
987
988	return (0);
989fail:
990	return (err);
991}
992
993static int
994lpe_dma_alloc_tx(struct lpe_softc *sc)
995{
996	struct lpe_txdesc *txd;
997	struct lpe_dmamap_arg ctx;
998	int err, i;
999
1000	/* Create tag for Tx ring */
1001	err = bus_dma_tag_create(
1002	    sc->lpe_cdata.lpe_parent_tag,
1003	    LPE_DESC_ALIGN, 0,		/* alignment, boundary */
1004	    BUS_SPACE_MAXADDR,		/* lowaddr */
1005	    BUS_SPACE_MAXADDR,		/* highaddr */
1006	    NULL, NULL,			/* filter, filterarg */
1007	    LPE_TXDESC_SIZE, 1,		/* maxsize, nsegments */
1008	    LPE_TXDESC_SIZE, 0,		/* maxsegsize, flags */
1009	    NULL, NULL,			/* lockfunc, lockarg */
1010	    &sc->lpe_cdata.lpe_tx_ring_tag);
1011
1012	if (err) {
1013		device_printf(sc->lpe_dev, "cannot create Tx ring DMA tag\n");
1014		goto fail;
1015	}
1016
1017	/* Create tag for Tx status ring */
1018	err = bus_dma_tag_create(
1019	    sc->lpe_cdata.lpe_parent_tag,
1020	    LPE_DESC_ALIGN, 0,		/* alignment, boundary */
1021	    BUS_SPACE_MAXADDR,		/* lowaddr */
1022	    BUS_SPACE_MAXADDR,		/* highaddr */
1023	    NULL, NULL,			/* filter, filterarg */
1024	    LPE_TXSTATUS_SIZE, 1,	/* maxsize, nsegments */
1025	    LPE_TXSTATUS_SIZE, 0,	/* maxsegsize, flags */
1026	    NULL, NULL,			/* lockfunc, lockarg */
1027	    &sc->lpe_cdata.lpe_tx_status_tag);
1028
1029	if (err) {
1030		device_printf(sc->lpe_dev, "cannot create Tx status ring DMA tag\n");
1031		goto fail;
1032	}
1033
1034	/* Create tag for Tx buffers */
1035	err = bus_dma_tag_create(
1036	    sc->lpe_cdata.lpe_parent_tag,
1037	    LPE_DESC_ALIGN, 0,		/* alignment, boundary */
1038	    BUS_SPACE_MAXADDR,		/* lowaddr */
1039	    BUS_SPACE_MAXADDR,		/* highaddr */
1040	    NULL, NULL,			/* filter, filterarg */
1041	    MCLBYTES * LPE_TXDESC_NUM,	/* maxsize */
1042	    LPE_TXDESC_NUM,		/* segments */
1043	    MCLBYTES, 0,		/* maxsegsize, flags */
1044	    NULL, NULL,			/* lockfunc, lockarg */
1045	    &sc->lpe_cdata.lpe_tx_buf_tag);
1046
1047	if (err) {
1048		device_printf(sc->lpe_dev, "cannot create Tx buffers DMA tag\n");
1049		goto fail;
1050	}
1051
1052	/* Allocate Tx DMA ring */
1053	err = bus_dmamem_alloc(sc->lpe_cdata.lpe_tx_ring_tag,
1054	    (void **)&sc->lpe_rdata.lpe_tx_ring, BUS_DMA_WAITOK | BUS_DMA_COHERENT |
1055	    BUS_DMA_ZERO, &sc->lpe_cdata.lpe_tx_ring_map);
1056
1057	err = bus_dmamap_load(sc->lpe_cdata.lpe_tx_ring_tag,
1058	    sc->lpe_cdata.lpe_tx_ring_map, sc->lpe_rdata.lpe_tx_ring,
1059	    LPE_RXDESC_SIZE, lpe_dmamap_cb, &ctx, 0);
1060
1061	sc->lpe_rdata.lpe_tx_ring_phys = ctx.lpe_dma_busaddr;
1062
1063	/* Allocate Tx status ring */
1064	err = bus_dmamem_alloc(sc->lpe_cdata.lpe_tx_status_tag,
1065	    (void **)&sc->lpe_rdata.lpe_tx_status, BUS_DMA_WAITOK | BUS_DMA_COHERENT |
1066	    BUS_DMA_ZERO, &sc->lpe_cdata.lpe_tx_status_map);
1067
1068	err = bus_dmamap_load(sc->lpe_cdata.lpe_tx_status_tag,
1069	    sc->lpe_cdata.lpe_tx_status_map, sc->lpe_rdata.lpe_tx_status,
1070	    LPE_RXDESC_SIZE, lpe_dmamap_cb, &ctx, 0);
1071
1072	sc->lpe_rdata.lpe_tx_status_phys = ctx.lpe_dma_busaddr;
1073
1074
1075	/* Create Tx buffers DMA map */
1076	for (i = 0; i < LPE_TXDESC_NUM; i++) {
1077		txd = &sc->lpe_cdata.lpe_tx_desc[i];
1078		txd->lpe_txdesc_mbuf = NULL;
1079		txd->lpe_txdesc_dmamap = NULL;
1080		txd->lpe_txdesc_first = 0;
1081
1082		err = bus_dmamap_create(sc->lpe_cdata.lpe_tx_buf_tag, 0,
1083		    &txd->lpe_txdesc_dmamap);
1084
1085		if (err) {
1086			device_printf(sc->lpe_dev, "cannot create Tx DMA map\n");
1087			return (err);
1088		}
1089	}
1090
1091	return (0);
1092fail:
1093	return (err);
1094}
1095
1096static int
1097lpe_init_rx(struct lpe_softc *sc)
1098{
1099	int i, err;
1100
1101	for (i = 0; i < LPE_RXDESC_NUM; i++) {
1102		err = lpe_init_rxbuf(sc, i);
1103		if (err)
1104			return (err);
1105	}
1106
1107	return (0);
1108}
1109
/*
 * Attach a fresh mbuf cluster to Rx slot 'n' and point the hardware
 * descriptor at it.  Returns 0 on success, ENOBUFS if no cluster or
 * DMA mapping is available.
 */
static int
lpe_init_rxbuf(struct lpe_softc *sc, int n)
{
	struct lpe_rxdesc *rxd;
	struct lpe_hwdesc *hwd;
	struct lpe_hwstatus *hws;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	int nsegs;

	rxd = &sc->lpe_cdata.lpe_rx_desc[n];
	hwd = &sc->lpe_rdata.lpe_rx_ring[n];
	hws = &sc->lpe_rdata.lpe_rx_status[n];
	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);

	if (!m) {
		device_printf(sc->lpe_dev, "WARNING: mbufs exhausted!\n");
		return (ENOBUFS);
	}

	m->m_len = m->m_pkthdr.len = MCLBYTES;

	/* Drop any stale mapping before loading the new cluster. */
	bus_dmamap_unload(sc->lpe_cdata.lpe_rx_buf_tag, rxd->lpe_rxdesc_dmamap);

	if (bus_dmamap_load_mbuf_sg(sc->lpe_cdata.lpe_rx_buf_tag,
	    rxd->lpe_rxdesc_dmamap, m, segs, &nsegs, 0)) {
		m_freem(m);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->lpe_cdata.lpe_rx_buf_tag, rxd->lpe_rxdesc_dmamap,
	    BUS_DMASYNC_PREREAD);

	rxd->lpe_rxdesc_mbuf = m;
	/*
	 * DMA starts 2 bytes into the cluster, matching the m_data += 2
	 * in lpe_rxintr().  NOTE(review): lhr_control still encodes the
	 * full segment length — confirm the hardware cannot overrun the
	 * cluster by those 2 bytes.
	 */
	hwd->lhr_data = segs[0].ds_addr + 2;
	hwd->lhr_control = (segs[0].ds_len - 1) | LPE_HWDESC_INTERRUPT;

	return (0);
}
1149
1150static void
1151lpe_discard_rxbuf(struct lpe_softc *sc, int n)
1152{
1153	struct lpe_rxdesc *rxd;
1154	struct lpe_hwdesc *hwd;
1155
1156	rxd = &sc->lpe_cdata.lpe_rx_desc[n];
1157	hwd = &sc->lpe_rdata.lpe_rx_ring[n];
1158
1159	bus_dmamap_unload(sc->lpe_cdata.lpe_rx_buf_tag, rxd->lpe_rxdesc_dmamap);
1160
1161	hwd->lhr_data = 0;
1162	hwd->lhr_control = 0;
1163
1164	if (rxd->lpe_rxdesc_mbuf) {
1165		m_freem(rxd->lpe_rxdesc_mbuf);
1166		rxd->lpe_rxdesc_mbuf = NULL;
1167	}
1168}
1169
1170static void
1171lpe_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1172{
1173	struct lpe_dmamap_arg *ctx;
1174
1175	if (error)
1176		return;
1177
1178	ctx = (struct lpe_dmamap_arg *)arg;
1179	ctx->lpe_dma_busaddr = segs[0].ds_addr;
1180}
1181
/*
 * Media-change callback: nothing to do here, the PHY driver handles
 * media programming via miibus.
 */
static int
lpe_ifmedia_upd(struct ifnet *ifp)
{
	return (0);
}
1187
1188static void
1189lpe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1190{
1191	struct lpe_softc *sc = ifp->if_softc;
1192	struct mii_data *mii = device_get_softc(sc->lpe_miibus);
1193
1194	lpe_lock(sc);
1195	mii_pollstat(mii);
1196	ifmr->ifm_active = mii->mii_media_active;
1197	ifmr->ifm_status = mii->mii_media_status;
1198	lpe_unlock(sc);
1199}
1200
/* Device, bus and MII method dispatch table for the lpe driver. */
static device_method_t lpe_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		lpe_probe),
	DEVMETHOD(device_attach,	lpe_attach),
	DEVMETHOD(device_detach,	lpe_detach),

	/* Bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	lpe_miibus_readreg),
	DEVMETHOD(miibus_writereg,	lpe_miibus_writereg),
	DEVMETHOD(miibus_statchg,	lpe_miibus_statchg),
	{ 0, 0 }
};

static driver_t lpe_driver = {
	"lpe",
	lpe_methods,
	sizeof(struct lpe_softc),
};

static devclass_t lpe_devclass;

/* Attach under simplebus (FDT) and hang a miibus off each instance. */
DRIVER_MODULE(lpe, simplebus, lpe_driver, lpe_devclass, 0, 0);
DRIVER_MODULE(miibus, lpe, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(lpe, obio, 1, 1, 1);
MODULE_DEPEND(lpe, miibus, 1, 1, 1);
MODULE_DEPEND(lpe, ether, 1, 1, 1);
1230