/*-
 * Copyright (c) 2012 Damjan Marion <dmarion@Freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * TI Common Platform Ethernet Switch (CPSW) Driver
 * Found in TI8148 "DaVinci" and AM335x "Sitara" SoCs.
 *
 * This controller is documented in the AM335x Technical Reference
 * Manual, in the TMS320DM814x DaVinci Digital Video Processors TRM
 * and in the TMS320C6452 3 Port Switch Ethernet Subsystem TRM.
 *
 * It is basically a single Ethernet port (port 0) wired internally to
 * a 3-port store-and-forward switch connected to two independent
 * "sliver" controllers (port 1 and port 2).  You can operate the
 * controller in a variety of different ways by suitably configuring
 * the slivers and the Address Lookup Engine (ALE) that routes packets
 * between the ports.
 *
 * This code was developed and tested on a BeagleBone with
 * an AM335x SoC.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/arm/ti/cpsw/if_cpsw.c 266152 2014-05-15 16:11:06Z ian $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>

#include <sys/sockio.h>
#include <sys/bus.h>
#include <machine/bus.h>
#include <sys/rman.h>
#include <machine/resource.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include "if_cpswreg.h"
#include "if_cpswvar.h"

#include <arm/ti/ti_scm.h>

#include "miibus_if.h"

/* Device probe/attach/detach. */
static int cpsw_probe(device_t);
static void cpsw_init_slots(struct cpsw_softc *);
static int cpsw_attach(device_t);
static void cpsw_free_slot(struct cpsw_softc *, struct cpsw_slot *);
static int cpsw_detach(device_t);

/* Device Init/shutdown. */
static void cpsw_init(void *);
static void cpsw_init_locked(void *);
static int cpsw_shutdown(device_t);
static void cpsw_shutdown_locked(struct cpsw_softc *);

/* Device Suspend/Resume. */
static int cpsw_suspend(device_t);
static int cpsw_resume(device_t);

/* Ioctl. */
static int cpsw_ioctl(struct ifnet *, u_long command, caddr_t data);

static int cpsw_miibus_readreg(device_t, int phy, int reg);
static int cpsw_miibus_writereg(device_t, int phy, int reg, int value);

/* Send/Receive packets. */
static void cpsw_intr_rx(void *arg);
static struct mbuf *cpsw_rx_dequeue(struct cpsw_softc *);
static void cpsw_rx_enqueue(struct cpsw_softc *);
static void cpsw_start(struct ifnet *);
static void cpsw_tx_enqueue(struct cpsw_softc *);
static int cpsw_tx_dequeue(struct cpsw_softc *);

/* Misc interrupts and watchdog. */
static void cpsw_intr_rx_thresh(void *);
static void cpsw_intr_misc(void *);
static void cpsw_tick(void *);
static void cpsw_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static int cpsw_ifmedia_upd(struct ifnet *);
static void cpsw_tx_watchdog(struct cpsw_softc *);

/* ALE support */
static void cpsw_ale_read_entry(struct cpsw_softc *, uint16_t idx, uint32_t *ale_entry);
static void cpsw_ale_write_entry(struct cpsw_softc *, uint16_t idx, uint32_t *ale_entry);
static int cpsw_ale_mc_entry_set(struct cpsw_softc *, uint8_t portmap, uint8_t *mac);
static int cpsw_ale_update_addresses(struct cpsw_softc *, int purge);
static void cpsw_ale_dump_table(struct cpsw_softc *);

/* Statistics and sysctls. */
static void cpsw_add_sysctls(struct cpsw_softc *);
static void cpsw_stats_collect(struct cpsw_softc *);
static int cpsw_stats_sysctl(SYSCTL_HANDLER_ARGS);

/*
 * Arbitrary limit on number of segments in an mbuf to be transmitted.
 * Packets with more segments than this will be defragmented before
 * they are queued.
 */
#define CPSW_TXFRAGS 8


/*
 * TODO: The CPSW subsystem (CPSW_SS) can drive two independent PHYs
 * as separate Ethernet ports.  To properly support this, we should
 * break this into two separate devices: a CPSW_SS device that owns
 * the interrupts and actually talks to the CPSW hardware, and a
 * separate CPSW Ethernet child device for each Ethernet port.  The RX
 * interrupt, for example, would be part of CPSW_SS; it would receive
 * a packet, note the input port, and then dispatch it to the child
 * device's interface queue.  Similarly for transmit.
 *
 * It's not clear to me whether the device tree should be restructured
 * with a cpsw_ss node and two child nodes.  That would allow specifying
 * MAC addresses for each port, for example, but might be overkill.
 *
 * Unfortunately, I don't have hardware right now that supports two
 * Ethernet ports via CPSW.
 */

static device_method_t cpsw_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		cpsw_probe),
	DEVMETHOD(device_attach,	cpsw_attach),
	DEVMETHOD(device_detach,	cpsw_detach),
	DEVMETHOD(device_shutdown,	cpsw_shutdown),
	DEVMETHOD(device_suspend,	cpsw_suspend),
	DEVMETHOD(device_resume,	cpsw_resume),
	/* MII interface */
	DEVMETHOD(miibus_readreg,	cpsw_miibus_readreg),
	DEVMETHOD(miibus_writereg,	cpsw_miibus_writereg),
	{ 0, 0 }
};

static driver_t cpsw_driver = {
	"cpsw",
	cpsw_methods,
	sizeof(struct cpsw_softc),
};

static devclass_t cpsw_devclass;

DRIVER_MODULE(cpsw, simplebus, cpsw_driver, cpsw_devclass, 0, 0);
DRIVER_MODULE(miibus, cpsw, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(cpsw, ether, 1, 1, 1);
MODULE_DEPEND(cpsw, miibus, 1, 1, 1);

static struct resource_spec res_spec[] = {
	{ SYS_RES_MEMORY, 0, RF_ACTIVE },
	{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 3, RF_ACTIVE | RF_SHAREABLE },
	{ -1, 0 }
};
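
/*
 * The four IRQ resources above are, in order, the RX threshold, RX,
 * TX and misc interrupt outputs of the subsystem.  cpsw_attach()
 * wires up all of them except the TX interrupt (sc->res[3]); TX
 * completions are instead reaped from cpsw_start() and the watchdog
 * tick.
 */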

/* Number of entries here must match size of stats
 * array in struct cpsw_softc. */
static struct cpsw_stat {
	int	reg;
	char *oid;
} cpsw_stat_sysctls[CPSW_SYSCTL_COUNT] = {
	{0x00, "GoodRxFrames"},
	{0x04, "BroadcastRxFrames"},
	{0x08, "MulticastRxFrames"},
	{0x0C, "PauseRxFrames"},
	{0x10, "RxCrcErrors"},
	{0x14, "RxAlignErrors"},
	{0x18, "OversizeRxFrames"},
	{0x1c, "RxJabbers"},
	{0x20, "ShortRxFrames"},
	{0x24, "RxFragments"},
	{0x30, "RxOctets"},
	{0x34, "GoodTxFrames"},
	{0x38, "BroadcastTxFrames"},
	{0x3c, "MulticastTxFrames"},
	{0x40, "PauseTxFrames"},
	{0x44, "DeferredTxFrames"},
	{0x48, "CollisionsTxFrames"},
	{0x4c, "SingleCollisionTxFrames"},
	{0x50, "MultipleCollisionTxFrames"},
	{0x54, "ExcessiveCollisions"},
	{0x58, "LateCollisions"},
	{0x5c, "TxUnderrun"},
	{0x60, "CarrierSenseErrors"},
	{0x64, "TxOctets"},
	{0x68, "RxTx64OctetFrames"},
	{0x6c, "RxTx65to127OctetFrames"},
	{0x70, "RxTx128to255OctetFrames"},
	{0x74, "RxTx256to511OctetFrames"},
	{0x78, "RxTx512to1024OctetFrames"},
	{0x7c, "RxTx1024upOctetFrames"},
	{0x80, "NetOctets"},
	{0x84, "RxStartOfFrameOverruns"},
	{0x88, "RxMiddleOfFrameOverruns"},
	{0x8c, "RxDmaOverruns"}
};
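
/*
 * The reg values above are byte offsets of the corresponding hardware
 * statistics registers; the statistics code (cpsw_stats_collect())
 * walks this table when harvesting the counters into the softc.
 */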

/*
 * Basic debug support.
 */

#define IF_DEBUG(sc)  if (sc->cpsw_if_flags & IFF_DEBUG)

static void
cpsw_debugf_head(const char *funcname)
{
	int t = (int)(time_second % (24 * 60 * 60));

	printf("%02d:%02d:%02d %s ", t / (60 * 60), (t / 60) % 60, t % 60, funcname);
}

#include <machine/stdarg.h>
static void
cpsw_debugf(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
	printf("\n");
}

#define CPSW_DEBUGF(a) do {					\
	IF_DEBUG(sc) {						\
		cpsw_debugf_head(__func__);			\
		cpsw_debugf a;					\
	}							\
} while (0)
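
/*
 * Note the double parentheses at CPSW_DEBUGF call sites, e.g.
 * CPSW_DEBUGF(("stat=%x", stat)): they let a printf-style argument
 * list travel through the single macro parameter 'a'.
 */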


/*
 * Locking macros
 */
#define CPSW_TX_LOCK(sc) do {					\
		mtx_assert(&(sc)->rx.lock, MA_NOTOWNED);		\
		mtx_lock(&(sc)->tx.lock);				\
} while (0)

#define CPSW_TX_UNLOCK(sc)	mtx_unlock(&(sc)->tx.lock)
#define CPSW_TX_LOCK_ASSERT(sc)	mtx_assert(&(sc)->tx.lock, MA_OWNED)

#define CPSW_RX_LOCK(sc) do {					\
		mtx_assert(&(sc)->tx.lock, MA_NOTOWNED);		\
		mtx_lock(&(sc)->rx.lock);				\
} while (0)

#define CPSW_RX_UNLOCK(sc)		mtx_unlock(&(sc)->rx.lock)
#define CPSW_RX_LOCK_ASSERT(sc)	mtx_assert(&(sc)->rx.lock, MA_OWNED)

#define CPSW_GLOBAL_LOCK(sc) do {					\
		if ((mtx_owned(&(sc)->tx.lock) ? 1 : 0) !=	\
		    (mtx_owned(&(sc)->rx.lock) ? 1 : 0)) {		\
			panic("cpsw: inconsistent tx/rx lock state, possible deadlock");	\
		}							\
		mtx_lock(&(sc)->tx.lock);				\
		mtx_lock(&(sc)->rx.lock);				\
} while (0)

#define CPSW_GLOBAL_UNLOCK(sc) do {					\
		CPSW_RX_UNLOCK(sc);				\
		CPSW_TX_UNLOCK(sc);				\
} while (0)

#define CPSW_GLOBAL_LOCK_ASSERT(sc) do {				\
		CPSW_TX_LOCK_ASSERT(sc);				\
		CPSW_RX_LOCK_ASSERT(sc);				\
} while (0)

/*
 * Read/Write macros
 */
#define	cpsw_read_4(sc, reg)		bus_read_4(sc->res[0], reg)
#define	cpsw_write_4(sc, reg, val)	bus_write_4(sc->res[0], reg, val)

#define	cpsw_cpdma_bd_offset(i)	(CPSW_CPPI_RAM_OFFSET + ((i)*16))

#define	cpsw_cpdma_bd_paddr(sc, slot)				\
	BUS_SPACE_PHYSADDR(sc->res[0], slot->bd_offset)
#define	cpsw_cpdma_read_bd(sc, slot, val)				\
	bus_read_region_4(sc->res[0], slot->bd_offset, (uint32_t *) val, 4)
#define	cpsw_cpdma_write_bd(sc, slot, val)				\
	bus_write_region_4(sc->res[0], slot->bd_offset, (uint32_t *) val, 4)
#define	cpsw_cpdma_write_bd_next(sc, slot, next_slot)			\
	cpsw_write_4(sc, slot->bd_offset, cpsw_cpdma_bd_paddr(sc, next_slot))
#define	cpsw_cpdma_read_bd_flags(sc, slot)		\
	bus_read_2(sc->res[0], slot->bd_offset + 14)
#define	cpsw_write_hdp_slot(sc, queue, slot)				\
	cpsw_write_4(sc, (queue)->hdp_offset, cpsw_cpdma_bd_paddr(sc, slot))
#define	CP_OFFSET (CPSW_CPDMA_TX_CP(0) - CPSW_CPDMA_TX_HDP(0))
#define	cpsw_read_cp(sc, queue)				\
	cpsw_read_4(sc, (queue)->hdp_offset + CP_OFFSET)
#define	cpsw_write_cp(sc, queue, val)				\
	cpsw_write_4(sc, (queue)->hdp_offset + CP_OFFSET, (val))
#define	cpsw_write_cp_slot(sc, queue, slot)		\
	cpsw_write_cp(sc, queue, cpsw_cpdma_bd_paddr(sc, slot))
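
/*
 * Layout notes for the macros above: each buffer descriptor occupies
 * 16 bytes of CPPI RAM (hence the i*16 in cpsw_cpdma_bd_offset) and
 * consists of four 32-bit words: next-descriptor pointer, buffer
 * pointer, buffer offset/length, and packet length/flags.  The 16-bit
 * flags field read by cpsw_cpdma_read_bd_flags() thus sits at byte
 * offset 14 of the descriptor.
 */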

#if 0
/* XXX temporary function versions for debugging. */
static void
cpsw_write_hdp_slotX(struct cpsw_softc *sc, struct cpsw_queue *queue, struct cpsw_slot *slot)
{
	uint32_t reg = queue->hdp_offset;
	uint32_t v = cpsw_cpdma_bd_paddr(sc, slot);
	CPSW_DEBUGF(("HDP <=== 0x%08x (was 0x%08x)", v, cpsw_read_4(sc, reg)));
	cpsw_write_4(sc, reg, v);
}

static void
cpsw_write_cp_slotX(struct cpsw_softc *sc, struct cpsw_queue *queue, struct cpsw_slot *slot)
{
	uint32_t v = cpsw_cpdma_bd_paddr(sc, slot);
	CPSW_DEBUGF(("CP <=== 0x%08x (expecting 0x%08x)", v, cpsw_read_cp(sc, queue)));
	cpsw_write_cp(sc, queue, v);
}
#endif

/*
 * Expanded dump routines for verbose debugging.
 */
static void
cpsw_dump_slot(struct cpsw_softc *sc, struct cpsw_slot *slot)
{
	static const char *flags[] = {"SOP", "EOP", "Owner", "EOQ",
	    "TDownCmplt", "PassCRC", "Long", "Short", "MacCtl", "Overrun",
	    "PktErr1", "PortEn/PktErr0", "RxVlanEncap", "Port2", "Port1",
	    "Port0"};
	struct cpsw_cpdma_bd bd;
	const char *sep;
	int i;

	cpsw_cpdma_read_bd(sc, slot, &bd);
	printf("BD Addr: 0x%08x   Next: 0x%08x\n", cpsw_cpdma_bd_paddr(sc, slot), bd.next);
	printf("  BufPtr: 0x%08x   BufLen: 0x%08x\n", bd.bufptr, bd.buflen);
	printf("  BufOff: 0x%08x   PktLen: 0x%08x\n", bd.bufoff, bd.pktlen);
	printf("  Flags: ");
	sep = "";
	for (i = 0; i < 16; ++i) {
		if (bd.flags & (1 << (15 - i))) {
			printf("%s%s", sep, flags[i]);
			sep = ",";
		}
	}
	printf("\n");
	if (slot->mbuf) {
		printf("  Ether:  %14D\n",
		    (char *)(slot->mbuf->m_hdr.mh_data), " ");
		printf("  Packet: %16D\n",
		    (char *)(slot->mbuf->m_hdr.mh_data) + 14, " ");
	}
}

#define CPSW_DUMP_SLOT(sc, slot) do {				\
	IF_DEBUG(sc) {						\
		cpsw_dump_slot(sc, slot);			\
	}							\
} while (0)


static void
cpsw_dump_queue(struct cpsw_softc *sc, struct cpsw_slots *q)
{
	struct cpsw_slot *slot;
	int i = 0;
	int others = 0;

	STAILQ_FOREACH(slot, q, next) {
		if (i > 4)
			++others;
		else
			cpsw_dump_slot(sc, slot);
		++i;
	}
	if (others)
		printf(" ... and %d more.\n", others);
	printf("\n");
}

#define CPSW_DUMP_QUEUE(sc, q) do {				\
	IF_DEBUG(sc) {						\
		cpsw_dump_queue(sc, q);				\
	}							\
} while (0)


/*
 *
 * Device Probe, Attach, Detach.
 *
 */

static int
cpsw_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (!ofw_bus_is_compatible(dev, "ti,cpsw"))
		return (ENXIO);

	device_set_desc(dev, "3-port Switch Ethernet Subsystem");
	return (BUS_PROBE_DEFAULT);
}


static void
cpsw_init_slots(struct cpsw_softc *sc)
{
	struct cpsw_slot *slot;
	int i;

	STAILQ_INIT(&sc->avail);

	/* Put the slot descriptors onto the global avail list. */
	for (i = 0; i < sizeof(sc->_slots) / sizeof(sc->_slots[0]); i++) {
		slot = &sc->_slots[i];
		slot->bd_offset = cpsw_cpdma_bd_offset(i);
		STAILQ_INSERT_TAIL(&sc->avail, slot, next);
	}
}

/*
 * Bind an interrupt and record the relevant info in sc->interrupts.
 */
static int
cpsw_attach_interrupt(struct cpsw_softc *sc, struct resource *res, driver_intr_t *handler, const char *description)
{
	void **pcookie;
	int error;

	sc->interrupts[sc->interrupt_count].res = res;
	sc->interrupts[sc->interrupt_count].description = description;
	pcookie = &sc->interrupts[sc->interrupt_count].ih_cookie;

	error = bus_setup_intr(sc->dev, res, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, *handler, sc, pcookie);
	if (error)
		device_printf(sc->dev,
		    "could not setup %s\n", description);
	else
		++sc->interrupt_count;
	return (error);
}

/*
 * Tear down everything in sc->interrupts.
 */
static void
cpsw_detach_interrupts(struct cpsw_softc *sc)
{
	int error;
	int i;

	for (i = 0; i < sizeof(sc->interrupts) / sizeof(sc->interrupts[0]); ++i) {
		if (!sc->interrupts[i].ih_cookie)
			continue;
		error = bus_teardown_intr(sc->dev,
		    sc->interrupts[i].res, sc->interrupts[i].ih_cookie);
		if (error)
			device_printf(sc->dev, "could not release %s\n",
			    sc->interrupts[i].description);
		sc->interrupts[i].ih_cookie = NULL;
	}
}

static int
cpsw_add_slots(struct cpsw_softc *sc, struct cpsw_queue *queue, int requested)
{
	const int max_slots = sizeof(sc->_slots) / sizeof(sc->_slots[0]);
	struct cpsw_slot *slot;
	int i;

	if (requested < 0)
		requested = max_slots;

	for (i = 0; i < requested; ++i) {
		slot = STAILQ_FIRST(&sc->avail);
		if (slot == NULL)
			return (0);
		if (bus_dmamap_create(sc->mbuf_dtag, 0, &slot->dmamap)) {
			if_printf(sc->ifp, "failed to create dmamap\n");
			return (ENOMEM);
		}
		STAILQ_REMOVE_HEAD(&sc->avail, next);
		STAILQ_INSERT_TAIL(&queue->avail, slot, next);
		++queue->avail_queue_len;
		++queue->queue_slots;
	}
	return (0);
}

static int
cpsw_attach(device_t dev)
{
	bus_dma_segment_t segs[1];
	struct cpsw_softc *sc = device_get_softc(dev);
	struct mii_softc *miisc;
	struct ifnet *ifp;
	void *phy_sc;
	int error, phy, nsegs;
	uint32_t reg;

	CPSW_DEBUGF((""));

	getbinuptime(&sc->attach_uptime);
	sc->dev = dev;
	sc->node = ofw_bus_get_node(dev);

	/* Get phy address from fdt */
	if (fdt_get_phyaddr(sc->node, sc->dev, &phy, &phy_sc) != 0) {
		device_printf(dev, "failed to get PHY address from FDT\n");
		return (ENXIO);
	}
	/* Initialize mutexes */
	mtx_init(&sc->tx.lock, device_get_nameunit(dev),
	    "cpsw TX lock", MTX_DEF);
	mtx_init(&sc->rx.lock, device_get_nameunit(dev),
	    "cpsw RX lock", MTX_DEF);

	/* Allocate IO and IRQ resources */
	error = bus_alloc_resources(dev, res_spec, sc->res);
	if (error) {
		device_printf(dev, "could not allocate resources\n");
		cpsw_detach(dev);
		return (ENXIO);
	}

	reg = cpsw_read_4(sc, CPSW_SS_IDVER);
	device_printf(dev, "CPSW SS Version %d.%d (%d)\n", (reg >> 8 & 0x7),
		reg & 0xFF, (reg >> 11) & 0x1F);

	cpsw_add_sysctls(sc);

	/* Allocate a busdma tag and DMA safe memory for mbufs. */
	error = bus_dma_tag_create(
		bus_get_dma_tag(sc->dev),	/* parent */
		1, 0,				/* alignment, boundary */
		BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
		BUS_SPACE_MAXADDR,		/* highaddr */
		NULL, NULL,			/* filtfunc, filtfuncarg */
		MCLBYTES, CPSW_TXFRAGS,		/* maxsize, nsegments */
		MCLBYTES, 0,			/* maxsegsz, flags */
		NULL, NULL,			/* lockfunc, lockfuncarg */
		&sc->mbuf_dtag);		/* dmatag */
	if (error) {
		device_printf(dev, "bus_dma_tag_create failed\n");
		cpsw_detach(dev);
		return (error);
	}

	/* Allocate network interface */
	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "if_alloc() failed\n");
		cpsw_detach(dev);
		return (ENOMEM);
	}

	/* Allocate the null mbuf and pre-sync it. */
	sc->null_mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (sc->null_mbuf == NULL) {
		device_printf(dev, "failed to allocate null mbuf\n");
		cpsw_detach(dev);
		return (ENOMEM);
	}
	memset(sc->null_mbuf->m_hdr.mh_data, 0, sc->null_mbuf->m_ext.ext_size);
	bus_dmamap_create(sc->mbuf_dtag, 0, &sc->null_mbuf_dmamap);
	bus_dmamap_load_mbuf_sg(sc->mbuf_dtag, sc->null_mbuf_dmamap,
	    sc->null_mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
	bus_dmamap_sync(sc->mbuf_dtag, sc->null_mbuf_dmamap,
	    BUS_DMASYNC_PREWRITE);
	sc->null_mbuf_paddr = segs[0].ds_addr;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST;
	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_HWCSUM; //FIXME VLAN?
	ifp->if_capenable = ifp->if_capabilities;

	ifp->if_init = cpsw_init;
	ifp->if_start = cpsw_start;
	ifp->if_ioctl = cpsw_ioctl;

	cpsw_init_slots(sc);

	/* Allocate slots to TX and RX queues. */
	STAILQ_INIT(&sc->rx.avail);
	STAILQ_INIT(&sc->rx.active);
	STAILQ_INIT(&sc->tx.avail);
	STAILQ_INIT(&sc->tx.active);
	// For now:  128 slots to TX, rest to RX.
	// XXX TODO: start with 32/64 and grow dynamically based on demand.
	if (cpsw_add_slots(sc, &sc->tx, 128) || cpsw_add_slots(sc, &sc->rx, -1)) {
		device_printf(dev, "failed to allocate dmamaps\n");
		cpsw_detach(dev);
		return (ENOMEM);
	}
	device_printf(dev, "Initial queue size TX=%d RX=%d\n",
	    sc->tx.queue_slots, sc->rx.queue_slots);

	ifp->if_snd.ifq_drv_maxlen = sc->tx.queue_slots;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);

	sc->tx.hdp_offset = CPSW_CPDMA_TX_HDP(0);
	sc->rx.hdp_offset = CPSW_CPDMA_RX_HDP(0);

	/* Get high part of MAC address from control module (mac_id0_hi) */
	/* TODO: Get MAC ID1 as well as MAC ID0. */
	ti_scm_reg_read_4(0x634, &reg);
	sc->mac_addr[0] = reg & 0xFF;
	sc->mac_addr[1] = (reg >>  8) & 0xFF;
	sc->mac_addr[2] = (reg >> 16) & 0xFF;
	sc->mac_addr[3] = (reg >> 24) & 0xFF;

	/* Get low part of MAC address from control module (mac_id0_lo) */
	ti_scm_reg_read_4(0x630, &reg);
	sc->mac_addr[4] = reg & 0xFF;
	sc->mac_addr[5] = (reg >>  8) & 0xFF;

	ether_ifattach(ifp, sc->mac_addr);
	callout_init(&sc->watchdog.callout, 0);

	/* Initialize MDIO - ENABLE, PREAMBLE=0, FAULTENB, CLKDIV=0xFF */
	/* TODO Calculate MDCLK=CLK/(CLKDIV+1) */
	cpsw_write_4(sc, MDIOCONTROL, 1 << 30 | 1 << 18 | 0xFF);

	/* Clear ALE */
	cpsw_write_4(sc, CPSW_ALE_CONTROL, 1 << 30);

	/* Attach PHY(s) */
	error = mii_attach(dev, &sc->miibus, ifp, cpsw_ifmedia_upd,
	    cpsw_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
	if (error) {
		device_printf(dev, "attaching PHYs failed\n");
		cpsw_detach(dev);
		return (error);
	}
	sc->mii = device_get_softc(sc->miibus);

	/* Tell the MAC where to find the PHY so autoneg works */
	miisc = LIST_FIRST(&sc->mii->mii_phys);

	/* Select PHY and enable interrupts */
	cpsw_write_4(sc, MDIOUSERPHYSEL0, 1 << 6 | (miisc->mii_phy & 0x1F));

	/* Note: We don't use sc->res[3] (TX interrupt) */
	if (cpsw_attach_interrupt(sc, sc->res[1],
		cpsw_intr_rx_thresh, "CPSW RX threshold interrupt") ||
	    cpsw_attach_interrupt(sc, sc->res[2],
		cpsw_intr_rx, "CPSW RX interrupt") ||
	    cpsw_attach_interrupt(sc, sc->res[4],
		cpsw_intr_misc, "CPSW misc interrupt")) {
		cpsw_detach(dev);
		return (ENXIO);
	}

	return (0);
}

static void
cpsw_free_slot(struct cpsw_softc *sc, struct cpsw_slot *slot)
{
	int error;

	if (slot->dmamap) {
		error = bus_dmamap_destroy(sc->mbuf_dtag, slot->dmamap);
		KASSERT(error == 0, ("Mapping still active"));
		slot->dmamap = NULL;
	}
	if (slot->mbuf) {
		m_freem(slot->mbuf);
		slot->mbuf = NULL;
	}
}

static int
cpsw_detach(device_t dev)
{
	struct cpsw_softc *sc = device_get_softc(dev);
	int error, i;

	CPSW_DEBUGF((""));

	/* Stop controller and free TX queue */
	if (device_is_attached(dev)) {
		ether_ifdetach(sc->ifp);
		CPSW_GLOBAL_LOCK(sc);
		cpsw_shutdown_locked(sc);
		CPSW_GLOBAL_UNLOCK(sc);
		callout_drain(&sc->watchdog.callout);
	}

	bus_generic_detach(dev);
	device_delete_child(dev, sc->miibus);

	/* Stop and release all interrupts */
	cpsw_detach_interrupts(sc);

	/* Free dmamaps and mbufs */
	for (i = 0; i < sizeof(sc->_slots) / sizeof(sc->_slots[0]); ++i) {
		cpsw_free_slot(sc, &sc->_slots[i]);
	}

	/* Free DMA tag */
	error = bus_dma_tag_destroy(sc->mbuf_dtag);
	KASSERT(error == 0, ("Unable to destroy DMA tag"));

	/* Free IO memory handler */
	bus_release_resources(dev, res_spec, sc->res);

	/* Destroy mutexes */
	mtx_destroy(&sc->rx.lock);
	mtx_destroy(&sc->tx.lock);

	return (0);
}

/*
 *
 * Init/Shutdown.
 *
 */

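/*
 * Every sub-block of the switch (the RMII/RGMII wrapper, the
 * subsystem, the slivers and the CPDMA engine) uses the same
 * soft-reset protocol: write 1 to its SOFT_RESET register, then spin
 * until the hardware clears the bit again.
 */
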
static void
cpsw_reset(struct cpsw_softc *sc)
{
	int i;

	/* Reset RMII/RGMII wrapper. */
	cpsw_write_4(sc, CPSW_WR_SOFT_RESET, 1);
	while (cpsw_read_4(sc, CPSW_WR_SOFT_RESET) & 1)
		;

	/* Disable TX and RX interrupts for all cores. */
	for (i = 0; i < 3; ++i) {
		cpsw_write_4(sc, CPSW_WR_C_RX_THRESH_EN(i), 0x00);
		cpsw_write_4(sc, CPSW_WR_C_TX_EN(i), 0x00);
		cpsw_write_4(sc, CPSW_WR_C_RX_EN(i), 0x00);
		cpsw_write_4(sc, CPSW_WR_C_MISC_EN(i), 0x00);
	}

	/* Reset CPSW subsystem. */
	cpsw_write_4(sc, CPSW_SS_SOFT_RESET, 1);
	while (cpsw_read_4(sc, CPSW_SS_SOFT_RESET) & 1)
		;

	/* Reset Sliver port 1 and 2 */
	for (i = 0; i < 2; i++) {
		/* Reset */
		cpsw_write_4(sc, CPSW_SL_SOFT_RESET(i), 1);
		while (cpsw_read_4(sc, CPSW_SL_SOFT_RESET(i)) & 1)
			;
	}

	/* Reset DMA controller. */
	cpsw_write_4(sc, CPSW_CPDMA_SOFT_RESET, 1);
	while (cpsw_read_4(sc, CPSW_CPDMA_SOFT_RESET) & 1)
		;

	/* Disable TX & RX DMA */
	cpsw_write_4(sc, CPSW_CPDMA_TX_CONTROL, 0);
	cpsw_write_4(sc, CPSW_CPDMA_RX_CONTROL, 0);

	/* Clear all queues. */
	for (i = 0; i < 8; i++) {
		cpsw_write_4(sc, CPSW_CPDMA_TX_HDP(i), 0);
		cpsw_write_4(sc, CPSW_CPDMA_RX_HDP(i), 0);
		cpsw_write_4(sc, CPSW_CPDMA_TX_CP(i), 0);
		cpsw_write_4(sc, CPSW_CPDMA_RX_CP(i), 0);
	}

	/* Clear all interrupt Masks */
	cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_CLEAR, 0xFFFFFFFF);
	cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_CLEAR, 0xFFFFFFFF);
}

static void
cpsw_init(void *arg)
{
	struct cpsw_softc *sc = arg;

	CPSW_DEBUGF((""));
	CPSW_GLOBAL_LOCK(sc);
	cpsw_init_locked(arg);
	CPSW_GLOBAL_UNLOCK(sc);
}

static void
cpsw_init_locked(void *arg)
{
	struct ifnet *ifp;
	struct cpsw_softc *sc = arg;
	struct cpsw_slot *slot;
	uint32_t i;

	CPSW_DEBUGF((""));
	ifp = sc->ifp;
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;

	getbinuptime(&sc->init_uptime);

	/* Reset the controller. */
	cpsw_reset(sc);

	/* Enable ALE */
	cpsw_write_4(sc, CPSW_ALE_CONTROL, 1 << 31 | 1 << 4);

	/* Init Sliver port 1 and 2 */
	for (i = 0; i < 2; i++) {
		/* Set Slave Mapping */
		cpsw_write_4(sc, CPSW_SL_RX_PRI_MAP(i), 0x76543210);
		cpsw_write_4(sc, CPSW_PORT_P_TX_PRI_MAP(i + 1), 0x33221100);
		cpsw_write_4(sc, CPSW_SL_RX_MAXLEN(i), 0x5f2);
		/* Set MACCONTROL for ports 0,1: IFCTL_B(16), IFCTL_A(15),
		   GMII_EN(5), FULLDUPLEX(1) */
		/* TODO: Docs claim that IFCTL_B and IFCTL_A do the same thing? */
		/* Huh?  Docs call bit 0 "Loopback" some places, "FullDuplex" others. */
		cpsw_write_4(sc, CPSW_SL_MACCONTROL(i), 1 << 15 | 1 << 5 | 1);
	}

	/* Set Host Port Mapping */
	cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_TX_PRI_MAP, 0x76543210);
	cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_RX_CH_MAP, 0);

	/* Initialize ALE: all ports set to forwarding(3), initialize addrs */
	for (i = 0; i < 3; i++)
		cpsw_write_4(sc, CPSW_ALE_PORTCTL(i), 3);
	cpsw_ale_update_addresses(sc, 1);

	cpsw_write_4(sc, CPSW_SS_PTYPE, 0);

	/* Enable statistics for ports 0, 1 and 2 */
	cpsw_write_4(sc, CPSW_SS_STAT_PORT_EN, 7);

	/* Experiment:  Turn off flow control */
	/* This seems to fix the watchdog resets that have plagued
	   earlier versions of this driver; I'm not yet sure whether
	   there are negative side effects. */
	cpsw_write_4(sc, CPSW_SS_FLOW_CONTROL, 0);

	/* Offset RX buffers by 2 bytes so the IP header is 4-byte aligned */
	cpsw_write_4(sc, CPSW_CPDMA_RX_BUFFER_OFFSET, 2);

	/* Initialize RX Buffer Descriptors */
	cpsw_write_4(sc, CPSW_CPDMA_RX_FREEBUFFER(0), 0);

	/* Enable TX & RX DMA */
	cpsw_write_4(sc, CPSW_CPDMA_TX_CONTROL, 1);
	cpsw_write_4(sc, CPSW_CPDMA_RX_CONTROL, 1);

	/* Enable Interrupts for core 0 */
	cpsw_write_4(sc, CPSW_WR_C_RX_THRESH_EN(0), 0xFF);
	cpsw_write_4(sc, CPSW_WR_C_RX_EN(0), 0xFF);
	cpsw_write_4(sc, CPSW_WR_C_MISC_EN(0), 0x3F);

	/* Enable host Error Interrupt */
	cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_SET, 3);

	/* Enable interrupts for RX Channel 0 */
	cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_SET, 1);

	/* Initialize MDIO - ENABLE, PREAMBLE=0, FAULTENB, CLKDIV=0xFF */
	/* TODO Calculate MDCLK=CLK/(CLKDIV+1) */
	cpsw_write_4(sc, MDIOCONTROL, 1 << 30 | 1 << 18 | 0xFF);

	/* Select MII in GMII_SEL, Internal Delay mode */
	//ti_scm_reg_write_4(0x650, 0);

	/* Initialize active queues. */
	slot = STAILQ_FIRST(&sc->tx.active);
	if (slot != NULL)
		cpsw_write_hdp_slot(sc, &sc->tx, slot);
	slot = STAILQ_FIRST(&sc->rx.active);
	if (slot != NULL)
		cpsw_write_hdp_slot(sc, &sc->rx, slot);
	cpsw_rx_enqueue(sc);

	/* Activate network interface */
	sc->rx.running = 1;
	sc->tx.running = 1;
	sc->watchdog.timer = 0;
	callout_reset(&sc->watchdog.callout, hz, cpsw_tick, sc);
	sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
	sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
}

static int
cpsw_shutdown(device_t dev)
{
	struct cpsw_softc *sc = device_get_softc(dev);

	CPSW_DEBUGF((""));
	CPSW_GLOBAL_LOCK(sc);
	cpsw_shutdown_locked(sc);
	CPSW_GLOBAL_UNLOCK(sc);
	return (0);
}

static void
cpsw_rx_teardown_locked(struct cpsw_softc *sc)
{
	struct mbuf *received, *next;
	int i = 0;

	CPSW_DEBUGF(("starting RX teardown"));
	cpsw_write_4(sc, CPSW_CPDMA_RX_TEARDOWN, 0);
	for (;;) {
		received = cpsw_rx_dequeue(sc);
		CPSW_GLOBAL_UNLOCK(sc);
		while (received != NULL) {
			next = received->m_nextpkt;
			received->m_nextpkt = NULL;
			(*sc->ifp->if_input)(sc->ifp, received);
			received = next;
		}
		CPSW_GLOBAL_LOCK(sc);
		if (!sc->rx.running) {
			CPSW_DEBUGF(("finished RX teardown (%d retries)", i));
			return;
		}
		if (++i > 10) {
			if_printf(sc->ifp, "Unable to cleanly shutdown receiver\n");
			return;
		}
		DELAY(10);
	}
}

static void
cpsw_tx_teardown_locked(struct cpsw_softc *sc)
{
	int i = 0;

	CPSW_DEBUGF(("starting TX teardown"));
	cpsw_write_4(sc, CPSW_CPDMA_TX_TEARDOWN, 0);
	cpsw_tx_dequeue(sc);
	while (sc->tx.running && ++i < 10) {
		DELAY(10);
		cpsw_tx_dequeue(sc);
	}
	if (sc->tx.running)
		if_printf(sc->ifp, "Unable to cleanly shutdown transmitter\n");
	CPSW_DEBUGF(("finished TX teardown (%d retries, %d idle buffers)",
	    i, sc->tx.active_queue_len));
}

static void
cpsw_shutdown_locked(struct cpsw_softc *sc)
{
	struct ifnet *ifp;

	CPSW_DEBUGF((""));
	CPSW_GLOBAL_LOCK_ASSERT(sc);
	ifp = sc->ifp;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	/* Disable interface */
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	ifp->if_drv_flags |= IFF_DRV_OACTIVE;

	/* Stop ticker */
	callout_stop(&sc->watchdog.callout);

	/* Tear down the RX/TX queues. */
	cpsw_rx_teardown_locked(sc);
	cpsw_tx_teardown_locked(sc);

	/* Capture stats before we reset controller. */
	cpsw_stats_collect(sc);

	cpsw_reset(sc);
}

/*
 *  Suspend/Resume.
 */

static int
cpsw_suspend(device_t dev)
{
	struct cpsw_softc *sc = device_get_softc(dev);

	CPSW_DEBUGF((""));
	CPSW_GLOBAL_LOCK(sc);
	cpsw_shutdown_locked(sc);
	CPSW_GLOBAL_UNLOCK(sc);
	return (0);
}

static int
cpsw_resume(device_t dev)
{
	struct cpsw_softc *sc = device_get_softc(dev);

	CPSW_DEBUGF(("UNIMPLEMENTED"));
	return (0);
}

/*
 *
 *  IOCTL
 *
 */

static void
cpsw_set_promisc(struct cpsw_softc *sc, int set)
{
	/*
	 * Enabling promiscuous mode requires two bits of work: First,
	 * ALE_BYPASS needs to be enabled.  That disables the ALE
	 * forwarding logic and causes every packet to be sent to the
	 * host port.  That makes us promiscuous wrt received packets.
	 *
	 * With ALE forwarding disabled, the transmitter needs to set
	 * an explicit output port on every packet to route it to the
	 * correct egress.  This should be doable for systems such as
	 * BeagleBone where only one egress port is actually wired to
	 * a PHY.  If you have both egress ports wired up, life gets a
	 * lot more interesting.
	 *
	 * Hmmm.... NetBSD driver uses ALE_BYPASS always and doesn't
	 * seem to set explicit egress ports.  Does that mean they
	 * are always promiscuous?
	 */
	if (set) {
		printf("Promiscuous mode unimplemented\n");
	}
}

static void
cpsw_set_allmulti(struct cpsw_softc *sc, int set)
{
	if (set) {
		printf("All-multicast mode unimplemented\n");
	}
}

static int
cpsw_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct cpsw_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int error;
	uint32_t changed;

	error = 0;

	switch (command) {
	case SIOCSIFFLAGS:
		CPSW_GLOBAL_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				changed = ifp->if_flags ^ sc->cpsw_if_flags;
				CPSW_DEBUGF(("SIOCSIFFLAGS: UP & RUNNING (changed=0x%x)", changed));
				if (changed & IFF_PROMISC)
					cpsw_set_promisc(sc,
					    ifp->if_flags & IFF_PROMISC);
				if (changed & IFF_ALLMULTI)
					cpsw_set_allmulti(sc,
					    ifp->if_flags & IFF_ALLMULTI);
			} else {
				CPSW_DEBUGF(("SIOCSIFFLAGS: UP but not RUNNING; starting up"));
				cpsw_init_locked(sc);
			}
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			CPSW_DEBUGF(("SIOCSIFFLAGS: not UP but RUNNING; shutting down"));
			cpsw_shutdown_locked(sc);
		}

		sc->cpsw_if_flags = ifp->if_flags;
		CPSW_GLOBAL_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
		cpsw_ale_update_addresses(sc, 0);
		break;
	case SIOCDELMULTI:
		/* Ugh.  DELMULTI doesn't provide the specific address
		   being removed, so the best we can do is remove
		   everything and rebuild it all. */
		cpsw_ale_update_addresses(sc, 1);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
	}
	return (error);
}

/*
 *
 * MIIBUS
 *
 */
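/*
 * MDIOUSERACCESS0 bit layout, as used by the helpers below: GO (bit
 * 31) kicks off a transaction and reads back as 1 while it is still
 * in flight, WRITE (bit 30) selects a write cycle, ACK (bit 29)
 * indicates that a PHY answered a read, the register address goes in
 * bits 25:21, the PHY address in bits 20:16, and the 16-bit data
 * rides in bits 15:0.
 */
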
static int
cpsw_miibus_ready(struct cpsw_softc *sc)
{
	uint32_t r, retries = CPSW_MIIBUS_RETRIES;

	while (--retries) {
		r = cpsw_read_4(sc, MDIOUSERACCESS0);
		if ((r & 1 << 31) == 0)
			return 1;
		DELAY(CPSW_MIIBUS_DELAY);
	}
	return 0;
}

static int
cpsw_miibus_readreg(device_t dev, int phy, int reg)
{
	struct cpsw_softc *sc = device_get_softc(dev);
	uint32_t cmd, r;

	if (!cpsw_miibus_ready(sc)) {
		device_printf(dev, "MDIO not ready to read\n");
		return 0;
	}

	/* Set GO, reg, phy */
	cmd = 1 << 31 | (reg & 0x1F) << 21 | (phy & 0x1F) << 16;
	cpsw_write_4(sc, MDIOUSERACCESS0, cmd);

	if (!cpsw_miibus_ready(sc)) {
		device_printf(dev, "MDIO timed out during read\n");
		return 0;
	}

	r = cpsw_read_4(sc, MDIOUSERACCESS0);
	if ((r & (1 << 29)) == 0) {
		device_printf(dev, "Failed to read from PHY.\n");
		r = 0;
	}
	return (r & 0xFFFF);
}

static int
cpsw_miibus_writereg(device_t dev, int phy, int reg, int value)
{
	struct cpsw_softc *sc = device_get_softc(dev);
	uint32_t cmd;

	if (!cpsw_miibus_ready(sc)) {
		device_printf(dev, "MDIO not ready to write\n");
		return 0;
	}

	/* Set GO, WRITE, reg, phy, and value */
	cmd = 3 << 30 | (reg & 0x1F) << 21 | (phy & 0x1F) << 16
	    | (value & 0xFFFF);
	cpsw_write_4(sc, MDIOUSERACCESS0, cmd);

	if (!cpsw_miibus_ready(sc)) {
		device_printf(dev, "MDIO timed out during write\n");
		return 0;
	}

	if ((cpsw_read_4(sc, MDIOUSERACCESS0) & (1 << 29)) == 0)
		device_printf(dev, "Failed to write to PHY.\n");

	return 0;
}

/*
 *
 * Transmit/Receive Packets.
 *
 */

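/*
 * A completion pointer value of 0xfffffffc is special: the CPDMA
 * engine reports it when a queue teardown has completed, and the
 * driver writes it back to acknowledge.  Both the RX and TX dequeue
 * paths below check for it.
 */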

static void
cpsw_intr_rx(void *arg)
{
	struct cpsw_softc *sc = arg;
	struct mbuf *received, *next;

	CPSW_RX_LOCK(sc);
	received = cpsw_rx_dequeue(sc);
	cpsw_rx_enqueue(sc);
	cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 1);
	CPSW_RX_UNLOCK(sc);

	while (received != NULL) {
		next = received->m_nextpkt;
		received->m_nextpkt = NULL;
		(*sc->ifp->if_input)(sc->ifp, received);
		received = next;
	}
}

static struct mbuf *
cpsw_rx_dequeue(struct cpsw_softc *sc)
{
	struct cpsw_cpdma_bd bd;
	struct cpsw_slot *slot;
	struct ifnet *ifp;
	struct mbuf *mb_head, *mb_tail;
	int removed = 0;

	ifp = sc->ifp;
	mb_head = mb_tail = NULL;

	/* Pull completed packets off hardware RX queue. */
	while ((slot = STAILQ_FIRST(&sc->rx.active)) != NULL) {
		cpsw_cpdma_read_bd(sc, slot, &bd);
		if (bd.flags & CPDMA_BD_OWNER)
			break; /* Still in use by hardware */

		CPSW_DEBUGF(("Removing received packet from RX queue"));
		++removed;
		STAILQ_REMOVE_HEAD(&sc->rx.active, next);
		STAILQ_INSERT_TAIL(&sc->rx.avail, slot, next);

		bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);

		if (bd.flags & CPDMA_BD_TDOWNCMPLT) {
			CPSW_DEBUGF(("RX teardown in progress"));
			m_freem(slot->mbuf);
			slot->mbuf = NULL;
			cpsw_write_cp(sc, &sc->rx, 0xfffffffc);
			sc->rx.running = 0;
			break;
		}

		cpsw_write_cp_slot(sc, &sc->rx, slot);

		/* Set up mbuf */
		/* TODO: track SOP/EOP bits to assemble a full mbuf
		   out of received fragments. */
		slot->mbuf->m_hdr.mh_data += bd.bufoff;
		slot->mbuf->m_hdr.mh_len = bd.pktlen - 4;
		slot->mbuf->m_pkthdr.len = bd.pktlen - 4;
		slot->mbuf->m_flags |= M_PKTHDR;
		slot->mbuf->m_pkthdr.rcvif = ifp;
		slot->mbuf->m_nextpkt = NULL;

		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
			/* check for valid CRC by looking into pkt_err[5:4] */
			if ((bd.flags & CPDMA_BD_PKT_ERR_MASK) == 0) {
				slot->mbuf->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				slot->mbuf->m_pkthdr.csum_flags |= CSUM_IP_VALID;
				slot->mbuf->m_pkthdr.csum_data = 0xffff;
			}
		}

		/* Add mbuf to packet list to be returned. */
		if (mb_tail) {
			mb_tail->m_nextpkt = slot->mbuf;
		} else {
			mb_head = slot->mbuf;
		}
		mb_tail = slot->mbuf;
		slot->mbuf = NULL;
	}

	if (removed != 0) {
		sc->rx.queue_removes += removed;
		sc->rx.active_queue_len -= removed;
		sc->rx.avail_queue_len += removed;
		if (sc->rx.avail_queue_len > sc->rx.max_avail_queue_len)
			sc->rx.max_avail_queue_len = sc->rx.avail_queue_len;
	}
	return (mb_head);
}

static void
cpsw_rx_enqueue(struct cpsw_softc *sc)
{
	bus_dma_segment_t seg[1];
	struct cpsw_cpdma_bd bd;
	struct ifnet *ifp = sc->ifp;
	struct cpsw_slots tmpqueue = STAILQ_HEAD_INITIALIZER(tmpqueue);
	struct cpsw_slot *slot, *prev_slot = NULL;
	struct cpsw_slot *last_old_slot, *first_new_slot;
	int error, nsegs, added = 0;

	/* Register new mbufs with hardware. */
	while ((slot = STAILQ_FIRST(&sc->rx.avail)) != NULL) {
		if (slot->mbuf == NULL) {
			slot->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
			if (slot->mbuf == NULL) {
				if_printf(sc->ifp, "Unable to fill RX queue\n");
				break;
			}
			slot->mbuf->m_len =
			    slot->mbuf->m_pkthdr.len =
			    slot->mbuf->m_ext.ext_size;
		}

		error = bus_dmamap_load_mbuf_sg(sc->mbuf_dtag, slot->dmamap,
		    slot->mbuf, seg, &nsegs, BUS_DMA_NOWAIT);

		KASSERT(nsegs == 1, ("More than one segment (nsegs=%d)", nsegs));
		KASSERT(error == 0, ("DMA error (error=%d)", error));
		if (error != 0 || nsegs != 1) {
			if_printf(ifp,
			    "%s: Can't prep RX buf for DMA (nsegs=%d, error=%d)\n",
			    __func__, nsegs, error);
			bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);
			m_freem(slot->mbuf);
			slot->mbuf = NULL;
			break;
		}

		bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_PREREAD);

		/* Create and submit new RX descriptor. */
		bd.next = 0;
		bd.bufptr = seg->ds_addr;
		bd.bufoff = 0;
		bd.buflen = MCLBYTES - 1;
		bd.pktlen = bd.buflen;
		bd.flags = CPDMA_BD_OWNER;
		cpsw_cpdma_write_bd(sc, slot, &bd);
		++added;

		if (prev_slot != NULL)
			cpsw_cpdma_write_bd_next(sc, prev_slot, slot);
		prev_slot = slot;
		STAILQ_REMOVE_HEAD(&sc->rx.avail, next);
		sc->rx.avail_queue_len--;
		STAILQ_INSERT_TAIL(&tmpqueue, slot, next);
	}

	if (added == 0)
		return;

	CPSW_DEBUGF(("Adding %d buffers to RX queue", added));

	/* Link new entries to hardware RX queue. */
	last_old_slot = STAILQ_LAST(&sc->rx.active, cpsw_slot, next);
	first_new_slot = STAILQ_FIRST(&tmpqueue);
	STAILQ_CONCAT(&sc->rx.active, &tmpqueue);
	if (first_new_slot == NULL) {
		return;
	} else if (last_old_slot == NULL) {
		/* Start a fresh queue. */
		cpsw_write_hdp_slot(sc, &sc->rx, first_new_slot);
	} else {
		/* Add buffers to end of current queue. */
		cpsw_cpdma_write_bd_next(sc, last_old_slot, first_new_slot);
		/* If underrun, restart queue. */
		if (cpsw_cpdma_read_bd_flags(sc, last_old_slot) & CPDMA_BD_EOQ) {
			cpsw_write_hdp_slot(sc, &sc->rx, first_new_slot);
		}
	}
	sc->rx.queue_adds += added;
	sc->rx.active_queue_len += added;
	if (sc->rx.active_queue_len > sc->rx.max_active_queue_len) {
		sc->rx.max_active_queue_len = sc->rx.active_queue_len;
	}
}
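
/*
 * Appending to a live hardware queue is inherently racy: if the DMA
 * engine consumed the old tail (setting EOQ in its descriptor) before
 * the new chain was linked in, the channel has gone idle and must be
 * restarted by writing the head descriptor pointer again.  The RX
 * path above and the TX path below both handle the race this way.
 */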

static void
cpsw_start(struct ifnet *ifp)
{
	struct cpsw_softc *sc = ifp->if_softc;

	CPSW_TX_LOCK(sc);
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) && sc->tx.running) {
		cpsw_tx_enqueue(sc);
		cpsw_tx_dequeue(sc);
	}
	CPSW_TX_UNLOCK(sc);
}
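
/*
 * This driver uses the legacy if_start/if_snd model: the stack queues
 * outbound packets on ifp->if_snd and calls cpsw_start(), which both
 * feeds the hardware via cpsw_tx_enqueue() and opportunistically
 * reaps completed transmissions via cpsw_tx_dequeue().
 */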

static void
cpsw_tx_enqueue(struct cpsw_softc *sc)
{
	bus_dma_segment_t segs[CPSW_TXFRAGS];
	struct cpsw_cpdma_bd bd;
	struct cpsw_slots tmpqueue = STAILQ_HEAD_INITIALIZER(tmpqueue);
	struct cpsw_slot *slot, *prev_slot = NULL;
	struct cpsw_slot *last_old_slot, *first_new_slot;
	struct mbuf *m0;
	int error, nsegs, seg, added = 0, padlen;

	/* Pull pending packets from IF queue and prep them for DMA. */
	while ((slot = STAILQ_FIRST(&sc->tx.avail)) != NULL) {
		IF_DEQUEUE(&sc->ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		slot->mbuf = m0;
		padlen = ETHER_MIN_LEN - slot->mbuf->m_pkthdr.len;
		if (padlen < 0)
			padlen = 0;

		/* Create mapping in DMA memory */
		error = bus_dmamap_load_mbuf_sg(sc->mbuf_dtag, slot->dmamap,
		    slot->mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
		/* If the packet is too fragmented, try to simplify. */
		if (error == EFBIG ||
		    (error == 0 &&
			nsegs + (padlen > 0 ? 1 : 0) > sc->tx.avail_queue_len)) {
			bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);
			if (padlen > 0) /* May as well add padding. */
				m_append(slot->mbuf, padlen,
				    sc->null_mbuf->m_hdr.mh_data);
			m0 = m_defrag(slot->mbuf, M_NOWAIT);
			if (m0 == NULL) {
				if_printf(sc->ifp,
				    "Can't defragment packet; dropping\n");
				m_freem(slot->mbuf);
			} else {
				CPSW_DEBUGF(("Requeueing defragmented packet"));
				IF_PREPEND(&sc->ifp->if_snd, m0);
			}
			slot->mbuf = NULL;
			continue;
		}
		if (error != 0) {
			if_printf(sc->ifp,
			    "%s: Can't setup DMA (error=%d), dropping packet\n",
			    __func__, error);
			bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);
			m_freem(slot->mbuf);
			slot->mbuf = NULL;
			break;
		}

		bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap,
				BUS_DMASYNC_PREWRITE);

		CPSW_DEBUGF(("Queueing TX packet: %d segments + %d pad bytes",
			nsegs, padlen));

		/* If there is only one segment, the for() loop
		 * gets skipped and the single buffer gets set up
		 * as both SOP and EOP. */
		/* Start by setting up the first buffer */
		bd.next = 0;
		bd.bufptr = segs[0].ds_addr;
		bd.bufoff = 0;
		bd.buflen = segs[0].ds_len;
		bd.pktlen = m_length(slot->mbuf, NULL) + padlen;
		bd.flags =  CPDMA_BD_SOP | CPDMA_BD_OWNER;
		for (seg = 1; seg < nsegs; ++seg) {
			/* Save the previous buffer (which isn't EOP) */
			cpsw_cpdma_write_bd(sc, slot, &bd);
			if (prev_slot != NULL)
				cpsw_cpdma_write_bd_next(sc, prev_slot, slot);
			prev_slot = slot;
			STAILQ_REMOVE_HEAD(&sc->tx.avail, next);
			sc->tx.avail_queue_len--;
			STAILQ_INSERT_TAIL(&tmpqueue, slot, next);
			++added;
			slot = STAILQ_FIRST(&sc->tx.avail);

			/* Setup next buffer (which isn't SOP) */
			bd.next = 0;
			bd.bufptr = segs[seg].ds_addr;
			bd.bufoff = 0;
			bd.buflen = segs[seg].ds_len;
			bd.pktlen = 0;
			bd.flags = CPDMA_BD_OWNER;
		}
		/* Save the final buffer. */
		if (padlen <= 0)
			bd.flags |= CPDMA_BD_EOP;
		cpsw_cpdma_write_bd(sc, slot, &bd);
		if (prev_slot != NULL)
			cpsw_cpdma_write_bd_next(sc, prev_slot, slot);
		prev_slot = slot;
		STAILQ_REMOVE_HEAD(&sc->tx.avail, next);
		sc->tx.avail_queue_len--;
		STAILQ_INSERT_TAIL(&tmpqueue, slot, next);
		++added;

		if (padlen > 0) {
			slot = STAILQ_FIRST(&sc->tx.avail);
			STAILQ_REMOVE_HEAD(&sc->tx.avail, next);
			sc->tx.avail_queue_len--;
			STAILQ_INSERT_TAIL(&tmpqueue, slot, next);
			++added;

			/* Setup buffer of null pad bytes (definitely EOP) */
			cpsw_cpdma_write_bd_next(sc, prev_slot, slot);
			prev_slot = slot;
			bd.next = 0;
			bd.bufptr = sc->null_mbuf_paddr;
			bd.bufoff = 0;
			bd.buflen = padlen;
			bd.pktlen = 0;
			bd.flags = CPDMA_BD_EOP | CPDMA_BD_OWNER;
			cpsw_cpdma_write_bd(sc, slot, &bd);
			++nsegs;
		}

		if (nsegs > sc->tx.longest_chain)
			sc->tx.longest_chain = nsegs;

		// TODO: Should we defer the BPF tap until
		// after all packets are queued?
		BPF_MTAP(sc->ifp, m0);
	}

	/* Attach the list of new buffers to the hardware TX queue. */
	last_old_slot = STAILQ_LAST(&sc->tx.active, cpsw_slot, next);
	first_new_slot = STAILQ_FIRST(&tmpqueue);
	STAILQ_CONCAT(&sc->tx.active, &tmpqueue);
	if (first_new_slot == NULL) {
		return;
	} else if (last_old_slot == NULL) {
		/* Start a fresh queue. */
		cpsw_write_hdp_slot(sc, &sc->tx, first_new_slot);
	} else {
		/* Add buffers to end of current queue. */
		cpsw_cpdma_write_bd_next(sc, last_old_slot, first_new_slot);
		/* If underrun, restart queue. */
		if (cpsw_cpdma_read_bd_flags(sc, last_old_slot) & CPDMA_BD_EOQ) {
			cpsw_write_hdp_slot(sc, &sc->tx, first_new_slot);
		}
	}
	sc->tx.queue_adds += added;
	sc->tx.active_queue_len += added;
	if (sc->tx.active_queue_len > sc->tx.max_active_queue_len) {
		sc->tx.max_active_queue_len = sc->tx.active_queue_len;
	}
}

static int
cpsw_tx_dequeue(struct cpsw_softc *sc)
{
	struct cpsw_slot *slot, *last_removed_slot = NULL;
	uint32_t flags, removed = 0;

	slot = STAILQ_FIRST(&sc->tx.active);
	if (slot == NULL && cpsw_read_cp(sc, &sc->tx) == 0xfffffffc) {
		CPSW_DEBUGF(("TX teardown of an empty queue"));
		cpsw_write_cp(sc, &sc->tx, 0xfffffffc);
		sc->tx.running = 0;
		return (0);
	}

	/* Pull completed buffers off the hardware TX queue. */
	while (slot != NULL) {
		flags = cpsw_cpdma_read_bd_flags(sc, slot);
		if (flags & CPDMA_BD_OWNER)
			break; /* Hardware is still using this packet. */

		CPSW_DEBUGF(("TX removing completed packet"));
		bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);
		m_freem(slot->mbuf);
		slot->mbuf = NULL;

		/* Dequeue any additional buffers used by this packet. */
		while (slot != NULL && slot->mbuf == NULL) {
			STAILQ_REMOVE_HEAD(&sc->tx.active, next);
			STAILQ_INSERT_TAIL(&sc->tx.avail, slot, next);
			++removed;
			last_removed_slot = slot;
			slot = STAILQ_FIRST(&sc->tx.active);
		}

		/* TearDown complete is only marked on the SOP for the packet. */
		if (flags & CPDMA_BD_TDOWNCMPLT) {
			CPSW_DEBUGF(("TX teardown in progress"));
			cpsw_write_cp(sc, &sc->tx, 0xfffffffc);
			// TODO: Increment a count of dropped TX packets
			sc->tx.running = 0;
			break;
		}
	}

	if (removed != 0) {
		cpsw_write_cp_slot(sc, &sc->tx, last_removed_slot);
		sc->tx.queue_removes += removed;
		sc->tx.active_queue_len -= removed;
		sc->tx.avail_queue_len += removed;
		if (sc->tx.avail_queue_len > sc->tx.max_avail_queue_len)
			sc->tx.max_avail_queue_len = sc->tx.avail_queue_len;
	}
	return (removed);
}

/*
 *
 * Miscellaneous interrupts.
 *
 */

static void
cpsw_intr_rx_thresh(void *arg)
{
	struct cpsw_softc *sc = arg;
	uint32_t stat = cpsw_read_4(sc, CPSW_WR_C_RX_THRESH_STAT(0));

	CPSW_DEBUGF(("stat=%x", stat));
	cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 0);
}

static void
cpsw_intr_misc_host_error(struct cpsw_softc *sc)
{
	uint32_t intstat;
	uint32_t dmastat;
	int txerr, rxerr, txchan, rxchan;

	printf("\n\n");
	device_printf(sc->dev,
	    "HOST ERROR:  PROGRAMMING ERROR DETECTED BY HARDWARE\n");
	printf("\n\n");
	intstat = cpsw_read_4(sc, CPSW_CPDMA_DMA_INTSTAT_MASKED);
	device_printf(sc->dev, "CPSW_CPDMA_DMA_INTSTAT_MASKED=0x%x\n", intstat);
	dmastat = cpsw_read_4(sc, CPSW_CPDMA_DMASTATUS);
	device_printf(sc->dev, "CPSW_CPDMA_DMASTATUS=0x%x\n", dmastat);

	txerr = (dmastat >> 20) & 15;
	txchan = (dmastat >> 16) & 7;
	rxerr = (dmastat >> 12) & 15;
	rxchan = (dmastat >> 8) & 7;

	switch (txerr) {
	case 0: break;
	case 1:	printf("SOP error on TX channel %d\n", txchan);
		break;
	case 2:	printf("Ownership bit not set on SOP buffer on TX channel %d\n", txchan);
		break;
	case 3:	printf("Zero Next Buffer but not EOP on TX channel %d\n", txchan);
		break;
	case 4:	printf("Zero Buffer Pointer on TX channel %d\n", txchan);
		break;
	case 5:	printf("Zero Buffer Length on TX channel %d\n", txchan);
		break;
	case 6:	printf("Packet length error on TX channel %d\n", txchan);
		break;
	default: printf("Unknown error on TX channel %d\n", txchan);
		break;
	}

	if (txerr != 0) {
		printf("CPSW_CPDMA_TX%d_HDP=0x%x\n",
		    txchan, cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(txchan)));
		printf("CPSW_CPDMA_TX%d_CP=0x%x\n",
		    txchan, cpsw_read_4(sc, CPSW_CPDMA_TX_CP(txchan)));
		cpsw_dump_queue(sc, &sc->tx.active);
	}

	switch (rxerr) {
	case 0: break;
	case 2:	printf("Ownership bit not set on RX channel %d\n", rxchan);
		break;
	case 4:	printf("Zero Buffer Pointer on RX channel %d\n", rxchan);
		break;
	case 5:	printf("Zero Buffer Length on RX channel %d\n", rxchan);
		break;
	case 6:	printf("Buffer offset too big on RX channel %d\n", rxchan);
		break;
	default: printf("Unknown RX error on RX channel %d\n", rxchan);
		break;
	}

	if (rxerr != 0) {
		printf("CPSW_CPDMA_RX%d_HDP=0x%x\n",
		    rxchan, cpsw_read_4(sc, CPSW_CPDMA_RX_HDP(rxchan)));
		printf("CPSW_CPDMA_RX%d_CP=0x%x\n",
		    rxchan, cpsw_read_4(sc, CPSW_CPDMA_RX_CP(rxchan)));
		cpsw_dump_queue(sc, &sc->rx.active);
	}

	printf("\nALE Table\n");
	cpsw_ale_dump_table(sc);

	// XXX do something useful here??
	panic("CPSW HOST ERROR INTERRUPT");

	// Suppress this interrupt in the future.
	cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_CLEAR, intstat);
	printf("XXX HOST ERROR INTERRUPT SUPPRESSED\n");
	// The watchdog will probably reset the controller
	// in a little while.  It will probably fail again.
}

static void
cpsw_intr_misc(void *arg)
{
	struct cpsw_softc *sc = arg;
	uint32_t stat = cpsw_read_4(sc, CPSW_WR_C_MISC_STAT(0));

	if (stat & 16)
		CPSW_DEBUGF(("Time sync event interrupt unimplemented"));
	if (stat & 8)
		cpsw_stats_collect(sc);
	if (stat & 4)
		cpsw_intr_misc_host_error(sc);
	if (stat & 2)
		CPSW_DEBUGF(("MDIO link change interrupt unimplemented"));
	if (stat & 1)
		CPSW_DEBUGF(("MDIO operation completed interrupt unimplemented"));
	cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 3);
}

/*
 *
 * Periodic Checks and Watchdog.
 *
 */

static void
cpsw_tick(void *msc)
{
	struct cpsw_softc *sc = msc;

	/* Check for TX timeout */
	cpsw_tx_watchdog(sc);

	/* Check for media type change */
	mii_tick(sc->mii);
	if (sc->cpsw_media_status != sc->mii->mii_media.ifm_media) {
		printf("%s: media type changed (ifm_media=%x)\n", __func__,
			sc->mii->mii_media.ifm_media);
		cpsw_ifmedia_upd(sc->ifp);
	}

	/* Schedule another timeout one second from now */
	callout_reset(&sc->watchdog.callout, hz, cpsw_tick, sc);
}

static void
cpsw_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct cpsw_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	CPSW_DEBUGF((""));
	CPSW_TX_LOCK(sc);

	mii = sc->mii;
	mii_pollstat(mii);

	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	CPSW_TX_UNLOCK(sc);
}

static int
cpsw_ifmedia_upd(struct ifnet *ifp)
{
	struct cpsw_softc *sc = ifp->if_softc;

	CPSW_DEBUGF((""));
	if (ifp->if_flags & IFF_UP) {
		CPSW_GLOBAL_LOCK(sc);
		sc->cpsw_media_status = sc->mii->mii_media.ifm_media;
		mii_mediachg(sc->mii);
		cpsw_init_locked(sc);
		CPSW_GLOBAL_UNLOCK(sc);
	}

	return (0);
}

static void
cpsw_tx_watchdog_full_reset(struct cpsw_softc *sc)
{
	cpsw_debugf_head("CPSW watchdog");
	if_printf(sc->ifp, "watchdog timeout\n");
	cpsw_shutdown_locked(sc);
	cpsw_init_locked(sc);
}

static void
cpsw_tx_watchdog(struct cpsw_softc *sc)
{
	struct ifnet *ifp = sc->ifp;

	CPSW_GLOBAL_LOCK(sc);
	if (sc->tx.active_queue_len == 0 || (ifp->if_flags & IFF_UP) == 0 ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || !sc->tx.running) {
		sc->watchdog.timer = 0; /* Nothing to do. */
	} else if (sc->tx.queue_removes > sc->tx.queue_removes_at_last_tick) {
		sc->watchdog.timer = 0;  /* Stuff done while we weren't looking. */
	} else if (cpsw_tx_dequeue(sc) > 0) {
		sc->watchdog.timer = 0;  /* We just did something. */
	} else {
		/* There was something to do but it didn't get done. */
		++sc->watchdog.timer;
		if (sc->watchdog.timer > 2) {
			sc->watchdog.timer = 0;
			++ifp->if_oerrors;
			++sc->watchdog.resets;
			cpsw_tx_watchdog_full_reset(sc);
		}
	}
	sc->tx.queue_removes_at_last_tick = sc->tx.queue_removes;
	CPSW_GLOBAL_UNLOCK(sc);
}

/*
 *
 * ALE support routines.
 *
 */

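/*
 * Each ALE table entry is 68 bits wide, accessed as three 32-bit
 * words through the TBLW0..TBLW2 windows after selecting an index via
 * TBLCTL (setting bit 31 of TBLCTL commits a write).  For the address
 * entries handled here, word 0 carries the low four MAC bytes, word 1
 * carries the top two MAC bytes plus the entry type (bits 61:60) and
 * multicast forward state (bits 63:62), and word 2 holds the port
 * mask (bits 68:66).
 */
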
1855static void
1856cpsw_ale_read_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry)
1857{
1858	cpsw_write_4(sc, CPSW_ALE_TBLCTL, idx & 1023);
1859	ale_entry[0] = cpsw_read_4(sc, CPSW_ALE_TBLW0);
1860	ale_entry[1] = cpsw_read_4(sc, CPSW_ALE_TBLW1);
1861	ale_entry[2] = cpsw_read_4(sc, CPSW_ALE_TBLW2);
1862}
1863
1864static void
1865cpsw_ale_write_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry)
1866{
1867	cpsw_write_4(sc, CPSW_ALE_TBLW0, ale_entry[0]);
1868	cpsw_write_4(sc, CPSW_ALE_TBLW1, ale_entry[1]);
1869	cpsw_write_4(sc, CPSW_ALE_TBLW2, ale_entry[2]);
	cpsw_write_4(sc, CPSW_ALE_TBLCTL, 1U << 31 | (idx & 1023));
1871}
1872
1873static int
1874cpsw_ale_remove_all_mc_entries(struct cpsw_softc *sc)
1875{
1876	int i;
1877	uint32_t ale_entry[3];
1878
1879	/* First two entries are link address and broadcast. */
1880	for (i = 2; i < CPSW_MAX_ALE_ENTRIES; i++) {
1881		cpsw_ale_read_entry(sc, i, ale_entry);
		if (((ale_entry[1] >> 28) & 3) == 1 && /* Address entry */
		    ((ale_entry[1] >> 8) & 1) == 1) { /* Multicast address */
1884			ale_entry[0] = ale_entry[1] = ale_entry[2] = 0;
1885			cpsw_ale_write_entry(sc, i, ale_entry);
1886		}
1887	}
	return (CPSW_MAX_ALE_ENTRIES);
1889}
1890
1891static int
1892cpsw_ale_mc_entry_set(struct cpsw_softc *sc, uint8_t portmap, uint8_t *mac)
1893{
1894	int free_index = -1, matching_index = -1, i;
1895	uint32_t ale_entry[3];
1896
1897	/* Find a matching entry or a free entry. */
1898	for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) {
1899		cpsw_ale_read_entry(sc, i, ale_entry);
1900
1901		/* Entry Type[61:60] is 0 for free entry */
1902		if (free_index < 0 && ((ale_entry[1] >> 28) & 3) == 0) {
1903			free_index = i;
1904		}
1905
1906		if ((((ale_entry[1] >> 8) & 0xFF) == mac[0]) &&
1907		    (((ale_entry[1] >> 0) & 0xFF) == mac[1]) &&
		    (((ale_entry[0] >> 24) & 0xFF) == mac[2]) &&
		    (((ale_entry[0] >> 16) & 0xFF) == mac[3]) &&
1910		    (((ale_entry[0] >> 8) & 0xFF) == mac[4]) &&
1911		    (((ale_entry[0] >> 0) & 0xFF) == mac[5])) {
1912			matching_index = i;
1913			break;
1914		}
1915	}
1916
1917	if (matching_index < 0) {
1918		if (free_index < 0)
1919			return (ENOMEM);
1920		i = free_index;
1921	}
1922
1923	/* Set MAC address */
1924	ale_entry[0] = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5];
1925	ale_entry[1] = mac[0] << 8 | mac[1];
1926
	/* Entry type[61:60] = addr (1); mcast fwd state[63:62] = fwd (3). */
1928	ale_entry[1] |= 0xd0 << 24;
1929
1930	/* Set portmask [68:66] */
1931	ale_entry[2] = (portmap & 7) << 2;
1932
1933	cpsw_ale_write_entry(sc, i, ale_entry);
1934
	return (0);
1936}
1937
1938static void
cpsw_ale_dump_table(struct cpsw_softc *sc)
{
1940	int i;
1941	uint32_t ale_entry[3];
1942	for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) {
1943		cpsw_ale_read_entry(sc, i, ale_entry);
1944		if (ale_entry[0] || ale_entry[1] || ale_entry[2]) {
1945			printf("ALE[%4u] %08x %08x %08x ", i, ale_entry[0],
1946				ale_entry[1], ale_entry[2]);
1947			printf("mac: %02x:%02x:%02x:%02x:%02x:%02x ",
1948				(ale_entry[1] >> 8) & 0xFF,
1949				(ale_entry[1] >> 0) & 0xFF,
				(ale_entry[0] >> 24) & 0xFF,
				(ale_entry[0] >> 16) & 0xFF,
1952				(ale_entry[0] >> 8) & 0xFF,
1953				(ale_entry[0] >> 0) & 0xFF);
1954			printf(((ale_entry[1] >> 8) & 1) ? "mcast " : "ucast ");
1955			printf("type: %u ", (ale_entry[1] >> 28) & 3);
1956			printf("port: %u ", (ale_entry[2] >> 2) & 7);
1957			printf("\n");
1958		}
1959	}
1960	printf("\n");
1961}
1962
1963static int
1964cpsw_ale_update_addresses(struct cpsw_softc *sc, int purge)
1965{
1966	uint8_t *mac;
1967	uint32_t ale_entry[3];
1968	struct ifnet *ifp = sc->ifp;
1969	struct ifmultiaddr *ifma;
1970	int i;
1971
1972	/* Route incoming packets for our MAC address to Port 0 (host). */
1973	/* For simplicity, keep this entry at table index 0 in the ALE. */
	if_addr_rlock(ifp);
1975	mac = LLADDR((struct sockaddr_dl *)ifp->if_addr->ifa_addr);
1976	ale_entry[0] = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5];
1977	ale_entry[1] = 0x10 << 24 | mac[0] << 8 | mac[1]; /* addr entry + mac */
1978	ale_entry[2] = 0; /* port = 0 */
1979	cpsw_ale_write_entry(sc, 0, ale_entry);
1980
1981	/* Set outgoing MAC Address for Ports 1 and 2. */
1982	for (i = 1; i < 3; ++i) {
1983		cpsw_write_4(sc, CPSW_PORT_P_SA_HI(i),
1984		    mac[3] << 24 | mac[2] << 16 | mac[1] << 8 | mac[0]);
1985		cpsw_write_4(sc, CPSW_PORT_P_SA_LO(i),
1986		    mac[5] << 8 | mac[4]);
1987	}
	if_addr_runlock(ifp);
1989
1990	/* Keep the broadcast address at table entry 1. */
1991	ale_entry[0] = 0xffffffff; /* Lower 32 bits of MAC */
	ale_entry[1] = 0xd000ffff; /* Fwd (3 << 30), addr (1 << 28), MAC hi */
1993	ale_entry[2] = 0x0000001c; /* Forward to all ports */
1994	cpsw_ale_write_entry(sc, 1, ale_entry);
1995
	/*
	 * SIOCDELMULTI doesn't specify the particular address
	 * being removed, so we have to remove all and rebuild.
	 */
1998	if (purge)
1999		cpsw_ale_remove_all_mc_entries(sc);
2000
	/* Set other multicast addrs desired. */
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		cpsw_ale_mc_entry_set(sc, 7,
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
	}
	if_maddr_runlock(ifp);
2010
2011	return (0);
2012}
2013
2014/*
2015 *
2016 * Statistics and Sysctls.
2017 *
2018 */
2019
2020#if 0
2021static void
2022cpsw_stats_dump(struct cpsw_softc *sc)
2023{
2024	int i;
2025	uint32_t r;
2026
2027	for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) {
2028		r = cpsw_read_4(sc, CPSW_STATS_OFFSET +
2029		    cpsw_stat_sysctls[i].reg);
2030		CPSW_DEBUGF(("%s: %ju + %u = %ju", cpsw_stat_sysctls[i].oid,
2031			     (intmax_t)sc->shadow_stats[i], r,
2032			     (intmax_t)sc->shadow_stats[i] + r));
2033	}
2034}
2035#endif
2036
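/*
 * The hardware statistics counters are 32 bits wide and decrement by
 * whatever value is written to them, so writing back the value just
 * read zeroes a counter.  cpsw_stats_collect() folds each counter
 * into a 64-bit shadow copy before clearing it; the sysctl handler
 * reports the shadow value plus whatever has accumulated since.
 */
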
2037static void
2038cpsw_stats_collect(struct cpsw_softc *sc)
2039{
2040	int i;
2041	uint32_t r;
2042
2043	CPSW_DEBUGF(("Controller shadow statistics updated."));
2044
2045	for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) {
2046		r = cpsw_read_4(sc, CPSW_STATS_OFFSET +
2047		    cpsw_stat_sysctls[i].reg);
2048		sc->shadow_stats[i] += r;
2049		cpsw_write_4(sc, CPSW_STATS_OFFSET + cpsw_stat_sysctls[i].reg, r);
2050	}
2051}
2052
2053static int
2054cpsw_stats_sysctl(SYSCTL_HANDLER_ARGS)
2055{
2056	struct cpsw_softc *sc;
2057	struct cpsw_stat *stat;
2058	uint64_t result;
2059
2060	sc = (struct cpsw_softc *)arg1;
2061	stat = &cpsw_stat_sysctls[oidp->oid_number];
2062	result = sc->shadow_stats[oidp->oid_number];
2063	result += cpsw_read_4(sc, CPSW_STATS_OFFSET + stat->reg);
2064	return (sysctl_handle_64(oidp, &result, 0, req));
2065}
2066
2067static int
2068cpsw_stat_attached(SYSCTL_HANDLER_ARGS)
2069{
2070	struct cpsw_softc *sc;
2071	struct bintime t;
2072	unsigned result;
2073
2074	sc = (struct cpsw_softc *)arg1;
2075	getbinuptime(&t);
2076	bintime_sub(&t, &sc->attach_uptime);
2077	result = t.sec;
2078	return (sysctl_handle_int(oidp, &result, 0, req));
2079}
2080
2081static int
2082cpsw_stat_uptime(SYSCTL_HANDLER_ARGS)
2083{
2084	struct cpsw_softc *sc;
2085	struct bintime t;
2086	unsigned result;
2087
2088	sc = (struct cpsw_softc *)arg1;
2089	if (sc->ifp->if_drv_flags & IFF_DRV_RUNNING) {
2090		getbinuptime(&t);
2091		bintime_sub(&t, &sc->init_uptime);
2092		result = t.sec;
2093	} else
2094		result = 0;
2095	return (sysctl_handle_int(oidp, &result, 0, req));
2096}
2097
2098static void
cpsw_add_queue_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *node,
    struct cpsw_queue *queue)
2100{
2101	struct sysctl_oid_list *parent;
2102
2103	parent = SYSCTL_CHILDREN(node);
2104	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "totalBuffers",
2105	    CTLFLAG_RD, &queue->queue_slots, 0,
2106	    "Total buffers currently assigned to this queue");
2107	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "activeBuffers",
2108	    CTLFLAG_RD, &queue->active_queue_len, 0,
2109	    "Buffers currently registered with hardware controller");
2110	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "maxActiveBuffers",
2111	    CTLFLAG_RD, &queue->max_active_queue_len, 0,
2112	    "Max value of activeBuffers since last driver reset");
2113	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "availBuffers",
2114	    CTLFLAG_RD, &queue->avail_queue_len, 0,
2115	    "Buffers allocated to this queue but not currently "
2116	    "registered with hardware controller");
2117	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "maxAvailBuffers",
2118	    CTLFLAG_RD, &queue->max_avail_queue_len, 0,
2119	    "Max value of availBuffers since last driver reset");
2120	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "totalEnqueued",
2121	    CTLFLAG_RD, &queue->queue_adds, 0,
2122	    "Total buffers added to queue");
2123	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "totalDequeued",
2124	    CTLFLAG_RD, &queue->queue_removes, 0,
2125	    "Total buffers removed from queue");
2126	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "longestChain",
2127	    CTLFLAG_RD, &queue->longest_chain, 0,
2128	    "Max buffers used for a single packet");
2129}
2130
2131static void
cpsw_add_watchdog_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *node,
    struct cpsw_softc *sc)
2133{
2134	struct sysctl_oid_list *parent;
2135
2136	parent = SYSCTL_CHILDREN(node);
2137	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "resets",
2138	    CTLFLAG_RD, &sc->watchdog.resets, 0,
2139	    "Total number of watchdog resets");
2140}
2141
2142static void
2143cpsw_add_sysctls(struct cpsw_softc *sc)
2144{
2145	struct sysctl_ctx_list *ctx;
2146	struct sysctl_oid *stats_node, *queue_node, *node;
2147	struct sysctl_oid_list *parent, *stats_parent, *queue_parent;
2148	int i;
2149
2150	ctx = device_get_sysctl_ctx(sc->dev);
2151	parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
2152
2153	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "attachedSecs",
2154	    CTLTYPE_UINT | CTLFLAG_RD, sc, 0, cpsw_stat_attached, "IU",
2155	    "Time since driver attach");
2156
2157	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "uptime",
2158	    CTLTYPE_UINT | CTLFLAG_RD, sc, 0, cpsw_stat_uptime, "IU",
2159	    "Seconds since driver init");
2160
2161	stats_node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats",
2162				     CTLFLAG_RD, NULL, "CPSW Statistics");
2163	stats_parent = SYSCTL_CHILDREN(stats_node);
2164	for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) {
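		/*
		 * Use the table index as the OID number so that
		 * cpsw_stats_sysctl() can recover it from oidp->oid_number.
		 */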
2165		SYSCTL_ADD_PROC(ctx, stats_parent, i,
2166				cpsw_stat_sysctls[i].oid,
2167				CTLTYPE_U64 | CTLFLAG_RD, sc, 0,
				cpsw_stats_sysctl, "QU",
2169				cpsw_stat_sysctls[i].oid);
2170	}
2171
2172	queue_node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "queue",
2173	    CTLFLAG_RD, NULL, "CPSW Queue Statistics");
2174	queue_parent = SYSCTL_CHILDREN(queue_node);
2175
2176	node = SYSCTL_ADD_NODE(ctx, queue_parent, OID_AUTO, "tx",
2177	    CTLFLAG_RD, NULL, "TX Queue Statistics");
2178	cpsw_add_queue_sysctls(ctx, node, &sc->tx);
2179
2180	node = SYSCTL_ADD_NODE(ctx, queue_parent, OID_AUTO, "rx",
2181	    CTLFLAG_RD, NULL, "RX Queue Statistics");
2182	cpsw_add_queue_sysctls(ctx, node, &sc->rx);
2183
2184	node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "watchdog",
2185	    CTLFLAG_RD, NULL, "Watchdog Statistics");
2186	cpsw_add_watchdog_sysctls(ctx, node, sc);
2187}
2188
2189