1/*-
2 * Copyright (c) 2012 Damjan Marion <dmarion@Freebsd.org>
3 * Copyright (c) 2016 Rubicon Communications, LLC (Netgate)
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 */
27
28/*
29 * TI Common Platform Ethernet Switch (CPSW) Driver
30 * Found in TI8148 "DaVinci" and AM335x "Sitara" SoCs.
31 *
32 * This controller is documented in the AM335x Technical Reference
33 * Manual, in the TMS320DM814x DaVinci Digital Video Processors TRM
34 * and in the TMS320C6452 3 Port Switch Ethernet Subsystem TRM.
35 *
36 * It is basically a single Ethernet port (port 0) wired internally to
37 * a 3-port store-and-forward switch connected to two independent
38 * "sliver" controllers (port 1 and port 2).  You can operate the
39 * controller in a variety of different ways by suitably configuring
40 * the slivers and the Address Lookup Engine (ALE) that routes packets
41 * between the ports.
42 *
43 * This code was developed and tested on a BeagleBone with
44 * an AM335x SoC.
45 */
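
/*
 * A rough sketch of that topology (drawn from the description above,
 * not copied from the TRM):
 *
 *             +-------------------------------+
 *   host      |   3-port switch (with ALE)    |
 *  (CPDMA) ---+ port 0     port 1     port 2  |
 *             +--------------|----------|-----+
 *                         sliver 1   sliver 2
 */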
46
47#include <sys/cdefs.h>
48__FBSDID("$FreeBSD: stable/11/sys/arm/ti/cpsw/if_cpsw.c 310857 2016-12-30 20:33:54Z loos $");
49
50#include <sys/param.h>
51#include <sys/systm.h>
52#include <sys/endian.h>
53#include <sys/mbuf.h>
54#include <sys/lock.h>
55#include <sys/mutex.h>
56#include <sys/kernel.h>
57#include <sys/module.h>
58#include <sys/socket.h>
59#include <sys/sysctl.h>
60
61#include <net/ethernet.h>
62#include <net/bpf.h>
63#include <net/if.h>
64#include <net/if_arp.h>
65#include <net/if_dl.h>
66#include <net/if_media.h>
67#include <net/if_types.h>
68#include <net/if_var.h>
69#include <net/if_vlan_var.h>
70
71#include <netinet/in_systm.h>
72#include <netinet/in.h>
73#include <netinet/ip.h>
74
75#include <sys/sockio.h>
76#include <sys/bus.h>
77#include <machine/bus.h>
78#include <sys/rman.h>
79#include <machine/resource.h>
80
81#include <arm/ti/ti_scm.h>
82#include <arm/ti/am335x/am335x_scm.h>
83
84#include <dev/mii/mii.h>
85#include <dev/mii/miivar.h>
86
87#include <dev/fdt/fdt_common.h>
88#include <dev/ofw/ofw_bus.h>
89#include <dev/ofw/ofw_bus_subr.h>
90
91#include "if_cpswreg.h"
92#include "if_cpswvar.h"
93
94#include "miibus_if.h"
95
96/* Device probe/attach/detach. */
97static int cpsw_probe(device_t);
98static int cpsw_attach(device_t);
99static int cpsw_detach(device_t);
100static int cpswp_probe(device_t);
101static int cpswp_attach(device_t);
102static int cpswp_detach(device_t);
103
104static phandle_t cpsw_get_node(device_t, device_t);
105
106/* Device Init/shutdown. */
107static int cpsw_shutdown(device_t);
108static void cpswp_init(void *);
109static void cpswp_init_locked(void *);
110static void cpswp_stop_locked(struct cpswp_softc *);
111
112/* Device Suspend/Resume. */
113static int cpsw_suspend(device_t);
114static int cpsw_resume(device_t);
115
116/* Ioctl. */
117static int cpswp_ioctl(struct ifnet *, u_long command, caddr_t data);
118
119static int cpswp_miibus_readreg(device_t, int phy, int reg);
120static int cpswp_miibus_writereg(device_t, int phy, int reg, int value);
121static void cpswp_miibus_statchg(device_t);
122
123/* Send/Receive packets. */
124static void cpsw_intr_rx(void *arg);
125static struct mbuf *cpsw_rx_dequeue(struct cpsw_softc *);
126static void cpsw_rx_enqueue(struct cpsw_softc *);
127static void cpswp_start(struct ifnet *);
128static void cpswp_tx_enqueue(struct cpswp_softc *);
129static int cpsw_tx_dequeue(struct cpsw_softc *);
130
131/* Misc interrupts and watchdog. */
132static void cpsw_intr_rx_thresh(void *);
133static void cpsw_intr_misc(void *);
134static void cpswp_tick(void *);
135static void cpswp_ifmedia_sts(struct ifnet *, struct ifmediareq *);
136static int cpswp_ifmedia_upd(struct ifnet *);
137static void cpsw_tx_watchdog(void *);
138
139/* ALE support */
140static void cpsw_ale_read_entry(struct cpsw_softc *, uint16_t, uint32_t *);
141static void cpsw_ale_write_entry(struct cpsw_softc *, uint16_t, uint32_t *);
142static int cpsw_ale_mc_entry_set(struct cpsw_softc *, uint8_t, int, uint8_t *);
143static void cpsw_ale_dump_table(struct cpsw_softc *);
144static int cpsw_ale_update_vlan_table(struct cpsw_softc *, int, int, int, int,
145	int);
146static int cpswp_ale_update_addresses(struct cpswp_softc *, int);
147
148/* Statistics and sysctls. */
149static void cpsw_add_sysctls(struct cpsw_softc *);
150static void cpsw_stats_collect(struct cpsw_softc *);
151static int cpsw_stats_sysctl(SYSCTL_HANDLER_ARGS);
152
153/*
154 * Arbitrary limit on number of segments in an mbuf to be transmitted.
155 * Packets with more segments than this will be defragmented before
156 * they are queued.
157 */
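/*
 * (The busdma tag created in cpsw_attach() uses CPSW_TXFRAGS as its
 * nsegments limit, so bus_dmamap_load_mbuf_sg() fails with EFBIG for
 * such packets and cpswp_tx_enqueue() then calls m_defrag().)
 */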
158#define	CPSW_TXFRAGS		16
159
160/* Shared resources. */
161static device_method_t cpsw_methods[] = {
162	/* Device interface */
163	DEVMETHOD(device_probe,		cpsw_probe),
164	DEVMETHOD(device_attach,	cpsw_attach),
165	DEVMETHOD(device_detach,	cpsw_detach),
166	DEVMETHOD(device_shutdown,	cpsw_shutdown),
167	DEVMETHOD(device_suspend,	cpsw_suspend),
168	DEVMETHOD(device_resume,	cpsw_resume),
169	/* OFW methods */
170	DEVMETHOD(ofw_bus_get_node,	cpsw_get_node),
171	DEVMETHOD_END
172};
173
174static driver_t cpsw_driver = {
175	"cpswss",
176	cpsw_methods,
177	sizeof(struct cpsw_softc),
178};
179
180static devclass_t cpsw_devclass;
181
182DRIVER_MODULE(cpswss, simplebus, cpsw_driver, cpsw_devclass, 0, 0);
183
184/* Port/Slave resources. */
185static device_method_t cpswp_methods[] = {
186	/* Device interface */
187	DEVMETHOD(device_probe,		cpswp_probe),
188	DEVMETHOD(device_attach,	cpswp_attach),
189	DEVMETHOD(device_detach,	cpswp_detach),
190	/* MII interface */
191	DEVMETHOD(miibus_readreg,	cpswp_miibus_readreg),
192	DEVMETHOD(miibus_writereg,	cpswp_miibus_writereg),
193	DEVMETHOD(miibus_statchg,	cpswp_miibus_statchg),
194	DEVMETHOD_END
195};
196
197static driver_t cpswp_driver = {
198	"cpsw",
199	cpswp_methods,
200	sizeof(struct cpswp_softc),
201};
202
203static devclass_t cpswp_devclass;
204
205DRIVER_MODULE(cpsw, cpswss, cpswp_driver, cpswp_devclass, 0, 0);
206DRIVER_MODULE(miibus, cpsw, miibus_driver, miibus_devclass, 0, 0);
207MODULE_DEPEND(cpsw, ether, 1, 1, 1);
208MODULE_DEPEND(cpsw, miibus, 1, 1, 1);
209
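/*
 * FDT unit addresses of the two slave nodes ("slave@4a100200" and
 * "slave@4a100300" on the AM335x); cpsw_get_fdt_data() matches these
 * against the node names to find each port's phy_id and VLAN.
 */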
210static uint32_t slave_mdio_addr[] = { 0x4a100200, 0x4a100300 };
211
212static struct resource_spec irq_res_spec[] = {
213	{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
214	{ SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE },
215	{ SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE },
216	{ SYS_RES_IRQ, 3, RF_ACTIVE | RF_SHAREABLE },
217	{ -1, 0 }
218};
219
220/* Number of entries here must match size of stats
221 * array in struct cpswp_softc. */
222static struct cpsw_stat {
223	int	reg;
224	char *oid;
225} cpsw_stat_sysctls[CPSW_SYSCTL_COUNT] = {
226	{0x00, "GoodRxFrames"},
227	{0x04, "BroadcastRxFrames"},
228	{0x08, "MulticastRxFrames"},
229	{0x0C, "PauseRxFrames"},
230	{0x10, "RxCrcErrors"},
231	{0x14, "RxAlignErrors"},
232	{0x18, "OversizeRxFrames"},
233	{0x1c, "RxJabbers"},
234	{0x20, "ShortRxFrames"},
235	{0x24, "RxFragments"},
236	{0x30, "RxOctets"},
237	{0x34, "GoodTxFrames"},
238	{0x38, "BroadcastTxFrames"},
239	{0x3c, "MulticastTxFrames"},
240	{0x40, "PauseTxFrames"},
241	{0x44, "DeferredTxFrames"},
242	{0x48, "CollisionsTxFrames"},
243	{0x4c, "SingleCollisionTxFrames"},
244	{0x50, "MultipleCollisionTxFrames"},
245	{0x54, "ExcessiveCollisions"},
246	{0x58, "LateCollisions"},
247	{0x5c, "TxUnderrun"},
248	{0x60, "CarrierSenseErrors"},
249	{0x64, "TxOctets"},
250	{0x68, "RxTx64OctetFrames"},
251	{0x6c, "RxTx65to127OctetFrames"},
252	{0x70, "RxTx128to255OctetFrames"},
253	{0x74, "RxTx256to511OctetFrames"},
254	{0x78, "RxTx512to1024OctetFrames"},
255	{0x7c, "RxTx1024upOctetFrames"},
256	{0x80, "NetOctets"},
257	{0x84, "RxStartOfFrameOverruns"},
258	{0x88, "RxMiddleOfFrameOverruns"},
259	{0x8c, "RxDmaOverruns"}
260};
261
262/*
263 * Basic debug support.
264 */
265
266#define	IF_DEBUG(_sc)		if ((_sc)->if_flags & IFF_DEBUG)
267
268static void
269cpsw_debugf_head(const char *funcname)
270{
271	int t = (int)(time_second % (24 * 60 * 60));
272
273	printf("%02d:%02d:%02d %s ", t / (60 * 60), (t / 60) % 60, t % 60, funcname);
274}
275
276#include <machine/stdarg.h>
277static void
278cpsw_debugf(const char *fmt, ...)
279{
280	va_list ap;
281
282	va_start(ap, fmt);
283	vprintf(fmt, ap);
284	va_end(ap);
285	printf("\n");
286
287}
288
289#define	CPSW_DEBUGF(_sc, a) do {					\
290	if ((_sc)->debug) {						\
291		cpsw_debugf_head(__func__);				\
292		cpsw_debugf a;						\
293	}								\
294} while (0)
295
296#define	CPSWP_DEBUGF(_sc, a) do {					\
297	IF_DEBUG((_sc)) {						\
298		cpsw_debugf_head(__func__);				\
299		cpsw_debugf a;						\
300	}								\
301} while (0)
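
/*
 * A minimal usage sketch for the debug macros above, kept under
 * "#if 0" like the debug helpers further down; the function and its
 * messages are made up for illustration.  Note the doubled
 * parentheses, which pack the format string and its arguments into a
 * single macro argument:
 */
#if 0
static void
cpsw_debug_example(struct cpsw_softc *sc, struct cpswp_softc *psc)
{

	CPSW_DEBUGF(sc, ("reset done, %d ports", CPSW_PORTS));
	CPSWP_DEBUGF(psc, ("link change on unit %d", psc->unit));
}
#endif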
302
303
304/*
305 * Locking macros
306 */
307#define	CPSW_TX_LOCK(sc) do {						\
308		mtx_assert(&(sc)->rx.lock, MA_NOTOWNED);		\
309		mtx_lock(&(sc)->tx.lock);				\
310} while (0)
311
312#define	CPSW_TX_UNLOCK(sc)	mtx_unlock(&(sc)->tx.lock)
313#define	CPSW_TX_LOCK_ASSERT(sc)	mtx_assert(&(sc)->tx.lock, MA_OWNED)
314
315#define	CPSW_RX_LOCK(sc) do {						\
316		mtx_assert(&(sc)->tx.lock, MA_NOTOWNED);		\
317		mtx_lock(&(sc)->rx.lock);				\
318} while (0)
319
320#define	CPSW_RX_UNLOCK(sc)		mtx_unlock(&(sc)->rx.lock)
321#define	CPSW_RX_LOCK_ASSERT(sc)	mtx_assert(&(sc)->rx.lock, MA_OWNED)
322
323#define	CPSW_GLOBAL_LOCK(sc) do {					\
324		if ((mtx_owned(&(sc)->tx.lock) ? 1 : 0) !=		\
325		    (mtx_owned(&(sc)->rx.lock) ? 1 : 0)) {		\
326			panic("cpsw: tx/rx locks in inconsistent state!");	\
327		}							\
328		mtx_lock(&(sc)->tx.lock);				\
329		mtx_lock(&(sc)->rx.lock);				\
330} while (0)
331
332#define	CPSW_GLOBAL_UNLOCK(sc) do {					\
333		CPSW_RX_UNLOCK(sc);					\
334		CPSW_TX_UNLOCK(sc);					\
335} while (0)
336
337#define	CPSW_GLOBAL_LOCK_ASSERT(sc) do {				\
338		CPSW_TX_LOCK_ASSERT(sc);				\
339		CPSW_RX_LOCK_ASSERT(sc);				\
340} while (0)
341
342#define CPSW_PORT_LOCK(_sc) do {					\
343		mtx_assert(&(_sc)->lock, MA_NOTOWNED);			\
344		mtx_lock(&(_sc)->lock);					\
345} while (0)
346
347#define	CPSW_PORT_UNLOCK(_sc)	mtx_unlock(&(_sc)->lock)
348#define	CPSW_PORT_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->lock, MA_OWNED)
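
/*
 * A sketch of how the locking macros compose (hypothetical function,
 * for illustration only): the TX lock is always taken before the RX
 * lock, and CPSW_GLOBAL_LOCK() panics if only one of them is held.
 */
#if 0
static void
cpsw_lock_example(struct cpsw_softc *sc)
{

	CPSW_GLOBAL_LOCK(sc);		/* tx.lock, then rx.lock */
	CPSW_GLOBAL_LOCK_ASSERT(sc);
	/* ... work that touches both queues ... */
	CPSW_GLOBAL_UNLOCK(sc);		/* rx.lock, then tx.lock */
}
#endif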
349
350/*
351 * Read/Write macros
352 */
353#define	cpsw_read_4(_sc, _reg)		bus_read_4((_sc)->mem_res, (_reg))
354#define	cpsw_write_4(_sc, _reg, _val)					\
355	bus_write_4((_sc)->mem_res, (_reg), (_val))
356
357#define	cpsw_cpdma_bd_offset(i)	(CPSW_CPPI_RAM_OFFSET + ((i)*16))
358
359#define	cpsw_cpdma_bd_paddr(sc, slot)					\
360	BUS_SPACE_PHYSADDR(sc->mem_res, slot->bd_offset)
361#define	cpsw_cpdma_read_bd(sc, slot, val)				\
362	bus_read_region_4(sc->mem_res, slot->bd_offset, (uint32_t *) val, 4)
363#define	cpsw_cpdma_write_bd(sc, slot, val)				\
364	bus_write_region_4(sc->mem_res, slot->bd_offset, (uint32_t *) val, 4)
365#define	cpsw_cpdma_write_bd_next(sc, slot, next_slot)			\
366	cpsw_write_4(sc, slot->bd_offset, cpsw_cpdma_bd_paddr(sc, next_slot))
367#define	cpsw_cpdma_read_bd_flags(sc, slot)				\
368	bus_read_2(sc->mem_res, slot->bd_offset + 14)
369#define	cpsw_write_hdp_slot(sc, queue, slot)				\
370	cpsw_write_4(sc, (queue)->hdp_offset, cpsw_cpdma_bd_paddr(sc, slot))
371#define	CP_OFFSET (CPSW_CPDMA_TX_CP(0) - CPSW_CPDMA_TX_HDP(0))
372#define	cpsw_read_cp(sc, queue)						\
373	cpsw_read_4(sc, (queue)->hdp_offset + CP_OFFSET)
374#define	cpsw_write_cp(sc, queue, val)					\
375	cpsw_write_4(sc, (queue)->hdp_offset + CP_OFFSET, (val))
376#define	cpsw_write_cp_slot(sc, queue, slot)				\
377	cpsw_write_cp(sc, queue, cpsw_cpdma_bd_paddr(sc, slot))
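
/*
 * Implied layout of one 16-byte buffer descriptor in CPPI RAM, as
 * seen through the accessors above (cpsw_cpdma_read_bd() copies four
 * 32-bit words into a struct cpsw_cpdma_bd and
 * cpsw_cpdma_read_bd_flags() fetches 16 bits at offset 14):
 *
 *	offset  0: next   - physical address of the next descriptor
 *	offset  4: bufptr - physical address of the data buffer
 *	offset  8: bufoff, buflen (16 bits each)
 *	offset 12: pktlen, flags  (16 bits each; flags at offset 14)
 */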
378
379#if 0
380/* XXX temporary function versions for debugging. */
381static void
382cpsw_write_hdp_slotX(struct cpsw_softc *sc, struct cpsw_queue *queue, struct cpsw_slot *slot)
383{
384	uint32_t reg = queue->hdp_offset;
385	uint32_t v = cpsw_cpdma_bd_paddr(sc, slot);
386	CPSW_DEBUGF(("HDP <=== 0x%08x (was 0x%08x)", v, cpsw_read_4(sc, reg)));
387	cpsw_write_4(sc, reg, v);
388}
389
390static void
391cpsw_write_cp_slotX(struct cpsw_softc *sc, struct cpsw_queue *queue, struct cpsw_slot *slot)
392{
393	uint32_t v = cpsw_cpdma_bd_paddr(sc, slot);
394	CPSW_DEBUGF(("CP <=== 0x%08x (expecting 0x%08x)", v, cpsw_read_cp(sc, queue)));
395	cpsw_write_cp(sc, queue, v);
396}
397#endif
398
399/*
400 * Expanded dump routines for verbose debugging.
401 */
402static void
403cpsw_dump_slot(struct cpsw_softc *sc, struct cpsw_slot *slot)
404{
405	static const char *flags[] = {"SOP", "EOP", "Owner", "EOQ",
406	    "TDownCmplt", "PassCRC", "Long", "Short", "MacCtl", "Overrun",
407	    "PktErr1", "PortEn/PktErr0", "RxVlanEncap", "Port2", "Port1",
408	    "Port0"};
409	struct cpsw_cpdma_bd bd;
410	const char *sep;
411	int i;
412
413	cpsw_cpdma_read_bd(sc, slot, &bd);
414	printf("BD Addr: 0x%08x   Next: 0x%08x\n", cpsw_cpdma_bd_paddr(sc, slot), bd.next);
415	printf("  BufPtr: 0x%08x   BufLen: 0x%08x\n", bd.bufptr, bd.buflen);
416	printf("  BufOff: 0x%08x   PktLen: 0x%08x\n", bd.bufoff, bd.pktlen);
417	printf("  Flags: ");
418	sep = "";
419	for (i = 0; i < 16; ++i) {
420		if (bd.flags & (1 << (15 - i))) {
421			printf("%s%s", sep, flags[i]);
422			sep = ",";
423		}
424	}
425	printf("\n");
426	if (slot->mbuf) {
427		printf("  Ether:  %14D\n",
428		    (char *)(slot->mbuf->m_data), " ");
429		printf("  Packet: %16D\n",
430		    (char *)(slot->mbuf->m_data) + 14, " ");
431	}
432}
433
434#define	CPSW_DUMP_SLOT(sc, slot) do {				\
435	IF_DEBUG(sc) {						\
436		cpsw_dump_slot(sc, slot);			\
437	}							\
438} while (0)
439
440static void
441cpsw_dump_queue(struct cpsw_softc *sc, struct cpsw_slots *q)
442{
443	struct cpsw_slot *slot;
444	int i = 0;
445	int others = 0;
446
447	STAILQ_FOREACH(slot, q, next) {
448		if (i > 4)
449			++others;
450		else
451			cpsw_dump_slot(sc, slot);
452		++i;
453	}
454	if (others)
455		printf(" ... and %d more.\n", others);
456	printf("\n");
457}
458
459#define CPSW_DUMP_QUEUE(sc, q) do {				\
460	IF_DEBUG(sc) {						\
461		cpsw_dump_queue(sc, q);				\
462	}							\
463} while (0)
464
465static void
466cpsw_init_slots(struct cpsw_softc *sc)
467{
468	struct cpsw_slot *slot;
469	int i;
470
471	STAILQ_INIT(&sc->avail);
472
473	/* Put the slot descriptors onto the global avail list. */
474	for (i = 0; i < nitems(sc->_slots); i++) {
475		slot = &sc->_slots[i];
476		slot->bd_offset = cpsw_cpdma_bd_offset(i);
477		STAILQ_INSERT_TAIL(&sc->avail, slot, next);
478	}
479}
480
481static int
482cpsw_add_slots(struct cpsw_softc *sc, struct cpsw_queue *queue, int requested)
483{
484	const int max_slots = nitems(sc->_slots);
485	struct cpsw_slot *slot;
486	int i;
487
488	if (requested < 0)
489		requested = max_slots;
490
491	for (i = 0; i < requested; ++i) {
492		slot = STAILQ_FIRST(&sc->avail);
493		if (slot == NULL)
494			return (0);
495		if (bus_dmamap_create(sc->mbuf_dtag, 0, &slot->dmamap)) {
496			device_printf(sc->dev, "failed to create dmamap\n");
497			return (ENOMEM);
498		}
499		STAILQ_REMOVE_HEAD(&sc->avail, next);
500		STAILQ_INSERT_TAIL(&queue->avail, slot, next);
501		++queue->avail_queue_len;
502		++queue->queue_slots;
503	}
504	return (0);
505}
506
507static void
508cpsw_free_slot(struct cpsw_softc *sc, struct cpsw_slot *slot)
509{
510	int error;
511
512	if (slot->dmamap) {
513		if (slot->mbuf)
514			bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);
515		error = bus_dmamap_destroy(sc->mbuf_dtag, slot->dmamap);
516		KASSERT(error == 0, ("Mapping still active"));
517		slot->dmamap = NULL;
518	}
519	if (slot->mbuf) {
520		m_freem(slot->mbuf);
521		slot->mbuf = NULL;
522	}
523}
524
525static void
526cpsw_reset(struct cpsw_softc *sc)
527{
528	int i;
529
530	callout_stop(&sc->watchdog.callout);
531
532	/* Reset RMII/RGMII wrapper. */
533	cpsw_write_4(sc, CPSW_WR_SOFT_RESET, 1);
534	while (cpsw_read_4(sc, CPSW_WR_SOFT_RESET) & 1)
535		;
536
537	/* Disable TX and RX interrupts for all cores. */
538	for (i = 0; i < 3; ++i) {
539		cpsw_write_4(sc, CPSW_WR_C_RX_THRESH_EN(i), 0x00);
540		cpsw_write_4(sc, CPSW_WR_C_TX_EN(i), 0x00);
541		cpsw_write_4(sc, CPSW_WR_C_RX_EN(i), 0x00);
542		cpsw_write_4(sc, CPSW_WR_C_MISC_EN(i), 0x00);
543	}
544
545	/* Reset CPSW subsystem. */
546	cpsw_write_4(sc, CPSW_SS_SOFT_RESET, 1);
547	while (cpsw_read_4(sc, CPSW_SS_SOFT_RESET) & 1)
548		;
549
550	/* Reset Sliver port 1 and 2 */
551	for (i = 0; i < 2; i++) {
552		/* Reset */
553		cpsw_write_4(sc, CPSW_SL_SOFT_RESET(i), 1);
554		while (cpsw_read_4(sc, CPSW_SL_SOFT_RESET(i)) & 1)
555			;
556	}
557
558	/* Reset DMA controller. */
559	cpsw_write_4(sc, CPSW_CPDMA_SOFT_RESET, 1);
560	while (cpsw_read_4(sc, CPSW_CPDMA_SOFT_RESET) & 1)
561		;
562
563	/* Disable TX & RX DMA */
564	cpsw_write_4(sc, CPSW_CPDMA_TX_CONTROL, 0);
565	cpsw_write_4(sc, CPSW_CPDMA_RX_CONTROL, 0);
566
567	/* Clear all queues. */
568	for (i = 0; i < 8; i++) {
569		cpsw_write_4(sc, CPSW_CPDMA_TX_HDP(i), 0);
570		cpsw_write_4(sc, CPSW_CPDMA_RX_HDP(i), 0);
571		cpsw_write_4(sc, CPSW_CPDMA_TX_CP(i), 0);
572		cpsw_write_4(sc, CPSW_CPDMA_RX_CP(i), 0);
573	}
574
575	/* Clear all interrupt Masks */
576	cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_CLEAR, 0xFFFFFFFF);
577	cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_CLEAR, 0xFFFFFFFF);
578}
579
580static void
581cpsw_init(struct cpsw_softc *sc)
582{
583	struct cpsw_slot *slot;
584	uint32_t reg;
585
586	/* Disable the interrupt pacing. */
587	reg = cpsw_read_4(sc, CPSW_WR_INT_CONTROL);
588	reg &= ~(CPSW_WR_INT_PACE_EN | CPSW_WR_INT_PRESCALE_MASK);
589	cpsw_write_4(sc, CPSW_WR_INT_CONTROL, reg);
590
591	/* Clear ALE */
592	cpsw_write_4(sc, CPSW_ALE_CONTROL, CPSW_ALE_CTL_CLEAR_TBL);
593
594	/* Enable ALE */
595	reg = CPSW_ALE_CTL_ENABLE;
596	if (sc->dualemac)
597		reg |= CPSW_ALE_CTL_VLAN_AWARE;
598	cpsw_write_4(sc, CPSW_ALE_CONTROL, reg);
599
600	/* Set Host Port Mapping. */
601	cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_TX_PRI_MAP, 0x76543210);
602	cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_RX_CH_MAP, 0);
603
604	/* Initialize ALE: set host port to forwarding(3). */
605	cpsw_write_4(sc, CPSW_ALE_PORTCTL(0), 3);
606
607	cpsw_write_4(sc, CPSW_SS_PTYPE, 0);
608
609	/* Enable statistics for ports 0, 1 and 2 */
610	cpsw_write_4(sc, CPSW_SS_STAT_PORT_EN, 7);
611
612	/* Experiment: turn off flow control. */
613	/* This seems to fix the watchdog resets that plagued earlier
614	   versions of this driver; no negative side effects have been
615	   observed so far. */
616	cpsw_write_4(sc, CPSW_SS_FLOW_CONTROL, 0);
617
618	/* Offset RX buffers by 2 bytes so the IP header is 4-byte aligned. */
619	cpsw_write_4(sc, CPSW_CPDMA_RX_BUFFER_OFFSET, 2);
620
621	/* Initialize RX Buffer Descriptors */
622	cpsw_write_4(sc, CPSW_CPDMA_RX_FREEBUFFER(0), 0);
623
624	/* Enable TX & RX DMA */
625	cpsw_write_4(sc, CPSW_CPDMA_TX_CONTROL, 1);
626	cpsw_write_4(sc, CPSW_CPDMA_RX_CONTROL, 1);
627
628	/* Enable Interrupts for core 0 */
629	cpsw_write_4(sc, CPSW_WR_C_RX_THRESH_EN(0), 0xFF);
630	cpsw_write_4(sc, CPSW_WR_C_RX_EN(0), 0xFF);
631	cpsw_write_4(sc, CPSW_WR_C_MISC_EN(0), 0x1F);
632
633	/* Enable host Error Interrupt */
634	cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_SET, 3);
635
636	/* Enable interrupts for RX Channel 0 */
637	cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_SET, 1);
638
639	/* Initialize MDIO - ENABLE, PREAMBLE=0, FAULTENB, CLKDIV=0xFF */
640	/* TODO: Calculate MDCLK=CLK/(CLKDIV+1) */
641	cpsw_write_4(sc, MDIOCONTROL, MDIOCTL_ENABLE | MDIOCTL_FAULTENB | 0xff);
642
643	/* Select MII in GMII_SEL, Internal Delay mode */
644	//ti_scm_reg_write_4(0x650, 0);
645
646	/* Initialize active queues. */
647	slot = STAILQ_FIRST(&sc->tx.active);
648	if (slot != NULL)
649		cpsw_write_hdp_slot(sc, &sc->tx, slot);
650	slot = STAILQ_FIRST(&sc->rx.active);
651	if (slot != NULL)
652		cpsw_write_hdp_slot(sc, &sc->rx, slot);
653	cpsw_rx_enqueue(sc);
654
655	/* Activate network interface. */
656	sc->rx.running = 1;
657	sc->tx.running = 1;
658	sc->watchdog.timer = 0;
659	callout_init(&sc->watchdog.callout, 0);
660	callout_reset(&sc->watchdog.callout, hz, cpsw_tx_watchdog, sc);
661}
662
663/*
664 *
665 * Device Probe, Attach, Detach.
666 *
667 */
668
669static int
670cpsw_probe(device_t dev)
671{
672
673	if (!ofw_bus_status_okay(dev))
674		return (ENXIO);
675
676	if (!ofw_bus_is_compatible(dev, "ti,cpsw"))
677		return (ENXIO);
678
679	device_set_desc(dev, "3-port Switch Ethernet Subsystem");
680	return (BUS_PROBE_DEFAULT);
681}
682
683static int
684cpsw_intr_attach(struct cpsw_softc *sc)
685{
686
687	/* Note: We don't use sc->irq_res[2] (TX interrupt) */
688	if (bus_setup_intr(sc->dev, sc->irq_res[0],
689	    INTR_TYPE_NET | INTR_MPSAFE, NULL, cpsw_intr_rx_thresh,
690	    sc, &sc->ih_cookie[0]) != 0) {
691		return (-1);
692	}
693	if (bus_setup_intr(sc->dev, sc->irq_res[1],
694	    INTR_TYPE_NET | INTR_MPSAFE, NULL, cpsw_intr_rx,
695	    sc, &sc->ih_cookie[1]) != 0) {
696		return (-1);
697	}
698	if (bus_setup_intr(sc->dev, sc->irq_res[3],
699	    INTR_TYPE_NET | INTR_MPSAFE, NULL, cpsw_intr_misc,
700	    sc, &sc->ih_cookie[3]) != 0) {
701		return (-1);
702	}
703
704	return (0);
705}
706
707static void
708cpsw_intr_detach(struct cpsw_softc *sc)
709{
710	int i;
711
712	for (i = 0; i < CPSW_INTR_COUNT; i++) {
713		if (sc->ih_cookie[i]) {
714			bus_teardown_intr(sc->dev, sc->irq_res[i],
715			    sc->ih_cookie[i]);
716		}
717	}
718}
719
720static int
721cpsw_get_fdt_data(struct cpsw_softc *sc, int port)
722{
723	char *name;
724	int len, phy, vlan;
725	pcell_t phy_id[3], vlan_id;
726	phandle_t child;
727	unsigned long mdio_child_addr;
728
729	/* Find any slave with phy_id */
730	phy = -1;
731	vlan = -1;
732	for (child = OF_child(sc->node); child != 0; child = OF_peer(child)) {
733		if (OF_getprop_alloc(child, "name", 1, (void **)&name) < 0)
734			continue;
735		if (sscanf(name, "slave@%lx", &mdio_child_addr) != 1) {
736			OF_prop_free(name);
737			continue;
738		}
739		OF_prop_free(name);
740		if (mdio_child_addr != slave_mdio_addr[port])
741			continue;
742
743		len = OF_getproplen(child, "phy_id");
744		if (len / sizeof(pcell_t) == 2) {
745			/* Get phy address from fdt */
746			if (OF_getencprop(child, "phy_id", phy_id, len) > 0)
747				phy = phy_id[1];
748		}
749
750		len = OF_getproplen(child, "dual_emac_res_vlan");
751		if (len / sizeof(pcell_t) == 1) {
752			/* Get VLAN id from fdt */
753			if (OF_getencprop(child, "dual_emac_res_vlan",
754			    &vlan_id, len) > 0) {
755				vlan = vlan_id;
756			}
757		}
758
759		break;
760	}
761	if (phy == -1)
762		return (ENXIO);
763	sc->port[port].phy = phy;
764	sc->port[port].vlan = vlan;
765
766	return (0);
767}
768
769static int
770cpsw_attach(device_t dev)
771{
772	bus_dma_segment_t segs[1];
773	int error, i, nsegs;
774	struct cpsw_softc *sc;
775	uint32_t reg;
776
777	sc = device_get_softc(dev);
778	sc->dev = dev;
779	sc->node = ofw_bus_get_node(dev);
780	getbinuptime(&sc->attach_uptime);
781
782	if (OF_getencprop(sc->node, "active_slave", &sc->active_slave,
783	    sizeof(sc->active_slave)) <= 0) {
784		sc->active_slave = 0;
785	}
786	if (sc->active_slave > 1)
787		sc->active_slave = 1;
788
789	if (OF_hasprop(sc->node, "dual_emac"))
790		sc->dualemac = 1;
791
792	for (i = 0; i < CPSW_PORTS; i++) {
793		if (!sc->dualemac && i != sc->active_slave)
794			continue;
795		if (cpsw_get_fdt_data(sc, i) != 0) {
796			device_printf(dev,
797			    "failed to get PHY address from FDT\n");
798			return (ENXIO);
799		}
800	}
801
802	/* Initialize mutexes */
803	mtx_init(&sc->tx.lock, device_get_nameunit(dev),
804	    "cpsw TX lock", MTX_DEF);
805	mtx_init(&sc->rx.lock, device_get_nameunit(dev),
806	    "cpsw RX lock", MTX_DEF);
807
808	/* Allocate IRQ resources */
809	error = bus_alloc_resources(dev, irq_res_spec, sc->irq_res);
810	if (error) {
811		device_printf(dev, "could not allocate IRQ resources\n");
812		cpsw_detach(dev);
813		return (ENXIO);
814	}
815
816	sc->mem_rid = 0;
817	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
818	    &sc->mem_rid, RF_ACTIVE);
819	if (sc->mem_res == NULL) {
820		device_printf(sc->dev, "failed to allocate memory resource\n");
821		cpsw_detach(dev);
822		return (ENXIO);
823	}
824
825	reg = cpsw_read_4(sc, CPSW_SS_IDVER);
826	device_printf(dev, "CPSW SS Version %d.%d (%d)\n", (reg >> 8 & 0x7),
827		reg & 0xFF, (reg >> 11) & 0x1F);
828
829	cpsw_add_sysctls(sc);
830
831	/* Allocate a busdma tag and DMA safe memory for mbufs. */
832	error = bus_dma_tag_create(
833		bus_get_dma_tag(sc->dev),	/* parent */
834		1, 0,				/* alignment, boundary */
835		BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
836		BUS_SPACE_MAXADDR,		/* highaddr */
837		NULL, NULL,			/* filtfunc, filtfuncarg */
838		MCLBYTES, CPSW_TXFRAGS,		/* maxsize, nsegments */
839		MCLBYTES, 0,			/* maxsegsz, flags */
840		NULL, NULL,			/* lockfunc, lockfuncarg */
841		&sc->mbuf_dtag);		/* dmatag */
842	if (error) {
843		device_printf(dev, "bus_dma_tag_create failed\n");
844		cpsw_detach(dev);
845		return (error);
846	}
847
848	/* Allocate the null mbuf (TX pad bytes) and pre-sync it. */
849	sc->null_mbuf = m_getcl(M_WAITOK, MT_DATA, M_PKTHDR); /* cannot fail */
850	memset(sc->null_mbuf->m_data, 0, sc->null_mbuf->m_ext.ext_size);
851	bus_dmamap_create(sc->mbuf_dtag, 0, &sc->null_mbuf_dmamap);
852	bus_dmamap_load_mbuf_sg(sc->mbuf_dtag, sc->null_mbuf_dmamap,
853	    sc->null_mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
854	bus_dmamap_sync(sc->mbuf_dtag, sc->null_mbuf_dmamap,
855	    BUS_DMASYNC_PREWRITE);
856	sc->null_mbuf_paddr = segs[0].ds_addr;
857
858	cpsw_init_slots(sc);
859
860	/* Allocate slots to TX and RX queues. */
861	STAILQ_INIT(&sc->rx.avail);
862	STAILQ_INIT(&sc->rx.active);
863	STAILQ_INIT(&sc->tx.avail);
864	STAILQ_INIT(&sc->tx.active);
865	/* For now: 128 slots to TX, the rest to RX. */
866	/* XXX TODO: start with 32/64 and grow dynamically based on demand. */
867	if (cpsw_add_slots(sc, &sc->tx, 128) ||
868	    cpsw_add_slots(sc, &sc->rx, -1)) {
869		device_printf(dev, "failed to allocate dmamaps\n");
870		cpsw_detach(dev);
871		return (ENOMEM);
872	}
873	device_printf(dev, "Initial queue size TX=%d RX=%d\n",
874	    sc->tx.queue_slots, sc->rx.queue_slots);
875
876	sc->tx.hdp_offset = CPSW_CPDMA_TX_HDP(0);
877	sc->rx.hdp_offset = CPSW_CPDMA_RX_HDP(0);
878
879	if (cpsw_intr_attach(sc) == -1) {
880		device_printf(dev, "failed to setup interrupts\n");
881		cpsw_detach(dev);
882		return (ENXIO);
883	}
884
885	/* Reset the controller. */
886	cpsw_reset(sc);
887	cpsw_init(sc);
888
889	for (i = 0; i < CPSW_PORTS; i++) {
890		if (!sc->dualemac && i != sc->active_slave)
891			continue;
892		sc->port[i].dev = device_add_child(dev, "cpsw", i);
893		if (sc->port[i].dev == NULL) {
894			cpsw_detach(dev);
895			return (ENXIO);
896		}
897	}
898	bus_generic_attach(dev);
899
900	return (0);
901}
902
903static int
904cpsw_detach(device_t dev)
905{
906	struct cpsw_softc *sc;
907	int error, i;
908
909	bus_generic_detach(dev);
910	sc = device_get_softc(dev);
911
912	for (i = 0; i < CPSW_PORTS; i++) {
913		if (sc->port[i].dev)
914			device_delete_child(dev, sc->port[i].dev);
915	}
916
917	if (device_is_attached(dev)) {
918		callout_stop(&sc->watchdog.callout);
919		callout_drain(&sc->watchdog.callout);
920	}
921
922	/* Stop and release all interrupts */
923	cpsw_intr_detach(sc);
924
925	/* Free dmamaps and mbufs */
926	for (i = 0; i < nitems(sc->_slots); ++i)
927		cpsw_free_slot(sc, &sc->_slots[i]);
928
929	/* Free null mbuf. */
930	if (sc->null_mbuf_dmamap) {
931		bus_dmamap_unload(sc->mbuf_dtag, sc->null_mbuf_dmamap);
932		error = bus_dmamap_destroy(sc->mbuf_dtag, sc->null_mbuf_dmamap);
933		KASSERT(error == 0, ("Mapping still active"));
934		m_freem(sc->null_mbuf);
935	}
936
937	/* Free DMA tag */
938	if (sc->mbuf_dtag) {
939		error = bus_dma_tag_destroy(sc->mbuf_dtag);
940		KASSERT(error == 0, ("Unable to destroy DMA tag"));
941	}
942
943	/* Free IO memory handler */
944	if (sc->mem_res != NULL)
945		bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem_res);
946	bus_release_resources(dev, irq_res_spec, sc->irq_res);
947
948	/* Destroy mutexes */
949	mtx_destroy(&sc->rx.lock);
950	mtx_destroy(&sc->tx.lock);
951
952	return (0);
953}
954
955static phandle_t
956cpsw_get_node(device_t bus, device_t dev)
957{
958
959	/* Share controller node with port device. */
960	return (ofw_bus_get_node(bus));
961}
962
963static int
964cpswp_probe(device_t dev)
965{
966
967	if (device_get_unit(dev) > 1) {
968		device_printf(dev, "Only two ports are supported.\n");
969		return (ENXIO);
970	}
971	device_set_desc(dev, "Ethernet Switch Port");
972
973	return (BUS_PROBE_DEFAULT);
974}
975
976static int
977cpswp_attach(device_t dev)
978{
979	int error;
980	struct ifnet *ifp;
981	struct cpswp_softc *sc;
982	uint32_t reg;
983	uint8_t mac_addr[ETHER_ADDR_LEN];
984
985	sc = device_get_softc(dev);
986	sc->dev = dev;
987	sc->pdev = device_get_parent(dev);
988	sc->swsc = device_get_softc(sc->pdev);
989	sc->unit = device_get_unit(dev);
990	sc->phy = sc->swsc->port[sc->unit].phy;
991	sc->vlan = sc->swsc->port[sc->unit].vlan;
992	if (sc->swsc->dualemac && sc->vlan == -1)
993		sc->vlan = sc->unit + 1;
994
995	if (sc->unit == 0) {
996		sc->physel = MDIOUSERPHYSEL0;
997		sc->phyaccess = MDIOUSERACCESS0;
998	} else {
999		sc->physel = MDIOUSERPHYSEL1;
1000		sc->phyaccess = MDIOUSERACCESS1;
1001	}
1002
1003	mtx_init(&sc->lock, device_get_nameunit(dev), "cpsw port lock",
1004	    MTX_DEF);
1005
1006	/* Allocate network interface */
1007	ifp = sc->ifp = if_alloc(IFT_ETHER);
1008	if (ifp == NULL) {
1009		cpswp_detach(dev);
1010		return (ENXIO);
1011	}
1012
1013	if_initname(ifp, device_get_name(sc->dev), sc->unit);
1014	ifp->if_softc = sc;
1015	ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST;
1016	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_HWCSUM; /* FIXME: VLAN? */
1017	ifp->if_capenable = ifp->if_capabilities;
1018
1019	ifp->if_init = cpswp_init;
1020	ifp->if_start = cpswp_start;
1021	ifp->if_ioctl = cpswp_ioctl;
1022
1023	ifp->if_snd.ifq_drv_maxlen = sc->swsc->tx.queue_slots;
1024	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
1025	IFQ_SET_READY(&ifp->if_snd);
1026
1027	/* Get high part of MAC address from control module (mac_id[0|1]_hi) */
1028	ti_scm_reg_read_4(SCM_MAC_ID0_HI + sc->unit * 8, &reg);
1029	mac_addr[0] = reg & 0xFF;
1030	mac_addr[1] = (reg >>  8) & 0xFF;
1031	mac_addr[2] = (reg >> 16) & 0xFF;
1032	mac_addr[3] = (reg >> 24) & 0xFF;
1033
1034	/* Get low part of MAC address from control module (mac_id[0|1]_lo) */
1035	ti_scm_reg_read_4(SCM_MAC_ID0_LO + sc->unit * 8, &reg);
1036	mac_addr[4] = reg & 0xFF;
1037	mac_addr[5] = (reg >>  8) & 0xFF;
1038
1039	error = mii_attach(dev, &sc->miibus, ifp, cpswp_ifmedia_upd,
1040	    cpswp_ifmedia_sts, BMSR_DEFCAPMASK, sc->phy, MII_OFFSET_ANY, 0);
1041	if (error) {
1042		device_printf(dev, "attaching PHYs failed\n");
1043		cpswp_detach(dev);
1044		return (error);
1045	}
1046	sc->mii = device_get_softc(sc->miibus);
1047
1048	/* Select PHY and enable interrupts */
1049	cpsw_write_4(sc->swsc, sc->physel,
1050	    MDIO_PHYSEL_LINKINTENB | (sc->phy & 0x1F));
1051
1052	ether_ifattach(sc->ifp, mac_addr);
1053	callout_init(&sc->mii_callout, 0);
1054
1055	return (0);
1056}
1057
1058static int
1059cpswp_detach(device_t dev)
1060{
1061	struct cpswp_softc *sc;
1062
1063	sc = device_get_softc(dev);
1064	CPSWP_DEBUGF(sc, (""));
1065	if (device_is_attached(dev)) {
1066		ether_ifdetach(sc->ifp);
1067		CPSW_PORT_LOCK(sc);
1068		cpswp_stop_locked(sc);
1069		CPSW_PORT_UNLOCK(sc);
1070		callout_drain(&sc->mii_callout);
1071	}
1072
1073	bus_generic_detach(dev);
1074
1075	if_free(sc->ifp);
1076	mtx_destroy(&sc->lock);
1077
1078	return (0);
1079}
1080
1081/*
1082 *
1083 * Init/Shutdown.
1084 *
1085 */
1086
1087static int
1088cpsw_ports_down(struct cpsw_softc *sc)
1089{
1090	struct cpswp_softc *psc;
1091	struct ifnet *ifp1, *ifp2;
1092
1093	if (!sc->dualemac)
1094		return (1);
1095	psc = device_get_softc(sc->port[0].dev);
1096	ifp1 = psc->ifp;
1097	psc = device_get_softc(sc->port[1].dev);
1098	ifp2 = psc->ifp;
1099	if ((ifp1->if_flags & IFF_UP) == 0 && (ifp2->if_flags & IFF_UP) == 0)
1100		return (1);
1101
1102	return (0);
1103}
1104
1105static void
1106cpswp_init(void *arg)
1107{
1108	struct cpswp_softc *sc = arg;
1109
1110	CPSWP_DEBUGF(sc, (""));
1111	CPSW_PORT_LOCK(sc);
1112	cpswp_init_locked(arg);
1113	CPSW_PORT_UNLOCK(sc);
1114}
1115
1116static void
1117cpswp_init_locked(void *arg)
1118{
1119	struct cpswp_softc *sc = arg;
1120	struct ifnet *ifp;
1121	uint32_t reg;
1122
1123	CPSWP_DEBUGF(sc, (""));
1124	CPSW_PORT_LOCK_ASSERT(sc);
1125	ifp = sc->ifp;
1126	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1127		return;
1128
1129	getbinuptime(&sc->init_uptime);
1130
1131	if (!sc->swsc->rx.running && !sc->swsc->tx.running) {
1132		/* Reset the controller. */
1133		cpsw_reset(sc->swsc);
1134		cpsw_init(sc->swsc);
1135	}
1136
1137	/* Set Slave Mapping. */
1138	cpsw_write_4(sc->swsc, CPSW_SL_RX_PRI_MAP(sc->unit), 0x76543210);
1139	cpsw_write_4(sc->swsc, CPSW_PORT_P_TX_PRI_MAP(sc->unit + 1),
1140	    0x33221100);
1141	cpsw_write_4(sc->swsc, CPSW_SL_RX_MAXLEN(sc->unit), 0x5f2); /* 1522 */
1142	/* Enable MAC RX/TX modules. */
1143	/* TODO: Docs claim that IFCTL_B and IFCTL_A do the same thing? */
1144	/* Huh?  Docs call bit 0 "Loopback" some places, "FullDuplex" others. */
1145	reg = cpsw_read_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit));
1146	reg |= CPSW_SL_MACTL_GMII_ENABLE;
1147	cpsw_write_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit), reg);
1148
1149	/* Initialize ALE: set port to forwarding(3), initialize addrs */
1150	cpsw_write_4(sc->swsc, CPSW_ALE_PORTCTL(sc->unit + 1), 3);
1151	cpswp_ale_update_addresses(sc, 1);
1152
1153	if (sc->swsc->dualemac) {
1154		/* Set Port VID. */
1155		cpsw_write_4(sc->swsc, CPSW_PORT_P_VLAN(sc->unit + 1),
1156		    sc->vlan & 0xfff);
1157		cpsw_ale_update_vlan_table(sc->swsc, sc->vlan,
1158		    (1 << (sc->unit + 1)) | (1 << 0), /* Member list */
1159		    (1 << (sc->unit + 1)) | (1 << 0), /* Untagged egress */
1160		    (1 << (sc->unit + 1)) | (1 << 0), 0); /* mcast reg flood */
1161	}
1162
1163	mii_mediachg(sc->mii);
1164	callout_reset(&sc->mii_callout, hz, cpswp_tick, sc);
1165	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1166	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1167}
1168
1169static int
1170cpsw_shutdown(device_t dev)
1171{
1172	struct cpsw_softc *sc;
1173	struct cpswp_softc *psc;
1174	int i;
1175
1176	sc = device_get_softc(dev);
1177	CPSW_DEBUGF(sc, (""));
1178	for (i = 0; i < CPSW_PORTS; i++) {
1179		if (!sc->dualemac && i != sc->active_slave)
1180			continue;
1181		psc = device_get_softc(sc->port[i].dev);
1182		CPSW_PORT_LOCK(psc);
1183		cpswp_stop_locked(psc);
1184		CPSW_PORT_UNLOCK(psc);
1185	}
1186
1187	return (0);
1188}
1189
1190static void
1191cpsw_rx_teardown_locked(struct cpsw_softc *sc)
1192{
1193	struct ifnet *ifp;
1194	struct mbuf *received, *next;
1195	int i = 0;
1196
1197	CPSW_DEBUGF(sc, ("starting RX teardown"));
1198	cpsw_write_4(sc, CPSW_CPDMA_RX_TEARDOWN, 0);
1199	for (;;) {
1200		received = cpsw_rx_dequeue(sc);
1201		CPSW_GLOBAL_UNLOCK(sc);
1202		while (received != NULL) {
1203			next = received->m_nextpkt;
1204			received->m_nextpkt = NULL;
1205			ifp = received->m_pkthdr.rcvif;
1206			(*ifp->if_input)(ifp, received);
1207			if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
1208			received = next;
1209		}
1210		CPSW_GLOBAL_LOCK(sc);
1211		if (!sc->rx.running) {
1212			CPSW_DEBUGF(sc,
1213			    ("finished RX teardown (%d retries)", i));
1214			return;
1215		}
1216		if (++i > 10) {
1217			device_printf(sc->dev,
1218			    "Unable to cleanly shutdown receiver\n");
1219			return;
1220		}
1221		DELAY(10);
1222	}
1223}
1224
1225static void
1226cpsw_tx_teardown_locked(struct cpsw_softc *sc)
1227{
1228	int i = 0;
1229
1230	CPSW_DEBUGF(sc, ("starting TX teardown"));
1231	cpsw_write_4(sc, CPSW_CPDMA_TX_TEARDOWN, 0);
1232	cpsw_tx_dequeue(sc);
1233	while (sc->tx.running && ++i < 10) {
1234		DELAY(10);
1235		cpsw_tx_dequeue(sc);
1236	}
1237	if (sc->tx.running) {
1238		device_printf(sc->dev,
1239		    "Unable to cleanly shutdown transmitter\n");
1240	}
1241	CPSW_DEBUGF(sc, ("finished TX teardown (%d retries, %d idle buffers)",
1242	    i, sc->tx.active_queue_len));
1243}
1244
1245static void
1246cpswp_stop_locked(struct cpswp_softc *sc)
1247{
1248	struct ifnet *ifp;
1249	uint32_t reg;
1250
1251	ifp = sc->ifp;
1252	CPSWP_DEBUGF(sc, (""));
1253	CPSW_PORT_LOCK_ASSERT(sc);
1254
1255	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1256		return;
1257
1258	/* Disable interface */
1259	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1260	ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1261
1262	/* Stop ticker */
1263	callout_stop(&sc->mii_callout);
1264
1265	/* Tear down the RX/TX queues. */
1266	if (cpsw_ports_down(sc->swsc)) {
1267		CPSW_GLOBAL_LOCK(sc->swsc);
1268		cpsw_rx_teardown_locked(sc->swsc);
1269		cpsw_tx_teardown_locked(sc->swsc);
1270		CPSW_GLOBAL_UNLOCK(sc->swsc);
1271	}
1272
1273	/* Stop MAC RX/TX modules. */
1274	reg = cpsw_read_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit));
1275	reg &= ~CPSW_SL_MACTL_GMII_ENABLE;
1276	cpsw_write_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit), reg);
1277
1278	if (cpsw_ports_down(sc->swsc)) {
1279		/* Capture stats before we reset controller. */
1280		cpsw_stats_collect(sc->swsc);
1281
1282		cpsw_reset(sc->swsc);
1283		cpsw_init(sc->swsc);
1284	}
1285}
1286
1287/*
1288 *  Suspend/Resume.
1289 */
1290
1291static int
1292cpsw_suspend(device_t dev)
1293{
1294	struct cpsw_softc *sc;
1295	struct cpswp_softc *psc;
1296	int i;
1297
1298	sc = device_get_softc(dev);
1299	CPSW_DEBUGF(sc, (""));
1300	for (i = 0; i < CPSW_PORTS; i++) {
1301		if (!sc->dualemac && i != sc->active_slave)
1302			continue;
1303		psc = device_get_softc(sc->port[i].dev);
1304		CPSW_PORT_LOCK(psc);
1305		cpswp_stop_locked(psc);
1306		CPSW_PORT_UNLOCK(psc);
1307	}
1308
1309	return (0);
1310}
1311
1312static int
1313cpsw_resume(device_t dev)
1314{
1315	struct cpsw_softc *sc;
1316
1317	sc = device_get_softc(dev);
1318	CPSW_DEBUGF(sc, ("UNIMPLEMENTED"));
1319
1320	return (0);
1321}
1322
1323/*
1324 *
1325 *  IOCTL
1326 *
1327 */
1328
1329static void
1330cpsw_set_promisc(struct cpswp_softc *sc, int set)
1331{
1332	uint32_t reg;
1333
1334	/*
1335	 * Enabling promiscuous mode requires ALE_BYPASS to be enabled.
1336	 * That disables the ALE forwarding logic and causes every
1337	 * packet to be sent only to the host port.  In bypass mode,
1338	 * the ALE processes host port transmit packets the same as in
1339	 * normal mode.
1340	 */
1341	reg = cpsw_read_4(sc->swsc, CPSW_ALE_CONTROL);
1342	reg &= ~CPSW_ALE_CTL_BYPASS;
1343	if (set)
1344		reg |= CPSW_ALE_CTL_BYPASS;
1345	cpsw_write_4(sc->swsc, CPSW_ALE_CONTROL, reg);
1346}
1347
1348static void
1349cpsw_set_allmulti(struct cpswp_softc *sc, int set)
1350{
1351	if (set) {
1352		printf("All-multicast mode unimplemented\n");
1353	}
1354}
1355
1356static int
1357cpswp_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1358{
1359	struct cpswp_softc *sc;
1360	struct ifreq *ifr;
1361	int error;
1362	uint32_t changed;
1363
1364	error = 0;
1365	sc = ifp->if_softc;
1366	ifr = (struct ifreq *)data;
1367
1368	switch (command) {
1369	case SIOCSIFFLAGS:
1370		CPSW_PORT_LOCK(sc);
1371		if (ifp->if_flags & IFF_UP) {
1372			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1373				changed = ifp->if_flags ^ sc->if_flags;
1374				CPSWP_DEBUGF(sc,
1375				    ("SIOCSIFFLAGS: UP & RUNNING (changed=0x%x)",
1376				    changed));
1377				if (changed & IFF_PROMISC)
1378					cpsw_set_promisc(sc,
1379					    ifp->if_flags & IFF_PROMISC);
1380				if (changed & IFF_ALLMULTI)
1381					cpsw_set_allmulti(sc,
1382					    ifp->if_flags & IFF_ALLMULTI);
1383			} else {
1384				CPSWP_DEBUGF(sc,
1385				    ("SIOCSIFFLAGS: UP but not RUNNING; starting up"));
1386				cpswp_init_locked(sc);
1387			}
1388		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1389			CPSWP_DEBUGF(sc,
1390			    ("SIOCSIFFLAGS: not UP but RUNNING; shutting down"));
1391			cpswp_stop_locked(sc);
1392		}
1393
1394		sc->if_flags = ifp->if_flags;
1395		CPSW_PORT_UNLOCK(sc);
1396		break;
1397	case SIOCADDMULTI:
1398		cpswp_ale_update_addresses(sc, 0);
1399		break;
1400	case SIOCDELMULTI:
1401		/* Ugh.  DELMULTI doesn't provide the specific address
1402		   being removed, so the best we can do is remove
1403		   everything and rebuild it all. */
1404		cpswp_ale_update_addresses(sc, 1);
1405		break;
1406	case SIOCGIFMEDIA:
1407	case SIOCSIFMEDIA:
1408		error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command);
1409		break;
1410	default:
1411		error = ether_ioctl(ifp, command, data);
1412	}
1413	return (error);
1414}
1415
1416/*
1417 *
1418 * MIIBUS
1419 *
1420 */
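/*
 * The MDIOUSERACCESS registers encode a whole MDIO transaction in a
 * single 32-bit word; the layout below is inferred from the accessors
 * that follow rather than quoted from the TRM:
 *
 *	GO (start/busy) | WRITE | reg << 21 | phy << 16 | data[15:0]
 *
 * Software sets GO to start a transaction and polls until the
 * hardware clears it; ACK reports whether the PHY answered.
 */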
1421static int
1422cpswp_miibus_ready(struct cpsw_softc *sc, uint32_t reg)
1423{
1424	uint32_t r, retries = CPSW_MIIBUS_RETRIES;
1425
1426	while (--retries) {
1427		r = cpsw_read_4(sc, reg);
1428		if ((r & MDIO_PHYACCESS_GO) == 0)
1429			return (1);
1430		DELAY(CPSW_MIIBUS_DELAY);
1431	}
1432
1433	return (0);
1434}
1435
1436static int
1437cpswp_miibus_readreg(device_t dev, int phy, int reg)
1438{
1439	struct cpswp_softc *sc;
1440	uint32_t cmd, r;
1441
1442	sc = device_get_softc(dev);
1443	if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) {
1444		device_printf(dev, "MDIO not ready to read\n");
1445		return (0);
1446	}
1447
1448	/* Set GO, reg, phy */
1449	cmd = MDIO_PHYACCESS_GO | (reg & 0x1F) << 21 | (phy & 0x1F) << 16;
1450	cpsw_write_4(sc->swsc, sc->phyaccess, cmd);
1451
1452	if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) {
1453		device_printf(dev, "MDIO timed out during read\n");
1454		return (0);
1455	}
1456
1457	r = cpsw_read_4(sc->swsc, sc->phyaccess);
1458	if ((r & MDIO_PHYACCESS_ACK) == 0) {
1459		device_printf(dev, "Failed to read from PHY.\n");
1460		r = 0;
1461	}
1462	return (r & 0xFFFF);
1463}
1464
1465static int
1466cpswp_miibus_writereg(device_t dev, int phy, int reg, int value)
1467{
1468	struct cpswp_softc *sc;
1469	uint32_t cmd;
1470
1471	sc = device_get_softc(dev);
1472	if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) {
1473		device_printf(dev, "MDIO not ready to write\n");
1474		return (0);
1475	}
1476
1477	/* Set GO, WRITE, reg, phy, and value */
1478	cmd = MDIO_PHYACCESS_GO | MDIO_PHYACCESS_WRITE |
1479	    (reg & 0x1F) << 21 | (phy & 0x1F) << 16 | (value & 0xFFFF);
1480	cpsw_write_4(sc->swsc, sc->phyaccess, cmd);
1481
1482	if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) {
1483		device_printf(dev, "MDIO timed out during write\n");
1484		return (0);
1485	}
1486
1487	if ((cpsw_read_4(sc->swsc, sc->phyaccess) & MDIO_PHYACCESS_ACK) == 0)
1488		device_printf(dev, "Failed to write to PHY.\n");
1489
1490	return (0);
1491}
1492
1493static void
1494cpswp_miibus_statchg(device_t dev)
1495{
1496	struct cpswp_softc *sc;
1497	uint32_t mac_control, reg;
1498
1499	sc = device_get_softc(dev);
1500	CPSWP_DEBUGF(sc, (""));
1501
1502	reg = CPSW_SL_MACCONTROL(sc->unit);
1503	mac_control = cpsw_read_4(sc->swsc, reg);
1504	mac_control &= ~(CPSW_SL_MACTL_GIG | CPSW_SL_MACTL_IFCTL_A |
1505	    CPSW_SL_MACTL_IFCTL_B | CPSW_SL_MACTL_FULLDUPLEX);
1506
1507	switch (IFM_SUBTYPE(sc->mii->mii_media_active)) {
1508	case IFM_1000_SX:
1509	case IFM_1000_LX:
1510	case IFM_1000_CX:
1511	case IFM_1000_T:
1512		mac_control |= CPSW_SL_MACTL_GIG;
1513		break;
1514
1515	case IFM_100_TX:
1516		mac_control |= CPSW_SL_MACTL_IFCTL_A;
1517		break;
1518	}
1519	if (sc->mii->mii_media_active & IFM_FDX)
1520		mac_control |= CPSW_SL_MACTL_FULLDUPLEX;
1521
1522	cpsw_write_4(sc->swsc, reg, mac_control);
1523}
1524
1525/*
1526 *
1527 * Transmit/Receive Packets.
1528 *
1529 */
1530static void
1531cpsw_intr_rx(void *arg)
1532{
1533	struct cpsw_softc *sc = arg;
1534	struct ifnet *ifp;
1535	struct mbuf *received, *next;
1536
1537	CPSW_RX_LOCK(sc);
1538	received = cpsw_rx_dequeue(sc);
1539	cpsw_rx_enqueue(sc);
1540	cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 1);
1541	CPSW_RX_UNLOCK(sc);
1542
1543	while (received != NULL) {
1544		next = received->m_nextpkt;
1545		received->m_nextpkt = NULL;
1546		ifp = received->m_pkthdr.rcvif;
1547		(*ifp->if_input)(ifp, received);
1548		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
1549		received = next;
1550	}
1551}
1552
1553static struct mbuf *
1554cpsw_rx_dequeue(struct cpsw_softc *sc)
1555{
1556	struct cpsw_cpdma_bd bd;
1557	struct cpsw_slot *slot;
1558	struct cpswp_softc *psc;
1559	struct mbuf *mb_head, *mb_tail;
1560	int port, removed = 0;
1561
1562	mb_head = mb_tail = NULL;
1563
1564	/* Pull completed packets off hardware RX queue. */
1565	while ((slot = STAILQ_FIRST(&sc->rx.active)) != NULL) {
1566		cpsw_cpdma_read_bd(sc, slot, &bd);
1567		if (bd.flags & CPDMA_BD_OWNER)
1568			break; /* Still in use by hardware */
1569
1570		CPSW_DEBUGF(sc, ("Removing received packet from RX queue"));
1571		++removed;
1572		STAILQ_REMOVE_HEAD(&sc->rx.active, next);
1573		STAILQ_INSERT_TAIL(&sc->rx.avail, slot, next);
1574
1575		bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_POSTREAD);
1576		bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);
1577
1578		if (bd.flags & CPDMA_BD_TDOWNCMPLT) {
1579			CPSW_DEBUGF(sc, ("RX teardown in progress"));
1580			m_freem(slot->mbuf);
1581			slot->mbuf = NULL;
1582			cpsw_write_cp(sc, &sc->rx, 0xfffffffc);
1583			sc->rx.running = 0;
1584			break;
1585		}
1586
1587		cpsw_write_cp_slot(sc, &sc->rx, slot);
1588
1589		port = (bd.flags & CPDMA_BD_PORT_MASK) - 1;
1590		KASSERT(port >= 0 && port <= 1,
1591		    ("patcket received with invalid port: %d", port));
1592		psc = device_get_softc(sc->port[port].dev);
1593
1594		/* Set up mbuf */
1595		/* TODO: track SOP/EOP bits to assemble a full mbuf
1596		   out of received fragments. */
1597		slot->mbuf->m_data += bd.bufoff;
1598		slot->mbuf->m_len = bd.pktlen - 4;
1599		slot->mbuf->m_pkthdr.len = bd.pktlen - 4;
1600		slot->mbuf->m_flags |= M_PKTHDR;
1601		slot->mbuf->m_pkthdr.rcvif = psc->ifp;
1602		slot->mbuf->m_nextpkt = NULL;
1603
1604		if ((psc->ifp->if_capenable & IFCAP_RXCSUM) != 0) {
1605			/* check for valid CRC by looking into pkt_err[5:4] */
1606			if ((bd.flags & CPDMA_BD_PKT_ERR_MASK) == 0) {
1607				slot->mbuf->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
1608				slot->mbuf->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1609				slot->mbuf->m_pkthdr.csum_data = 0xffff;
1610			}
1611		}
1612
1613		/* Add mbuf to packet list to be returned. */
1614		if (mb_tail) {
1615			mb_tail->m_nextpkt = slot->mbuf;
1616		} else {
1617			mb_head = slot->mbuf;
1618		}
1619		mb_tail = slot->mbuf;
1620		slot->mbuf = NULL;
1621	}
1622
1623	if (removed != 0) {
1624		sc->rx.queue_removes += removed;
1625		sc->rx.active_queue_len -= removed;
1626		sc->rx.avail_queue_len += removed;
1627		if (sc->rx.avail_queue_len > sc->rx.max_avail_queue_len)
1628			sc->rx.max_avail_queue_len = sc->rx.avail_queue_len;
1629	}
1630	return (mb_head);
1631}
1632
1633static void
1634cpsw_rx_enqueue(struct cpsw_softc *sc)
1635{
1636	bus_dma_segment_t seg[1];
1637	struct cpsw_cpdma_bd bd;
1638	struct cpsw_slots tmpqueue = STAILQ_HEAD_INITIALIZER(tmpqueue);
1639	struct cpsw_slot *slot, *prev_slot = NULL;
1640	struct cpsw_slot *last_old_slot, *first_new_slot;
1641	int error, nsegs, added = 0;
1642
1643	/* Register new mbufs with hardware. */
1644	while ((slot = STAILQ_FIRST(&sc->rx.avail)) != NULL) {
1645		if (slot->mbuf == NULL) {
1646			slot->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1647			if (slot->mbuf == NULL) {
1648				device_printf(sc->dev,
1649				    "Unable to fill RX queue\n");
1650				break;
1651			}
1652			slot->mbuf->m_len =
1653			    slot->mbuf->m_pkthdr.len =
1654			    slot->mbuf->m_ext.ext_size;
1655		}
1656
1657		error = bus_dmamap_load_mbuf_sg(sc->mbuf_dtag, slot->dmamap,
1658		    slot->mbuf, seg, &nsegs, BUS_DMA_NOWAIT);
1659
1660		KASSERT(nsegs == 1, ("More than one segment (nsegs=%d)", nsegs));
1661		KASSERT(error == 0, ("DMA error (error=%d)", error));
1662		if (error != 0 || nsegs != 1) {
1663			device_printf(sc->dev,
1664			    "%s: Can't prep RX buf for DMA (nsegs=%d, error=%d)\n",
1665			    __func__, nsegs, error);
1666			bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);
1667			m_freem(slot->mbuf);
1668			slot->mbuf = NULL;
1669			break;
1670		}
1671
1672		bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_PREREAD);
1673
1674		/* Create and submit new RX descriptor. */
1675		bd.next = 0;
1676		bd.bufptr = seg->ds_addr;
1677		bd.bufoff = 0;
1678		bd.buflen = MCLBYTES - 1;
1679		bd.pktlen = bd.buflen;
1680		bd.flags = CPDMA_BD_OWNER;
1681		cpsw_cpdma_write_bd(sc, slot, &bd);
1682		++added;
1683
1684		if (prev_slot != NULL)
1685			cpsw_cpdma_write_bd_next(sc, prev_slot, slot);
1686		prev_slot = slot;
1687		STAILQ_REMOVE_HEAD(&sc->rx.avail, next);
1688		sc->rx.avail_queue_len--;
1689		STAILQ_INSERT_TAIL(&tmpqueue, slot, next);
1690	}
1691
1692	if (added == 0)
1693		return;
1694
1695	CPSW_DEBUGF(sc, ("Adding %d buffers to RX queue", added));
1696
1697	/* Link new entries to hardware RX queue. */
1698	last_old_slot = STAILQ_LAST(&sc->rx.active, cpsw_slot, next);
1699	first_new_slot = STAILQ_FIRST(&tmpqueue);
1700	STAILQ_CONCAT(&sc->rx.active, &tmpqueue);
1701	if (first_new_slot == NULL) {
1702		return;
1703	} else if (last_old_slot == NULL) {
1704		/* Start a fresh queue. */
1705		cpsw_write_hdp_slot(sc, &sc->rx, first_new_slot);
1706	} else {
1707		/* Add buffers to end of current queue. */
1708		cpsw_cpdma_write_bd_next(sc, last_old_slot, first_new_slot);
1709		/* If underrun, restart queue. */
1710		if (cpsw_cpdma_read_bd_flags(sc, last_old_slot) & CPDMA_BD_EOQ) {
1711			cpsw_write_hdp_slot(sc, &sc->rx, first_new_slot);
1712		}
1713	}
1714	sc->rx.queue_adds += added;
1715	sc->rx.active_queue_len += added;
1716	if (sc->rx.active_queue_len > sc->rx.max_active_queue_len) {
1717		sc->rx.max_active_queue_len = sc->rx.active_queue_len;
1718	}
1719}
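
/*
 * A note on the EOQ handling above: when the DMA engine runs off the
 * end of a descriptor chain it sets CPDMA_BD_EOQ in the last
 * descriptor it completed.  Patching that descriptor's "next" pointer
 * afterwards is not enough, so the queue is restarted by writing the
 * new head to the HDP register; cpswp_tx_enqueue() uses the same
 * pattern for the TX queue.
 */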
1720
1721static void
1722cpswp_start(struct ifnet *ifp)
1723{
1724	struct cpswp_softc *sc = ifp->if_softc;
1725
1726	CPSW_TX_LOCK(sc->swsc);
1727	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) && sc->swsc->tx.running) {
1728		cpswp_tx_enqueue(sc);
1729		cpsw_tx_dequeue(sc->swsc);
1730	}
1731	CPSW_TX_UNLOCK(sc->swsc);
1732}
1733
1734static void
1735cpswp_tx_enqueue(struct cpswp_softc *sc)
1736{
1737	bus_dma_segment_t segs[CPSW_TXFRAGS];
1738	struct cpsw_cpdma_bd bd;
1739	struct cpsw_slots tmpqueue = STAILQ_HEAD_INITIALIZER(tmpqueue);
1740	struct cpsw_slot *slot, *prev_slot = NULL;
1741	struct cpsw_slot *last_old_slot, *first_new_slot;
1742	struct mbuf *m0;
1743	int error, flags, nsegs, seg, added = 0, padlen;
1744
1745	flags = 0;
1746	if (sc->swsc->dualemac) {
1747		flags = CPDMA_BD_TO_PORT |
1748		    ((sc->unit + 1) & CPDMA_BD_PORT_MASK);
1749	}
1750	/* Pull pending packets from IF queue and prep them for DMA. */
1751	while ((slot = STAILQ_FIRST(&sc->swsc->tx.avail)) != NULL) {
1752		IF_DEQUEUE(&sc->ifp->if_snd, m0);
1753		if (m0 == NULL)
1754			break;
1755
1756		slot->mbuf = m0;
1757		padlen = ETHER_MIN_LEN - slot->mbuf->m_pkthdr.len;
1758		if (padlen < 0)
1759			padlen = 0;
1760
1761		/* Create mapping in DMA memory */
1762		error = bus_dmamap_load_mbuf_sg(sc->swsc->mbuf_dtag,
1763		    slot->dmamap, slot->mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
1764		/* If the packet is too fragmented, try to simplify. */
1765		if (error == EFBIG ||
1766		    (error == 0 &&
1767		    nsegs + (padlen > 0 ? 1 : 0) > sc->swsc->tx.avail_queue_len)) {
1768			bus_dmamap_unload(sc->swsc->mbuf_dtag, slot->dmamap);
1769			if (padlen > 0) /* May as well add padding. */
1770				m_append(slot->mbuf, padlen,
1771				    sc->swsc->null_mbuf->m_data);
1772			m0 = m_defrag(slot->mbuf, M_NOWAIT);
1773			if (m0 == NULL) {
1774				device_printf(sc->dev,
1775				    "Can't defragment packet; dropping\n");
1776				m_freem(slot->mbuf);
1777			} else {
1778				CPSWP_DEBUGF(sc,
1779				    ("Requeueing defragmented packet"));
1780				IF_PREPEND(&sc->ifp->if_snd, m0);
1781			}
1782			slot->mbuf = NULL;
1783			continue;
1784		}
1785		if (error != 0) {
1786			device_printf(sc->dev,
1787			    "%s: Can't setup DMA (error=%d), dropping packet\n",
1788			    __func__, error);
1789			bus_dmamap_unload(sc->swsc->mbuf_dtag, slot->dmamap);
1790			m_freem(slot->mbuf);
1791			slot->mbuf = NULL;
1792			break;
1793		}
1794
1795		bus_dmamap_sync(sc->swsc->mbuf_dtag, slot->dmamap,
1796				BUS_DMASYNC_PREWRITE);
1797
1798		CPSWP_DEBUGF(sc,
1799		    ("Queueing TX packet: %d segments + %d pad bytes",
1800		    nsegs, padlen));
1801
1802		slot->ifp = sc->ifp;
1803		/* If there is only one segment, the for() loop
1804		 * gets skipped and the single buffer gets set up
1805		 * as both SOP and EOP. */
1806		/* Start by setting up the first buffer */
1807		bd.next = 0;
1808		bd.bufptr = segs[0].ds_addr;
1809		bd.bufoff = 0;
1810		bd.buflen = segs[0].ds_len;
1811		bd.pktlen = m_length(slot->mbuf, NULL) + padlen;
1812		bd.flags = CPDMA_BD_SOP | CPDMA_BD_OWNER | flags;
1813		for (seg = 1; seg < nsegs; ++seg) {
1814			/* Save the previous buffer (which isn't EOP) */
1815			cpsw_cpdma_write_bd(sc->swsc, slot, &bd);
1816			if (prev_slot != NULL) {
1817				cpsw_cpdma_write_bd_next(sc->swsc, prev_slot,
1818				    slot);
1819			}
1820			prev_slot = slot;
1821			STAILQ_REMOVE_HEAD(&sc->swsc->tx.avail, next);
1822			sc->swsc->tx.avail_queue_len--;
1823			STAILQ_INSERT_TAIL(&tmpqueue, slot, next);
1824			++added;
1825			slot = STAILQ_FIRST(&sc->swsc->tx.avail);
1826
1827			/* Setup next buffer (which isn't SOP) */
1828			bd.next = 0;
1829			bd.bufptr = segs[seg].ds_addr;
1830			bd.bufoff = 0;
1831			bd.buflen = segs[seg].ds_len;
1832			bd.pktlen = 0;
1833			bd.flags = CPDMA_BD_OWNER | flags;
1834		}
1835		/* Save the final buffer. */
1836		if (padlen <= 0)
1837			bd.flags |= CPDMA_BD_EOP;
1838		cpsw_cpdma_write_bd(sc->swsc, slot, &bd);
1839		if (prev_slot != NULL)
1840			cpsw_cpdma_write_bd_next(sc->swsc, prev_slot, slot);
1841		prev_slot = slot;
1842		STAILQ_REMOVE_HEAD(&sc->swsc->tx.avail, next);
1843		sc->swsc->tx.avail_queue_len--;
1844		STAILQ_INSERT_TAIL(&tmpqueue, slot, next);
1845		++added;
1846
1847		if (padlen > 0) {
1848			slot = STAILQ_FIRST(&sc->swsc->tx.avail);
1849			STAILQ_REMOVE_HEAD(&sc->swsc->tx.avail, next);
1850			sc->swsc->tx.avail_queue_len--;
1851			STAILQ_INSERT_TAIL(&tmpqueue, slot, next);
1852			++added;
1853
1854			/* Setup buffer of null pad bytes (definitely EOP) */
1855			cpsw_cpdma_write_bd_next(sc->swsc, prev_slot, slot);
1856			prev_slot = slot;
1857			bd.next = 0;
1858			bd.bufptr = sc->swsc->null_mbuf_paddr;
1859			bd.bufoff = 0;
1860			bd.buflen = padlen;
1861			bd.pktlen = 0;
1862			bd.flags = CPDMA_BD_EOP | CPDMA_BD_OWNER | flags;
1863			cpsw_cpdma_write_bd(sc->swsc, slot, &bd);
1864			++nsegs;
1865		}
1866
1867		if (nsegs > sc->swsc->tx.longest_chain)
1868			sc->swsc->tx.longest_chain = nsegs;
1869
1870		/* TODO: Should we defer the BPF tap until
1871		 * after all packets are queued? */
1872		BPF_MTAP(sc->ifp, m0);
1873	}
1874
1875	/* Attach the list of new buffers to the hardware TX queue. */
1876	last_old_slot = STAILQ_LAST(&sc->swsc->tx.active, cpsw_slot, next);
1877	first_new_slot = STAILQ_FIRST(&tmpqueue);
1878	STAILQ_CONCAT(&sc->swsc->tx.active, &tmpqueue);
1879	if (first_new_slot == NULL) {
1880		return;
1881	} else if (last_old_slot == NULL) {
1882		/* Start a fresh queue. */
1883		sc->swsc->last_hdp = cpsw_cpdma_bd_paddr(sc->swsc, first_new_slot);
1884		cpsw_write_hdp_slot(sc->swsc, &sc->swsc->tx, first_new_slot);
1885	} else {
1886		/* Add buffers to end of current queue. */
1887		cpsw_cpdma_write_bd_next(sc->swsc, last_old_slot,
1888		    first_new_slot);
1889		/* If underrun, restart queue. */
1890		if (cpsw_cpdma_read_bd_flags(sc->swsc, last_old_slot) &
1891		    CPDMA_BD_EOQ) {
1892			sc->swsc->last_hdp = cpsw_cpdma_bd_paddr(sc->swsc, first_new_slot);
1893			cpsw_write_hdp_slot(sc->swsc, &sc->swsc->tx,
1894			    first_new_slot);
1895		}
1896	}
1897	sc->swsc->tx.queue_adds += added;
1898	sc->swsc->tx.active_queue_len += added;
1899	if (sc->swsc->tx.active_queue_len > sc->swsc->tx.max_active_queue_len) {
1900		sc->swsc->tx.max_active_queue_len = sc->swsc->tx.active_queue_len;
1901	}
1902}
1903
1904static int
1905cpsw_tx_dequeue(struct cpsw_softc *sc)
1906{
1907	struct cpsw_slot *slot, *last_removed_slot = NULL;
1908	struct cpsw_cpdma_bd bd;
1909	uint32_t flags, removed = 0;
1910
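	/*
	 * Per the TRM, 0xfffffffc in the completion pointer register marks
	 * a completed queue teardown; it is acknowledged by writing the
	 * same value back.
	 */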
1911	slot = STAILQ_FIRST(&sc->tx.active);
1912	if (slot == NULL && cpsw_read_cp(sc, &sc->tx) == 0xfffffffc) {
1913		CPSW_DEBUGF(sc, ("TX teardown of an empty queue"));
1914		cpsw_write_cp(sc, &sc->tx, 0xfffffffc);
1915		sc->tx.running = 0;
1916		return (0);
1917	}
1918
1919	/* Pull completed buffers off the hardware TX queue. */
1920	while (slot != NULL) {
1921		flags = cpsw_cpdma_read_bd_flags(sc, slot);
1922		if (flags & CPDMA_BD_OWNER)
1923			break; /* Hardware is still using this packet. */
1924
1925		CPSW_DEBUGF(sc, ("TX removing completed packet"));
1926		bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_POSTWRITE);
1927		bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);
1928		m_freem(slot->mbuf);
1929		slot->mbuf = NULL;
1930		if (slot->ifp)
1931			if_inc_counter(slot->ifp, IFCOUNTER_OPACKETS, 1);
1932
1933		/* Dequeue any additional buffers used by this packet. */
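		/*
		 * Only the SOP slot carries the mbuf pointer, so the NULL
		 * test below releases every remaining slot of this packet
		 * and stops at the next packet's SOP.
		 */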
1934		while (slot != NULL && slot->mbuf == NULL) {
1935			STAILQ_REMOVE_HEAD(&sc->tx.active, next);
1936			STAILQ_INSERT_TAIL(&sc->tx.avail, slot, next);
1937			++removed;
1938			last_removed_slot = slot;
1939			slot = STAILQ_FIRST(&sc->tx.active);
1940		}
1941
1942		/* TearDown complete is only marked on the SOP for the packet. */
1943		if ((flags & (CPDMA_BD_SOP | CPDMA_BD_TDOWNCMPLT)) ==
1944		    (CPDMA_BD_SOP | CPDMA_BD_TDOWNCMPLT)) {
1945			CPSW_DEBUGF(sc, ("TX teardown in progress"));
1946			cpsw_write_cp(sc, &sc->tx, 0xfffffffc);
1947			// TODO: Increment a count of dropped TX packets
1948			sc->tx.running = 0;
1949			break;
1950		}
1951
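		/*
		 * The flags read above came from the SOP descriptor; EOP and
		 * EOQ are raised on the chain's final descriptor, so for
		 * multi-buffer packets re-read the last slot removed.
		 */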
1952		if ((flags & CPDMA_BD_EOP) == 0)
1953			flags = cpsw_cpdma_read_bd_flags(sc, last_removed_slot);
1954		if ((flags & (CPDMA_BD_EOP | CPDMA_BD_EOQ)) ==
1955		    (CPDMA_BD_EOP | CPDMA_BD_EOQ)) {
1956			cpsw_cpdma_read_bd(sc, last_removed_slot, &bd);
1957			if (bd.next != 0 && bd.next != sc->last_hdp) {
1958				/* Restart the queue. */
1959				sc->last_hdp = bd.next;
1960				cpsw_write_4(sc, sc->tx.hdp_offset, bd.next);
1961			}
1962		}
1963	}
1964
1965	if (removed != 0) {
1966		cpsw_write_cp_slot(sc, &sc->tx, last_removed_slot);
1967		sc->tx.queue_removes += removed;
1968		sc->tx.active_queue_len -= removed;
1969		sc->tx.avail_queue_len += removed;
1970		if (sc->tx.avail_queue_len > sc->tx.max_avail_queue_len)
1971			sc->tx.max_avail_queue_len = sc->tx.avail_queue_len;
1972	}
1973	return (removed);
1974}
1975
1976/*
1977 *
1978 * Miscellaneous interrupts.
1979 *
1980 */
1981
1982static void
1983cpsw_intr_rx_thresh(void *arg)
1984{
1985	struct cpsw_softc *sc = arg;
1986	uint32_t stat = cpsw_read_4(sc, CPSW_WR_C_RX_THRESH_STAT(0));
1987
1988	CPSW_DEBUGF(sc, ("stat=%x", stat));
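	/* Re-arm this interrupt line (EOI vector 0 = RX threshold pulse). */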
1989	cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 0);
1990}
1991
1992static void
1993cpsw_intr_misc_host_error(struct cpsw_softc *sc)
1994{
1995	uint32_t intstat;
1996	uint32_t dmastat;
1997	int txerr, rxerr, txchan, rxchan;
1998
1999	printf("\n\n");
2000	device_printf(sc->dev,
2001	    "HOST ERROR:  PROGRAMMING ERROR DETECTED BY HARDWARE\n");
2002	printf("\n\n");
2003	intstat = cpsw_read_4(sc, CPSW_CPDMA_DMA_INTSTAT_MASKED);
2004	device_printf(sc->dev, "CPSW_CPDMA_DMA_INTSTAT_MASKED=0x%x\n", intstat);
2005	dmastat = cpsw_read_4(sc, CPSW_CPDMA_DMASTATUS);
2006	device_printf(sc->dev, "CPSW_CPDMA_DMASTATUS=0x%x\n", dmastat);
2007
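	/*
	 * Decode the CPDMA DMASTATUS host-error fields: TX error code in
	 * bits 23:20, TX channel in 18:16, RX error code in 15:12 and RX
	 * channel in 10:8.
	 */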
2008	txerr = (dmastat >> 20) & 15;
2009	txchan = (dmastat >> 16) & 7;
2010	rxerr = (dmastat >> 12) & 15;
2011	rxchan = (dmastat >> 8) & 7;
2012
2013	switch (txerr) {
2014	case 0: break;
2015	case 1:	printf("SOP error on TX channel %d\n", txchan);
2016		break;
2017	case 2:	printf("Ownership bit not set on SOP buffer on TX channel %d\n", txchan);
2018		break;
2019	case 3:	printf("Zero Next Buffer but not EOP on TX channel %d\n", txchan);
2020		break;
2021	case 4:	printf("Zero Buffer Pointer on TX channel %d\n", txchan);
2022		break;
2023	case 5:	printf("Zero Buffer Length on TX channel %d\n", txchan);
2024		break;
2025	case 6:	printf("Packet length error on TX channel %d\n", txchan);
2026		break;
2027	default: printf("Unknown TX error on TX channel %d\n", txchan);
2028		break;
2029	}
2030
2031	if (txerr != 0) {
2032		printf("CPSW_CPDMA_TX%d_HDP=0x%x\n",
2033		    txchan, cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(txchan)));
2034		printf("CPSW_CPDMA_TX%d_CP=0x%x\n",
2035		    txchan, cpsw_read_4(sc, CPSW_CPDMA_TX_CP(txchan)));
2036		cpsw_dump_queue(sc, &sc->tx.active);
2037	}
2038
2039	switch (rxerr) {
2040	case 0: break;
2041	case 2:	printf("Ownership bit not set on RX channel %d\n", rxchan);
2042		break;
2043	case 4:	printf("Zero Buffer Pointer on RX channel %d\n", rxchan);
2044		break;
2045	case 5:	printf("Zero Buffer Length on RX channel %d\n", rxchan);
2046		break;
2047	case 6:	printf("Buffer offset too big on RX channel %d\n", rxchan);
2048		break;
2049	default: printf("Unknown RX error on RX channel %d\n", rxchan);
2050		break;
2051	}
2052
2053	if (rxerr != 0) {
2054		printf("CPSW_CPDMA_RX%d_HDP=0x%x\n",
2055		    rxchan, cpsw_read_4(sc, CPSW_CPDMA_RX_HDP(rxchan)));
2056		printf("CPSW_CPDMA_RX%d_CP=0x%x\n",
2057		    rxchan, cpsw_read_4(sc, CPSW_CPDMA_RX_CP(rxchan)));
2058		cpsw_dump_queue(sc, &sc->rx.active);
2059	}
2060
2061	printf("\nALE Table\n");
2062	cpsw_ale_dump_table(sc);
2063
2064	// XXX do something useful here??
2065	panic("CPSW HOST ERROR INTERRUPT");
2066
2067	// Suppress this interrupt in the future.
2068	cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_CLEAR, intstat);
2069	printf("XXX HOST ERROR INTERRUPT SUPPRESSED\n");
2070	// The watchdog will probably reset the controller
2071	// in a little while.  It will probably fail again.
2072}
2073
2074static void
2075cpsw_intr_misc(void *arg)
2076{
2077	struct cpsw_softc *sc = arg;
2078	uint32_t stat = cpsw_read_4(sc, CPSW_WR_C_MISC_STAT(0));
2079
2080	if (stat & CPSW_WR_C_MISC_EVNT_PEND)
2081		CPSW_DEBUGF(sc, ("Time sync event interrupt unimplemented"));
2082	if (stat & CPSW_WR_C_MISC_STAT_PEND)
2083		cpsw_stats_collect(sc);
2084	if (stat & CPSW_WR_C_MISC_HOST_PEND)
2085		cpsw_intr_misc_host_error(sc);
2086	if (stat & CPSW_WR_C_MISC_MDIOLINK) {
2087		cpsw_write_4(sc, MDIOLINKINTMASKED,
2088		    cpsw_read_4(sc, MDIOLINKINTMASKED));
2089	}
2090	if (stat & CPSW_WR_C_MISC_MDIOUSER) {
2091		CPSW_DEBUGF(sc,
2092		    ("MDIO operation completed interrupt unimplemented"));
2093	}
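	/* Re-arm this interrupt line (EOI vector 3 = misc pulse). */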
2094	cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 3);
2095}
2096
2097/*
2098 *
2099 * Periodic Checks and Watchdog.
2100 *
2101 */
2102
2103static void
2104cpswp_tick(void *msc)
2105{
2106	struct cpswp_softc *sc = msc;
2107
2108	/* Check for media type change */
2109	mii_tick(sc->mii);
2110	if (sc->media_status != sc->mii->mii_media.ifm_media) {
2111		printf("%s: media type changed (ifm_media=%x)\n", __func__,
2112			sc->mii->mii_media.ifm_media);
2113		cpswp_ifmedia_upd(sc->ifp);
2114	}
2115
2116	/* Schedule another timeout one second from now */
2117	callout_reset(&sc->mii_callout, hz, cpswp_tick, sc);
2118}
2119
2120static void
2121cpswp_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2122{
2123	struct cpswp_softc *sc;
2124	struct mii_data *mii;
2125
2126	sc = ifp->if_softc;
2127	CPSWP_DEBUGF(sc, (""));
2128	CPSW_PORT_LOCK(sc);
2129
2130	mii = sc->mii;
2131	mii_pollstat(mii);
2132
2133	ifmr->ifm_active = mii->mii_media_active;
2134	ifmr->ifm_status = mii->mii_media_status;
2135	CPSW_PORT_UNLOCK(sc);
2136}
2137
2138static int
2139cpswp_ifmedia_upd(struct ifnet *ifp)
2140{
2141	struct cpswp_softc *sc;
2142
2143	sc = ifp->if_softc;
2144	CPSWP_DEBUGF(sc, (""));
2145	CPSW_PORT_LOCK(sc);
2146	mii_mediachg(sc->mii);
2147	sc->media_status = sc->mii->mii_media.ifm_media;
2148	CPSW_PORT_UNLOCK(sc);
2149
2150	return (0);
2151}
2152
2153static void
2154cpsw_tx_watchdog_full_reset(struct cpsw_softc *sc)
2155{
2156	struct cpswp_softc *psc;
2157	int i;
2158
2159	cpsw_debugf_head("CPSW watchdog");
2160	device_printf(sc->dev, "watchdog timeout\n");
2161	for (i = 0; i < CPSW_PORTS; i++) {
2162		if (!sc->dualemac && i != sc->active_slave)
2163			continue;
2164		psc = device_get_softc(sc->port[i].dev);
2165		CPSW_PORT_LOCK(psc);
2166		cpswp_stop_locked(psc);
2167		CPSW_PORT_UNLOCK(psc);
2168	}
2169}
2170
2171static void
2172cpsw_tx_watchdog(void *msc)
2173{
2174	struct cpsw_softc *sc;
2175
2176	sc = msc;
2177	CPSW_GLOBAL_LOCK(sc);
2178	if (sc->tx.active_queue_len == 0 || !sc->tx.running) {
2179		sc->watchdog.timer = 0; /* Nothing to do. */
2180	} else if (sc->tx.queue_removes > sc->tx.queue_removes_at_last_tick) {
2181		sc->watchdog.timer = 0;  /* Stuff done while we weren't looking. */
2182	} else if (cpsw_tx_dequeue(sc) > 0) {
2183		sc->watchdog.timer = 0;  /* We just did something. */
2184	} else {
2185		/* There was something to do but it didn't get done. */
2186		++sc->watchdog.timer;
2187		if (sc->watchdog.timer > 5) {
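		/* After ~5 seconds with no TX progress, reset the ports. */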
2188			sc->watchdog.timer = 0;
2189			++sc->watchdog.resets;
2190			cpsw_tx_watchdog_full_reset(sc);
2191		}
2192	}
2193	sc->tx.queue_removes_at_last_tick = sc->tx.queue_removes;
2194	CPSW_GLOBAL_UNLOCK(sc);
2195
2196	/* Schedule another timeout one second from now */
2197	callout_reset(&sc->watchdog.callout, hz, cpsw_tx_watchdog, sc);
2198}
2199
2200/*
2201 *
2202 * ALE support routines.
2203 *
2204 */
2205
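/*
 * The ALE table is accessed indirectly: writing an index to TBLCTL loads
 * that entry into the three TBLW word registers, and writing the index
 * back with bit 31 set stores the words to the table.
 */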
2206static void
2207cpsw_ale_read_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry)
2208{
2209	cpsw_write_4(sc, CPSW_ALE_TBLCTL, idx & 1023);
2210	ale_entry[0] = cpsw_read_4(sc, CPSW_ALE_TBLW0);
2211	ale_entry[1] = cpsw_read_4(sc, CPSW_ALE_TBLW1);
2212	ale_entry[2] = cpsw_read_4(sc, CPSW_ALE_TBLW2);
2213}
2214
2215static void
2216cpsw_ale_write_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry)
2217{
2218	cpsw_write_4(sc, CPSW_ALE_TBLW0, ale_entry[0]);
2219	cpsw_write_4(sc, CPSW_ALE_TBLW1, ale_entry[1]);
2220	cpsw_write_4(sc, CPSW_ALE_TBLW2, ale_entry[2]);
2221	cpsw_write_4(sc, CPSW_ALE_TBLCTL, 1U << 31 | (idx & 1023));
2222}
2223
2224static void
2225cpsw_ale_remove_all_mc_entries(struct cpsw_softc *sc)
2226{
2227	int i;
2228	uint32_t ale_entry[3];
2229
2230	/* The first ten entries hold the port addresses, broadcast and VLANs. */
2231	for (i = 10; i < CPSW_MAX_ALE_ENTRIES; i++) {
2232		cpsw_ale_read_entry(sc, i, ale_entry);
2233		if ((ALE_TYPE(ale_entry) == ALE_TYPE_ADDR ||
2234		    ALE_TYPE(ale_entry) == ALE_TYPE_VLAN_ADDR) &&
2235	    ALE_MCAST(ale_entry) == 1) { /* MCast link addr */
2236			ale_entry[0] = ale_entry[1] = ale_entry[2] = 0;
2237			cpsw_ale_write_entry(sc, i, ale_entry);
2238		}
2239	}
2240}
2241
2242static int
2243cpsw_ale_mc_entry_set(struct cpsw_softc *sc, uint8_t portmap, int vlan,
2244	uint8_t *mac)
2245{
2246	int free_index = -1, matching_index = -1, i;
2247	uint32_t ale_entry[3], ale_type;
2248
2249	/* Find a matching entry or a free entry. */
2250	for (i = 10; i < CPSW_MAX_ALE_ENTRIES; i++) {
2251		cpsw_ale_read_entry(sc, i, ale_entry);
2252
2253		/* Entry Type[61:60] is 0 for free entry */
2254		if (free_index < 0 && ALE_TYPE(ale_entry) == 0)
2255			free_index = i;
2256
2257		if ((((ale_entry[1] >> 8) & 0xFF) == mac[0]) &&
2258		    (((ale_entry[1] >> 0) & 0xFF) == mac[1]) &&
2259		    (((ale_entry[0] >>24) & 0xFF) == mac[2]) &&
2260		    (((ale_entry[0] >>16) & 0xFF) == mac[3]) &&
2261		    (((ale_entry[0] >> 8) & 0xFF) == mac[4]) &&
2262		    (((ale_entry[0] >> 0) & 0xFF) == mac[5])) {
2263			matching_index = i;
2264			break;
2265		}
2266	}
2267
2268	if (matching_index < 0) {
2269		if (free_index < 0)
2270			return (ENOMEM);
2271		i = free_index;
2272	}
2273
2274	if (vlan != -1)
2275		ale_type = ALE_TYPE_VLAN_ADDR << 28 | vlan << 16;
2276	else
2277		ale_type = ALE_TYPE_ADDR << 28;
2278
2279	/* Set MAC address */
2280	ale_entry[0] = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5];
2281	ale_entry[1] = mac[0] << 8 | mac[1];
2282
2283	/* Entry type[61:60] and Mcast fwd state[63:62] is fw(3). */
2284	ale_entry[1] |= ALE_MCAST_FWD | ale_type;
2285
2286	/* Set portmask [68:66] */
2287	ale_entry[2] = (portmap & 7) << 2;
2288
2289	cpsw_ale_write_entry(sc, i, ale_entry);
2290
2291	return (0);
2292}
2293
2294static void
2295cpsw_ale_dump_table(struct cpsw_softc *sc)
{
2296	int i;
2297	uint32_t ale_entry[3];

2298	for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) {
2299		cpsw_ale_read_entry(sc, i, ale_entry);
2300		switch (ALE_TYPE(ale_entry)) {
2301		case ALE_TYPE_VLAN:
2302			printf("ALE[%4u] %08x %08x %08x ", i, ale_entry[2],
2303				ale_entry[1], ale_entry[0]);
2304			printf("type: %u ", ALE_TYPE(ale_entry));
2305			printf("vlan: %u ", ALE_VLAN(ale_entry));
2306			printf("untag: %u ", ALE_VLAN_UNTAG(ale_entry));
2307			printf("reg flood: %u ", ALE_VLAN_REGFLOOD(ale_entry));
2308			printf("unreg flood: %u ", ALE_VLAN_UNREGFLOOD(ale_entry));
2309			printf("members: %u ", ALE_VLAN_MEMBERS(ale_entry));
2310			printf("\n");
2311			break;
2312		case ALE_TYPE_ADDR:
2313		case ALE_TYPE_VLAN_ADDR:
2314			printf("ALE[%4u] %08x %08x %08x ", i, ale_entry[2],
2315				ale_entry[1], ale_entry[0]);
2316			printf("type: %u ", ALE_TYPE(ale_entry));
2317			printf("mac: %02x:%02x:%02x:%02x:%02x:%02x ",
2318				(ale_entry[1] >> 8) & 0xFF,
2319				(ale_entry[1] >> 0) & 0xFF,
2320				(ale_entry[0] >>24) & 0xFF,
2321				(ale_entry[0] >>16) & 0xFF,
2322				(ale_entry[0] >> 8) & 0xFF,
2323				(ale_entry[0] >> 0) & 0xFF);
2324			printf(ALE_MCAST(ale_entry) ? "mcast " : "ucast ");
2325			if (ALE_TYPE(ale_entry) == ALE_TYPE_VLAN_ADDR)
2326				printf("vlan: %u ", ALE_VLAN(ale_entry));
2327			printf("port: %u ", ALE_PORTS(ale_entry));
2328			printf("\n");
2329			break;
2330		}
2331	}
2332	printf("\n");
2333}
2334
2335static int
2336cpswp_ale_update_addresses(struct cpswp_softc *sc, int purge)
2337{
2338	uint8_t *mac;
2339	uint32_t ale_entry[3], ale_type, portmask;
2340	struct ifmultiaddr *ifma;
2341
2342	if (sc->swsc->dualemac) {
2343		ale_type = ALE_TYPE_VLAN_ADDR << 28 | sc->vlan << 16;
2344		portmask = 1 << (sc->unit + 1) | 1 << 0;
2345	} else {
2346		ale_type = ALE_TYPE_ADDR << 28;
2347		portmask = 7;
2348	}
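	/*
	 * In dual-EMAC mode each slave pairs with the host port on its own
	 * VLAN, so entries name only those two ports; in switch mode an
	 * entry spans all three ports (mask 7).
	 */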
2349
2350	/*
2351	 * Route incoming packets for our MAC address to Port 0 (host).
2352	 * For simplicity, keep this entry at table index 0 for port 1 and
2353	 * at index 2 for port 2 in the ALE.
2354	 */
2355	if_addr_rlock(sc->ifp);
2356	mac = LLADDR((struct sockaddr_dl *)sc->ifp->if_addr->ifa_addr);
2357	ale_entry[0] = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5];
2358	ale_entry[1] = ale_type | mac[0] << 8 | mac[1]; /* addr entry + mac */
2359	ale_entry[2] = 0; /* port = 0 */
2360	cpsw_ale_write_entry(sc->swsc, 0 + 2 * sc->unit, ale_entry);
2361
2362	/* Set outgoing MAC Address for slave port. */
2363	cpsw_write_4(sc->swsc, CPSW_PORT_P_SA_HI(sc->unit + 1),
2364	    mac[3] << 24 | mac[2] << 16 | mac[1] << 8 | mac[0]);
2365	cpsw_write_4(sc->swsc, CPSW_PORT_P_SA_LO(sc->unit + 1),
2366	    mac[5] << 8 | mac[4]);
2367	if_addr_runlock(sc->ifp);
2368
2369	/* Keep the broadcast address at table entry 1 (or 3). */
2370	ale_entry[0] = 0xffffffff; /* Lower 32 bits of MAC */
2371	/* ALE_MCAST_FWD, Addr type, upper 16 bits of Mac */
2372	ale_entry[1] = ALE_MCAST_FWD | ale_type | 0xffff;
2373	ale_entry[2] = portmask << 2;
2374	cpsw_ale_write_entry(sc->swsc, 1 + 2 * sc->unit, ale_entry);
2375
2376	/*
	 * SIOCDELMULTI doesn't specify the particular address being
	 * removed, so we have to remove all and rebuild.
	 */
2378	if (purge)
2379		cpsw_ale_remove_all_mc_entries(sc->swsc);
2380
2381	/* Set other multicast addrs desired. */
2382	if_maddr_rlock(sc->ifp);
2383	TAILQ_FOREACH(ifma, &sc->ifp->if_multiaddrs, ifma_link) {
2384		if (ifma->ifma_addr->sa_family != AF_LINK)
2385			continue;
2386		cpsw_ale_mc_entry_set(sc->swsc, portmask, sc->vlan,
2387		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
2388	}
2389	if_maddr_runlock(sc->ifp);
2390
2391	return (0);
2392}
2393
2394static int
2395cpsw_ale_update_vlan_table(struct cpsw_softc *sc, int vlan, int ports,
2396	int untag, int mcregflood, int mcunregflood)
2397{
2398	int free_index, i, matching_index;
2399	uint32_t ale_entry[3];
2400
2401	free_index = matching_index = -1;
2402	/* Find a matching entry or a free entry. */
2403	for (i = 5; i < CPSW_MAX_ALE_ENTRIES; i++) {
2404		cpsw_ale_read_entry(sc, i, ale_entry);
2405
2406		/* Entry Type[61:60] is 0 for free entry */
2407		if (free_index < 0 && ALE_TYPE(ale_entry) == 0)
2408			free_index = i;
2409
2410		if (ALE_VLAN(ale_entry) == vlan) {
2411			matching_index = i;
2412			break;
2413		}
2414	}
2415
2416	if (matching_index < 0) {
2417		if (free_index < 0)
2418			return (-1);
2419		i = free_index;
2420	}
2421
2422	ale_entry[0] = (untag & 7) << 24 | (mcregflood & 7) << 16 |
2423	    (mcunregflood & 7) << 8 | (ports & 7);
2424	ale_entry[1] = ALE_TYPE_VLAN << 28 | vlan << 16;
2425	ale_entry[2] = 0;
2426	cpsw_ale_write_entry(sc, i, ale_entry);
2427
2428	return (0);
2429}
2430
2431/*
2432 *
2433 * Statistics and Sysctls.
2434 *
2435 */
2436
2437#if 0
2438static void
2439cpsw_stats_dump(struct cpsw_softc *sc)
2440{
2441	int i;
2442	uint32_t r;
2443
2444	for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) {
2445		r = cpsw_read_4(sc, CPSW_STATS_OFFSET +
2446		    cpsw_stat_sysctls[i].reg);
2447		CPSW_DEBUGF(sc, ("%s: %ju + %u = %ju", cpsw_stat_sysctls[i].oid,
2448		    (uintmax_t)sc->shadow_stats[i], r,
2449		    (uintmax_t)sc->shadow_stats[i] + r));
2450	}
2451}
2452#endif
2453
2454static void
2455cpsw_stats_collect(struct cpsw_softc *sc)
2456{
2457	int i;
2458	uint32_t r;
2459
2460	CPSW_DEBUGF(sc, ("Controller shadow statistics updated."));
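	/*
	 * Per the TRM the statistics registers decrement by the value
	 * written, so writing back the value just read zeroes the counter
	 * once it has been folded into the 64-bit shadow copy.
	 */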
2461
2462	for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) {
2463		r = cpsw_read_4(sc, CPSW_STATS_OFFSET +
2464		    cpsw_stat_sysctls[i].reg);
2465		sc->shadow_stats[i] += r;
2466		cpsw_write_4(sc, CPSW_STATS_OFFSET + cpsw_stat_sysctls[i].reg,
2467		    r);
2468	}
2469}
2470
2471static int
2472cpsw_stats_sysctl(SYSCTL_HANDLER_ARGS)
2473{
2474	struct cpsw_softc *sc;
2475	struct cpsw_stat *stat;
2476	uint64_t result;
2477
2478	sc = (struct cpsw_softc *)arg1;
2479	stat = &cpsw_stat_sysctls[oidp->oid_number];
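	/* Shadow total plus counts accrued since the last collection. */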
2480	result = sc->shadow_stats[oidp->oid_number];
2481	result += cpsw_read_4(sc, CPSW_STATS_OFFSET + stat->reg);
2482	return (sysctl_handle_64(oidp, &result, 0, req));
2483}
2484
2485static int
2486cpsw_stat_attached(SYSCTL_HANDLER_ARGS)
2487{
2488	struct cpsw_softc *sc;
2489	struct bintime t;
2490	unsigned result;
2491
2492	sc = (struct cpsw_softc *)arg1;
2493	getbinuptime(&t);
2494	bintime_sub(&t, &sc->attach_uptime);
2495	result = t.sec;
2496	return (sysctl_handle_int(oidp, &result, 0, req));
2497}
2498
2499static int
2500cpsw_intr_coalesce(SYSCTL_HANDLER_ARGS)
2501{
2502	int error;
2503	struct cpsw_softc *sc;
2504	uint32_t ctrl, intr_per_ms;
2505
2506	sc = (struct cpsw_softc *)arg1;
2507	error = sysctl_handle_int(oidp, &sc->coal_us, 0, req);
2508	if (error != 0 || req->newptr == NULL)
2509		return (error);
2510
2511	ctrl = cpsw_read_4(sc, CPSW_WR_INT_CONTROL);
2512	ctrl &= ~(CPSW_WR_INT_PACE_EN | CPSW_WR_INT_PRESCALE_MASK);
2513	if (sc->coal_us == 0) {
2514		/* Disable the interrupt pace hardware. */
2515		cpsw_write_4(sc, CPSW_WR_INT_CONTROL, ctrl);
2516		cpsw_write_4(sc, CPSW_WR_C_RX_IMAX(0), 0);
2517		cpsw_write_4(sc, CPSW_WR_C_TX_IMAX(0), 0);
2518		return (0);
2519	}
2520
2521	if (sc->coal_us > CPSW_WR_C_IMAX_US_MAX)
2522		sc->coal_us = CPSW_WR_C_IMAX_US_MAX;
2523	if (sc->coal_us < CPSW_WR_C_IMAX_US_MIN)
2524		sc->coal_us = CPSW_WR_C_IMAX_US_MIN;
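	/*
	 * The pacer counts interrupts per millisecond, so a minimum gap of
	 * coal_us microseconds maps to 1000 / coal_us.
	 */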
2525	intr_per_ms = 1000 / sc->coal_us;
2526	/* Just to make sure... */
2527	if (intr_per_ms > CPSW_WR_C_IMAX_MAX)
2528		intr_per_ms = CPSW_WR_C_IMAX_MAX;
2529	if (intr_per_ms < CPSW_WR_C_IMAX_MIN)
2530		intr_per_ms = CPSW_WR_C_IMAX_MIN;
2531
2532	/* Set the prescale to produce 4us pulses from the 125 MHz clock. */
2533	ctrl |= (125 * 4) & CPSW_WR_INT_PRESCALE_MASK;
2534
2535	/* Enable the interrupt pace hardware. */
2536	cpsw_write_4(sc, CPSW_WR_C_RX_IMAX(0), intr_per_ms);
2537	cpsw_write_4(sc, CPSW_WR_C_TX_IMAX(0), intr_per_ms);
2538	ctrl |= CPSW_WR_INT_C0_RX_PULSE | CPSW_WR_INT_C0_TX_PULSE;
2539	cpsw_write_4(sc, CPSW_WR_INT_CONTROL, ctrl);
2540
2541	return (0);
2542}
2543
2544static int
2545cpsw_stat_uptime(SYSCTL_HANDLER_ARGS)
2546{
2547	struct cpsw_softc *swsc;
2548	struct cpswp_softc *sc;
2549	struct bintime t;
2550	unsigned result;
2551
2552	swsc = arg1;
2553	sc = device_get_softc(swsc->port[arg2].dev);
2554	if (sc->ifp->if_drv_flags & IFF_DRV_RUNNING) {
2555		getbinuptime(&t);
2556		bintime_sub(&t, &sc->init_uptime);
2557		result = t.sec;
2558	} else
2559		result = 0;
2560	return (sysctl_handle_int(oidp, &result, 0, req));
2561}
2562
2563static void
2564cpsw_add_queue_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *node,
2565	struct cpsw_queue *queue)
2566{
2567	struct sysctl_oid_list *parent;
2568
2569	parent = SYSCTL_CHILDREN(node);
2570	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "totalBuffers",
2571	    CTLFLAG_RD, &queue->queue_slots, 0,
2572	    "Total buffers currently assigned to this queue");
2573	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "activeBuffers",
2574	    CTLFLAG_RD, &queue->active_queue_len, 0,
2575	    "Buffers currently registered with hardware controller");
2576	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "maxActiveBuffers",
2577	    CTLFLAG_RD, &queue->max_active_queue_len, 0,
2578	    "Max value of activeBuffers since last driver reset");
2579	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "availBuffers",
2580	    CTLFLAG_RD, &queue->avail_queue_len, 0,
2581	    "Buffers allocated to this queue but not currently "
2582	    "registered with hardware controller");
2583	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "maxAvailBuffers",
2584	    CTLFLAG_RD, &queue->max_avail_queue_len, 0,
2585	    "Max value of availBuffers since last driver reset");
2586	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "totalEnqueued",
2587	    CTLFLAG_RD, &queue->queue_adds, 0,
2588	    "Total buffers added to queue");
2589	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "totalDequeued",
2590	    CTLFLAG_RD, &queue->queue_removes, 0,
2591	    "Total buffers removed from queue");
2592	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "longestChain",
2593	    CTLFLAG_RD, &queue->longest_chain, 0,
2594	    "Max buffers used for a single packet");
2595}
2596
2597static void
2598cpsw_add_watchdog_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *node,
2599	struct cpsw_softc *sc)
2600{
2601	struct sysctl_oid_list *parent;
2602
2603	parent = SYSCTL_CHILDREN(node);
2604	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "resets",
2605	    CTLFLAG_RD, &sc->watchdog.resets, 0,
2606	    "Total number of watchdog resets");
2607}
2608
2609static void
2610cpsw_add_sysctls(struct cpsw_softc *sc)
2611{
2612	struct sysctl_ctx_list *ctx;
2613	struct sysctl_oid *stats_node, *queue_node, *node;
2614	struct sysctl_oid_list *parent, *stats_parent, *queue_parent;
2615	struct sysctl_oid_list *ports_parent, *port_parent;
2616	char port[16];
2617	int i;
2618
2619	ctx = device_get_sysctl_ctx(sc->dev);
2620	parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
2621
2622	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "debug",
2623	    CTLFLAG_RW, &sc->debug, 0, "Enable switch debug messages");
2624
2625	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "attachedSecs",
2626	    CTLTYPE_UINT | CTLFLAG_RD, sc, 0, cpsw_stat_attached, "IU",
2627	    "Time since driver attach");
2628
2629	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "intr_coalesce_us",
2630	    CTLTYPE_UINT | CTLFLAG_RW, sc, 0, cpsw_intr_coalesce, "IU",
2631	    "Minimum time between interrupts, in microseconds");
2632
2633	node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "ports",
2634	    CTLFLAG_RD, NULL, "CPSW Ports Statistics");
2635	ports_parent = SYSCTL_CHILDREN(node);
2636	for (i = 0; i < CPSW_PORTS; i++) {
2637		if (!sc->dualemac && i != sc->active_slave)
2638			continue;
2639		port[0] = '0' + i;
2640		port[1] = '\0';
2641		node = SYSCTL_ADD_NODE(ctx, ports_parent, OID_AUTO,
2642		    port, CTLFLAG_RD, NULL, "CPSW Port Statistics");
2643		port_parent = SYSCTL_CHILDREN(node);
2644		SYSCTL_ADD_PROC(ctx, port_parent, OID_AUTO, "uptime",
2645		    CTLTYPE_UINT | CTLFLAG_RD, sc, i,
2646		    cpsw_stat_uptime, "IU", "Seconds since driver init");
2647	}
2648
2649	stats_node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats",
2650				     CTLFLAG_RD, NULL, "CPSW Statistics");
2651	stats_parent = SYSCTL_CHILDREN(stats_node);
2652	for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) {
2653		SYSCTL_ADD_PROC(ctx, stats_parent, i,
2654				cpsw_stat_sysctls[i].oid,
2655				CTLTYPE_U64 | CTLFLAG_RD, sc, 0,
2656				cpsw_stats_sysctl, "QU",
2657				cpsw_stat_sysctls[i].oid);
2658	}
2659
2660	queue_node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "queue",
2661	    CTLFLAG_RD, NULL, "CPSW Queue Statistics");
2662	queue_parent = SYSCTL_CHILDREN(queue_node);
2663
2664	node = SYSCTL_ADD_NODE(ctx, queue_parent, OID_AUTO, "tx",
2665	    CTLFLAG_RD, NULL, "TX Queue Statistics");
2666	cpsw_add_queue_sysctls(ctx, node, &sc->tx);
2667
2668	node = SYSCTL_ADD_NODE(ctx, queue_parent, OID_AUTO, "rx",
2669	    CTLFLAG_RD, NULL, "RX Queue Statistics");
2670	cpsw_add_queue_sysctls(ctx, node, &sc->rx);
2671
2672	node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "watchdog",
2673	    CTLFLAG_RD, NULL, "Watchdog Statistics");
2674	cpsw_add_watchdog_sysctls(ctx, node, sc);
2675}
2676