rge.c revision 314667
1/*-
2 * Copyright (c) 2003-2009 RMI Corporation
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 * 3. Neither the name of RMI Corporation, nor the names of its contributors,
14 *    may be used to endorse or promote products derived from this software
15 *    without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 * RMI_BSD
30 */
31
32#include <sys/cdefs.h>
33__FBSDID("$FreeBSD: stable/10/sys/mips/rmi/dev/xlr/rge.c 314667 2017-03-04 13:03:31Z avg $");
34
35#ifdef HAVE_KERNEL_OPTION_HEADERS
36#include "opt_device_polling.h"
37#endif
38
39#include <sys/types.h>
40#include <sys/endian.h>
41#include <sys/systm.h>
42#include <sys/sockio.h>
43#include <sys/param.h>
44#include <sys/lock.h>
45#include <sys/mutex.h>
46#include <sys/proc.h>
47#include <sys/limits.h>
48#include <sys/bus.h>
49#include <sys/mbuf.h>
50#include <sys/malloc.h>
51#include <sys/kernel.h>
52#include <sys/module.h>
53#include <sys/socket.h>
54#define __RMAN_RESOURCE_VISIBLE
55#include <sys/rman.h>
56#include <sys/taskqueue.h>
57#include <sys/smp.h>
58#include <sys/sysctl.h>
59
60#include <net/if.h>
61#include <net/if_arp.h>
62#include <net/ethernet.h>
63#include <net/if_dl.h>
64#include <net/if_media.h>
65
66#include <net/bpf.h>
67#include <net/if_types.h>
68#include <net/if_vlan_var.h>
69
70#include <netinet/in_systm.h>
71#include <netinet/in.h>
72#include <netinet/ip.h>
73
74#include <vm/vm.h>
75#include <vm/pmap.h>
76
77#include <machine/reg.h>
78#include <machine/cpu.h>
79#include <machine/mips_opcode.h>
80#include <machine/asm.h>
81
82#include <machine/param.h>
83#include <machine/intr_machdep.h>
84#include <machine/clock.h>	/* for DELAY */
85#include <machine/cpuregs.h>
86#include <machine/bus.h>	/* */
87#include <machine/resource.h>
88
89#include <dev/mii/mii.h>
90#include <dev/mii/miivar.h>
91#include <dev/mii/brgphyreg.h>
92
93#include <mips/rmi/interrupt.h>
94#include <mips/rmi/msgring.h>
95#include <mips/rmi/iomap.h>
96#include <mips/rmi/pic.h>
97#include <mips/rmi/rmi_mips_exts.h>
98#include <mips/rmi/rmi_boot_info.h>
99#include <mips/rmi/board.h>
100
101#include <mips/rmi/dev/xlr/debug.h>
102#include <mips/rmi/dev/xlr/atx_cpld.h>
103#include <mips/rmi/dev/xlr/xgmac_mdio.h>
104#include <mips/rmi/dev/xlr/rge.h>
105
106#include "miibus_if.h"
107
108MODULE_DEPEND(rge, ether, 1, 1, 1);
109MODULE_DEPEND(rge, miibus, 1, 1, 1);
110
111/* #define DEBUG */
112
113#define RGE_TX_THRESHOLD 1024
114#define RGE_TX_Q_SIZE 1024
115
116#ifdef DEBUG
117#undef dbg_msg
118int mac_debug = 1;
119
120#define dbg_msg(fmt, args...) \
121        do {\
122            if (mac_debug) {\
123                printf("[%s@%d|%s]: cpu_%d: " fmt, \
124                __FILE__, __LINE__, __FUNCTION__,  xlr_cpu_id(), ##args);\
125            }\
126        } while (0)
127
128#define DUMP_PACKETS
129#else
130#undef dbg_msg
131#define dbg_msg(fmt, args...)
132int mac_debug = 0;
133
134#endif
135
136#define MAC_B2B_IPG             88
137
138/* frame sizes need to be cacheline aligned */
139#define MAX_FRAME_SIZE          1536
140#define MAX_FRAME_SIZE_JUMBO    9216
141
142#define MAC_SKB_BACK_PTR_SIZE   SMP_CACHE_BYTES
143#define MAC_PREPAD              0
144#define BYTE_OFFSET             2
145#define XLR_RX_BUF_SIZE (MAX_FRAME_SIZE+BYTE_OFFSET+MAC_PREPAD+MAC_SKB_BACK_PTR_SIZE+SMP_CACHE_BYTES)
146#define MAC_CRC_LEN             4
147#define MAX_NUM_MSGRNG_STN_CC   128
148
149#define MAX_NUM_DESC		1024
150#define MAX_SPILL_SIZE          (MAX_NUM_DESC + 128)
151
152#define MAC_FRIN_TO_BE_SENT_THRESHOLD 16
153
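/*
 * Per-class spill area sizes, presumably the backing store used by the
 * message ring stations when the corresponding free-descriptor buckets
 * overflow; each class gets four times MAX_SPILL_SIZE entries and is set
 * up later through rmi_xlr_config_spill_area().
 */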
154#define MAX_FRIN_SPILL          (MAX_SPILL_SIZE << 2)
155#define MAX_FROUT_SPILL         (MAX_SPILL_SIZE << 2)
156#define MAX_CLASS_0_SPILL       (MAX_SPILL_SIZE << 2)
157#define MAX_CLASS_1_SPILL       (MAX_SPILL_SIZE << 2)
158#define MAX_CLASS_2_SPILL       (MAX_SPILL_SIZE << 2)
159#define MAX_CLASS_3_SPILL       (MAX_SPILL_SIZE << 2)
160
161/*****************************************************************
162 * Phoenix Generic Mac driver
163 *****************************************************************/
164
165extern uint32_t cpu_ltop_map[32];
166
167#ifdef ENABLED_DEBUG
168static int port_counters[4][8] __aligned(XLR_CACHELINE_SIZE);
169
170#define port_inc_counter(port, counter) 	atomic_add_int(&port_counters[port][(counter)], 1)
171#else
172#define port_inc_counter(port, counter)	/* Nothing */
173#endif
174
175int xlr_rge_tx_prepend[MAXCPU];
176int xlr_rge_tx_done[MAXCPU];
177int xlr_rge_get_p2d_failed[MAXCPU];
178int xlr_rge_msg_snd_failed[MAXCPU];
179int xlr_rge_tx_ok_done[MAXCPU];
180int xlr_rge_rx_done[MAXCPU];
181int xlr_rge_repl_done[MAXCPU];
182
183/* #define mac_stats_add(x, val) ({(x) += (val);}) */
184#define mac_stats_add(x, val) xlr_ldaddwu(val, &x)
185
186#define XLR_MAX_CORE 8
187#define RGE_LOCK_INIT(_sc, _name) \
188  mtx_init(&(_sc)->rge_mtx, _name, MTX_NETWORK_LOCK, MTX_DEF)
189#define RGE_LOCK(_sc)   mtx_lock(&(_sc)->rge_mtx)
190#define RGE_LOCK_ASSERT(_sc)  mtx_assert(&(_sc)->rge_mtx, MA_OWNED)
191#define RGE_UNLOCK(_sc)   mtx_unlock(&(_sc)->rge_mtx)
192#define RGE_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->rge_mtx)
193
194#define XLR_MAX_MACS     8
195#define XLR_MAX_TX_FRAGS 14
196#define MAX_P2D_DESC_PER_PORT 512
197struct p2d_tx_desc {
198	uint64_t frag[XLR_MAX_TX_FRAGS + 2];
199};
200
201#define MAX_TX_RING_SIZE (XLR_MAX_MACS * MAX_P2D_DESC_PER_PORT * sizeof(struct p2d_tx_desc))
202
203struct rge_softc *dev_mac[XLR_MAX_MACS];
204static int dev_mac_xgs0;
205static int dev_mac_gmac0;
206
207static int gmac_common_init_done;
208
209
210static int rge_probe(device_t);
211static int rge_attach(device_t);
212static int rge_detach(device_t);
213static int rge_suspend(device_t);
214static int rge_resume(device_t);
215static void rge_release_resources(struct rge_softc *);
216static void rge_rx(struct rge_softc *, vm_paddr_t paddr, int);
217static void rge_intr(void *);
218static void rge_start_locked(struct ifnet *, int);
219static void rge_start(struct ifnet *);
220static int rge_ioctl(struct ifnet *, u_long, caddr_t);
221static void rge_init(void *);
222static void rge_stop(struct rge_softc *);
223static int rge_shutdown(device_t);
224static void rge_reset(struct rge_softc *);
225
226static struct mbuf *get_mbuf(void);
227static void free_buf(vm_paddr_t paddr);
228static void *get_buf(void);
229
230static void xlr_mac_get_hwaddr(struct rge_softc *);
231static void xlr_mac_setup_hwaddr(struct driver_data *);
232static void rmi_xlr_mac_set_enable(struct driver_data *priv, int flag);
233static void rmi_xlr_xgmac_init(struct driver_data *priv);
234static void rmi_xlr_gmac_init(struct driver_data *priv);
235static void mac_common_init(void);
236static int rge_mii_write(device_t, int, int, int);
237static int rge_mii_read(device_t, int, int);
238static void rmi_xlr_mac_mii_statchg(device_t);
239static int rmi_xlr_mac_mediachange(struct ifnet *);
240static void rmi_xlr_mac_mediastatus(struct ifnet *, struct ifmediareq *);
241static void xlr_mac_set_rx_mode(struct rge_softc *sc);
242void
243rmi_xlr_mac_msgring_handler(int bucket, int size, int code,
244    int stid, struct msgrng_msg *msg,
245    void *data);
246static void mac_frin_replenish(void *);
247static int rmi_xlr_mac_open(struct rge_softc *);
248static int rmi_xlr_mac_close(struct rge_softc *);
249static int
250mac_xmit(struct mbuf *, struct rge_softc *,
251    struct driver_data *, int, struct p2d_tx_desc *);
252static int rmi_xlr_mac_xmit(struct mbuf *, struct rge_softc *, int, struct p2d_tx_desc *);
253static struct rge_softc_stats *rmi_xlr_mac_get_stats(struct rge_softc *sc);
254static void rmi_xlr_mac_set_multicast_list(struct rge_softc *sc);
255static int rmi_xlr_mac_change_mtu(struct rge_softc *sc, int new_mtu);
256static int rmi_xlr_mac_fill_rxfr(struct rge_softc *sc);
257static void rmi_xlr_config_spill_area(struct driver_data *priv);
258static int rmi_xlr_mac_set_speed(struct driver_data *s, xlr_mac_speed_t speed);
259static int
260rmi_xlr_mac_set_duplex(struct driver_data *s,
261    xlr_mac_duplex_t duplex, xlr_mac_fc_t fc);
262static void serdes_regs_init(struct driver_data *priv);
263static int rmi_xlr_gmac_reset(struct driver_data *priv);
264
265/* Statistics */
266static int get_p2d_desc_failed = 0;
267static int msg_snd_failed = 0;
268
269SYSCTL_INT(_hw, OID_AUTO, get_p2d_failed, CTLFLAG_RW,
270    &get_p2d_desc_failed, 0, "p2d desc failed");
271SYSCTL_INT(_hw, OID_AUTO, msg_snd_failed, CTLFLAG_RW,
272    &msg_snd_failed, 0, "msg snd failed");
273
274struct callout xlr_tx_stop_bkp;
275
276static device_method_t rge_methods[] = {
277	/* Device interface */
278	DEVMETHOD(device_probe, rge_probe),
279	DEVMETHOD(device_attach, rge_attach),
280	DEVMETHOD(device_detach, rge_detach),
281	DEVMETHOD(device_shutdown, rge_shutdown),
282	DEVMETHOD(device_suspend, rge_suspend),
283	DEVMETHOD(device_resume, rge_resume),
284
285	/* MII interface */
286	DEVMETHOD(miibus_readreg, rge_mii_read),
287	DEVMETHOD(miibus_statchg, rmi_xlr_mac_mii_statchg),
288	DEVMETHOD(miibus_writereg, rge_mii_write),
289	{0, 0}
290};
291
292static driver_t rge_driver = {
293	"rge",
294	rge_methods,
295	sizeof(struct rge_softc)
296};
297
298static devclass_t rge_devclass;
299
300DRIVER_MODULE(rge, iodi, rge_driver, rge_devclass, 0, 0);
301DRIVER_MODULE(miibus, rge, miibus_driver, miibus_devclass, 0, 0);
302
303#ifndef __STR
304#define __STR(x) #x
305#endif
306#ifndef STR
307#define STR(x) __STR(x)
308#endif
309
310void *xlr_tx_ring_mem;
311
312struct tx_desc_node {
313	struct p2d_tx_desc *ptr;
314	TAILQ_ENTRY(tx_desc_node) list;
315};
316
317#define XLR_MAX_TX_DESC_NODES (XLR_MAX_MACS * MAX_P2D_DESC_PER_PORT)
318struct tx_desc_node tx_desc_nodes[XLR_MAX_TX_DESC_NODES];
319static volatile int xlr_tot_avail_p2d[XLR_MAX_CORE];
320static int xlr_total_active_core = 0;
321
322/*
323 * This should contain the list of all free tx frag desc nodes pointing to tx
324 * p2d arrays
325 */
326static
327TAILQ_HEAD(, tx_desc_node) tx_frag_desc[XLR_MAX_CORE] =
328{
329	TAILQ_HEAD_INITIALIZER(tx_frag_desc[0]),
330	TAILQ_HEAD_INITIALIZER(tx_frag_desc[1]),
331	TAILQ_HEAD_INITIALIZER(tx_frag_desc[2]),
332	TAILQ_HEAD_INITIALIZER(tx_frag_desc[3]),
333	TAILQ_HEAD_INITIALIZER(tx_frag_desc[4]),
334	TAILQ_HEAD_INITIALIZER(tx_frag_desc[5]),
335	TAILQ_HEAD_INITIALIZER(tx_frag_desc[6]),
336	TAILQ_HEAD_INITIALIZER(tx_frag_desc[7]),
337};
338
339/* This contains a list of free tx frag node descriptors */
340static
341TAILQ_HEAD(, tx_desc_node) free_tx_frag_desc[XLR_MAX_CORE] =
342{
343	TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[0]),
344	TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[1]),
345	TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[2]),
346	TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[3]),
347	TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[4]),
348	TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[5]),
349	TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[6]),
350	TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[7]),
351};
352
353static struct mtx tx_desc_lock[XLR_MAX_CORE];
354
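/*
 * Build a free-in (RFR) descriptor message: msg0 carries only the 40-bit,
 * 32-byte-aligned physical address of the receive buffer being handed back
 * to the MAC; the remaining message words are unused.
 */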
355static inline void
356mac_make_desc_rfr(struct msgrng_msg *msg,
357    vm_paddr_t addr)
358{
359	msg->msg0 = (uint64_t) addr & 0xffffffffe0ULL;
360	msg->msg1 = msg->msg2 = msg->msg3 = 0;
361}
362
363#define MAC_TX_DESC_ALIGNMENT (XLR_CACHELINE_SIZE - 1)
364
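/*
 * Split the pool of TX P2D descriptors evenly among the cores that have at
 * least one hardware thread enabled in xlr_hw_thread_mask.
 */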
365static void
366init_p2d_allocation(void)
367{
368	int active_core[8] = {0};
369	int i = 0;
370	uint32_t cpumask;
371	int cpu;
372
373	cpumask = xlr_hw_thread_mask;
374
375	for (i = 0; i < 32; i++) {
376		if (cpumask & (1 << i)) {
377			cpu = i;
378			if (!active_core[cpu / 4]) {
379				active_core[cpu / 4] = 1;
380				xlr_total_active_core++;
381			}
382		}
383	}
384	for (i = 0; i < XLR_MAX_CORE; i++) {
385		if (active_core[i])
386			xlr_tot_avail_p2d[i] = XLR_MAX_TX_DESC_NODES / xlr_total_active_core;
387	}
388	printf("Total active cores: %d\n", xlr_total_active_core);
389}
390
391
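/*
 * Allocate the TX descriptor ring from physically contiguous memory below
 * 256MB so it can be addressed through KSEG0, then distribute the
 * descriptor nodes across the active cores' per-core free lists.
 */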
392static void
393init_tx_ring(void)
394{
395	int i;
396	int j = 0;
397	struct tx_desc_node *start, *node;
398	struct p2d_tx_desc *tx_desc;
399	vm_paddr_t paddr;
400	vm_offset_t unmapped_addr;
401
402	for (i = 0; i < XLR_MAX_CORE; i++)
403		mtx_init(&tx_desc_lock[i], "xlr tx_desc", NULL, MTX_SPIN);
404
405	start = &tx_desc_nodes[0];
406	/* TODO: try to get this from KSEG0 */
407	xlr_tx_ring_mem = contigmalloc((MAX_TX_RING_SIZE + XLR_CACHELINE_SIZE),
408	    M_DEVBUF, M_NOWAIT | M_ZERO, 0,
409	    0x10000000, XLR_CACHELINE_SIZE, 0);
410
411	if (xlr_tx_ring_mem == NULL) {
412		panic("TX ring memory allocation failed");
413	}
414	paddr = vtophys((vm_offset_t)xlr_tx_ring_mem);
415
416	unmapped_addr = MIPS_PHYS_TO_KSEG0(paddr);
417
418
419	tx_desc = (struct p2d_tx_desc *)unmapped_addr;
420
421	for (i = 0; i < XLR_MAX_TX_DESC_NODES; i++) {
422		node = start + i;
423		node->ptr = tx_desc;
424		tx_desc++;
425		TAILQ_INSERT_HEAD(&tx_frag_desc[j], node, list);
426		j = (i / (XLR_MAX_TX_DESC_NODES / xlr_total_active_core));
427	}
428}
429
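/*
 * Pop a free TX P2D descriptor from this core's list; the list node itself
 * is parked on free_tx_frag_desc so it can be reused when the descriptor
 * is returned through free_p2d_desc().
 */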
430static inline struct p2d_tx_desc *
431get_p2d_desc(void)
432{
433	struct tx_desc_node *node;
434	struct p2d_tx_desc *tx_desc = NULL;
435	int cpu = xlr_core_id();
436
437	mtx_lock_spin(&tx_desc_lock[cpu]);
438	node = TAILQ_FIRST(&tx_frag_desc[cpu]);
439	if (node) {
440		xlr_tot_avail_p2d[cpu]--;
441		TAILQ_REMOVE(&tx_frag_desc[cpu], node, list);
442		tx_desc = node->ptr;
443		TAILQ_INSERT_HEAD(&free_tx_frag_desc[cpu], node, list);
444	} else {
445		/* Increment p2d desc fail count */
446		get_p2d_desc_failed++;
447	}
448	mtx_unlock_spin(&tx_desc_lock[cpu]);
449	return tx_desc;
450}
451static void
452free_p2d_desc(struct p2d_tx_desc *tx_desc)
453{
454	struct tx_desc_node *node;
455	int cpu = xlr_core_id();
456
457	mtx_lock_spin(&tx_desc_lock[cpu]);
458	node = TAILQ_FIRST(&free_tx_frag_desc[cpu]);
459	KASSERT((node != NULL), ("Free TX frag node list is empty\n"));
460
461	TAILQ_REMOVE(&free_tx_frag_desc[cpu], node, list);
462	node->ptr = tx_desc;
463	TAILQ_INSERT_HEAD(&tx_frag_desc[cpu], node, list);
464	xlr_tot_avail_p2d[cpu]++;
465	mtx_unlock_spin(&tx_desc_lock[cpu]);
466
467}
468
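/*
 * Walk the mbuf chain and build the P2D fragment list: each physically
 * contiguous piece becomes one fragment (an mbuf that crosses a page
 * boundary is split in two), the last data fragment gets the EOP bit, and
 * a final entry carries the descriptor's own physical address plus the
 * free-back station id (fr_stid) so that the transmit-complete message
 * returns to this thread's bucket.
 */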
469static int
470build_frag_list(struct mbuf *m_head, struct msgrng_msg *p2p_msg, struct p2d_tx_desc *tx_desc)
471{
472	struct mbuf *m;
473	vm_paddr_t paddr;
474	uint64_t p2d_len;
475	int nfrag;
476	vm_paddr_t p1, p2;
477	uint32_t len1, len2;
478	vm_offset_t taddr;
479	uint64_t fr_stid;
480
481	fr_stid = (xlr_core_id() << 3) + xlr_thr_id() + 4;
482
483	if (tx_desc == NULL)
484		return 1;
485
486	nfrag = 0;
487	for (m = m_head; m != NULL; m = m->m_next) {
488		if ((nfrag + 1) >= XLR_MAX_TX_FRAGS) {
489			free_p2d_desc(tx_desc);
490			return 1;
491		}
492		if (m->m_len != 0) {
493			paddr = vtophys(mtod(m, vm_offset_t));
494			p1 = paddr + m->m_len;
495			p2 = vtophys(((vm_offset_t)m->m_data + m->m_len));
496			if (p1 != p2) {
497				len1 = (uint32_t)
498				    (PAGE_SIZE - (paddr & PAGE_MASK));
499				tx_desc->frag[nfrag] = (127ULL << 54) |
500				    ((uint64_t) len1 << 40) | paddr;
501				nfrag++;
502				taddr = (vm_offset_t)m->m_data + len1;
503				p2 = vtophys(taddr);
504				len2 = m->m_len - len1;
505				if (len2 == 0)
506					continue;
507				if (nfrag >= XLR_MAX_TX_FRAGS)
508					panic("TX frags exceeded");
509
510				tx_desc->frag[nfrag] = (127ULL << 54) |
511				    ((uint64_t) len2 << 40) | p2;
512
513				taddr += len2;
514				p1 = vtophys(taddr);
515
516				if ((p2 + len2) != p1) {
517					printf("p1 = %p p2 = %p\n", (void *)p1, (void *)p2);
518					printf("len1 = %x len2 = %x\n", len1,
519					    len2);
520					printf("m_data %p\n", m->m_data);
521					DELAY(1000000);
522					panic("Multiple mbuf segments discontiguous\n");
523				}
524			} else {
525				tx_desc->frag[nfrag] = (127ULL << 54) |
526				    ((uint64_t) m->m_len << 40) | paddr;
527			}
528			nfrag++;
529		}
530	}
531	/* set eop in the last tx p2d desc */
532	tx_desc->frag[nfrag - 1] |= (1ULL << 63);
533	paddr = vtophys((vm_offset_t)tx_desc);
534	tx_desc->frag[nfrag] = (1ULL << 63) | (fr_stid << 54) | paddr;
535	nfrag++;
536	tx_desc->frag[XLR_MAX_TX_FRAGS] = (uint64_t)(intptr_t)tx_desc;
537	tx_desc->frag[XLR_MAX_TX_FRAGS + 1] = (uint64_t)(intptr_t)m_head;
538
539	p2d_len = (nfrag * 8);
540	p2p_msg->msg0 = (1ULL << 63) | (1ULL << 62) | (127ULL << 54) |
541	    (p2d_len << 40) | paddr;
542
543	return 0;
544}
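/*
 * Handle a transmit-complete free-back message: msg0 holds the physical
 * address of the p2d descriptor, whose last two slots stash the
 * descriptor's own pointer (used as a sanity check here) and the mbuf
 * chain to free.
 */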
545static void
546release_tx_desc(struct msgrng_msg *msg, int rel_buf)
547{
548	struct p2d_tx_desc *tx_desc, *chk_addr;
549	struct mbuf *m;
550
551	tx_desc = (struct p2d_tx_desc *)MIPS_PHYS_TO_KSEG0(msg->msg0);
552	chk_addr = (struct p2d_tx_desc *)(intptr_t)tx_desc->frag[XLR_MAX_TX_FRAGS];
553	if (tx_desc != chk_addr) {
554		printf("Address %p does not match stored addr %p - we leaked a descriptor\n",
555		    tx_desc, chk_addr);
556		return;
557	}
558	if (rel_buf) {
559		m = (struct mbuf *)(intptr_t)tx_desc->frag[XLR_MAX_TX_FRAGS + 1];
560		m_freem(m);
561	}
562	free_p2d_desc(tx_desc);
563}
564
565
566static struct mbuf *
567get_mbuf(void)
568{
569	struct mbuf *m_new = NULL;
570
571	if ((m_new = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR)) == NULL)
572		return NULL;
573
574	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
576	return m_new;
577}
578
579static void
580free_buf(vm_paddr_t paddr)
581{
582	struct mbuf *m;
583	uint64_t mag;
584	uint32_t sr;
585
586	sr = xlr_enable_kx();
587	m = (struct mbuf *)(intptr_t)xlr_paddr_ld(paddr - XLR_CACHELINE_SIZE);
588	mag = xlr_paddr_ld(paddr - XLR_CACHELINE_SIZE + sizeof(uint64_t));
589	xlr_restore_kx(sr);
590	if (mag != 0xf00bad) {
591		printf("Something is wrong: paddr %lx has magic %lx, not 0xf00bad\n",
592		    (u_long)paddr, (u_long)mag);
593		return;
594	}
595	if (m != NULL)
596		m_freem(m);
597}
598
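/*
 * Receive buffers carry, in the cache line just before the data area, a
 * back-pointer to the owning mbuf and a 0xf00bad magic word; free_buf()
 * above uses these to validate and recover the mbuf when the buffer comes
 * back from the hardware.
 */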
599static void *
600get_buf(void)
601{
602	struct mbuf *m_new = NULL;
603	uint64_t *md;
604#ifdef INVARIANTS
605	vm_paddr_t temp1, temp2;
606#endif
607
608	m_new = get_mbuf();
609	if (m_new == NULL)
610		return NULL;
611
612	m_adj(m_new, XLR_CACHELINE_SIZE - ((uintptr_t)m_new->m_data & 0x1f));
613	md = (uint64_t *)m_new->m_data;
614	md[0] = (uintptr_t)m_new;	/* Back Ptr */
615	md[1] = 0xf00bad;
616	m_adj(m_new, XLR_CACHELINE_SIZE);
617
618#ifdef INVARIANTS
619	temp1 = vtophys((vm_offset_t)m_new->m_data);
620	temp2 = vtophys((vm_offset_t)m_new->m_data + 1536);
621	if ((temp1 + 1536) != temp2)
622		panic("ALLOCED BUFFER IS NOT CONTIGUOUS\n");
623#endif
624	return (void *)m_new->m_data;
625}
626
627/**********************************************************************
628 **********************************************************************/
629static void
630rmi_xlr_mac_set_enable(struct driver_data *priv, int flag)
631{
632	uint32_t regval;
633	int tx_threshold = 1518;
634
635	if (flag) {
636		regval = xlr_read_reg(priv->mmio, R_TX_CONTROL);
637		regval |= (1 << O_TX_CONTROL__TxEnable) |
638		    (tx_threshold << O_TX_CONTROL__TxThreshold);
639
640		xlr_write_reg(priv->mmio, R_TX_CONTROL, regval);
641
642		regval = xlr_read_reg(priv->mmio, R_RX_CONTROL);
643		regval |= 1 << O_RX_CONTROL__RxEnable;
644		if (priv->mode == XLR_PORT0_RGMII)
645			regval |= 1 << O_RX_CONTROL__RGMII;
646		xlr_write_reg(priv->mmio, R_RX_CONTROL, regval);
647
648		regval = xlr_read_reg(priv->mmio, R_MAC_CONFIG_1);
649		regval |= (O_MAC_CONFIG_1__txen | O_MAC_CONFIG_1__rxen);
650		xlr_write_reg(priv->mmio, R_MAC_CONFIG_1, regval);
651	} else {
652		regval = xlr_read_reg(priv->mmio, R_TX_CONTROL);
653		regval &= ~((1 << O_TX_CONTROL__TxEnable) |
654		    (tx_threshold << O_TX_CONTROL__TxThreshold));
655
656		xlr_write_reg(priv->mmio, R_TX_CONTROL, regval);
657
658		regval = xlr_read_reg(priv->mmio, R_RX_CONTROL);
659		regval &= ~(1 << O_RX_CONTROL__RxEnable);
660		xlr_write_reg(priv->mmio, R_RX_CONTROL, regval);
661
662		regval = xlr_read_reg(priv->mmio, R_MAC_CONFIG_1);
663		regval &= ~(O_MAC_CONFIG_1__txen | O_MAC_CONFIG_1__rxen);
664		xlr_write_reg(priv->mmio, R_MAC_CONFIG_1, regval);
665	}
666}
667
668/**********************************************************************
669 **********************************************************************/
670static __inline__ int
671xlr_mac_send_fr(struct driver_data *priv,
672    vm_paddr_t addr, int len)
673{
674	struct msgrng_msg msg;
675	int stid = priv->rfrbucket;
676	int code, ret;
677	uint32_t msgrng_flags;
678#ifdef INVARIANTS
679	int i = 0;
680#endif
681
682	mac_make_desc_rfr(&msg, addr);
683
684	/* Send the packet to MAC */
685	dbg_msg("mac_%d: Sending free packet %lx to stid %d\n",
686	    priv->instance, (u_long)addr, stid);
687	if (priv->type == XLR_XGMAC)
688		code = MSGRNG_CODE_XGMAC;        /* WHY? */
689	else
690		code = MSGRNG_CODE_MAC;
691
692	do {
693		msgrng_flags = msgrng_access_enable();
694		ret = message_send(1, code, stid, &msg);
695		msgrng_restore(msgrng_flags);
696		KASSERT(i++ < 100000, ("Too many credit fails\n"));
697	} while (ret != 0);
698
699	return 0;
700}
701
702/**************************************************************/
703
704static void
705xgmac_mdio_setup(volatile unsigned int *_mmio)
706{
707	int i;
708	uint32_t rd_data;
709
710	for (i = 0; i < 4; i++) {
711		rd_data = xmdio_read(_mmio, 1, 0x8000 + i);
712		rd_data = rd_data & 0xffffdfff;	/* clear isolate bit */
713		xmdio_write(_mmio, 1, 0x8000 + i, rd_data);
714	}
715}
716
717/**********************************************************************
718 *  Init MII interface
719 *
720 *  Input parameters:
721 *  	   s - priv structure
722 ********************************************************************* */
723#define PHY_STATUS_RETRIES 25000
724
725static void
726rmi_xlr_mac_mii_init(struct driver_data *priv)
727{
728	xlr_reg_t *mii_mmio = priv->mii_mmio;
729
730	/* use the lowest clock divisor - divisor 28 */
731	xlr_write_reg(mii_mmio, R_MII_MGMT_CONFIG, 0x07);
732}
733
734/**********************************************************************
735 *  Read a PHY register.
736 *
737 *  Input parameters:
738 *  	   s - priv structure
739 *  	   phyaddr - PHY's address
740 *  	   regidx = index of register to read
741 *
742 *  Return value:
743 *  	   value read, or 0 if an error occurred.
744 ********************************************************************* */
745
746static int
747rge_mii_read_internal(xlr_reg_t * mii_mmio, int phyaddr, int regidx)
748{
749	int i = 0;
750
751	/* setup the phy reg to be used */
752	xlr_write_reg(mii_mmio, R_MII_MGMT_ADDRESS,
753	    (phyaddr << 8) | (regidx << 0));
754	/* Issue the read command */
755	xlr_write_reg(mii_mmio, R_MII_MGMT_COMMAND,
756	    (1 << O_MII_MGMT_COMMAND__rstat));
757
758	/* poll for the read cycle to complete */
759	for (i = 0; i < PHY_STATUS_RETRIES; i++) {
760		if (xlr_read_reg(mii_mmio, R_MII_MGMT_INDICATORS) == 0)
761			break;
762	}
763
764	/* clear the read cycle */
765	xlr_write_reg(mii_mmio, R_MII_MGMT_COMMAND, 0);
766
767	if (i == PHY_STATUS_RETRIES) {
768		return 0xffffffff;
769	}
770	/* Read the data back */
771	return xlr_read_reg(mii_mmio, R_MII_MGMT_STATUS);
772}
773
774static int
775rge_mii_read(device_t dev, int phyaddr, int regidx)
776{
777	struct rge_softc *sc = device_get_softc(dev);
778
779	return rge_mii_read_internal(sc->priv.mii_mmio, phyaddr, regidx);
780}
781
782/**********************************************************************
783 *  Set MII hooks to newly selected media
784 *
785 *  Input parameters:
786 *  	   ifp - Interface Pointer
787 *
788 *  Return value:
789 *  	   nothing
790 ********************************************************************* */
791static int
792rmi_xlr_mac_mediachange(struct ifnet *ifp)
793{
794	struct rge_softc *sc = ifp->if_softc;
795
796	if (ifp->if_flags & IFF_UP)
797		mii_mediachg(&sc->rge_mii);
798
799	return 0;
800}
801
802/**********************************************************************
803 *  Get the current interface media status
804 *
805 *  Input parameters:
806 *  	   ifp  - Interface Pointer
807 *  	   ifmr - Interface media request ptr
808 *
809 *  Return value:
810 *  	   nothing
811 ********************************************************************* */
812static void
813rmi_xlr_mac_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
814{
815	struct rge_softc *sc = ifp->if_softc;
816
817	/* Check whether this interface is active or not. */
818	ifmr->ifm_status = IFM_AVALID;
819	if (sc->link_up) {
820		ifmr->ifm_status |= IFM_ACTIVE;
821	} else {
822		ifmr->ifm_active = IFM_ETHER;
823	}
824}
825
826/**********************************************************************
827 *  Write a value to a PHY register.
828 *
829 *  Input parameters:
830 *  	   s - priv structure
831 *  	   phyaddr - PHY to use
832 *  	   regidx - register within the PHY
833 *  	   regval - data to write to register
834 *
835 *  Return value:
836 *  	   nothing
837 ********************************************************************* */
838static void
839rge_mii_write_internal(xlr_reg_t * mii_mmio, int phyaddr, int regidx, int regval)
840{
841	int i = 0;
842
843	xlr_write_reg(mii_mmio, R_MII_MGMT_ADDRESS,
844	    (phyaddr << 8) | (regidx << 0));
845
846	/* Write the data which starts the write cycle */
847	xlr_write_reg(mii_mmio, R_MII_MGMT_WRITE_DATA, regval);
848
849	/* poll for the write cycle to complete */
850	for (i = 0; i < PHY_STATUS_RETRIES; i++) {
851		if (xlr_read_reg(mii_mmio, R_MII_MGMT_INDICATORS) == 0)
852			break;
853	}
854
855	return;
856}
857
858static int
859rge_mii_write(device_t dev, int phyaddr, int regidx, int regval)
860{
861	struct rge_softc *sc = device_get_softc(dev);
862
863	rge_mii_write_internal(sc->priv.mii_mmio, phyaddr, regidx, regval);
864	return (0);
865}
866
867static void
868rmi_xlr_mac_mii_statchg(struct device *dev)
869{
870}
871
872static void
873serdes_regs_init(struct driver_data *priv)
874{
875	xlr_reg_t *mmio_gpio = (xlr_reg_t *) (xlr_io_base + XLR_IO_GPIO_OFFSET);
876
877	/* Initialize SERDES CONTROL Registers */
878	rge_mii_write_internal(priv->serdes_mmio, 26, 0, 0x6DB0);
879	rge_mii_write_internal(priv->serdes_mmio, 26, 1, 0xFFFF);
880	rge_mii_write_internal(priv->serdes_mmio, 26, 2, 0xB6D0);
881	rge_mii_write_internal(priv->serdes_mmio, 26, 3, 0x00FF);
882	rge_mii_write_internal(priv->serdes_mmio, 26, 4, 0x0000);
883	rge_mii_write_internal(priv->serdes_mmio, 26, 5, 0x0000);
884	rge_mii_write_internal(priv->serdes_mmio, 26, 6, 0x0005);
885	rge_mii_write_internal(priv->serdes_mmio, 26, 7, 0x0001);
886	rge_mii_write_internal(priv->serdes_mmio, 26, 8, 0x0000);
887	rge_mii_write_internal(priv->serdes_mmio, 26, 9, 0x0000);
888	rge_mii_write_internal(priv->serdes_mmio, 26, 10, 0x0000);
889
890	/*
891	 * GPIO settings that affect the serdes - these still need figuring out
892	 */
893	DELAY(100);
894	xlr_write_reg(mmio_gpio, 0x20, 0x7e6802);
895	xlr_write_reg(mmio_gpio, 0x10, 0x7104);
896	DELAY(100);
897
898	/*
899	 * This kludge is needed to set up the serdes (?) clock correctly on
900	 * some XLS boards
901	 */
902	if ((xlr_boot1_info.board_major_version == RMI_XLR_BOARD_ARIZONA_XI ||
903	    xlr_boot1_info.board_major_version == RMI_XLR_BOARD_ARIZONA_XII) &&
904	    xlr_boot1_info.board_minor_version == 4) {
905		/* use 125 Mhz instead of 156.25Mhz ref clock */
906		DELAY(100);
907		xlr_write_reg(mmio_gpio, 0x10, 0x7103);
908		xlr_write_reg(mmio_gpio, 0x21, 0x7103);
909		DELAY(100);
910	}
911
912	return;
913}
914
915static void
916serdes_autoconfig(struct driver_data *priv)
917{
918	int delay = 100000;
919
920	/* Enable Auto negotiation in the PCS Layer */
921	rge_mii_write_internal(priv->pcs_mmio, 27, 0, 0x1000);
922	DELAY(delay);
923	rge_mii_write_internal(priv->pcs_mmio, 27, 0, 0x0200);
924	DELAY(delay);
925
926	rge_mii_write_internal(priv->pcs_mmio, 28, 0, 0x1000);
927	DELAY(delay);
928	rge_mii_write_internal(priv->pcs_mmio, 28, 0, 0x0200);
929	DELAY(delay);
930
931	rge_mii_write_internal(priv->pcs_mmio, 29, 0, 0x1000);
932	DELAY(delay);
933	rge_mii_write_internal(priv->pcs_mmio, 29, 0, 0x0200);
934	DELAY(delay);
935
936	rge_mii_write_internal(priv->pcs_mmio, 30, 0, 0x1000);
937	DELAY(delay);
938	rge_mii_write_internal(priv->pcs_mmio, 30, 0, 0x0200);
939	DELAY(delay);
940
941}
942
943/*****************************************************************
944 * Initialize GMAC
945 *****************************************************************/
946static void
947rmi_xlr_config_pde(struct driver_data *priv)
948{
949	int i = 0, cpu = 0, bucket = 0;
950	uint64_t bucket_map = 0;
951
952	/* uint32_t desc_pack_ctrl = 0; */
953	uint32_t cpumask;
954
955	cpumask = 0x1;
956#ifdef SMP
957	/*
958         * rge may be called before SMP starts in a BOOTP/NFSROOT
959         * setup. We distribute packets to the other cpus only after
960         * SMP has started.
961	 */
962	if (smp_started)
963		cpumask = xlr_hw_thread_mask;
964#endif
965
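	/*
	 * Each core's message ring buckets start at core * 8; enable the
	 * first two buckets of every core present in cpumask so that
	 * received packets are spread across the active cores.
	 */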
966	for (i = 0; i < MAXCPU; i++) {
967		if (cpumask & (1 << i)) {
968			cpu = i;
969			bucket = ((cpu >> 2) << 3);
970			bucket_map |= (3ULL << bucket);
971		}
972	}
973	printf("rmi_xlr_config_pde: bucket_map=%jx\n", (uintmax_t)bucket_map);
974
975	/* bucket_map = 0x1; */
976	xlr_write_reg(priv->mmio, R_PDE_CLASS_0, (bucket_map & 0xffffffff));
977	xlr_write_reg(priv->mmio, R_PDE_CLASS_0 + 1,
978	    ((bucket_map >> 32) & 0xffffffff));
979
980	xlr_write_reg(priv->mmio, R_PDE_CLASS_1, (bucket_map & 0xffffffff));
981	xlr_write_reg(priv->mmio, R_PDE_CLASS_1 + 1,
982	    ((bucket_map >> 32) & 0xffffffff));
983
984	xlr_write_reg(priv->mmio, R_PDE_CLASS_2, (bucket_map & 0xffffffff));
985	xlr_write_reg(priv->mmio, R_PDE_CLASS_2 + 1,
986	    ((bucket_map >> 32) & 0xffffffff));
987
988	xlr_write_reg(priv->mmio, R_PDE_CLASS_3, (bucket_map & 0xffffffff));
989	xlr_write_reg(priv->mmio, R_PDE_CLASS_3 + 1,
990	    ((bucket_map >> 32) & 0xffffffff));
991}
992
993static void
994rge_smp_update_pde(void *dummy __unused)
995{
996	int i;
997	struct driver_data *priv;
998	struct rge_softc *sc;
999
1000	printf("Updating packet distribution for SMP\n");
1001	for (i = 0; i < XLR_MAX_MACS; i++) {
1002		sc = dev_mac[i];
1003		if (!sc)
1004			continue;
1005		priv = &(sc->priv);
1006		rmi_xlr_mac_set_enable(priv, 0);
1007		rmi_xlr_config_pde(priv);
1008		rmi_xlr_mac_set_enable(priv, 1);
1009	}
1010}
1011
1012SYSINIT(rge_smp_update_pde, SI_SUB_SMP, SI_ORDER_ANY, rge_smp_update_pde, NULL);
1013
1014
1015static void
1016rmi_xlr_config_parser(struct driver_data *priv)
1017{
1018	/*
1019	 * Mark it as no classification; the parser extract is guaranteed to
1020	 * be zero with no classification
1021	 */
1022	xlr_write_reg(priv->mmio, R_L2TYPE_0, 0x00);
1023
1024	xlr_write_reg(priv->mmio, R_L2TYPE_0, 0x01);
1025
1026	/* configure the parser : L2 Type is configured in the bootloader */
1027	/* extract IP: src, dest protocol */
1028	xlr_write_reg(priv->mmio, R_L3CTABLE,
1029	    (9 << 20) | (1 << 19) | (1 << 18) | (0x01 << 16) |
1030	    (0x0800 << 0));
1031	xlr_write_reg(priv->mmio, R_L3CTABLE + 1,
1032	    (12 << 25) | (4 << 21) | (16 << 14) | (4 << 10));
1033
1034}
1035
1036static void
1037rmi_xlr_config_classifier(struct driver_data *priv)
1038{
1039	int i = 0;
1040
1041	if (priv->type == XLR_XGMAC) {
1042		/* xgmac translation table doesn't have sane values on reset */
1043		for (i = 0; i < 64; i++)
1044			xlr_write_reg(priv->mmio, R_TRANSLATETABLE + i, 0x0);
1045
1046		/*
1047		 * use upper 7 bits of the parser extract to index the
1048		 * translate table
1049		 */
1050		xlr_write_reg(priv->mmio, R_PARSERCONFIGREG, 0x0);
1051	}
1052}
1053
1054enum {
1055	SGMII_SPEED_10 = 0x00000000,
1056	SGMII_SPEED_100 = 0x02000000,
1057	SGMII_SPEED_1000 = 0x04000000,
1058};
1059
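/*
 * Read link and speed straight from the PHY (standard status register 1,
 * bit 2, for link; bits 4:3 of what appears to be a vendor status register
 * 28 for the resolved speed) and program the SGMII interface control,
 * MAC_CONFIG_2 and CORECONTROL registers to match.
 */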
1060static void
1061rmi_xlr_gmac_config_speed(struct driver_data *priv)
1062{
1063	int phy_addr = priv->phy_addr;
1064	xlr_reg_t *mmio = priv->mmio;
1065	struct rge_softc *sc = priv->sc;
1066
1067	priv->speed = rge_mii_read_internal(priv->mii_mmio, phy_addr, 28);
1068	priv->link = rge_mii_read_internal(priv->mii_mmio, phy_addr, 1) & 0x4;
1069	priv->speed = (priv->speed >> 3) & 0x03;
1070
1071	if (priv->speed == xlr_mac_speed_10) {
1072		if (priv->mode != XLR_RGMII)
1073			xlr_write_reg(mmio, R_INTERFACE_CONTROL, SGMII_SPEED_10);
1074		xlr_write_reg(mmio, R_MAC_CONFIG_2, 0x7117);
1075		xlr_write_reg(mmio, R_CORECONTROL, 0x02);
1076		printf("%s: [10Mbps]\n", device_get_nameunit(sc->rge_dev));
1077		sc->rge_mii.mii_media.ifm_media = IFM_ETHER | IFM_AUTO | IFM_10_T | IFM_FDX;
1078		sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER | IFM_AUTO | IFM_10_T | IFM_FDX;
1079		sc->rge_mii.mii_media_active = IFM_ETHER | IFM_AUTO | IFM_10_T | IFM_FDX;
1080	} else if (priv->speed == xlr_mac_speed_100) {
1081		if (priv->mode != XLR_RGMII)
1082			xlr_write_reg(mmio, R_INTERFACE_CONTROL, SGMII_SPEED_100);
1083		xlr_write_reg(mmio, R_MAC_CONFIG_2, 0x7117);
1084		xlr_write_reg(mmio, R_CORECONTROL, 0x01);
1085		printf("%s: [100Mbps]\n", device_get_nameunit(sc->rge_dev));
1086		sc->rge_mii.mii_media.ifm_media = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
1087		sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
1088		sc->rge_mii.mii_media_active = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
1089	} else {
1090		if (priv->speed != xlr_mac_speed_1000) {
1091			if (priv->mode != XLR_RGMII)
1092				xlr_write_reg(mmio, R_INTERFACE_CONTROL, SGMII_SPEED_100);
1093			printf("PHY reported unknown MAC speed, defaulting to 100Mbps\n");
1094			xlr_write_reg(mmio, R_MAC_CONFIG_2, 0x7117);
1095			xlr_write_reg(mmio, R_CORECONTROL, 0x01);
1096			sc->rge_mii.mii_media.ifm_media = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
1097			sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
1098			sc->rge_mii.mii_media_active = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
1099		} else {
1100			if (priv->mode != XLR_RGMII)
1101				xlr_write_reg(mmio, R_INTERFACE_CONTROL, SGMII_SPEED_1000);
1102			xlr_write_reg(mmio, R_MAC_CONFIG_2, 0x7217);
1103			xlr_write_reg(mmio, R_CORECONTROL, 0x00);
1104			printf("%s: [1000Mbps]\n", device_get_nameunit(sc->rge_dev));
1105			sc->rge_mii.mii_media.ifm_media = IFM_ETHER | IFM_AUTO | IFM_1000_T | IFM_FDX;
1106			sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER | IFM_AUTO | IFM_1000_T | IFM_FDX;
1107			sc->rge_mii.mii_media_active = IFM_ETHER | IFM_AUTO | IFM_1000_T | IFM_FDX;
1108		}
1109	}
1110
1111	if (!priv->link) {
1112		sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER;
1113		sc->link_up = 0;
1114	} else {
1115		sc->link_up = 1;
1116	}
1117}
1118
1119/*****************************************************************
1120 * Initialize XGMAC
1121 *****************************************************************/
1122static void
1123rmi_xlr_xgmac_init(struct driver_data *priv)
1124{
1125	int i = 0;
1126	xlr_reg_t *mmio = priv->mmio;
1127	int id = priv->instance;
1128	struct rge_softc *sc = priv->sc;
1129	volatile unsigned short *cpld;
1130
1131	cpld = (volatile unsigned short *)0xBD840000;
1132
1133	xlr_write_reg(priv->mmio, R_DESC_PACK_CTRL,
1134	    (MAX_FRAME_SIZE << O_DESC_PACK_CTRL__RegularSize) | (4 << 20));
1135	xlr_write_reg(priv->mmio, R_BYTEOFFSET0, BYTE_OFFSET);
1136	rmi_xlr_config_pde(priv);
1137	rmi_xlr_config_parser(priv);
1138	rmi_xlr_config_classifier(priv);
1139
1140	xlr_write_reg(priv->mmio, R_MSG_TX_THRESHOLD, 1);
1141
1142	/* configure the XGMAC Registers */
1143	xlr_write_reg(mmio, R_XGMAC_CONFIG_1, 0x50000026);
1144
1145	/* configure the XGMAC_GLUE Registers */
1146	xlr_write_reg(mmio, R_DMACR0, 0xffffffff);
1147	xlr_write_reg(mmio, R_DMACR1, 0xffffffff);
1148	xlr_write_reg(mmio, R_DMACR2, 0xffffffff);
1149	xlr_write_reg(mmio, R_DMACR3, 0xffffffff);
1150	xlr_write_reg(mmio, R_STATCTRL, 0x04);
1151	xlr_write_reg(mmio, R_L2ALLOCCTRL, 0xffffffff);
1152
1153	xlr_write_reg(mmio, R_XGMACPADCALIBRATION, 0x030);
1154	xlr_write_reg(mmio, R_EGRESSFIFOCARVINGSLOTS, 0x0f);
1155	xlr_write_reg(mmio, R_L2ALLOCCTRL, 0xffffffff);
1156	xlr_write_reg(mmio, R_XGMAC_MIIM_CONFIG, 0x3e);
1157
1158	/*
1159	 * take XGMII phy out of reset
1160	 */
1161	/*
1162	 * we are pulling everything out of reset because writing a 0 would
1163	 * reset other devices on the chip
1164	 */
1165	cpld[ATX_CPLD_RESET_1] = 0xffff;
1166	cpld[ATX_CPLD_MISC_CTRL] = 0xffff;
1167	cpld[ATX_CPLD_RESET_2] = 0xffff;
1168
1169	xgmac_mdio_setup(mmio);
1170
1171	rmi_xlr_config_spill_area(priv);
1172
1173	if (id == 0) {
1174		for (i = 0; i < 16; i++) {
1175			xlr_write_reg(mmio, R_XGS_TX0_BUCKET_SIZE + i,
1176			    bucket_sizes.
1177			    bucket[MSGRNG_STNID_XGS0_TX + i]);
1178		}
1179
1180		xlr_write_reg(mmio, R_XGS_JFR_BUCKET_SIZE,
1181		    bucket_sizes.bucket[MSGRNG_STNID_XMAC0JFR]);
1182		xlr_write_reg(mmio, R_XGS_RFR_BUCKET_SIZE,
1183		    bucket_sizes.bucket[MSGRNG_STNID_XMAC0RFR]);
1184
1185		for (i = 0; i < MAX_NUM_MSGRNG_STN_CC; i++) {
1186			xlr_write_reg(mmio, R_CC_CPU0_0 + i,
1187			    cc_table_xgs_0.
1188			    counters[i >> 3][i & 0x07]);
1189		}
1190	} else if (id == 1) {
1191		for (i = 0; i < 16; i++) {
1192			xlr_write_reg(mmio, R_XGS_TX0_BUCKET_SIZE + i,
1193			    bucket_sizes.
1194			    bucket[MSGRNG_STNID_XGS1_TX + i]);
1195		}
1196
1197		xlr_write_reg(mmio, R_XGS_JFR_BUCKET_SIZE,
1198		    bucket_sizes.bucket[MSGRNG_STNID_XMAC1JFR]);
1199		xlr_write_reg(mmio, R_XGS_RFR_BUCKET_SIZE,
1200		    bucket_sizes.bucket[MSGRNG_STNID_XMAC1RFR]);
1201
1202		for (i = 0; i < MAX_NUM_MSGRNG_STN_CC; i++) {
1203			xlr_write_reg(mmio, R_CC_CPU0_0 + i,
1204			    cc_table_xgs_1.
1205			    counters[i >> 3][i & 0x07]);
1206		}
1207	}
1208	sc->rge_mii.mii_media.ifm_media = IFM_ETHER | IFM_AUTO | IFM_10G_SR | IFM_FDX;
1209	sc->rge_mii.mii_media.ifm_media |= (IFM_AVALID | IFM_ACTIVE);
1210	sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER | IFM_AUTO | IFM_10G_SR | IFM_FDX;
1211	sc->rge_mii.mii_media_active = IFM_ETHER | IFM_AUTO | IFM_10G_SR | IFM_FDX;
1212	sc->rge_mii.mii_media.ifm_cur->ifm_media |= (IFM_AVALID | IFM_ACTIVE);
1213
1214	priv->init_frin_desc = 1;
1215}
1216
1217/*******************************************************
1218 * Initialization gmac
1219 *******************************************************/
1220static int
1221rmi_xlr_gmac_reset(struct driver_data *priv)
1222{
1223	volatile uint32_t val;
1224	xlr_reg_t *mmio = priv->mmio;
1225	int i, maxloops = 100;
1226
1227	/* Disable MAC RX */
1228	val = xlr_read_reg(mmio, R_MAC_CONFIG_1);
1229	val &= ~0x4;
1230	xlr_write_reg(mmio, R_MAC_CONFIG_1, val);
1231
1232	/* Disable Core RX */
1233	val = xlr_read_reg(mmio, R_RX_CONTROL);
1234	val &= ~0x1;
1235	xlr_write_reg(mmio, R_RX_CONTROL, val);
1236
1237	/* wait for rx to halt */
1238	for (i = 0; i < maxloops; i++) {
1239		val = xlr_read_reg(mmio, R_RX_CONTROL);
1240		if (val & 0x2)
1241			break;
1242		DELAY(1000);
1243	}
1244	if (i == maxloops)
1245		return -1;
1246
1247	/* Issue a soft reset */
1248	val = xlr_read_reg(mmio, R_RX_CONTROL);
1249	val |= 0x4;
1250	xlr_write_reg(mmio, R_RX_CONTROL, val);
1251
1252	/* wait for reset to complete */
1253	for (i = 0; i < maxloops; i++) {
1254		val = xlr_read_reg(mmio, R_RX_CONTROL);
1255		if (val & 0x8)
1256			break;
1257		DELAY(1000);
1258	}
1259	if (i == maxloops)
1260		return -1;
1261
1262	/* Clear the soft reset bit */
1263	val = xlr_read_reg(mmio, R_RX_CONTROL);
1264	val &= ~0x4;
1265	xlr_write_reg(mmio, R_RX_CONTROL, val);
1266	return 0;
1267}
1268
1269static void
1270rmi_xlr_gmac_init(struct driver_data *priv)
1271{
1272	int i = 0;
1273	xlr_reg_t *mmio = priv->mmio;
1274	int id = priv->instance;
1275	struct stn_cc *gmac_cc_config;
1276	uint32_t value = 0;
1277	int blk = id / 4, port = id % 4;
1278
1279	rmi_xlr_mac_set_enable(priv, 0);
1280
1281	rmi_xlr_config_spill_area(priv);
1282
1283	xlr_write_reg(mmio, R_DESC_PACK_CTRL,
1284	    (BYTE_OFFSET << O_DESC_PACK_CTRL__ByteOffset) |
1285	    (1 << O_DESC_PACK_CTRL__MaxEntry) |
1286	    (MAX_FRAME_SIZE << O_DESC_PACK_CTRL__RegularSize));
1287
1288	rmi_xlr_config_pde(priv);
1289	rmi_xlr_config_parser(priv);
1290	rmi_xlr_config_classifier(priv);
1291
1292	xlr_write_reg(mmio, R_MSG_TX_THRESHOLD, 3);
1293	xlr_write_reg(mmio, R_MAC_CONFIG_1, 0x35);
1294	xlr_write_reg(mmio, R_RX_CONTROL, (0x7 << 6));
1295
1296	if (priv->mode == XLR_PORT0_RGMII) {
1297		printf("Port 0 set in RGMII mode\n");
1298		value = xlr_read_reg(mmio, R_RX_CONTROL);
1299		value |= 1 << O_RX_CONTROL__RGMII;
1300		xlr_write_reg(mmio, R_RX_CONTROL, value);
1301	}
1302	rmi_xlr_mac_mii_init(priv);
1303
1304
1305#if 0
1306	priv->advertising = ADVERTISED_10baseT_Full | ADVERTISED_10baseT_Half |
1307	    ADVERTISED_100baseT_Full | ADVERTISED_100baseT_Half |
1308	    ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
1309	    ADVERTISED_MII;
1310#endif
1311
1312	/*
1313	 * Enable all MDIO interrupts in the PHY. The RX_ER bit seems to get
1314	 * set about once a second in GigE mode; ignore it for now...
1315	 */
1316	rge_mii_write_internal(priv->mii_mmio, priv->phy_addr, 25, 0xfffffffe);
1317
1318	if (priv->mode != XLR_RGMII) {
1319		serdes_regs_init(priv);
1320		serdes_autoconfig(priv);
1321	}
1322	rmi_xlr_gmac_config_speed(priv);
1323
1324	value = xlr_read_reg(mmio, R_IPG_IFG);
1325	xlr_write_reg(mmio, R_IPG_IFG, ((value & ~0x7f) | MAC_B2B_IPG));
1326	xlr_write_reg(mmio, R_DMACR0, 0xffffffff);
1327	xlr_write_reg(mmio, R_DMACR1, 0xffffffff);
1328	xlr_write_reg(mmio, R_DMACR2, 0xffffffff);
1329	xlr_write_reg(mmio, R_DMACR3, 0xffffffff);
1330	xlr_write_reg(mmio, R_STATCTRL, 0x04);
1331	xlr_write_reg(mmio, R_L2ALLOCCTRL, 0xffffffff);
1332	xlr_write_reg(mmio, R_INTMASK, 0);
1333	xlr_write_reg(mmio, R_FREEQCARVE, 0);
1334
1335	xlr_write_reg(mmio, R_GMAC_TX0_BUCKET_SIZE + port,
1336	    xlr_board_info.bucket_sizes->bucket[priv->txbucket]);
1337	xlr_write_reg(mmio, R_GMAC_JFR0_BUCKET_SIZE,
1338	    xlr_board_info.bucket_sizes->bucket[MSGRNG_STNID_GMACJFR_0]);
1339	xlr_write_reg(mmio, R_GMAC_RFR0_BUCKET_SIZE,
1340	    xlr_board_info.bucket_sizes->bucket[MSGRNG_STNID_GMACRFR_0]);
1341	xlr_write_reg(mmio, R_GMAC_JFR1_BUCKET_SIZE,
1342	    xlr_board_info.bucket_sizes->bucket[MSGRNG_STNID_GMACJFR_1]);
1343	xlr_write_reg(mmio, R_GMAC_RFR1_BUCKET_SIZE,
1344	    xlr_board_info.bucket_sizes->bucket[MSGRNG_STNID_GMACRFR_1]);
1345
1346	dbg_msg("Programming credit counter %d : %d -> %d\n", blk, R_GMAC_TX0_BUCKET_SIZE + port,
1347	    xlr_board_info.bucket_sizes->bucket[priv->txbucket]);
1348
1349	gmac_cc_config = xlr_board_info.gmac_block[blk].credit_config;
1350	for (i = 0; i < MAX_NUM_MSGRNG_STN_CC; i++) {
1351		xlr_write_reg(mmio, R_CC_CPU0_0 + i,
1352		    gmac_cc_config->counters[i >> 3][i & 0x07]);
1353		dbg_msg("%d: %d -> %d\n", priv->instance,
1354		    R_CC_CPU0_0 + i, gmac_cc_config->counters[i >> 3][i & 0x07]);
1355	}
1356	priv->init_frin_desc = 1;
1357}
1358
1359/**********************************************************************
1360 * Set promiscuous mode
1361 **********************************************************************/
1362static void
1363xlr_mac_set_rx_mode(struct rge_softc *sc)
1364{
1365	struct driver_data *priv = &(sc->priv);
1366	uint32_t regval;
1367
1368	regval = xlr_read_reg(priv->mmio, R_MAC_FILTER_CONFIG);
1369
1370	if (sc->flags & IFF_PROMISC) {
1371		regval |= (1 << O_MAC_FILTER_CONFIG__BROADCAST_EN) |
1372		    (1 << O_MAC_FILTER_CONFIG__PAUSE_FRAME_EN) |
1373		    (1 << O_MAC_FILTER_CONFIG__ALL_MCAST_EN) |
1374		    (1 << O_MAC_FILTER_CONFIG__ALL_UCAST_EN);
1375	} else {
1376		regval &= ~((1 << O_MAC_FILTER_CONFIG__PAUSE_FRAME_EN) |
1377		    (1 << O_MAC_FILTER_CONFIG__ALL_UCAST_EN));
1378	}
1379
1380	xlr_write_reg(priv->mmio, R_MAC_FILTER_CONFIG, regval);
1381}
1382
1383/**********************************************************************
1384 *  Configure LAN speed for the specified MAC.
1385 ********************************************************************* */
1386static int
1387rmi_xlr_mac_set_speed(struct driver_data *s, xlr_mac_speed_t speed)
1388{
1389	return 0;
1390}
1391
1392/**********************************************************************
1393 *  Set Ethernet duplex and flow control options for this MAC
1394 ********************************************************************* */
1395static int
1396rmi_xlr_mac_set_duplex(struct driver_data *s,
1397    xlr_mac_duplex_t duplex, xlr_mac_fc_t fc)
1398{
1399	return 0;
1400}
1401
1402/*****************************************************************
1403 * Kernel Net Stack <-> MAC Driver Interface
1404 *****************************************************************/
1405/**********************************************************************
1406 **********************************************************************/
1407#define MAC_TX_FAIL 2
1408#define MAC_TX_PASS 0
1409#define MAC_TX_RETRY 1
1410
1411int xlr_dev_queue_xmit_hack = 0;
1412
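/*
 * Transmit path: build the fragment list for the mbuf chain and send the
 * resulting pointer-to-P2D message to the MAC's TX bucket over the message
 * ring; if the send fails the descriptor is released (without freeing the
 * mbuf) and MAC_TX_FAIL is returned so the caller can back off.
 */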
1413static int
1414mac_xmit(struct mbuf *m, struct rge_softc *sc,
1415    struct driver_data *priv, int len, struct p2d_tx_desc *tx_desc)
1416{
1417	struct msgrng_msg msg = {0,0,0,0};
1418	int stid = priv->txbucket;
1419	uint32_t tx_cycles = 0;
1420	uint32_t mflags;
1421	int vcpu = xlr_cpu_id();
1422	int rv;
1423
1424	tx_cycles = mips_rd_count();
1425
1426	if (build_frag_list(m, &msg, tx_desc) != 0)
1427		return MAC_TX_FAIL;
1428
1429	else {
1430		mflags = msgrng_access_enable();
1431		if ((rv = message_send(1, MSGRNG_CODE_MAC, stid, &msg)) != 0) {
1432			msg_snd_failed++;
1433			msgrng_restore(mflags);
1434			release_tx_desc(&msg, 0);
1435			xlr_rge_msg_snd_failed[vcpu]++;
1436			dbg_msg("Failed packet to cpu %d, rv = %d, stid %d, msg0=%jx\n",
1437			    vcpu, rv, stid, (uintmax_t)msg.msg0);
1438			return MAC_TX_FAIL;
1439		}
1440		msgrng_restore(mflags);
1441		port_inc_counter(priv->instance, PORT_TX);
1442	}
1443
1444	/* Send the packet to MAC */
1445	dbg_msg("Sent tx packet to stid %d, msg0=%jx, msg1=%jx \n", stid,
1446	    (uintmax_t)msg.msg0, (uintmax_t)msg.msg1);
1447#ifdef DUMP_PACKETS
1448	{
1449		int i = 0;
1450		unsigned char *buf = (unsigned char *)m->m_data;
1451
1452		printf("Tx Packet: length=%d\n", len);
1453		for (i = 0; i < 64; i++) {
1454			if (i && (i % 16) == 0)
1455				printf("\n");
1456			printf("%02x ", buf[i]);
1457		}
1458		printf("\n");
1459	}
1460#endif
1461	xlr_inc_counter(NETIF_TX);
1462	return MAC_TX_PASS;
1463}
1464
1465static int
1466rmi_xlr_mac_xmit(struct mbuf *m, struct rge_softc *sc, int len, struct p2d_tx_desc *tx_desc)
1467{
1468	struct driver_data *priv = &(sc->priv);
1469	int ret = -ENOSPC;
1470
1471	dbg_msg("IN\n");
1472
1473	xlr_inc_counter(NETIF_STACK_TX);
1474
1475retry:
1476	ret = mac_xmit(m, sc, priv, len, tx_desc);
1477
1478	if (ret == MAC_TX_RETRY)
1479		goto retry;
1480
1481	dbg_msg("OUT, ret = %d\n", ret);
1482	if (ret == MAC_TX_FAIL) {
1483		/* FULL */
1484		dbg_msg("Msg Ring Full. Stopping upper layer Q\n");
1485		port_inc_counter(priv->instance, PORT_STOPQ);
1486	}
1487	return ret;
1488}
1489
1490static void
1491mac_frin_replenish(void *args /* ignored */ )
1492{
1493	int cpu = xlr_core_id();
1494	int done = 0;
1495	int i = 0;
1496
1497	xlr_inc_counter(REPLENISH_ENTER);
1498	/*
1499	 * xlr_set_counter(REPLENISH_ENTER_COUNT,
1500	 * atomic_read(frin_to_be_sent));
1501	 */
1502	xlr_set_counter(REPLENISH_CPU, PCPU_GET(cpuid));
1503
1504	for (;;) {
1505
1506		done = 0;
1507
1508		for (i = 0; i < XLR_MAX_MACS; i++) {
1509			/* int offset = 0; */
1510			void *m;
1511			uint32_t cycles;
1512			struct rge_softc *sc;
1513			struct driver_data *priv;
1514			int frin_to_be_sent;
1515
1516			sc = dev_mac[i];
1517			if (!sc)
1518				goto skip;
1519
1520			priv = &(sc->priv);
1521			frin_to_be_sent = priv->frin_to_be_sent[cpu];
1522
1523			/* if (atomic_read(frin_to_be_sent) < 0) */
1524			if (frin_to_be_sent < 0) {
1525				panic("BUG?: [%s]: gmac_%d illegal value for frin_to_be_sent=%d\n",
1526				    __FUNCTION__, i,
1527				    frin_to_be_sent);
1528			}
1529			/* if (!atomic_read(frin_to_be_sent)) */
1530			if (!frin_to_be_sent)
1531				goto skip;
1532
1533			cycles = mips_rd_count();
1534			{
1535				m = get_buf();
1536				if (!m) {
1537					device_printf(sc->rge_dev, "No buffer\n");
1538					goto skip;
1539				}
1540			}
1541			xlr_inc_counter(REPLENISH_FRIN);
1542			if (xlr_mac_send_fr(priv, vtophys(m), MAX_FRAME_SIZE)) {
1543				free_buf(vtophys(m));
1544				printf("[%s]: rx free message_send failed!\n", __FUNCTION__);
1545				break;
1546			}
1547			xlr_set_counter(REPLENISH_CYCLES,
1548			    (read_c0_count() - cycles));
1549			atomic_subtract_int((&priv->frin_to_be_sent[cpu]), 1);
1550
1551			continue;
1552	skip:
1553			done++;
1554		}
1555		if (done == XLR_MAX_MACS)
1556			break;
1557	}
1558}
1559
1560static volatile uint32_t g_tx_frm_tx_ok=0;
1561
1562static void
1563rge_tx_bkp_func(void *arg, int npending)
1564{
1565	int i = 0;
1566
1567	for (i = 0; i < xlr_board_info.gmacports; i++) {
1568		if (!dev_mac[i] || !dev_mac[i]->active)
1569			continue;
1570		rge_start_locked(dev_mac[i]->rge_ifp, RGE_TX_THRESHOLD);
1571	}
1572	atomic_subtract_int(&g_tx_frm_tx_ok, 1);
1573}
1574
1575/* This function is called from an interrupt handler */
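/*
 * msg0 encodes the buffer physical address in bits 39:5 and the frame
 * length in bits 53:40.  A zero length marks a transmit free-back (port in
 * bits 57:54); otherwise this is a received frame (port in the low four
 * bits) whose length still includes the 2-byte front pad and the CRC,
 * stripped below before the frame is passed to rge_rx().
 */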
1576void
1577rmi_xlr_mac_msgring_handler(int bucket, int size, int code,
1578    int stid, struct msgrng_msg *msg,
1579    void *data /* ignored */ )
1580{
1581	uint64_t phys_addr = 0;
1582	unsigned long addr = 0;
1583	uint32_t length = 0;
1584	int ctrl = 0, port = 0;
1585	struct rge_softc *sc = NULL;
1586	struct driver_data *priv = 0;
1587	struct ifnet *ifp;
1588	int vcpu = xlr_cpu_id();
1589	int cpu = xlr_core_id();
1590
1591	dbg_msg("mac: bucket=%d, size=%d, code=%d, stid=%d, msg0=%jx msg1=%jx\n",
1592	    bucket, size, code, stid, (uintmax_t)msg->msg0, (uintmax_t)msg->msg1);
1593
1594	phys_addr = (uint64_t) (msg->msg0 & 0xffffffffe0ULL);
1595	length = (msg->msg0 >> 40) & 0x3fff;
1596	if (length == 0) {
1597		ctrl = CTRL_REG_FREE;
1598		port = (msg->msg0 >> 54) & 0x0f;
1599		addr = 0;
1600	} else {
1601		ctrl = CTRL_SNGL;
1602		length = length - BYTE_OFFSET - MAC_CRC_LEN;
1603		port = msg->msg0 & 0x0f;
1604		addr = 0;
1605	}
1606
1607	if (xlr_board_info.is_xls) {
1608		if (stid == MSGRNG_STNID_GMAC1)
1609			port += 4;
1610		sc = dev_mac[dev_mac_gmac0 + port];
1611	} else {
1612		if (stid == MSGRNG_STNID_XGS0FR)
1613			sc = dev_mac[dev_mac_xgs0];
1614		else if (stid == MSGRNG_STNID_XGS1FR)
1615			sc = dev_mac[dev_mac_xgs0 + 1];
1616		else
1617			sc = dev_mac[dev_mac_gmac0 + port];
1618	}
1619	if (sc == NULL)
1620		return;
1621	priv = &(sc->priv);
1622
1623	dbg_msg("msg0 = %jx, stid = %d, port = %d, addr=%lx, length=%d, ctrl=%d\n",
1624	    (uintmax_t)msg->msg0, stid, port, addr, length, ctrl);
1625
1626	if (ctrl == CTRL_REG_FREE || ctrl == CTRL_JUMBO_FREE) {
1627		xlr_rge_tx_ok_done[vcpu]++;
1628		release_tx_desc(msg, 1);
1629		ifp = sc->rge_ifp;
1630		if (ifp->if_drv_flags & IFF_DRV_OACTIVE) {
1631			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1632		}
1633		if (atomic_cmpset_int(&g_tx_frm_tx_ok, 0, 1))
1634			rge_tx_bkp_func(NULL, 0);
1635		xlr_set_counter(NETIF_TX_COMPLETE_CYCLES,
1636		    (read_c0_count() - msgrng_msg_cycles));
1637	} else if (ctrl == CTRL_SNGL || ctrl == CTRL_START) {
1638		/* Rx Packet */
1639		/* struct mbuf *m = 0; */
1640		/* int logical_cpu = 0; */
1641
1642		dbg_msg("Received packet, port = %d\n", port);
1643		/*
1644		 * if num frins to be sent exceeds threshold, wake up the
1645		 * helper thread
1646		 */
1647		atomic_add_int(&(priv->frin_to_be_sent[cpu]), 1);
1648		if ((priv->frin_to_be_sent[cpu]) > MAC_FRIN_TO_BE_SENT_THRESHOLD) {
1649			mac_frin_replenish(NULL);
1650		}
1651		dbg_msg("gmac_%d: rx packet: phys_addr = %jx, length = %x\n",
1652		    priv->instance, (uintmax_t)phys_addr, length);
1653		mac_stats_add(priv->stats.rx_packets, 1);
1654		mac_stats_add(priv->stats.rx_bytes, length);
1655		xlr_inc_counter(NETIF_RX);
1656		xlr_set_counter(NETIF_RX_CYCLES,
1657		    (read_c0_count() - msgrng_msg_cycles));
1658		rge_rx(sc, phys_addr, length);
1659		xlr_rge_rx_done[vcpu]++;
1660	} else {
1661		printf("[%s]: unrecognized ctrl=%d!\n", __FUNCTION__, ctrl);
1662	}
1663
1664}
1665
1666/**********************************************************************
1667 **********************************************************************/
1668static int
1669rge_probe(dev)
1670	device_t dev;
1671{
1672	device_set_desc(dev, "RMI Gigabit Ethernet");
1673
1674	/* Always return 0 */
1675	return 0;
1676}
1677
1678volatile unsigned long xlr_debug_enabled;
1679struct callout rge_dbg_count;
1680static void
1681xlr_debug_count(void *addr)
1682{
1683	struct driver_data *priv = &dev_mac[0]->priv;
1684
1685	/* uint32_t crdt; */
1686	if (xlr_debug_enabled) {
1687		printf("\nAvailRxIn %#x\n", xlr_read_reg(priv->mmio, 0x23e));
1688	}
1689	callout_reset(&rge_dbg_count, hz, xlr_debug_count, NULL);
1690}
1691
1692
1693static void
1694xlr_tx_q_wakeup(void *addr)
1695{
1696	int i = 0;
1697	int j = 0;
1698
1699	for (i = 0; i < xlr_board_info.gmacports; i++) {
1700		if (!dev_mac[i] || !dev_mac[i]->active)
1701			continue;
1702		if ((dev_mac[i]->rge_ifp->if_drv_flags) & IFF_DRV_OACTIVE) {
1703			for (j = 0; j < XLR_MAX_CORE; j++) {
1704				if (xlr_tot_avail_p2d[j]) {
1705					dev_mac[i]->rge_ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1706					break;
1707				}
1708			}
1709		}
1710	}
1711	if (atomic_cmpset_int(&g_tx_frm_tx_ok, 0, 1))
1712		rge_tx_bkp_func(NULL, 0);
1713	callout_reset(&xlr_tx_stop_bkp, 5 * hz, xlr_tx_q_wakeup, NULL);
1714}
1715
1716static int
1717rge_attach(device_t dev)
1718{
1719	struct ifnet *ifp;
1720	struct rge_softc *sc;
1721	struct driver_data *priv = 0;
1722	int ret = 0;
1723	struct xlr_gmac_block_t *gmac_conf = device_get_ivars(dev);
1724
1725	sc = device_get_softc(dev);
1726	sc->rge_dev = dev;
1727
1728	/* Initialize mac's */
1729	sc->unit = device_get_unit(dev);
1730
1731	if (sc->unit >= XLR_MAX_MACS) {
1732		ret = ENXIO;
1733		goto out;
1734	}
1735	RGE_LOCK_INIT(sc, device_get_nameunit(dev));
1736
1737	priv = &(sc->priv);
1738	priv->sc = sc;
1739
1740	sc->flags = 0;		/* TODO : fix me up later */
1741
1742	priv->id = sc->unit;
1743	if (gmac_conf->type == XLR_GMAC) {
1744		priv->instance = priv->id;
1745		priv->mmio = (xlr_reg_t *) (xlr_io_base + gmac_conf->baseaddr +
1746		    0x1000 * (sc->unit % 4));
1747		if ((ret = rmi_xlr_gmac_reset(priv)) == -1)
1748			goto out;
1749	} else if (gmac_conf->type == XLR_XGMAC) {
1750		priv->instance = priv->id - xlr_board_info.gmacports;
1751		priv->mmio = (xlr_reg_t *) (xlr_io_base + gmac_conf->baseaddr);
1752	}
1753	if (xlr_boot1_info.board_major_version == RMI_XLR_BOARD_ARIZONA_VI ||
1754	    (xlr_boot1_info.board_major_version == RMI_XLR_BOARD_ARIZONA_XI &&
1755	     priv->instance >=4)) {
1756		dbg_msg("Arizona board - offset 4 \n");
1757		priv->mii_mmio = (xlr_reg_t *) (xlr_io_base + XLR_IO_GMAC_4_OFFSET);
1758	} else
1759		priv->mii_mmio = (xlr_reg_t *) (xlr_io_base + XLR_IO_GMAC_0_OFFSET);
1760
1761	priv->pcs_mmio = (xlr_reg_t *) (xlr_io_base + gmac_conf->baseaddr);
1762	priv->serdes_mmio = (xlr_reg_t *) (xlr_io_base + XLR_IO_GMAC_0_OFFSET);
1763
1764	sc->base_addr = (unsigned long)priv->mmio;
1765	sc->mem_end = (unsigned long)priv->mmio + XLR_IO_SIZE - 1;
1766
1767	sc->xmit = rge_start;
1768	sc->stop = rge_stop;
1769	sc->get_stats = rmi_xlr_mac_get_stats;
1770	sc->ioctl = rge_ioctl;
1771
1772	/* Initialize the device specific driver data */
1773	mtx_init(&priv->lock, "rge", NULL, MTX_SPIN);
1774
1775	priv->type = gmac_conf->type;
1776
1777	priv->mode = gmac_conf->mode;
1778	if (xlr_board_info.is_xls == 0) {
1779		/* TODO - check II and IIB boards */
1780		if (xlr_boot1_info.board_major_version == RMI_XLR_BOARD_ARIZONA_II &&
1781		    xlr_boot1_info.board_minor_version != 1)
1782			priv->phy_addr = priv->instance - 2;
1783		else
1784			priv->phy_addr = priv->instance;
1785		priv->mode = XLR_RGMII;
1786	} else {
1787		if (gmac_conf->mode == XLR_PORT0_RGMII &&
1788		    priv->instance == 0) {
1789			priv->mode = XLR_PORT0_RGMII;
1790			priv->phy_addr = 0;
1791		} else {
1792			priv->mode = XLR_SGMII;
1793			/* Board 11 has SGMII daughter cards with the XLS chips; in this
1794			   case the phy number is 0-3 for both GMAC blocks */
1795			if (xlr_boot1_info.board_major_version == RMI_XLR_BOARD_ARIZONA_XI)
1796				priv->phy_addr = priv->instance % 4 + 16;
1797			else
1798				priv->phy_addr = priv->instance + 16;
1799		}
1800	}
1801
1802	priv->txbucket = gmac_conf->station_txbase + priv->instance % 4;
1803	priv->rfrbucket = gmac_conf->station_rfr;
1804	priv->spill_configured = 0;
1805
1806	dbg_msg("priv->mmio=%p\n", priv->mmio);
1807
1808	/* Set up ifnet structure */
1809	ifp = sc->rge_ifp = if_alloc(IFT_ETHER);
1810	if (ifp == NULL) {
1811		device_printf(sc->rge_dev, "failed to if_alloc()\n");
1812		rge_release_resources(sc);
1813		ret = ENXIO;
1814		RGE_LOCK_DESTROY(sc);
1815		goto out;
1816	}
1817	ifp->if_softc = sc;
1818	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1819	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1820	ifp->if_ioctl = rge_ioctl;
1821	ifp->if_start = rge_start;
1822	ifp->if_init = rge_init;
1823	ifp->if_mtu = ETHERMTU;
1824	ifp->if_snd.ifq_drv_maxlen = RGE_TX_Q_SIZE;
1825	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
1826	IFQ_SET_READY(&ifp->if_snd);
1827	sc->active = 1;
1828	ifp->if_hwassist = 0;
1829	ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_VLAN_HWTAGGING;
1830	ifp->if_capenable = ifp->if_capabilities;
1831
1832	/* Initialize the rge_softc */
1833	sc->irq = gmac_conf->baseirq + priv->instance % 4;
1834
1835	/* Set the IRQ into the rid field */
1836	/*
1837	 * note this is a hack to pass the irq to the iodi interrupt setup
1838	 * routines
1839	 */
1840	sc->rge_irq.__r_i = (struct resource_i *)(intptr_t)sc->irq;
1841
1842	ret = bus_setup_intr(dev, &sc->rge_irq, INTR_TYPE_NET | INTR_MPSAFE,
1843	    NULL, rge_intr, sc, &sc->rge_intrhand);
1844
1845	if (ret) {
1846		rge_detach(dev);
1847		device_printf(sc->rge_dev, "couldn't set up irq\n");
1848		RGE_LOCK_DESTROY(sc);
1849		goto out;
1850	}
1851	xlr_mac_get_hwaddr(sc);
1852	xlr_mac_setup_hwaddr(priv);
1853
1854	dbg_msg("MMIO %08lx, MII %08lx, PCS %08lx, base %08lx PHY %d IRQ %d\n",
1855	    (u_long)priv->mmio, (u_long)priv->mii_mmio, (u_long)priv->pcs_mmio,
1856	    (u_long)sc->base_addr, priv->phy_addr, sc->irq);
1857	dbg_msg("HWADDR %02x:%02x tx %d rfr %d\n", (u_int)sc->dev_addr[4],
1858	    (u_int)sc->dev_addr[5], priv->txbucket, priv->rfrbucket);
1859
1860	/*
1861	 * Set up ifmedia support.
1862	 */
1863	/*
1864	 * Initialize MII/media info.
1865	 */
1866	sc->rge_mii.mii_ifp = ifp;
1867	sc->rge_mii.mii_readreg = rge_mii_read;
1868	sc->rge_mii.mii_writereg = (mii_writereg_t) rge_mii_write;
1869	sc->rge_mii.mii_statchg = rmi_xlr_mac_mii_statchg;
1870	ifmedia_init(&sc->rge_mii.mii_media, 0, rmi_xlr_mac_mediachange,
1871	    rmi_xlr_mac_mediastatus);
1872	ifmedia_add(&sc->rge_mii.mii_media, IFM_ETHER | IFM_AUTO, 0, NULL);
1873	ifmedia_set(&sc->rge_mii.mii_media, IFM_ETHER | IFM_AUTO);
1874	sc->rge_mii.mii_media.ifm_media = sc->rge_mii.mii_media.ifm_cur->ifm_media;
1875
1876	/*
1877	 * Call MI attach routine.
1878	 */
1879	ether_ifattach(ifp, sc->dev_addr);
1880
1881	if (priv->type == XLR_GMAC) {
1882		rmi_xlr_gmac_init(priv);
1883	} else if (priv->type == XLR_XGMAC) {
1884		rmi_xlr_xgmac_init(priv);
1885	}
1886	dbg_msg("rge_%d: Phoenix Mac at 0x%p (mtu=%d)\n",
1887	    sc->unit, priv->mmio, sc->mtu);
1888	dev_mac[sc->unit] = sc;
1889	if (priv->type == XLR_XGMAC && priv->instance == 0)
1890		dev_mac_xgs0 = sc->unit;
1891	if (priv->type == XLR_GMAC && priv->instance == 0)
1892		dev_mac_gmac0 = sc->unit;
1893
1894	if (!gmac_common_init_done) {
1895		mac_common_init();
1896		gmac_common_init_done = 1;
1897		callout_init(&xlr_tx_stop_bkp, 1);
1898		callout_reset(&xlr_tx_stop_bkp, hz, xlr_tx_q_wakeup, NULL);
1899		callout_init(&rge_dbg_count, 1);
1900		//callout_reset(&rge_dbg_count, hz, xlr_debug_count, NULL);
1901	}
1902	if ((ret = rmi_xlr_mac_open(sc)) == -1) {
1903		RGE_LOCK_DESTROY(sc);
1904		goto out;
1905	}
1906out:
1907	if (ret < 0) {
1908		device_printf(dev, "error - skipping\n");
1909	}
1910	return ret;
1911}
1912
1913static void
1914rge_reset(struct rge_softc *sc)
1915{
1916}
1917
1918static int
1919rge_detach(device_t dev)
1921{
1922#ifdef FREEBSD_MAC_NOT_YET
1923	struct rge_softc *sc;
1924	struct ifnet *ifp;
1925
1926	sc = device_get_softc(dev);
1927	ifp = sc->rge_ifp;
1928
1929	RGE_LOCK(sc);
1930	rge_stop(sc);
1931	rge_reset(sc);
1932	RGE_UNLOCK(sc);
1933
1934	ether_ifdetach(ifp);
1935
1936	if (sc->rge_tbi) {
1937		ifmedia_removeall(&sc->rge_ifmedia);
1938	} else {
1939		bus_generic_detach(dev);
1940		device_delete_child(dev, sc->rge_miibus);
1941	}
1942
1943	rge_release_resources(sc);
1944
1945#endif				/* FREEBSD_MAC_NOT_YET */
1946	return (0);
1947}
1948static int
1949rge_suspend(device_t dev)
1950{
1951	struct rge_softc *sc;
1952
1953	sc = device_get_softc(dev);
1954	RGE_LOCK(sc);
1955	rge_stop(sc);
1956	RGE_UNLOCK(sc);
1957
1958	return 0;
1959}
1960
1961static int
1962rge_resume(device_t dev)
1963{
1964	panic("rge_resume(): unimplemented\n");
1965	return 0;
1966}
1967
1968static void
1969rge_release_resources(struct rge_softc *sc)
1970{
1971
1972	if (sc->rge_ifp != NULL)
1973		if_free(sc->rge_ifp);
1974
1975	if (mtx_initialized(&sc->rge_mtx))	/* XXX */
1976		RGE_LOCK_DESTROY(sc);
1977}
1978uint32_t gmac_rx_fail[32];
1979uint32_t gmac_rx_pass[32];
1980
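/*
 * Receive one packet.  The owning mbuf pointer and a magic cookie (0xf00bad)
 * live in the cache line just before the packet data, presumably stored
 * there when the free buffer was handed to the MAC; read them back through
 * XKPHYS, sanity-check the cookie and pass the mbuf up the stack.
 */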
1981static void
1982rge_rx(struct rge_softc *sc, vm_paddr_t paddr, int len)
1983{
1984	struct mbuf *m;
1985	struct ifnet *ifp = sc->rge_ifp;
1986	uint64_t mag;
1987	uint32_t sr;
1988	/*
1989	 * On 32-bit machines we use XKPHYS to get the values stored with
1990	 * the mbuf, so we need to explicitly enable KX.  Disable interrupts
1991	 * while KX is enabled to prevent this setting from leaking to other code.
1992	 */
1993	sr = xlr_enable_kx();
1994	m = (struct mbuf *)(intptr_t)xlr_paddr_ld(paddr - XLR_CACHELINE_SIZE);
1995	mag = xlr_paddr_ld(paddr - XLR_CACHELINE_SIZE + sizeof(uint64_t));
1996	xlr_restore_kx(sr);
1997	if (mag != 0xf00bad) {
1998		/* somebody else's packet; error - FIXME in initialization */
1999		printf("cpu %d: *ERROR* Not my packet paddr %p\n",
2000		    xlr_cpu_id(), (void *)paddr);
2001		return;
2002	}
2003	/* align the data */
2004	m->m_data += BYTE_OFFSET;
2005	m->m_pkthdr.len = m->m_len = len;
2006	m->m_pkthdr.rcvif = ifp;
2007
2008#ifdef DUMP_PACKETS
2009	{
2010		int i = 0;
2011		unsigned char *buf = (unsigned char *)m->m_data;
2012
2013		printf("Rx Packet: length=%d\n", len);
2014		for (i = 0; i < 64; i++) {
2015			if (i && (i % 16) == 0)
2016				printf("\n");
2017			printf("%02x ", buf[i]);
2018		}
2019		printf("\n");
2020	}
2021#endif
2022	ifp->if_ipackets++;
2023	(*ifp->if_input) (ifp, m);
2024}
2025
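/*
 * Error/MDIO interrupt handler.  On MDInt, poll every GMAC PHY (register 26
 * is presumed to be the PHY's vendor-specific interrupt status register) and
 * re-negotiate the link speed; any other cause is reported as an error
 * interrupt.  All pending bits in R_INTREG are then acknowledged.
 */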
2026static void
2027rge_intr(void *arg)
2028{
2029	struct rge_softc *sc = (struct rge_softc *)arg;
2030	struct driver_data *priv = &(sc->priv);
2031	xlr_reg_t *mmio = priv->mmio;
2032	uint32_t intreg = xlr_read_reg(mmio, R_INTREG);
2033
2034	if (intreg & (1 << O_INTREG__MDInt)) {
2035		uint32_t phy_int_status = 0;
2036		int i = 0;
2037
2038		for (i = 0; i < XLR_MAX_MACS; i++) {
2039			struct rge_softc *phy_dev = NULL;
2040			struct driver_data *phy_priv = NULL;
2041
2042			phy_dev = dev_mac[i];
2043			if (phy_dev == NULL)
2044				continue;
2045
2046			phy_priv = &phy_dev->priv;
2047
2048			if (phy_priv->type == XLR_XGMAC)
2049				continue;
2050
2051			phy_int_status = rge_mii_read_internal(phy_priv->mii_mmio,
2052			    phy_priv->phy_addr, 26);
2053			printf("rge%d: Phy addr %d, MII MMIO %lx status %x\n", phy_priv->instance,
2054			    (int)phy_priv->phy_addr, (u_long)phy_priv->mii_mmio, phy_int_status);
2055			rmi_xlr_gmac_config_speed(phy_priv);
2056		}
2057	} else {
2058		printf("[%s]: mac type = %d, instance %d error "
2059		    "interrupt: INTREG = 0x%08x\n",
2060		    __func__, priv->type, priv->instance, intreg);
2061	}
2062
2063	/* clear all interrupts and hope to make progress */
2064	xlr_write_reg(mmio, R_INTREG, 0xffffffff);
2065
2066	/* (not yet) on A0 and B0, xgmac interrupts are routed only to xgs_1 irq */
2067	if ((xlr_revision() < 2) && (priv->type == XLR_XGMAC)) {
2068		struct rge_softc *xgs0_dev = dev_mac[dev_mac_xgs0];
2069		struct driver_data *xgs0_priv = &xgs0_dev->priv;
2070		xlr_reg_t *xgs0_mmio = xgs0_priv->mmio;
2071		uint32_t xgs0_intreg = xlr_read_reg(xgs0_mmio, R_INTREG);
2072
2073		if (xgs0_intreg) {
2074			printf("[%s]: mac type = %d, instance %d error "
2075			    "interrupt: INTREG = 0x%08x\n",
2076			    __func__, xgs0_priv->type, xgs0_priv->instance, xgs0_intreg);
2077
2078			xlr_write_reg(xgs0_mmio, R_INTREG, 0xffffffff);
2079		}
2080	}
2081}
2082
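/*
 * Transmit as many queued packets as this core has free P2D descriptors
 * for.  If rmi_xlr_mac_xmit() cannot accept a packet, it is prepended back
 * onto the send queue and IFF_DRV_OACTIVE is set; the xlr_tx_q_wakeup()
 * callout clears the flag once descriptors become available again.
 */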
2083static void
2084rge_start_locked(struct ifnet *ifp, int threshold)
2085{
2086	struct rge_softc *sc = ifp->if_softc;
2087	struct mbuf *m = NULL;
2088	int prepend_pkt = 0;
2089	int i = 0;
2090	struct p2d_tx_desc *tx_desc = NULL;
2091	int cpu = xlr_core_id();
2092	uint32_t vcpu = xlr_cpu_id();
2093
2094	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
2095		return;
2096
2097	for (i = 0; i < xlr_tot_avail_p2d[cpu]; i++) {
2098		if (IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2099			return;
2100		tx_desc = get_p2d_desc();
2101		if (!tx_desc) {
2102			xlr_rge_get_p2d_failed[vcpu]++;
2103			return;
2104		}
2105		/* Grab a packet off the queue. */
2106		IFQ_DEQUEUE(&ifp->if_snd, m);
2107		if (m == NULL) {
2108			free_p2d_desc(tx_desc);
2109			return;
2110		}
2111		prepend_pkt = rmi_xlr_mac_xmit(m, sc, 0, tx_desc);
2112
2113		if (prepend_pkt) {
2114			xlr_rge_tx_prepend[vcpu]++;
2115			IF_PREPEND(&ifp->if_snd, m);
2116			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2117			return;
2118		} else {
2119			ifp->if_opackets++;
2120			xlr_rge_tx_done[vcpu]++;
2121		}
2122	}
2123}
2124
2125static void
2126rge_start(struct ifnet *ifp)
2127{
2128	rge_start_locked(ifp, RGE_TX_Q_SIZE);
2129}
2130
2131static int
2132rge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
2133{
2134	struct rge_softc *sc = ifp->if_softc;
2135	struct ifreq *ifr = (struct ifreq *)data;
2136	int mask, error = 0;
2137
2138	/* struct mii_data *mii; */
2139	switch (command) {
2140	case SIOCSIFMTU:
2141		ifp->if_mtu = ifr->ifr_mtu;
2142		error = rmi_xlr_mac_change_mtu(sc, ifr->ifr_mtu);
2143		break;
2144	case SIOCSIFFLAGS:
2145
2146		RGE_LOCK(sc);
2147		if (ifp->if_flags & IFF_UP) {
2148			/*
2149			 * If only the state of the PROMISC flag changed,
2150			 * then just use the 'set promisc mode' command
2151			 * instead of reinitializing the entire NIC. Doing a
2152			 * full re-init means reloading the firmware and
2153			 * waiting for it to start up, which may take a
2154			 * second or two.  Similarly for ALLMULTI.
2155			 */
2156			if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
2157			    ifp->if_flags & IFF_PROMISC &&
2158			    !(sc->flags & IFF_PROMISC)) {
2159				sc->flags |= IFF_PROMISC;
2160				xlr_mac_set_rx_mode(sc);
2161			} else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
2162				    !(ifp->if_flags & IFF_PROMISC) &&
2163			    sc->flags & IFF_PROMISC) {
2164				sc->flags &= ~IFF_PROMISC;
2165				xlr_mac_set_rx_mode(sc);
2166			} else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
2167			    (ifp->if_flags ^ sc->flags) & IFF_ALLMULTI) {
2168				rmi_xlr_mac_set_multicast_list(sc);
2169			} else
2170				xlr_mac_set_rx_mode(sc);
2171		} else {
2172			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2173				xlr_mac_set_rx_mode(sc);
2174			}
2175		}
2176		sc->flags = ifp->if_flags;
2177		RGE_UNLOCK(sc);
2178		error = 0;
2179		break;
2180	case SIOCADDMULTI:
2181	case SIOCDELMULTI:
2182		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2183			RGE_LOCK(sc);
2184			rmi_xlr_mac_set_multicast_list(sc);
2185			RGE_UNLOCK(sc);
2186			error = 0;
2187		}
2188		break;
2189	case SIOCSIFMEDIA:
2190	case SIOCGIFMEDIA:
2191		error = ifmedia_ioctl(ifp, ifr,
2192		    &sc->rge_mii.mii_media, command);
2193		break;
2194	case SIOCSIFCAP:
2195		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2196		ifp->if_hwassist = 0;
2197		break;
2198	default:
2199		error = ether_ioctl(ifp, command, data);
2200		break;
2201	}
2202
2203	return (error);
2204}
2205
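/*
 * if_init handler: mark the interface running and enable the MAC.  The
 * receive free-in ring and receive mode were already set up by
 * rmi_xlr_mac_open() during attach.
 */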
2206static void
2207rge_init(void *addr)
2208{
2209	struct rge_softc *sc = (struct rge_softc *)addr;
2210	struct ifnet *ifp;
2211	struct driver_data *priv = &(sc->priv);
2212
2213	ifp = sc->rge_ifp;
2214
2215	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2216		return;
2217	ifp->if_drv_flags |= IFF_DRV_RUNNING;
2218	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2219
2220	rmi_xlr_mac_set_enable(priv, 1);
2221}
2222
2223static void
2224rge_stop(struct rge_softc *sc)
2225{
2226	rmi_xlr_mac_close(sc);
2227}
2228
2229static int
2230rge_shutdown(device_t dev)
2231{
2232	struct rge_softc *sc;
2233
2234	sc = device_get_softc(dev);
2235
2236	RGE_LOCK(sc);
2237	rge_stop(sc);
2238	rge_reset(sc);
2239	RGE_UNLOCK(sc);
2240
2241	return (0);
2242}
2243
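/*
 * Bring the MAC up: post the free receive buffers, program the receive
 * filter, and configure speed, duplex and flow control.  Once the last
 * GMAC port is opened, R_INTMASK is written on every port so that only
 * instance 0 delivers MDIO (MDInt) interrupts.  The MAC itself is left
 * disabled here; rge_init() enables it when the interface is brought up.
 */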
2244static int
2245rmi_xlr_mac_open(struct rge_softc *sc)
2246{
2247	struct driver_data *priv = &(sc->priv);
2248	int i;
2249
2250	dbg_msg("IN\n");
2251
2252	if (rmi_xlr_mac_fill_rxfr(sc)) {
2253		return -1;
2254	}
2255	mtx_lock_spin(&priv->lock);
2256
2257	xlr_mac_set_rx_mode(sc);
2258
2259	if (sc->unit == xlr_board_info.gmacports - 1) {
2260		struct rge_softc *tmp = NULL;
2261		printf("Enabling MDIO interrupts\n");
2262
2263		for (i = 0; i < xlr_board_info.gmacports; i++) {
2264			tmp = dev_mac[i];
2265			if (tmp)
2266				xlr_write_reg(tmp->priv.mmio, R_INTMASK,
2267				    ((tmp->priv.instance == 0) << O_INTMASK__MDInt));
2268		}
2269	}
2270	/*
2271	 * Configure the speed, duplex, and flow control
2272	 */
2273	rmi_xlr_mac_set_speed(priv, priv->speed);
2274	rmi_xlr_mac_set_duplex(priv, priv->duplex, priv->flow_ctrl);
2275	rmi_xlr_mac_set_enable(priv, 0);
2276
2277	mtx_unlock_spin(&priv->lock);
2278
2279	for (i = 0; i < 8; i++) {
2280		priv->frin_to_be_sent[i] = 0;
2281	}
2282
2283	return 0;
2284}
2285
2286/**********************************************************************
2287 **********************************************************************/
2288static int
2289rmi_xlr_mac_close(struct rge_softc *sc)
2290{
2291	struct driver_data *priv = &(sc->priv);
2292
2293	mtx_lock_spin(&priv->lock);
2294
2295	/*
2296	 * There may be mbufs left over in the ring as well as in the free-in
2297	 * queue; they will be reused the next time open is called.
2298	 */
2299
2300	rmi_xlr_mac_set_enable(priv, 0);
2301
2302	xlr_inc_counter(NETIF_STOP_Q);
2303	port_inc_counter(priv->instance, PORT_STOPQ);
2304
2305	mtx_unlock_spin(&priv->lock);
2306
2307	return 0;
2308}
2309
2310/**********************************************************************
2311 **********************************************************************/
2312static struct rge_softc_stats *
2313rmi_xlr_mac_get_stats(struct rge_softc *sc)
2314{
2315	struct driver_data *priv = &(sc->priv);
2316
2317	/* unsigned long flags; */
2318
2319	mtx_lock_spin(&priv->lock);
2320
2321	/* XXX update other stats here */
2322
2323	mtx_unlock_spin(&priv->lock);
2324
2325	return &priv->stats;
2326}
2327
2328/**********************************************************************
2329 **********************************************************************/
2330static void
2331rmi_xlr_mac_set_multicast_list(struct rge_softc *sc)
2332{
2333}
2334
2335/**********************************************************************
2336 **********************************************************************/
2337static int
2338rmi_xlr_mac_change_mtu(struct rge_softc *sc, int new_mtu)
2339{
2340	struct driver_data *priv = &(sc->priv);
2341
2342	if ((new_mtu > 9500) || (new_mtu < 64)) {
2343		return (EINVAL);
2344	}
2345	mtx_lock_spin(&priv->lock);
2346
2347	sc->mtu = new_mtu;
2348
2349	/* Disable MAC TX/RX */
2350	rmi_xlr_mac_set_enable(priv, 0);
2351
2352	/* Flush RX FR IN */
2353	/* Flush TX IN */
2354	rmi_xlr_mac_set_enable(priv, 1);
2355
2356	mtx_unlock_spin(&priv->lock);
2357	return 0;
2358}
2359
2360/**********************************************************************
2361 **********************************************************************/
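/*
 * On the first open (while init_frin_desc is set), post MAX_NUM_DESC free
 * receive buffers to the MAC's free-in ring; later calls are no-ops since
 * the buffers are reused.
 */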
2362static int
2363rmi_xlr_mac_fill_rxfr(struct rge_softc *sc)
2364{
2365	struct driver_data *priv = &(sc->priv);
2366	int i;
2367	int ret = 0;
2368	void *ptr;
2369
2370	dbg_msg("\n");
2371	if (!priv->init_frin_desc)
2372		return ret;
2373	priv->init_frin_desc = 0;
2374
2375	dbg_msg("\n");
2376	for (i = 0; i < MAX_NUM_DESC; i++) {
2377		ptr = get_buf();
2378		if (!ptr) {
2379			ret = ENOMEM;
2380			break;
2381		}
2382		/* Send the free Rx desc to the MAC */
2383		xlr_mac_send_fr(priv, vtophys(ptr), MAX_FRAME_SIZE);
2384	}
2385
2386	return ret;
2387}
2388
2389/**********************************************************************
2390 **********************************************************************/
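/*
 * Allocate a cache-line aligned, physically contiguous spill buffer and
 * program its address into the given register pair: reg_start_0 takes bits
 * 5..36 of the physical address and reg_start_1 the top three bits, so the
 * pair holds a 40-bit address in 32-byte units.
 */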
2391static __inline__ void *
2392rmi_xlr_config_spill(xlr_reg_t * mmio,
2393    int reg_start_0, int reg_start_1,
2394    int reg_size, int size)
2395{
2396	uint32_t spill_size = size;
2397	void *spill = NULL;
2398	uint64_t phys_addr = 0;
2399
2400
2401	spill = contigmalloc((spill_size + XLR_CACHELINE_SIZE), M_DEVBUF,
2402	    M_NOWAIT | M_ZERO, 0, 0xffffffff, XLR_CACHELINE_SIZE, 0);
2403	if (!spill || ((vm_offset_t)spill & (XLR_CACHELINE_SIZE - 1))) {
2404		panic("Unable to allocate memory for spill area!\n");
2405	}
2406	phys_addr = vtophys(spill);
2407	dbg_msg("Allocate spill %d bytes at %jx\n", size, (uintmax_t)phys_addr);
2408	xlr_write_reg(mmio, reg_start_0, (phys_addr >> 5) & 0xffffffff);
2409	xlr_write_reg(mmio, reg_start_1, (phys_addr >> 37) & 0x07);
2410	xlr_write_reg(mmio, reg_size, spill_size);
2411
2412	return spill;
2413}
2414
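/*
 * Configure the spill areas for this GMAC block: off-chip backing store for
 * the free-in, free-out and class FIFOs, presumably used when the on-chip
 * queues overflow.  Only the first port of each four-port GMAC block owns
 * the block-level registers; the other ports simply mark themselves
 * configured.
 */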
2415static void
2416rmi_xlr_config_spill_area(struct driver_data *priv)
2417{
2418	/*
2419	 * If driver initialization is done in parallel on multiple CPUs,
2420	 * spill_configured needs synchronization.
2421	 */
2422	if (priv->spill_configured)
2423		return;
2424
2425	if (priv->type == XLR_GMAC && priv->instance % 4 != 0) {
2426		priv->spill_configured = 1;
2427		return;
2428	}
2429	priv->spill_configured = 1;
2430
2431	priv->frin_spill =
2432	    rmi_xlr_config_spill(priv->mmio,
2433	    R_REG_FRIN_SPILL_MEM_START_0,
2434	    R_REG_FRIN_SPILL_MEM_START_1,
2435	    R_REG_FRIN_SPILL_MEM_SIZE,
2436	    MAX_FRIN_SPILL *
2437	    sizeof(struct fr_desc));
2438
2439	priv->class_0_spill =
2440	    rmi_xlr_config_spill(priv->mmio,
2441	    R_CLASS0_SPILL_MEM_START_0,
2442	    R_CLASS0_SPILL_MEM_START_1,
2443	    R_CLASS0_SPILL_MEM_SIZE,
2444	    MAX_CLASS_0_SPILL *
2445	    sizeof(union rx_tx_desc));
2446	priv->class_1_spill =
2447	    rmi_xlr_config_spill(priv->mmio,
2448	    R_CLASS1_SPILL_MEM_START_0,
2449	    R_CLASS1_SPILL_MEM_START_1,
2450	    R_CLASS1_SPILL_MEM_SIZE,
2451	    MAX_CLASS_1_SPILL *
2452	    sizeof(union rx_tx_desc));
2453
2454	priv->frout_spill =
2455	    rmi_xlr_config_spill(priv->mmio, R_FROUT_SPILL_MEM_START_0,
2456	    R_FROUT_SPILL_MEM_START_1,
2457	    R_FROUT_SPILL_MEM_SIZE,
2458	    MAX_FROUT_SPILL *
2459	    sizeof(struct fr_desc));
2460
2461	priv->class_2_spill =
2462	    rmi_xlr_config_spill(priv->mmio,
2463	    R_CLASS2_SPILL_MEM_START_0,
2464	    R_CLASS2_SPILL_MEM_START_1,
2465	    R_CLASS2_SPILL_MEM_SIZE,
2466	    MAX_CLASS_2_SPILL *
2467	    sizeof(union rx_tx_desc));
2468	priv->class_3_spill =
2469	    rmi_xlr_config_spill(priv->mmio,
2470	    R_CLASS3_SPILL_MEM_START_0,
2471	    R_CLASS3_SPILL_MEM_START_1,
2472	    R_CLASS3_SPILL_MEM_SIZE,
2473	    MAX_CLASS_3_SPILL *
2474	    sizeof(union rx_tx_desc));
2475	priv->spill_configured = 1;
2476}
2477
2478/*****************************************************************
2479 * Write the MAC address to the XLR registers
2480 * All 4 addresses are the same for now
2481 *****************************************************************/
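/*
 * R_MAC_ADDR0 is a pair of 32-bit words: the first packs address bytes
 * 5..2 (byte 5 in the most significant position) and the second holds
 * bytes 1..0 in its upper half.  The MASK2/MASK3 registers are set to
 * all-ones, i.e. every bit of ADDR0 is significant for filtering.
 */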
2482static void
2483xlr_mac_setup_hwaddr(struct driver_data *priv)
2484{
2485	struct rge_softc *sc = priv->sc;
2486
2487	xlr_write_reg(priv->mmio, R_MAC_ADDR0,
2488	    ((sc->dev_addr[5] << 24) | (sc->dev_addr[4] << 16)
2489	    | (sc->dev_addr[3] << 8) | (sc->dev_addr[2]))
2490	    );
2491
2492	xlr_write_reg(priv->mmio, R_MAC_ADDR0 + 1,
2493	    ((sc->dev_addr[1] << 24) |
2494	    (sc->dev_addr[0] << 16)));
2495
2496	xlr_write_reg(priv->mmio, R_MAC_ADDR_MASK2, 0xffffffff);
2497
2498	xlr_write_reg(priv->mmio, R_MAC_ADDR_MASK2 + 1, 0xffffffff);
2499
2500	xlr_write_reg(priv->mmio, R_MAC_ADDR_MASK3, 0xffffffff);
2501
2502	xlr_write_reg(priv->mmio, R_MAC_ADDR_MASK3 + 1, 0xffffffff);
2503
2504	xlr_write_reg(priv->mmio, R_MAC_FILTER_CONFIG,
2505	    (1 << O_MAC_FILTER_CONFIG__BROADCAST_EN) |
2506	    (1 << O_MAC_FILTER_CONFIG__ALL_MCAST_EN) |
2507	    (1 << O_MAC_FILTER_CONFIG__MAC_ADDR0_VALID)
2508	    );
2509}
2510
2511/*****************************************************************
2512 * Read the MAC address from the XLR registers
2513 * All 4 addresses are the same for now
2514 *****************************************************************/
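/*
 * The boot loader provides a single base MAC address; each port derives its
 * own address by adding its instance number to the last octet.
 */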
2515static void
2516xlr_mac_get_hwaddr(struct rge_softc *sc)
2517{
2518	struct driver_data *priv = &(sc->priv);
2519
2520	sc->dev_addr[0] = (xlr_boot1_info.mac_addr >> 40) & 0xff;
2521	sc->dev_addr[1] = (xlr_boot1_info.mac_addr >> 32) & 0xff;
2522	sc->dev_addr[2] = (xlr_boot1_info.mac_addr >> 24) & 0xff;
2523	sc->dev_addr[3] = (xlr_boot1_info.mac_addr >> 16) & 0xff;
2524	sc->dev_addr[4] = (xlr_boot1_info.mac_addr >> 8) & 0xff;
2525	sc->dev_addr[5] = ((xlr_boot1_info.mac_addr >> 0) & 0xff) + priv->instance;
2526}
2527
2528/*****************************************************************
2529 * Mac Module Initialization
2530 *****************************************************************/
2531static void
2532mac_common_init(void)
2533{
2534	init_p2d_allocation();
2535	init_tx_ring();
2536
2537	if (xlr_board_info.is_xls) {
2538		if (register_msgring_handler(MSGRNG_STNID_GMAC,
2539		   MSGRNG_STNID_GMAC + 1, rmi_xlr_mac_msgring_handler,
2540		   NULL)) {
2541			panic("Couldn't register msgring handler\n");
2542		}
2543		if (register_msgring_handler(MSGRNG_STNID_GMAC1,
2544		    MSGRNG_STNID_GMAC1 + 1, rmi_xlr_mac_msgring_handler,
2545		    NULL)) {
2546			panic("Couldn't register msgring handler\n");
2547		}
2548	} else {
2549		if (register_msgring_handler(MSGRNG_STNID_GMAC,
2550		   MSGRNG_STNID_GMAC + 1, rmi_xlr_mac_msgring_handler,
2551		   NULL)) {
2552			panic("Couldn't register msgring handler\n");
2553		}
2554	}
2555
2556	/*
2557	 * Not yet: if (xlr_board_atx_ii()) {
2558	 *	if (register_msgring_handler(TX_STN_XGS_0, rmi_xlr_mac_msgring_handler, NULL))
2559	 *		panic("Couldn't register msgring handler for TX_STN_XGS_0\n");
2560	 *	if (register_msgring_handler(TX_STN_XGS_1, rmi_xlr_mac_msgring_handler, NULL))
2561	 *		panic("Couldn't register msgring handler for TX_STN_XGS_1\n");
2562	 * }
2563	 */
2564}
2565