1/*-
2 * Copyright (c) 2003-2012 Broadcom Corporation
3 * All Rights Reserved
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in
13 *    the documentation and/or other materials provided with the
14 *    distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
23 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
24 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
25 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
26 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29#include <sys/cdefs.h>
30__FBSDID("$FreeBSD: stable/10/sys/mips/nlm/dev/net/xlpge.c 314667 2017-03-04 13:03:31Z avg $");
31#include <sys/endian.h>
32#include <sys/systm.h>
33#include <sys/sockio.h>
34#include <sys/param.h>
35#include <sys/lock.h>
36#include <sys/mutex.h>
37#include <sys/proc.h>
38#include <sys/limits.h>
39#include <sys/bus.h>
40#include <sys/mbuf.h>
41#include <sys/malloc.h>
42#include <sys/kernel.h>
43#include <sys/module.h>
44#include <sys/socket.h>
45#define __RMAN_RESOURCE_VISIBLE
46#include <sys/rman.h>
47#include <sys/taskqueue.h>
48
49#include <net/if.h>
50#include <net/if_arp.h>
51#include <net/ethernet.h>
52#include <net/if_dl.h>
53#include <net/if_media.h>
54#include <net/bpf.h>
55#include <net/if_types.h>
56#include <net/if_vlan_var.h>
57
58#include <dev/pci/pcivar.h>
59
60#include <netinet/in_systm.h>
61#include <netinet/in.h>
62#include <netinet/ip.h>
63
64#include <vm/vm.h>
65#include <vm/pmap.h>
66#include <vm/uma.h>
67
68#include <machine/reg.h>
69#include <machine/cpu.h>
70#include <machine/mips_opcode.h>
71#include <machine/asm.h>
72#include <machine/cpuregs.h>
73
74#include <machine/param.h>
75#include <machine/intr_machdep.h>
76#include <machine/clock.h>	/* for DELAY */
77#include <machine/bus.h>
78#include <machine/resource.h>
79#include <mips/nlm/hal/haldefs.h>
80#include <mips/nlm/hal/iomap.h>
81#include <mips/nlm/hal/mips-extns.h>
82#include <mips/nlm/hal/cop2.h>
83#include <mips/nlm/hal/fmn.h>
84#include <mips/nlm/hal/sys.h>
85#include <mips/nlm/hal/nae.h>
86#include <mips/nlm/hal/mdio.h>
87#include <mips/nlm/hal/sgmii.h>
88#include <mips/nlm/hal/xaui.h>
89#include <mips/nlm/hal/poe.h>
90#include <ucore_app_bin.h>
91#include <mips/nlm/hal/ucore_loader.h>
92#include <mips/nlm/xlp.h>
93#include <mips/nlm/board.h>
94#include <mips/nlm/msgring.h>
95
96#include <dev/mii/mii.h>
97#include <dev/mii/miivar.h>
98#include "miidevs.h"
99#include <dev/mii/brgphyreg.h>
100#include "miibus_if.h"
101#include <sys/sysctl.h>
102
103#include <mips/nlm/dev/net/xlpge.h>
104
105/*#define XLP_DRIVER_LOOPBACK*/
106
107static struct nae_port_config nae_port_config[64];
108
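/*
 * Per-class words handed to nlm_setup_poe_class_config() for the
 * MAX_POE_CLASSES POE classes.  The values appear to be 24-bit patterns of
 * increasing density (every third bit, pairwise combinations of those, then
 * all bits set), presumably giving higher classes a larger share of the
 * packet distribution.
 */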
109int poe_cl_tbl[MAX_POE_CLASSES] = {
110	0x0, 0x249249,
111	0x492492, 0x6db6db,
112	0x924924, 0xb6db6d,
113	0xdb6db6, 0xffffff
114};
115
116/* #define DUMP_PACKET */
117
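/*
 * Load a 64-bit word directly from a physical address.  The NAE hands us
 * physical addresses in free-back and receive messages; the stashed metadata
 * is read back through the MIPS XKPHYS cacheable-coherent window (CCA 3, the
 * 0x98... prefix) instead of mapping the page.
 */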
118static uint64_t
119nlm_paddr_ld(uint64_t paddr)
120{
121	uint64_t xkaddr = 0x9800000000000000 | paddr;
122
123	return (nlm_load_dword_daddr(xkaddr));
124}
125
126struct nlm_xlp_portdata ifp_ports[64];
127static uma_zone_t nl_tx_desc_zone;
128
/* This implementation registers the following tree of devices:
131 *                      pcibus
132 *                       |
133 *                      xlpnae (1 instance - virtual entity)
134 *                       |
135 *                     xlpge
136 *      (18 sgmii / 4 xaui / 2 interlaken instances)
137 *                       |
138 *                    miibus
139 */
140
141static int nlm_xlpnae_probe(device_t);
142static int nlm_xlpnae_attach(device_t);
143static int nlm_xlpnae_detach(device_t);
144static int nlm_xlpnae_suspend(device_t);
145static int nlm_xlpnae_resume(device_t);
146static int nlm_xlpnae_shutdown(device_t);
147
148static device_method_t nlm_xlpnae_methods[] = {
149	/* Methods from the device interface */
150	DEVMETHOD(device_probe,		nlm_xlpnae_probe),
151	DEVMETHOD(device_attach,	nlm_xlpnae_attach),
152	DEVMETHOD(device_detach,	nlm_xlpnae_detach),
153	DEVMETHOD(device_suspend,	nlm_xlpnae_suspend),
154	DEVMETHOD(device_resume,	nlm_xlpnae_resume),
155	DEVMETHOD(device_shutdown,	nlm_xlpnae_shutdown),
156
157	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
158
159	DEVMETHOD_END
160};
161
162static driver_t nlm_xlpnae_driver = {
163	"xlpnae",
164	nlm_xlpnae_methods,
165	sizeof(struct nlm_xlpnae_softc)
166};
167
168static devclass_t nlm_xlpnae_devclass;
169
170static int nlm_xlpge_probe(device_t);
171static int nlm_xlpge_attach(device_t);
172static int nlm_xlpge_detach(device_t);
173static int nlm_xlpge_suspend(device_t);
174static int nlm_xlpge_resume(device_t);
175static int nlm_xlpge_shutdown(device_t);
176
177/* mii override functions */
178static int nlm_xlpge_mii_read(struct device *, int, int);
179static int nlm_xlpge_mii_write(struct device *, int, int, int);
180static void nlm_xlpge_mii_statchg(device_t);
181
182static device_method_t nlm_xlpge_methods[] = {
183	/* Methods from the device interface */
184	DEVMETHOD(device_probe,		nlm_xlpge_probe),
185	DEVMETHOD(device_attach,	nlm_xlpge_attach),
186	DEVMETHOD(device_detach,	nlm_xlpge_detach),
187	DEVMETHOD(device_suspend,	nlm_xlpge_suspend),
188	DEVMETHOD(device_resume,	nlm_xlpge_resume),
189	DEVMETHOD(device_shutdown,	nlm_xlpge_shutdown),
190
	/*
	 * miibus interface methods: PHY register access and link
	 * state change callbacks used by the attached miibus.
	 */
194	DEVMETHOD(miibus_readreg,	nlm_xlpge_mii_read),
195	DEVMETHOD(miibus_writereg,	nlm_xlpge_mii_write),
196	DEVMETHOD(miibus_statchg,	nlm_xlpge_mii_statchg),
197
198	/* Terminate method list */
199	DEVMETHOD_END
200};
201
202static driver_t nlm_xlpge_driver = {
203	"xlpge",
204	nlm_xlpge_methods,
205	sizeof(struct nlm_xlpge_softc)
206};
207
208static devclass_t nlm_xlpge_devclass;
209
210DRIVER_MODULE(xlpnae, pci, nlm_xlpnae_driver, nlm_xlpnae_devclass, 0, 0);
211DRIVER_MODULE(xlpge, xlpnae, nlm_xlpge_driver, nlm_xlpge_devclass, 0, 0);
212DRIVER_MODULE(miibus, xlpge, miibus_driver, miibus_devclass, 0, 0);
213
214MODULE_DEPEND(pci, xlpnae, 1, 1, 1);
215MODULE_DEPEND(xlpnae, xlpge, 1, 1, 1);
216MODULE_DEPEND(xlpge, ether, 1, 1, 1);
217MODULE_DEPEND(xlpge, miibus, 1, 1, 1);
218
219#define SGMII_RCV_CONTEXT_WIDTH 8
220
221/* prototypes */
222static void nlm_xlpge_msgring_handler(int vc, int size,
223    int code, int srcid, struct nlm_fmn_msg *msg, void *data);
224static void nlm_xlpge_submit_rx_free_desc(struct nlm_xlpge_softc *sc, int num);
225static void nlm_xlpge_init(void *addr);
226static void nlm_xlpge_port_disable(struct nlm_xlpge_softc *sc);
227static void nlm_xlpge_port_enable(struct nlm_xlpge_softc *sc);
228
229/* globals */
230int dbg_on = 1;
231int cntx2port[524];
232
233static __inline void
234atomic_incr_long(unsigned long *addr)
235{
236	atomic_add_long(addr, 1);
237}
238
239/*
240 * xlpnae driver implementation
241 */
242static int
243nlm_xlpnae_probe(device_t dev)
244{
245	if (pci_get_vendor(dev) != PCI_VENDOR_NETLOGIC ||
246	    pci_get_device(dev) != PCI_DEVICE_ID_NLM_NAE)
247		return (ENXIO);
248
249	return (BUS_PROBE_DEFAULT);
250}
251
252static void
253nlm_xlpnae_print_frin_desc_carving(struct nlm_xlpnae_softc *sc)
254{
255	int intf;
256	uint32_t value;
257	int start, size;
258
259	/* XXXJC: use max_ports instead of 20 ? */
260	for (intf = 0; intf < 20; intf++) {
261		nlm_write_nae_reg(sc->base, NAE_FREE_IN_FIFO_CFG,
262		    (0x80000000 | intf));
263		value = nlm_read_nae_reg(sc->base, NAE_FREE_IN_FIFO_CFG);
		size = 2 * ((value >> 20) & 0x3ff);
		start = 2 * ((value >> 8) & 0x1ff);
		device_printf(sc->xlpnae_dev,
		    "Free-in FIFO carving for interface %d: start %d, size %d\n",
		    intf, start, size);
	}
267}
268
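/*
 * Program the egress side of one hardware port: the TX burst limit, the
 * range of TX DMA contexts owned by the port, the egress FIFO carvings and
 * credits, and one DRR scheduler map entry per channel.
 */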
269static void
270nlm_config_egress(struct nlm_xlpnae_softc *sc, int nblock,
271    int context_base, int hwport, int max_channels)
272{
273	int offset, num_channels;
274	uint32_t data;
275
276	num_channels = sc->portcfg[hwport].num_channels;
277
278	data = (2048 << 12) | (hwport << 4) | 1;
279	nlm_write_nae_reg(sc->base, NAE_TX_IF_BURSTMAX_CMD, data);
280
281	data = ((context_base + num_channels - 1) << 22) |
282	    (context_base << 12) | (hwport << 4) | 1;
283	nlm_write_nae_reg(sc->base, NAE_TX_DDR_ACTVLIST_CMD, data);
284
285	config_egress_fifo_carvings(sc->base, hwport,
286	    context_base, num_channels, max_channels, sc->portcfg);
287	config_egress_fifo_credits(sc->base, hwport,
288	    context_base, num_channels, max_channels, sc->portcfg);
289
290	data = nlm_read_nae_reg(sc->base, NAE_DMA_TX_CREDIT_TH);
291	data |= (1 << 25) | (1 << 24);
292	nlm_write_nae_reg(sc->base, NAE_DMA_TX_CREDIT_TH, data);
293
294	for (offset = 0; offset < num_channels; offset++) {
295		nlm_write_nae_reg(sc->base, NAE_TX_SCHED_MAP_CMD1,
296		    NAE_DRR_QUANTA);
297		data = (hwport << 15) | ((context_base + offset) << 5);
298		if (sc->cmplx_type[nblock] == ILC)
299			data |= (offset << 20);
300		nlm_write_nae_reg(sc->base, NAE_TX_SCHED_MAP_CMD0, data | 1);
301		nlm_write_nae_reg(sc->base, NAE_TX_SCHED_MAP_CMD0, data);
302	}
303}
304
305static int
306xlpnae_get_maxchannels(struct nlm_xlpnae_softc *sc)
307{
308	int maxchans = 0;
309	int i;
310
311	for (i = 0; i < sc->max_ports; i++) {
312		if (sc->portcfg[i].type == UNKNOWN)
313			continue;
314		maxchans += sc->portcfg[i].num_channels;
315	}
316
317	return (maxchans);
318}
319
320static void
321nlm_setup_interface(struct nlm_xlpnae_softc *sc, int nblock,
322    int port, uint32_t cur_flow_base, uint32_t flow_mask,
323    int max_channels, int context)
324{
325	uint64_t nae_base = sc->base;
326	int mtu = 1536;			/* XXXJC: don't hard code */
327	uint32_t ucore_mask;
328
329	if (sc->cmplx_type[nblock] == XAUIC)
330		nlm_config_xaui(nae_base, nblock, mtu,
331		    mtu, sc->portcfg[port].vlan_pri_en);
332	nlm_config_freein_fifo_uniq_cfg(nae_base,
333	    port, sc->portcfg[port].free_desc_sizes);
334	nlm_config_ucore_iface_mask_cfg(nae_base,
335	    port, sc->portcfg[port].ucore_mask);
336
337	nlm_program_flow_cfg(nae_base, port, cur_flow_base, flow_mask);
338
339	if (sc->cmplx_type[nblock] == SGMIIC)
340		nlm_configure_sgmii_interface(nae_base, nblock, port, mtu, 0);
341
342	nlm_config_egress(sc, nblock, context, port, max_channels);
343
344	nlm_nae_init_netior(nae_base, sc->nblocks);
345	nlm_nae_open_if(nae_base, nblock, sc->cmplx_type[nblock], port,
346	    sc->portcfg[port].free_desc_sizes);
347
348	/*  XXXJC: check mask calculation */
349	ucore_mask = (1 << sc->nucores) - 1;
350	nlm_nae_init_ucore(nae_base, port, ucore_mask);
351}
352
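/*
 * Walk every configured port and set it up, giving each port a contiguous
 * block of per_port_num_flows flow IDs and num_channels egress contexts.
 */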
353static void
354nlm_setup_interfaces(struct nlm_xlpnae_softc *sc)
355{
	uint32_t cur_flow_base, flow_mask;
	int max_channels;
	int i, context;

	cur_flow_base = 0;
	flow_mask = nlm_get_flow_mask(sc->total_num_ports);
	/* calculate max_channels over all configured ports */
	max_channels = xlpnae_get_maxchannels(sc);

	context = 0;
372	for (i = 0; i < sc->max_ports; i++) {
373		if (sc->portcfg[i].type == UNKNOWN)
374			continue;
375		nlm_setup_interface(sc, sc->portcfg[i].block, i, cur_flow_base,
376		    flow_mask, max_channels, context);
377		cur_flow_base += sc->per_port_num_flows;
378		context += sc->portcfg[i].num_channels;
379	}
380}
381
382static void
383nlm_xlpnae_init(int node, struct nlm_xlpnae_softc *sc)
384{
385	uint64_t nae_base;
386	uint32_t ucoremask = 0;
387	uint32_t val;
388	int i;
389
390	nae_base = sc->base;
391
392	nlm_nae_flush_free_fifo(nae_base, sc->nblocks);
393	nlm_deflate_frin_fifo_carving(nae_base, sc->max_ports);
394	nlm_reset_nae(node);
395
396	for (i = 0; i < sc->nucores; i++)	/* XXXJC: code repeated below */
397		ucoremask |= (0x1 << i);
	printf("Loading microcode on ucores (mask 0x%x)\n", ucoremask);
399	nlm_ucore_load_all(nae_base, ucoremask, 1);
400
401	val = nlm_set_device_frequency(node, DFS_DEVICE_NAE, sc->freq);
	printf("Set NAE frequency to %d MHz\n", val);
403
404	nlm_mdio_reset_all(nae_base);
405
	printf("Initialize SGMII PCS for blocks 0x%x\n", sc->sgmiimask);
407	nlm_sgmii_pcs_init(nae_base, sc->sgmiimask);
408
	printf("Initialize XAUI PCS for blocks 0x%x\n", sc->xauimask);
410	nlm_xaui_pcs_init(nae_base, sc->xauimask);
411
412	/* clear NETIOR soft reset */
413	nlm_write_nae_reg(nae_base, NAE_LANE_CFG_SOFTRESET, 0x0);
414
	/* Clear the RX enable bit in RX_CONFIG */
416	val = nlm_read_nae_reg(nae_base, NAE_RX_CONFIG);
417	val &= 0xfffffffe;
418	nlm_write_nae_reg(nae_base, NAE_RX_CONFIG, val);
419
420	if (nlm_is_xlp8xx_ax() == 0) {
421		val = nlm_read_nae_reg(nae_base, NAE_TX_CONFIG);
422		val &= ~(1 << 3);
423		nlm_write_nae_reg(nae_base, NAE_TX_CONFIG, val);
424	}
425
426	nlm_setup_poe_class_config(nae_base, MAX_POE_CLASSES,
427	    sc->ncontexts, poe_cl_tbl);
428
429	nlm_setup_vfbid_mapping(nae_base);
430
431	nlm_setup_flow_crc_poly(nae_base, sc->flow_crc_poly);
432
433	nlm_setup_rx_cal_cfg(nae_base, sc->max_ports, sc->portcfg);
	/* note: xlp8xx Ax does not have Tx calendaring */
435	if (!nlm_is_xlp8xx_ax())
436		nlm_setup_tx_cal_cfg(nae_base, sc->max_ports, sc->portcfg);
437
438	nlm_setup_interfaces(sc);
439	nlm_config_poe(sc->poe_base, sc->poedv_base);
440
441	if (sc->hw_parser_en)
442		nlm_enable_hardware_parser(nae_base);
443
444	if (sc->prepad_en)
445		nlm_prepad_enable(nae_base, sc->prepad_size);
446
447	if (sc->ieee_1588_en)
448		nlm_setup_1588_timer(sc->base, sc->portcfg);
449}
450
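/*
 * Re-program the POE packet-distribution vectors once all CPUs are up.  At
 * attach time only the boot CPU is enabled for receive; this runs from
 * SYSINIT at SI_SUB_SMP (see below) and spreads distribution across every
 * online CPU.
 */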
451static void
452nlm_xlpnae_update_pde(void *dummy __unused)
453{
454	struct nlm_xlpnae_softc *sc;
455	uint32_t dv[NUM_WORDS_PER_DV];
456	device_t dev;
457	int vec;
458
459	dev = devclass_get_device(devclass_find("xlpnae"), 0);
460	sc = device_get_softc(dev);
461
462	nlm_write_poe_reg(sc->poe_base, POE_DISTR_EN, 0);
463	for (vec = 0; vec < NUM_DIST_VEC; vec++) {
464		if (nlm_get_poe_distvec(vec, dv) != 0)
465			continue;
466
467		nlm_write_poe_distvec(sc->poedv_base, vec, dv);
468	}
469	nlm_write_poe_reg(sc->poe_base, POE_DISTR_EN, 1);
470}
471
472SYSINIT(nlm_xlpnae_update_pde, SI_SUB_SMP, SI_ORDER_ANY,
473    nlm_xlpnae_update_pde, NULL);
474
/* configuration common to sgmii, xaui and interlaken goes here */
476static void
477nlm_setup_portcfg(struct nlm_xlpnae_softc *sc, struct xlp_nae_ivars *naep,
478    int block, int port)
479{
480	int i;
481	uint32_t ucore_mask = 0;
482	struct xlp_block_ivars *bp;
483	struct xlp_port_ivars *p;
484
485	bp = &(naep->block_ivars[block]);
486	p  = &(bp->port_ivars[port & 0x3]);
487
488	sc->portcfg[port].node = p->node;
489	sc->portcfg[port].block = p->block;
490	sc->portcfg[port].port = p->port;
491	sc->portcfg[port].type = p->type;
492	sc->portcfg[port].mdio_bus = p->mdio_bus;
493	sc->portcfg[port].phy_addr = p->phy_addr;
494	sc->portcfg[port].loopback_mode = p->loopback_mode;
495	sc->portcfg[port].num_channels = p->num_channels;
496	if (p->free_desc_sizes != MCLBYTES) {
497		printf("[%d, %d] Error: free_desc_sizes %d != %d\n",
498		    block, port, p->free_desc_sizes, MCLBYTES);
499		return;
500	}
501	sc->portcfg[port].free_desc_sizes = p->free_desc_sizes;
502	for (i = 0; i < sc->nucores; i++)	/* XXXJC: configure this */
503		ucore_mask |= (0x1 << i);
504	sc->portcfg[port].ucore_mask = ucore_mask;
505	sc->portcfg[port].vlan_pri_en = p->vlan_pri_en;
506	sc->portcfg[port].num_free_descs = p->num_free_descs;
507	sc->portcfg[port].iface_fifo_size = p->iface_fifo_size;
508	sc->portcfg[port].rxbuf_size = p->rxbuf_size;
509	sc->portcfg[port].rx_slots_reqd = p->rx_slots_reqd;
510	sc->portcfg[port].tx_slots_reqd = p->tx_slots_reqd;
511	sc->portcfg[port].pseq_fifo_size = p->pseq_fifo_size;
512
513	sc->portcfg[port].stg2_fifo_size = p->stg2_fifo_size;
514	sc->portcfg[port].eh_fifo_size = p->eh_fifo_size;
515	sc->portcfg[port].frout_fifo_size = p->frout_fifo_size;
516	sc->portcfg[port].ms_fifo_size = p->ms_fifo_size;
517	sc->portcfg[port].pkt_fifo_size = p->pkt_fifo_size;
518	sc->portcfg[port].pktlen_fifo_size = p->pktlen_fifo_size;
519	sc->portcfg[port].max_stg2_offset = p->max_stg2_offset;
520	sc->portcfg[port].max_eh_offset = p->max_eh_offset;
521	sc->portcfg[port].max_frout_offset = p->max_frout_offset;
522	sc->portcfg[port].max_ms_offset = p->max_ms_offset;
523	sc->portcfg[port].max_pmem_offset = p->max_pmem_offset;
524	sc->portcfg[port].stg1_2_credit = p->stg1_2_credit;
525	sc->portcfg[port].stg2_eh_credit = p->stg2_eh_credit;
526	sc->portcfg[port].stg2_frout_credit = p->stg2_frout_credit;
527	sc->portcfg[port].stg2_ms_credit = p->stg2_ms_credit;
528	sc->portcfg[port].ieee1588_inc_intg = p->ieee1588_inc_intg;
529	sc->portcfg[port].ieee1588_inc_den = p->ieee1588_inc_den;
530	sc->portcfg[port].ieee1588_inc_num = p->ieee1588_inc_num;
531	sc->portcfg[port].ieee1588_userval = p->ieee1588_userval;
532	sc->portcfg[port].ieee1588_ptpoff = p->ieee1588_ptpoff;
533	sc->portcfg[port].ieee1588_tmr1 = p->ieee1588_tmr1;
534	sc->portcfg[port].ieee1588_tmr2 = p->ieee1588_tmr2;
535	sc->portcfg[port].ieee1588_tmr3 = p->ieee1588_tmr3;
536
537	sc->total_free_desc += sc->portcfg[port].free_desc_sizes;
538	sc->total_num_ports++;
539}
540
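/*
 * Attach the virtual NAE device: read the per-node board configuration,
 * carve TX contexts and RX free-in queues for each configured port, create
 * the UMA zone for P2P transmit descriptors, register FMN message handlers
 * for the NAE and POE station ID ranges, initialize the NAE hardware and
 * finally add one xlpge child device per port.
 */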
541static int
542nlm_xlpnae_attach(device_t dev)
543{
544	struct xlp_nae_ivars	*nae_ivars;
545	struct nlm_xlpnae_softc *sc;
546	device_t tmpd;
547	uint32_t dv[NUM_WORDS_PER_DV];
548	int port, i, j, nchan, nblock, node, qstart, qnum;
549	int offset, context, txq_base, rxvcbase;
550	uint64_t poe_pcibase, nae_pcibase;
551
552	node = pci_get_slot(dev) / 8;
553	nae_ivars = &xlp_board_info.nodes[node].nae_ivars;
554
555	sc = device_get_softc(dev);
556	sc->xlpnae_dev = dev;
557	sc->node = nae_ivars->node;
558	sc->base = nlm_get_nae_regbase(sc->node);
559	sc->poe_base = nlm_get_poe_regbase(sc->node);
560	sc->poedv_base = nlm_get_poedv_regbase(sc->node);
561	sc->portcfg = nae_port_config;
562	sc->blockmask = nae_ivars->blockmask;
563	sc->ilmask = nae_ivars->ilmask;
564	sc->xauimask = nae_ivars->xauimask;
565	sc->sgmiimask = nae_ivars->sgmiimask;
566	sc->nblocks = nae_ivars->nblocks;
567	sc->freq = nae_ivars->freq;
568
569	/* flow table generation is done by CRC16 polynomial */
570	sc->flow_crc_poly = nae_ivars->flow_crc_poly;
571
572	sc->hw_parser_en = nae_ivars->hw_parser_en;
573	sc->prepad_en = nae_ivars->prepad_en;
574	sc->prepad_size = nae_ivars->prepad_size;
575	sc->ieee_1588_en = nae_ivars->ieee_1588_en;
576
577	nae_pcibase = nlm_get_nae_pcibase(sc->node);
578	sc->ncontexts = nlm_read_reg(nae_pcibase, XLP_PCI_DEVINFO_REG5);
579	sc->nucores = nlm_num_uengines(nae_pcibase);
580
581	for (nblock = 0; nblock < sc->nblocks; nblock++) {
582		sc->cmplx_type[nblock] = nae_ivars->block_ivars[nblock].type;
583		sc->portmask[nblock] = nae_ivars->block_ivars[nblock].portmask;
584	}
585
586	for (i = 0; i < sc->ncontexts; i++)
587		cntx2port[i] = 18;	/* 18 is an invalid port */
588
589	if (sc->nblocks == 5)
590		sc->max_ports = 18;	/* 8xx has a block 4 with 2 ports */
591	else
592		sc->max_ports = sc->nblocks * PORTS_PER_CMPLX;
593
594	for (i = 0; i < sc->max_ports; i++)
595		sc->portcfg[i].type = UNKNOWN; /* Port Not Present */
596	/*
597	 * Now setup all internal fifo carvings based on
598	 * total number of ports in the system
599	 */
600	sc->total_free_desc = 0;
601	sc->total_num_ports = 0;
602	port = 0;
603	context = 0;
604	txq_base = nlm_qidstart(nae_pcibase);
605	rxvcbase = txq_base + sc->ncontexts;
606	for (i = 0; i < sc->nblocks; i++) {
607		uint32_t portmask;
608
609		if ((nae_ivars->blockmask & (1 << i)) == 0) {
610			port += 4;
611			continue;
612		}
613		portmask = nae_ivars->block_ivars[i].portmask;
614		for (j = 0; j < PORTS_PER_CMPLX; j++, port++) {
615			if ((portmask & (1 << j)) == 0)
616				continue;
617			nlm_setup_portcfg(sc, nae_ivars, i, port);
618			nchan = sc->portcfg[port].num_channels;
619			for (offset = 0; offset < nchan; offset++)
620				cntx2port[context + offset] = port;
621			sc->portcfg[port].txq = txq_base + context;
622			sc->portcfg[port].rxfreeq = rxvcbase + port;
623			context += nchan;
624		}
625	}
626
627	poe_pcibase = nlm_get_poe_pcibase(sc->node);
628	sc->per_port_num_flows =
629	    nlm_poe_max_flows(poe_pcibase) / sc->total_num_ports;
630
631	/* zone for P2P descriptors */
632	nl_tx_desc_zone = uma_zcreate("NL Tx Desc",
633	    sizeof(struct xlpge_tx_desc), NULL, NULL, NULL, NULL,
634	    NAE_CACHELINE_SIZE, 0);
635
	/*
	 * NAE FMN messages have CMS source station IDs in the
	 * range qstart to qstart + qnum - 1.
	 */
639	qstart = nlm_qidstart(nae_pcibase);
640	qnum = nlm_qnum(nae_pcibase);
641	if (register_msgring_handler(qstart, qstart + qnum - 1,
642	    nlm_xlpge_msgring_handler, sc)) {
643		panic("Couldn't register NAE msgring handler\n");
644	}
645
	/*
	 * POE FMN messages have CMS source station IDs in the
	 * range qstart to qstart + qnum - 1.
	 */
649	qstart = nlm_qidstart(poe_pcibase);
650	qnum = nlm_qnum(poe_pcibase);
651	if (register_msgring_handler(qstart, qstart + qnum - 1,
652	    nlm_xlpge_msgring_handler, sc)) {
653		panic("Couldn't register POE msgring handler\n");
654	}
655
656	nlm_xlpnae_init(node, sc);
657
658	for (i = 0; i < sc->max_ports; i++) {
659		char desc[32];
660		int block, port;
661
662		if (sc->portcfg[i].type == UNKNOWN)
663			continue;
664		block = sc->portcfg[i].block;
665		port = sc->portcfg[i].port;
666		tmpd = device_add_child(dev, "xlpge", i);
667		device_set_ivars(tmpd,
668		    &(nae_ivars->block_ivars[block].port_ivars[port]));
669		sprintf(desc, "XLP NAE Port %d,%d", block, port);
670		device_set_desc_copy(tmpd, desc);
671	}
672	nlm_setup_iface_fifo_cfg(sc->base, sc->max_ports, sc->portcfg);
673	nlm_setup_rx_base_config(sc->base, sc->max_ports, sc->portcfg);
674	nlm_setup_rx_buf_config(sc->base, sc->max_ports, sc->portcfg);
675	nlm_setup_freein_fifo_cfg(sc->base, sc->portcfg);
676	nlm_program_nae_parser_seq_fifo(sc->base, sc->max_ports, sc->portcfg);
677
678	nlm_xlpnae_print_frin_desc_carving(sc);
679	bus_generic_probe(dev);
680	bus_generic_attach(dev);
681
682	/*
683	 * Enable only boot cpu at this point, full distribution comes
684	 * only after SMP is started
685	 */
686	nlm_write_poe_reg(sc->poe_base, POE_DISTR_EN, 0);
687	nlm_calc_poe_distvec(0x1, 0, 0, 0, 0x1 << XLPGE_RX_VC, dv);
688	nlm_write_poe_distvec(sc->poedv_base, 0, dv);
689	nlm_write_poe_reg(sc->poe_base, POE_DISTR_EN, 1);
690
691	return (0);
692}
693
694static int
695nlm_xlpnae_detach(device_t dev)
696{
697	/*  TODO - free zone here */
698	return (0);
699}
700
701static int
702nlm_xlpnae_suspend(device_t dev)
703{
704	return (0);
705}
706
707static int
708nlm_xlpnae_resume(device_t dev)
709{
710	return (0);
711}
712
713static int
714nlm_xlpnae_shutdown(device_t dev)
715{
716	return (0);
717}
718
719/*
720 * xlpge driver implementation
721 */
722
723static void
724nlm_xlpge_mac_set_rx_mode(struct nlm_xlpge_softc *sc)
725{
726	if (sc->if_flags & IFF_PROMISC) {
727		if (sc->type == SGMIIC)
728			nlm_nae_setup_rx_mode_sgmii(sc->base_addr,
729			    sc->block, sc->port, sc->type, 1 /* broadcast */,
730			    1/* multicast */, 0 /* pause */, 1 /* promisc */);
731		else
732			nlm_nae_setup_rx_mode_xaui(sc->base_addr,
733			    sc->block, sc->port, sc->type, 1 /* broadcast */,
734			    1/* multicast */, 0 /* pause */, 1 /* promisc */);
735	} else {
736		if (sc->type == SGMIIC)
737			nlm_nae_setup_rx_mode_sgmii(sc->base_addr,
738			    sc->block, sc->port, sc->type, 1 /* broadcast */,
739			    1/* multicast */, 0 /* pause */, 0 /* promisc */);
740		else
741			nlm_nae_setup_rx_mode_xaui(sc->base_addr,
742			    sc->block, sc->port, sc->type, 1 /* broadcast */,
743			    1/* multicast */, 0 /* pause */, 0 /* promisc */);
744	}
745}
746
747static int
748nlm_xlpge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
749{
750	struct mii_data		*mii;
751	struct nlm_xlpge_softc	*sc;
752	struct ifreq		*ifr;
753	int 			error;
754
755	sc = ifp->if_softc;
756	error = 0;
757	ifr = (struct ifreq *)data;
758
759	switch (command) {
760	case SIOCSIFFLAGS:
761		XLPGE_LOCK(sc);
762		sc->if_flags = ifp->if_flags;
763		if (ifp->if_flags & IFF_UP) {
764			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
765				nlm_xlpge_init(sc);
766			else
767				nlm_xlpge_port_enable(sc);
768			nlm_xlpge_mac_set_rx_mode(sc);
769			sc->link = NLM_LINK_UP;
770		} else {
771			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
772				nlm_xlpge_port_disable(sc);
773			sc->link = NLM_LINK_DOWN;
774		}
775		XLPGE_UNLOCK(sc);
776		error = 0;
777		break;
778	case SIOCGIFMEDIA:
779	case SIOCSIFMEDIA:
780		if (sc->mii_bus != NULL) {
781			mii = device_get_softc(sc->mii_bus);
782			error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
783			    command);
784		}
785		break;
786	default:
787		error = ether_ioctl(ifp, command, data);
788		break;
789	}
790
791	return (error);
792}
793
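/*
 * if_transmit handler.  The mbuf chain is flattened into a chain of P2D
 * fragment descriptors inside a cache-line aligned xlpge_tx_desc allocated
 * from nl_tx_desc_zone; fragments are split at page boundaries so each
 * descriptor covers physically contiguous memory.  The last three slots of
 * the descriptor block stash recovery metadata:
 *
 *	frag[0 .. pos-1]	P2D fragment descriptors (last one EOP)
 *	frag[XLP_NTXFRAGS-3]	0xf00bad magic value
 *	frag[XLP_NTXFRAGS-2]	pointer to this xlpge_tx_desc
 *	frag[XLP_NTXFRAGS-1]	pointer to the mbuf chain
 *
 * A single P2P descriptor pointing at this block is then sent to the port's
 * TX queue over the FMN; the free-back message that later arrives on
 * XLPGE_FB_VC lets nlm_xlpge_release_mbuf() free both the mbuf chain and
 * the descriptor.
 */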
794static int
795xlpge_tx(struct ifnet *ifp, struct mbuf *mbuf_chain)
796{
797	struct nlm_fmn_msg	msg;
798	struct xlpge_tx_desc	*p2p;
799	struct nlm_xlpge_softc	*sc;
800	struct mbuf	*m;
801	vm_paddr_t      paddr;
802	int		fbid, dst, pos, err;
803	int		ret = 0, tx_msgstatus, retries;
804
805	err = 0;
806	if (mbuf_chain == NULL)
807		return (0);
808
809	sc = ifp->if_softc;
810	p2p = NULL;
811	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING) ||
812	    ifp->if_drv_flags & IFF_DRV_OACTIVE) {
813		err = ENXIO;
814		goto fail;
815	}
816
	/* handle a few incoming messages on the freeback (FB) VC */
818	xlp_handle_msg_vc(1 << XLPGE_FB_VC, 2);
819
	/* the vfbid table is set up to map each CPU to its VC 3 */
821	fbid = nlm_cpuid();
822	dst = sc->txq;
823
824	pos = 0;
825	p2p = uma_zalloc(nl_tx_desc_zone, M_NOWAIT);
826	if (p2p == NULL) {
827		printf("alloc fail\n");
828		err = ENOBUFS;
829		goto fail;
830	}
831
832	for (m = mbuf_chain; m != NULL; m = m->m_next) {
833		vm_offset_t buf = (vm_offset_t) m->m_data;
834		int	len = m->m_len;
835		int	frag_sz;
836		uint64_t desc;
837
838		/*printf("m_data = %p len %d\n", m->m_data, len); */
839		while (len) {
840			if (pos == XLP_NTXFRAGS - 3) {
841				device_printf(sc->xlpge_dev,
842				    "packet defrag %d\n",
843				    m_length(mbuf_chain, NULL));
844				err = ENOBUFS; /* TODO fix error */
845				goto fail;
846			}
847			paddr = vtophys(buf);
848			frag_sz = PAGE_SIZE - (buf & PAGE_MASK);
849			if (len < frag_sz)
850				frag_sz = len;
851			desc = nae_tx_desc(P2D_NEOP, 0, 127,
852			    frag_sz, paddr);
853			p2p->frag[pos] = htobe64(desc);
854			pos++;
855			len -= frag_sz;
856			buf += frag_sz;
857		}
858	}
859
860	KASSERT(pos != 0, ("Zero-length mbuf chain?\n"));
861
862	/* Make the last one P2D EOP */
863	p2p->frag[pos-1] |= htobe64((uint64_t)P2D_EOP << 62);
864
865	/* stash useful pointers in the desc */
866	p2p->frag[XLP_NTXFRAGS-3] = 0xf00bad;
867	p2p->frag[XLP_NTXFRAGS-2] = (uintptr_t)p2p;
868	p2p->frag[XLP_NTXFRAGS-1] = (uintptr_t)mbuf_chain;
869
870	paddr = vtophys(p2p);
871	msg.msg[0] = nae_tx_desc(P2P, 0, fbid, pos, paddr);
872
873	for (retries = 16;  retries > 0; retries--) {
874		ret = nlm_fmn_msgsend(dst, 1, FMN_SWCODE_NAE, &msg);
875		if (ret == 0)
876			return (0);
877	}
878
879fail:
880	if (ret != 0) {
881		tx_msgstatus = nlm_read_c2_txmsgstatus();
882		if ((tx_msgstatus >> 24) & 0x1)
883			device_printf(sc->xlpge_dev, "Transmit queue full - ");
884		if ((tx_msgstatus >> 3) & 0x1)
885			device_printf(sc->xlpge_dev, "ECC error - ");
886		if ((tx_msgstatus >> 2) & 0x1)
887			device_printf(sc->xlpge_dev, "Pending Sync - ");
888		if ((tx_msgstatus >> 1) & 0x1)
889			device_printf(sc->xlpge_dev,
890			    "Insufficient input queue credits - ");
891		if (tx_msgstatus & 0x1)
892			device_printf(sc->xlpge_dev,
893			    "Insufficient output queue credits - ");
894	}
895	device_printf(sc->xlpge_dev, "Send failed! err = %d\n", err);
896	if (p2p)
897		uma_zfree(nl_tx_desc_zone, p2p);
898	m_freem(mbuf_chain);
899	/*atomic_incr_long(&ifp->if_iqdrops); */
900	ifp->if_iqdrops++;
901	return (err);
902}
903
904
905static int
906nlm_xlpge_gmac_config_speed(struct nlm_xlpge_softc *sc)
907{
908	struct mii_data *mii;
909
910	if (sc->type == XAUIC || sc->type == ILC)
911		return (0);
912
913	if (sc->mii_bus) {
914		mii = device_get_softc(sc->mii_bus);
915		mii_pollstat(mii);
916	}
917
918	return (0);
919}
920
921static void
922nlm_xlpge_port_disable(struct nlm_xlpge_softc *sc)
923{
924	struct ifnet   *ifp;
925
926	ifp = sc->xlpge_if;
927	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
928
929	callout_stop(&sc->xlpge_callout);
930	nlm_mac_disable(sc->base_addr, sc->block, sc->type, sc->port);
931}
932
933static void
934nlm_mii_pollstat(void *arg)
935{
936	struct nlm_xlpge_softc *sc = (struct nlm_xlpge_softc *)arg;
937	struct mii_data *mii = NULL;
938
939	if (sc->mii_bus) {
940		mii = device_get_softc(sc->mii_bus);
941
942		KASSERT(mii != NULL, ("mii ptr is NULL"));
943
944		mii_pollstat(mii);
945
946		callout_reset(&sc->xlpge_callout, hz,
947		    nlm_mii_pollstat, sc);
948	}
949}
950
951static void
952nlm_xlpge_port_enable(struct nlm_xlpge_softc *sc)
953{
954	if ((sc->type != SGMIIC) && (sc->type != XAUIC))
955		return;
956	nlm_mac_enable(sc->base_addr, sc->block, sc->type, sc->port);
957	nlm_mii_pollstat((void *)sc);
958}
959
960static void
961nlm_xlpge_init(void *addr)
962{
963	struct nlm_xlpge_softc *sc;
964	struct ifnet   *ifp;
965	struct mii_data *mii = NULL;
966
967	sc = (struct nlm_xlpge_softc *)addr;
968	ifp = sc->xlpge_if;
969
970	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
971		return;
972
973	if (sc->mii_bus) {
974		mii = device_get_softc(sc->mii_bus);
975		mii_mediachg(mii);
976	}
977
978	nlm_xlpge_gmac_config_speed(sc);
979	ifp->if_drv_flags |= IFF_DRV_RUNNING;
980	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
981	nlm_xlpge_port_enable(sc);
982
983	/* start the callout */
984	callout_reset(&sc->xlpge_callout, hz, nlm_mii_pollstat, sc);
985}
986
987/*
988 * Read the MAC address from FDT or board eeprom.
989 */
990static void
991xlpge_read_mac_addr(struct nlm_xlpge_softc *sc)
992{
993
994	xlpge_get_macaddr(sc->dev_addr);
995	/* last octet is port specific */
996	sc->dev_addr[5] += (sc->block * 4) + sc->port;
997
998	if (sc->type == SGMIIC)
999		nlm_nae_setup_mac_addr_sgmii(sc->base_addr, sc->block,
1000		    sc->port, sc->type, sc->dev_addr);
1001	else if (sc->type == XAUIC)
1002		nlm_nae_setup_mac_addr_xaui(sc->base_addr, sc->block,
1003		    sc->port, sc->type, sc->dev_addr);
1004}
1005
1006
1007static int
1008xlpge_mediachange(struct ifnet *ifp)
1009{
1010	return (0);
1011}
1012
1013static void
1014xlpge_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1015{
1016	struct nlm_xlpge_softc *sc;
1017	struct mii_data *md;
1018
1019	md = NULL;
1020	sc = ifp->if_softc;
1021
1022	if (sc->mii_bus)
1023		md = device_get_softc(sc->mii_bus);
1024
1025	ifmr->ifm_status = IFM_AVALID;
1026	ifmr->ifm_active = IFM_ETHER;
1027
1028	if (sc->link == NLM_LINK_DOWN)
1029		return;
1030
1031	if (md != NULL)
1032		ifmr->ifm_active = md->mii_media.ifm_cur->ifm_media;
1033	ifmr->ifm_status |= IFM_ACTIVE;
1034}
1035
1036static int
1037nlm_xlpge_ifinit(struct nlm_xlpge_softc *sc)
1038{
1039	struct ifnet *ifp;
1040	device_t dev;
1041	int port = sc->block * 4 + sc->port;
1042
1043	dev = sc->xlpge_dev;
1044	ifp = sc->xlpge_if = if_alloc(IFT_ETHER);
1045	/*(sc->network_sc)->ifp_ports[port].xlpge_if = ifp;*/
1046	ifp_ports[port].xlpge_if = ifp;
1047
1048	if (ifp == NULL) {
1049		device_printf(dev, "cannot if_alloc()\n");
1050		return (ENOSPC);
1051	}
1052	ifp->if_softc = sc;
1053	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1054	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1055	sc->if_flags = ifp->if_flags;
1056	/*ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_VLAN_HWTAGGING;*/
1057	ifp->if_capabilities = 0;
1058	ifp->if_capenable = ifp->if_capabilities;
1059	ifp->if_ioctl = nlm_xlpge_ioctl;
	ifp->if_init = nlm_xlpge_init;
1061	ifp->if_hwassist = 0;
	ifp->if_snd.ifq_drv_maxlen = NLM_XLPGE_TXQ_SIZE; /* TODO: make this a sysctl */
1063	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
1064	IFQ_SET_READY(&ifp->if_snd);
1065
1066	ifmedia_init(&sc->xlpge_mii.mii_media, 0, xlpge_mediachange,
1067	    xlpge_mediastatus);
1068	ifmedia_add(&sc->xlpge_mii.mii_media, IFM_ETHER | IFM_AUTO, 0, NULL);
1069	ifmedia_set(&sc->xlpge_mii.mii_media, IFM_ETHER | IFM_AUTO);
1070	sc->xlpge_mii.mii_media.ifm_media =
1071	    sc->xlpge_mii.mii_media.ifm_cur->ifm_media;
1072	xlpge_read_mac_addr(sc);
1073
1074	ether_ifattach(ifp, sc->dev_addr);
1075
1076	/* override if_transmit : per ifnet(9), do it after if_attach */
1077	ifp->if_transmit = xlpge_tx;
1078
1079	return (0);
1080}
1081
1082static int
1083nlm_xlpge_probe(device_t dev)
1084{
1085	return (BUS_PROBE_DEFAULT);
1086}
1087
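/*
 * Allocate a receive buffer for the NAE.  An mbuf cluster is obtained and
 * the first cache line is used to stash a back-pointer to the mbuf and a
 * 0xf00bad magic value; m_adj() then advances m_data past that cache line
 * so the buffer handed to hardware stays cache-line aligned.  The receive
 * path (nlm_xlpge_rx) reads the metadata back from just before the packet.
 */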
1088static void *
1089get_buf(void)
1090{
1091	struct mbuf     *m_new;
1092	uint64_t        *md;
1093#ifdef INVARIANTS
1094	vm_paddr_t      temp1, temp2;
1095#endif
1096
1097	if ((m_new = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR)) == NULL)
1098		return (NULL);
1099	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
1100	KASSERT(((uintptr_t)m_new->m_data & (NAE_CACHELINE_SIZE - 1)) == 0,
1101	    ("m_new->m_data is not cacheline aligned"));
1102	md = (uint64_t *)m_new->m_data;
1103	md[0] = (intptr_t)m_new;        /* Back Ptr */
1104	md[1] = 0xf00bad;
1105	m_adj(m_new, NAE_CACHELINE_SIZE);
1106
1107#ifdef INVARIANTS
1108	temp1 = vtophys((vm_offset_t) m_new->m_data);
1109	temp2 = vtophys((vm_offset_t) m_new->m_data + 1536);
1110	KASSERT((temp1 + 1536) == temp2,
1111	    ("Alloced buffer is not contiguous"));
1112#endif
1113	return ((void *)m_new->m_data);
1114}
1115
1116static void
1117nlm_xlpge_mii_init(device_t dev, struct nlm_xlpge_softc *sc)
1118{
1119	int error;
1120
1121	error = mii_attach(dev, &sc->mii_bus, sc->xlpge_if,
1122			xlpge_mediachange, xlpge_mediastatus,
1123			BMSR_DEFCAPMASK, sc->phy_addr, MII_OFFSET_ANY, 0);
1124
1125	if (error) {
1126		device_printf(dev, "attaching PHYs failed\n");
1127		sc->mii_bus = NULL;
1128	}
1129
1130	if (sc->mii_bus != NULL) {
1131		/* enable MDIO interrupts in the PHY */
1132		/* XXXJC: TODO */
1133	}
1134}
1135
1136static int
1137xlpge_stats_sysctl(SYSCTL_HANDLER_ARGS)
1138{
1139	struct nlm_xlpge_softc *sc;
1140	uint32_t val;
1141	int reg, field;
1142
1143	sc = arg1;
1144	field = arg2;
1145	reg = SGMII_STATS_MLR(sc->block, sc->port) + field;
1146	val = nlm_read_nae_reg(sc->base_addr, reg);
1147	return (sysctl_handle_int(oidp, &val, 0, req));
1148}
1149
1150static void
1151nlm_xlpge_setup_stats_sysctl(device_t dev, struct nlm_xlpge_softc *sc)
1152{
1153	struct sysctl_ctx_list *ctx;
1154	struct sysctl_oid_list *child;
1155	struct sysctl_oid *tree;
1156
1157	ctx = device_get_sysctl_ctx(dev);
1158	tree = device_get_sysctl_tree(dev);
1159	child = SYSCTL_CHILDREN(tree);
1160
1161#define XLPGE_STAT(name, offset, desc) \
1162	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, name, 	\
1163	    CTLTYPE_UINT | CTLFLAG_RD, sc, offset,	\
1164	    xlpge_stats_sysctl, "IU", desc)
1165
1166	XLPGE_STAT("tr127", nlm_sgmii_stats_tr127, "TxRx 64 - 127 Bytes");
1167	XLPGE_STAT("tr255", nlm_sgmii_stats_tr255, "TxRx 128 - 255 Bytes");
1168	XLPGE_STAT("tr511", nlm_sgmii_stats_tr511, "TxRx 256 - 511 Bytes");
1169	XLPGE_STAT("tr1k",  nlm_sgmii_stats_tr1k,  "TxRx 512 - 1023 Bytes");
1170	XLPGE_STAT("trmax", nlm_sgmii_stats_trmax, "TxRx 1024 - 1518 Bytes");
1171	XLPGE_STAT("trmgv", nlm_sgmii_stats_trmgv, "TxRx 1519 - 1522 Bytes");
1172
1173	XLPGE_STAT("rbyt", nlm_sgmii_stats_rbyt, "Rx Bytes");
1174	XLPGE_STAT("rpkt", nlm_sgmii_stats_rpkt, "Rx Packets");
1175	XLPGE_STAT("rfcs", nlm_sgmii_stats_rfcs, "Rx FCS Error");
1176	XLPGE_STAT("rmca", nlm_sgmii_stats_rmca, "Rx Multicast Packets");
1177	XLPGE_STAT("rbca", nlm_sgmii_stats_rbca, "Rx Broadcast Packets");
1178	XLPGE_STAT("rxcf", nlm_sgmii_stats_rxcf, "Rx Control Frames");
1179	XLPGE_STAT("rxpf", nlm_sgmii_stats_rxpf, "Rx Pause Frames");
1180	XLPGE_STAT("rxuo", nlm_sgmii_stats_rxuo, "Rx Unknown Opcode");
1181	XLPGE_STAT("raln", nlm_sgmii_stats_raln, "Rx Alignment Errors");
1182	XLPGE_STAT("rflr", nlm_sgmii_stats_rflr, "Rx Framelength Errors");
1183	XLPGE_STAT("rcde", nlm_sgmii_stats_rcde, "Rx Code Errors");
1184	XLPGE_STAT("rcse", nlm_sgmii_stats_rcse, "Rx Carrier Sense Errors");
1185	XLPGE_STAT("rund", nlm_sgmii_stats_rund, "Rx Undersize Packet Errors");
1186	XLPGE_STAT("rovr", nlm_sgmii_stats_rovr, "Rx Oversize Packet Errors");
1187	XLPGE_STAT("rfrg", nlm_sgmii_stats_rfrg, "Rx Fragments");
1188	XLPGE_STAT("rjbr", nlm_sgmii_stats_rjbr, "Rx Jabber");
1189
1190	XLPGE_STAT("tbyt", nlm_sgmii_stats_tbyt, "Tx Bytes");
1191	XLPGE_STAT("tpkt", nlm_sgmii_stats_tpkt, "Tx Packets");
1192	XLPGE_STAT("tmca", nlm_sgmii_stats_tmca, "Tx Multicast Packets");
1193	XLPGE_STAT("tbca", nlm_sgmii_stats_tbca, "Tx Broadcast Packets");
1194	XLPGE_STAT("txpf", nlm_sgmii_stats_txpf, "Tx Pause Frame");
1195	XLPGE_STAT("tdfr", nlm_sgmii_stats_tdfr, "Tx Deferral Packets");
1196	XLPGE_STAT("tedf", nlm_sgmii_stats_tedf, "Tx Excessive Deferral Pkts");
1197	XLPGE_STAT("tscl", nlm_sgmii_stats_tscl, "Tx Single Collisions");
1198	XLPGE_STAT("tmcl", nlm_sgmii_stats_tmcl, "Tx Multiple Collisions");
1199	XLPGE_STAT("tlcl", nlm_sgmii_stats_tlcl, "Tx Late Collision Pkts");
1200	XLPGE_STAT("txcl", nlm_sgmii_stats_txcl, "Tx Excessive Collisions");
1201	XLPGE_STAT("tncl", nlm_sgmii_stats_tncl, "Tx Total Collisions");
1202	XLPGE_STAT("tjbr", nlm_sgmii_stats_tjbr, "Tx Jabber Frames");
1203	XLPGE_STAT("tfcs", nlm_sgmii_stats_tfcs, "Tx FCS Errors");
1204	XLPGE_STAT("txcf", nlm_sgmii_stats_txcf, "Tx Control Frames");
1205	XLPGE_STAT("tovr", nlm_sgmii_stats_tovr, "Tx Oversize Frames");
1206	XLPGE_STAT("tund", nlm_sgmii_stats_tund, "Tx Undersize Frames");
1207	XLPGE_STAT("tfrg", nlm_sgmii_stats_tfrg, "Tx Fragments");
1208#undef XLPGE_STAT
1209}
1210
1211static int
1212nlm_xlpge_attach(device_t dev)
1213{
1214	struct xlp_port_ivars *pv;
1215	struct nlm_xlpge_softc *sc;
1216	int port;
1217
1218	pv = device_get_ivars(dev);
1219	sc = device_get_softc(dev);
1220	sc->xlpge_dev = dev;
1221	sc->mii_bus = NULL;
1222	sc->block = pv->block;
1223	sc->node = pv->node;
1224	sc->port = pv->port;
1225	sc->type = pv->type;
1226	sc->xlpge_if = NULL;
1227	sc->phy_addr = pv->phy_addr;
1228	sc->mdio_bus = pv->mdio_bus;
1229	sc->portcfg = nae_port_config;
1230	sc->hw_parser_en = pv->hw_parser_en;
1231
1232	/* default settings */
1233	sc->speed = NLM_SGMII_SPEED_10;
1234	sc->duplexity = NLM_SGMII_DUPLEX_FULL;
1235	sc->link = NLM_LINK_DOWN;
1236	sc->flowctrl = NLM_FLOWCTRL_DISABLED;
1237
1238	sc->network_sc = device_get_softc(device_get_parent(dev));
1239	sc->base_addr = sc->network_sc->base;
1240	sc->prepad_en = sc->network_sc->prepad_en;
1241	sc->prepad_size = sc->network_sc->prepad_size;
1242
1243	callout_init(&sc->xlpge_callout, 1);
1244
1245	XLPGE_LOCK_INIT(sc, device_get_nameunit(dev));
1246
	port = (sc->block * 4) + sc->port;
1248	sc->nfree_desc = nae_port_config[port].num_free_descs;
1249	sc->txq = nae_port_config[port].txq;
1250	sc->rxfreeq = nae_port_config[port].rxfreeq;
1251
1252	nlm_xlpge_submit_rx_free_desc(sc, sc->nfree_desc);
1253	if (sc->hw_parser_en)
1254		nlm_enable_hardware_parser_per_port(sc->base_addr,
1255		    sc->block, sc->port);
1256
1257	nlm_xlpge_ifinit(sc);
1258	ifp_ports[port].xlpge_sc = sc;
1259	nlm_xlpge_mii_init(dev, sc);
1260
1261	nlm_xlpge_setup_stats_sysctl(dev, sc);
1262
1263	return (0);
1264}
1265
1266static int
1267nlm_xlpge_detach(device_t dev)
1268{
1269	return (0);
1270}
1271
1272static int
1273nlm_xlpge_suspend(device_t dev)
1274{
1275	return (0);
1276}
1277
1278static int
1279nlm_xlpge_resume(device_t dev)
1280{
1281	return (0);
1282}
1283
1284static int
1285nlm_xlpge_shutdown(device_t dev)
1286{
1287	return (0);
1288}
1289
1290/*
1291 * miibus function with custom implementation
1292 */
1293static int
1294nlm_xlpge_mii_read(struct device *dev, int phyaddr, int regidx)
1295{
1296	struct nlm_xlpge_softc *sc;
1297	int val;
1298
1299	sc = device_get_softc(dev);
1300	if (sc->type == SGMIIC)
1301		val = nlm_gmac_mdio_read(sc->base_addr, sc->mdio_bus,
1302		    BLOCK_7, LANE_CFG, phyaddr, regidx);
1303	else
1304		val = 0xffff;
1305
1306	return (val);
1307}
1308
1309static int
1310nlm_xlpge_mii_write(struct device *dev, int phyaddr, int regidx, int val)
1311{
1312	struct nlm_xlpge_softc *sc;
1313
1314	sc = device_get_softc(dev);
1315	if (sc->type == SGMIIC)
1316		nlm_gmac_mdio_write(sc->base_addr, sc->mdio_bus, BLOCK_7,
1317		    LANE_CFG, phyaddr, regidx, val);
1318
1319	return (0);
1320}
1321
1322static void
1323nlm_xlpge_mii_statchg(device_t dev)
1324{
1325	struct nlm_xlpge_softc *sc;
1326	struct mii_data *mii;
1327	char *speed, *duplexity;
1328
1329	sc = device_get_softc(dev);
1330	if (sc->mii_bus == NULL)
1331		return;
1332
1333	mii = device_get_softc(sc->mii_bus);
1334	if (mii->mii_media_status & IFM_ACTIVE) {
1335		if (IFM_SUBTYPE(mii->mii_media_active) ==  IFM_10_T) {
1336			sc->speed = NLM_SGMII_SPEED_10;
1337			speed =  "10Mbps";
1338		} else if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) {
1339			sc->speed = NLM_SGMII_SPEED_100;
1340			speed = "100Mbps";
1341		} else { /* default to 1G */
1342			sc->speed = NLM_SGMII_SPEED_1000;
1343			speed =  "1Gbps";
1344		}
1345
1346		if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
1347			sc->duplexity = NLM_SGMII_DUPLEX_FULL;
1348			duplexity =  "full";
1349		} else {
1350			sc->duplexity = NLM_SGMII_DUPLEX_HALF;
1351			duplexity = "half";
1352		}
1353
1354		printf("Port [%d, %d] setup with speed=%s duplex=%s\n",
1355		    sc->block, sc->port, speed, duplexity);
1356
1357		nlm_nae_setup_mac(sc->base_addr, sc->block, sc->port, 0, 1, 1,
1358		    sc->speed, sc->duplexity);
1359	}
1360}
1361
1362/*
1363 * xlpge support function implementations
1364 */
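/*
 * Handle a transmit free-back message.  paddr is the physical address of
 * the xlpge_tx_desc built in xlpge_tx(); the magic value, descriptor
 * pointer and mbuf chain pointer stashed in its last three slots are read
 * back with nlm_paddr_ld() so the mbuf chain and the descriptor can be
 * freed.
 */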
1365static void
1366nlm_xlpge_release_mbuf(uint64_t paddr)
1367{
1368	uint64_t	mag, desc, mbuf;
1369
1370	paddr += (XLP_NTXFRAGS - 3) * sizeof(uint64_t);
1371	mag = nlm_paddr_ld(paddr);
1372	desc = nlm_paddr_ld(paddr + sizeof(uint64_t));
1373	mbuf = nlm_paddr_ld(paddr + 2 * sizeof(uint64_t));
1374
1375	if (mag != 0xf00bad) {
		/* not our packet: bad magic value - FIXME in initialization */
		printf("cpu %d: ERR Tx packet paddr %jx, mag %jx, desc %jx mbuf %jx\n",
		    nlm_cpuid(), (uintmax_t)paddr, (uintmax_t)mag,
		    (uintmax_t)desc, (uintmax_t)mbuf);
1380		return;
1381	}
1382	m_freem((struct mbuf *)(uintptr_t)mbuf);
1383	uma_zfree(nl_tx_desc_zone, (void *)(uintptr_t)desc);
1384}
1385
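/*
 * Hand a received packet to the network stack.  paddr points at the packet
 * data inside a buffer allocated by get_buf(); the owning mbuf pointer and
 * magic value live in the cache line just before it.  Any configured
 * pre-pad is stripped before the mbuf is passed to if_input.
 */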
1386static void
1387nlm_xlpge_rx(struct nlm_xlpge_softc *sc, int port, vm_paddr_t paddr, int len)
1388{
1389	struct ifnet	*ifp;
1390	struct mbuf	*m;
1391	vm_offset_t	temp;
1392	unsigned long	mag;
1393	int		prepad_size;
1394
1395	ifp = sc->xlpge_if;
1396	temp = nlm_paddr_ld(paddr - NAE_CACHELINE_SIZE);
1397	mag = nlm_paddr_ld(paddr - NAE_CACHELINE_SIZE + sizeof(uint64_t));
1398
1399	m = (struct mbuf *)(intptr_t)temp;
1400	if (mag != 0xf00bad) {
		/* not our packet: bad magic value - FIXME in initialization */
1402		printf("cpu %d: ERR Rx packet paddr %jx, temp %p, mag %lx\n",
1403		    nlm_cpuid(), (uintmax_t)paddr, (void *)temp, mag);
1404		return;
1405	}
1406
1407	m->m_pkthdr.rcvif = ifp;
1408
1409#ifdef DUMP_PACKET
1410	{
1411		int     i = 0, j = 64;
		unsigned char *buf = (unsigned char *)m->m_data;
		printf("(cpu_%d: nlm_xlpge_rx) Rx Packet: length=%d\n",
1414				nlm_cpuid(), len);
1415		if (len < j)
1416			j = len;
1417		if (sc->prepad_en)
1418			j += ((sc->prepad_size + 1) * 16);
1419		for (i = 0; i < j; i++) {
1420			if (i && (i % 16) == 0)
1421				printf("\n");
1422			printf("%02x ", buf[i]);
1423		}
1424		printf("\n");
1425	}
1426#endif
1427
1428	if (sc->prepad_en) {
1429		prepad_size = ((sc->prepad_size + 1) * 16);
1430		m->m_data += prepad_size;
1431		m->m_pkthdr.len = m->m_len = (len - prepad_size);
1432	} else
1433		m->m_pkthdr.len = m->m_len = len;
1434
1435	/*atomic_incr_long(&ifp->if_ipackets);*/
1436	ifp->if_ipackets++;
1437#ifdef XLP_DRIVER_LOOPBACK
1438	if (port == 16 || port == 17)
1439		(*ifp->if_input)(ifp, m);
1440	else
1441		xlpge_tx(ifp, m);
1442#else
1443	(*ifp->if_input)(ifp, m);
1444#endif
1445}
1446
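/*
 * Post 'num' free receive buffers to the port's RX free descriptor queue.
 * Each buffer is allocated with get_buf() and its physical address is sent
 * as a one-entry FMN message; the send is retried when FMN credits are
 * temporarily exhausted.
 */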
1447void
1448nlm_xlpge_submit_rx_free_desc(struct nlm_xlpge_softc *sc, int num)
1449{
1450	int i, size, ret, n;
1451	struct nlm_fmn_msg msg;
1452	void *ptr;
1453
	for (i = 0; i < num; i++) {
1455		memset(&msg, 0, sizeof(msg));
1456		ptr = get_buf();
1457		if (!ptr) {
1458			device_printf(sc->xlpge_dev, "Cannot allocate mbuf\n");
1459			break;
1460		}
1461
1462		msg.msg[0] = vtophys(ptr);
1463		if (msg.msg[0] == 0) {
1464			printf("Bad ptr for %p\n", ptr);
1465			break;
1466		}
1467		size = 1;
1468
1469		n = 0;
1470		while (1) {
			/* nlm_fmn_msgsend() returns 0 on success */
1472			ret = nlm_fmn_msgsend(sc->rxfreeq, size, 0, &msg);
1473			if (ret == 0)
1474				break;
1475			if (n++ > 10000) {
				printf("Too many credit failures sending a free descriptor\n");
1477				break;
1478			}
1479		}
1480	}
1481}
1482
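/*
 * FMN message handler for both the NAE and POE station ID ranges.  A
 * one-entry message is a transmit free-back: msg[0] carries the physical
 * address of the transmit descriptor block in its low 40 bits and the
 * egress context above it, which cntx2port[] maps back to a port.  A longer
 * message is a received packet: msg[1] carries the buffer physical address,
 * the packet length and the receive context; the packet is pushed up the
 * stack and one free descriptor is returned to the NAE.
 */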
1483void
1484nlm_xlpge_msgring_handler(int vc, int size, int code, int src_id,
1485    struct nlm_fmn_msg *msg, void *data)
1486{
1487	uint64_t phys_addr;
1488	struct nlm_xlpnae_softc *sc;
1489	struct nlm_xlpge_softc *xlpge_sc;
1490	struct ifnet *ifp;
1491	uint32_t context;
1492	uint32_t port = 0;
1493	uint32_t length;
1494
1495	sc = (struct nlm_xlpnae_softc *)data;
1496	KASSERT(sc != NULL, ("Null sc in msgring handler"));
1497
1498	if (size == 1) { /* process transmit complete */
1499		phys_addr = msg->msg[0] & 0xffffffffffULL;
1500
1501		/* context is SGMII_RCV_CONTEXT_NUM + three bit vlan type
1502		 * or vlan priority
1503		 */
1504		context = (msg->msg[0] >> 40) & 0x3fff;
1505		port = cntx2port[context];
1506
1507		if (port >= XLP_MAX_PORTS) {
1508			printf("%s:%d Bad port %d (context=%d)\n",
1509				__func__, __LINE__, port, context);
1510			return;
1511		}
1512		ifp = ifp_ports[port].xlpge_if;
1513		xlpge_sc = ifp_ports[port].xlpge_sc;
1514
1515		nlm_xlpge_release_mbuf(phys_addr);
1516
1517		/*atomic_incr_long(&ifp->if_opackets);*/
1518		ifp->if_opackets++;
1519
	} else if (size > 1) { /* Receive packet */
1521		phys_addr = msg->msg[1] & 0xffffffffc0ULL;
1522		length = (msg->msg[1] >> 40) & 0x3fff;
1523		length -= MAC_CRC_LEN;
1524
1525		/* context is SGMII_RCV_CONTEXT_NUM + three bit vlan type
1526		 * or vlan priority
1527		 */
1528		context = (msg->msg[1] >> 54) & 0x3ff;
1529		port = cntx2port[context];
1530
1531		if (port >= XLP_MAX_PORTS) {
1532			printf("%s:%d Bad port %d (context=%d)\n",
1533				__func__, __LINE__, port, context);
1534			return;
1535		}
1536
1537		ifp = ifp_ports[port].xlpge_if;
1538		xlpge_sc = ifp_ports[port].xlpge_sc;
1539
1540		nlm_xlpge_rx(xlpge_sc, port, phys_addr, length);
		/* return a free descriptor to the NAE */
1542		nlm_xlpge_submit_rx_free_desc(xlpge_sc, 1);
1543	}
1544}
1545