xlpge.c revision 330897
1/*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (c) 2003-2012 Broadcom Corporation
5 * All Rights Reserved
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in
15 *    the documentation and/or other materials provided with the
16 *    distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
25 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
26 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
27 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
28 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31#include <sys/cdefs.h>
32__FBSDID("$FreeBSD: stable/11/sys/mips/nlm/dev/net/xlpge.c 330897 2018-03-14 03:19:51Z eadler $");
33#include <sys/endian.h>
34#include <sys/systm.h>
35#include <sys/sockio.h>
36#include <sys/param.h>
37#include <sys/lock.h>
38#include <sys/mutex.h>
39#include <sys/proc.h>
40#include <sys/limits.h>
41#include <sys/bus.h>
42#include <sys/mbuf.h>
43#include <sys/malloc.h>
44#include <sys/kernel.h>
45#include <sys/module.h>
46#include <sys/socket.h>
47#define __RMAN_RESOURCE_VISIBLE
48#include <sys/rman.h>
49#include <sys/taskqueue.h>
50
51#include <net/if.h>
52#include <net/if_var.h>
53#include <net/if_arp.h>
54#include <net/ethernet.h>
55#include <net/if_dl.h>
56#include <net/if_media.h>
57#include <net/bpf.h>
58#include <net/if_types.h>
59#include <net/if_vlan_var.h>
60
61#include <dev/pci/pcivar.h>
62
63#include <netinet/in_systm.h>
64#include <netinet/in.h>
65#include <netinet/ip.h>
66
67#include <vm/vm.h>
68#include <vm/pmap.h>
69#include <vm/uma.h>
70
71#include <machine/reg.h>
72#include <machine/cpu.h>
73#include <machine/mips_opcode.h>
74#include <machine/asm.h>
75#include <machine/cpuregs.h>
76
77#include <machine/intr_machdep.h>
78#include <machine/clock.h>	/* for DELAY */
79#include <machine/bus.h>
80#include <machine/resource.h>
81#include <mips/nlm/hal/haldefs.h>
82#include <mips/nlm/hal/iomap.h>
83#include <mips/nlm/hal/mips-extns.h>
84#include <mips/nlm/hal/cop2.h>
85#include <mips/nlm/hal/fmn.h>
86#include <mips/nlm/hal/sys.h>
87#include <mips/nlm/hal/nae.h>
88#include <mips/nlm/hal/mdio.h>
89#include <mips/nlm/hal/sgmii.h>
90#include <mips/nlm/hal/xaui.h>
91#include <mips/nlm/hal/poe.h>
92#include <ucore_app_bin.h>
93#include <mips/nlm/hal/ucore_loader.h>
94#include <mips/nlm/xlp.h>
95#include <mips/nlm/board.h>
96#include <mips/nlm/msgring.h>
97
98#include <dev/mii/mii.h>
99#include <dev/mii/miivar.h>
100#include "miidevs.h"
101#include <dev/mii/brgphyreg.h>
102#include "miibus_if.h"
103#include <sys/sysctl.h>
104
105#include <mips/nlm/dev/net/xlpge.h>
106
107/*#define XLP_DRIVER_LOOPBACK*/
108
109static struct nae_port_config nae_port_config[64];
110
111int poe_cl_tbl[MAX_POE_CLASSES] = {
112	0x0, 0x249249,
113	0x492492, 0x6db6db,
114	0x924924, 0xb6db6d,
115	0xdb6db6, 0xffffff
116};
117
118/* #define DUMP_PACKET */
119
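/*
 * Load a 64-bit word from a physical address by going through the
 * XKPHYS direct-mapped segment (the 0x98... prefix selects XKPHYS
 * with a cached CCA).  For example, paddr 0x1fc00000 is read through
 * xkaddr 0x980000001fc00000.
 */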
120static uint64_t
121nlm_paddr_ld(uint64_t paddr)
122{
123	uint64_t xkaddr = 0x9800000000000000 | paddr;
124
125	return (nlm_load_dword_daddr(xkaddr));
126}
127
128struct nlm_xlp_portdata ifp_ports[64];
129static uma_zone_t nl_tx_desc_zone;
130
131/* This implementation registers the following device
132 * hierarchy:
133 *                      pcibus
134 *                       |
135 *                      xlpnae (1 instance - virtual entity)
136 *                       |
137 *                     xlpge
138 *      (18 sgmii / 4 xaui / 2 interlaken instances)
139 *                       |
140 *                    miibus
141 */
142
143static int nlm_xlpnae_probe(device_t);
144static int nlm_xlpnae_attach(device_t);
145static int nlm_xlpnae_detach(device_t);
146static int nlm_xlpnae_suspend(device_t);
147static int nlm_xlpnae_resume(device_t);
148static int nlm_xlpnae_shutdown(device_t);
149
150static device_method_t nlm_xlpnae_methods[] = {
151	/* Methods from the device interface */
152	DEVMETHOD(device_probe,		nlm_xlpnae_probe),
153	DEVMETHOD(device_attach,	nlm_xlpnae_attach),
154	DEVMETHOD(device_detach,	nlm_xlpnae_detach),
155	DEVMETHOD(device_suspend,	nlm_xlpnae_suspend),
156	DEVMETHOD(device_resume,	nlm_xlpnae_resume),
157	DEVMETHOD(device_shutdown,	nlm_xlpnae_shutdown),
158
159	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
160
161	DEVMETHOD_END
162};
163
164static driver_t nlm_xlpnae_driver = {
165	"xlpnae",
166	nlm_xlpnae_methods,
167	sizeof(struct nlm_xlpnae_softc)
168};
169
170static devclass_t nlm_xlpnae_devclass;
171
172static int nlm_xlpge_probe(device_t);
173static int nlm_xlpge_attach(device_t);
174static int nlm_xlpge_detach(device_t);
175static int nlm_xlpge_suspend(device_t);
176static int nlm_xlpge_resume(device_t);
177static int nlm_xlpge_shutdown(device_t);
178
179/* mii override functions */
180static int nlm_xlpge_mii_read(struct device *, int, int);
181static int nlm_xlpge_mii_write(struct device *, int, int, int);
182static void nlm_xlpge_mii_statchg(device_t);
183
184static device_method_t nlm_xlpge_methods[] = {
185	/* Methods from the device interface */
186	DEVMETHOD(device_probe,		nlm_xlpge_probe),
187	DEVMETHOD(device_attach,	nlm_xlpge_attach),
188	DEVMETHOD(device_detach,	nlm_xlpge_detach),
189	DEVMETHOD(device_suspend,	nlm_xlpge_suspend),
190	DEVMETHOD(device_resume,	nlm_xlpge_resume),
191	DEVMETHOD(device_shutdown,	nlm_xlpge_shutdown),
192
193	/* MII bus interface methods, used by the miibus and
194	 * PHY drivers attached below this device
195	 */
196	DEVMETHOD(miibus_readreg,	nlm_xlpge_mii_read),
197	DEVMETHOD(miibus_writereg,	nlm_xlpge_mii_write),
198	DEVMETHOD(miibus_statchg,	nlm_xlpge_mii_statchg),
199
200	/* Terminate method list */
201	DEVMETHOD_END
202};
203
204static driver_t nlm_xlpge_driver = {
205	"xlpge",
206	nlm_xlpge_methods,
207	sizeof(struct nlm_xlpge_softc)
208};
209
210static devclass_t nlm_xlpge_devclass;
211
212DRIVER_MODULE(xlpnae, pci, nlm_xlpnae_driver, nlm_xlpnae_devclass, 0, 0);
213DRIVER_MODULE(xlpge, xlpnae, nlm_xlpge_driver, nlm_xlpge_devclass, 0, 0);
214DRIVER_MODULE(miibus, xlpge, miibus_driver, miibus_devclass, 0, 0);
215
216MODULE_DEPEND(pci, xlpnae, 1, 1, 1);
217MODULE_DEPEND(xlpnae, xlpge, 1, 1, 1);
218MODULE_DEPEND(xlpge, ether, 1, 1, 1);
219MODULE_DEPEND(xlpge, miibus, 1, 1, 1);
220
221#define SGMII_RCV_CONTEXT_WIDTH 8
222
223/* prototypes */
224static void nlm_xlpge_msgring_handler(int vc, int size,
225    int code, int srcid, struct nlm_fmn_msg *msg, void *data);
226static void nlm_xlpge_submit_rx_free_desc(struct nlm_xlpge_softc *sc, int num);
227static void nlm_xlpge_init(void *addr);
228static void nlm_xlpge_port_disable(struct nlm_xlpge_softc *sc);
229static void nlm_xlpge_port_enable(struct nlm_xlpge_softc *sc);
230
231/* globals */
232int dbg_on = 1;
233int cntx2port[524];
234
235static __inline void
236atomic_incr_long(unsigned long *addr)
237{
238	atomic_add_long(addr, 1);
239}
240
241/*
242 * xlpnae driver implementation
243 */
244static int
245nlm_xlpnae_probe(device_t dev)
246{
247	if (pci_get_vendor(dev) != PCI_VENDOR_NETLOGIC ||
248	    pci_get_device(dev) != PCI_DEVICE_ID_NLM_NAE)
249		return (ENXIO);
250
251	return (BUS_PROBE_DEFAULT);
252}
253
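/*
 * Read back the free-in descriptor FIFO carving assigned to each
 * interface: the interface is selected by writing its number with
 * bit 31 set to NAE_FREE_IN_FIFO_CFG, and the start and size fields
 * are then decoded from the value read back.
 */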
254static void
255nlm_xlpnae_print_frin_desc_carving(struct nlm_xlpnae_softc *sc)
256{
257	int intf;
258	uint32_t value;
259	int start, size;
260
261	/* XXXJC: use max_ports instead of 20 ? */
262	for (intf = 0; intf < 20; intf++) {
263		nlm_write_nae_reg(sc->base, NAE_FREE_IN_FIFO_CFG,
264		    (0x80000000 | intf));
265		value = nlm_read_nae_reg(sc->base, NAE_FREE_IN_FIFO_CFG);
266		size = 2 * ((value >> 20) & 0x3ff);
267		start = 2 * ((value >> 8) & 0x1ff);
268	}
269}
270
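/*
 * Program the egress side of one port: TX burst maximum, the port's
 * active context range, egress FIFO carvings and credits, the DMA TX
 * credit thresholds, and a DRR scheduler map entry (with
 * NAE_DRR_QUANTA) for each of the port's channels.
 */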
271static void
272nlm_config_egress(struct nlm_xlpnae_softc *sc, int nblock,
273    int context_base, int hwport, int max_channels)
274{
275	int offset, num_channels;
276	uint32_t data;
277
278	num_channels = sc->portcfg[hwport].num_channels;
279
280	data = (2048 << 12) | (hwport << 4) | 1;
281	nlm_write_nae_reg(sc->base, NAE_TX_IF_BURSTMAX_CMD, data);
282
283	data = ((context_base + num_channels - 1) << 22) |
284	    (context_base << 12) | (hwport << 4) | 1;
285	nlm_write_nae_reg(sc->base, NAE_TX_DDR_ACTVLIST_CMD, data);
286
287	config_egress_fifo_carvings(sc->base, hwport,
288	    context_base, num_channels, max_channels, sc->portcfg);
289	config_egress_fifo_credits(sc->base, hwport,
290	    context_base, num_channels, max_channels, sc->portcfg);
291
292	data = nlm_read_nae_reg(sc->base, NAE_DMA_TX_CREDIT_TH);
293	data |= (1 << 25) | (1 << 24);
294	nlm_write_nae_reg(sc->base, NAE_DMA_TX_CREDIT_TH, data);
295
296	for (offset = 0; offset < num_channels; offset++) {
297		nlm_write_nae_reg(sc->base, NAE_TX_SCHED_MAP_CMD1,
298		    NAE_DRR_QUANTA);
299		data = (hwport << 15) | ((context_base + offset) << 5);
300		if (sc->cmplx_type[nblock] == ILC)
301			data |= (offset << 20);
302		nlm_write_nae_reg(sc->base, NAE_TX_SCHED_MAP_CMD0, data | 1);
303		nlm_write_nae_reg(sc->base, NAE_TX_SCHED_MAP_CMD0, data);
304	}
305}
306
307static int
308xlpnae_get_maxchannels(struct nlm_xlpnae_softc *sc)
309{
310	int maxchans = 0;
311	int i;
312
313	for (i = 0; i < sc->max_ports; i++) {
314		if (sc->portcfg[i].type == UNKNOWN)
315			continue;
316		maxchans += sc->portcfg[i].num_channels;
317	}
318
319	return (maxchans);
320}
321
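/*
 * Per-interface setup: MAC configuration (XAUI or SGMII), free-in
 * FIFO and ucore mask programming, RX flow table entries, and the
 * egress path for the interface's TX contexts.
 */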
322static void
323nlm_setup_interface(struct nlm_xlpnae_softc *sc, int nblock,
324    int port, uint32_t cur_flow_base, uint32_t flow_mask,
325    int max_channels, int context)
326{
327	uint64_t nae_base = sc->base;
328	int mtu = 1536;			/* XXXJC: don't hard code */
329	uint32_t ucore_mask;
330
331	if (sc->cmplx_type[nblock] == XAUIC)
332		nlm_config_xaui(nae_base, nblock, mtu,
333		    mtu, sc->portcfg[port].vlan_pri_en);
334	nlm_config_freein_fifo_uniq_cfg(nae_base,
335	    port, sc->portcfg[port].free_desc_sizes);
336	nlm_config_ucore_iface_mask_cfg(nae_base,
337	    port, sc->portcfg[port].ucore_mask);
338
339	nlm_program_flow_cfg(nae_base, port, cur_flow_base, flow_mask);
340
341	if (sc->cmplx_type[nblock] == SGMIIC)
342		nlm_configure_sgmii_interface(nae_base, nblock, port, mtu, 0);
343
344	nlm_config_egress(sc, nblock, context, port, max_channels);
345
346	nlm_nae_init_netior(nae_base, sc->nblocks);
347	nlm_nae_open_if(nae_base, nblock, sc->cmplx_type[nblock], port,
348	    sc->portcfg[port].free_desc_sizes);
349
350	/*  XXXJC: check mask calculation */
351	ucore_mask = (1 << sc->nucores) - 1;
352	nlm_nae_init_ucore(nae_base, port, ucore_mask);
353}
354
355static void
356nlm_setup_interfaces(struct nlm_xlpnae_softc *sc)
357{
358	uint64_t nae_base;
359	uint32_t cur_slot, cur_slot_base;
360	uint32_t cur_flow_base, port, flow_mask;
361	int max_channels;
362	int i, context;
363
364	cur_slot = 0;
365	cur_slot_base = 0;
366	cur_flow_base = 0;
367	nae_base = sc->base;
368	flow_mask = nlm_get_flow_mask(sc->total_num_ports);
369	/* calculate max_channels */
370	max_channels = xlpnae_get_maxchannels(sc);
371
372	port = 0;
373	context = 0;
374	for (i = 0; i < sc->max_ports; i++) {
375		if (sc->portcfg[i].type == UNKNOWN)
376			continue;
377		nlm_setup_interface(sc, sc->portcfg[i].block, i, cur_flow_base,
378		    flow_mask, max_channels, context);
379		cur_flow_base += sc->per_port_num_flows;
380		context += sc->portcfg[i].num_channels;
381	}
382}
383
384static void
385nlm_xlpnae_init(int node, struct nlm_xlpnae_softc *sc)
386{
387	uint64_t nae_base;
388	uint32_t ucoremask = 0;
389	uint32_t val;
390	int i;
391
392	nae_base = sc->base;
393
394	nlm_nae_flush_free_fifo(nae_base, sc->nblocks);
395	nlm_deflate_frin_fifo_carving(nae_base, sc->max_ports);
396	nlm_reset_nae(node);
397
398	for (i = 0; i < sc->nucores; i++)	/* XXXJC: code repeated below */
399		ucoremask |= (0x1 << i);
400	printf("Loading 0x%x ucores with microcode\n", ucoremask);
401	nlm_ucore_load_all(nae_base, ucoremask, 1);
402
403	val = nlm_set_device_frequency(node, DFS_DEVICE_NAE, sc->freq);
404	printf("Set NAE frequency to %d MHz\n", val);
405
406	nlm_mdio_reset_all(nae_base);
407
408	printf("Initialize SGMII PCS for blocks 0x%x\n", sc->sgmiimask);
409	nlm_sgmii_pcs_init(nae_base, sc->sgmiimask);
410
411	printf("Initialize XAUI PCS for blocks 0x%x\n", sc->xauimask);
412	nlm_xaui_pcs_init(nae_base, sc->xauimask);
413
414	/* clear NETIOR soft reset */
415	nlm_write_nae_reg(nae_base, NAE_LANE_CFG_SOFTRESET, 0x0);
416
417	/* Disable RX enable bit in RX_CONFIG */
418	val = nlm_read_nae_reg(nae_base, NAE_RX_CONFIG);
419	val &= 0xfffffffe;
420	nlm_write_nae_reg(nae_base, NAE_RX_CONFIG, val);
421
422	if (nlm_is_xlp8xx_ax() == 0) {
423		val = nlm_read_nae_reg(nae_base, NAE_TX_CONFIG);
424		val &= ~(1 << 3);
425		nlm_write_nae_reg(nae_base, NAE_TX_CONFIG, val);
426	}
427
428	nlm_setup_poe_class_config(nae_base, MAX_POE_CLASSES,
429	    sc->ncontexts, poe_cl_tbl);
430
431	nlm_setup_vfbid_mapping(nae_base);
432
433	nlm_setup_flow_crc_poly(nae_base, sc->flow_crc_poly);
434
435	nlm_setup_rx_cal_cfg(nae_base, sc->max_ports, sc->portcfg);
436	/* note: xlp8xx Ax does not have Tx calendaring */
437	if (!nlm_is_xlp8xx_ax())
438		nlm_setup_tx_cal_cfg(nae_base, sc->max_ports, sc->portcfg);
439
440	nlm_setup_interfaces(sc);
441	nlm_config_poe(sc->poe_base, sc->poedv_base);
442
443	if (sc->hw_parser_en)
444		nlm_enable_hardware_parser(nae_base);
445
446	if (sc->prepad_en)
447		nlm_prepad_enable(nae_base, sc->prepad_size);
448
449	if (sc->ieee_1588_en)
450		nlm_setup_1588_timer(sc->base, sc->portcfg);
451}
452
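/*
 * Re-program the POE packet distribution vectors once all CPUs are
 * online.  Until this SYSINIT runs (after SMP startup), only the
 * boot CPU receives packets -- see the end of nlm_xlpnae_attach().
 */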
453static void
454nlm_xlpnae_update_pde(void *dummy __unused)
455{
456	struct nlm_xlpnae_softc *sc;
457	uint32_t dv[NUM_WORDS_PER_DV];
458	device_t dev;
459	int vec;
460
461	dev = devclass_get_device(devclass_find("xlpnae"), 0);
462	sc = device_get_softc(dev);
463
464	nlm_write_poe_reg(sc->poe_base, POE_DISTR_EN, 0);
465	for (vec = 0; vec < NUM_DIST_VEC; vec++) {
466		if (nlm_get_poe_distvec(vec, dv) != 0)
467			continue;
468
469		nlm_write_poe_distvec(sc->poedv_base, vec, dv);
470	}
471	nlm_write_poe_reg(sc->poe_base, POE_DISTR_EN, 1);
472}
473
474SYSINIT(nlm_xlpnae_update_pde, SI_SUB_SMP, SI_ORDER_ANY,
475    nlm_xlpnae_update_pde, NULL);
476
477/* configuration common for sgmii, xaui and interlaken goes here */
478static void
479nlm_setup_portcfg(struct nlm_xlpnae_softc *sc, struct xlp_nae_ivars *naep,
480    int block, int port)
481{
482	int i;
483	uint32_t ucore_mask = 0;
484	struct xlp_block_ivars *bp;
485	struct xlp_port_ivars *p;
486
487	bp = &(naep->block_ivars[block]);
488	p  = &(bp->port_ivars[port & 0x3]);
489
490	sc->portcfg[port].node = p->node;
491	sc->portcfg[port].block = p->block;
492	sc->portcfg[port].port = p->port;
493	sc->portcfg[port].type = p->type;
494	sc->portcfg[port].mdio_bus = p->mdio_bus;
495	sc->portcfg[port].phy_addr = p->phy_addr;
496	sc->portcfg[port].loopback_mode = p->loopback_mode;
497	sc->portcfg[port].num_channels = p->num_channels;
498	if (p->free_desc_sizes != MCLBYTES) {
499		printf("[%d, %d] Error: free_desc_sizes %d != %d\n",
500		    block, port, p->free_desc_sizes, MCLBYTES);
501		return;
502	}
503	sc->portcfg[port].free_desc_sizes = p->free_desc_sizes;
504	for (i = 0; i < sc->nucores; i++)	/* XXXJC: configure this */
505		ucore_mask |= (0x1 << i);
506	sc->portcfg[port].ucore_mask = ucore_mask;
507	sc->portcfg[port].vlan_pri_en = p->vlan_pri_en;
508	sc->portcfg[port].num_free_descs = p->num_free_descs;
509	sc->portcfg[port].iface_fifo_size = p->iface_fifo_size;
510	sc->portcfg[port].rxbuf_size = p->rxbuf_size;
511	sc->portcfg[port].rx_slots_reqd = p->rx_slots_reqd;
512	sc->portcfg[port].tx_slots_reqd = p->tx_slots_reqd;
513	sc->portcfg[port].pseq_fifo_size = p->pseq_fifo_size;
514
515	sc->portcfg[port].stg2_fifo_size = p->stg2_fifo_size;
516	sc->portcfg[port].eh_fifo_size = p->eh_fifo_size;
517	sc->portcfg[port].frout_fifo_size = p->frout_fifo_size;
518	sc->portcfg[port].ms_fifo_size = p->ms_fifo_size;
519	sc->portcfg[port].pkt_fifo_size = p->pkt_fifo_size;
520	sc->portcfg[port].pktlen_fifo_size = p->pktlen_fifo_size;
521	sc->portcfg[port].max_stg2_offset = p->max_stg2_offset;
522	sc->portcfg[port].max_eh_offset = p->max_eh_offset;
523	sc->portcfg[port].max_frout_offset = p->max_frout_offset;
524	sc->portcfg[port].max_ms_offset = p->max_ms_offset;
525	sc->portcfg[port].max_pmem_offset = p->max_pmem_offset;
526	sc->portcfg[port].stg1_2_credit = p->stg1_2_credit;
527	sc->portcfg[port].stg2_eh_credit = p->stg2_eh_credit;
528	sc->portcfg[port].stg2_frout_credit = p->stg2_frout_credit;
529	sc->portcfg[port].stg2_ms_credit = p->stg2_ms_credit;
530	sc->portcfg[port].ieee1588_inc_intg = p->ieee1588_inc_intg;
531	sc->portcfg[port].ieee1588_inc_den = p->ieee1588_inc_den;
532	sc->portcfg[port].ieee1588_inc_num = p->ieee1588_inc_num;
533	sc->portcfg[port].ieee1588_userval = p->ieee1588_userval;
534	sc->portcfg[port].ieee1588_ptpoff = p->ieee1588_ptpoff;
535	sc->portcfg[port].ieee1588_tmr1 = p->ieee1588_tmr1;
536	sc->portcfg[port].ieee1588_tmr2 = p->ieee1588_tmr2;
537	sc->portcfg[port].ieee1588_tmr3 = p->ieee1588_tmr3;
538
539	sc->total_free_desc += sc->portcfg[port].free_desc_sizes;
540	sc->total_num_ports++;
541}
542
543static int
544nlm_xlpnae_attach(device_t dev)
545{
546	struct xlp_nae_ivars	*nae_ivars;
547	struct nlm_xlpnae_softc *sc;
548	device_t tmpd;
549	uint32_t dv[NUM_WORDS_PER_DV];
550	int port, i, j, nchan, nblock, node, qstart, qnum;
551	int offset, context, txq_base, rxvcbase;
552	uint64_t poe_pcibase, nae_pcibase;
553
554	node = pci_get_slot(dev) / 8;
555	nae_ivars = &xlp_board_info.nodes[node].nae_ivars;
556
557	sc = device_get_softc(dev);
558	sc->xlpnae_dev = dev;
559	sc->node = nae_ivars->node;
560	sc->base = nlm_get_nae_regbase(sc->node);
561	sc->poe_base = nlm_get_poe_regbase(sc->node);
562	sc->poedv_base = nlm_get_poedv_regbase(sc->node);
563	sc->portcfg = nae_port_config;
564	sc->blockmask = nae_ivars->blockmask;
565	sc->ilmask = nae_ivars->ilmask;
566	sc->xauimask = nae_ivars->xauimask;
567	sc->sgmiimask = nae_ivars->sgmiimask;
568	sc->nblocks = nae_ivars->nblocks;
569	sc->freq = nae_ivars->freq;
570
571	/* flow table generation is done by CRC16 polynomial */
572	sc->flow_crc_poly = nae_ivars->flow_crc_poly;
573
574	sc->hw_parser_en = nae_ivars->hw_parser_en;
575	sc->prepad_en = nae_ivars->prepad_en;
576	sc->prepad_size = nae_ivars->prepad_size;
577	sc->ieee_1588_en = nae_ivars->ieee_1588_en;
578
579	nae_pcibase = nlm_get_nae_pcibase(sc->node);
580	sc->ncontexts = nlm_read_reg(nae_pcibase, XLP_PCI_DEVINFO_REG5);
581	sc->nucores = nlm_num_uengines(nae_pcibase);
582
583	for (nblock = 0; nblock < sc->nblocks; nblock++) {
584		sc->cmplx_type[nblock] = nae_ivars->block_ivars[nblock].type;
585		sc->portmask[nblock] = nae_ivars->block_ivars[nblock].portmask;
586	}
587
588	for (i = 0; i < sc->ncontexts; i++)
589		cntx2port[i] = 18;	/* 18 is an invalid port */
590
591	if (sc->nblocks == 5)
592		sc->max_ports = 18;	/* 8xx has a block 4 with 2 ports */
593	else
594		sc->max_ports = sc->nblocks * PORTS_PER_CMPLX;
595
596	for (i = 0; i < sc->max_ports; i++)
597		sc->portcfg[i].type = UNKNOWN; /* Port Not Present */
598	/*
599	 * Now set up all internal fifo carvings based on
600	 * total number of ports in the system
601	 */
602	sc->total_free_desc = 0;
603	sc->total_num_ports = 0;
604	port = 0;
605	context = 0;
606	txq_base = nlm_qidstart(nae_pcibase);
607	rxvcbase = txq_base + sc->ncontexts;
608	for (i = 0; i < sc->nblocks; i++) {
609		uint32_t portmask;
610
611		if ((nae_ivars->blockmask & (1 << i)) == 0) {
612			port += 4;
613			continue;
614		}
615		portmask = nae_ivars->block_ivars[i].portmask;
616		for (j = 0; j < PORTS_PER_CMPLX; j++, port++) {
617			if ((portmask & (1 << j)) == 0)
618				continue;
619			nlm_setup_portcfg(sc, nae_ivars, i, port);
620			nchan = sc->portcfg[port].num_channels;
621			for (offset = 0; offset < nchan; offset++)
622				cntx2port[context + offset] = port;
623			sc->portcfg[port].txq = txq_base + context;
624			sc->portcfg[port].rxfreeq = rxvcbase + port;
625			context += nchan;
626		}
627	}
628
629	poe_pcibase = nlm_get_poe_pcibase(sc->node);
630	sc->per_port_num_flows =
631	    nlm_poe_max_flows(poe_pcibase) / sc->total_num_ports;
632
633	/* zone for P2P descriptors */
634	nl_tx_desc_zone = uma_zcreate("NL Tx Desc",
635	    sizeof(struct xlpge_tx_desc), NULL, NULL, NULL, NULL,
636	    NAE_CACHELINE_SIZE, 0);
637
638	/* NAE FMN messages have CMS src station id's in the
639	 * range qstart to qstart + qnum - 1.
640	 */
641	qstart = nlm_qidstart(nae_pcibase);
642	qnum = nlm_qnum(nae_pcibase);
643	if (register_msgring_handler(qstart, qstart + qnum - 1,
644	    nlm_xlpge_msgring_handler, sc)) {
645		panic("Couldn't register NAE msgring handler\n");
646	}
647
648	/* POE FMN messages have CMS src station id's in the
649	 * range qstart to qstart + qnum - 1.
650	 */
651	qstart = nlm_qidstart(poe_pcibase);
652	qnum = nlm_qnum(poe_pcibase);
653	if (register_msgring_handler(qstart, qstart + qnum - 1,
654	    nlm_xlpge_msgring_handler, sc)) {
655		panic("Couldn't register POE msgring handler\n");
656	}
657
658	nlm_xlpnae_init(node, sc);
659
660	for (i = 0; i < sc->max_ports; i++) {
661		char desc[32];
662		int block, port;
663
664		if (sc->portcfg[i].type == UNKNOWN)
665			continue;
666		block = sc->portcfg[i].block;
667		port = sc->portcfg[i].port;
668		tmpd = device_add_child(dev, "xlpge", i);
669		device_set_ivars(tmpd,
670		    &(nae_ivars->block_ivars[block].port_ivars[port]));
671		sprintf(desc, "XLP NAE Port %d,%d", block, port);
672		device_set_desc_copy(tmpd, desc);
673	}
674	nlm_setup_iface_fifo_cfg(sc->base, sc->max_ports, sc->portcfg);
675	nlm_setup_rx_base_config(sc->base, sc->max_ports, sc->portcfg);
676	nlm_setup_rx_buf_config(sc->base, sc->max_ports, sc->portcfg);
677	nlm_setup_freein_fifo_cfg(sc->base, sc->portcfg);
678	nlm_program_nae_parser_seq_fifo(sc->base, sc->max_ports, sc->portcfg);
679
680	nlm_xlpnae_print_frin_desc_carving(sc);
681	bus_generic_probe(dev);
682	bus_generic_attach(dev);
683
684	/*
685	 * Enable only the boot cpu at this point; full distribution
686	 * comes only after SMP is started
687	 */
688	nlm_write_poe_reg(sc->poe_base, POE_DISTR_EN, 0);
689	nlm_calc_poe_distvec(0x1, 0, 0, 0, 0x1 << XLPGE_RX_VC, dv);
690	nlm_write_poe_distvec(sc->poedv_base, 0, dv);
691	nlm_write_poe_reg(sc->poe_base, POE_DISTR_EN, 1);
692
693	return (0);
694}
695
696static int
697nlm_xlpnae_detach(device_t dev)
698{
699	/*  TODO - free zone here */
700	return (0);
701}
702
703static int
704nlm_xlpnae_suspend(device_t dev)
705{
706	return (0);
707}
708
709static int
710nlm_xlpnae_resume(device_t dev)
711{
712	return (0);
713}
714
715static int
716nlm_xlpnae_shutdown(device_t dev)
717{
718	return (0);
719}
720
721/*
722 * xlpge driver implementation
723 */
724
725static void
726nlm_xlpge_mac_set_rx_mode(struct nlm_xlpge_softc *sc)
727{
728	if (sc->if_flags & IFF_PROMISC) {
729		if (sc->type == SGMIIC)
730			nlm_nae_setup_rx_mode_sgmii(sc->base_addr,
731			    sc->block, sc->port, sc->type, 1 /* broadcast */,
732			    1/* multicast */, 0 /* pause */, 1 /* promisc */);
733		else
734			nlm_nae_setup_rx_mode_xaui(sc->base_addr,
735			    sc->block, sc->port, sc->type, 1 /* broadcast */,
736			    1/* multicast */, 0 /* pause */, 1 /* promisc */);
737	} else {
738		if (sc->type == SGMIIC)
739			nlm_nae_setup_rx_mode_sgmii(sc->base_addr,
740			    sc->block, sc->port, sc->type, 1 /* broadcast */,
741			    1/* multicast */, 0 /* pause */, 0 /* promisc */);
742		else
743			nlm_nae_setup_rx_mode_xaui(sc->base_addr,
744			    sc->block, sc->port, sc->type, 1 /* broadcast */,
745			    1/* multicast */, 0 /* pause */, 0 /* promisc */);
746	}
747}
748
749static int
750nlm_xlpge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
751{
752	struct mii_data		*mii;
753	struct nlm_xlpge_softc	*sc;
754	struct ifreq		*ifr;
755	int			error;
756
757	sc = ifp->if_softc;
758	error = 0;
759	ifr = (struct ifreq *)data;
760
761	switch (command) {
762	case SIOCSIFFLAGS:
763		XLPGE_LOCK(sc);
764		sc->if_flags = ifp->if_flags;
765		if (ifp->if_flags & IFF_UP) {
766			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
767				nlm_xlpge_init(sc);
768			else
769				nlm_xlpge_port_enable(sc);
770			nlm_xlpge_mac_set_rx_mode(sc);
771			sc->link = NLM_LINK_UP;
772		} else {
773			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
774				nlm_xlpge_port_disable(sc);
775			sc->link = NLM_LINK_DOWN;
776		}
777		XLPGE_UNLOCK(sc);
778		error = 0;
779		break;
780	case SIOCGIFMEDIA:
781	case SIOCSIFMEDIA:
782		if (sc->mii_bus != NULL) {
783			mii = device_get_softc(sc->mii_bus);
784			error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
785			    command);
786		}
787		break;
788	default:
789		error = ether_ioctl(ifp, command, data);
790		break;
791	}
792
793	return (error);
794}
795
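/*
 * Transmit an mbuf chain: build a P2P descriptor whose entries are
 * P2D fragment descriptors, one per physically contiguous piece of
 * the chain, and hand it to the port's TX queue over the FMN.  The
 * last three fragment slots stash a magic word, the P2P descriptor
 * pointer and the mbuf chain pointer, so the free-back message can
 * release both (see nlm_xlpge_release_mbuf()).
 */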
796static int
797xlpge_tx(struct ifnet *ifp, struct mbuf *mbuf_chain)
798{
799	struct nlm_fmn_msg	msg;
800	struct xlpge_tx_desc	*p2p;
801	struct nlm_xlpge_softc	*sc;
802	struct mbuf	*m;
803	vm_paddr_t      paddr;
804	int		fbid, dst, pos, err;
805	int		ret = 0, tx_msgstatus, retries;
806
807	err = 0;
808	if (mbuf_chain == NULL)
809		return (0);
810
811	sc = ifp->if_softc;
812	p2p = NULL;
813	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING) ||
814	    ifp->if_drv_flags & IFF_DRV_OACTIVE) {
815		err = ENXIO;
816		goto fail;
817	}
818
819	/* free a few incoming messages on the fb vc */
820	xlp_handle_msg_vc(1 << XLPGE_FB_VC, 2);
821
822	/* vfb id table is set up to map each cpu to its vc 3 */
823	fbid = nlm_cpuid();
824	dst = sc->txq;
825
826	pos = 0;
827	p2p = uma_zalloc(nl_tx_desc_zone, M_NOWAIT);
828	if (p2p == NULL) {
829		printf("alloc fail\n");
830		err = ENOBUFS;
831		goto fail;
832	}
833
834	for (m = mbuf_chain; m != NULL; m = m->m_next) {
835		vm_offset_t buf = (vm_offset_t) m->m_data;
836		int	len = m->m_len;
837		int	frag_sz;
838		uint64_t desc;
839
840		/*printf("m_data = %p len %d\n", m->m_data, len); */
841		while (len) {
842			if (pos == XLP_NTXFRAGS - 3) {
843				device_printf(sc->xlpge_dev,
844				    "packet defrag %d\n",
845				    m_length(mbuf_chain, NULL));
846				err = ENOBUFS; /* TODO fix error */
847				goto fail;
848			}
849			paddr = vtophys(buf);
850			frag_sz = PAGE_SIZE - (buf & PAGE_MASK);
851			if (len < frag_sz)
852				frag_sz = len;
853			desc = nae_tx_desc(P2D_NEOP, 0, 127,
854			    frag_sz, paddr);
855			p2p->frag[pos] = htobe64(desc);
856			pos++;
857			len -= frag_sz;
858			buf += frag_sz;
859		}
860	}
861
862	KASSERT(pos != 0, ("Zero-length mbuf chain?\n"));
863
864	/* Make the last one P2D EOP */
865	p2p->frag[pos-1] |= htobe64((uint64_t)P2D_EOP << 62);
866
867	/* stash useful pointers in the desc */
868	p2p->frag[XLP_NTXFRAGS-3] = 0xf00bad;
869	p2p->frag[XLP_NTXFRAGS-2] = (uintptr_t)p2p;
870	p2p->frag[XLP_NTXFRAGS-1] = (uintptr_t)mbuf_chain;
871
872	paddr = vtophys(p2p);
873	msg.msg[0] = nae_tx_desc(P2P, 0, fbid, pos, paddr);
874
875	for (retries = 16;  retries > 0; retries--) {
876		ret = nlm_fmn_msgsend(dst, 1, FMN_SWCODE_NAE, &msg);
877		if (ret == 0)
878			return (0);
879	}
880
881fail:
882	if (ret != 0) {
883		tx_msgstatus = nlm_read_c2_txmsgstatus();
884		if ((tx_msgstatus >> 24) & 0x1)
885			device_printf(sc->xlpge_dev, "Transmit queue full - ");
886		if ((tx_msgstatus >> 3) & 0x1)
887			device_printf(sc->xlpge_dev, "ECC error - ");
888		if ((tx_msgstatus >> 2) & 0x1)
889			device_printf(sc->xlpge_dev, "Pending Sync - ");
890		if ((tx_msgstatus >> 1) & 0x1)
891			device_printf(sc->xlpge_dev,
892			    "Insufficient input queue credits - ");
893		if (tx_msgstatus & 0x1)
894			device_printf(sc->xlpge_dev,
895			    "Insufficient output queue credits - ");
896	}
897	device_printf(sc->xlpge_dev, "Send failed! err = %d\n", err);
898	if (p2p)
899		uma_zfree(nl_tx_desc_zone, p2p);
900	m_freem(mbuf_chain);
901	if_inc_counter(ifp, IFCOUNTER_OQDROPS, 1);
902	return (err);
903}
904
905
906static int
907nlm_xlpge_gmac_config_speed(struct nlm_xlpge_softc *sc)
908{
909	struct mii_data *mii;
910
911	if (sc->type == XAUIC || sc->type == ILC)
912		return (0);
913
914	if (sc->mii_bus) {
915		mii = device_get_softc(sc->mii_bus);
916		mii_pollstat(mii);
917	}
918
919	return (0);
920}
921
922static void
923nlm_xlpge_port_disable(struct nlm_xlpge_softc *sc)
924{
925	struct ifnet   *ifp;
926
927	ifp = sc->xlpge_if;
928	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
929
930	callout_stop(&sc->xlpge_callout);
931	nlm_mac_disable(sc->base_addr, sc->block, sc->type, sc->port);
932}
933
934static void
935nlm_mii_pollstat(void *arg)
936{
937	struct nlm_xlpge_softc *sc = (struct nlm_xlpge_softc *)arg;
938	struct mii_data *mii = NULL;
939
940	if (sc->mii_bus) {
941		mii = device_get_softc(sc->mii_bus);
942
943		KASSERT(mii != NULL, ("mii ptr is NULL"));
944
945		mii_pollstat(mii);
946
947		callout_reset(&sc->xlpge_callout, hz,
948		    nlm_mii_pollstat, sc);
949	}
950}
951
952static void
953nlm_xlpge_port_enable(struct nlm_xlpge_softc *sc)
954{
955	if ((sc->type != SGMIIC) && (sc->type != XAUIC))
956		return;
957	nlm_mac_enable(sc->base_addr, sc->block, sc->type, sc->port);
958	nlm_mii_pollstat((void *)sc);
959}
960
961static void
962nlm_xlpge_init(void *addr)
963{
964	struct nlm_xlpge_softc *sc;
965	struct ifnet   *ifp;
966	struct mii_data *mii = NULL;
967
968	sc = (struct nlm_xlpge_softc *)addr;
969	ifp = sc->xlpge_if;
970
971	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
972		return;
973
974	if (sc->mii_bus) {
975		mii = device_get_softc(sc->mii_bus);
976		mii_mediachg(mii);
977	}
978
979	nlm_xlpge_gmac_config_speed(sc);
980	ifp->if_drv_flags |= IFF_DRV_RUNNING;
981	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
982	nlm_xlpge_port_enable(sc);
983
984	/* start the callout */
985	callout_reset(&sc->xlpge_callout, hz, nlm_mii_pollstat, sc);
986}
987
988/*
989 * Read the MAC address from FDT or board eeprom.
990 */
991static void
992xlpge_read_mac_addr(struct nlm_xlpge_softc *sc)
993{
994
995	xlpge_get_macaddr(sc->dev_addr);
996	/* last octet is port specific */
997	sc->dev_addr[5] += (sc->block * 4) + sc->port;
998
999	if (sc->type == SGMIIC)
1000		nlm_nae_setup_mac_addr_sgmii(sc->base_addr, sc->block,
1001		    sc->port, sc->type, sc->dev_addr);
1002	else if (sc->type == XAUIC)
1003		nlm_nae_setup_mac_addr_xaui(sc->base_addr, sc->block,
1004		    sc->port, sc->type, sc->dev_addr);
1005}
1006
1007
1008static int
1009xlpge_mediachange(struct ifnet *ifp)
1010{
1011	return (0);
1012}
1013
1014static void
1015xlpge_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1016{
1017	struct nlm_xlpge_softc *sc;
1018	struct mii_data *md;
1019
1020	md = NULL;
1021	sc = ifp->if_softc;
1022
1023	if (sc->mii_bus)
1024		md = device_get_softc(sc->mii_bus);
1025
1026	ifmr->ifm_status = IFM_AVALID;
1027	ifmr->ifm_active = IFM_ETHER;
1028
1029	if (sc->link == NLM_LINK_DOWN)
1030		return;
1031
1032	if (md != NULL)
1033		ifmr->ifm_active = md->mii_media.ifm_cur->ifm_media;
1034	ifmr->ifm_status |= IFM_ACTIVE;
1035}
1036
1037static int
1038nlm_xlpge_ifinit(struct nlm_xlpge_softc *sc)
1039{
1040	struct ifnet *ifp;
1041	device_t dev;
1042	int port = sc->block * 4 + sc->port;
1043
1044	dev = sc->xlpge_dev;
1045	ifp = sc->xlpge_if = if_alloc(IFT_ETHER);
1046	/*(sc->network_sc)->ifp_ports[port].xlpge_if = ifp;*/
1047	ifp_ports[port].xlpge_if = ifp;
1048
1049	if (ifp == NULL) {
1050		device_printf(dev, "cannot if_alloc()\n");
1051		return (ENOSPC);
1052	}
1053	ifp->if_softc = sc;
1054	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1055	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1056	sc->if_flags = ifp->if_flags;
1057	/*ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_VLAN_HWTAGGING;*/
1058	ifp->if_capabilities = 0;
1059	ifp->if_capenable = ifp->if_capabilities;
1060	ifp->if_ioctl = nlm_xlpge_ioctl;
1061	ifp->if_init = nlm_xlpge_init;
1062	ifp->if_hwassist = 0;
1063	ifp->if_snd.ifq_drv_maxlen = NLM_XLPGE_TXQ_SIZE; /* TODO: make this a sysctl */
1064	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
1065	IFQ_SET_READY(&ifp->if_snd);
1066
1067	ifmedia_init(&sc->xlpge_mii.mii_media, 0, xlpge_mediachange,
1068	    xlpge_mediastatus);
1069	ifmedia_add(&sc->xlpge_mii.mii_media, IFM_ETHER | IFM_AUTO, 0, NULL);
1070	ifmedia_set(&sc->xlpge_mii.mii_media, IFM_ETHER | IFM_AUTO);
1071	sc->xlpge_mii.mii_media.ifm_media =
1072	    sc->xlpge_mii.mii_media.ifm_cur->ifm_media;
1073	xlpge_read_mac_addr(sc);
1074
1075	ether_ifattach(ifp, sc->dev_addr);
1076
1077	/* override if_transmit : per ifnet(9), do it after if_attach */
1078	ifp->if_transmit = xlpge_tx;
1079
1080	return (0);
1081}
1082
1083static int
1084nlm_xlpge_probe(device_t dev)
1085{
1086	return (BUS_PROBE_DEFAULT);
1087}
1088
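/*
 * Allocate an mbuf cluster to be handed to the NAE as a receive
 * buffer.  The mbuf back-pointer and a magic word are stored in the
 * first cacheline of the cluster and the data pointer is advanced
 * past it, so the buffer given to the hardware stays cacheline
 * aligned and nlm_xlpge_rx() can recover the mbuf from the packet's
 * physical address.
 */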
1089static void *
1090get_buf(void)
1091{
1092	struct mbuf     *m_new;
1093	uint64_t        *md;
1094#ifdef INVARIANTS
1095	vm_paddr_t      temp1, temp2;
1096#endif
1097
1098	if ((m_new = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR)) == NULL)
1099		return (NULL);
1100	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
1101	KASSERT(((uintptr_t)m_new->m_data & (NAE_CACHELINE_SIZE - 1)) == 0,
1102	    ("m_new->m_data is not cacheline aligned"));
1103	md = (uint64_t *)m_new->m_data;
1104	md[0] = (intptr_t)m_new;        /* Back Ptr */
1105	md[1] = 0xf00bad;
1106	m_adj(m_new, NAE_CACHELINE_SIZE);
1107
1108#ifdef INVARIANTS
1109	temp1 = vtophys((vm_offset_t) m_new->m_data);
1110	temp2 = vtophys((vm_offset_t) m_new->m_data + 1536);
1111	KASSERT((temp1 + 1536) == temp2,
1112	    ("Alloced buffer is not contiguous"));
1113#endif
1114	return ((void *)m_new->m_data);
1115}
1116
1117static void
1118nlm_xlpge_mii_init(device_t dev, struct nlm_xlpge_softc *sc)
1119{
1120	int error;
1121
1122	error = mii_attach(dev, &sc->mii_bus, sc->xlpge_if,
1123			xlpge_mediachange, xlpge_mediastatus,
1124			BMSR_DEFCAPMASK, sc->phy_addr, MII_OFFSET_ANY, 0);
1125
1126	if (error) {
1127		device_printf(dev, "attaching PHYs failed\n");
1128		sc->mii_bus = NULL;
1129	}
1130
1131	if (sc->mii_bus != NULL) {
1132		/* enable MDIO interrupts in the PHY */
1133		/* XXXJC: TODO */
1134	}
1135}
1136
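/*
 * Sysctl handler for the per-port MAC statistics: arg2 is the offset
 * of the counter within the port's SGMII statistics register block.
 */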
1137static int
1138xlpge_stats_sysctl(SYSCTL_HANDLER_ARGS)
1139{
1140	struct nlm_xlpge_softc *sc;
1141	uint32_t val;
1142	int reg, field;
1143
1144	sc = arg1;
1145	field = arg2;
1146	reg = SGMII_STATS_MLR(sc->block, sc->port) + field;
1147	val = nlm_read_nae_reg(sc->base_addr, reg);
1148	return (sysctl_handle_int(oidp, &val, 0, req));
1149}
1150
1151static void
1152nlm_xlpge_setup_stats_sysctl(device_t dev, struct nlm_xlpge_softc *sc)
1153{
1154	struct sysctl_ctx_list *ctx;
1155	struct sysctl_oid_list *child;
1156	struct sysctl_oid *tree;
1157
1158	ctx = device_get_sysctl_ctx(dev);
1159	tree = device_get_sysctl_tree(dev);
1160	child = SYSCTL_CHILDREN(tree);
1161
1162#define XLPGE_STAT(name, offset, desc) \
1163	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, name,	\
1164	    CTLTYPE_UINT | CTLFLAG_RD, sc, offset,	\
1165	    xlpge_stats_sysctl, "IU", desc)
1166
1167	XLPGE_STAT("tr127", nlm_sgmii_stats_tr127, "TxRx 64 - 127 Bytes");
1168	XLPGE_STAT("tr255", nlm_sgmii_stats_tr255, "TxRx 128 - 255 Bytes");
1169	XLPGE_STAT("tr511", nlm_sgmii_stats_tr511, "TxRx 256 - 511 Bytes");
1170	XLPGE_STAT("tr1k",  nlm_sgmii_stats_tr1k,  "TxRx 512 - 1023 Bytes");
1171	XLPGE_STAT("trmax", nlm_sgmii_stats_trmax, "TxRx 1024 - 1518 Bytes");
1172	XLPGE_STAT("trmgv", nlm_sgmii_stats_trmgv, "TxRx 1519 - 1522 Bytes");
1173
1174	XLPGE_STAT("rbyt", nlm_sgmii_stats_rbyt, "Rx Bytes");
1175	XLPGE_STAT("rpkt", nlm_sgmii_stats_rpkt, "Rx Packets");
1176	XLPGE_STAT("rfcs", nlm_sgmii_stats_rfcs, "Rx FCS Error");
1177	XLPGE_STAT("rmca", nlm_sgmii_stats_rmca, "Rx Multicast Packets");
1178	XLPGE_STAT("rbca", nlm_sgmii_stats_rbca, "Rx Broadcast Packets");
1179	XLPGE_STAT("rxcf", nlm_sgmii_stats_rxcf, "Rx Control Frames");
1180	XLPGE_STAT("rxpf", nlm_sgmii_stats_rxpf, "Rx Pause Frames");
1181	XLPGE_STAT("rxuo", nlm_sgmii_stats_rxuo, "Rx Unknown Opcode");
1182	XLPGE_STAT("raln", nlm_sgmii_stats_raln, "Rx Alignment Errors");
1183	XLPGE_STAT("rflr", nlm_sgmii_stats_rflr, "Rx Framelength Errors");
1184	XLPGE_STAT("rcde", nlm_sgmii_stats_rcde, "Rx Code Errors");
1185	XLPGE_STAT("rcse", nlm_sgmii_stats_rcse, "Rx Carrier Sense Errors");
1186	XLPGE_STAT("rund", nlm_sgmii_stats_rund, "Rx Undersize Packet Errors");
1187	XLPGE_STAT("rovr", nlm_sgmii_stats_rovr, "Rx Oversize Packet Errors");
1188	XLPGE_STAT("rfrg", nlm_sgmii_stats_rfrg, "Rx Fragments");
1189	XLPGE_STAT("rjbr", nlm_sgmii_stats_rjbr, "Rx Jabber");
1190
1191	XLPGE_STAT("tbyt", nlm_sgmii_stats_tbyt, "Tx Bytes");
1192	XLPGE_STAT("tpkt", nlm_sgmii_stats_tpkt, "Tx Packets");
1193	XLPGE_STAT("tmca", nlm_sgmii_stats_tmca, "Tx Multicast Packets");
1194	XLPGE_STAT("tbca", nlm_sgmii_stats_tbca, "Tx Broadcast Packets");
1195	XLPGE_STAT("txpf", nlm_sgmii_stats_txpf, "Tx Pause Frame");
1196	XLPGE_STAT("tdfr", nlm_sgmii_stats_tdfr, "Tx Deferral Packets");
1197	XLPGE_STAT("tedf", nlm_sgmii_stats_tedf, "Tx Excessive Deferral Pkts");
1198	XLPGE_STAT("tscl", nlm_sgmii_stats_tscl, "Tx Single Collisions");
1199	XLPGE_STAT("tmcl", nlm_sgmii_stats_tmcl, "Tx Multiple Collisions");
1200	XLPGE_STAT("tlcl", nlm_sgmii_stats_tlcl, "Tx Late Collision Pkts");
1201	XLPGE_STAT("txcl", nlm_sgmii_stats_txcl, "Tx Excessive Collisions");
1202	XLPGE_STAT("tncl", nlm_sgmii_stats_tncl, "Tx Total Collisions");
1203	XLPGE_STAT("tjbr", nlm_sgmii_stats_tjbr, "Tx Jabber Frames");
1204	XLPGE_STAT("tfcs", nlm_sgmii_stats_tfcs, "Tx FCS Errors");
1205	XLPGE_STAT("txcf", nlm_sgmii_stats_txcf, "Tx Control Frames");
1206	XLPGE_STAT("tovr", nlm_sgmii_stats_tovr, "Tx Oversize Frames");
1207	XLPGE_STAT("tund", nlm_sgmii_stats_tund, "Tx Undersize Frames");
1208	XLPGE_STAT("tfrg", nlm_sgmii_stats_tfrg, "Tx Fragments");
1209#undef XLPGE_STAT
1210}
1211
1212static int
1213nlm_xlpge_attach(device_t dev)
1214{
1215	struct xlp_port_ivars *pv;
1216	struct nlm_xlpge_softc *sc;
1217	int port;
1218
1219	pv = device_get_ivars(dev);
1220	sc = device_get_softc(dev);
1221	sc->xlpge_dev = dev;
1222	sc->mii_bus = NULL;
1223	sc->block = pv->block;
1224	sc->node = pv->node;
1225	sc->port = pv->port;
1226	sc->type = pv->type;
1227	sc->xlpge_if = NULL;
1228	sc->phy_addr = pv->phy_addr;
1229	sc->mdio_bus = pv->mdio_bus;
1230	sc->portcfg = nae_port_config;
1231	sc->hw_parser_en = pv->hw_parser_en;
1232
1233	/* default settings */
1234	sc->speed = NLM_SGMII_SPEED_10;
1235	sc->duplexity = NLM_SGMII_DUPLEX_FULL;
1236	sc->link = NLM_LINK_DOWN;
1237	sc->flowctrl = NLM_FLOWCTRL_DISABLED;
1238
1239	sc->network_sc = device_get_softc(device_get_parent(dev));
1240	sc->base_addr = sc->network_sc->base;
1241	sc->prepad_en = sc->network_sc->prepad_en;
1242	sc->prepad_size = sc->network_sc->prepad_size;
1243
1244	callout_init(&sc->xlpge_callout, 1);
1245
1246	XLPGE_LOCK_INIT(sc, device_get_nameunit(dev));
1247
1248	port = (sc->block*4)+sc->port;
1249	sc->nfree_desc = nae_port_config[port].num_free_descs;
1250	sc->txq = nae_port_config[port].txq;
1251	sc->rxfreeq = nae_port_config[port].rxfreeq;
1252
1253	nlm_xlpge_submit_rx_free_desc(sc, sc->nfree_desc);
1254	if (sc->hw_parser_en)
1255		nlm_enable_hardware_parser_per_port(sc->base_addr,
1256		    sc->block, sc->port);
1257
1258	nlm_xlpge_ifinit(sc);
1259	ifp_ports[port].xlpge_sc = sc;
1260	nlm_xlpge_mii_init(dev, sc);
1261
1262	nlm_xlpge_setup_stats_sysctl(dev, sc);
1263
1264	return (0);
1265}
1266
1267static int
1268nlm_xlpge_detach(device_t dev)
1269{
1270	return (0);
1271}
1272
1273static int
1274nlm_xlpge_suspend(device_t dev)
1275{
1276	return (0);
1277}
1278
1279static int
1280nlm_xlpge_resume(device_t dev)
1281{
1282	return (0);
1283}
1284
1285static int
1286nlm_xlpge_shutdown(device_t dev)
1287{
1288	return (0);
1289}
1290
1291/*
1292 * miibus methods with custom implementations
1293 */
1294static int
1295nlm_xlpge_mii_read(struct device *dev, int phyaddr, int regidx)
1296{
1297	struct nlm_xlpge_softc *sc;
1298	int val;
1299
1300	sc = device_get_softc(dev);
1301	if (sc->type == SGMIIC)
1302		val = nlm_gmac_mdio_read(sc->base_addr, sc->mdio_bus,
1303		    BLOCK_7, LANE_CFG, phyaddr, regidx);
1304	else
1305		val = 0xffff;
1306
1307	return (val);
1308}
1309
1310static int
1311nlm_xlpge_mii_write(struct device *dev, int phyaddr, int regidx, int val)
1312{
1313	struct nlm_xlpge_softc *sc;
1314
1315	sc = device_get_softc(dev);
1316	if (sc->type == SGMIIC)
1317		nlm_gmac_mdio_write(sc->base_addr, sc->mdio_bus, BLOCK_7,
1318		    LANE_CFG, phyaddr, regidx, val);
1319
1320	return (0);
1321}
1322
1323static void
1324nlm_xlpge_mii_statchg(device_t dev)
1325{
1326	struct nlm_xlpge_softc *sc;
1327	struct mii_data *mii;
1328	char *speed, *duplexity;
1329
1330	sc = device_get_softc(dev);
1331	if (sc->mii_bus == NULL)
1332		return;
1333
1334	mii = device_get_softc(sc->mii_bus);
1335	if (mii->mii_media_status & IFM_ACTIVE) {
1336		if (IFM_SUBTYPE(mii->mii_media_active) ==  IFM_10_T) {
1337			sc->speed = NLM_SGMII_SPEED_10;
1338			speed =  "10Mbps";
1339		} else if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) {
1340			sc->speed = NLM_SGMII_SPEED_100;
1341			speed = "100Mbps";
1342		} else { /* default to 1G */
1343			sc->speed = NLM_SGMII_SPEED_1000;
1344			speed =  "1Gbps";
1345		}
1346
1347		if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
1348			sc->duplexity = NLM_SGMII_DUPLEX_FULL;
1349			duplexity =  "full";
1350		} else {
1351			sc->duplexity = NLM_SGMII_DUPLEX_HALF;
1352			duplexity = "half";
1353		}
1354
1355		printf("Port [%d, %d] setup with speed=%s duplex=%s\n",
1356		    sc->block, sc->port, speed, duplexity);
1357
1358		nlm_nae_setup_mac(sc->base_addr, sc->block, sc->port, 0, 1, 1,
1359		    sc->speed, sc->duplexity);
1360	}
1361}
1362
1363/*
1364 * xlpge support function implementations
1365 */
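/*
 * Transmit completion: fetch the magic word, P2P descriptor pointer
 * and mbuf chain pointer stashed at the tail of the descriptor by
 * xlpge_tx(), then free both the descriptor and the chain.
 */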
1366static void
1367nlm_xlpge_release_mbuf(uint64_t paddr)
1368{
1369	uint64_t	mag, desc, mbuf;
1370
1371	paddr += (XLP_NTXFRAGS - 3) * sizeof(uint64_t);
1372	mag = nlm_paddr_ld(paddr);
1373	desc = nlm_paddr_ld(paddr + sizeof(uint64_t));
1374	mbuf = nlm_paddr_ld(paddr + 2 * sizeof(uint64_t));
1375
1376	if (mag != 0xf00bad) {
1377		/* somebody else's packet - error, FIXME in initialization */
1378		printf("cpu %d: ERR Tx packet paddr %jx, mag %jx, desc %jx mbuf %jx\n",
1379		    nlm_cpuid(), (uintmax_t)paddr, (uintmax_t)mag,
1380		    (uintmax_t)desc, (uintmax_t)mbuf);
1381		return;
1382	}
1383	m_freem((struct mbuf *)(uintptr_t)mbuf);
1384	uma_zfree(nl_tx_desc_zone, (void *)(uintptr_t)desc);
1385}
1386
1387static void
1388nlm_xlpge_rx(struct nlm_xlpge_softc *sc, int port, vm_paddr_t paddr, int len)
1389{
1390	struct ifnet	*ifp;
1391	struct mbuf	*m;
1392	vm_offset_t	temp;
1393	unsigned long	mag;
1394	int		prepad_size;
1395
1396	ifp = sc->xlpge_if;
1397	temp = nlm_paddr_ld(paddr - NAE_CACHELINE_SIZE);
1398	mag = nlm_paddr_ld(paddr - NAE_CACHELINE_SIZE + sizeof(uint64_t));
1399
1400	m = (struct mbuf *)(intptr_t)temp;
1401	if (mag != 0xf00bad) {
1402		/* somebody else's packet - error, FIXME in initialization */
1403		printf("cpu %d: ERR Rx packet paddr %jx, temp %p, mag %lx\n",
1404		    nlm_cpuid(), (uintmax_t)paddr, (void *)temp, mag);
1405		return;
1406	}
1407
1408	m->m_pkthdr.rcvif = ifp;
1409
1410#ifdef DUMP_PACKET
1411	{
1412		int     i = 0, j = 64;
1413		unsigned char *buf = (unsigned char *)m->m_data;
1414		printf("(cpu_%d: nlge_rx, !RX_COPY) Rx Packet: length=%d\n",
1415				nlm_cpuid(), len);
1416		if (len < j)
1417			j = len;
1418		if (sc->prepad_en)
1419			j += ((sc->prepad_size + 1) * 16);
1420		for (i = 0; i < j; i++) {
1421			if (i && (i % 16) == 0)
1422				printf("\n");
1423			printf("%02x ", buf[i]);
1424		}
1425		printf("\n");
1426	}
1427#endif
1428
1429	if (sc->prepad_en) {
1430		prepad_size = ((sc->prepad_size + 1) * 16);
1431		m->m_data += prepad_size;
1432		m->m_pkthdr.len = m->m_len = (len - prepad_size);
1433	} else
1434		m->m_pkthdr.len = m->m_len = len;
1435
1436	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
1437#ifdef XLP_DRIVER_LOOPBACK
1438	if (port == 16 || port == 17)
1439		(*ifp->if_input)(ifp, m);
1440	else
1441		xlpge_tx(ifp, m);
1442#else
1443	(*ifp->if_input)(ifp, m);
1444#endif
1445}
1446
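/*
 * Post 'num' free receive buffers (see get_buf()) to the port's RX
 * free descriptor FMN queue; each send is retried, up to a bound,
 * until message credits are available.
 */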
1447void
1448nlm_xlpge_submit_rx_free_desc(struct nlm_xlpge_softc *sc, int num)
1449{
1450	int i, size, ret, n;
1451	struct nlm_fmn_msg msg;
1452	void *ptr;
1453
1454	for (i = 0; i < num; i++) {
1455		memset(&msg, 0, sizeof(msg));
1456		ptr = get_buf();
1457		if (!ptr) {
1458			device_printf(sc->xlpge_dev, "Cannot allocate mbuf\n");
1459			break;
1460		}
1461
1462		msg.msg[0] = vtophys(ptr);
1463		if (msg.msg[0] == 0) {
1464			printf("Bad ptr for %p\n", ptr);
1465			break;
1466		}
1467		size = 1;
1468
1469		n = 0;
1470		while (1) {
1471			/* nlm_fmn_msgsend() returns 0 on success */
1472			ret = nlm_fmn_msgsend(sc->rxfreeq, size, 0, &msg);
1473			if (ret == 0)
1474				break;
1475			if (n++ > 10000) {
1476				printf("Too many credit failures sending free desc\n");
1477				break;
1478			}
1479		}
1480	}
1481}
1482
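/*
 * FMN message handler shared by the NAE and POE stations.  A one-word
 * message is a transmit free-back notification carrying the physical
 * address of the P2P descriptor; a longer message is a received
 * packet, with the buffer address and length encoded in msg[1].  The
 * originating port is recovered from the context id via cntx2port[].
 */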
1483void
1484nlm_xlpge_msgring_handler(int vc, int size, int code, int src_id,
1485    struct nlm_fmn_msg *msg, void *data)
1486{
1487	uint64_t phys_addr;
1488	struct nlm_xlpnae_softc *sc;
1489	struct nlm_xlpge_softc *xlpge_sc;
1490	struct ifnet *ifp;
1491	uint32_t context;
1492	uint32_t port = 0;
1493	uint32_t length;
1494
1495	sc = (struct nlm_xlpnae_softc *)data;
1496	KASSERT(sc != NULL, ("Null sc in msgring handler"));
1497
1498	if (size == 1) { /* process transmit complete */
1499		phys_addr = msg->msg[0] & 0xffffffffffULL;
1500
1501		/* context is SGMII_RCV_CONTEXT_NUM + three bit vlan type
1502		 * or vlan priority
1503		 */
1504		context = (msg->msg[0] >> 40) & 0x3fff;
1505		port = cntx2port[context];
1506
1507		if (port >= XLP_MAX_PORTS) {
1508			printf("%s:%d Bad port %d (context=%d)\n",
1509				__func__, __LINE__, port, context);
1510			return;
1511		}
1512		ifp = ifp_ports[port].xlpge_if;
1513		xlpge_sc = ifp_ports[port].xlpge_sc;
1514
1515		nlm_xlpge_release_mbuf(phys_addr);
1516
1517		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
1518
1519	} else if (size > 1) { /* Receive packet */
1520		phys_addr = msg->msg[1] & 0xffffffffc0ULL;
1521		length = (msg->msg[1] >> 40) & 0x3fff;
1522		length -= MAC_CRC_LEN;
1523
1524		/* context is SGMII_RCV_CONTEXT_NUM + three bit vlan type
1525		 * or vlan priority
1526		 */
1527		context = (msg->msg[1] >> 54) & 0x3ff;
1528		port = cntx2port[context];
1529
1530		if (port >= XLP_MAX_PORTS) {
1531			printf("%s:%d Bad port %d (context=%d)\n",
1532				__func__, __LINE__, port, context);
1533			return;
1534		}
1535
1536		ifp = ifp_ports[port].xlpge_if;
1537		xlpge_sc = ifp_ports[port].xlpge_sc;
1538
1539		nlm_xlpge_rx(xlpge_sc, port, phys_addr, length);
1540		/* return back a free descriptor to NA */
1541		nlm_xlpge_submit_rx_free_desc(xlpge_sc, 1);
1542	}
1543}
1544