/*-
 * Copyright 2013 Nathan Whitehorn
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/powerpc/pseries/phyp_llan.c 257292 2013-10-28 23:47:52Z nwhitehorn $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <powerpc/pseries/phyp-hvcall.h>

#define LLAN_MAX_RX_PACKETS	100
#define LLAN_MAX_TX_PACKETS	100
#define LLAN_RX_BUF_LEN		8*PAGE_SIZE
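
/*
 * The LLAN_RX_BUF_LEN allocation doubles as scratch space: llan_rx_load_cb()
 * carves its last two pages off for the multicast filter list and the
 * H_REGISTER_LOGICAL_LAN input buffer; the rest holds the receive queue.
 */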

#define LLAN_BUFDESC_VALID	(1ULL << 63)
#define LLAN_ADD_MULTICAST	0x1
#define LLAN_DEL_MULTICAST	0x2
#define LLAN_CLEAR_MULTICAST	0x3

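/*
 * Per-receive-buffer state: the mbuf backing the buffer, its DMA map, and
 * the buffer descriptor that was handed to the hypervisor.
 */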
struct llan_xfer {
	struct mbuf *rx_mbuf;
	bus_dmamap_t rx_dmamap;
	uint64_t rx_bufdesc;
};

struct llan_receive_queue_entry { /* PAPR page 539 */
	uint8_t control;
	uint8_t reserved;
	uint16_t offset;
	uint32_t length;
	uint64_t handle;
} __packed;

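/*
 * Per-device software state.  io_lock serializes hypervisor calls and
 * access to the receive queue.
 */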
struct llan_softc {
	device_t	dev;
	struct mtx	io_lock;

	cell_t		unit;
	uint8_t		mac_address[8];

	int		irqid;
	struct resource	*irq;
	void		*irq_cookie;

	bus_dma_tag_t	rx_dma_tag;
	bus_dma_tag_t	rxbuf_dma_tag;
	bus_dma_tag_t	tx_dma_tag;

	bus_dmamap_t	tx_dma_map;

	struct llan_receive_queue_entry *rx_buf;
	int		rx_dma_slot;
	int		rx_valid_val;
	bus_dmamap_t	rx_buf_map;
	bus_addr_t	rx_buf_phys;
	bus_size_t	rx_buf_len;
	bus_addr_t	input_buf_phys;
	bus_addr_t	filter_buf_phys;
	struct llan_xfer rx_xfer[LLAN_MAX_RX_PACKETS];

	struct ifnet	*ifp;
};

static int	llan_probe(device_t);
static int	llan_attach(device_t);
static void	llan_intr(void *xsc);
static void	llan_init(void *xsc);
static void	llan_start(struct ifnet *ifp);
static int	llan_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static void	llan_rx_load_cb(void *xsc, bus_dma_segment_t *segs, int nsegs,
		    int err);
static int	llan_add_rxbuf(struct llan_softc *sc, struct llan_xfer *rx);
static int	llan_set_multicast(struct llan_softc *sc);

static devclass_t	llan_devclass;
static device_method_t	llan_methods[] = {
	DEVMETHOD(device_probe,		llan_probe),
	DEVMETHOD(device_attach,	llan_attach),

	DEVMETHOD_END
};
static driver_t llan_driver = {
	"llan",
	llan_methods,
	sizeof(struct llan_softc)
};
DRIVER_MODULE(llan, vdevice, llan_driver, llan_devclass, 0, 0);

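/*
 * Identify the "IBM,l-lan" virtual Ethernet device exported by the POWER
 * hypervisor.
 */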
static int
llan_probe(device_t dev)
{
	if (!ofw_bus_is_compatible(dev, "IBM,l-lan"))
		return (ENXIO);

	device_set_desc(dev, "POWER Hypervisor Virtual Ethernet");
	return (0);
}

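/*
 * Fetch the MAC address and unit number from the device tree, set up the
 * interrupt handler and DMA resources, and attach to the network stack.
 */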
static int
llan_attach(device_t dev)
{
	struct llan_softc *sc;
	phandle_t node;
	int error, i;

	sc = device_get_softc(dev);
	sc->dev = dev;

	/* Get firmware properties */
	node = ofw_bus_get_node(dev);
	OF_getprop(node, "local-mac-address", sc->mac_address,
	    sizeof(sc->mac_address));
	OF_getprop(node, "reg", &sc->unit, sizeof(sc->unit));

	mtx_init(&sc->io_lock, "llan", NULL, MTX_DEF);

	/* Setup interrupt */
	sc->irqid = 0;
	sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irqid,
	    RF_ACTIVE);

	if (!sc->irq) {
		device_printf(dev, "Could not allocate IRQ\n");
		mtx_destroy(&sc->io_lock);
		return (ENXIO);
	}

	bus_setup_intr(dev, sc->irq, INTR_TYPE_MISC | INTR_MPSAFE |
	    INTR_ENTROPY, NULL, llan_intr, sc, &sc->irq_cookie);

	/* Setup DMA */
	error = bus_dma_tag_create(bus_get_dma_tag(dev), 16, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    LLAN_RX_BUF_LEN, 1, BUS_SPACE_MAXSIZE_32BIT,
	    0, NULL, NULL, &sc->rx_dma_tag);
	error = bus_dma_tag_create(bus_get_dma_tag(dev), 4, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    BUS_SPACE_MAXSIZE, 1, BUS_SPACE_MAXSIZE_32BIT,
	    0, NULL, NULL, &sc->rxbuf_dma_tag);
	error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    BUS_SPACE_MAXSIZE, 6, BUS_SPACE_MAXSIZE_32BIT, 0,
	    busdma_lock_mutex, &sc->io_lock, &sc->tx_dma_tag);

	error = bus_dmamem_alloc(sc->rx_dma_tag, (void **)&sc->rx_buf,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->rx_buf_map);
	error = bus_dmamap_load(sc->rx_dma_tag, sc->rx_buf_map, sc->rx_buf,
	    LLAN_RX_BUF_LEN, llan_rx_load_cb, sc, 0);

	/* TX DMA maps */
	bus_dmamap_create(sc->tx_dma_tag, 0, &sc->tx_dma_map);

	/* RX DMA */
	for (i = 0; i < LLAN_MAX_RX_PACKETS; i++) {
		error = bus_dmamap_create(sc->rxbuf_dma_tag, 0,
		    &sc->rx_xfer[i].rx_dmamap);
		sc->rx_xfer[i].rx_mbuf = NULL;
	}

	/* Attach to network stack */
	sc->ifp = if_alloc(IFT_ETHER);
	sc->ifp->if_softc = sc;

	if_initname(sc->ifp, device_get_name(dev), device_get_unit(dev));
	sc->ifp->if_mtu = ETHERMTU; /* XXX max-frame-size from OF? */
	sc->ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	sc->ifp->if_hwassist = 0; /* XXX: ibm,illan-options */
	sc->ifp->if_capabilities = 0;
	sc->ifp->if_capenable = 0;
	sc->ifp->if_start = llan_start;
	sc->ifp->if_ioctl = llan_ioctl;
	sc->ifp->if_init = llan_init;

	IFQ_SET_MAXLEN(&sc->ifp->if_snd, LLAN_MAX_TX_PACKETS);
	sc->ifp->if_snd.ifq_drv_maxlen = LLAN_MAX_TX_PACKETS;
	IFQ_SET_READY(&sc->ifp->if_snd);

	ether_ifattach(sc->ifp, &sc->mac_address[2]);

	return (0);
}

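/*
 * Callback for loading the receive queue allocation: record its physical
 * address and carve the last two pages off for the multicast filter list
 * and the H_REGISTER_LOGICAL_LAN input buffer.
 */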
static void
llan_rx_load_cb(void *xsc, bus_dma_segment_t *segs, int nsegs, int err)
{
	struct llan_softc *sc = xsc;

	sc->rx_buf_phys = segs[0].ds_addr;
	sc->rx_buf_len = segs[0].ds_len - 2*PAGE_SIZE;
	sc->input_buf_phys = segs[0].ds_addr + segs[0].ds_len - PAGE_SIZE;
	sc->filter_buf_phys = segs[0].ds_addr + segs[0].ds_len - 2*PAGE_SIZE;
}

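/*
 * (Re)register the logical LAN with the hypervisor, post a full set of
 * receive buffers, and enable the virtual interrupt source.
 */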
static void
llan_init(void *xsc)
{
	struct llan_softc *sc = xsc;
	uint64_t rx_buf_desc;
	uint64_t macaddr;
	int err, i;

	mtx_lock(&sc->io_lock);

	phyp_hcall(H_FREE_LOGICAL_LAN, sc->unit);

	/* Create buffers (page 539) */
	sc->rx_dma_slot = 0;
	sc->rx_valid_val = 1;

	rx_buf_desc = LLAN_BUFDESC_VALID;
	rx_buf_desc |= (sc->rx_buf_len << 32);
	rx_buf_desc |= sc->rx_buf_phys;
	memcpy(&macaddr, sc->mac_address, 8);
	err = phyp_hcall(H_REGISTER_LOGICAL_LAN, sc->unit, sc->input_buf_phys,
	    rx_buf_desc, sc->filter_buf_phys, macaddr);

	for (i = 0; i < LLAN_MAX_RX_PACKETS; i++)
		llan_add_rxbuf(sc, &sc->rx_xfer[i]);

	phyp_hcall(H_VIO_SIGNAL, sc->unit, 1); /* Enable interrupts */

	/* Tell stack we're up */
	sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
	sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	mtx_unlock(&sc->io_lock);
}

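/*
 * Attach a fresh mbuf cluster to an RX slot: stash a back-pointer to the
 * slot in the first eight bytes of the buffer, map it for DMA, and hand
 * the resulting buffer descriptor to the hypervisor with
 * H_ADD_LOGICAL_LAN_BUFFER.
 */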
static int
llan_add_rxbuf(struct llan_softc *sc, struct llan_xfer *rx)
{
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	int error, nsegs;

	mtx_assert(&sc->io_lock, MA_OWNED);

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);

	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	if (rx->rx_mbuf != NULL) {
		bus_dmamap_sync(sc->rxbuf_dma_tag, rx->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->rxbuf_dma_tag, rx->rx_dmamap);
	}

	/* Save pointer to buffer structure */
	m_copyback(m, 0, 8, (void *)&rx);

	error = bus_dmamap_load_mbuf_sg(sc->rxbuf_dma_tag, rx->rx_dmamap, m,
	    segs, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->dev,
		    "cannot load RX DMA map %p, error = %d\n", rx, error);
		m_freem(m);
		return (error);
	}

	/* If nsegs is wrong then the stack is corrupt. */
	KASSERT(nsegs == 1,
	    ("%s: too many DMA segments (%d)", __func__, nsegs));
	rx->rx_mbuf = m;

	bus_dmamap_sync(sc->rxbuf_dma_tag, rx->rx_dmamap, BUS_DMASYNC_PREREAD);

	rx->rx_bufdesc = LLAN_BUFDESC_VALID;
	rx->rx_bufdesc |= (((uint64_t)segs[0].ds_len) << 32);
	rx->rx_bufdesc |= segs[0].ds_addr;
	error = phyp_hcall(H_ADD_LOGICAL_LAN_BUFFER, sc->unit, rx->rx_bufdesc);
	if (error != 0) {
		m_freem(m);
		rx->rx_mbuf = NULL;
		return (ENOBUFS);
	}

	return (0);
}

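/*
 * Receive interrupt: drain the receive queue with interrupts masked.  The
 * valid bit in each queue entry's control byte toggles every time the queue
 * wraps, so entries are consumed while the bit matches rx_valid_val.
 */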
static void
llan_intr(void *xsc)
{
	struct llan_softc *sc = xsc;
	struct llan_xfer *rx;
	struct mbuf *m;

	mtx_lock(&sc->io_lock);
	phyp_hcall(H_VIO_SIGNAL, sc->unit, 0);

	while ((sc->rx_buf[sc->rx_dma_slot].control >> 7) == sc->rx_valid_val) {
		rx = (struct llan_xfer *)sc->rx_buf[sc->rx_dma_slot].handle;
		m = rx->rx_mbuf;
		m_adj(m, sc->rx_buf[sc->rx_dma_slot].offset - 8);
		m->m_len = sc->rx_buf[sc->rx_dma_slot].length;

		/* llan_add_rxbuf does DMA sync and unload as well as requeue */
		if (llan_add_rxbuf(sc, rx) != 0) {
			sc->ifp->if_ierrors++;
			phyp_hcall(H_ADD_LOGICAL_LAN_BUFFER, sc->unit,
			    rx->rx_bufdesc);
			continue;
		}

		sc->ifp->if_ipackets++;
		m_adj(m, sc->rx_buf[sc->rx_dma_slot].offset);
		m->m_len = sc->rx_buf[sc->rx_dma_slot].length;
		m->m_pkthdr.rcvif = sc->ifp;
		m->m_pkthdr.len = m->m_len;
		sc->rx_dma_slot++;

		if (sc->rx_dma_slot >= sc->rx_buf_len/sizeof(sc->rx_buf[0])) {
			sc->rx_dma_slot = 0;
			sc->rx_valid_val = !sc->rx_valid_val;
		}

		mtx_unlock(&sc->io_lock);
		(*sc->ifp->if_input)(sc->ifp, m);
		mtx_lock(&sc->io_lock);
	}

	phyp_hcall(H_VIO_SIGNAL, sc->unit, 1);
	mtx_unlock(&sc->io_lock);
}

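/*
 * busdma callback for an outgoing packet: build up to six buffer
 * descriptors and pass them to the hypervisor with H_SEND_LOGICAL_LAN.
 */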
static void
llan_send_packet(void *xsc, bus_dma_segment_t *segs, int nsegs,
    bus_size_t mapsize, int error)
{
	struct llan_softc *sc = xsc;
	uint64_t bufdescs[6];
	int i;

	bzero(bufdescs, sizeof(bufdescs));

	for (i = 0; i < nsegs; i++) {
		bufdescs[i] = LLAN_BUFDESC_VALID;
		bufdescs[i] |= (((uint64_t)segs[i].ds_len) << 32);
		bufdescs[i] |= segs[i].ds_addr;
	}

	phyp_hcall(H_SEND_LOGICAL_LAN, sc->unit, bufdescs[0],
	    bufdescs[1], bufdescs[2], bufdescs[3], bufdescs[4], bufdescs[5], 0);
	/*
	 * The hypercall returning implies completion -- or that the call will
	 * not complete. In principle, we should try a few times if we get back
	 * H_BUSY based on the continuation token in R4. For now, just drop
	 * the packet in such cases.
	 */
}

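/*
 * Drain the interface send queue with io_lock held.  Chains longer than six
 * segments are collapsed to fit the H_SEND_LOGICAL_LAN descriptor limit;
 * the DMA load callback (llan_send_packet) performs the actual send.
 */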
static void
llan_start_locked(struct ifnet *ifp)
{
	struct llan_softc *sc = ifp->if_softc;
	bus_addr_t first;
	int nsegs;
	struct mbuf *mb_head, *m;

	mtx_assert(&sc->io_lock, MA_OWNED);
	first = 0;

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, mb_head);

		if (mb_head == NULL)
			break;

		BPF_MTAP(ifp, mb_head);

		for (m = mb_head, nsegs = 0; m != NULL; m = m->m_next)
			nsegs++;
		if (nsegs > 6) {
			m = m_collapse(mb_head, M_NOWAIT, 6);
			if (m == NULL) {
				m_freem(mb_head);
				continue;
			}
		}

		bus_dmamap_load_mbuf(sc->tx_dma_tag, sc->tx_dma_map,
			mb_head, llan_send_packet, sc, 0);
		bus_dmamap_unload(sc->tx_dma_tag, sc->tx_dma_map);
		m_freem(mb_head);
	}
}

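/* if_start handler: take the I/O lock and drain the send queue. */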
static void
llan_start(struct ifnet *ifp)
{
	struct llan_softc *sc = ifp->if_softc;

	mtx_lock(&sc->io_lock);
	llan_start_locked(ifp);
	mtx_unlock(&sc->io_lock);
}

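/*
 * Reprogram the hypervisor multicast filter: clear it, then re-add every
 * link-layer multicast address currently joined on the interface.
 */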
static int
llan_set_multicast(struct llan_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	struct ifmultiaddr *inm;
	uint64_t macaddr = 0;

	mtx_assert(&sc->io_lock, MA_OWNED);

	phyp_hcall(H_MULTICAST_CTRL, sc->unit, LLAN_CLEAR_MULTICAST, 0);

	if_maddr_rlock(ifp);
	TAILQ_FOREACH(inm, &ifp->if_multiaddrs, ifma_link) {
		if (inm->ifma_addr->sa_family != AF_LINK)
			continue;

		memcpy((uint8_t *)&macaddr + 2,
		    LLADDR((struct sockaddr_dl *)inm->ifma_addr), 6);
		phyp_hcall(H_MULTICAST_CTRL, sc->unit, LLAN_ADD_MULTICAST,
		    macaddr);
	}
	if_maddr_runlock(ifp);

	return (0);
}

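/*
 * ioctl handler: multicast changes are pushed to the hypervisor when the
 * interface is running; everything else falls through to ether_ioctl().
 */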
static int
llan_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	int err = 0;
	struct llan_softc *sc = ifp->if_softc;

	switch (cmd) {
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		mtx_lock(&sc->io_lock);
		if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			llan_set_multicast(sc);
		mtx_unlock(&sc->io_lock);
		break;
	case SIOCSIFFLAGS:
	default:
		err = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (err);
}