/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (C) 2011-2014 Matteo Landi, Luigi Rizzo
 * Copyright (C) 2013-2016 Universita` di Pisa
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *   1. Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 *
 * This header contains the definitions of constants and function
 * prototypes used only in kernel space.
 */

#ifndef _NET_NETMAP_KERN_H_
#define _NET_NETMAP_KERN_H_

#if defined(linux)

#if defined(CONFIG_NETMAP_EXTMEM)
#define WITH_EXTMEM
#endif
#if defined(CONFIG_NETMAP_VALE)
#define WITH_VALE
#endif
#if defined(CONFIG_NETMAP_PIPE)
#define WITH_PIPES
#endif
#if defined(CONFIG_NETMAP_MONITOR)
#define WITH_MONITOR
#endif
#if defined(CONFIG_NETMAP_GENERIC)
#define WITH_GENERIC
#endif
#if defined(CONFIG_NETMAP_PTNETMAP)
#define WITH_PTNETMAP
#endif
#if defined(CONFIG_NETMAP_SINK)
#define WITH_SINK
#endif
#if defined(CONFIG_NETMAP_NULL)
#define WITH_NMNULL
#endif

#elif defined (_WIN32)
#define WITH_VALE	// comment out to disable VALE support
#define WITH_PIPES
#define WITH_MONITOR
#define WITH_GENERIC
#define WITH_NMNULL

#else	/* neither linux nor windows */
#define WITH_VALE	// comment out to disable VALE support
#define WITH_PIPES
#define WITH_MONITOR
#define WITH_GENERIC
#define WITH_EXTMEM
#define WITH_NMNULL
#endif

#if defined(__FreeBSD__)
#include <sys/selinfo.h>

#define likely(x)	__builtin_expect((long)!!(x), 1L)
#define unlikely(x)	__builtin_expect((long)!!(x), 0L)
#define __user

#define	NM_LOCK_T	struct mtx	/* low level spinlock, used to protect queues */

#define NM_MTX_T	struct sx	/* OS-specific mutex (sleepable) */
#define NM_MTX_INIT(m)		sx_init(&(m), #m)
#define NM_MTX_DESTROY(m)	sx_destroy(&(m))
#define NM_MTX_LOCK(m)		sx_xlock(&(m))
#define NM_MTX_SPINLOCK(m)	while (!sx_try_xlock(&(m))) ;
#define NM_MTX_UNLOCK(m)	sx_xunlock(&(m))
#define NM_MTX_ASSERT(m)	sx_assert(&(m), SA_XLOCKED)

#define	NM_SELINFO_T	struct nm_selinfo
#define NM_SELRECORD_T	struct thread
#define	MBUF_LEN(m)	((m)->m_pkthdr.len)
#define MBUF_TXQ(m)	((m)->m_pkthdr.flowid)
#define MBUF_TRANSMIT(na, ifp, m)	((na)->if_transmit(ifp, m))
#define	GEN_TX_MBUF_IFP(m)	((m)->m_pkthdr.rcvif)
#define	GEN_TX_MBUF_NA(m)	((struct netmap_adapter *)(m)->m_ext.ext_arg1)

#define NM_ATOMIC_T	volatile int /* required by atomic/bitops.h */
/* atomic operations */
#include <machine/atomic.h>
#define NM_ATOMIC_TEST_AND_SET(p)       (!atomic_cmpset_acq_int((p), 0, 1))
#define NM_ATOMIC_CLEAR(p)              atomic_store_rel_int((p), 0)

struct netmap_adapter *netmap_getna(if_t ifp);

#define MBUF_REFCNT(m)		((m)->m_ext.ext_count)
#define SET_MBUF_REFCNT(m, x)	((m)->m_ext.ext_count = (x))

#define MBUF_QUEUED(m)		1

struct nm_selinfo {
	/* Support for select(2) and poll(2). */
	struct selinfo si;
	/* Support for kqueue(9). See comments in netmap_freebsd.c */
	struct taskqueue *ntfytq;
	struct task ntfytask;
	struct mtx m;
	char mtxname[32];
	int kqueue_users;
};


struct hrtimer {
    /* Not used in FreeBSD. */
};

#define NM_BNS_GET(b)
#define NM_BNS_PUT(b)

#elif defined (linux)

#define	NM_LOCK_T	safe_spinlock_t	// see bsd_glue.h
#define	NM_SELINFO_T	wait_queue_head_t
#define	MBUF_LEN(m)	((m)->len)
#define MBUF_TRANSMIT(na, ifp, m)							\
	({										\
		/* Avoid infinite recursion with generic. */				\
		m->priority = NM_MAGIC_PRIORITY_TX;					\
		(((struct net_device_ops *)(na)->if_transmit)->ndo_start_xmit(m, ifp));	\
		0;									\
	})

/* See explanation in nm_os_generic_xmit_frame. */
#define	GEN_TX_MBUF_IFP(m)	((if_t)skb_shinfo(m)->destructor_arg)

#define NM_ATOMIC_T	volatile long unsigned int

#define NM_MTX_T	struct mutex	/* OS-specific sleepable lock */
#define NM_MTX_INIT(m)	mutex_init(&(m))
#define NM_MTX_DESTROY(m)	do { (void)(m); } while (0)
#define NM_MTX_LOCK(m)		mutex_lock(&(m))
#define NM_MTX_UNLOCK(m)	mutex_unlock(&(m))
#define NM_MTX_ASSERT(m)	mutex_is_locked(&(m))

#ifndef DEV_NETMAP
#define DEV_NETMAP
#endif /* DEV_NETMAP */

#elif defined (__APPLE__)

#warning apple support is incomplete.
#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)
#define	NM_LOCK_T	IOLock *
#define	NM_SELINFO_T	struct selinfo
#define	MBUF_LEN(m)	((m)->m_pkthdr.len)

#elif defined (_WIN32)
#include "../../../WINDOWS/win_glue.h"

#define NM_SELRECORD_T		IO_STACK_LOCATION
#define NM_SELINFO_T		win_SELINFO		// see win_glue.h
#define NM_LOCK_T		win_spinlock_t	// see win_glue.h
#define NM_MTX_T		KGUARDED_MUTEX	/* OS-specific mutex (sleepable) */

#define NM_MTX_INIT(m)		KeInitializeGuardedMutex(&(m))
#define NM_MTX_DESTROY(m)	do { (void)(m); } while (0)
#define NM_MTX_LOCK(m)		KeAcquireGuardedMutex(&(m))
#define NM_MTX_UNLOCK(m)	KeReleaseGuardedMutex(&(m))
#define NM_MTX_ASSERT(m)	assert(&m.Count>0)

// These linknames are for the NDIS driver
#define NETMAP_NDIS_LINKNAME_STRING             L"\\DosDevices\\NMAPNDIS"
#define NETMAP_NDIS_NTDEVICE_STRING             L"\\Device\\NMAPNDIS"

// Definition of internal driver-to-driver ioctl codes
#define NETMAP_KERNEL_XCHANGE_POINTERS		_IO('i', 180)
#define NETMAP_KERNEL_SEND_SHUTDOWN_SIGNAL	_IO_direct('i', 195)

struct hrtimer {
	KTIMER timer;
	BOOLEAN active;
	KDPC deferred_proc;
};

/* MSVC does not have likely/unlikely support */
#ifdef _MSC_VER
#define likely(x)	(x)
#define unlikely(x)	(x)
#else
#define likely(x)	__builtin_expect((long)!!(x), 1L)
#define unlikely(x)	__builtin_expect((long)!!(x), 0L)
#endif //_MSC_VER

#else

#error unsupported platform

#endif /* end - platform-specific code */

#ifndef _WIN32 /* support for emulated sysctl */
#define SYSBEGIN(x)
#define SYSEND
#endif /* _WIN32 */

#define NM_ACCESS_ONCE(x)	(*(volatile __typeof__(x) *)&(x))

#define	NMG_LOCK_T		NM_MTX_T
#define	NMG_LOCK_INIT()		NM_MTX_INIT(netmap_global_lock)
#define	NMG_LOCK_DESTROY()	NM_MTX_DESTROY(netmap_global_lock)
#define	NMG_LOCK()		NM_MTX_LOCK(netmap_global_lock)
#define	NMG_UNLOCK()		NM_MTX_UNLOCK(netmap_global_lock)
#define	NMG_LOCK_ASSERT()	NM_MTX_ASSERT(netmap_global_lock)

#if defined(__FreeBSD__)
#define nm_prerr_int	printf
#define nm_prinf_int	printf
#elif defined (_WIN32)
#define nm_prerr_int	DbgPrint
#define nm_prinf_int	DbgPrint
#elif defined(linux)
#define nm_prerr_int(fmt, arg...)    printk(KERN_ERR fmt, ##arg)
#define nm_prinf_int(fmt, arg...)    printk(KERN_INFO fmt, ##arg)
#endif

#define nm_prinf(format, ...)					\
	do {							\
		struct timeval __xxts;				\
		microtime(&__xxts);				\
		nm_prinf_int("%03d.%06d [%4d] %-25s " format "\n",\
		(int)__xxts.tv_sec % 1000, (int)__xxts.tv_usec,	\
		__LINE__, __FUNCTION__, ##__VA_ARGS__);		\
	} while (0)

#define nm_prerr(format, ...)					\
	do {							\
		struct timeval __xxts;				\
		microtime(&__xxts);				\
		nm_prerr_int("%03d.%06d [%4d] %-25s " format "\n",\
		(int)__xxts.tv_sec % 1000, (int)__xxts.tv_usec,	\
		__LINE__, __FUNCTION__, ##__VA_ARGS__);		\
	} while (0)

/* Disabled printf (used to be ND). */
#define nm_prdis(format, ...)

/* Rate limited; lps indicates how many messages per second. */
#define nm_prlim(lps, format, ...)				\
	do {							\
		static int t0, __cnt;				\
		if (t0 != time_second) {			\
			t0 = time_second;			\
			__cnt = 0;				\
		}						\
		if (__cnt++ < lps)				\
			nm_prinf(format, ##__VA_ARGS__);	\
	} while (0)
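/*
 * Example (illustrative only): log at most 5 messages per second from
 * a hot path, and an unconditional error elsewhere.
 *
 *	nm_prlim(5, "ring %d: dropped packet", kring->ring_id);
 *	nm_prerr("cannot allocate krings for %s", na->name);
 */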

struct netmap_adapter;
struct nm_bdg_fwd;
struct nm_bridge;
struct netmap_priv_d;
struct nm_bdg_args;

/* os-specific NM_SELINFO_T initialization/destruction functions */
int nm_os_selinfo_init(NM_SELINFO_T *, const char *name);
void nm_os_selinfo_uninit(NM_SELINFO_T *);

const char *nm_dump_buf(char *p, int len, int lim, char *dst);

void nm_os_selwakeup(NM_SELINFO_T *si);
void nm_os_selrecord(NM_SELRECORD_T *sr, NM_SELINFO_T *si);

int nm_os_ifnet_init(void);
void nm_os_ifnet_fini(void);
void nm_os_ifnet_lock(void);
void nm_os_ifnet_unlock(void);

unsigned nm_os_ifnet_mtu(if_t ifp);

void nm_os_get_module(void);
void nm_os_put_module(void);

void netmap_make_zombie(if_t);
void netmap_undo_zombie(if_t);

/* os independent alloc/realloc/free */
void *nm_os_malloc(size_t);
void *nm_os_vmalloc(size_t);
void *nm_os_realloc(void *, size_t new_size, size_t old_size);
void nm_os_free(void *);
void nm_os_vfree(void *);

/* os specific attach/detach enter/exit-netmap-mode routines */
void nm_os_onattach(if_t);
void nm_os_ondetach(if_t);
void nm_os_onenter(if_t);
void nm_os_onexit(if_t);

/* passes a packet up to the host stack.
 * If the packet is sent (or dropped) immediately it returns NULL,
 * otherwise it links the packet to prev and returns m.
 * In this case, a final call with m=NULL and prev != NULL will send up
 * the entire chain to the host stack.
 */
void *nm_os_send_up(if_t, struct mbuf *m, struct mbuf *prev);
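/*
 * Illustrative sketch of the chaining protocol described above;
 * dequeue_next() is a hypothetical source of mbufs, not a netmap API:
 *
 *	struct mbuf *chain = NULL, *m;
 *
 *	while ((m = dequeue_next()) != NULL)
 *		chain = nm_os_send_up(ifp, m, chain);
 *	if (chain != NULL)
 *		nm_os_send_up(ifp, NULL, chain);
 */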

int nm_os_mbuf_has_seg_offld(struct mbuf *m);
int nm_os_mbuf_has_csum_offld(struct mbuf *m);

#include "netmap_mbq.h"

extern NMG_LOCK_T	netmap_global_lock;

enum txrx { NR_RX = 0, NR_TX = 1, NR_TXRX };

static __inline const char*
nm_txrx2str(enum txrx t)
{
	return (t == NR_RX ? "RX" : "TX");
}

static __inline enum txrx
nm_txrx_swap(enum txrx t)
{
	return (t == NR_RX ? NR_TX : NR_RX);
}

#define for_rx_tx(t)	for ((t) = 0; (t) < NR_TXRX; (t)++)
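/*
 * Example (sketch): visit both directions of an adapter, e.g. to
 * report how many rings it has; nma_get_nrings() is defined further
 * below in this header.
 *
 *	enum txrx t;
 *
 *	for_rx_tx(t)
 *		nm_prinf("%s rings: %u", nm_txrx2str(t),
 *		    nma_get_nrings(na, t));
 */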

#ifdef WITH_MONITOR
struct netmap_zmon_list {
	struct netmap_kring *next;
	struct netmap_kring *prev;
};
#endif /* WITH_MONITOR */

/*
 * private, kernel view of a ring. Keeps track of the status of
 * a ring across system calls.
 *
 *	nr_hwcur	index of the next buffer to refill.
 *			It corresponds to ring->head
 *			at the time the system call returns.
 *
 *	nr_hwtail	index of the first buffer owned by the kernel.
 *			On RX, hwcur->hwtail are receive buffers
 *			not yet released. hwcur is advanced following
 *			ring->head, hwtail is advanced on incoming packets,
 *			and a wakeup is generated when hwtail passes ring->cur
 *			On TX, hwcur->rcur have been filled by the sender
 *			but not sent yet to the NIC; rcur->hwtail are available
 *			for new transmissions, and hwtail->hwcur-1 are pending
 *			transmissions not yet acknowledged.
 *
 * The indexes in the NIC and netmap rings are offset by nkr_hwofs slots.
 * This is so that, on a reset, buffers owned by userspace are not
 * modified by the kernel. In particular:
 * RX rings: the next empty buffer (hwtail + hwofs) coincides with
 * 	the next empty buffer as known by the hardware (next_to_check or so).
 * TX rings: hwcur + hwofs coincides with next_to_send
 *
 * The following fields are used to implement lock-free copy of packets
 * from input to output ports in VALE switch:
 *	nkr_hwlease	buffer after the last one being copied.
 *			A writer in nm_bdg_flush reserves N buffers
 *			from nr_hwlease, advances it, then does the
 *			copy outside the lock.
 *			In RX rings (used for VALE ports),
 *			nkr_hwtail <= nkr_hwlease < nkr_hwcur+N-1
 *			In TX rings (used for NIC or host stack ports)
 *			nkr_hwcur <= nkr_hwlease < nkr_hwtail
 *	nkr_leases	array of nkr_num_slots where writers can report
 *			completion of their block. NR_NOSLOT (~0) indicates
 *			that the writer has not finished yet
 *	nkr_lease_idx	index of next free slot in nr_leases, to be assigned
 *
 * The kring is manipulated by txsync/rxsync and generic netmap functions.
 *
 * Concurrent rxsync or txsync on the same ring are prevented
 * by nm_kr_(try)lock(), which in turn uses nr_busy. This is all we need
 * for NIC rings, and for TX rings attached to the host stack.
 *
 * RX rings attached to the host stack use an mbq (rx_queue) on both
 * rxsync_from_host() and netmap_transmit(). The mbq is protected
 * by its internal lock.
 *
 * RX rings attached to the VALE switch are accessed by both senders
 * and receivers. They are protected by the q_lock on the RX ring.
 */
struct netmap_kring {
	struct netmap_ring	*ring;

	uint32_t	nr_hwcur;  /* should be nr_hwhead */
	uint32_t	nr_hwtail;

	/*
	 * Copies of values in user rings, so we do not need to look
	 * at the ring (which could be modified). These are set in the
	 * *sync_prologue()/finalize() routines.
	 */
	uint32_t	rhead;
	uint32_t	rcur;
	uint32_t	rtail;

	uint32_t	nr_kflags;	/* private driver flags */
#define NKR_PENDINTR	0x1		// Pending interrupt.
#define NKR_EXCLUSIVE	0x2		/* exclusive binding */
#define NKR_FORWARD	0x4		/* (host ring only) there are
					   packets to forward
					 */
#define NKR_NEEDRING	0x8		/* ring needed even if users==0
					 * (used internally by pipes and
					 *  by ptnetmap host ports)
					 */
#define NKR_NOINTR      0x10            /* don't use interrupts on this ring */
#define NKR_FAKERING	0x20		/* don't allocate/free buffers */

	uint32_t	nr_mode;
	uint32_t	nr_pending_mode;
#define NKR_NETMAP_OFF	0x0
#define NKR_NETMAP_ON	0x1

	uint32_t	nkr_num_slots;

	/*
	 * On a NIC reset, the NIC ring indexes may be reset but the
	 * indexes in the netmap rings remain the same. nkr_hwofs
	 * keeps track of the offset between the two.
	 *
	 * Moreover, during reset, we can restore only the subset of
	 * the NIC ring that corresponds to the kernel-owned part of
	 * the netmap ring. The rest of the slots must be restored
	 * by the *sync routines when the user releases more slots.
	 * The nkr_to_refill field keeps track of the number of slots
	 * that still need to be restored.
	 */
	int32_t		nkr_hwofs;
	int32_t		nkr_to_refill;

	/* last_reclaim is an opaque marker to help reduce the frequency
	 * of operations such as reclaiming tx buffers. A possible use
	 * is to set it to ticks and do the reclaim only once per tick.
	 */
	uint64_t	last_reclaim;


	NM_SELINFO_T	si;		/* poll/select wait queue */
	NM_LOCK_T	q_lock;		/* protects kring and ring. */
	NM_ATOMIC_T	nr_busy;	/* prevent concurrent syscalls */

	/* the adapter that owns this kring */
	struct netmap_adapter *na;

	/* the adapter that wants to be notified when this kring has
	 * new slots available. This is usually the same as the above,
	 * but wrappers may let it point to themselves
	 */
	struct netmap_adapter *notify_na;

	/* The following fields are for VALE switch support */
	struct nm_bdg_fwd *nkr_ft;
	uint32_t	*nkr_leases;
#define NR_NOSLOT	((uint32_t)~0)	/* used in nkr_*lease* */
	uint32_t	nkr_hwlease;
	uint32_t	nkr_lease_idx;

	/* while nkr_stopped is set, no new [tr]xsync operations can
	 * be started on this kring.
	 * This is used by netmap_disable_all_rings()
	 * to find a synchronization point where critical data
	 * structures pointed to by the kring can be added or removed
	 */
	volatile int nkr_stopped;

	/* Support for adapters without native netmap support.
	 * On tx rings we preallocate an array of tx buffers
	 * (same size as the netmap ring), on rx rings we
	 * store incoming mbufs in a queue that is drained by
	 * a rxsync.
	 */
	struct mbuf	**tx_pool;
	struct mbuf	*tx_event;	/* TX event used as a notification */
	NM_LOCK_T	tx_event_lock;	/* protects the tx_event mbuf */
#ifdef __FreeBSD__
	struct callout	tx_event_callout;
#endif
	struct mbq	rx_queue;       /* intercepted rx mbufs. */

	uint32_t	users;		/* existing bindings for this ring */

	uint32_t	ring_id;	/* kring identifier */
	enum txrx	tx;		/* kind of ring (tx or rx) */
	char name[64];			/* diagnostic */

	/* [tx]sync callback for this kring.
	 * The default nm_kring_create callback (netmap_krings_create)
	 * sets the nm_sync callback of each hardware tx(rx) kring to
	 * the corresponding nm_txsync(nm_rxsync) taken from the
	 * netmap_adapter; moreover, it sets the sync callback
	 * of the host tx(rx) ring to netmap_txsync_to_host
	 * (netmap_rxsync_from_host).
	 *
	 * Overrides: the above configuration is not changed by
	 * any of the nm_krings_create callbacks.
	 */
	int (*nm_sync)(struct netmap_kring *kring, int flags);
	int (*nm_notify)(struct netmap_kring *kring, int flags);

#ifdef WITH_PIPES
	struct netmap_kring *pipe;	/* if this is a pipe ring,
					 * pointer to the other end
					 */
	uint32_t pipe_tail;		/* hwtail updated by the other end */
#endif /* WITH_PIPES */

	/* mask for the offset-related part of the ptr field in the slots */
	uint64_t offset_mask;
	/* maximum user-specified offset, as stipulated at bind time.
	 * Larger offset requests will be silently capped to offset_max.
	 */
	uint64_t offset_max;
	/* minimum gap between two consecutive offsets into the same
	 * buffer, as stipulated at bind time. This is used to choose
	 * the hwbuf_len, but is not otherwise checked for compliance
	 * at runtime.
	 */
	uint64_t offset_gap;

	/* size of hardware buffer. This may be less than the size of
	 * the netmap buffers because of non-zero offsets, or because
	 * the netmap buffer size exceeds the capability of the hardware.
	 */
	uint64_t hwbuf_len;

	/* required alignment (in bytes) for the buffers used by this ring.
	 * Netmap buffers are aligned to cachelines, which should suffice
	 * for most NICs. If the user is passing offsets, though, we need
	 * to check that the resulting buf address complies with any
	 * alignment restriction.
	 */
	uint64_t buf_align;

	/* hardware specific logic for the selection of the hwbuf_len */
	int (*nm_bufcfg)(struct netmap_kring *kring, uint64_t target);

	int (*save_notify)(struct netmap_kring *kring, int flags);

#ifdef WITH_MONITOR
	/* array of krings that are monitoring this kring */
	struct netmap_kring **monitors;
	uint32_t max_monitors; /* current size of the monitors array */
	uint32_t n_monitors;	/* next unused entry in the monitor array */
	uint32_t mon_pos[NR_TXRX]; /* index of this ring in the monitored ring array */
	uint32_t mon_tail;  /* last seen slot on rx */

	/* circular list of zero-copy monitors */
	struct netmap_zmon_list zmon_list[NR_TXRX];

	/*
	 * Monitors work by intercepting the sync and notify callbacks of the
	 * monitored krings. This is implemented by replacing the pointers
	 * above and saving the previous ones in mon_* pointers below
	 */
	int (*mon_sync)(struct netmap_kring *kring, int flags);
	int (*mon_notify)(struct netmap_kring *kring, int flags);

#endif
}
#ifdef _WIN32
__declspec(align(64));
#else
__attribute__((__aligned__(64)));
#endif

/* return 1 iff the kring needs to be turned on */
static inline int
nm_kring_pending_on(struct netmap_kring *kring)
{
	return kring->nr_pending_mode == NKR_NETMAP_ON &&
	       kring->nr_mode == NKR_NETMAP_OFF;
}

/* return 1 iff the kring needs to be turned off */
static inline int
nm_kring_pending_off(struct netmap_kring *kring)
{
	return kring->nr_pending_mode == NKR_NETMAP_OFF &&
	       kring->nr_mode == NKR_NETMAP_ON;
}
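/*
 * These helpers are typically used in an nm_register() callback to
 * commit the pending mode of each kring; a minimal sketch (essentially
 * what netmap_krings_mode_commit(), declared below, does for all rings):
 *
 *	if (onoff && nm_kring_pending_on(kring))
 *		kring->nr_mode = NKR_NETMAP_ON;
 *	else if (!onoff && nm_kring_pending_off(kring))
 *		kring->nr_mode = NKR_NETMAP_OFF;
 */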

/* return the next index, with wraparound */
static inline uint32_t
nm_next(uint32_t i, uint32_t lim)
{
	return unlikely(i == lim) ? 0 : i + 1;
}


/* return the previous index, with wraparound */
static inline uint32_t
nm_prev(uint32_t i, uint32_t lim)
{
	return unlikely(i == 0) ? lim : i - 1;
}
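/*
 * nm_next() is the canonical way to walk a ring. A driver txsync
 * refill loop typically looks like this (sketch, details omitted):
 *
 *	u_int nm_i = kring->nr_hwcur;
 *	u_int const lim = kring->nkr_num_slots - 1;
 *
 *	while (nm_i != kring->rhead) {
 *		struct netmap_slot *slot = &kring->ring->slot[nm_i];
 *		... queue slot->buf_idx / slot->len to the NIC ...
 *		nm_i = nm_next(nm_i, lim);
 *	}
 *	kring->nr_hwcur = kring->rhead;
 */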


/*
 *
 * Here is the layout for the Rx and Tx rings.

       RxRING                            TxRING

      +-----------------+            +-----------------+
      |                 |            |                 |
      |      free       |            |      free       |
      +-----------------+            +-----------------+
head->| owned by user   |<-hwcur     | not sent to nic |<-hwcur
      |                 |            | yet             |
      +-----------------+            |                 |
 cur->| available to    |            |                 |
      | user, not read  |            +-----------------+
      | yet             |       cur->| (being          |
      |                 |            |  prepared)      |
      |                 |            |                 |
      +-----------------+            +     ------      +
tail->|                 |<-hwtail    |                 |<-hwlease
      | (being          | ...        |                 | ...
      |  prepared)      | ...        |                 | ...
      +-----------------+ ...        |                 | ...
      |                 |<-hwlease   +-----------------+
      |                 |      tail->|                 |<-hwtail
      |                 |            |                 |
      |                 |            |                 |
      |                 |            |                 |
      +-----------------+            +-----------------+

 * The cur/tail (user view) and hwcur/hwtail (kernel view)
 * are used in the normal operation of the card.
 *
 * When a ring is the output of a switch port (Rx ring for
 * a VALE port, Tx ring for the host stack or NIC), slots
 * are reserved in blocks through 'hwlease' which points
 * to the next unused slot.
 * On an Rx ring, hwlease is always after hwtail,
 * and completions cause hwtail to advance.
 * On a Tx ring, hwlease is always between cur and hwtail,
 * and completions cause cur to advance.
 *
 * nm_kr_space() returns the maximum number of slots that
 * can be assigned.
 * nm_kr_lease() reserves the required number of buffers,
 *    advances nkr_hwlease and also returns an entry in
 *    a circular array where completions should be reported.
 */
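/*
 * Hedged sketch of the lease protocol as used by the VALE flush code
 * (nm_kr_space() and nm_kr_lease() are implemented in the VALE code;
 * the local names here are illustrative):
 *
 *	n = nm_kr_space(kring, is_rx);		// slots we may reserve
 *	lease_idx = nm_kr_lease(kring, n, is_rx); // advances nkr_hwlease
 *	... copy the n packets outside the lock ...
 *	kring->nkr_leases[lease_idx] = j;	// report completion
 *	... hwtail then advances over contiguous completed leases ...
 */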

struct lut_entry;
#ifdef __FreeBSD__
#define plut_entry lut_entry
#endif

struct netmap_lut {
	struct lut_entry *lut;
	struct plut_entry *plut;
	uint32_t objtotal;	/* max buffer index */
	uint32_t objsize;	/* buffer size */
};

struct netmap_vp_adapter; // forward
struct nm_bridge;

/* Struct to be filled by nm_config callbacks. */
struct nm_config_info {
	unsigned num_tx_rings;
	unsigned num_rx_rings;
	unsigned num_tx_descs;
	unsigned num_rx_descs;
	unsigned rx_buf_maxsize;
};

/*
 * default type for the magic field.
 * May be overridden in glue code.
 */
#ifndef NM_OS_MAGIC
#define NM_OS_MAGIC uint32_t
#endif /* !NM_OS_MAGIC */

/*
 * The "struct netmap_adapter" extends the "struct adapter"
 * (or equivalent) device descriptor.
 * It contains all base fields needed to support netmap operation.
 * There are in fact different types of netmap adapters
 * (native, generic, VALE switch...) so a netmap_adapter is
 * just the first field in the derived type.
 */
struct netmap_adapter {
	/*
	 * On linux we do not have a good way to tell if an interface
	 * is netmap-capable. So we always use the following trick:
	 * NA(ifp) points here, and the first entry (which hopefully
	 * always exists and is at least 32 bits) contains a magic
	 * value which we can use to detect that the interface is good.
	 */
	NM_OS_MAGIC magic;
	uint32_t na_flags;	/* enabled, and other flags */
#define NAF_SKIP_INTR	1	/* use the regular interrupt handler.
				 * useful during initialization
				 */
#define NAF_SW_ONLY	2	/* forward packets only to sw adapter */
#define NAF_BDG_MAYSLEEP 4	/* the bridge is allowed to sleep when
				 * forwarding packets coming from this
				 * interface
				 */
#define NAF_MEM_OWNER	8	/* the adapter uses its own memory area
				 * that cannot be changed
				 */
#define NAF_NATIVE      16      /* the adapter is native.
				 * Virtual ports (non persistent vale ports,
				 * pipes, monitors...) should never use
				 * this flag.
				 */
#define	NAF_NETMAP_ON	32	/* netmap is active (either native or
				 * emulated). Where possible (e.g. FreeBSD)
				 * IFCAP_NETMAP also mirrors this flag.
				 */
#define NAF_HOST_RINGS  64	/* the adapter supports the host rings */
#define NAF_FORCE_NATIVE 128	/* the adapter is always NATIVE */
/* free */
#define NAF_MOREFRAG	512	/* the adapter supports NS_MOREFRAG */
#define NAF_OFFSETS	1024	/* the adapter supports the slot offsets */
#define NAF_HOST_ALL	2048	/* the adapter wants as many host rings as hw */
#define NAF_ZOMBIE	(1U<<30) /* the nic driver has been unloaded */
#define	NAF_BUSY	(1U<<31) /* the adapter is used internally and
				  * cannot be registered from userspace
				  */
	int active_fds; /* number of user-space descriptors using this
			 interface, which is equal to the number of
			 struct netmap_if objs in the mapped region. */

	u_int num_rx_rings; /* number of adapter receive rings */
	u_int num_tx_rings; /* number of adapter transmit rings */
	u_int num_host_rx_rings; /* number of host receive rings */
	u_int num_host_tx_rings; /* number of host transmit rings */

	u_int num_tx_desc;  /* number of descriptors in each queue */
	u_int num_rx_desc;

	/* tx_rings and rx_rings are private but allocated as a
	 * contiguous chunk of memory. Each array has N+K entries,
	 * N for the hardware rings and K for the host rings.
	 */
	struct netmap_kring **tx_rings; /* array of TX rings. */
	struct netmap_kring **rx_rings; /* array of RX rings. */

	void *tailroom;		       /* space below the rings array */
				       /* (used for leases) */


	NM_SELINFO_T si[NR_TXRX];	/* global wait queues */

	/* count users of the global wait queues */
	int si_users[NR_TXRX];

	void *pdev; /* used to store pci device */

	/* copy of if_qflush and if_transmit pointers, to intercept
	 * packets from the network stack when netmap is active.
	 */
	int     (*if_transmit)(if_t, struct mbuf *);

	/* copy of if_input for netmap_send_up() */
	void     (*if_input)(if_t, struct mbuf *);

	/* Back reference to the parent ifnet struct. Used for
	 * hardware ports (emulated netmap included). */
	if_t ifp; /* adapter is if_getsoftc(ifp) */

	/*---- callbacks for this netmap adapter -----*/
	/*
	 * nm_dtor() is the cleanup routine called when destroying
	 *	the adapter.
	 *	Called with NMG_LOCK held.
	 *
	 * nm_register() is called on NIOCREGIF and close() to enter
	 *	or exit netmap mode on the NIC
	 *	Called with NMG_LOCK held.
	 *
	 * nm_txsync() pushes packets to the underlying hw/switch
	 *
	 * nm_rxsync() collects packets from the underlying hw/switch
	 *
	 * nm_config() returns configuration information from the OS
	 *	Called with NMG_LOCK held.
	 *
	 * nm_bufcfg()
	 *      the purpose of this callback is to fill the kring->hwbuf_len
	 *      (l) and kring->buf_align fields. The l value is most important
	 *      for RX rings, where we want to disallow writes outside of the
	 *      netmap buffer. The l value must be computed taking into account
	 *      the stipulated max_offset (o), possibly increased if there are
	 *      alignment constraints, the maxframe (m), if known, and the
	 *      current NETMAP_BUF_SIZE (b) of the memory region used by the
	 *      adapter. We want the largest supported l such that o + l <= b.
	 *      If m is known to be <= b - o, the callback may also choose the
	 *      largest l <= m, ignoring the offset.  The buf_align field is
	 *      most important for TX rings when there are offsets.  The user
	 *      will see this value in the ring->buf_align field.  Misaligned
	 *      offsets will cause the corresponding packets to be silently
	 *      dropped.
	 *
	 * nm_krings_create() create and init the tx_rings and
	 * 	rx_rings arrays of kring structures. In particular,
	 * 	set the nm_sync callbacks for each ring.
	 * 	There is no need to also allocate the corresponding
	 * 	netmap_rings, since netmap_mem_rings_create() will always
	 * 	be called to provide the missing ones.
	 *	Called with NMG_LOCK held.
	 *
	 * nm_krings_delete() cleanup and delete the tx_rings and rx_rings
	 * 	arrays
	 *	Called with NMG_LOCK held.
	 *
	 * nm_notify() is used to act after data have become available
	 * 	(or the stopped state of the ring has changed)
	 *	For hw devices this is typically a selwakeup(),
	 *	but for NIC/host ports attached to a switch (or vice-versa)
	 *	we also need to invoke the 'txsync' code downstream.
	 *      This callback pointer is actually used only to initialize
	 *      kring->nm_notify.
	 *      Return values are the same as for netmap_rx_irq().
	 */
	void (*nm_dtor)(struct netmap_adapter *);

	int (*nm_register)(struct netmap_adapter *, int onoff);
	void (*nm_intr)(struct netmap_adapter *, int onoff);

	int (*nm_txsync)(struct netmap_kring *kring, int flags);
	int (*nm_rxsync)(struct netmap_kring *kring, int flags);
	int (*nm_notify)(struct netmap_kring *kring, int flags);
	int (*nm_bufcfg)(struct netmap_kring *kring, uint64_t target);
#define NAF_FORCE_READ      1
#define NAF_FORCE_RECLAIM   2
#define NAF_CAN_FORWARD_DOWN 4
	/* return configuration information */
	int (*nm_config)(struct netmap_adapter *, struct nm_config_info *info);
	int (*nm_krings_create)(struct netmap_adapter *);
	void (*nm_krings_delete)(struct netmap_adapter *);
	/*
	 * nm_bdg_attach() initializes the na_vp field to point
	 *      to an adapter that can be attached to a VALE switch. If the
	 *      current adapter is already a VALE port, na_vp is simply a cast;
	 *      otherwise, na_vp points to a netmap_bwrap_adapter.
	 *      If applicable, this callback also initializes na_hostvp,
	 *      that can be used to connect the adapter host rings to the
	 *      switch.
	 *      Called with NMG_LOCK held.
	 *
	 * nm_bdg_ctl() is called on the actual attach/detach
	 *      to/from the switch, to perform adapter-specific
	 *      initializations
	 *      Called with NMG_LOCK held.
	 */
	int (*nm_bdg_attach)(const char *bdg_name, struct netmap_adapter *,
			struct nm_bridge *);
	int (*nm_bdg_ctl)(struct nmreq_header *, struct netmap_adapter *);

	/* adapter used to attach this adapter to a VALE switch (if any) */
	struct netmap_vp_adapter *na_vp;
	/* adapter used to attach the host rings of this adapter
	 * to a VALE switch (if any) */
	struct netmap_vp_adapter *na_hostvp;

	/* standard refcount to control the lifetime of the adapter
	 * (it should be equal to the lifetime of the corresponding ifp)
	 */
	int na_refcount;

	/* memory allocator (opaque)
	 * We also cache a pointer to the lut_entry for translating
	 * buffer addresses, the total number of buffers and the buffer size.
	 */
	struct netmap_mem_d *nm_mem;
	struct netmap_mem_d *nm_mem_prev;
	struct netmap_lut na_lut;

	/* additional information attached to this adapter
	 * by other netmap subsystems. Currently used by
	 * bwrap, LINUX/v1000 and ptnetmap
	 */
	void *na_private;

	/* array of pipes that have this adapter as a parent */
	struct netmap_pipe_adapter **na_pipes;
	int na_next_pipe;	/* next free slot in the array */
	int na_max_pipes;	/* size of the array */

	/* Offset of ethernet header for each packet. */
	u_int virt_hdr_len;

	/* Max number of bytes that the NIC can store in the buffer
	 * referenced by each RX descriptor. This translates to the maximum
	 * bytes that a single netmap slot can reference. Larger packets
	 * require NS_MOREFRAG support. */
	unsigned rx_buf_maxsize;

	char name[NETMAP_REQ_IFNAMSIZ]; /* used at least by pipes */

#ifdef WITH_MONITOR
	unsigned long	monitor_id;	/* debugging */
#endif
};

static __inline u_int
nma_get_ndesc(struct netmap_adapter *na, enum txrx t)
{
	return (t == NR_TX ? na->num_tx_desc : na->num_rx_desc);
}

static __inline void
nma_set_ndesc(struct netmap_adapter *na, enum txrx t, u_int v)
{
	if (t == NR_TX)
		na->num_tx_desc = v;
	else
		na->num_rx_desc = v;
}

static __inline u_int
nma_get_nrings(struct netmap_adapter *na, enum txrx t)
{
	return (t == NR_TX ? na->num_tx_rings : na->num_rx_rings);
}

static __inline u_int
nma_get_host_nrings(struct netmap_adapter *na, enum txrx t)
{
	return (t == NR_TX ? na->num_host_tx_rings : na->num_host_rx_rings);
}

static __inline void
nma_set_nrings(struct netmap_adapter *na, enum txrx t, u_int v)
{
	if (t == NR_TX)
		na->num_tx_rings = v;
	else
		na->num_rx_rings = v;
}

static __inline void
nma_set_host_nrings(struct netmap_adapter *na, enum txrx t, u_int v)
{
	if (t == NR_TX)
		na->num_host_tx_rings = v;
	else
		na->num_host_rx_rings = v;
}

static __inline struct netmap_kring**
NMR(struct netmap_adapter *na, enum txrx t)
{
	return (t == NR_TX ? na->tx_rings : na->rx_rings);
}

int nma_intr_enable(struct netmap_adapter *na, int onoff);

/*
 * If the NIC is owned by the kernel
 * (i.e., bridge), neither another bridge nor user can use it;
 * if the NIC is owned by a user, only users can share it.
 * Evaluation must be done under NMG_LOCK().
 */
#define NETMAP_OWNED_BY_KERN(na)	((na)->na_flags & NAF_BUSY)
#define NETMAP_OWNED_BY_ANY(na) \
	(NETMAP_OWNED_BY_KERN(na) || ((na)->active_fds > 0))

/*
 * derived netmap adapters for various types of ports
 */
struct netmap_vp_adapter {	/* VALE software port */
	struct netmap_adapter up;

	/*
	 * Bridge support:
	 *
	 * bdg_port is the port number used in the bridge;
	 * na_bdg points to the bridge this NA is attached to.
	 */
	int bdg_port;
	struct nm_bridge *na_bdg;
	int retry;
	int autodelete; /* remove the ifp on last reference */

	/* Maximum Frame Size, used in bdg_mismatch_datapath() */
	u_int mfs;
	/* Last source MAC on this port */
	uint64_t last_smac;
};


struct netmap_hw_adapter {	/* physical device */
	struct netmap_adapter up;

#ifdef linux
	struct net_device_ops nm_ndo;
	struct ethtool_ops    nm_eto;
#endif
	const struct ethtool_ops*   save_ethtool;

	int (*nm_hw_register)(struct netmap_adapter *, int onoff);
};

#ifdef WITH_GENERIC
/* Mitigation support. */
struct nm_generic_mit {
	struct hrtimer mit_timer;
	int mit_pending;
	int mit_ring_idx;  /* index of the ring being mitigated */
	struct netmap_adapter *mit_na;  /* backpointer */
};

struct netmap_generic_adapter {	/* emulated device */
	struct netmap_hw_adapter up;

	/* Pointer to a previously used netmap adapter. */
	struct netmap_adapter *prev;

	/* Emulated netmap adapters support:
	 *  - mit implements rx interrupt mitigation;
	 */
	struct nm_generic_mit *mit;
#ifdef linux
	netdev_tx_t (*save_start_xmit)(struct mbuf *, if_t);
#endif
	/* Is the adapter able to use multiple RX slots to scatter
	 * each packet pushed up by the driver? */
	int rxsg;

	/* Is the transmission path controlled by a netmap-aware
	 * device queue (i.e. qdisc on linux)? */
	int txqdisc;
};
#endif  /* WITH_GENERIC */

static __inline u_int
netmap_real_rings(struct netmap_adapter *na, enum txrx t)
{
	return nma_get_nrings(na, t) +
		!!(na->na_flags & NAF_HOST_RINGS) * nma_get_host_nrings(na, t);
}

/* account for fake rings */
static __inline u_int
netmap_all_rings(struct netmap_adapter *na, enum txrx t)
{
	return max(nma_get_nrings(na, t) + 1, netmap_real_rings(na, t));
}

int netmap_default_bdg_attach(const char *name, struct netmap_adapter *na,
		struct nm_bridge *);
struct nm_bdg_polling_state;
/*
 * Bridge wrapper for non VALE ports attached to a VALE switch.
 *
 * The real device must already have its own netmap adapter (hwna).
 * The bridge wrapper and the hwna adapter share the same set of
 * netmap rings and buffers, but they have two separate sets of
 * krings descriptors, with tx/rx meanings swapped:
 *
 *                                  netmap
 *           bwrap     krings       rings      krings      hwna
 *         +------+   +------+     +-----+    +------+   +------+
 *         |tx_rings->|      |\   /|     |----|      |<-tx_rings|
 *         |      |   +------+ \ / +-----+    +------+   |      |
 *         |      |             X                        |      |
 *         |      |            / \                       |      |
 *         |      |   +------+/   \+-----+    +------+   |      |
 *         |rx_rings->|      |     |     |----|      |<-rx_rings|
 *         |      |   +------+     +-----+    +------+   |      |
 *         +------+                                      +------+
 *
 * - packets coming from the bridge go to the bwrap rx rings,
 *   which are also the hwna tx rings.  The bwrap notify callback
 *   will then complete the hwna tx (see netmap_bwrap_notify).
 *
 * - packets coming from the outside go to the hwna rx rings,
 *   which are also the bwrap tx rings.  The (overwritten) hwna
 *   notify method will then complete the bridge tx
 *   (see netmap_bwrap_intr_notify).
 *
 *   The bridge wrapper may optionally connect the hwna 'host' rings
 *   to the bridge. This is done by using a second port in the
 *   bridge and connecting it to the 'host' netmap_vp_adapter
 *   contained in the netmap_bwrap_adapter. The bwrap host adapter
 *   cross-links the hwna host rings in the same way as shown above.
 *
 * - packets coming from the bridge and directed to the host stack
 *   are handled by the bwrap host notify callback
 *   (see netmap_bwrap_host_notify)
 *
 * - packets coming from the host stack are still handled by the
 *   overwritten hwna notify callback (netmap_bwrap_intr_notify),
 *   but are diverted to the host adapter depending on the ring number.
 *
 */
struct netmap_bwrap_adapter {
	struct netmap_vp_adapter up;
	struct netmap_vp_adapter host;  /* for host rings */
	struct netmap_adapter *hwna;	/* the underlying device */

	/*
	 * When we attach a physical interface to the bridge, we
	 * allow the controlling process to terminate, so we need
	 * a place to store the netmap_priv_d data structure.
	 * This is only done when physical interfaces
	 * are attached to a bridge.
	 */
	struct netmap_priv_d *na_kpriv;
	struct nm_bdg_polling_state *na_polling_state;
	/* we overwrite the hwna->na_vp pointer, so we save
	 * here its original value, to be restored at detach
	 */
	struct netmap_vp_adapter *saved_na_vp;
	int (*nm_intr_notify)(struct netmap_kring *kring, int flags);
};
int nm_is_bwrap(struct netmap_adapter *na);
int nm_bdg_polling(struct nmreq_header *hdr);

int netmap_bdg_attach(struct nmreq_header *hdr, void *auth_token);
int netmap_bdg_detach(struct nmreq_header *hdr, void *auth_token);
#ifdef WITH_VALE
int netmap_vale_list(struct nmreq_header *hdr);
int netmap_vi_create(struct nmreq_header *hdr, int);
int nm_vi_create(struct nmreq_header *);
int nm_vi_destroy(const char *name);
#else /* !WITH_VALE */
#define netmap_vi_create(hdr, a) (EOPNOTSUPP)
#endif /* WITH_VALE */

#ifdef WITH_PIPES

#define NM_MAXPIPES 	64	/* max number of pipes per adapter */

struct netmap_pipe_adapter {
	/* pipe identifier is up.name */
	struct netmap_adapter up;

#define NM_PIPE_ROLE_MASTER	0x1
#define NM_PIPE_ROLE_SLAVE	0x2
	int role;	/* either NM_PIPE_ROLE_MASTER or NM_PIPE_ROLE_SLAVE */

	struct netmap_adapter *parent; /* adapter that owns the memory */
	struct netmap_pipe_adapter *peer; /* the other end of the pipe */
	int peer_ref;		/* 1 iff we are holding a ref to the peer */
	if_t parent_ifp;	/* maybe null */

	u_int parent_slot; /* index in the parent pipe array */
};

#endif /* WITH_PIPES */

#ifdef WITH_NMNULL
struct netmap_null_adapter {
	struct netmap_adapter up;
};
#endif /* WITH_NMNULL */


/* return slots reserved to rx clients; used in drivers */
static inline uint32_t
nm_kr_rxspace(struct netmap_kring *k)
{
	int space = k->nr_hwtail - k->nr_hwcur;
	if (space < 0)
		space += k->nkr_num_slots;
	nm_prdis("preserving %d rx slots %d -> %d", space, k->nr_hwcur, k->nr_hwtail);

	return space;
}

/* return slots reserved to tx clients */
#define nm_kr_txspace(_k) nm_kr_rxspace(_k)


/* True if no space in the tx ring, only valid after txsync_prologue */
static inline int
nm_kr_txempty(struct netmap_kring *kring)
{
	return kring->rhead == kring->nr_hwtail;
}

/* True if no more completed slots in the rx ring, only valid after
 * rxsync_prologue */
#define nm_kr_rxempty(_k)	nm_kr_txempty(_k)

/* True if the application needs to wait for more space on the ring
 * (more received packets or more free tx slots).
 * Only valid after *xsync_prologue. */
static inline int
nm_kr_wouldblock(struct netmap_kring *kring)
{
	return kring->rcur == kring->nr_hwtail;
}

/*
 * protect against multiple threads using the same ring.
 * also check that the ring has not been stopped or locked
 */
#define NM_KR_BUSY	1	/* some other thread is syncing the ring */
#define NM_KR_STOPPED	2	/* unbounded stop (ifconfig down or driver unload) */
#define NM_KR_LOCKED	3	/* bounded, brief stop for mutual exclusion */


/* release the previously acquired right to use the *sync() methods of the ring */
static __inline void nm_kr_put(struct netmap_kring *kr)
{
	NM_ATOMIC_CLEAR(&kr->nr_busy);
}


/* true if the ifp that backed the adapter has disappeared (e.g., the
 * driver has been unloaded)
 */
static inline int nm_iszombie(struct netmap_adapter *na);

/* try to obtain exclusive right to issue the *sync() operations on the ring.
 * The right is obtained and must be later relinquished via nm_kr_put() if and
 * only if nm_kr_tryget() returns 0.
 * If can_sleep is 1 there are only two other possible outcomes:
 * - the function returns NM_KR_BUSY
 * - the function returns NM_KR_STOPPED and sets the POLLERR bit in *perr
 *   (if non-null)
 * In both cases the caller will typically skip the ring, possibly collecting
 * errors along the way.
 * If the calling context does not allow sleeping, the caller must pass 0
 * in can_sleep; in that case, the function may also return NM_KR_LOCKED and
 * leave *perr untouched: ideally, the caller should try again at a later time.
 */
static __inline int nm_kr_tryget(struct netmap_kring *kr, int can_sleep, int *perr)
{
	int busy = 1, stopped;
	/* check a first time without taking the lock
	 * to avoid starvation for nm_kr_get()
	 */
retry:
	stopped = kr->nkr_stopped;
	if (unlikely(stopped)) {
		goto stop;
	}
	busy = NM_ATOMIC_TEST_AND_SET(&kr->nr_busy);
	/* we should not return NM_KR_BUSY if the ring was
	 * actually stopped, so check another time after
	 * the barrier provided by the atomic operation
	 */
	stopped = kr->nkr_stopped;
	if (unlikely(stopped)) {
		goto stop;
	}

	if (unlikely(nm_iszombie(kr->na))) {
		stopped = NM_KR_STOPPED;
		goto stop;
	}

	return unlikely(busy) ? NM_KR_BUSY : 0;

stop:
	if (!busy)
		nm_kr_put(kr);
	if (stopped == NM_KR_STOPPED) {
/* if POLLERR is defined we want to use it to simplify netmap_poll().
 * Otherwise, any non-zero value will do.
 */
#ifdef POLLERR
#define NM_POLLERR POLLERR
#else
#define NM_POLLERR 1
#endif /* POLLERR */
		if (perr)
			*perr |= NM_POLLERR;
#undef NM_POLLERR
	} else if (can_sleep) {
		tsleep(kr, 0, "NM_KR_TRYGET", 4);
		goto retry;
	}
	return stopped;
}
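/*
 * Typical use (sketch, close to what netmap_poll() does):
 *
 *	if (nm_kr_tryget(kring, 1, &revents) != 0)
 *		continue;	// ring stopped or busy: skip it
 *	kring->nm_sync(kring, 0);
 *	nm_kr_put(kring);
 */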

/* put the ring in the 'stopped' state and wait for the current user (if any) to
 * notice. stopped must be either NM_KR_STOPPED or NM_KR_LOCKED
 */
static __inline void nm_kr_stop(struct netmap_kring *kr, int stopped)
{
	kr->nkr_stopped = stopped;
	while (NM_ATOMIC_TEST_AND_SET(&kr->nr_busy))
		tsleep(kr, 0, "NM_KR_GET", 4);
}

/* restart a ring after a stop */
static __inline void nm_kr_start(struct netmap_kring *kr)
{
	kr->nkr_stopped = 0;
	nm_kr_put(kr);
}
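/*
 * Typical bracket (sketch): quiesce a ring, briefly modify the data
 * structures hanging off the kring, then let syscalls through again.
 *
 *	nm_kr_stop(kring, NM_KR_LOCKED);
 *	... update kring state ...
 *	nm_kr_start(kring);
 */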


/*
 * The following functions are used by individual drivers to
 * support netmap operation.
 *
 * netmap_attach() initializes a struct netmap_adapter, allocating the
 * 	struct netmap_ring's and the struct selinfo.
 *
 * netmap_detach() frees the memory allocated by netmap_attach().
 *
 * netmap_transmit() replaces the if_transmit routine of the interface,
 *	and is used to intercept packets coming from the stack.
 *
 * netmap_load_map/netmap_reload_map are helper routines to set/reset
 *	the dmamap for a packet buffer
 *
 * netmap_reset() is a helper routine to be called in the hw driver
 *	when reinitializing a ring. It should not be called by
 *	virtual ports (vale, pipes, monitor)
 */
int netmap_attach(struct netmap_adapter *);
int netmap_attach_ext(struct netmap_adapter *, size_t size, int override_reg);
void netmap_detach(if_t);
int netmap_transmit(if_t, struct mbuf *);
struct netmap_slot *netmap_reset(struct netmap_adapter *na,
	enum txrx tx, u_int n, u_int new_cur);
int netmap_ring_reinit(struct netmap_kring *);
int netmap_rings_config_get(struct netmap_adapter *, struct nm_config_info *);
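/*
 * Minimal attach sequence for a hypothetical "foo" driver (sketch;
 * the softc fields and foo_* callbacks are illustrative):
 *
 *	struct netmap_adapter na;
 *
 *	bzero(&na, sizeof(na));
 *	na.ifp = sc->ifp;
 *	na.num_tx_desc = sc->num_tx_desc;
 *	na.num_rx_desc = sc->num_rx_desc;
 *	na.num_tx_rings = na.num_rx_rings = sc->num_queues;
 *	na.nm_register = foo_netmap_reg;
 *	na.nm_txsync = foo_txsync;
 *	na.nm_rxsync = foo_rxsync;
 *	netmap_attach(&na);
 */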

/* Return codes for netmap_*x_irq. */
enum {
	/* Driver should do normal interrupt processing, e.g. because
	 * the interface is not in netmap mode. */
	NM_IRQ_PASS = 0,
	/* Port is in netmap mode, and the interrupt work has been
	 * completed. The driver does not have to notify netmap
	 * again before the next interrupt. */
	NM_IRQ_COMPLETED = -1,
	/* Port is in netmap mode, but the interrupt work has not been
	 * completed. The driver has to make sure netmap will be
	 * notified again soon, even if no more interrupts come (e.g.
	 * on Linux the driver should not call napi_complete()). */
	NM_IRQ_RESCHED = -2,
};

/* default functions to handle rx/tx interrupts */
int netmap_rx_irq(if_t, u_int, u_int *);
#define netmap_tx_irq(_n, _q) netmap_rx_irq(_n, _q, NULL)
int netmap_common_irq(struct netmap_adapter *, u_int, u_int *work_done);
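/*
 * Typical use in a driver RX interrupt handler (sketch):
 *
 *	if (netmap_rx_irq(ifp, ring_nr, &work_done) != NM_IRQ_PASS)
 *		return;		// handled in netmap mode
 *	... regular interrupt processing ...
 *
 * On Linux, NM_IRQ_RESCHED additionally means that napi_complete()
 * must not be called yet, as noted above.
 */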


#ifdef WITH_VALE
/* functions used by external modules to interface with VALE */
#define netmap_vp_to_ifp(_vp)	((_vp)->up.ifp)
#define netmap_ifp_to_vp(_ifp)	(NA(_ifp)->na_vp)
#define netmap_ifp_to_host_vp(_ifp) (NA(_ifp)->na_hostvp)
#define netmap_bdg_idx(_vp)	((_vp)->bdg_port)
const char *netmap_bdg_name(struct netmap_vp_adapter *);
#else /* !WITH_VALE */
#define netmap_vp_to_ifp(_vp)	NULL
#define netmap_ifp_to_vp(_ifp)	NULL
#define netmap_ifp_to_host_vp(_ifp) NULL
#define netmap_bdg_idx(_vp)	-1
#endif /* WITH_VALE */

static inline int
nm_netmap_on(struct netmap_adapter *na)
{
	return na && na->na_flags & NAF_NETMAP_ON;
}

static inline int
nm_native_on(struct netmap_adapter *na)
{
	return nm_netmap_on(na) && (na->na_flags & NAF_NATIVE);
}

static inline struct netmap_kring *
netmap_kring_on(struct netmap_adapter *na, u_int q, enum txrx t)
{
	struct netmap_kring *kring = NULL;

	if (!nm_native_on(na))
		return NULL;

	if (t == NR_RX && q < na->num_rx_rings)
		kring = na->rx_rings[q];
	else if (t == NR_TX && q < na->num_tx_rings)
		kring = na->tx_rings[q];
	else
		return NULL;

	return (kring->nr_mode == NKR_NETMAP_ON) ? kring : NULL;
}

static inline int
nm_iszombie(struct netmap_adapter *na)
{
	return na == NULL || (na->na_flags & NAF_ZOMBIE);
}

void nm_set_native_flags(struct netmap_adapter *);
void nm_clear_native_flags(struct netmap_adapter *);

void netmap_krings_mode_commit(struct netmap_adapter *na, int onoff);

/*
 * nm_*sync_prologue() functions are used in ioctl/poll and ptnetmap
 * kthreads.
 * We need the netmap_ring* parameter because in ptnetmap it is
 * decoupled from the host kring.
 * The user-space ring pointers (head/cur/tail) are shared through
 * CSB between host and guest.
 */

/*
 * validates parameters in the ring/kring, returns a value for head.
 * If any error, returns ring_size to force a reinit.
 */
uint32_t nm_txsync_prologue(struct netmap_kring *, struct netmap_ring *);


/*
 * validates parameters in the ring/kring, returns a value for head.
 * If any error, returns ring_size to force a reinit.
 */
uint32_t nm_rxsync_prologue(struct netmap_kring *, struct netmap_ring *);


/* check/fix address and len in tx rings */
#if 1 /* debug version */
#define	NM_CHECK_ADDR_LEN(_na, _a, _l)	do {				\
	if (_a == NETMAP_BUF_BASE(_na) || _l > NETMAP_BUF_SIZE(_na)) {	\
		nm_prlim(5, "bad addr/len ring %d slot %d idx %d len %d",	\
			kring->ring_id, nm_i, slot->buf_idx, len);	\
		if (_l > NETMAP_BUF_SIZE(_na))				\
			_l = NETMAP_BUF_SIZE(_na);			\
	} } while (0)
#else /* no debug version */
#define	NM_CHECK_ADDR_LEN(_na, _a, _l)	do {				\
		if (_l > NETMAP_BUF_SIZE(_na))				\
			_l = NETMAP_BUF_SIZE(_na);			\
	} while (0)
#endif

#define NM_CHECK_ADDR_LEN_OFF(na_, l_, o_) do {				\
	if ((l_) + (o_) < (l_) ||					\
	    (l_) + (o_) > NETMAP_BUF_SIZE(na_)) {			\
		(l_) = NETMAP_BUF_SIZE(na_) - (o_);			\
	} } while (0)
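/*
 * Example (sketch): validating a slot in a driver txsync loop. Note
 * that the debug variant of NM_CHECK_ADDR_LEN() expects kring, nm_i,
 * slot and len to be in scope at the call site; PNMB() is the usual
 * buffer-translation macro.
 *
 *	struct netmap_slot *slot = &ring->slot[nm_i];
 *	u_int len = slot->len;
 *	uint64_t paddr;
 *	void *addr = PNMB(na, slot, &paddr);
 *
 *	NM_CHECK_ADDR_LEN(na, addr, len);
 */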
1475
1476
1477/*---------------------------------------------------------------*/
1478/*
1479 * Support routines used by netmap subsystems
1480 * (native drivers, VALE, generic, pipes, monitors, ...)
1481 */
1482
1483
1484/* common routine for all functions that create a netmap adapter. It performs
1485 * two main tasks:
1486 * - if the na points to an ifp, mark the ifp as netmap capable
1487 *   using na as its native adapter;
1488 * - provide defaults for the setup callbacks and the memory allocator
1489 */
1490int netmap_attach_common(struct netmap_adapter *);
1491/* fill priv->np_[tr]xq{first,last} using the ringid and flags information
1492 * coming from a struct nmreq_register
1493 */
1494int netmap_interp_ringid(struct netmap_priv_d *priv, struct nmreq_header *hdr);
1495/* update the ring parameters (number and size of tx and rx rings).
1496 * It calls the nm_config callback, if available.
1497 */
1498int netmap_update_config(struct netmap_adapter *na);
1499/* create and initialize the common fields of the krings array.
1500 * using the information that must be already available in the na.
1501 * tailroom can be used to request the allocation of additional
1502 * tailroom bytes after the krings array. This is used by
1503 * netmap_vp_adapter's (i.e., VALE ports) to make room for
1504 * leasing-related data structures
1505 */
1506int netmap_krings_create(struct netmap_adapter *na, u_int tailroom);
1507/* deletes the kring array of the adapter. The array must have
1508 * been created using netmap_krings_create
1509 */
1510void netmap_krings_delete(struct netmap_adapter *na);
1511
1512int netmap_hw_krings_create(struct netmap_adapter *na);
1513void netmap_hw_krings_delete(struct netmap_adapter *na);
1514
1515/* set the stopped/enabled status of ring
1516 * When stopping, they also wait for all current activity on the ring to
1517 * terminate. The status change is then notified using the na nm_notify
1518 * callback.
1519 */
1520void netmap_set_ring(struct netmap_adapter *, u_int ring_id, enum txrx, int stopped);
1521/* set the stopped/enabled status of all rings of the adapter. */
1522void netmap_set_all_rings(struct netmap_adapter *, int stopped);
1523/* convenience wrappers for netmap_set_all_rings */
1524void netmap_disable_all_rings(if_t);
1525void netmap_enable_all_rings(if_t);
1526
1527int netmap_buf_size_validate(const struct netmap_adapter *na, unsigned mtu);
1528int netmap_do_regif(struct netmap_priv_d *priv, struct netmap_adapter *na,
1529		struct nmreq_header *);
1530void netmap_do_unregif(struct netmap_priv_d *priv);
1531
1532u_int nm_bound_var(u_int *v, u_int dflt, u_int lo, u_int hi, const char *msg);
1533int netmap_get_na(struct nmreq_header *hdr, struct netmap_adapter **na,
1534		if_t *ifp, struct netmap_mem_d *nmd, int create);
1535void netmap_unget_na(struct netmap_adapter *na, if_t ifp);
1536int netmap_get_hw_na(if_t ifp,
1537		struct netmap_mem_d *nmd, struct netmap_adapter **na);
1538void netmap_mem_restore(struct netmap_adapter *na);
1539
1540#ifdef WITH_VALE
1541uint32_t netmap_vale_learning(struct nm_bdg_fwd *ft, uint8_t *dst_ring,
1542		struct netmap_vp_adapter *, void *private_data);
1543
1544/* these are redefined in case of no VALE support */
1545int netmap_get_vale_na(struct nmreq_header *hdr, struct netmap_adapter **na,
1546		struct netmap_mem_d *nmd, int create);
1547void *netmap_vale_create(const char *bdg_name, int *return_status);
1548int netmap_vale_destroy(const char *bdg_name, void *auth_token);
1549
1550extern unsigned int vale_max_bridges;
1551
1552#else /* !WITH_VALE */
1553#define netmap_bdg_learning(_1, _2, _3, _4)	0
1554#define	netmap_get_vale_na(_1, _2, _3, _4)	0
1555#define netmap_bdg_create(_1, _2)	NULL
1556#define netmap_bdg_destroy(_1, _2)	0
#define vale_max_bridges		1
#endif /* !WITH_VALE */

#ifdef WITH_PIPES
/* max number of pipes per device */
#define NM_MAXPIPES	64	/* XXX this should probably be a sysctl */
void netmap_pipe_dealloc(struct netmap_adapter *);
int netmap_get_pipe_na(struct nmreq_header *hdr, struct netmap_adapter **na,
			struct netmap_mem_d *nmd, int create);
#else /* !WITH_PIPES */
#define NM_MAXPIPES	0
#define netmap_pipe_alloc(_1, _2) 	0
#define netmap_pipe_dealloc(_1)
#define netmap_get_pipe_na(hdr, _2, _3, _4)	\
	((strchr(hdr->nr_name, '{') != NULL || strchr(hdr->nr_name, '}') != NULL) ? EOPNOTSUPP : 0)
#endif

#ifdef WITH_MONITOR
int netmap_get_monitor_na(struct nmreq_header *hdr, struct netmap_adapter **na,
		struct netmap_mem_d *nmd, int create);
void netmap_monitor_stop(struct netmap_adapter *na);
#else
#define netmap_get_monitor_na(hdr, _2, _3, _4) \
	(((struct nmreq_register *)(uintptr_t)hdr->nr_body)->nr_flags & (NR_MONITOR_TX | NR_MONITOR_RX) ? EOPNOTSUPP : 0)
#endif

#ifdef WITH_NMNULL
int netmap_get_null_na(struct nmreq_header *hdr, struct netmap_adapter **na,
		struct netmap_mem_d *nmd, int create);
#else /* !WITH_NMNULL */
#define netmap_get_null_na(hdr, _2, _3, _4)	0
#endif /* WITH_NMNULL */

#ifdef CONFIG_NET_NS
struct net *netmap_bns_get(void);
void netmap_bns_put(struct net *);
void netmap_bns_getbridges(struct nm_bridge **, u_int *);
#else
extern struct nm_bridge *nm_bridges;
#define netmap_bns_get()
#define netmap_bns_put(_1)
#define netmap_bns_getbridges(b, n) \
	do { *b = nm_bridges; *n = vale_max_bridges; } while (0)
#endif

/* Various prototypes */
int netmap_poll(struct netmap_priv_d *, int events, NM_SELRECORD_T *td);
int netmap_init(void);
void netmap_fini(void);
int netmap_get_memory(struct netmap_priv_d* p);
void netmap_dtor(void *data);

int netmap_ioctl(struct netmap_priv_d *priv, u_long cmd, caddr_t data,
		struct thread *, int nr_body_is_user);
int netmap_ioctl_legacy(struct netmap_priv_d *priv, u_long cmd, caddr_t data,
			struct thread *td);
size_t nmreq_size_by_type(uint16_t nr_reqtype);

/* netmap_adapter creation/destruction */

// #define NM_DEBUG_PUTGET 1

#ifdef NM_DEBUG_PUTGET

#define NM_DBG(f) __##f

void __netmap_adapter_get(struct netmap_adapter *na);

#define netmap_adapter_get(na) 				\
	do {						\
		struct netmap_adapter *__na = na;	\
		__netmap_adapter_get(__na);		\
		nm_prinf("getting %p:%s -> %d", __na, (__na)->name, (__na)->na_refcount);	\
	} while (0)

int __netmap_adapter_put(struct netmap_adapter *na);

#define netmap_adapter_put(na)				\
	({						\
		struct netmap_adapter *__na = na;	\
		if (__na == NULL)			\
			nm_prinf("putting NULL");	\
		else					\
			nm_prinf("putting %p:%s -> %d", __na, (__na)->name, (__na)->na_refcount - 1);	\
		__netmap_adapter_put(__na);	\
	})

#else /* !NM_DEBUG_PUTGET */

#define NM_DBG(f) f
void netmap_adapter_get(struct netmap_adapter *na);
int netmap_adapter_put(struct netmap_adapter *na);

#endif /* !NM_DEBUG_PUTGET */


/*
 * module variables
 */
#define NETMAP_BUF_BASE(_na)	((_na)->na_lut.lut[0].vaddr)
#define NETMAP_BUF_SIZE(_na)	((_na)->na_lut.objsize)
extern int netmap_no_pendintr;
extern int netmap_verbose;
#ifdef CONFIG_NETMAP_DEBUG
extern int netmap_debug;		/* for debugging */
#else /* !CONFIG_NETMAP_DEBUG */
#define netmap_debug (0)
#endif /* !CONFIG_NETMAP_DEBUG */
enum {                                  /* debug flags */
	NM_DEBUG_ON = 1,		/* generic debug messages */
	NM_DEBUG_HOST = 0x2,            /* debug host stack */
	NM_DEBUG_RXSYNC = 0x10,         /* debug on rxsync/txsync */
	NM_DEBUG_TXSYNC = 0x20,
	NM_DEBUG_RXINTR = 0x100,        /* debug on rx/tx intr (driver) */
	NM_DEBUG_TXINTR = 0x200,
	NM_DEBUG_NIC_RXSYNC = 0x1000,   /* debug on NIC rxsync/txsync (driver) */
	NM_DEBUG_NIC_TXSYNC = 0x2000,
	NM_DEBUG_MEM = 0x4000,		/* verbose memory allocations/deallocations */
	NM_DEBUG_VALE = 0x8000,		/* debug messages from the VALE switch */
	NM_DEBUG_BDG = NM_DEBUG_VALE,
};
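/*
 * The values above form a bitmask: for instance, a debug build
 * (CONFIG_NETMAP_DEBUG) may set netmap_debug to
 * (NM_DEBUG_RXSYNC | NM_DEBUG_TXSYNC) to trace both sync paths at once.
 */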

extern int netmap_txsync_retry;
extern int netmap_generic_hwcsum;
extern int netmap_generic_mit;
extern int netmap_generic_ringsize;
extern int netmap_generic_rings;
#ifdef linux
extern int netmap_generic_txqdisc;
#endif

/*
 * NA returns a pointer to the struct netmap_adapter of the ifp.
 * The if_getnetmapadapter() and if_setnetmapadapter() helpers are
 * OS-specific and must be defined in glue code.
 */
#define	NA(_ifp)	(if_getnetmapadapter(_ifp))

/*
 * We provide a default implementation of NM_ATTACH_NA/NM_DETACH_NA
 * based on the if_setnetmapadapter() setter function.
 * Glue code may override this by defining its own NM_ATTACH_NA.
 */
#ifndef NM_ATTACH_NA
/*
 * On old versions of FreeBSD, NA(ifp) is a pspare. On linux we
 * overload another pointer in the netdev.
 *
 * We check if NA(ifp) is set and its first element has a related
 * magic value. The capenable is within the struct netmap_adapter.
 */
#define	NETMAP_MAGIC	0x52697a7a

#define NM_NA_VALID(ifp)	(NA(ifp) &&		\
	((uint32_t)(uintptr_t)NA(ifp) ^ NA(ifp)->magic) == NETMAP_MAGIC)

#define	NM_ATTACH_NA(ifp, na) do {					\
	if_setnetmapadapter(ifp, na);					\
	if (NA(ifp))							\
		NA(ifp)->magic = 					\
			((uint32_t)(uintptr_t)NA(ifp)) ^ NETMAP_MAGIC;	\
} while(0)
#define NM_RESTORE_NA(ifp, na) 	if_setnetmapadapter(ifp, na);

#define NM_DETACH_NA(ifp)	do { if_setnetmapadapter(ifp, NULL); } while (0)
#define NM_NA_CLASH(ifp)	(NA(ifp) && !NM_NA_VALID(ifp))
#endif /* !NM_ATTACH_NA */
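
/*
 * Example (illustrative): code holding a bare ifp typically validates
 * the adapter pointer before use, since NA(ifp) may contain stale or
 * foreign data when the magic does not match.
 */
#if 0
	if (!NM_NA_VALID(ifp))
		return;		/* not (or no longer) a netmap interface */
	na = NA(ifp);
#endif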


#define NM_IS_NATIVE(ifp)	(NM_NA_VALID(ifp) && NA(ifp)->nm_dtor == netmap_hw_dtor)

#if defined(__FreeBSD__)

/* Returns the IOMMU group id of a device, so that the memory allocator
 * can tell whether two devices may share the same allocator.
 * FreeBSD has no such notion, so we always report -1 (no group). */
#define nm_iommu_group_id(dev) (-1)

/* Callback invoked by the dma machinery after a successful dmamap_load */
static void netmap_dmamap_cb(__unused void *arg,
    __unused bus_dma_segment_t * segs, __unused int nseg, __unused int error)
{
}

/* bus_dmamap_load wrapper: call aforementioned function if map != NULL.
 * XXX can we do it without a callback ?
 */
static inline int
netmap_load_map(struct netmap_adapter *na,
	bus_dma_tag_t tag, bus_dmamap_t map, void *buf)
{
	if (map)
		bus_dmamap_load(tag, map, buf, NETMAP_BUF_SIZE(na),
		    netmap_dmamap_cb, NULL, BUS_DMA_NOWAIT);
	return 0;
}

static inline void
netmap_unload_map(struct netmap_adapter *na,
	bus_dma_tag_t tag, bus_dmamap_t map)
{
	if (map)
		bus_dmamap_unload(tag, map);
}

#define netmap_sync_map(na, tag, map, sz, t)

/* update the map when a buffer changes. */
static inline void
netmap_reload_map(struct netmap_adapter *na,
	bus_dma_tag_t tag, bus_dmamap_t map, void *buf)
{
	if (map) {
		bus_dmamap_unload(tag, map);
		bus_dmamap_load(tag, map, buf, NETMAP_BUF_SIZE(na),
		    netmap_dmamap_cb, NULL, BUS_DMA_NOWAIT);
	}
}
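
/*
 * Typical driver-side use (illustrative sketch; txr and txbuf are
 * hypothetical driver structures): reload the DMA map only when the
 * application attached a different buffer to the slot, then clear
 * the flag.
 */
#if 0
	if (slot->flags & NS_BUF_CHANGED) {
		netmap_reload_map(na, txr->txtag, txbuf->map, NMB(na, slot));
		slot->flags &= ~NS_BUF_CHANGED;
	}
#endif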

#elif defined(_WIN32)

#else /* linux */

int nm_iommu_group_id(bus_dma_tag_t dev);
#include <linux/dma-mapping.h>

/*
 * on linux we need
 *	dma_map_single(&pdev->dev, virt_addr, len, direction)
 *	dma_unmap_single(&adapter->pdev->dev, phys_addr, len, direction)
 *
 * The dead code below is kept as a reference for how an e1000-style
 * driver reloads the dma mapping of a tx buffer.
 */
#if 0
	struct e1000_buffer *buffer_info =  &tx_ring->buffer_info[l];
	/* set time_stamp *before* dma to help avoid a possible race */
	buffer_info->time_stamp = jiffies;
	buffer_info->mapped_as_page = false;
	buffer_info->length = len;
	//buffer_info->next_to_watch = l;
	/* reload dma map */
	dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
			NETMAP_BUF_SIZE, DMA_TO_DEVICE);
	buffer_info->dma = dma_map_single(&adapter->pdev->dev,
			addr, NETMAP_BUF_SIZE, DMA_TO_DEVICE);

	if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
		nm_prerr("dma mapping error");
		/* goto dma_error; See e1000_put_txbuf() */
		/* XXX reset */
	}
	tx_desc->buffer_addr = htole64(buffer_info->dma); //XXX

#endif

static inline int
netmap_load_map(struct netmap_adapter *na,
	bus_dma_tag_t tag, bus_dmamap_t map, void *buf, u_int size)
{
	if (map) {
		*map = dma_map_single(na->pdev, buf, size,
				      DMA_BIDIRECTIONAL);
		if (dma_mapping_error(na->pdev, *map)) {
			*map = 0;
			return ENOMEM;
		}
	}
	return 0;
}

static inline void
netmap_unload_map(struct netmap_adapter *na,
	bus_dma_tag_t tag, bus_dmamap_t map, u_int sz)
{
	if (*map) {
		dma_unmap_single(na->pdev, *map, sz,
				 DMA_BIDIRECTIONAL);
	}
}

#ifdef NETMAP_LINUX_HAVE_DMASYNC
static inline void
netmap_sync_map_cpu(struct netmap_adapter *na,
	bus_dma_tag_t tag, bus_dmamap_t map, u_int sz, enum txrx t)
{
	if (*map) {
		dma_sync_single_for_cpu(na->pdev, *map, sz,
			(t == NR_TX ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
	}
}

static inline void
netmap_sync_map_dev(struct netmap_adapter *na,
	bus_dma_tag_t tag, bus_dmamap_t map, u_int sz, enum txrx t)
{
	if (*map) {
		dma_sync_single_for_device(na->pdev, *map, sz,
			(t == NR_TX ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
	}
}

static inline void
netmap_reload_map(struct netmap_adapter *na,
	bus_dma_tag_t tag, bus_dmamap_t map, void *buf)
{
	u_int sz = NETMAP_BUF_SIZE(na);

	if (*map) {
		dma_unmap_single(na->pdev, *map, sz,
				DMA_BIDIRECTIONAL);
	}

	*map = dma_map_single(na->pdev, buf, sz,
				DMA_BIDIRECTIONAL);
}
#else /* !NETMAP_LINUX_HAVE_DMASYNC */
#define netmap_sync_map_cpu(na, tag, map, sz, t)
#define netmap_sync_map_dev(na, tag, map, sz, t)
#endif /* NETMAP_LINUX_HAVE_DMASYNC */

#endif /* linux */


/*
 * functions to map NIC to KRING indexes (n2k) and vice versa (k2n)
 */
static inline int
netmap_idx_n2k(struct netmap_kring *kr, int idx)
{
	int n = kr->nkr_num_slots;

	if (likely(kr->nkr_hwofs == 0)) {
		return idx;
	}

	idx += kr->nkr_hwofs;
	if (idx < 0)
		return idx + n;
	else if (idx < n)
		return idx;
	else
		return idx - n;
}


static inline int
netmap_idx_k2n(struct netmap_kring *kr, int idx)
{
	int n = kr->nkr_num_slots;

	if (likely(kr->nkr_hwofs == 0)) {
		return idx;
	}

	idx -= kr->nkr_hwofs;
	if (idx < 0)
		return idx + n;
	else if (idx < n)
		return idx;
	else
		return idx - n;
}
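
/*
 * Worked example: with nkr_num_slots = 256 and nkr_hwofs = 3, NIC
 * index 255 maps to kring index (255 + 3) - 256 = 2, and
 * netmap_idx_k2n(kr, 2) maps back to (2 - 3) + 256 = 255.
 */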


/* Entries of the look-up table. */
#ifdef __FreeBSD__
struct lut_entry {
	void *vaddr;		/* virtual address. */
	vm_paddr_t paddr;	/* physical address. */
};
#else /* linux & _WIN32 */
/* dma-mapping in linux can assign a buffer a different address
 * depending on the device, so we need to have a separate
 * physical-address look-up table for each na.
 * We can still share the vaddrs, though, therefore we split
 * the lut_entry structure.
 */
struct lut_entry {
	void *vaddr;		/* virtual address. */
};

struct plut_entry {
	vm_paddr_t paddr;	/* physical address. */
};
#endif /* linux & _WIN32 */

struct netmap_obj_pool;

/* alignment for netmap buffers */
#define NM_BUF_ALIGN	64

/*
 * NMB returns the virtual address of a buffer (buffer 0 on bad index).
 * PNMB also fills in the physical address.
 */
static inline void *
NMB(struct netmap_adapter *na, struct netmap_slot *slot)
{
	struct lut_entry *lut = na->na_lut.lut;
	uint32_t i = slot->buf_idx;
	return (unlikely(i >= na->na_lut.objtotal)) ?
		lut[0].vaddr : lut[i].vaddr;
}

static inline void *
PNMB(struct netmap_adapter *na, struct netmap_slot *slot, uint64_t *pp)
{
	uint32_t i = slot->buf_idx;
	struct lut_entry *lut = na->na_lut.lut;
	struct plut_entry *plut = na->na_lut.plut;
	void *ret = (i >= na->na_lut.objtotal) ? lut[0].vaddr : lut[i].vaddr;

#ifdef _WIN32
	*pp = (i >= na->na_lut.objtotal) ? (uint64_t)plut[0].paddr.QuadPart : (uint64_t)plut[i].paddr.QuadPart;
#else
	*pp = (i >= na->na_lut.objtotal) ? plut[0].paddr : plut[i].paddr;
#endif
	return ret;
}

static inline void
nm_write_offset(struct netmap_kring *kring,
		struct netmap_slot *slot, uint64_t offset)
{
	slot->ptr = (slot->ptr & ~kring->offset_mask) |
		(offset & kring->offset_mask);
}

static inline uint64_t
nm_get_offset(struct netmap_kring *kring, struct netmap_slot *slot)
{
	uint64_t offset = (slot->ptr & kring->offset_mask);
	if (unlikely(offset > kring->offset_max))
		offset = kring->offset_max;
	return offset;
}

static inline void *
NMB_O(struct netmap_kring *kring, struct netmap_slot *slot)
{
	void *addr = NMB(kring->na, slot);
	return (char *)addr + nm_get_offset(kring, slot);
}

static inline void *
PNMB_O(struct netmap_kring *kring, struct netmap_slot *slot, uint64_t *pp)
{
	void *addr = PNMB(kring->na, slot, pp);
	uint64_t offset = nm_get_offset(kring, slot);
	addr = (char *)addr + offset;
	*pp += offset;
	return addr;
}
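
/*
 * Illustrative sketch (not a complete txsync): walk the slots the
 * application made available, resolving each buffer with NMB_O() so
 * that per-slot offsets are honored. nm_next() is the ring-index
 * increment helper; error handling and DMA work are omitted.
 */
#if 0
	u_int nm_i = kring->nr_hwcur;
	u_int const head = kring->rhead;
	u_int const lim = kring->nkr_num_slots - 1;

	while (nm_i != head) {
		struct netmap_slot *slot = &kring->ring->slot[nm_i];
		char *buf = NMB_O(kring, slot);
		u_int len = slot->len;

		/* ... hand (buf, len) to the hardware ... */
		nm_i = nm_next(nm_i, lim);
	}
	kring->nr_hwcur = head;
#endif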


/*
 * Structure associated to each netmap file descriptor.
 * It is created on open and left unbound (np_nifp == NULL).
 * A successful NIOCREGIF will set np_nifp and the first few fields;
 * this is protected by a global lock (NMG_LOCK) due to low contention.
 *
 * np_refs counts the number of references to the structure: one for the fd,
 * plus (on FreeBSD) one for each active mmap which we track ourselves
 * (linux automatically tracks them, but FreeBSD does not).
 * np_refs is protected by NMG_LOCK.
 *
 * Read access to the structure is lock free, because np_nifp once set
 * can only go to 0 when nobody is using the entry anymore. Readers
 * must check that np_nifp != NULL before using the other fields.
 */
struct netmap_priv_d {
	struct netmap_if * volatile np_nifp;	/* netmap if descriptor. */

	struct netmap_adapter	*np_na;
	if_t		np_ifp;
	uint32_t	np_flags;	/* from the ioctl */
	u_int		np_qfirst[NR_TXRX],
			np_qlast[NR_TXRX]; /* range of tx/rx rings to scan */
	uint16_t	np_txpoll;
	uint16_t        np_kloop_state;	/* use with NMG_LOCK held */
#define NM_SYNC_KLOOP_RUNNING	(1 << 0)
#define NM_SYNC_KLOOP_STOPPING	(1 << 1)
	int             np_sync_flags; /* to be passed to nm_sync */

	int		np_refs;	/* use with NMG_LOCK held */

	/* pointers to the selinfo to be used for selrecord.
	 * Either the local or the global one depending on the
	 * number of rings.
	 */
	NM_SELINFO_T *np_si[NR_TXRX];

	/* In the optional CSB mode, the user must specify the start address
	 * of two arrays of Communication Status Block (CSB) entries, for the
	 * two directions (kernel reads / application writes, and kernel
	 * writes / application reads).
	 * The number of entries must agree with the number of rings bound to
	 * the netmap file descriptor. The entries corresponding to the TX
	 * rings are laid out before the ones corresponding to the RX rings.
	 *
	 * Array of CSB entries for application --> kernel communication
	 * (N entries). */
	struct nm_csb_atok	*np_csb_atok_base;
	/* Array of CSB entries for kernel --> application communication
	 * (N entries). */
	struct nm_csb_ktoa	*np_csb_ktoa_base;

#ifdef linux
	struct file	*np_filp;  /* used by sync kloop */
#endif /* linux */
};

struct netmap_priv_d *netmap_priv_new(void);
void netmap_priv_delete(struct netmap_priv_d *);

static inline int nm_kring_pending(struct netmap_priv_d *np)
{
	struct netmap_adapter *na = np->np_na;
	enum txrx t;
	int i;

	for_rx_tx(t) {
		for (i = np->np_qfirst[t]; i < np->np_qlast[t]; i++) {
			struct netmap_kring *kring = NMR(na, t)[i];
			if (kring->nr_mode != kring->nr_pending_mode) {
				return 1;
			}
		}
	}
	return 0;
}

/* call with NMG_LOCK held */
static __inline int
nm_si_user(struct netmap_priv_d *priv, enum txrx t)
{
	return (priv->np_na != NULL &&
		(priv->np_qlast[t] - priv->np_qfirst[t] > 1));
}

#ifdef WITH_PIPES
int netmap_pipe_txsync(struct netmap_kring *txkring, int flags);
int netmap_pipe_rxsync(struct netmap_kring *rxkring, int flags);
int netmap_pipe_krings_create_both(struct netmap_adapter *na,
				  struct netmap_adapter *ona);
void netmap_pipe_krings_delete_both(struct netmap_adapter *na,
				    struct netmap_adapter *ona);
int netmap_pipe_reg_both(struct netmap_adapter *na,
			 struct netmap_adapter *ona);
#endif /* WITH_PIPES */

#ifdef WITH_MONITOR

struct netmap_monitor_adapter {
	struct netmap_adapter up;

	struct netmap_priv_d priv;
	uint32_t flags;
};

#endif /* WITH_MONITOR */


#ifdef WITH_GENERIC
/*
 * generic netmap emulation for devices that do not have
 * native netmap support.
 */
int generic_netmap_attach(if_t ifp);
int generic_rx_handler(if_t ifp, struct mbuf *m);

int nm_os_catch_rx(struct netmap_generic_adapter *gna, int intercept);
int nm_os_catch_tx(struct netmap_generic_adapter *gna, int intercept);

int na_is_generic(struct netmap_adapter *na);

/*
 * the generic transmit routine is passed a structure to optionally
 * build a queue of descriptors, in an OS-specific way.
 * The payload is at addr, if non-null, and the routine should send or queue
 * the packet, returning 0 if successful, 1 on failure.
 *
 * At the end, if head is non-null, there will be an additional call
 * to the function with addr = NULL; this should tell the OS-specific
 * routine to send the queue and free any resources. Failure is ignored.
 * A sketch of this calling convention follows the prototypes below.
 */
struct nm_os_gen_arg {
	if_t ifp;
	void *m;	/* os-specific mbuf-like object */
	void *head, *tail; /* tailq, if the OS-specific routine needs to build one */
	void *addr;	/* payload of current packet */
	u_int len;	/* packet length */
	u_int ring_nr;	/* transmit ring index */
	u_int qevent;   /* in txqdisc mode, place an event on this mbuf */
};

int nm_os_generic_xmit_frame(struct nm_os_gen_arg *);
int nm_os_generic_find_num_desc(if_t ifp, u_int *tx, u_int *rx);
void nm_os_generic_find_num_queues(if_t ifp, u_int *txq, u_int *rxq);
void nm_os_generic_set_features(struct netmap_generic_adapter *gna);
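
/*
 * Sketch of the calling convention described above (illustrative,
 * error handling omitted): one call per pending packet, then a final
 * call with addr == NULL to flush the queue, if one was built up.
 */
#if 0
	struct nm_os_gen_arg a;
	u_int nm_i = kring->nr_hwcur;

	bzero(&a, sizeof(a));
	a.ifp = ifp;
	a.ring_nr = kring->ring_id;
	while (nm_i != kring->rhead) {
		struct netmap_slot *slot = &kring->ring->slot[nm_i];

		a.addr = NMB(na, slot);
		a.len = slot->len;
		if (nm_os_generic_xmit_frame(&a))
			break;	/* device queue full, retry later */
		nm_i = nm_next(nm_i, kring->nkr_num_slots - 1);
	}
	if (a.head != NULL) {
		a.addr = NULL;	/* ask the OS routine to flush the queue */
		nm_os_generic_xmit_frame(&a);
	}
#endif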

static inline if_t
netmap_generic_getifp(struct netmap_generic_adapter *gna)
{
	if (gna->prev)
		return gna->prev->ifp;

	return gna->up.up.ifp;
}

void netmap_generic_irq(struct netmap_adapter *na, u_int q, u_int *work_done);

//#define RATE_GENERIC  /* Enables communication statistics for generic. */
#ifdef RATE_GENERIC
void generic_rate(int txp, int txs, int txi, int rxp, int rxs, int rxi);
#else
#define generic_rate(txp, txs, txi, rxp, rxs, rxi)
#endif

/*
 * netmap_mitigation API. This is used by the generic adapter
 * to reduce the number of interrupt requests/selwakeup
 * to clients on incoming packets.
 */
void nm_os_mitigation_init(struct nm_generic_mit *mit, int idx,
                                struct netmap_adapter *na);
void nm_os_mitigation_start(struct nm_generic_mit *mit);
void nm_os_mitigation_restart(struct nm_generic_mit *mit);
int nm_os_mitigation_active(struct nm_generic_mit *mit);
void nm_os_mitigation_cleanup(struct nm_generic_mit *mit);
#else /* !WITH_GENERIC */
#define generic_netmap_attach(ifp)	(EOPNOTSUPP)
#define na_is_generic(na)		(0)
#endif /* WITH_GENERIC */

/* Shared declarations for the VALE switch. */

/*
 * Each transmit queue accumulates a batch of packets into
 * a structure before forwarding. Packets to the same
 * destination are put in a list using ft_next as a link field.
 * ft_frags and ft_next are valid only on the first fragment.
 */
struct nm_bdg_fwd {	/* forwarding entry for a bridge */
	void *ft_buf;		/* netmap or indirect buffer */
	uint8_t ft_frags;	/* how many fragments (only on 1st frag) */
	uint16_t ft_offset;	/* dst port (unused) */
	uint16_t ft_flags;	/* flags, e.g. indirect */
	uint16_t ft_len;	/* src fragment len */
	uint16_t ft_next;	/* next packet to same destination */
};

/* struct 'virtio_net_hdr' from linux. */
struct nm_vnet_hdr {
#define VIRTIO_NET_HDR_F_NEEDS_CSUM     1	/* Use csum_start, csum_offset */
#define VIRTIO_NET_HDR_F_DATA_VALID    2	/* Csum is valid */
    uint8_t flags;
#define VIRTIO_NET_HDR_GSO_NONE         0       /* Not a GSO frame */
#define VIRTIO_NET_HDR_GSO_TCPV4        1       /* GSO frame, IPv4 TCP (TSO) */
#define VIRTIO_NET_HDR_GSO_UDP          3       /* GSO frame, IPv4 UDP (UFO) */
#define VIRTIO_NET_HDR_GSO_TCPV6        4       /* GSO frame, IPv6 TCP */
#define VIRTIO_NET_HDR_GSO_ECN          0x80    /* TCP has ECN set */
    uint8_t gso_type;
    uint16_t hdr_len;
    uint16_t gso_size;
    uint16_t csum_start;
    uint16_t csum_offset;
};

#define WORST_CASE_GSO_HEADER	(14+40+60)  /* IPv6 + TCP */

/* Private definitions for IPv4, IPv6, UDP and TCP headers. */

struct nm_iphdr {
	uint8_t		version_ihl;
	uint8_t		tos;
	uint16_t	tot_len;
	uint16_t	id;
	uint16_t	frag_off;
	uint8_t		ttl;
	uint8_t		protocol;
	uint16_t	check;
	uint32_t	saddr;
	uint32_t	daddr;
	/* The options start here. */
};

struct nm_tcphdr {
	uint16_t	source;
	uint16_t	dest;
	uint32_t	seq;
	uint32_t	ack_seq;
	uint8_t		doff;  /* Data offset + Reserved */
	uint8_t		flags;
	uint16_t	window;
	uint16_t	check;
	uint16_t	urg_ptr;
};

struct nm_udphdr {
	uint16_t	source;
	uint16_t	dest;
	uint16_t	len;
	uint16_t	check;
};

struct nm_ipv6hdr {
	uint8_t		priority_version;
	uint8_t		flow_lbl[3];

	uint16_t	payload_len;
	uint8_t		nexthdr;
	uint8_t		hop_limit;

	uint8_t		saddr[16];
	uint8_t		daddr[16];
};

/* Type used to store a checksum (in host byte order) that hasn't been
 * folded yet.
 */
#define rawsum_t uint32_t

rawsum_t nm_os_csum_raw(uint8_t *data, size_t len, rawsum_t cur_sum);
uint16_t nm_os_csum_ipv4(struct nm_iphdr *iph);
void nm_os_csum_tcpudp_ipv4(struct nm_iphdr *iph, void *data,
		      size_t datalen, uint16_t *check);
void nm_os_csum_tcpudp_ipv6(struct nm_ipv6hdr *ip6h, void *data,
		      size_t datalen, uint16_t *check);
uint16_t nm_os_csum_fold(rawsum_t cur_sum);
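
/*
 * Illustrative sketch: recompute the checksums of a plain TCP/IPv4
 * frame using the helpers above. "buf" (start of the Ethernet header)
 * and "framelen" are assumed to exist; the IP header is assumed to
 * carry no options, hence the hardcoded 14- and 20-byte offsets.
 */
#if 0
	struct nm_iphdr *iph = (struct nm_iphdr *)(buf + 14);
	struct nm_tcphdr *tcph = (struct nm_tcphdr *)(buf + 14 + 20);
	size_t tcplen = framelen - 14 - 20;

	iph->check = 0;
	iph->check = nm_os_csum_ipv4(iph);
	nm_os_csum_tcpudp_ipv4(iph, tcph, tcplen, &tcph->check);
#endif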

void bdg_mismatch_datapath(struct netmap_vp_adapter *na,
			   struct netmap_vp_adapter *dst_na,
			   const struct nm_bdg_fwd *ft_p,
			   struct netmap_ring *dst_ring,
			   u_int *j, u_int lim, u_int *howmany);

/* persistent virtual port routines */
int nm_os_vi_persist(const char *, if_t *);
void nm_os_vi_detach(if_t);
void nm_os_vi_init_index(void);

/*
 * kernel thread routines
 */
struct nm_kctx; /* OS-specific kernel context - opaque */
typedef void (*nm_kctx_worker_fn_t)(void *data);

/* kthread configuration */
struct nm_kctx_cfg {
	long			type;		/* kthread type/identifier */
	nm_kctx_worker_fn_t	worker_fn;	/* worker function */
	void			*worker_private;/* worker parameter */
	int			attach_user;	/* attach kthread to user process */
};
struct nm_kctx *nm_os_kctx_create(struct nm_kctx_cfg *cfg,
					void *opaque);
int nm_os_kctx_worker_start(struct nm_kctx *);
void nm_os_kctx_worker_stop(struct nm_kctx *);
void nm_os_kctx_destroy(struct nm_kctx *);
void nm_os_kctx_worker_setaff(struct nm_kctx *, int);
u_int nm_os_ncpus(void);
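
/*
 * Lifecycle sketch for the kernel context API above (illustrative;
 * my_worker() and my_state are hypothetical):
 */
#if 0
	struct nm_kctx_cfg cfg;
	struct nm_kctx *kctx;
	int error;

	bzero(&cfg, sizeof(cfg));
	cfg.worker_fn = my_worker;
	cfg.worker_private = my_state;
	cfg.attach_user = 1;	/* bind the kthread to the current process */
	kctx = nm_os_kctx_create(&cfg, NULL);
	if (kctx == NULL)
		return ENOMEM;
	error = nm_os_kctx_worker_start(kctx);
	if (error) {
		nm_os_kctx_destroy(kctx);
		return error;
	}
	/* ... on shutdown: */
	nm_os_kctx_worker_stop(kctx);
	nm_os_kctx_destroy(kctx);
#endif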

int netmap_sync_kloop(struct netmap_priv_d *priv,
		      struct nmreq_header *hdr);
int netmap_sync_kloop_stop(struct netmap_priv_d *priv);

#ifdef WITH_PTNETMAP
/* ptnetmap guest routines */

/*
 * ptnetmap_memdev routines used to talk with ptnetmap_memdev device driver
 */
struct ptnetmap_memdev;
int nm_os_pt_memdev_iomap(struct ptnetmap_memdev *, vm_paddr_t *, void **,
                          uint64_t *);
void nm_os_pt_memdev_iounmap(struct ptnetmap_memdev *);
uint32_t nm_os_pt_memdev_ioread(struct ptnetmap_memdev *, unsigned int);

/*
 * netmap adapter for guest ptnetmap ports
 */
struct netmap_pt_guest_adapter {
	/* The netmap adapter to be used by netmap applications.
	 * This field must be the first, to allow upcast. */
	struct netmap_hw_adapter hwup;

	/* The netmap adapter to be used by the driver. */
	struct netmap_hw_adapter dr;

	/* Reference counter to track users of the backend netmap port: the
	 * network stack and netmap clients.
	 * Used to decide when we need to (de)allocate krings/rings and
	 * start (stop) ptnetmap kthreads. */
	int backend_users;

};

int netmap_pt_guest_attach(struct netmap_adapter *na,
			unsigned int nifp_offset,
			unsigned int memid);
bool netmap_pt_guest_txsync(struct nm_csb_atok *atok,
			struct nm_csb_ktoa *ktoa,
			struct netmap_kring *kring, int flags);
bool netmap_pt_guest_rxsync(struct nm_csb_atok *atok,
			struct nm_csb_ktoa *ktoa,
			struct netmap_kring *kring, int flags);
int ptnet_nm_krings_create(struct netmap_adapter *na);
void ptnet_nm_krings_delete(struct netmap_adapter *na);
void ptnet_nm_dtor(struct netmap_adapter *na);

/* Helper function wrapping nm_sync_kloop_appl_read(). */
static inline void
ptnet_sync_tail(struct nm_csb_ktoa *ktoa, struct netmap_kring *kring)
{
	struct netmap_ring *ring = kring->ring;

	/* Update hwcur and hwtail as known by the host. */
	nm_sync_kloop_appl_read(ktoa, &kring->nr_hwtail, &kring->nr_hwcur);

	/* nm_sync_finalize */
	ring->tail = kring->rtail = kring->nr_hwtail;
}
#endif /* WITH_PTNETMAP */
#ifdef __FreeBSD__
/*
 * FreeBSD mbuf allocator/deallocator in emulation mode:
 *
 * We allocate mbufs with m_gethdr(), since the mbuf header is needed
 * by the driver. We also attach custom external storage, allocated
 * from the mbuf cluster zone.
 *
 * The dtor function returns the cluster to its zone; it must be
 * provided in any case, since mb_free_ext() has a KASSERT() checking
 * that the mbuf dtor function is not NULL.
 */

static inline void
nm_generic_mbuf_dtor(struct mbuf *m)
{
	uma_zfree(zone_clust, m->m_ext.ext_buf);
}

#define SET_MBUF_DESTRUCTOR(m, fn, na)	do {		\
	(m)->m_ext.ext_free = (fn != NULL) ?		\
	    (void *)fn : (void *)nm_generic_mbuf_dtor;	\
	(m)->m_ext.ext_arg1 = na;			\
} while (0)

static inline struct mbuf *
nm_os_get_mbuf(if_t ifp __unused, int len)
{
	struct mbuf *m;
	void *buf;

	KASSERT(len <= MCLBYTES, ("%s: len %d", __func__, len));

	m = m_gethdr(M_NOWAIT, MT_DATA);
	if (__predict_false(m == NULL))
		return (NULL);
	buf = uma_zalloc(zone_clust, M_NOWAIT);
	if (__predict_false(buf == NULL)) {
		m_free(m);
		return (NULL);
	}
	m_extadd(m, buf, MCLBYTES, nm_generic_mbuf_dtor, NULL, NULL, 0,
	    EXT_NET_DRV);
	return (m);
}

static inline void
nm_os_mbuf_reinit(struct mbuf *m)
{
	void *buf;

	KASSERT((m->m_flags & M_EXT) != 0,
	    ("%s: mbuf %p has no external storage", __func__, m));
	KASSERT(m->m_ext.ext_size == MCLBYTES,
	    ("%s: mbuf %p has wrong external storage size %u", __func__, m,
	    m->m_ext.ext_size));

	buf = m->m_ext.ext_buf;
	m_init(m, M_NOWAIT, MT_DATA, M_PKTHDR);
	m_extadd(m, buf, MCLBYTES, nm_generic_mbuf_dtor, NULL, NULL, 0,
	    EXT_NET_DRV);
}
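
/*
 * Usage sketch (illustrative): in emulation mode an mbuf allocated
 * with nm_os_get_mbuf() can be recycled across transmissions by
 * reinitializing it, which keeps the attached cluster, instead of
 * freeing and reallocating.
 */
#if 0
	struct mbuf *m = nm_os_get_mbuf(ifp, len);

	if (m != NULL) {
		/* ... transmit m; once the driver is done with it: */
		nm_os_mbuf_reinit(m);
		/* m and its cluster are ready for another packet */
	}
#endif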

#endif /* __FreeBSD__ */

struct nmreq_option * nmreq_getoption(struct nmreq_header *, uint16_t);

int netmap_init_bridges(void);
void netmap_uninit_bridges(void);

/* Functions to read and write CSB fields from the kernel. */
#if defined (linux)
#define CSB_READ(csb, field, r) (get_user(r, &csb->field))
#define CSB_WRITE(csb, field, v) (put_user(v, &csb->field))
#else  /* ! linux */
#define CSB_READ(csb, field, r) do {				\
	int32_t v __diagused;					\
								\
	v = fuword32(&csb->field);				\
	KASSERT(v != -1, ("%s: fuword32 failed", __func__));	\
	r = v;							\
} while (0)
#define CSB_WRITE(csb, field, v) do {				\
	int error __diagused;					\
								\
	error = suword32(&csb->field, v);			\
	KASSERT(error == 0, ("%s: suword32 failed", __func__));	\
} while (0)
#endif /* ! linux */
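
/*
 * Example (sketch): a sync kloop reads the application's view of the
 * ring from an atok entry and publishes the new hardware tail in the
 * ktoa entry. "atok", "ktoa" and "kring" are assumed to be in scope.
 */
#if 0
	uint32_t appl_head;

	CSB_READ(atok, head, appl_head);
	CSB_WRITE(ktoa, hwtail, kring->nr_hwtail);
#endif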

/* some macros that may not be defined */
#ifndef ETH_HLEN
#define ETH_HLEN 14
#endif
#ifndef ETH_FCS_LEN
#define ETH_FCS_LEN 4
#endif
#ifndef VLAN_HLEN
#define VLAN_HLEN 4
#endif

#endif /* _NET_NETMAP_KERN_H_ */