/*-
 * Copyright (c) 2007-2009 Robert N. M. Watson
 * Copyright (c) 2010-2011 Juniper Networks, Inc.
 * All rights reserved.
 *
 * This software was developed by Robert N. M. Watson under contract
 * to Juniper Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * netisr is a packet dispatch service, allowing synchronous (directly
 * dispatched) and asynchronous (deferred dispatch) processing of packets by
 * registered protocol handlers.  Callers pass a protocol identifier and
 * packet to netisr, along with a direct dispatch hint, and work will either
 * be immediately processed by the registered handler, or passed to a
 * software interrupt (SWI) thread for deferred dispatch.  Callers will
 * generally select one or the other based on:
 *
 * - Whether directly dispatching a netisr handler would lead to code
 *   reentrance or lock recursion, such as entering the socket code from the
 *   socket code.
 * - Whether directly dispatching a netisr handler would lead to recursive
 *   processing, such as when decapsulating several wrapped layers of tunnel
 *   information (IPSEC within IPSEC within ...).
 *
 * Maintaining ordering for protocol streams is a critical design concern.
 * Enforcing ordering limits the opportunity for concurrency, but maintains
 * the strong ordering requirements found in some protocols, such as TCP.  Of
 * related concern is CPU affinity--it is desirable to process all data
 * associated with a particular stream on the same CPU over time in order to
 * avoid acquiring locks associated with the connection on different CPUs,
 * keep connection data in one cache, and to generally encourage associated
 * user threads to live on the same CPU as the stream.  It's also desirable
 * to avoid lock migration and contention where locks are associated with
 * more than one flow.
 *
 * netisr supports several policy variations, represented by the
 * NETISR_POLICY_* constants, allowing protocols to play various roles in
 * identifying flows, assigning work to CPUs, etc.  These are described in
 * netisr.h.
 */
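
/*
 * For illustration, a protocol input path chooses between the two entry
 * points implemented below.  This is a hedged sketch; MY_PROTO stands in
 * for a hypothetical protocol number, not a constant defined in netisr.h:
 *
 *	error = netisr_dispatch(MY_PROTO, m);	(direct dispatch permitted)
 *	error = netisr_queue(MY_PROTO, m);	(always defer to the SWI)
 */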

#include "opt_ddb.h"
#include "opt_device_polling.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/rmlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#define	_WANT_NETISR_INTERNAL	/* Enable definitions from netisr_internal.h */
#include <net/if.h>
#include <net/if_var.h>
#include <net/netisr.h>
#include <net/netisr_internal.h>
#include <net/vnet.h>

/*-
 * Synchronize use and modification of the registered netisr data structures;
 * acquire a write lock while modifying the set of registered protocols to
 * prevent partially registered or unregistered protocols from being run.
 *
 * The following data structures and fields are protected by this lock:
 *
 * - The netisr_proto array, including all fields of struct netisr_proto.
 * - The nws array, including all fields of struct netisr_worker.
 * - The nws_array array.
 *
 * Note: the NETISR_LOCKING define controls whether read locks are acquired
 * in packet processing paths requiring netisr registration stability.  This
 * is disabled by default as it can lead to measurable performance
 * degradation even with rmlocks (3%-6% for loopback ping-pong traffic), and
 * because netisr registration and unregistration is extremely rare at
 * runtime.  If it becomes more common, this decision should be revisited.
 *
 * XXXRW: rmlocks don't support assertions.
 */
static struct rmlock	netisr_rmlock;
#define	NETISR_LOCK_INIT()	rm_init_flags(&netisr_rmlock, "netisr", \
				    RM_NOWITNESS)
#define	NETISR_LOCK_ASSERT()
#define	NETISR_RLOCK(tracker)	rm_rlock(&netisr_rmlock, (tracker))
#define	NETISR_RUNLOCK(tracker)	rm_runlock(&netisr_rmlock, (tracker))
#define	NETISR_WLOCK()		rm_wlock(&netisr_rmlock)
#define	NETISR_WUNLOCK()	rm_wunlock(&netisr_rmlock)
/* #define	NETISR_LOCKING */

static SYSCTL_NODE(_net, OID_AUTO, isr, CTLFLAG_RW, 0, "netisr");

/*-
 * Three global direct dispatch policies are supported:
 *
 * NETISR_DISPATCH_DEFERRED: All work is deferred for a netisr, regardless of
 * context (may be overridden by protocols).
 *
 * NETISR_DISPATCH_HYBRID: If the executing context allows direct dispatch,
 * and we're running on the CPU the work would be performed on, then direct
 * dispatch it if it wouldn't violate ordering constraints on the workstream.
 *
 * NETISR_DISPATCH_DIRECT: If the executing context allows direct dispatch,
 * always direct dispatch.  (The default.)
 *
 * Notice that changing the global policy could lead to short periods of
 * misordered processing, but this is considered acceptable as compared to
 * the complexity of enforcing ordering during policy changes.  Protocols can
 * override the global policy (when they're not doing that, they select
 * NETISR_DISPATCH_DEFAULT).
 */
#define	NETISR_DISPATCH_POLICY_DEFAULT	NETISR_DISPATCH_DIRECT
#define	NETISR_DISPATCH_POLICY_MAXSTR	20 /* Used for temporary buffers. */
static u_int	netisr_dispatch_policy = NETISR_DISPATCH_POLICY_DEFAULT;
static int	sysctl_netisr_dispatch_policy(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_net_isr, OID_AUTO, dispatch, CTLTYPE_STRING | CTLFLAG_RW |
    CTLFLAG_TUN, 0, 0, sysctl_netisr_dispatch_policy, "A",
    "netisr dispatch policy");

/*
 * Allow the administrator to limit the number of threads (CPUs) used for
 * netisr.  We don't check netisr_maxthreads before creating the thread for
 * CPU 0, and this must be set at boot.  We will create at most one thread
 * per CPU.  By default this is initialized to 1, which assigns just one CPU
 * (CPU 0) and therefore a single workstream.  If set to -1, netisr uses all
 * CPUs (mp_ncpus) and therefore that many workstreams, one workstream per
 * thread (CPU).
 */
static int	netisr_maxthreads = 1;		/* Max number of threads. */
TUNABLE_INT("net.isr.maxthreads", &netisr_maxthreads);
SYSCTL_INT(_net_isr, OID_AUTO, maxthreads, CTLFLAG_RDTUN,
    &netisr_maxthreads, 0,
    "Use at most this many CPUs for netisr processing");

static int	netisr_bindthreads = 0;		/* Bind threads to CPUs. */
TUNABLE_INT("net.isr.bindthreads", &netisr_bindthreads);
SYSCTL_INT(_net_isr, OID_AUTO, bindthreads, CTLFLAG_RDTUN,
    &netisr_bindthreads, 0, "Bind netisr threads to CPUs.");

/*
 * Limit per-workstream mbuf queue limits to at most net.isr.maxqlimit,
 * both for initial configuration and later modification using
 * netisr_setqlimit().
 */
#define	NETISR_DEFAULT_MAXQLIMIT	10240
static u_int	netisr_maxqlimit = NETISR_DEFAULT_MAXQLIMIT;
TUNABLE_INT("net.isr.maxqlimit", &netisr_maxqlimit);
SYSCTL_UINT(_net_isr, OID_AUTO, maxqlimit, CTLFLAG_RDTUN,
    &netisr_maxqlimit, 0,
    "Maximum netisr per-protocol, per-CPU queue depth.");

/*
 * The default per-workstream mbuf queue limit for protocols that don't
 * initialize the nh_qlimit field of their struct netisr_handler.  If this is
 * set above netisr_maxqlimit, we truncate it to the maximum during boot.
 */
#define	NETISR_DEFAULT_DEFAULTQLIMIT	256
static u_int	netisr_defaultqlimit = NETISR_DEFAULT_DEFAULTQLIMIT;
TUNABLE_INT("net.isr.defaultqlimit", &netisr_defaultqlimit);
SYSCTL_UINT(_net_isr, OID_AUTO, defaultqlimit, CTLFLAG_RDTUN,
    &netisr_defaultqlimit, 0,
    "Default netisr per-protocol, per-CPU queue limit if not set by protocol");

/*
 * Store and export the compile-time constant NETISR_MAXPROT limit on the
 * number of protocols that can register with netisr at a time.  This is
 * required for crashdump analysis, as it sizes netisr_proto[].
 */
static u_int	netisr_maxprot = NETISR_MAXPROT;
SYSCTL_UINT(_net_isr, OID_AUTO, maxprot, CTLFLAG_RD,
    &netisr_maxprot, 0,
    "Compile-time limit on the number of protocols supported by netisr.");

/*
 * The netisr_proto array describes all registered protocols, indexed by
 * protocol number.  See netisr_internal.h for more details.
 */
static struct netisr_proto	netisr_proto[NETISR_MAXPROT];

/*
 * Per-CPU workstream data.  See netisr_internal.h for more details.
 */
DPCPU_DEFINE(struct netisr_workstream, nws);

/*
 * Map contiguous values between 0 and nws_count into CPU IDs appropriate for
 * accessing workstreams.  This allows constructions of the form
 * DPCPU_ID_GET(nws_array[arbitraryvalue % nws_count], nws).
 */
static u_int				 nws_array[MAXCPU];

/*
 * Number of registered workstreams.  Will be at most the number of running
 * CPUs once fully started.
 */
static u_int				 nws_count;
SYSCTL_UINT(_net_isr, OID_AUTO, numthreads, CTLFLAG_RD,
    &nws_count, 0, "Number of extant netisr threads.");

/*
 * Synchronization for each workstream: a mutex protects all mutable fields
 * in each stream, including per-protocol state (mbuf queues).  The SWI is
 * woken up if asynchronous dispatch is required.
 */
#define	NWS_LOCK(s)		mtx_lock(&(s)->nws_mtx)
#define	NWS_LOCK_ASSERT(s)	mtx_assert(&(s)->nws_mtx, MA_OWNED)
#define	NWS_UNLOCK(s)		mtx_unlock(&(s)->nws_mtx)
#define	NWS_SIGNAL(s)		swi_sched((s)->nws_swi_cookie, 0)

/*
 * Utility routines for protocols that implement their own mapping of flows
 * to CPUs.
 */
u_int
netisr_get_cpucount(void)
{

	return (nws_count);
}

u_int
netisr_get_cpuid(u_int cpunumber)
{

	KASSERT(cpunumber < nws_count, ("%s: %u >= %u", __func__, cpunumber,
	    nws_count));

	return (nws_array[cpunumber]);
}

/*
 * The default implementation of flow -> CPU ID mapping.
 *
 * Non-static so that protocols can use it to map their own work to specific
 * CPUs in a manner consistent with netisr for affinity purposes.
 */
u_int
netisr_default_flow2cpu(u_int flowid)
{

	return (nws_array[flowid % nws_count]);
}
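
/*
 * As an illustrative sketch (hypothetical code, not from this file), a
 * protocol distributing its own per-CPU work might iterate over the active
 * workstreams using the two utility routines above:
 *
 *	u_int i, cpuid;
 *
 *	for (i = 0; i < netisr_get_cpucount(); i++) {
 *		cpuid = netisr_get_cpuid(i);
 *		(schedule this slice of work on CPU "cpuid")
 *	}
 */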

/*
 * Dispatch tunable and sysctl configuration.
 */
struct netisr_dispatch_table_entry {
	u_int		 ndte_policy;
	const char	*ndte_policy_str;
};
static const struct netisr_dispatch_table_entry netisr_dispatch_table[] = {
	{ NETISR_DISPATCH_DEFAULT, "default" },
	{ NETISR_DISPATCH_DEFERRED, "deferred" },
	{ NETISR_DISPATCH_HYBRID, "hybrid" },
	{ NETISR_DISPATCH_DIRECT, "direct" },
};
static const u_int netisr_dispatch_table_len =
    (sizeof(netisr_dispatch_table) / sizeof(netisr_dispatch_table[0]));

static void
netisr_dispatch_policy_to_str(u_int dispatch_policy, char *buffer,
    u_int buflen)
{
	const struct netisr_dispatch_table_entry *ndtep;
	const char *str;
	u_int i;

	str = "unknown";
	for (i = 0; i < netisr_dispatch_table_len; i++) {
		ndtep = &netisr_dispatch_table[i];
		if (ndtep->ndte_policy == dispatch_policy) {
			str = ndtep->ndte_policy_str;
			break;
		}
	}
	snprintf(buffer, buflen, "%s", str);
}

static int
netisr_dispatch_policy_from_str(const char *str, u_int *dispatch_policyp)
{
	const struct netisr_dispatch_table_entry *ndtep;
	u_int i;

	for (i = 0; i < netisr_dispatch_table_len; i++) {
		ndtep = &netisr_dispatch_table[i];
		if (strcmp(ndtep->ndte_policy_str, str) == 0) {
			*dispatch_policyp = ndtep->ndte_policy;
			return (0);
		}
	}
	return (EINVAL);
}

static int
sysctl_netisr_dispatch_policy(SYSCTL_HANDLER_ARGS)
{
	char tmp[NETISR_DISPATCH_POLICY_MAXSTR];
	u_int dispatch_policy;
	int error;

	netisr_dispatch_policy_to_str(netisr_dispatch_policy, tmp,
	    sizeof(tmp));
	error = sysctl_handle_string(oidp, tmp, sizeof(tmp), req);
	if (error == 0 && req->newptr != NULL) {
		error = netisr_dispatch_policy_from_str(tmp,
		    &dispatch_policy);
		if (error == 0 && dispatch_policy == NETISR_DISPATCH_DEFAULT)
			error = EINVAL;
		if (error == 0)
			netisr_dispatch_policy = dispatch_policy;
	}
	return (error);
}

/*
 * Register a new netisr handler, which requires initializing per-protocol
 * fields for each workstream.  All netisr work is briefly suspended while
 * the protocol is installed.
 */
void
netisr_register(const struct netisr_handler *nhp)
{
	struct netisr_work *npwp;
	const char *name;
	u_int i, proto;

	proto = nhp->nh_proto;
	name = nhp->nh_name;

	/*
	 * Test that the requested registration is valid.
	 */
	KASSERT(nhp->nh_name != NULL,
	    ("%s: nh_name NULL for %u", __func__, proto));
	KASSERT(nhp->nh_handler != NULL,
	    ("%s: nh_handler NULL for %s", __func__, name));
	KASSERT(nhp->nh_policy == NETISR_POLICY_SOURCE ||
	    nhp->nh_policy == NETISR_POLICY_FLOW ||
	    nhp->nh_policy == NETISR_POLICY_CPU,
	    ("%s: unsupported nh_policy %u for %s", __func__,
	    nhp->nh_policy, name));
	KASSERT(nhp->nh_policy == NETISR_POLICY_FLOW ||
	    nhp->nh_m2flow == NULL,
	    ("%s: nh_policy != FLOW but m2flow defined for %s", __func__,
	    name));
	KASSERT(nhp->nh_policy == NETISR_POLICY_CPU || nhp->nh_m2cpuid == NULL,
	    ("%s: nh_policy != CPU but m2cpuid defined for %s", __func__,
	    name));
	KASSERT(nhp->nh_policy != NETISR_POLICY_CPU || nhp->nh_m2cpuid != NULL,
	    ("%s: nh_policy == CPU but m2cpuid not defined for %s", __func__,
	    name));
	KASSERT(nhp->nh_dispatch == NETISR_DISPATCH_DEFAULT ||
	    nhp->nh_dispatch == NETISR_DISPATCH_DEFERRED ||
	    nhp->nh_dispatch == NETISR_DISPATCH_HYBRID ||
	    nhp->nh_dispatch == NETISR_DISPATCH_DIRECT,
	    ("%s: invalid nh_dispatch (%u)", __func__, nhp->nh_dispatch));

	KASSERT(proto < NETISR_MAXPROT,
	    ("%s(%u, %s): protocol too big", __func__, proto, name));

	/*
	 * Check that no registration already exists for this protocol.
	 */
	NETISR_WLOCK();
	KASSERT(netisr_proto[proto].np_name == NULL,
	    ("%s(%u, %s): name present", __func__, proto, name));
	KASSERT(netisr_proto[proto].np_handler == NULL,
	    ("%s(%u, %s): handler present", __func__, proto, name));

	netisr_proto[proto].np_name = name;
	netisr_proto[proto].np_handler = nhp->nh_handler;
	netisr_proto[proto].np_m2flow = nhp->nh_m2flow;
	netisr_proto[proto].np_m2cpuid = nhp->nh_m2cpuid;
	netisr_proto[proto].np_drainedcpu = nhp->nh_drainedcpu;
	if (nhp->nh_qlimit == 0)
		netisr_proto[proto].np_qlimit = netisr_defaultqlimit;
	else if (nhp->nh_qlimit > netisr_maxqlimit) {
		printf("%s: %s requested queue limit %u capped to "
		    "net.isr.maxqlimit %u\n", __func__, name, nhp->nh_qlimit,
		    netisr_maxqlimit);
		netisr_proto[proto].np_qlimit = netisr_maxqlimit;
	} else
		netisr_proto[proto].np_qlimit = nhp->nh_qlimit;
	netisr_proto[proto].np_policy = nhp->nh_policy;
	netisr_proto[proto].np_dispatch = nhp->nh_dispatch;
	CPU_FOREACH(i) {
		npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
		bzero(npwp, sizeof(*npwp));
		npwp->nw_qlimit = netisr_proto[proto].np_qlimit;
	}
	NETISR_WUNLOCK();
}
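
/*
 * For illustration, a typical registration looks like the following hedged
 * sketch, where "foo" and NETISR_FOO are hypothetical; the field names
 * match those validated by the KASSERTs above:
 *
 *	static void	foo_input(struct mbuf *m);
 *
 *	static struct netisr_handler foo_nh = {
 *		.nh_name = "foo",
 *		.nh_handler = foo_input,
 *		.nh_proto = NETISR_FOO,
 *		.nh_policy = NETISR_POLICY_SOURCE,
 *		.nh_dispatch = NETISR_DISPATCH_DEFAULT,
 *	};
 *
 *	netisr_register(&foo_nh);
 *
 * Leaving nh_qlimit as 0 selects netisr_defaultqlimit, as implemented
 * above.
 */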

/*
 * Clear drop counters across all workstreams for a protocol.
 */
void
netisr_clearqdrops(const struct netisr_handler *nhp)
{
	struct netisr_work *npwp;
#ifdef INVARIANTS
	const char *name;
#endif
	u_int i, proto;

	proto = nhp->nh_proto;
#ifdef INVARIANTS
	name = nhp->nh_name;
#endif
	KASSERT(proto < NETISR_MAXPROT,
	    ("%s(%u): protocol too big for %s", __func__, proto, name));

	NETISR_WLOCK();
	KASSERT(netisr_proto[proto].np_handler != NULL,
	    ("%s(%u): protocol not registered for %s", __func__, proto,
	    name));

	CPU_FOREACH(i) {
		npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
		npwp->nw_qdrops = 0;
	}
	NETISR_WUNLOCK();
}

/*
 * Query current drop counters across all workstreams for a protocol.
 */
void
netisr_getqdrops(const struct netisr_handler *nhp, u_int64_t *qdropp)
{
	struct netisr_work *npwp;
	struct rm_priotracker tracker;
#ifdef INVARIANTS
	const char *name;
#endif
	u_int i, proto;

	*qdropp = 0;
	proto = nhp->nh_proto;
#ifdef INVARIANTS
	name = nhp->nh_name;
#endif
	KASSERT(proto < NETISR_MAXPROT,
	    ("%s(%u): protocol too big for %s", __func__, proto, name));

	NETISR_RLOCK(&tracker);
	KASSERT(netisr_proto[proto].np_handler != NULL,
	    ("%s(%u): protocol not registered for %s", __func__, proto,
	    name));

	CPU_FOREACH(i) {
		npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
		*qdropp += npwp->nw_qdrops;
	}
	NETISR_RUNLOCK(&tracker);
}

/*
 * Query current per-workstream queue limit for a protocol.
 */
void
netisr_getqlimit(const struct netisr_handler *nhp, u_int *qlimitp)
{
	struct rm_priotracker tracker;
#ifdef INVARIANTS
	const char *name;
#endif
	u_int proto;

	proto = nhp->nh_proto;
#ifdef INVARIANTS
	name = nhp->nh_name;
#endif
	KASSERT(proto < NETISR_MAXPROT,
	    ("%s(%u): protocol too big for %s", __func__, proto, name));

	NETISR_RLOCK(&tracker);
	KASSERT(netisr_proto[proto].np_handler != NULL,
	    ("%s(%u): protocol not registered for %s", __func__, proto,
	    name));
	*qlimitp = netisr_proto[proto].np_qlimit;
	NETISR_RUNLOCK(&tracker);
}

/*
 * Update the queue limit across per-workstream queues for a protocol.  We
 * simply change the limits, and don't drain overflowed packets as they will
 * (hopefully) take care of themselves shortly.
 */
int
netisr_setqlimit(const struct netisr_handler *nhp, u_int qlimit)
{
	struct netisr_work *npwp;
#ifdef INVARIANTS
	const char *name;
#endif
	u_int i, proto;

	if (qlimit > netisr_maxqlimit)
		return (EINVAL);

	proto = nhp->nh_proto;
#ifdef INVARIANTS
	name = nhp->nh_name;
#endif
	KASSERT(proto < NETISR_MAXPROT,
	    ("%s(%u): protocol too big for %s", __func__, proto, name));

	NETISR_WLOCK();
	KASSERT(netisr_proto[proto].np_handler != NULL,
	    ("%s(%u): protocol not registered for %s", __func__, proto,
	    name));

	netisr_proto[proto].np_qlimit = qlimit;
	CPU_FOREACH(i) {
		npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
		npwp->nw_qlimit = qlimit;
	}
	NETISR_WUNLOCK();
	return (0);
}
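
/*
 * Usage sketch for the queue limit routines above (hedged; "nh" is assumed
 * to be a protocol's registered struct netisr_handler):
 *
 *	u_int qlimit;
 *
 *	netisr_getqlimit(&nh, &qlimit);
 *	if (netisr_setqlimit(&nh, qlimit * 2) == EINVAL)
 *		(the requested limit exceeded net.isr.maxqlimit)
 */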

/*
 * Drain all packets currently held in a particular protocol work queue.
 */
static void
netisr_drain_proto(struct netisr_work *npwp)
{
	struct mbuf *m;

	/*
	 * We would assert the lock on the workstream but it's not passed in.
	 */
	while ((m = npwp->nw_head) != NULL) {
		npwp->nw_head = m->m_nextpkt;
		m->m_nextpkt = NULL;
		if (npwp->nw_head == NULL)
			npwp->nw_tail = NULL;
		npwp->nw_len--;
		m_freem(m);
	}
	KASSERT(npwp->nw_tail == NULL, ("%s: tail", __func__));
	KASSERT(npwp->nw_len == 0, ("%s: len", __func__));
}

/*
 * Remove the registration of a network protocol, which requires clearing
 * per-protocol fields across all workstreams, including freeing all mbufs in
 * the queues at time of unregister.  All work in netisr is briefly suspended
 * while this takes place.
 */
void
netisr_unregister(const struct netisr_handler *nhp)
{
	struct netisr_work *npwp;
#ifdef INVARIANTS
	const char *name;
#endif
	u_int i, proto;

	proto = nhp->nh_proto;
#ifdef INVARIANTS
	name = nhp->nh_name;
#endif
	KASSERT(proto < NETISR_MAXPROT,
	    ("%s(%u): protocol too big for %s", __func__, proto, name));

	NETISR_WLOCK();
	KASSERT(netisr_proto[proto].np_handler != NULL,
	    ("%s(%u): protocol not registered for %s", __func__, proto,
	    name));

	netisr_proto[proto].np_name = NULL;
	netisr_proto[proto].np_handler = NULL;
	netisr_proto[proto].np_m2flow = NULL;
	netisr_proto[proto].np_m2cpuid = NULL;
	netisr_proto[proto].np_qlimit = 0;
	netisr_proto[proto].np_policy = 0;
	CPU_FOREACH(i) {
		npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
		netisr_drain_proto(npwp);
		bzero(npwp, sizeof(*npwp));
	}
	NETISR_WUNLOCK();
}

/*
 * Compose the global and per-protocol policies on dispatch, and return the
 * dispatch policy to use.
 */
static u_int
netisr_get_dispatch(struct netisr_proto *npp)
{

	/*
	 * Protocol-specific configuration overrides the global default.
	 */
	if (npp->np_dispatch != NETISR_DISPATCH_DEFAULT)
		return (npp->np_dispatch);
	return (netisr_dispatch_policy);
}

/*
 * Look up the workstream given a packet and source identifier.  Do this by
 * checking the protocol's policy, and optionally call out to the protocol
 * for assistance if required.
 */
static struct mbuf *
netisr_select_cpuid(struct netisr_proto *npp, u_int dispatch_policy,
    uintptr_t source, struct mbuf *m, u_int *cpuidp)
{
	struct ifnet *ifp;
	u_int policy;

	NETISR_LOCK_ASSERT();

	/*
	 * In the event we have only one worker, shortcut and deliver to it
	 * without further ado.
	 */
	if (nws_count == 1) {
		*cpuidp = nws_array[0];
		return (m);
	}

	/*
	 * What happens next depends on the policy selected by the protocol.
	 * If we want to support per-interface policies, we should do that
	 * here first.
	 */
	policy = npp->np_policy;
	if (policy == NETISR_POLICY_CPU) {
		m = npp->np_m2cpuid(m, source, cpuidp);
		if (m == NULL)
			return (NULL);

		/*
		 * It's possible for a protocol not to have a good idea about
		 * where to process a packet, in which case we fall back on
		 * the netisr code to decide.  In the hybrid case, return the
		 * current CPU ID, which will force an immediate direct
		 * dispatch.  In the queued case, fall back on the SOURCE
		 * policy.
		 */
		if (*cpuidp != NETISR_CPUID_NONE)
			return (m);
		if (dispatch_policy == NETISR_DISPATCH_HYBRID) {
			*cpuidp = curcpu;
			return (m);
		}
		policy = NETISR_POLICY_SOURCE;
	}

	if (policy == NETISR_POLICY_FLOW) {
		if (M_HASHTYPE_GET(m) == M_HASHTYPE_NONE &&
		    npp->np_m2flow != NULL) {
			m = npp->np_m2flow(m, source);
			if (m == NULL)
				return (NULL);
		}
		if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
			*cpuidp =
			    netisr_default_flow2cpu(m->m_pkthdr.flowid);
			return (m);
		}
		policy = NETISR_POLICY_SOURCE;
	}

	KASSERT(policy == NETISR_POLICY_SOURCE,
	    ("%s: invalid policy %u for %s", __func__, npp->np_policy,
	    npp->np_name));

	ifp = m->m_pkthdr.rcvif;
	if (ifp != NULL)
		*cpuidp = nws_array[(ifp->if_index + source) % nws_count];
	else
		*cpuidp = nws_array[source % nws_count];
	return (m);
}
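
/*
 * A hedged sketch of the nh_m2cpuid callback consumed above for
 * NETISR_POLICY_CPU protocols; foo_m2cpuid() and its helpers are
 * hypothetical.  Returning NETISR_CPUID_NONE asks netisr to fall back as
 * described in the comments above:
 *
 *	static struct mbuf *
 *	foo_m2cpuid(struct mbuf *m, uintptr_t source, u_int *cpuidp)
 *	{
 *
 *		*cpuidp = foo_flow_known(m) ? foo_flow_cpu(m) :
 *		    NETISR_CPUID_NONE;
 *		return (m);
 *	}
 */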

/*
 * Process packets associated with a workstream and protocol.  For reasons of
 * fairness, we process up to one complete netisr queue at a time, moving the
 * queue to a stack-local queue for processing, but do not loop refreshing
 * from the global queue.  The caller is responsible for deciding whether to
 * loop, and for setting the NWS_RUNNING flag.  The passed workstream will be
 * locked on entry and relocked before return, but will be released while
 * processing.  The number of packets processed is returned.
 */
static u_int
netisr_process_workstream_proto(struct netisr_workstream *nwsp, u_int proto)
{
	struct netisr_work local_npw, *npwp;
	u_int handled;
	struct mbuf *m;

	NETISR_LOCK_ASSERT();
	NWS_LOCK_ASSERT(nwsp);

	KASSERT(nwsp->nws_flags & NWS_RUNNING,
	    ("%s(%u): not running", __func__, proto));
	KASSERT(proto >= 0 && proto < NETISR_MAXPROT,
	    ("%s(%u): invalid proto\n", __func__, proto));

	npwp = &nwsp->nws_work[proto];
	if (npwp->nw_len == 0)
		return (0);

	/*
	 * Move the global work queue to a thread-local work queue.
	 *
	 * Notice that this means the effective maximum length of the queue
	 * is actually twice that of the maximum queue length specified in
	 * the protocol registration call.
	 */
	handled = npwp->nw_len;
	local_npw = *npwp;
	npwp->nw_head = NULL;
	npwp->nw_tail = NULL;
	npwp->nw_len = 0;
	nwsp->nws_pendingbits &= ~(1 << proto);
	NWS_UNLOCK(nwsp);
	while ((m = local_npw.nw_head) != NULL) {
		local_npw.nw_head = m->m_nextpkt;
		m->m_nextpkt = NULL;
		if (local_npw.nw_head == NULL)
			local_npw.nw_tail = NULL;
		local_npw.nw_len--;
		VNET_ASSERT(m->m_pkthdr.rcvif != NULL,
		    ("%s:%d rcvif == NULL: m=%p", __func__, __LINE__, m));
		CURVNET_SET(m->m_pkthdr.rcvif->if_vnet);
		netisr_proto[proto].np_handler(m);
		CURVNET_RESTORE();
	}
	KASSERT(local_npw.nw_len == 0,
	    ("%s(%u): len %u", __func__, proto, local_npw.nw_len));
	if (netisr_proto[proto].np_drainedcpu)
		netisr_proto[proto].np_drainedcpu(nwsp->nws_cpu);
	NWS_LOCK(nwsp);
	npwp->nw_handled += handled;
	return (handled);
}

/*
 * SWI handler for netisr -- processes packets in a set of workstreams that
 * it owns, woken up by calls to NWS_SIGNAL().  If this workstream is already
 * being direct dispatched, go back to sleep and wait for the dispatching
 * thread to wake us up again.
 */
static void
swi_net(void *arg)
{
#ifdef NETISR_LOCKING
	struct rm_priotracker tracker;
#endif
	struct netisr_workstream *nwsp;
	u_int bits, prot;

	nwsp = arg;

#ifdef DEVICE_POLLING
	KASSERT(nws_count == 1,
	    ("%s: device_polling but nws_count != 1", __func__));
	netisr_poll();
#endif
#ifdef NETISR_LOCKING
	NETISR_RLOCK(&tracker);
#endif
	NWS_LOCK(nwsp);
	KASSERT(!(nwsp->nws_flags & NWS_RUNNING), ("swi_net: running"));
	if (nwsp->nws_flags & NWS_DISPATCHING)
		goto out;
	nwsp->nws_flags |= NWS_RUNNING;
	nwsp->nws_flags &= ~NWS_SCHEDULED;
	while ((bits = nwsp->nws_pendingbits) != 0) {
		while ((prot = ffs(bits)) != 0) {
			prot--;
			bits &= ~(1 << prot);
			(void)netisr_process_workstream_proto(nwsp, prot);
		}
	}
	nwsp->nws_flags &= ~NWS_RUNNING;
out:
	NWS_UNLOCK(nwsp);
#ifdef NETISR_LOCKING
	NETISR_RUNLOCK(&tracker);
#endif
#ifdef DEVICE_POLLING
	netisr_pollmore();
#endif
}

static int
netisr_queue_workstream(struct netisr_workstream *nwsp, u_int proto,
    struct netisr_work *npwp, struct mbuf *m, int *dosignalp)
{

	NWS_LOCK_ASSERT(nwsp);

	*dosignalp = 0;
	if (npwp->nw_len < npwp->nw_qlimit) {
		m->m_nextpkt = NULL;
		if (npwp->nw_head == NULL) {
			npwp->nw_head = m;
			npwp->nw_tail = m;
		} else {
			npwp->nw_tail->m_nextpkt = m;
			npwp->nw_tail = m;
		}
		npwp->nw_len++;
		if (npwp->nw_len > npwp->nw_watermark)
			npwp->nw_watermark = npwp->nw_len;

		/*
		 * We must set the bit regardless of NWS_RUNNING, so that
		 * swi_net() keeps calling netisr_process_workstream_proto().
		 */
		nwsp->nws_pendingbits |= (1 << proto);
		if (!(nwsp->nws_flags &
		    (NWS_RUNNING | NWS_DISPATCHING | NWS_SCHEDULED))) {
			nwsp->nws_flags |= NWS_SCHEDULED;
			*dosignalp = 1;	/* Defer until unlocked. */
		}
		npwp->nw_queued++;
		return (0);
	} else {
		m_freem(m);
		npwp->nw_qdrops++;
		return (ENOBUFS);
	}
}

static int
netisr_queue_internal(u_int proto, struct mbuf *m, u_int cpuid)
{
	struct netisr_workstream *nwsp;
	struct netisr_work *npwp;
	int dosignal, error;

#ifdef NETISR_LOCKING
	NETISR_LOCK_ASSERT();
#endif
	KASSERT(cpuid <= mp_maxid, ("%s: cpuid too big (%u, %u)", __func__,
	    cpuid, mp_maxid));
	KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__, cpuid));

	dosignal = 0;
	error = 0;
	nwsp = DPCPU_ID_PTR(cpuid, nws);
	npwp = &nwsp->nws_work[proto];
	NWS_LOCK(nwsp);
	error = netisr_queue_workstream(nwsp, proto, npwp, m, &dosignal);
	NWS_UNLOCK(nwsp);
	if (dosignal)
		NWS_SIGNAL(nwsp);
	return (error);
}

int
netisr_queue_src(u_int proto, uintptr_t source, struct mbuf *m)
{
#ifdef NETISR_LOCKING
	struct rm_priotracker tracker;
#endif
	u_int cpuid;
	int error;

	KASSERT(proto < NETISR_MAXPROT,
	    ("%s: invalid proto %u", __func__, proto));

#ifdef NETISR_LOCKING
	NETISR_RLOCK(&tracker);
#endif
	KASSERT(netisr_proto[proto].np_handler != NULL,
	    ("%s: invalid proto %u", __func__, proto));

	m = netisr_select_cpuid(&netisr_proto[proto], NETISR_DISPATCH_DEFERRED,
	    source, m, &cpuid);
	if (m != NULL) {
		KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__,
		    cpuid));
		error = netisr_queue_internal(proto, m, cpuid);
	} else
		error = ENOBUFS;
#ifdef NETISR_LOCKING
	NETISR_RUNLOCK(&tracker);
#endif
	return (error);
}

int
netisr_queue(u_int proto, struct mbuf *m)
{

	return (netisr_queue_src(proto, 0, m));
}
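
/*
 * For example (a hedged sketch), a driver input path that must always defer
 * IP processing could do:
 *
 *	m->m_pkthdr.rcvif = ifp;
 *	if (netisr_queue(NETISR_IP, m) != 0)
 *		(ENOBUFS: the mbuf has already been freed on our behalf)
 *
 * NETISR_IP is defined in net/netisr.h; on queue overflow the mbuf is freed
 * by netisr_queue_workstream() and ENOBUFS is returned.
 */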

/*
 * Dispatch a packet for netisr processing; direct dispatch is permitted by
 * calling context.
 */
int
netisr_dispatch_src(u_int proto, uintptr_t source, struct mbuf *m)
{
#ifdef NETISR_LOCKING
	struct rm_priotracker tracker;
#endif
	struct netisr_workstream *nwsp;
	struct netisr_proto *npp;
	struct netisr_work *npwp;
	int dosignal, error;
	u_int cpuid, dispatch_policy;

	KASSERT(proto < NETISR_MAXPROT,
	    ("%s: invalid proto %u", __func__, proto));
#ifdef NETISR_LOCKING
	NETISR_RLOCK(&tracker);
#endif
	npp = &netisr_proto[proto];
	KASSERT(npp->np_handler != NULL, ("%s: invalid proto %u", __func__,
	    proto));

	dispatch_policy = netisr_get_dispatch(npp);
	if (dispatch_policy == NETISR_DISPATCH_DEFERRED)
		return (netisr_queue_src(proto, source, m));

	/*
	 * If direct dispatch is forced, then unconditionally dispatch
	 * without a formal CPU selection.  Borrow the current CPU's stats,
	 * even if there's no worker on it.  In this case we don't update
	 * nws_flags because all netisr processing will be source ordered due
	 * to always being forced to directly dispatch.
	 */
	if (dispatch_policy == NETISR_DISPATCH_DIRECT) {
		nwsp = DPCPU_PTR(nws);
		npwp = &nwsp->nws_work[proto];
		npwp->nw_dispatched++;
		npwp->nw_handled++;
		netisr_proto[proto].np_handler(m);
		error = 0;
		goto out_unlock;
	}

	KASSERT(dispatch_policy == NETISR_DISPATCH_HYBRID,
	    ("%s: unknown dispatch policy (%u)", __func__, dispatch_policy));

	/*
	 * Otherwise, we execute in a hybrid mode where we will try to direct
	 * dispatch if we're on the right CPU and the netisr worker isn't
	 * already running.
	 */
	sched_pin();
	m = netisr_select_cpuid(&netisr_proto[proto], NETISR_DISPATCH_HYBRID,
	    source, m, &cpuid);
	if (m == NULL) {
		error = ENOBUFS;
		goto out_unpin;
	}
	KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__, cpuid));
	if (cpuid != curcpu)
		goto queue_fallback;
	nwsp = DPCPU_PTR(nws);
	npwp = &nwsp->nws_work[proto];

	/*-
	 * We are willing to direct dispatch only if three conditions hold:
	 *
	 * (1) The netisr worker isn't already running,
	 * (2) Another thread isn't already directly dispatching, and
	 * (3) The netisr hasn't already been woken up.
	 */
	NWS_LOCK(nwsp);
	if (nwsp->nws_flags & (NWS_RUNNING | NWS_DISPATCHING | NWS_SCHEDULED)) {
		error = netisr_queue_workstream(nwsp, proto, npwp, m,
		    &dosignal);
		NWS_UNLOCK(nwsp);
		if (dosignal)
			NWS_SIGNAL(nwsp);
		goto out_unpin;
	}

	/*
	 * The current thread is now effectively the netisr worker, so set
	 * the dispatching flag to prevent concurrent processing of the
	 * stream from another thread (even the netisr worker), which could
	 * otherwise lead to effective misordering of the stream.
	 */
	nwsp->nws_flags |= NWS_DISPATCHING;
	NWS_UNLOCK(nwsp);
	netisr_proto[proto].np_handler(m);
	NWS_LOCK(nwsp);
	nwsp->nws_flags &= ~NWS_DISPATCHING;
	npwp->nw_handled++;
	npwp->nw_hybrid_dispatched++;

	/*
	 * If other work was enqueued by another thread while we were direct
	 * dispatching, we need to signal the netisr worker to do that work.
	 * In the future, we might want to do some of that work in the
	 * current thread, rather than trigger further context switches.  If
	 * so, we'll want to establish a reasonable bound on the work done in
	 * the "borrowed" context.
	 */
	if (nwsp->nws_pendingbits != 0) {
		nwsp->nws_flags |= NWS_SCHEDULED;
		dosignal = 1;
	} else
		dosignal = 0;
	NWS_UNLOCK(nwsp);
	if (dosignal)
		NWS_SIGNAL(nwsp);
	error = 0;
	goto out_unpin;

queue_fallback:
	error = netisr_queue_internal(proto, m, cpuid);
out_unpin:
	sched_unpin();
out_unlock:
#ifdef NETISR_LOCKING
	NETISR_RUNLOCK(&tracker);
#endif
	return (error);
}

int
netisr_dispatch(u_int proto, struct mbuf *m)
{

	return (netisr_dispatch_src(proto, 0, m));
}

#ifdef DEVICE_POLLING
/*
 * Kernel polling borrows a netisr thread to run interface polling in; this
 * function allows kernel polling to request that the netisr thread be
 * scheduled even if no packets are pending for protocols.
 */
void
netisr_sched_poll(void)
{
	struct netisr_workstream *nwsp;

	nwsp = DPCPU_ID_PTR(nws_array[0], nws);
	NWS_SIGNAL(nwsp);
}
#endif

static void
netisr_start_swi(u_int cpuid, struct pcpu *pc)
{
	char swiname[12];
	struct netisr_workstream *nwsp;
	int error;

	KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__, cpuid));

	nwsp = DPCPU_ID_PTR(cpuid, nws);
	mtx_init(&nwsp->nws_mtx, "netisr_mtx", NULL, MTX_DEF);
	nwsp->nws_cpu = cpuid;
	snprintf(swiname, sizeof(swiname), "netisr %u", cpuid);
	error = swi_add(&nwsp->nws_intr_event, swiname, swi_net, nwsp,
	    SWI_NET, INTR_MPSAFE, &nwsp->nws_swi_cookie);
	if (error)
		panic("%s: swi_add %d", __func__, error);
	pc->pc_netisr = nwsp->nws_intr_event;
	if (netisr_bindthreads) {
		error = intr_event_bind(nwsp->nws_intr_event, cpuid);
		if (error != 0)
			printf("%s: cpu %u: intr_event_bind: %d\n", __func__,
			    cpuid, error);
	}
	NETISR_WLOCK();
	nws_array[nws_count] = nwsp->nws_cpu;
	nws_count++;
	NETISR_WUNLOCK();
}

/*
 * Initialize the netisr subsystem.  We rely on BSS and static initialization
 * of most fields in global data structures.
 *
 * Start a worker thread for the boot CPU so that we can support network
 * traffic immediately in case the network stack is used before additional
 * CPUs are started (for example, diskless boot).
 */
static void
netisr_init(void *arg)
{
	char tmp[NETISR_DISPATCH_POLICY_MAXSTR];
	u_int dispatch_policy;
	int error;

	KASSERT(curcpu == 0, ("%s: not on CPU 0", __func__));

	NETISR_LOCK_INIT();
	if (netisr_maxthreads == 0 || netisr_maxthreads < -1)
		netisr_maxthreads = 1;		/* default behavior */
	else if (netisr_maxthreads == -1)
		netisr_maxthreads = mp_ncpus;	/* use max cpus */
	if (netisr_maxthreads > mp_ncpus) {
		printf("netisr_init: forcing maxthreads from %d to %d\n",
		    netisr_maxthreads, mp_ncpus);
		netisr_maxthreads = mp_ncpus;
	}
	if (netisr_defaultqlimit > netisr_maxqlimit) {
		printf("netisr_init: forcing defaultqlimit from %d to %d\n",
		    netisr_defaultqlimit, netisr_maxqlimit);
		netisr_defaultqlimit = netisr_maxqlimit;
	}
#ifdef DEVICE_POLLING
	/*
	 * The device polling code is not yet aware of how to deal with
	 * multiple netisr threads, so for the time being compiling in device
	 * polling disables parallel netisr workers.
	 */
	if (netisr_maxthreads != 1 || netisr_bindthreads != 0) {
		printf("netisr_init: forcing maxthreads to 1 and "
		    "bindthreads to 0 for device polling\n");
		netisr_maxthreads = 1;
		netisr_bindthreads = 0;
	}
#endif

	if (TUNABLE_STR_FETCH("net.isr.dispatch", tmp, sizeof(tmp))) {
		error = netisr_dispatch_policy_from_str(tmp,
		    &dispatch_policy);
		if (error == 0 && dispatch_policy == NETISR_DISPATCH_DEFAULT)
			error = EINVAL;
		if (error == 0)
			netisr_dispatch_policy = dispatch_policy;
		else
			printf(
			    "%s: invalid dispatch policy %s, using default\n",
			    __func__, tmp);
	}

	netisr_start_swi(curcpu, pcpu_find(curcpu));
}
SYSINIT(netisr_init, SI_SUB_SOFTINTR, SI_ORDER_FIRST, netisr_init, NULL);
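
/*
 * Illustrative loader.conf(5) settings corresponding to the tunables
 * consumed above (example values, not recommendations):
 *
 *	net.isr.maxthreads="-1"		(one workstream per CPU)
 *	net.isr.bindthreads="1"		(pin netisr threads to their CPUs)
 *	net.isr.dispatch="deferred"	(global dispatch policy)
 */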

/*
 * Start worker threads for additional CPUs.  No attempt is made to
 * gracefully handle work reassignment; we don't yet support dynamic
 * reconfiguration.
 */
static void
netisr_start(void *arg)
{
	struct pcpu *pc;

	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
		if (nws_count >= netisr_maxthreads)
			break;
		/* XXXRW: Is skipping absent CPUs still required here? */
		if (CPU_ABSENT(pc->pc_cpuid))
			continue;
		/* Worker will already be present for boot CPU. */
		if (pc->pc_netisr != NULL)
			continue;
		netisr_start_swi(pc->pc_cpuid, pc);
	}
}
SYSINIT(netisr_start, SI_SUB_SMP, SI_ORDER_MIDDLE, netisr_start, NULL);

/*
 * Sysctl monitoring for netisr: query a list of registered protocols.
 */
static int
sysctl_netisr_proto(SYSCTL_HANDLER_ARGS)
{
	struct rm_priotracker tracker;
	struct sysctl_netisr_proto *snpp, *snp_array;
	struct netisr_proto *npp;
	u_int counter, proto;
	int error;

	if (req->newptr != NULL)
		return (EINVAL);
	snp_array = malloc(sizeof(*snp_array) * NETISR_MAXPROT, M_TEMP,
	    M_ZERO | M_WAITOK);
	counter = 0;
	NETISR_RLOCK(&tracker);
	for (proto = 0; proto < NETISR_MAXPROT; proto++) {
		npp = &netisr_proto[proto];
		if (npp->np_name == NULL)
			continue;
		snpp = &snp_array[counter];
		snpp->snp_version = sizeof(*snpp);
		strlcpy(snpp->snp_name, npp->np_name, NETISR_NAMEMAXLEN);
		snpp->snp_proto = proto;
		snpp->snp_qlimit = npp->np_qlimit;
		snpp->snp_policy = npp->np_policy;
		snpp->snp_dispatch = npp->np_dispatch;
		if (npp->np_m2flow != NULL)
			snpp->snp_flags |= NETISR_SNP_FLAGS_M2FLOW;
		if (npp->np_m2cpuid != NULL)
			snpp->snp_flags |= NETISR_SNP_FLAGS_M2CPUID;
		if (npp->np_drainedcpu != NULL)
			snpp->snp_flags |= NETISR_SNP_FLAGS_DRAINEDCPU;
		counter++;
	}
	NETISR_RUNLOCK(&tracker);
	KASSERT(counter <= NETISR_MAXPROT,
	    ("sysctl_netisr_proto: counter too big (%d)", counter));
	error = SYSCTL_OUT(req, snp_array, sizeof(*snp_array) * counter);
	free(snp_array, M_TEMP);
	return (error);
}

SYSCTL_PROC(_net_isr, OID_AUTO, proto,
    CTLFLAG_RD|CTLTYPE_STRUCT|CTLFLAG_MPSAFE, 0, 0, sysctl_netisr_proto,
    "S,sysctl_netisr_proto",
    "Return list of protocols registered with netisr");

/*
 * Sysctl monitoring for netisr: query a list of workstreams.
 */
static int
sysctl_netisr_workstream(SYSCTL_HANDLER_ARGS)
{
	struct rm_priotracker tracker;
	struct sysctl_netisr_workstream *snwsp, *snws_array;
	struct netisr_workstream *nwsp;
	u_int counter, cpuid;
	int error;

	if (req->newptr != NULL)
		return (EINVAL);
	snws_array = malloc(sizeof(*snws_array) * MAXCPU, M_TEMP,
	    M_ZERO | M_WAITOK);
	counter = 0;
	NETISR_RLOCK(&tracker);
	CPU_FOREACH(cpuid) {
		nwsp = DPCPU_ID_PTR(cpuid, nws);
		if (nwsp->nws_intr_event == NULL)
			continue;
		NWS_LOCK(nwsp);
		snwsp = &snws_array[counter];
		snwsp->snws_version = sizeof(*snwsp);

		/*
		 * For now, we equate workstream IDs and CPU IDs in the
		 * kernel, but expose them independently to userspace in case
		 * that assumption changes in the future.
		 */
		snwsp->snws_wsid = cpuid;
		snwsp->snws_cpu = cpuid;
		if (nwsp->nws_intr_event != NULL)
			snwsp->snws_flags |= NETISR_SNWS_FLAGS_INTR;
		NWS_UNLOCK(nwsp);
		counter++;
	}
	NETISR_RUNLOCK(&tracker);
	KASSERT(counter <= MAXCPU,
	    ("sysctl_netisr_workstream: counter too big (%d)", counter));
	error = SYSCTL_OUT(req, snws_array, sizeof(*snws_array) * counter);
	free(snws_array, M_TEMP);
	return (error);
}

SYSCTL_PROC(_net_isr, OID_AUTO, workstream,
    CTLFLAG_RD|CTLTYPE_STRUCT|CTLFLAG_MPSAFE, 0, 0, sysctl_netisr_workstream,
    "S,sysctl_netisr_workstream",
    "Return list of workstreams implemented by netisr");

/*
 * Sysctl monitoring for netisr: query per-protocol data across all
 * workstreams.
 */
static int
sysctl_netisr_work(SYSCTL_HANDLER_ARGS)
{
	struct rm_priotracker tracker;
	struct sysctl_netisr_work *snwp, *snw_array;
	struct netisr_workstream *nwsp;
	struct netisr_proto *npp;
	struct netisr_work *nwp;
	u_int counter, cpuid, proto;
	int error;

	if (req->newptr != NULL)
		return (EINVAL);
	snw_array = malloc(sizeof(*snw_array) * MAXCPU * NETISR_MAXPROT,
	    M_TEMP, M_ZERO | M_WAITOK);
	counter = 0;
	NETISR_RLOCK(&tracker);
	CPU_FOREACH(cpuid) {
		nwsp = DPCPU_ID_PTR(cpuid, nws);
		if (nwsp->nws_intr_event == NULL)
			continue;
		NWS_LOCK(nwsp);
		for (proto = 0; proto < NETISR_MAXPROT; proto++) {
			npp = &netisr_proto[proto];
			if (npp->np_name == NULL)
				continue;
			nwp = &nwsp->nws_work[proto];
			snwp = &snw_array[counter];
			snwp->snw_version = sizeof(*snwp);
			snwp->snw_wsid = cpuid;		/* See comment above. */
			snwp->snw_proto = proto;
			snwp->snw_len = nwp->nw_len;
			snwp->snw_watermark = nwp->nw_watermark;
			snwp->snw_dispatched = nwp->nw_dispatched;
			snwp->snw_hybrid_dispatched =
			    nwp->nw_hybrid_dispatched;
			snwp->snw_qdrops = nwp->nw_qdrops;
			snwp->snw_queued = nwp->nw_queued;
			snwp->snw_handled = nwp->nw_handled;
			counter++;
		}
		NWS_UNLOCK(nwsp);
	}
	KASSERT(counter <= MAXCPU * NETISR_MAXPROT,
	    ("sysctl_netisr_work: counter too big (%d)", counter));
	NETISR_RUNLOCK(&tracker);
	error = SYSCTL_OUT(req, snw_array, sizeof(*snw_array) * counter);
	free(snw_array, M_TEMP);
	return (error);
}

SYSCTL_PROC(_net_isr, OID_AUTO, work,
    CTLFLAG_RD|CTLTYPE_STRUCT|CTLFLAG_MPSAFE, 0, 0, sysctl_netisr_work,
    "S,sysctl_netisr_work",
    "Return list of per-workstream, per-protocol work in netisr");

#ifdef DDB
DB_SHOW_COMMAND(netisr, db_show_netisr)
{
	struct netisr_workstream *nwsp;
	struct netisr_work *nwp;
	int first, proto;
	u_int cpuid;

	db_printf("%3s %6s %5s %5s %5s %8s %8s %8s %8s\n", "CPU", "Proto",
	    "Len", "WMark", "Max", "Disp", "HDisp", "Drop", "Queue");
	CPU_FOREACH(cpuid) {
		nwsp = DPCPU_ID_PTR(cpuid, nws);
		if (nwsp->nws_intr_event == NULL)
			continue;
		first = 1;
		for (proto = 0; proto < NETISR_MAXPROT; proto++) {
			if (netisr_proto[proto].np_handler == NULL)
				continue;
			nwp = &nwsp->nws_work[proto];
			if (first) {
				db_printf("%3d ", cpuid);
				first = 0;
			} else
				db_printf("%3s ", "");
			db_printf(
			    "%6s %5d %5d %5d %8ju %8ju %8ju %8ju\n",
			    netisr_proto[proto].np_name, nwp->nw_len,
			    nwp->nw_watermark, nwp->nw_qlimit,
			    nwp->nw_dispatched, nwp->nw_hybrid_dispatched,
			    nwp->nw_qdrops, nwp->nw_queued);
		}
	}
}
#endif