/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2001-2002 Luigi Rizzo
 *
 * Supported by: the Xorp Project (www.xorp.org)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_device_polling.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/proc.h>
#include <sys/epoch.h>
#include <sys/eventhandler.h>
#include <sys/resourcevar.h>
#include <sys/socket.h>			/* needed by net/if.h		*/
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/netisr.h>			/* for NETISR_POLL		*/
#include <net/vnet.h>

void hardclock_device_poll(void);	/* hook from hardclock		*/

static struct mtx	poll_mtx;

/*
 * Polling support for [network] device drivers.
 *
 * Drivers which support this feature can register with the
 * polling code.
 *
 * If registration is successful, the driver must disable interrupts,
 * and further I/O is performed through the handler, which is invoked
 * (at least once per clock tick) with 3 arguments: the "arg" passed at
 * register time (a struct ifnet pointer), a command, and a "count" limit.
 *
 * The command can be one of the following:
 *  POLL_ONLY: quick move of "count" packets from input/output queues.
 *  POLL_AND_CHECK_STATUS: as above, plus check status registers or do
 *	other more expensive operations. This command is issued periodically
 *	but less frequently than POLL_ONLY.
 *
 * The count limit specifies how much work the handler can do during the
 * call -- typically this is the number of packets to be received, or
 * transmitted, etc. (drivers are free to interpret this number, as long
 * as the max time spent in the function grows roughly linearly with the
 * count). A sketch of a typical handler follows this comment.
 *
 * Polling is enabled and disabled by setting the IFCAP_POLLING flag on
 * the interface. The driver ioctl handler should register the interface
 * with the polling code and disable interrupts if the registration was
 * successful.
 *
 * A second variable controls the sharing of CPU between polling/kernel
 * network processing, and other activities (typically userlevel tasks):
 * kern.polling.user_frac (between 0 and 100, default 50) sets the share
 * of CPU allocated to user tasks. CPU is allocated proportionally to the
 * shares, by dynamically adjusting the "count" (poll_burst).
 *
 * Other parameters should be left at their default values.
 * The following constraints hold:
 *
 *	1 <= poll_each_burst <= poll_burst <= poll_burst_max
 *	MIN_POLL_BURST_MAX <= poll_burst_max <= MAX_POLL_BURST_MAX
 */
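
/*
 * Example (a minimal sketch, not part of this file): a typical driver
 * poll handler drains up to "count" packets and performs the expensive
 * status work only on POLL_AND_CHECK_STATUS. All foo_* names are
 * hypothetical, and the int return (packets processed) assumes a
 * poll_handler_t with that convention.
 *
 *	static int
 *	foo_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
 *	{
 *		struct foo_softc *sc = ifp->if_softc;
 *		int rx_npkts;
 *
 *		rx_npkts = foo_rxeof(sc, count);	(rx at most count pkts)
 *		foo_txeof(sc);				(reclaim finished tx)
 *		if (cmd == POLL_AND_CHECK_STATUS)
 *			foo_check_status(sc);		(slow status path)
 *		return (rx_npkts);
 *	}
 */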

#define MIN_POLL_BURST_MAX	10
#define MAX_POLL_BURST_MAX	20000

static uint32_t poll_burst = 5;
static uint32_t poll_burst_max = 150;	/* good for 100Mbit net and HZ=1000 */
static uint32_t poll_each_burst = 5;

static SYSCTL_NODE(_kern, OID_AUTO, polling, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Device polling parameters");

SYSCTL_UINT(_kern_polling, OID_AUTO, burst, CTLFLAG_RD,
	&poll_burst, 0, "Current polling burst size");

static int	netisr_poll_scheduled;
static int	netisr_pollmore_scheduled;
static int	poll_shutting_down;

static int poll_burst_max_sysctl(SYSCTL_HANDLER_ARGS)
{
	uint32_t val = poll_burst_max;
	int error;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	if (val < MIN_POLL_BURST_MAX || val > MAX_POLL_BURST_MAX)
		return (EINVAL);

	mtx_lock(&poll_mtx);
	poll_burst_max = val;
	if (poll_burst > poll_burst_max)
		poll_burst = poll_burst_max;
	if (poll_each_burst > poll_burst_max)
		poll_each_burst = MIN_POLL_BURST_MAX;
	mtx_unlock(&poll_mtx);

	return (0);
}
SYSCTL_PROC(_kern_polling, OID_AUTO, burst_max,
    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, sizeof(uint32_t),
    poll_burst_max_sysctl, "I",
    "Max polling burst size");

static int poll_each_burst_sysctl(SYSCTL_HANDLER_ARGS)
{
	uint32_t val = poll_each_burst;
	int error;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	if (val < 1)
		return (EINVAL);

	mtx_lock(&poll_mtx);
	if (val > poll_burst_max) {
		mtx_unlock(&poll_mtx);
		return (EINVAL);
	}
	poll_each_burst = val;
	mtx_unlock(&poll_mtx);

	return (0);
}
SYSCTL_PROC(_kern_polling, OID_AUTO, each_burst,
    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, sizeof(uint32_t),
    poll_each_burst_sysctl, "I",
    "Max size of each burst");

static uint32_t poll_in_idle_loop = 0;	/* do we poll in idle loop? */
SYSCTL_UINT(_kern_polling, OID_AUTO, idle_poll, CTLFLAG_RW,
	&poll_in_idle_loop, 0, "Enable device polling in idle loop");

static uint32_t user_frac = 50;
static int user_frac_sysctl(SYSCTL_HANDLER_ARGS)
{
	uint32_t val = user_frac;
	int error;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	if (val > 99)
		return (EINVAL);

	mtx_lock(&poll_mtx);
	user_frac = val;
	mtx_unlock(&poll_mtx);

	return (0);
}
SYSCTL_PROC(_kern_polling, OID_AUTO, user_frac,
    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, sizeof(uint32_t),
    user_frac_sysctl, "I",
    "Desired user fraction of CPU time");

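/*
 * One poll out of every reg_frac is promoted from POLL_ONLY to
 * POLL_AND_CHECK_STATUS, so drivers check their status registers
 * roughly hz/reg_frac times per second (e.g. 50 times/s with the
 * defaults reg_frac = 20, hz = 1000).
 */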
static uint32_t reg_frac_count = 0;
static uint32_t reg_frac = 20;
static int reg_frac_sysctl(SYSCTL_HANDLER_ARGS)
{
	uint32_t val = reg_frac;
	int error;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	if (val < 1 || val > hz)
		return (EINVAL);

	mtx_lock(&poll_mtx);
	reg_frac = val;
	if (reg_frac_count >= reg_frac)
		reg_frac_count = 0;
	mtx_unlock(&poll_mtx);

	return (0);
}
SYSCTL_PROC(_kern_polling, OID_AUTO, reg_frac,
    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, sizeof(uint32_t),
    reg_frac_sysctl, "I",
    "Check status registers once every this many polling cycles");

static uint32_t short_ticks;
SYSCTL_UINT(_kern_polling, OID_AUTO, short_ticks, CTLFLAG_RD,
	&short_ticks, 0, "Hardclock ticks shorter than they should be");

static uint32_t lost_polls;
SYSCTL_UINT(_kern_polling, OID_AUTO, lost_polls, CTLFLAG_RD,
	&lost_polls, 0, "How many times we would have lost a poll tick");

static uint32_t pending_polls;
SYSCTL_UINT(_kern_polling, OID_AUTO, pending_polls, CTLFLAG_RD,
	&pending_polls, 0, "Do we need to poll again");

static int residual_burst = 0;
SYSCTL_INT(_kern_polling, OID_AUTO, residual_burst, CTLFLAG_RD,
	&residual_burst, 0, "# of residual cycles in burst");

static uint32_t poll_handlers; /* next free entry in pr[]. */
SYSCTL_UINT(_kern_polling, OID_AUTO, handlers, CTLFLAG_RD,
	&poll_handlers, 0, "Number of registered poll handlers");

static uint32_t phase;
SYSCTL_UINT(_kern_polling, OID_AUTO, phase, CTLFLAG_RD,
	&phase, 0, "Polling phase");

static uint32_t suspect;
SYSCTL_UINT(_kern_polling, OID_AUTO, suspect, CTLFLAG_RD,
	&suspect, 0, "Suspect events");

static uint32_t stalled;
SYSCTL_UINT(_kern_polling, OID_AUTO, stalled, CTLFLAG_RD,
	&stalled, 0, "Potential stalls");

static uint32_t idlepoll_sleeping; /* idlepoll is sleeping */
SYSCTL_UINT(_kern_polling, OID_AUTO, idlepoll_sleeping, CTLFLAG_RD,
	&idlepoll_sleeping, 0, "idlepoll is sleeping");

#define POLL_LIST_LEN  128
struct pollrec {
	poll_handler_t	*handler;
	struct ifnet	*ifp;
};

static struct pollrec pr[POLL_LIST_LEN];

static void
poll_shutdown(void *arg, int howto)
{

	poll_shutting_down = 1;
}

static void
init_device_poll(void)
{

	mtx_init(&poll_mtx, "polling", NULL, MTX_DEF);
	EVENTHANDLER_REGISTER(shutdown_post_sync, poll_shutdown, NULL,
	    SHUTDOWN_PRI_LAST);
}
SYSINIT(device_poll, SI_SUB_SOFTINTR, SI_ORDER_MIDDLE, init_device_poll, NULL);

/*
 * Hook from hardclock. Tries to schedule a netisr, but keeps track
 * of lost ticks due to the previous handler taking too long.
 * Normally, this should not happen, because the polling handler should
 * run for a short time. However, in some cases (e.g. when there are
 * changes in link status etc.) the drivers take a very long time
 * (even on the order of milliseconds) to reset and reconfigure the
 * device, causing apparent lost polls.
 *
 * The first part of the code is just for debugging purposes, and tries
 * to count how often hardclock ticks are shorter than they should be,
 * meaning either stray interrupts or delayed events.
 */
void
hardclock_device_poll(void)
{
	static struct timeval prev_t, t;
	int delta;

	if (poll_handlers == 0 || poll_shutting_down)
		return;

	microuptime(&t);
	delta = (t.tv_usec - prev_t.tv_usec) +
		(t.tv_sec - prev_t.tv_sec)*1000000;
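	/*
	 * A full tick lasts 1000000/hz us, so the test below flags ticks
	 * shorter than half their nominal length (delta < 500000/hz us,
	 * i.e. under 500 us at hz = 1000).
	 */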
	if (delta * hz < 500000)
		short_ticks++;
	else
		prev_t = t;

	if (pending_polls > 100) {
		/*
		 * Too much, assume it has stalled (not always true,
		 * see comment above).
		 */
		stalled++;
		pending_polls = 0;
		phase = 0;
	}

	if (phase <= 2) {
		if (phase != 0)
			suspect++;
		phase = 1;
		netisr_poll_scheduled = 1;
		netisr_pollmore_scheduled = 1;
		netisr_sched_poll();
		phase = 2;
	}
	if (pending_polls++ > 0)
		lost_polls++;
}

/*
 * ether_poll is called from the idle loop.
 */
static void
ether_poll(int count)
{
	struct epoch_tracker et;
	int i;

	mtx_lock(&poll_mtx);

	if (count > poll_each_burst)
		count = poll_each_burst;

	NET_EPOCH_ENTER(et);
	for (i = 0; i < poll_handlers; i++)
		pr[i].handler(pr[i].ifp, POLL_ONLY, count);
	NET_EPOCH_EXIT(et);

	mtx_unlock(&poll_mtx);
}

/*
 * netisr_pollmore is called after other netisrs, possibly scheduling
 * another NETISR_POLL call, or adapting the burst size for the next cycle.
 *
 * It is very bad to fetch large bursts of packets from a single card at once,
 * because the burst could take a long time to be completely processed, or
 * could saturate the intermediate queue (ipintrq or similar) leading to
 * losses or unfairness. To reduce the problem, and also to account better for
 * time spent in network-related processing, we split the burst in smaller
 * chunks of fixed size, giving control to the other netisrs between chunks.
 * This helps improve fairness, reduce livelock (because we emulate more
 * closely the "process to completion" that we have with fastforwarding)
 * and account for the work performed in low level handling and forwarding.
 */
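
/*
 * Worked example with the defaults above (purely illustrative): once
 * poll_burst has grown to 150 with poll_each_burst = 5, a tick's work
 * is spread over 30 NETISR_POLL passes of at most 5 packets per
 * handler, with the other netisrs running between passes.
 */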

static struct timeval poll_start_t;

void
netisr_pollmore(void)
{
	struct timeval t;
	int kern_load;

	if (poll_handlers == 0)
		return;

	mtx_lock(&poll_mtx);
	if (!netisr_pollmore_scheduled) {
		mtx_unlock(&poll_mtx);
		return;
	}
	netisr_pollmore_scheduled = 0;
	phase = 5;
	if (residual_burst > 0) {
		netisr_poll_scheduled = 1;
		netisr_pollmore_scheduled = 1;
		netisr_sched_poll();
		mtx_unlock(&poll_mtx);
		/* will run immediately on return, followed by netisrs */
		return;
	}
	/* here we can account time spent in netisrs in this tick */
	microuptime(&t);
	kern_load = (t.tv_usec - poll_start_t.tv_usec) +
		(t.tv_sec - poll_start_t.tv_sec)*1000000;	/* us */
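	/*
	 * Convert to a percentage of one tick: a tick is 1000000/hz us,
	 * so us * 100 / (1000000/hz) == us * hz / 10000.
	 */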
	kern_load = (kern_load * hz) / 10000;			/* 0..100 */
	if (kern_load > (100 - user_frac)) {	/* over budget, shrink burst */
		if (poll_burst > 1)
			poll_burst--;
	} else {
		if (poll_burst < poll_burst_max)
			poll_burst++;
	}

	pending_polls--;
	if (pending_polls == 0) /* we are done */
		phase = 0;
	else {
		/*
		 * Last cycle was long and caused us to miss one or more
		 * hardclock ticks. Restart processing again, but slightly
		 * reduce the burst size to prevent this from happening again.
		 */
		poll_burst -= (poll_burst / 8);
		if (poll_burst < 1)
			poll_burst = 1;
		netisr_poll_scheduled = 1;
		netisr_pollmore_scheduled = 1;
		netisr_sched_poll();
		phase = 6;
	}
	mtx_unlock(&poll_mtx);
}

/*
 * netisr_poll is typically scheduled once per tick.
 */
void
netisr_poll(void)
{
	int i, cycles;
	enum poll_cmd arg = POLL_ONLY;

	NET_EPOCH_ASSERT();

	if (poll_handlers == 0)
		return;

	mtx_lock(&poll_mtx);
	if (!netisr_poll_scheduled) {
		mtx_unlock(&poll_mtx);
		return;
	}
	netisr_poll_scheduled = 0;
	phase = 3;
	if (residual_burst == 0) { /* first call in this tick */
		microuptime(&poll_start_t);
		if (++reg_frac_count == reg_frac) {
			arg = POLL_AND_CHECK_STATUS;
			reg_frac_count = 0;
		}

		residual_burst = poll_burst;
	}
	cycles = (residual_burst < poll_each_burst) ?
		residual_burst : poll_each_burst;
	residual_burst -= cycles;

	for (i = 0; i < poll_handlers; i++)
		pr[i].handler(pr[i].ifp, arg, cycles);

	phase = 4;
	mtx_unlock(&poll_mtx);
}
470
471/*
472 * Try to register routine for polling. Returns 0 if successful
473 * (and polling should be enabled), error code otherwise.
474 * A device is not supposed to register itself multiple times.
475 *
476 * This is called from within the *_ioctl() functions.
477 */
int
ether_poll_register(poll_handler_t *h, if_t ifp)
{
	int i;

	KASSERT(h != NULL, ("%s: handler is NULL", __func__));
	KASSERT(ifp != NULL, ("%s: ifp is NULL", __func__));

	mtx_lock(&poll_mtx);
	if (poll_handlers >= POLL_LIST_LEN) {
		/*
		 * List full, cannot register more entries.
		 * This should never happen; if it does, it is probably a
		 * broken driver trying to register multiple times. Checking
		 * this at runtime is expensive, and won't solve the problem
		 * anyway, so just report a few times and then give up.
		 */
		static int verbose = 10;
		if (verbose > 0) {
			log(LOG_ERR, "poll handlers list full, "
			    "maybe a broken driver?\n");
			verbose--;
		}
		mtx_unlock(&poll_mtx);
		return (ENOMEM); /* no polling for you */
	}

	for (i = 0; i < poll_handlers; i++)
		if (pr[i].ifp == ifp && pr[i].handler != NULL) {
			mtx_unlock(&poll_mtx);
			log(LOG_DEBUG, "ether_poll_register: %s: handler"
			    " already registered\n", ifp->if_xname);
			return (EEXIST);
		}

	pr[poll_handlers].handler = h;
	pr[poll_handlers].ifp = ifp;
	poll_handlers++;
	mtx_unlock(&poll_mtx);
	if (idlepoll_sleeping)
		wakeup(&idlepoll_sleeping);
	return (0);
}
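
/*
 * Sketch of a typical caller (hypothetical driver code, illustrative
 * only): the SIOCSIFCAP branch of a driver ioctl routine registers or
 * deregisters polling when IFCAP_POLLING is toggled; foo_poll and the
 * foo_*_intr helpers are assumptions, not real API.
 *
 *	case SIOCSIFCAP:
 *		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
 *		if (mask & IFCAP_POLLING) {
 *			if (ifr->ifr_reqcap & IFCAP_POLLING) {
 *				error = ether_poll_register(foo_poll, ifp);
 *				if (error == 0)
 *					foo_disable_intr(sc);
 *			} else {
 *				error = ether_poll_deregister(ifp);
 *				foo_enable_intr(sc);
 *			}
 *			if_togglecapenable(ifp, IFCAP_POLLING);
 *		}
 *		break;
 */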

/*
 * Remove interface from the polling list. Called from *_ioctl(), too.
 */
int
ether_poll_deregister(if_t ifp)
{
	int i;

	KASSERT(ifp != NULL, ("%s: ifp is NULL", __func__));

	mtx_lock(&poll_mtx);

	for (i = 0; i < poll_handlers; i++)
		if (pr[i].ifp == ifp) /* found it */
			break;
	if (i == poll_handlers) {
		log(LOG_DEBUG, "ether_poll_deregister: %s: not found!\n",
		    ifp->if_xname);
		mtx_unlock(&poll_mtx);
		return (ENOENT);
	}
	poll_handlers--;
	if (i < poll_handlers) { /* Last entry replaces this one. */
		pr[i].handler = pr[poll_handlers].handler;
		pr[i].ifp = pr[poll_handlers].ifp;
	}
	mtx_unlock(&poll_mtx);
	return (0);
}

static void
poll_idle(void)
{
	struct thread *td = curthread;
	struct rtprio rtp;

	rtp.prio = RTP_PRIO_MAX;	/* lowest priority */
	rtp.type = RTP_PRIO_IDLE;
	PROC_SLOCK(td->td_proc);
	rtp_to_pri(&rtp, td);
	PROC_SUNLOCK(td->td_proc);

	for (;;) {
		if (poll_in_idle_loop && poll_handlers > 0) {
			idlepoll_sleeping = 0;
			ether_poll(poll_each_burst);
			thread_lock(td);
			mi_switch(SW_VOL);
		} else {
			idlepoll_sleeping = 1;
			tsleep(&idlepoll_sleeping, 0, "pollid", hz * 3);
		}
	}
}

static struct proc *idlepoll;
static struct kproc_desc idlepoll_kp = {
	 "idlepoll",
	 poll_idle,
	 &idlepoll
};
SYSINIT(idlepoll, SI_SUB_KTHREAD_VM, SI_ORDER_ANY, kproc_start,
    &idlepoll_kp);