/*-
 * Copyright (c) 2010 Luigi Rizzo, Riccardo Panicucci, Universita` di Pisa
 * All rights reserved
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Dummynet portions related to packet handling.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/netpfil/ipfw/ip_dn_io.c 297228 2016-03-24 09:22:58Z hselasky $");

#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/sysctl.h>

#include <net/if.h>	/* IFNAMSIZ, struct ifaddr, ifq head, lock.h mutex.h */
#include <net/netisr.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/ip.h>		/* ip_len, ip_off */
#include <netinet/ip_var.h>	/* ip_output(), IP_FORWARDING */
#include <netinet/ip_fw.h>
#include <netinet/ip_dummynet.h>
#include <netinet/if_ether.h>	/* various ether_* routines */
#include <netinet/ip6.h>	/* for ip6_input, ip6_output prototypes */
#include <netinet6/ip6_var.h>

#include <netpfil/ipfw/ip_fw_private.h>
#include <netpfil/ipfw/dn_heap.h>
#include <netpfil/ipfw/ip_dn_private.h>
#include <netpfil/ipfw/dn_sched.h>

/*
 * We keep a private variable for the simulation time, but we could
 * probably use an existing one ("softticks" in sys/kern/kern_timeout.c)
 * instead of dn_cfg.curr_time.
 */

struct dn_parms dn_cfg;
//VNET_DEFINE(struct dn_parms, _base_dn_cfg);

static long tick_last;		/* Last tick duration (usec). */
static long tick_delta;		/* Last vs standard tick diff (usec). */
static long tick_delta_sum;	/* Accumulated tick difference (usec). */
static long tick_adjustment;	/* Tick adjustments done. */
static long tick_lost;		/* Number of lost (coalesced) ticks. */
/* Adjusted vs non-adjusted curr_time difference (ticks). */
static long tick_diff;

static unsigned long	io_pkt;
static unsigned long	io_pkt_fast;
static unsigned long	io_pkt_drop;

/*
 * We use a heap to store entities for which we have pending timer events.
 * The heap is checked at every tick and all entities with expired events
 * are extracted.
 */

MALLOC_DEFINE(M_DUMMYNET, "dummynet", "dummynet heap");

extern	void (*bridge_dn_p)(struct mbuf *, struct ifnet *);

#ifdef SYSCTL_NODE

/*
 * Because of the way the SYSBEGIN/SYSEND macros work on other
 * platforms, there should not be functions between them.
 * So keep the handlers outside the block.
 */
static int
sysctl_hash_size(SYSCTL_HANDLER_ARGS)
{
	int error, value;

	value = dn_cfg.hash_size;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (value < 16 || value > 65536)
		return (EINVAL);
	dn_cfg.hash_size = value;
	return (0);
}

static int
sysctl_limits(SYSCTL_HANDLER_ARGS)
{
	int error;
	long value;

	if (arg2 != 0)
		value = dn_cfg.slot_limit;
	else
		value = dn_cfg.byte_limit;
	error = sysctl_handle_long(oidp, &value, 0, req);

	if (error != 0 || req->newptr == NULL)
		return (error);
	if (arg2 != 0) {
		if (value < 1)
			return (EINVAL);
		dn_cfg.slot_limit = value;
	} else {
		if (value < 1500)
			return (EINVAL);
		dn_cfg.byte_limit = value;
	}
	return (0);
}
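
/*
 * Note: sysctl_limits() above serves two OIDs with a single handler,
 * using arg2 to select the field: arg2 != 0 edits dn_cfg.slot_limit
 * (minimum 1 slot), arg2 == 0 edits dn_cfg.byte_limit (minimum 1500
 * bytes, one Ethernet MTU). Illustrative userland usage:
 *
 *	# sysctl net.inet.ip.dummynet.pipe_slot_limit=200
 *	# sysctl net.inet.ip.dummynet.pipe_byte_limit=1048576
 */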

SYSBEGIN(f4)

SYSCTL_DECL(_net_inet);
SYSCTL_DECL(_net_inet_ip);
static SYSCTL_NODE(_net_inet_ip, OID_AUTO, dummynet, CTLFLAG_RW, 0, "Dummynet");

/* wrapper to pass dn_cfg fields to SYSCTL_* */
//#define DC(x)	(&(VNET_NAME(_base_dn_cfg).x))
#define DC(x)	(&(dn_cfg.x))

/* parameters */
SYSCTL_PROC(_net_inet_ip_dummynet, OID_AUTO, hash_size,
    CTLTYPE_INT | CTLFLAG_RW, 0, 0, sysctl_hash_size,
    "I", "Default hash table size");

SYSCTL_PROC(_net_inet_ip_dummynet, OID_AUTO, pipe_slot_limit,
    CTLTYPE_LONG | CTLFLAG_RW, 0, 1, sysctl_limits,
    "L", "Upper limit in slots for pipe queue.");
SYSCTL_PROC(_net_inet_ip_dummynet, OID_AUTO, pipe_byte_limit,
    CTLTYPE_LONG | CTLFLAG_RW, 0, 0, sysctl_limits,
    "L", "Upper limit in bytes for pipe queue.");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, io_fast,
    CTLFLAG_RW, DC(io_fast), 0, "Enable fast dummynet io.");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, debug,
    CTLFLAG_RW, DC(debug), 0, "Dummynet debug level");

/* RED parameters */
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_lookup_depth,
    CTLFLAG_RD, DC(red_lookup_depth), 0, "Depth of RED lookup table");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_avg_pkt_size,
    CTLFLAG_RD, DC(red_avg_pkt_size), 0, "RED medium packet size");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_max_pkt_size,
    CTLFLAG_RD, DC(red_max_pkt_size), 0, "RED max packet size");

/* time adjustment */
SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_delta,
    CTLFLAG_RD, &tick_delta, 0, "Last vs standard tick difference (usec).");
SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_delta_sum,
    CTLFLAG_RD, &tick_delta_sum, 0, "Accumulated tick difference (usec).");
SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_adjustment,
    CTLFLAG_RD, &tick_adjustment, 0, "Tick adjustments done.");
SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_diff,
    CTLFLAG_RD, &tick_diff, 0,
    "Adjusted vs non-adjusted curr_time difference (ticks).");
SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_lost,
    CTLFLAG_RD, &tick_lost, 0,
    "Number of ticks coalesced by dummynet taskqueue.");

/* drain parameters */
SYSCTL_UINT(_net_inet_ip_dummynet, OID_AUTO, expire,
    CTLFLAG_RW, DC(expire), 0, "Expire empty queues/pipes");
SYSCTL_UINT(_net_inet_ip_dummynet, OID_AUTO, expire_cycle,
    CTLFLAG_RD, DC(expire_cycle), 0, "Expire cycle for queues/pipes");

/* statistics */
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, schk_count,
    CTLFLAG_RD, DC(schk_count), 0, "Number of schedulers");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, si_count,
    CTLFLAG_RD, DC(si_count), 0, "Number of scheduler instances");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, fsk_count,
    CTLFLAG_RD, DC(fsk_count), 0, "Number of flowsets");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, queue_count,
    CTLFLAG_RD, DC(queue_count), 0, "Number of queues");
SYSCTL_ULONG(_net_inet_ip_dummynet, OID_AUTO, io_pkt,
    CTLFLAG_RD, &io_pkt, 0,
    "Number of packets passed to dummynet.");
SYSCTL_ULONG(_net_inet_ip_dummynet, OID_AUTO, io_pkt_fast,
    CTLFLAG_RD, &io_pkt_fast, 0,
    "Number of packets that bypassed the dummynet scheduler.");
SYSCTL_ULONG(_net_inet_ip_dummynet, OID_AUTO, io_pkt_drop,
    CTLFLAG_RD, &io_pkt_drop, 0,
    "Number of packets dropped by dummynet.");
#undef DC
SYSEND

#endif

static void	dummynet_send(struct mbuf *);

/*
 * Packets processed by dummynet have an mbuf tag associated with
 * them that carries their dummynet state.
 * Outside dummynet, only the 'rule' field is relevant, and it must
 * be at the beginning of the structure.
 */
struct dn_pkt_tag {
	struct ipfw_rule_ref rule;	/* matching rule	*/

	/* second part, dummynet specific */
	int dn_dir;		/* action when packet comes out;	*/
				/* see ip_fw_private.h			*/
	uint64_t output_time;	/* when the pkt is due for delivery	*/
	struct ifnet *ifp;	/* interface, for ip_output		*/
	struct _ip6dn_args ip6opt;	/* XXX ipv6 options		*/
};

/*
 * Return the mbuf tag holding the dummynet state (it should
 * be the first one on the list).
 */
static struct dn_pkt_tag *
dn_tag_get(struct mbuf *m)
{
	struct m_tag *mtag = m_tag_first(m);

	KASSERT(mtag != NULL &&
	    mtag->m_tag_cookie == MTAG_ABI_COMPAT &&
	    mtag->m_tag_id == PACKET_TAG_DUMMYNET,
	    ("packet on dummynet queue w/o dummynet tag!"));
	return (struct dn_pkt_tag *)(mtag + 1);
}
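
/*
 * Note on the '(mtag + 1)' idiom above: m_tag_get() allocates the
 * struct m_tag header and its payload in one chunk, with the payload
 * (here a struct dn_pkt_tag) immediately after the header, so
 * advancing a 'struct m_tag *' by one element yields the private data:
 *
 *	+---------------+--------------------+
 *	| struct m_tag  | struct dn_pkt_tag  |
 *	+---------------+--------------------+
 *	^mtag           ^(mtag + 1)
 */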

static inline void
mq_append(struct mq *q, struct mbuf *m)
{
	if (q->head == NULL)
		q->head = m;
	else
		q->tail->m_nextpkt = m;
	q->tail = m;
	m->m_nextpkt = NULL;
}
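
/*
 * Note that consumers of an mq only reset q->head when they drain it,
 * so q->tail may be stale while the list is empty; mq_append() above
 * copes with this by testing q->head before touching q->tail.
 */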

/*
 * Dispose of a list of packets. Use a function so that, if we need
 * to do more work, this is a central point to do it.
 */
void
dn_free_pkts(struct mbuf *mnext)
{
	struct mbuf *m;

	while ((m = mnext) != NULL) {
		mnext = m->m_nextpkt;
		FREE_PKT(m);
	}
}

static int
red_drops(struct dn_queue *q, int len)
{
	/*
	 * RED algorithm
	 *
	 * RED calculates the average queue size (avg) using a low-pass filter
	 * with an exponentially weighted (w_q) moving average:
	 * 	avg  <-  (1-w_q) * avg + w_q * q_size
	 * where q_size is the queue length (measured in bytes or packets).
	 *
	 * If q_size == 0, we compute the idle time for the link, and set
	 *	avg <- avg * (1 - w_q)^(idle/s)
	 * where s is the time needed for transmitting a medium-sized packet.
	 *
	 * Now, if avg < min_th the packet is enqueued.
	 * If avg > max_th the packet is dropped. Otherwise, the packet is
	 * dropped with a probability P that is a function of avg.
	 */

	struct dn_fsk *fs = q->fs;
	int64_t p_b = 0;

	/* Queue in bytes or packets? */
	uint32_t q_size = (fs->fs.flags & DN_QSIZE_BYTES) ?
	    q->ni.len_bytes : q->ni.length;

	/* Average queue size estimation. */
	if (q_size != 0) {
		/* Queue is not empty, avg <- avg + (q_size - avg) * w_q */
		int diff = SCALE(q_size) - q->avg;
		int64_t v = SCALE_MUL((int64_t)diff, (int64_t)fs->w_q);

		q->avg += (int)v;
	} else {
		/*
		 * Queue is empty; find out for how long it has been
		 * empty and use a lookup table to compute
		 * (1 - w_q)^(idle_time/s) where s is the time to send a
		 * (small) packet.
		 * XXX check wraps...
		 */
		if (q->avg) {
			u_int t = div64((dn_cfg.curr_time - q->q_time),
			    fs->lookup_step);

			q->avg = (t < fs->lookup_depth) ?
			    SCALE_MUL(q->avg, fs->w_q_lookup[t]) : 0;
		}
	}

	/* Should I drop? */
	if (q->avg < fs->min_th) {
		q->count = -1;
		return (0);	/* accept packet */
	}
	if (q->avg >= fs->max_th) {	/* average queue >= max threshold */
		if (fs->fs.flags & DN_IS_GENTLE_RED) {
			/*
			 * According to Gentle-RED, if avg is greater than
			 * max_th the packet is dropped with a probability
			 *	 p_b = c_3 * avg - c_4
			 * where c_3 = (1 - max_p) / max_th
			 *       c_4 = 1 - 2 * max_p
			 */
			p_b = SCALE_MUL((int64_t)fs->c_3, (int64_t)q->avg) -
			    fs->c_4;
		} else {
			q->count = -1;
			return (1);
		}
	} else if (q->avg > fs->min_th) {
		/*
		 * We compute p_b using the linear dropping function
		 *	 p_b = c_1 * avg - c_2
		 * where c_1 = max_p / (max_th - min_th)
		 * 	 c_2 = max_p * min_th / (max_th - min_th)
		 */
		p_b = SCALE_MUL((int64_t)fs->c_1, (int64_t)q->avg) - fs->c_2;
	}

	if (fs->fs.flags & DN_QSIZE_BYTES)
		p_b = div64(p_b * len, fs->max_pkt_size);
	if (++q->count == 0)
		q->random = random() & 0xffff;
	else {
		/*
		 * q->count counts packets arrived since the last drop, so
		 * a greater value of q->count means a greater packet drop
		 * probability.
		 */
		if (SCALE_MUL(p_b, SCALE((int64_t)q->count)) > q->random) {
			q->count = 0;
			/* After a drop we calculate a new random value. */
			q->random = random() & 0xffff;
			return (1);	/* drop */
		}
	}
	/* End of RED algorithm. */

	return (0);	/* accept */
}
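
/*
 * A worked example of the fixed-point arithmetic above, assuming the
 * SCALE()/SCALE_MUL() macros in ip_dn_private.h use 16 fractional
 * bits (which matches q->random being masked to 16 bits):
 *
 *	w_q = 0.002  ==>  fs->w_q = 0.002 * 2^16 ~= 131
 *	q_size = 50, avg = SCALE(40):
 *	    diff = SCALE(50) - SCALE(40) = 10 << 16
 *	    v    = (diff * 131) >> 16   = 1310
 *	    avg becomes SCALE(40) + 1310, i.e. about 40.02
 *
 * The final drop test works in the same representation: p_b is a
 * scaled probability in [0, 2^16), q->random is uniform in that same
 * range, and SCALE_MUL(p_b, SCALE(count)) reduces to p_b * count.
 */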

/*
 * Enqueue a packet in q, subject to space and queue management policy
 * (whose parameters are in q->fs).
 * Update stats for the queue and the scheduler.
 * Return 0 on success, 1 on drop. The packet is consumed anyway.
 */
int
dn_enqueue(struct dn_queue *q, struct mbuf *m, int drop)
{
	struct dn_fs *f;
	struct dn_flow *ni;	/* stats for scheduler instance */
	uint64_t len;

	if (q->fs == NULL || q->_si == NULL) {
		printf("%s fs %p si %p, dropping\n",
		    __FUNCTION__, q->fs, q->_si);
		FREE_PKT(m);
		return 1;
	}
	f = &(q->fs->fs);
	ni = &q->_si->ni;
	len = m->m_pkthdr.len;
	/* Update statistics, then check reasons to drop pkt. */
	q->ni.tot_bytes += len;
	q->ni.tot_pkts++;
	ni->tot_bytes += len;
	ni->tot_pkts++;
	if (drop)
		goto drop;
	if (f->plr && random() < f->plr)
		goto drop;
	if ((f->flags & DN_IS_RED) && red_drops(q, m->m_pkthdr.len))
		goto drop;
	if (f->flags & DN_QSIZE_BYTES) {
		if (q->ni.len_bytes > f->qsize)
			goto drop;
	} else if (q->ni.length >= f->qsize) {
		goto drop;
	}
	mq_append(&q->mq, m);
	q->ni.length++;
	q->ni.len_bytes += len;
	ni->length++;
	ni->len_bytes += len;
	return 0;

drop:
	io_pkt_drop++;
	q->ni.drops++;
	ni->drops++;
	FREE_PKT(m);
	return 1;
}
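
/*
 * Note on the random loss check above: random() returns a value in
 * [0, 0x7fffffff], so f->plr is the configured loss rate scaled by
 * 0x7fffffff (e.g. a "plr 0.01" pipe stores roughly 0.01 * 2^31);
 * a zero plr short-circuits the test entirely.
 */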

/*
 * Fetch packets from the delay line which are due now. If there are
 * leftover packets, reinsert the delay line in the heap.
 * Runs under scheduler lock.
 */
static void
transmit_event(struct mq *q, struct delay_line *dline, uint64_t now)
{
	struct mbuf *m;
	struct dn_pkt_tag *pkt = NULL;

	dline->oid.subtype = 0; /* not in heap */
	while ((m = dline->mq.head) != NULL) {
		pkt = dn_tag_get(m);
		if (!DN_KEY_LEQ(pkt->output_time, now))
			break;
		dline->mq.head = m->m_nextpkt;
		mq_append(q, m);
	}
	if (m != NULL) {
		dline->oid.subtype = 1; /* in heap */
		heap_insert(&dn_cfg.evheap, pkt->output_time, dline);
	}
}
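
/*
 * The scan above can stop at the first packet that is not yet due
 * because the delay line is ordered: packets are appended with
 * non-decreasing output_time (curr_time plus a constant per-link
 * delay), so if the head is not due, neither is anything behind it.
 */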

/*
 * Convert the additional MAC overheads/delays into an equivalent
 * number of bits for the given data rate. The samples are
 * in milliseconds so we need to divide by 1000.
 */
static uint64_t
extra_bits(struct mbuf *m, struct dn_schk *s)
{
	int index;
	uint64_t bits;
	struct dn_profile *pf = s->profile;

	if (!pf || pf->samples_no == 0)
		return 0;
	index = random() % pf->samples_no;
	bits = div64((uint64_t)pf->samples[index] * s->link.bandwidth, 1000);
	if (index >= pf->loss_level) {
		struct dn_pkt_tag *dt = dn_tag_get(m);

		if (dt)
			dt->dn_dir = DIR_DROP;
	}
	return bits;
}
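
/*
 * Example of the conversion above: with a 1 Mbit/s link and a profile
 * sample of 2 ms, the extra overhead charged to the packet is
 * 1000000 * 2 / 1000 = 2000 bits. Samples at or beyond pf->loss_level
 * model losses during the MAC overhead phase: the packet is still
 * charged for the airtime, but is marked DIR_DROP so dummynet_send()
 * frees it instead of delivering it.
 */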

/*
 * Send traffic from a scheduler instance due by 'now'.
 * Return a pointer to the head of the queue.
 */
static struct mbuf *
serve_sched(struct mq *q, struct dn_sch_inst *si, uint64_t now)
{
	struct mq def_q;
	struct dn_schk *s = si->sched;
	struct mbuf *m = NULL;
	int delay_line_idle = (si->dline.mq.head == NULL);
	int done, bw;

	if (q == NULL) {
		q = &def_q;
		q->head = NULL;
	}

	bw = s->link.bandwidth;
	si->kflags &= ~DN_ACTIVE;

	if (bw > 0)
		si->credit += (now - si->sched_time) * bw;
	else
		si->credit = 0;
	si->sched_time = now;
	done = 0;
	while (si->credit >= 0 && (m = s->fp->dequeue(si)) != NULL) {
		uint64_t len_scaled;

		done++;
		len_scaled = (bw == 0) ? 0 : hz *
		    (m->m_pkthdr.len * 8 + extra_bits(m, s));
		si->credit -= len_scaled;
		/* Move the packet into the delay line. */
		dn_tag_get(m)->output_time = dn_cfg.curr_time + s->link.delay;
		mq_append(&si->dline.mq, m);
	}

	/*
	 * If credit >= 0 the instance is idle, mark time.
	 * Otherwise put back in the heap, and adjust the output
	 * time of the last inserted packet, m, which was too early.
	 */
	if (si->credit >= 0) {
		si->idle_time = now;
	} else {
		uint64_t t;

		KASSERT(bw > 0, ("bw=0 and credit<0 ?"));
		t = div64(bw - 1 - si->credit, bw);
		if (m)
			dn_tag_get(m)->output_time += t;
		si->kflags |= DN_ACTIVE;
		heap_insert(&dn_cfg.evheap, now + t, si);
	}
	if (delay_line_idle && done)
		transmit_event(q, &si->dline, now);
	return q->head;
}
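
/*
 * A note on the credit arithmetic in serve_sched() above: time is in
 * ticks and bandwidth in bits/s, so credit is kept in units of
 * bits * hz. One elapsed tick grants 'bw' units, while sending a
 * packet of L bits costs hz * L units; over one second (hz ticks)
 * the grant is hz * bw, enough for exactly bw bits, as expected.
 * The rescheduling delay t = (bw - 1 - credit) / bw is just the
 * deficit divided by the per-tick grant, rounded up.
 */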

/*
 * The timer handler for dummynet. Time is computed in ticks, but
 * the code is tolerant to the actual rate at which this is called.
 * Once complete, the function reschedules itself for the next tick.
 */
void
dummynet_task(void *context, int pending)
{
	struct timeval t;
	struct mq q = { NULL, NULL }; /* queue to accumulate results */

	CURVNET_SET((struct vnet *)context);

	DN_BH_WLOCK();

	/* Update number of lost (coalesced) ticks. */
	tick_lost += pending - 1;

	getmicrouptime(&t);
	/* Last tick duration (usec). */
	tick_last = (t.tv_sec - dn_cfg.prev_t.tv_sec) * 1000000 +
	    (t.tv_usec - dn_cfg.prev_t.tv_usec);
	/* Last tick vs standard tick difference (usec). */
	tick_delta = (tick_last * hz - 1000000) / hz;
	/* Accumulated tick difference (usec). */
	tick_delta_sum += tick_delta;

	dn_cfg.prev_t = t;

	/*
	 * Adjust curr_time if the accumulated tick difference is
	 * greater than the 'standard' tick. Since curr_time should
	 * be monotonically increasing, we do positive adjustments
	 * as required, and throttle curr_time in case of negative
	 * adjustment.
	 */
	dn_cfg.curr_time++;
	if (tick_delta_sum - tick >= 0) {
		int diff = tick_delta_sum / tick;

		dn_cfg.curr_time += diff;
		tick_diff += diff;
		tick_delta_sum %= tick;
		tick_adjustment++;
	} else if (tick_delta_sum + tick <= 0) {
		dn_cfg.curr_time--;
		tick_diff--;
		tick_delta_sum += tick;
		tick_adjustment++;
	}
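
	/*
	 * Example of the drift compensation above, with hz = 1000 (so
	 * the standard tick is 1000 usec): if the last tick actually
	 * took 1200 usec, tick_delta = 1200 - 1000 = 200 usec. After
	 * five such ticks tick_delta_sum reaches 1000, curr_time jumps
	 * one extra tick and the sum wraps back to 0. Ticks that run
	 * short accumulate a negative sum; once it drops to -1000 the
	 * unconditional curr_time++ is compensated by a curr_time--,
	 * i.e. the clock stalls for a tick rather than going backwards.
	 */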

	/* Serve pending events, accumulate in q. */
	for (;;) {
		struct dn_id *p;    /* generic parameter to handler */

		if (dn_cfg.evheap.elements == 0 ||
		    DN_KEY_LT(dn_cfg.curr_time, HEAP_TOP(&dn_cfg.evheap)->key))
			break;
		p = HEAP_TOP(&dn_cfg.evheap)->object;
		heap_extract(&dn_cfg.evheap, NULL);

		if (p->type == DN_SCH_I) {
			serve_sched(&q, (struct dn_sch_inst *)p,
			    dn_cfg.curr_time);
		} else { /* extracted a delay line */
			transmit_event(&q, (struct delay_line *)p,
			    dn_cfg.curr_time);
		}
	}
	if (dn_cfg.expire && ++dn_cfg.expire_cycle >= dn_cfg.expire) {
		dn_cfg.expire_cycle = 0;
		dn_drain_scheduler();
		dn_drain_queue();
	}

	dn_reschedule();
	DN_BH_WUNLOCK();
	if (q.head != NULL)
		dummynet_send(q.head);
	CURVNET_RESTORE();
}

/*
 * Forward a chain of packets to the proper destination.
 * This runs outside the dummynet lock.
 */
static void
dummynet_send(struct mbuf *m)
{
	struct mbuf *n;

	for (; m != NULL; m = n) {
		struct ifnet *ifp = NULL;	/* gcc 3.4.6 complains */
		struct m_tag *tag;
		int dst;

		n = m->m_nextpkt;
		m->m_nextpkt = NULL;
		tag = m_tag_first(m);
		if (tag == NULL) { /* should not happen */
			dst = DIR_DROP;
		} else {
			struct dn_pkt_tag *pkt = dn_tag_get(m);

			/*
			 * Extract the dummynet info, rename the tag
			 * to carry reinject info.
			 */
			dst = pkt->dn_dir;
			ifp = pkt->ifp;
			tag->m_tag_cookie = MTAG_IPFW_RULE;
			tag->m_tag_id = 0;
		}

		switch (dst) {
		case DIR_OUT:
			ip_output(m, NULL, NULL, IP_FORWARDING, NULL, NULL);
			break;

		case DIR_IN:
			netisr_dispatch(NETISR_IP, m);
			break;

#ifdef INET6
		case DIR_IN | PROTO_IPV6:
			netisr_dispatch(NETISR_IPV6, m);
			break;

		case DIR_OUT | PROTO_IPV6:
			ip6_output(m, NULL, NULL, IPV6_FORWARDING, NULL, NULL, NULL);
			break;
#endif

		case DIR_FWD | PROTO_IFB: /* DN_TO_IFB_FWD: */
			if (bridge_dn_p != NULL)
				(*bridge_dn_p)(m, ifp);
			else
				printf("dummynet: if_bridge not loaded\n");
			break;

		case DIR_IN | PROTO_LAYER2: /* DN_TO_ETH_DEMUX: */
			/*
			 * The Ethernet code assumes the Ethernet header is
			 * contiguous in the first mbuf header.
			 * Ensure this is true.
			 */
			if (m->m_len < ETHER_HDR_LEN &&
			    (m = m_pullup(m, ETHER_HDR_LEN)) == NULL) {
				printf("dummynet/ether: pullup failed, "
				    "dropping packet\n");
				break;
			}
			ether_demux(m->m_pkthdr.rcvif, m);
			break;

		case DIR_OUT | PROTO_LAYER2: /* DN_TO_ETH_OUT: */
			ether_output_frame(ifp, m);
			break;

		case DIR_DROP:
			/* drop the packet after some time */
			FREE_PKT(m);
			break;

		default:
			printf("dummynet: bad switch %d!\n", dst);
			FREE_PKT(m);
			break;
		}
	}
}

static inline int
tag_mbuf(struct mbuf *m, int dir, struct ip_fw_args *fwa)
{
	struct dn_pkt_tag *dt;
	struct m_tag *mtag;

	mtag = m_tag_get(PACKET_TAG_DUMMYNET,
	    sizeof(*dt), M_NOWAIT | M_ZERO);
	if (mtag == NULL)
		return 1;		/* Cannot allocate packet header. */
	m_tag_prepend(m, mtag);		/* Attach to mbuf chain. */
	dt = (struct dn_pkt_tag *)(mtag + 1);
	dt->rule = fwa->rule;
	dt->rule.info &= IPFW_ONEPASS;	/* only keep this info */
	dt->dn_dir = dir;
	dt->ifp = fwa->oif;
	/* dt->output_time is updated as we move through the scheduler. */
	dt->output_time = dn_cfg.curr_time;
	return 0;
}

/*
 * dummynet hook for packets.
 * We use the argument to locate the flowset fs and the sched_set sch
 * associated to it. Then we apply flow_mask and sched_mask to
 * determine the queue and scheduler instances.
 *
 * dir		where shall we send the packet after dummynet.
 * *m0		the mbuf with the packet
 * fwa->oif	the 'ifp' parameter from the caller:
 *		NULL in ip_input, the destination interface in ip_output.
 */
int
dummynet_io(struct mbuf **m0, int dir, struct ip_fw_args *fwa)
{
	struct mbuf *m = *m0;
	struct dn_fsk *fs = NULL;
	struct dn_sch_inst *si;
	struct dn_queue *q = NULL;	/* default */

	int fs_id = (fwa->rule.info & IPFW_INFO_MASK) +
	    ((fwa->rule.info & IPFW_IS_PIPE) ? 2*DN_MAX_ID : 0);
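	/*
	 * 'ipfw queue' and 'ipfw pipe' flowsets share fshash, so pipe
	 * numbers are looked up with a 2*DN_MAX_ID offset; this is
	 * meant to mirror the offset applied when the pipe's internal
	 * flowset is created (see the config code in ip_dummynet.c).
	 */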
	DN_BH_WLOCK();
	io_pkt++;
	/* we could actually tag outside the lock, but who cares... */
	if (tag_mbuf(m, dir, fwa))
		goto dropit;
	if (dn_cfg.busy) {
		/*
		 * If the upper half is busy doing something expensive,
		 * let's queue the packet and move forward.
		 */
		mq_append(&dn_cfg.pending, m);
		m = *m0 = NULL; /* consumed */
		goto done; /* queued for later processing */
	}
	/* XXX locate_flowset could be optimised with a direct ref. */
	fs = dn_ht_find(dn_cfg.fshash, fs_id, 0, NULL);
	if (fs == NULL)
		goto dropit;	/* This queue/pipe does not exist! */
	if (fs->sched == NULL)	/* should not happen */
		goto dropit;
	/* find scheduler instance, possibly applying sched_mask */
	si = ipdn_si_find(fs->sched, &(fwa->f_id));
	if (si == NULL)
		goto dropit;
	/*
	 * If the scheduler supports multiple queues, find the right one
	 * (otherwise it will be ignored by enqueue).
	 */
	if (fs->sched->fp->flags & DN_MULTIQUEUE) {
		q = ipdn_q_find(fs, si, &(fwa->f_id));
		if (q == NULL)
			goto dropit;
	}
	if (fs->sched->fp->enqueue(si, q, m)) {
		/* packet was dropped by enqueue() */
		m = *m0 = NULL;
		goto dropit;
	}

	if (si->kflags & DN_ACTIVE) {
		m = *m0 = NULL; /* consumed */
		goto done; /* already active, nothing to do */
	}

	/* compute the initial allowance */
	if (si->idle_time < dn_cfg.curr_time) {
	    /* Do this only on the first packet on an idle pipe. */
	    struct dn_link *p = &fs->sched->link;

	    si->sched_time = dn_cfg.curr_time;
	    si->credit = dn_cfg.io_fast ? p->bandwidth : 0;
	    if (p->burst) {
		uint64_t burst = (dn_cfg.curr_time - si->idle_time) *
		    p->bandwidth;

		if (burst > p->burst)
			burst = p->burst;
		si->credit += burst;
	    }
	}
	/* pass through scheduler and delay line */
	m = serve_sched(NULL, si, dn_cfg.curr_time);

	/*
	 * Optimization: if the scheduler returned exactly the packet we
	 * just enqueued, hand it back to ipfw for an immediate send
	 * instead of calling dummynet_send() here; this avoids a lock
	 * order reversal.
	 */
	if (/*dn_cfg.io_fast &&*/ m == *m0 && (dir & PROTO_LAYER2) == 0) {
		/* fast io: rename the tag to carry reinject info. */
		struct m_tag *tag = m_tag_first(m);

		tag->m_tag_cookie = MTAG_IPFW_RULE;
		tag->m_tag_id = 0;
		io_pkt_fast++;
		if (m->m_nextpkt != NULL) {
			printf("dummynet: fast io: pkt chain detected!\n");
			m->m_nextpkt = NULL;
		}
		m = NULL;
	} else {
		*m0 = NULL;
	}
done:
	DN_BH_WUNLOCK();
	if (m)
		dummynet_send(m);
	return 0;

dropit:
	io_pkt_drop++;
	DN_BH_WUNLOCK();
	if (m)
		FREE_PKT(m);
	*m0 = NULL;
	return (fs && (fs->fs.flags & DN_NOERROR)) ? 0 : ENOBUFS;
}