/*
 * Copyright (c) 2010 Riccardo Panicucci, Universita` di Pisa
 * Copyright (c) 2000-2002 Luigi Rizzo, Universita` di Pisa
 * All rights reserved
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * $FreeBSD$
 */

#ifdef _KERNEL
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <net/if.h>	/* IFNAMSIZ */
#include <netinet/in.h>
#include <netinet/ip_var.h>		/* ipfw_rule_ref */
#include <netinet/ip_fw.h>	/* flow_id */
#include <netinet/ip_dummynet.h>
#include <netpfil/ipfw/dn_heap.h>
#include <netpfil/ipfw/ip_dn_private.h>
#include <netpfil/ipfw/dn_sched.h>
#else
#include <dn_test.h>
#endif

#ifndef MAX64
#define MAX64(x,y)  ((((int64_t)((y) - (x))) > 0) ? (y) : (x))
#endif

/*
 * Timestamps are computed on 64 bits using fixed point arithmetic.
 * LMAX_BITS and WMAX_BITS are the max number of bits for the packet
 * length and the sum of weights, respectively. FRAC_BITS is the number
 * of fractional bits. We want FRAC_BITS much larger than WMAX_BITS to
 * avoid large errors when computing the inverse, FRAC_BITS < 32 so we
 * can compute 1/w with an unsigned 32-bit division, and, to avoid
 * wraparounds, LMAX_BITS + WMAX_BITS + FRAC_BITS must stay well below 64.
 * As an example, FRAC_BITS = 26, LMAX_BITS = 14, WMAX_BITS = 19
 * satisfies all of the above (the default below uses FRAC_BITS = 28).
 */
#ifndef FRAC_BITS
#define FRAC_BITS    28 /* shift for fixed point arithmetic */
#define	ONE_FP	(1UL << FRAC_BITS)
#endif

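/*
 * Illustrative example of the fixed point arithmetic: with FRAC_BITS = 28,
 * ONE_FP is 2^28, so a flow of weight 100 gets inv_w = ONE_FP / 100 = 2684354,
 * and a 1500-byte packet advances its finish time by
 * len * inv_w = 4026531000, i.e. roughly 15 * ONE_FP, matching the expected
 * 1500/100 bytes of weighted service.
 */
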
/*
 * Private information for the scheduler instance:
 * sch_heap (key is Finish time) returns the next queue to serve
 * ne_heap (key is Start time) stores not-eligible queues
 * idle_heap (key is Start=Finish time) stores idle flows. It must
 *	support extract-from-middle.
 * A flow is in at most one of the three heaps.
 * XXX todo: use a more efficient data structure, e.g. a tree sorted
 * by F with min_subtree(S) in each node
 */
struct wf2qp_si {
    struct dn_heap sch_heap;	/* top extract - key Finish  time */
    struct dn_heap ne_heap;	/* top extract - key Start   time */
    struct dn_heap idle_heap;	/* random extract - key Start=Finish time */
    uint64_t V;			/* virtual time */
    uint32_t inv_wsum;		/* inverse of sum of weights */
    uint32_t wsum;		/* sum of weights */
};

struct wf2qp_queue {
    struct dn_queue _q;
    uint64_t S, F;		/* start time, finish time */
    uint32_t inv_w;		/* ONE_FP / weight */
    int32_t heap_pos;		/* position (index) of struct in heap */
};

/*
 * This file implements a WF2Q+ scheduler as it has been in dummynet
 * since 2000.
 * The scheduler supports per-flow queues and has O(log N) complexity.
 *
 * WF2Q+ needs to drain entries from the idle heap so that we
 * can keep the sum of weights up to date. We can do it whenever
 * we get a chance, or periodically, or following some other
 * strategy. The function idle_check() drains at most N elements
 * from the idle heap.
 */
static void
idle_check(struct wf2qp_si *si, int n, int force)
{
    struct dn_heap *h = &si->idle_heap;
    while (n-- > 0 && h->elements > 0 &&
	    (force || DN_KEY_LT(HEAP_TOP(h)->key, si->V))) {
	struct dn_queue *q = HEAP_TOP(h)->object;
	struct wf2qp_queue *alg_fq = (struct wf2qp_queue *)q;

	heap_extract(h, NULL);
	/* XXX to let the flowset delete the queue we should
	 * mark it as 'unused' by the scheduler.
	 */
	alg_fq->S = alg_fq->F + 1; /* Mark timestamp as invalid. */
	si->wsum -= q->fs->fs.par[0];	/* adjust sum of weights */
	if (si->wsum > 0)
		si->inv_wsum = ONE_FP / si->wsum;
    }
}

static int
wf2qp_enqueue(struct dn_sch_inst *_si, struct dn_queue *q, struct mbuf *m)
{
    struct dn_fsk *fs = q->fs;
    struct wf2qp_si *si = (struct wf2qp_si *)(_si + 1);
    struct wf2qp_queue *alg_fq;
    uint64_t len = m->m_pkthdr.len;

    if (m != q->mq.head) {
	if (dn_enqueue(q, m, 0)) /* packet was dropped */
	    return 1;
	if (m != q->mq.head)	/* queue was already busy */
	    return 0;
    }

    /* If we reach this point, queue q was idle. */
    alg_fq = (struct wf2qp_queue *)q;

    if (DN_KEY_LT(alg_fq->F, alg_fq->S)) {
	/* F < S means timestamps are invalid -> brand new queue. */
	alg_fq->S = si->V;		/* init start time */
	si->wsum += fs->fs.par[0];	/* add weight of new queue. */
	si->inv_wsum = ONE_FP / si->wsum;
    } else { /* if it was idle then it was in the idle heap */
	heap_extract(&si->idle_heap, q);
	alg_fq->S = MAX64(alg_fq->F, si->V);	/* compute new S */
    }
    alg_fq->F = alg_fq->S + len * alg_fq->inv_w;

    /* if nothing is backlogged, make sure this flow is eligible */
    if (si->ne_heap.elements == 0 && si->sch_heap.elements == 0)
	si->V = MAX64(alg_fq->S, si->V);

    /*
     * Look at eligibility. A flow is not eligible if S > V (when
     * this happens, it means that there is some other flow already
     * scheduled for the same pipe, so the sch_heap cannot be
     * empty). If the flow is not eligible we just store it in the
     * ne_heap. Otherwise, we store it in the sch_heap.
     * Note that for all flows in sch_heap (SCH), S_i <= V,
     * and for all flows in ne_heap (NEH), S_i > V.
     * So when we need to compute max(V, min(S_i)) for all i in
     * SCH+NEH, we only need to look into NEH.
     */
    if (DN_KEY_LT(si->V, alg_fq->S)) {
	/* S>V means flow Not eligible. */
	if (si->sch_heap.elements == 0)
	    D("++ ouch! not eligible but empty scheduler!");
	heap_insert(&si->ne_heap, alg_fq->S, q);
    } else {
	heap_insert(&si->sch_heap, alg_fq->F, q);
    }
    return 0;
}

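/*
 * Illustrative example of the timestamp update above: a queue with
 * weight 2 (inv_w = ONE_FP / 2) extracted from the idle heap gets
 * S = max(F, V) and F = S + len * inv_w, so a 1500-byte packet makes
 * F - S equal to 750 * ONE_FP, i.e. the packet is charged 1500/2
 * bytes of service at unit weight.
 */
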
/* XXX invariant: sch > 0 || V >= min(S in neh) */
static struct mbuf *
wf2qp_dequeue(struct dn_sch_inst *_si)
{
	/* Access scheduler instance private data */
	struct wf2qp_si *si = (struct wf2qp_si *)(_si + 1);
	struct mbuf *m;
	struct dn_queue *q;
	struct dn_heap *sch = &si->sch_heap;
	struct dn_heap *neh = &si->ne_heap;
	struct wf2qp_queue *alg_fq;

	if (sch->elements == 0 && neh->elements == 0) {
		/* we have nothing to do. We could kill the idle heap
		 * altogether and reset V
		 */
		idle_check(si, 0x7fffffff, 1);
		si->V = 0;
		si->wsum = 0;	/* should be set already */
		return NULL;	/* quick return if nothing to do */
	}
	idle_check(si, 1, 0);	/* drain something from the idle heap */

	/* make sure at least one element is eligible, bumping V
	 * and moving entries that have become eligible.
	 * We need to repeat the first part twice, before and
	 * after extracting the candidate, or enqueue() will
	 * find the data structure in a wrong state.
	 */
	m = NULL;
	for (;;) {
		/*
		 * Compute V = max(V, min(S_i)). Remember that all elements
		 * in sch have by definition S_i <= V so if sch is not empty,
		 * V is surely the max and we must not update it. Conversely,
		 * if sch is empty we only need to look at neh.
		 * We don't need to move the queues, as it will be done at the
		 * next enqueue.
		 */
		if (sch->elements == 0 && neh->elements > 0) {
			si->V = MAX64(si->V, HEAP_TOP(neh)->key);
		}
		while (neh->elements > 0 &&
		    DN_KEY_LEQ(HEAP_TOP(neh)->key, si->V)) {
			q = HEAP_TOP(neh)->object;
			alg_fq = (struct wf2qp_queue *)q;
			heap_extract(neh, NULL);
			heap_insert(sch, alg_fq->F, q);
		}
		if (m) /* pkt found in previous iteration */
			break;
		/* ok we have at least one eligible pkt */
		q = HEAP_TOP(sch)->object;
		alg_fq = (struct wf2qp_queue *)q;
		m = dn_dequeue(q);
		heap_extract(sch, NULL); /* Remove queue from heap. */
		si->V += (uint64_t)(m->m_pkthdr.len) * si->inv_wsum;
		alg_fq->S = alg_fq->F;  /* Update start time. */
		if (q->mq.head == NULL) {	/* not backlogged any more. */
			heap_insert(&si->idle_heap, alg_fq->F, q);
		} else {			/* Still backlogged. */
			/* Update F, store in neh or sch */
			uint64_t len = q->mq.head->m_pkthdr.len;
			alg_fq->F += len * alg_fq->inv_w;
			if (DN_KEY_LEQ(alg_fq->S, si->V)) {
				heap_insert(sch, alg_fq->F, q);
			} else {
				heap_insert(neh, alg_fq->S, q);
			}
		}
	}
	return m;
}

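/*
 * Illustrative example of the virtual-time update in the dequeue loop:
 * with two backlogged flows of weights 1 and 3, wsum = 4 and
 * inv_wsum = ONE_FP / 4, so dequeuing a 1000-byte packet advances V by
 * 1000 * inv_wsum = 250 * ONE_FP, i.e. each unit of weight is credited
 * 250 bytes of service (250 + 750 = 1000 bytes overall).
 */
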
static int
wf2qp_new_sched(struct dn_sch_inst *_si)
{
	struct wf2qp_si *si = (struct wf2qp_si *)(_si + 1);
	int ofs = offsetof(struct wf2qp_queue, heap_pos);

	/* all heaps support extract from middle */
	if (heap_init(&si->idle_heap, 16, ofs) ||
	    heap_init(&si->sch_heap, 16, ofs) ||
	    heap_init(&si->ne_heap, 16, ofs)) {
		heap_free(&si->ne_heap);
		heap_free(&si->sch_heap);
		heap_free(&si->idle_heap);
		return ENOMEM;
	}
	return 0;
}

static int
wf2qp_free_sched(struct dn_sch_inst *_si)
{
	struct wf2qp_si *si = (struct wf2qp_si *)(_si + 1);

	heap_free(&si->sch_heap);
	heap_free(&si->ne_heap);
	heap_free(&si->idle_heap);

	return 0;
}

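/*
 * New flowset: bound the configured WF2Q+ weight (fs.par[0]) to the
 * [1, 100] range; a weight of at least 1 is needed for the
 * ONE_FP / weight computations above.
 */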
static int
wf2qp_new_fsk(struct dn_fsk *fs)
{
	ipdn_bound_var(&fs->fs.par[0], 1, 1, 100, "WF2Q+ weight");
	return 0;
}

static int
wf2qp_new_queue(struct dn_queue *_q)
{
	struct wf2qp_queue *q = (struct wf2qp_queue *)_q;

	_q->ni.oid.subtype = DN_SCHED_WF2QP;
	q->F = 0;	/* not strictly necessary */
	q->S = q->F + 1;    /* mark timestamp as invalid. */
	q->inv_w = ONE_FP / _q->fs->fs.par[0];
	if (_q->mq.head != NULL) {
		wf2qp_enqueue(_q->_si, _q, _q->mq.head);
	}
	return 0;
}

/*
 * Called when the infrastructure removes a queue (e.g. flowset
 * is reconfigured). Nothing to do if we did not 'own' the queue,
 * otherwise remove it from the right heap and adjust the sum
 * of weights.
 */
static int
wf2qp_free_queue(struct dn_queue *q)
{
	struct wf2qp_queue *alg_fq = (struct wf2qp_queue *)q;
	struct wf2qp_si *si = (struct wf2qp_si *)(q->_si + 1);

	if (alg_fq->S >= alg_fq->F + 1)
		return 0;	/* nothing to do, not in any heap */
	si->wsum -= q->fs->fs.par[0];
	if (si->wsum > 0)
		si->inv_wsum = ONE_FP / si->wsum;

	/* extract from the heap. XXX TODO we may need to adjust V
	 * to make sure the invariants hold.
	 */
	if (q->mq.head == NULL) {
		heap_extract(&si->idle_heap, q);
	} else if (DN_KEY_LT(si->V, alg_fq->S)) {
		heap_extract(&si->ne_heap, q);
	} else {
		heap_extract(&si->sch_heap, q);
	}
	return 0;
}

/*
 * WF2Q+ scheduler descriptor
 * contains the type of the scheduler, the name, the size of the
 * structures and function pointers.
 */
static struct dn_alg wf2qp_desc = {
	_SI( .type = ) DN_SCHED_WF2QP,
	_SI( .name = ) "WF2Q+",
	_SI( .flags = ) DN_MULTIQUEUE,

	/* we need extra space in the si and the queue */
	_SI( .schk_datalen = ) 0,
	_SI( .si_datalen = ) sizeof(struct wf2qp_si),
	_SI( .q_datalen = ) sizeof(struct wf2qp_queue) -
				sizeof(struct dn_queue),

	_SI( .enqueue = ) wf2qp_enqueue,
	_SI( .dequeue = ) wf2qp_dequeue,

	_SI( .config = )  NULL,
	_SI( .destroy = )  NULL,
	_SI( .new_sched = ) wf2qp_new_sched,
	_SI( .free_sched = ) wf2qp_free_sched,

	_SI( .new_fsk = ) wf2qp_new_fsk,
	_SI( .free_fsk = )  NULL,

	_SI( .new_queue = ) wf2qp_new_queue,
	_SI( .free_queue = ) wf2qp_free_queue,
};


DECLARE_DNSCHED_MODULE(dn_wf2qp, &wf2qp_desc);