altq_rio.c revision 263086
1/*	$FreeBSD: stable/10/sys/contrib/altq/altq/altq_rio.c 263086 2014-03-12 10:45:58Z glebius $	*/
2/*	$KAME: altq_rio.c,v 1.17 2003/07/10 12:07:49 kjc Exp $	*/
3
4/*
5 * Copyright (C) 1998-2003
6 *	Sony Computer Science Laboratories Inc.  All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY SONY CSL AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED.  IN NO EVENT SHALL SONY CSL OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29/*
30 * Copyright (c) 1990-1994 Regents of the University of California.
31 * All rights reserved.
32 *
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
35 * are met:
36 * 1. Redistributions of source code must retain the above copyright
37 *    notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 *    notice, this list of conditions and the following disclaimer in the
40 *    documentation and/or other materials provided with the distribution.
41 * 3. All advertising materials mentioning features or use of this software
42 *    must display the following acknowledgement:
43 *	This product includes software developed by the Computer Systems
44 *	Engineering Group at Lawrence Berkeley Laboratory.
45 * 4. Neither the name of the University nor of the Laboratory may be used
46 *    to endorse or promote products derived from this software without
47 *    specific prior written permission.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
50 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
53 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59 * SUCH DAMAGE.
60 */
61
62#if defined(__FreeBSD__) || defined(__NetBSD__)
63#include "opt_altq.h"
64#include "opt_inet.h"
65#ifdef __FreeBSD__
66#include "opt_inet6.h"
67#endif
68#endif /* __FreeBSD__ || __NetBSD__ */
69#ifdef ALTQ_RIO	/* rio is enabled by ALTQ_RIO option in opt_altq.h */
70
71#include <sys/param.h>
72#include <sys/malloc.h>
73#include <sys/mbuf.h>
74#include <sys/socket.h>
75#include <sys/systm.h>
76#include <sys/errno.h>
77#if 1 /* ALTQ3_COMPAT */
78#include <sys/proc.h>
79#include <sys/sockio.h>
80#include <sys/kernel.h>
81#endif
82
83#include <net/if.h>
84#include <net/if_var.h>
85
86#include <netinet/in.h>
87#include <netinet/in_systm.h>
88#include <netinet/ip.h>
89#ifdef INET6
90#include <netinet/ip6.h>
91#endif
92
93#include <netpfil/pf/pf.h>
94#include <netpfil/pf/pf_altq.h>
95#include <altq/altq.h>
96#include <altq/altq_cdnr.h>
97#include <altq/altq_red.h>
98#include <altq/altq_rio.h>
99#ifdef ALTQ3_COMPAT
100#include <altq/altq_conf.h>
101#endif
102
103/*
104 * RIO: RED with IN/OUT bit
105 *   described in
106 *	"Explicit Allocation of Best Effort Packet Delivery Service"
107 *	David D. Clark and Wenjia Fang, MIT Lab for Computer Science
108 *	http://diffserv.lcs.mit.edu/Papers/exp-alloc-ddc-wf.{ps,pdf}
109 *
110 * this implementation is extended to support more than 2 drop precedence
111 * values as described in RFC2597 (Assured Forwarding PHB Group).
112 *
113 */
114/*
115 * AF DS (differentiated service) codepoints.
116 * (classes can be mapped to CBQ or H-FSC classes.)
117 *
118 *      0   1   2   3   4   5   6   7
119 *    +---+---+---+---+---+---+---+---+
120 *    |   CLASS   |DropPre| 0 |  CU   |
121 *    +---+---+---+---+---+---+---+---+
122 *
123 *    class 1: 001
124 *    class 2: 010
125 *    class 3: 011
126 *    class 4: 100
127 *
128 *    low drop prec:    01
129 *    medium drop prec: 10
 *    high drop prec:   11
131 */
132
133/* normal red parameters */
134#define	W_WEIGHT	512	/* inverse of weight of EWMA (511/512) */
135				/* q_weight = 0.00195 */
136
137/* red parameters for a slow link */
138#define	W_WEIGHT_1	128	/* inverse of weight of EWMA (127/128) */
139				/* q_weight = 0.0078125 */
140
141/* red parameters for a very slow link (e.g., dialup) */
142#define	W_WEIGHT_2	64	/* inverse of weight of EWMA (63/64) */
143				/* q_weight = 0.015625 */
144
145/* fixed-point uses 12-bit decimal places */
146#define	FP_SHIFT	12	/* fixed-point shift */
147
148/* red parameters for drop probability */
149#define	INV_P_MAX	10	/* inverse of max drop probability */
150#define	TH_MIN		 5	/* min threshold */
151#define	TH_MAX		15	/* max threshold */
152
#define	RIO_LIMIT	60	/* default max queue length */
154#define	RIO_STATS		/* collect statistics */
155
/*
 * TV_DELTA(a, b, delta): store in "delta" the elapsed time in
 * microseconds from timeval "b" to timeval "a" (i.e., a - b).
 * The result is clamped: a negative seconds difference (clock stepped
 * backwards) or a gap of more than 60 seconds yields 60000000.
 * Gaps of 1..4 seconds are accumulated by repeated addition to avoid
 * a multiply; 5..60 seconds use a single multiply.
 */
#define	TV_DELTA(a, b, delta) {					\
	register int	xxs;					\
								\
	delta = (a)->tv_usec - (b)->tv_usec; 			\
	if ((xxs = (a)->tv_sec - (b)->tv_sec) != 0) { 		\
		if (xxs < 0) { 					\
			delta = 60000000;			\
		} else if (xxs > 4)  {				\
			if (xxs > 60)				\
				delta = 60000000;		\
			else					\
				delta += xxs * 1000000;		\
		} else while (xxs > 0) {			\
			delta += 1000000;			\
			xxs--;					\
		}						\
	}							\
}
174
#ifdef ALTQ3_COMPAT
/* rio_list keeps all rio_queue_t's allocated. */
static rio_queue_t *rio_list = NULL;
#endif
/*
 * default rio parameter values, one entry per drop precedence level
 * (index 0 = low drop precedence).  Lower drop precedence gets higher
 * thresholds so its packets survive longer under congestion; the max
 * drop probability (1/inv_pmax) is the same for all levels.
 */
static struct redparams default_rio_params[RIO_NDROPPREC] = {
  /* th_min,		 th_max,     inv_pmax */
  { TH_MAX * 2 + TH_MIN, TH_MAX * 3, INV_P_MAX }, /* low drop precedence */
  { TH_MAX + TH_MIN,	 TH_MAX * 2, INV_P_MAX }, /* medium drop precedence */
  { TH_MIN,		 TH_MAX,     INV_P_MAX }  /* high drop precedence */
};
186
187/* internal function prototypes */
188static int dscp2index(u_int8_t);
189#ifdef ALTQ3_COMPAT
190static int rio_enqueue(struct ifaltq *, struct mbuf *, struct altq_pktattr *);
191static struct mbuf *rio_dequeue(struct ifaltq *, int);
192static int rio_request(struct ifaltq *, int, void *);
193static int rio_detach(rio_queue_t *);
194
195/*
196 * rio device interface
197 */
198altqdev_decl(rio);
199
200#endif /* ALTQ3_COMPAT */
201
202rio_t *
203rio_alloc(int weight, struct redparams *params, int flags, int pkttime)
204{
205	rio_t	*rp;
206	int	 w, i;
207	int	 npkts_per_sec;
208
209	rp = malloc(sizeof(rio_t), M_DEVBUF, M_NOWAIT | M_ZERO);
210	if (rp == NULL)
211		return (NULL);
212
213	rp->rio_flags = flags;
214	if (pkttime == 0)
215		/* default packet time: 1000 bytes / 10Mbps * 8 * 1000000 */
216		rp->rio_pkttime = 800;
217	else
218		rp->rio_pkttime = pkttime;
219
220	if (weight != 0)
221		rp->rio_weight = weight;
222	else {
223		/* use default */
224		rp->rio_weight = W_WEIGHT;
225
226		/* when the link is very slow, adjust red parameters */
227		npkts_per_sec = 1000000 / rp->rio_pkttime;
228		if (npkts_per_sec < 50) {
229			/* up to about 400Kbps */
230			rp->rio_weight = W_WEIGHT_2;
231		} else if (npkts_per_sec < 300) {
232			/* up to about 2.4Mbps */
233			rp->rio_weight = W_WEIGHT_1;
234		}
235	}
236
237	/* calculate wshift.  weight must be power of 2 */
238	w = rp->rio_weight;
239	for (i = 0; w > 1; i++)
240		w = w >> 1;
241	rp->rio_wshift = i;
242	w = 1 << rp->rio_wshift;
243	if (w != rp->rio_weight) {
244		printf("invalid weight value %d for red! use %d\n",
245		       rp->rio_weight, w);
246		rp->rio_weight = w;
247	}
248
249	/* allocate weight table */
250	rp->rio_wtab = wtab_alloc(rp->rio_weight);
251
252	for (i = 0; i < RIO_NDROPPREC; i++) {
253		struct dropprec_state *prec = &rp->rio_precstate[i];
254
255		prec->avg = 0;
256		prec->idle = 1;
257
258		if (params == NULL || params[i].inv_pmax == 0)
259			prec->inv_pmax = default_rio_params[i].inv_pmax;
260		else
261			prec->inv_pmax = params[i].inv_pmax;
262		if (params == NULL || params[i].th_min == 0)
263			prec->th_min = default_rio_params[i].th_min;
264		else
265			prec->th_min = params[i].th_min;
266		if (params == NULL || params[i].th_max == 0)
267			prec->th_max = default_rio_params[i].th_max;
268		else
269			prec->th_max = params[i].th_max;
270
271		/*
272		 * th_min_s and th_max_s are scaled versions of th_min
273		 * and th_max to be compared with avg.
274		 */
275		prec->th_min_s = prec->th_min << (rp->rio_wshift + FP_SHIFT);
276		prec->th_max_s = prec->th_max << (rp->rio_wshift + FP_SHIFT);
277
278		/*
279		 * precompute probability denominator
280		 *  probd = (2 * (TH_MAX-TH_MIN) / pmax) in fixed-point
281		 */
282		prec->probd = (2 * (prec->th_max - prec->th_min)
283			       * prec->inv_pmax) << FP_SHIFT;
284
285		microtime(&prec->last);
286	}
287
288	return (rp);
289}
290
/*
 * Free a rio state allocated by rio_alloc(): release the weight table
 * (used by pow_w() in the estimator) first, then the rio_t itself.
 */
void
rio_destroy(rio_t *rp)
{
	wtab_destroy(rp->rio_wtab);
	free(rp, M_DEVBUF);
}
297
298void
299rio_getstats(rio_t *rp, struct redstats *sp)
300{
301	int	i;
302
303	for (i = 0; i < RIO_NDROPPREC; i++) {
304		bcopy(&rp->q_stats[i], sp, sizeof(struct redstats));
305		sp->q_avg = rp->rio_precstate[i].avg >> rp->rio_wshift;
306		sp++;
307	}
308}
309
#if (RIO_NDROPPREC == 3)
/*
 * internally, a drop precedence value is converted to an index
 * starting from 0: drop-precedence bits 01/10/11 map to 0/1/2,
 * and a zero drop-precedence field also maps to index 0.
 */
static int
dscp2index(u_int8_t dscp)
{
	int	bits = dscp & AF_DROPPRECMASK;

	return ((bits == 0) ? 0 : (bits >> 3) - 1);
}
#endif
325
#if 1
/*
 * kludge: when a packet is dequeued, we need to know its drop precedence
 * in order to keep the queue length of each drop precedence.
 * use m_pkthdr.rcvif to pass this info.
 * (the mbuf is inside the queue between SET and GET, so rcvif is not
 * otherwise needed during that window.)
 */
/* stash the precedence index in the rcvif pointer field */
#define	RIOM_SET_PRECINDEX(m, idx)	\
	do { (m)->m_pkthdr.rcvif = (void *)((long)(idx)); } while (0)
/*
 * retrieve the stashed index and clear rcvif; uses a GCC/Clang
 * statement expression so the macro yields a value.
 */
#define	RIOM_GET_PRECINDEX(m)	\
	({ long idx; idx = (long)((m)->m_pkthdr.rcvif); \
	(m)->m_pkthdr.rcvif = NULL; idx; })
#endif
338
/*
 * Enqueue a packet on q, applying the RIO early-drop test at the
 * drop-precedence level encoded in the packet's DS field.
 * Returns 0 if the packet was queued, -1 if it was dropped (the mbuf
 * is freed here in that case).
 */
int
rio_addq(rio_t *rp, class_queue_t *q, struct mbuf *m,
    struct altq_pktattr *pktattr)
{
	int			 avg, droptype;
	u_int8_t		 dsfield, odsfield;
	int			 dpindex, i, n, t;
	struct timeval		 now;
	struct dropprec_state	*prec;

	dsfield = odsfield = read_dsfield(m, pktattr);
	dpindex = dscp2index(dsfield);

	/*
	 * update avg of the precedence states whose drop precedence
	 * is larger than or equal to the drop precedence of the packet
	 */
	now.tv_sec = 0;		/* lazily call microtime() only if some level was idle */
	for (i = dpindex; i < RIO_NDROPPREC; i++) {
		prec = &rp->rio_precstate[i];
		avg = prec->avg;
		if (prec->idle) {
			/* decay avg for the time this level sat idle */
			prec->idle = 0;
			if (now.tv_sec == 0)
				microtime(&now);
			t = (now.tv_sec - prec->last.tv_sec);
			if (t > 60)
				avg = 0;	/* idle long enough: reset */
			else {
				t = t * 1000000 +
					(now.tv_usec - prec->last.tv_usec);
				n = t / rp->rio_pkttime;	/* packets that could have been sent */
				/* calculate (avg = (1 - Wq)^n * avg) */
				if (n > 0)
					avg = (avg >> FP_SHIFT) *
						pow_w(rp->rio_wtab, n);
			}
		}

		/* run estimator. (avg is scaled by WEIGHT in fixed-point) */
		avg += (prec->qlen << FP_SHIFT) - (avg >> rp->rio_wshift);
		prec->avg = avg;		/* save the new value */
		/*
		 * count keeps a tally of arriving traffic that has not
		 * been dropped.
		 */
		prec->count++;
	}

	/* the drop decision is made against the packet's own level only */
	prec = &rp->rio_precstate[dpindex];
	avg = prec->avg;

	/* see if we drop early */
	droptype = DTYPE_NODROP;
	if (avg >= prec->th_min_s && prec->qlen > 1) {
		if (avg >= prec->th_max_s) {
			/* avg >= th_max: forced drop */
			droptype = DTYPE_FORCED;
		} else if (prec->old == 0) {
			/* first exceeds th_min */
			prec->count = 1;
			prec->old = 1;
		} else if (drop_early((avg - prec->th_min_s) >> rp->rio_wshift,
				      prec->probd, prec->count)) {
			/* unforced drop by red */
			droptype = DTYPE_EARLY;
		}
	} else {
		/* avg < th_min */
		prec->old = 0;
	}

	/*
	 * if the queue length hits the hard limit, it's a forced drop.
	 */
	if (droptype == DTYPE_NODROP && qlen(q) >= qlimit(q))
		droptype = DTYPE_FORCED;

	if (droptype != DTYPE_NODROP) {
		/* always drop incoming packet (as opposed to randomdrop) */
		for (i = dpindex; i < RIO_NDROPPREC; i++)
			rp->rio_precstate[i].count = 0;
#ifdef RIO_STATS
		if (droptype == DTYPE_EARLY)
			rp->q_stats[dpindex].drop_unforced++;
		else
			rp->q_stats[dpindex].drop_forced++;
		PKTCNTR_ADD(&rp->q_stats[dpindex].drop_cnt, m_pktlen(m));
#endif
		m_freem(m);
		return (-1);
	}

	/* accepted: this packet occupies its level and all higher-drop levels */
	for (i = dpindex; i < RIO_NDROPPREC; i++)
		rp->rio_precstate[i].qlen++;

	/* save drop precedence index in mbuf hdr */
	RIOM_SET_PRECINDEX(m, dpindex);

	if (rp->rio_flags & RIOF_CLEARDSCP)
		dsfield &= ~DSCP_MASK;

	/* rewrite the DS field only when it actually changed */
	if (dsfield != odsfield)
		write_dsfield(m, pktattr, dsfield);

	_addq(q, m);

#ifdef RIO_STATS
	PKTCNTR_ADD(&rp->q_stats[dpindex].xmit_cnt, m_pktlen(m));
#endif
	return (0);
}
451
452struct mbuf *
453rio_getq(rio_t *rp, class_queue_t *q)
454{
455	struct mbuf	*m;
456	int		 dpindex, i;
457
458	if ((m = _getq(q)) == NULL)
459		return NULL;
460
461	dpindex = RIOM_GET_PRECINDEX(m);
462	for (i = dpindex; i < RIO_NDROPPREC; i++) {
463		if (--rp->rio_precstate[i].qlen == 0) {
464			if (rp->rio_precstate[i].idle == 0) {
465				rp->rio_precstate[i].idle = 1;
466				microtime(&rp->rio_precstate[i].last);
467			}
468		}
469	}
470	return (m);
471}
472
473#ifdef ALTQ3_COMPAT
/*
 * ALTQ3 character-device open entry point for rio.
 * Nothing to do here; all state is created when a queueing scheme is
 * attached via the RIO_IF_ATTACH ioctl.
 */
int
rioopen(dev, flag, fmt, p)
	dev_t dev;
	int flag, fmt;
#if (__FreeBSD_version > 500000)
	struct thread *p;
#else
	struct proc *p;
#endif
{
	/* everything will be done when the queueing scheme is attached. */
	return 0;
}
487
488int
489rioclose(dev, flag, fmt, p)
490	dev_t dev;
491	int flag, fmt;
492#if (__FreeBSD_version > 500000)
493	struct thread *p;
494#else
495	struct proc *p;
496#endif
497{
498	rio_queue_t *rqp;
499	int err, error = 0;
500
501	while ((rqp = rio_list) != NULL) {
502		/* destroy all */
503		err = rio_detach(rqp);
504		if (err != 0 && error == 0)
505			error = err;
506	}
507
508	return error;
509}
510
511int
512rioioctl(dev, cmd, addr, flag, p)
513	dev_t dev;
514	ioctlcmd_t cmd;
515	caddr_t addr;
516	int flag;
517#if (__FreeBSD_version > 500000)
518	struct thread *p;
519#else
520	struct proc *p;
521#endif
522{
523	rio_queue_t *rqp;
524	struct rio_interface *ifacep;
525	struct ifnet *ifp;
526	int	error = 0;
527
528	/* check super-user privilege */
529	switch (cmd) {
530	case RIO_GETSTATS:
531		break;
532	default:
533#if (__FreeBSD_version > 700000)
534		if ((error = priv_check(p, PRIV_ALTQ_MANAGE)) != 0)
535			return (error);
536#elsif (__FreeBSD_version > 400000)
537		if ((error = suser(p)) != 0)
538			return (error);
539#else
540		if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
541			return (error);
542#endif
543		break;
544	}
545
546	switch (cmd) {
547
548	case RIO_ENABLE:
549		ifacep = (struct rio_interface *)addr;
550		if ((rqp = altq_lookup(ifacep->rio_ifname, ALTQT_RIO)) == NULL) {
551			error = EBADF;
552			break;
553		}
554		error = altq_enable(rqp->rq_ifq);
555		break;
556
557	case RIO_DISABLE:
558		ifacep = (struct rio_interface *)addr;
559		if ((rqp = altq_lookup(ifacep->rio_ifname, ALTQT_RIO)) == NULL) {
560			error = EBADF;
561			break;
562		}
563		error = altq_disable(rqp->rq_ifq);
564		break;
565
566	case RIO_IF_ATTACH:
567		ifp = ifunit(((struct rio_interface *)addr)->rio_ifname);
568		if (ifp == NULL) {
569			error = ENXIO;
570			break;
571		}
572
573		/* allocate and initialize rio_queue_t */
574		rqp = malloc(sizeof(rio_queue_t), M_DEVBUF, M_WAITOK);
575		if (rqp == NULL) {
576			error = ENOMEM;
577			break;
578		}
579		bzero(rqp, sizeof(rio_queue_t));
580
581		rqp->rq_q = malloc(sizeof(class_queue_t),
582		       M_DEVBUF, M_WAITOK);
583		if (rqp->rq_q == NULL) {
584			free(rqp, M_DEVBUF);
585			error = ENOMEM;
586			break;
587		}
588		bzero(rqp->rq_q, sizeof(class_queue_t));
589
590		rqp->rq_rio = rio_alloc(0, NULL, 0, 0);
591		if (rqp->rq_rio == NULL) {
592			free(rqp->rq_q, M_DEVBUF);
593			free(rqp, M_DEVBUF);
594			error = ENOMEM;
595			break;
596		}
597
598		rqp->rq_ifq = &ifp->if_snd;
599		qtail(rqp->rq_q) = NULL;
600		qlen(rqp->rq_q) = 0;
601		qlimit(rqp->rq_q) = RIO_LIMIT;
602		qtype(rqp->rq_q) = Q_RIO;
603
604		/*
605		 * set RIO to this ifnet structure.
606		 */
607		error = altq_attach(rqp->rq_ifq, ALTQT_RIO, rqp,
608				    rio_enqueue, rio_dequeue, rio_request,
609				    NULL, NULL);
610		if (error) {
611			rio_destroy(rqp->rq_rio);
612			free(rqp->rq_q, M_DEVBUF);
613			free(rqp, M_DEVBUF);
614			break;
615		}
616
617		/* add this state to the rio list */
618		rqp->rq_next = rio_list;
619		rio_list = rqp;
620		break;
621
622	case RIO_IF_DETACH:
623		ifacep = (struct rio_interface *)addr;
624		if ((rqp = altq_lookup(ifacep->rio_ifname, ALTQT_RIO)) == NULL) {
625			error = EBADF;
626			break;
627		}
628		error = rio_detach(rqp);
629		break;
630
631	case RIO_GETSTATS:
632		do {
633			struct rio_stats *q_stats;
634			rio_t *rp;
635			int i;
636
637			q_stats = (struct rio_stats *)addr;
638			if ((rqp = altq_lookup(q_stats->iface.rio_ifname,
639					       ALTQT_RIO)) == NULL) {
640				error = EBADF;
641				break;
642			}
643
644			rp = rqp->rq_rio;
645
646			q_stats->q_limit = qlimit(rqp->rq_q);
647			q_stats->weight	= rp->rio_weight;
648			q_stats->flags = rp->rio_flags;
649
650			for (i = 0; i < RIO_NDROPPREC; i++) {
651				q_stats->q_len[i] = rp->rio_precstate[i].qlen;
652				bcopy(&rp->q_stats[i], &q_stats->q_stats[i],
653				      sizeof(struct redstats));
654				q_stats->q_stats[i].q_avg =
655				    rp->rio_precstate[i].avg >> rp->rio_wshift;
656
657				q_stats->q_params[i].inv_pmax
658					= rp->rio_precstate[i].inv_pmax;
659				q_stats->q_params[i].th_min
660					= rp->rio_precstate[i].th_min;
661				q_stats->q_params[i].th_max
662					= rp->rio_precstate[i].th_max;
663			}
664		} while (/*CONSTCOND*/ 0);
665		break;
666
667	case RIO_CONFIG:
668		do {
669			struct rio_conf *fc;
670			rio_t	*new;
671			int s, limit, i;
672
673			fc = (struct rio_conf *)addr;
674			if ((rqp = altq_lookup(fc->iface.rio_ifname,
675					       ALTQT_RIO)) == NULL) {
676				error = EBADF;
677				break;
678			}
679
680			new = rio_alloc(fc->rio_weight, &fc->q_params[0],
681					fc->rio_flags, fc->rio_pkttime);
682			if (new == NULL) {
683				error = ENOMEM;
684				break;
685			}
686
687#ifdef __NetBSD__
688			s = splnet();
689#else
690			s = splimp();
691#endif
692			_flushq(rqp->rq_q);
693			limit = fc->rio_limit;
694			if (limit < fc->q_params[RIO_NDROPPREC-1].th_max)
695				limit = fc->q_params[RIO_NDROPPREC-1].th_max;
696			qlimit(rqp->rq_q) = limit;
697
698			rio_destroy(rqp->rq_rio);
699			rqp->rq_rio = new;
700
701			splx(s);
702
703			/* write back new values */
704			fc->rio_limit = limit;
705			for (i = 0; i < RIO_NDROPPREC; i++) {
706				fc->q_params[i].inv_pmax =
707					rqp->rq_rio->rio_precstate[i].inv_pmax;
708				fc->q_params[i].th_min =
709					rqp->rq_rio->rio_precstate[i].th_min;
710				fc->q_params[i].th_max =
711					rqp->rq_rio->rio_precstate[i].th_max;
712			}
713		} while (/*CONSTCOND*/ 0);
714		break;
715
716	case RIO_SETDEFAULTS:
717		do {
718			struct redparams *rp;
719			int i;
720
721			rp = (struct redparams *)addr;
722			for (i = 0; i < RIO_NDROPPREC; i++)
723				default_rio_params[i] = rp[i];
724		} while (/*CONSTCOND*/ 0);
725		break;
726
727	default:
728		error = EINVAL;
729		break;
730	}
731
732	return error;
733}
734
735static int
736rio_detach(rqp)
737	rio_queue_t *rqp;
738{
739	rio_queue_t *tmp;
740	int error = 0;
741
742	if (ALTQ_IS_ENABLED(rqp->rq_ifq))
743		altq_disable(rqp->rq_ifq);
744
745	if ((error = altq_detach(rqp->rq_ifq)))
746		return (error);
747
748	if (rio_list == rqp)
749		rio_list = rqp->rq_next;
750	else {
751		for (tmp = rio_list; tmp != NULL; tmp = tmp->rq_next)
752			if (tmp->rq_next == rqp) {
753				tmp->rq_next = rqp->rq_next;
754				break;
755			}
756		if (tmp == NULL)
757			printf("rio_detach: no state found in rio_list!\n");
758	}
759
760	rio_destroy(rqp->rq_rio);
761	free(rqp->rq_q, M_DEVBUF);
762	free(rqp, M_DEVBUF);
763	return (error);
764}
765
766/*
767 * rio support routines
768 */
769static int
770rio_request(ifq, req, arg)
771	struct ifaltq *ifq;
772	int req;
773	void *arg;
774{
775	rio_queue_t *rqp = (rio_queue_t *)ifq->altq_disc;
776
777	IFQ_LOCK_ASSERT(ifq);
778
779	switch (req) {
780	case ALTRQ_PURGE:
781		_flushq(rqp->rq_q);
782		if (ALTQ_IS_ENABLED(ifq))
783			ifq->ifq_len = 0;
784		break;
785	}
786	return (0);
787}
788
789/*
790 * enqueue routine:
791 *
792 *	returns: 0 when successfully queued.
793 *		 ENOBUFS when drop occurs.
794 */
795static int
796rio_enqueue(ifq, m, pktattr)
797	struct ifaltq *ifq;
798	struct mbuf *m;
799	struct altq_pktattr *pktattr;
800{
801	rio_queue_t *rqp = (rio_queue_t *)ifq->altq_disc;
802	int error = 0;
803
804	IFQ_LOCK_ASSERT(ifq);
805
806	if (rio_addq(rqp->rq_rio, rqp->rq_q, m, pktattr) == 0)
807		ifq->ifq_len++;
808	else
809		error = ENOBUFS;
810	return error;
811}
812
813/*
814 * dequeue routine:
815 *	must be called in splimp.
816 *
817 *	returns: mbuf dequeued.
818 *		 NULL when no packet is available in the queue.
819 */
820
821static struct mbuf *
822rio_dequeue(ifq, op)
823	struct ifaltq *ifq;
824	int op;
825{
826	rio_queue_t *rqp = (rio_queue_t *)ifq->altq_disc;
827	struct mbuf *m = NULL;
828
829	IFQ_LOCK_ASSERT(ifq);
830
831	if (op == ALTDQ_POLL)
832		return qhead(rqp->rq_q);
833
834	m = rio_getq(rqp->rq_rio, rqp->rq_q);
835	if (m != NULL)
836		ifq->ifq_len--;
837	return m;
838}
839
#ifdef KLD_MODULE

/* character-device switch for the ALTQ3 /dev interface */
static struct altqsw rio_sw =
	{"rio", rioopen, rioclose, rioioctl};

ALTQ_MODULE(altq_rio, ALTQT_RIO, &rio_sw);
MODULE_VERSION(altq_rio, 1);
/* rio relies on red's helpers (wtab_alloc/pow_w/drop_early/...) */
MODULE_DEPEND(altq_rio, altq_red, 1, 1, 1);

#endif /* KLD_MODULE */
850#endif /* ALTQ3_COMPAT */
851
852#endif /* ALTQ_RIO */
853