1/*	$FreeBSD: stable/10/sys/contrib/altq/altq/altq_rmclass.c 263086 2014-03-12 10:45:58Z glebius $	*/
2/*	$KAME: altq_rmclass.c,v 1.19 2005/04/13 03:44:25 suz Exp $	*/
3
4/*
5 * Copyright (c) 1991-1997 Regents of the University of California.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 *    must display the following acknowledgement:
18 *      This product includes software developed by the Network Research
19 *      Group at Lawrence Berkeley Laboratory.
20 * 4. Neither the name of the University nor of the Laboratory may be used
21 *    to endorse or promote products derived from this software without
22 *    specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * LBL code modified by speer@eng.sun.com, May 1997.
37 * For questions and/or comments, please send mail to cbq@ee.lbl.gov
38 *
39 * @(#)rm_class.c  1.48     97/12/05 SMI
40 */
41#if defined(__FreeBSD__) || defined(__NetBSD__)
42#include "opt_altq.h"
43#include "opt_inet.h"
44#ifdef __FreeBSD__
45#include "opt_inet6.h"
46#endif
47#endif /* __FreeBSD__ || __NetBSD__ */
48#ifdef ALTQ_CBQ	/* cbq is enabled by ALTQ_CBQ option in opt_altq.h */
49
50#include <sys/param.h>
51#include <sys/malloc.h>
52#include <sys/mbuf.h>
53#include <sys/socket.h>
54#include <sys/systm.h>
55#include <sys/errno.h>
56#include <sys/time.h>
57#ifdef ALTQ3_COMPAT
58#include <sys/kernel.h>
59#endif
60
61#include <net/if.h>
62#include <net/if_var.h>
63#ifdef ALTQ3_COMPAT
64#include <netinet/in.h>
65#include <netinet/in_systm.h>
66#include <netinet/ip.h>
67#endif
68
69#include <altq/if_altq.h>
70#include <altq/altq.h>
71#include <altq/altq_rmclass.h>
72#include <altq/altq_rmclass_debug.h>
73#include <altq/altq_red.h>
74#include <altq/altq_rio.h>
75
76/*
77 * Local Macros
78 */
79
80#define	reset_cutoff(ifd)	{ ifd->cutoff_ = RM_MAXDEPTH; }
81
82/*
83 * Local routines.
84 */
85
86static int	rmc_satisfied(struct rm_class *, struct timeval *);
87static void	rmc_wrr_set_weights(struct rm_ifdat *);
88static void	rmc_depth_compute(struct rm_class *);
89static void	rmc_depth_recompute(rm_class_t *);
90
91static mbuf_t	*_rmc_wrr_dequeue_next(struct rm_ifdat *, int);
92static mbuf_t	*_rmc_prr_dequeue_next(struct rm_ifdat *, int);
93
94static int	_rmc_addq(rm_class_t *, mbuf_t *);
95static void	_rmc_dropq(rm_class_t *);
96static mbuf_t	*_rmc_getq(rm_class_t *);
97static mbuf_t	*_rmc_pollq(rm_class_t *);
98
99static int	rmc_under_limit(struct rm_class *, struct timeval *);
100static void	rmc_tl_satisfied(struct rm_ifdat *, struct timeval *);
101static void	rmc_drop_action(struct rm_class *);
102static void	rmc_restart(struct rm_class *);
103static void	rmc_root_overlimit(struct rm_class *, struct rm_class *);
104
105#define	BORROW_OFFTIME
106/*
107 * BORROW_OFFTIME (experimental):
108 * borrow the offtime of the class borrowed from.
109 * the reason is that when its own offtime is set, the class is unable
110 * to borrow much, especially when cutoff is taking effect.
111 * but when the borrowed class is overloaded (avgidle is close to minidle),
112 * use the borrowing class's offtime to avoid overload.
113 */
114#define	ADJUST_CUTOFF
115/*
116 * ADJUST_CUTOFF (experimental):
117 * if no underlimit class is found due to cutoff, increase cutoff and
118 * retry the scheduling loop.
119 * also, don't invoke delay_actions while cutoff is taking effect,
120 * since a sleeping class won't have a chance to be scheduled in the
121 * next loop.
122 *
123 * now the heuristics for setting the top-level variable (cutoff_) become:
124 *	1. if a packet arrives for a not-overlimit class, set cutoff
125 *	   to the depth of the class.
126 *	2. if cutoff is i, and a packet arrives for an overlimit class
127 *	   with an underlimit ancestor at a lower level than i (say j),
128 *	   then set cutoff to j.
129 *	3. at scheduling a packet, if there is no underlimit class
130 *	   due to the current cutoff level, increase cutoff by 1 and
131 *	   then try to schedule again.
132 */
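/*
 * Illustration of the cutoff heuristics above (not from the original
 * sources; the class names and depths are hypothetical).  Consider a
 * two-level tree: root (depth 2), agency A (depth 1), leaves L1/L2
 * (depth 0).  Initially cutoff_ = RM_MAXDEPTH, so any class may borrow.
 *	- a packet arrives for L1 while L1 is underlimit
 *	  => rule 1: cutoff_ = 0, borrowing is disabled.
 *	- a packet arrives for L2 while L2 is overlimit but A is
 *	  underlimit => rule 2: cutoff_ = 1, so L2 may borrow from A
 *	  but not from root.
 *	- the scheduler finds no underlimit class at cutoff_ = 1
 *	  => rule 3: cutoff_ is bumped to 2 and the scan is retried.
 */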
133
134/*
135 * rm_class_t *
136 * rmc_newclass(...) - Create a new resource management class at priority
137 * 'pri' on the interface given by 'ifd'.
138 *
139 * nsecPerByte  is the data rate of the interface in nanoseconds/byte.
140 *              E.g., 800 for a 10Mb/s ethernet.  If the class gets less
141 *              than 100% of the bandwidth, this number should be the
142 *              'effective' rate for the class.  Let f be the
143 *              bandwidth fraction allocated to this class, and let
144 *              nsPerByte be the data rate of the output link in
145 *              nanoseconds/byte.  Then nsecPerByte is set to
146 *              nsPerByte / f.  E.g., 1600 (= 800 / .5)
147 *              for a class that gets 50% of an ethernet's bandwidth.
148 *
149 * action       the routine to call when the class is over limit.
150 *
151 * maxq         max allowable queue size for class (in packets).
152 *
153 * parent       parent class pointer.
154 *
155 * borrow       class to borrow from (should be either 'parent' or null).
156 *
157 * maxidle      max value allowed for class 'idle' time estimate (this
158 *              parameter determines how large an initial burst of packets
 159 *              can be before overlimit action is invoked).
160 *
161 * offtime      how long 'delay' action will delay when class goes over
162 *              limit (this parameter determines the steady-state burst
163 *              size when a class is running over its limit).
164 *
165 * Maxidle and offtime have to be computed from the following:  If the
166 * average packet size is s, the bandwidth fraction allocated to this
167 * class is f, we want to allow b packet bursts, and the gain of the
168 * averaging filter is g (= 1 - 2^(-RM_FILTER_GAIN)), then:
169 *
170 *   ptime = s * nsPerByte * (1 - f) / f
171 *   maxidle = ptime * (1 - g^b) / g^b
172 *   minidle = -ptime * (1 / (f - 1))
 173 *   offtime = ptime * (1 + 1/(1 - g) * (1 - g^(b - 1)) / g^(b - 1))
174 *
175 * Operationally, it's convenient to specify maxidle & offtime in units
176 * independent of the link bandwidth so the maxidle & offtime passed to
177 * this routine are the above values multiplied by 8*f/(1000*nsPerByte).
178 * (The constant factor is a scale factor needed to make the parameters
179 * integers.  This scaling also means that the 'unscaled' values of
180 * maxidle*nsecPerByte/8 and offtime*nsecPerByte/8 will be in microseconds,
181 * not nanoseconds.)  Also note that the 'idle' filter computation keeps
182 * an estimate scaled upward by 2^RM_FILTER_GAIN so the passed value of
183 * maxidle also must be scaled upward by this value.  Thus, the passed
184 * values for maxidle and offtime can be computed as follows:
185 *
186 * maxidle = maxidle * 2^RM_FILTER_GAIN * 8 / (1000 * nsecPerByte)
187 * offtime = offtime * 8 / (1000 * nsecPerByte)
188 *
189 * When USE_HRTIME is employed, then maxidle and offtime become:
 190 * 	maxidle = maxidle * (8.0 / nsecPerByte);
191 * 	offtime = offtime * (8.0 / nsecPerByte);
192 */
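/*
 * Worked example (illustrative only, not from the original sources;
 * assumes RM_FILTER_GAIN == 5, i.e. g = 31/32, as in altq_rmclass.h):
 * for a 10Mb/s link (nsPerByte = 800), f = 0.5 (nsecPerByte = 1600),
 * s = 1000 bytes and b = 16 packet bursts:
 *
 *	ptime   = 1000 * 800 * (1 - .5)/.5         = 800000 ns
 *	g^b     = (31/32)^16                       ~= 0.60
 *	maxidle = 800000 * (1 - 0.60)/0.60         ~= 530000 ns
 *
 * and the scaled value passed to rmc_newclass() is
 *
 *	maxidle = 530000 * 2^5 * 8 / (1000 * 1600) ~= 85
 *
 * which rmc_newclass() unscales to (85 * 1600)/8 = 17000, i.e. ~530 us
 * shifted up by RM_FILTER_GAIN, matching the filter's internal scaling.
 */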
193struct rm_class *
194rmc_newclass(int pri, struct rm_ifdat *ifd, u_int nsecPerByte,
195    void (*action)(rm_class_t *, rm_class_t *), int maxq,
196    struct rm_class *parent, struct rm_class *borrow, u_int maxidle,
197    int minidle, u_int offtime, int pktsize, int flags)
198{
199	struct rm_class	*cl;
200	struct rm_class	*peer;
201	int		 s;
202
203	if (pri >= RM_MAXPRIO)
204		return (NULL);
205#ifndef ALTQ_RED
206	if (flags & RMCF_RED) {
207#ifdef ALTQ_DEBUG
208		printf("rmc_newclass: RED not configured for CBQ!\n");
209#endif
210		return (NULL);
211	}
212#endif
213#ifndef ALTQ_RIO
214	if (flags & RMCF_RIO) {
215#ifdef ALTQ_DEBUG
216		printf("rmc_newclass: RIO not configured for CBQ!\n");
217#endif
218		return (NULL);
219	}
220#endif
221
222	cl = malloc(sizeof(struct rm_class), M_DEVBUF, M_NOWAIT | M_ZERO);
223	if (cl == NULL)
224		return (NULL);
225	CALLOUT_INIT(&cl->callout_);
226	cl->q_ = malloc(sizeof(class_queue_t), M_DEVBUF, M_NOWAIT | M_ZERO);
227	if (cl->q_ == NULL) {
228		free(cl, M_DEVBUF);
229		return (NULL);
230	}
231
232	/*
233	 * Class initialization.
234	 */
235	cl->children_ = NULL;
236	cl->parent_ = parent;
237	cl->borrow_ = borrow;
238	cl->leaf_ = 1;
239	cl->ifdat_ = ifd;
240	cl->pri_ = pri;
241	cl->allotment_ = RM_NS_PER_SEC / nsecPerByte; /* Bytes per sec */
242	cl->depth_ = 0;
243	cl->qthresh_ = 0;
244	cl->ns_per_byte_ = nsecPerByte;
245
246	qlimit(cl->q_) = maxq;
247	qtype(cl->q_) = Q_DROPHEAD;
248	qlen(cl->q_) = 0;
249	cl->flags_ = flags;
250
251#if 1 /* minidle is also scaled in ALTQ */
252	cl->minidle_ = (minidle * (int)nsecPerByte) / 8;
253	if (cl->minidle_ > 0)
254		cl->minidle_ = 0;
255#else
256	cl->minidle_ = minidle;
257#endif
258	cl->maxidle_ = (maxidle * nsecPerByte) / 8;
259	if (cl->maxidle_ == 0)
260		cl->maxidle_ = 1;
261#if 1 /* offtime is also scaled in ALTQ */
262	cl->avgidle_ = cl->maxidle_;
263	cl->offtime_ = ((offtime * nsecPerByte) / 8) >> RM_FILTER_GAIN;
264	if (cl->offtime_ == 0)
265		cl->offtime_ = 1;
266#else
267	cl->avgidle_ = 0;
268	cl->offtime_ = (offtime * nsecPerByte) / 8;
269#endif
270	cl->overlimit = action;
271
272#ifdef ALTQ_RED
273	if (flags & (RMCF_RED|RMCF_RIO)) {
274		int red_flags, red_pkttime;
275
276		red_flags = 0;
277		if (flags & RMCF_ECN)
278			red_flags |= REDF_ECN;
279		if (flags & RMCF_FLOWVALVE)
280			red_flags |= REDF_FLOWVALVE;
281#ifdef ALTQ_RIO
282		if (flags & RMCF_CLEARDSCP)
283			red_flags |= RIOF_CLEARDSCP;
284#endif
285		red_pkttime = nsecPerByte * pktsize  / 1000;
286
287		if (flags & RMCF_RED) {
288			cl->red_ = red_alloc(0, 0,
289			    qlimit(cl->q_) * 10/100,
290			    qlimit(cl->q_) * 30/100,
291			    red_flags, red_pkttime);
292			if (cl->red_ != NULL)
293				qtype(cl->q_) = Q_RED;
294		}
295#ifdef ALTQ_RIO
296		else {
297			cl->red_ = (red_t *)rio_alloc(0, NULL,
298						      red_flags, red_pkttime);
299			if (cl->red_ != NULL)
300				qtype(cl->q_) = Q_RIO;
301		}
302#endif
303	}
304#endif /* ALTQ_RED */
305
306	/*
307	 * put the class into the class tree
308	 */
309#ifdef __NetBSD__
310	s = splnet();
311#else
312	s = splimp();
313#endif
314	IFQ_LOCK(ifd->ifq_);
315	if ((peer = ifd->active_[pri]) != NULL) {
316		/* find the last class at this pri */
317		cl->peer_ = peer;
318		while (peer->peer_ != ifd->active_[pri])
319			peer = peer->peer_;
320		peer->peer_ = cl;
321	} else {
322		ifd->active_[pri] = cl;
323		cl->peer_ = cl;
324	}
325
326	if (cl->parent_) {
327		cl->next_ = parent->children_;
328		parent->children_ = cl;
329		parent->leaf_ = 0;
330	}
331
332	/*
333	 * Compute the depth of this class and its ancestors in the class
334	 * hierarchy.
335	 */
336	rmc_depth_compute(cl);
337
338	/*
339	 * If CBQ's WRR is enabled, then initialize the class WRR state.
340	 */
341	if (ifd->wrr_) {
342		ifd->num_[pri]++;
343		ifd->alloc_[pri] += cl->allotment_;
344		rmc_wrr_set_weights(ifd);
345	}
346	IFQ_UNLOCK(ifd->ifq_);
347	splx(s);
348	return (cl);
349}
350
351int
352rmc_modclass(struct rm_class *cl, u_int nsecPerByte, int maxq, u_int maxidle,
353    int minidle, u_int offtime, int pktsize)
354{
355	struct rm_ifdat	*ifd;
356	u_int		 old_allotment;
357	int		 s;
358
359	ifd = cl->ifdat_;
360	old_allotment = cl->allotment_;
361
362#ifdef __NetBSD__
363	s = splnet();
364#else
365	s = splimp();
366#endif
367	IFQ_LOCK(ifd->ifq_);
368	cl->allotment_ = RM_NS_PER_SEC / nsecPerByte; /* Bytes per sec */
369	cl->qthresh_ = 0;
370	cl->ns_per_byte_ = nsecPerByte;
371
372	qlimit(cl->q_) = maxq;
373
374#if 1 /* minidle is also scaled in ALTQ */
375	cl->minidle_ = (minidle * nsecPerByte) / 8;
376	if (cl->minidle_ > 0)
377		cl->minidle_ = 0;
378#else
379	cl->minidle_ = minidle;
380#endif
381	cl->maxidle_ = (maxidle * nsecPerByte) / 8;
382	if (cl->maxidle_ == 0)
383		cl->maxidle_ = 1;
384#if 1 /* offtime is also scaled in ALTQ */
385	cl->avgidle_ = cl->maxidle_;
386	cl->offtime_ = ((offtime * nsecPerByte) / 8) >> RM_FILTER_GAIN;
387	if (cl->offtime_ == 0)
388		cl->offtime_ = 1;
389#else
390	cl->avgidle_ = 0;
391	cl->offtime_ = (offtime * nsecPerByte) / 8;
392#endif
393
394	/*
395	 * If CBQ's WRR is enabled, then initialize the class WRR state.
396	 */
397	if (ifd->wrr_) {
398		ifd->alloc_[cl->pri_] += cl->allotment_ - old_allotment;
399		rmc_wrr_set_weights(ifd);
400	}
401	IFQ_UNLOCK(ifd->ifq_);
402	splx(s);
403	return (0);
404}
405
406/*
407 * static void
408 * rmc_wrr_set_weights(struct rm_ifdat *ifdat) - This function computes
 409 *	the appropriate round robin weights for the CBQ weighted round robin
410 *	algorithm.
411 *
412 *	Returns: NONE
413 */
414
415static void
416rmc_wrr_set_weights(struct rm_ifdat *ifd)
417{
418	int		i;
419	struct rm_class	*cl, *clh;
420
421	for (i = 0; i < RM_MAXPRIO; i++) {
422		/*
423		 * This is inverted from that of the simulator to
424		 * maintain precision.
425		 */
426		if (ifd->num_[i] == 0)
427			ifd->M_[i] = 0;
428		else
429			ifd->M_[i] = ifd->alloc_[i] /
430				(ifd->num_[i] * ifd->maxpkt_);
431		/*
432		 * Compute the weighted allotment for each class.
433		 * This takes the expensive div instruction out
434		 * of the main loop for the wrr scheduling path.
435		 * These only get recomputed when a class comes or
436		 * goes.
437		 */
438		if (ifd->active_[i] != NULL) {
439			clh = cl = ifd->active_[i];
440			do {
441				/* safe-guard for slow link or alloc_ == 0 */
442				if (ifd->M_[i] == 0)
443					cl->w_allotment_ = 0;
444				else
445					cl->w_allotment_ = cl->allotment_ /
446						ifd->M_[i];
447				cl = cl->peer_;
448			} while ((cl != NULL) && (cl != clh));
449		}
450	}
451}
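/*
 * Example of the weight computation above (hypothetical numbers): two
 * classes at the same priority on a 10Mb/s link with maxpkt_ = 1500 and
 * allotments of 625000 and 312500 bytes/sec:
 *
 *	alloc_[i] = 937500, num_[i] = 2
 *	M_[i]     = 937500 / (2 * 1500) = 312
 *	w_allotment_ (class 1) = 625000 / 312 ~= 2003 bytes
 *	w_allotment_ (class 2) = 312500 / 312 ~= 1001 bytes
 *
 * so per WRR round class 1 may send about twice the bytes of class 2,
 * preserving the 2:1 allotment ratio without a division in the dequeue
 * path.
 */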
452
453int
454rmc_get_weight(struct rm_ifdat *ifd, int pri)
455{
456	if ((pri >= 0) && (pri < RM_MAXPRIO))
457		return (ifd->M_[pri]);
458	else
459		return (0);
460}
461
462/*
463 * static void
464 * rmc_depth_compute(struct rm_class *cl) - This function computes the
465 *	appropriate depth of class 'cl' and its ancestors.
466 *
467 *	Returns:	NONE
468 */
469
470static void
471rmc_depth_compute(struct rm_class *cl)
472{
473	rm_class_t	*t = cl, *p;
474
475	/*
476	 * Recompute the depth for the branch of the tree.
477	 */
478	while (t != NULL) {
479		p = t->parent_;
480		if (p && (t->depth_ >= p->depth_)) {
481			p->depth_ = t->depth_ + 1;
482			t = p;
483		} else
484			t = NULL;
485	}
486}
487
488/*
489 * static void
490 * rmc_depth_recompute(struct rm_class *cl) - This function re-computes
491 *	the depth of the tree after a class has been deleted.
492 *
493 *	Returns:	NONE
494 */
495
496static void
497rmc_depth_recompute(rm_class_t *cl)
498{
499#if 1 /* ALTQ */
500	rm_class_t	*p, *t;
501
502	p = cl;
503	while (p != NULL) {
504		if ((t = p->children_) == NULL) {
505			p->depth_ = 0;
506		} else {
507			int cdepth = 0;
508
509			while (t != NULL) {
510				if (t->depth_ > cdepth)
511					cdepth = t->depth_;
512				t = t->next_;
513			}
514
515			if (p->depth_ == cdepth + 1)
516				/* no change to this parent */
517				return;
518
519			p->depth_ = cdepth + 1;
520		}
521
522		p = p->parent_;
523	}
524#else
525	rm_class_t	*t;
526
527	if (cl->depth_ >= 1) {
528		if (cl->children_ == NULL) {
529			cl->depth_ = 0;
530		} else if ((t = cl->children_) != NULL) {
531			while (t != NULL) {
532				if (t->children_ != NULL)
533					rmc_depth_recompute(t);
534				t = t->next_;
535			}
536		} else
537			rmc_depth_compute(cl);
538	}
539#endif
540}
541
542/*
543 * void
544 * rmc_delete_class(struct rm_ifdat *ifdat, struct rm_class *cl) - This
545 *	function deletes a class from the link-sharing structure and frees
546 *	all resources associated with the class.
547 *
548 *	Returns: NONE
549 */
550
551void
552rmc_delete_class(struct rm_ifdat *ifd, struct rm_class *cl)
553{
554	struct rm_class	*p, *head, *previous;
555	int		 s;
556
557	ASSERT(cl->children_ == NULL);
558
559	if (cl->sleeping_)
560		CALLOUT_STOP(&cl->callout_);
561
562#ifdef __NetBSD__
563	s = splnet();
564#else
565	s = splimp();
566#endif
567	IFQ_LOCK(ifd->ifq_);
568	/*
569	 * Free packets in the packet queue.
570	 * XXX - this may not be a desired behavior.  Packets should be
571	 *		re-queued.
572	 */
573	rmc_dropall(cl);
574
575	/*
 576	 * If the class has a parent, then remove the class from the
 577	 * parent's children chain.
578	 */
579	if (cl->parent_ != NULL) {
580		head = cl->parent_->children_;
581		p = previous = head;
582		if (head->next_ == NULL) {
583			ASSERT(head == cl);
584			cl->parent_->children_ = NULL;
585			cl->parent_->leaf_ = 1;
586		} else while (p != NULL) {
587			if (p == cl) {
588				if (cl == head)
589					cl->parent_->children_ = cl->next_;
590				else
591					previous->next_ = cl->next_;
592				cl->next_ = NULL;
593				p = NULL;
594			} else {
595				previous = p;
596				p = p->next_;
597			}
598		}
599	}
600
601	/*
602	 * Delete class from class priority peer list.
603	 */
604	if ((p = ifd->active_[cl->pri_]) != NULL) {
605		/*
606		 * If there is more than one member of this priority
607		 * level, then look for class(cl) in the priority level.
608		 */
609		if (p != p->peer_) {
610			while (p->peer_ != cl)
611				p = p->peer_;
612			p->peer_ = cl->peer_;
613
614			if (ifd->active_[cl->pri_] == cl)
615				ifd->active_[cl->pri_] = cl->peer_;
616		} else {
617			ASSERT(p == cl);
618			ifd->active_[cl->pri_] = NULL;
619		}
620	}
621
622	/*
623	 * Recompute the WRR weights.
624	 */
625	if (ifd->wrr_) {
626		ifd->alloc_[cl->pri_] -= cl->allotment_;
627		ifd->num_[cl->pri_]--;
628		rmc_wrr_set_weights(ifd);
629	}
630
631	/*
632	 * Re-compute the depth of the tree.
633	 */
634#if 1 /* ALTQ */
635	rmc_depth_recompute(cl->parent_);
636#else
637	rmc_depth_recompute(ifd->root_);
638#endif
639
640	IFQ_UNLOCK(ifd->ifq_);
641	splx(s);
642
643	/*
644	 * Free the class structure.
645	 */
646	if (cl->red_ != NULL) {
647#ifdef ALTQ_RIO
648		if (q_is_rio(cl->q_))
649			rio_destroy((rio_t *)cl->red_);
650#endif
651#ifdef ALTQ_RED
652		if (q_is_red(cl->q_))
653			red_destroy(cl->red_);
654#endif
655	}
656	free(cl->q_, M_DEVBUF);
657	free(cl, M_DEVBUF);
658}
659
660
661/*
662 * void
663 * rmc_init(...) - Initialize the resource management data structures
664 *	associated with the output portion of interface 'ifp'.  'ifd' is
665 *	where the structures will be built (for backwards compatibility, the
666 *	structures aren't kept in the ifnet struct).  'nsecPerByte'
667 *	gives the link speed (inverse of bandwidth) in nanoseconds/byte.
668 *	'restart' is the driver-specific routine that the generic 'delay
669 *	until under limit' action will call to restart output.  `maxq'
670 *	is the queue size of the 'link' & 'default' classes.  'maxqueued'
671 *	is the maximum number of packets that the resource management
672 *	code will allow to be queued 'downstream' (this is typically 1).
673 *
674 *	Returns:	NONE
675 */
676
677void
678rmc_init(struct ifaltq *ifq, struct rm_ifdat *ifd, u_int nsecPerByte,
679    void (*restart)(struct ifaltq *), int maxq, int maxqueued, u_int maxidle,
680    int minidle, u_int offtime, int flags)
681{
682	int		i, mtu;
683
684	/*
685	 * Initialize the CBQ tracing/debug facility.
686	 */
687	CBQTRACEINIT();
688
689	bzero((char *)ifd, sizeof (*ifd));
690	mtu = ifq->altq_ifp->if_mtu;
691	ifd->ifq_ = ifq;
692	ifd->restart = restart;
693	ifd->maxqueued_ = maxqueued;
694	ifd->ns_per_byte_ = nsecPerByte;
695	ifd->maxpkt_ = mtu;
696	ifd->wrr_ = (flags & RMCF_WRR) ? 1 : 0;
697	ifd->efficient_ = (flags & RMCF_EFFICIENT) ? 1 : 0;
698#if 1
699	ifd->maxiftime_ = mtu * nsecPerByte / 1000 * 16;
700	if (mtu * nsecPerByte > 10 * 1000000)
701		ifd->maxiftime_ /= 4;
702#endif
703
704	reset_cutoff(ifd);
705	CBQTRACE(rmc_init, 'INIT', ifd->cutoff_);
706
707	/*
708	 * Initialize the CBQ's WRR state.
709	 */
710	for (i = 0; i < RM_MAXPRIO; i++) {
711		ifd->alloc_[i] = 0;
712		ifd->M_[i] = 0;
713		ifd->num_[i] = 0;
714		ifd->na_[i] = 0;
715		ifd->active_[i] = NULL;
716	}
717
718	/*
719	 * Initialize current packet state.
720	 */
721	ifd->qi_ = 0;
722	ifd->qo_ = 0;
723	for (i = 0; i < RM_MAXQUEUED; i++) {
724		ifd->class_[i] = NULL;
725		ifd->curlen_[i] = 0;
726		ifd->borrowed_[i] = NULL;
727	}
728
729	/*
730	 * Create the root class of the link-sharing structure.
731	 */
732	if ((ifd->root_ = rmc_newclass(0, ifd,
733				       nsecPerByte,
734				       rmc_root_overlimit, maxq, 0, 0,
735				       maxidle, minidle, offtime,
736				       0, 0)) == NULL) {
737		printf("rmc_init: root class not allocated\n");
 738		return;
739	}
740	ifd->root_->depth_ = 0;
741}
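/*
 * Usage sketch (hypothetical driver glue, loosely modeled on what the
 * CBQ layer does; 'my_restart', the mbuf 'm' and all numeric parameters
 * below are illustrative assumptions, not part of this file):
 *
 *	struct rm_ifdat ifd;
 *	struct rm_class *cl;
 *
 *	// root + interface state: 10Mb/s link, WRR scheduling
 *	rmc_init(&ifp->if_snd, &ifd, 800, my_restart, 64, 1,
 *	    85, 0, 14, RMCF_WRR);
 *
 *	// leaf at priority 1 getting 50% (1600 ns/byte), borrowing
 *	// from the root
 *	cl = rmc_newclass(1, &ifd, 1600, rmc_delay_action, 32,
 *	    ifd.root_, ifd.root_, 85, 0, 14, 1000, 0);
 *
 *	rmc_queue_packet(cl, m);			// on enqueue
 *	m = rmc_dequeue_next(&ifd, ALTDQ_REMOVE);	// in the driver
 *	rmc_update_class_util(&ifd);		// after xmit completes
 */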
742
743/*
744 * void
745 * rmc_queue_packet(struct rm_class *cl, mbuf_t *m) - Add packet given by
746 *	mbuf 'm' to queue for resource class 'cl'.  This routine is called
747 *	by a driver's if_output routine.  This routine must be called with
748 *	output packet completion interrupts locked out (to avoid racing with
749 *	rmc_dequeue_next).
750 *
751 *	Returns:	0 on successful queueing
752 *			-1 when packet drop occurs
753 */
754int
755rmc_queue_packet(struct rm_class *cl, mbuf_t *m)
756{
757	struct timeval	 now;
758	struct rm_ifdat *ifd = cl->ifdat_;
759	int		 cpri = cl->pri_;
760	int		 is_empty = qempty(cl->q_);
761
762	RM_GETTIME(now);
763	if (ifd->cutoff_ > 0) {
764		if (TV_LT(&cl->undertime_, &now)) {
765			if (ifd->cutoff_ > cl->depth_)
766				ifd->cutoff_ = cl->depth_;
767			CBQTRACE(rmc_queue_packet, 'ffoc', cl->depth_);
768		}
769#if 1 /* ALTQ */
770		else {
771			/*
772			 * the class is overlimit. if the class has
773			 * underlimit ancestors, set cutoff to the lowest
774			 * depth among them.
775			 */
776			struct rm_class *borrow = cl->borrow_;
777
778			while (borrow != NULL &&
779			       borrow->depth_ < ifd->cutoff_) {
780				if (TV_LT(&borrow->undertime_, &now)) {
781					ifd->cutoff_ = borrow->depth_;
782					CBQTRACE(rmc_queue_packet, 'ffob', ifd->cutoff_);
783					break;
784				}
785				borrow = borrow->borrow_;
786			}
787		}
788#else /* !ALTQ */
789		else if ((ifd->cutoff_ > 1) && cl->borrow_) {
790			if (TV_LT(&cl->borrow_->undertime_, &now)) {
791				ifd->cutoff_ = cl->borrow_->depth_;
792				CBQTRACE(rmc_queue_packet, 'ffob',
793					 cl->borrow_->depth_);
794			}
795		}
796#endif /* !ALTQ */
797	}
798
799	if (_rmc_addq(cl, m) < 0)
800		/* failed */
801		return (-1);
802
803	if (is_empty) {
804		CBQTRACE(rmc_queue_packet, 'ytpe', cl->stats_.handle);
805		ifd->na_[cpri]++;
806	}
807
808	if (qlen(cl->q_) > qlimit(cl->q_)) {
809		/* note: qlimit can be set to 0 or 1 */
810		rmc_drop_action(cl);
811		return (-1);
812	}
813	return (0);
814}
815
816/*
817 * void
818 * rmc_tl_satisfied(struct rm_ifdat *ifd, struct timeval *now) - Check all
 819 *	classes to see if they are satisfied.
820 */
821
822static void
823rmc_tl_satisfied(struct rm_ifdat *ifd, struct timeval *now)
824{
825	int		 i;
826	rm_class_t	*p, *bp;
827
828	for (i = RM_MAXPRIO - 1; i >= 0; i--) {
829		if ((bp = ifd->active_[i]) != NULL) {
830			p = bp;
831			do {
832				if (!rmc_satisfied(p, now)) {
833					ifd->cutoff_ = p->depth_;
834					return;
835				}
836				p = p->peer_;
837			} while (p != bp);
838		}
839	}
840
841	reset_cutoff(ifd);
842}
843
844/*
 845 * rmc_satisfied - Return 1 if the class is satisfied.  0, otherwise.
846 */
847
848static int
849rmc_satisfied(struct rm_class *cl, struct timeval *now)
850{
851	rm_class_t	*p;
852
853	if (cl == NULL)
854		return (1);
855	if (TV_LT(now, &cl->undertime_))
856		return (1);
857	if (cl->depth_ == 0) {
858		if (!cl->sleeping_ && (qlen(cl->q_) > cl->qthresh_))
859			return (0);
860		else
861			return (1);
862	}
863	if (cl->children_ != NULL) {
864		p = cl->children_;
865		while (p != NULL) {
866			if (!rmc_satisfied(p, now))
867				return (0);
868			p = p->next_;
869		}
870	}
871
872	return (1);
873}
874
875/*
876 * Return 1 if class 'cl' is under limit or can borrow from a parent,
877 * 0 if overlimit.  As a side-effect, this routine will invoke the
 878 * class overlimit action if the class is overlimit.
879 */
880
881static int
882rmc_under_limit(struct rm_class *cl, struct timeval *now)
883{
884	rm_class_t	*p = cl;
885	rm_class_t	*top;
886	struct rm_ifdat	*ifd = cl->ifdat_;
887
888	ifd->borrowed_[ifd->qi_] = NULL;
889	/*
890	 * If cl is the root class, then always return that it is
891	 * underlimit.  Otherwise, check to see if the class is underlimit.
892	 */
893	if (cl->parent_ == NULL)
894		return (1);
895
896	if (cl->sleeping_) {
897		if (TV_LT(now, &cl->undertime_))
898			return (0);
899
900		CALLOUT_STOP(&cl->callout_);
901		cl->sleeping_ = 0;
902		cl->undertime_.tv_sec = 0;
903		return (1);
904	}
905
906	top = NULL;
907	while (cl->undertime_.tv_sec && TV_LT(now, &cl->undertime_)) {
908		if (((cl = cl->borrow_) == NULL) ||
909		    (cl->depth_ > ifd->cutoff_)) {
910#ifdef ADJUST_CUTOFF
911			if (cl != NULL)
912				/* cutoff is taking effect, just
913				   return false without calling
914				   the delay action. */
915				return (0);
916#endif
917#ifdef BORROW_OFFTIME
918			/*
919			 * check if the class can borrow offtime too.
920			 * borrow offtime from the top of the borrow
921			 * chain if the top class is not overloaded.
922			 */
923			if (cl != NULL) {
924				/* cutoff is taking effect, use this class as top. */
925				top = cl;
926				CBQTRACE(rmc_under_limit, 'ffou', ifd->cutoff_);
927			}
928			if (top != NULL && top->avgidle_ == top->minidle_)
929				top = NULL;
930			p->overtime_ = *now;
931			(p->overlimit)(p, top);
932#else
933			p->overtime_ = *now;
934			(p->overlimit)(p, NULL);
935#endif
936			return (0);
937		}
938		top = cl;
939	}
940
941	if (cl != p)
942		ifd->borrowed_[ifd->qi_] = cl;
943	return (1);
944}
945
946/*
 947 * _rmc_wrr_dequeue_next() - This is the scheduler for WRR as opposed to
948 *	Packet-by-packet round robin.
949 *
950 * The heart of the weighted round-robin scheduler, which decides which
951 * class next gets to send a packet.  Highest priority first, then
 952 * weighted round-robin within priorities.
953 *
954 * Each able-to-send class gets to send until its byte allocation is
955 * exhausted.  Thus, the active pointer is only changed after a class has
956 * exhausted its allocation.
957 *
958 * If the scheduler finds no class that is underlimit or able to borrow,
959 * then the first class found that had a nonzero queue and is allowed to
960 * borrow gets to send.
961 */
962
963static mbuf_t *
964_rmc_wrr_dequeue_next(struct rm_ifdat *ifd, int op)
965{
966	struct rm_class	*cl = NULL, *first = NULL;
967	u_int		 deficit;
968	int		 cpri;
969	mbuf_t		*m;
970	struct timeval	 now;
971
972	RM_GETTIME(now);
973
974	/*
975	 * if the driver polls the top of the queue and then removes
976	 * the polled packet, we must return the same packet.
977	 */
978	if (op == ALTDQ_REMOVE && ifd->pollcache_) {
979		cl = ifd->pollcache_;
980		cpri = cl->pri_;
981		if (ifd->efficient_) {
982			/* check if this class is overlimit */
983			if (cl->undertime_.tv_sec != 0 &&
984			    rmc_under_limit(cl, &now) == 0)
985				first = cl;
986		}
987		ifd->pollcache_ = NULL;
988		goto _wrr_out;
989	}
990	else {
991		/* mode == ALTDQ_POLL || pollcache == NULL */
992		ifd->pollcache_ = NULL;
993		ifd->borrowed_[ifd->qi_] = NULL;
994	}
995#ifdef ADJUST_CUTOFF
996 _again:
997#endif
998	for (cpri = RM_MAXPRIO - 1; cpri >= 0; cpri--) {
999		if (ifd->na_[cpri] == 0)
1000			continue;
1001		deficit = 0;
1002		/*
1003		 * Loop through twice for a priority level, if some class
1004		 * was unable to send a packet the first round because
1005		 * of the weighted round-robin mechanism.
1006		 * During the second loop at this level, deficit==2.
 1007		 * (This second loop is not needed if, for every class,
 1008		 * "M[cl->pri_]" times "cl->allotment" is greater than
1009		 * the byte size for the largest packet in the class.)
1010		 */
1011 _wrr_loop:
1012		cl = ifd->active_[cpri];
1013		ASSERT(cl != NULL);
1014		do {
1015			if ((deficit < 2) && (cl->bytes_alloc_ <= 0))
1016				cl->bytes_alloc_ += cl->w_allotment_;
1017			if (!qempty(cl->q_)) {
1018				if ((cl->undertime_.tv_sec == 0) ||
1019				    rmc_under_limit(cl, &now)) {
1020					if (cl->bytes_alloc_ > 0 || deficit > 1)
1021						goto _wrr_out;
1022
1023					/* underlimit but no alloc */
1024					deficit = 1;
1025#if 1
1026					ifd->borrowed_[ifd->qi_] = NULL;
1027#endif
1028				}
1029				else if (first == NULL && cl->borrow_ != NULL)
1030					first = cl; /* borrowing candidate */
1031			}
1032
1033			cl->bytes_alloc_ = 0;
1034			cl = cl->peer_;
1035		} while (cl != ifd->active_[cpri]);
1036
1037		if (deficit == 1) {
1038			/* first loop found an underlimit class with deficit */
1039			/* Loop on same priority level, with new deficit.  */
1040			deficit = 2;
1041			goto _wrr_loop;
1042		}
1043	}
1044
1045#ifdef ADJUST_CUTOFF
1046	/*
1047	 * no underlimit class found.  if cutoff is taking effect,
1048	 * increase cutoff and try again.
1049	 */
1050	if (first != NULL && ifd->cutoff_ < ifd->root_->depth_) {
1051		ifd->cutoff_++;
1052		CBQTRACE(_rmc_wrr_dequeue_next, 'ojda', ifd->cutoff_);
1053		goto _again;
1054	}
1055#endif /* ADJUST_CUTOFF */
1056	/*
1057	 * If LINK_EFFICIENCY is turned on, then the first overlimit
1058	 * class we encounter will send a packet if all the classes
1059	 * of the link-sharing structure are overlimit.
1060	 */
1061	reset_cutoff(ifd);
1062	CBQTRACE(_rmc_wrr_dequeue_next, 'otsr', ifd->cutoff_);
1063
1064	if (!ifd->efficient_ || first == NULL)
1065		return (NULL);
1066
1067	cl = first;
1068	cpri = cl->pri_;
1069#if 0	/* too time-consuming for nothing */
1070	if (cl->sleeping_)
1071		CALLOUT_STOP(&cl->callout_);
1072	cl->sleeping_ = 0;
1073	cl->undertime_.tv_sec = 0;
1074#endif
1075	ifd->borrowed_[ifd->qi_] = cl->borrow_;
1076	ifd->cutoff_ = cl->borrow_->depth_;
1077
1078	/*
 1079	 * Dequeue the packet and do the bookkeeping...
1080	 */
1081 _wrr_out:
1082	if (op == ALTDQ_REMOVE) {
1083		m = _rmc_getq(cl);
1084		if (m == NULL)
1085			panic("_rmc_wrr_dequeue_next");
1086		if (qempty(cl->q_))
1087			ifd->na_[cpri]--;
1088
1089		/*
1090		 * Update class statistics and link data.
1091		 */
1092		if (cl->bytes_alloc_ > 0)
1093			cl->bytes_alloc_ -= m_pktlen(m);
1094
1095		if ((cl->bytes_alloc_ <= 0) || first == cl)
1096			ifd->active_[cl->pri_] = cl->peer_;
1097		else
1098			ifd->active_[cl->pri_] = cl;
1099
1100		ifd->class_[ifd->qi_] = cl;
1101		ifd->curlen_[ifd->qi_] = m_pktlen(m);
1102		ifd->now_[ifd->qi_] = now;
1103		ifd->qi_ = (ifd->qi_ + 1) % ifd->maxqueued_;
1104		ifd->queued_++;
1105	} else {
 1106		/* mode == ALTDQ_POLL */
1107		m = _rmc_pollq(cl);
1108		ifd->pollcache_ = cl;
1109	}
1110	return (m);
1111}
1112
1113/*
1114 * Dequeue & return next packet from the highest priority class that
1115 * has a packet to send & has enough allocation to send it.  This
1116 * routine is called by a driver whenever it needs a new packet to
1117 * output.
1118 */
1119static mbuf_t *
1120_rmc_prr_dequeue_next(struct rm_ifdat *ifd, int op)
1121{
1122	mbuf_t		*m;
1123	int		 cpri;
1124	struct rm_class	*cl, *first = NULL;
1125	struct timeval	 now;
1126
1127	RM_GETTIME(now);
1128
1129	/*
1130	 * if the driver polls the top of the queue and then removes
1131	 * the polled packet, we must return the same packet.
1132	 */
1133	if (op == ALTDQ_REMOVE && ifd->pollcache_) {
1134		cl = ifd->pollcache_;
1135		cpri = cl->pri_;
1136		ifd->pollcache_ = NULL;
1137		goto _prr_out;
1138	} else {
1139		/* mode == ALTDQ_POLL || pollcache == NULL */
1140		ifd->pollcache_ = NULL;
1141		ifd->borrowed_[ifd->qi_] = NULL;
1142	}
1143#ifdef ADJUST_CUTOFF
1144 _again:
1145#endif
1146	for (cpri = RM_MAXPRIO - 1; cpri >= 0; cpri--) {
1147		if (ifd->na_[cpri] == 0)
1148			continue;
1149		cl = ifd->active_[cpri];
1150		ASSERT(cl != NULL);
1151		do {
1152			if (!qempty(cl->q_)) {
1153				if ((cl->undertime_.tv_sec == 0) ||
1154				    rmc_under_limit(cl, &now))
1155					goto _prr_out;
1156				if (first == NULL && cl->borrow_ != NULL)
1157					first = cl;
1158			}
1159			cl = cl->peer_;
1160		} while (cl != ifd->active_[cpri]);
1161	}
1162
1163#ifdef ADJUST_CUTOFF
1164	/*
1165	 * no underlimit class found.  if cutoff is taking effect, increase
1166	 * cutoff and try again.
1167	 */
1168	if (first != NULL && ifd->cutoff_ < ifd->root_->depth_) {
1169		ifd->cutoff_++;
1170		goto _again;
1171	}
1172#endif /* ADJUST_CUTOFF */
1173	/*
1174	 * If LINK_EFFICIENCY is turned on, then the first overlimit
1175	 * class we encounter will send a packet if all the classes
1176	 * of the link-sharing structure are overlimit.
1177	 */
1178	reset_cutoff(ifd);
1179	if (!ifd->efficient_ || first == NULL)
1180		return (NULL);
1181
1182	cl = first;
1183	cpri = cl->pri_;
1184#if 0	/* too time-consuming for nothing */
1185	if (cl->sleeping_)
1186		CALLOUT_STOP(&cl->callout_);
1187	cl->sleeping_ = 0;
1188	cl->undertime_.tv_sec = 0;
1189#endif
1190	ifd->borrowed_[ifd->qi_] = cl->borrow_;
1191	ifd->cutoff_ = cl->borrow_->depth_;
1192
1193	/*
 1194	 * Dequeue the packet and do the bookkeeping...
1195	 */
1196 _prr_out:
1197	if (op == ALTDQ_REMOVE) {
1198		m = _rmc_getq(cl);
1199		if (m == NULL)
1200			panic("_rmc_prr_dequeue_next");
1201		if (qempty(cl->q_))
1202			ifd->na_[cpri]--;
1203
1204		ifd->active_[cpri] = cl->peer_;
1205
1206		ifd->class_[ifd->qi_] = cl;
1207		ifd->curlen_[ifd->qi_] = m_pktlen(m);
1208		ifd->now_[ifd->qi_] = now;
1209		ifd->qi_ = (ifd->qi_ + 1) % ifd->maxqueued_;
1210		ifd->queued_++;
1211	} else {
1212		/* mode == ALTDQ_POLL */
1213		m = _rmc_pollq(cl);
1214		ifd->pollcache_ = cl;
1215	}
1216	return (m);
1217}
1218
1219/*
1220 * mbuf_t *
1221 * rmc_dequeue_next(struct rm_ifdat *ifd, struct timeval *now) - this function
1222 *	is invoked by the packet driver to get the next packet to be
1223 *	dequeued and output on the link.  If WRR is enabled, then the
 1224 *	WRR dequeue next routine will determine the next packet to be sent.
1225 *	Otherwise, packet-by-packet round robin is invoked.
1226 *
1227 *	Returns:	NULL, if a packet is not available or if all
1228 *			classes are overlimit.
1229 *
1230 *			Otherwise, Pointer to the next packet.
1231 */
1232
1233mbuf_t *
1234rmc_dequeue_next(struct rm_ifdat *ifd, int mode)
1235{
1236	if (ifd->queued_ >= ifd->maxqueued_)
1237		return (NULL);
1238	else if (ifd->wrr_)
1239		return (_rmc_wrr_dequeue_next(ifd, mode));
1240	else
1241		return (_rmc_prr_dequeue_next(ifd, mode));
1242}
1243
1244/*
1245 * Update the utilization estimate for the packet that just completed.
1246 * The packet's class & the parent(s) of that class all get their
1247 * estimators updated.  This routine is called by the driver's output-
1248 * packet-completion interrupt service routine.
1249 */
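/*
 * The filter used below, avgidle += idle - (avgidle >> RM_FILTER_GAIN),
 * is the EWMA  avg = (1 - w) * avg + idle  with w = 2^-RM_FILTER_GAIN;
 * because the new sample enters unweighted, the stored estimate runs
 * 2^RM_FILTER_GAIN times larger than the true average idle time, which
 * is the upward scaling mentioned in the rmc_newclass() comment above.
 */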
1250
1251/*
1252 * a macro to approximate "divide by 1000" that gives 0.000999,
1253 * if a value has enough effective digits.
1254 * (on pentium, mul takes 9 cycles but div takes 46!)
1255 */
1256#define	NSEC_TO_USEC(t)	(((t) >> 10) + ((t) >> 16) + ((t) >> 17))
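/*
 * Sanity check of the approximation (illustrative): 1/1024 + 1/65536 +
 * 1/131072 = 0.00099945, vs. the exact 0.001.  E.g. for t = 1000000 ns,
 * (t >> 10) + (t >> 16) + (t >> 17) = 976 + 15 + 7 = 998 us instead of
 * 1000 us, an error of about 0.2%.
 */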
1257void
1258rmc_update_class_util(struct rm_ifdat *ifd)
1259{
1260	int		 idle, avgidle, pktlen;
1261	int		 pkt_time, tidle;
1262	rm_class_t	*cl, *borrowed;
1263	rm_class_t	*borrows;
1264	struct timeval	*nowp;
1265
1266	/*
1267	 * Get the most recent completed class.
1268	 */
1269	if ((cl = ifd->class_[ifd->qo_]) == NULL)
1270		return;
1271
1272	pktlen = ifd->curlen_[ifd->qo_];
1273	borrowed = ifd->borrowed_[ifd->qo_];
1274	borrows = borrowed;
1275
1276	PKTCNTR_ADD(&cl->stats_.xmit_cnt, pktlen);
1277
1278	/*
1279	 * Run estimator on class and its ancestors.
1280	 */
1281	/*
 1282	 * rmc_update_class_util is designed to be called when the
1283	 * transfer is completed from a xmit complete interrupt,
1284	 * but most drivers don't implement an upcall for that.
1285	 * so, just use estimated completion time.
1286	 * as a result, ifd->qi_ and ifd->qo_ are always synced.
1287	 */
1288	nowp = &ifd->now_[ifd->qo_];
1289	/* get pkt_time (for link) in usec */
1290#if 1  /* use approximation */
1291	pkt_time = ifd->curlen_[ifd->qo_] * ifd->ns_per_byte_;
1292	pkt_time = NSEC_TO_USEC(pkt_time);
1293#else
1294	pkt_time = ifd->curlen_[ifd->qo_] * ifd->ns_per_byte_ / 1000;
1295#endif
1296#if 1 /* ALTQ4PPP */
1297	if (TV_LT(nowp, &ifd->ifnow_)) {
1298		int iftime;
1299
1300		/*
1301		 * make sure the estimated completion time does not go
1302		 * too far.  it can happen when the link layer supports
1303		 * data compression or the interface speed is set to
1304		 * a much lower value.
1305		 */
1306		TV_DELTA(&ifd->ifnow_, nowp, iftime);
1307		if (iftime+pkt_time < ifd->maxiftime_) {
1308			TV_ADD_DELTA(&ifd->ifnow_, pkt_time, &ifd->ifnow_);
1309		} else {
1310			TV_ADD_DELTA(nowp, ifd->maxiftime_, &ifd->ifnow_);
1311		}
1312	} else {
1313		TV_ADD_DELTA(nowp, pkt_time, &ifd->ifnow_);
1314	}
1315#else
1316	if (TV_LT(nowp, &ifd->ifnow_)) {
1317		TV_ADD_DELTA(&ifd->ifnow_, pkt_time, &ifd->ifnow_);
1318	} else {
1319		TV_ADD_DELTA(nowp, pkt_time, &ifd->ifnow_);
1320	}
1321#endif
1322
1323	while (cl != NULL) {
1324		TV_DELTA(&ifd->ifnow_, &cl->last_, idle);
1325		if (idle >= 2000000)
1326			/*
1327			 * this class is idle enough, reset avgidle.
1328			 * (TV_DELTA returns 2000000 us when delta is large.)
1329			 */
1330			cl->avgidle_ = cl->maxidle_;
1331
1332		/* get pkt_time (for class) in usec */
1333#if 1  /* use approximation */
1334		pkt_time = pktlen * cl->ns_per_byte_;
1335		pkt_time = NSEC_TO_USEC(pkt_time);
1336#else
1337		pkt_time = pktlen * cl->ns_per_byte_ / 1000;
1338#endif
1339		idle -= pkt_time;
1340
1341		avgidle = cl->avgidle_;
1342		avgidle += idle - (avgidle >> RM_FILTER_GAIN);
1343		cl->avgidle_ = avgidle;
1344
1345		/* Are we overlimit ? */
1346		if (avgidle <= 0) {
1347			CBQTRACE(rmc_update_class_util, 'milo', cl->stats_.handle);
1348#if 1 /* ALTQ */
1349			/*
1350			 * need some lower bound for avgidle, otherwise
1351			 * a borrowing class gets unbounded penalty.
1352			 */
1353			if (avgidle < cl->minidle_)
1354				avgidle = cl->avgidle_ = cl->minidle_;
1355#endif
1356			/* set next idle to make avgidle 0 */
1357			tidle = pkt_time +
1358				(((1 - RM_POWER) * avgidle) >> RM_FILTER_GAIN);
1359			TV_ADD_DELTA(nowp, tidle, &cl->undertime_);
1360			++cl->stats_.over;
1361		} else {
1362			cl->avgidle_ =
1363			    (avgidle > cl->maxidle_) ? cl->maxidle_ : avgidle;
1364			cl->undertime_.tv_sec = 0;
1365			if (cl->sleeping_) {
1366				CALLOUT_STOP(&cl->callout_);
1367				cl->sleeping_ = 0;
1368			}
1369		}
1370
1371		if (borrows != NULL) {
1372			if (borrows != cl)
1373				++cl->stats_.borrows;
1374			else
1375				borrows = NULL;
1376		}
1377		cl->last_ = ifd->ifnow_;
1378		cl->last_pkttime_ = pkt_time;
1379
1380#if 1
1381		if (cl->parent_ == NULL) {
1382			/* take stats of root class */
1383			PKTCNTR_ADD(&cl->stats_.xmit_cnt, pktlen);
1384		}
1385#endif
1386
1387		cl = cl->parent_;
1388	}
1389
1390	/*
 1391	 * Check to see if cutoff needs to be set to a new level.
1392	 */
1393	cl = ifd->class_[ifd->qo_];
1394	if (borrowed && (ifd->cutoff_ >= borrowed->depth_)) {
1395#if 1 /* ALTQ */
1396		if ((qlen(cl->q_) <= 0) || TV_LT(nowp, &borrowed->undertime_)) {
1397			rmc_tl_satisfied(ifd, nowp);
1398			CBQTRACE(rmc_update_class_util, 'broe', ifd->cutoff_);
1399		} else {
1400			ifd->cutoff_ = borrowed->depth_;
1401			CBQTRACE(rmc_update_class_util, 'ffob', borrowed->depth_);
1402		}
1403#else /* !ALTQ */
1404		if ((qlen(cl->q_) <= 1) || TV_LT(&now, &borrowed->undertime_)) {
1405			reset_cutoff(ifd);
1406#ifdef notdef
1407			rmc_tl_satisfied(ifd, &now);
1408#endif
1409			CBQTRACE(rmc_update_class_util, 'broe', ifd->cutoff_);
1410		} else {
1411			ifd->cutoff_ = borrowed->depth_;
1412			CBQTRACE(rmc_update_class_util, 'ffob', borrowed->depth_);
1413		}
1414#endif /* !ALTQ */
1415	}
1416
1417	/*
1418	 * Release class slot
1419	 */
1420	ifd->borrowed_[ifd->qo_] = NULL;
1421	ifd->class_[ifd->qo_] = NULL;
1422	ifd->qo_ = (ifd->qo_ + 1) % ifd->maxqueued_;
1423	ifd->queued_--;
1424}
1425
1426/*
1427 * void
1428 * rmc_drop_action(struct rm_class *cl) - Generic (not protocol-specific)
1429 *	over-limit action routines.  These get invoked by rmc_under_limit()
 1430 *	if a class with packets to send is over its bandwidth limit & can't
1431 *	borrow from a parent class.
1432 *
1433 *	Returns: NONE
1434 */
1435
1436static void
1437rmc_drop_action(struct rm_class *cl)
1438{
1439	struct rm_ifdat	*ifd = cl->ifdat_;
1440
1441	ASSERT(qlen(cl->q_) > 0);
1442	_rmc_dropq(cl);
1443	if (qempty(cl->q_))
1444		ifd->na_[cl->pri_]--;
1445}
1446
1447void rmc_dropall(struct rm_class *cl)
1448{
1449	struct rm_ifdat	*ifd = cl->ifdat_;
1450
1451	if (!qempty(cl->q_)) {
1452		_flushq(cl->q_);
1453
1454		ifd->na_[cl->pri_]--;
1455	}
1456}
1457
1458#if (__FreeBSD_version > 300000)
1459/* hzto() was removed as of FreeBSD 3.0 */
1460static int hzto(struct timeval *);
1461
1462static int
1463hzto(struct timeval *tv)
1465{
1466	struct timeval t2;
1467
1468	getmicrotime(&t2);
1469	t2.tv_sec = tv->tv_sec - t2.tv_sec;
1470	t2.tv_usec = tv->tv_usec - t2.tv_usec;
1471	return (tvtohz(&t2));
1472}
1473#endif /* __FreeBSD_version > 300000 */
1474
1475/*
1476 * void
1477 * rmc_delay_action(struct rm_class *cl) - This function is the generic CBQ
1478 *	delay action routine.  It is invoked via rmc_under_limit when the
 1479 *	packet is discovered to be overlimit.
1480 *
 1481 *	If the delay action is the result of a borrow class being overlimit,
 1482 *	then delay for the offtime of the overlimit class being borrowed from.
1483 *
1484 *	Returns: NONE
1485 */
1486
1487void
1488rmc_delay_action(struct rm_class *cl, struct rm_class *borrow)
1489{
1490	int	delay, t, extradelay;
1491
1492	cl->stats_.overactions++;
1493	TV_DELTA(&cl->undertime_, &cl->overtime_, delay);
1494#ifndef BORROW_OFFTIME
1495	delay += cl->offtime_;
1496#endif
1497
1498	if (!cl->sleeping_) {
1499		CBQTRACE(rmc_delay_action, 'yled', cl->stats_.handle);
1500#ifdef BORROW_OFFTIME
1501		if (borrow != NULL)
1502			extradelay = borrow->offtime_;
1503		else
1504#endif
1505			extradelay = cl->offtime_;
1506
1507#ifdef ALTQ
1508		/*
1509		 * XXX recalculate suspend time:
1510		 * current undertime is (tidle + pkt_time) calculated
1511		 * from the last transmission.
1512		 *	tidle: time required to bring avgidle back to 0
1513		 *	pkt_time: target waiting time for this class
1514		 * we need to replace pkt_time by offtime
1515		 */
1516		extradelay -= cl->last_pkttime_;
1517#endif
1518		if (extradelay > 0) {
1519			TV_ADD_DELTA(&cl->undertime_, extradelay, &cl->undertime_);
1520			delay += extradelay;
1521		}
1522
1523		cl->sleeping_ = 1;
1524		cl->stats_.delays++;
1525
1526		/*
1527		 * Since packets are phased randomly with respect to the
1528		 * clock, 1 tick (the next clock tick) can be an arbitrarily
1529		 * short time so we have to wait for at least two ticks.
1530		 * NOTE:  If there's no other traffic, we need the timer as
1531		 * a 'backstop' to restart this class.
1532		 */
1533		if (delay > tick * 2) {
1534#ifdef __FreeBSD__
1535			/* FreeBSD rounds up the tick */
1536			t = hzto(&cl->undertime_);
1537#else
1538			/* other BSDs round down the tick */
1539			t = hzto(&cl->undertime_) + 1;
1540#endif
1541		} else
1542			t = 2;
1543		CALLOUT_RESET(&cl->callout_, t,
1544			      (timeout_t *)rmc_restart, (caddr_t)cl);
1545	}
1546}
1547
1548/*
1549 * void
1550 * rmc_restart() - is just a helper routine for rmc_delay_action -- it is
 1551 *	called by the system timer code & is responsible for checking if the
1552 *	class is still sleeping (it might have been restarted as a side
1553 *	effect of the queue scan on a packet arrival) and, if so, restarting
1554 *	output for the class.  Inspecting the class state & restarting output
1555 *	require locking the class structure.  In general the driver is
1556 *	responsible for locking but this is the only routine that is not
1557 *	called directly or indirectly from the interface driver so it has
 1558 *	called directly or indirectly from the interface driver so it has to
1559 *	by raising IPL to splimp so that's what's implemented here.  On a
1560 *	different system this would probably need to be changed.
1561 *
1562 *	Returns:	NONE
1563 */
1564
1565static void
1566rmc_restart(struct rm_class *cl)
1567{
1568	struct rm_ifdat	*ifd = cl->ifdat_;
1569	int		 s;
1570
1571#ifdef __NetBSD__
1572	s = splnet();
1573#else
1574	s = splimp();
1575#endif
1576	IFQ_LOCK(ifd->ifq_);
1577	if (cl->sleeping_) {
1578		cl->sleeping_ = 0;
1579		cl->undertime_.tv_sec = 0;
1580
1581		if (ifd->queued_ < ifd->maxqueued_ && ifd->restart != NULL) {
1582			CBQTRACE(rmc_restart, 'trts', cl->stats_.handle);
1583			(ifd->restart)(ifd->ifq_);
1584		}
1585	}
1586	IFQ_UNLOCK(ifd->ifq_);
1587	splx(s);
1588}
1589
1590/*
1591 * void
 1592 * rmc_root_overlimit(struct rm_class *cl) - This is the generic overlimit
1593 *	handling routine for the root class of the link sharing structure.
1594 *
1595 *	Returns: NONE
1596 */
1597
1598static void
1599rmc_root_overlimit(struct rm_class *cl, struct rm_class *borrow)
1600{
1601    panic("rmc_root_overlimit");
1602}
1603
1604/*
1605 * Packet Queue handling routines.  Eventually, this is to localize the
1606 *	effects on the code whether queues are red queues or droptail
1607 *	queues.
1608 */
1609
1610static int
1611_rmc_addq(rm_class_t *cl, mbuf_t *m)
1612{
1613#ifdef ALTQ_RIO
1614	if (q_is_rio(cl->q_))
1615		return rio_addq((rio_t *)cl->red_, cl->q_, m, cl->pktattr_);
1616#endif
1617#ifdef ALTQ_RED
1618	if (q_is_red(cl->q_))
1619		return red_addq(cl->red_, cl->q_, m, cl->pktattr_);
1620#endif /* ALTQ_RED */
1621
1622	if (cl->flags_ & RMCF_CLEARDSCP)
1623		write_dsfield(m, cl->pktattr_, 0);
1624
1625	_addq(cl->q_, m);
1626	return (0);
1627}
1628
1629/* note: _rmc_dropq is not called for red */
1630static void
1631_rmc_dropq(rm_class_t *cl)
1632{
1633	mbuf_t	*m;
1634
1635	if ((m = _getq(cl->q_)) != NULL)
1636		m_freem(m);
1637}
1638
1639static mbuf_t *
1640_rmc_getq(rm_class_t *cl)
1641{
1642#ifdef ALTQ_RIO
1643	if (q_is_rio(cl->q_))
1644		return rio_getq((rio_t *)cl->red_, cl->q_);
1645#endif
1646#ifdef ALTQ_RED
1647	if (q_is_red(cl->q_))
1648		return red_getq(cl->red_, cl->q_);
1649#endif
1650	return _getq(cl->q_);
1651}
1652
1653static mbuf_t *
1654_rmc_pollq(rm_class_t *cl)
1655{
1656	return qhead(cl->q_);
1657}
1658
1659#ifdef CBQ_TRACE
1660
1661struct cbqtrace		 cbqtrace_buffer[NCBQTRACE+1];
1662struct cbqtrace		*cbqtrace_ptr = NULL;
1663int			 cbqtrace_count;
1664
1665/*
1666 * DDB hook to trace cbq events:
1667 *  the last 1024 events are held in a circular buffer.
1668 *  use "call cbqtrace_dump(N)" to display 20 events from Nth event.
1669 */
1670void cbqtrace_dump(int);
1671static char *rmc_funcname(void *);
1672
1673static struct rmc_funcs {
1674	void	*func;
1675	char	*name;
1676} rmc_funcs[] =
1677{
1678	rmc_init,		"rmc_init",
1679	rmc_queue_packet,	"rmc_queue_packet",
1680	rmc_under_limit,	"rmc_under_limit",
1681	rmc_update_class_util,	"rmc_update_class_util",
1682	rmc_delay_action,	"rmc_delay_action",
1683	rmc_restart,		"rmc_restart",
1684	_rmc_wrr_dequeue_next,	"_rmc_wrr_dequeue_next",
1685	NULL,			NULL
1686};
1687
1688static char *rmc_funcname(void *func)
1689{
1690	struct rmc_funcs *fp;
1691
1692	for (fp = rmc_funcs; fp->func != NULL; fp++)
1693		if (fp->func == func)
1694			return (fp->name);
1695	return ("unknown");
1696}
1697
1698void cbqtrace_dump(int counter)
1699{
1700	int	 i, *p;
1701	char	*cp;
1702
1703	counter = counter % NCBQTRACE;
1704	p = (int *)&cbqtrace_buffer[counter];
1705
1706	for (i=0; i<20; i++) {
1707		printf("[0x%x] ", *p++);
1708		printf("%s: ", rmc_funcname((void *)*p++));
1709		cp = (char *)p++;
1710		printf("%c%c%c%c: ", cp[0], cp[1], cp[2], cp[3]);
1711		printf("%d\n",*p++);
1712
1713		if (p >= (int *)&cbqtrace_buffer[NCBQTRACE])
1714			p = (int *)cbqtrace_buffer;
1715	}
1716}
1717#endif /* CBQ_TRACE */
1718#endif /* ALTQ_CBQ */
1719
1720#if defined(ALTQ_CBQ) || defined(ALTQ_RED) || defined(ALTQ_RIO) || defined(ALTQ_HFSC) || defined(ALTQ_PRIQ)
1721#if !defined(__GNUC__) || defined(ALTQ_DEBUG)
1722
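/*
 * The class queues below are circular singly-linked mbuf chains: qtail(q)
 * points at the newest packet and qtail(q)->m_nextpkt points back at the
 * oldest, so both enqueue at the tail and dequeue at the head are O(1):
 *
 *	qtail(q) --> mN --> m1 --> m2 --> ... --> mN (wraps around)
 */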
1723void
1724_addq(class_queue_t *q, mbuf_t *m)
1725{
1726        mbuf_t	*m0;
1727
1728	if ((m0 = qtail(q)) != NULL)
1729		m->m_nextpkt = m0->m_nextpkt;
1730	else
1731		m0 = m;
1732	m0->m_nextpkt = m;
1733	qtail(q) = m;
1734	qlen(q)++;
1735}
1736
1737mbuf_t *
1738_getq(class_queue_t *q)
1739{
1740	mbuf_t	*m, *m0;
1741
1742	if ((m = qtail(q)) == NULL)
1743		return (NULL);
1744	if ((m0 = m->m_nextpkt) != m)
1745		m->m_nextpkt = m0->m_nextpkt;
1746	else {
1747		ASSERT(qlen(q) == 1);
1748		qtail(q) = NULL;
1749	}
1750	qlen(q)--;
1751	m0->m_nextpkt = NULL;
1752	return (m0);
1753}
1754
1755/* drop a packet at the tail of the queue */
1756mbuf_t *
1757_getq_tail(class_queue_t *q)
1758{
1759	mbuf_t	*m, *m0, *prev;
1760
1761	if ((m = m0 = qtail(q)) == NULL)
1762		return NULL;
1763	do {
1764		prev = m0;
1765		m0 = m0->m_nextpkt;
1766	} while (m0 != m);
1767	prev->m_nextpkt = m->m_nextpkt;
1768	if (prev == m)  {
1769		ASSERT(qlen(q) == 1);
1770		qtail(q) = NULL;
1771	} else
1772		qtail(q) = prev;
1773	qlen(q)--;
1774	m->m_nextpkt = NULL;
1775	return (m);
1776}
1777
1778/* randomly select a packet in the queue */
1779mbuf_t *
1780_getq_random(class_queue_t *q)
1781{
1782	struct mbuf	*m;
1783	int		 i, n;
1784
1785	if ((m = qtail(q)) == NULL)
1786		return NULL;
1787	if (m->m_nextpkt == m) {
1788		ASSERT(qlen(q) == 1);
1789		qtail(q) = NULL;
1790	} else {
1791		struct mbuf *prev = NULL;
1792
1793		n = arc4random() % qlen(q) + 1;
1794		for (i = 0; i < n; i++) {
1795			prev = m;
1796			m = m->m_nextpkt;
1797		}
1798		prev->m_nextpkt = m->m_nextpkt;
1799		if (m == qtail(q))
1800			qtail(q) = prev;
1801	}
1802	qlen(q)--;
1803	m->m_nextpkt = NULL;
1804	return (m);
1805}
1806
1807void
1808_removeq(class_queue_t *q, mbuf_t *m)
1809{
1810	mbuf_t	*m0, *prev;
1811
1812	m0 = qtail(q);
1813	do {
1814		prev = m0;
1815		m0 = m0->m_nextpkt;
1816	} while (m0 != m);
1817	prev->m_nextpkt = m->m_nextpkt;
1818	if (prev == m)
1819		qtail(q) = NULL;
1820	else if (qtail(q) == m)
1821		qtail(q) = prev;
1822	qlen(q)--;
1823}
1824
1825void
1826_flushq(class_queue_t *q)
1827{
1828	mbuf_t *m;
1829
1830	while ((m = _getq(q)) != NULL)
1831		m_freem(m);
1832	ASSERT(qlen(q) == 0);
1833}
1834
1835#endif /* !__GNUC__ || ALTQ_DEBUG */
1836#endif /* ALTQ_CBQ || ALTQ_RED || ALTQ_RIO || ALTQ_HFSC || ALTQ_PRIQ */
1837