/*	$FreeBSD$	*/
/*	$KAME: altq_hfsc.c,v 1.24 2003/12/05 05:40:46 kjc Exp $	*/

/*
 * Copyright (c) 1997-1999 Carnegie Mellon University. All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation is hereby granted (including for commercial or
 * for-profit use), provided that both the copyright notice and this
 * permission notice appear in all copies of the software, derivative
 * works, or modified versions, and any portions thereof.
 *
 * THIS SOFTWARE IS EXPERIMENTAL AND IS KNOWN TO HAVE BUGS, SOME OF
 * WHICH MAY HAVE SERIOUS CONSEQUENCES.  CARNEGIE MELLON PROVIDES THIS
 * SOFTWARE IN ITS ``AS IS'' CONDITION, AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * Carnegie Mellon encourages (but does not require) users of this
 * software to return any improvements or extensions that they make,
 * and to grant Carnegie Mellon the rights to redistribute these
 * changes without encumbrance.
 */
/*
 * H-FSC is described in Proceedings of SIGCOMM'97,
 * "A Hierarchical Fair Service Curve Algorithm for Link-Sharing,
 * Real-Time and Priority Service"
 * by Ion Stoica, Hui Zhang, and T. S. Eugene Ng.
 *
 * Oleg Cherevko <olwi@aq.ml.com.ua> added the upperlimit for link-sharing.
 * when a class has an upperlimit, the fit-time is computed from the
 * upperlimit service curve.  the link-sharing scheduler does not schedule
 * a class whose fit-time exceeds the current time.
 */
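/*
 * each class may carry up to three service curves: a real-time curve
 * (rsc), a link-sharing curve (fsc) and an upper-limit curve (usc).
 * hfsc_class_create() below installs a curve only when at least one of
 * its two slopes (m1, m2) is non-zero.
 */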

#if defined(__FreeBSD__) || defined(__NetBSD__)
#include "opt_altq.h"
#include "opt_inet.h"
#ifdef __FreeBSD__
#include "opt_inet6.h"
#endif
#endif /* __FreeBSD__ || __NetBSD__ */

#ifdef ALTQ_HFSC  /* hfsc is enabled by ALTQ_HFSC option in opt_altq.h */

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/queue.h>
#if 1 /* ALTQ3_COMPAT */
#include <sys/sockio.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#endif /* ALTQ3_COMPAT */

#include <net/if.h>
#include <net/if_var.h>
#include <netinet/in.h>

#include <netpfil/pf/pf.h>
#include <netpfil/pf/pf_altq.h>
#include <netpfil/pf/pf_mtag.h>
#include <altq/altq.h>
#include <altq/altq_hfsc.h>
#ifdef ALTQ3_COMPAT
#include <altq/altq_conf.h>
#endif

/*
 * function prototypes
 */
static int			 hfsc_clear_interface(struct hfsc_if *);
static int			 hfsc_request(struct ifaltq *, int, void *);
static void			 hfsc_purge(struct hfsc_if *);
static struct hfsc_class	*hfsc_class_create(struct hfsc_if *,
    struct service_curve *, struct service_curve *, struct service_curve *,
    struct hfsc_class *, int, int, int);
static int			 hfsc_class_destroy(struct hfsc_class *);
static struct hfsc_class	*hfsc_nextclass(struct hfsc_class *);
static int			 hfsc_enqueue(struct ifaltq *, struct mbuf *,
				    struct altq_pktattr *);
static struct mbuf		*hfsc_dequeue(struct ifaltq *, int);

static int		 hfsc_addq(struct hfsc_class *, struct mbuf *);
static struct mbuf	*hfsc_getq(struct hfsc_class *);
static struct mbuf	*hfsc_pollq(struct hfsc_class *);
static void		 hfsc_purgeq(struct hfsc_class *);

static void		 update_cfmin(struct hfsc_class *);
static void		 set_active(struct hfsc_class *, int);
static void		 set_passive(struct hfsc_class *);

static void		 init_ed(struct hfsc_class *, int);
static void		 update_ed(struct hfsc_class *, int);
static void		 update_d(struct hfsc_class *, int);
static void		 init_vf(struct hfsc_class *, int);
static void		 update_vf(struct hfsc_class *, int, u_int64_t);
static void		 ellist_insert(struct hfsc_class *);
static void		 ellist_remove(struct hfsc_class *);
static void		 ellist_update(struct hfsc_class *);
struct hfsc_class	*hfsc_get_mindl(struct hfsc_if *, u_int64_t);
static void		 actlist_insert(struct hfsc_class *);
static void		 actlist_remove(struct hfsc_class *);
static void		 actlist_update(struct hfsc_class *);

static struct hfsc_class	*actlist_firstfit(struct hfsc_class *,
				    u_int64_t);

static __inline u_int64_t	seg_x2y(u_int64_t, u_int64_t);
static __inline u_int64_t	seg_y2x(u_int64_t, u_int64_t);
static __inline u_int64_t	m2sm(u_int);
static __inline u_int64_t	m2ism(u_int);
static __inline u_int64_t	d2dx(u_int);
static u_int			sm2m(u_int64_t);
static u_int			dx2d(u_int64_t);

static void		sc2isc(struct service_curve *, struct internal_sc *);
static void		rtsc_init(struct runtime_sc *, struct internal_sc *,
			    u_int64_t, u_int64_t);
static u_int64_t	rtsc_y2x(struct runtime_sc *, u_int64_t);
static u_int64_t	rtsc_x2y(struct runtime_sc *, u_int64_t);
static void		rtsc_min(struct runtime_sc *, struct internal_sc *,
			    u_int64_t, u_int64_t);

static void			 get_class_stats(struct hfsc_classstats *,
				    struct hfsc_class *);
static struct hfsc_class	*clh_to_clp(struct hfsc_if *, u_int32_t);


#ifdef ALTQ3_COMPAT
static struct hfsc_if *hfsc_attach(struct ifaltq *, u_int);
static int hfsc_detach(struct hfsc_if *);
static int hfsc_class_modify(struct hfsc_class *, struct service_curve *,
    struct service_curve *, struct service_curve *);

static int hfsccmd_if_attach(struct hfsc_attach *);
static int hfsccmd_if_detach(struct hfsc_interface *);
static int hfsccmd_add_class(struct hfsc_add_class *);
static int hfsccmd_delete_class(struct hfsc_delete_class *);
static int hfsccmd_modify_class(struct hfsc_modify_class *);
static int hfsccmd_add_filter(struct hfsc_add_filter *);
static int hfsccmd_delete_filter(struct hfsc_delete_filter *);
static int hfsccmd_class_stats(struct hfsc_class_stats *);

altqdev_decl(hfsc);
#endif /* ALTQ3_COMPAT */

/*
 * macros
 */
#define	is_a_parent_class(cl)	((cl)->cl_children != NULL)

#define	HT_INFINITY	0xffffffffffffffffLL	/* infinite time value */

#ifdef ALTQ3_COMPAT
/* hif_list keeps all hfsc_if's allocated. */
static struct hfsc_if *hif_list = NULL;
#endif /* ALTQ3_COMPAT */

int
hfsc_pfattach(struct pf_altq *a)
{
	struct ifnet *ifp;
	int s, error;

	if ((ifp = ifunit(a->ifname)) == NULL || a->altq_disc == NULL)
		return (EINVAL);
#ifdef __NetBSD__
	s = splnet();
#else
	s = splimp();
#endif
	error = altq_attach(&ifp->if_snd, ALTQT_HFSC, a->altq_disc,
	    hfsc_enqueue, hfsc_dequeue, hfsc_request, NULL, NULL);
	splx(s);
	return (error);
}

int
hfsc_add_altq(struct pf_altq *a)
{
	struct hfsc_if *hif;
	struct ifnet *ifp;

	if ((ifp = ifunit(a->ifname)) == NULL)
		return (EINVAL);
	if (!ALTQ_IS_READY(&ifp->if_snd))
		return (ENODEV);

	hif = malloc(sizeof(struct hfsc_if), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (hif == NULL)
		return (ENOMEM);

	TAILQ_INIT(&hif->hif_eligible);
	hif->hif_ifq = &ifp->if_snd;

	/* keep the state in pf_altq */
	a->altq_disc = hif;

	return (0);
}

int
hfsc_remove_altq(struct pf_altq *a)
{
	struct hfsc_if *hif;

	if ((hif = a->altq_disc) == NULL)
		return (EINVAL);
	a->altq_disc = NULL;

	(void)hfsc_clear_interface(hif);
	(void)hfsc_class_destroy(hif->hif_rootclass);

	free(hif, M_DEVBUF);

	return (0);
}

int
hfsc_add_queue(struct pf_altq *a)
{
	struct hfsc_if *hif;
	struct hfsc_class *cl, *parent;
	struct hfsc_opts *opts;
	struct service_curve rtsc, lssc, ulsc;

	if ((hif = a->altq_disc) == NULL)
		return (EINVAL);

	opts = &a->pq_u.hfsc_opts;

	if (a->parent_qid == HFSC_NULLCLASS_HANDLE &&
	    hif->hif_rootclass == NULL)
		parent = NULL;
	else if ((parent = clh_to_clp(hif, a->parent_qid)) == NULL)
		return (EINVAL);

	if (a->qid == 0)
		return (EINVAL);

	if (clh_to_clp(hif, a->qid) != NULL)
		return (EBUSY);

	rtsc.m1 = opts->rtsc_m1;
	rtsc.d  = opts->rtsc_d;
	rtsc.m2 = opts->rtsc_m2;
	lssc.m1 = opts->lssc_m1;
	lssc.d  = opts->lssc_d;
	lssc.m2 = opts->lssc_m2;
	ulsc.m1 = opts->ulsc_m1;
	ulsc.d  = opts->ulsc_d;
	ulsc.m2 = opts->ulsc_m2;

	cl = hfsc_class_create(hif, &rtsc, &lssc, &ulsc,
	    parent, a->qlimit, opts->flags, a->qid);
	if (cl == NULL)
		return (ENOMEM);

	return (0);
}

int
hfsc_remove_queue(struct pf_altq *a)
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;

	if ((hif = a->altq_disc) == NULL)
		return (EINVAL);

	if ((cl = clh_to_clp(hif, a->qid)) == NULL)
		return (EINVAL);

	return (hfsc_class_destroy(cl));
}

int
hfsc_getqstats(struct pf_altq *a, void *ubuf, int *nbytes)
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;
	struct hfsc_classstats stats;
	int error = 0;

	if ((hif = altq_lookup(a->ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if ((cl = clh_to_clp(hif, a->qid)) == NULL)
		return (EINVAL);

	if (*nbytes < sizeof(stats))
		return (EINVAL);

	get_class_stats(&stats, cl);

	if ((error = copyout((caddr_t)&stats, ubuf, sizeof(stats))) != 0)
		return (error);
	*nbytes = sizeof(stats);
	return (0);
}

/*
 * bring the interface back to the initial state by discarding
 * all the filters and classes except the root class.
 */
static int
hfsc_clear_interface(struct hfsc_if *hif)
{
	struct hfsc_class	*cl;

#ifdef ALTQ3_COMPAT
	/* free the filters for this interface */
	acc_discard_filters(&hif->hif_classifier, NULL, 1);
#endif

	/* clear out the classes */
	while (hif->hif_rootclass != NULL &&
	    (cl = hif->hif_rootclass->cl_children) != NULL) {
		/*
		 * remove the first leaf class found in the hierarchy
		 * then start over
		 */
		for (; cl != NULL; cl = hfsc_nextclass(cl)) {
			if (!is_a_parent_class(cl)) {
				(void)hfsc_class_destroy(cl);
				break;
			}
		}
	}

	return (0);
}

static int
hfsc_request(struct ifaltq *ifq, int req, void *arg)
{
	struct hfsc_if	*hif = (struct hfsc_if *)ifq->altq_disc;

	IFQ_LOCK_ASSERT(ifq);

	switch (req) {
	case ALTRQ_PURGE:
		hfsc_purge(hif);
		break;
	}
	return (0);
}

/* discard all the queued packets on the interface */
static void
hfsc_purge(struct hfsc_if *hif)
{
	struct hfsc_class *cl;

	for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl))
		if (!qempty(cl->cl_q))
			hfsc_purgeq(cl);
	if (ALTQ_IS_ENABLED(hif->hif_ifq))
		hif->hif_ifq->ifq_len = 0;
}

struct hfsc_class *
hfsc_class_create(struct hfsc_if *hif, struct service_curve *rsc,
    struct service_curve *fsc, struct service_curve *usc,
    struct hfsc_class *parent, int qlimit, int flags, int qid)
{
	struct hfsc_class *cl, *p;
	int i, s;

	if (hif->hif_classes >= HFSC_MAX_CLASSES)
		return (NULL);

#ifndef ALTQ_RED
	if (flags & HFCF_RED) {
#ifdef ALTQ_DEBUG
		printf("hfsc_class_create: RED not configured for HFSC!\n");
#endif
		return (NULL);
	}
#endif

	cl = malloc(sizeof(struct hfsc_class), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cl == NULL)
		return (NULL);

	cl->cl_q = malloc(sizeof(class_queue_t), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cl->cl_q == NULL)
		goto err_ret;

	TAILQ_INIT(&cl->cl_actc);

	if (qlimit == 0)
		qlimit = 50;  /* use default */
	qlimit(cl->cl_q) = qlimit;
	qtype(cl->cl_q) = Q_DROPTAIL;
	qlen(cl->cl_q) = 0;
	cl->cl_flags = flags;
#ifdef ALTQ_RED
	if (flags & (HFCF_RED|HFCF_RIO)) {
		int red_flags, red_pkttime;
		u_int m2;

		m2 = 0;
		if (rsc != NULL && rsc->m2 > m2)
			m2 = rsc->m2;
		if (fsc != NULL && fsc->m2 > m2)
			m2 = fsc->m2;
		if (usc != NULL && usc->m2 > m2)
			m2 = usc->m2;

		red_flags = 0;
		if (flags & HFCF_ECN)
			red_flags |= REDF_ECN;
#ifdef ALTQ_RIO
		if (flags & HFCF_CLEARDSCP)
			red_flags |= RIOF_CLEARDSCP;
#endif
		if (m2 < 8)
			red_pkttime = 1000 * 1000 * 1000; /* 1 sec */
		else
			red_pkttime = (int64_t)hif->hif_ifq->altq_ifp->if_mtu
				* 1000 * 1000 * 1000 / (m2 / 8);
		if (flags & HFCF_RED) {
			cl->cl_red = red_alloc(0, 0,
			    qlimit(cl->cl_q) * 10/100,
			    qlimit(cl->cl_q) * 30/100,
			    red_flags, red_pkttime);
			if (cl->cl_red != NULL)
				qtype(cl->cl_q) = Q_RED;
		}
#ifdef ALTQ_RIO
		else {
			cl->cl_red = (red_t *)rio_alloc(0, NULL,
			    red_flags, red_pkttime);
			if (cl->cl_red != NULL)
				qtype(cl->cl_q) = Q_RIO;
		}
#endif
	}
#endif /* ALTQ_RED */

	if (rsc != NULL && (rsc->m1 != 0 || rsc->m2 != 0)) {
		cl->cl_rsc = malloc(sizeof(struct internal_sc),
		    M_DEVBUF, M_NOWAIT);
		if (cl->cl_rsc == NULL)
			goto err_ret;
		sc2isc(rsc, cl->cl_rsc);
		rtsc_init(&cl->cl_deadline, cl->cl_rsc, 0, 0);
		rtsc_init(&cl->cl_eligible, cl->cl_rsc, 0, 0);
	}
	if (fsc != NULL && (fsc->m1 != 0 || fsc->m2 != 0)) {
		cl->cl_fsc = malloc(sizeof(struct internal_sc),
		    M_DEVBUF, M_NOWAIT);
		if (cl->cl_fsc == NULL)
			goto err_ret;
		sc2isc(fsc, cl->cl_fsc);
		rtsc_init(&cl->cl_virtual, cl->cl_fsc, 0, 0);
	}
	if (usc != NULL && (usc->m1 != 0 || usc->m2 != 0)) {
		cl->cl_usc = malloc(sizeof(struct internal_sc),
		    M_DEVBUF, M_NOWAIT);
		if (cl->cl_usc == NULL)
			goto err_ret;
		sc2isc(usc, cl->cl_usc);
		rtsc_init(&cl->cl_ulimit, cl->cl_usc, 0, 0);
	}

	cl->cl_id = hif->hif_classid++;
	cl->cl_handle = qid;
	cl->cl_hif = hif;
	cl->cl_parent = parent;

#ifdef __NetBSD__
	s = splnet();
#else
	s = splimp();
#endif
	IFQ_LOCK(hif->hif_ifq);
	hif->hif_classes++;

	/*
	 * find a free slot in the class table.  if the slot matching
	 * the lower bits of qid is free, use this slot.  otherwise,
	 * use the first free slot.
	 */
	i = qid % HFSC_MAX_CLASSES;
	if (hif->hif_class_tbl[i] == NULL)
		hif->hif_class_tbl[i] = cl;
	else {
		for (i = 0; i < HFSC_MAX_CLASSES; i++)
			if (hif->hif_class_tbl[i] == NULL) {
				hif->hif_class_tbl[i] = cl;
				break;
			}
		if (i == HFSC_MAX_CLASSES) {
			IFQ_UNLOCK(hif->hif_ifq);
			splx(s);
			goto err_ret;
		}
	}

	if (flags & HFCF_DEFAULTCLASS)
		hif->hif_defaultclass = cl;

	if (parent == NULL) {
		/* this is root class */
		hif->hif_rootclass = cl;
	} else {
		/* add this class to the children list of the parent */
		if ((p = parent->cl_children) == NULL)
			parent->cl_children = cl;
		else {
			while (p->cl_siblings != NULL)
				p = p->cl_siblings;
			p->cl_siblings = cl;
		}
	}
	IFQ_UNLOCK(hif->hif_ifq);
	splx(s);

	return (cl);

 err_ret:
	if (cl->cl_red != NULL) {
#ifdef ALTQ_RIO
		if (q_is_rio(cl->cl_q))
			rio_destroy((rio_t *)cl->cl_red);
#endif
#ifdef ALTQ_RED
		if (q_is_red(cl->cl_q))
			red_destroy(cl->cl_red);
#endif
	}
	if (cl->cl_fsc != NULL)
		free(cl->cl_fsc, M_DEVBUF);
	if (cl->cl_rsc != NULL)
		free(cl->cl_rsc, M_DEVBUF);
	if (cl->cl_usc != NULL)
		free(cl->cl_usc, M_DEVBUF);
	if (cl->cl_q != NULL)
		free(cl->cl_q, M_DEVBUF);
	free(cl, M_DEVBUF);
	return (NULL);
}

static int
hfsc_class_destroy(struct hfsc_class *cl)
{
	int i, s;

	if (cl == NULL)
		return (0);

	if (is_a_parent_class(cl))
		return (EBUSY);

#ifdef __NetBSD__
	s = splnet();
#else
	s = splimp();
#endif
	IFQ_LOCK(cl->cl_hif->hif_ifq);

#ifdef ALTQ3_COMPAT
	/* delete filters referencing this class */
	acc_discard_filters(&cl->cl_hif->hif_classifier, cl, 0);
#endif /* ALTQ3_COMPAT */

	if (!qempty(cl->cl_q))
		hfsc_purgeq(cl);

	if (cl->cl_parent == NULL) {
		/* this is root class */
	} else {
		struct hfsc_class *p = cl->cl_parent->cl_children;

		if (p == cl)
			cl->cl_parent->cl_children = cl->cl_siblings;
		else do {
			if (p->cl_siblings == cl) {
				p->cl_siblings = cl->cl_siblings;
				break;
			}
		} while ((p = p->cl_siblings) != NULL);
		ASSERT(p != NULL);
	}

	for (i = 0; i < HFSC_MAX_CLASSES; i++)
		if (cl->cl_hif->hif_class_tbl[i] == cl) {
			cl->cl_hif->hif_class_tbl[i] = NULL;
			break;
		}

	cl->cl_hif->hif_classes--;
	IFQ_UNLOCK(cl->cl_hif->hif_ifq);
	splx(s);

	if (cl->cl_red != NULL) {
#ifdef ALTQ_RIO
		if (q_is_rio(cl->cl_q))
			rio_destroy((rio_t *)cl->cl_red);
#endif
#ifdef ALTQ_RED
		if (q_is_red(cl->cl_q))
			red_destroy(cl->cl_red);
#endif
	}

	IFQ_LOCK(cl->cl_hif->hif_ifq);
	if (cl == cl->cl_hif->hif_rootclass)
		cl->cl_hif->hif_rootclass = NULL;
	if (cl == cl->cl_hif->hif_defaultclass)
		cl->cl_hif->hif_defaultclass = NULL;
	IFQ_UNLOCK(cl->cl_hif->hif_ifq);

	if (cl->cl_usc != NULL)
		free(cl->cl_usc, M_DEVBUF);
	if (cl->cl_fsc != NULL)
		free(cl->cl_fsc, M_DEVBUF);
	if (cl->cl_rsc != NULL)
		free(cl->cl_rsc, M_DEVBUF);
	free(cl->cl_q, M_DEVBUF);
	free(cl, M_DEVBUF);

	return (0);
}

/*
 * hfsc_nextclass returns the next class in the tree.
 *   usage:
 *	for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl))
 *		do_something;
 */
static struct hfsc_class *
hfsc_nextclass(struct hfsc_class *cl)
{
	if (cl->cl_children != NULL)
		cl = cl->cl_children;
	else if (cl->cl_siblings != NULL)
		cl = cl->cl_siblings;
	else {
		while ((cl = cl->cl_parent) != NULL)
			if (cl->cl_siblings) {
				cl = cl->cl_siblings;
				break;
			}
	}

	return (cl);
}

/*
 * hfsc_enqueue is an enqueue function to be registered to
 * (*altq_enqueue) in struct ifaltq.
 */
static int
hfsc_enqueue(struct ifaltq *ifq, struct mbuf *m, struct altq_pktattr *pktattr)
{
	struct hfsc_if	*hif = (struct hfsc_if *)ifq->altq_disc;
	struct hfsc_class *cl;
	struct pf_mtag *t;
	int len;

	IFQ_LOCK_ASSERT(ifq);

	/* grab class set by classifier */
	if ((m->m_flags & M_PKTHDR) == 0) {
		/* should not happen */
		printf("altq: packet for %s does not have pkthdr\n",
		    ifq->altq_ifp->if_xname);
		m_freem(m);
		return (ENOBUFS);
	}
	cl = NULL;
	if ((t = pf_find_mtag(m)) != NULL)
		cl = clh_to_clp(hif, t->qid);
#ifdef ALTQ3_COMPAT
	else if ((ifq->altq_flags & ALTQF_CLASSIFY) && pktattr != NULL)
		cl = pktattr->pattr_class;
#endif
	if (cl == NULL || is_a_parent_class(cl)) {
		cl = hif->hif_defaultclass;
		if (cl == NULL) {
			m_freem(m);
			return (ENOBUFS);
		}
	}
#ifdef ALTQ3_COMPAT
	if (pktattr != NULL)
		cl->cl_pktattr = pktattr;  /* save proto hdr used by ECN */
	else
#endif
		cl->cl_pktattr = NULL;
	len = m_pktlen(m);
	if (hfsc_addq(cl, m) != 0) {
		/* drop occurred.  mbuf was freed in hfsc_addq. */
		PKTCNTR_ADD(&cl->cl_stats.drop_cnt, len);
		return (ENOBUFS);
	}
	IFQ_INC_LEN(ifq);
	cl->cl_hif->hif_packets++;

	/* successfully queued. */
	if (qlen(cl->cl_q) == 1)
		set_active(cl, m_pktlen(m));

	return (0);
}

/*
 * hfsc_dequeue is a dequeue function to be registered to
 * (*altq_dequeue) in struct ifaltq.
 *
 * note: ALTDQ_POLL returns the next packet without removing the packet
 *	from the queue.  ALTDQ_REMOVE is a normal dequeue operation.
 *	ALTDQ_REMOVE must return the same packet if called immediately
 *	after ALTDQ_POLL.
 */
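/*
 * ALTDQ_POLL stores the selected class in hif_pollcache; the following
 * ALTDQ_REMOVE then dequeues from that cached class, which is what
 * guarantees the "same packet" rule stated above.
 */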
static struct mbuf *
hfsc_dequeue(struct ifaltq *ifq, int op)
{
	struct hfsc_if	*hif = (struct hfsc_if *)ifq->altq_disc;
	struct hfsc_class *cl;
	struct mbuf *m;
	int len, next_len;
	int realtime = 0;
	u_int64_t cur_time;

	IFQ_LOCK_ASSERT(ifq);

	if (hif->hif_packets == 0)
		/* no packet in the tree */
		return (NULL);

	cur_time = read_machclk();

	if (op == ALTDQ_REMOVE && hif->hif_pollcache != NULL) {

		cl = hif->hif_pollcache;
		hif->hif_pollcache = NULL;
		/* check if the class was scheduled by real-time criteria */
		if (cl->cl_rsc != NULL)
			realtime = (cl->cl_e <= cur_time);
	} else {
		/*
		 * if there are eligible classes, use real-time criteria.
		 * find the class with the minimum deadline among
		 * the eligible classes.
		 */
		if ((cl = hfsc_get_mindl(hif, cur_time))
		    != NULL) {
			realtime = 1;
		} else {
#ifdef ALTQ_DEBUG
			int fits = 0;
#endif
			/*
			 * use link-sharing criteria
			 * get the class with the minimum vt in the hierarchy
			 */
			cl = hif->hif_rootclass;
			while (is_a_parent_class(cl)) {

				cl = actlist_firstfit(cl, cur_time);
				if (cl == NULL) {
#ifdef ALTQ_DEBUG
					if (fits > 0)
						printf("%d fit but none found\n",fits);
#endif
					return (NULL);
				}
				/*
				 * update parent's cl_cvtmin.
				 * don't update if the new vt is smaller.
				 */
				if (cl->cl_parent->cl_cvtmin < cl->cl_vt)
					cl->cl_parent->cl_cvtmin = cl->cl_vt;
#ifdef ALTQ_DEBUG
				fits++;
#endif
			}
		}

		if (op == ALTDQ_POLL) {
			hif->hif_pollcache = cl;
			m = hfsc_pollq(cl);
			return (m);
		}
	}

	m = hfsc_getq(cl);
	if (m == NULL)
		panic("hfsc_dequeue:");
	len = m_pktlen(m);
	cl->cl_hif->hif_packets--;
	IFQ_DEC_LEN(ifq);
	PKTCNTR_ADD(&cl->cl_stats.xmit_cnt, len);

	update_vf(cl, len, cur_time);
	if (realtime)
		cl->cl_cumul += len;

	if (!qempty(cl->cl_q)) {
		if (cl->cl_rsc != NULL) {
			/* update ed */
			next_len = m_pktlen(qhead(cl->cl_q));

			if (realtime)
				update_ed(cl, next_len);
			else
				update_d(cl, next_len);
		}
	} else {
		/* the class becomes passive */
		set_passive(cl);
	}

	return (m);
}

static int
hfsc_addq(struct hfsc_class *cl, struct mbuf *m)
{

#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		return rio_addq((rio_t *)cl->cl_red, cl->cl_q,
				m, cl->cl_pktattr);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		return red_addq(cl->cl_red, cl->cl_q, m, cl->cl_pktattr);
#endif
	if (qlen(cl->cl_q) >= qlimit(cl->cl_q)) {
		m_freem(m);
		return (-1);
	}

	if (cl->cl_flags & HFCF_CLEARDSCP)
		write_dsfield(m, cl->cl_pktattr, 0);

	_addq(cl->cl_q, m);

	return (0);
}

static struct mbuf *
hfsc_getq(struct hfsc_class *cl)
{
#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		return rio_getq((rio_t *)cl->cl_red, cl->cl_q);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		return red_getq(cl->cl_red, cl->cl_q);
#endif
	return _getq(cl->cl_q);
}

static struct mbuf *
hfsc_pollq(struct hfsc_class *cl)
{
	return qhead(cl->cl_q);
}

static void
hfsc_purgeq(struct hfsc_class *cl)
{
	struct mbuf *m;

	if (qempty(cl->cl_q))
		return;

	while ((m = _getq(cl->cl_q)) != NULL) {
		PKTCNTR_ADD(&cl->cl_stats.drop_cnt, m_pktlen(m));
		m_freem(m);
		cl->cl_hif->hif_packets--;
		IFQ_DEC_LEN(cl->cl_hif->hif_ifq);
	}
	ASSERT(qlen(cl->cl_q) == 0);

	update_vf(cl, 0, 0);	/* remove cl from the actlist */
	set_passive(cl);
}

static void
set_active(struct hfsc_class *cl, int len)
{
	if (cl->cl_rsc != NULL)
		init_ed(cl, len);
	if (cl->cl_fsc != NULL)
		init_vf(cl, len);

	cl->cl_stats.period++;
}

static void
set_passive(struct hfsc_class *cl)
{
	if (cl->cl_rsc != NULL)
		ellist_remove(cl);

	/*
	 * actlist is now handled in update_vf() so that update_vf(cl, 0, 0)
	 * needs to be called explicitly to remove a class from actlist
	 */
}

static void
init_ed(struct hfsc_class *cl, int next_len)
{
	u_int64_t cur_time;

	cur_time = read_machclk();

	/* update the deadline curve */
	rtsc_min(&cl->cl_deadline, cl->cl_rsc, cur_time, cl->cl_cumul);

	/*
	 * update the eligible curve.
	 * for concave, it is equal to the deadline curve.
	 * for convex, it is a linear curve with slope m2.
	 */
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc->sm1 <= cl->cl_rsc->sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}

	/* compute e and d */
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	ellist_insert(cl);
}

static void
update_ed(struct hfsc_class *cl, int next_len)
{
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	ellist_update(cl);
}

static void
update_d(struct hfsc_class *cl, int next_len)
{
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
}

static void
init_vf(struct hfsc_class *cl, int len)
{
	struct hfsc_class *max_cl, *p;
	u_int64_t vt, f, cur_time;
	int go_active;

	cur_time = 0;
	go_active = 1;
	for ( ; cl->cl_parent != NULL; cl = cl->cl_parent) {

		if (go_active && cl->cl_nactive++ == 0)
			go_active = 1;
		else
			go_active = 0;

		if (go_active) {
			max_cl = TAILQ_LAST(&cl->cl_parent->cl_actc, acthead);
			if (max_cl != NULL) {
				/*
				 * set vt to the average of the min and max
				 * classes.  if the parent's period didn't
				 * change, don't decrease vt of the class.
				 */
				vt = max_cl->cl_vt;
				if (cl->cl_parent->cl_cvtmin != 0)
					vt = (cl->cl_parent->cl_cvtmin + vt)/2;

				if (cl->cl_parent->cl_vtperiod !=
				    cl->cl_parentperiod || vt > cl->cl_vt)
					cl->cl_vt = vt;
			} else {
				/*
				 * first child for a new parent backlog period.
				 * add parent's cvtmax to vtoff of children
				 * to make a new vt (vtoff + vt) larger than
				 * the vt in the last period for all children.
				 */
				vt = cl->cl_parent->cl_cvtmax;
				for (p = cl->cl_parent->cl_children; p != NULL;
				     p = p->cl_siblings)
					p->cl_vtoff += vt;
				cl->cl_vt = 0;
				cl->cl_parent->cl_cvtmax = 0;
				cl->cl_parent->cl_cvtmin = 0;
			}
			cl->cl_initvt = cl->cl_vt;

			/* update the virtual curve */
			vt = cl->cl_vt + cl->cl_vtoff;
			rtsc_min(&cl->cl_virtual, cl->cl_fsc, vt, cl->cl_total);
			if (cl->cl_virtual.x == vt) {
				cl->cl_virtual.x -= cl->cl_vtoff;
				cl->cl_vtoff = 0;
			}
			cl->cl_vtadj = 0;

			cl->cl_vtperiod++;  /* increment vt period */
			cl->cl_parentperiod = cl->cl_parent->cl_vtperiod;
			if (cl->cl_parent->cl_nactive == 0)
				cl->cl_parentperiod++;
			cl->cl_f = 0;

			actlist_insert(cl);

			if (cl->cl_usc != NULL) {
				/* class has upper limit curve */
				if (cur_time == 0)
					cur_time = read_machclk();

				/* update the ulimit curve */
				rtsc_min(&cl->cl_ulimit, cl->cl_usc, cur_time,
				    cl->cl_total);
				/* compute myf */
				cl->cl_myf = rtsc_y2x(&cl->cl_ulimit,
				    cl->cl_total);
				cl->cl_myfadj = 0;
			}
		}

		if (cl->cl_myf > cl->cl_cfmin)
			f = cl->cl_myf;
		else
			f = cl->cl_cfmin;
		if (f != cl->cl_f) {
			cl->cl_f = f;
			update_cfmin(cl->cl_parent);
		}
	}
}

static void
update_vf(struct hfsc_class *cl, int len, u_int64_t cur_time)
{
	u_int64_t f, myf_bound, delta;
	int go_passive;

	go_passive = qempty(cl->cl_q);

	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {

		cl->cl_total += len;

		if (cl->cl_fsc == NULL || cl->cl_nactive == 0)
			continue;

		if (go_passive && --cl->cl_nactive == 0)
			go_passive = 1;
		else
			go_passive = 0;

		if (go_passive) {
			/* no more active child, going passive */

			/* update cvtmax of the parent class */
			if (cl->cl_vt > cl->cl_parent->cl_cvtmax)
				cl->cl_parent->cl_cvtmax = cl->cl_vt;

			/* remove this class from the vt list */
			actlist_remove(cl);

			update_cfmin(cl->cl_parent);

			continue;
		}

		/*
		 * update vt and f
		 */
		cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total)
		    - cl->cl_vtoff + cl->cl_vtadj;

		/*
		 * if vt of the class is smaller than cvtmin,
		 * the class was skipped in the past due to non-fit.
		 * if so, we need to adjust vtadj.
		 */
		if (cl->cl_vt < cl->cl_parent->cl_cvtmin) {
			cl->cl_vtadj += cl->cl_parent->cl_cvtmin - cl->cl_vt;
			cl->cl_vt = cl->cl_parent->cl_cvtmin;
		}

		/* update the vt list */
		actlist_update(cl);

		if (cl->cl_usc != NULL) {
			cl->cl_myf = cl->cl_myfadj
			    + rtsc_y2x(&cl->cl_ulimit, cl->cl_total);

			/*
			 * if myf lags behind by more than one clock tick
			 * from the current time, adjust myfadj to prevent
			 * a rate-limited class from going greedy.
			 * in a steady state under rate-limiting, myf
			 * fluctuates within one clock tick.
			 */
			myf_bound = cur_time - machclk_per_tick;
			if (cl->cl_myf < myf_bound) {
				delta = cur_time - cl->cl_myf;
				cl->cl_myfadj += delta;
				cl->cl_myf += delta;
			}
		}

		/* cl_f is max(cl_myf, cl_cfmin) */
		if (cl->cl_myf > cl->cl_cfmin)
			f = cl->cl_myf;
		else
			f = cl->cl_cfmin;
		if (f != cl->cl_f) {
			cl->cl_f = f;
			update_cfmin(cl->cl_parent);
		}
	}
}

static void
update_cfmin(struct hfsc_class *cl)
{
	struct hfsc_class *p;
	u_int64_t cfmin;

	if (TAILQ_EMPTY(&cl->cl_actc)) {
		cl->cl_cfmin = 0;
		return;
	}
	cfmin = HT_INFINITY;
	TAILQ_FOREACH(p, &cl->cl_actc, cl_actlist) {
		if (p->cl_f == 0) {
			cl->cl_cfmin = 0;
			return;
		}
		if (p->cl_f < cfmin)
			cfmin = p->cl_f;
	}
	cl->cl_cfmin = cfmin;
}

/*
 * TAILQ based ellist and actlist implementation
 * (ion wanted to make a calendar queue based implementation)
 */
/*
 * eligible list holds backlogged classes being sorted by their eligible times.
 * there is one eligible list per interface.
 */

static void
ellist_insert(struct hfsc_class *cl)
{
	struct hfsc_if	*hif = cl->cl_hif;
	struct hfsc_class *p;

	/* check the last entry first */
	if ((p = TAILQ_LAST(&hif->hif_eligible, elighead)) == NULL ||
	    p->cl_e <= cl->cl_e) {
		TAILQ_INSERT_TAIL(&hif->hif_eligible, cl, cl_ellist);
		return;
	}

	TAILQ_FOREACH(p, &hif->hif_eligible, cl_ellist) {
		if (cl->cl_e < p->cl_e) {
			TAILQ_INSERT_BEFORE(p, cl, cl_ellist);
			return;
		}
	}
	ASSERT(0); /* should not reach here */
}

static void
ellist_remove(struct hfsc_class *cl)
{
	struct hfsc_if	*hif = cl->cl_hif;

	TAILQ_REMOVE(&hif->hif_eligible, cl, cl_ellist);
}

static void
ellist_update(struct hfsc_class *cl)
{
	struct hfsc_if	*hif = cl->cl_hif;
	struct hfsc_class *p, *last;

	/*
	 * the eligible time of a class increases monotonically.
	 * if the next entry has a larger eligible time, nothing to do.
	 */
	p = TAILQ_NEXT(cl, cl_ellist);
	if (p == NULL || cl->cl_e <= p->cl_e)
		return;

	/* check the last entry */
	last = TAILQ_LAST(&hif->hif_eligible, elighead);
	ASSERT(last != NULL);
	if (last->cl_e <= cl->cl_e) {
		TAILQ_REMOVE(&hif->hif_eligible, cl, cl_ellist);
		TAILQ_INSERT_TAIL(&hif->hif_eligible, cl, cl_ellist);
		return;
	}

	/*
	 * the new position must be between the next entry
	 * and the last entry
	 */
	while ((p = TAILQ_NEXT(p, cl_ellist)) != NULL) {
		if (cl->cl_e < p->cl_e) {
			TAILQ_REMOVE(&hif->hif_eligible, cl, cl_ellist);
			TAILQ_INSERT_BEFORE(p, cl, cl_ellist);
			return;
		}
	}
	ASSERT(0); /* should not reach here */
}

/* find the class with the minimum deadline among the eligible classes */
struct hfsc_class *
hfsc_get_mindl(struct hfsc_if *hif, u_int64_t cur_time)
{
	struct hfsc_class *p, *cl = NULL;

	TAILQ_FOREACH(p, &hif->hif_eligible, cl_ellist) {
		if (p->cl_e > cur_time)
			break;
		if (cl == NULL || p->cl_d < cl->cl_d)
			cl = p;
	}
	return (cl);
}

/*
 * active children list holds backlogged child classes being sorted
 * by their virtual time.
 * each intermediate class has one active children list.
 */

static void
actlist_insert(struct hfsc_class *cl)
{
	struct hfsc_class *p;

	/* check the last entry first */
	if ((p = TAILQ_LAST(&cl->cl_parent->cl_actc, acthead)) == NULL
	    || p->cl_vt <= cl->cl_vt) {
		TAILQ_INSERT_TAIL(&cl->cl_parent->cl_actc, cl, cl_actlist);
		return;
	}

	TAILQ_FOREACH(p, &cl->cl_parent->cl_actc, cl_actlist) {
		if (cl->cl_vt < p->cl_vt) {
			TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
			return;
		}
	}
	ASSERT(0); /* should not reach here */
}

static void
actlist_remove(struct hfsc_class *cl)
{
	TAILQ_REMOVE(&cl->cl_parent->cl_actc, cl, cl_actlist);
}

static void
actlist_update(struct hfsc_class *cl)
{
	struct hfsc_class *p, *last;

	/*
	 * the virtual time of a class increases monotonically during its
	 * backlogged period.
	 * if the next entry has a larger virtual time, nothing to do.
	 */
	p = TAILQ_NEXT(cl, cl_actlist);
	if (p == NULL || cl->cl_vt < p->cl_vt)
		return;

	/* check the last entry */
	last = TAILQ_LAST(&cl->cl_parent->cl_actc, acthead);
	ASSERT(last != NULL);
	if (last->cl_vt <= cl->cl_vt) {
		TAILQ_REMOVE(&cl->cl_parent->cl_actc, cl, cl_actlist);
		TAILQ_INSERT_TAIL(&cl->cl_parent->cl_actc, cl, cl_actlist);
		return;
	}

	/*
	 * the new position must be between the next entry
	 * and the last entry
	 */
	while ((p = TAILQ_NEXT(p, cl_actlist)) != NULL) {
		if (cl->cl_vt < p->cl_vt) {
			TAILQ_REMOVE(&cl->cl_parent->cl_actc, cl, cl_actlist);
			TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
			return;
		}
	}
	ASSERT(0); /* should not reach here */
}

static struct hfsc_class *
actlist_firstfit(struct hfsc_class *cl, u_int64_t cur_time)
{
	struct hfsc_class *p;

	TAILQ_FOREACH(p, &cl->cl_actc, cl_actlist) {
		if (p->cl_f <= cur_time)
			return (p);
	}
	return (NULL);
}

/*
 * service curve support functions
 *
 *  external service curve parameters
 *	m: bits/sec
 *	d: msec
 *  internal service curve parameters
 *	sm: (bytes/tsc_interval) << SM_SHIFT
 *	ism: (tsc_count/byte) << ISM_SHIFT
 *	dx: tsc_count
 *
 * SM_SHIFT and ISM_SHIFT are scaled in order to keep effective digits.
 * we should be able to handle 100K-1Gbps linkspeed with 200MHz-1GHz CPU
 * speed.  SM_SHIFT and ISM_SHIFT are selected to have at least 3 effective
 * digits in decimal using the following table.
 *
 *  bits/sec    100Kbps     1Mbps     10Mbps     100Mbps    1Gbps
 *  ----------+-------------------------------------------------------
 *  bytes/nsec  12.5e-6    125e-6     1250e-6    12500e-6   125000e-6
 *  sm(500MHz)  25.0e-6    250e-6     2500e-6    25000e-6   250000e-6
 *  sm(200MHz)  62.5e-6    625e-6     6250e-6    62500e-6   625000e-6
 *
 *  nsec/byte   80000      8000       800        80         8
 *  ism(500MHz) 40000      4000       400        40         4
 *  ism(200MHz) 16000      1600       160        16         1.6
 */
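/*
 * worked example (illustrative figures): with m = 10Mbps and
 * machclk_freq = 500MHz,
 *	m2sm(10000000)  = (10000000 << 24) / 8 / 500000000 = 41943
 *	                  (~2500e-6 bytes/tick scaled by 2^24, cf. the
 *	                  sm(500MHz) column above)
 *	m2ism(10000000) = (500000000 << 10) * 8 / 10000000 = 409600
 *	                  (400 ticks/byte scaled by 2^10, cf. the
 *	                  ism(500MHz) column above)
 */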
#define	SM_SHIFT	24
#define	ISM_SHIFT	10

#define	SM_MASK		((1LL << SM_SHIFT) - 1)
#define	ISM_MASK	((1LL << ISM_SHIFT) - 1)

static __inline u_int64_t
seg_x2y(u_int64_t x, u_int64_t sm)
{
	u_int64_t y;

	/*
	 * compute
	 *	y = x * sm >> SM_SHIFT
	 * but divide it for the upper and lower bits to avoid overflow
	 */
	y = (x >> SM_SHIFT) * sm + (((x & SM_MASK) * sm) >> SM_SHIFT);
	return (y);
}

static __inline u_int64_t
seg_y2x(u_int64_t y, u_int64_t ism)
{
	u_int64_t x;

	if (y == 0)
		x = 0;
	else if (ism == HT_INFINITY)
		x = HT_INFINITY;
	else {
		x = (y >> ISM_SHIFT) * ism
		    + (((y & ISM_MASK) * ism) >> ISM_SHIFT);
	}
	return (x);
}

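/* convert m (bits/sec) into sm: bytes per machclk tick, scaled by SM_SHIFT */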
static __inline u_int64_t
m2sm(u_int m)
{
	u_int64_t sm;

	sm = ((u_int64_t)m << SM_SHIFT) / 8 / machclk_freq;
	return (sm);
}

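/* convert m (bits/sec) into ism: machclk ticks per byte, scaled by ISM_SHIFT */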
static __inline u_int64_t
m2ism(u_int m)
{
	u_int64_t ism;

	if (m == 0)
		ism = HT_INFINITY;
	else
		ism = ((u_int64_t)machclk_freq << ISM_SHIFT) * 8 / m;
	return (ism);
}

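/* convert d (msec) into dx: machclk ticks */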
static __inline u_int64_t
d2dx(u_int d)
{
	u_int64_t dx;

	dx = ((u_int64_t)d * machclk_freq) / 1000;
	return (dx);
}

1429130365Smlaierstatic u_int
1430130365Smlaiersm2m(u_int64_t sm)
1431130365Smlaier{
1432130365Smlaier	u_int64_t m;
1433130365Smlaier
1434130365Smlaier	m = (sm * 8 * machclk_freq) >> SM_SHIFT;
1435130365Smlaier	return ((u_int)m);
1436130365Smlaier}
1437130365Smlaier
1438130365Smlaierstatic u_int
1439130365Smlaierdx2d(u_int64_t dx)
1440130365Smlaier{
1441130365Smlaier	u_int64_t d;
1442130365Smlaier
1443130365Smlaier	d = dx * 1000 / machclk_freq;
1444130365Smlaier	return ((u_int)d);
1445130365Smlaier}
1446130365Smlaier
1447130365Smlaierstatic void
1448130365Smlaiersc2isc(struct service_curve *sc, struct internal_sc *isc)
1449130365Smlaier{
1450130365Smlaier	isc->sm1 = m2sm(sc->m1);
1451130365Smlaier	isc->ism1 = m2ism(sc->m1);
1452130365Smlaier	isc->dx = d2dx(sc->d);
1453130365Smlaier	isc->dy = seg_x2y(isc->dx, isc->sm1);
1454130365Smlaier	isc->sm2 = m2sm(sc->m2);
1455130365Smlaier	isc->ism2 = m2ism(sc->m2);
1456130365Smlaier}
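
/*
 * Worked example (hypothetical numbers): for an external curve
 * sc = { m1 = 10 Mbps, d = 30 msec, m2 = 1 Mbps } on a 500 MHz machine
 * clock, sc2isc() produces approximately
 *
 *	sm1  = 41943	(~2500e-6 bytes/tick)
 *	ism1 = 409600	(~400 ticks/byte)
 *	dx   = 15000000	(30 msec worth of ticks)
 *	dy   = ~37500	(bytes sent in 30 msec at 10 Mbps)
 *	sm2  = 4194, ism2 = 4096000
 *
 * so (dx, dy) marks the knee of the two-segment curve in internal units.
 */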
1457130365Smlaier
1458130365Smlaier/*
1459130365Smlaier * initialize the runtime service curve with the given internal
1460130365Smlaier * service curve starting at (x, y).
1461130365Smlaier */
1462130365Smlaierstatic void
1463130365Smlaierrtsc_init(struct runtime_sc *rtsc, struct internal_sc * isc, u_int64_t x,
1464130365Smlaier    u_int64_t y)
1465130365Smlaier{
1466130365Smlaier	rtsc->x =	x;
1467130365Smlaier	rtsc->y =	y;
1468130365Smlaier	rtsc->sm1 =	isc->sm1;
1469130365Smlaier	rtsc->ism1 =	isc->ism1;
1470130365Smlaier	rtsc->dx =	isc->dx;
1471130365Smlaier	rtsc->dy =	isc->dy;
1472130365Smlaier	rtsc->sm2 =	isc->sm2;
1473130365Smlaier	rtsc->ism2 =	isc->ism2;
1474130365Smlaier}
1475130365Smlaier
1476130365Smlaier/*
1477130365Smlaier * calculate the x-projection (time) of the runtime service curve
1478130365Smlaier * for the given y-value (amount of service)
1479130365Smlaier */
1480130365Smlaierstatic u_int64_t
1481130365Smlaierrtsc_y2x(struct runtime_sc *rtsc, u_int64_t y)
1482130365Smlaier{
1483130365Smlaier	u_int64_t	x;
1484130365Smlaier
1485130365Smlaier	if (y < rtsc->y)
1486130365Smlaier		x = rtsc->x;
1487130365Smlaier	else if (y <= rtsc->y + rtsc->dy) {
1488130365Smlaier		/* x belongs to the 1st segment */
1489130365Smlaier		if (rtsc->dy == 0)
1490130365Smlaier			x = rtsc->x + rtsc->dx;
1491130365Smlaier		else
1492130365Smlaier			x = rtsc->x + seg_y2x(y - rtsc->y, rtsc->ism1);
1493130365Smlaier	} else {
1494130365Smlaier		/* x belongs to the 2nd segment */
1495130365Smlaier		x = rtsc->x + rtsc->dx
1496130365Smlaier		    + seg_y2x(y - rtsc->y - rtsc->dy, rtsc->ism2);
1497130365Smlaier	}
1498130365Smlaier	return (x);
1499130365Smlaier}
1500130365Smlaier
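/*
 * calculate the y-projection (amount of service) of the runtime service
 * curve for the given x-value (time)
 */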
1501130365Smlaierstatic u_int64_t
1502130365Smlaierrtsc_x2y(struct runtime_sc *rtsc, u_int64_t x)
1503130365Smlaier{
1504130365Smlaier	u_int64_t	y;
1505130365Smlaier
1506130365Smlaier	if (x <= rtsc->x)
1507130365Smlaier		y = rtsc->y;
1508130365Smlaier	else if (x <= rtsc->x + rtsc->dx)
1509130365Smlaier		/* y belongs to the 1st segment */
1510130365Smlaier		y = rtsc->y + seg_x2y(x - rtsc->x, rtsc->sm1);
1511130365Smlaier	else
1512130365Smlaier		/* y belongs to the 2nd segment */
1513130365Smlaier		y = rtsc->y + rtsc->dy
1514130365Smlaier		    + seg_x2y(x - rtsc->x - rtsc->dx, rtsc->sm2);
1515130365Smlaier	return (y);
1516130365Smlaier}
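
/*
 * Illustrative evaluation (continuing the hypothetical example given
 * after sc2isc() above, with the curve anchored at x = y = 0): a point
 * inside the first segment, x = 7500000 ticks (15 msec), maps to
 * y = seg_x2y(7500000, sm1), about 18750 bytes (15 msec at 10 Mbps);
 * a point past the knee, x = 30000000 ticks (60 msec), maps to
 * y = dy + seg_x2y(15000000, sm2), about 37500 + 3750 bytes.
 * rtsc_y2x() performs the inverse mapping using ism1/ism2.
 */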
1517130365Smlaier
1518130365Smlaier/*
1519130365Smlaier * update the runtime service curve by taking the minimum of the current
1520130365Smlaier * runtime service curve and the service curve starting at (x, y).
1521130365Smlaier */
1522130365Smlaierstatic void
1523130365Smlaierrtsc_min(struct runtime_sc *rtsc, struct internal_sc *isc, u_int64_t x,
1524130365Smlaier    u_int64_t y)
1525130365Smlaier{
1526130365Smlaier	u_int64_t	y1, y2, dx, dy;
1527130365Smlaier
1528130365Smlaier	if (isc->sm1 <= isc->sm2) {
1529130365Smlaier		/* service curve is convex */
1530130365Smlaier		y1 = rtsc_x2y(rtsc, x);
1531130365Smlaier		if (y1 < y)
1532130365Smlaier			/* the current rtsc is smaller */
1533130365Smlaier			return;
1534130365Smlaier		rtsc->x = x;
1535130365Smlaier		rtsc->y = y;
1536130365Smlaier		return;
1537130365Smlaier	}
1538130365Smlaier
1539130365Smlaier	/*
1540130365Smlaier	 * service curve is concave
1541130365Smlaier	 * compute the two y values of the current rtsc
1542130365Smlaier	 *	y1: at x
1543130365Smlaier	 *	y2: at (x + dx)
1544130365Smlaier	 */
1545130365Smlaier	y1 = rtsc_x2y(rtsc, x);
1546130365Smlaier	if (y1 <= y) {
1547130365Smlaier		/* rtsc is below isc, no change to rtsc */
1548130365Smlaier		return;
1549130365Smlaier	}
1550130365Smlaier
1551130365Smlaier	y2 = rtsc_x2y(rtsc, x + isc->dx);
1552130365Smlaier	if (y2 >= y + isc->dy) {
1553130365Smlaier		/* rtsc is above isc, replace rtsc by isc */
1554130365Smlaier		rtsc->x = x;
1555130365Smlaier		rtsc->y = y;
1556130365Smlaier		rtsc->dx = isc->dx;
1557130365Smlaier		rtsc->dy = isc->dy;
1558130365Smlaier		return;
1559130365Smlaier	}
1560130365Smlaier
1561130365Smlaier	/*
1562130365Smlaier	 * the two curves intersect
1563130365Smlaier	 * compute the offsets (dx, dy) using the reverse
1564130365Smlaier	 * function of seg_x2y()
1565130365Smlaier	 *	seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
1566130365Smlaier	 */
1567130365Smlaier	dx = ((y1 - y) << SM_SHIFT) / (isc->sm1 - isc->sm2);
1568130365Smlaier	/*
1569130365Smlaier	 * check if (x, y1) belongs to the 1st segment of rtsc.
1570130365Smlaier	 * if so, add the offset.
1571130365Smlaier	 */
1572130365Smlaier	if (rtsc->x + rtsc->dx > x)
1573130365Smlaier		dx += rtsc->x + rtsc->dx - x;
1574130365Smlaier	dy = seg_x2y(dx, isc->sm1);
1575130365Smlaier
1576130365Smlaier	rtsc->x = x;
1577130365Smlaier	rtsc->y = y;
1578130365Smlaier	rtsc->dx = dx;
1579130365Smlaier	rtsc->dy = dy;
1580130365Smlaier	return;
1581130365Smlaier}
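
/*
 * Derivation of the dx used above (sketch): in the concave case the new
 * first segment starts at (x, y) with slope sm1 while the old curve is
 * already at y1 > y and, past its own first segment, continues with the
 * flatter slope sm2.  The segments meet where
 *
 *	y + seg_x2y(dx, sm1) == y1 + seg_x2y(dx, sm2)
 *
 * and expanding seg_x2y() as (dx * sm) >> SM_SHIFT gives
 *
 *	(dx * (sm1 - sm2)) >> SM_SHIFT == y1 - y
 *	dx == ((y1 - y) << SM_SHIFT) / (sm1 - sm2)
 *
 * The subsequent check extends dx by the remaining length of the old
 * first segment when (x, y1) still lies on it.
 */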
1582130365Smlaier
1583130365Smlaierstatic void
1584130365Smlaierget_class_stats(struct hfsc_classstats *sp, struct hfsc_class *cl)
1585130365Smlaier{
1586130365Smlaier	sp->class_id = cl->cl_id;
1587130365Smlaier	sp->class_handle = cl->cl_handle;
1588130365Smlaier
1589130365Smlaier	if (cl->cl_rsc != NULL) {
1590130365Smlaier		sp->rsc.m1 = sm2m(cl->cl_rsc->sm1);
1591130365Smlaier		sp->rsc.d = dx2d(cl->cl_rsc->dx);
1592130365Smlaier		sp->rsc.m2 = sm2m(cl->cl_rsc->sm2);
1593130365Smlaier	} else {
1594130365Smlaier		sp->rsc.m1 = 0;
1595130365Smlaier		sp->rsc.d = 0;
1596130365Smlaier		sp->rsc.m2 = 0;
1597130365Smlaier	}
1598130365Smlaier	if (cl->cl_fsc != NULL) {
1599130365Smlaier		sp->fsc.m1 = sm2m(cl->cl_fsc->sm1);
1600130365Smlaier		sp->fsc.d = dx2d(cl->cl_fsc->dx);
1601130365Smlaier		sp->fsc.m2 = sm2m(cl->cl_fsc->sm2);
1602130365Smlaier	} else {
1603130365Smlaier		sp->fsc.m1 = 0;
1604130365Smlaier		sp->fsc.d = 0;
1605130365Smlaier		sp->fsc.m2 = 0;
1606130365Smlaier	}
1607130365Smlaier	if (cl->cl_usc != NULL) {
1608130365Smlaier		sp->usc.m1 = sm2m(cl->cl_usc->sm1);
1609130365Smlaier		sp->usc.d = dx2d(cl->cl_usc->dx);
1610130365Smlaier		sp->usc.m2 = sm2m(cl->cl_usc->sm2);
1611130365Smlaier	} else {
1612130365Smlaier		sp->usc.m1 = 0;
1613130365Smlaier		sp->usc.d = 0;
1614130365Smlaier		sp->usc.m2 = 0;
1615130365Smlaier	}
1616130365Smlaier
1617130365Smlaier	sp->total = cl->cl_total;
1618130365Smlaier	sp->cumul = cl->cl_cumul;
1619130365Smlaier
1620130365Smlaier	sp->d = cl->cl_d;
1621130365Smlaier	sp->e = cl->cl_e;
1622130365Smlaier	sp->vt = cl->cl_vt;
1623130365Smlaier	sp->f = cl->cl_f;
1624130365Smlaier
1625130365Smlaier	sp->initvt = cl->cl_initvt;
1626130365Smlaier	sp->vtperiod = cl->cl_vtperiod;
1627130365Smlaier	sp->parentperiod = cl->cl_parentperiod;
1628130365Smlaier	sp->nactive = cl->cl_nactive;
1629130365Smlaier	sp->vtoff = cl->cl_vtoff;
1630130365Smlaier	sp->cvtmax = cl->cl_cvtmax;
1631130365Smlaier	sp->myf = cl->cl_myf;
1632130365Smlaier	sp->cfmin = cl->cl_cfmin;
1633130365Smlaier	sp->cvtmin = cl->cl_cvtmin;
1634130365Smlaier	sp->myfadj = cl->cl_myfadj;
1635130365Smlaier	sp->vtadj = cl->cl_vtadj;
1636130365Smlaier
1637130365Smlaier	sp->cur_time = read_machclk();
1638130365Smlaier	sp->machclk_freq = machclk_freq;
1639130365Smlaier
1640130365Smlaier	sp->qlength = qlen(cl->cl_q);
1641130365Smlaier	sp->qlimit = qlimit(cl->cl_q);
1642130365Smlaier	sp->xmit_cnt = cl->cl_stats.xmit_cnt;
1643130365Smlaier	sp->drop_cnt = cl->cl_stats.drop_cnt;
1644130365Smlaier	sp->period = cl->cl_stats.period;
1645130365Smlaier
1646130365Smlaier	sp->qtype = qtype(cl->cl_q);
1647130365Smlaier#ifdef ALTQ_RED
1648130365Smlaier	if (q_is_red(cl->cl_q))
1649130365Smlaier		red_getstats(cl->cl_red, &sp->red[0]);
1650130365Smlaier#endif
1651130365Smlaier#ifdef ALTQ_RIO
1652130365Smlaier	if (q_is_rio(cl->cl_q))
1653130365Smlaier		rio_getstats((rio_t *)cl->cl_red, &sp->red[0]);
1654130365Smlaier#endif
1655130365Smlaier}
1656130365Smlaier
1657130365Smlaier/* convert a class handle to the corresponding class pointer */
1658130365Smlaierstatic struct hfsc_class *
1659130365Smlaierclh_to_clp(struct hfsc_if *hif, u_int32_t chandle)
1660130365Smlaier{
1661130365Smlaier	int i;
1662130365Smlaier	struct hfsc_class *cl;
1663130365Smlaier
1664130365Smlaier	if (chandle == 0)
1665130365Smlaier		return (NULL);
1666130365Smlaier	/*
1667130365Smlaier	 * first, optimistically try the slot matching the lower bits of
1668130365Smlaier	 * the handle.  if that fails, fall back to the linear table search.
1669130365Smlaier	 */
1670130365Smlaier	i = chandle % HFSC_MAX_CLASSES;
1671130365Smlaier	if ((cl = hif->hif_class_tbl[i]) != NULL && cl->cl_handle == chandle)
1672130365Smlaier		return (cl);
1673130365Smlaier	for (i = 0; i < HFSC_MAX_CLASSES; i++)
1674130365Smlaier		if ((cl = hif->hif_class_tbl[i]) != NULL &&
1675130365Smlaier		    cl->cl_handle == chandle)
1676130365Smlaier			return (cl);
1677130365Smlaier	return (NULL);
1678130365Smlaier}
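
/*
 * In the ALTQ3 compat path below, hfsccmd_add_class() hands out the
 * free slot index itself as the class handle, so the optimistic
 * "chandle % HFSC_MAX_CLASSES" lookup normally hits on the first try;
 * the linear scan covers handles that do not follow this convention.
 */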
1679130365Smlaier
1680130365Smlaier#ifdef ALTQ3_COMPAT
1681130365Smlaierstatic struct hfsc_if *
1682130365Smlaierhfsc_attach(ifq, bandwidth)
1683130365Smlaier	struct ifaltq *ifq;
1684130365Smlaier	u_int bandwidth;
1685130365Smlaier{
1686130365Smlaier	struct hfsc_if *hif;
1687130365Smlaier
1688184214Sdes	hif = malloc(sizeof(struct hfsc_if), M_DEVBUF, M_WAITOK);
1689130365Smlaier	if (hif == NULL)
1690130365Smlaier		return (NULL);
1691130365Smlaier	bzero(hif, sizeof(struct hfsc_if));
1692130365Smlaier
1693130365Smlaier	hif->hif_eligible = ellist_alloc();
1694130365Smlaier	if (hif->hif_eligible == NULL) {
1695184205Sdes		free(hif, M_DEVBUF);
1696130365Smlaier		return NULL;
1697130365Smlaier	}
1698130365Smlaier
1699130365Smlaier	hif->hif_ifq = ifq;
1700130365Smlaier
1701130365Smlaier	/* add this state to the hfsc list */
1702130365Smlaier	hif->hif_next = hif_list;
1703130365Smlaier	hif_list = hif;
1704130365Smlaier
1705130365Smlaier	return (hif);
1706130365Smlaier}
1707130365Smlaier
1708130365Smlaierstatic int
1709130365Smlaierhfsc_detach(hif)
1710130365Smlaier	struct hfsc_if *hif;
1711130365Smlaier{
1712130365Smlaier	(void)hfsc_clear_interface(hif);
1713130365Smlaier	(void)hfsc_class_destroy(hif->hif_rootclass);
1714130365Smlaier
1715130365Smlaier	/* remove this interface from the hif list */
1716130365Smlaier	if (hif_list == hif)
1717130365Smlaier		hif_list = hif->hif_next;
1718130365Smlaier	else {
1719130365Smlaier		struct hfsc_if *h;
1720130365Smlaier
1721130365Smlaier		for (h = hif_list; h != NULL; h = h->hif_next)
1722130365Smlaier			if (h->hif_next == hif) {
1723130365Smlaier				h->hif_next = hif->hif_next;
1724130365Smlaier				break;
1725130365Smlaier			}
1726130365Smlaier		ASSERT(h != NULL);
1727130365Smlaier	}
1728130365Smlaier
1729130365Smlaier	ellist_destroy(hif->hif_eligible);
1730130365Smlaier
1731184205Sdes	free(hif, M_DEVBUF);
1732130365Smlaier
1733130365Smlaier	return (0);
1734130365Smlaier}
1735130365Smlaier
1736130365Smlaierstatic int
1737130365Smlaierhfsc_class_modify(cl, rsc, fsc, usc)
1738130365Smlaier	struct hfsc_class *cl;
1739130365Smlaier	struct service_curve *rsc, *fsc, *usc;
1740130365Smlaier{
1741130365Smlaier	struct internal_sc *rsc_tmp, *fsc_tmp, *usc_tmp;
1742130365Smlaier	u_int64_t cur_time;
1743130365Smlaier	int s;
1744130365Smlaier
1745130365Smlaier	rsc_tmp = fsc_tmp = usc_tmp = NULL;
1746130365Smlaier	if (rsc != NULL && (rsc->m1 != 0 || rsc->m2 != 0) &&
1747130365Smlaier	    cl->cl_rsc == NULL) {
1748184214Sdes		rsc_tmp = malloc(sizeof(struct internal_sc),
1749184214Sdes		    M_DEVBUF, M_WAITOK);
1750130365Smlaier		if (rsc_tmp == NULL)
1751130365Smlaier			return (ENOMEM);
1752130365Smlaier	}
1753130365Smlaier	if (fsc != NULL && (fsc->m1 != 0 || fsc->m2 != 0) &&
1754130365Smlaier	    cl->cl_fsc == NULL) {
1755184214Sdes		fsc_tmp = malloc(sizeof(struct internal_sc),
1756184214Sdes		    M_DEVBUF, M_WAITOK);
1757198952Sbrueffer		if (fsc_tmp == NULL) {
1758198952Sbrueffer			free(rsc_tmp, M_DEVBUF);
1759130365Smlaier			return (ENOMEM);
1760198952Sbrueffer		}
1761130365Smlaier	}
1762130365Smlaier	if (usc != NULL && (usc->m1 != 0 || usc->m2 != 0) &&
1763130365Smlaier	    cl->cl_usc == NULL) {
1764184214Sdes		usc_tmp = malloc(sizeof(struct internal_sc),
1765184214Sdes		    M_DEVBUF, M_WAITOK);
1766198952Sbrueffer		if (usc_tmp == NULL) {
1767198952Sbrueffer			free(rsc_tmp, M_DEVBUF);
1768198952Sbrueffer			free(fsc_tmp, M_DEVBUF);
1769130365Smlaier			return (ENOMEM);
1770198952Sbrueffer		}
1771130365Smlaier	}
1772130365Smlaier
1773130365Smlaier	cur_time = read_machclk();
1774130365Smlaier#ifdef __NetBSD__
1775130365Smlaier	s = splnet();
1776130365Smlaier#else
1777130365Smlaier	s = splimp();
1778130365Smlaier#endif
1779130368Smlaier	IFQ_LOCK(cl->cl_hif->hif_ifq);
1780130365Smlaier
1781130365Smlaier	if (rsc != NULL) {
1782130365Smlaier		if (rsc->m1 == 0 && rsc->m2 == 0) {
1783130365Smlaier			if (cl->cl_rsc != NULL) {
1784130365Smlaier				if (!qempty(cl->cl_q))
1785130365Smlaier					hfsc_purgeq(cl);
1786184205Sdes				free(cl->cl_rsc, M_DEVBUF);
1787130365Smlaier				cl->cl_rsc = NULL;
1788130365Smlaier			}
1789130365Smlaier		} else {
1790130365Smlaier			if (cl->cl_rsc == NULL)
1791130365Smlaier				cl->cl_rsc = rsc_tmp;
1792130365Smlaier			sc2isc(rsc, cl->cl_rsc);
1793130365Smlaier			rtsc_init(&cl->cl_deadline, cl->cl_rsc, cur_time,
1794130365Smlaier			    cl->cl_cumul);
1795130365Smlaier			cl->cl_eligible = cl->cl_deadline;
1796130365Smlaier			if (cl->cl_rsc->sm1 <= cl->cl_rsc->sm2) {
1797130365Smlaier				cl->cl_eligible.dx = 0;
1798130365Smlaier				cl->cl_eligible.dy = 0;
1799130365Smlaier			}
1800130365Smlaier		}
1801130365Smlaier	}
1802130365Smlaier
1803130365Smlaier	if (fsc != NULL) {
1804130365Smlaier		if (fsc->m1 == 0 && fsc->m2 == 0) {
1805130365Smlaier			if (cl->cl_fsc != NULL) {
1806130365Smlaier				if (!qempty(cl->cl_q))
1807130365Smlaier					hfsc_purgeq(cl);
1808184205Sdes				free(cl->cl_fsc, M_DEVBUF);
1809130365Smlaier				cl->cl_fsc = NULL;
1810130365Smlaier			}
1811130365Smlaier		} else {
1812130365Smlaier			if (cl->cl_fsc == NULL)
1813130365Smlaier				cl->cl_fsc = fsc_tmp;
1814130365Smlaier			sc2isc(fsc, cl->cl_fsc);
1815130365Smlaier			rtsc_init(&cl->cl_virtual, cl->cl_fsc, cl->cl_vt,
1816130365Smlaier			    cl->cl_total);
1817130365Smlaier		}
1818130365Smlaier	}
1819130365Smlaier
1820130365Smlaier	if (usc != NULL) {
1821130365Smlaier		if (usc->m1 == 0 && usc->m2 == 0) {
1822130365Smlaier			if (cl->cl_usc != NULL) {
1823184205Sdes				free(cl->cl_usc, M_DEVBUF);
1824130365Smlaier				cl->cl_usc = NULL;
1825130365Smlaier				cl->cl_myf = 0;
1826130365Smlaier			}
1827130365Smlaier		} else {
1828130365Smlaier			if (cl->cl_usc == NULL)
1829130365Smlaier				cl->cl_usc = usc_tmp;
1830130365Smlaier			sc2isc(usc, cl->cl_usc);
1831130365Smlaier			rtsc_init(&cl->cl_ulimit, cl->cl_usc, cur_time,
1832130365Smlaier			    cl->cl_total);
1833130365Smlaier		}
1834130365Smlaier	}
1835130365Smlaier
1836130365Smlaier	if (!qempty(cl->cl_q)) {
1837130365Smlaier		if (cl->cl_rsc != NULL)
1838130365Smlaier			update_ed(cl, m_pktlen(qhead(cl->cl_q)));
1839130365Smlaier		if (cl->cl_fsc != NULL)
1840130365Smlaier			update_vf(cl, 0, cur_time);
1841130365Smlaier		/* is this enough? */
1842130365Smlaier	}
1843130365Smlaier
1844130368Smlaier	IFQ_UNLOCK(cl->cl_hif->hif_ifq);
1845130365Smlaier	splx(s);
1846130365Smlaier
1847130365Smlaier	return (0);
1848130365Smlaier}
1849130365Smlaier
1850130365Smlaier/*
1851130365Smlaier * hfsc device interface
1852130365Smlaier */
1853130365Smlaierint
1854130365Smlaierhfscopen(dev, flag, fmt, p)
1855130365Smlaier	dev_t dev;
1856130365Smlaier	int flag, fmt;
1857130365Smlaier#if (__FreeBSD_version > 500000)
1858130365Smlaier	struct thread *p;
1859130365Smlaier#else
1860130365Smlaier	struct proc *p;
1861130365Smlaier#endif
1862130365Smlaier{
1863130365Smlaier	if (machclk_freq == 0)
1864130365Smlaier		init_machclk();
1865130365Smlaier
1866130365Smlaier	if (machclk_freq == 0) {
1867130365Smlaier		printf("hfsc: no cpu clock available!\n");
1868130365Smlaier		return (ENXIO);
1869130365Smlaier	}
1870130365Smlaier
1871130365Smlaier	/* everything will be done when the queueing scheme is attached. */
1872130365Smlaier	return 0;
1873130365Smlaier}
1874130365Smlaier
1875130365Smlaierint
1876130365Smlaierhfscclose(dev, flag, fmt, p)
1877130365Smlaier	dev_t dev;
1878130365Smlaier	int flag, fmt;
1879130365Smlaier#if (__FreeBSD_version > 500000)
1880130365Smlaier	struct thread *p;
1881130365Smlaier#else
1882130365Smlaier	struct proc *p;
1883130365Smlaier#endif
1884130365Smlaier{
1885130365Smlaier	struct hfsc_if *hif;
1886130365Smlaier	int err, error = 0;
1887130365Smlaier
1888130365Smlaier	while ((hif = hif_list) != NULL) {
1889130365Smlaier		/* destroy all */
1890130365Smlaier		if (ALTQ_IS_ENABLED(hif->hif_ifq))
1891130365Smlaier			altq_disable(hif->hif_ifq);
1892130365Smlaier
1893130365Smlaier		err = altq_detach(hif->hif_ifq);
1894130365Smlaier		if (err == 0)
1895130365Smlaier			err = hfsc_detach(hif);
1896130365Smlaier		if (err != 0 && error == 0)
1897130365Smlaier			error = err;
1898130365Smlaier	}
1899130365Smlaier
1900130365Smlaier	return error;
1901130365Smlaier}
1902130365Smlaier
1903130365Smlaierint
1904130365Smlaierhfscioctl(dev, cmd, addr, flag, p)
1905130365Smlaier	dev_t dev;
1906130365Smlaier	ioctlcmd_t cmd;
1907130365Smlaier	caddr_t addr;
1908130365Smlaier	int flag;
1909130365Smlaier#if (__FreeBSD_version > 500000)
1910130365Smlaier	struct thread *p;
1911130365Smlaier#else
1912130365Smlaier	struct proc *p;
1913130365Smlaier#endif
1914130365Smlaier{
1915130365Smlaier	struct hfsc_if *hif;
1916130365Smlaier	struct hfsc_interface *ifacep;
1917130365Smlaier	int	error = 0;
1918130365Smlaier
1919130365Smlaier	/* check super-user privilege */
1920130365Smlaier	switch (cmd) {
1921130365Smlaier	case HFSC_GETSTATS:
1922130365Smlaier		break;
1923130365Smlaier	default:
1924164033Srwatson#if (__FreeBSD_version > 700000)
1925164033Srwatson		if ((error = priv_check(p, PRIV_ALTQ_MANAGE)) != 0)
1926164033Srwatson			return (error);
1927164033Srwatson#elif (__FreeBSD_version > 400000)
1928130365Smlaier		if ((error = suser(p)) != 0)
1929130365Smlaier			return (error);
1930130365Smlaier#else
1931130365Smlaier		if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
1932130365Smlaier			return (error);
1933130365Smlaier#endif
1934130365Smlaier		break;
1935130365Smlaier	}
1936130365Smlaier
1937130365Smlaier	switch (cmd) {
1938130365Smlaier
1939130365Smlaier	case HFSC_IF_ATTACH:
1940130365Smlaier		error = hfsccmd_if_attach((struct hfsc_attach *)addr);
1941130365Smlaier		break;
1942130365Smlaier
1943130365Smlaier	case HFSC_IF_DETACH:
1944130365Smlaier		error = hfsccmd_if_detach((struct hfsc_interface *)addr);
1945130365Smlaier		break;
1946130365Smlaier
1947130365Smlaier	case HFSC_ENABLE:
1948130365Smlaier	case HFSC_DISABLE:
1949130365Smlaier	case HFSC_CLEAR_HIERARCHY:
1950130365Smlaier		ifacep = (struct hfsc_interface *)addr;
1951130365Smlaier		if ((hif = altq_lookup(ifacep->hfsc_ifname,
1952130365Smlaier				       ALTQT_HFSC)) == NULL) {
1953130365Smlaier			error = EBADF;
1954130365Smlaier			break;
1955130365Smlaier		}
1956130365Smlaier
1957130365Smlaier		switch (cmd) {
1958130365Smlaier
1959130365Smlaier		case HFSC_ENABLE:
1960130365Smlaier			if (hif->hif_defaultclass == NULL) {
1961130365Smlaier#ifdef ALTQ_DEBUG
1962130365Smlaier				printf("hfsc: no default class\n");
1963130365Smlaier#endif
1964130365Smlaier				error = EINVAL;
1965130365Smlaier				break;
1966130365Smlaier			}
1967130365Smlaier			error = altq_enable(hif->hif_ifq);
1968130365Smlaier			break;
1969130365Smlaier
1970130365Smlaier		case HFSC_DISABLE:
1971130365Smlaier			error = altq_disable(hif->hif_ifq);
1972130365Smlaier			break;
1973130365Smlaier
1974130365Smlaier		case HFSC_CLEAR_HIERARCHY:
1975130365Smlaier			hfsc_clear_interface(hif);
1976130365Smlaier			break;
1977130365Smlaier		}
1978130365Smlaier		break;
1979130365Smlaier
1980130365Smlaier	case HFSC_ADD_CLASS:
1981130365Smlaier		error = hfsccmd_add_class((struct hfsc_add_class *)addr);
1982130365Smlaier		break;
1983130365Smlaier
1984130365Smlaier	case HFSC_DEL_CLASS:
1985130365Smlaier		error = hfsccmd_delete_class((struct hfsc_delete_class *)addr);
1986130365Smlaier		break;
1987130365Smlaier
1988130365Smlaier	case HFSC_MOD_CLASS:
1989130365Smlaier		error = hfsccmd_modify_class((struct hfsc_modify_class *)addr);
1990130365Smlaier		break;
1991130365Smlaier
1992130365Smlaier	case HFSC_ADD_FILTER:
1993130365Smlaier		error = hfsccmd_add_filter((struct hfsc_add_filter *)addr);
1994130365Smlaier		break;
1995130365Smlaier
1996130365Smlaier	case HFSC_DEL_FILTER:
1997130365Smlaier		error = hfsccmd_delete_filter((struct hfsc_delete_filter *)addr);
1998130365Smlaier		break;
1999130365Smlaier
2000130365Smlaier	case HFSC_GETSTATS:
2001130365Smlaier		error = hfsccmd_class_stats((struct hfsc_class_stats *)addr);
2002130365Smlaier		break;
2003130365Smlaier
2004130365Smlaier	default:
2005130365Smlaier		error = EINVAL;
2006130365Smlaier		break;
2007130365Smlaier	}
2008130365Smlaier	return error;
2009130365Smlaier}
2010130365Smlaier
2011130365Smlaierstatic int
2012130365Smlaierhfsccmd_if_attach(ap)
2013130365Smlaier	struct hfsc_attach *ap;
2014130365Smlaier{
2015130365Smlaier	struct hfsc_if *hif;
2016130365Smlaier	struct ifnet *ifp;
2017130365Smlaier	int error;
2018130365Smlaier
2019130365Smlaier	if ((ifp = ifunit(ap->iface.hfsc_ifname)) == NULL)
2020130365Smlaier		return (ENXIO);
2021130365Smlaier
2022130365Smlaier	if ((hif = hfsc_attach(&ifp->if_snd, ap->bandwidth)) == NULL)
2023130365Smlaier		return (ENOMEM);
2024130365Smlaier
2025130365Smlaier	/*
2026130365Smlaier	 * set HFSC to this ifnet structure.
2027130365Smlaier	 */
2028130365Smlaier	if ((error = altq_attach(&ifp->if_snd, ALTQT_HFSC, hif,
2029130365Smlaier				 hfsc_enqueue, hfsc_dequeue, hfsc_request,
2030130365Smlaier				 &hif->hif_classifier, acc_classify)) != 0)
2031130365Smlaier		(void)hfsc_detach(hif);
2032130365Smlaier
2033130365Smlaier	return (error);
2034130365Smlaier}
2035130365Smlaier
2036130365Smlaierstatic int
2037130365Smlaierhfsccmd_if_detach(ap)
2038130365Smlaier	struct hfsc_interface *ap;
2039130365Smlaier{
2040130365Smlaier	struct hfsc_if *hif;
2041130365Smlaier	int error;
2042130365Smlaier
2043130365Smlaier	if ((hif = altq_lookup(ap->hfsc_ifname, ALTQT_HFSC)) == NULL)
2044130365Smlaier		return (EBADF);
2045130365Smlaier
2046130365Smlaier	if (ALTQ_IS_ENABLED(hif->hif_ifq))
2047130365Smlaier		altq_disable(hif->hif_ifq);
2048130365Smlaier
2049130365Smlaier	if ((error = altq_detach(hif->hif_ifq)))
2050130365Smlaier		return (error);
2051130365Smlaier
2052130365Smlaier	return hfsc_detach(hif);
2053130365Smlaier}
2054130365Smlaier
2055130365Smlaierstatic int
2056130365Smlaierhfsccmd_add_class(ap)
2057130365Smlaier	struct hfsc_add_class *ap;
2058130365Smlaier{
2059130365Smlaier	struct hfsc_if *hif;
2060130365Smlaier	struct hfsc_class *cl, *parent;
2061130365Smlaier	int	i;
2062130365Smlaier
2063130365Smlaier	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
2064130365Smlaier		return (EBADF);
2065130365Smlaier
2066130365Smlaier	if (ap->parent_handle == HFSC_NULLCLASS_HANDLE &&
2067130365Smlaier	    hif->hif_rootclass == NULL)
2068130365Smlaier		parent = NULL;
2069130365Smlaier	else if ((parent = clh_to_clp(hif, ap->parent_handle)) == NULL)
2070130365Smlaier		return (EINVAL);
2071130365Smlaier
2072130365Smlaier	/* assign a class handle (use a free slot number for now) */
2073130365Smlaier	for (i = 1; i < HFSC_MAX_CLASSES; i++)
2074130365Smlaier		if (hif->hif_class_tbl[i] == NULL)
2075130365Smlaier			break;
2076130365Smlaier	if (i == HFSC_MAX_CLASSES)
2077130365Smlaier		return (EBUSY);
2078130365Smlaier
2079130365Smlaier	if ((cl = hfsc_class_create(hif, &ap->service_curve, NULL, NULL,
2080130365Smlaier	    parent, ap->qlimit, ap->flags, i)) == NULL)
2081130365Smlaier		return (ENOMEM);
2082130365Smlaier
2083130365Smlaier	/* return a class handle to the user */
2084130365Smlaier	ap->class_handle = i;
2085130365Smlaier
2086130365Smlaier	return (0);
2087130365Smlaier}
2088130365Smlaier
2089130365Smlaierstatic int
2090130365Smlaierhfsccmd_delete_class(ap)
2091130365Smlaier	struct hfsc_delete_class *ap;
2092130365Smlaier{
2093130365Smlaier	struct hfsc_if *hif;
2094130365Smlaier	struct hfsc_class *cl;
2095130365Smlaier
2096130365Smlaier	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
2097130365Smlaier		return (EBADF);
2098130365Smlaier
2099130365Smlaier	if ((cl = clh_to_clp(hif, ap->class_handle)) == NULL)
2100130365Smlaier		return (EINVAL);
2101130365Smlaier
2102130365Smlaier	return hfsc_class_destroy(cl);
2103130365Smlaier}
2104130365Smlaier
2105130365Smlaierstatic int
2106130365Smlaierhfsccmd_modify_class(ap)
2107130365Smlaier	struct hfsc_modify_class *ap;
2108130365Smlaier{
2109130365Smlaier	struct hfsc_if *hif;
2110130365Smlaier	struct hfsc_class *cl;
2111130365Smlaier	struct service_curve *rsc = NULL;
2112130365Smlaier	struct service_curve *fsc = NULL;
2113130365Smlaier	struct service_curve *usc = NULL;
2114130365Smlaier
2115130365Smlaier	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
2116130365Smlaier		return (EBADF);
2117130365Smlaier
2118130365Smlaier	if ((cl = clh_to_clp(hif, ap->class_handle)) == NULL)
2119130365Smlaier		return (EINVAL);
2120130365Smlaier
2121130365Smlaier	if (ap->sctype & HFSC_REALTIMESC)
2122130365Smlaier		rsc = &ap->service_curve;
2123130365Smlaier	if (ap->sctype & HFSC_LINKSHARINGSC)
2124130365Smlaier		fsc = &ap->service_curve;
2125130365Smlaier	if (ap->sctype & HFSC_UPPERLIMITSC)
2126130365Smlaier		usc = &ap->service_curve;
2127130365Smlaier
2128130365Smlaier	return hfsc_class_modify(cl, rsc, fsc, usc);
2129130365Smlaier}
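
/*
 * Minimal userland sketch of driving this handler (illustrative only;
 * the device path, interface name and class handle are assumptions, and
 * the structures and ioctl numbers are those declared in altq_hfsc.h):
 *
 *	struct hfsc_modify_class req;
 *	int fd;
 *
 *	memset(&req, 0, sizeof(req));
 *	strlcpy(req.iface.hfsc_ifname, "em0", sizeof(req.iface.hfsc_ifname));
 *	req.class_handle = 3;			(handle from HFSC_ADD_CLASS)
 *	req.service_curve.m1 = 10000000;	(10 Mbps burst slope)
 *	req.service_curve.d = 30;		(for 30 msec)
 *	req.service_curve.m2 = 1000000;		(1 Mbps steady-state slope)
 *	req.sctype = HFSC_REALTIMESC;
 *
 *	fd = open("/dev/altq/hfsc", O_RDWR);
 *	if (fd >= 0 && ioctl(fd, HFSC_MOD_CLASS, &req) < 0)
 *		warn("HFSC_MOD_CLASS");
 *
 * HFSC_MOD_CLASS lands in hfsccmd_modify_class() above, which passes
 * the curve on to hfsc_class_modify() as the real-time service curve.
 */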
2130130365Smlaier
2131130365Smlaierstatic int
2132130365Smlaierhfsccmd_add_filter(ap)
2133130365Smlaier	struct hfsc_add_filter *ap;
2134130365Smlaier{
2135130365Smlaier	struct hfsc_if *hif;
2136130365Smlaier	struct hfsc_class *cl;
2137130365Smlaier
2138130365Smlaier	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
2139130365Smlaier		return (EBADF);
2140130365Smlaier
2141130365Smlaier	if ((cl = clh_to_clp(hif, ap->class_handle)) == NULL)
2142130365Smlaier		return (EINVAL);
2143130365Smlaier
2144130365Smlaier	if (is_a_parent_class(cl)) {
2145130365Smlaier#ifdef ALTQ_DEBUG
2146130365Smlaier		printf("hfsccmd_add_filter: not a leaf class!\n");
2147130365Smlaier#endif
2148130365Smlaier		return (EINVAL);
2149130365Smlaier	}
2150130365Smlaier
2151130365Smlaier	return acc_add_filter(&hif->hif_classifier, &ap->filter,
2152130365Smlaier			      cl, &ap->filter_handle);
2153130365Smlaier}
2154130365Smlaier
2155130365Smlaierstatic int
2156130365Smlaierhfsccmd_delete_filter(ap)
2157130365Smlaier	struct hfsc_delete_filter *ap;
2158130365Smlaier{
2159130365Smlaier	struct hfsc_if *hif;
2160130365Smlaier
2161130365Smlaier	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
2162130365Smlaier		return (EBADF);
2163130365Smlaier
2164130365Smlaier	return acc_delete_filter(&hif->hif_classifier,
2165130365Smlaier				 ap->filter_handle);
2166130365Smlaier}
2167130365Smlaier
2168130365Smlaierstatic int
2169130365Smlaierhfsccmd_class_stats(ap)
2170130365Smlaier	struct hfsc_class_stats *ap;
2171130365Smlaier{
2172130365Smlaier	struct hfsc_if *hif;
2173130365Smlaier	struct hfsc_class *cl;
2174130365Smlaier	struct hfsc_classstats stats, *usp;
2175130365Smlaier	int	n, nclasses, error;
2176130365Smlaier
2177130365Smlaier	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
2178130365Smlaier		return (EBADF);
2179130365Smlaier
2180130365Smlaier	ap->cur_time = read_machclk();
2181130365Smlaier	ap->machclk_freq = machclk_freq;
2182130365Smlaier	ap->hif_classes = hif->hif_classes;
2183130365Smlaier	ap->hif_packets = hif->hif_packets;
2184130365Smlaier
2185130365Smlaier	/* skip the first N classes in the tree */
2186130365Smlaier	nclasses = ap->nskip;
2187130365Smlaier	for (cl = hif->hif_rootclass, n = 0; cl != NULL && n < nclasses;
2188130365Smlaier	     cl = hfsc_nextclass(cl), n++)
2189130365Smlaier		;
2190130365Smlaier	if (n != nclasses)
2191130365Smlaier		return (EINVAL);
2192130365Smlaier
2193130365Smlaier	/* then, read the next N classes in the tree */
2194130365Smlaier	nclasses = ap->nclasses;
2195130365Smlaier	usp = ap->stats;
2196130365Smlaier	for (n = 0; cl != NULL && n < nclasses; cl = hfsc_nextclass(cl), n++) {
2197130365Smlaier
2198130365Smlaier		get_class_stats(&stats, cl);
2199130365Smlaier
2200130365Smlaier		if ((error = copyout((caddr_t)&stats, (caddr_t)usp++,
2201130365Smlaier				     sizeof(stats))) != 0)
2202130365Smlaier			return (error);
2203130365Smlaier	}
2204130365Smlaier
2205130365Smlaier	ap->nclasses = n;
2206130365Smlaier
2207130365Smlaier	return (0);
2208130365Smlaier}
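
/*
 * Companion sketch for reading statistics (illustrative only; "fd" is a
 * descriptor for the hfsc device opened as in the previous sketch):
 *
 *	struct hfsc_classstats sbuf[HFSC_MAX_CLASSES];
 *	struct hfsc_class_stats req;
 *
 *	memset(&req, 0, sizeof(req));
 *	strlcpy(req.iface.hfsc_ifname, "em0", sizeof(req.iface.hfsc_ifname));
 *	req.nskip = 0;			 (start at the root class)
 *	req.nclasses = HFSC_MAX_CLASSES; (upper bound, trimmed on return)
 *	req.stats = sbuf;
 *
 *	if (ioctl(fd, HFSC_GETSTATS, &req) == 0)
 *		printf("got stats for %d classes\n", req.nclasses);
 *
 * On return, req.nclasses holds the number of entries actually copied
 * out by the loop above, and each hfsc_classstats entry carries the
 * fields filled in by get_class_stats().
 */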
2209130365Smlaier
2210130365Smlaier#ifdef KLD_MODULE
2211130365Smlaier
2212130365Smlaierstatic struct altqsw hfsc_sw =
2213130365Smlaier	{"hfsc", hfscopen, hfscclose, hfscioctl};
2214130365Smlaier
2215130365SmlaierALTQ_MODULE(altq_hfsc, ALTQT_HFSC, &hfsc_sw);
2216130365SmlaierMODULE_DEPEND(altq_hfsc, altq_red, 1, 1, 1);
2217130365SmlaierMODULE_DEPEND(altq_hfsc, altq_rio, 1, 1, 1);
2218130365Smlaier
2219130365Smlaier#endif /* KLD_MODULE */
2220130365Smlaier#endif /* ALTQ3_COMPAT */
2221130365Smlaier
2222130365Smlaier#endif /* ALTQ_HFSC */