1/*	$OpenBSD: pfctl_altq.c,v 1.93 2007/10/15 02:16:35 deraadt Exp $	*/
2
3/*
4 * Copyright (c) 2002
5 *	Sony Computer Science Laboratories Inc.
6 * Copyright (c) 2002, 2003 Henning Brauer <henning@openbsd.org>
7 *
8 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 */
20
21#include <sys/cdefs.h>
22#define PFIOC_USE_LATEST
23#define _WANT_FREEBSD_BITSET
24
25#include <sys/types.h>
26#include <sys/bitset.h>
27#include <sys/ioctl.h>
28#include <sys/socket.h>
29
30#include <net/if.h>
31#include <netinet/in.h>
32#include <net/pfvar.h>
33
34#include <err.h>
35#include <errno.h>
36#include <inttypes.h>
37#include <limits.h>
38#include <math.h>
39#include <search.h>
40#include <stdio.h>
41#include <stdlib.h>
42#include <string.h>
43#include <unistd.h>
44
45#include <net/altq/altq.h>
46#include <net/altq/altq_cbq.h>
47#include <net/altq/altq_codel.h>
48#include <net/altq/altq_priq.h>
49#include <net/altq/altq_hfsc.h>
50#include <net/altq/altq_fairq.h>
51
52#include "pfctl_parser.h"
53#include "pfctl.h"
54
55#define is_sc_null(sc)	(((sc) == NULL) || ((sc)->m1 == 0 && (sc)->m2 == 0))
56
/* All interface-level (root) altq disciplines, in the order they were stored. */
static STAILQ_HEAD(interfaces, pfctl_altq) interfaces = STAILQ_HEAD_INITIALIZER(interfaces);
static struct hsearch_data queue_map;	/* "ifname:qname" -> struct pfctl_altq * */
static struct hsearch_data if_map;	/* ifname -> struct pfctl_altq * (root altq) */
static struct hsearch_data qid_map;	/* qname -> pointer to the queue's qid */
61
62static struct pfctl_altq *pfaltq_lookup(char *ifname);
63static struct pfctl_altq *qname_to_pfaltq(const char *, const char *);
64static u_int32_t	 qname_to_qid(char *);
65
66static int	eval_pfqueue_cbq(struct pfctl *, struct pf_altq *,
67		    struct pfctl_altq *);
68static int	cbq_compute_idletime(struct pfctl *, struct pf_altq *);
69static int	check_commit_cbq(int, int, struct pfctl_altq *);
70static int	print_cbq_opts(const struct pf_altq *);
71
72static int	print_codel_opts(const struct pf_altq *,
73		    const struct node_queue_opt *);
74
75static int	eval_pfqueue_priq(struct pfctl *, struct pf_altq *,
76		    struct pfctl_altq *);
77static int	check_commit_priq(int, int, struct pfctl_altq *);
78static int	print_priq_opts(const struct pf_altq *);
79
80static int	eval_pfqueue_hfsc(struct pfctl *, struct pf_altq *,
81		    struct pfctl_altq *, struct pfctl_altq *);
82static int	check_commit_hfsc(int, int, struct pfctl_altq *);
83static int	print_hfsc_opts(const struct pf_altq *,
84		    const struct node_queue_opt *);
85
86static int	eval_pfqueue_fairq(struct pfctl *, struct pf_altq *,
87		    struct pfctl_altq *, struct pfctl_altq *);
88static int	print_fairq_opts(const struct pf_altq *,
89		    const struct node_queue_opt *);
90static int	check_commit_fairq(int, int, struct pfctl_altq *);
91
92static void		 gsc_add_sc(struct gen_sc *, struct service_curve *);
93static int		 is_gsc_under_sc(struct gen_sc *,
94			     struct service_curve *);
95static struct segment	*gsc_getentry(struct gen_sc *, double);
96static int		 gsc_add_seg(struct gen_sc *, double, double, double,
97			     double);
98static double		 sc_x2y(struct service_curve *, double);
99
100u_int32_t	 getifspeed(char *);
101u_long		 getifmtu(char *);
102int		 eval_queue_opts(struct pf_altq *, struct node_queue_opt *,
103		     u_int64_t);
104u_int64_t	 eval_bwspec(struct node_queue_bw *, u_int64_t);
105void		 print_hfsc_sc(const char *, u_int, u_int, u_int,
106		     const struct node_hfsc_sc *);
107void		 print_fairq_sc(const char *, u_int, u_int, u_int,
108		     const struct node_fairq_sc *);
109
110static __attribute__((constructor)) void
111pfctl_altq_init(void)
112{
113	/*
114	 * As hdestroy() will never be called on these tables, it will be
115	 * safe to use references into the stored data as keys.
116	 */
117	if (hcreate_r(0, &queue_map) == 0)
118		err(1, "Failed to create altq queue map");
119	if (hcreate_r(0, &if_map) == 0)
120		err(1, "Failed to create altq interface map");
121	if (hcreate_r(0, &qid_map) == 0)
122		err(1, "Failed to create altq queue id map");
123}
124
/*
 * Copy *a into freshly allocated storage and register it in the lookup
 * tables.  An empty qname means "a" is an interface-level (root) altq
 * discipline; otherwise it is an individual queue on an interface.
 * Entries are never removed, so keys may safely point into stored data
 * (see pfctl_altq_init()).
 */
void
pfaltq_store(struct pf_altq *a)
{
	struct pfctl_altq	*altq;
	ENTRY 			 item;
	ENTRY			*ret_item;
	size_t			 key_size;

	if ((altq = malloc(sizeof(*altq))) == NULL)
		err(1, "queue malloc");
	memcpy(&altq->pa, a, sizeof(struct pf_altq));
	memset(&altq->meta, 0, sizeof(altq->meta));

	if (a->qname[0] == 0) {
		/* Root altq: keyed by interface name within the stored copy. */
		item.key = altq->pa.ifname;
		item.data = altq;
		if (hsearch_r(item, ENTER, &ret_item, &if_map) == 0)
			err(1, "interface map insert");
		STAILQ_INSERT_TAIL(&interfaces, altq, meta.link);
	} else {
		/*
		 * Queue: keyed by "ifname:qname".  The two NUL slots counted
		 * by sizeof(ifname) + sizeof(qname) exactly cover the ':'
		 * separator and the final NUL, so the key never truncates.
		 * The malloc'd key is intentionally never freed.
		 */
		key_size = sizeof(a->ifname) + sizeof(a->qname);
		if ((item.key = malloc(key_size)) == NULL)
			err(1, "queue map key malloc");
		snprintf(item.key, key_size, "%s:%s", a->ifname, a->qname);
		item.data = altq;
		if (hsearch_r(item, ENTER, &ret_item, &queue_map) == 0)
			err(1, "queue map insert");

		/* Also map the bare queue name to its qid (see qname_to_qid()). */
		item.key = altq->pa.qname;
		item.data = &altq->pa.qid;
		if (hsearch_r(item, ENTER, &ret_item, &qid_map) == 0)
			err(1, "qid map insert");
	}
}
159
160static struct pfctl_altq *
161pfaltq_lookup(char *ifname)
162{
163	ENTRY	 item;
164	ENTRY	*ret_item;
165
166	item.key = ifname;
167	if (hsearch_r(item, FIND, &ret_item, &if_map) == 0)
168		return (NULL);
169
170	return (ret_item->data);
171}
172
173static struct pfctl_altq *
174qname_to_pfaltq(const char *qname, const char *ifname)
175{
176	ENTRY	 item;
177	ENTRY	*ret_item;
178	char	 key[IFNAMSIZ + PF_QNAME_SIZE];
179
180	item.key = key;
181	snprintf(item.key, sizeof(key), "%s:%s", ifname, qname);
182	if (hsearch_r(item, FIND, &ret_item, &queue_map) == 0)
183		return (NULL);
184
185	return (ret_item->data);
186}
187
188static u_int32_t
189qname_to_qid(char *qname)
190{
191	ENTRY	 item;
192	ENTRY	*ret_item;
193	uint32_t qid;
194
195	/*
196	 * We guarantee that same named queues on different interfaces
197	 * have the same qid.
198	 */
199	item.key = qname;
200	if (hsearch_r(item, FIND, &ret_item, &qid_map) == 0)
201		return (0);
202
203	qid = *(uint32_t *)ret_item->data;
204	return (qid);
205}
206
/*
 * Print an interface-level altq rule in pf.conf syntax.  If "a" actually
 * names a queue, delegate to print_queue().  "bw" and "qopts" carry the
 * options as parsed, so the rule can be reproduced the way the user
 * wrote it (e.g. percentage bandwidth) where possible.
 */
void
print_altq(const struct pf_altq *a, unsigned int level,
    struct node_queue_bw *bw, struct node_queue_opt *qopts)
{
	if (a->qname[0] != 0) {
		print_queue(a, level, bw, 1, qopts);
		return;
	}

#ifdef __FreeBSD__
	if (a->local_flags & PFALTQ_FLAG_IF_REMOVED)
		printf("INACTIVE ");
#endif

	printf("altq on %s ", a->ifname);

	/*
	 * Each per-scheduler printer returns non-zero only when it printed
	 * the scheduler keyword itself; otherwise print the bare keyword.
	 */
	switch (a->scheduler) {
	case ALTQT_CBQ:
		if (!print_cbq_opts(a))
			printf("cbq ");
		break;
	case ALTQT_PRIQ:
		if (!print_priq_opts(a))
			printf("priq ");
		break;
	case ALTQT_HFSC:
		if (!print_hfsc_opts(a, qopts))
			printf("hfsc ");
		break;
	case ALTQT_FAIRQ:
		if (!print_fairq_opts(a, qopts))
			printf("fairq ");
		break;
	case ALTQT_CODEL:
		if (!print_codel_opts(a, qopts))
			printf("codel ");
		break;
	}

	/* Prefer the percentage form the user wrote over the absolute rate. */
	if (bw != NULL && bw->bw_percent > 0) {
		if (bw->bw_percent < 100)
			printf("bandwidth %u%% ", bw->bw_percent);
	} else
		printf("bandwidth %s ", rate2str((double)a->ifbandwidth));

	if (a->qlimit != DEFAULT_QLIMIT)
		printf("qlimit %u ", a->qlimit);
	printf("tbrsize %u ", a->tbrsize);
}
256
/*
 * Print a queue rule in pf.conf syntax, indented by "level" spaces.
 * "print_interface" controls whether the "on <ifname>" clause is
 * emitted; "bw"/"qopts" are the options as parsed (see print_altq()).
 */
void
print_queue(const struct pf_altq *a, unsigned int level,
    struct node_queue_bw *bw, int print_interface,
    struct node_queue_opt *qopts)
{
	unsigned int	i;

#ifdef __FreeBSD__
	if (a->local_flags & PFALTQ_FLAG_IF_REMOVED)
		printf("INACTIVE ");
#endif
	printf("queue ");
	for (i = 0; i < level; ++i)
		printf(" ");
	printf("%s ", a->qname);
	if (print_interface)
		printf("on %s ", a->ifname);
	/* Only these schedulers carry per-queue bandwidth. */
	if (a->scheduler == ALTQT_CBQ || a->scheduler == ALTQT_HFSC ||
		a->scheduler == ALTQT_FAIRQ) {
		if (bw != NULL && bw->bw_percent > 0) {
			if (bw->bw_percent < 100)
				printf("bandwidth %u%% ", bw->bw_percent);
		} else
			printf("bandwidth %s ", rate2str((double)a->bandwidth));
	}
	if (a->priority != DEFAULT_PRIORITY)
		printf("priority %u ", a->priority);
	if (a->qlimit != DEFAULT_QLIMIT)
		printf("qlimit %u ", a->qlimit);
	switch (a->scheduler) {
	case ALTQT_CBQ:
		print_cbq_opts(a);
		break;
	case ALTQT_PRIQ:
		print_priq_opts(a);
		break;
	case ALTQT_HFSC:
		print_hfsc_opts(a, qopts);
		break;
	case ALTQT_FAIRQ:
		print_fairq_opts(a, qopts);
		break;
	}
}
301
302/*
303 * eval_pfaltq computes the discipline parameters.
304 */
305int
306eval_pfaltq(struct pfctl *pf, struct pf_altq *pa, struct node_queue_bw *bw,
307    struct node_queue_opt *opts)
308{
309	u_int64_t	rate;
310	u_int		size, errors = 0;
311
312	if (bw->bw_absolute > 0)
313		pa->ifbandwidth = bw->bw_absolute;
314	else
315		if ((rate = getifspeed(pa->ifname)) == 0) {
316			fprintf(stderr, "interface %s does not know its bandwidth, "
317			    "please specify an absolute bandwidth\n",
318			    pa->ifname);
319			errors++;
320		} else if ((pa->ifbandwidth = eval_bwspec(bw, rate)) == 0)
321			pa->ifbandwidth = rate;
322
323	/*
324	 * Limit bandwidth to UINT_MAX for schedulers that aren't 64-bit ready.
325	 */
326	if ((pa->scheduler != ALTQT_HFSC) && (pa->ifbandwidth > UINT_MAX)) {
327		pa->ifbandwidth = UINT_MAX;
328		warnx("interface %s bandwidth limited to %" PRIu64 " bps "
329		    "because selected scheduler is 32-bit limited\n", pa->ifname,
330		    pa->ifbandwidth);
331	}
332	errors += eval_queue_opts(pa, opts, pa->ifbandwidth);
333
334	/* if tbrsize is not specified, use heuristics */
335	if (pa->tbrsize == 0) {
336		rate = pa->ifbandwidth;
337		if (rate <= 1 * 1000 * 1000)
338			size = 1;
339		else if (rate <= 10 * 1000 * 1000)
340			size = 4;
341		else if (rate <= 200 * 1000 * 1000)
342			size = 8;
343		else if (rate <= 2500 * 1000 * 1000ULL)
344			size = 24;
345		else
346			size = 128;
347		size = size * getifmtu(pa->ifname);
348		pa->tbrsize = size;
349	}
350	return (errors);
351}
352
353/*
354 * check_commit_altq does consistency check for each interface
355 */
356int
357check_commit_altq(int dev, int opts)
358{
359	struct pfctl_altq	*if_ppa;
360	int			 error = 0;
361
362	/* call the discipline check for each interface. */
363	STAILQ_FOREACH(if_ppa, &interfaces, meta.link) {
364		switch (if_ppa->pa.scheduler) {
365		case ALTQT_CBQ:
366			error = check_commit_cbq(dev, opts, if_ppa);
367			break;
368		case ALTQT_PRIQ:
369			error = check_commit_priq(dev, opts, if_ppa);
370			break;
371		case ALTQT_HFSC:
372			error = check_commit_hfsc(dev, opts, if_ppa);
373			break;
374		case ALTQT_FAIRQ:
375			error = check_commit_fairq(dev, opts, if_ppa);
376			break;
377		default:
378			break;
379		}
380	}
381	return (error);
382}
383
384/*
385 * eval_pfqueue computes the queue parameters.
386 */
387int
388eval_pfqueue(struct pfctl *pf, struct pf_altq *pa, struct node_queue_bw *bw,
389    struct node_queue_opt *opts)
390{
391	/* should be merged with expand_queue */
392	struct pfctl_altq	*if_ppa, *parent;
393	int		 	 error = 0;
394
395	/* find the corresponding interface and copy fields used by queues */
396	if ((if_ppa = pfaltq_lookup(pa->ifname)) == NULL) {
397		fprintf(stderr, "altq not defined on %s\n", pa->ifname);
398		return (1);
399	}
400	pa->scheduler = if_ppa->pa.scheduler;
401	pa->ifbandwidth = if_ppa->pa.ifbandwidth;
402
403	if (qname_to_pfaltq(pa->qname, pa->ifname) != NULL) {
404		fprintf(stderr, "queue %s already exists on interface %s\n",
405		    pa->qname, pa->ifname);
406		return (1);
407	}
408	pa->qid = qname_to_qid(pa->qname);
409
410	parent = NULL;
411	if (pa->parent[0] != 0) {
412		parent = qname_to_pfaltq(pa->parent, pa->ifname);
413		if (parent == NULL) {
414			fprintf(stderr, "parent %s not found for %s\n",
415			    pa->parent, pa->qname);
416			return (1);
417		}
418		pa->parent_qid = parent->pa.qid;
419	}
420	if (pa->qlimit == 0)
421		pa->qlimit = DEFAULT_QLIMIT;
422
423	if (pa->scheduler == ALTQT_CBQ || pa->scheduler == ALTQT_HFSC ||
424		pa->scheduler == ALTQT_FAIRQ) {
425		pa->bandwidth = eval_bwspec(bw,
426		    parent == NULL ? pa->ifbandwidth : parent->pa.bandwidth);
427
428		if (pa->bandwidth > pa->ifbandwidth) {
429			fprintf(stderr, "bandwidth for %s higher than "
430			    "interface\n", pa->qname);
431			return (1);
432		}
433		/*
434		 * If not HFSC, then check that the sum of the child
435		 * bandwidths is less than the parent's bandwidth.  For
436		 * HFSC, the equivalent concept is to check that the sum of
437		 * the child linkshare service curves are under the parent's
438		 * linkshare service curve, and that check is performed by
439		 * eval_pfqueue_hfsc().
440		 */
441		if ((parent != NULL) && (pa->scheduler != ALTQT_HFSC)) {
442			if (pa->bandwidth > parent->pa.bandwidth) {
443				warnx("bandwidth for %s higher than parent",
444				    pa->qname);
445				return (1);
446			}
447			parent->meta.bwsum += pa->bandwidth;
448			if (parent->meta.bwsum > parent->pa.bandwidth) {
449				warnx("the sum of the child bandwidth (%" PRIu64
450				    ") higher than parent \"%s\" (%" PRIu64 ")",
451				    parent->meta.bwsum, parent->pa.qname,
452				    parent->pa.bandwidth);
453			}
454		}
455	}
456
457	if (eval_queue_opts(pa, opts,
458		parent == NULL ? pa->ifbandwidth : parent->pa.bandwidth))
459		return (1);
460
461	if (parent != NULL)
462		parent->meta.children++;
463
464	switch (pa->scheduler) {
465	case ALTQT_CBQ:
466		error = eval_pfqueue_cbq(pf, pa, if_ppa);
467		break;
468	case ALTQT_PRIQ:
469		error = eval_pfqueue_priq(pf, pa, if_ppa);
470		break;
471	case ALTQT_HFSC:
472		error = eval_pfqueue_hfsc(pf, pa, if_ppa, parent);
473		break;
474	case ALTQT_FAIRQ:
475		error = eval_pfqueue_fairq(pf, pa, if_ppa, parent);
476		break;
477	default:
478		break;
479	}
480	return (error);
481}
482
483/*
484 * CBQ support functions
485 */
486#define	RM_FILTER_GAIN	5	/* log2 of gain, e.g., 5 => 31/32 */
487#define	RM_NS_PER_SEC	(1000000000)
488
489static int
490eval_pfqueue_cbq(struct pfctl *pf, struct pf_altq *pa, struct pfctl_altq *if_ppa)
491{
492	struct cbq_opts	*opts;
493	u_int		 ifmtu;
494
495	if (pa->priority >= CBQ_MAXPRI) {
496		warnx("priority out of range: max %d", CBQ_MAXPRI - 1);
497		return (-1);
498	}
499
500	ifmtu = getifmtu(pa->ifname);
501	opts = &pa->pq_u.cbq_opts;
502
503	if (opts->pktsize == 0) {	/* use default */
504		opts->pktsize = ifmtu;
505		if (opts->pktsize > MCLBYTES)	/* do what TCP does */
506			opts->pktsize &= ~MCLBYTES;
507	} else if (opts->pktsize > ifmtu)
508		opts->pktsize = ifmtu;
509	if (opts->maxpktsize == 0)	/* use default */
510		opts->maxpktsize = ifmtu;
511	else if (opts->maxpktsize > ifmtu)
512		opts->pktsize = ifmtu;
513
514	if (opts->pktsize > opts->maxpktsize)
515		opts->pktsize = opts->maxpktsize;
516
517	if (pa->parent[0] == 0)
518		opts->flags |= (CBQCLF_ROOTCLASS | CBQCLF_WRR);
519
520	if (pa->pq_u.cbq_opts.flags & CBQCLF_ROOTCLASS)
521		if_ppa->meta.root_classes++;
522	if (pa->pq_u.cbq_opts.flags & CBQCLF_DEFCLASS)
523		if_ppa->meta.default_classes++;
524
525	cbq_compute_idletime(pf, pa);
526	return (0);
527}
528
529/*
530 * compute ns_per_byte, maxidle, minidle, and offtime
531 */
532static int
533cbq_compute_idletime(struct pfctl *pf, struct pf_altq *pa)
534{
535	struct cbq_opts	*opts;
536	double		 maxidle_s, maxidle, minidle;
537	double		 offtime, nsPerByte, ifnsPerByte, ptime, cptime;
538	double		 z, g, f, gton, gtom;
539	u_int		 minburst, maxburst;
540
541	opts = &pa->pq_u.cbq_opts;
542	ifnsPerByte = (1.0 / (double)pa->ifbandwidth) * RM_NS_PER_SEC * 8;
543	minburst = opts->minburst;
544	maxburst = opts->maxburst;
545
546	if (pa->bandwidth == 0)
547		f = 0.0001;	/* small enough? */
548	else
549		f = ((double) pa->bandwidth / (double) pa->ifbandwidth);
550
551	nsPerByte = ifnsPerByte / f;
552	ptime = (double)opts->pktsize * ifnsPerByte;
553	cptime = ptime * (1.0 - f) / f;
554
555	if (nsPerByte * (double)opts->maxpktsize > (double)INT_MAX) {
556		/*
557		 * this causes integer overflow in kernel!
558		 * (bandwidth < 6Kbps when max_pkt_size=1500)
559		 */
560		if (pa->bandwidth != 0 && (pf->opts & PF_OPT_QUIET) == 0) {
561			warnx("queue bandwidth must be larger than %s",
562			    rate2str(ifnsPerByte * (double)opts->maxpktsize /
563			    (double)INT_MAX * (double)pa->ifbandwidth));
564			fprintf(stderr, "cbq: queue %s is too slow!\n",
565			    pa->qname);
566		}
567		nsPerByte = (double)(INT_MAX / opts->maxpktsize);
568	}
569
570	if (maxburst == 0) {  /* use default */
571		if (cptime > 10.0 * 1000000)
572			maxburst = 4;
573		else
574			maxburst = 16;
575	}
576	if (minburst == 0)  /* use default */
577		minburst = 2;
578	if (minburst > maxburst)
579		minburst = maxburst;
580
581	z = (double)(1 << RM_FILTER_GAIN);
582	g = (1.0 - 1.0 / z);
583	gton = pow(g, (double)maxburst);
584	gtom = pow(g, (double)(minburst-1));
585	maxidle = ((1.0 / f - 1.0) * ((1.0 - gton) / gton));
586	maxidle_s = (1.0 - g);
587	if (maxidle > maxidle_s)
588		maxidle = ptime * maxidle;
589	else
590		maxidle = ptime * maxidle_s;
591	offtime = cptime * (1.0 + 1.0/(1.0 - g) * (1.0 - gtom) / gtom);
592	minidle = -((double)opts->maxpktsize * (double)nsPerByte);
593
594	/* scale parameters */
595	maxidle = ((maxidle * 8.0) / nsPerByte) *
596	    pow(2.0, (double)RM_FILTER_GAIN);
597	offtime = (offtime * 8.0) / nsPerByte *
598	    pow(2.0, (double)RM_FILTER_GAIN);
599	minidle = ((minidle * 8.0) / nsPerByte) *
600	    pow(2.0, (double)RM_FILTER_GAIN);
601
602	maxidle = maxidle / 1000.0;
603	offtime = offtime / 1000.0;
604	minidle = minidle / 1000.0;
605
606	opts->minburst = minburst;
607	opts->maxburst = maxburst;
608	opts->ns_per_byte = (u_int)nsPerByte;
609	opts->maxidle = (u_int)fabs(maxidle);
610	opts->minidle = (int)minidle;
611	opts->offtime = (u_int)fabs(offtime);
612
613	return (0);
614}
615
616static int
617check_commit_cbq(int dev, int opts, struct pfctl_altq *if_ppa)
618{
619	int	error = 0;
620
621	/*
622	 * check if cbq has one root queue and one default queue
623	 * for this interface
624	 */
625	if (if_ppa->meta.root_classes != 1) {
626		warnx("should have one root queue on %s", if_ppa->pa.ifname);
627		error++;
628	}
629	if (if_ppa->meta.default_classes != 1) {
630		warnx("should have one default queue on %s", if_ppa->pa.ifname);
631		error++;
632	}
633	return (error);
634}
635
636static int
637print_cbq_opts(const struct pf_altq *a)
638{
639	const struct cbq_opts	*opts;
640
641	opts = &a->pq_u.cbq_opts;
642	if (opts->flags) {
643		printf("cbq(");
644		if (opts->flags & CBQCLF_RED)
645			printf(" red");
646		if (opts->flags & CBQCLF_ECN)
647			printf(" ecn");
648		if (opts->flags & CBQCLF_RIO)
649			printf(" rio");
650		if (opts->flags & CBQCLF_CODEL)
651			printf(" codel");
652		if (opts->flags & CBQCLF_CLEARDSCP)
653			printf(" cleardscp");
654		if (opts->flags & CBQCLF_FLOWVALVE)
655			printf(" flowvalve");
656		if (opts->flags & CBQCLF_BORROW)
657			printf(" borrow");
658		if (opts->flags & CBQCLF_WRR)
659			printf(" wrr");
660		if (opts->flags & CBQCLF_EFFICIENT)
661			printf(" efficient");
662		if (opts->flags & CBQCLF_ROOTCLASS)
663			printf(" root");
664		if (opts->flags & CBQCLF_DEFCLASS)
665			printf(" default");
666		printf(" ) ");
667
668		return (1);
669	} else
670		return (0);
671}
672
673/*
674 * PRIQ support functions
675 */
/*
 * Validate the PRIQ parameters for queue "pa" and record per-interface
 * bookkeeping on "if_ppa".  Returns 0 on success, -1 on error.
 */
static int
eval_pfqueue_priq(struct pfctl *pf, struct pf_altq *pa, struct pfctl_altq *if_ppa)
{

	if (pa->priority >= PRIQ_MAXPRI) {
		warnx("priority out of range: max %d", PRIQ_MAXPRI - 1);
		return (-1);
	}
	/* every PRIQ queue on an interface must have a distinct priority */
	if (BIT_ISSET(QPRI_BITSET_SIZE, pa->priority, &if_ppa->meta.qpris)) {
		warnx("%s does not have a unique priority on interface %s",
		    pa->qname, pa->ifname);
		return (-1);
	} else
		BIT_SET(QPRI_BITSET_SIZE, pa->priority, &if_ppa->meta.qpris);

	/* counted for the one-default-class check in check_commit_priq() */
	if (pa->pq_u.priq_opts.flags & PRCF_DEFAULTCLASS)
		if_ppa->meta.default_classes++;
	return (0);
}
695
696static int
697check_commit_priq(int dev, int opts, struct pfctl_altq *if_ppa)
698{
699
700	/*
701	 * check if priq has one default class for this interface
702	 */
703	if (if_ppa->meta.default_classes != 1) {
704		warnx("should have one default queue on %s", if_ppa->pa.ifname);
705		return (1);
706	}
707	return (0);
708}
709
710static int
711print_priq_opts(const struct pf_altq *a)
712{
713	const struct priq_opts	*opts;
714
715	opts = &a->pq_u.priq_opts;
716
717	if (opts->flags) {
718		printf("priq(");
719		if (opts->flags & PRCF_RED)
720			printf(" red");
721		if (opts->flags & PRCF_ECN)
722			printf(" ecn");
723		if (opts->flags & PRCF_RIO)
724			printf(" rio");
725		if (opts->flags & PRCF_CODEL)
726			printf(" codel");
727		if (opts->flags & PRCF_CLEARDSCP)
728			printf(" cleardscp");
729		if (opts->flags & PRCF_DEFAULTCLASS)
730			printf(" default");
731		printf(" ) ");
732
733		return (1);
734	} else
735		return (0);
736}
737
738/*
739 * HFSC support functions
740 */
/*
 * Fill in and validate the HFSC service curves for queue "pa", and run
 * admission control against the parent's accumulated child curves.
 * Returns 0 on success, -1 on error.
 */
static int
eval_pfqueue_hfsc(struct pfctl *pf, struct pf_altq *pa, struct pfctl_altq *if_ppa,
    struct pfctl_altq *parent)
{
	struct hfsc_opts_v1	*opts;
	struct service_curve	 sc;

	opts = &pa->pq_u.hfsc_opts;

	if (parent == NULL) {
		/* root queue: linkshare curve is the full interface rate */
		opts->lssc_m1 = pa->ifbandwidth;
		opts->lssc_m2 = pa->ifbandwidth;
		opts->lssc_d = 0;
		return (0);
	}

	/* First child initializes the parent's service curve accumulators. */
	if (parent->meta.children == 1) {
		LIST_INIT(&parent->meta.rtsc);
		LIST_INIT(&parent->meta.lssc);
	}

	/* the default class must remain a leaf */
	if (parent->pa.pq_u.hfsc_opts.flags & HFCF_DEFAULTCLASS) {
		warnx("adding %s would make default queue %s not a leaf",
		    pa->qname, pa->parent);
		return (-1);
	}

	/* counted for the one-default-class check in check_commit_hfsc() */
	if (pa->pq_u.hfsc_opts.flags & HFCF_DEFAULTCLASS)
		if_ppa->meta.default_classes++;

	/* if link_share is not specified, use bandwidth */
	if (opts->lssc_m2 == 0)
		opts->lssc_m2 = pa->bandwidth;

	/* a non-zero m1 is meaningless without an m2 to settle to */
	if ((opts->rtsc_m1 > 0 && opts->rtsc_m2 == 0) ||
	    (opts->lssc_m1 > 0 && opts->lssc_m2 == 0) ||
	    (opts->ulsc_m1 > 0 && opts->ulsc_m2 == 0)) {
		warnx("m2 is zero for %s", pa->qname);
		return (-1);
	}

	if ((opts->rtsc_m1 < opts->rtsc_m2 && opts->rtsc_m1 != 0) ||
	    (opts->lssc_m1 < opts->lssc_m2 && opts->lssc_m1 != 0) ||
	    (opts->ulsc_m1 < opts->ulsc_m2 && opts->ulsc_m1 != 0)) {
		warnx("m1 must be zero for convex curve: %s", pa->qname);
		return (-1);
	}

	/*
	 * admission control:
	 * for the real-time service curve, the sum of the service curves
	 * should not exceed 80% of the interface bandwidth.  20% is reserved
	 * not to over-commit the actual interface bandwidth.
	 * for the linkshare service curve, the sum of the child service
	 * curve should not exceed the parent service curve.
	 * for the upper-limit service curve, the assigned bandwidth should
	 * be smaller than the interface bandwidth, and the upper-limit should
	 * be larger than the real-time service curve when both are defined.
	 */

	/* check the real-time service curve.  reserve 20% of interface bw */
	if (opts->rtsc_m2 != 0) {
		/* add this queue to the sum */
		sc.m1 = opts->rtsc_m1;
		sc.d = opts->rtsc_d;
		sc.m2 = opts->rtsc_m2;
		gsc_add_sc(&parent->meta.rtsc, &sc);
		/* compare the sum with 80% of the interface */
		sc.m1 = 0;
		sc.d = 0;
		sc.m2 = pa->ifbandwidth / 100 * 80;
		if (!is_gsc_under_sc(&parent->meta.rtsc, &sc)) {
			warnx("real-time sc exceeds 80%% of the interface "
			    "bandwidth (%s)", rate2str((double)sc.m2));
			return (-1);
		}
	}

	/* check the linkshare service curve. */
	if (opts->lssc_m2 != 0) {
		/* add this queue to the child sum */
		sc.m1 = opts->lssc_m1;
		sc.d = opts->lssc_d;
		sc.m2 = opts->lssc_m2;
		gsc_add_sc(&parent->meta.lssc, &sc);
		/* compare the sum of the children with parent's sc */
		sc.m1 = parent->pa.pq_u.hfsc_opts.lssc_m1;
		sc.d = parent->pa.pq_u.hfsc_opts.lssc_d;
		sc.m2 = parent->pa.pq_u.hfsc_opts.lssc_m2;
		if (!is_gsc_under_sc(&parent->meta.lssc, &sc)) {
			warnx("linkshare sc exceeds parent's sc");
			return (-1);
		}
	}

	/* check the upper-limit service curve. */
	if (opts->ulsc_m2 != 0) {
		if (opts->ulsc_m1 > pa->ifbandwidth ||
		    opts->ulsc_m2 > pa->ifbandwidth) {
			warnx("upper-limit larger than interface bandwidth");
			return (-1);
		}
		if (opts->rtsc_m2 != 0 && opts->rtsc_m2 > opts->ulsc_m2) {
			warnx("upper-limit sc smaller than real-time sc");
			return (-1);
		}
	}

	return (0);
}
853
854/*
855 * FAIRQ support functions
856 */
/*
 * Fill in and validate the FAIRQ link-sharing curve for queue "pa",
 * checking the children's accumulated curve against the parent's.
 * Returns 0 on success, -1 on error.
 */
static int
eval_pfqueue_fairq(struct pfctl *pf __unused, struct pf_altq *pa,
    struct pfctl_altq *if_ppa, struct pfctl_altq *parent)
{
	struct fairq_opts	*opts;
	struct service_curve	 sc;

	opts = &pa->pq_u.fairq_opts;

	if (parent == NULL) {
		/* root queue: linkshare curve is the full interface rate */
		opts->lssc_m1 = pa->ifbandwidth;
		opts->lssc_m2 = pa->ifbandwidth;
		opts->lssc_d = 0;
		return (0);
	}

	/* First child initializes the parent's service curve accumulator. */
	if (parent->meta.children == 1)
		LIST_INIT(&parent->meta.lssc);

	/* the default class must remain a leaf */
	if (parent->pa.pq_u.fairq_opts.flags & FARF_DEFAULTCLASS) {
		warnx("adding %s would make default queue %s not a leaf",
		    pa->qname, pa->parent);
		return (-1);
	}

	/* counted for the one-default-class check in check_commit_fairq() */
	if (pa->pq_u.fairq_opts.flags & FARF_DEFAULTCLASS)
		if_ppa->meta.default_classes++;

	/* if link_share is not specified, use bandwidth */
	if (opts->lssc_m2 == 0)
		opts->lssc_m2 = pa->bandwidth;

	/*
	 * admission control:
	 * for the link-sharing service curve, the sum of the child service
	 * curves should not exceed the parent service curve.
	 */

	/* check the linkshare service curve. */
	if (opts->lssc_m2 != 0) {
		/* add this queue to the child sum */
		sc.m1 = opts->lssc_m1;
		sc.d = opts->lssc_d;
		sc.m2 = opts->lssc_m2;
		gsc_add_sc(&parent->meta.lssc, &sc);
		/* compare the sum of the children with parent's sc */
		sc.m1 = parent->pa.pq_u.fairq_opts.lssc_m1;
		sc.d = parent->pa.pq_u.fairq_opts.lssc_d;
		sc.m2 = parent->pa.pq_u.fairq_opts.lssc_m2;
		if (!is_gsc_under_sc(&parent->meta.lssc, &sc)) {
			warnx("link-sharing sc exceeds parent's sc");
			return (-1);
		}
	}

	return (0);
}
922
923static int
924check_commit_hfsc(int dev, int opts, struct pfctl_altq *if_ppa)
925{
926
927	/* check if hfsc has one default queue for this interface */
928	if (if_ppa->meta.default_classes != 1) {
929		warnx("should have one default queue on %s", if_ppa->pa.ifname);
930		return (1);
931	}
932	return (0);
933}
934
935static int
936check_commit_fairq(int dev __unused, int opts __unused, struct pfctl_altq *if_ppa)
937{
938
939	/* check if fairq has one default queue for this interface */
940	if (if_ppa->meta.default_classes != 1) {
941		warnx("should have one default queue on %s", if_ppa->pa.ifname);
942		return (1);
943	}
944	return (0);
945}
946
/*
 * Print the hfsc(...) options.  Returns 1 if anything was printed,
 * 0 otherwise.  A linkshare curve that merely equals the plain queue
 * bandwidth (m2 == bandwidth, d == 0) is implied and not printed.
 */
static int
print_hfsc_opts(const struct pf_altq *a, const struct node_queue_opt *qopts)
{
	const struct hfsc_opts_v1	*opts;
	const struct node_hfsc_sc	*rtsc, *lssc, *ulsc;

	opts = &a->pq_u.hfsc_opts;
	if (qopts == NULL)
		rtsc = lssc = ulsc = NULL;
	else {
		/* parser-side curves, used to echo percentages as written */
		rtsc = &qopts->data.hfsc_opts.realtime;
		lssc = &qopts->data.hfsc_opts.linkshare;
		ulsc = &qopts->data.hfsc_opts.upperlimit;
	}

	if (opts->flags || opts->rtsc_m2 != 0 || opts->ulsc_m2 != 0 ||
	    (opts->lssc_m2 != 0 && (opts->lssc_m2 != a->bandwidth ||
	    opts->lssc_d != 0))) {
		printf("hfsc(");
		if (opts->flags & HFCF_RED)
			printf(" red");
		if (opts->flags & HFCF_ECN)
			printf(" ecn");
		if (opts->flags & HFCF_RIO)
			printf(" rio");
		if (opts->flags & HFCF_CODEL)
			printf(" codel");
		if (opts->flags & HFCF_CLEARDSCP)
			printf(" cleardscp");
		if (opts->flags & HFCF_DEFAULTCLASS)
			printf(" default");
		if (opts->rtsc_m2 != 0)
			print_hfsc_sc("realtime", opts->rtsc_m1, opts->rtsc_d,
			    opts->rtsc_m2, rtsc);
		if (opts->lssc_m2 != 0 && (opts->lssc_m2 != a->bandwidth ||
		    opts->lssc_d != 0))
			print_hfsc_sc("linkshare", opts->lssc_m1, opts->lssc_d,
			    opts->lssc_m2, lssc);
		if (opts->ulsc_m2 != 0)
			print_hfsc_sc("upperlimit", opts->ulsc_m1, opts->ulsc_d,
			    opts->ulsc_m2, ulsc);
		printf(" ) ");

		return (1);
	} else
		return (0);
}
994
995static int
996print_codel_opts(const struct pf_altq *a, const struct node_queue_opt *qopts)
997{
998	const struct codel_opts *opts;
999
1000	opts = &a->pq_u.codel_opts;
1001	if (opts->target || opts->interval || opts->ecn) {
1002		printf("codel(");
1003		if (opts->target)
1004			printf(" target %d", opts->target);
1005		if (opts->interval)
1006			printf(" interval %d", opts->interval);
1007		if (opts->ecn)
1008			printf("ecn");
1009		printf(" ) ");
1010
1011		return (1);
1012	}
1013
1014	return (0);
1015}
1016
/*
 * Print the fairq(...) options.  Returns 1 if anything was printed,
 * 0 otherwise.  A linkshare curve that merely equals the plain queue
 * bandwidth (m2 == bandwidth, d == 0) is implied and not printed.
 */
static int
print_fairq_opts(const struct pf_altq *a, const struct node_queue_opt *qopts)
{
	const struct fairq_opts		*opts;
	const struct node_fairq_sc	*loc_lssc;

	opts = &a->pq_u.fairq_opts;
	if (qopts == NULL)
		loc_lssc = NULL;
	else
		/* parser-side curve, used to echo percentages as written */
		loc_lssc = &qopts->data.fairq_opts.linkshare;

	if (opts->flags ||
	    (opts->lssc_m2 != 0 && (opts->lssc_m2 != a->bandwidth ||
	    opts->lssc_d != 0))) {
		printf("fairq(");
		if (opts->flags & FARF_RED)
			printf(" red");
		if (opts->flags & FARF_ECN)
			printf(" ecn");
		if (opts->flags & FARF_RIO)
			printf(" rio");
		if (opts->flags & FARF_CODEL)
			printf(" codel");
		if (opts->flags & FARF_CLEARDSCP)
			printf(" cleardscp");
		if (opts->flags & FARF_DEFAULTCLASS)
			printf(" default");
		if (opts->lssc_m2 != 0 && (opts->lssc_m2 != a->bandwidth ||
		    opts->lssc_d != 0))
			print_fairq_sc("linkshare", opts->lssc_m1, opts->lssc_d,
			    opts->lssc_m2, loc_lssc);
		printf(" ) ");

		return (1);
	} else
		return (0);
}
1055
1056/*
1057 * admission control using generalized service curve
1058 */
1059
/* add a new service curve to a generalized service curve */
static void
gsc_add_sc(struct gen_sc *gsc, struct service_curve *sc)
{
	if (is_sc_null(sc))
		return;
	/* two-piece curve: slope m1 until x = d, then slope m2 to infinity */
	if (sc->d != 0)
		gsc_add_seg(gsc, 0.0, 0.0, (double)sc->d, (double)sc->m1);
	gsc_add_seg(gsc, (double)sc->d, 0.0, INFINITY, (double)sc->m2);
}
1070
1071/*
1072 * check whether all points of a generalized service curve have
1073 * their y-coordinates no larger than a given two-piece linear
1074 * service curve.
1075 */
1076static int
1077is_gsc_under_sc(struct gen_sc *gsc, struct service_curve *sc)
1078{
1079	struct segment	*s, *last, *end;
1080	double		 y;
1081
1082	if (is_sc_null(sc)) {
1083		if (LIST_EMPTY(gsc))
1084			return (1);
1085		LIST_FOREACH(s, gsc, _next) {
1086			if (s->m != 0)
1087				return (0);
1088		}
1089		return (1);
1090	}
1091	/*
1092	 * gsc has a dummy entry at the end with x = INFINITY.
1093	 * loop through up to this dummy entry.
1094	 */
1095	end = gsc_getentry(gsc, INFINITY);
1096	if (end == NULL)
1097		return (1);
1098	last = NULL;
1099	for (s = LIST_FIRST(gsc); s != end; s = LIST_NEXT(s, _next)) {
1100		if (s->y > sc_x2y(sc, s->x))
1101			return (0);
1102		last = s;
1103	}
1104	/* last now holds the real last segment */
1105	if (last == NULL)
1106		return (1);
1107	if (last->m > sc->m2)
1108		return (0);
1109	if (last->x < sc->d && last->m > sc->m1) {
1110		y = last->y + (sc->d - last->x) * last->m;
1111		if (y > sc_x2y(sc, sc->d))
1112			return (0);
1113	}
1114	return (1);
1115}
1116
1117/*
1118 * return a segment entry starting at x.
1119 * if gsc has no entry starting at x, a new entry is created at x.
1120 */
1121static struct segment *
1122gsc_getentry(struct gen_sc *gsc, double x)
1123{
1124	struct segment	*new, *prev, *s;
1125
1126	prev = NULL;
1127	LIST_FOREACH(s, gsc, _next) {
1128		if (s->x == x)
1129			return (s);	/* matching entry found */
1130		else if (s->x < x)
1131			prev = s;
1132		else
1133			break;
1134	}
1135
1136	/* we have to create a new entry */
1137	if ((new = calloc(1, sizeof(struct segment))) == NULL)
1138		return (NULL);
1139
1140	new->x = x;
1141	if (x == INFINITY || s == NULL)
1142		new->d = 0;
1143	else if (s->x == INFINITY)
1144		new->d = INFINITY;
1145	else
1146		new->d = s->x - x;
1147	if (prev == NULL) {
1148		/* insert the new entry at the head of the list */
1149		new->y = 0;
1150		new->m = 0;
1151		LIST_INSERT_HEAD(gsc, new, _next);
1152	} else {
1153		/*
1154		 * the start point intersects with the segment pointed by
1155		 * prev.  divide prev into 2 segments
1156		 */
1157		if (x == INFINITY) {
1158			prev->d = INFINITY;
1159			if (prev->m == 0)
1160				new->y = prev->y;
1161			else
1162				new->y = INFINITY;
1163		} else {
1164			prev->d = x - prev->x;
1165			new->y = prev->d * prev->m + prev->y;
1166		}
1167		new->m = prev->m;
1168		LIST_INSERT_AFTER(prev, new, _next);
1169	}
1170	return (new);
1171}
1172
/* add a segment to a generalized service curve */
static int
gsc_add_seg(struct gen_sc *gsc, double x, double y, double d, double m)
{
	struct segment	*start, *end, *s;
	double		 x2;

	if (d == INFINITY)
		x2 = INFINITY;
	else
		x2 = x + d;
	/* make sure vertices exist at both ends of the new segment */
	start = gsc_getentry(gsc, x);
	end = gsc_getentry(gsc, x2);
	if (start == NULL || end == NULL)
		return (-1);

	/* over [x, x2) the segment adds slope m and the matching offset */
	for (s = start; s != end; s = LIST_NEXT(s, _next)) {
		s->m += m;
		s->y += y + (s->x - x) * m;
	}

	/* beyond x2 it contributes only the constant offset m * d */
	end = gsc_getentry(gsc, INFINITY);
	for (; s != end; s = LIST_NEXT(s, _next)) {
		s->y += m * d;
	}

	return (0);
}
1201
1202/* get y-projection of a service curve */
1203static double
1204sc_x2y(struct service_curve *sc, double x)
1205{
1206	double	y;
1207
1208	if (x <= (double)sc->d)
1209		/* y belongs to the 1st segment */
1210		y = x * (double)sc->m1;
1211	else
1212		/* y belongs to the 2nd segment */
1213		y = (double)sc->d * (double)sc->m1
1214			+ (x - (double)sc->d) * (double)sc->m2;
1215	return (y);
1216}
1217
1218/*
1219 * misc utilities
1220 */
1221#define	R2S_BUFS	8
1222#define	RATESTR_MAX	16
1223
1224char *
1225rate2str(double rate)
1226{
1227	char		*buf;
1228	static char	 r2sbuf[R2S_BUFS][RATESTR_MAX];  /* ring buffer */
1229	static int	 idx = 0;
1230	int		 i;
1231	static const char unit[] = " KMG";
1232
1233	buf = r2sbuf[idx++];
1234	if (idx == R2S_BUFS)
1235		idx = 0;
1236
1237	for (i = 0; rate >= 1000 && i <= 3; i++)
1238		rate /= 1000;
1239
1240	if ((int)(rate * 100) % 100)
1241		snprintf(buf, RATESTR_MAX, "%.2f%cb", rate, unit[i]);
1242	else
1243		snprintf(buf, RATESTR_MAX, "%d%cb", (int)rate, unit[i]);
1244
1245	return (buf);
1246}
1247
1248u_int32_t
1249getifspeed(char *ifname)
1250{
1251	int		s;
1252	struct ifreq	ifr;
1253	struct if_data	ifrdat;
1254
1255	s = get_query_socket();
1256	bzero(&ifr, sizeof(ifr));
1257	if (strlcpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name)) >=
1258	    sizeof(ifr.ifr_name))
1259		errx(1, "getifspeed: strlcpy");
1260	ifr.ifr_data = (caddr_t)&ifrdat;
1261	if (ioctl(s, SIOCGIFDATA, (caddr_t)&ifr) == -1)
1262		err(1, "SIOCGIFDATA");
1263	return ((u_int32_t)ifrdat.ifi_baudrate);
1264}
1265
1266u_long
1267getifmtu(char *ifname)
1268{
1269	int		s;
1270	struct ifreq	ifr;
1271
1272	s = get_query_socket();
1273	bzero(&ifr, sizeof(ifr));
1274	if (strlcpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name)) >=
1275	    sizeof(ifr.ifr_name))
1276		errx(1, "getifmtu: strlcpy");
1277	if (ioctl(s, SIOCGIFMTU, (caddr_t)&ifr) == -1)
1278#ifdef __FreeBSD__
1279		ifr.ifr_mtu = 1500;
1280#else
1281		err(1, "SIOCGIFMTU");
1282#endif
1283	if (ifr.ifr_mtu > 0)
1284		return (ifr.ifr_mtu);
1285	else {
1286		warnx("could not get mtu for %s, assuming 1500", ifname);
1287		return (1500);
1288	}
1289}
1290
1291int
1292eval_queue_opts(struct pf_altq *pa, struct node_queue_opt *opts,
1293    u_int64_t ref_bw)
1294{
1295	int	errors = 0;
1296
1297	switch (pa->scheduler) {
1298	case ALTQT_CBQ:
1299		pa->pq_u.cbq_opts = opts->data.cbq_opts;
1300		break;
1301	case ALTQT_PRIQ:
1302		pa->pq_u.priq_opts = opts->data.priq_opts;
1303		break;
1304	case ALTQT_HFSC:
1305		pa->pq_u.hfsc_opts.flags = opts->data.hfsc_opts.flags;
1306		if (opts->data.hfsc_opts.linkshare.used) {
1307			pa->pq_u.hfsc_opts.lssc_m1 =
1308			    eval_bwspec(&opts->data.hfsc_opts.linkshare.m1,
1309			    ref_bw);
1310			pa->pq_u.hfsc_opts.lssc_m2 =
1311			    eval_bwspec(&opts->data.hfsc_opts.linkshare.m2,
1312			    ref_bw);
1313			pa->pq_u.hfsc_opts.lssc_d =
1314			    opts->data.hfsc_opts.linkshare.d;
1315		}
1316		if (opts->data.hfsc_opts.realtime.used) {
1317			pa->pq_u.hfsc_opts.rtsc_m1 =
1318			    eval_bwspec(&opts->data.hfsc_opts.realtime.m1,
1319			    ref_bw);
1320			pa->pq_u.hfsc_opts.rtsc_m2 =
1321			    eval_bwspec(&opts->data.hfsc_opts.realtime.m2,
1322			    ref_bw);
1323			pa->pq_u.hfsc_opts.rtsc_d =
1324			    opts->data.hfsc_opts.realtime.d;
1325		}
1326		if (opts->data.hfsc_opts.upperlimit.used) {
1327			pa->pq_u.hfsc_opts.ulsc_m1 =
1328			    eval_bwspec(&opts->data.hfsc_opts.upperlimit.m1,
1329			    ref_bw);
1330			pa->pq_u.hfsc_opts.ulsc_m2 =
1331			    eval_bwspec(&opts->data.hfsc_opts.upperlimit.m2,
1332			    ref_bw);
1333			pa->pq_u.hfsc_opts.ulsc_d =
1334			    opts->data.hfsc_opts.upperlimit.d;
1335		}
1336		break;
1337	case ALTQT_FAIRQ:
1338		pa->pq_u.fairq_opts.flags = opts->data.fairq_opts.flags;
1339		pa->pq_u.fairq_opts.nbuckets = opts->data.fairq_opts.nbuckets;
1340		pa->pq_u.fairq_opts.hogs_m1 =
1341			eval_bwspec(&opts->data.fairq_opts.hogs_bw, ref_bw);
1342
1343		if (opts->data.fairq_opts.linkshare.used) {
1344			pa->pq_u.fairq_opts.lssc_m1 =
1345			    eval_bwspec(&opts->data.fairq_opts.linkshare.m1,
1346			    ref_bw);
1347			pa->pq_u.fairq_opts.lssc_m2 =
1348			    eval_bwspec(&opts->data.fairq_opts.linkshare.m2,
1349			    ref_bw);
1350			pa->pq_u.fairq_opts.lssc_d =
1351			    opts->data.fairq_opts.linkshare.d;
1352		}
1353		break;
1354	case ALTQT_CODEL:
1355		pa->pq_u.codel_opts.target = opts->data.codel_opts.target;
1356		pa->pq_u.codel_opts.interval = opts->data.codel_opts.interval;
1357		pa->pq_u.codel_opts.ecn = opts->data.codel_opts.ecn;
1358		break;
1359	default:
1360		warnx("eval_queue_opts: unknown scheduler type %u",
1361		    opts->qtype);
1362		errors++;
1363		break;
1364	}
1365
1366	return (errors);
1367}
1368
1369/*
 * If absolute bandwidth is set, return the lesser of that value and the
1371 * reference bandwidth.  Limiting to the reference bandwidth allows simple
1372 * limiting of configured bandwidth parameters for schedulers that are
1373 * 32-bit limited, as the root/interface bandwidth (top-level reference
1374 * bandwidth) will be properly limited in that case.
1375 *
1376 * Otherwise, if the absolute bandwidth is not set, return given percentage
1377 * of reference bandwidth.
1378 */
1379u_int64_t
1380eval_bwspec(struct node_queue_bw *bw, u_int64_t ref_bw)
1381{
1382	if (bw->bw_absolute > 0)
1383		return (MIN(bw->bw_absolute, ref_bw));
1384
1385	if (bw->bw_percent > 0)
1386		return (ref_bw / 100 * bw->bw_percent);
1387
1388	return (0);
1389}
1390
1391void
1392print_hfsc_sc(const char *scname, u_int m1, u_int d, u_int m2,
1393    const struct node_hfsc_sc *sc)
1394{
1395	printf(" %s", scname);
1396
1397	if (d != 0) {
1398		printf("(");
1399		if (sc != NULL && sc->m1.bw_percent > 0)
1400			printf("%u%%", sc->m1.bw_percent);
1401		else
1402			printf("%s", rate2str((double)m1));
1403		printf(" %u", d);
1404	}
1405
1406	if (sc != NULL && sc->m2.bw_percent > 0)
1407		printf(" %u%%", sc->m2.bw_percent);
1408	else
1409		printf(" %s", rate2str((double)m2));
1410
1411	if (d != 0)
1412		printf(")");
1413}
1414
1415void
1416print_fairq_sc(const char *scname, u_int m1, u_int d, u_int m2,
1417    const struct node_fairq_sc *sc)
1418{
1419	printf(" %s", scname);
1420
1421	if (d != 0) {
1422		printf("(");
1423		if (sc != NULL && sc->m1.bw_percent > 0)
1424			printf("%u%%", sc->m1.bw_percent);
1425		else
1426			printf("%s", rate2str((double)m1));
1427		printf(" %u", d);
1428	}
1429
1430	if (sc != NULL && sc->m2.bw_percent > 0)
1431		printf(" %u%%", sc->m2.bw_percent);
1432	else
1433		printf(" %s", rate2str((double)m2));
1434
1435	if (d != 0)
1436		printf(")");
1437}
1438