/*	$FreeBSD: stable/10/sys/contrib/altq/altq/altq_hfsc.c 263086 2014-03-12 10:45:58Z glebius $	*/
/*	$KAME: altq_hfsc.c,v 1.24 2003/12/05 05:40:46 kjc Exp $	*/

/*
 * Copyright (c) 1997-1999 Carnegie Mellon University. All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation is hereby granted (including for commercial or
 * for-profit use), provided that both the copyright notice and this
 * permission notice appear in all copies of the software, derivative
 * works, or modified versions, and any portions thereof.
 *
 * THIS SOFTWARE IS EXPERIMENTAL AND IS KNOWN TO HAVE BUGS, SOME OF
 * WHICH MAY HAVE SERIOUS CONSEQUENCES.  CARNEGIE MELLON PROVIDES THIS
 * SOFTWARE IN ITS ``AS IS'' CONDITION, AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * Carnegie Mellon encourages (but does not require) users of this
 * software to return any improvements or extensions that they make,
 * and to grant Carnegie Mellon the rights to redistribute these
 * changes without encumbrance.
 */
/*
 * H-FSC is described in Proceedings of SIGCOMM'97,
 * "A Hierarchical Fair Service Curve Algorithm for Link-Sharing,
 * Real-Time and Priority Service"
 * by Ion Stoica, Hui Zhang, and T. S. Eugene Ng.
 *
 * Oleg Cherevko <olwi@aq.ml.com.ua> added the upperlimit for link-sharing.
 * when a class has an upperlimit, the fit-time is computed from the
 * upperlimit service curve.  the link-sharing scheduler does not schedule
 * a class whose fit-time exceeds the current time.
 */
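
/*
 * a quick note on service curve parameters (the example values below
 * are purely illustrative): a service curve is given by two slopes m1
 * and m2 in bits/sec and an offset d in msec.  the curve follows m1
 * for the first d msec of a backlogged period and m2 afterwards, so
 *
 *	m1 = 10000000, d = 20, m2 = 1000000
 *
 * describes a 10Mbps burst lasting 20 msec that then settles down to
 * a 1Mbps steady-state rate.
 */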

#if defined(__FreeBSD__) || defined(__NetBSD__)
#include "opt_altq.h"
#include "opt_inet.h"
#ifdef __FreeBSD__
#include "opt_inet6.h"
#endif
#endif /* __FreeBSD__ || __NetBSD__ */

#ifdef ALTQ_HFSC  /* hfsc is enabled by ALTQ_HFSC option in opt_altq.h */

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/queue.h>
#if 1 /* ALTQ3_COMPAT */
#include <sys/sockio.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#endif /* ALTQ3_COMPAT */

#include <net/if.h>
#include <net/if_var.h>
#include <netinet/in.h>

#include <netpfil/pf/pf.h>
#include <netpfil/pf/pf_altq.h>
#include <netpfil/pf/pf_mtag.h>
#include <altq/altq.h>
#include <altq/altq_hfsc.h>
#ifdef ALTQ3_COMPAT
#include <altq/altq_conf.h>
#endif

/*
 * function prototypes
 */
static int	hfsc_clear_interface(struct hfsc_if *);
static int	hfsc_request(struct ifaltq *, int, void *);
static void	hfsc_purge(struct hfsc_if *);
static struct hfsc_class *hfsc_class_create(struct hfsc_if *,
    struct service_curve *, struct service_curve *, struct service_curve *,
    struct hfsc_class *, int, int, int);
static int	hfsc_class_destroy(struct hfsc_class *);
static struct hfsc_class *hfsc_nextclass(struct hfsc_class *);
static int	hfsc_enqueue(struct ifaltq *, struct mbuf *,
    struct altq_pktattr *);
static struct mbuf *hfsc_dequeue(struct ifaltq *, int);

static int	hfsc_addq(struct hfsc_class *, struct mbuf *);
static struct mbuf *hfsc_getq(struct hfsc_class *);
static struct mbuf *hfsc_pollq(struct hfsc_class *);
static void	hfsc_purgeq(struct hfsc_class *);

static void	update_cfmin(struct hfsc_class *);
static void	set_active(struct hfsc_class *, int);
static void	set_passive(struct hfsc_class *);

static void	init_ed(struct hfsc_class *, int);
static void	update_ed(struct hfsc_class *, int);
static void	update_d(struct hfsc_class *, int);
static void	init_vf(struct hfsc_class *, int);
static void	update_vf(struct hfsc_class *, int, u_int64_t);
static void	ellist_insert(struct hfsc_class *);
static void	ellist_remove(struct hfsc_class *);
static void	ellist_update(struct hfsc_class *);
struct hfsc_class *hfsc_get_mindl(struct hfsc_if *, u_int64_t);
static void	actlist_insert(struct hfsc_class *);
static void	actlist_remove(struct hfsc_class *);
static void	actlist_update(struct hfsc_class *);

static struct hfsc_class *actlist_firstfit(struct hfsc_class *,
    u_int64_t);

static __inline u_int64_t	seg_x2y(u_int64_t, u_int64_t);
static __inline u_int64_t	seg_y2x(u_int64_t, u_int64_t);
static __inline u_int64_t	m2sm(u_int);
static __inline u_int64_t	m2ism(u_int);
static __inline u_int64_t	d2dx(u_int);
static u_int			sm2m(u_int64_t);
static u_int			dx2d(u_int64_t);

static void	sc2isc(struct service_curve *, struct internal_sc *);
static void	rtsc_init(struct runtime_sc *, struct internal_sc *,
    u_int64_t, u_int64_t);
static u_int64_t	rtsc_y2x(struct runtime_sc *, u_int64_t);
static u_int64_t	rtsc_x2y(struct runtime_sc *, u_int64_t);
static void	rtsc_min(struct runtime_sc *, struct internal_sc *,
    u_int64_t, u_int64_t);

static void	get_class_stats(struct hfsc_classstats *,
    struct hfsc_class *);
static struct hfsc_class *clh_to_clp(struct hfsc_if *, u_int32_t);


#ifdef ALTQ3_COMPAT
static struct hfsc_if *hfsc_attach(struct ifaltq *, u_int);
static int hfsc_detach(struct hfsc_if *);
static int hfsc_class_modify(struct hfsc_class *, struct service_curve *,
    struct service_curve *, struct service_curve *);

static int hfsccmd_if_attach(struct hfsc_attach *);
static int hfsccmd_if_detach(struct hfsc_interface *);
static int hfsccmd_add_class(struct hfsc_add_class *);
static int hfsccmd_delete_class(struct hfsc_delete_class *);
static int hfsccmd_modify_class(struct hfsc_modify_class *);
static int hfsccmd_add_filter(struct hfsc_add_filter *);
static int hfsccmd_delete_filter(struct hfsc_delete_filter *);
static int hfsccmd_class_stats(struct hfsc_class_stats *);

altqdev_decl(hfsc);
#endif /* ALTQ3_COMPAT */

/*
 * macros
 */
#define	is_a_parent_class(cl)	((cl)->cl_children != NULL)

#define	HT_INFINITY	0xffffffffffffffffLL	/* infinite time value */

#ifdef ALTQ3_COMPAT
/* hif_list keeps all hfsc_if's allocated. */
static struct hfsc_if *hif_list = NULL;
#endif /* ALTQ3_COMPAT */

int
hfsc_pfattach(struct pf_altq *a)
{
	struct ifnet *ifp;
	int s, error;

	if ((ifp = ifunit(a->ifname)) == NULL || a->altq_disc == NULL)
		return (EINVAL);
#ifdef __NetBSD__
	s = splnet();
#else
	s = splimp();
#endif
	error = altq_attach(&ifp->if_snd, ALTQT_HFSC, a->altq_disc,
	    hfsc_enqueue, hfsc_dequeue, hfsc_request, NULL, NULL);
	splx(s);
	return (error);
}

int
hfsc_add_altq(struct pf_altq *a)
{
	struct hfsc_if *hif;
	struct ifnet *ifp;

	if ((ifp = ifunit(a->ifname)) == NULL)
		return (EINVAL);
	if (!ALTQ_IS_READY(&ifp->if_snd))
		return (ENODEV);

	hif = malloc(sizeof(struct hfsc_if), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (hif == NULL)
		return (ENOMEM);

	TAILQ_INIT(&hif->hif_eligible);
	hif->hif_ifq = &ifp->if_snd;

	/* keep the state in pf_altq */
	a->altq_disc = hif;

	return (0);
}

int
hfsc_remove_altq(struct pf_altq *a)
{
	struct hfsc_if *hif;

	if ((hif = a->altq_disc) == NULL)
		return (EINVAL);
	a->altq_disc = NULL;

	(void)hfsc_clear_interface(hif);
	(void)hfsc_class_destroy(hif->hif_rootclass);

	free(hif, M_DEVBUF);

	return (0);
}

int
hfsc_add_queue(struct pf_altq *a)
{
	struct hfsc_if *hif;
	struct hfsc_class *cl, *parent;
	struct hfsc_opts *opts;
	struct service_curve rtsc, lssc, ulsc;

	if ((hif = a->altq_disc) == NULL)
		return (EINVAL);

	opts = &a->pq_u.hfsc_opts;

	if (a->parent_qid == HFSC_NULLCLASS_HANDLE &&
	    hif->hif_rootclass == NULL)
		parent = NULL;
	else if ((parent = clh_to_clp(hif, a->parent_qid)) == NULL)
		return (EINVAL);

	if (a->qid == 0)
		return (EINVAL);

	if (clh_to_clp(hif, a->qid) != NULL)
		return (EBUSY);

	rtsc.m1 = opts->rtsc_m1;
	rtsc.d = opts->rtsc_d;
	rtsc.m2 = opts->rtsc_m2;
	lssc.m1 = opts->lssc_m1;
	lssc.d = opts->lssc_d;
	lssc.m2 = opts->lssc_m2;
	ulsc.m1 = opts->ulsc_m1;
	ulsc.d = opts->ulsc_d;
	ulsc.m2 = opts->ulsc_m2;

	cl = hfsc_class_create(hif, &rtsc, &lssc, &ulsc,
	    parent, a->qlimit, opts->flags, a->qid);
	if (cl == NULL)
		return (ENOMEM);

	return (0);
}

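/*
 * the three curves handed to hfsc_class_create() play different roles
 * (the values below are illustrative, not defaults): rtsc is the
 * real-time guarantee, lssc drives the link-sharing (virtual time)
 * allocation, and ulsc caps the class, e.g.
 *
 *	rtsc = { 2000000, 0, 2000000 }	guarantee 2Mbps,
 *	lssc = { 0, 0, 5000000 }	share excess bandwidth in a
 *					5Mbps proportion,
 *	ulsc = { 0, 0, 8000000 }	but never exceed 8Mbps.
 */
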
int
hfsc_remove_queue(struct pf_altq *a)
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;

	if ((hif = a->altq_disc) == NULL)
		return (EINVAL);

	if ((cl = clh_to_clp(hif, a->qid)) == NULL)
		return (EINVAL);

	return (hfsc_class_destroy(cl));
}

int
hfsc_getqstats(struct pf_altq *a, void *ubuf, int *nbytes)
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;
	struct hfsc_classstats stats;
	int error = 0;

	if ((hif = altq_lookup(a->ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if ((cl = clh_to_clp(hif, a->qid)) == NULL)
		return (EINVAL);

	if (*nbytes < sizeof(stats))
		return (EINVAL);

	get_class_stats(&stats, cl);

	if ((error = copyout((caddr_t)&stats, ubuf, sizeof(stats))) != 0)
		return (error);
	*nbytes = sizeof(stats);
	return (0);
}

/*
 * bring the interface back to the initial state by discarding
 * all the filters and classes except the root class.
 */
static int
hfsc_clear_interface(struct hfsc_if *hif)
{
	struct hfsc_class *cl;

#ifdef ALTQ3_COMPAT
	/* free the filters for this interface */
	acc_discard_filters(&hif->hif_classifier, NULL, 1);
#endif

	/* clear out the classes */
	while (hif->hif_rootclass != NULL &&
	    (cl = hif->hif_rootclass->cl_children) != NULL) {
		/*
		 * remove the first leaf class found in the hierarchy
		 * then start over
		 */
		for (; cl != NULL; cl = hfsc_nextclass(cl)) {
			if (!is_a_parent_class(cl)) {
				(void)hfsc_class_destroy(cl);
				break;
			}
		}
	}

	return (0);
}

static int
hfsc_request(struct ifaltq *ifq, int req, void *arg)
{
	struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;

	IFQ_LOCK_ASSERT(ifq);

	switch (req) {
	case ALTRQ_PURGE:
		hfsc_purge(hif);
		break;
	}
	return (0);
}

/* discard all the queued packets on the interface */
static void
hfsc_purge(struct hfsc_if *hif)
{
	struct hfsc_class *cl;

	for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl))
		if (!qempty(cl->cl_q))
			hfsc_purgeq(cl);
	if (ALTQ_IS_ENABLED(hif->hif_ifq))
		hif->hif_ifq->ifq_len = 0;
}

struct hfsc_class *
hfsc_class_create(struct hfsc_if *hif, struct service_curve *rsc,
    struct service_curve *fsc, struct service_curve *usc,
    struct hfsc_class *parent, int qlimit, int flags, int qid)
{
	struct hfsc_class *cl, *p;
	int i, s;

	if (hif->hif_classes >= HFSC_MAX_CLASSES)
		return (NULL);

#ifndef ALTQ_RED
	if (flags & HFCF_RED) {
#ifdef ALTQ_DEBUG
		printf("hfsc_class_create: RED not configured for HFSC!\n");
#endif
		return (NULL);
	}
#endif

	cl = malloc(sizeof(struct hfsc_class), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cl == NULL)
		return (NULL);

	cl->cl_q = malloc(sizeof(class_queue_t), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cl->cl_q == NULL)
		goto err_ret;

	TAILQ_INIT(&cl->cl_actc);

	if (qlimit == 0)
		qlimit = 50;  /* use default */
	qlimit(cl->cl_q) = qlimit;
	qtype(cl->cl_q) = Q_DROPTAIL;
	qlen(cl->cl_q) = 0;
	cl->cl_flags = flags;
#ifdef ALTQ_RED
	if (flags & (HFCF_RED|HFCF_RIO)) {
		int red_flags, red_pkttime;
		u_int m2;

		m2 = 0;
		if (rsc != NULL && rsc->m2 > m2)
			m2 = rsc->m2;
		if (fsc != NULL && fsc->m2 > m2)
			m2 = fsc->m2;
		if (usc != NULL && usc->m2 > m2)
			m2 = usc->m2;

		red_flags = 0;
		if (flags & HFCF_ECN)
			red_flags |= REDF_ECN;
#ifdef ALTQ_RIO
		if (flags & HFCF_CLEARDSCP)
			red_flags |= RIOF_CLEARDSCP;
#endif
		if (m2 < 8)
			red_pkttime = 1000 * 1000 * 1000; /* 1 sec */
		else
			red_pkttime = (int64_t)hif->hif_ifq->altq_ifp->if_mtu
				* 1000 * 1000 * 1000 / (m2 / 8);
		if (flags & HFCF_RED) {
			cl->cl_red = red_alloc(0, 0,
			    qlimit(cl->cl_q) * 10/100,
			    qlimit(cl->cl_q) * 30/100,
			    red_flags, red_pkttime);
			if (cl->cl_red != NULL)
				qtype(cl->cl_q) = Q_RED;
		}
#ifdef ALTQ_RIO
		else {
			cl->cl_red = (red_t *)rio_alloc(0, NULL,
			    red_flags, red_pkttime);
			if (cl->cl_red != NULL)
				qtype(cl->cl_q) = Q_RIO;
		}
#endif
	}
#endif /* ALTQ_RED */

	if (rsc != NULL && (rsc->m1 != 0 || rsc->m2 != 0)) {
		cl->cl_rsc = malloc(sizeof(struct internal_sc),
		    M_DEVBUF, M_NOWAIT);
		if (cl->cl_rsc == NULL)
			goto err_ret;
		sc2isc(rsc, cl->cl_rsc);
		rtsc_init(&cl->cl_deadline, cl->cl_rsc, 0, 0);
		rtsc_init(&cl->cl_eligible, cl->cl_rsc, 0, 0);
	}
	if (fsc != NULL && (fsc->m1 != 0 || fsc->m2 != 0)) {
		cl->cl_fsc = malloc(sizeof(struct internal_sc),
		    M_DEVBUF, M_NOWAIT);
		if (cl->cl_fsc == NULL)
			goto err_ret;
		sc2isc(fsc, cl->cl_fsc);
		rtsc_init(&cl->cl_virtual, cl->cl_fsc, 0, 0);
	}
	if (usc != NULL && (usc->m1 != 0 || usc->m2 != 0)) {
		cl->cl_usc = malloc(sizeof(struct internal_sc),
		    M_DEVBUF, M_NOWAIT);
		if (cl->cl_usc == NULL)
			goto err_ret;
		sc2isc(usc, cl->cl_usc);
		rtsc_init(&cl->cl_ulimit, cl->cl_usc, 0, 0);
	}

	cl->cl_id = hif->hif_classid++;
	cl->cl_handle = qid;
	cl->cl_hif = hif;
	cl->cl_parent = parent;

#ifdef __NetBSD__
	s = splnet();
#else
	s = splimp();
#endif
	IFQ_LOCK(hif->hif_ifq);
	hif->hif_classes++;

	/*
	 * find a free slot in the class table.  if the slot matching
	 * the lower bits of qid is free, use this slot.  otherwise,
	 * use the first free slot.
	 */
	i = qid % HFSC_MAX_CLASSES;
	if (hif->hif_class_tbl[i] == NULL)
		hif->hif_class_tbl[i] = cl;
	else {
		for (i = 0; i < HFSC_MAX_CLASSES; i++)
			if (hif->hif_class_tbl[i] == NULL) {
				hif->hif_class_tbl[i] = cl;
				break;
			}
		if (i == HFSC_MAX_CLASSES) {
			IFQ_UNLOCK(hif->hif_ifq);
			splx(s);
			goto err_ret;
		}
	}

	if (flags & HFCF_DEFAULTCLASS)
		hif->hif_defaultclass = cl;

	if (parent == NULL) {
		/* this is root class */
		hif->hif_rootclass = cl;
	} else {
		/* add this class to the children list of the parent */
		if ((p = parent->cl_children) == NULL)
			parent->cl_children = cl;
		else {
			while (p->cl_siblings != NULL)
				p = p->cl_siblings;
			p->cl_siblings = cl;
		}
	}
	IFQ_UNLOCK(hif->hif_ifq);
	splx(s);

	return (cl);

 err_ret:
	if (cl->cl_red != NULL) {
#ifdef ALTQ_RIO
		if (q_is_rio(cl->cl_q))
			rio_destroy((rio_t *)cl->cl_red);
#endif
#ifdef ALTQ_RED
		if (q_is_red(cl->cl_q))
			red_destroy(cl->cl_red);
#endif
	}
	if (cl->cl_fsc != NULL)
		free(cl->cl_fsc, M_DEVBUF);
	if (cl->cl_rsc != NULL)
		free(cl->cl_rsc, M_DEVBUF);
	if (cl->cl_usc != NULL)
		free(cl->cl_usc, M_DEVBUF);
	if (cl->cl_q != NULL)
		free(cl->cl_q, M_DEVBUF);
	free(cl, M_DEVBUF);
	return (NULL);
}

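/*
 * note (example values only): class handles are hashed into
 * hif_class_tbl by their low bits.  with HFSC_MAX_CLASSES being 64, a
 * class with qid 260 is first tried at slot 260 % 64 == 4; clh_to_clp()
 * probes the same slot before falling back to a linear search, so
 * lookups stay O(1) as long as the low bits of the handles do not
 * collide.
 */
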
static int
hfsc_class_destroy(struct hfsc_class *cl)
{
	int i, s;

	if (cl == NULL)
		return (0);

	if (is_a_parent_class(cl))
		return (EBUSY);

#ifdef __NetBSD__
	s = splnet();
#else
	s = splimp();
#endif
	IFQ_LOCK(cl->cl_hif->hif_ifq);

#ifdef ALTQ3_COMPAT
	/* delete filters referencing this class */
	acc_discard_filters(&cl->cl_hif->hif_classifier, cl, 0);
#endif /* ALTQ3_COMPAT */

	if (!qempty(cl->cl_q))
		hfsc_purgeq(cl);

	if (cl->cl_parent == NULL) {
		/* this is root class */
	} else {
		struct hfsc_class *p = cl->cl_parent->cl_children;

		if (p == cl)
			cl->cl_parent->cl_children = cl->cl_siblings;
		else do {
			if (p->cl_siblings == cl) {
				p->cl_siblings = cl->cl_siblings;
				break;
			}
		} while ((p = p->cl_siblings) != NULL);
		ASSERT(p != NULL);
	}

	for (i = 0; i < HFSC_MAX_CLASSES; i++)
		if (cl->cl_hif->hif_class_tbl[i] == cl) {
			cl->cl_hif->hif_class_tbl[i] = NULL;
			break;
		}

	cl->cl_hif->hif_classes--;
	IFQ_UNLOCK(cl->cl_hif->hif_ifq);
	splx(s);

	if (cl->cl_red != NULL) {
#ifdef ALTQ_RIO
		if (q_is_rio(cl->cl_q))
			rio_destroy((rio_t *)cl->cl_red);
#endif
#ifdef ALTQ_RED
		if (q_is_red(cl->cl_q))
			red_destroy(cl->cl_red);
#endif
	}

	IFQ_LOCK(cl->cl_hif->hif_ifq);
	if (cl == cl->cl_hif->hif_rootclass)
		cl->cl_hif->hif_rootclass = NULL;
	if (cl == cl->cl_hif->hif_defaultclass)
		cl->cl_hif->hif_defaultclass = NULL;
	IFQ_UNLOCK(cl->cl_hif->hif_ifq);

	if (cl->cl_usc != NULL)
		free(cl->cl_usc, M_DEVBUF);
	if (cl->cl_fsc != NULL)
		free(cl->cl_fsc, M_DEVBUF);
	if (cl->cl_rsc != NULL)
		free(cl->cl_rsc, M_DEVBUF);
	free(cl->cl_q, M_DEVBUF);
	free(cl, M_DEVBUF);

	return (0);
}

/*
 * hfsc_nextclass returns the next class in the tree.
 *   usage:
 *	for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl))
 *		do_something;
 */
static struct hfsc_class *
hfsc_nextclass(struct hfsc_class *cl)
{
	if (cl->cl_children != NULL)
		cl = cl->cl_children;
	else if (cl->cl_siblings != NULL)
		cl = cl->cl_siblings;
	else {
		while ((cl = cl->cl_parent) != NULL)
			if (cl->cl_siblings) {
				cl = cl->cl_siblings;
				break;
			}
	}

	return (cl);
}

/*
 * hfsc_enqueue is an enqueue function to be registered to
 * (*altq_enqueue) in struct ifaltq.
 */
static int
hfsc_enqueue(struct ifaltq *ifq, struct mbuf *m, struct altq_pktattr *pktattr)
{
	struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;
	struct hfsc_class *cl;
	struct pf_mtag *t;
	int len;

	IFQ_LOCK_ASSERT(ifq);

	/* grab class set by classifier */
	if ((m->m_flags & M_PKTHDR) == 0) {
		/* should not happen */
		printf("altq: packet for %s does not have pkthdr\n",
		    ifq->altq_ifp->if_xname);
		m_freem(m);
		return (ENOBUFS);
	}
	cl = NULL;
	if ((t = pf_find_mtag(m)) != NULL)
		cl = clh_to_clp(hif, t->qid);
#ifdef ALTQ3_COMPAT
	else if ((ifq->altq_flags & ALTQF_CLASSIFY) && pktattr != NULL)
		cl = pktattr->pattr_class;
#endif
	if (cl == NULL || is_a_parent_class(cl)) {
		cl = hif->hif_defaultclass;
		if (cl == NULL) {
			m_freem(m);
			return (ENOBUFS);
		}
	}
#ifdef ALTQ3_COMPAT
	if (pktattr != NULL)
		cl->cl_pktattr = pktattr;  /* save proto hdr used by ECN */
	else
#endif
		cl->cl_pktattr = NULL;
	len = m_pktlen(m);
	if (hfsc_addq(cl, m) != 0) {
		/* drop occurred.  mbuf was freed in hfsc_addq. */
		PKTCNTR_ADD(&cl->cl_stats.drop_cnt, len);
		return (ENOBUFS);
	}
	IFQ_INC_LEN(ifq);
	cl->cl_hif->hif_packets++;

	/* successfully queued. */
	if (qlen(cl->cl_q) == 1)
		set_active(cl, m_pktlen(m));

	return (0);
}

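/*
 * classification sketch (illustrative only): with pf(4), a rule such as
 *
 *	pass out on em0 proto tcp to any port 80 queue web
 *
 * records the queue id of "web" in the packet's pf_mtag; hfsc_enqueue()
 * above recovers the class from that tag via pf_find_mtag() and
 * clh_to_clp(), and falls back to hif_defaultclass when no usable tag
 * is found.
 */
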
/*
 * hfsc_dequeue is a dequeue function to be registered to
 * (*altq_dequeue) in struct ifaltq.
 *
 * note: ALTDQ_POLL returns the next packet without removing the packet
 *	from the queue.  ALTDQ_REMOVE is a normal dequeue operation.
 *	ALTDQ_REMOVE must return the same packet if called immediately
 *	after ALTDQ_POLL.
 */
static struct mbuf *
hfsc_dequeue(struct ifaltq *ifq, int op)
{
	struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;
	struct hfsc_class *cl;
	struct mbuf *m;
	int len, next_len;
	int realtime = 0;
	u_int64_t cur_time;

	IFQ_LOCK_ASSERT(ifq);

	if (hif->hif_packets == 0)
		/* no packet in the tree */
		return (NULL);

	cur_time = read_machclk();

	if (op == ALTDQ_REMOVE && hif->hif_pollcache != NULL) {

		cl = hif->hif_pollcache;
		hif->hif_pollcache = NULL;
		/* check if the class was scheduled by real-time criteria */
		if (cl->cl_rsc != NULL)
			realtime = (cl->cl_e <= cur_time);
	} else {
		/*
		 * if there are eligible classes, use real-time criteria.
		 * find the class with the minimum deadline among
		 * the eligible classes.
		 */
		if ((cl = hfsc_get_mindl(hif, cur_time))
		    != NULL) {
			realtime = 1;
		} else {
#ifdef ALTQ_DEBUG
			int fits = 0;
#endif
			/*
			 * use link-sharing criteria
			 * get the class with the minimum vt in the hierarchy
			 */
			cl = hif->hif_rootclass;
			while (is_a_parent_class(cl)) {

				cl = actlist_firstfit(cl, cur_time);
				if (cl == NULL) {
#ifdef ALTQ_DEBUG
					if (fits > 0)
						printf("%d fit but none found\n", fits);
#endif
					return (NULL);
				}
				/*
				 * update parent's cl_cvtmin.
				 * don't update if the new vt is smaller.
				 */
				if (cl->cl_parent->cl_cvtmin < cl->cl_vt)
					cl->cl_parent->cl_cvtmin = cl->cl_vt;
#ifdef ALTQ_DEBUG
				fits++;
#endif
			}
		}

		if (op == ALTDQ_POLL) {
			hif->hif_pollcache = cl;
			m = hfsc_pollq(cl);
			return (m);
		}
	}

	m = hfsc_getq(cl);
	if (m == NULL)
		panic("hfsc_dequeue:");
	len = m_pktlen(m);
	cl->cl_hif->hif_packets--;
	IFQ_DEC_LEN(ifq);
	PKTCNTR_ADD(&cl->cl_stats.xmit_cnt, len);

	update_vf(cl, len, cur_time);
	if (realtime)
		cl->cl_cumul += len;

	if (!qempty(cl->cl_q)) {
		if (cl->cl_rsc != NULL) {
			/* update ed */
			next_len = m_pktlen(qhead(cl->cl_q));

			if (realtime)
				update_ed(cl, next_len);
			else
				update_d(cl, next_len);
		}
	} else {
		/* the class becomes passive */
		set_passive(cl);
	}

	return (m);
}

static int
hfsc_addq(struct hfsc_class *cl, struct mbuf *m)
{

#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		return rio_addq((rio_t *)cl->cl_red, cl->cl_q,
		    m, cl->cl_pktattr);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		return red_addq(cl->cl_red, cl->cl_q, m, cl->cl_pktattr);
#endif
	if (qlen(cl->cl_q) >= qlimit(cl->cl_q)) {
		m_freem(m);
		return (-1);
	}

	if (cl->cl_flags & HFCF_CLEARDSCP)
		write_dsfield(m, cl->cl_pktattr, 0);

	_addq(cl->cl_q, m);

	return (0);
}

static struct mbuf *
hfsc_getq(struct hfsc_class *cl)
{
#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		return rio_getq((rio_t *)cl->cl_red, cl->cl_q);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		return red_getq(cl->cl_red, cl->cl_q);
#endif
	return _getq(cl->cl_q);
}

static struct mbuf *
hfsc_pollq(struct hfsc_class *cl)
{
	return qhead(cl->cl_q);
}

static void
hfsc_purgeq(struct hfsc_class *cl)
{
	struct mbuf *m;

	if (qempty(cl->cl_q))
		return;

	while ((m = _getq(cl->cl_q)) != NULL) {
		PKTCNTR_ADD(&cl->cl_stats.drop_cnt, m_pktlen(m));
		m_freem(m);
		cl->cl_hif->hif_packets--;
		IFQ_DEC_LEN(cl->cl_hif->hif_ifq);
	}
	ASSERT(qlen(cl->cl_q) == 0);

	update_vf(cl, 0, 0);	/* remove cl from the actlist */
	set_passive(cl);
}

static void
set_active(struct hfsc_class *cl, int len)
{
	if (cl->cl_rsc != NULL)
		init_ed(cl, len);
	if (cl->cl_fsc != NULL)
		init_vf(cl, len);

	cl->cl_stats.period++;
}

static void
set_passive(struct hfsc_class *cl)
{
	if (cl->cl_rsc != NULL)
		ellist_remove(cl);

	/*
	 * actlist is now handled in update_vf() so that update_vf(cl, 0, 0)
	 * needs to be called explicitly to remove a class from actlist
	 */
}

static void
init_ed(struct hfsc_class *cl, int next_len)
{
	u_int64_t cur_time;

	cur_time = read_machclk();

	/* update the deadline curve */
	rtsc_min(&cl->cl_deadline, cl->cl_rsc, cur_time, cl->cl_cumul);

	/*
	 * update the eligible curve.
	 * for concave, it is equal to the deadline curve.
	 * for convex, it is a linear curve with slope m2.
	 */
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc->sm1 <= cl->cl_rsc->sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}

	/* compute e and d */
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	ellist_insert(cl);
}

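/*
 * example (illustrative values): an rsc with m1 = 10Mbps, d = 20msec,
 * m2 = 1Mbps is concave (sm1 > sm2), so the eligible curve equals the
 * deadline curve and the class may become eligible during the burst
 * segment.  with m1 < m2 the curve is convex; the (dx, dy) offset is
 * zeroed above, leaving a straight line of slope m2, so a class can
 * never become eligible ahead of its long-term m2 rate.
 */
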
static void
update_ed(struct hfsc_class *cl, int next_len)
{
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	ellist_update(cl);
}

static void
update_d(struct hfsc_class *cl, int next_len)
{
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
}

static void
init_vf(struct hfsc_class *cl, int len)
{
	struct hfsc_class *max_cl, *p;
	u_int64_t vt, f, cur_time;
	int go_active;

	cur_time = 0;
	go_active = 1;
	for ( ; cl->cl_parent != NULL; cl = cl->cl_parent) {

		if (go_active && cl->cl_nactive++ == 0)
			go_active = 1;
		else
			go_active = 0;

		if (go_active) {
			max_cl = TAILQ_LAST(&cl->cl_parent->cl_actc, acthead);
			if (max_cl != NULL) {
				/*
				 * set vt to the average of the min and max
				 * classes.  if the parent's period didn't
				 * change, don't decrease vt of the class.
				 */
				vt = max_cl->cl_vt;
				if (cl->cl_parent->cl_cvtmin != 0)
					vt = (cl->cl_parent->cl_cvtmin + vt)/2;

				if (cl->cl_parent->cl_vtperiod !=
				    cl->cl_parentperiod || vt > cl->cl_vt)
					cl->cl_vt = vt;
			} else {
				/*
				 * first child for a new parent backlog period.
				 * add parent's cvtmax to vtoff of children
				 * to make a new vt (vtoff + vt) larger than
				 * the vt in the last period for all children.
				 */
				vt = cl->cl_parent->cl_cvtmax;
				for (p = cl->cl_parent->cl_children; p != NULL;
				     p = p->cl_siblings)
					p->cl_vtoff += vt;
				cl->cl_vt = 0;
				cl->cl_parent->cl_cvtmax = 0;
				cl->cl_parent->cl_cvtmin = 0;
			}
			cl->cl_initvt = cl->cl_vt;

			/* update the virtual curve */
			vt = cl->cl_vt + cl->cl_vtoff;
			rtsc_min(&cl->cl_virtual, cl->cl_fsc, vt, cl->cl_total);
			if (cl->cl_virtual.x == vt) {
				cl->cl_virtual.x -= cl->cl_vtoff;
				cl->cl_vtoff = 0;
			}
			cl->cl_vtadj = 0;

			cl->cl_vtperiod++;  /* increment vt period */
			cl->cl_parentperiod = cl->cl_parent->cl_vtperiod;
			if (cl->cl_parent->cl_nactive == 0)
				cl->cl_parentperiod++;
			cl->cl_f = 0;

			actlist_insert(cl);

			if (cl->cl_usc != NULL) {
				/* class has upper limit curve */
				if (cur_time == 0)
					cur_time = read_machclk();

				/* update the ulimit curve */
				rtsc_min(&cl->cl_ulimit, cl->cl_usc, cur_time,
				    cl->cl_total);
				/* compute myf */
				cl->cl_myf = rtsc_y2x(&cl->cl_ulimit,
				    cl->cl_total);
				cl->cl_myfadj = 0;
			}
		}

		if (cl->cl_myf > cl->cl_cfmin)
			f = cl->cl_myf;
		else
			f = cl->cl_cfmin;
		if (f != cl->cl_f) {
			cl->cl_f = f;
			update_cfmin(cl->cl_parent);
		}
	}
}

static void
update_vf(struct hfsc_class *cl, int len, u_int64_t cur_time)
{
	u_int64_t f, myf_bound, delta;
	int go_passive;

	go_passive = qempty(cl->cl_q);

	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {

		cl->cl_total += len;

		if (cl->cl_fsc == NULL || cl->cl_nactive == 0)
			continue;

		if (go_passive && --cl->cl_nactive == 0)
			go_passive = 1;
		else
			go_passive = 0;

		if (go_passive) {
			/* no more active child, going passive */

			/* update cvtmax of the parent class */
			if (cl->cl_vt > cl->cl_parent->cl_cvtmax)
				cl->cl_parent->cl_cvtmax = cl->cl_vt;

			/* remove this class from the vt list */
			actlist_remove(cl);

			update_cfmin(cl->cl_parent);

			continue;
		}

		/*
		 * update vt and f
		 */
		cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total)
		    - cl->cl_vtoff + cl->cl_vtadj;

		/*
		 * if vt of the class is smaller than cvtmin,
		 * the class was skipped in the past due to non-fit.
		 * if so, we need to adjust vtadj.
		 */
		if (cl->cl_vt < cl->cl_parent->cl_cvtmin) {
			cl->cl_vtadj += cl->cl_parent->cl_cvtmin - cl->cl_vt;
			cl->cl_vt = cl->cl_parent->cl_cvtmin;
		}

		/* update the vt list */
		actlist_update(cl);

		if (cl->cl_usc != NULL) {
			cl->cl_myf = cl->cl_myfadj
			    + rtsc_y2x(&cl->cl_ulimit, cl->cl_total);

			/*
			 * if myf lags behind by more than one clock tick
			 * from the current time, adjust myfadj to prevent
			 * a rate-limited class from going greedy.
			 * in a steady state under rate-limiting, myf
			 * fluctuates within one clock tick.
			 */
			myf_bound = cur_time - machclk_per_tick;
			if (cl->cl_myf < myf_bound) {
				delta = cur_time - cl->cl_myf;
				cl->cl_myfadj += delta;
				cl->cl_myf += delta;
			}
		}

		/* cl_f is max(cl_myf, cl_cfmin) */
		if (cl->cl_myf > cl->cl_cfmin)
			f = cl->cl_myf;
		else
			f = cl->cl_cfmin;
		if (f != cl->cl_f) {
			cl->cl_f = f;
			update_cfmin(cl->cl_parent);
		}
	}
}

static void
update_cfmin(struct hfsc_class *cl)
{
	struct hfsc_class *p;
	u_int64_t cfmin;

	if (TAILQ_EMPTY(&cl->cl_actc)) {
		cl->cl_cfmin = 0;
		return;
	}
	cfmin = HT_INFINITY;
	TAILQ_FOREACH(p, &cl->cl_actc, cl_actlist) {
		if (p->cl_f == 0) {
			cl->cl_cfmin = 0;
			return;
		}
		if (p->cl_f < cfmin)
			cfmin = p->cl_f;
	}
	cl->cl_cfmin = cfmin;
}

/*
 * TAILQ based ellist and actlist implementation
 * (ion wanted to make a calendar queue based implementation)
 */
/*
 * eligible list holds backlogged classes being sorted by their eligible times.
 * there is one eligible list per interface.
 */

static void
ellist_insert(struct hfsc_class *cl)
{
	struct hfsc_if *hif = cl->cl_hif;
	struct hfsc_class *p;

	/* check the last entry first */
	if ((p = TAILQ_LAST(&hif->hif_eligible, elighead)) == NULL ||
	    p->cl_e <= cl->cl_e) {
		TAILQ_INSERT_TAIL(&hif->hif_eligible, cl, cl_ellist);
		return;
	}

	TAILQ_FOREACH(p, &hif->hif_eligible, cl_ellist) {
		if (cl->cl_e < p->cl_e) {
			TAILQ_INSERT_BEFORE(p, cl, cl_ellist);
			return;
		}
	}
	ASSERT(0); /* should not reach here */
}

static void
ellist_remove(struct hfsc_class *cl)
{
	struct hfsc_if *hif = cl->cl_hif;

	TAILQ_REMOVE(&hif->hif_eligible, cl, cl_ellist);
}

static void
ellist_update(struct hfsc_class *cl)
{
	struct hfsc_if *hif = cl->cl_hif;
	struct hfsc_class *p, *last;

	/*
	 * the eligible time of a class increases monotonically.
	 * if the next entry has a larger eligible time, nothing to do.
	 */
	p = TAILQ_NEXT(cl, cl_ellist);
	if (p == NULL || cl->cl_e <= p->cl_e)
		return;

	/* check the last entry */
	last = TAILQ_LAST(&hif->hif_eligible, elighead);
	ASSERT(last != NULL);
	if (last->cl_e <= cl->cl_e) {
		TAILQ_REMOVE(&hif->hif_eligible, cl, cl_ellist);
		TAILQ_INSERT_TAIL(&hif->hif_eligible, cl, cl_ellist);
		return;
	}

	/*
	 * the new position must be between the next entry
	 * and the last entry
	 */
	while ((p = TAILQ_NEXT(p, cl_ellist)) != NULL) {
		if (cl->cl_e < p->cl_e) {
			TAILQ_REMOVE(&hif->hif_eligible, cl, cl_ellist);
			TAILQ_INSERT_BEFORE(p, cl, cl_ellist);
			return;
		}
	}
	ASSERT(0); /* should not reach here */
}

/* find the class with the minimum deadline among the eligible classes */
struct hfsc_class *
hfsc_get_mindl(struct hfsc_if *hif, u_int64_t cur_time)
{
	struct hfsc_class *p, *cl = NULL;

	TAILQ_FOREACH(p, &hif->hif_eligible, cl_ellist) {
		if (p->cl_e > cur_time)
			break;
		if (cl == NULL || p->cl_d < cl->cl_d)
			cl = p;
	}
	return (cl);
}

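/*
 * worked example (times in arbitrary machine clock counts): with
 * cur_time = 1000 and an eligible list holding classes
 * (e=900, d=1500), (e=950, d=1200), (e=1100, d=1050), the scan above
 * considers the first two (e <= cur_time) and returns the second since
 * its deadline 1200 is the smallest; the third class is skipped even
 * though its deadline is smaller still, because it is not yet eligible.
 */
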
/*
 * active children list holds backlogged child classes being sorted
 * by their virtual time.
 * each intermediate class has one active children list.
 */

static void
actlist_insert(struct hfsc_class *cl)
{
	struct hfsc_class *p;

	/* check the last entry first */
	if ((p = TAILQ_LAST(&cl->cl_parent->cl_actc, acthead)) == NULL
	    || p->cl_vt <= cl->cl_vt) {
		TAILQ_INSERT_TAIL(&cl->cl_parent->cl_actc, cl, cl_actlist);
		return;
	}

	TAILQ_FOREACH(p, &cl->cl_parent->cl_actc, cl_actlist) {
		if (cl->cl_vt < p->cl_vt) {
			TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
			return;
		}
	}
	ASSERT(0); /* should not reach here */
}

static void
actlist_remove(struct hfsc_class *cl)
{
	TAILQ_REMOVE(&cl->cl_parent->cl_actc, cl, cl_actlist);
}

static void
actlist_update(struct hfsc_class *cl)
{
	struct hfsc_class *p, *last;

	/*
	 * the virtual time of a class increases monotonically during its
	 * backlogged period.
	 * if the next entry has a larger virtual time, nothing to do.
	 */
	p = TAILQ_NEXT(cl, cl_actlist);
	if (p == NULL || cl->cl_vt < p->cl_vt)
		return;

	/* check the last entry */
	last = TAILQ_LAST(&cl->cl_parent->cl_actc, acthead);
	ASSERT(last != NULL);
	if (last->cl_vt <= cl->cl_vt) {
		TAILQ_REMOVE(&cl->cl_parent->cl_actc, cl, cl_actlist);
		TAILQ_INSERT_TAIL(&cl->cl_parent->cl_actc, cl, cl_actlist);
		return;
	}

	/*
	 * the new position must be between the next entry
	 * and the last entry
	 */
	while ((p = TAILQ_NEXT(p, cl_actlist)) != NULL) {
		if (cl->cl_vt < p->cl_vt) {
			TAILQ_REMOVE(&cl->cl_parent->cl_actc, cl, cl_actlist);
			TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
			return;
		}
	}
	ASSERT(0); /* should not reach here */
}

static struct hfsc_class *
actlist_firstfit(struct hfsc_class *cl, u_int64_t cur_time)
{
	struct hfsc_class *p;

	TAILQ_FOREACH(p, &cl->cl_actc, cl_actlist) {
		if (p->cl_f <= cur_time)
			return (p);
	}
	return (NULL);
}

/*
 * service curve support functions
 *
 *  external service curve parameters
 *	m: bits/sec
 *	d: msec
 *  internal service curve parameters
 *	sm: (bytes/tsc_interval) << SM_SHIFT
 *	ism: (tsc_count/byte) << ISM_SHIFT
 *	dx: tsc_count
 *
 * SM_SHIFT and ISM_SHIFT are scaled in order to keep effective digits.
 * we should be able to handle 100K-1Gbps linkspeed with 200Hz-1GHz CPU
 * speed.  SM_SHIFT and ISM_SHIFT are selected to have at least 3 effective
 * digits in decimal using the following table.
 *
 *  bits/sec     100Kbps   1Mbps     10Mbps    100Mbps    1Gbps
 *  ----------+-------------------------------------------------------
 *  bytes/nsec   12.5e-6   125e-6    1250e-6   12500e-6   125000e-6
 *  sm(500MHz)   25.0e-6   250e-6    2500e-6   25000e-6   250000e-6
 *  sm(200MHz)   62.5e-6   625e-6    6250e-6   62500e-6   625000e-6
 *
 *  nsec/byte    80000     8000      800       80         8
 *  ism(500MHz)  40000     4000      400       40         4
 *  ism(200MHz)  16000     1600      160       16         1.6
 */
#define	SM_SHIFT	24
#define	ISM_SHIFT	10

#define	SM_MASK		((1LL << SM_SHIFT) - 1)
#define	ISM_MASK	((1LL << ISM_SHIFT) - 1)

static __inline u_int64_t
seg_x2y(u_int64_t x, u_int64_t sm)
{
	u_int64_t y;

	/*
	 * compute
	 *	y = x * sm >> SM_SHIFT
	 * but divide it for the upper and lower bits to avoid overflow
	 */
	y = (x >> SM_SHIFT) * sm + (((x & SM_MASK) * sm) >> SM_SHIFT);
	return (y);
}

static __inline u_int64_t
seg_y2x(u_int64_t y, u_int64_t ism)
{
	u_int64_t x;

	if (y == 0)
		x = 0;
	else if (ism == HT_INFINITY)
		x = HT_INFINITY;
	else {
		x = (y >> ISM_SHIFT) * ism
		    + (((y & ISM_MASK) * ism) >> ISM_SHIFT);
	}
	return (x);
}

static __inline u_int64_t
m2sm(u_int m)
{
	u_int64_t sm;

	sm = ((u_int64_t)m << SM_SHIFT) / 8 / machclk_freq;
	return (sm);
}

static __inline u_int64_t
m2ism(u_int m)
{
	u_int64_t ism;

	if (m == 0)
		ism = HT_INFINITY;
	else
		ism = ((u_int64_t)machclk_freq << ISM_SHIFT) * 8 / m;
	return (ism);
}

static __inline u_int64_t
d2dx(u_int d)
{
	u_int64_t dx;

	dx = ((u_int64_t)d * machclk_freq) / 1000;
	return (dx);
}

static u_int
sm2m(u_int64_t sm)
{
	u_int64_t m;

	m = (sm * 8 * machclk_freq) >> SM_SHIFT;
	return ((u_int)m);
}

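/*
 * worked example (assuming a 1GHz machine clock, i.e. 1nsec ticks):
 * for m = 100Mbps, m2sm() gives
 *	sm  = (100000000 << 24) / 8 / 1000000000 = 209715
 * i.e. 209715 / 2^24 = 0.0125 bytes per tick, and m2ism() gives
 *	ism = (1000000000 << 10) * 8 / 100000000 = 81920
 * i.e. 81920 / 2^10 = 80 ticks per byte, matching the 100Mbps column
 * of the table above.
 */
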
static u_int
dx2d(u_int64_t dx)
{
	u_int64_t d;

	d = dx * 1000 / machclk_freq;
	return ((u_int)d);
}

static void
sc2isc(struct service_curve *sc, struct internal_sc *isc)
{
	isc->sm1 = m2sm(sc->m1);
	isc->ism1 = m2ism(sc->m1);
	isc->dx = d2dx(sc->d);
	isc->dy = seg_x2y(isc->dx, isc->sm1);
	isc->sm2 = m2sm(sc->m2);
	isc->ism2 = m2ism(sc->m2);
}

/*
 * initialize the runtime service curve with the given internal
 * service curve starting at (x, y).
 */
static void
rtsc_init(struct runtime_sc *rtsc, struct internal_sc * isc, u_int64_t x,
    u_int64_t y)
{
	rtsc->x = x;
	rtsc->y = y;
	rtsc->sm1 = isc->sm1;
	rtsc->ism1 = isc->ism1;
	rtsc->dx = isc->dx;
	rtsc->dy = isc->dy;
	rtsc->sm2 = isc->sm2;
	rtsc->ism2 = isc->ism2;
}

/*
 * calculate the y-projection of the runtime service curve by the
 * given x-projection value
 */
static u_int64_t
rtsc_y2x(struct runtime_sc *rtsc, u_int64_t y)
{
	u_int64_t x;

	if (y < rtsc->y)
		x = rtsc->x;
	else if (y <= rtsc->y + rtsc->dy) {
		/* x belongs to the 1st segment */
		if (rtsc->dy == 0)
			x = rtsc->x + rtsc->dx;
		else
			x = rtsc->x + seg_y2x(y - rtsc->y, rtsc->ism1);
	} else {
		/* x belongs to the 2nd segment */
		x = rtsc->x + rtsc->dx
		    + seg_y2x(y - rtsc->y - rtsc->dy, rtsc->ism2);
	}
	return (x);
}

static u_int64_t
rtsc_x2y(struct runtime_sc *rtsc, u_int64_t x)
{
	u_int64_t y;

	if (x <= rtsc->x)
		y = rtsc->y;
	else if (x <= rtsc->x + rtsc->dx)
		/* y belongs to the 1st segment */
		y = rtsc->y + seg_x2y(x - rtsc->x, rtsc->sm1);
	else
		/* y belongs to the 2nd segment */
		y = rtsc->y + rtsc->dy
		    + seg_x2y(x - rtsc->x - rtsc->dx, rtsc->sm2);
	return (y);
}

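/*
 * numeric sketch (fixed-point scaling left out for clarity): for a
 * runtime curve anchored at (x=0, y=0) with dx = 100, dy = 50, i.e.
 * 0.5 bytes/tick on the first segment and then 0.25 bytes/tick,
 *	rtsc_x2y(200) = 50 + (200 - 100) * 0.25 = 75
 *	rtsc_y2x(75)  = 100 + (75 - 50) / 0.25 = 200
 * the two projections are inverses of each other on the same curve.
 */
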
/*
 * update the runtime service curve by taking the minimum of the current
 * runtime service curve and the service curve starting at (x, y).
 */
static void
rtsc_min(struct runtime_sc *rtsc, struct internal_sc *isc, u_int64_t x,
    u_int64_t y)
{
	u_int64_t y1, y2, dx, dy;

	if (isc->sm1 <= isc->sm2) {
		/* service curve is convex */
		y1 = rtsc_x2y(rtsc, x);
		if (y1 < y)
			/* the current rtsc is smaller */
			return;
		rtsc->x = x;
		rtsc->y = y;
		return;
	}

	/*
	 * service curve is concave
	 * compute the two y values of the current rtsc
	 *	y1: at x
	 *	y2: at (x + dx)
	 */
	y1 = rtsc_x2y(rtsc, x);
	if (y1 <= y) {
		/* rtsc is below isc, no change to rtsc */
		return;
	}

	y2 = rtsc_x2y(rtsc, x + isc->dx);
	if (y2 >= y + isc->dy) {
		/* rtsc is above isc, replace rtsc by isc */
		rtsc->x = x;
		rtsc->y = y;
		rtsc->dx = isc->dx;
		rtsc->dy = isc->dy;
		return;
	}

	/*
	 * the two curves intersect
	 * compute the offsets (dx, dy) using the reverse
	 * function of seg_x2y()
	 *	seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
	 * i.e. dx * sm1 >> SM_SHIFT == (dx * sm2 >> SM_SHIFT) + (y1 - y),
	 * which solves to dx == ((y1 - y) << SM_SHIFT) / (sm1 - sm2)
	 */
	dx = ((y1 - y) << SM_SHIFT) / (isc->sm1 - isc->sm2);
	/*
	 * check if (x, y1) belongs to the 1st segment of rtsc.
	 * if so, add the offset.
	 */
	if (rtsc->x + rtsc->dx > x)
		dx += rtsc->x + rtsc->dx - x;
	dy = seg_x2y(dx, isc->sm1);

	rtsc->x = x;
	rtsc->y = y;
	rtsc->dx = dx;
	rtsc->dy = dy;
	return;
}

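/*
 * usage sketch (see init_ed() and init_vf() above): at the start of a
 * backlogged period the per-class runtime curves are re-anchored at
 * the current point, e.g.
 *	rtsc_min(&cl->cl_deadline, cl->cl_rsc, cur_time, cl->cl_cumul);
 * taking the minimum instead of plainly re-anchoring means the burst
 * segment of a concave curve is not granted afresh every period, so a
 * class that has just consumed its m1 burst cannot immediately claim
 * a new one.
 */
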
static void
get_class_stats(struct hfsc_classstats *sp, struct hfsc_class *cl)
{
	sp->class_id = cl->cl_id;
	sp->class_handle = cl->cl_handle;

	if (cl->cl_rsc != NULL) {
		sp->rsc.m1 = sm2m(cl->cl_rsc->sm1);
		sp->rsc.d = dx2d(cl->cl_rsc->dx);
		sp->rsc.m2 = sm2m(cl->cl_rsc->sm2);
	} else {
		sp->rsc.m1 = 0;
		sp->rsc.d = 0;
		sp->rsc.m2 = 0;
	}
	if (cl->cl_fsc != NULL) {
		sp->fsc.m1 = sm2m(cl->cl_fsc->sm1);
		sp->fsc.d = dx2d(cl->cl_fsc->dx);
		sp->fsc.m2 = sm2m(cl->cl_fsc->sm2);
	} else {
		sp->fsc.m1 = 0;
		sp->fsc.d = 0;
		sp->fsc.m2 = 0;
	}
	if (cl->cl_usc != NULL) {
		sp->usc.m1 = sm2m(cl->cl_usc->sm1);
		sp->usc.d = dx2d(cl->cl_usc->dx);
		sp->usc.m2 = sm2m(cl->cl_usc->sm2);
	} else {
		sp->usc.m1 = 0;
		sp->usc.d = 0;
		sp->usc.m2 = 0;
	}

	sp->total = cl->cl_total;
	sp->cumul = cl->cl_cumul;

	sp->d = cl->cl_d;
	sp->e = cl->cl_e;
	sp->vt = cl->cl_vt;
	sp->f = cl->cl_f;

	sp->initvt = cl->cl_initvt;
	sp->vtperiod = cl->cl_vtperiod;
	sp->parentperiod = cl->cl_parentperiod;
	sp->nactive = cl->cl_nactive;
	sp->vtoff = cl->cl_vtoff;
	sp->cvtmax = cl->cl_cvtmax;
	sp->myf = cl->cl_myf;
	sp->cfmin = cl->cl_cfmin;
	sp->cvtmin = cl->cl_cvtmin;
	sp->myfadj = cl->cl_myfadj;
	sp->vtadj = cl->cl_vtadj;

	sp->cur_time = read_machclk();
	sp->machclk_freq = machclk_freq;

	sp->qlength = qlen(cl->cl_q);
	sp->qlimit = qlimit(cl->cl_q);
	sp->xmit_cnt = cl->cl_stats.xmit_cnt;
	sp->drop_cnt = cl->cl_stats.drop_cnt;
	sp->period = cl->cl_stats.period;

	sp->qtype = qtype(cl->cl_q);
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		red_getstats(cl->cl_red, &sp->red[0]);
#endif
#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		rio_getstats((rio_t *)cl->cl_red, &sp->red[0]);
#endif
}

/* convert a class handle to the corresponding class pointer */
static struct hfsc_class *
clh_to_clp(struct hfsc_if *hif, u_int32_t chandle)
{
	int i;
	struct hfsc_class *cl;

	if (chandle == 0)
		return (NULL);
	/*
	 * first, try optimistically the slot matching the lower bits of
	 * the handle.  if it fails, do the linear table search.
	 */
	i = chandle % HFSC_MAX_CLASSES;
	if ((cl = hif->hif_class_tbl[i]) != NULL && cl->cl_handle == chandle)
		return (cl);
	for (i = 0; i < HFSC_MAX_CLASSES; i++)
		if ((cl = hif->hif_class_tbl[i]) != NULL &&
		    cl->cl_handle == chandle)
			return (cl);
	return (NULL);
}

#ifdef ALTQ3_COMPAT
static struct hfsc_if *
hfsc_attach(ifq, bandwidth)
	struct ifaltq *ifq;
	u_int bandwidth;
{
	struct hfsc_if *hif;

	hif = malloc(sizeof(struct hfsc_if), M_DEVBUF, M_WAITOK);
	if (hif == NULL)
		return (NULL);
	bzero(hif, sizeof(struct hfsc_if));

	TAILQ_INIT(&hif->hif_eligible);

	hif->hif_ifq = ifq;

	/* add this state to the hfsc list */
	hif->hif_next = hif_list;
	hif_list = hif;

	return (hif);
}

static int
hfsc_detach(hif)
	struct hfsc_if *hif;
{
	(void)hfsc_clear_interface(hif);
	(void)hfsc_class_destroy(hif->hif_rootclass);

	/* remove this interface from the hif list */
	if (hif_list == hif)
		hif_list = hif->hif_next;
	else {
		struct hfsc_if *h;

		for (h = hif_list; h != NULL; h = h->hif_next)
			if (h->hif_next == hif) {
				h->hif_next = hif->hif_next;
				break;
			}
		ASSERT(h != NULL);
	}

	free(hif, M_DEVBUF);

	return (0);
}

static int
hfsc_class_modify(cl, rsc, fsc, usc)
	struct hfsc_class *cl;
	struct service_curve *rsc, *fsc, *usc;
{
	struct internal_sc *rsc_tmp, *fsc_tmp, *usc_tmp;
	u_int64_t cur_time;
	int s;

	rsc_tmp = fsc_tmp = usc_tmp = NULL;
	if (rsc != NULL && (rsc->m1 != 0 || rsc->m2 != 0) &&
	    cl->cl_rsc == NULL) {
		rsc_tmp = malloc(sizeof(struct internal_sc),
		    M_DEVBUF, M_WAITOK);
		if (rsc_tmp == NULL)
			return (ENOMEM);
	}
	if (fsc != NULL && (fsc->m1 != 0 || fsc->m2 != 0) &&
	    cl->cl_fsc == NULL) {
		fsc_tmp = malloc(sizeof(struct internal_sc),
		    M_DEVBUF, M_WAITOK);
		if (fsc_tmp == NULL) {
			free(rsc_tmp, M_DEVBUF);
			return (ENOMEM);
		}
	}
	if (usc != NULL && (usc->m1 != 0 || usc->m2 != 0) &&
	    cl->cl_usc == NULL) {
		usc_tmp = malloc(sizeof(struct internal_sc),
		    M_DEVBUF, M_WAITOK);
		if (usc_tmp == NULL) {
			free(rsc_tmp, M_DEVBUF);
			free(fsc_tmp, M_DEVBUF);
			return (ENOMEM);
		}
	}

	cur_time = read_machclk();
#ifdef __NetBSD__
	s = splnet();
#else
	s = splimp();
#endif
	IFQ_LOCK(cl->cl_hif->hif_ifq);

	if (rsc != NULL) {
		if (rsc->m1 == 0 && rsc->m2 == 0) {
			if (cl->cl_rsc != NULL) {
				if (!qempty(cl->cl_q))
					hfsc_purgeq(cl);
				free(cl->cl_rsc, M_DEVBUF);
				cl->cl_rsc = NULL;
			}
		} else {
			if (cl->cl_rsc == NULL)
				cl->cl_rsc = rsc_tmp;
			sc2isc(rsc, cl->cl_rsc);
			rtsc_init(&cl->cl_deadline, cl->cl_rsc, cur_time,
			    cl->cl_cumul);
			cl->cl_eligible = cl->cl_deadline;
			if (cl->cl_rsc->sm1 <= cl->cl_rsc->sm2) {
				cl->cl_eligible.dx = 0;
				cl->cl_eligible.dy = 0;
			}
		}
	}

	if (fsc != NULL) {
		if (fsc->m1 == 0 && fsc->m2 == 0) {
			if (cl->cl_fsc != NULL) {
				if (!qempty(cl->cl_q))
					hfsc_purgeq(cl);
				free(cl->cl_fsc, M_DEVBUF);
				cl->cl_fsc = NULL;
			}
		} else {
			if (cl->cl_fsc == NULL)
				cl->cl_fsc = fsc_tmp;
			sc2isc(fsc, cl->cl_fsc);
			rtsc_init(&cl->cl_virtual, cl->cl_fsc, cl->cl_vt,
			    cl->cl_total);
		}
	}

	if (usc != NULL) {
		if (usc->m1 == 0 && usc->m2 == 0) {
			if (cl->cl_usc != NULL) {
				free(cl->cl_usc, M_DEVBUF);
				cl->cl_usc = NULL;
				cl->cl_myf = 0;
			}
		} else {
			if (cl->cl_usc == NULL)
				cl->cl_usc = usc_tmp;
			sc2isc(usc, cl->cl_usc);
			rtsc_init(&cl->cl_ulimit, cl->cl_usc, cur_time,
			    cl->cl_total);
		}
	}

	if (!qempty(cl->cl_q)) {
		if (cl->cl_rsc != NULL)
			update_ed(cl, m_pktlen(qhead(cl->cl_q)));
		if (cl->cl_fsc != NULL)
			update_vf(cl, 0, cur_time);
		/* is this enough? */
	}

	IFQ_UNLOCK(cl->cl_hif->hif_ifq);
	splx(s);

	return (0);
}

/*
 * hfsc device interface
 */
int
hfscopen(dev, flag, fmt, p)
	dev_t dev;
	int flag, fmt;
#if (__FreeBSD_version > 500000)
	struct thread *p;
#else
	struct proc *p;
#endif
{
	if (machclk_freq == 0)
		init_machclk();

	if (machclk_freq == 0) {
		printf("hfsc: no cpu clock available!\n");
		return (ENXIO);
	}

	/* everything will be done when the queueing scheme is attached. */
	return 0;
}

int
hfscclose(dev, flag, fmt, p)
	dev_t dev;
	int flag, fmt;
#if (__FreeBSD_version > 500000)
	struct thread *p;
#else
	struct proc *p;
#endif
{
	struct hfsc_if *hif;
	int err, error = 0;

	while ((hif = hif_list) != NULL) {
		/* destroy all */
		if (ALTQ_IS_ENABLED(hif->hif_ifq))
			altq_disable(hif->hif_ifq);

		err = altq_detach(hif->hif_ifq);
		if (err == 0)
			err = hfsc_detach(hif);
		if (err != 0 && error == 0)
			error = err;
	}

	return error;
}

int
hfscioctl(dev, cmd, addr, flag, p)
	dev_t dev;
	ioctlcmd_t cmd;
	caddr_t addr;
	int flag;
#if (__FreeBSD_version > 500000)
	struct thread *p;
#else
	struct proc *p;
#endif
{
	struct hfsc_if *hif;
	struct hfsc_interface *ifacep;
	int error = 0;

	/* check super-user privilege */
	switch (cmd) {
	case HFSC_GETSTATS:
		break;
	default:
#if (__FreeBSD_version > 700000)
		if ((error = priv_check(p, PRIV_ALTQ_MANAGE)) != 0)
			return (error);
#elif (__FreeBSD_version > 400000)
		if ((error = suser(p)) != 0)
			return (error);
#else
		if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
			return (error);
#endif
		break;
	}

	switch (cmd) {

	case HFSC_IF_ATTACH:
		error = hfsccmd_if_attach((struct hfsc_attach *)addr);
		break;

	case HFSC_IF_DETACH:
		error = hfsccmd_if_detach((struct hfsc_interface *)addr);
		break;

	case HFSC_ENABLE:
	case HFSC_DISABLE:
	case HFSC_CLEAR_HIERARCHY:
		ifacep = (struct hfsc_interface *)addr;
		if ((hif = altq_lookup(ifacep->hfsc_ifname,
		    ALTQT_HFSC)) == NULL) {
			error = EBADF;
			break;
		}

		switch (cmd) {

		case HFSC_ENABLE:
			if (hif->hif_defaultclass == NULL) {
#ifdef ALTQ_DEBUG
				printf("hfsc: no default class\n");
#endif
				error = EINVAL;
				break;
			}
			error = altq_enable(hif->hif_ifq);
			break;

		case HFSC_DISABLE:
			error = altq_disable(hif->hif_ifq);
			break;

		case HFSC_CLEAR_HIERARCHY:
			hfsc_clear_interface(hif);
			break;
		}
		break;

	case HFSC_ADD_CLASS:
		error = hfsccmd_add_class((struct hfsc_add_class *)addr);
		break;

	case HFSC_DEL_CLASS:
		error = hfsccmd_delete_class((struct hfsc_delete_class *)addr);
		break;

	case HFSC_MOD_CLASS:
		error = hfsccmd_modify_class((struct hfsc_modify_class *)addr);
		break;

	case HFSC_ADD_FILTER:
		error = hfsccmd_add_filter((struct hfsc_add_filter *)addr);
		break;

	case HFSC_DEL_FILTER:
		error = hfsccmd_delete_filter((struct hfsc_delete_filter *)addr);
		break;

	case HFSC_GETSTATS:
		error = hfsccmd_class_stats((struct hfsc_class_stats *)addr);
		break;

	default:
		error = EINVAL;
		break;
	}
	return error;
}

static int
hfsccmd_if_attach(ap)
	struct hfsc_attach *ap;
{
	struct hfsc_if *hif;
	struct ifnet *ifp;
	int error;

	if ((ifp = ifunit(ap->iface.hfsc_ifname)) == NULL)
		return (ENXIO);

	if ((hif = hfsc_attach(&ifp->if_snd, ap->bandwidth)) == NULL)
		return (ENOMEM);

	/*
	 * set HFSC to this ifnet structure.
	 */
	if ((error = altq_attach(&ifp->if_snd, ALTQT_HFSC, hif,
	    hfsc_enqueue, hfsc_dequeue, hfsc_request,
	    &hif->hif_classifier, acc_classify)) != 0)
		(void)hfsc_detach(hif);

	return (error);
}

static int
hfsccmd_if_detach(ap)
	struct hfsc_interface *ap;
{
	struct hfsc_if *hif;
	int error;

	if ((hif = altq_lookup(ap->hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if (ALTQ_IS_ENABLED(hif->hif_ifq))
		altq_disable(hif->hif_ifq);

	if ((error = altq_detach(hif->hif_ifq)))
		return (error);

	return hfsc_detach(hif);
}

static int
hfsccmd_add_class(ap)
	struct hfsc_add_class *ap;
{
	struct hfsc_if *hif;
	struct hfsc_class *cl, *parent;
	int i;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if (ap->parent_handle == HFSC_NULLCLASS_HANDLE &&
	    hif->hif_rootclass == NULL)
		parent = NULL;
	else if ((parent = clh_to_clp(hif, ap->parent_handle)) == NULL)
		return (EINVAL);

	/* assign a class handle (use a free slot number for now) */
	for (i = 1; i < HFSC_MAX_CLASSES; i++)
		if (hif->hif_class_tbl[i] == NULL)
			break;
	if (i == HFSC_MAX_CLASSES)
		return (EBUSY);

	if ((cl = hfsc_class_create(hif, &ap->service_curve, NULL, NULL,
	    parent, ap->qlimit, ap->flags, i)) == NULL)
		return (ENOMEM);

	/* return a class handle to the user */
	ap->class_handle = i;

	return (0);
}

static int
hfsccmd_delete_class(ap)
	struct hfsc_delete_class *ap;
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if ((cl = clh_to_clp(hif, ap->class_handle)) == NULL)
		return (EINVAL);

	return hfsc_class_destroy(cl);
}

static int
hfsccmd_modify_class(ap)
	struct hfsc_modify_class *ap;
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;
	struct service_curve *rsc = NULL;
	struct service_curve *fsc = NULL;
	struct service_curve *usc = NULL;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if ((cl = clh_to_clp(hif, ap->class_handle)) == NULL)
		return (EINVAL);

	if (ap->sctype & HFSC_REALTIMESC)
		rsc = &ap->service_curve;
	if (ap->sctype & HFSC_LINKSHARINGSC)
		fsc = &ap->service_curve;
	if (ap->sctype & HFSC_UPPERLIMITSC)
		usc = &ap->service_curve;

	return hfsc_class_modify(cl, rsc, fsc, usc);
}

static int
hfsccmd_add_filter(ap)
	struct hfsc_add_filter *ap;
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if ((cl = clh_to_clp(hif, ap->class_handle)) == NULL)
		return (EINVAL);

	if (is_a_parent_class(cl)) {
#ifdef ALTQ_DEBUG
		printf("hfsccmd_add_filter: not a leaf class!\n");
#endif
		return (EINVAL);
	}

	return acc_add_filter(&hif->hif_classifier, &ap->filter,
	    cl, &ap->filter_handle);
}

static int
hfsccmd_delete_filter(ap)
	struct hfsc_delete_filter *ap;
{
	struct hfsc_if *hif;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	return acc_delete_filter(&hif->hif_classifier,
	    ap->filter_handle);
}

static int
hfsccmd_class_stats(ap)
	struct hfsc_class_stats *ap;
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;
	struct hfsc_classstats stats, *usp;
	int n, nclasses, error;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	ap->cur_time = read_machclk();
	ap->machclk_freq = machclk_freq;
	ap->hif_classes = hif->hif_classes;
	ap->hif_packets = hif->hif_packets;

	/* skip the first N classes in the tree */
	nclasses = ap->nskip;
	for (cl = hif->hif_rootclass, n = 0; cl != NULL && n < nclasses;
	     cl = hfsc_nextclass(cl), n++)
		;
	if (n != nclasses)
		return (EINVAL);

	/* then, read the next N classes in the tree */
	nclasses = ap->nclasses;
	usp = ap->stats;
	for (n = 0; cl != NULL && n < nclasses; cl = hfsc_nextclass(cl), n++) {

		get_class_stats(&stats, cl);

		if ((error = copyout((caddr_t)&stats, (caddr_t)usp++,
		    sizeof(stats))) != 0)
			return (error);
	}

	ap->nclasses = n;

	return (0);
}

#ifdef KLD_MODULE

static struct altqsw hfsc_sw =
	{"hfsc", hfscopen, hfscclose, hfscioctl};

ALTQ_MODULE(altq_hfsc, ALTQT_HFSC, &hfsc_sw);
MODULE_DEPEND(altq_hfsc, altq_red, 1, 1, 1);
MODULE_DEPEND(altq_hfsc, altq_rio, 1, 1, 1);

#endif /* KLD_MODULE */
#endif /* ALTQ3_COMPAT */

#endif /* ALTQ_HFSC */