/*-
 * Copyright (c) 1998-2002,2010 Luigi Rizzo, Universita` di Pisa
 * Portions Copyright (c) 2000 Akamba Corp.
 * All rights reserved
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/netpfil/ipfw/ip_dummynet.c 297228 2016-03-24 09:22:58Z hselasky $");

/*
 * Configuration and internal object management for dummynet.
 */

#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/time.h>
#include <sys/taskqueue.h>
#include <net/if.h>	/* IFNAMSIZ, struct ifaddr, ifq head, lock.h mutex.h */
#include <netinet/in.h>
#include <netinet/ip_var.h>	/* ip_output(), IP_FORWARDING */
#include <netinet/ip_fw.h>
#include <netinet/ip_dummynet.h>

#include <netpfil/ipfw/ip_fw_private.h>
#include <netpfil/ipfw/dn_heap.h>
#include <netpfil/ipfw/ip_dn_private.h>
#include <netpfil/ipfw/dn_sched.h>

/* which objects to copy */
#define DN_C_LINK	0x01
#define DN_C_SCH	0x02
#define DN_C_FLOW	0x04
#define DN_C_FS		0x08
#define DN_C_QUEUE	0x10

/* we use this argument in case of a schk_new */
struct schk_new_arg {
	struct dn_alg *fp;
	struct dn_sch *sch;
};

/*---- callout hooks. ----*/
static struct callout dn_timeout;
static int dn_gone;
static struct task	dn_task;
static struct taskqueue	*dn_tq = NULL;

static void
dummynet(void *arg)
{

	(void)arg;	/* UNUSED */
	taskqueue_enqueue_fast(dn_tq, &dn_task);
}

void
dn_reschedule(void)
{

	if (dn_gone != 0)
		return;
	callout_reset_sbt(&dn_timeout, tick_sbt, 0, dummynet, NULL,
	    C_HARDCLOCK | C_DIRECT_EXEC);
}
/*----- end of callout hooks -----*/

/* Return a scheduler descriptor given the type or name.
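 * (The lookup below matches either on the numeric type, e.g.
 * DN_SCHED_WF2QP, or -- when a name is supplied -- case-insensitively
 * on the name registered by a dn_sched module, e.g. "WF2Q+"; which
 * entries exist depends on the scheduler modules currently loaded.)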
 */
static struct dn_alg *
find_sched_type(int type, char *name)
{
	struct dn_alg *d;

	SLIST_FOREACH(d, &dn_cfg.schedlist, next) {
		if (d->type == type || (name && !strcasecmp(d->name, name)))
			return d;
	}
	return NULL; /* not found */
}

int
ipdn_bound_var(int *v, int dflt, int lo, int hi, const char *msg)
{
	int oldv = *v;
	const char *op = NULL;
	if (dflt < lo)
		dflt = lo;
	if (dflt > hi)
		dflt = hi;
	if (oldv < lo) {
		*v = dflt;
		op = "Bump";
	} else if (oldv > hi) {
		*v = hi;
		op = "Clamp";
	} else
		return *v;
	if (op && msg)
		printf("%s %s to %d (was %d)\n", op, msg, *v, oldv);
	return *v;
}

/*---- flow_id mask, hash and compare functions ---*/
/*
 * The flow_id includes the 5-tuple, the queue/pipe number
 * which we store in the extra area in host order,
 * and for ipv6 also the flow_id6.
 * XXX see if we want the tos byte (can store in 'flags')
 */
static struct ipfw_flow_id *
flow_id_mask(struct ipfw_flow_id *mask, struct ipfw_flow_id *id)
{
	int is_v6 = IS_IP6_FLOW_ID(id);

	id->dst_port &= mask->dst_port;
	id->src_port &= mask->src_port;
	id->proto &= mask->proto;
	id->extra &= mask->extra;
	if (is_v6) {
		APPLY_MASK(&id->dst_ip6, &mask->dst_ip6);
		APPLY_MASK(&id->src_ip6, &mask->src_ip6);
		id->flow_id6 &= mask->flow_id6;
	} else {
		id->dst_ip &= mask->dst_ip;
		id->src_ip &= mask->src_ip;
	}
	return id;
}

/* computes an OR of two masks, result in dst and also returned */
static struct ipfw_flow_id *
flow_id_or(struct ipfw_flow_id *src, struct ipfw_flow_id *dst)
{
	int is_v6 = IS_IP6_FLOW_ID(dst);

	dst->dst_port |= src->dst_port;
	dst->src_port |= src->src_port;
	dst->proto |= src->proto;
	dst->extra |= src->extra;
	if (is_v6) {
#define OR_MASK(_d, _s)                          \
	(_d)->__u6_addr.__u6_addr32[0] |= (_s)->__u6_addr.__u6_addr32[0]; \
	(_d)->__u6_addr.__u6_addr32[1] |= (_s)->__u6_addr.__u6_addr32[1]; \
	(_d)->__u6_addr.__u6_addr32[2] |= (_s)->__u6_addr.__u6_addr32[2]; \
	(_d)->__u6_addr.__u6_addr32[3] |= (_s)->__u6_addr.__u6_addr32[3];
		OR_MASK(&dst->dst_ip6, &src->dst_ip6);
		OR_MASK(&dst->src_ip6, &src->src_ip6);
#undef OR_MASK
		dst->flow_id6 |= src->flow_id6;
	} else {
		dst->dst_ip |= src->dst_ip;
		dst->src_ip |= src->src_ip;
	}
	return dst;
}

static int
nonzero_mask(struct ipfw_flow_id *m)
{
	if (m->dst_port || m->src_port || m->proto || m->extra)
		return 1;
	if (IS_IP6_FLOW_ID(m)) {
		return
			m->dst_ip6.__u6_addr.__u6_addr32[0] ||
			m->dst_ip6.__u6_addr.__u6_addr32[1] ||
			m->dst_ip6.__u6_addr.__u6_addr32[2] ||
			m->dst_ip6.__u6_addr.__u6_addr32[3] ||
			m->src_ip6.__u6_addr.__u6_addr32[0] ||
			m->src_ip6.__u6_addr.__u6_addr32[1] ||
			m->src_ip6.__u6_addr.__u6_addr32[2] ||
			m->src_ip6.__u6_addr.__u6_addr32[3] ||
			m->flow_id6;
	} else {
		return m->dst_ip || m->src_ip;
	}
}

/* XXX we may want a better hash function */
static uint32_t
flow_id_hash(struct ipfw_flow_id *id)
{
	uint32_t i;

	if (IS_IP6_FLOW_ID(id)) {
		uint32_t *d = (uint32_t *)&id->dst_ip6;
		uint32_t *s = (uint32_t *)&id->src_ip6;
		i = (d[0]      ) ^ (d[1]) ^
		    (d[2]      ) ^ (d[3]) ^
		    (d[0] >> 15) ^ (d[1] >> 15) ^
		    (d[2] >> 15) ^ (d[3] >> 15) ^
		    (s[0] <<  1) ^ (s[1] <<  1) ^
		    (s[2] <<  1) ^ (s[3] <<  1) ^
		    (s[0] << 16) ^ (s[1] << 16) ^
		    (s[2] << 16) ^ (s[3] << 16) ^
		    (id->dst_port << 1) ^ (id->src_port) ^
		    (id->extra) ^
		    (id->proto ) ^ (id->flow_id6);
	} else {
		i = (id->dst_ip) ^ (id->dst_ip >> 15) ^
		    (id->src_ip << 1) ^ (id->src_ip >> 16) ^
		    (id->extra) ^
		    (id->dst_port << 1) ^ (id->src_port) ^ (id->proto);
	}
	return i;
}

/* Like bcmp, returns 0 if ids match, 1 otherwise. */
static int
flow_id_cmp(struct ipfw_flow_id *id1, struct ipfw_flow_id *id2)
{
	int is_v6 = IS_IP6_FLOW_ID(id1);

	if (!is_v6) {
		if (IS_IP6_FLOW_ID(id2))
			return 1; /* different address families */

		return (id1->dst_ip == id2->dst_ip &&
		    id1->src_ip == id2->src_ip &&
		    id1->dst_port == id2->dst_port &&
		    id1->src_port == id2->src_port &&
		    id1->proto == id2->proto &&
		    id1->extra == id2->extra) ? 0 : 1;
	}
	/* the ipv6 case */
	return (
	    !bcmp(&id1->dst_ip6,&id2->dst_ip6, sizeof(id1->dst_ip6)) &&
	    !bcmp(&id1->src_ip6,&id2->src_ip6, sizeof(id1->src_ip6)) &&
	    id1->dst_port == id2->dst_port &&
	    id1->src_port == id2->src_port &&
	    id1->proto == id2->proto &&
	    id1->extra == id2->extra &&
	    id1->flow_id6 == id2->flow_id6) ? 0 : 1;
}
/*--------- end of flow-id mask, hash and compare ---------*/

/*--- support functions for the qht hashtable ----
 * Entries are hashed by flow-id
 */
static uint32_t
q_hash(uintptr_t key, int flags, void *arg)
{
	/* compute the hash slot from the flow id */
	struct ipfw_flow_id *id = (flags & DNHT_KEY_IS_OBJ) ?
		&((struct dn_queue *)key)->ni.fid :
		(struct ipfw_flow_id *)key;

	return flow_id_hash(id);
}

static int
q_match(void *obj, uintptr_t key, int flags, void *arg)
{
	struct dn_queue *o = (struct dn_queue *)obj;
	struct ipfw_flow_id *id2;

	if (flags & DNHT_KEY_IS_OBJ) {
		/* compare pointers */
		id2 = &((struct dn_queue *)key)->ni.fid;
	} else {
		id2 = (struct ipfw_flow_id *)key;
	}
	return (0 == flow_id_cmp(&o->ni.fid, id2));
}

/*
 * create a new queue instance for the given 'key'.
 */
static void *
q_new(uintptr_t key, int flags, void *arg)
{
	struct dn_queue *q, *template = arg;
	struct dn_fsk *fs = template->fs;
	int size = sizeof(*q) + fs->sched->fp->q_datalen;

	q = malloc(size, M_DUMMYNET, M_NOWAIT | M_ZERO);
	if (q == NULL) {
		D("no memory for new queue");
		return NULL;
	}

	set_oid(&q->ni.oid, DN_QUEUE, size);
	if (fs->fs.flags & DN_QHT_HASH)
		q->ni.fid = *(struct ipfw_flow_id *)key;
	q->fs = fs;
	q->_si = template->_si;
	q->_si->q_count++;

	if (fs->sched->fp->new_queue)
		fs->sched->fp->new_queue(q);
	dn_cfg.queue_count++;
	return q;
}

/*
 * Notify schedulers that a queue is going away.
 * If (flags & DN_DESTROY), also free the packets.
 * The version for callbacks is called q_delete_cb().
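 * A typical caller is qht_delete() below, which walks the flowset's
 * queue table and invokes q_delete_cb() on each queue, passing
 * DN_DESTROY when the flowset itself is being torn down.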
 */
static void
dn_delete_queue(struct dn_queue *q, int flags)
{
	struct dn_fsk *fs = q->fs;

	// D("fs %p si %p\n", fs, q->_si);
	/* notify the parent scheduler that the queue is going away */
	if (fs && fs->sched->fp->free_queue)
		fs->sched->fp->free_queue(q);
	q->_si->q_count--;
	q->_si = NULL;
	if (flags & DN_DESTROY) {
		if (q->mq.head)
			dn_free_pkts(q->mq.head);
		bzero(q, sizeof(*q));	// safety
		free(q, M_DUMMYNET);
		dn_cfg.queue_count--;
	}
}

static int
q_delete_cb(void *q, void *arg)
{
	int flags = (int)(uintptr_t)arg;
	dn_delete_queue(q, flags);
	return (flags & DN_DESTROY) ? DNHT_SCAN_DEL : 0;
}

/*
 * calls dn_delete_queue/q_delete_cb on all queues,
 * which notifies the parent scheduler and possibly drains packets.
 * flags & DN_DESTROY: drains queues and destroy qht;
 */
static void
qht_delete(struct dn_fsk *fs, int flags)
{
	ND("fs %d start flags %d qht %p",
		fs->fs.fs_nr, flags, fs->qht);
	if (!fs->qht)
		return;
	if (fs->fs.flags & DN_QHT_HASH) {
		dn_ht_scan(fs->qht, q_delete_cb, (void *)(uintptr_t)flags);
		if (flags & DN_DESTROY) {
			dn_ht_free(fs->qht, 0);
			fs->qht = NULL;
		}
	} else {
		dn_delete_queue((struct dn_queue *)(fs->qht), flags);
		if (flags & DN_DESTROY)
			fs->qht = NULL;
	}
}

/*
 * Find and possibly create the queue for a MULTIQUEUE scheduler.
 * We never call it for !MULTIQUEUE (the queue is in the sch_inst).
 */
struct dn_queue *
ipdn_q_find(struct dn_fsk *fs, struct dn_sch_inst *si,
	struct ipfw_flow_id *id)
{
	struct dn_queue template;

	template._si = si;
	template.fs = fs;

	if (fs->fs.flags & DN_QHT_HASH) {
		struct ipfw_flow_id masked_id;
		if (fs->qht == NULL) {
			fs->qht = dn_ht_init(NULL, fs->fs.buckets,
				offsetof(struct dn_queue, q_next),
				q_hash, q_match, q_new);
			if (fs->qht == NULL)
				return NULL;
		}
		masked_id = *id;
		flow_id_mask(&fs->fsk_mask, &masked_id);
		return dn_ht_find(fs->qht, (uintptr_t)&masked_id,
			DNHT_INSERT, &template);
	} else {
		if (fs->qht == NULL)
			fs->qht = q_new(0, 0, &template);
		return (struct dn_queue *)fs->qht;
	}
}
/*--- end of queue hash table ---*/

/*--- support functions for the sch_inst hashtable ----
 *
 * These are hashed by flow-id
 */
static uint32_t
si_hash(uintptr_t key, int flags, void *arg)
{
	/* compute the hash slot from the flow id */
	struct ipfw_flow_id *id = (flags & DNHT_KEY_IS_OBJ) ?
		&((struct dn_sch_inst *)key)->ni.fid :
		(struct ipfw_flow_id *)key;

	return flow_id_hash(id);
}

static int
si_match(void *obj, uintptr_t key, int flags, void *arg)
{
	struct dn_sch_inst *o = obj;
	struct ipfw_flow_id *id2;

	id2 = (flags & DNHT_KEY_IS_OBJ) ?
		&((struct dn_sch_inst *)key)->ni.fid :
		(struct ipfw_flow_id *)key;
	return flow_id_cmp(&o->ni.fid, id2) == 0;
}

/*
 * create a new instance for the given 'key'
 * Allocate memory for instance, delay line and scheduler private data.
 */
static void *
si_new(uintptr_t key, int flags, void *arg)
{
	struct dn_schk *s = arg;
	struct dn_sch_inst *si;
	int l = sizeof(*si) + s->fp->si_datalen;

	si = malloc(l, M_DUMMYNET, M_NOWAIT | M_ZERO);
	if (si == NULL)
		goto error;

	/* Set length only for the part passed up to userland.
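	 * The instance is allocated with room for the scheduler's private
	 * data (s->fp->si_datalen), but only sizeof(struct dn_flow) is
	 * advertised in the oid below, so userland copies see just the
	 * visible counters.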
	 */
	set_oid(&si->ni.oid, DN_SCH_I, sizeof(struct dn_flow));
	set_oid(&(si->dline.oid), DN_DELAY_LINE,
		sizeof(struct delay_line));
	/* mark si and dline as outside the event queue */
	si->ni.oid.id = si->dline.oid.id = -1;

	si->sched = s;
	si->dline.si = si;

	if (s->fp->new_sched && s->fp->new_sched(si)) {
		D("new_sched error");
		goto error;
	}
	if (s->sch.flags & DN_HAVE_MASK)
		si->ni.fid = *(struct ipfw_flow_id *)key;

	dn_cfg.si_count++;
	return si;

error:
	if (si) {
		bzero(si, sizeof(*si)); // safety
		free(si, M_DUMMYNET);
	}
	return NULL;
}

/*
 * Callback from siht to delete all scheduler instances. Remove
 * si and delay line from the system heap, destroy all queues.
 * We assume that all flowsets have been notified and do not
 * point to us anymore.
 */
static int
si_destroy(void *_si, void *arg)
{
	struct dn_sch_inst *si = _si;
	struct dn_schk *s = si->sched;
	struct delay_line *dl = &si->dline;

	if (dl->oid.subtype)	/* remove delay line from event heap */
		heap_extract(&dn_cfg.evheap, dl);
	dn_free_pkts(dl->mq.head);	/* drain delay line */
	if (si->kflags & DN_ACTIVE) /* remove si from event heap */
		heap_extract(&dn_cfg.evheap, si);
	if (s->fp->free_sched)
		s->fp->free_sched(si);
	bzero(si, sizeof(*si));	/* safety */
	free(si, M_DUMMYNET);
	dn_cfg.si_count--;
	return DNHT_SCAN_DEL;
}

/*
 * Find the scheduler instance for this packet. If we need to apply
 * a mask, do on a local copy of the flow_id to preserve the original.
 * Assume siht is always initialized if we have a mask.
 */
struct dn_sch_inst *
ipdn_si_find(struct dn_schk *s, struct ipfw_flow_id *id)
{

	if (s->sch.flags & DN_HAVE_MASK) {
		struct ipfw_flow_id id_t = *id;
		flow_id_mask(&s->sch.sched_mask, &id_t);
		return dn_ht_find(s->siht, (uintptr_t)&id_t,
			DNHT_INSERT, s);
	}
	if (!s->siht)
		s->siht = si_new(0, 0, s);
	return (struct dn_sch_inst *)s->siht;
}

/* callback to flush credit for the scheduler instance */
static int
si_reset_credit(void *_si, void *arg)
{
	struct dn_sch_inst *si = _si;
	struct dn_link *p = &si->sched->link;

	si->credit = p->burst + (dn_cfg.io_fast ? p->bandwidth : 0);
	return 0;
}

static void
schk_reset_credit(struct dn_schk *s)
{
	if (s->sch.flags & DN_HAVE_MASK)
		dn_ht_scan(s->siht, si_reset_credit, NULL);
	else if (s->siht)
		si_reset_credit(s->siht, NULL);
}
/*---- end of sch_inst hashtable ---------------------*/

/*-------------------------------------------------------
 * flowset hash (fshash) support. Entries are hashed by fs_nr.
 * New allocations are put in the fsunlinked list, from which
 * they are removed when they point to a specific scheduler.
 */
static uint32_t
fsk_hash(uintptr_t key, int flags, void *arg)
{
	uint32_t i = !(flags & DNHT_KEY_IS_OBJ) ? key :
		((struct dn_fsk *)key)->fs.fs_nr;

	return ( (i>>8)^(i>>4)^i );
}

static int
fsk_match(void *obj, uintptr_t key, int flags, void *arg)
{
	struct dn_fsk *fs = obj;
	int i = !(flags & DNHT_KEY_IS_OBJ) ?
		key : ((struct dn_fsk *)key)->fs.fs_nr;

	return (fs->fs.fs_nr == i);
}

static void *
fsk_new(uintptr_t key, int flags, void *arg)
{
	struct dn_fsk *fs;

	fs = malloc(sizeof(*fs), M_DUMMYNET, M_NOWAIT | M_ZERO);
	if (fs) {
		set_oid(&fs->fs.oid, DN_FS, sizeof(fs->fs));
		dn_cfg.fsk_count++;
		fs->drain_bucket = 0;
		SLIST_INSERT_HEAD(&dn_cfg.fsu, fs, sch_chain);
	}
	return fs;
}

/*
 * detach flowset from its current scheduler. Flags as follows:
 * DN_DETACH removes from the fsk_list
 * DN_DESTROY deletes individual queues
 * DN_DELETE_FS destroys the flowset (otherwise goes in unlinked).
 */
static void
fsk_detach(struct dn_fsk *fs, int flags)
{
	if (flags & DN_DELETE_FS)
		flags |= DN_DESTROY;
	ND("fs %d from sched %d flags %s %s %s",
		fs->fs.fs_nr, fs->fs.sched_nr,
		(flags & DN_DELETE_FS) ? "DEL_FS":"",
		(flags & DN_DESTROY) ? "DEL":"",
		(flags & DN_DETACH) ? "DET":"");
	if (flags & DN_DETACH) { /* detach from the list */
		struct dn_fsk_head *h;
		h = fs->sched ? &fs->sched->fsk_list : &dn_cfg.fsu;
		SLIST_REMOVE(h, fs, dn_fsk, sch_chain);
	}
	/* Free the RED parameters, they will be recomputed on
	 * subsequent attach if needed.
	 */
	if (fs->w_q_lookup)
		free(fs->w_q_lookup, M_DUMMYNET);
	fs->w_q_lookup = NULL;
	qht_delete(fs, flags);
	if (fs->sched && fs->sched->fp->free_fsk)
		fs->sched->fp->free_fsk(fs);
	fs->sched = NULL;
	if (flags & DN_DELETE_FS) {
		bzero(fs, sizeof(*fs));	/* safety */
		free(fs, M_DUMMYNET);
		dn_cfg.fsk_count--;
	} else {
		SLIST_INSERT_HEAD(&dn_cfg.fsu, fs, sch_chain);
	}
}

/*
 * Detach or destroy all flowsets in a list.
 * flags specifies what to do:
 * DN_DESTROY:	flush all queues
 * DN_DELETE_FS:	DN_DESTROY + destroy flowset
 *	DN_DELETE_FS implies DN_DESTROY
 */
static void
fsk_detach_list(struct dn_fsk_head *h, int flags)
{
	struct dn_fsk *fs;
	int n = 0; /* only for stats */

	ND("head %p flags %x", h, flags);
	while ((fs = SLIST_FIRST(h))) {
		SLIST_REMOVE_HEAD(h, sch_chain);
		n++;
		fsk_detach(fs, flags);
	}
	ND("done %d flowsets", n);
}

/*
 * called on 'queue X delete' -- removes the flowset from fshash,
 * deletes all queues for the flowset, and removes the flowset.
 */
static int
delete_fs(int i, int locked)
{
	struct dn_fsk *fs;
	int err = 0;

	if (!locked)
		DN_BH_WLOCK();
	fs = dn_ht_find(dn_cfg.fshash, i, DNHT_REMOVE, NULL);
	ND("fs %d found %p", i, fs);
	if (fs) {
		fsk_detach(fs, DN_DETACH | DN_DELETE_FS);
		err = 0;
	} else
		err = EINVAL;
	if (!locked)
		DN_BH_WUNLOCK();
	return err;
}

/*----- end of flowset hashtable support -------------*/

/*------------------------------------------------------------
 * Scheduler hash. When searching by index we pass sched_nr,
 * otherwise we pass struct dn_sch * which is the first field in
 * struct dn_schk so we can cast between the two. We use this trick
 * because in the create phase (but it should be fixed).
 */
static uint32_t
schk_hash(uintptr_t key, int flags, void *_arg)
{
	uint32_t i = !(flags & DNHT_KEY_IS_OBJ) ?
		key : ((struct dn_schk *)key)->sch.sched_nr;
	return ( (i>>8)^(i>>4)^i );
}

static int
schk_match(void *obj, uintptr_t key, int flags, void *_arg)
{
	struct dn_schk *s = (struct dn_schk *)obj;
	int i = !(flags & DNHT_KEY_IS_OBJ) ? key :
		((struct dn_schk *)key)->sch.sched_nr;
	return (s->sch.sched_nr == i);
}

/*
 * Create the entry and initialize with the sched hash if needed.
 * Leave s->fp unset so we can tell whether a dn_ht_find() returns
 * a new object or a previously existing one.
 */
static void *
schk_new(uintptr_t key, int flags, void *arg)
{
	struct schk_new_arg *a = arg;
	struct dn_schk *s;
	int l = sizeof(*s) + a->fp->schk_datalen;

	s = malloc(l, M_DUMMYNET, M_NOWAIT | M_ZERO);
	if (s == NULL)
		return NULL;
	set_oid(&s->link.oid, DN_LINK, sizeof(s->link));
	s->sch = *a->sch; // copy initial values
	s->link.link_nr = s->sch.sched_nr;
	SLIST_INIT(&s->fsk_list);
	/* initialize the hash table or create the single instance */
	s->fp = a->fp;	/* si_new needs this */
	s->drain_bucket = 0;
	if (s->sch.flags & DN_HAVE_MASK) {
		s->siht = dn_ht_init(NULL, s->sch.buckets,
			offsetof(struct dn_sch_inst, si_next),
			si_hash, si_match, si_new);
		if (s->siht == NULL) {
			free(s, M_DUMMYNET);
			return NULL;
		}
	}
	s->fp = NULL;	/* mark as a new scheduler */
	dn_cfg.schk_count++;
	return s;
}

/*
 * Callback for sched delete. Notify all attached flowsets to
 * detach from the scheduler, destroy the internal flowset, and
 * all instances. The scheduler goes away too.
 * arg is 0 (only detach flowsets and destroy instances)
 * DN_DESTROY (detach & delete queues, delete schk)
 * or DN_DELETE_FS (delete queues and flowsets, delete schk)
 */
static int
schk_delete_cb(void *obj, void *arg)
{
	struct dn_schk *s = obj;
#if 0
	int a = (int)arg;
	ND("sched %d arg %s%s",
		s->sch.sched_nr,
		a&DN_DESTROY ? "DEL ":"",
		a&DN_DELETE_FS ? "DEL_FS":"");
#endif
	fsk_detach_list(&s->fsk_list, arg ? DN_DESTROY : 0);
	/* no more flowset pointing to us now */
	if (s->sch.flags & DN_HAVE_MASK) {
		dn_ht_scan(s->siht, si_destroy, NULL);
		dn_ht_free(s->siht, 0);
	} else if (s->siht)
		si_destroy(s->siht, NULL);
	if (s->profile) {
		free(s->profile, M_DUMMYNET);
		s->profile = NULL;
	}
	s->siht = NULL;
	if (s->fp->destroy)
		s->fp->destroy(s);
	bzero(s, sizeof(*s));	// safety
	free(obj, M_DUMMYNET);
	dn_cfg.schk_count--;
	return DNHT_SCAN_DEL;
}

/*
 * called on a 'sched X delete' command. Deletes a single scheduler.
 * This is done by removing from the schedhash, unlinking all
 * flowsets and deleting their traffic.
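 * Only the scheduler identified by 'i' is handled here; the caller
 * (see the DN_LINK case of DN_CMD_DELETE in do_config()) is expected
 * to call delete_schk() again with i + DN_MAX_ID to remove the
 * companion FIFO scheduler.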
 */
static int
delete_schk(int i)
{
	struct dn_schk *s;

	s = dn_ht_find(dn_cfg.schedhash, i, DNHT_REMOVE, NULL);
	ND("%d %p", i, s);
	if (!s)
		return EINVAL;
	delete_fs(i + DN_MAX_ID, 1); /* first delete internal fs */
	/* then detach flowsets, delete traffic */
	schk_delete_cb(s, (void*)(uintptr_t)DN_DESTROY);
	return 0;
}
/*--- end of schk hashtable support ---*/

static int
copy_obj(char **start, char *end, void *_o, const char *msg, int i)
{
	struct dn_id *o = _o;
	int have = end - *start;

	if (have < o->len || o->len == 0 || o->type == 0) {
		D("(WARN) type %d %s %d have %d need %d",
			o->type, msg, i, have, o->len);
		return 1;
	}
	ND("type %d %s %d len %d", o->type, msg, i, o->len);
	bcopy(_o, *start, o->len);
	if (o->type == DN_LINK) {
		/* Adjust burst parameter for link */
		struct dn_link *l = (struct dn_link *)*start;
		l->burst = div64(l->burst, 8 * hz);
		l->delay = l->delay * 1000 / hz;
	} else if (o->type == DN_SCH) {
		/* Set id->id to the number of instances */
		struct dn_schk *s = _o;
		struct dn_id *id = (struct dn_id *)(*start);
		id->id = (s->sch.flags & DN_HAVE_MASK) ?
			dn_ht_entries(s->siht) : (s->siht ? 1 : 0);
	}
	*start += o->len;
	return 0;
}

/* Specific function to copy a queue.
 * Copies only the user-visible part of a queue (which is in
 * a struct dn_flow), and sets len accordingly.
 */
static int
copy_obj_q(char **start, char *end, void *_o, const char *msg, int i)
{
	struct dn_id *o = _o;
	int have = end - *start;
	int len = sizeof(struct dn_flow); /* see above comment */

	if (have < len || o->len == 0 || o->type != DN_QUEUE) {
		D("ERROR type %d %s %d have %d need %d",
			o->type, msg, i, have, len);
		return 1;
	}
	ND("type %d %s %d len %d", o->type, msg, i, len);
	bcopy(_o, *start, len);
	((struct dn_id*)(*start))->len = len;
	*start += len;
	return 0;
}

static int
copy_q_cb(void *obj, void *arg)
{
	struct dn_queue *q = obj;
	struct copy_args *a = arg;
	struct dn_flow *ni = (struct dn_flow *)(*a->start);
	if (copy_obj_q(a->start, a->end, &q->ni, "queue", -1))
		return DNHT_SCAN_END;
	ni->oid.type = DN_FLOW; /* override the DN_QUEUE */
	ni->oid.id = si_hash((uintptr_t)&ni->fid, 0, NULL);
	return 0;
}

static int
copy_q(struct copy_args *a, struct dn_fsk *fs, int flags)
{
	if (!fs->qht)
		return 0;
	if (fs->fs.flags & DN_QHT_HASH)
		dn_ht_scan(fs->qht, copy_q_cb, a);
	else
		copy_q_cb(fs->qht, a);
	return 0;
}

/*
 * This routine only copies the initial part of a profile ?
 * XXX
 */
static int
copy_profile(struct copy_args *a, struct dn_profile *p)
{
	int have = a->end - *a->start;
	/* XXX here we check for max length */
	int profile_len = sizeof(struct dn_profile) -
		ED_MAX_SAMPLES_NO*sizeof(int);

	if (p == NULL)
		return 0;
	if (have < profile_len) {
		D("error have %d need %d", have, profile_len);
		return 1;
	}
	bcopy(p, *a->start, profile_len);
	((struct dn_id *)(*a->start))->len = profile_len;
	*a->start += profile_len;
	return 0;
}

static int
copy_flowset(struct copy_args *a, struct dn_fsk *fs, int flags)
{
	struct dn_fs *ufs = (struct dn_fs *)(*a->start);
	if (!fs)
		return 0;
	ND("flowset %d", fs->fs.fs_nr);
	if (copy_obj(a->start, a->end, &fs->fs, "flowset", fs->fs.fs_nr))
		return DNHT_SCAN_END;
	ufs->oid.id = (fs->fs.flags & DN_QHT_HASH) ?
		dn_ht_entries(fs->qht) : (fs->qht ? 1 : 0);
	if (flags) {	/* copy queues */
		copy_q(a, fs, 0);
	}
	return 0;
}

static int
copy_si_cb(void *obj, void *arg)
{
	struct dn_sch_inst *si = obj;
	struct copy_args *a = arg;
	struct dn_flow *ni = (struct dn_flow *)(*a->start);
	if (copy_obj(a->start, a->end, &si->ni, "inst",
			si->sched->sch.sched_nr))
		return DNHT_SCAN_END;
	ni->oid.type = DN_FLOW; /* override the DN_SCH_I */
	ni->oid.id = si_hash((uintptr_t)si, DNHT_KEY_IS_OBJ, NULL);
	return 0;
}

static int
copy_si(struct copy_args *a, struct dn_schk *s, int flags)
{
	if (s->sch.flags & DN_HAVE_MASK)
		dn_ht_scan(s->siht, copy_si_cb, a);
	else if (s->siht)
		copy_si_cb(s->siht, a);
	return 0;
}

/*
 * compute a list of children of a scheduler and copy up
 */
static int
copy_fsk_list(struct copy_args *a, struct dn_schk *s, int flags)
{
	struct dn_fsk *fs;
	struct dn_id *o;
	uint32_t *p;

	int n = 0, space = sizeof(*o);
	SLIST_FOREACH(fs, &s->fsk_list, sch_chain) {
		if (fs->fs.fs_nr < DN_MAX_ID)
			n++;
	}
	space += n * sizeof(uint32_t);
	DX(3, "sched %d has %d flowsets", s->sch.sched_nr, n);
	if (a->end - *(a->start) < space)
		return DNHT_SCAN_END;
	o = (struct dn_id *)(*(a->start));
	o->len = space;
	*a->start += o->len;
	o->type = DN_TEXT;
	p = (uint32_t *)(o+1);
	SLIST_FOREACH(fs, &s->fsk_list, sch_chain)
		if (fs->fs.fs_nr < DN_MAX_ID)
			*p++ = fs->fs.fs_nr;
	return 0;
}

static int
copy_data_helper(void *_o, void *_arg)
{
	struct copy_args *a = _arg;
	uint32_t *r = a->extra->r; /* start of first range */
	uint32_t *lim;	/* first invalid pointer */
	int n;

	lim = (uint32_t *)((char *)(a->extra) + a->extra->o.len);

	if (a->type == DN_LINK || a->type == DN_SCH) {
		/* pipe|sched show, we receive a dn_schk */
		struct dn_schk *s = _o;

		n = s->sch.sched_nr;
		if (a->type == DN_SCH && n >= DN_MAX_ID)
			return 0;	/* not a scheduler */
		if (a->type == DN_LINK && n <= DN_MAX_ID)
			return 0;	/* not a pipe */

		/* see if the object is within one of our ranges */
		for (;r < lim; r += 2) {
			if (n < r[0] || n > r[1])
				continue;
			/* Found a valid entry, copy and we are done */
			if (a->flags & DN_C_LINK) {
				if (copy_obj(a->start, a->end,
				    &s->link, "link", n))
					return DNHT_SCAN_END;
				if (copy_profile(a, s->profile))
					return DNHT_SCAN_END;
				if (copy_flowset(a, s->fs, 0))
					return DNHT_SCAN_END;
			}
			if (a->flags & DN_C_SCH) {
				if (copy_obj(a->start, a->end,
				    &s->sch, "sched", n))
					return DNHT_SCAN_END;
				/* list all attached flowsets */
				if (copy_fsk_list(a, s, 0))
					return DNHT_SCAN_END;
			}
			if (a->flags & DN_C_FLOW)
				copy_si(a, s, 0);
			break;
		}
	} else if (a->type == DN_FS) {
		/* queue show, skip internal flowsets */
		struct dn_fsk *fs = _o;

		n = fs->fs.fs_nr;
		if (n >= DN_MAX_ID)
			return 0;
		/* see if the object is within one of our ranges */
		for (;r < lim; r += 2) {
			if (n < r[0] || n > r[1])
				continue;
			if (copy_flowset(a, fs, 0))
				return DNHT_SCAN_END;
			copy_q(a, fs, 0);
			break; /* we are done */
		}
	}
	return 0;
}

static inline struct dn_schk *
locate_scheduler(int i)
{
	return dn_ht_find(dn_cfg.schedhash, i, 0, NULL);
}

/*
 * red parameters are in fixed point arithmetic.
 */
static int
config_red(struct dn_fsk *fs)
{
	int64_t s, idle, weight, w0;
	int t, i;

	fs->w_q = fs->fs.w_q;
	fs->max_p = fs->fs.max_p;
	ND("called");
	/* Doing stuff that was in userland */
	i = fs->sched->link.bandwidth;
	s = (i <= 0) ? 0 :
		hz * dn_cfg.red_avg_pkt_size * 8 * SCALE(1) / i;

	idle = div64((s * 3) , fs->w_q); /* s, fs->w_q scaled; idle not scaled */
	fs->lookup_step = div64(idle , dn_cfg.red_lookup_depth);
	/* fs->lookup_step not scaled, */
	if (!fs->lookup_step)
		fs->lookup_step = 1;
	w0 = weight = SCALE(1) - fs->w_q; //fs->w_q scaled

	for (t = fs->lookup_step; t > 1; --t)
		weight = SCALE_MUL(weight, w0);
	fs->lookup_weight = (int)(weight); // scaled

	/* Now doing stuff that was in kerneland */
	fs->min_th = SCALE(fs->fs.min_th);
	fs->max_th = SCALE(fs->fs.max_th);

	fs->c_1 = fs->max_p / (fs->fs.max_th - fs->fs.min_th);
	fs->c_2 = SCALE_MUL(fs->c_1, SCALE(fs->fs.min_th));

	if (fs->fs.flags & DN_IS_GENTLE_RED) {
		fs->c_3 = (SCALE(1) - fs->max_p) / fs->fs.max_th;
		fs->c_4 = SCALE(1) - 2 * fs->max_p;
	}

	/* If the lookup table already exists, free and create it again.
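	 * (Roughly, entry 0 of the rebuilt table holds 1 - w_q and each
	 * further entry is multiplied by lookup_weight ~ (1 - w_q)^lookup_step,
	 * so w_q_lookup[i] approximates (1 - w_q)^(1 + i*lookup_step) in
	 * SCALE() fixed point; the io path uses it to decay the average
	 * queue length after idle periods.)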
	 */
	if (fs->w_q_lookup) {
		free(fs->w_q_lookup, M_DUMMYNET);
		fs->w_q_lookup = NULL;
	}
	if (dn_cfg.red_lookup_depth == 0) {
		printf("\ndummynet: net.inet.ip.dummynet.red_lookup_depth"
		    " must be > 0\n");
		fs->fs.flags &= ~DN_IS_RED;
		fs->fs.flags &= ~DN_IS_GENTLE_RED;
		return (EINVAL);
	}
	fs->lookup_depth = dn_cfg.red_lookup_depth;
	fs->w_q_lookup = (u_int *)malloc(fs->lookup_depth * sizeof(int),
	    M_DUMMYNET, M_NOWAIT);
	if (fs->w_q_lookup == NULL) {
		printf("dummynet: sorry, cannot allocate red lookup table\n");
		fs->fs.flags &= ~DN_IS_RED;
		fs->fs.flags &= ~DN_IS_GENTLE_RED;
		return(ENOSPC);
	}

	/* Fill the lookup table with (1 - w_q)^x */
	fs->w_q_lookup[0] = SCALE(1) - fs->w_q;

	for (i = 1; i < fs->lookup_depth; i++)
		fs->w_q_lookup[i] =
		    SCALE_MUL(fs->w_q_lookup[i - 1], fs->lookup_weight);

	if (dn_cfg.red_avg_pkt_size < 1)
		dn_cfg.red_avg_pkt_size = 512;
	fs->avg_pkt_size = dn_cfg.red_avg_pkt_size;
	if (dn_cfg.red_max_pkt_size < 1)
		dn_cfg.red_max_pkt_size = 1500;
	fs->max_pkt_size = dn_cfg.red_max_pkt_size;
	ND("exit");
	return 0;
}

/* Scan all flowsets attached to this scheduler and update red */
static void
update_red(struct dn_schk *s)
{
	struct dn_fsk *fs;
	SLIST_FOREACH(fs, &s->fsk_list, sch_chain) {
		if (fs && (fs->fs.flags & DN_IS_RED))
			config_red(fs);
	}
}

/* attach flowset to scheduler s, possibly requeue */
static void
fsk_attach(struct dn_fsk *fs, struct dn_schk *s)
{
	ND("remove fs %d from fsunlinked, link to sched %d",
		fs->fs.fs_nr, s->sch.sched_nr);
	SLIST_REMOVE(&dn_cfg.fsu, fs, dn_fsk, sch_chain);
	fs->sched = s;
	SLIST_INSERT_HEAD(&s->fsk_list, fs, sch_chain);
	if (s->fp->new_fsk)
		s->fp->new_fsk(fs);
	/* XXX compute fsk_mask */
	fs->fsk_mask = fs->fs.flow_mask;
	if (fs->sched->sch.flags & DN_HAVE_MASK)
		flow_id_or(&fs->sched->sch.sched_mask, &fs->fsk_mask);
	if (fs->qht) {
		/*
		 * we must drain qht according to the old
		 * type, and reinsert according to the new one.
		 * The requeue is complex -- in general we need to
		 * reclassify every single packet.
		 * For the time being, let's hope qht is never set
		 * when we reach this point.
		 */
		D("XXX TODO requeue from fs %d to sch %d",
			fs->fs.fs_nr, s->sch.sched_nr);
		fs->qht = NULL;
	}
	/* set the new type for qht */
	if (nonzero_mask(&fs->fsk_mask))
		fs->fs.flags |= DN_QHT_HASH;
	else
		fs->fs.flags &= ~DN_QHT_HASH;

	/* XXX config_red() can fail... */
	if (fs->fs.flags & DN_IS_RED)
		config_red(fs);
}

/* update all flowsets which may refer to this scheduler */
static void
update_fs(struct dn_schk *s)
{
	struct dn_fsk *fs, *tmp;

	SLIST_FOREACH_SAFE(fs, &dn_cfg.fsu, sch_chain, tmp) {
		if (s->sch.sched_nr != fs->fs.sched_nr) {
			D("fs %d for sch %d not %d still unlinked",
				fs->fs.fs_nr, fs->fs.sched_nr,
				s->sch.sched_nr);
			continue;
		}
		fsk_attach(fs, s);
	}
}

/*
 * Configuration -- to preserve backward compatibility we use
 * the following scheme (N is 65536)
 *	NUMBER		SCHED	LINK	FLOWSET
 *	   1 ..  N-1	(1)WFQ	(2)WFQ	(3)queue
 *	 N+1 .. 2N-1	(4)FIFO	(5)FIFO	(6)FIFO for sched 1..N-1
 *	2N+1 .. 3N-1	--	--	(7)FIFO for sched N+1..2N-1
 *
 * "pipe i config" configures #1, #2 and #3
 * "sched i config" configures #1 and possibly #6
 * "queue i config" configures #3
 * #1 is configured with 'pipe i config' or 'sched i config'
 * #2 is configured with 'pipe i config', and created if not
 *    existing with 'sched i config'
 * #3 is configured with 'queue i config'
 * #4 is automatically configured after #1, can only be FIFO
 * #5 is automatically configured after #2
 * #6 is automatically created when #1 is !MULTIQUEUE,
 *    and can be updated.
 * #7 is automatically configured after #2
 */

/*
 * configure a link (and its FIFO instance)
 */
static int
config_link(struct dn_link *p, struct dn_id *arg)
{
	int i;

	if (p->oid.len != sizeof(*p)) {
		D("invalid pipe len %d", p->oid.len);
		return EINVAL;
	}
	i = p->link_nr;
	if (i <= 0 || i >= DN_MAX_ID)
		return EINVAL;
	/*
	 * The config program passes parameters as follows:
	 * bw = bits/second (0 means no limits),
	 * delay = ms, must be translated into ticks.
	 * qsize = slots/bytes
	 * burst ???
	 */
	p->delay = (p->delay * hz) / 1000;
	/* Scale burst size: bytes -> bits * hz */
	p->burst *= 8 * hz;

	DN_BH_WLOCK();
	/* do it twice, base link and FIFO link */
	for (; i < 2*DN_MAX_ID; i += DN_MAX_ID) {
		struct dn_schk *s = locate_scheduler(i);
		if (s == NULL) {
			DN_BH_WUNLOCK();
			D("sched %d not found", i);
			return EINVAL;
		}
		/* remove profile if exists */
		if (s->profile) {
			free(s->profile, M_DUMMYNET);
			s->profile = NULL;
		}
		/* copy all parameters */
		s->link.oid = p->oid;
		s->link.link_nr = i;
		s->link.delay = p->delay;
		if (s->link.bandwidth != p->bandwidth) {
			/* XXX bandwidth changes, need to update red params */
			s->link.bandwidth = p->bandwidth;
			update_red(s);
		}
		s->link.burst = p->burst;
		schk_reset_credit(s);
	}
	dn_cfg.id++;
	DN_BH_WUNLOCK();
	return 0;
}

/*
 * configure a flowset. Can be called from inside with locked=1,
 */
static struct dn_fsk *
config_fs(struct dn_fs *nfs, struct dn_id *arg, int locked)
{
	int i;
	struct dn_fsk *fs;

	if (nfs->oid.len != sizeof(*nfs)) {
		D("invalid flowset len %d", nfs->oid.len);
		return NULL;
	}
	i = nfs->fs_nr;
	if (i <= 0 || i >= 3*DN_MAX_ID)
		return NULL;
	ND("flowset %d", i);
	/* XXX other sanity checks */
	if (nfs->flags & DN_QSIZE_BYTES) {
		ipdn_bound_var(&nfs->qsize, 16384,
		    1500, dn_cfg.byte_limit, NULL); // "queue byte size");
	} else {
		ipdn_bound_var(&nfs->qsize, 50,
		    1, dn_cfg.slot_limit, NULL); // "queue slot size");
	}
	if (nfs->flags & DN_HAVE_MASK) {
		/* make sure we have some buckets */
		ipdn_bound_var((int *)&nfs->buckets, dn_cfg.hash_size,
			1, dn_cfg.max_hash_size, "flowset buckets");
	} else {
		nfs->buckets = 1;	/* we only need 1 */
	}
	if (!locked)
		DN_BH_WLOCK();
	do { /* exit with break when done */
		struct dn_schk *s;
		int flags = nfs->sched_nr ?
		    DNHT_INSERT : 0;
		int j;
		int oldc = dn_cfg.fsk_count;
		fs = dn_ht_find(dn_cfg.fshash, i, flags, NULL);
		if (fs == NULL) {
			D("missing sched for flowset %d", i);
			break;
		}
		/* grab some defaults from the existing one */
		if (nfs->sched_nr == 0) /* reuse */
			nfs->sched_nr = fs->fs.sched_nr;
		for (j = 0; j < sizeof(nfs->par)/sizeof(nfs->par[0]); j++) {
			if (nfs->par[j] == -1) /* reuse */
				nfs->par[j] = fs->fs.par[j];
		}
		if (bcmp(&fs->fs, nfs, sizeof(*nfs)) == 0) {
			ND("flowset %d unchanged", i);
			break; /* no change, nothing to do */
		}
		if (oldc != dn_cfg.fsk_count)	/* new item */
			dn_cfg.id++;
		s = locate_scheduler(nfs->sched_nr);
		/* detach from old scheduler if needed, preserving
		 * queues if we need to reattach. Then update the
		 * configuration, and possibly attach to the new sched.
		 */
		DX(2, "fs %d changed sched %d@%p to %d@%p",
			fs->fs.fs_nr,
			fs->fs.sched_nr, fs->sched, nfs->sched_nr, s);
		if (fs->sched) {
			int flags = s ? DN_DETACH : (DN_DETACH | DN_DESTROY);
			flags |= DN_DESTROY; /* XXX temporary */
			fsk_detach(fs, flags);
		}
		fs->fs = *nfs; /* copy configuration */
		if (s != NULL)
			fsk_attach(fs, s);
	} while (0);
	if (!locked)
		DN_BH_WUNLOCK();
	return fs;
}

/*
 * config/reconfig a scheduler and its FIFO variant.
 * For !MULTIQUEUE schedulers, also set up the flowset.
 *
 * On reconfigurations (detected because s->fp is set),
 * detach existing flowsets preserving traffic, preserve link,
 * and delete the old scheduler creating a new one.
 */
static int
config_sched(struct dn_sch *_nsch, struct dn_id *arg)
{
	struct dn_schk *s;
	struct schk_new_arg a; /* argument for schk_new */
	int i;
	struct dn_link p;	/* copy of oldlink */
	struct dn_profile *pf = NULL;	/* copy of old link profile */
	/* Used to preserve mask parameter */
	struct ipfw_flow_id new_mask;
	int new_buckets = 0;
	int new_flags = 0;
	int pipe_cmd;
	int err = ENOMEM;

	a.sch = _nsch;
	if (a.sch->oid.len != sizeof(*a.sch)) {
		D("bad sched len %d", a.sch->oid.len);
		return EINVAL;
	}
	i = a.sch->sched_nr;
	if (i <= 0 || i >= DN_MAX_ID)
		return EINVAL;
	/* make sure we have some buckets */
	if (a.sch->flags & DN_HAVE_MASK)
		ipdn_bound_var((int *)&a.sch->buckets, dn_cfg.hash_size,
			1, dn_cfg.max_hash_size, "sched buckets");
	/* XXX other sanity checks */
	bzero(&p, sizeof(p));

	pipe_cmd = a.sch->flags & DN_PIPE_CMD;
	a.sch->flags &= ~DN_PIPE_CMD; //XXX do it even if is not set?
	if (pipe_cmd) {
		/* Copy mask parameter */
		new_mask = a.sch->sched_mask;
		new_buckets = a.sch->buckets;
		new_flags = a.sch->flags;
	}
	DN_BH_WLOCK();
again: /* run twice, for wfq and fifo */
	/*
	 * lookup the type. If not supplied, use the previous one
	 * or default to WF2Q+. Otherwise, return an error.
	 */
	dn_cfg.id++;
	a.fp = find_sched_type(a.sch->oid.subtype, a.sch->name);
	if (a.fp != NULL) {
		/* found. Lookup or create entry */
		s = dn_ht_find(dn_cfg.schedhash, i, DNHT_INSERT, &a);
	} else if (a.sch->oid.subtype == 0 && !a.sch->name[0]) {
		/* No type.
		 * search existing s* or retry with WF2Q+ */
		s = dn_ht_find(dn_cfg.schedhash, i, 0, &a);
		if (s != NULL) {
			a.fp = s->fp;
			/* Scheduler exists, skip to FIFO scheduler
			 * if command was pipe config...
			 */
			if (pipe_cmd)
				goto next;
		} else {
			/* New scheduler, create a wf2q+ with no mask
			 * if command was pipe config...
			 */
			if (pipe_cmd) {
				/* clear mask parameter */
				bzero(&a.sch->sched_mask, sizeof(new_mask));
				a.sch->buckets = 0;
				a.sch->flags &= ~DN_HAVE_MASK;
			}
			a.sch->oid.subtype = DN_SCHED_WF2QP;
			goto again;
		}
	} else {
		D("invalid scheduler type %d %s",
			a.sch->oid.subtype, a.sch->name);
		err = EINVAL;
		goto error;
	}
	/* normalize name and subtype */
	a.sch->oid.subtype = a.fp->type;
	bzero(a.sch->name, sizeof(a.sch->name));
	strlcpy(a.sch->name, a.fp->name, sizeof(a.sch->name));
	if (s == NULL) {
		D("cannot allocate scheduler %d", i);
		goto error;
	}
	/* restore existing link if any */
	if (p.link_nr) {
		s->link = p;
		if (!pf || pf->link_nr != p.link_nr) { /* no saved value */
			s->profile = NULL; /* XXX maybe not needed */
		} else {
			s->profile = malloc(sizeof(struct dn_profile),
			    M_DUMMYNET, M_NOWAIT | M_ZERO);
			if (s->profile == NULL) {
				D("cannot allocate profile");
				goto error; //XXX
			}
			bcopy(pf, s->profile, sizeof(*pf));
		}
	}
	p.link_nr = 0;
	if (s->fp == NULL) {
		DX(2, "sched %d new type %s", i, a.fp->name);
	} else if (s->fp != a.fp ||
			bcmp(a.sch, &s->sch, sizeof(*a.sch)) ) {
		/* already existing. */
		DX(2, "sched %d type changed from %s to %s",
			i, s->fp->name, a.fp->name);
		DX(4, " type/sub %d/%d -> %d/%d",
			s->sch.oid.type, s->sch.oid.subtype,
			a.sch->oid.type, a.sch->oid.subtype);
		if (s->link.link_nr == 0)
			D("XXX WARNING link 0 for sched %d", i);
		p = s->link;	/* preserve link */
		if (s->profile) {/* preserve profile */
			if (!pf)
				pf = malloc(sizeof(*pf),
				    M_DUMMYNET, M_NOWAIT | M_ZERO);
			if (pf)	/* XXX should issue a warning otherwise */
				bcopy(s->profile, pf, sizeof(*pf));
		}
		/* remove from the hash */
		dn_ht_find(dn_cfg.schedhash, i, DNHT_REMOVE, NULL);
		/* Detach flowsets, preserve queues.
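		 * (That is the intent on a type change; as the XXX note
		 * below says, the current code still passes DN_DESTROY, so
		 * pending queues are dropped and recreated lazily when
		 * traffic shows up again.)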
		 */
		// schk_delete_cb(s, NULL);
		// XXX temporarily, kill queues
		schk_delete_cb(s, (void *)DN_DESTROY);
		goto again;
	} else {
		DX(4, "sched %d unchanged type %s", i, a.fp->name);
	}
	/* complete initialization */
	s->sch = *a.sch;
	s->fp = a.fp;
	s->cfg = arg;
	// XXX schk_reset_credit(s);
	/* create the internal flowset if needed,
	 * trying to reuse existing ones if available
	 */
	if (!(s->fp->flags & DN_MULTIQUEUE) && !s->fs) {
		s->fs = dn_ht_find(dn_cfg.fshash, i, 0, NULL);
		if (!s->fs) {
			struct dn_fs fs;
			bzero(&fs, sizeof(fs));
			set_oid(&fs.oid, DN_FS, sizeof(fs));
			fs.fs_nr = i + DN_MAX_ID;
			fs.sched_nr = i;
			s->fs = config_fs(&fs, NULL, 1 /* locked */);
		}
		if (!s->fs) {
			schk_delete_cb(s, (void *)DN_DESTROY);
			D("error creating internal fs for %d", i);
			goto error;
		}
	}
	/* call init function after the flowset is created */
	if (s->fp->config)
		s->fp->config(s);
	update_fs(s);
next:
	if (i < DN_MAX_ID) { /* now configure the FIFO instance */
		i += DN_MAX_ID;
		if (pipe_cmd) {
			/* Restore mask parameter for FIFO */
			a.sch->sched_mask = new_mask;
			a.sch->buckets = new_buckets;
			a.sch->flags = new_flags;
		} else {
			/* sched config shouldn't modify the FIFO scheduler */
			if (dn_ht_find(dn_cfg.schedhash, i, 0, &a) != NULL) {
				/* FIFO already exists, don't touch it */
				err = 0; /* and this is not an error */
				goto error;
			}
		}
		a.sch->sched_nr = i;
		a.sch->oid.subtype = DN_SCHED_FIFO;
		bzero(a.sch->name, sizeof(a.sch->name));
		goto again;
	}
	err = 0;
error:
	DN_BH_WUNLOCK();
	if (pf)
		free(pf, M_DUMMYNET);
	return err;
}

/*
 * attach a profile to a link
 */
static int
config_profile(struct dn_profile *pf, struct dn_id *arg)
{
	struct dn_schk *s;
	int i, olen, err = 0;

	if (pf->oid.len < sizeof(*pf)) {
		D("short profile len %d", pf->oid.len);
		return EINVAL;
	}
	i = pf->link_nr;
	if (i <= 0 || i >= DN_MAX_ID)
		return EINVAL;
	/* XXX other sanity checks */
	DN_BH_WLOCK();
	for (; i < 2*DN_MAX_ID; i += DN_MAX_ID) {
		s = locate_scheduler(i);

		if (s == NULL) {
			err = EINVAL;
			break;
		}
		dn_cfg.id++;
		/*
		 * If we had a profile and the new one does not fit,
		 * or it is deleted, then we need to free memory.
		 */
		if (s->profile && (pf->samples_no == 0 ||
		    s->profile->oid.len < pf->oid.len)) {
			free(s->profile, M_DUMMYNET);
			s->profile = NULL;
		}
		if (pf->samples_no == 0)
			continue;
		/*
		 * new profile, possibly allocate memory
		 * and copy data.
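		 * The buffer is sized from pf->oid.len, which is expected
		 * to include the variable-length sample table following
		 * the fixed dn_profile header, so the whole table travels
		 * in a single copy.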
		 */
		if (s->profile == NULL)
			s->profile = malloc(pf->oid.len,
			    M_DUMMYNET, M_NOWAIT | M_ZERO);
		if (s->profile == NULL) {
			D("no memory for profile %d", i);
			err = ENOMEM;
			break;
		}
		/* preserve larger length XXX double check */
		olen = s->profile->oid.len;
		if (olen < pf->oid.len)
			olen = pf->oid.len;
		bcopy(pf, s->profile, pf->oid.len);
		s->profile->oid.len = olen;
	}
	DN_BH_WUNLOCK();
	return err;
}

/*
 * Delete all objects:
 */
static void
dummynet_flush(void)
{

	/* delete all schedulers and related links/queues/flowsets */
	dn_ht_scan(dn_cfg.schedhash, schk_delete_cb,
	    (void *)(uintptr_t)DN_DELETE_FS);
	/* delete all remaining (unlinked) flowsets */
	DX(4, "still %d unlinked fs", dn_cfg.fsk_count);
	dn_ht_free(dn_cfg.fshash, DNHT_REMOVE);
	fsk_detach_list(&dn_cfg.fsu, DN_DELETE_FS);
	/* Reinitialize system heap... */
	heap_init(&dn_cfg.evheap, 16, offsetof(struct dn_id, id));
}

/*
 * Main handler for configuration. We are guaranteed to be called
 * with an oid which is at least a dn_id.
 * - the first object is the command (config, delete, flush, ...)
 * - config_link must be issued after the corresponding config_sched
 * - parameters (DN_TXT) for an object must precede the object
 *   processed on a config_sched.
 */
int
do_config(void *p, int l)
{
	struct dn_id *next, *o;
	int err = 0, err2 = 0;
	struct dn_id *arg = NULL;
	uintptr_t *a;

	o = p;
	if (o->id != DN_API_VERSION) {
		D("invalid api version got %d need %d",
			o->id, DN_API_VERSION);
		return EINVAL;
	}
	for (; l >= sizeof(*o); o = next) {
		struct dn_id *prev = arg;
		if (o->len < sizeof(*o) || l < o->len) {
			D("bad len o->len %d len %d", o->len, l);
			err = EINVAL;
			break;
		}
		l -= o->len;
		next = (struct dn_id *)((char *)o + o->len);
		err = 0;
		switch (o->type) {
		default:
			D("cmd %d not implemented", o->type);
			break;

#ifdef EMULATE_SYSCTL
		/* sysctl emulation.
		 * if we recognize the command, jump to the correct
		 * handler and return
		 */
		case DN_SYSCTL_SET:
			err = kesysctl_emu_set(p, l);
			return err;
#endif

		case DN_CMD_CONFIG: /* simply a header */
			break;

		case DN_CMD_DELETE:
			/* the argument is in the first uintptr_t after o */
			a = (uintptr_t *)(o+1);
			if (o->len < sizeof(*o) + sizeof(*a)) {
				err = EINVAL;
				break;
			}
			switch (o->subtype) {
			case DN_LINK:
				/* delete base and derived schedulers */
				DN_BH_WLOCK();
				err = delete_schk(*a);
				err2 = delete_schk(*a + DN_MAX_ID);
				DN_BH_WUNLOCK();
				if (!err)
					err = err2;
				break;

			default:
				D("invalid delete type %d",
					o->subtype);
				err = EINVAL;
				break;

			case DN_FS:
				err = (*a < 1 || *a >= DN_MAX_ID) ?
				    EINVAL : delete_fs(*a, 0);
				break;
			}
			break;

		case DN_CMD_FLUSH:
			DN_BH_WLOCK();
			dummynet_flush();
			DN_BH_WUNLOCK();
			break;
		case DN_TEXT:	/* store argument for the next block */
			prev = NULL;
			arg = o;
			break;
		case DN_LINK:
			err = config_link((struct dn_link *)o, arg);
			break;
		case DN_PROFILE:
			err = config_profile((struct dn_profile *)o, arg);
			break;
		case DN_SCH:
			err = config_sched((struct dn_sch *)o, arg);
			break;
		case DN_FS:
			err = (NULL==config_fs((struct dn_fs *)o, arg, 0));
			break;
		}
		if (prev)
			arg = NULL;
		if (err != 0)
			break;
	}
	return err;
}

static int
compute_space(struct dn_id *cmd, struct copy_args *a)
{
	int x = 0, need = 0;
	int profile_size = sizeof(struct dn_profile) -
		ED_MAX_SAMPLES_NO*sizeof(int);

	/* NOTE about compute space:
	 * NP	= dn_cfg.schk_count
	 * NSI	= dn_cfg.si_count
	 * NF	= dn_cfg.fsk_count
	 * NQ	= dn_cfg.queue_count
	 * - ipfw pipe show
	 *   (NP/2)*(dn_link + dn_sch + dn_id + dn_fs)  only half scheduler
	 *		link, scheduler template, flowset
	 *		integrated in scheduler and header
	 *		for flowset list
	 *   (NSI)*(dn_flow) all scheduler instance (includes
	 *		the queue instance)
	 * - ipfw sched show
	 *   (NP/2)*(dn_link + dn_sch + dn_id + dn_fs)  only half scheduler
	 *		link, scheduler template, flowset
	 *		integrated in scheduler and header
	 *		for flowset list
	 *   (NSI * dn_flow) all scheduler instances
	 *   (NF * sizeof(uint32_t)) space for flowset list linked to scheduler
	 *   (NQ * dn_queue) all queues [XXX for now not listed]
	 * - ipfw queue show
	 *   (NF * dn_fs) all flowsets
	 *   (NQ * dn_queue) all queues
	 */
	switch (cmd->subtype) {
	default:
		return -1;
	/* XXX where do LINK and SCH differ ? */
	/* 'ipfw sched show' could list all queues associated to
	 * a scheduler. This feature for now is disabled
	 */
	case DN_LINK:	/* pipe show */
		x = DN_C_LINK | DN_C_SCH | DN_C_FLOW;
		need += dn_cfg.schk_count *
			(sizeof(struct dn_fs) + profile_size) / 2;
		need += dn_cfg.fsk_count * sizeof(uint32_t);
		break;
	case DN_SCH:	/* sched show */
		need += dn_cfg.schk_count *
			(sizeof(struct dn_fs) + profile_size) / 2;
		need += dn_cfg.fsk_count * sizeof(uint32_t);
		x = DN_C_SCH | DN_C_LINK | DN_C_FLOW;
		break;
	case DN_FS:	/* queue show */
		x = DN_C_FS | DN_C_QUEUE;
		break;
	case DN_GET_COMPAT:	/* compatibility mode */
		need = dn_compat_calc_size();
		break;
	}
	a->flags = x;
	if (x & DN_C_SCH) {
		need += dn_cfg.schk_count * sizeof(struct dn_sch) / 2;
		/* NOTE also, each fs might be attached to a sched */
		need += dn_cfg.schk_count * sizeof(struct dn_id) / 2;
	}
	if (x & DN_C_FS)
		need += dn_cfg.fsk_count * sizeof(struct dn_fs);
	if (x & DN_C_LINK) {
		need += dn_cfg.schk_count * sizeof(struct dn_link) / 2;
	}
	/*
	 * When exporting a queue to userland, only pass up the
	 * struct dn_flow, which is the only visible part.
	 */

	if (x & DN_C_QUEUE)
		need += dn_cfg.queue_count * sizeof(struct dn_flow);
	if (x & DN_C_FLOW)
		need += dn_cfg.si_count * (sizeof(struct dn_flow));
	return need;
}

/*
 * If compat != NULL dummynet_get is called in compatibility mode.
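 * (typically reached via ip_dummynet_compat() when one of the legacy
 * IP_DUMMYNET_* sockopts is used -- see ip_dn_ctl() below).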
 * *compat will be the pointer to the buffer to pass to ipfw
 */
int
dummynet_get(struct sockopt *sopt, void **compat)
{
	int have, i, need, error;
	char *start = NULL, *buf;
	size_t sopt_valsize;
	struct dn_id *cmd;
	struct copy_args a;
	struct copy_range r;
	int l = sizeof(struct dn_id);

	bzero(&a, sizeof(a));
	bzero(&r, sizeof(r));

	/* save and restore original sopt_valsize around copyin */
	sopt_valsize = sopt->sopt_valsize;

	cmd = &r.o;

	if (!compat) {
		/* copy at least an oid, and possibly a full object */
		error = sooptcopyin(sopt, cmd, sizeof(r), sizeof(*cmd));
		sopt->sopt_valsize = sopt_valsize;
		if (error)
			goto done;
		l = cmd->len;
#ifdef EMULATE_SYSCTL
		/* sysctl emulation. */
		if (cmd->type == DN_SYSCTL_GET)
			return kesysctl_emu_get(sopt);
#endif
		if (l > sizeof(r)) {
			/* request larger than default, allocate buffer */
			cmd = malloc(l, M_DUMMYNET, M_WAITOK);
			error = sooptcopyin(sopt, cmd, l, l);
			sopt->sopt_valsize = sopt_valsize;
			if (error)
				goto done;
		}
	} else { /* compatibility */
		error = 0;
		cmd->type = DN_CMD_GET;
		cmd->len = sizeof(struct dn_id);
		cmd->subtype = DN_GET_COMPAT;
		// cmd->id = sopt_valsize;
		D("compatibility mode");
	}
	a.extra = (struct copy_range *)cmd;
	if (cmd->len == sizeof(*cmd)) { /* no range, create a default */
		uint32_t *rp = (uint32_t *)(cmd + 1);
		cmd->len += 2* sizeof(uint32_t);
		rp[0] = 1;
		rp[1] = DN_MAX_ID - 1;
		if (cmd->subtype == DN_LINK) {
			rp[0] += DN_MAX_ID;
			rp[1] += DN_MAX_ID;
		}
	}
	/* Count space (under lock) and allocate (outside lock).
	 * Exit with lock held if we manage to get enough buffer.
	 * Try a few times then give up.
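	 * Each retry recomputes 'need' because objects may come and go
	 * while the lock is dropped for malloc(); if no suitable buffer
	 * can be obtained, only the header (carrying the required size
	 * in cmd->id) is copied out so userland can retry with a larger
	 * buffer.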
	 */
	for (have = 0, i = 0; i < 10; i++) {
		DN_BH_WLOCK();
		need = compute_space(cmd, &a);

		/* if there is a range, ignore value from compute_space() */
		if (l > sizeof(*cmd))
			need = sopt_valsize - sizeof(*cmd);

		if (need < 0) {
			DN_BH_WUNLOCK();
			error = EINVAL;
			goto done;
		}
		need += sizeof(*cmd);
		cmd->id = need;
		if (have >= need)
			break;

		DN_BH_WUNLOCK();
		if (start)
			free(start, M_DUMMYNET);
		start = NULL;
		if (need > sopt_valsize)
			break;

		have = need;
		start = malloc(have, M_DUMMYNET, M_WAITOK | M_ZERO);
	}

	if (start == NULL) {
		if (compat) {
			*compat = NULL;
			error = 1; // XXX
		} else {
			error = sooptcopyout(sopt, cmd, sizeof(*cmd));
		}
		goto done;
	}
	ND("have %d:%d sched %d, %d:%d links %d, %d:%d flowsets %d, "
		"%d:%d si %d, %d:%d queues %d",
		dn_cfg.schk_count, sizeof(struct dn_sch), DN_SCH,
		dn_cfg.schk_count, sizeof(struct dn_link), DN_LINK,
		dn_cfg.fsk_count, sizeof(struct dn_fs), DN_FS,
		dn_cfg.si_count, sizeof(struct dn_flow), DN_SCH_I,
		dn_cfg.queue_count, sizeof(struct dn_queue), DN_QUEUE);
	sopt->sopt_valsize = sopt_valsize;
	a.type = cmd->subtype;

	if (compat == NULL) {
		bcopy(cmd, start, sizeof(*cmd));
		((struct dn_id*)(start))->len = sizeof(struct dn_id);
		buf = start + sizeof(*cmd);
	} else
		buf = start;
	a.start = &buf;
	a.end = start + have;
	/* start copying other objects */
	if (compat) {
		a.type = DN_COMPAT_PIPE;
		dn_ht_scan(dn_cfg.schedhash, copy_data_helper_compat, &a);
		a.type = DN_COMPAT_QUEUE;
		dn_ht_scan(dn_cfg.fshash, copy_data_helper_compat, &a);
	} else if (a.type == DN_FS) {
		dn_ht_scan(dn_cfg.fshash, copy_data_helper, &a);
	} else {
		dn_ht_scan(dn_cfg.schedhash, copy_data_helper, &a);
	}
	DN_BH_WUNLOCK();

	if (compat) {
		*compat = start;
		sopt->sopt_valsize = buf - start;
		/* free() is done by ip_dummynet_compat() */
		start = NULL; //XXX hack
	} else {
		error = sooptcopyout(sopt, start, buf - start);
	}
done:
	if (cmd && cmd != &r.o)
		free(cmd, M_DUMMYNET);
	if (start)
		free(start, M_DUMMYNET);
	return error;
}

/* Callback called on scheduler instance to delete it if idle */
static int
drain_scheduler_cb(void *_si, void *arg)
{
	struct dn_sch_inst *si = _si;

	if ((si->kflags & DN_ACTIVE) || si->dline.mq.head != NULL)
		return 0;

	if (si->sched->fp->flags & DN_MULTIQUEUE) {
		if (si->q_count == 0)
			return si_destroy(si, NULL);
		else
			return 0;
	} else { /* !DN_MULTIQUEUE */
		if ((si+1)->ni.length == 0)
			return si_destroy(si, NULL);
		else
			return 0;
	}
	return 0;	/* unreachable */
}

/* Callback called on scheduler to check if it has instances */
static int
drain_scheduler_sch_cb(void *_s, void *arg)
{
	struct dn_schk *s = _s;

	if (s->sch.flags & DN_HAVE_MASK) {
		dn_ht_scan_bucket(s->siht, &s->drain_bucket,
			drain_scheduler_cb, NULL);
		s->drain_bucket++;
	} else {
		if (s->siht) {
			if (drain_scheduler_cb(s->siht, NULL) == DNHT_SCAN_DEL)
				s->siht = NULL;
		}
	}
	return 0;
}

/* Called every tick, try to delete a 'bucket' of scheduler */
void
dn_drain_scheduler(void)
{
	dn_ht_scan_bucket(dn_cfg.schedhash,

/* Callback called on scheduler instance to delete it if idle */
static int
drain_scheduler_cb(void *_si, void *arg)
{
	struct dn_sch_inst *si = _si;

	if ((si->kflags & DN_ACTIVE) || si->dline.mq.head != NULL)
		return 0;

	if (si->sched->fp->flags & DN_MULTIQUEUE) {
		if (si->q_count == 0)
			return si_destroy(si, NULL);
		else
			return 0;
	} else { /* !DN_MULTIQUEUE */
		if ((si+1)->ni.length == 0)
			return si_destroy(si, NULL);
		else
			return 0;
	}
	return 0; /* unreachable */
}

/* Callback called on scheduler to check if it has instances */
static int
drain_scheduler_sch_cb(void *_s, void *arg)
{
	struct dn_schk *s = _s;

	if (s->sch.flags & DN_HAVE_MASK) {
		dn_ht_scan_bucket(s->siht, &s->drain_bucket,
		    drain_scheduler_cb, NULL);
		s->drain_bucket++;
	} else {
		if (s->siht) {
			if (drain_scheduler_cb(s->siht, NULL) == DNHT_SCAN_DEL)
				s->siht = NULL;
		}
	}
	return 0;
}

/* Called every tick, try to delete a 'bucket' of scheduler */
void
dn_drain_scheduler(void)
{
	dn_ht_scan_bucket(dn_cfg.schedhash, &dn_cfg.drain_sch,
	    drain_scheduler_sch_cb, NULL);
	dn_cfg.drain_sch++;
}

/* Callback called on queue to delete if it is idle */
static int
drain_queue_cb(void *_q, void *arg)
{
	struct dn_queue *q = _q;

	if (q->ni.length == 0) {
		dn_delete_queue(q, DN_DESTROY);
		return DNHT_SCAN_DEL; /* queue is deleted */
	}

	return 0; /* queue isn't deleted */
}

/* Callback called on flowset used to check if it has queues */
static int
drain_queue_fs_cb(void *_fs, void *arg)
{
	struct dn_fsk *fs = _fs;

	if (fs->fs.flags & DN_QHT_HASH) {
		/* Flowset has a hash table for queues */
		dn_ht_scan_bucket(fs->qht, &fs->drain_bucket,
		    drain_queue_cb, NULL);
		fs->drain_bucket++;
	} else {
		/* No hash table for this flowset, null the pointer
		 * if the queue is deleted
		 */
		if (fs->qht) {
			if (drain_queue_cb(fs->qht, NULL) == DNHT_SCAN_DEL)
				fs->qht = NULL;
		}
	}
	return 0;
}

/* Called every tick, try to delete a 'bucket' of queue */
void
dn_drain_queue(void)
{
	/* scan a bucket of flowset */
	dn_ht_scan_bucket(dn_cfg.fshash, &dn_cfg.drain_fs,
	    drain_queue_fs_cb, NULL);
	dn_cfg.drain_fs++;
}

/*
 * Handler for the various dummynet socket options
 */
static int
ip_dn_ctl(struct sockopt *sopt)
{
	void *p = NULL;
	int error, l;

	error = priv_check(sopt->sopt_td, PRIV_NETINET_DUMMYNET);
	if (error)
		return (error);

	/* Disallow sets in really-really secure mode. */
	if (sopt->sopt_dir == SOPT_SET) {
		error = securelevel_ge(sopt->sopt_td->td_ucred, 3);
		if (error)
			return (error);
	}

	switch (sopt->sopt_name) {
	default:
		D("dummynet: unknown option %d", sopt->sopt_name);
		error = EINVAL;
		break;

	case IP_DUMMYNET_FLUSH:
	case IP_DUMMYNET_CONFIGURE:
	case IP_DUMMYNET_DEL:	/* remove a pipe or queue */
	case IP_DUMMYNET_GET:
		D("dummynet: compat option %d", sopt->sopt_name);
		error = ip_dummynet_compat(sopt);
		break;

	case IP_DUMMYNET3:
		if (sopt->sopt_dir == SOPT_GET) {
			error = dummynet_get(sopt, NULL);
			break;
		}
		l = sopt->sopt_valsize;
		if (l < sizeof(struct dn_id) || l > 12000) {
			D("argument len %d invalid", l);
			break;
		}
		p = malloc(l, M_TEMP, M_WAITOK); // XXX can it fail ?
		error = sooptcopyin(sopt, p, l, l);
		if (error)
			break;
		error = do_config(p, l);
		break;
	}

	if (p != NULL)
		free(p, M_TEMP);

	return error;
}
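
/*
 * NOTE (illustrative sketch, not part of the original file): on the SET
 * side a userland caller (normally ipfw(8)) pushes a stream of
 * dn_id-prefixed objects through setsockopt(), and do_config() walks
 * them. A minimal sketch, assuming 'buf'/'len' already hold a request
 * built by the caller:
 *
 *	int s = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);
 *	setsockopt(s, IPPROTO_IP, IP_DUMMYNET3, buf, len);
 *
 * ip_dn_ctl() above rejects requests smaller than a struct dn_id header
 * or larger than 12000 bytes before do_config() ever sees them.
 */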

static void
ip_dn_init(void)
{
	if (dn_cfg.init_done)
		return;
	printf("DUMMYNET %p with IPv6 initialized (100409)\n", curvnet);
	dn_cfg.init_done = 1;
	/* Set defaults here. MSVC does not accept initializers,
	 * and this is also useful for vimages
	 */
	/* queue limits */
	dn_cfg.slot_limit = 100; /* Foot shooting limit for queues. */
	dn_cfg.byte_limit = 1024 * 1024;
	dn_cfg.expire = 1;

	/* RED parameters */
	dn_cfg.red_lookup_depth = 256;	/* default lookup table depth */
	dn_cfg.red_avg_pkt_size = 512;	/* default medium packet size */
	dn_cfg.red_max_pkt_size = 1500;	/* default max packet size */

	/* hash tables */
	dn_cfg.max_hash_size = 65536;	/* max in the hash tables */
	dn_cfg.hash_size = 64;		/* default hash size */

	/* create hash tables for schedulers and flowsets.
	 * In both we search by key and by pointer.
	 */
	dn_cfg.schedhash = dn_ht_init(NULL, dn_cfg.hash_size,
	    offsetof(struct dn_schk, schk_next),
	    schk_hash, schk_match, schk_new);
	dn_cfg.fshash = dn_ht_init(NULL, dn_cfg.hash_size,
	    offsetof(struct dn_fsk, fsk_next),
	    fsk_hash, fsk_match, fsk_new);

	/* bucket index to drain object */
	dn_cfg.drain_fs = 0;
	dn_cfg.drain_sch = 0;

	heap_init(&dn_cfg.evheap, 16, offsetof(struct dn_id, id));
	SLIST_INIT(&dn_cfg.fsu);
	SLIST_INIT(&dn_cfg.schedlist);

	DN_LOCK_INIT();

	TASK_INIT(&dn_task, 0, dummynet_task, curvnet);
	dn_tq = taskqueue_create_fast("dummynet", M_WAITOK,
	    taskqueue_thread_enqueue, &dn_tq);
	taskqueue_start_threads(&dn_tq, 1, PI_NET, "dummynet");

	callout_init(&dn_timeout, CALLOUT_MPSAFE);
	dn_reschedule();

	/* Initialize curr_time adjustment mechanics. */
	getmicrouptime(&dn_cfg.prev_t);
}

static void
ip_dn_destroy(int last)
{
	DN_BH_WLOCK();
	/* ensure no more callouts are started */
	dn_gone = 1;

	/* check for last */
	if (last) {
		ND("removing last instance\n");
		ip_dn_ctl_ptr = NULL;
		ip_dn_io_ptr = NULL;
	}

	dummynet_flush();
	DN_BH_WUNLOCK();

	callout_drain(&dn_timeout);
	taskqueue_drain(dn_tq, &dn_task);
	taskqueue_free(dn_tq);

	dn_ht_free(dn_cfg.schedhash, 0);
	dn_ht_free(dn_cfg.fshash, 0);
	heap_free(&dn_cfg.evheap);

	DN_LOCK_DESTROY();
}

static int
dummynet_modevent(module_t mod, int type, void *data)
{

	if (type == MOD_LOAD) {
		if (ip_dn_io_ptr) {
			printf("DUMMYNET already loaded\n");
			return EEXIST;
		}
		ip_dn_init();
		ip_dn_ctl_ptr = ip_dn_ctl;
		ip_dn_io_ptr = dummynet_io;
		return 0;
	} else if (type == MOD_UNLOAD) {
		ip_dn_destroy(1 /* last */);
		return 0;
	} else
		return EOPNOTSUPP;
}
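
/*
 * NOTE (illustrative sketch, not part of the original file): packet
 * scheduler algorithms live in separate modules and register through
 * dn_sched_modevent() below, normally via the DECLARE_DNSCHED_MODULE()
 * macro from dn_sched.h. A scheduler module is essentially a filled-in
 * struct dn_alg plus that macro invocation; the initializer below is a
 * rough outline of how the in-tree FIFO scheduler
 * (sys/netpfil/ipfw/dn_sched_fifo.c) describes itself:
 *
 *	static struct dn_alg fifo_desc = {
 *		.type = DN_SCHED_FIFO,
 *		.name = "FIFO",
 *		.enqueue = fifo_enqueue,
 *		.dequeue = fifo_dequeue,
 *		... remaining hooks as needed ...
 *	};
 *	DECLARE_DNSCHED_MODULE(dn_fifo, &fifo_desc);
 *
 * load_dn_sched()/unload_dn_sched() below insert and remove the
 * descriptor on dn_cfg.schedlist; enqueue and dequeue are the only
 * mandatory hooks, as checked in load_dn_sched().
 */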

/* modevent helpers for the modules */
static int
load_dn_sched(struct dn_alg *d)
{
	struct dn_alg *s;

	if (d == NULL)
		return 1; /* error */
	ip_dn_init();	/* just in case, we need the lock */

	/* Check that mandatory funcs exists */
	if (d->enqueue == NULL || d->dequeue == NULL) {
		D("missing enqueue or dequeue for %s", d->name);
		return 1;
	}

	/* Search if scheduler already exists */
	DN_BH_WLOCK();
	SLIST_FOREACH(s, &dn_cfg.schedlist, next) {
		if (strcmp(s->name, d->name) == 0) {
			D("%s already loaded", d->name);
			break; /* scheduler already exists */
		}
	}
	if (s == NULL)
		SLIST_INSERT_HEAD(&dn_cfg.schedlist, d, next);
	DN_BH_WUNLOCK();
	D("dn_sched %s %sloaded", d->name, s ? "not " : "");
	return s ? 1 : 0;
}

static int
unload_dn_sched(struct dn_alg *s)
{
	struct dn_alg *tmp, *r;
	int err = EINVAL;

	ND("called for %s", s->name);

	DN_BH_WLOCK();
	SLIST_FOREACH_SAFE(r, &dn_cfg.schedlist, next, tmp) {
		if (strcmp(s->name, r->name) != 0)
			continue;
		ND("ref_count = %d", r->ref_count);
		err = (r->ref_count != 0) ? EBUSY : 0;
		if (err == 0)
			SLIST_REMOVE(&dn_cfg.schedlist, r, dn_alg, next);
		break;
	}
	DN_BH_WUNLOCK();
	D("dn_sched %s %sunloaded", s->name, err ? "not " : "");
	return err;
}

int
dn_sched_modevent(module_t mod, int cmd, void *arg)
{
	struct dn_alg *sch = arg;

	if (cmd == MOD_LOAD)
		return load_dn_sched(sch);
	else if (cmd == MOD_UNLOAD)
		return unload_dn_sched(sch);
	else
		return EINVAL;
}

static moduledata_t dummynet_mod = {
	"dummynet", dummynet_modevent, NULL
};

#define	DN_SI_SUB	SI_SUB_PROTO_IFATTACHDOMAIN
#define	DN_MODEV_ORD	(SI_ORDER_ANY - 128) /* after ipfw */
DECLARE_MODULE(dummynet, dummynet_mod, DN_SI_SUB, DN_MODEV_ORD);
MODULE_DEPEND(dummynet, ipfw, 2, 2, 2);
MODULE_VERSION(dummynet, 3);

/*
 * Starting up. Done in order after dummynet_modevent() has been called.
 * VNET_SYSINIT is also called for each existing vnet and each new vnet.
 */
//VNET_SYSINIT(vnet_dn_init, DN_SI_SUB, DN_MODEV_ORD+2, ip_dn_init, NULL);

/*
 * Shutdown handlers close up shop. These are done in REVERSE ORDER, but
 * still after dummynet_modevent() has been called. Not called on reboot.
 * VNET_SYSUNINIT is also called for each exiting vnet as it exits, or
 * when the module is unloaded.
 */
//VNET_SYSUNINIT(vnet_dn_uninit, DN_SI_SUB, DN_MODEV_ORD+2, ip_dn_destroy, NULL);

/* end of file */