/*-
 * Copyright (c) 2010 Luigi Rizzo, Riccardo Panicucci, Universita` di Pisa
 * All rights reserved
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Dummynet portions related to packet handling.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/netpfil/ipfw/ip_dn_io.c 325731 2017-11-12 01:28:20Z truckman $");

#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/sysctl.h>

#include <net/if.h>	/* IFNAMSIZ, struct ifaddr, ifq head, lock.h mutex.h */
#include <net/netisr.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/ip.h>		/* ip_len, ip_off */
#include <netinet/ip_var.h>	/* ip_output(), IP_FORWARDING */
#include <netinet/ip_fw.h>
#include <netinet/ip_dummynet.h>
#include <netinet/if_ether.h>	/* various ether_* routines */
#include <netinet/ip6.h>	/* for ip6_input, ip6_output prototypes */
#include <netinet6/ip6_var.h>

#include <netpfil/ipfw/ip_fw_private.h>
#include <netpfil/ipfw/dn_heap.h>
#include <netpfil/ipfw/ip_dn_private.h>
#ifdef NEW_AQM
#include <netpfil/ipfw/dn_aqm.h>
#endif
#include <netpfil/ipfw/dn_sched.h>

/*
 * We keep a private variable for the simulation time, but we could
 * probably use an existing one ("softticks" in sys/kern/kern_timeout.c)
 * instead of dn_cfg.curr_time.
 */

struct dn_parms dn_cfg;
//VNET_DEFINE(struct dn_parms, _base_dn_cfg);

static long tick_last;		/* Last tick duration (usec). */
static long tick_delta;		/* Last vs standard tick diff (usec). */
static long tick_delta_sum;	/* Accumulated tick difference (usec). */
static long tick_adjustment;	/* Tick adjustments done. */
static long tick_lost;		/* Number of lost (coalesced) ticks. */
/* Adjusted vs non-adjusted curr_time difference (ticks). */
static long tick_diff;

static unsigned long io_pkt;
static unsigned long io_pkt_fast;

#ifdef NEW_AQM
unsigned long io_pkt_drop;
#else
static unsigned long io_pkt_drop;
#endif

/*
 * We use a heap to store entities for which we have pending timer events.
 * The heap is checked at every tick and all entities with expired events
 * are extracted.
 */

MALLOC_DEFINE(M_DUMMYNET, "dummynet", "dummynet heap");

extern	void (*bridge_dn_p)(struct mbuf *, struct ifnet *);

#ifdef SYSCTL_NODE

/*
 * Because of the way the SYSBEGIN/SYSEND macros work on other
 * platforms, there should not be functions between them.
 * So keep the handlers outside the block.
 */
static int
sysctl_hash_size(SYSCTL_HANDLER_ARGS)
{
	int error, value;

	value = dn_cfg.hash_size;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (value < 16 || value > 65536)
		return (EINVAL);
	dn_cfg.hash_size = value;
	return (0);
}

static int
sysctl_limits(SYSCTL_HANDLER_ARGS)
{
	int error;
	long value;

	if (arg2 != 0)
		value = dn_cfg.slot_limit;
	else
		value = dn_cfg.byte_limit;
	error = sysctl_handle_long(oidp, &value, 0, req);

	if (error != 0 || req->newptr == NULL)
		return (error);
	if (arg2 != 0) {
		if (value < 1)
			return (EINVAL);
		dn_cfg.slot_limit = value;
	} else {
		if (value < 1500)
			return (EINVAL);
		dn_cfg.byte_limit = value;
	}
	return (0);
}

SYSBEGIN(f4)

SYSCTL_DECL(_net_inet);
SYSCTL_DECL(_net_inet_ip);
#ifdef NEW_AQM
SYSCTL_NODE(_net_inet_ip, OID_AUTO, dummynet, CTLFLAG_RW, 0, "Dummynet");
#else
static SYSCTL_NODE(_net_inet_ip, OID_AUTO, dummynet, CTLFLAG_RW, 0, "Dummynet");
#endif

/* wrapper to pass dn_cfg fields to SYSCTL_* */
//#define DC(x)	(&(VNET_NAME(_base_dn_cfg).x))
#define DC(x)	(&(dn_cfg.x))

/* parameters */

SYSCTL_PROC(_net_inet_ip_dummynet, OID_AUTO, hash_size,
    CTLTYPE_INT | CTLFLAG_RW, 0, 0, sysctl_hash_size,
    "I", "Default hash table size");

SYSCTL_PROC(_net_inet_ip_dummynet, OID_AUTO, pipe_slot_limit,
    CTLTYPE_LONG | CTLFLAG_RW, 0, 1, sysctl_limits,
    "L", "Upper limit in slots for pipe queue.");
SYSCTL_PROC(_net_inet_ip_dummynet, OID_AUTO, pipe_byte_limit,
    CTLTYPE_LONG | CTLFLAG_RW, 0, 0, sysctl_limits,
    "L", "Upper limit in bytes for pipe queue.");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, io_fast,
    CTLFLAG_RW, DC(io_fast), 0, "Enable fast dummynet io.");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, debug,
    CTLFLAG_RW, DC(debug), 0, "Dummynet debug level");

/* RED parameters */
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_lookup_depth,
    CTLFLAG_RD, DC(red_lookup_depth), 0, "Depth of RED lookup table");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_avg_pkt_size,
    CTLFLAG_RD, DC(red_avg_pkt_size), 0, "RED Medium packet size");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_max_pkt_size,
    CTLFLAG_RD, DC(red_max_pkt_size), 0, "RED Max packet size");

/* time adjustment */
SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_delta,
    CTLFLAG_RD, &tick_delta, 0, "Last vs standard tick difference (usec).");
SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_delta_sum,
    CTLFLAG_RD, &tick_delta_sum, 0, "Accumulated tick difference (usec).");
SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_adjustment,
    CTLFLAG_RD, &tick_adjustment, 0, "Tick adjustments done.");
SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_diff,
    CTLFLAG_RD, &tick_diff, 0,
    "Adjusted vs non-adjusted curr_time difference (ticks).");
SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_lost,
    CTLFLAG_RD, &tick_lost, 0,
    "Number of ticks coalesced by dummynet taskqueue.");

/* Drain parameters */
SYSCTL_UINT(_net_inet_ip_dummynet, OID_AUTO, expire,
    CTLFLAG_RW, DC(expire), 0, "Expire empty queues/pipes");
SYSCTL_UINT(_net_inet_ip_dummynet, OID_AUTO, expire_cycle,
    CTLFLAG_RD, DC(expire_cycle), 0, "Expire cycle for queues/pipes");

/* statistics */
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, schk_count,
    CTLFLAG_RD, DC(schk_count), 0, "Number of schedulers");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, si_count,
    CTLFLAG_RD, DC(si_count), 0, "Number of scheduler instances");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, fsk_count,
    CTLFLAG_RD, DC(fsk_count), 0, "Number of flowsets");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, queue_count,
    CTLFLAG_RD, DC(queue_count), 0, "Number of queues");
SYSCTL_ULONG(_net_inet_ip_dummynet, OID_AUTO, io_pkt,
    CTLFLAG_RD, &io_pkt, 0,
    "Number of packets passed to dummynet.");
SYSCTL_ULONG(_net_inet_ip_dummynet, OID_AUTO, io_pkt_fast,
    CTLFLAG_RD, &io_pkt_fast, 0,
    "Number of packets that bypassed the dummynet scheduler.");
SYSCTL_ULONG(_net_inet_ip_dummynet, OID_AUTO, io_pkt_drop,
    CTLFLAG_RD, &io_pkt_drop, 0,
    "Number of packets dropped by dummynet.");
#undef DC
SYSEND

#endif

static void	dummynet_send(struct mbuf *);

/*
 * Return the mbuf tag holding the dummynet state (it should
 * be the first one on the list).
 */
struct dn_pkt_tag *
dn_tag_get(struct mbuf *m)
{
	struct m_tag *mtag = m_tag_first(m);
#ifdef NEW_AQM
	/* XXX: to skip ts m_tag. For debugging only. */
	if (mtag != NULL && mtag->m_tag_id == DN_AQM_MTAG_TS) {
		m_tag_delete(m, mtag);
		mtag = m_tag_first(m);
		D("skip TS tag");
	}
#endif
	KASSERT(mtag != NULL &&
	    mtag->m_tag_cookie == MTAG_ABI_COMPAT &&
	    mtag->m_tag_id == PACKET_TAG_DUMMYNET,
	    ("packet on dummynet queue w/o dummynet tag!"));
	return (struct dn_pkt_tag *)(mtag + 1);
}

#ifndef NEW_AQM
static inline void
mq_append(struct mq *q, struct mbuf *m)
{
	if (q->head == NULL)
		q->head = m;
	else
		q->tail->m_nextpkt = m;
	q->tail = m;
	m->m_nextpkt = NULL;
}
#endif

/*
 * Dispose of a list of packets. Use a function so that, if we need
 * to do more work, this is a central point to do it.
 */
void dn_free_pkts(struct mbuf *mnext)
{
	struct mbuf *m;

	while ((m = mnext) != NULL) {
		mnext = m->m_nextpkt;
		FREE_PKT(m);
	}
}

static int
red_drops(struct dn_queue *q, int len)
{
	/*
	 * RED algorithm
	 *
	 * RED calculates the average queue size (avg) using a low-pass
	 * filter with an exponential weighted (w_q) moving average:
	 *	avg <- (1 - w_q) * avg + w_q * q_size
	 * where q_size is the queue length (measured in bytes or packets).
	 *
	 * If q_size == 0, we compute the idle time for the link, and set
	 *	avg <- avg * (1 - w_q)^(idle / s)
	 * where s is the time needed for transmitting a medium-sized packet.
	 *
	 * Now, if avg < min_th the packet is enqueued.
	 * If avg > max_th the packet is dropped. Otherwise, the packet is
	 * dropped with a probability that is a function of avg.
	 */
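	/*
	 * Illustrative example (not from the original source): with
	 * w_q = 0.002 and a current avg of 40 packets, a new sample
	 * q_size = 50 moves the average only slightly:
	 *	avg <- 40 + (50 - 40) * 0.002 = 40.02
	 * The code below performs exactly this update, in fixed point:
	 * SCALE() converts to the scaled representation and SCALE_MUL()
	 * multiplies two scaled values, so effectively
	 *	avg += SCALE_MUL(SCALE(q_size) - avg, w_q)
	 */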

	struct dn_fsk *fs = q->fs;
	int64_t p_b = 0;

	/* Queue in bytes or packets? */
	uint32_t q_size = (fs->fs.flags & DN_QSIZE_BYTES) ?
	    q->ni.len_bytes : q->ni.length;

	/* Average queue size estimation. */
	if (q_size != 0) {
		/* Queue is not empty, avg <- avg + (q_size - avg) * w_q */
		int diff = SCALE(q_size) - q->avg;
		int64_t v = SCALE_MUL((int64_t)diff, (int64_t)fs->w_q);

		q->avg += (int)v;
	} else {
		/*
		 * Queue is empty, find for how long the queue has been
		 * empty and use a lookup table for computing
		 * (1 - w_q)^(idle_time/s) where s is the time to send a
		 * (small) packet.
		 * XXX check wraps...
		 */
		if (q->avg) {
			u_int t = div64((dn_cfg.curr_time - q->q_time),
			    fs->lookup_step);

			q->avg = (t < fs->lookup_depth) ?
			    SCALE_MUL(q->avg, fs->w_q_lookup[t]) : 0;
		}
	}

	/* Should we drop the packet? */
	if (q->avg < fs->min_th) {
		q->count = -1;
		return (0);	/* accept packet */
	}
	if (q->avg >= fs->max_th) {	/* average queue >= max threshold */
		if (fs->fs.flags & DN_IS_ECN)
			return (1);
		if (fs->fs.flags & DN_IS_GENTLE_RED) {
			/*
			 * According to Gentle-RED, if avg is greater than
			 * max_th the packet is dropped with a probability
			 *	p_b = c_3 * avg - c_4
			 * where
			 *	c_3 = (1 - max_p) / max_th
			 *	c_4 = 1 - 2 * max_p
			 */
			p_b = SCALE_MUL((int64_t)fs->c_3, (int64_t)q->avg) -
			    fs->c_4;
		} else {
			q->count = -1;
			return (1);
		}
	} else if (q->avg > fs->min_th) {
		if (fs->fs.flags & DN_IS_ECN)
			return (1);
		/*
		 * We compute p_b using the linear dropping function
		 *	p_b = c_1 * avg - c_2
		 * where
		 *	c_1 = max_p / (max_th - min_th)
		 *	c_2 = max_p * min_th / (max_th - min_th)
		 */
		p_b = SCALE_MUL((int64_t)fs->c_1, (int64_t)q->avg) - fs->c_2;
	}

	if (fs->fs.flags & DN_QSIZE_BYTES)
		p_b = div64((p_b * len), fs->max_pkt_size);
	if (++q->count == 0)
		q->random = random() & 0xffff;
	else {
		/*
		 * q->count counts packets arrived since the last drop, so
		 * a greater value of q->count means a greater packet drop
		 * probability.
		 */
		if (SCALE_MUL(p_b, SCALE((int64_t)q->count)) > q->random) {
			q->count = 0;
			/* After a drop we calculate a new random value. */
			q->random = random() & 0xffff;
			return (1);	/* drop */
		}
	}
	/* End of RED algorithm. */

	return (0);	/* accept */
}
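/*
 * Illustrative numbers (not from the original source): with the classic
 * RED parameters max_p = 0.1, min_th = 5 and max_th = 15 (in packets),
 * the linear branch above gives c_1 = 0.1 / 10 = 0.01 and
 * c_2 = 0.1 * 5 / 10 = 0.05, so an average queue of 10 packets yields
 * p_b = 0.01 * 10 - 0.05 = 0.05. The q->count term then scales this so
 * that the effective drop probability grows with the number of packets
 * accepted since the last drop, spreading drops more evenly over time.
 */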

/*
 * ECN/ECT Processing (partially adopted from altq)
 */
#ifndef NEW_AQM
static
#endif
int
ecn_mark(struct mbuf *m)
{
	struct ip *ip;

	ip = (struct ip *)mtodo(m, dn_tag_get(m)->iphdr_off);

	switch (ip->ip_v) {
	case IPVERSION:
	{
		u_int8_t otos;
		int sum;

		if ((ip->ip_tos & IPTOS_ECN_MASK) == IPTOS_ECN_NOTECT)
			return (0);	/* not-ECT */
		if ((ip->ip_tos & IPTOS_ECN_MASK) == IPTOS_ECN_CE)
			return (1);	/* already marked */

		/*
		 * ECN-capable but not marked:
		 * mark CE and update the checksum.
		 */
		otos = ip->ip_tos;
		ip->ip_tos |= IPTOS_ECN_CE;
		/*
		 * Update the checksum (from RFC 1624):
		 *	HC' = ~(~HC + ~m + m')
		 */
		sum = ~ntohs(ip->ip_sum) & 0xffff;
		sum += (~otos & 0xffff) + ip->ip_tos;
		sum = (sum >> 16) + (sum & 0xffff);
		sum += (sum >> 16);	/* add carry */
		ip->ip_sum = htons(~sum & 0xffff);
		return (1);
	}
#ifdef INET6
	case (IPV6_VERSION >> 4):
	{
		struct ip6_hdr *ip6 = (struct ip6_hdr *)ip;
		u_int32_t flowlabel;

		flowlabel = ntohl(ip6->ip6_flow);
		if ((flowlabel >> 28) != 6)
			return (0);	/* version mismatch! */
		if ((flowlabel & (IPTOS_ECN_MASK << 20)) ==
		    (IPTOS_ECN_NOTECT << 20))
			return (0);	/* not-ECT */
		if ((flowlabel & (IPTOS_ECN_MASK << 20)) ==
		    (IPTOS_ECN_CE << 20))
			return (1);	/* already marked */
		/*
		 * ECN-capable but not marked: mark CE.
		 */
		flowlabel |= (IPTOS_ECN_CE << 20);
		ip6->ip6_flow = htonl(flowlabel);
		return (1);
	}
#endif
	}
	return (0);
}

/*
 * Enqueue a packet in q, subject to space and queue management policy
 * (whose parameters are in q->fs).
 * Update stats for the queue and the scheduler.
 * Return 0 on success, 1 on drop. The packet is consumed anyway.
 */
int
dn_enqueue(struct dn_queue *q, struct mbuf *m, int drop)
{
	struct dn_fs *f;
	struct dn_flow *ni;	/* stats for scheduler instance */
	uint64_t len;

	if (q->fs == NULL || q->_si == NULL) {
		printf("%s fs %p si %p, dropping\n",
		    __FUNCTION__, q->fs, q->_si);
		FREE_PKT(m);
		return 1;
	}
	f = &(q->fs->fs);
	ni = &q->_si->ni;
	len = m->m_pkthdr.len;
	/* Update statistics, then check reasons to drop pkt. */
	q->ni.tot_bytes += len;
	q->ni.tot_pkts++;
	ni->tot_bytes += len;
	ni->tot_pkts++;
	if (drop)
		goto drop;
	if (f->plr && random() < f->plr)
		goto drop;
#ifdef NEW_AQM
	/* Call AQM enqueue function */
	if (q->fs->aqmfp)
		return q->fs->aqmfp->enqueue(q, m);
#endif
	if (f->flags & DN_IS_RED && red_drops(q, m->m_pkthdr.len)) {
		if (!(f->flags & DN_IS_ECN) || !ecn_mark(m))
			goto drop;
	}
	if (f->flags & DN_QSIZE_BYTES) {
		if (q->ni.len_bytes > f->qsize)
			goto drop;
	} else if (q->ni.length >= f->qsize) {
		goto drop;
	}
	mq_append(&q->mq, m);
	q->ni.length++;
	q->ni.len_bytes += len;
	ni->length++;
	ni->len_bytes += len;
	return (0);

drop:
	io_pkt_drop++;
	q->ni.drops++;
	ni->drops++;
	FREE_PKT(m);
	return (1);
}
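/*
 * Worked example for the RFC 1624 incremental checksum update in
 * ecn_mark() above (illustrative values, not from the original source):
 * marking CE turns tos 0x02 (ECT(0)) into 0x03, so m = 0x0002 and
 * m' = 0x0003. With a stored checksum HC = 0xb1e5:
 *	~HC         = 0x4e1a
 *	~m & 0xffff = 0xfffd
 *	sum         = 0x4e1a + 0xfffd + 0x0003 = 0x14e1a
 *	fold carry  -> 0x4e1b, so HC' = ~0x4e1b = 0xb1e4
 * The one's-complement sum grew by 1 and the stored checksum dropped
 * by 1, without recomputing over the whole header.
 */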
/*
 * Fetch packets from the delay line which are due now. If there are
 * leftover packets, reinsert the delay line into the heap.
 * Runs under scheduler lock.
 */
static void
transmit_event(struct mq *q, struct delay_line *dline, uint64_t now)
{
	struct mbuf *m;
	struct dn_pkt_tag *pkt = NULL;

	dline->oid.subtype = 0;	/* not in heap */
	while ((m = dline->mq.head) != NULL) {
		pkt = dn_tag_get(m);
		if (!DN_KEY_LEQ(pkt->output_time, now))
			break;
		dline->mq.head = m->m_nextpkt;
		mq_append(q, m);
	}
	if (m != NULL) {
		dline->oid.subtype = 1;	/* in heap */
		heap_insert(&dn_cfg.evheap, pkt->output_time, dline);
	}
}

/*
 * Convert the additional MAC overheads/delays into an equivalent
 * number of bits for the given data rate. The samples are
 * in milliseconds so we need to divide by 1000.
 */
static uint64_t
extra_bits(struct mbuf *m, struct dn_schk *s)
{
	int index;
	uint64_t bits;
	struct dn_profile *pf = s->profile;

	if (!pf || pf->samples_no == 0)
		return 0;
	index = random() % pf->samples_no;
	bits = div64((uint64_t)pf->samples[index] * s->link.bandwidth, 1000);
	if (index >= pf->loss_level) {
		struct dn_pkt_tag *dt = dn_tag_get(m);
		if (dt)
			dt->dn_dir = DIR_DROP;
	}
	return bits;
}

/*
 * Send traffic from a scheduler instance due by 'now'.
 * Return a pointer to the head of the queue.
 */
static struct mbuf *
serve_sched(struct mq *q, struct dn_sch_inst *si, uint64_t now)
{
	struct mq def_q;
	struct dn_schk *s = si->sched;
	struct mbuf *m = NULL;
	int delay_line_idle = (si->dline.mq.head == NULL);
	int done, bw;

	if (q == NULL) {
		q = &def_q;
		q->head = NULL;
	}

	bw = s->link.bandwidth;
	si->kflags &= ~DN_ACTIVE;

	if (bw > 0)
		si->credit += (now - si->sched_time) * bw;
	else
		si->credit = 0;
	si->sched_time = now;
	done = 0;
	while (si->credit >= 0 && (m = s->fp->dequeue(si)) != NULL) {
		uint64_t len_scaled;

		done++;
		len_scaled = (bw == 0) ? 0 : hz *
		    (m->m_pkthdr.len * 8 + extra_bits(m, s));
		si->credit -= len_scaled;
		/* Move the packet into the delay line. */
		dn_tag_get(m)->output_time = dn_cfg.curr_time + s->link.delay;
		mq_append(&si->dline.mq, m);
	}

	/*
	 * If credit >= 0 the instance is idle, mark the time.
	 * Otherwise put it back into the heap, and adjust the output
	 * time of the last inserted packet, m, which was too early.
	 */
	if (si->credit >= 0) {
		si->idle_time = now;
	} else {
		uint64_t t;
		KASSERT(bw > 0, ("bw=0 and credit<0 ?"));
		t = div64(bw - 1 - si->credit, bw);
		if (m)
			dn_tag_get(m)->output_time += t;
		si->kflags |= DN_ACTIVE;
		heap_insert(&dn_cfg.evheap, now + t, si);
	}
	if (delay_line_idle && done)
		transmit_event(q, &si->dline, now);
	return q->head;
}

/*
 * The timer handler for dummynet. Time is computed in ticks, but
 * the code is tolerant to the actual rate at which this is called.
 * Once complete, the function reschedules itself for the next tick.
 */
void
dummynet_task(void *context, int pending)
{
	struct timeval t;
	struct mq q = { NULL, NULL };	/* queue to accumulate results */

	CURVNET_SET((struct vnet *)context);

	DN_BH_WLOCK();

	/* Update the number of lost (coalesced) ticks. */
	tick_lost += pending - 1;

	getmicrouptime(&t);
	/* Last tick duration (usec). */
	tick_last = (t.tv_sec - dn_cfg.prev_t.tv_sec) * 1000000 +
	    (t.tv_usec - dn_cfg.prev_t.tv_usec);
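	/*
	 * Illustrative example (not from the original source): with
	 * hz = 1000 the standard tick is 1000 usec. If the last tick
	 * actually took 1100 usec, then below
	 *	tick_delta = (1100 * 1000 - 1000000) / 1000 = 100 usec
	 * and once tick_delta_sum accumulates past one full tick,
	 * curr_time is advanced by the extra ticks, so that simulated
	 * time keeps tracking wall-clock time.
	 */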
	/* Last tick vs standard tick difference (usec). */
	tick_delta = (tick_last * hz - 1000000) / hz;
	/* Accumulated tick difference (usec). */
	tick_delta_sum += tick_delta;

	dn_cfg.prev_t = t;

	/*
	 * Adjust curr_time if the accumulated tick difference is
	 * greater than the 'standard' tick. Since curr_time should
	 * be monotonically increasing, we do positive adjustments
	 * as required, and throttle curr_time in case of negative
	 * adjustment.
	 */
	dn_cfg.curr_time++;
	if (tick_delta_sum - tick >= 0) {
		int diff = tick_delta_sum / tick;

		dn_cfg.curr_time += diff;
		tick_diff += diff;
		tick_delta_sum %= tick;
		tick_adjustment++;
	} else if (tick_delta_sum + tick <= 0) {
		dn_cfg.curr_time--;
		tick_diff--;
		tick_delta_sum += tick;
		tick_adjustment++;
	}

	/* Serve pending events, accumulate in q. */
	for (;;) {
		struct dn_id *p;	/* generic parameter to handler */

		if (dn_cfg.evheap.elements == 0 ||
		    DN_KEY_LT(dn_cfg.curr_time, HEAP_TOP(&dn_cfg.evheap)->key))
			break;
		p = HEAP_TOP(&dn_cfg.evheap)->object;
		heap_extract(&dn_cfg.evheap, NULL);

		if (p->type == DN_SCH_I) {
			serve_sched(&q, (struct dn_sch_inst *)p,
			    dn_cfg.curr_time);
		} else {	/* extracted a delay line */
			transmit_event(&q, (struct delay_line *)p,
			    dn_cfg.curr_time);
		}
	}
	if (dn_cfg.expire && ++dn_cfg.expire_cycle >= dn_cfg.expire) {
		dn_cfg.expire_cycle = 0;
		dn_drain_scheduler();
		dn_drain_queue();
	}

	dn_reschedule();
	DN_BH_WUNLOCK();
	if (q.head != NULL)
		dummynet_send(q.head);
	CURVNET_RESTORE();
}

/*
 * Forward a chain of packets to the proper destination.
 * This runs outside the dummynet lock.
 */
static void
dummynet_send(struct mbuf *m)
{
	struct mbuf *n;

	for (; m != NULL; m = n) {
		struct ifnet *ifp = NULL;	/* gcc 3.4.6 complains */
		struct m_tag *tag;
		int dst;

		n = m->m_nextpkt;
		m->m_nextpkt = NULL;
		tag = m_tag_first(m);
		if (tag == NULL) {	/* should not happen */
			dst = DIR_DROP;
		} else {
			struct dn_pkt_tag *pkt = dn_tag_get(m);
			/*
			 * Extract the dummynet info, rename the tag
			 * to carry reinject info.
			 */
			dst = pkt->dn_dir;
			ifp = pkt->ifp;
			tag->m_tag_cookie = MTAG_IPFW_RULE;
			tag->m_tag_id = 0;
		}

		switch (dst) {
		case DIR_OUT:
			ip_output(m, NULL, NULL, IP_FORWARDING, NULL, NULL);
			break;

		case DIR_IN:
			netisr_dispatch(NETISR_IP, m);
			break;

#ifdef INET6
		case DIR_IN | PROTO_IPV6:
			netisr_dispatch(NETISR_IPV6, m);
			break;

		case DIR_OUT | PROTO_IPV6:
			ip6_output(m, NULL, NULL, IPV6_FORWARDING,
			    NULL, NULL, NULL);
			break;
#endif

		case DIR_FWD | PROTO_IFB:	/* DN_TO_IFB_FWD: */
			if (bridge_dn_p != NULL)
				((*bridge_dn_p)(m, ifp));
			else
				printf("dummynet: if_bridge not loaded\n");
			break;

		case DIR_IN | PROTO_LAYER2:	/* DN_TO_ETH_DEMUX: */
			/*
			 * The Ethernet code assumes the Ethernet header is
			 * contiguous in the first mbuf header.
			 * Ensure this is true.
			 */
			if (m->m_len < ETHER_HDR_LEN &&
			    (m = m_pullup(m, ETHER_HDR_LEN)) == NULL) {
				printf("dummynet/ether: pullup failed, "
				    "dropping packet\n");
				break;
			}
			ether_demux(m->m_pkthdr.rcvif, m);
			break;

		case DIR_OUT | PROTO_LAYER2:	/* DN_TO_ETH_OUT: */
			ether_output_frame(ifp, m);
			break;

		case DIR_DROP:
			/* drop the packet after some time */
			FREE_PKT(m);
			break;

		default:
			printf("dummynet: bad switch %d!\n", dst);
			FREE_PKT(m);
			break;
		}
	}
}

static inline int
tag_mbuf(struct mbuf *m, int dir, struct ip_fw_args *fwa)
{
	struct dn_pkt_tag *dt;
	struct m_tag *mtag;

	mtag = m_tag_get(PACKET_TAG_DUMMYNET,
	    sizeof(*dt), M_NOWAIT | M_ZERO);
	if (mtag == NULL)
		return 1;		/* Cannot allocate packet header. */
	m_tag_prepend(m, mtag);		/* Attach to mbuf chain. */
	dt = (struct dn_pkt_tag *)(mtag + 1);
	dt->rule = fwa->rule;
	dt->rule.info &= IPFW_ONEPASS;	/* only keep this info */
	dt->dn_dir = dir;
	dt->ifp = fwa->oif;
	/* dt->output_time is updated as we move through the system */
	dt->output_time = dn_cfg.curr_time;
	dt->iphdr_off = (dir & PROTO_LAYER2) ? ETHER_HDR_LEN : 0;
	return 0;
}

/*
 * Dummynet hook for packets.
 * We use the argument to locate the flowset fs and the sched_set sch
 * associated to it. Then we apply flow_mask and sched_mask to
 * determine the queue and scheduler instances.
 *
 * dir		where we shall send the packet after dummynet.
 * *m0		the mbuf with the packet
 * ifp		the 'ifp' parameter from the caller.
 *		NULL in ip_input, destination interface in ip_output.
 */
int
dummynet_io(struct mbuf **m0, int dir, struct ip_fw_args *fwa)
{
	struct mbuf *m = *m0;
	struct dn_fsk *fs = NULL;
	struct dn_sch_inst *si;
	struct dn_queue *q = NULL;	/* default */

	int fs_id = (fwa->rule.info & IPFW_INFO_MASK) +
	    ((fwa->rule.info & IPFW_IS_PIPE) ? 2*DN_MAX_ID : 0);
	DN_BH_WLOCK();
	io_pkt++;
	/* We could actually tag outside the lock, but who cares... */
	if (tag_mbuf(m, dir, fwa))
		goto dropit;
	if (dn_cfg.busy) {
		/*
		 * If the upper half is busy doing something expensive,
		 * let's queue the packet and move forward.
		 */
		mq_append(&dn_cfg.pending, m);
		m = *m0 = NULL;		/* consumed */
		goto done;		/* already active, nothing to do */
	}
	/* XXX locate_flowset could be optimised with a direct ref. */
	fs = dn_ht_find(dn_cfg.fshash, fs_id, 0, NULL);
	if (fs == NULL)
		goto dropit;	/* This queue/pipe does not exist! */
	if (fs->sched == NULL)	/* should not happen */
		goto dropit;
	/* Find the scheduler instance, possibly applying sched_mask. */
	si = ipdn_si_find(fs->sched, &(fwa->f_id));
	if (si == NULL)
		goto dropit;
	/*
	 * If the scheduler supports multiple queues, find the right one
	 * (otherwise it will be ignored by enqueue).
	 */
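	/*
	 * For example (based on the dummynet scheduler modules rather
	 * than on code in this file): WF2Q+, RR and QFQ advertise
	 * DN_MULTIQUEUE and keep one dn_queue per flow, so they need
	 * the lookup below, while a plain FIFO link manages a single
	 * queue internally and q stays NULL.
	 */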
	if (fs->sched->fp->flags & DN_MULTIQUEUE) {
		q = ipdn_q_find(fs, si, &(fwa->f_id));
		if (q == NULL)
			goto dropit;
	}
	if (fs->sched->fp->enqueue(si, q, m)) {
		/* packet was dropped by enqueue() */
		m = *m0 = NULL;

		/* dn_enqueue already increments io_pkt_drop */
		io_pkt_drop--;

		goto dropit;
	}

	if (si->kflags & DN_ACTIVE) {
		m = *m0 = NULL;		/* consumed */
		goto done;		/* already active, nothing to do */
	}

	/* Compute the initial allowance. */
	if (si->idle_time < dn_cfg.curr_time) {
		/* Do this only on the first packet on an idle pipe. */
		struct dn_link *p = &fs->sched->link;

		si->sched_time = dn_cfg.curr_time;
		si->credit = dn_cfg.io_fast ? p->bandwidth : 0;
		if (p->burst) {
			uint64_t burst = (dn_cfg.curr_time - si->idle_time) *
			    p->bandwidth;
			if (burst > p->burst)
				burst = p->burst;
			si->credit += burst;
		}
	}
	/* Pass through the scheduler and the delay line. */
	m = serve_sched(NULL, si, dn_cfg.curr_time);

	/*
	 * Optimization: pass the packet back to ipfw for immediate send.
	 * XXX Don't call dummynet_send() if the scheduler returned the
	 * packet just enqueued. This avoids a lock order reversal.
	 */
	if (/*dn_cfg.io_fast &&*/ m == *m0 && (dir & PROTO_LAYER2) == 0) {
		/* Fast io: rename the tag to carry reinject info. */
		struct m_tag *tag = m_tag_first(m);

		tag->m_tag_cookie = MTAG_IPFW_RULE;
		tag->m_tag_id = 0;
		io_pkt_fast++;
		if (m->m_nextpkt != NULL) {
			printf("dummynet: fast io: pkt chain detected!\n");
			m->m_nextpkt = NULL;
		}
		m = NULL;
	} else {
		*m0 = NULL;
	}
done:
	DN_BH_WUNLOCK();
	if (m)
		dummynet_send(m);
	return 0;

dropit:
	io_pkt_drop++;
	DN_BH_WUNLOCK();
	if (m)
		FREE_PKT(m);
	*m0 = NULL;
	return (fs && (fs->fs.flags & DN_NOERROR)) ? 0 : ENOBUFS;
}