/*
 * net/sched/sch_sfq.c	Stochastic Fairness Queueing discipline.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/config.h>
#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/pkt_sched.h>


/*	Stochastic Fairness Queuing algorithm.
	=======================================

	Source:
	Paul E. McKenney "Stochastic Fairness Queuing",
	IEEE INFOCOMM'90 Proceedings, San Francisco, 1990.

	Paul E. McKenney "Stochastic Fairness Queuing",
	"Interworking: Research and Experience", v.2, 1991, p.113-131.


	See also:
	M. Shreedhar and George Varghese "Efficient Fair
	Queuing using Deficit Round Robin", Proc. SIGCOMM 95.


	This is not the thing that is usually called (W)FQ nowadays.
	It does not use any timestamp mechanism, but instead
	processes queues in round-robin order.

	ADVANTAGE:

	- It is very cheap. Both CPU and memory requirements are minimal.

	DRAWBACKS:

	- "Stochastic" -> It is not 100% fair.
	  When hash collisions occur, several flows are considered as one.

	- "Round-robin" -> It introduces larger delays than virtual clock
	  based schemes, and should not be used for isolating interactive
	  traffic from non-interactive.  This means that this scheduler
	  should be used as a leaf of CBQ or P3, which puts interactive
	  traffic into a higher priority band.

	We still need true WFQ for the top level CSZ, but using WFQ
	for best effort traffic is absolutely pointless:
	SFQ is superior for this purpose.

	IMPLEMENTATION:
	This implementation limits the maximal queue length to 128,
	the maximal mtu to 2^15-1, and the number of hash buckets to 1024.
	The only goal of these restrictions was that all the data
	fit into one 4K page :-).  struct sfq_sched_data is
	organized in an anti-cache manner: all the data for a bucket
	are scattered over different locations.  This is not good,
	but it allowed me to put it into 4K.

	It is easy to increase these values, but not on the fly.  */
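
/*	A rough map of the data structures, as a reading aid for the code
	below: each packet is hashed into one of SFQ_HASH_DIVISOR buckets;
	ht[] maps a bucket to one of SFQ_DEPTH slots, and every slot owns
	its own FIFO (qs[]).  Active slots are chained through next[] and
	served round-robin, each slot getting "quantum" bytes of allotment
	(allot[]) per round.  The hash is salted with a perturbation value
	that a timer re-randomizes every perturb_period, so flows that
	collide in one epoch are unlikely to stay glued together.  */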
#define SFQ_DEPTH		128
#define SFQ_HASH_DIVISOR	1024

/* This type should contain at least SFQ_DEPTH*2 values */
typedef unsigned char sfq_index;

struct sfq_head
{
	sfq_index	next;
	sfq_index	prev;
};

struct sfq_sched_data
{
/* Parameters */
	int		perturb_period;
	unsigned	quantum;	/* Allotment per round: MUST BE >= MTU */
	int		limit;

/* Variables */
	struct timer_list perturb_timer;
	int		perturbation;
	sfq_index	tail;		/* Index of current slot in round */
	sfq_index	max_depth;	/* Maximal depth */

	sfq_index	ht[SFQ_HASH_DIVISOR];	/* Hash table */
	sfq_index	next[SFQ_DEPTH];	/* Active slots link */
	short		allot[SFQ_DEPTH];	/* Current allotment per slot */
	unsigned short	hash[SFQ_DEPTH];	/* Hash value indexed by slots */
	struct sk_buff_head	qs[SFQ_DEPTH];	/* Slot queue */
	struct sfq_head	dep[SFQ_DEPTH*2];	/* Linked list of slots, indexed by depth */
};

static __inline__ unsigned sfq_fold_hash(struct sfq_sched_data *q, u32 h, u32 h1)
{
	int pert = q->perturbation;

	/* Have we any rotation primitives? If not, WHY? */
	h ^= (h1<<pert) ^ (h1>>(0x1F - pert));
	h ^= h>>10;
	return h & 0x3FF;
}

#ifndef IPPROTO_ESP
#define IPPROTO_ESP		50
#endif

static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
{
	u32 h, h2;

	switch (skb->protocol) {
	case __constant_htons(ETH_P_IP):
	{
		struct iphdr *iph = skb->nh.iph;
		h = iph->daddr;
		h2 = iph->saddr^iph->protocol;
		if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) &&
		    (iph->protocol == IPPROTO_TCP ||
		     iph->protocol == IPPROTO_UDP ||
		     iph->protocol == IPPROTO_ESP))
			h2 ^= *(((u32*)iph) + iph->ihl);
		break;
	}
	case __constant_htons(ETH_P_IPV6):
	{
		struct ipv6hdr *iph = skb->nh.ipv6h;
		h = iph->daddr.s6_addr32[3];
		h2 = iph->saddr.s6_addr32[3]^iph->nexthdr;
		if (iph->nexthdr == IPPROTO_TCP ||
		    iph->nexthdr == IPPROTO_UDP ||
		    iph->nexthdr == IPPROTO_ESP)
			h2 ^= *(u32*)&iph[1];
		break;
	}
	default:
		h = (u32)(unsigned long)skb->dst^skb->protocol;
		h2 = (u32)(unsigned long)skb->sk;
	}
	return sfq_fold_hash(q, h, h2);
}

extern __inline__ void sfq_link(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	int d = q->qs[x].qlen + SFQ_DEPTH;

	p = d;
	n = q->dep[d].next;
	q->dep[x].next = n;
	q->dep[x].prev = p;
	q->dep[p].next = q->dep[n].prev = x;
}

extern __inline__ void sfq_dec(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;

	n = q->dep[x].next;
	p = q->dep[x].prev;
	q->dep[p].next = n;
	q->dep[n].prev = p;

	if (n == p && q->max_depth == q->qs[x].qlen + 1)
		q->max_depth--;

	sfq_link(q, x);
}

extern __inline__ void sfq_inc(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	int d;

	n = q->dep[x].next;
	p = q->dep[x].prev;
	q->dep[p].next = n;
	q->dep[n].prev = p;
	d = q->qs[x].qlen;
	if (q->max_depth < d)
		q->max_depth = d;

	sfq_link(q, x);
}
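
/*	A quick sketch of the dep[] bookkeeping above: dep[qlen + SFQ_DEPTH]
	heads a circular doubly linked list of all slots whose queue holds
	exactly qlen packets.  sfq_inc() and sfq_dec() unlink a slot after
	its length has changed, and sfq_link() reinserts it at the head of
	the list matching the new length.  This is what lets sfq_enqueue()
	grab a free slot from dep[SFQ_DEPTH].next (the zero-length list) and
	lets sfq_drop() below find a longest slot in O(1) via
	dep[max_depth + SFQ_DEPTH].next.  */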
static int sfq_drop(struct Qdisc *sch)
{
	struct sfq_sched_data *q = (struct sfq_sched_data *)sch->data;
	sfq_index d = q->max_depth;
	struct sk_buff *skb;

	/* Queue is full! Find the longest slot and
	   drop a packet from it. */

	if (d > 1) {
		sfq_index x = q->dep[d+SFQ_DEPTH].next;
		skb = q->qs[x].prev;
		__skb_unlink(skb, &q->qs[x]);
		kfree_skb(skb);
		sfq_dec(q, x);
		sch->q.qlen--;
		sch->stats.drops++;
		return 1;
	}

	if (d == 1) {
		/* It is difficult to believe, but ALL THE SLOTS HAVE LENGTH 1. */
		d = q->next[q->tail];
		q->next[q->tail] = q->next[d];
		q->allot[q->next[d]] += q->quantum;
		skb = q->qs[d].prev;
		__skb_unlink(skb, &q->qs[d]);
		kfree_skb(skb);
		sfq_dec(q, d);
		sch->q.qlen--;
		q->ht[q->hash[d]] = SFQ_DEPTH;
		sch->stats.drops++;
		return 1;
	}

	return 0;
}

static int
sfq_enqueue(struct sk_buff *skb, struct Qdisc* sch)
{
	struct sfq_sched_data *q = (struct sfq_sched_data *)sch->data;
	unsigned hash = sfq_hash(q, skb);
	sfq_index x;

	x = q->ht[hash];
	if (x == SFQ_DEPTH) {
		q->ht[hash] = x = q->dep[SFQ_DEPTH].next;
		q->hash[x] = hash;
	}
	__skb_queue_tail(&q->qs[x], skb);
	sfq_inc(q, x);
	if (q->qs[x].qlen == 1) {		/* The flow is new */
		if (q->tail == SFQ_DEPTH) {	/* It is the first flow */
			q->tail = x;
			q->next[x] = x;
			q->allot[x] = q->quantum;
		} else {
			q->next[x] = q->next[q->tail];
			q->next[q->tail] = x;
			q->tail = x;
		}
	}
	if (++sch->q.qlen < q->limit-1) {
		sch->stats.bytes += skb->len;
		sch->stats.packets++;
		return 0;
	}

	sfq_drop(sch);
	return NET_XMIT_CN;
}

static int
sfq_requeue(struct sk_buff *skb, struct Qdisc* sch)
{
	struct sfq_sched_data *q = (struct sfq_sched_data *)sch->data;
	unsigned hash = sfq_hash(q, skb);
	sfq_index x;

	x = q->ht[hash];
	if (x == SFQ_DEPTH) {
		q->ht[hash] = x = q->dep[SFQ_DEPTH].next;
		q->hash[x] = hash;
	}
	__skb_queue_head(&q->qs[x], skb);
	sfq_inc(q, x);
	if (q->qs[x].qlen == 1) {		/* The flow is new */
		if (q->tail == SFQ_DEPTH) {	/* It is the first flow */
			q->tail = x;
			q->next[x] = x;
			q->allot[x] = q->quantum;
		} else {
			q->next[x] = q->next[q->tail];
			q->next[q->tail] = x;
			q->tail = x;
		}
	}
	if (++sch->q.qlen < q->limit - 1)
		return 0;

	sch->stats.drops++;
	sfq_drop(sch);
	return NET_XMIT_CN;
}
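
/*	A note on the limit handling in sfq_enqueue()/sfq_requeue() above:
	once the queue length reaches limit-1, sfq_drop() removes a packet
	from the longest slot, which is not necessarily the packet that was
	just queued, so a new flow may still get through at the expense of
	the heaviest one; the caller is then told NET_XMIT_CN rather than
	success.  */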
static struct sk_buff *
sfq_dequeue(struct Qdisc* sch)
{
	struct sfq_sched_data *q = (struct sfq_sched_data *)sch->data;
	struct sk_buff *skb;
	sfq_index a, old_a;

	/* No active slots */
	if (q->tail == SFQ_DEPTH)
		return NULL;

	a = old_a = q->next[q->tail];

	/* Grab packet */
	skb = __skb_dequeue(&q->qs[a]);
	sfq_dec(q, a);
	sch->q.qlen--;

	/* Is the slot empty? */
	if (q->qs[a].qlen == 0) {
		a = q->next[a];
		if (a == old_a) {
			q->tail = SFQ_DEPTH;
			return skb;
		}
		q->next[q->tail] = a;
		q->allot[a] += q->quantum;
	} else if ((q->allot[a] -= skb->len) <= 0) {
		q->tail = a;
		a = q->next[a];
		q->allot[a] += q->quantum;
	}
	return skb;
}

static void
sfq_reset(struct Qdisc* sch)
{
	struct sk_buff *skb;

	while ((skb = sfq_dequeue(sch)) != NULL)
		kfree_skb(skb);
}

static void sfq_perturbation(unsigned long arg)
{
	struct Qdisc *sch = (struct Qdisc*)arg;
	struct sfq_sched_data *q = (struct sfq_sched_data *)sch->data;

	q->perturbation = net_random()&0x1F;

	if (q->perturb_period) {
		q->perturb_timer.expires = jiffies + q->perturb_period;
		add_timer(&q->perturb_timer);
	}
}

static int sfq_change(struct Qdisc *sch, struct rtattr *opt)
{
	struct sfq_sched_data *q = (struct sfq_sched_data *)sch->data;
	struct tc_sfq_qopt *ctl = RTA_DATA(opt);

	if (opt->rta_len < RTA_LENGTH(sizeof(*ctl)))
		return -EINVAL;

	sch_tree_lock(sch);
	q->quantum = ctl->quantum ? : psched_mtu(sch->dev);
	q->perturb_period = ctl->perturb_period*HZ;
	if (ctl->limit)
		q->limit = min_t(u32, ctl->limit, SFQ_DEPTH);

	while (sch->q.qlen >= q->limit-1)
		sfq_drop(sch);

	del_timer(&q->perturb_timer);
	if (q->perturb_period) {
		q->perturb_timer.expires = jiffies + q->perturb_period;
		add_timer(&q->perturb_timer);
	}
	sch_tree_unlock(sch);
	return 0;
}

static int sfq_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct sfq_sched_data *q = (struct sfq_sched_data *)sch->data;
	int i;

	q->perturb_timer.data = (unsigned long)sch;
	q->perturb_timer.function = sfq_perturbation;
	init_timer(&q->perturb_timer);

	for (i=0; i<SFQ_HASH_DIVISOR; i++)
		q->ht[i] = SFQ_DEPTH;
	for (i=0; i<SFQ_DEPTH; i++) {
		skb_queue_head_init(&q->qs[i]);
		q->dep[i+SFQ_DEPTH].next = i+SFQ_DEPTH;
		q->dep[i+SFQ_DEPTH].prev = i+SFQ_DEPTH;
	}
	q->limit = SFQ_DEPTH;
	q->max_depth = 0;
	q->tail = SFQ_DEPTH;
	if (opt == NULL) {
		q->quantum = psched_mtu(sch->dev);
		q->perturb_period = 0;
	} else {
		int err = sfq_change(sch, opt);
		if (err)
			return err;
	}
	for (i=0; i<SFQ_DEPTH; i++)
		sfq_link(q, i);
	MOD_INC_USE_COUNT;
	return 0;
}

static void sfq_destroy(struct Qdisc *sch)
{
	struct sfq_sched_data *q = (struct sfq_sched_data *)sch->data;
	del_timer(&q->perturb_timer);
	MOD_DEC_USE_COUNT;
}

static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct sfq_sched_data *q = (struct sfq_sched_data *)sch->data;
	unsigned char *b = skb->tail;
	struct tc_sfq_qopt opt;

	opt.quantum = q->quantum;
	opt.perturb_period = q->perturb_period/HZ;

	opt.limit = q->limit;
	opt.divisor = SFQ_HASH_DIVISOR;
	opt.flows = q->limit;

	RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);

	return skb->len;

rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}

struct Qdisc_ops sfq_qdisc_ops =
{
	NULL,
	NULL,
	"sfq",
	sizeof(struct sfq_sched_data),

	sfq_enqueue,
	sfq_dequeue,
	sfq_requeue,
	sfq_drop,

	sfq_init,
	sfq_reset,
	sfq_destroy,
	NULL, /* sfq_change */

	sfq_dump,
};
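
/*	Usage sketch (the device name is only an example): the qdisc is
	normally attached and tuned from user space with iproute2, e.g.

		tc qdisc add dev eth0 root sfq perturb 10 quantum 1514

	which is delivered as a struct tc_sfq_qopt in TCA_OPTIONS and ends
	up in sfq_init()/sfq_change() above.  */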
#ifdef MODULE
int init_module(void)
{
	return register_qdisc(&sfq_qdisc_ops);
}

void cleanup_module(void)
{
	unregister_qdisc(&sfq_qdisc_ops);
}
#endif
MODULE_LICENSE("GPL");