kern_timeout.c revision 278694
1/*- 2 * Copyright (c) 1982, 1986, 1991, 1993 3 * The Regents of the University of California. All rights reserved. 4 * (c) UNIX System Laboratories, Inc. 5 * All or some portions of this file are derived from material licensed 6 * to the University of California by American Telephone and Telegraph 7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 8 * the permission of UNIX System Laboratories, Inc. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 4. Neither the name of the University nor the names of its contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 * 34 * From: @(#)kern_clock.c 8.5 (Berkeley) 1/21/94 35 */ 36 37#include <sys/cdefs.h> 38__FBSDID("$FreeBSD: stable/10/sys/kern/kern_timeout.c 278694 2015-02-13 19:06:22Z sbruno $"); 39 40#include "opt_callout_profiling.h" 41#include "opt_kdtrace.h" 42#if defined(__arm__) 43#include "opt_timer.h" 44#endif 45 46#include <sys/param.h> 47#include <sys/systm.h> 48#include <sys/bus.h> 49#include <sys/callout.h> 50#include <sys/file.h> 51#include <sys/interrupt.h> 52#include <sys/kernel.h> 53#include <sys/ktr.h> 54#include <sys/lock.h> 55#include <sys/malloc.h> 56#include <sys/mutex.h> 57#include <sys/proc.h> 58#include <sys/sdt.h> 59#include <sys/sleepqueue.h> 60#include <sys/sysctl.h> 61#include <sys/smp.h> 62 63#ifdef SMP 64#include <machine/cpu.h> 65#endif 66 67#ifndef NO_EVENTTIMERS 68DPCPU_DECLARE(sbintime_t, hardclocktime); 69#endif 70 71SDT_PROVIDER_DEFINE(callout_execute); 72SDT_PROBE_DEFINE1(callout_execute, kernel, , callout__start, 73 "struct callout *"); 74SDT_PROBE_DEFINE1(callout_execute, kernel, , callout__end, 75 "struct callout *"); 76 77#ifdef CALLOUT_PROFILING 78static int avg_depth; 79SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0, 80 "Average number of items examined per softclock call. Units = 1/1000"); 81static int avg_gcalls; 82SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0, 83 "Average number of Giant callouts made per softclock call. 
Units = 1/1000"); 84static int avg_lockcalls; 85SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls, CTLFLAG_RD, &avg_lockcalls, 0, 86 "Average number of lock callouts made per softclock call. Units = 1/1000"); 87static int avg_mpcalls; 88SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0, 89 "Average number of MP callouts made per softclock call. Units = 1/1000"); 90static int avg_depth_dir; 91SYSCTL_INT(_debug, OID_AUTO, to_avg_depth_dir, CTLFLAG_RD, &avg_depth_dir, 0, 92 "Average number of direct callouts examined per callout_process call. " 93 "Units = 1/1000"); 94static int avg_lockcalls_dir; 95SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls_dir, CTLFLAG_RD, 96 &avg_lockcalls_dir, 0, "Average number of lock direct callouts made per " 97 "callout_process call. Units = 1/1000"); 98static int avg_mpcalls_dir; 99SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls_dir, CTLFLAG_RD, &avg_mpcalls_dir, 100 0, "Average number of MP direct callouts made per callout_process call. " 101 "Units = 1/1000"); 102#endif 103 104static int ncallout; 105SYSCTL_INT(_kern, OID_AUTO, ncallout, CTLFLAG_RDTUN, &ncallout, 0, 106 "Number of entries in callwheel and size of timeout() preallocation"); 107 108/* 109 * TODO: 110 * allocate more timeout table slots when table overflows. 111 */ 112u_int callwheelsize, callwheelmask; 113 114/* 115 * The callout cpu exec entities represent informations necessary for 116 * describing the state of callouts currently running on the CPU and the ones 117 * necessary for migrating callouts to the new callout cpu. In particular, 118 * the first entry of the array cc_exec_entity holds informations for callout 119 * running in SWI thread context, while the second one holds informations 120 * for callout running directly from hardware interrupt context. 121 * The cached informations are very important for deferring migration when 122 * the migrating callout is already running. 123 */ 124struct cc_exec { 125 struct callout *cc_next; 126 struct callout *cc_curr; 127#ifdef SMP 128 void (*ce_migration_func)(void *); 129 void *ce_migration_arg; 130 int ce_migration_cpu; 131 sbintime_t ce_migration_time; 132 sbintime_t ce_migration_prec; 133#endif 134 bool cc_cancel; 135 bool cc_waiting; 136}; 137 138/* 139 * There is one struct callout_cpu per cpu, holding all relevant 140 * state for the callout processing thread on the individual CPU. 
141 */ 142struct callout_cpu { 143 struct mtx_padalign cc_lock; 144 struct cc_exec cc_exec_entity[2]; 145 struct callout *cc_callout; 146 struct callout_list *cc_callwheel; 147 struct callout_tailq cc_expireq; 148 struct callout_slist cc_callfree; 149 sbintime_t cc_firstevent; 150 sbintime_t cc_lastscan; 151 void *cc_cookie; 152 u_int cc_bucket; 153 char cc_ktr_event_name[20]; 154}; 155 156#define cc_exec_curr cc_exec_entity[0].cc_curr 157#define cc_exec_next cc_exec_entity[0].cc_next 158#define cc_exec_cancel cc_exec_entity[0].cc_cancel 159#define cc_exec_waiting cc_exec_entity[0].cc_waiting 160#define cc_exec_curr_dir cc_exec_entity[1].cc_curr 161#define cc_exec_next_dir cc_exec_entity[1].cc_next 162#define cc_exec_cancel_dir cc_exec_entity[1].cc_cancel 163#define cc_exec_waiting_dir cc_exec_entity[1].cc_waiting 164 165#ifdef SMP 166#define cc_migration_func cc_exec_entity[0].ce_migration_func 167#define cc_migration_arg cc_exec_entity[0].ce_migration_arg 168#define cc_migration_cpu cc_exec_entity[0].ce_migration_cpu 169#define cc_migration_time cc_exec_entity[0].ce_migration_time 170#define cc_migration_prec cc_exec_entity[0].ce_migration_prec 171#define cc_migration_func_dir cc_exec_entity[1].ce_migration_func 172#define cc_migration_arg_dir cc_exec_entity[1].ce_migration_arg 173#define cc_migration_cpu_dir cc_exec_entity[1].ce_migration_cpu 174#define cc_migration_time_dir cc_exec_entity[1].ce_migration_time 175#define cc_migration_prec_dir cc_exec_entity[1].ce_migration_prec 176 177struct callout_cpu cc_cpu[MAXCPU]; 178#define CPUBLOCK MAXCPU 179#define CC_CPU(cpu) (&cc_cpu[(cpu)]) 180#define CC_SELF() CC_CPU(PCPU_GET(cpuid)) 181#else 182struct callout_cpu cc_cpu; 183#define CC_CPU(cpu) &cc_cpu 184#define CC_SELF() &cc_cpu 185#endif 186#define CC_LOCK(cc) mtx_lock_spin(&(cc)->cc_lock) 187#define CC_UNLOCK(cc) mtx_unlock_spin(&(cc)->cc_lock) 188#define CC_LOCK_ASSERT(cc) mtx_assert(&(cc)->cc_lock, MA_OWNED) 189 190static int timeout_cpu; 191 192static void callout_cpu_init(struct callout_cpu *cc, int cpu); 193static void softclock_call_cc(struct callout *c, struct callout_cpu *cc, 194#ifdef CALLOUT_PROFILING 195 int *mpcalls, int *lockcalls, int *gcalls, 196#endif 197 int direct); 198 199static MALLOC_DEFINE(M_CALLOUT, "callout", "Callout datastructures"); 200 201/** 202 * Locked by cc_lock: 203 * cc_curr - If a callout is in progress, it is cc_curr. 204 * If cc_curr is non-NULL, threads waiting in 205 * callout_drain() will be woken up as soon as the 206 * relevant callout completes. 207 * cc_cancel - Changing to 1 with both callout_lock and cc_lock held 208 * guarantees that the current callout will not run. 209 * The softclock() function sets this to 0 before it 210 * drops callout_lock to acquire c_lock, and it calls 211 * the handler only if curr_cancelled is still 0 after 212 * cc_lock is successfully acquired. 213 * cc_waiting - If a thread is waiting in callout_drain(), then 214 * callout_wait is nonzero. Set only when 215 * cc_curr is non-NULL. 216 */ 217 218/* 219 * Resets the execution entity tied to a specific callout cpu. 
220 */ 221static void 222cc_cce_cleanup(struct callout_cpu *cc, int direct) 223{ 224 225 cc->cc_exec_entity[direct].cc_curr = NULL; 226 cc->cc_exec_entity[direct].cc_next = NULL; 227 cc->cc_exec_entity[direct].cc_cancel = false; 228 cc->cc_exec_entity[direct].cc_waiting = false; 229#ifdef SMP 230 cc->cc_exec_entity[direct].ce_migration_cpu = CPUBLOCK; 231 cc->cc_exec_entity[direct].ce_migration_time = 0; 232 cc->cc_exec_entity[direct].ce_migration_prec = 0; 233 cc->cc_exec_entity[direct].ce_migration_func = NULL; 234 cc->cc_exec_entity[direct].ce_migration_arg = NULL; 235#endif 236} 237 238/* 239 * Checks if migration is requested by a specific callout cpu. 240 */ 241static int 242cc_cce_migrating(struct callout_cpu *cc, int direct) 243{ 244 245#ifdef SMP 246 return (cc->cc_exec_entity[direct].ce_migration_cpu != CPUBLOCK); 247#else 248 return (0); 249#endif 250} 251 252/* 253 * Kernel low level callwheel initialization 254 * called on cpu0 during kernel startup. 255 */ 256static void 257callout_callwheel_init(void *dummy) 258{ 259 struct callout_cpu *cc; 260 261 /* 262 * Calculate the size of the callout wheel and the preallocated 263 * timeout() structures. 264 * XXX: Clip callout to result of previous function of maxusers 265 * maximum 384. This is still huge, but acceptable. 266 */ 267 ncallout = imin(16 + maxproc + maxfiles, 18508); 268 TUNABLE_INT_FETCH("kern.ncallout", &ncallout); 269 270 /* 271 * Calculate callout wheel size, should be next power of two higher 272 * than 'ncallout'. 273 */ 274 callwheelsize = 1 << fls(ncallout); 275 callwheelmask = callwheelsize - 1; 276 277 /* 278 * Only cpu0 handles timeout(9) and receives a preallocation. 279 * 280 * XXX: Once all timeout(9) consumers are converted this can 281 * be removed. 282 */ 283 timeout_cpu = PCPU_GET(cpuid); 284 cc = CC_CPU(timeout_cpu); 285 cc->cc_callout = malloc(ncallout * sizeof(struct callout), 286 M_CALLOUT, M_WAITOK); 287 callout_cpu_init(cc, timeout_cpu); 288} 289SYSINIT(callwheel_init, SI_SUB_CPU, SI_ORDER_ANY, callout_callwheel_init, NULL); 290 291/* 292 * Initialize the per-cpu callout structures. 293 */ 294static void 295callout_cpu_init(struct callout_cpu *cc, int cpu) 296{ 297 struct callout *c; 298 int i; 299 300 mtx_init(&cc->cc_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE); 301 SLIST_INIT(&cc->cc_callfree); 302 cc->cc_callwheel = malloc(sizeof(struct callout_list) * callwheelsize, 303 M_CALLOUT, M_WAITOK); 304 for (i = 0; i < callwheelsize; i++) 305 LIST_INIT(&cc->cc_callwheel[i]); 306 TAILQ_INIT(&cc->cc_expireq); 307 cc->cc_firstevent = INT64_MAX; 308 for (i = 0; i < 2; i++) 309 cc_cce_cleanup(cc, i); 310 snprintf(cc->cc_ktr_event_name, sizeof(cc->cc_ktr_event_name), 311 "callwheel cpu %d", cpu); 312 if (cc->cc_callout == NULL) /* Only cpu0 handles timeout(9) */ 313 return; 314 for (i = 0; i < ncallout; i++) { 315 c = &cc->cc_callout[i]; 316 callout_init(c, 0); 317 c->c_flags = CALLOUT_LOCAL_ALLOC; 318 SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle); 319 } 320} 321 322#ifdef SMP 323/* 324 * Switches the cpu tied to a specific callout. 325 * The function expects a locked incoming callout cpu and returns with 326 * locked outcoming callout cpu. 
327 */ 328static struct callout_cpu * 329callout_cpu_switch(struct callout *c, struct callout_cpu *cc, int new_cpu) 330{ 331 struct callout_cpu *new_cc; 332 333 MPASS(c != NULL && cc != NULL); 334 CC_LOCK_ASSERT(cc); 335 336 /* 337 * Avoid interrupts and preemption firing after the callout cpu 338 * is blocked in order to avoid deadlocks as the new thread 339 * may be willing to acquire the callout cpu lock. 340 */ 341 c->c_cpu = CPUBLOCK; 342 spinlock_enter(); 343 CC_UNLOCK(cc); 344 new_cc = CC_CPU(new_cpu); 345 CC_LOCK(new_cc); 346 spinlock_exit(); 347 c->c_cpu = new_cpu; 348 return (new_cc); 349} 350#endif 351 352/* 353 * Start standard softclock thread. 354 */ 355static void 356start_softclock(void *dummy) 357{ 358 struct callout_cpu *cc; 359#ifdef SMP 360 int cpu; 361#endif 362 363 cc = CC_CPU(timeout_cpu); 364 if (swi_add(&clk_intr_event, "clock", softclock, cc, SWI_CLOCK, 365 INTR_MPSAFE, &cc->cc_cookie)) 366 panic("died while creating standard software ithreads"); 367#ifdef SMP 368 CPU_FOREACH(cpu) { 369 if (cpu == timeout_cpu) 370 continue; 371 cc = CC_CPU(cpu); 372 cc->cc_callout = NULL; /* Only cpu0 handles timeout(9). */ 373 callout_cpu_init(cc, cpu); 374 if (swi_add(NULL, "clock", softclock, cc, SWI_CLOCK, 375 INTR_MPSAFE, &cc->cc_cookie)) 376 panic("died while creating standard software ithreads"); 377 } 378#endif 379} 380SYSINIT(start_softclock, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softclock, NULL); 381 382#define CC_HASH_SHIFT 8 383 384static inline u_int 385callout_hash(sbintime_t sbt) 386{ 387 388 return (sbt >> (32 - CC_HASH_SHIFT)); 389} 390 391static inline u_int 392callout_get_bucket(sbintime_t sbt) 393{ 394 395 return (callout_hash(sbt) & callwheelmask); 396} 397 398void 399callout_process(sbintime_t now) 400{ 401 struct callout *tmp, *tmpn; 402 struct callout_cpu *cc; 403 struct callout_list *sc; 404 sbintime_t first, last, max, tmp_max; 405 uint32_t lookahead; 406 u_int firstb, lastb, nowb; 407#ifdef CALLOUT_PROFILING 408 int depth_dir = 0, mpcalls_dir = 0, lockcalls_dir = 0; 409#endif 410 411 cc = CC_SELF(); 412 mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET); 413 414 /* Compute the buckets of the last scan and present times. */ 415 firstb = callout_hash(cc->cc_lastscan); 416 cc->cc_lastscan = now; 417 nowb = callout_hash(now); 418 419 /* Compute the last bucket and minimum time of the bucket after it. */ 420 if (nowb == firstb) 421 lookahead = (SBT_1S / 16); 422 else if (nowb - firstb == 1) 423 lookahead = (SBT_1S / 8); 424 else 425 lookahead = (SBT_1S / 2); 426 first = last = now; 427 first += (lookahead / 2); 428 last += lookahead; 429 last &= (0xffffffffffffffffLLU << (32 - CC_HASH_SHIFT)); 430 lastb = callout_hash(last) - 1; 431 max = last; 432 433 /* 434 * Check if we wrapped around the entire wheel from the last scan. 435 * In case, we need to scan entirely the wheel for pending callouts. 436 */ 437 if (lastb - firstb >= callwheelsize) { 438 lastb = firstb + callwheelsize - 1; 439 if (nowb - firstb >= callwheelsize) 440 nowb = lastb; 441 } 442 443 /* Iterate callwheel from firstb to nowb and then up to lastb. */ 444 do { 445 sc = &cc->cc_callwheel[firstb & callwheelmask]; 446 tmp = LIST_FIRST(sc); 447 while (tmp != NULL) { 448 /* Run the callout if present time within allowed. */ 449 if (tmp->c_time <= now) { 450 /* 451 * Consumer told us the callout may be run 452 * directly from hardware interrupt context. 
453 */ 454 if (tmp->c_flags & CALLOUT_DIRECT) { 455#ifdef CALLOUT_PROFILING 456 ++depth_dir; 457#endif 458 cc->cc_exec_next_dir = 459 LIST_NEXT(tmp, c_links.le); 460 cc->cc_bucket = firstb & callwheelmask; 461 LIST_REMOVE(tmp, c_links.le); 462 softclock_call_cc(tmp, cc, 463#ifdef CALLOUT_PROFILING 464 &mpcalls_dir, &lockcalls_dir, NULL, 465#endif 466 1); 467 tmp = cc->cc_exec_next_dir; 468 } else { 469 tmpn = LIST_NEXT(tmp, c_links.le); 470 LIST_REMOVE(tmp, c_links.le); 471 TAILQ_INSERT_TAIL(&cc->cc_expireq, 472 tmp, c_links.tqe); 473 tmp->c_flags |= CALLOUT_PROCESSED; 474 tmp = tmpn; 475 } 476 continue; 477 } 478 /* Skip events from distant future. */ 479 if (tmp->c_time >= max) 480 goto next; 481 /* 482 * Event minimal time is bigger than present maximal 483 * time, so it cannot be aggregated. 484 */ 485 if (tmp->c_time > last) { 486 lastb = nowb; 487 goto next; 488 } 489 /* Update first and last time, respecting this event. */ 490 if (tmp->c_time < first) 491 first = tmp->c_time; 492 tmp_max = tmp->c_time + tmp->c_precision; 493 if (tmp_max < last) 494 last = tmp_max; 495next: 496 tmp = LIST_NEXT(tmp, c_links.le); 497 } 498 /* Proceed with the next bucket. */ 499 firstb++; 500 /* 501 * Stop if we looked after present time and found 502 * some event we can't execute at now. 503 * Stop if we looked far enough into the future. 504 */ 505 } while (((int)(firstb - lastb)) <= 0); 506 cc->cc_firstevent = last; 507#ifndef NO_EVENTTIMERS 508 cpu_new_callout(curcpu, last, first); 509#endif 510#ifdef CALLOUT_PROFILING 511 avg_depth_dir += (depth_dir * 1000 - avg_depth_dir) >> 8; 512 avg_mpcalls_dir += (mpcalls_dir * 1000 - avg_mpcalls_dir) >> 8; 513 avg_lockcalls_dir += (lockcalls_dir * 1000 - avg_lockcalls_dir) >> 8; 514#endif 515 mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET); 516 /* 517 * swi_sched acquires the thread lock, so we don't want to call it 518 * with cc_lock held; incorrect locking order. 519 */ 520 if (!TAILQ_EMPTY(&cc->cc_expireq)) 521 swi_sched(cc->cc_cookie, 0); 522} 523 524static struct callout_cpu * 525callout_lock(struct callout *c) 526{ 527 struct callout_cpu *cc; 528 int cpu; 529 530 for (;;) { 531 cpu = c->c_cpu; 532#ifdef SMP 533 if (cpu == CPUBLOCK) { 534 while (c->c_cpu == CPUBLOCK) 535 cpu_spinwait(); 536 continue; 537 } 538#endif 539 cc = CC_CPU(cpu); 540 CC_LOCK(cc); 541 if (cpu == c->c_cpu) 542 break; 543 CC_UNLOCK(cc); 544 } 545 return (cc); 546} 547 548static void 549callout_cc_add(struct callout *c, struct callout_cpu *cc, 550 sbintime_t sbt, sbintime_t precision, void (*func)(void *), 551 void *arg, int cpu, int flags) 552{ 553 int bucket; 554 555 CC_LOCK_ASSERT(cc); 556 if (sbt < cc->cc_lastscan) 557 sbt = cc->cc_lastscan; 558 c->c_arg = arg; 559 c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING); 560 if (flags & C_DIRECT_EXEC) 561 c->c_flags |= CALLOUT_DIRECT; 562 c->c_flags &= ~CALLOUT_PROCESSED; 563 c->c_func = func; 564 c->c_time = sbt; 565 c->c_precision = precision; 566 bucket = callout_get_bucket(c->c_time); 567 CTR3(KTR_CALLOUT, "precision set for %p: %d.%08x", 568 c, (int)(c->c_precision >> 32), 569 (u_int)(c->c_precision & 0xffffffff)); 570 LIST_INSERT_HEAD(&cc->cc_callwheel[bucket], c, c_links.le); 571 if (cc->cc_bucket == bucket) 572 cc->cc_exec_next_dir = c; 573#ifndef NO_EVENTTIMERS 574 /* 575 * Inform the eventtimers(4) subsystem there's a new callout 576 * that has been inserted, but only if really required. 
577 */ 578 if (INT64_MAX - c->c_time < c->c_precision) 579 c->c_precision = INT64_MAX - c->c_time; 580 sbt = c->c_time + c->c_precision; 581 if (sbt < cc->cc_firstevent) { 582 cc->cc_firstevent = sbt; 583 cpu_new_callout(cpu, sbt, c->c_time); 584 } 585#endif 586} 587 588static void 589callout_cc_del(struct callout *c, struct callout_cpu *cc) 590{ 591 592 if ((c->c_flags & CALLOUT_LOCAL_ALLOC) == 0) 593 return; 594 c->c_func = NULL; 595 SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle); 596} 597 598static void 599softclock_call_cc(struct callout *c, struct callout_cpu *cc, 600#ifdef CALLOUT_PROFILING 601 int *mpcalls, int *lockcalls, int *gcalls, 602#endif 603 int direct) 604{ 605 struct rm_priotracker tracker; 606 void (*c_func)(void *); 607 void *c_arg; 608 struct lock_class *class; 609 struct lock_object *c_lock; 610 uintptr_t lock_status; 611 int c_flags; 612#ifdef SMP 613 struct callout_cpu *new_cc; 614 void (*new_func)(void *); 615 void *new_arg; 616 int flags, new_cpu; 617 sbintime_t new_prec, new_time; 618#endif 619#if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING) 620 sbintime_t sbt1, sbt2; 621 struct timespec ts2; 622 static sbintime_t maxdt = 2 * SBT_1MS; /* 2 msec */ 623 static timeout_t *lastfunc; 624#endif 625 626 KASSERT((c->c_flags & (CALLOUT_PENDING | CALLOUT_ACTIVE)) == 627 (CALLOUT_PENDING | CALLOUT_ACTIVE), 628 ("softclock_call_cc: pend|act %p %x", c, c->c_flags)); 629 class = (c->c_lock != NULL) ? LOCK_CLASS(c->c_lock) : NULL; 630 lock_status = 0; 631 if (c->c_flags & CALLOUT_SHAREDLOCK) { 632 if (class == &lock_class_rm) 633 lock_status = (uintptr_t)&tracker; 634 else 635 lock_status = 1; 636 } 637 c_lock = c->c_lock; 638 c_func = c->c_func; 639 c_arg = c->c_arg; 640 c_flags = c->c_flags; 641 if (c->c_flags & CALLOUT_LOCAL_ALLOC) 642 c->c_flags = CALLOUT_LOCAL_ALLOC; 643 else 644 c->c_flags &= ~CALLOUT_PENDING; 645 cc->cc_exec_entity[direct].cc_curr = c; 646 cc->cc_exec_entity[direct].cc_cancel = false; 647 CC_UNLOCK(cc); 648 if (c_lock != NULL) { 649 class->lc_lock(c_lock, lock_status); 650 /* 651 * The callout may have been cancelled 652 * while we switched locks. 653 */ 654 if (cc->cc_exec_entity[direct].cc_cancel) { 655 class->lc_unlock(c_lock); 656 goto skip; 657 } 658 /* The callout cannot be stopped now. 
*/ 659 cc->cc_exec_entity[direct].cc_cancel = true; 660 if (c_lock == &Giant.lock_object) { 661#ifdef CALLOUT_PROFILING 662 (*gcalls)++; 663#endif 664 CTR3(KTR_CALLOUT, "callout giant %p func %p arg %p", 665 c, c_func, c_arg); 666 } else { 667#ifdef CALLOUT_PROFILING 668 (*lockcalls)++; 669#endif 670 CTR3(KTR_CALLOUT, "callout lock %p func %p arg %p", 671 c, c_func, c_arg); 672 } 673 } else { 674#ifdef CALLOUT_PROFILING 675 (*mpcalls)++; 676#endif 677 CTR3(KTR_CALLOUT, "callout %p func %p arg %p", 678 c, c_func, c_arg); 679 } 680 KTR_STATE3(KTR_SCHED, "callout", cc->cc_ktr_event_name, "running", 681 "func:%p", c_func, "arg:%p", c_arg, "direct:%d", direct); 682#if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING) 683 sbt1 = sbinuptime(); 684#endif 685 THREAD_NO_SLEEPING(); 686 SDT_PROBE(callout_execute, kernel, , callout__start, c, 0, 0, 0, 0); 687 c_func(c_arg); 688 SDT_PROBE(callout_execute, kernel, , callout__end, c, 0, 0, 0, 0); 689 THREAD_SLEEPING_OK(); 690#if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING) 691 sbt2 = sbinuptime(); 692 sbt2 -= sbt1; 693 if (sbt2 > maxdt) { 694 if (lastfunc != c_func || sbt2 > maxdt * 2) { 695 ts2 = sbttots(sbt2); 696 printf( 697 "Expensive timeout(9) function: %p(%p) %jd.%09ld s\n", 698 c_func, c_arg, (intmax_t)ts2.tv_sec, ts2.tv_nsec); 699 } 700 maxdt = sbt2; 701 lastfunc = c_func; 702 } 703#endif 704 KTR_STATE0(KTR_SCHED, "callout", cc->cc_ktr_event_name, "idle"); 705 CTR1(KTR_CALLOUT, "callout %p finished", c); 706 if ((c_flags & CALLOUT_RETURNUNLOCKED) == 0) 707 class->lc_unlock(c_lock); 708skip: 709 CC_LOCK(cc); 710 KASSERT(cc->cc_exec_entity[direct].cc_curr == c, ("mishandled cc_curr")); 711 cc->cc_exec_entity[direct].cc_curr = NULL; 712 if (cc->cc_exec_entity[direct].cc_waiting) { 713 /* 714 * There is someone waiting for the 715 * callout to complete. 716 * If the callout was scheduled for 717 * migration just cancel it. 718 */ 719 if (cc_cce_migrating(cc, direct)) { 720 cc_cce_cleanup(cc, direct); 721 722 /* 723 * It should be assert here that the callout is not 724 * destroyed but that is not easy. 725 */ 726 c->c_flags &= ~CALLOUT_DFRMIGRATION; 727 } 728 cc->cc_exec_entity[direct].cc_waiting = false; 729 CC_UNLOCK(cc); 730 wakeup(&cc->cc_exec_entity[direct].cc_waiting); 731 CC_LOCK(cc); 732 } else if (cc_cce_migrating(cc, direct)) { 733 KASSERT((c_flags & CALLOUT_LOCAL_ALLOC) == 0, 734 ("Migrating legacy callout %p", c)); 735#ifdef SMP 736 /* 737 * If the callout was scheduled for 738 * migration just perform it now. 739 */ 740 new_cpu = cc->cc_exec_entity[direct].ce_migration_cpu; 741 new_time = cc->cc_exec_entity[direct].ce_migration_time; 742 new_prec = cc->cc_exec_entity[direct].ce_migration_prec; 743 new_func = cc->cc_exec_entity[direct].ce_migration_func; 744 new_arg = cc->cc_exec_entity[direct].ce_migration_arg; 745 cc_cce_cleanup(cc, direct); 746 747 /* 748 * It should be assert here that the callout is not destroyed 749 * but that is not easy. 750 * 751 * As first thing, handle deferred callout stops. 752 */ 753 if ((c->c_flags & CALLOUT_DFRMIGRATION) == 0) { 754 CTR3(KTR_CALLOUT, 755 "deferred cancelled %p func %p arg %p", 756 c, new_func, new_arg); 757 callout_cc_del(c, cc); 758 return; 759 } 760 c->c_flags &= ~CALLOUT_DFRMIGRATION; 761 762 new_cc = callout_cpu_switch(c, cc, new_cpu); 763 flags = (direct) ? 
C_DIRECT_EXEC : 0; 764 callout_cc_add(c, new_cc, new_time, new_prec, new_func, 765 new_arg, new_cpu, flags); 766 CC_UNLOCK(new_cc); 767 CC_LOCK(cc); 768#else 769 panic("migration should not happen"); 770#endif 771 } 772 /* 773 * If the current callout is locally allocated (from 774 * timeout(9)) then put it on the freelist. 775 * 776 * Note: we need to check the cached copy of c_flags because 777 * if it was not local, then it's not safe to deref the 778 * callout pointer. 779 */ 780 KASSERT((c_flags & CALLOUT_LOCAL_ALLOC) == 0 || 781 c->c_flags == CALLOUT_LOCAL_ALLOC, 782 ("corrupted callout")); 783 if (c_flags & CALLOUT_LOCAL_ALLOC) 784 callout_cc_del(c, cc); 785} 786 787/* 788 * The callout mechanism is based on the work of Adam M. Costello and 789 * George Varghese, published in a technical report entitled "Redesigning 790 * the BSD Callout and Timer Facilities" and modified slightly for inclusion 791 * in FreeBSD by Justin T. Gibbs. The original work on the data structures 792 * used in this implementation was published by G. Varghese and T. Lauck in 793 * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for 794 * the Efficient Implementation of a Timer Facility" in the Proceedings of 795 * the 11th ACM Annual Symposium on Operating Systems Principles, 796 * Austin, Texas Nov 1987. 797 */ 798 799/* 800 * Software (low priority) clock interrupt. 801 * Run periodic events from timeout queue. 802 */ 803void 804softclock(void *arg) 805{ 806 struct callout_cpu *cc; 807 struct callout *c; 808#ifdef CALLOUT_PROFILING 809 int depth = 0, gcalls = 0, lockcalls = 0, mpcalls = 0; 810#endif 811 812 cc = (struct callout_cpu *)arg; 813 CC_LOCK(cc); 814 while ((c = TAILQ_FIRST(&cc->cc_expireq)) != NULL) { 815 TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe); 816 softclock_call_cc(c, cc, 817#ifdef CALLOUT_PROFILING 818 &mpcalls, &lockcalls, &gcalls, 819#endif 820 0); 821#ifdef CALLOUT_PROFILING 822 ++depth; 823#endif 824 } 825#ifdef CALLOUT_PROFILING 826 avg_depth += (depth * 1000 - avg_depth) >> 8; 827 avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8; 828 avg_lockcalls += (lockcalls * 1000 - avg_lockcalls) >> 8; 829 avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8; 830#endif 831 CC_UNLOCK(cc); 832} 833 834/* 835 * timeout -- 836 * Execute a function after a specified length of time. 837 * 838 * untimeout -- 839 * Cancel previous timeout function call. 840 * 841 * callout_handle_init -- 842 * Initialize a handle so that using it with untimeout is benign. 843 * 844 * See AT&T BCI Driver Reference Manual for specification. This 845 * implementation differs from that one in that although an 846 * identification value is returned from timeout, the original 847 * arguments to timeout as well as the identifier are used to 848 * identify entries for untimeout. 849 */ 850struct callout_handle 851timeout(ftn, arg, to_ticks) 852 timeout_t *ftn; 853 void *arg; 854 int to_ticks; 855{ 856 struct callout_cpu *cc; 857 struct callout *new; 858 struct callout_handle handle; 859 860 cc = CC_CPU(timeout_cpu); 861 CC_LOCK(cc); 862 /* Fill in the next free callout structure. 
*/ 863 new = SLIST_FIRST(&cc->cc_callfree); 864 if (new == NULL) 865 /* XXX Attempt to malloc first */ 866 panic("timeout table full"); 867 SLIST_REMOVE_HEAD(&cc->cc_callfree, c_links.sle); 868 callout_reset(new, to_ticks, ftn, arg); 869 handle.callout = new; 870 CC_UNLOCK(cc); 871 872 return (handle); 873} 874 875void 876untimeout(ftn, arg, handle) 877 timeout_t *ftn; 878 void *arg; 879 struct callout_handle handle; 880{ 881 struct callout_cpu *cc; 882 883 /* 884 * Check for a handle that was initialized 885 * by callout_handle_init, but never used 886 * for a real timeout. 887 */ 888 if (handle.callout == NULL) 889 return; 890 891 cc = callout_lock(handle.callout); 892 if (handle.callout->c_func == ftn && handle.callout->c_arg == arg) 893 callout_stop(handle.callout); 894 CC_UNLOCK(cc); 895} 896 897void 898callout_handle_init(struct callout_handle *handle) 899{ 900 handle->callout = NULL; 901} 902 903/* 904 * New interface; clients allocate their own callout structures. 905 * 906 * callout_reset() - establish or change a timeout 907 * callout_stop() - disestablish a timeout 908 * callout_init() - initialize a callout structure so that it can 909 * safely be passed to callout_reset() and callout_stop() 910 * 911 * <sys/callout.h> defines three convenience macros: 912 * 913 * callout_active() - returns truth if callout has not been stopped, 914 * drained, or deactivated since the last time the callout was 915 * reset. 916 * callout_pending() - returns truth if callout is still waiting for timeout 917 * callout_deactivate() - marks the callout as having been serviced 918 */ 919int 920callout_reset_sbt_on(struct callout *c, sbintime_t sbt, sbintime_t precision, 921 void (*ftn)(void *), void *arg, int cpu, int flags) 922{ 923 sbintime_t to_sbt, pr; 924 struct callout_cpu *cc; 925 int cancelled, direct; 926 927 cancelled = 0; 928 if (flags & C_ABSOLUTE) { 929 to_sbt = sbt; 930 } else { 931 if ((flags & C_HARDCLOCK) && (sbt < tick_sbt)) 932 sbt = tick_sbt; 933 if ((flags & C_HARDCLOCK) || 934#ifdef NO_EVENTTIMERS 935 sbt >= sbt_timethreshold) { 936 to_sbt = getsbinuptime(); 937 938 /* Add safety belt for the case of hz > 1000. */ 939 to_sbt += tc_tick_sbt - tick_sbt; 940#else 941 sbt >= sbt_tickthreshold) { 942 /* 943 * Obtain the time of the last hardclock() call on 944 * this CPU directly from the kern_clocksource.c. 945 * This value is per-CPU, but it is equal for all 946 * active ones. 947 */ 948#ifdef __LP64__ 949 to_sbt = DPCPU_GET(hardclocktime); 950#else 951 spinlock_enter(); 952 to_sbt = DPCPU_GET(hardclocktime); 953 spinlock_exit(); 954#endif 955#endif 956 if ((flags & C_HARDCLOCK) == 0) 957 to_sbt += tick_sbt; 958 } else 959 to_sbt = sbinuptime(); 960 if (INT64_MAX - to_sbt < sbt) 961 to_sbt = INT64_MAX; 962 else 963 to_sbt += sbt; 964 pr = ((C_PRELGET(flags) < 0) ? sbt >> tc_precexp : 965 sbt >> C_PRELGET(flags)); 966 if (pr > precision) 967 precision = pr; 968 } 969 /* 970 * Don't allow migration of pre-allocated callouts lest they 971 * become unbalanced. 972 */ 973 if (c->c_flags & CALLOUT_LOCAL_ALLOC) 974 cpu = c->c_cpu; 975 direct = (c->c_flags & CALLOUT_DIRECT) != 0; 976 KASSERT(!direct || c->c_lock == NULL, 977 ("%s: direct callout %p has lock", __func__, c)); 978 cc = callout_lock(c); 979 if (cc->cc_exec_entity[direct].cc_curr == c) { 980 /* 981 * We're being asked to reschedule a callout which is 982 * currently in progress. If there is a lock then we 983 * can cancel the callout if it has not really started. 
984 */ 985 if (c->c_lock != NULL && !cc->cc_exec_entity[direct].cc_cancel) 986 cancelled = cc->cc_exec_entity[direct].cc_cancel = true; 987 if (cc->cc_exec_entity[direct].cc_waiting) { 988 /* 989 * Someone has called callout_drain to kill this 990 * callout. Don't reschedule. 991 */ 992 CTR4(KTR_CALLOUT, "%s %p func %p arg %p", 993 cancelled ? "cancelled" : "failed to cancel", 994 c, c->c_func, c->c_arg); 995 CC_UNLOCK(cc); 996 return (cancelled); 997 } 998 } 999 if (c->c_flags & CALLOUT_PENDING) { 1000 if ((c->c_flags & CALLOUT_PROCESSED) == 0) { 1001 if (cc->cc_exec_next_dir == c) 1002 cc->cc_exec_next_dir = LIST_NEXT(c, c_links.le); 1003 LIST_REMOVE(c, c_links.le); 1004 } else 1005 TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe); 1006 cancelled = 1; 1007 c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING); 1008 } 1009 1010#ifdef SMP 1011 /* 1012 * If the callout must migrate try to perform it immediately. 1013 * If the callout is currently running, just defer the migration 1014 * to a more appropriate moment. 1015 */ 1016 if (c->c_cpu != cpu) { 1017 if (cc->cc_exec_entity[direct].cc_curr == c) { 1018 cc->cc_exec_entity[direct].ce_migration_cpu = cpu; 1019 cc->cc_exec_entity[direct].ce_migration_time 1020 = to_sbt; 1021 cc->cc_exec_entity[direct].ce_migration_prec 1022 = precision; 1023 cc->cc_exec_entity[direct].ce_migration_func = ftn; 1024 cc->cc_exec_entity[direct].ce_migration_arg = arg; 1025 c->c_flags |= CALLOUT_DFRMIGRATION; 1026 CTR6(KTR_CALLOUT, 1027 "migration of %p func %p arg %p in %d.%08x to %u deferred", 1028 c, c->c_func, c->c_arg, (int)(to_sbt >> 32), 1029 (u_int)(to_sbt & 0xffffffff), cpu); 1030 CC_UNLOCK(cc); 1031 return (cancelled); 1032 } 1033 cc = callout_cpu_switch(c, cc, cpu); 1034 } 1035#endif 1036 1037 callout_cc_add(c, cc, to_sbt, precision, ftn, arg, cpu, flags); 1038 CTR6(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d.%08x", 1039 cancelled ? "re" : "", c, c->c_func, c->c_arg, (int)(to_sbt >> 32), 1040 (u_int)(to_sbt & 0xffffffff)); 1041 CC_UNLOCK(cc); 1042 1043 return (cancelled); 1044} 1045 1046/* 1047 * Common idioms that can be optimized in the future. 1048 */ 1049int 1050callout_schedule_on(struct callout *c, int to_ticks, int cpu) 1051{ 1052 return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, cpu); 1053} 1054 1055int 1056callout_schedule(struct callout *c, int to_ticks) 1057{ 1058 return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, c->c_cpu); 1059} 1060 1061int 1062_callout_stop_safe(c, safe) 1063 struct callout *c; 1064 int safe; 1065{ 1066 struct callout_cpu *cc, *old_cc; 1067 struct lock_class *class; 1068 int direct, sq_locked, use_lock; 1069 1070 /* 1071 * Some old subsystems don't hold Giant while running a callout_stop(), 1072 * so just discard this check for the moment. 1073 */ 1074 if (!safe && c->c_lock != NULL) { 1075 if (c->c_lock == &Giant.lock_object) 1076 use_lock = mtx_owned(&Giant); 1077 else { 1078 use_lock = 1; 1079 class = LOCK_CLASS(c->c_lock); 1080 class->lc_assert(c->c_lock, LA_XLOCKED); 1081 } 1082 } else 1083 use_lock = 0; 1084 direct = (c->c_flags & CALLOUT_DIRECT) != 0; 1085 sq_locked = 0; 1086 old_cc = NULL; 1087again: 1088 cc = callout_lock(c); 1089 1090 /* 1091 * If the callout was migrating while the callout cpu lock was 1092 * dropped, just drop the sleepqueue lock and check the states 1093 * again. 
1094 */ 1095 if (sq_locked != 0 && cc != old_cc) { 1096#ifdef SMP 1097 CC_UNLOCK(cc); 1098 sleepq_release(&old_cc->cc_exec_entity[direct].cc_waiting); 1099 sq_locked = 0; 1100 old_cc = NULL; 1101 goto again; 1102#else 1103 panic("migration should not happen"); 1104#endif 1105 } 1106 1107 /* 1108 * If the callout isn't pending, it's not on the queue, so 1109 * don't attempt to remove it from the queue. We can try to 1110 * stop it by other means however. 1111 */ 1112 if (!(c->c_flags & CALLOUT_PENDING)) { 1113 c->c_flags &= ~CALLOUT_ACTIVE; 1114 1115 /* 1116 * If it wasn't on the queue and it isn't the current 1117 * callout, then we can't stop it, so just bail. 1118 */ 1119 if (cc->cc_exec_entity[direct].cc_curr != c) { 1120 CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p", 1121 c, c->c_func, c->c_arg); 1122 CC_UNLOCK(cc); 1123 if (sq_locked) 1124 sleepq_release( 1125 &cc->cc_exec_entity[direct].cc_waiting); 1126 return (0); 1127 } 1128 1129 if (safe) { 1130 /* 1131 * The current callout is running (or just 1132 * about to run) and blocking is allowed, so 1133 * just wait for the current invocation to 1134 * finish. 1135 */ 1136 while (cc->cc_exec_entity[direct].cc_curr == c) { 1137 /* 1138 * Use direct calls to sleepqueue interface 1139 * instead of cv/msleep in order to avoid 1140 * a LOR between cc_lock and sleepqueue 1141 * chain spinlocks. This piece of code 1142 * emulates a msleep_spin() call actually. 1143 * 1144 * If we already have the sleepqueue chain 1145 * locked, then we can safely block. If we 1146 * don't already have it locked, however, 1147 * we have to drop the cc_lock to lock 1148 * it. This opens several races, so we 1149 * restart at the beginning once we have 1150 * both locks. If nothing has changed, then 1151 * we will end up back here with sq_locked 1152 * set. 1153 */ 1154 if (!sq_locked) { 1155 CC_UNLOCK(cc); 1156 sleepq_lock( 1157 &cc->cc_exec_entity[direct].cc_waiting); 1158 sq_locked = 1; 1159 old_cc = cc; 1160 goto again; 1161 } 1162 1163 /* 1164 * Migration could be cancelled here, but 1165 * as long as it is still not sure when it 1166 * will be packed up, just let softclock() 1167 * take care of it. 1168 */ 1169 cc->cc_exec_entity[direct].cc_waiting = true; 1170 DROP_GIANT(); 1171 CC_UNLOCK(cc); 1172 sleepq_add( 1173 &cc->cc_exec_entity[direct].cc_waiting, 1174 &cc->cc_lock.lock_object, "codrain", 1175 SLEEPQ_SLEEP, 0); 1176 sleepq_wait( 1177 &cc->cc_exec_entity[direct].cc_waiting, 1178 0); 1179 sq_locked = 0; 1180 old_cc = NULL; 1181 1182 /* Reacquire locks previously released. */ 1183 PICKUP_GIANT(); 1184 CC_LOCK(cc); 1185 } 1186 } else if (use_lock && 1187 !cc->cc_exec_entity[direct].cc_cancel) { 1188 /* 1189 * The current callout is waiting for its 1190 * lock which we hold. Cancel the callout 1191 * and return. After our caller drops the 1192 * lock, the callout will be skipped in 1193 * softclock(). 
 */
			cc->cc_exec_entity[direct].cc_cancel = true;
			CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
			    c, c->c_func, c->c_arg);
			KASSERT(!cc_cce_migrating(cc, direct),
			    ("callout wrongly scheduled for migration"));
			CC_UNLOCK(cc);
			KASSERT(!sq_locked, ("sleepqueue chain locked"));
			return (1);
		} else if ((c->c_flags & CALLOUT_DFRMIGRATION) != 0) {
			c->c_flags &= ~CALLOUT_DFRMIGRATION;
			CTR3(KTR_CALLOUT, "postponing stop %p func %p arg %p",
			    c, c->c_func, c->c_arg);
			CC_UNLOCK(cc);
			return (1);
		}
		CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
		    c, c->c_func, c->c_arg);
		CC_UNLOCK(cc);
		KASSERT(!sq_locked, ("sleepqueue chain still locked"));
		return (0);
	}
	if (sq_locked)
		sleepq_release(&cc->cc_exec_entity[direct].cc_waiting);

	c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);

	CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
	    c, c->c_func, c->c_arg);
	if ((c->c_flags & CALLOUT_PROCESSED) == 0) {
		if (cc->cc_exec_next_dir == c)
			cc->cc_exec_next_dir = LIST_NEXT(c, c_links.le);
		LIST_REMOVE(c, c_links.le);
	} else
		TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe);
	callout_cc_del(c, cc);

	CC_UNLOCK(cc);
	return (1);
}

void
callout_init(c, mpsafe)
	struct callout *c;
	int mpsafe;
{
	bzero(c, sizeof *c);
	if (mpsafe) {
		c->c_lock = NULL;
		c->c_flags = CALLOUT_RETURNUNLOCKED;
	} else {
		c->c_lock = &Giant.lock_object;
		c->c_flags = 0;
	}
	c->c_cpu = timeout_cpu;
}

void
_callout_init_lock(c, lock, flags)
	struct callout *c;
	struct lock_object *lock;
	int flags;
{
	bzero(c, sizeof *c);
	c->c_lock = lock;
	KASSERT((flags & ~(CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK)) == 0,
	    ("callout_init_lock: bad flags %d", flags));
	KASSERT(lock != NULL || (flags & CALLOUT_RETURNUNLOCKED) == 0,
	    ("callout_init_lock: CALLOUT_RETURNUNLOCKED with no lock"));
	KASSERT(lock == NULL || !(LOCK_CLASS(lock)->lc_flags &
	    (LC_SPINLOCK | LC_SLEEPABLE)), ("%s: invalid lock class",
	    __func__));
	c->c_flags = flags & (CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK);
	c->c_cpu = timeout_cpu;
}

#ifdef APM_FIXUP_CALLTODO
/*
 * Adjust the kernel calltodo timeout list.  This routine is used after
 * an APM resume to recalculate the calltodo timer list values with the
 * number of hz's we have been sleeping.  The next hardclock() will detect
 * that there are fired timers and run softclock() to execute them.
 *
 * Please note, I have not done an exhaustive analysis of what code this
 * might break.  I am motivated to have my select()'s and alarm()'s that
 * have expired during suspend fire upon resume so that the applications
 * which set the timer can do the maintenance the timer was for as close
 * as possible to the originally intended time.  Testing this code for a
 * week showed that resuming from a suspend resulted in 22 to 25 timers
 * firing, which seemed independent of whether the suspend was 2 hours or
 * 2 days.  Your mileage may vary.   - Ken Key <key@cs.utk.edu>
 */
void
adjust_timeout_calltodo(time_change)
	struct timeval *time_change;
{
	register struct callout *p;
	unsigned long delta_ticks;

	/*
	 * How many ticks were we asleep?
	 * (stolen from tvtohz()).
1296 */ 1297 1298 /* Don't do anything */ 1299 if (time_change->tv_sec < 0) 1300 return; 1301 else if (time_change->tv_sec <= LONG_MAX / 1000000) 1302 delta_ticks = (time_change->tv_sec * 1000000 + 1303 time_change->tv_usec + (tick - 1)) / tick + 1; 1304 else if (time_change->tv_sec <= LONG_MAX / hz) 1305 delta_ticks = time_change->tv_sec * hz + 1306 (time_change->tv_usec + (tick - 1)) / tick + 1; 1307 else 1308 delta_ticks = LONG_MAX; 1309 1310 if (delta_ticks > INT_MAX) 1311 delta_ticks = INT_MAX; 1312 1313 /* 1314 * Now rip through the timer calltodo list looking for timers 1315 * to expire. 1316 */ 1317 1318 /* don't collide with softclock() */ 1319 CC_LOCK(cc); 1320 for (p = calltodo.c_next; p != NULL; p = p->c_next) { 1321 p->c_time -= delta_ticks; 1322 1323 /* Break if the timer had more time on it than delta_ticks */ 1324 if (p->c_time > 0) 1325 break; 1326 1327 /* take back the ticks the timer didn't use (p->c_time <= 0) */ 1328 delta_ticks = -p->c_time; 1329 } 1330 CC_UNLOCK(cc); 1331 1332 return; 1333} 1334#endif /* APM_FIXUP_CALLTODO */ 1335 1336static int 1337flssbt(sbintime_t sbt) 1338{ 1339 1340 sbt += (uint64_t)sbt >> 1; 1341 if (sizeof(long) >= sizeof(sbintime_t)) 1342 return (flsl(sbt)); 1343 if (sbt >= SBT_1S) 1344 return (flsl(((uint64_t)sbt) >> 32) + 32); 1345 return (flsl(sbt)); 1346} 1347 1348/* 1349 * Dump immediate statistic snapshot of the scheduled callouts. 1350 */ 1351static int 1352sysctl_kern_callout_stat(SYSCTL_HANDLER_ARGS) 1353{ 1354 struct callout *tmp; 1355 struct callout_cpu *cc; 1356 struct callout_list *sc; 1357 sbintime_t maxpr, maxt, medpr, medt, now, spr, st, t; 1358 int ct[64], cpr[64], ccpbk[32]; 1359 int error, val, i, count, tcum, pcum, maxc, c, medc; 1360#ifdef SMP 1361 int cpu; 1362#endif 1363 1364 val = 0; 1365 error = sysctl_handle_int(oidp, &val, 0, req); 1366 if (error != 0 || req->newptr == NULL) 1367 return (error); 1368 count = maxc = 0; 1369 st = spr = maxt = maxpr = 0; 1370 bzero(ccpbk, sizeof(ccpbk)); 1371 bzero(ct, sizeof(ct)); 1372 bzero(cpr, sizeof(cpr)); 1373 now = sbinuptime(); 1374#ifdef SMP 1375 CPU_FOREACH(cpu) { 1376 cc = CC_CPU(cpu); 1377#else 1378 cc = CC_CPU(timeout_cpu); 1379#endif 1380 CC_LOCK(cc); 1381 for (i = 0; i < callwheelsize; i++) { 1382 sc = &cc->cc_callwheel[i]; 1383 c = 0; 1384 LIST_FOREACH(tmp, sc, c_links.le) { 1385 c++; 1386 t = tmp->c_time - now; 1387 if (t < 0) 1388 t = 0; 1389 st += t / SBT_1US; 1390 spr += tmp->c_precision / SBT_1US; 1391 if (t > maxt) 1392 maxt = t; 1393 if (tmp->c_precision > maxpr) 1394 maxpr = tmp->c_precision; 1395 ct[flssbt(t)]++; 1396 cpr[flssbt(tmp->c_precision)]++; 1397 } 1398 if (c > maxc) 1399 maxc = c; 1400 ccpbk[fls(c + c / 2)]++; 1401 count += c; 1402 } 1403 CC_UNLOCK(cc); 1404#ifdef SMP 1405 } 1406#endif 1407 1408 for (i = 0, tcum = 0; i < 64 && tcum < count / 2; i++) 1409 tcum += ct[i]; 1410 medt = (i >= 2) ? (((sbintime_t)1) << (i - 2)) : 0; 1411 for (i = 0, pcum = 0; i < 64 && pcum < count / 2; i++) 1412 pcum += cpr[i]; 1413 medpr = (i >= 2) ? (((sbintime_t)1) << (i - 2)) : 0; 1414 for (i = 0, c = 0; i < 32 && c < count / 2; i++) 1415 c += ccpbk[i]; 1416 medc = (i >= 2) ? 
(1 << (i - 2)) : 0; 1417 1418 printf("Scheduled callouts statistic snapshot:\n"); 1419 printf(" Callouts: %6d Buckets: %6d*%-3d Bucket size: 0.%06ds\n", 1420 count, callwheelsize, mp_ncpus, 1000000 >> CC_HASH_SHIFT); 1421 printf(" C/Bk: med %5d avg %6d.%06jd max %6d\n", 1422 medc, 1423 count / callwheelsize / mp_ncpus, 1424 (uint64_t)count * 1000000 / callwheelsize / mp_ncpus % 1000000, 1425 maxc); 1426 printf(" Time: med %5jd.%06jds avg %6jd.%06jds max %6jd.%06jds\n", 1427 medt / SBT_1S, (medt & 0xffffffff) * 1000000 >> 32, 1428 (st / count) / 1000000, (st / count) % 1000000, 1429 maxt / SBT_1S, (maxt & 0xffffffff) * 1000000 >> 32); 1430 printf(" Prec: med %5jd.%06jds avg %6jd.%06jds max %6jd.%06jds\n", 1431 medpr / SBT_1S, (medpr & 0xffffffff) * 1000000 >> 32, 1432 (spr / count) / 1000000, (spr / count) % 1000000, 1433 maxpr / SBT_1S, (maxpr & 0xffffffff) * 1000000 >> 32); 1434 printf(" Distribution: \tbuckets\t time\t tcum\t" 1435 " prec\t pcum\n"); 1436 for (i = 0, tcum = pcum = 0; i < 64; i++) { 1437 if (ct[i] == 0 && cpr[i] == 0) 1438 continue; 1439 t = (i != 0) ? (((sbintime_t)1) << (i - 1)) : 0; 1440 tcum += ct[i]; 1441 pcum += cpr[i]; 1442 printf(" %10jd.%06jds\t 2**%d\t%7d\t%7d\t%7d\t%7d\n", 1443 t / SBT_1S, (t & 0xffffffff) * 1000000 >> 32, 1444 i - 1 - (32 - CC_HASH_SHIFT), 1445 ct[i], tcum, cpr[i], pcum); 1446 } 1447 return (error); 1448} 1449SYSCTL_PROC(_kern, OID_AUTO, callout_stat, 1450 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, 1451 0, 0, sysctl_kern_callout_stat, "I", 1452 "Dump immediate statistic snapshot of the scheduled callouts"); 1453
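
/*
 * The following is NOT part of kern_timeout.c.  It is a minimal consumer-side
 * sketch of the callout(9) interfaces implemented above, assuming the
 * FreeBSD 10 <sys/callout.h> and <sys/mutex.h> declarations at this revision.
 * The softc, lock, and function names (example_softc, example_timer,
 * example_attach, example_detach) are hypothetical and for illustration only.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/time.h>

/* Hypothetical per-device state protected by a mutex. */
struct example_softc {
	struct mtx	ex_mtx;
	struct callout	ex_timer;
	int		ex_ticks_fired;
};

static void
example_timer(void *arg)
{
	struct example_softc *sc = arg;

	/* ex_mtx is held on entry because of callout_init_mtx() below. */
	mtx_assert(&sc->ex_mtx, MA_OWNED);
	sc->ex_ticks_fired++;

	/*
	 * Periodic operation: rearm for ~100 ms, allowing ~10 ms of
	 * precision slack so callout_process() may aggregate this event
	 * with neighbouring ones.
	 */
	callout_reset_sbt(&sc->ex_timer, 100 * SBT_1MS, 10 * SBT_1MS,
	    example_timer, sc, 0);
}

static void
example_attach(struct example_softc *sc)
{
	mtx_init(&sc->ex_mtx, "example", NULL, MTX_DEF);
	/*
	 * Associate the callout with the mutex; softclock_call_cc() will
	 * acquire ex_mtx before invoking example_timer().
	 */
	callout_init_mtx(&sc->ex_timer, &sc->ex_mtx, 0);

	mtx_lock(&sc->ex_mtx);
	callout_reset_sbt(&sc->ex_timer, 100 * SBT_1MS, 10 * SBT_1MS,
	    example_timer, sc, 0);
	mtx_unlock(&sc->ex_mtx);
}

static void
example_detach(struct example_softc *sc)
{
	/*
	 * callout_drain() may sleep until a running handler returns, so it
	 * must be called without ex_mtx held.
	 */
	callout_drain(&sc->ex_timer);
	mtx_destroy(&sc->ex_mtx);
}

/*
 * Design note: callout_stop() only cancels a pending (not yet running)
 * callout and never sleeps, which is why the teardown path above uses
 * callout_drain() instead.  For orientation on the wheel geometry defined in
 * this file: callout_hash() discards the low 32 - CC_HASH_SHIFT = 24 bits of
 * the 32.32 fixed-point time, so adjacent buckets are 2^-8 s (about 3.9 ms)
 * apart, matching the "Bucket size" line printed by the kern.callout_stat
 * sysctl handler.
 */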