1104964Sjeff/*- 2104964Sjeff * Copyright (c) 1982, 1986, 1990, 1991, 1993 3104964Sjeff * The Regents of the University of California. All rights reserved. 4104964Sjeff * (c) UNIX System Laboratories, Inc. 5104964Sjeff * All or some portions of this file are derived from material licensed 6104964Sjeff * to the University of California by American Telephone and Telegraph 7104964Sjeff * Co. or Unix System Laboratories, Inc. and are reproduced herein with 8104964Sjeff * the permission of UNIX System Laboratories, Inc. 9104964Sjeff * 10104964Sjeff * Redistribution and use in source and binary forms, with or without 11104964Sjeff * modification, are permitted provided that the following conditions 12104964Sjeff * are met: 13104964Sjeff * 1. Redistributions of source code must retain the above copyright 14104964Sjeff * notice, this list of conditions and the following disclaimer. 15104964Sjeff * 2. Redistributions in binary form must reproduce the above copyright 16104964Sjeff * notice, this list of conditions and the following disclaimer in the 17104964Sjeff * documentation and/or other materials provided with the distribution. 18104964Sjeff * 4. Neither the name of the University nor the names of its contributors 19104964Sjeff * may be used to endorse or promote products derived from this software 20104964Sjeff * without specific prior written permission. 21104964Sjeff * 22104964Sjeff * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 23104964Sjeff * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24104964Sjeff * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25104964Sjeff * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 26104964Sjeff * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27104964Sjeff * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 28104964Sjeff * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 29104964Sjeff * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30104964Sjeff * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 31104964Sjeff * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32104964Sjeff * SUCH DAMAGE. 33104964Sjeff */ 34104964Sjeff 35116182Sobrien#include <sys/cdefs.h> 36116182Sobrien__FBSDID("$FreeBSD$"); 37116182Sobrien 38147565Speter#include "opt_hwpmc_hooks.h" 39177418Sjeff#include "opt_sched.h" 40179297Sjb#include "opt_kdtrace.h" 41147565Speter 42104964Sjeff#include <sys/param.h> 43104964Sjeff#include <sys/systm.h> 44176750Smarcel#include <sys/cpuset.h> 45104964Sjeff#include <sys/kernel.h> 46104964Sjeff#include <sys/ktr.h> 47104964Sjeff#include <sys/lock.h> 48123871Sjhb#include <sys/kthread.h> 49104964Sjeff#include <sys/mutex.h> 50104964Sjeff#include <sys/proc.h> 51104964Sjeff#include <sys/resourcevar.h> 52104964Sjeff#include <sys/sched.h> 53235459Srstone#include <sys/sdt.h> 54104964Sjeff#include <sys/smp.h> 55104964Sjeff#include <sys/sysctl.h> 56104964Sjeff#include <sys/sx.h> 57139453Sjhb#include <sys/turnstile.h> 58161599Sdavidxu#include <sys/umtx.h> 59160039Sobrien#include <machine/pcb.h> 60134689Sjulian#include <machine/smp.h> 61104964Sjeff 62145256Sjkoshy#ifdef HWPMC_HOOKS 63145256Sjkoshy#include <sys/pmckern.h> 64145256Sjkoshy#endif 65145256Sjkoshy 66179297Sjb#ifdef KDTRACE_HOOKS 67179297Sjb#include <sys/dtrace_bsd.h> 68179297Sjbint dtrace_vtime_active; 69179297Sjbdtrace_vtime_switch_func_t dtrace_vtime_switch_func; 70179297Sjb#endif 71179297Sjb 72107135Sjeff/* 73107135Sjeff * INVERSE_ESTCPU_WEIGHT is only suitable for statclock() frequencies in 74107135Sjeff * the range 100-256 Hz (approximately). 75107135Sjeff */ 76107135Sjeff#define ESTCPULIM(e) \ 77107135Sjeff min((e), INVERSE_ESTCPU_WEIGHT * (NICE_WEIGHT * (PRIO_MAX - PRIO_MIN) - \ 78107135Sjeff RQ_PPQ) + INVERSE_ESTCPU_WEIGHT - 1) 79122355Sbde#ifdef SMP 80122355Sbde#define INVERSE_ESTCPU_WEIGHT (8 * smp_cpus) 81122355Sbde#else 82107135Sjeff#define INVERSE_ESTCPU_WEIGHT 8 /* 1 / (priorities per estcpu level). */ 83122355Sbde#endif 84107135Sjeff#define NICE_WEIGHT 1 /* Priorities per nice level. */ 85107135Sjeff 86187679Sjeff#define TS_NAME_LEN (MAXCOMLEN + sizeof(" td ") + sizeof(__XSTRING(UINT_MAX))) 87187357Sjeff 88134791Sjulian/* 89163709Sjb * The schedulable entity that runs a context. 90164936Sjulian * This is an extension to the thread structure and is tailored to 91164936Sjulian * the requirements of this scheduler 92163709Sjb */ 93164936Sjulianstruct td_sched { 94164936Sjulian fixpt_t ts_pctcpu; /* (j) %cpu during p_swtime. */ 95164936Sjulian int ts_cpticks; /* (j) Ticks of cpu time. */ 96172264Sjeff int ts_slptime; /* (j) Seconds !RUNNING. */ 97239153Smav int ts_slice; /* Remaining part of time slice. */ 98180923Sjhb int ts_flags; 99164936Sjulian struct runq *ts_runq; /* runq the thread is currently on */ 100187357Sjeff#ifdef KTR 101187357Sjeff char ts_name[TS_NAME_LEN]; 102187357Sjeff#endif 103109145Sjeff}; 104109145Sjeff 105134791Sjulian/* flags kept in td_flags */ 106164936Sjulian#define TDF_DIDRUN TDF_SCHED0 /* thread actually ran. */ 107177435Sjeff#define TDF_BOUND TDF_SCHED1 /* Bound to one CPU. */ 108239157Smav#define TDF_SLICEEND TDF_SCHED2 /* Thread time slice is over. */ 109134791Sjulian 110180923Sjhb/* flags kept in ts_flags */ 111180923Sjhb#define TSF_AFFINITY 0x0001 /* Has a non-"full" CPU set. */ 112180923Sjhb 113164936Sjulian#define SKE_RUNQ_PCPU(ts) \ 114164936Sjulian ((ts)->ts_runq != 0 && (ts)->ts_runq != &runq) 115124955Sjeff 116180923Sjhb#define THREAD_CAN_SCHED(td, cpu) \ 117180923Sjhb CPU_ISSET((cpu), &(td)->td_cpuset->cs_mask) 118180923Sjhb 119164936Sjulianstatic struct td_sched td_sched0; 120171488Sjeffstruct mtx sched_lock; 121134791Sjulian 122239185Smavstatic int realstathz = 127; /* stathz is sometimes 0 and run off of hz. */ 123125288Sjeffstatic int sched_tdcnt; /* Total runnable threads in the system. */ 124239185Smavstatic int sched_slice = 12; /* Thread run time before rescheduling. */ 125104964Sjeff 126124955Sjeffstatic void setup_runqs(void); 127123871Sjhbstatic void schedcpu(void); 128124955Sjeffstatic void schedcpu_thread(void); 129139453Sjhbstatic void sched_priority(struct thread *td, u_char prio); 130104964Sjeffstatic void sched_setup(void *dummy); 131104964Sjeffstatic void maybe_resched(struct thread *td); 132163709Sjbstatic void updatepri(struct thread *td); 133163709Sjbstatic void resetpriority(struct thread *td); 134163709Sjbstatic void resetpriority_thread(struct thread *td); 135134694Sjulian#ifdef SMP 136180923Sjhbstatic int sched_pickcpu(struct thread *td); 137180879Sjhbstatic int forward_wakeup(int cpunum); 138180879Sjhbstatic void kick_other_cpu(int pri, int cpuid); 139134694Sjulian#endif 140104964Sjeff 141124955Sjeffstatic struct kproc_desc sched_kp = { 142124955Sjeff "schedcpu", 143124955Sjeff schedcpu_thread, 144124955Sjeff NULL 145124955Sjeff}; 146253604SavgSYSINIT(schedcpu, SI_SUB_LAST, SI_ORDER_FIRST, kproc_start, 147177253Srwatson &sched_kp); 148177253SrwatsonSYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL); 149104964Sjeff 150239153Smavstatic void sched_initticks(void *dummy); 151239153SmavSYSINIT(sched_initticks, SI_SUB_CLOCKS, SI_ORDER_THIRD, sched_initticks, 152239153Smav NULL); 153239153Smav 154104964Sjeff/* 155104964Sjeff * Global run queue. 156104964Sjeff */ 157104964Sjeffstatic struct runq runq; 158104964Sjeff 159124955Sjeff#ifdef SMP 160124955Sjeff/* 161124955Sjeff * Per-CPU run queues 162124955Sjeff */ 163124955Sjeffstatic struct runq runq_pcpu[MAXCPU]; 164180923Sjhblong runq_length[MAXCPU]; 165222001Sattilio 166222813Sattiliostatic cpuset_t idle_cpus_mask; 167124955Sjeff#endif 168124955Sjeff 169212455Smavstruct pcpuidlestat { 170212455Smav u_int idlecalls; 171212455Smav u_int oldidlecalls; 172212455Smav}; 173215701Sdimstatic DPCPU_DEFINE(struct pcpuidlestat, idlestat); 174212455Smav 175124955Sjeffstatic void 176124955Sjeffsetup_runqs(void) 177124955Sjeff{ 178124955Sjeff#ifdef SMP 179124955Sjeff int i; 180124955Sjeff 181124955Sjeff for (i = 0; i < MAXCPU; ++i) 182124955Sjeff runq_init(&runq_pcpu[i]); 183124955Sjeff#endif 184124955Sjeff 185124955Sjeff runq_init(&runq); 186124955Sjeff} 187124955Sjeff 188239185Smavstatic int 189239185Smavsysctl_kern_quantum(SYSCTL_HANDLER_ARGS) 190239185Smav{ 191239185Smav int error, new_val, period; 192239185Smav 193239185Smav period = 1000000 / realstathz; 194239185Smav new_val = period * sched_slice; 195239185Smav error = sysctl_handle_int(oidp, &new_val, 0, req); 196239196Smav if (error != 0 || req->newptr == NULL) 197239185Smav return (error); 198239185Smav if (new_val <= 0) 199239185Smav return (EINVAL); 200239196Smav sched_slice = imax(1, (new_val + period / 2) / period); 201239196Smav hogticks = imax(1, (2 * hz * sched_slice + realstathz / 2) / 202239196Smav realstathz); 203239185Smav return (0); 204239185Smav} 205239185Smav 206132589SscottlSYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RD, 0, "Scheduler"); 207130881Sscottl 208132589SscottlSYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "4BSD", 0, 209132589Sscottl "Scheduler name"); 210239185SmavSYSCTL_PROC(_kern_sched, OID_AUTO, quantum, CTLTYPE_INT | CTLFLAG_RW, 211239185Smav NULL, 0, sysctl_kern_quantum, "I", 212239196Smav "Quantum for timeshare threads in microseconds"); 213239153SmavSYSCTL_INT(_kern_sched, OID_AUTO, slice, CTLFLAG_RW, &sched_slice, 0, 214239196Smav "Quantum for timeshare threads in stathz ticks"); 215134693Sjulian#ifdef SMP 216134688Sjulian/* Enable forwarding of wakeups to all other cpus */ 217227309Sedstatic SYSCTL_NODE(_kern_sched, OID_AUTO, ipiwakeup, CTLFLAG_RD, NULL, 218227309Sed "Kernel SMP"); 219134688Sjulian 220177419Sjeffstatic int runq_fuzz = 1; 221177419SjeffSYSCTL_INT(_kern_sched, OID_AUTO, runq_fuzz, CTLFLAG_RW, &runq_fuzz, 0, ""); 222177419Sjeff 223134792Sjulianstatic int forward_wakeup_enabled = 1; 224134688SjulianSYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, enabled, CTLFLAG_RW, 225134688Sjulian &forward_wakeup_enabled, 0, 226134688Sjulian "Forwarding of wakeup to idle CPUs"); 227134688Sjulian 228134688Sjulianstatic int forward_wakeups_requested = 0; 229134688SjulianSYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, requested, CTLFLAG_RD, 230134688Sjulian &forward_wakeups_requested, 0, 231134688Sjulian "Requests for Forwarding of wakeup to idle CPUs"); 232134688Sjulian 233134688Sjulianstatic int forward_wakeups_delivered = 0; 234134688SjulianSYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, delivered, CTLFLAG_RD, 235134688Sjulian &forward_wakeups_delivered, 0, 236134688Sjulian "Completed Forwarding of wakeup to idle CPUs"); 237134688Sjulian 238134792Sjulianstatic int forward_wakeup_use_mask = 1; 239134688SjulianSYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, usemask, CTLFLAG_RW, 240134688Sjulian &forward_wakeup_use_mask, 0, 241134688Sjulian "Use the mask of idle cpus"); 242134688Sjulian 243134688Sjulianstatic int forward_wakeup_use_loop = 0; 244134688SjulianSYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, useloop, CTLFLAG_RW, 245134688Sjulian &forward_wakeup_use_loop, 0, 246134688Sjulian "Use a loop to find idle cpus"); 247134688Sjulian 248134693Sjulian#endif 249164936Sjulian#if 0 250135051Sjulianstatic int sched_followon = 0; 251135051SjulianSYSCTL_INT(_kern_sched, OID_AUTO, followon, CTLFLAG_RW, 252135051Sjulian &sched_followon, 0, 253135051Sjulian "allow threads to share a quantum"); 254163709Sjb#endif 255135051Sjulian 256235459SrstoneSDT_PROVIDER_DEFINE(sched); 257235459Srstone 258260817SavgSDT_PROBE_DEFINE3(sched, , , change__pri, "struct thread *", 259235459Srstone "struct proc *", "uint8_t"); 260260817SavgSDT_PROBE_DEFINE3(sched, , , dequeue, "struct thread *", 261235459Srstone "struct proc *", "void *"); 262260817SavgSDT_PROBE_DEFINE4(sched, , , enqueue, "struct thread *", 263235459Srstone "struct proc *", "void *", "int"); 264260817SavgSDT_PROBE_DEFINE4(sched, , , lend__pri, "struct thread *", 265235459Srstone "struct proc *", "uint8_t", "struct thread *"); 266260817SavgSDT_PROBE_DEFINE2(sched, , , load__change, "int", "int"); 267260817SavgSDT_PROBE_DEFINE2(sched, , , off__cpu, "struct thread *", 268235459Srstone "struct proc *"); 269260817SavgSDT_PROBE_DEFINE(sched, , , on__cpu); 270260817SavgSDT_PROBE_DEFINE(sched, , , remain__cpu); 271260817SavgSDT_PROBE_DEFINE2(sched, , , surrender, "struct thread *", 272235459Srstone "struct proc *"); 273235459Srstone 274139317Sjeffstatic __inline void 275139317Sjeffsched_load_add(void) 276139317Sjeff{ 277187357Sjeff 278139317Sjeff sched_tdcnt++; 279187357Sjeff KTR_COUNTER0(KTR_SCHED, "load", "global load", sched_tdcnt); 280260817Savg SDT_PROBE2(sched, , , load__change, NOCPU, sched_tdcnt); 281139317Sjeff} 282139317Sjeff 283139317Sjeffstatic __inline void 284139317Sjeffsched_load_rem(void) 285139317Sjeff{ 286187357Sjeff 287139317Sjeff sched_tdcnt--; 288187357Sjeff KTR_COUNTER0(KTR_SCHED, "load", "global load", sched_tdcnt); 289260817Savg SDT_PROBE2(sched, , , load__change, NOCPU, sched_tdcnt); 290139317Sjeff} 291104964Sjeff/* 292104964Sjeff * Arrange to reschedule if necessary, taking the priorities and 293104964Sjeff * schedulers into account. 294104964Sjeff */ 295104964Sjeffstatic void 296104964Sjeffmaybe_resched(struct thread *td) 297104964Sjeff{ 298104964Sjeff 299170293Sjeff THREAD_LOCK_ASSERT(td, MA_OWNED); 300134791Sjulian if (td->td_priority < curthread->td_priority) 301111032Sjulian curthread->td_flags |= TDF_NEEDRESCHED; 302104964Sjeff} 303104964Sjeff 304104964Sjeff/* 305177419Sjeff * This function is called when a thread is about to be put on run queue 306177419Sjeff * because it has been made runnable or its priority has been adjusted. It 307177419Sjeff * determines if the new thread should be immediately preempted to. If so, 308177419Sjeff * it switches to it and eventually returns true. If not, it returns false 309177419Sjeff * so that the caller may place the thread on an appropriate run queue. 310177419Sjeff */ 311177419Sjeffint 312177419Sjeffmaybe_preempt(struct thread *td) 313177419Sjeff{ 314177419Sjeff#ifdef PREEMPTION 315177419Sjeff struct thread *ctd; 316177419Sjeff int cpri, pri; 317177419Sjeff 318177419Sjeff /* 319177419Sjeff * The new thread should not preempt the current thread if any of the 320177419Sjeff * following conditions are true: 321177419Sjeff * 322177419Sjeff * - The kernel is in the throes of crashing (panicstr). 323177419Sjeff * - The current thread has a higher (numerically lower) or 324177419Sjeff * equivalent priority. Note that this prevents curthread from 325177419Sjeff * trying to preempt to itself. 326177419Sjeff * - It is too early in the boot for context switches (cold is set). 327177419Sjeff * - The current thread has an inhibitor set or is in the process of 328177419Sjeff * exiting. In this case, the current thread is about to switch 329177419Sjeff * out anyways, so there's no point in preempting. If we did, 330177419Sjeff * the current thread would not be properly resumed as well, so 331177419Sjeff * just avoid that whole landmine. 332177419Sjeff * - If the new thread's priority is not a realtime priority and 333177419Sjeff * the current thread's priority is not an idle priority and 334177419Sjeff * FULL_PREEMPTION is disabled. 335177419Sjeff * 336177419Sjeff * If all of these conditions are false, but the current thread is in 337177419Sjeff * a nested critical section, then we have to defer the preemption 338177419Sjeff * until we exit the critical section. Otherwise, switch immediately 339177419Sjeff * to the new thread. 340177419Sjeff */ 341177419Sjeff ctd = curthread; 342177419Sjeff THREAD_LOCK_ASSERT(td, MA_OWNED); 343177419Sjeff KASSERT((td->td_inhibitors == 0), 344177419Sjeff ("maybe_preempt: trying to run inhibited thread")); 345177419Sjeff pri = td->td_priority; 346177419Sjeff cpri = ctd->td_priority; 347177419Sjeff if (panicstr != NULL || pri >= cpri || cold /* || dumping */ || 348177419Sjeff TD_IS_INHIBITED(ctd)) 349177419Sjeff return (0); 350177419Sjeff#ifndef FULL_PREEMPTION 351177419Sjeff if (pri > PRI_MAX_ITHD && cpri < PRI_MIN_IDLE) 352177419Sjeff return (0); 353177419Sjeff#endif 354177419Sjeff 355177419Sjeff if (ctd->td_critnest > 1) { 356177419Sjeff CTR1(KTR_PROC, "maybe_preempt: in critical section %d", 357177419Sjeff ctd->td_critnest); 358177419Sjeff ctd->td_owepreempt = 1; 359177419Sjeff return (0); 360177419Sjeff } 361177419Sjeff /* 362177419Sjeff * Thread is runnable but not yet put on system run queue. 363177419Sjeff */ 364177419Sjeff MPASS(ctd->td_lock == td->td_lock); 365177419Sjeff MPASS(TD_ON_RUNQ(td)); 366177419Sjeff TD_SET_RUNNING(td); 367177419Sjeff CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td, 368177419Sjeff td->td_proc->p_pid, td->td_name); 369178272Sjeff mi_switch(SW_INVOL | SW_PREEMPT | SWT_PREEMPT, td); 370177419Sjeff /* 371177419Sjeff * td's lock pointer may have changed. We have to return with it 372177419Sjeff * locked. 373177419Sjeff */ 374177419Sjeff spinlock_enter(); 375177419Sjeff thread_unlock(ctd); 376177419Sjeff thread_lock(td); 377177419Sjeff spinlock_exit(); 378177419Sjeff return (1); 379177419Sjeff#else 380177419Sjeff return (0); 381177419Sjeff#endif 382177419Sjeff} 383177419Sjeff 384177419Sjeff/* 385104964Sjeff * Constants for digital decay and forget: 386163709Sjb * 90% of (td_estcpu) usage in 5 * loadav time 387164936Sjulian * 95% of (ts_pctcpu) usage in 60 seconds (load insensitive) 388104964Sjeff * Note that, as ps(1) mentions, this can let percentages 389104964Sjeff * total over 100% (I've seen 137.9% for 3 processes). 390104964Sjeff * 391163709Sjb * Note that schedclock() updates td_estcpu and p_cpticks asynchronously. 392104964Sjeff * 393163709Sjb * We wish to decay away 90% of td_estcpu in (5 * loadavg) seconds. 394104964Sjeff * That is, the system wants to compute a value of decay such 395104964Sjeff * that the following for loop: 396104964Sjeff * for (i = 0; i < (5 * loadavg); i++) 397163709Sjb * td_estcpu *= decay; 398104964Sjeff * will compute 399163709Sjb * td_estcpu *= 0.1; 400104964Sjeff * for all values of loadavg: 401104964Sjeff * 402104964Sjeff * Mathematically this loop can be expressed by saying: 403104964Sjeff * decay ** (5 * loadavg) ~= .1 404104964Sjeff * 405104964Sjeff * The system computes decay as: 406104964Sjeff * decay = (2 * loadavg) / (2 * loadavg + 1) 407104964Sjeff * 408104964Sjeff * We wish to prove that the system's computation of decay 409104964Sjeff * will always fulfill the equation: 410104964Sjeff * decay ** (5 * loadavg) ~= .1 411104964Sjeff * 412104964Sjeff * If we compute b as: 413104964Sjeff * b = 2 * loadavg 414104964Sjeff * then 415104964Sjeff * decay = b / (b + 1) 416104964Sjeff * 417104964Sjeff * We now need to prove two things: 418104964Sjeff * 1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1) 419104964Sjeff * 2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg) 420104964Sjeff * 421104964Sjeff * Facts: 422104964Sjeff * For x close to zero, exp(x) =~ 1 + x, since 423104964Sjeff * exp(x) = 0! + x**1/1! + x**2/2! + ... . 424104964Sjeff * therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b. 425104964Sjeff * For x close to zero, ln(1+x) =~ x, since 426104964Sjeff * ln(1+x) = x - x**2/2 + x**3/3 - ... -1 < x < 1 427104964Sjeff * therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1). 428104964Sjeff * ln(.1) =~ -2.30 429104964Sjeff * 430104964Sjeff * Proof of (1): 431104964Sjeff * Solve (factor)**(power) =~ .1 given power (5*loadav): 432104964Sjeff * solving for factor, 433104964Sjeff * ln(factor) =~ (-2.30/5*loadav), or 434104964Sjeff * factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) = 435104964Sjeff * exp(-1/b) =~ (b-1)/b =~ b/(b+1). QED 436104964Sjeff * 437104964Sjeff * Proof of (2): 438104964Sjeff * Solve (factor)**(power) =~ .1 given factor == (b/(b+1)): 439104964Sjeff * solving for power, 440104964Sjeff * power*ln(b/(b+1)) =~ -2.30, or 441104964Sjeff * power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav. QED 442104964Sjeff * 443104964Sjeff * Actual power values for the implemented algorithm are as follows: 444104964Sjeff * loadav: 1 2 3 4 445104964Sjeff * power: 5.68 10.32 14.94 19.55 446104964Sjeff */ 447104964Sjeff 448104964Sjeff/* calculations for digital decay to forget 90% of usage in 5*loadav sec */ 449104964Sjeff#define loadfactor(loadav) (2 * (loadav)) 450104964Sjeff#define decay_cpu(loadfac, cpu) (((loadfac) * (cpu)) / ((loadfac) + FSCALE)) 451104964Sjeff 452164936Sjulian/* decay 95% of `ts_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */ 453104964Sjeffstatic fixpt_t ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */ 454217370SmdfSYSCTL_UINT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, ""); 455104964Sjeff 456104964Sjeff/* 457104964Sjeff * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the 458104964Sjeff * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below 459104964Sjeff * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT). 460104964Sjeff * 461104964Sjeff * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used: 462104964Sjeff * 1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits). 463104964Sjeff * 464104964Sjeff * If you don't want to bother with the faster/more-accurate formula, you 465104964Sjeff * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate 466104964Sjeff * (more general) method of calculating the %age of CPU used by a process. 467104964Sjeff */ 468104964Sjeff#define CCPU_SHIFT 11 469104964Sjeff 470104964Sjeff/* 471104964Sjeff * Recompute process priorities, every hz ticks. 472104964Sjeff * MP-safe, called without the Giant mutex. 473104964Sjeff */ 474104964Sjeff/* ARGSUSED */ 475104964Sjeffstatic void 476123871Sjhbschedcpu(void) 477104964Sjeff{ 478104964Sjeff register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]); 479104964Sjeff struct thread *td; 480104964Sjeff struct proc *p; 481164936Sjulian struct td_sched *ts; 482239153Smav int awake; 483104964Sjeff 484104964Sjeff sx_slock(&allproc_lock); 485104964Sjeff FOREACH_PROC_IN_SYSTEM(p) { 486177368Sjeff PROC_LOCK(p); 487220390Sjhb if (p->p_state == PRS_NEW) { 488220390Sjhb PROC_UNLOCK(p); 489220390Sjhb continue; 490220390Sjhb } 491180879Sjhb FOREACH_THREAD_IN_PROC(p, td) { 492104964Sjeff awake = 0; 493170293Sjeff thread_lock(td); 494164936Sjulian ts = td->td_sched; 495163709Sjb /* 496163709Sjb * Increment sleep time (if sleeping). We 497163709Sjb * ignore overflow, as above. 498163709Sjb */ 499163709Sjb /* 500164936Sjulian * The td_sched slptimes are not touched in wakeup 501164936Sjulian * because the thread may not HAVE everything in 502164936Sjulian * memory? XXX I think this is out of date. 503163709Sjb */ 504166188Sjeff if (TD_ON_RUNQ(td)) { 505163709Sjb awake = 1; 506177435Sjeff td->td_flags &= ~TDF_DIDRUN; 507166188Sjeff } else if (TD_IS_RUNNING(td)) { 508163709Sjb awake = 1; 509177435Sjeff /* Do not clear TDF_DIDRUN */ 510177435Sjeff } else if (td->td_flags & TDF_DIDRUN) { 511163709Sjb awake = 1; 512177435Sjeff td->td_flags &= ~TDF_DIDRUN; 513163709Sjb } 514163709Sjb 515163709Sjb /* 516164936Sjulian * ts_pctcpu is only for ps and ttyinfo(). 517163709Sjb */ 518164936Sjulian ts->ts_pctcpu = (ts->ts_pctcpu * ccpu) >> FSHIFT; 519163709Sjb /* 520164936Sjulian * If the td_sched has been idle the entire second, 521163709Sjb * stop recalculating its priority until 522163709Sjb * it wakes up. 523163709Sjb */ 524164936Sjulian if (ts->ts_cpticks != 0) { 525163709Sjb#if (FSHIFT >= CCPU_SHIFT) 526164936Sjulian ts->ts_pctcpu += (realstathz == 100) 527164936Sjulian ? ((fixpt_t) ts->ts_cpticks) << 528164936Sjulian (FSHIFT - CCPU_SHIFT) : 529164936Sjulian 100 * (((fixpt_t) ts->ts_cpticks) 530164936Sjulian << (FSHIFT - CCPU_SHIFT)) / realstathz; 531163709Sjb#else 532164936Sjulian ts->ts_pctcpu += ((FSCALE - ccpu) * 533164936Sjulian (ts->ts_cpticks * 534164936Sjulian FSCALE / realstathz)) >> FSHIFT; 535163709Sjb#endif 536164936Sjulian ts->ts_cpticks = 0; 537164267Sdavidxu } 538180879Sjhb /* 539163709Sjb * If there are ANY running threads in this process, 540104964Sjeff * then don't count it as sleeping. 541180879Sjhb * XXX: this is broken. 542104964Sjeff */ 543104964Sjeff if (awake) { 544172264Sjeff if (ts->ts_slptime > 1) { 545104964Sjeff /* 546104964Sjeff * In an ideal world, this should not 547104964Sjeff * happen, because whoever woke us 548104964Sjeff * up from the long sleep should have 549104964Sjeff * unwound the slptime and reset our 550104964Sjeff * priority before we run at the stale 551104964Sjeff * priority. Should KASSERT at some 552104964Sjeff * point when all the cases are fixed. 553104964Sjeff */ 554163709Sjb updatepri(td); 555163709Sjb } 556172264Sjeff ts->ts_slptime = 0; 557163709Sjb } else 558172264Sjeff ts->ts_slptime++; 559172264Sjeff if (ts->ts_slptime > 1) { 560170293Sjeff thread_unlock(td); 561163709Sjb continue; 562170293Sjeff } 563163709Sjb td->td_estcpu = decay_cpu(loadfac, td->td_estcpu); 564163709Sjb resetpriority(td); 565163709Sjb resetpriority_thread(td); 566170293Sjeff thread_unlock(td); 567180879Sjhb } 568177368Sjeff PROC_UNLOCK(p); 569180879Sjhb } 570104964Sjeff sx_sunlock(&allproc_lock); 571104964Sjeff} 572104964Sjeff 573104964Sjeff/* 574123871Sjhb * Main loop for a kthread that executes schedcpu once a second. 575123871Sjhb */ 576123871Sjhbstatic void 577124955Sjeffschedcpu_thread(void) 578123871Sjhb{ 579123871Sjhb 580123871Sjhb for (;;) { 581123871Sjhb schedcpu(); 582167086Sjhb pause("-", hz); 583123871Sjhb } 584123871Sjhb} 585123871Sjhb 586123871Sjhb/* 587104964Sjeff * Recalculate the priority of a process after it has slept for a while. 588163709Sjb * For all load averages >= 1 and max td_estcpu of 255, sleeping for at 589163709Sjb * least six times the loadfactor will decay td_estcpu to zero. 590104964Sjeff */ 591104964Sjeffstatic void 592163709Sjbupdatepri(struct thread *td) 593104964Sjeff{ 594172264Sjeff struct td_sched *ts; 595172264Sjeff fixpt_t loadfac; 596172264Sjeff unsigned int newcpu; 597104964Sjeff 598172264Sjeff ts = td->td_sched; 599118972Sjhb loadfac = loadfactor(averunnable.ldavg[0]); 600172264Sjeff if (ts->ts_slptime > 5 * loadfac) 601163709Sjb td->td_estcpu = 0; 602104964Sjeff else { 603163709Sjb newcpu = td->td_estcpu; 604172264Sjeff ts->ts_slptime--; /* was incremented in schedcpu() */ 605172264Sjeff while (newcpu && --ts->ts_slptime) 606104964Sjeff newcpu = decay_cpu(loadfac, newcpu); 607163709Sjb td->td_estcpu = newcpu; 608104964Sjeff } 609104964Sjeff} 610104964Sjeff 611104964Sjeff/* 612104964Sjeff * Compute the priority of a process when running in user mode. 613104964Sjeff * Arrange to reschedule if the resulting priority is better 614104964Sjeff * than that of the current process. 615104964Sjeff */ 616104964Sjeffstatic void 617163709Sjbresetpriority(struct thread *td) 618104964Sjeff{ 619104964Sjeff register unsigned int newpriority; 620104964Sjeff 621163709Sjb if (td->td_pri_class == PRI_TIMESHARE) { 622163709Sjb newpriority = PUSER + td->td_estcpu / INVERSE_ESTCPU_WEIGHT + 623163709Sjb NICE_WEIGHT * (td->td_proc->p_nice - PRIO_MIN); 624104964Sjeff newpriority = min(max(newpriority, PRI_MIN_TIMESHARE), 625104964Sjeff PRI_MAX_TIMESHARE); 626163709Sjb sched_user_prio(td, newpriority); 627104964Sjeff } 628104964Sjeff} 629104964Sjeff 630139453Sjhb/* 631164936Sjulian * Update the thread's priority when the associated process's user 632139453Sjhb * priority changes. 633139453Sjhb */ 634139453Sjhbstatic void 635163709Sjbresetpriority_thread(struct thread *td) 636139453Sjhb{ 637139453Sjhb 638139453Sjhb /* Only change threads with a time sharing user priority. */ 639139453Sjhb if (td->td_priority < PRI_MIN_TIMESHARE || 640139453Sjhb td->td_priority > PRI_MAX_TIMESHARE) 641139453Sjhb return; 642139453Sjhb 643139453Sjhb /* XXX the whole needresched thing is broken, but not silly. */ 644139453Sjhb maybe_resched(td); 645139453Sjhb 646163709Sjb sched_prio(td, td->td_user_pri); 647139453Sjhb} 648139453Sjhb 649104964Sjeff/* ARGSUSED */ 650104964Sjeffstatic void 651104964Sjeffsched_setup(void *dummy) 652104964Sjeff{ 653239185Smav 654124955Sjeff setup_runqs(); 655118972Sjhb 656125288Sjeff /* Account for thread0. */ 657139317Sjeff sched_load_add(); 658104964Sjeff} 659104964Sjeff 660239153Smav/* 661239185Smav * This routine determines time constants after stathz and hz are setup. 662239153Smav */ 663239153Smavstatic void 664239153Smavsched_initticks(void *dummy) 665239153Smav{ 666239153Smav 667239153Smav realstathz = stathz ? stathz : hz; 668239153Smav sched_slice = realstathz / 10; /* ~100ms */ 669239196Smav hogticks = imax(1, (2 * hz * sched_slice + realstathz / 2) / 670239196Smav realstathz); 671239153Smav} 672239153Smav 673104964Sjeff/* External interfaces start here */ 674180879Sjhb 675134791Sjulian/* 676134791Sjulian * Very early in the boot some setup of scheduler-specific 677145109Smaxim * parts of proc0 and of some scheduler resources needs to be done. 678134791Sjulian * Called from: 679134791Sjulian * proc0_init() 680134791Sjulian */ 681134791Sjulianvoid 682134791Sjulianschedinit(void) 683134791Sjulian{ 684134791Sjulian /* 685134791Sjulian * Set up the scheduler specific parts of proc0. 686134791Sjulian */ 687134791Sjulian proc0.p_sched = NULL; /* XXX */ 688164936Sjulian thread0.td_sched = &td_sched0; 689170293Sjeff thread0.td_lock = &sched_lock; 690239153Smav td_sched0.ts_slice = sched_slice; 691171488Sjeff mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE); 692134791Sjulian} 693134791Sjulian 694104964Sjeffint 695104964Sjeffsched_runnable(void) 696104964Sjeff{ 697124955Sjeff#ifdef SMP 698124955Sjeff return runq_check(&runq) + runq_check(&runq_pcpu[PCPU_GET(cpuid)]); 699124955Sjeff#else 700124955Sjeff return runq_check(&runq); 701124955Sjeff#endif 702104964Sjeff} 703104964Sjeff 704180879Sjhbint 705104964Sjeffsched_rr_interval(void) 706104964Sjeff{ 707239153Smav 708239153Smav /* Convert sched_slice from stathz to hz. */ 709239196Smav return (imax(1, (sched_slice * hz + realstathz / 2) / realstathz)); 710104964Sjeff} 711104964Sjeff 712104964Sjeff/* 713104964Sjeff * We adjust the priority of the current process. The priority of 714104964Sjeff * a process gets worse as it accumulates CPU time. The cpu usage 715163709Sjb * estimator (td_estcpu) is increased here. resetpriority() will 716163709Sjb * compute a different priority each time td_estcpu increases by 717104964Sjeff * INVERSE_ESTCPU_WEIGHT 718104964Sjeff * (until MAXPRI is reached). The cpu usage estimator ramps up 719104964Sjeff * quite quickly when the process is running (linearly), and decays 720104964Sjeff * away exponentially, at a rate which is proportionally slower when 721104964Sjeff * the system is busy. The basic principle is that the system will 722104964Sjeff * 90% forget that the process used a lot of CPU time in 5 * loadav 723104964Sjeff * seconds. This causes the system to favor processes which haven't 724104964Sjeff * run much recently, and to round-robin among other processes. 725104964Sjeff */ 726104964Sjeffvoid 727121127Sjeffsched_clock(struct thread *td) 728104964Sjeff{ 729212455Smav struct pcpuidlestat *stat; 730164936Sjulian struct td_sched *ts; 731104964Sjeff 732170293Sjeff THREAD_LOCK_ASSERT(td, MA_OWNED); 733164936Sjulian ts = td->td_sched; 734113356Sjeff 735164936Sjulian ts->ts_cpticks++; 736163709Sjb td->td_estcpu = ESTCPULIM(td->td_estcpu + 1); 737163709Sjb if ((td->td_estcpu % INVERSE_ESTCPU_WEIGHT) == 0) { 738163709Sjb resetpriority(td); 739163709Sjb resetpriority_thread(td); 740104964Sjeff } 741173081Sjhb 742173081Sjhb /* 743173081Sjhb * Force a context switch if the current thread has used up a full 744239185Smav * time slice (default is 100ms). 745173081Sjhb */ 746239185Smav if (!TD_IS_IDLETHREAD(td) && --ts->ts_slice <= 0) { 747239153Smav ts->ts_slice = sched_slice; 748239157Smav td->td_flags |= TDF_NEEDRESCHED | TDF_SLICEEND; 749239153Smav } 750212455Smav 751212455Smav stat = DPCPU_PTR(idlestat); 752212455Smav stat->oldidlecalls = stat->idlecalls; 753212455Smav stat->idlecalls = 0; 754104964Sjeff} 755118972Sjhb 756104964Sjeff/* 757180879Sjhb * Charge child's scheduling CPU usage to parent. 758104964Sjeff */ 759104964Sjeffvoid 760132372Sjuliansched_exit(struct proc *p, struct thread *td) 761104964Sjeff{ 762163709Sjb 763187357Sjeff KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "proc exit", 764225199Sdelphij "prio:%d", td->td_priority); 765187357Sjeff 766177368Sjeff PROC_LOCK_ASSERT(p, MA_OWNED); 767164936Sjulian sched_exit_thread(FIRST_THREAD_IN_PROC(p), td); 768113356Sjeff} 769113356Sjeff 770113356Sjeffvoid 771164936Sjuliansched_exit_thread(struct thread *td, struct thread *child) 772113356Sjeff{ 773113923Sjhb 774187357Sjeff KTR_STATE1(KTR_SCHED, "thread", sched_tdname(child), "exit", 775225199Sdelphij "prio:%d", child->td_priority); 776170293Sjeff thread_lock(td); 777164936Sjulian td->td_estcpu = ESTCPULIM(td->td_estcpu + child->td_estcpu); 778170293Sjeff thread_unlock(td); 779198854Sattilio thread_lock(child); 780198854Sattilio if ((child->td_flags & TDF_NOLOAD) == 0) 781139317Sjeff sched_load_rem(); 782198854Sattilio thread_unlock(child); 783113356Sjeff} 784109145Sjeff 785113356Sjeffvoid 786134791Sjuliansched_fork(struct thread *td, struct thread *childtd) 787113356Sjeff{ 788134791Sjulian sched_fork_thread(td, childtd); 789113356Sjeff} 790113356Sjeff 791113356Sjeffvoid 792134791Sjuliansched_fork_thread(struct thread *td, struct thread *childtd) 793113356Sjeff{ 794177426Sjeff struct td_sched *ts; 795177426Sjeff 796164936Sjulian childtd->td_estcpu = td->td_estcpu; 797170293Sjeff childtd->td_lock = &sched_lock; 798176750Smarcel childtd->td_cpuset = cpuset_ref(td->td_cpuset); 799217078Sjhb childtd->td_priority = childtd->td_base_pri; 800177426Sjeff ts = childtd->td_sched; 801177426Sjeff bzero(ts, sizeof(*ts)); 802180923Sjhb ts->ts_flags |= (td->td_sched->ts_flags & TSF_AFFINITY); 803239153Smav ts->ts_slice = 1; 804104964Sjeff} 805104964Sjeff 806104964Sjeffvoid 807130551Sjuliansched_nice(struct proc *p, int nice) 808104964Sjeff{ 809139453Sjhb struct thread *td; 810113873Sjhb 811130551Sjulian PROC_LOCK_ASSERT(p, MA_OWNED); 812130551Sjulian p->p_nice = nice; 813163709Sjb FOREACH_THREAD_IN_PROC(p, td) { 814170293Sjeff thread_lock(td); 815163709Sjb resetpriority(td); 816163709Sjb resetpriority_thread(td); 817170293Sjeff thread_unlock(td); 818163709Sjb } 819104964Sjeff} 820104964Sjeff 821113356Sjeffvoid 822163709Sjbsched_class(struct thread *td, int class) 823113356Sjeff{ 824170293Sjeff THREAD_LOCK_ASSERT(td, MA_OWNED); 825163709Sjb td->td_pri_class = class; 826113356Sjeff} 827113356Sjeff 828105127Sjulian/* 829105127Sjulian * Adjust the priority of a thread. 830105127Sjulian */ 831139453Sjhbstatic void 832139453Sjhbsched_priority(struct thread *td, u_char prio) 833104964Sjeff{ 834104964Sjeff 835187357Sjeff 836187357Sjeff KTR_POINT3(KTR_SCHED, "thread", sched_tdname(td), "priority change", 837187357Sjeff "prio:%d", td->td_priority, "new prio:%d", prio, KTR_ATTR_LINKED, 838187357Sjeff sched_tdname(curthread)); 839260817Savg SDT_PROBE3(sched, , , change__pri, td, td->td_proc, prio); 840187357Sjeff if (td != curthread && prio > td->td_priority) { 841187357Sjeff KTR_POINT3(KTR_SCHED, "thread", sched_tdname(curthread), 842187357Sjeff "lend prio", "prio:%d", td->td_priority, "new prio:%d", 843187357Sjeff prio, KTR_ATTR_LINKED, sched_tdname(td)); 844260817Savg SDT_PROBE4(sched, , , lend__pri, td, td->td_proc, prio, 845235459Srstone curthread); 846187357Sjeff } 847170293Sjeff THREAD_LOCK_ASSERT(td, MA_OWNED); 848139453Sjhb if (td->td_priority == prio) 849139453Sjhb return; 850166188Sjeff td->td_priority = prio; 851177435Sjeff if (TD_ON_RUNQ(td) && td->td_rqindex != (prio / RQ_PPQ)) { 852166188Sjeff sched_rem(td); 853166188Sjeff sched_add(td, SRQ_BORING); 854104964Sjeff } 855104964Sjeff} 856104964Sjeff 857139453Sjhb/* 858139453Sjhb * Update a thread's priority when it is lent another thread's 859139453Sjhb * priority. 860139453Sjhb */ 861104964Sjeffvoid 862139453Sjhbsched_lend_prio(struct thread *td, u_char prio) 863139453Sjhb{ 864139453Sjhb 865139453Sjhb td->td_flags |= TDF_BORROWING; 866139453Sjhb sched_priority(td, prio); 867139453Sjhb} 868139453Sjhb 869139453Sjhb/* 870139453Sjhb * Restore a thread's priority when priority propagation is 871139453Sjhb * over. The prio argument is the minimum priority the thread 872139453Sjhb * needs to have to satisfy other possible priority lending 873139453Sjhb * requests. If the thread's regulary priority is less 874139453Sjhb * important than prio the thread will keep a priority boost 875139453Sjhb * of prio. 876139453Sjhb */ 877139453Sjhbvoid 878139453Sjhbsched_unlend_prio(struct thread *td, u_char prio) 879139453Sjhb{ 880139453Sjhb u_char base_pri; 881139453Sjhb 882139453Sjhb if (td->td_base_pri >= PRI_MIN_TIMESHARE && 883139453Sjhb td->td_base_pri <= PRI_MAX_TIMESHARE) 884163709Sjb base_pri = td->td_user_pri; 885139453Sjhb else 886139453Sjhb base_pri = td->td_base_pri; 887139453Sjhb if (prio >= base_pri) { 888139453Sjhb td->td_flags &= ~TDF_BORROWING; 889139453Sjhb sched_prio(td, base_pri); 890139453Sjhb } else 891139453Sjhb sched_lend_prio(td, prio); 892139453Sjhb} 893139453Sjhb 894139453Sjhbvoid 895139453Sjhbsched_prio(struct thread *td, u_char prio) 896139453Sjhb{ 897139453Sjhb u_char oldprio; 898139453Sjhb 899139453Sjhb /* First, update the base priority. */ 900139453Sjhb td->td_base_pri = prio; 901139453Sjhb 902139453Sjhb /* 903139453Sjhb * If the thread is borrowing another thread's priority, don't ever 904139453Sjhb * lower the priority. 905139453Sjhb */ 906139453Sjhb if (td->td_flags & TDF_BORROWING && td->td_priority < prio) 907139453Sjhb return; 908139453Sjhb 909139453Sjhb /* Change the real priority. */ 910139453Sjhb oldprio = td->td_priority; 911139453Sjhb sched_priority(td, prio); 912139453Sjhb 913139453Sjhb /* 914139453Sjhb * If the thread is on a turnstile, then let the turnstile update 915139453Sjhb * its state. 916139453Sjhb */ 917139453Sjhb if (TD_ON_LOCK(td) && oldprio != prio) 918139453Sjhb turnstile_adjust(td, oldprio); 919139453Sjhb} 920139453Sjhb 921139453Sjhbvoid 922163709Sjbsched_user_prio(struct thread *td, u_char prio) 923161599Sdavidxu{ 924161599Sdavidxu 925174536Sdavidxu THREAD_LOCK_ASSERT(td, MA_OWNED); 926163709Sjb td->td_base_user_pri = prio; 927216313Sdavidxu if (td->td_lend_user_pri <= prio) 928164177Sdavidxu return; 929163709Sjb td->td_user_pri = prio; 930161599Sdavidxu} 931161599Sdavidxu 932161599Sdavidxuvoid 933161599Sdavidxusched_lend_user_prio(struct thread *td, u_char prio) 934161599Sdavidxu{ 935161599Sdavidxu 936174536Sdavidxu THREAD_LOCK_ASSERT(td, MA_OWNED); 937216313Sdavidxu td->td_lend_user_pri = prio; 938216791Sdavidxu td->td_user_pri = min(prio, td->td_base_user_pri); 939216791Sdavidxu if (td->td_priority > td->td_user_pri) 940216791Sdavidxu sched_prio(td, td->td_user_pri); 941216791Sdavidxu else if (td->td_priority != td->td_user_pri) 942216791Sdavidxu td->td_flags |= TDF_NEEDRESCHED; 943161599Sdavidxu} 944161599Sdavidxu 945161599Sdavidxuvoid 946177085Sjeffsched_sleep(struct thread *td, int pri) 947104964Sjeff{ 948113923Sjhb 949170293Sjeff THREAD_LOCK_ASSERT(td, MA_OWNED); 950172264Sjeff td->td_slptick = ticks; 951172264Sjeff td->td_sched->ts_slptime = 0; 952217410Sjhb if (pri != 0 && PRI_BASE(td->td_pri_class) == PRI_TIMESHARE) 953177085Sjeff sched_prio(td, pri); 954201347Skib if (TD_IS_SUSPENDED(td) || pri >= PSOCK) 955177085Sjeff td->td_flags |= TDF_CANSWAP; 956104964Sjeff} 957104964Sjeff 958104964Sjeffvoid 959135051Sjuliansched_switch(struct thread *td, struct thread *newtd, int flags) 960104964Sjeff{ 961202889Sattilio struct mtx *tmtx; 962164936Sjulian struct td_sched *ts; 963104964Sjeff struct proc *p; 964239157Smav int preempted; 965104964Sjeff 966202889Sattilio tmtx = NULL; 967164936Sjulian ts = td->td_sched; 968104964Sjeff p = td->td_proc; 969104964Sjeff 970170293Sjeff THREAD_LOCK_ASSERT(td, MA_OWNED); 971180879Sjhb 972180879Sjhb /* 973170293Sjeff * Switch to the sched lock to fix things up and pick 974170293Sjeff * a new thread. 975202889Sattilio * Block the td_lock in order to avoid breaking the critical path. 976170293Sjeff */ 977170293Sjeff if (td->td_lock != &sched_lock) { 978170293Sjeff mtx_lock_spin(&sched_lock); 979202889Sattilio tmtx = thread_lock_block(td); 980170293Sjeff } 981104964Sjeff 982198854Sattilio if ((td->td_flags & TDF_NOLOAD) == 0) 983139317Sjeff sched_load_rem(); 984135051Sjulian 985113339Sjulian td->td_lastcpu = td->td_oncpu; 986271194Smav preempted = !((td->td_flags & TDF_SLICEEND) || 987271194Smav (flags & SWT_RELINQUISH)); 988239157Smav td->td_flags &= ~(TDF_NEEDRESCHED | TDF_SLICEEND); 989144777Sups td->td_owepreempt = 0; 990113339Sjulian td->td_oncpu = NOCPU; 991180879Sjhb 992104964Sjeff /* 993104964Sjeff * At the last moment, if this thread is still marked RUNNING, 994104964Sjeff * then put it back on the run queue as it has not been suspended 995131473Sjhb * or stopped or any thing else similar. We never put the idle 996131473Sjhb * threads on the run queue, however. 997104964Sjeff */ 998166415Sjulian if (td->td_flags & TDF_IDLETD) { 999131473Sjhb TD_SET_CAN_RUN(td); 1000166415Sjulian#ifdef SMP 1001223758Sattilio CPU_CLR(PCPU_GET(cpuid), &idle_cpus_mask); 1002166415Sjulian#endif 1003166415Sjulian } else { 1004134791Sjulian if (TD_IS_RUNNING(td)) { 1005164936Sjulian /* Put us back on the run queue. */ 1006239157Smav sched_add(td, preempted ? 1007136170Sjulian SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED : 1008136170Sjulian SRQ_OURSELF|SRQ_YIELDING); 1009134791Sjulian } 1010104964Sjeff } 1011136170Sjulian if (newtd) { 1012180879Sjhb /* 1013136170Sjulian * The thread we are about to run needs to be counted 1014136170Sjulian * as if it had been added to the run queue and selected. 1015136170Sjulian * It came from: 1016136170Sjulian * * A preemption 1017180879Sjhb * * An upcall 1018136170Sjulian * * A followon 1019136170Sjulian */ 1020136170Sjulian KASSERT((newtd->td_inhibitors == 0), 1021165693Srwatson ("trying to run inhibited thread")); 1022177435Sjeff newtd->td_flags |= TDF_DIDRUN; 1023136170Sjulian TD_SET_RUNNING(newtd); 1024198854Sattilio if ((newtd->td_flags & TDF_NOLOAD) == 0) 1025139317Sjeff sched_load_add(); 1026136170Sjulian } else { 1027131473Sjhb newtd = choosethread(); 1028202940Sattilio MPASS(newtd->td_lock == &sched_lock); 1029136170Sjulian } 1030136170Sjulian 1031145256Sjkoshy if (td != newtd) { 1032145256Sjkoshy#ifdef HWPMC_HOOKS 1033145256Sjkoshy if (PMC_PROC_IS_USING_PMCS(td->td_proc)) 1034145256Sjkoshy PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT); 1035145256Sjkoshy#endif 1036235459Srstone 1037262014Smarkj SDT_PROBE2(sched, , , off__cpu, newtd, newtd->td_proc); 1038235459Srstone 1039166415Sjulian /* I feel sleepy */ 1040174629Sjeff lock_profile_release_lock(&sched_lock.lock_object); 1041179297Sjb#ifdef KDTRACE_HOOKS 1042179297Sjb /* 1043179297Sjb * If DTrace has set the active vtime enum to anything 1044179297Sjb * other than INACTIVE (0), then it should have set the 1045179297Sjb * function to call. 1046179297Sjb */ 1047179297Sjb if (dtrace_vtime_active) 1048179297Sjb (*dtrace_vtime_switch_func)(newtd); 1049179297Sjb#endif 1050179297Sjb 1051202889Sattilio cpu_switch(td, newtd, tmtx != NULL ? tmtx : td->td_lock); 1052174629Sjeff lock_profile_obtain_lock_success(&sched_lock.lock_object, 1053174629Sjeff 0, 0, __FILE__, __LINE__); 1054166415Sjulian /* 1055166415Sjulian * Where am I? What year is it? 1056166415Sjulian * We are in the same thread that went to sleep above, 1057180879Sjhb * but any amount of time may have passed. All our context 1058166415Sjulian * will still be available as will local variables. 1059166415Sjulian * PCPU values however may have changed as we may have 1060166415Sjulian * changed CPU so don't trust cached values of them. 1061166415Sjulian * New threads will go to fork_exit() instead of here 1062166415Sjulian * so if you change things here you may need to change 1063166415Sjulian * things there too. 1064180879Sjhb * 1065166415Sjulian * If the thread above was exiting it will never wake 1066166415Sjulian * up again here, so either it has saved everything it 1067166415Sjulian * needed to, or the thread_wait() or wait() will 1068166415Sjulian * need to reap it. 1069166415Sjulian */ 1070235459Srstone 1071260817Savg SDT_PROBE0(sched, , , on__cpu); 1072145256Sjkoshy#ifdef HWPMC_HOOKS 1073145256Sjkoshy if (PMC_PROC_IS_USING_PMCS(td->td_proc)) 1074145256Sjkoshy PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN); 1075145256Sjkoshy#endif 1076235459Srstone } else 1077260817Savg SDT_PROBE0(sched, , , remain__cpu); 1078145256Sjkoshy 1079166415Sjulian#ifdef SMP 1080166415Sjulian if (td->td_flags & TDF_IDLETD) 1081223758Sattilio CPU_SET(PCPU_GET(cpuid), &idle_cpus_mask); 1082166415Sjulian#endif 1083121128Sjeff sched_lock.mtx_lock = (uintptr_t)td; 1084121128Sjeff td->td_oncpu = PCPU_GET(cpuid); 1085170293Sjeff MPASS(td->td_lock == &sched_lock); 1086104964Sjeff} 1087104964Sjeff 1088104964Sjeffvoid 1089104964Sjeffsched_wakeup(struct thread *td) 1090104964Sjeff{ 1091172264Sjeff struct td_sched *ts; 1092172264Sjeff 1093170293Sjeff THREAD_LOCK_ASSERT(td, MA_OWNED); 1094172264Sjeff ts = td->td_sched; 1095177085Sjeff td->td_flags &= ~TDF_CANSWAP; 1096172264Sjeff if (ts->ts_slptime > 1) { 1097163709Sjb updatepri(td); 1098163709Sjb resetpriority(td); 1099163709Sjb } 1100201790Sattilio td->td_slptick = 0; 1101172264Sjeff ts->ts_slptime = 0; 1102239153Smav ts->ts_slice = sched_slice; 1103166188Sjeff sched_add(td, SRQ_BORING); 1104104964Sjeff} 1105104964Sjeff 1106134693Sjulian#ifdef SMP 1107134688Sjulianstatic int 1108180879Sjhbforward_wakeup(int cpunum) 1109134688Sjulian{ 1110134688Sjulian struct pcpu *pc; 1111223758Sattilio cpuset_t dontuse, map, map2; 1112223758Sattilio u_int id, me; 1113222813Sattilio int iscpuset; 1114134688Sjulian 1115134688Sjulian mtx_assert(&sched_lock, MA_OWNED); 1116134688Sjulian 1117134791Sjulian CTR0(KTR_RUNQ, "forward_wakeup()"); 1118134688Sjulian 1119134688Sjulian if ((!forward_wakeup_enabled) || 1120134688Sjulian (forward_wakeup_use_mask == 0 && forward_wakeup_use_loop == 0)) 1121134688Sjulian return (0); 1122134688Sjulian if (!smp_started || cold || panicstr) 1123134688Sjulian return (0); 1124134688Sjulian 1125134688Sjulian forward_wakeups_requested++; 1126134688Sjulian 1127180879Sjhb /* 1128180879Sjhb * Check the idle mask we received against what we calculated 1129180879Sjhb * before in the old version. 1130180879Sjhb */ 1131223758Sattilio me = PCPU_GET(cpuid); 1132180879Sjhb 1133180879Sjhb /* Don't bother if we should be doing it ourself. */ 1134223758Sattilio if (CPU_ISSET(me, &idle_cpus_mask) && 1135223758Sattilio (cpunum == NOCPU || me == cpunum)) 1136134688Sjulian return (0); 1137134688Sjulian 1138223758Sattilio CPU_SETOF(me, &dontuse); 1139222813Sattilio CPU_OR(&dontuse, &stopped_cpus); 1140222813Sattilio CPU_OR(&dontuse, &hlt_cpus_mask); 1141222813Sattilio CPU_ZERO(&map2); 1142134688Sjulian if (forward_wakeup_use_loop) { 1143222531Snwhitehorn STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) { 1144223758Sattilio id = pc->pc_cpuid; 1145223758Sattilio if (!CPU_ISSET(id, &dontuse) && 1146134688Sjulian pc->pc_curthread == pc->pc_idlethread) { 1147223758Sattilio CPU_SET(id, &map2); 1148134688Sjulian } 1149134688Sjulian } 1150134688Sjulian } 1151134688Sjulian 1152134688Sjulian if (forward_wakeup_use_mask) { 1153222813Sattilio map = idle_cpus_mask; 1154222813Sattilio CPU_NAND(&map, &dontuse); 1155134688Sjulian 1156180879Sjhb /* If they are both on, compare and use loop if different. */ 1157134688Sjulian if (forward_wakeup_use_loop) { 1158222813Sattilio if (CPU_CMP(&map, &map2)) { 1159222040Sattilio printf("map != map2, loop method preferred\n"); 1160222040Sattilio map = map2; 1161134688Sjulian } 1162134688Sjulian } 1163134688Sjulian } else { 1164222040Sattilio map = map2; 1165134688Sjulian } 1166180879Sjhb 1167180879Sjhb /* If we only allow a specific CPU, then mask off all the others. */ 1168134688Sjulian if (cpunum != NOCPU) { 1169134688Sjulian KASSERT((cpunum <= mp_maxcpus),("forward_wakeup: bad cpunum.")); 1170222813Sattilio iscpuset = CPU_ISSET(cpunum, &map); 1171222813Sattilio if (iscpuset == 0) 1172222813Sattilio CPU_ZERO(&map); 1173222813Sattilio else 1174222813Sattilio CPU_SETOF(cpunum, &map); 1175134688Sjulian } 1176222813Sattilio if (!CPU_EMPTY(&map)) { 1177134688Sjulian forward_wakeups_delivered++; 1178222531Snwhitehorn STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) { 1179223758Sattilio id = pc->pc_cpuid; 1180223758Sattilio if (!CPU_ISSET(id, &map)) 1181212455Smav continue; 1182212455Smav if (cpu_idle_wakeup(pc->pc_cpuid)) 1183223758Sattilio CPU_CLR(id, &map); 1184212455Smav } 1185222813Sattilio if (!CPU_EMPTY(&map)) 1186212455Smav ipi_selected(map, IPI_AST); 1187134688Sjulian return (1); 1188134688Sjulian } 1189134688Sjulian if (cpunum == NOCPU) 1190134688Sjulian printf("forward_wakeup: Idle processor not found\n"); 1191134688Sjulian return (0); 1192134688Sjulian} 1193134688Sjulian 1194147182Supsstatic void 1195180879Sjhbkick_other_cpu(int pri, int cpuid) 1196180879Sjhb{ 1197180879Sjhb struct pcpu *pcpu; 1198180879Sjhb int cpri; 1199147182Sups 1200180879Sjhb pcpu = pcpu_find(cpuid); 1201223758Sattilio if (CPU_ISSET(cpuid, &idle_cpus_mask)) { 1202147182Sups forward_wakeups_delivered++; 1203212455Smav if (!cpu_idle_wakeup(cpuid)) 1204212455Smav ipi_cpu(cpuid, IPI_AST); 1205147182Sups return; 1206147182Sups } 1207147182Sups 1208180879Sjhb cpri = pcpu->pc_curthread->td_priority; 1209147182Sups if (pri >= cpri) 1210147182Sups return; 1211147182Sups 1212147182Sups#if defined(IPI_PREEMPTION) && defined(PREEMPTION) 1213147182Sups#if !defined(FULL_PREEMPTION) 1214147182Sups if (pri <= PRI_MAX_ITHD) 1215147182Sups#endif /* ! FULL_PREEMPTION */ 1216147182Sups { 1217210939Sjhb ipi_cpu(cpuid, IPI_PREEMPT); 1218147182Sups return; 1219147182Sups } 1220147182Sups#endif /* defined(IPI_PREEMPTION) && defined(PREEMPTION) */ 1221147182Sups 1222147182Sups pcpu->pc_curthread->td_flags |= TDF_NEEDRESCHED; 1223210939Sjhb ipi_cpu(cpuid, IPI_AST); 1224147182Sups return; 1225147182Sups} 1226147182Sups#endif /* SMP */ 1227147182Sups 1228180923Sjhb#ifdef SMP 1229180923Sjhbstatic int 1230180923Sjhbsched_pickcpu(struct thread *td) 1231180923Sjhb{ 1232180923Sjhb int best, cpu; 1233180923Sjhb 1234180923Sjhb mtx_assert(&sched_lock, MA_OWNED); 1235180923Sjhb 1236180937Sjhb if (THREAD_CAN_SCHED(td, td->td_lastcpu)) 1237180937Sjhb best = td->td_lastcpu; 1238180937Sjhb else 1239180937Sjhb best = NOCPU; 1240209059Sjhb CPU_FOREACH(cpu) { 1241180923Sjhb if (!THREAD_CAN_SCHED(td, cpu)) 1242180923Sjhb continue; 1243180923Sjhb 1244180923Sjhb if (best == NOCPU) 1245180923Sjhb best = cpu; 1246180923Sjhb else if (runq_length[cpu] < runq_length[best]) 1247180923Sjhb best = cpu; 1248180923Sjhb } 1249180923Sjhb KASSERT(best != NOCPU, ("no valid CPUs")); 1250180923Sjhb 1251180923Sjhb return (best); 1252180923Sjhb} 1253180923Sjhb#endif 1254180923Sjhb 1255104964Sjeffvoid 1256134586Sjuliansched_add(struct thread *td, int flags) 1257147182Sups#ifdef SMP 1258104964Sjeff{ 1259223758Sattilio cpuset_t tidlemsk; 1260164936Sjulian struct td_sched *ts; 1261223758Sattilio u_int cpu, cpuid; 1262134591Sjulian int forwarded = 0; 1263147182Sups int single_cpu = 0; 1264121127Sjeff 1265164936Sjulian ts = td->td_sched; 1266170293Sjeff THREAD_LOCK_ASSERT(td, MA_OWNED); 1267166188Sjeff KASSERT((td->td_inhibitors == 0), 1268166188Sjeff ("sched_add: trying to run inhibited thread")); 1269166188Sjeff KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)), 1270166188Sjeff ("sched_add: bad thread state")); 1271172207Sjeff KASSERT(td->td_flags & TDF_INMEM, 1272172207Sjeff ("sched_add: thread swapped out")); 1273180879Sjhb 1274187357Sjeff KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq add", 1275187357Sjeff "prio:%d", td->td_priority, KTR_ATTR_LINKED, 1276187357Sjeff sched_tdname(curthread)); 1277187357Sjeff KTR_POINT1(KTR_SCHED, "thread", sched_tdname(curthread), "wokeup", 1278187357Sjeff KTR_ATTR_LINKED, sched_tdname(td)); 1279235459Srstone SDT_PROBE4(sched, , , enqueue, td, td->td_proc, NULL, 1280235459Srstone flags & SRQ_PREEMPTED); 1281187357Sjeff 1282187357Sjeff 1283170293Sjeff /* 1284170293Sjeff * Now that the thread is moving to the run-queue, set the lock 1285170293Sjeff * to the scheduler's lock. 1286170293Sjeff */ 1287170293Sjeff if (td->td_lock != &sched_lock) { 1288170293Sjeff mtx_lock_spin(&sched_lock); 1289170293Sjeff thread_lock_set(td, &sched_lock); 1290170293Sjeff } 1291166188Sjeff TD_SET_RUNQ(td); 1292131481Sjhb 1293221081Srstone /* 1294221081Srstone * If SMP is started and the thread is pinned or otherwise limited to 1295221081Srstone * a specific set of CPUs, queue the thread to a per-CPU run queue. 1296221081Srstone * Otherwise, queue the thread to the global run queue. 1297221081Srstone * 1298221081Srstone * If SMP has not yet been started we must use the global run queue 1299221081Srstone * as per-CPU state may not be initialized yet and we may crash if we 1300221081Srstone * try to access the per-CPU run queues. 1301221081Srstone */ 1302221081Srstone if (smp_started && (td->td_pinned != 0 || td->td_flags & TDF_BOUND || 1303221081Srstone ts->ts_flags & TSF_AFFINITY)) { 1304221081Srstone if (td->td_pinned != 0) 1305221081Srstone cpu = td->td_lastcpu; 1306221081Srstone else if (td->td_flags & TDF_BOUND) { 1307221081Srstone /* Find CPU from bound runq. */ 1308221081Srstone KASSERT(SKE_RUNQ_PCPU(ts), 1309221081Srstone ("sched_add: bound td_sched not on cpu runq")); 1310221081Srstone cpu = ts->ts_runq - &runq_pcpu[0]; 1311221081Srstone } else 1312221081Srstone /* Find a valid CPU for our cpuset */ 1313221081Srstone cpu = sched_pickcpu(td); 1314164936Sjulian ts->ts_runq = &runq_pcpu[cpu]; 1315147182Sups single_cpu = 1; 1316147182Sups CTR3(KTR_RUNQ, 1317180879Sjhb "sched_add: Put td_sched:%p(td:%p) on cpu%d runq", ts, td, 1318180879Sjhb cpu); 1319180879Sjhb } else { 1320134591Sjulian CTR2(KTR_RUNQ, 1321180879Sjhb "sched_add: adding td_sched:%p (td:%p) to gbl runq", ts, 1322180879Sjhb td); 1323134591Sjulian cpu = NOCPU; 1324164936Sjulian ts->ts_runq = &runq; 1325147182Sups } 1326180879Sjhb 1327223758Sattilio cpuid = PCPU_GET(cpuid); 1328223758Sattilio if (single_cpu && cpu != cpuid) { 1329180879Sjhb kick_other_cpu(td->td_priority, cpu); 1330124955Sjeff } else { 1331147190Sups if (!single_cpu) { 1332223758Sattilio tidlemsk = idle_cpus_mask; 1333223758Sattilio CPU_NAND(&tidlemsk, &hlt_cpus_mask); 1334223758Sattilio CPU_CLR(cpuid, &tidlemsk); 1335147182Sups 1336223758Sattilio if (!CPU_ISSET(cpuid, &idle_cpus_mask) && 1337223758Sattilio ((flags & SRQ_INTR) == 0) && 1338222813Sattilio !CPU_EMPTY(&tidlemsk)) 1339147182Sups forwarded = forward_wakeup(cpu); 1340147182Sups } 1341147182Sups 1342147182Sups if (!forwarded) { 1343147190Sups if ((flags & SRQ_YIELDING) == 0 && maybe_preempt(td)) 1344147182Sups return; 1345147182Sups else 1346147182Sups maybe_resched(td); 1347147182Sups } 1348124955Sjeff } 1349180879Sjhb 1350198854Sattilio if ((td->td_flags & TDF_NOLOAD) == 0) 1351147182Sups sched_load_add(); 1352177435Sjeff runq_add(ts->ts_runq, td, flags); 1353180923Sjhb if (cpu != NOCPU) 1354180923Sjhb runq_length[cpu]++; 1355147182Sups} 1356147182Sups#else /* SMP */ 1357147182Sups{ 1358164936Sjulian struct td_sched *ts; 1359180923Sjhb 1360164936Sjulian ts = td->td_sched; 1361170293Sjeff THREAD_LOCK_ASSERT(td, MA_OWNED); 1362166188Sjeff KASSERT((td->td_inhibitors == 0), 1363166188Sjeff ("sched_add: trying to run inhibited thread")); 1364166188Sjeff KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)), 1365166188Sjeff ("sched_add: bad thread state")); 1366172207Sjeff KASSERT(td->td_flags & TDF_INMEM, 1367172207Sjeff ("sched_add: thread swapped out")); 1368187357Sjeff KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq add", 1369187357Sjeff "prio:%d", td->td_priority, KTR_ATTR_LINKED, 1370187357Sjeff sched_tdname(curthread)); 1371187357Sjeff KTR_POINT1(KTR_SCHED, "thread", sched_tdname(curthread), "wokeup", 1372187357Sjeff KTR_ATTR_LINKED, sched_tdname(td)); 1373235471Spluknet SDT_PROBE4(sched, , , enqueue, td, td->td_proc, NULL, 1374235459Srstone flags & SRQ_PREEMPTED); 1375180879Sjhb 1376170293Sjeff /* 1377170293Sjeff * Now that the thread is moving to the run-queue, set the lock 1378170293Sjeff * to the scheduler's lock. 1379170293Sjeff */ 1380170293Sjeff if (td->td_lock != &sched_lock) { 1381170293Sjeff mtx_lock_spin(&sched_lock); 1382170293Sjeff thread_lock_set(td, &sched_lock); 1383170293Sjeff } 1384166188Sjeff TD_SET_RUNQ(td); 1385164936Sjulian CTR2(KTR_RUNQ, "sched_add: adding td_sched:%p (td:%p) to runq", ts, td); 1386164936Sjulian ts->ts_runq = &runq; 1387134591Sjulian 1388180879Sjhb /* 1389180879Sjhb * If we are yielding (on the way out anyhow) or the thread 1390180879Sjhb * being saved is US, then don't try be smart about preemption 1391180879Sjhb * or kicking off another CPU as it won't help and may hinder. 1392180879Sjhb * In the YIEDLING case, we are about to run whoever is being 1393180879Sjhb * put in the queue anyhow, and in the OURSELF case, we are 1394180879Sjhb * puting ourself on the run queue which also only happens 1395180879Sjhb * when we are about to yield. 1396134591Sjulian */ 1397180879Sjhb if ((flags & SRQ_YIELDING) == 0) { 1398147182Sups if (maybe_preempt(td)) 1399147182Sups return; 1400180879Sjhb } 1401198854Sattilio if ((td->td_flags & TDF_NOLOAD) == 0) 1402139317Sjeff sched_load_add(); 1403177435Sjeff runq_add(ts->ts_runq, td, flags); 1404132118Sjhb maybe_resched(td); 1405104964Sjeff} 1406147182Sups#endif /* SMP */ 1407147182Sups 1408104964Sjeffvoid 1409121127Sjeffsched_rem(struct thread *td) 1410104964Sjeff{ 1411164936Sjulian struct td_sched *ts; 1412121127Sjeff 1413164936Sjulian ts = td->td_sched; 1414172207Sjeff KASSERT(td->td_flags & TDF_INMEM, 1415172207Sjeff ("sched_rem: thread swapped out")); 1416166188Sjeff KASSERT(TD_ON_RUNQ(td), 1417164936Sjulian ("sched_rem: thread not on run queue")); 1418104964Sjeff mtx_assert(&sched_lock, MA_OWNED); 1419187357Sjeff KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq rem", 1420187357Sjeff "prio:%d", td->td_priority, KTR_ATTR_LINKED, 1421187357Sjeff sched_tdname(curthread)); 1422235459Srstone SDT_PROBE3(sched, , , dequeue, td, td->td_proc, NULL); 1423104964Sjeff 1424198854Sattilio if ((td->td_flags & TDF_NOLOAD) == 0) 1425139317Sjeff sched_load_rem(); 1426180923Sjhb#ifdef SMP 1427180923Sjhb if (ts->ts_runq != &runq) 1428180923Sjhb runq_length[ts->ts_runq - runq_pcpu]--; 1429180923Sjhb#endif 1430177435Sjeff runq_remove(ts->ts_runq, td); 1431166188Sjeff TD_SET_CAN_RUN(td); 1432104964Sjeff} 1433104964Sjeff 1434135295Sjulian/* 1435180879Sjhb * Select threads to run. Note that running threads still consume a 1436180879Sjhb * slot. 1437135295Sjulian */ 1438166188Sjeffstruct thread * 1439104964Sjeffsched_choose(void) 1440104964Sjeff{ 1441177435Sjeff struct thread *td; 1442124955Sjeff struct runq *rq; 1443104964Sjeff 1444170293Sjeff mtx_assert(&sched_lock, MA_OWNED); 1445124955Sjeff#ifdef SMP 1446177435Sjeff struct thread *tdcpu; 1447124955Sjeff 1448124955Sjeff rq = &runq; 1449177435Sjeff td = runq_choose_fuzz(&runq, runq_fuzz); 1450177435Sjeff tdcpu = runq_choose(&runq_pcpu[PCPU_GET(cpuid)]); 1451104964Sjeff 1452180879Sjhb if (td == NULL || 1453180879Sjhb (tdcpu != NULL && 1454177435Sjeff tdcpu->td_priority < td->td_priority)) { 1455177435Sjeff CTR2(KTR_RUNQ, "choosing td %p from pcpu runq %d", tdcpu, 1456124955Sjeff PCPU_GET(cpuid)); 1457177435Sjeff td = tdcpu; 1458124955Sjeff rq = &runq_pcpu[PCPU_GET(cpuid)]; 1459180879Sjhb } else { 1460177435Sjeff CTR1(KTR_RUNQ, "choosing td_sched %p from main runq", td); 1461124955Sjeff } 1462124955Sjeff 1463124955Sjeff#else 1464124955Sjeff rq = &runq; 1465177435Sjeff td = runq_choose(&runq); 1466124955Sjeff#endif 1467124955Sjeff 1468177435Sjeff if (td) { 1469180923Sjhb#ifdef SMP 1470180923Sjhb if (td == tdcpu) 1471180923Sjhb runq_length[PCPU_GET(cpuid)]--; 1472180923Sjhb#endif 1473177435Sjeff runq_remove(rq, td); 1474177435Sjeff td->td_flags |= TDF_DIDRUN; 1475104964Sjeff 1476177435Sjeff KASSERT(td->td_flags & TDF_INMEM, 1477172207Sjeff ("sched_choose: thread swapped out")); 1478177435Sjeff return (td); 1479180879Sjhb } 1480166188Sjeff return (PCPU_GET(idlethread)); 1481104964Sjeff} 1482104964Sjeff 1483104964Sjeffvoid 1484177004Sjeffsched_preempt(struct thread *td) 1485177004Sjeff{ 1486235459Srstone 1487235459Srstone SDT_PROBE2(sched, , , surrender, td, td->td_proc); 1488177004Sjeff thread_lock(td); 1489177004Sjeff if (td->td_critnest > 1) 1490177004Sjeff td->td_owepreempt = 1; 1491177004Sjeff else 1492178272Sjeff mi_switch(SW_INVOL | SW_PREEMPT | SWT_PREEMPT, NULL); 1493177004Sjeff thread_unlock(td); 1494177004Sjeff} 1495177004Sjeff 1496177004Sjeffvoid 1497104964Sjeffsched_userret(struct thread *td) 1498104964Sjeff{ 1499104964Sjeff /* 1500104964Sjeff * XXX we cheat slightly on the locking here to avoid locking in 1501104964Sjeff * the usual case. Setting td_priority here is essentially an 1502104964Sjeff * incomplete workaround for not setting it properly elsewhere. 1503104964Sjeff * Now that some interrupt handlers are threads, not setting it 1504104964Sjeff * properly elsewhere can clobber it in the window between setting 1505104964Sjeff * it here and returning to user mode, so don't waste time setting 1506104964Sjeff * it perfectly here. 1507104964Sjeff */ 1508139453Sjhb KASSERT((td->td_flags & TDF_BORROWING) == 0, 1509139453Sjhb ("thread with borrowed priority returning to userland")); 1510163709Sjb if (td->td_priority != td->td_user_pri) { 1511170293Sjeff thread_lock(td); 1512163709Sjb td->td_priority = td->td_user_pri; 1513163709Sjb td->td_base_pri = td->td_user_pri; 1514170293Sjeff thread_unlock(td); 1515163709Sjb } 1516104964Sjeff} 1517107126Sjeff 1518124955Sjeffvoid 1519124955Sjeffsched_bind(struct thread *td, int cpu) 1520124955Sjeff{ 1521164936Sjulian struct td_sched *ts; 1522124955Sjeff 1523208391Sjhb THREAD_LOCK_ASSERT(td, MA_OWNED|MA_NOTRECURSED); 1524208391Sjhb KASSERT(td == curthread, ("sched_bind: can only bind curthread")); 1525124955Sjeff 1526164936Sjulian ts = td->td_sched; 1527124955Sjeff 1528177435Sjeff td->td_flags |= TDF_BOUND; 1529124955Sjeff#ifdef SMP 1530164936Sjulian ts->ts_runq = &runq_pcpu[cpu]; 1531124955Sjeff if (PCPU_GET(cpuid) == cpu) 1532124955Sjeff return; 1533124955Sjeff 1534131473Sjhb mi_switch(SW_VOL, NULL); 1535124955Sjeff#endif 1536124955Sjeff} 1537124955Sjeff 1538124955Sjeffvoid 1539124955Sjeffsched_unbind(struct thread* td) 1540124955Sjeff{ 1541170293Sjeff THREAD_LOCK_ASSERT(td, MA_OWNED); 1542208391Sjhb KASSERT(td == curthread, ("sched_unbind: can only bind curthread")); 1543177435Sjeff td->td_flags &= ~TDF_BOUND; 1544124955Sjeff} 1545124955Sjeff 1546107126Sjeffint 1547145256Sjkoshysched_is_bound(struct thread *td) 1548145256Sjkoshy{ 1549170293Sjeff THREAD_LOCK_ASSERT(td, MA_OWNED); 1550177435Sjeff return (td->td_flags & TDF_BOUND); 1551145256Sjkoshy} 1552145256Sjkoshy 1553159630Sdavidxuvoid 1554159630Sdavidxusched_relinquish(struct thread *td) 1555159630Sdavidxu{ 1556170293Sjeff thread_lock(td); 1557178272Sjeff mi_switch(SW_VOL | SWT_RELINQUISH, NULL); 1558170293Sjeff thread_unlock(td); 1559159630Sdavidxu} 1560159630Sdavidxu 1561145256Sjkoshyint 1562125288Sjeffsched_load(void) 1563125288Sjeff{ 1564125288Sjeff return (sched_tdcnt); 1565125288Sjeff} 1566125288Sjeff 1567125288Sjeffint 1568107126Sjeffsched_sizeof_proc(void) 1569107126Sjeff{ 1570107126Sjeff return (sizeof(struct proc)); 1571107126Sjeff} 1572159630Sdavidxu 1573107126Sjeffint 1574107126Sjeffsched_sizeof_thread(void) 1575107126Sjeff{ 1576164936Sjulian return (sizeof(struct thread) + sizeof(struct td_sched)); 1577107126Sjeff} 1578107137Sjeff 1579107137Sjefffixpt_t 1580121127Sjeffsched_pctcpu(struct thread *td) 1581107137Sjeff{ 1582164936Sjulian struct td_sched *ts; 1583121147Sjeff 1584208787Sjhb THREAD_LOCK_ASSERT(td, MA_OWNED); 1585164936Sjulian ts = td->td_sched; 1586164936Sjulian return (ts->ts_pctcpu); 1587107137Sjeff} 1588159570Sdavidxu 1589242139Strasz#ifdef RACCT 1590242139Strasz/* 1591242139Strasz * Calculates the contribution to the thread cpu usage for the latest 1592242139Strasz * (unfinished) second. 1593242139Strasz */ 1594242139Straszfixpt_t 1595242139Straszsched_pctcpu_delta(struct thread *td) 1596242139Strasz{ 1597242139Strasz struct td_sched *ts; 1598242139Strasz fixpt_t delta; 1599242139Strasz int realstathz; 1600242139Strasz 1601242139Strasz THREAD_LOCK_ASSERT(td, MA_OWNED); 1602242139Strasz ts = td->td_sched; 1603242139Strasz delta = 0; 1604242139Strasz realstathz = stathz ? stathz : hz; 1605242139Strasz if (ts->ts_cpticks != 0) { 1606242139Strasz#if (FSHIFT >= CCPU_SHIFT) 1607242139Strasz delta = (realstathz == 100) 1608242139Strasz ? ((fixpt_t) ts->ts_cpticks) << 1609242139Strasz (FSHIFT - CCPU_SHIFT) : 1610242139Strasz 100 * (((fixpt_t) ts->ts_cpticks) 1611242139Strasz << (FSHIFT - CCPU_SHIFT)) / realstathz; 1612242139Strasz#else 1613242139Strasz delta = ((FSCALE - ccpu) * 1614242139Strasz (ts->ts_cpticks * 1615242139Strasz FSCALE / realstathz)) >> FSHIFT; 1616242139Strasz#endif 1617242139Strasz } 1618242139Strasz 1619242139Strasz return (delta); 1620242139Strasz} 1621242139Strasz#endif 1622242139Strasz 1623159570Sdavidxuvoid 1624212541Smavsched_tick(int cnt) 1625159570Sdavidxu{ 1626159570Sdavidxu} 1627166188Sjeff 1628166188Sjeff/* 1629166188Sjeff * The actual idle process. 1630166188Sjeff */ 1631166188Sjeffvoid 1632166188Sjeffsched_idletd(void *dummy) 1633166188Sjeff{ 1634212455Smav struct pcpuidlestat *stat; 1635166188Sjeff 1636239585Sjhb THREAD_NO_SLEEPING(); 1637212455Smav stat = DPCPU_PTR(idlestat); 1638166188Sjeff for (;;) { 1639166188Sjeff mtx_assert(&Giant, MA_NOTOWNED); 1640166188Sjeff 1641212455Smav while (sched_runnable() == 0) { 1642212455Smav cpu_idle(stat->idlecalls + stat->oldidlecalls > 64); 1643212455Smav stat->idlecalls++; 1644212455Smav } 1645166188Sjeff 1646166188Sjeff mtx_lock_spin(&sched_lock); 1647178272Sjeff mi_switch(SW_VOL | SWT_IDLE, NULL); 1648166188Sjeff mtx_unlock_spin(&sched_lock); 1649166188Sjeff } 1650166188Sjeff} 1651166188Sjeff 1652170293Sjeff/* 1653170293Sjeff * A CPU is entering for the first time or a thread is exiting. 1654170293Sjeff */ 1655170293Sjeffvoid 1656170293Sjeffsched_throw(struct thread *td) 1657170293Sjeff{ 1658170293Sjeff /* 1659170293Sjeff * Correct spinlock nesting. The idle thread context that we are 1660170293Sjeff * borrowing was created so that it would start out with a single 1661170293Sjeff * spin lock (sched_lock) held in fork_trampoline(). Since we've 1662170293Sjeff * explicitly acquired locks in this function, the nesting count 1663170293Sjeff * is now 2 rather than 1. Since we are nested, calling 1664170293Sjeff * spinlock_exit() will simply adjust the counts without allowing 1665170293Sjeff * spin lock using code to interrupt us. 1666170293Sjeff */ 1667170293Sjeff if (td == NULL) { 1668170293Sjeff mtx_lock_spin(&sched_lock); 1669170293Sjeff spinlock_exit(); 1670229429Sjhb PCPU_SET(switchtime, cpu_ticks()); 1671229429Sjhb PCPU_SET(switchticks, ticks); 1672170293Sjeff } else { 1673174629Sjeff lock_profile_release_lock(&sched_lock.lock_object); 1674170293Sjeff MPASS(td->td_lock == &sched_lock); 1675170293Sjeff } 1676170293Sjeff mtx_assert(&sched_lock, MA_OWNED); 1677170293Sjeff KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count")); 1678170293Sjeff cpu_throw(td, choosethread()); /* doesn't return */ 1679170293Sjeff} 1680170293Sjeff 1681170293Sjeffvoid 1682170600Sjeffsched_fork_exit(struct thread *td) 1683170293Sjeff{ 1684170293Sjeff 1685170293Sjeff /* 1686170293Sjeff * Finish setting up thread glue so that it begins execution in a 1687170293Sjeff * non-nested critical section with sched_lock held but not recursed. 1688170293Sjeff */ 1689170600Sjeff td->td_oncpu = PCPU_GET(cpuid); 1690170600Sjeff sched_lock.mtx_lock = (uintptr_t)td; 1691174629Sjeff lock_profile_obtain_lock_success(&sched_lock.lock_object, 1692174629Sjeff 0, 0, __FILE__, __LINE__); 1693170600Sjeff THREAD_LOCK_ASSERT(td, MA_OWNED | MA_NOTRECURSED); 1694170293Sjeff} 1695170293Sjeff 1696187357Sjeffchar * 1697187357Sjeffsched_tdname(struct thread *td) 1698187357Sjeff{ 1699187357Sjeff#ifdef KTR 1700187357Sjeff struct td_sched *ts; 1701187357Sjeff 1702187357Sjeff ts = td->td_sched; 1703187357Sjeff if (ts->ts_name[0] == '\0') 1704187357Sjeff snprintf(ts->ts_name, sizeof(ts->ts_name), 1705187357Sjeff "%s tid %d", td->td_name, td->td_tid); 1706187357Sjeff return (ts->ts_name); 1707187357Sjeff#else 1708187357Sjeff return (td->td_name); 1709187357Sjeff#endif 1710187357Sjeff} 1711187357Sjeff 1712232700Sjhb#ifdef KTR 1713176729Sjeffvoid 1714232700Sjhbsched_clear_tdname(struct thread *td) 1715232700Sjhb{ 1716232700Sjhb struct td_sched *ts; 1717232700Sjhb 1718232700Sjhb ts = td->td_sched; 1719232700Sjhb ts->ts_name[0] = '\0'; 1720232700Sjhb} 1721232700Sjhb#endif 1722232700Sjhb 1723232700Sjhbvoid 1724176729Sjeffsched_affinity(struct thread *td) 1725176729Sjeff{ 1726180923Sjhb#ifdef SMP 1727180923Sjhb struct td_sched *ts; 1728180923Sjhb int cpu; 1729180923Sjhb 1730180923Sjhb THREAD_LOCK_ASSERT(td, MA_OWNED); 1731180923Sjhb 1732180923Sjhb /* 1733180923Sjhb * Set the TSF_AFFINITY flag if there is at least one CPU this 1734180923Sjhb * thread can't run on. 1735180923Sjhb */ 1736180923Sjhb ts = td->td_sched; 1737180923Sjhb ts->ts_flags &= ~TSF_AFFINITY; 1738209059Sjhb CPU_FOREACH(cpu) { 1739180923Sjhb if (!THREAD_CAN_SCHED(td, cpu)) { 1740180923Sjhb ts->ts_flags |= TSF_AFFINITY; 1741180923Sjhb break; 1742180923Sjhb } 1743180923Sjhb } 1744180923Sjhb 1745180923Sjhb /* 1746180923Sjhb * If this thread can run on all CPUs, nothing else to do. 1747180923Sjhb */ 1748180923Sjhb if (!(ts->ts_flags & TSF_AFFINITY)) 1749180923Sjhb return; 1750180923Sjhb 1751180923Sjhb /* Pinned threads and bound threads should be left alone. */ 1752180923Sjhb if (td->td_pinned != 0 || td->td_flags & TDF_BOUND) 1753180923Sjhb return; 1754180923Sjhb 1755180923Sjhb switch (td->td_state) { 1756180923Sjhb case TDS_RUNQ: 1757180923Sjhb /* 1758180923Sjhb * If we are on a per-CPU runqueue that is in the set, 1759180923Sjhb * then nothing needs to be done. 1760180923Sjhb */ 1761180923Sjhb if (ts->ts_runq != &runq && 1762180923Sjhb THREAD_CAN_SCHED(td, ts->ts_runq - runq_pcpu)) 1763180923Sjhb return; 1764180923Sjhb 1765180923Sjhb /* Put this thread on a valid per-CPU runqueue. */ 1766180923Sjhb sched_rem(td); 1767180923Sjhb sched_add(td, SRQ_BORING); 1768180923Sjhb break; 1769180923Sjhb case TDS_RUNNING: 1770180923Sjhb /* 1771180923Sjhb * See if our current CPU is in the set. If not, force a 1772180923Sjhb * context switch. 1773180923Sjhb */ 1774180923Sjhb if (THREAD_CAN_SCHED(td, td->td_oncpu)) 1775180923Sjhb return; 1776180923Sjhb 1777180923Sjhb td->td_flags |= TDF_NEEDRESCHED; 1778180923Sjhb if (td != curthread) 1779210939Sjhb ipi_cpu(cpu, IPI_AST); 1780180923Sjhb break; 1781180923Sjhb default: 1782180923Sjhb break; 1783180923Sjhb } 1784180923Sjhb#endif 1785176729Sjeff} 1786