/*-
 * SPDX-License-Identifier: (BSD-4-Clause AND BSD-2-Clause)
 *
 * Copyright (c) 1996, 1997
 *      HD Associates, Inc.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by HD Associates, Inc
 *      and Jukka Antero Ukkonen.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY HD ASSOCIATES AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL HD ASSOCIATES OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2002-2008, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _SCHED_H_
#define	_SCHED_H_

#ifdef _KERNEL
/*
 * General scheduling info.
 *
 * sched_load:
 *	Total runnable non-ithread threads in the system.
 *
 * sched_runnable:
 *	Runnable threads for this processor.
 */
int	sched_load(void);
int	sched_rr_interval(void);
int	sched_runnable(void);
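
/*
 * Usage sketch (illustrative only; "halt_until_interrupt" is a hypothetical
 * placeholder, not a real primitive): idle loops typically poll
 * sched_runnable() and only stop the CPU while nothing is runnable:
 *
 *	while (sched_runnable() == 0)
 *		halt_until_interrupt();
 */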

/*
 * Proc related scheduling hooks.
 */
void	sched_exit(struct proc *p, struct thread *childtd);
void	sched_fork(struct thread *td, struct thread *childtd);
void	sched_fork_exit(struct thread *td);
void	sched_class(struct thread *td, int class);
void	sched_nice(struct proc *p, int nice);

/*
 * Threads are switched in and out, block on resources, have temporary
 * priorities inherited from their procs, and use up cpu time.
 */
void	sched_ap_entry(void);
void	sched_exit_thread(struct thread *td, struct thread *child);
u_int	sched_estcpu(struct thread *td);
void	sched_fork_thread(struct thread *td, struct thread *child);
void	sched_ithread_prio(struct thread *td, u_char prio);
void	sched_lend_prio(struct thread *td, u_char prio);
void	sched_lend_user_prio(struct thread *td, u_char pri);
void	sched_lend_user_prio_cond(struct thread *td, u_char pri);
fixpt_t	sched_pctcpu(struct thread *td);
void	sched_prio(struct thread *td, u_char prio);
void	sched_sleep(struct thread *td, int prio);
void	sched_switch(struct thread *td, int flags);
void	sched_throw(struct thread *td);
void	sched_unlend_prio(struct thread *td, u_char prio);
void	sched_user_prio(struct thread *td, u_char prio);
void	sched_userret_slowpath(struct thread *td);
#ifdef	RACCT
#ifdef	SCHED_4BSD
fixpt_t	sched_pctcpu_delta(struct thread *td);
#endif
#endif

static inline void
sched_userret(struct thread *td)
{

	/*
	 * XXX we cheat slightly on the locking here to avoid locking in
	 * the usual case.  Setting td_priority here is essentially an
	 * incomplete workaround for not setting it properly elsewhere.
	 * Now that some interrupt handlers are threads, not setting it
	 * properly elsewhere can clobber it in the window between setting
	 * it here and returning to user mode, so don't waste time setting
	 * it perfectly here.
	 */
	KASSERT((td->td_flags & TDF_BORROWING) == 0,
	    ("thread with borrowed priority returning to userland"));
	if (__predict_false(td->td_priority != td->td_user_pri))
		sched_userret_slowpath(td);
}

/*
 * Threads are moved on and off of run queues
 */
void	sched_add(struct thread *td, int flags);
struct thread *sched_choose(void);
void	sched_clock(struct thread *td, int cnt);
void	sched_idletd(void *);
void	sched_preempt(struct thread *td);
void	sched_relinquish(struct thread *td);
void	sched_rem(struct thread *td);
void	sched_wakeup(struct thread *td, int srqflags);

/*
 * Binding makes cpu affinity permanent while pinning is used to temporarily
 * hold a thread on a particular CPU.
 */
void	sched_bind(struct thread *td, int cpu);
static __inline void sched_pin(void);
void	sched_unbind(struct thread *td);
static __inline void sched_unpin(void);
int	sched_is_bound(struct thread *td);
void	sched_affinity(struct thread *td);
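
/*
 * Usage sketch (illustrative, not a definitive recipe; "cpu" is assumed to
 * be a valid CPU id): callers generally take the thread lock around the
 * bind and unbind, and perform the CPU-specific work in between:
 *
 *	thread_lock(curthread);
 *	sched_bind(curthread, cpu);
 *	thread_unlock(curthread);
 *	... work that must execute on "cpu" ...
 *	thread_lock(curthread);
 *	sched_unbind(curthread);
 *	thread_unlock(curthread);
 */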

/*
 * These procedures tell the process data structure allocation code how
 * many bytes to actually allocate.
 */
int	sched_sizeof_proc(void);
int	sched_sizeof_thread(void);

/*
 * This routine provides a consistent thread name for use with KTR graphing
 * functions.
 */
char	*sched_tdname(struct thread *td);
#ifdef KTR
void	sched_clear_tdname(struct thread *td);
#endif

static __inline void
sched_pin(void)
{
	curthread->td_pinned++;
	atomic_interrupt_fence();
}

static __inline void
sched_unpin(void)
{
	atomic_interrupt_fence();
	curthread->td_pinned--;
}
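
/*
 * Usage sketch (illustrative): pin/unpin calls nest via td_pinned, so a
 * caller simply brackets the region that must not migrate between CPUs:
 *
 *	sched_pin();
 *	... touch per-CPU (e.g. DPCPU) state on the current CPU ...
 *	sched_unpin();
 */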

/* sched_add arguments (formerly setrunqueue) */
#define	SRQ_BORING	0x0000		/* No special circumstances. */
#define	SRQ_YIELDING	0x0001		/* We are yielding (from mi_switch). */
#define	SRQ_OURSELF	0x0002		/* It is ourself (from mi_switch). */
#define	SRQ_INTR	0x0004		/* It is probably urgent. */
#define	SRQ_PREEMPTED	0x0008		/* Has been preempted; be kind. */
#define	SRQ_BORROWING	0x0010		/* Priority updated due to prio_lend. */
#define	SRQ_HOLD	0x0020		/* Return holding original td lock. */
#define	SRQ_HOLDTD	0x0040		/* Return holding td lock. */
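
/*
 * Illustrative examples (the right flag combination depends entirely on the
 * caller's context): a plain insertion onto a run queue passes SRQ_BORING,
 * while a caller that needs the thread lock back afterwards adds SRQ_HOLDTD:
 *
 *	sched_add(td, SRQ_BORING);
 *	sched_add(td, SRQ_INTR | SRQ_HOLDTD);
 */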

/* Scheduler stats. */
#ifdef SCHED_STATS
DPCPU_DECLARE(long, sched_switch_stats[SWT_COUNT]);

#define	SCHED_STAT_DEFINE_VAR(name, ptr, descr)			\
static void name ## _add_proc(void *dummy __unused)			\
{									\
									\
	SYSCTL_ADD_PROC(NULL,						\
	    SYSCTL_STATIC_CHILDREN(_kern_sched_stats), OID_AUTO,	\
	    #name, CTLTYPE_LONG|CTLFLAG_RD|CTLFLAG_MPSAFE,		\
	    ptr, 0, sysctl_dpcpu_long, "LU", descr);			\
}									\
SYSINIT(name, SI_SUB_LAST, SI_ORDER_MIDDLE, name ## _add_proc, NULL);

#define	SCHED_STAT_DEFINE(name, descr)					\
    DPCPU_DEFINE(unsigned long, name);					\
    SCHED_STAT_DEFINE_VAR(name, &DPCPU_NAME(name), descr)
/*
 * Sched stats are always incremented in critical sections so no atomic
 * is necessary to increment them.
 */
#define	SCHED_STAT_INC(var)	DPCPU_GET(var)++;
#else
#define	SCHED_STAT_DEFINE_VAR(name, ptr, descr)
#define	SCHED_STAT_DEFINE(name, descr)
#define	SCHED_STAT_INC(var)	(void)0
#endif
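
/*
 * Usage sketch (illustrative; "foo_count" is a hypothetical counter name): a
 * scheduler implementation defines a per-CPU statistic once at file scope
 * and then bumps it from within a critical section:
 *
 *	SCHED_STAT_DEFINE(foo_count, "Number of foo events");
 *	...
 *	SCHED_STAT_INC(foo_count);
 */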

/*
 * Fixup scheduler state for proc0 and thread0
 */
void schedinit(void);

/*
 * Fixup scheduler state for secondary APs
 */
void schedinit_ap(void);
#endif /* _KERNEL */

/* POSIX 1003.1b Process Scheduling */

/*
 * POSIX scheduling policies
 */
#define	SCHED_FIFO	1
#define	SCHED_OTHER	2
#define	SCHED_RR	3

struct sched_param {
	int	sched_priority;
};

/*
 * POSIX scheduling declarations for userland.
 */
#ifndef _KERNEL
#include <sys/cdefs.h>
#include <sys/_timespec.h>
#include <sys/_types.h>

#ifndef _PID_T_DECLARED
typedef	__pid_t		pid_t;
#define	_PID_T_DECLARED
#endif

__BEGIN_DECLS
int	sched_get_priority_max(int);
int	sched_get_priority_min(int);
int	sched_getparam(pid_t, struct sched_param *);
int	sched_getscheduler(pid_t);
int	sched_rr_get_interval(pid_t, struct timespec *);
int	sched_setparam(pid_t, const struct sched_param *);
int	sched_setscheduler(pid_t, int, const struct sched_param *);
int	sched_yield(void);
__END_DECLS
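
/*
 * Usage sketch (illustrative only; error handling and the privileges needed
 * for real-time policies are omitted, and <unistd.h> is assumed for
 * getpid()): switch the calling process to round-robin scheduling at the
 * lowest real-time priority.
 *
 *	struct sched_param sp;
 *
 *	sp.sched_priority = sched_get_priority_min(SCHED_RR);
 *	if (sched_setscheduler(getpid(), SCHED_RR, &sp) != 0)
 *		... handle error ...
 */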

#endif /* !_KERNEL */
#endif /* !_SCHED_H_ */