/*-
 * Copyright (c) 2004 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Implementation of sleep queues used to hold the queue of threads blocked
 * on a wait channel.  Sleep queues are different from turnstiles in that
 * wait channels are not owned by anyone, so there is no priority propagation.
 * Sleep queues can also provide a timeout and can be interrupted by
 * signals.  That said, there are several similarities between the turnstile
 * and sleep queue implementations.  (Note: turnstiles were implemented
 * first.)  For example, both use a hash table of the same size where each
 * bucket is referred to as a "chain" that contains both a spin lock and
 * a linked list of queues.  An individual queue is located by using a hash
 * to pick a chain, locking the chain, and then walking the chain searching
 * for the queue.  This means that a wait channel object does not need to
 * embed its queue head just as locks do not embed their turnstile queue
 * head.  Threads also carry around a sleep queue that they lend to the
 * wait channel when blocking.  Just as in turnstiles, the queue includes
 * a free list of the sleep queues of other threads blocked on the same
 * wait channel in the case of multiple waiters.
 *
 * Some additional functionality provided by sleep queues includes the
 * ability to set a timeout.  The timeout is managed using a per-thread
 * callout that resumes a thread if it is asleep.  A thread may also
 * catch signals while it is asleep (aka an interruptible sleep).  The
 * signal code uses sleepq_abort() to interrupt a sleeping thread.  Finally,
 * sleep queues also provide some extra assertions.  One is not allowed to
 * mix the sleep/wakeup and cv APIs for a given wait channel.  Also, one
 * must consistently use the same lock to synchronize with a wait channel,
 * though this check is currently only a warning for sleep/wakeup due to
 * pre-existing abuse of that API.  The same lock must also be held when
 * awakening threads, though that is currently only enforced for condition
 * variables.
 */
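
/*
 * A minimal sketch of the blocking side of this API, assuming a
 * hypothetical wait channel "chan" and interlock "lk" (the canonical
 * consumers, such as _sleep() and the condition variable code, layer
 * timeout and signal handling on top of this):
 *
 *	sleepq_lock(chan);
 *	sleepq_add(chan, lk, "examp", SLEEPQ_SLEEP, 0);
 *	sleepq_wait(chan, 0);
 *
 * sleepq_wait() blocks until another thread calls sleepq_signal() or
 * sleepq_broadcast() on "chan"; the matching wakeup-side sketch appears
 * after sleepq_broadcast() below.
 */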

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/kern/subr_sleepqueue.c 324800 2017-10-20 10:06:02Z hselasky $");

#include "opt_sleepqueue_profiling.h"
#include "opt_ddb.h"
#include "opt_kdtrace.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/sleepqueue.h>
#include <sys/sysctl.h>

#include <vm/uma.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

/*
 * Constants for the hash table of sleep queue chains.
 * SC_TABLESIZE must be a power of two for SC_MASK to work properly.
 */
#define	SC_TABLESIZE	256			/* Must be power of 2. */
#define	SC_MASK		(SC_TABLESIZE - 1)
#define	SC_SHIFT	8
#define	SC_HASH(wc)	((((uintptr_t)(wc) >> SC_SHIFT) ^ (uintptr_t)(wc)) & \
			    SC_MASK)
#define	SC_LOOKUP(wc)	&sleepq_chains[SC_HASH(wc)]
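
/*
 * As a worked example of the hash, a hypothetical wait channel at
 * address 0xc0de1234 gives:
 *
 *	((0xc0de1234 >> 8) ^ 0xc0de1234) & 0xff == (0x12 ^ 0x34) & 0xff
 *						== 0x26
 *
 * so that channel maps to sleepq_chains[0x26].  The XOR folds in bits
 * above the low byte so that objects allocated at 256-byte strides do
 * not all land in the same chain.
 */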
#define NR_SLEEPQS      2
/*
 * There are two different lists of sleep queues.  Both lists are connected
 * via the sq_hash entries.  The first list is the sleep queue chain list
 * that a sleep queue is on when it is attached to a wait channel.  The
 * second list is the free list hung off of a sleep queue that is attached
 * to a wait channel.
 *
 * Each sleep queue also contains the wait channel it is attached to, the
 * list of threads blocked on that wait channel, flags specific to the
 * wait channel, and the lock used to synchronize with a wait channel.
 * The flags are used to catch mismatches between the various consumers
 * of the sleep queue API (e.g. sleep/wakeup and condition variables).
 * The lock pointer is only used when invariants are enabled for various
 * debugging checks.
 *
 * Locking key:
 *  c - sleep queue chain lock
 */
struct sleepqueue {
	TAILQ_HEAD(, thread) sq_blocked[NR_SLEEPQS];	/* (c) Blocked threads. */
	u_int sq_blockedcnt[NR_SLEEPQS];	/* (c) N. of blocked threads. */
	LIST_ENTRY(sleepqueue) sq_hash;		/* (c) Chain and free list. */
	LIST_HEAD(, sleepqueue) sq_free;	/* (c) Free queues. */
	void	*sq_wchan;			/* (c) Wait channel. */
	int	sq_type;			/* (c) Queue type. */
#ifdef INVARIANTS
	struct lock_object *sq_lock;		/* (c) Associated lock. */
#endif
};
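
/*
 * To illustrate the two lists: if three threads block on the same wait
 * channel, the first thread's sleep queue is linked into the chain's
 * sc_queues via sq_hash and holds all three threads on sq_blocked;
 * the second and third threads' now-spare sleep queues hang off that
 * queue's sq_free list, again linked via their sq_hash entries.  Each
 * resumed waiter takes one sleep queue back with it, the last waiter
 * taking the queue that is on the chain.
 */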

struct sleepqueue_chain {
	LIST_HEAD(, sleepqueue) sc_queues;	/* List of sleep queues. */
	struct mtx sc_lock;			/* Spin lock for this chain. */
#ifdef SLEEPQUEUE_PROFILING
	u_int	sc_depth;			/* Length of sc_queues. */
	u_int	sc_max_depth;			/* Max length of sc_queues. */
#endif
};

#ifdef SLEEPQUEUE_PROFILING
u_int sleepq_max_depth;
static SYSCTL_NODE(_debug, OID_AUTO, sleepq, CTLFLAG_RD, 0, "sleepq profiling");
static SYSCTL_NODE(_debug_sleepq, OID_AUTO, chains, CTLFLAG_RD, 0,
    "sleepq chain stats");
SYSCTL_UINT(_debug_sleepq, OID_AUTO, max_depth, CTLFLAG_RD, &sleepq_max_depth,
    0, "maximum depth reached by any single chain");

static void	sleepq_profile(const char *wmesg);
static int	prof_enabled;
#endif
static struct sleepqueue_chain sleepq_chains[SC_TABLESIZE];
static uma_zone_t sleepq_zone;

/*
 * Prototypes for non-exported routines.
 */
static int	sleepq_catch_signals(void *wchan, int pri);
static int	sleepq_check_signals(void);
static int	sleepq_check_timeout(void);
#ifdef INVARIANTS
static void	sleepq_dtor(void *mem, int size, void *arg);
#endif
static int	sleepq_init(void *mem, int size, int flags);
static int	sleepq_resume_thread(struct sleepqueue *sq, struct thread *td,
		    int pri);
static void	sleepq_switch(void *wchan, int pri);
static void	sleepq_timeout(void *arg);

SDT_PROBE_DECLARE(sched, , , sleep);
SDT_PROBE_DECLARE(sched, , , wakeup);

/*
 * Early initialization of sleep queues that is called from the sleepinit()
 * SYSINIT.
 */
void
init_sleepqueues(void)
{
#ifdef SLEEPQUEUE_PROFILING
	struct sysctl_oid *chain_oid;
	char chain_name[10];
#endif
	int i;

	for (i = 0; i < SC_TABLESIZE; i++) {
		LIST_INIT(&sleepq_chains[i].sc_queues);
		mtx_init(&sleepq_chains[i].sc_lock, "sleepq chain", NULL,
		    MTX_SPIN | MTX_RECURSE);
#ifdef SLEEPQUEUE_PROFILING
		snprintf(chain_name, sizeof(chain_name), "%d", i);
		chain_oid = SYSCTL_ADD_NODE(NULL,
		    SYSCTL_STATIC_CHILDREN(_debug_sleepq_chains), OID_AUTO,
		    chain_name, CTLFLAG_RD, NULL, "sleepq chain stats");
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "depth", CTLFLAG_RD, &sleepq_chains[i].sc_depth, 0, NULL);
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "max_depth", CTLFLAG_RD, &sleepq_chains[i].sc_max_depth, 0,
		    NULL);
#endif
	}
	sleepq_zone = uma_zcreate("SLEEPQUEUE", sizeof(struct sleepqueue),
#ifdef INVARIANTS
	    NULL, sleepq_dtor, sleepq_init, NULL, UMA_ALIGN_CACHE, 0);
#else
	    NULL, NULL, sleepq_init, NULL, UMA_ALIGN_CACHE, 0);
#endif

	thread0.td_sleepqueue = sleepq_alloc();
}

/*
 * Get a sleep queue for a new thread.
 */
struct sleepqueue *
sleepq_alloc(void)
{

	return (uma_zalloc(sleepq_zone, M_WAITOK));
}

/*
 * Free a sleep queue when a thread is destroyed.
 */
void
sleepq_free(struct sleepqueue *sq)
{

	uma_zfree(sleepq_zone, sq);
}

/*
 * Lock the sleep queue chain associated with the specified wait channel.
 */
void
sleepq_lock(void *wchan)
{
	struct sleepqueue_chain *sc;

	sc = SC_LOOKUP(wchan);
	mtx_lock_spin(&sc->sc_lock);
}

/*
 * Look up the sleep queue associated with a given wait channel in the hash
 * table.  The caller must hold the lock on the associated sleep queue
 * chain.  If no queue is found in the table, NULL is returned.
 */
struct sleepqueue *
sleepq_lookup(void *wchan)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;

	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
		if (sq->sq_wchan == wchan)
			return (sq);
	return (NULL);
}

/*
 * Unlock the sleep queue chain associated with a given wait channel.
 */
void
sleepq_release(void *wchan)
{
	struct sleepqueue_chain *sc;

	sc = SC_LOOKUP(wchan);
	mtx_unlock_spin(&sc->sc_lock);
}

/*
 * Places the current thread on the sleep queue for the specified wait
 * channel.  If INVARIANTS is enabled, then it associates the passed in
 * lock with the sleepq to make sure it is held when that sleep queue is
 * woken up.
 */
void
sleepq_add(void *wchan, struct lock_object *lock, const char *wmesg, int flags,
    int queue)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(td->td_sleepqueue != NULL);
	MPASS(wchan != NULL);
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));

	/* If this thread is not allowed to sleep, die a horrible death. */
	KASSERT(td->td_no_sleeping == 0,
	    ("%s: td %p to sleep on wchan %p with sleeping prohibited",
	    __func__, td, wchan));

	/* Look up the sleep queue associated with the wait channel 'wchan'. */
	sq = sleepq_lookup(wchan);

	/*
	 * If the wait channel does not already have a sleep queue, use
	 * this thread's sleep queue.  Otherwise, insert the current thread
	 * into the sleep queue already in use by this wait channel.
	 */
	if (sq == NULL) {
#ifdef INVARIANTS
		int i;

		sq = td->td_sleepqueue;
		for (i = 0; i < NR_SLEEPQS; i++) {
			KASSERT(TAILQ_EMPTY(&sq->sq_blocked[i]),
			    ("thread's sleep queue %d is not empty", i));
			KASSERT(sq->sq_blockedcnt[i] == 0,
			    ("thread's sleep queue %d count mismatches", i));
		}
		KASSERT(LIST_EMPTY(&sq->sq_free),
		    ("thread's sleep queue has a non-empty free list"));
		KASSERT(sq->sq_wchan == NULL, ("stale sq_wchan pointer"));
		sq->sq_lock = lock;
#endif
#ifdef SLEEPQUEUE_PROFILING
		sc->sc_depth++;
		if (sc->sc_depth > sc->sc_max_depth) {
			sc->sc_max_depth = sc->sc_depth;
			if (sc->sc_max_depth > sleepq_max_depth)
				sleepq_max_depth = sc->sc_max_depth;
		}
#endif
		sq = td->td_sleepqueue;
		LIST_INSERT_HEAD(&sc->sc_queues, sq, sq_hash);
		sq->sq_wchan = wchan;
		sq->sq_type = flags & SLEEPQ_TYPE;
	} else {
		MPASS(wchan == sq->sq_wchan);
		MPASS(lock == sq->sq_lock);
		MPASS((flags & SLEEPQ_TYPE) == sq->sq_type);
		LIST_INSERT_HEAD(&sq->sq_free, td->td_sleepqueue, sq_hash);
	}
	thread_lock(td);
	TAILQ_INSERT_TAIL(&sq->sq_blocked[queue], td, td_slpq);
	sq->sq_blockedcnt[queue]++;
	td->td_sleepqueue = NULL;
	td->td_sqqueue = queue;
	td->td_wchan = wchan;
	td->td_wmesg = wmesg;
	if (flags & SLEEPQ_INTERRUPTIBLE) {
		td->td_flags |= TDF_SINTR;
		td->td_flags &= ~TDF_SLEEPABORT;
	}
	thread_unlock(td);
}

/*
 * Sets a timeout that will remove the current thread from the specified
 * sleep queue at the specified time if the thread has not already been
 * awakened.
 */
void
sleepq_set_timeout_sbt(void *wchan, sbintime_t sbt, sbintime_t pr,
    int flags)
{
	struct sleepqueue_chain *sc;
	struct thread *td;
	sbintime_t pr1;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(TD_ON_SLEEPQ(td));
	MPASS(td->td_sleepqueue == NULL);
	MPASS(wchan != NULL);
	KASSERT(td->td_sleeptimo == 0, ("td %d %p td_sleeptimo %jx",
	    td->td_tid, td, (uintmax_t)td->td_sleeptimo));
	thread_lock(td);
	callout_when(sbt, pr, flags, &td->td_sleeptimo, &pr1);
	thread_unlock(td);
	callout_reset_sbt_on(&td->td_slpcallout, td->td_sleeptimo, pr1,
	    sleepq_timeout, td, PCPU_GET(cpuid), flags | C_PRECALC |
	    C_DIRECT_EXEC);
}
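
/*
 * For tick-based callers, sys/sleepqueue.h provides a sleepq_set_timeout()
 * wrapper macro that converts ticks to an sbintime_t; it is roughly
 * (see the header for the authoritative definition):
 *
 *	#define	sleepq_set_timeout(wchan, timo)				\
 *	    sleepq_set_timeout_sbt((wchan), tick_sbt * (timo), 0,	\
 *	    C_HARDCLOCK)
 */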

/*
 * Return the number of actual sleepers for the specified queue.
 */
u_int
sleepq_sleepcnt(void *wchan, int queue)
{
	struct sleepqueue *sq;

	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
	sq = sleepq_lookup(wchan);
	if (sq == NULL)
		return (0);
	return (sq->sq_blockedcnt[queue]);
}

/*
 * Marks the pending sleep of the current thread as interruptible and
 * makes an initial check for pending signals before putting a thread
 * to sleep. Enters and exits with the thread lock held.  Thread lock
 * may have transitioned from the sleepq lock to a run lock.
 */
static int
sleepq_catch_signals(void *wchan, int pri)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;
	struct proc *p;
	struct sigacts *ps;
	int sig, ret;

	ret = 0;
	td = curthread;
	p = curproc;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(wchan != NULL);
	if ((td->td_pflags & TDP_WAKEUP) != 0) {
		td->td_pflags &= ~TDP_WAKEUP;
		ret = EINTR;
		thread_lock(td);
		goto out;
	}

	/*
	 * See if there are any pending signals or suspension requests for this
	 * thread.  If not, we can switch immediately.
	 */
	thread_lock(td);
	if ((td->td_flags & (TDF_NEEDSIGCHK | TDF_NEEDSUSPCHK)) != 0) {
		thread_unlock(td);
		mtx_unlock_spin(&sc->sc_lock);
		CTR3(KTR_PROC, "sleepq catching signals: thread %p (pid %ld, %s)",
			(void *)td, (long)p->p_pid, td->td_name);
		PROC_LOCK(p);
		/*
		 * Check for suspension first. Checking for signals and then
		 * suspending could result in a missed signal, since a signal
		 * can be delivered while this thread is suspended.
		 */
		if ((td->td_flags & TDF_NEEDSUSPCHK) != 0) {
			ret = thread_suspend_check(1);
			MPASS(ret == 0 || ret == EINTR || ret == ERESTART);
			if (ret != 0) {
				PROC_UNLOCK(p);
				mtx_lock_spin(&sc->sc_lock);
				thread_lock(td);
				goto out;
			}
		}
		if ((td->td_flags & TDF_NEEDSIGCHK) != 0) {
			ps = p->p_sigacts;
			mtx_lock(&ps->ps_mtx);
			sig = cursig(td);
			if (sig != 0)
				ret = SIGISMEMBER(ps->ps_sigintr, sig) ?
				    EINTR : ERESTART;
			mtx_unlock(&ps->ps_mtx);
		}
		/*
		 * Lock the per-process spinlock prior to dropping the PROC_LOCK
		 * to avoid a signal delivery race.  PROC_LOCK, PROC_SLOCK, and
		 * thread_lock() are currently held in tdsendsignal().
		 */
		PROC_SLOCK(p);
		mtx_lock_spin(&sc->sc_lock);
		PROC_UNLOCK(p);
		thread_lock(td);
		PROC_SUNLOCK(p);
	}
	if (ret == 0) {
		sleepq_switch(wchan, pri);
		return (0);
	}
out:
	/*
	 * There were pending signals and this thread is still
	 * on the sleep queue, so remove it from the sleep queue.
	 */
	if (TD_ON_SLEEPQ(td)) {
		sq = sleepq_lookup(wchan);
		if (sleepq_resume_thread(sq, td, 0)) {
#ifdef INVARIANTS
			/*
			 * This thread hasn't gone to sleep yet, so it
			 * should not be swapped out.
			 */
			panic("not waking up swapper");
#endif
		}
	}
	mtx_unlock_spin(&sc->sc_lock);
	MPASS(td->td_lock != &sc->sc_lock);
	return (ret);
}

/*
 * Switches to another thread if we are still asleep on a sleep queue.
 * Returns with thread lock.
 */
static void
sleepq_switch(void *wchan, int pri)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);

	/*
	 * If we have a sleep queue, then we've already been woken up, so
	 * just return.
	 */
	if (td->td_sleepqueue != NULL) {
		mtx_unlock_spin(&sc->sc_lock);
		return;
	}

	/*
	 * If TDF_TIMEOUT is set, then our sleep has been timed out
	 * already but we are still on the sleep queue, so dequeue the
	 * thread and return.
	 */
	if (td->td_flags & TDF_TIMEOUT) {
		MPASS(TD_ON_SLEEPQ(td));
		sq = sleepq_lookup(wchan);
		if (sleepq_resume_thread(sq, td, 0)) {
#ifdef INVARIANTS
			/*
			 * This thread hasn't gone to sleep yet, so it
			 * should not be swapped out.
			 */
			panic("not waking up swapper");
#endif
		}
		mtx_unlock_spin(&sc->sc_lock);
		return;
	}
#ifdef SLEEPQUEUE_PROFILING
	if (prof_enabled)
		sleepq_profile(td->td_wmesg);
#endif
	MPASS(td->td_sleepqueue == NULL);
	sched_sleep(td, pri);
	thread_lock_set(td, &sc->sc_lock);
	SDT_PROBE0(sched, , , sleep);
	TD_SET_SLEEPING(td);
	mi_switch(SW_VOL | SWT_SLEEPQ, NULL);
	KASSERT(TD_IS_RUNNING(td), ("running but not TDS_RUNNING"));
	CTR3(KTR_PROC, "sleepq resume: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);
}

/*
 * Check to see if we timed out.
 */
static int
sleepq_check_timeout(void)
{
	struct thread *td;
	int res;

	td = curthread;
	THREAD_LOCK_ASSERT(td, MA_OWNED);

	/*
	 * If TDF_TIMEOUT is set, we timed out.  But recheck
	 * td_sleeptimo anyway.
	 */
	res = 0;
	if (td->td_sleeptimo != 0) {
		if (td->td_sleeptimo <= sbinuptime())
			res = EWOULDBLOCK;
		td->td_sleeptimo = 0;
	}
	if (td->td_flags & TDF_TIMEOUT)
		td->td_flags &= ~TDF_TIMEOUT;
	else
		/*
		 * We ignore the situation where the timeout subsystem was
		 * unable to stop our callout.  The struct thread is
		 * type-stable, so the callout will use the correct
		 * memory when running.  The checks of the
		 * td_sleeptimo value in this function and in
		 * sleepq_timeout() ensure that the thread does not
		 * get spurious wakeups, even if the callout was reset
		 * or the thread reused.
		 */
		callout_stop(&td->td_slpcallout);
	return (res);
}

/*
 * Check to see if we were awoken by a signal.
 */
static int
sleepq_check_signals(void)
{
	struct thread *td;

	td = curthread;
	THREAD_LOCK_ASSERT(td, MA_OWNED);

	/* We are no longer in an interruptible sleep. */
	if (td->td_flags & TDF_SINTR)
		td->td_flags &= ~TDF_SINTR;

	if (td->td_flags & TDF_SLEEPABORT) {
		td->td_flags &= ~TDF_SLEEPABORT;
		return (td->td_intrval);
	}

	return (0);
}

/*
 * Block the current thread until it is awakened from its sleep queue.
 */
void
sleepq_wait(void *wchan, int pri)
{
	struct thread *td;

	td = curthread;
	MPASS(!(td->td_flags & TDF_SINTR));
	thread_lock(td);
	sleepq_switch(wchan, pri);
	thread_unlock(td);
}

/*
 * Block the current thread until it is awakened from its sleep queue
 * or it is interrupted by a signal.
 */
int
sleepq_wait_sig(void *wchan, int pri)
{
	int rcatch;
	int rval;

	rcatch = sleepq_catch_signals(wchan, pri);
	rval = sleepq_check_signals();
	thread_unlock(curthread);
	if (rcatch)
		return (rcatch);
	return (rval);
}

/*
 * Block the current thread until it is awakened from its sleep queue
 * or it times out while waiting.
 */
int
sleepq_timedwait(void *wchan, int pri)
{
	struct thread *td;
	int rval;

	td = curthread;
	MPASS(!(td->td_flags & TDF_SINTR));
	thread_lock(td);
	sleepq_switch(wchan, pri);
	rval = sleepq_check_timeout();
	thread_unlock(td);

	return (rval);
}

/*
 * Block the current thread until it is awakened from its sleep queue,
 * it is interrupted by a signal, or it times out waiting to be awakened.
 */
int
sleepq_timedwait_sig(void *wchan, int pri)
{
	int rcatch, rvalt, rvals;

	rcatch = sleepq_catch_signals(wchan, pri);
	rvalt = sleepq_check_timeout();
	rvals = sleepq_check_signals();
	thread_unlock(curthread);
	if (rcatch)
		return (rcatch);
	if (rvals)
		return (rvals);
	return (rvalt);
}

/*
 * Returns the type of the sleep queue given a wait channel.
 */
int
sleepq_type(void *wchan)
{
	struct sleepqueue *sq;
	int type;

	MPASS(wchan != NULL);

	sleepq_lock(wchan);
	sq = sleepq_lookup(wchan);
	if (sq == NULL) {
		sleepq_release(wchan);
		return (-1);
	}
	type = sq->sq_type;
	sleepq_release(wchan);
	return (type);
}

/*
 * Removes a thread from a sleep queue and makes it
 * runnable.
 */
static int
sleepq_resume_thread(struct sleepqueue *sq, struct thread *td, int pri)
{
	struct sleepqueue_chain *sc;

	MPASS(td != NULL);
	MPASS(sq->sq_wchan != NULL);
	MPASS(td->td_wchan == sq->sq_wchan);
	MPASS(td->td_sqqueue < NR_SLEEPQS && td->td_sqqueue >= 0);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	sc = SC_LOOKUP(sq->sq_wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);

	SDT_PROBE2(sched, , , wakeup, td, td->td_proc);

	/* Remove the thread from the queue. */
	sq->sq_blockedcnt[td->td_sqqueue]--;
	TAILQ_REMOVE(&sq->sq_blocked[td->td_sqqueue], td, td_slpq);

	/*
	 * Get a sleep queue for this thread.  If this is the last waiter,
	 * use the queue itself and take it out of the chain; otherwise,
	 * remove a queue from the free list.
	 */
	if (LIST_EMPTY(&sq->sq_free)) {
		td->td_sleepqueue = sq;
#ifdef INVARIANTS
		sq->sq_wchan = NULL;
#endif
#ifdef SLEEPQUEUE_PROFILING
		sc->sc_depth--;
#endif
	} else
		td->td_sleepqueue = LIST_FIRST(&sq->sq_free);
	LIST_REMOVE(td->td_sleepqueue, sq_hash);

	td->td_wmesg = NULL;
	td->td_wchan = NULL;
	td->td_flags &= ~TDF_SINTR;

	CTR3(KTR_PROC, "sleepq_wakeup: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, td->td_name);

	/* Adjust priority if requested. */
	MPASS(pri == 0 || (pri >= PRI_MIN && pri <= PRI_MAX));
	if (pri != 0 && td->td_priority > pri &&
	    PRI_BASE(td->td_pri_class) == PRI_TIMESHARE)
		sched_prio(td, pri);

	/*
	 * Note that thread td might not be sleeping if it is running
	 * sleepq_catch_signals() on another CPU or is blocked on its
	 * proc lock to check signals.  There's no need to mark the
	 * thread runnable in that case.
	 */
	if (TD_IS_SLEEPING(td)) {
		TD_CLR_SLEEPING(td);
		return (setrunnable(td));
	}
	return (0);
}

#ifdef INVARIANTS
/*
 * UMA zone item deallocator.
 */
static void
sleepq_dtor(void *mem, int size, void *arg)
{
	struct sleepqueue *sq;
	int i;

	sq = mem;
	for (i = 0; i < NR_SLEEPQS; i++) {
		MPASS(TAILQ_EMPTY(&sq->sq_blocked[i]));
		MPASS(sq->sq_blockedcnt[i] == 0);
	}
}
#endif

/*
 * UMA zone item initializer.
 */
static int
sleepq_init(void *mem, int size, int flags)
{
	struct sleepqueue *sq;
	int i;

	bzero(mem, size);
	sq = mem;
	for (i = 0; i < NR_SLEEPQS; i++) {
		TAILQ_INIT(&sq->sq_blocked[i]);
		sq->sq_blockedcnt[i] = 0;
	}
	LIST_INIT(&sq->sq_free);
	return (0);
}

/*
 * Find the highest priority thread sleeping on a wait channel and resume it.
 */
int
sleepq_signal(void *wchan, int flags, int pri, int queue)
{
	struct sleepqueue *sq;
	struct thread *td, *besttd;
	int wakeup_swapper;

	CTR2(KTR_PROC, "sleepq_signal(%p, %d)", wchan, flags);
	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
	sq = sleepq_lookup(wchan);
	if (sq == NULL)
		return (0);
	KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
	    ("%s: mismatch between sleep/wakeup and cv_*", __func__));

	/*
	 * Find the highest priority thread on the queue.  If there is a
	 * tie, use the thread that first appears in the queue as it has
	 * been sleeping the longest since threads are always added to
	 * the tail of sleep queues.
	 */
	besttd = NULL;
	TAILQ_FOREACH(td, &sq->sq_blocked[queue], td_slpq) {
		if (besttd == NULL || td->td_priority < besttd->td_priority)
			besttd = td;
	}
	MPASS(besttd != NULL);
	thread_lock(besttd);
	wakeup_swapper = sleepq_resume_thread(sq, besttd, pri);
	thread_unlock(besttd);
	return (wakeup_swapper);
}

/*
 * Resume all threads sleeping on a specified wait channel.
 */
int
sleepq_broadcast(void *wchan, int flags, int pri, int queue)
{
	struct sleepqueue *sq;
	struct thread *td, *tdn;
	int wakeup_swapper;

	CTR2(KTR_PROC, "sleepq_broadcast(%p, %d)", wchan, flags);
	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
	sq = sleepq_lookup(wchan);
	if (sq == NULL)
		return (0);
	KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
	    ("%s: mismatch between sleep/wakeup and cv_*", __func__));

	/* Resume all blocked threads on the sleep queue. */
	wakeup_swapper = 0;
	TAILQ_FOREACH_SAFE(td, &sq->sq_blocked[queue], td_slpq, tdn) {
		thread_lock(td);
		if (sleepq_resume_thread(sq, td, pri))
			wakeup_swapper = 1;
		thread_unlock(td);
	}
	return (wakeup_swapper);
}
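
/*
 * The wakeup side pairs with the blocking sketch near the top of this
 * file.  A minimal caller, modeled on wakeup(9) in kern_synch.c and
 * assuming the same hypothetical wait channel "chan":
 *
 *	int wakeup_swapper;
 *
 *	sleepq_lock(chan);
 *	wakeup_swapper = sleepq_broadcast(chan, SLEEPQ_SLEEP, 0, 0);
 *	sleepq_release(chan);
 *	if (wakeup_swapper)
 *		kick_proc0();
 *
 * sleepq_signal() and sleepq_broadcast() only report whether the swapper
 * needs waking; the caller must call kick_proc0() itself after dropping
 * the chain lock.
 */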

/*
 * Time sleeping threads out.  When the timeout expires, the thread is
 * removed from the sleep queue and made runnable if it is still asleep.
 */
static void
sleepq_timeout(void *arg)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;
	void *wchan;
	int wakeup_swapper;

	td = arg;
	wakeup_swapper = 0;
	CTR3(KTR_PROC, "sleepq_timeout: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);

	thread_lock(td);

	if (td->td_sleeptimo > sbinuptime() || td->td_sleeptimo == 0) {
		/*
		 * The thread does not want a timeout (yet).
		 */
	} else if (TD_IS_SLEEPING(td) && TD_ON_SLEEPQ(td)) {
		/*
		 * The thread is asleep, so remove it from the sleep
		 * queue and make it runnable.
		 */
		wchan = td->td_wchan;
		sc = SC_LOOKUP(wchan);
		THREAD_LOCKPTR_ASSERT(td, &sc->sc_lock);
		sq = sleepq_lookup(wchan);
		MPASS(sq != NULL);
		td->td_flags |= TDF_TIMEOUT;
		wakeup_swapper = sleepq_resume_thread(sq, td, 0);
	} else if (TD_ON_SLEEPQ(td)) {
		/*
		 * If the thread is on the SLEEPQ but isn't sleeping
		 * yet, it can either be on another CPU in between
		 * sleepq_add() and one of the sleepq_*wait*()
		 * routines or it can be in sleepq_catch_signals().
		 */
		td->td_flags |= TDF_TIMEOUT;
	}

	thread_unlock(td);
	if (wakeup_swapper)
		kick_proc0();
}

/*
 * Resumes a specific thread from the sleep queue associated with a specific
 * wait channel if it is on that queue.
 */
void
sleepq_remove(struct thread *td, void *wchan)
{
	struct sleepqueue *sq;
	int wakeup_swapper;

	/*
	 * Look up the sleep queue for this wait channel, then re-check
	 * that the thread is asleep on that channel; if it is not,
	 * bail.
	 */
	MPASS(wchan != NULL);
	sleepq_lock(wchan);
	sq = sleepq_lookup(wchan);
	/*
	 * We can not lock the thread here as it may be sleeping on a
	 * different sleepq.  However, holding the sleepq lock for this
	 * wchan can guarantee that we do not miss a wakeup for this
	 * channel.  The asserts below will catch any false positives.
	 */
	if (!TD_ON_SLEEPQ(td) || td->td_wchan != wchan) {
		sleepq_release(wchan);
		return;
	}
	/* Thread is asleep on sleep queue sq, so wake it up. */
	thread_lock(td);
	MPASS(sq != NULL);
	MPASS(td->td_wchan == wchan);
	wakeup_swapper = sleepq_resume_thread(sq, td, 0);
	thread_unlock(td);
	sleepq_release(wchan);
	if (wakeup_swapper)
		kick_proc0();
}

/*
 * Abort a thread as if an interrupt had occurred.  Only abort
 * interruptible waits (unfortunately it isn't safe to abort others).
 */
int
sleepq_abort(struct thread *td, int intrval)
{
	struct sleepqueue *sq;
	void *wchan;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	MPASS(TD_ON_SLEEPQ(td));
	MPASS(td->td_flags & TDF_SINTR);
	MPASS(intrval == EINTR || intrval == ERESTART);

	/*
	 * If the TDF_TIMEOUT flag is set, just leave. A
	 * timeout is scheduled anyhow.
	 */
	if (td->td_flags & TDF_TIMEOUT)
		return (0);

	CTR3(KTR_PROC, "sleepq_abort: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);
	td->td_intrval = intrval;
	td->td_flags |= TDF_SLEEPABORT;
	/*
	 * If the thread has not slept yet, it will find the signal in
	 * sleepq_catch_signals() and call sleepq_resume_thread().
	 * Otherwise we have to do it here.
	 */
	if (!TD_IS_SLEEPING(td))
		return (0);
	wchan = td->td_wchan;
	MPASS(wchan != NULL);
	sq = sleepq_lookup(wchan);
	MPASS(sq != NULL);

	/* Thread is asleep on sleep queue sq, so wake it up. */
	return (sleepq_resume_thread(sq, td, 0));
}

#ifdef SLEEPQUEUE_PROFILING
#define	SLEEPQ_PROF_LOCATIONS	1024
#define	SLEEPQ_SBUFSIZE		512
struct sleepq_prof {
	LIST_ENTRY(sleepq_prof) sp_link;
	const char	*sp_wmesg;
	long		sp_count;
};

LIST_HEAD(sqphead, sleepq_prof);

struct sqphead sleepq_prof_free;
struct sqphead sleepq_hash[SC_TABLESIZE];
static struct sleepq_prof sleepq_profent[SLEEPQ_PROF_LOCATIONS];
static struct mtx sleepq_prof_lock;
MTX_SYSINIT(sleepq_prof_lock, &sleepq_prof_lock, "sleepq_prof", MTX_SPIN);

static void
sleepq_profile(const char *wmesg)
{
	struct sleepq_prof *sp;

	mtx_lock_spin(&sleepq_prof_lock);
	if (prof_enabled == 0)
		goto unlock;
	LIST_FOREACH(sp, &sleepq_hash[SC_HASH(wmesg)], sp_link)
		if (sp->sp_wmesg == wmesg)
			goto done;
	sp = LIST_FIRST(&sleepq_prof_free);
	if (sp == NULL)
		goto unlock;
	sp->sp_wmesg = wmesg;
	LIST_REMOVE(sp, sp_link);
	LIST_INSERT_HEAD(&sleepq_hash[SC_HASH(wmesg)], sp, sp_link);
done:
	sp->sp_count++;
unlock:
	mtx_unlock_spin(&sleepq_prof_lock);
	return;
}

static void
sleepq_prof_reset(void)
{
	struct sleepq_prof *sp;
	int enabled;
	int i;

	mtx_lock_spin(&sleepq_prof_lock);
	enabled = prof_enabled;
	prof_enabled = 0;
	for (i = 0; i < SC_TABLESIZE; i++)
		LIST_INIT(&sleepq_hash[i]);
	LIST_INIT(&sleepq_prof_free);
	for (i = 0; i < SLEEPQ_PROF_LOCATIONS; i++) {
		sp = &sleepq_profent[i];
		sp->sp_wmesg = NULL;
		sp->sp_count = 0;
		LIST_INSERT_HEAD(&sleepq_prof_free, sp, sp_link);
	}
	prof_enabled = enabled;
	mtx_unlock_spin(&sleepq_prof_lock);
}

static int
enable_sleepq_prof(SYSCTL_HANDLER_ARGS)
{
	int error, v;

	v = prof_enabled;
	error = sysctl_handle_int(oidp, &v, v, req);
	if (error)
		return (error);
	if (req->newptr == NULL)
		return (error);
	if (v == prof_enabled)
		return (0);
	if (v == 1)
		sleepq_prof_reset();
	mtx_lock_spin(&sleepq_prof_lock);
	prof_enabled = !!v;
	mtx_unlock_spin(&sleepq_prof_lock);

	return (0);
}

static int
reset_sleepq_prof_stats(SYSCTL_HANDLER_ARGS)
{
	int error, v;

	v = 0;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error)
		return (error);
	if (req->newptr == NULL)
		return (error);
	if (v == 0)
		return (0);
	sleepq_prof_reset();

	return (0);
}

static int
dump_sleepq_prof_stats(SYSCTL_HANDLER_ARGS)
{
	struct sleepq_prof *sp;
	struct sbuf *sb;
	int enabled;
	int error;
	int i;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sb = sbuf_new_for_sysctl(NULL, NULL, SLEEPQ_SBUFSIZE, req);
	sbuf_printf(sb, "\nwmesg\tcount\n");
	enabled = prof_enabled;
	mtx_lock_spin(&sleepq_prof_lock);
	prof_enabled = 0;
	mtx_unlock_spin(&sleepq_prof_lock);
	for (i = 0; i < SC_TABLESIZE; i++) {
		LIST_FOREACH(sp, &sleepq_hash[i], sp_link) {
			sbuf_printf(sb, "%s\t%ld\n",
			    sp->sp_wmesg, sp->sp_count);
		}
	}
	mtx_lock_spin(&sleepq_prof_lock);
	prof_enabled = enabled;
	mtx_unlock_spin(&sleepq_prof_lock);

	error = sbuf_finish(sb);
	sbuf_delete(sb);
	return (error);
}

SYSCTL_PROC(_debug_sleepq, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, dump_sleepq_prof_stats, "A", "Sleepqueue profiling statistics");
SYSCTL_PROC(_debug_sleepq, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, reset_sleepq_prof_stats, "I",
    "Reset sleepqueue profiling statistics");
SYSCTL_PROC(_debug_sleepq, OID_AUTO, enable, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, enable_sleepq_prof, "I", "Enable sleepqueue profiling");
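
/*
 * On a kernel built with SLEEPQUEUE_PROFILING, these knobs can be driven
 * from userland, for example:
 *
 *	sysctl debug.sleepq.enable=1	(start collecting)
 *	sysctl debug.sleepq.stats	(dump wmesg/count pairs)
 *	sysctl debug.sleepq.reset=1	(clear the counters)
 */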
#endif

#ifdef DDB
DB_SHOW_COMMAND(sleepq, db_show_sleepqueue)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
#ifdef INVARIANTS
	struct lock_object *lock;
#endif
	struct thread *td;
	void *wchan;
	int i;

	if (!have_addr)
		return;

	/*
	 * First, see if there is an active sleep queue for the wait channel
	 * indicated by the address.
	 */
	wchan = (void *)addr;
	sc = SC_LOOKUP(wchan);
	LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
		if (sq->sq_wchan == wchan)
			goto found;

	/*
	 * Second, see if there is an active sleep queue at the address
	 * indicated.
	 */
	for (i = 0; i < SC_TABLESIZE; i++)
		LIST_FOREACH(sq, &sleepq_chains[i].sc_queues, sq_hash) {
			if (sq == (struct sleepqueue *)addr)
				goto found;
		}

	db_printf("Unable to locate a sleep queue via %p\n", (void *)addr);
	return;
found:
	db_printf("Wait channel: %p\n", sq->sq_wchan);
	db_printf("Queue type: %d\n", sq->sq_type);
#ifdef INVARIANTS
	if (sq->sq_lock) {
		lock = sq->sq_lock;
		db_printf("Associated Interlock: %p - (%s) %s\n", lock,
		    LOCK_CLASS(lock)->lc_name, lock->lo_name);
	}
#endif
	db_printf("Blocked threads:\n");
	for (i = 0; i < NR_SLEEPQS; i++) {
		db_printf("\nQueue[%d]:\n", i);
		if (TAILQ_EMPTY(&sq->sq_blocked[i]))
			db_printf("\tempty\n");
		else
			TAILQ_FOREACH(td, &sq->sq_blocked[i],
				      td_slpq) {
				db_printf("\t%p (tid %d, pid %d, \"%s\")\n", td,
					  td->td_tid, td->td_proc->p_pid,
					  td->td_name);
			}
		db_printf("(expected: %u)\n", sq->sq_blockedcnt[i]);
	}
}

/* Alias 'show sleepqueue' to 'show sleepq'. */
DB_SHOW_ALIAS(sleepqueue, db_show_sleepqueue);
#endif
