/*-
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 *  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include "opt_witness.h"
#include "opt_kdtrace.h"
#include "opt_hwpmc_hooks.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/kern/kern_thread.c 337258 2018-08-03 14:45:53Z asomers $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rangelock.h>
#include <sys/resourcevar.h>
#include <sys/sdt.h>
#include <sys/smp.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/selinfo.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/turnstile.h>
#include <sys/ktr.h>
#include <sys/rwlock.h>
#include <sys/umtx.h>
#include <sys/cpuset.h>
#ifdef	HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#include <security/audit/audit.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <sys/eventhandler.h>

SDT_PROVIDER_DECLARE(proc);
SDT_PROBE_DEFINE(proc, , , lwp__exit);
/*
 * Thread-related storage.
 */
static uma_zone_t thread_zone;

TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
static struct mtx zombie_lock;
MTX_SYSINIT(zombie_lock, &zombie_lock, "zombie lock", MTX_SPIN);

static void thread_zombie(struct thread *);
static int thread_unsuspend_one(struct thread *td, struct proc *p,
    bool boundary);

#define TID_BUFFER_SIZE	1024

struct mtx tid_lock;
static struct unrhdr *tid_unrhdr;
static lwpid_t tid_buffer[TID_BUFFER_SIZE];
static int tid_head, tid_tail;
static MALLOC_DEFINE(M_TIDHASH, "tidhash", "thread hash");

struct	tidhashhead *tidhashtbl;
u_long	tidhash;
struct	rwlock tidhash_lock;

static lwpid_t
tid_alloc(void)
{
	lwpid_t	tid;

	tid = alloc_unr(tid_unrhdr);
	if (tid != -1)
		return (tid);
	mtx_lock(&tid_lock);
	if (tid_head == tid_tail) {
		mtx_unlock(&tid_lock);
		return (-1);
	}
	tid = tid_buffer[tid_head];
	tid_head = (tid_head + 1) % TID_BUFFER_SIZE;
	mtx_unlock(&tid_lock);
	return (tid);
}

static void
tid_free(lwpid_t tid)
{
	lwpid_t tmp_tid = -1;

	mtx_lock(&tid_lock);
	if ((tid_tail + 1) % TID_BUFFER_SIZE == tid_head) {
		tmp_tid = tid_buffer[tid_head];
		tid_head = (tid_head + 1) % TID_BUFFER_SIZE;
	}
	tid_buffer[tid_tail] = tid;
	tid_tail = (tid_tail + 1) % TID_BUFFER_SIZE;
	mtx_unlock(&tid_lock);
	if (tmp_tid != -1)
		free_unr(tid_unrhdr, tmp_tid);
}
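
/*
 * Illustrative sketch (not part of the original source): the two routines
 * above implement deferred TID reuse.  tid_alloc() prefers the unr(9)
 * allocator and falls back to the FIFO ring of recently freed IDs;
 * tid_free() enqueues the ID and, once the ring is full, evicts the
 * oldest entry back to the unr(9) allocator.  A hypothetical caller:
 *
 *	lwpid_t tid;
 *
 *	tid = tid_alloc();		// from tid_unrhdr, else the ring
 *	if (tid == -1)
 *		return (ENOMEM);	// hypothetical error path
 *	...
 *	tid_free(tid);			// queued; the ring delays reuse of
 *					// recently freed TIDs
 */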

/*
 * Prepare a thread for use.
 */
static int
thread_ctor(void *mem, int size, void *arg, int flags)
{
	struct thread	*td;

	td = (struct thread *)mem;
	td->td_state = TDS_INACTIVE;
	td->td_oncpu = NOCPU;

	td->td_tid = tid_alloc();

	/*
	 * Note that td_critnest begins life as 1 because the thread is not
	 * running and is thereby implicitly waiting to be on the receiving
	 * end of a context switch.
	 */
	td->td_critnest = 1;
	td->td_lend_user_pri = PRI_MAX;
	EVENTHANDLER_INVOKE(thread_ctor, td);
#ifdef AUDIT
	audit_thread_alloc(td);
#endif
	umtx_thread_alloc(td);
	return (0);
}

/*
 * Reclaim a thread after use.
 */
static void
thread_dtor(void *mem, int size, void *arg)
{
	struct thread *td;

	td = (struct thread *)mem;

#ifdef INVARIANTS
	/* Verify that this thread is in a safe state to free. */
	switch (td->td_state) {
	case TDS_INHIBITED:
	case TDS_RUNNING:
	case TDS_CAN_RUN:
	case TDS_RUNQ:
		/*
		 * We must never unlink a thread that is in one of
		 * these states, because it is currently active.
		 */
		panic("bad state for thread unlinking");
		/* NOTREACHED */
	case TDS_INACTIVE:
		break;
	default:
		panic("bad thread state");
		/* NOTREACHED */
	}
#endif
#ifdef AUDIT
	audit_thread_free(td);
#endif
	/* Free all OSD associated with this thread. */
	osd_thread_exit(td);

	EVENTHANDLER_INVOKE(thread_dtor, td);
	tid_free(td->td_tid);
}

/*
 * Initialize type-stable parts of a thread (when newly created).
 */
static int
thread_init(void *mem, int size, int flags)
{
	struct thread *td;

	td = (struct thread *)mem;

	td->td_sleepqueue = sleepq_alloc();
	td->td_turnstile = turnstile_alloc();
	td->td_rlqe = NULL;
	EVENTHANDLER_INVOKE(thread_init, td);
	td->td_sched = (struct td_sched *)&td[1];
	umtx_thread_init(td);
	td->td_kstack = 0;
	td->td_sel = NULL;
	return (0);
}

/*
 * Tear down type-stable parts of a thread (just before being discarded).
 */
static void
thread_fini(void *mem, int size)
{
	struct thread *td;

	td = (struct thread *)mem;
	EVENTHANDLER_INVOKE(thread_fini, td);
	rlqentry_free(td->td_rlqe);
	turnstile_free(td->td_turnstile);
	sleepq_free(td->td_sleepqueue);
	umtx_thread_fini(td);
	seltdfini(td);
}
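
/*
 * A note on the uma(9) lifecycle of the four routines above (a sketch of
 * the calling pattern, not code from this file): thread_init() and
 * thread_fini() run once per backing item as memory enters or leaves the
 * zone, while thread_ctor() and thread_dtor() run on every allocation and
 * free.  Because the zone is created with UMA_ZONE_NOFREE, type-stable
 * fields such as td_sleepqueue and td_turnstile persist across uses:
 *
 *	thread_init(item)			// item enters the zone once
 *	    thread_ctor() ... thread_dtor()	// first use
 *	    thread_ctor() ... thread_dtor()	// reuse of the same storage
 *	thread_fini(item)			// only if the zone were drained
 */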

/*
 * For a newly created process,
 * link up all the structures and its initial thread, etc.
 * Called from:
 * {arch}/{arch}/machdep.c   ia64_init(), init386() etc.
 * proc_dtor() (should go away)
 * proc_init()
 */
void
proc_linkup0(struct proc *p, struct thread *td)
{
	TAILQ_INIT(&p->p_threads);	     /* all threads in proc */
	proc_linkup(p, td);
}

void
proc_linkup(struct proc *p, struct thread *td)
{

	sigqueue_init(&p->p_sigqueue, p);
	p->p_ksi = ksiginfo_alloc(1);
	if (p->p_ksi != NULL) {
		/* XXX p_ksi may be null if ksiginfo zone is not ready */
		p->p_ksi->ksi_flags = KSI_EXT | KSI_INS;
	}
	LIST_INIT(&p->p_mqnotifier);
	p->p_numthreads = 0;
	thread_link(td, p);
}

/*
 * Initialize global thread allocation resources.
 */
void
threadinit(void)
{

	mtx_init(&tid_lock, "TID lock", NULL, MTX_DEF);

	/*
	 * pid_max cannot be greater than PID_MAX.
	 * Leave one number for thread0.
	 */
	tid_unrhdr = new_unrhdr(PID_MAX + 2, INT_MAX, &tid_lock);

	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
	    thread_ctor, thread_dtor, thread_init, thread_fini,
	    16 - 1, UMA_ZONE_NOFREE);
	tidhashtbl = hashinit(maxproc / 2, M_TIDHASH, &tidhash);
	rw_init(&tidhash_lock, "tidhash");
}

/*
 * Place an unused thread on the zombie list.
 * Use the slpq as that must be unused by now.
 */
void
thread_zombie(struct thread *td)
{
	mtx_lock_spin(&zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_threads, td, td_slpq);
	mtx_unlock_spin(&zombie_lock);
}

/*
 * Release a thread that has exited after cpu_throw().
 */
void
thread_stash(struct thread *td)
{
	atomic_subtract_rel_int(&td->td_proc->p_exitthreads, 1);
	thread_zombie(td);
}

/*
 * Reap zombie resources.
 */
void
thread_reap(void)
{
	struct thread *td_first, *td_next;

	/*
	 * Don't even bother to lock if none at this instant;
	 * we really don't care about the next instant.
	 */
	if (!TAILQ_EMPTY(&zombie_threads)) {
		mtx_lock_spin(&zombie_lock);
		td_first = TAILQ_FIRST(&zombie_threads);
		if (td_first)
			TAILQ_INIT(&zombie_threads);
		mtx_unlock_spin(&zombie_lock);
		while (td_first) {
			td_next = TAILQ_NEXT(td_first, td_slpq);
			if (td_first->td_ucred)
				crfree(td_first->td_ucred);
			thread_free(td_first);
			td_first = td_next;
		}
	}
}

/*
 * Allocate a thread.
 */
struct thread *
thread_alloc(int pages)
{
	struct thread *td;

	thread_reap(); /* check if any zombies to get */

	td = (struct thread *)uma_zalloc(thread_zone, M_WAITOK);
	KASSERT(td->td_kstack == 0, ("thread_alloc got thread with kstack"));
	if (!vm_thread_new(td, pages)) {
		uma_zfree(thread_zone, td);
		return (NULL);
	}
	cpu_thread_alloc(td);
	return (td);
}

int
thread_alloc_stack(struct thread *td, int pages)
{

	KASSERT(td->td_kstack == 0,
	    ("thread_alloc_stack called on a thread with kstack"));
	if (!vm_thread_new(td, pages))
		return (0);
	cpu_thread_alloc(td);
	return (1);
}
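
/*
 * Hypothetical usage sketch for the two allocators above: thread_alloc()
 * returns a thread with a kernel stack already attached, while
 * thread_alloc_stack() attaches a stack to a thread that lacks one.  The
 * error handling below is illustrative only:
 *
 *	struct thread *ntd;
 *
 *	ntd = thread_alloc(0);		// 0 selects the default kstack size
 *	if (ntd == NULL)
 *		return (ENOMEM);
 *
 * A thread obtained without a stack would instead use:
 *
 *	if (thread_alloc_stack(ntd, 0) == 0)
 *		return (ENOMEM);	// returns 1 on success, 0 on failure
 */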

/*
 * Deallocate a thread.
 */
void
thread_free(struct thread *td)
{

	lock_profile_thread_exit(td);
	if (td->td_cpuset)
		cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_free(td);
	if (td->td_kstack != 0)
		vm_thread_dispose(td);
	callout_drain(&td->td_slpcallout);
	uma_zfree(thread_zone, td);
}

/*
 * Discard the current thread and exit from its context.
 * Always called with scheduler locked.
 *
 * Because we can't free a thread while we're operating under its context,
 * push the current thread into our CPU's deadthread holder. This means
 * we needn't worry about someone else grabbing our context before we
 * do a cpu_throw().
 */
void
thread_exit(void)
{
	uint64_t runtime, new_switchtime;
	struct thread *td;
	struct thread *td2;
	struct proc *p;
	int wakeup_swapper;

	td = curthread;
	p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&Giant, MA_NOTOWNED);

	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(p != NULL, ("thread exiting without a process"));
	CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td,
	    (long)p->p_pid, td->td_name);
	SDT_PROBE0(proc, , , lwp__exit);
	KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending"));

	/*
	 * Drop FPU & debug register state storage, or any other
	 * architecture-specific resources that
	 * would not be on a new untouched process.
	 */
	cpu_thread_exit(td);

	/*
	 * The last thread is left attached to the process
	 * so that the whole bundle gets recycled. Skip
	 * all this stuff if we never had threads.
	 * EXIT clears all signs of other threads when
	 * it goes to single threading, so the last thread always
	 * takes the short path.
	 */
	if (p->p_flag & P_HADTHREADS) {
		if (p->p_numthreads > 1) {
			atomic_add_int(&td->td_proc->p_exitthreads, 1);
			thread_unlink(td);
			td2 = FIRST_THREAD_IN_PROC(p);
			sched_exit_thread(td2, td);

			/*
			 * The test below is NOT true if we are the
			 * sole exiting thread. P_STOPPED_SINGLE is unset
			 * in exit1() after it is the only survivor.
			 */
			if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
				if (p->p_numthreads == p->p_suspcount) {
					thread_lock(p->p_singlethread);
					wakeup_swapper = thread_unsuspend_one(
						p->p_singlethread, p, false);
					thread_unlock(p->p_singlethread);
					if (wakeup_swapper)
						kick_proc0();
				}
			}

			PCPU_SET(deadthread, td);
		} else {
			/*
			 * The last thread is exiting... but not through exit().
			 */
			panic("thread_exit: Last thread exiting on its own");
		}
	}
#ifdef	HWPMC_HOOKS
	/*
	 * If this thread is part of a process that is being tracked by hwpmc(4),
	 * inform the module of the thread's impending exit.
	 */
	if (PMC_PROC_IS_USING_PMCS(td->td_proc))
		PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
#endif
	PROC_UNLOCK(p);
	PROC_STATLOCK(p);
	thread_lock(td);
	PROC_SUNLOCK(p);

	/* Do the same timestamp bookkeeping that mi_switch() would do. */
	new_switchtime = cpu_ticks();
	runtime = new_switchtime - PCPU_GET(switchtime);
	td->td_runtime += runtime;
	td->td_incruntime += runtime;
	PCPU_SET(switchtime, new_switchtime);
	PCPU_SET(switchticks, ticks);
	PCPU_INC(cnt.v_swtch);

	/* Save our resource usage in our process. */
	td->td_ru.ru_nvcsw++;
	ruxagg(p, td);
	rucollect(&p->p_ru, &td->td_ru);
	PROC_STATUNLOCK(p);

	td->td_state = TDS_INACTIVE;
#ifdef WITNESS
	witness_thread_exit(td);
#endif
	CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td);
	sched_throw(td);
	panic("I'm a teapot!");
	/* NOTREACHED */
}

/*
 * Do any thread-specific cleanups that may be needed in wait().
 * Called with Giant, proc and schedlock not held.
 */
void
thread_wait(struct proc *p)
{
	struct thread *td;

	mtx_assert(&Giant, MA_NOTOWNED);
	KASSERT(p->p_numthreads == 1, ("multiple threads in thread_wait()"));
	KASSERT(p->p_exitthreads == 0, ("p_exitthreads leaking"));
	td = FIRST_THREAD_IN_PROC(p);
	/* Lock the last thread so we spin until it exits cpu_throw(). */
	thread_lock(td);
	thread_unlock(td);
	lock_profile_thread_exit(td);
	cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_clean(td);
	crfree(td->td_ucred);
	callout_drain(&td->td_slpcallout);
	thread_reap();	/* check for zombie threads etc. */
}

/*
 * Link a thread to a process.
 * Set up anything that needs to be initialized for it to
 * be used by the process.
 */
void
thread_link(struct thread *td, struct proc *p)
{

	/*
	 * XXX This can't be enabled because it's called for proc0 before
	 * its lock has been created.
	 * PROC_LOCK_ASSERT(p, MA_OWNED);
	 */
	td->td_state    = TDS_INACTIVE;
	td->td_proc     = p;
	td->td_flags    = TDF_INMEM;

	LIST_INIT(&td->td_contested);
	LIST_INIT(&td->td_lprof[0]);
	LIST_INIT(&td->td_lprof[1]);
	sigqueue_init(&td->td_sigqueue, p);
	callout_init(&td->td_slpcallout, 1);
	TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
	p->p_numthreads++;
}

/*
 * Called from:
 *  thread_exit()
 */
void
thread_unlink(struct thread *td)
{
	struct proc *p = td->td_proc;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	TAILQ_REMOVE(&p->p_threads, td, td_plist);
	p->p_numthreads--;
	/* could clear a few other things here */
	/* Must NOT clear links to proc! */
}

static int
calc_remaining(struct proc *p, int mode)
{
	int remaining;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	if (mode == SINGLE_EXIT)
		remaining = p->p_numthreads;
	else if (mode == SINGLE_BOUNDARY)
		remaining = p->p_numthreads - p->p_boundary_count;
	else if (mode == SINGLE_NO_EXIT || mode == SINGLE_ALLPROC)
		remaining = p->p_numthreads - p->p_suspcount;
	else
		panic("calc_remaining: wrong mode %d", mode);
	return (remaining);
}

static int
remain_for_mode(int mode)
{

	return (mode == SINGLE_ALLPROC ? 0 : 1);
}
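
/*
 * A worked example for the two helpers above (illustrative numbers): with
 * p_numthreads = 4, p_suspcount = 2 and p_boundary_count = 1,
 * calc_remaining() yields 4 for SINGLE_EXIT (every thread must exit),
 * 3 for SINGLE_BOUNDARY (threads parked at the boundary no longer count)
 * and 2 for SINGLE_NO_EXIT or SINGLE_ALLPROC (suspended threads no longer
 * count).  thread_single() loops until the value reaches
 * remain_for_mode(): 1 for the curproc modes, since the caller itself
 * remains, and 0 for SINGLE_ALLPROC, where the caller belongs to a
 * different process.
 */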

static int
weed_inhib(int mode, struct thread *td2, struct proc *p)
{
	int wakeup_swapper;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td2, MA_OWNED);

	wakeup_swapper = 0;
	switch (mode) {
	case SINGLE_EXIT:
		if (TD_IS_SUSPENDED(td2))
			wakeup_swapper |= thread_unsuspend_one(td2, p, true);
		if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0)
			wakeup_swapper |= sleepq_abort(td2, EINTR);
		break;
	case SINGLE_BOUNDARY:
	case SINGLE_NO_EXIT:
		if (TD_IS_SUSPENDED(td2) && (td2->td_flags & TDF_BOUNDARY) == 0)
			wakeup_swapper |= thread_unsuspend_one(td2, p, false);
		if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0)
			wakeup_swapper |= sleepq_abort(td2, ERESTART);
		break;
	case SINGLE_ALLPROC:
		/*
		 * ALLPROC suspend tries to avoid spurious EINTR for
		 * threads sleeping interruptibly, by suspending the
		 * thread directly, similarly to sig_suspend_threads().
		 * Since such a sleep is not performed at the user
		 * boundary, the TDF_BOUNDARY flag is not set, and
		 * TDF_ALLPROCSUSP is used to avoid an immediate un-suspend.
		 */
		if (TD_IS_SUSPENDED(td2) && (td2->td_flags & (TDF_BOUNDARY |
		    TDF_ALLPROCSUSP)) == 0)
			wakeup_swapper |= thread_unsuspend_one(td2, p, false);
		if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0) {
			if ((td2->td_flags & TDF_SBDRY) == 0) {
				thread_suspend_one(td2);
				td2->td_flags |= TDF_ALLPROCSUSP;
			} else {
				wakeup_swapper |= sleepq_abort(td2, ERESTART);
			}
		}
		break;
	}
	return (wakeup_swapper);
}

/*
 * Enforce single-threading.
 *
 * Returns 1 if the caller must abort (another thread is waiting to
 * exit the process or similar). Process is locked!
 * Returns 0 when you are successfully the only thread running.
 * A process has successfully single-threaded in the suspend mode when
 * there are no threads in user mode. Threads in the kernel must be
 * allowed to continue until they get to the user boundary. They may even
 * copy out their return values and data before suspending. They may,
 * however, be accelerated in reaching the user boundary, as we will wake
 * up any sleeping threads that are interruptible (PCATCH).
 */
int
thread_single(struct proc *p, int mode)
{
	struct thread *td;
	struct thread *td2;
	int remaining, wakeup_swapper;

	td = curthread;
	KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY ||
	    mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT,
	    ("invalid mode %d", mode));
	/*
	 * If allowing non-ALLPROC singlethreading for non-curproc
	 * callers, calc_remaining() and remain_for_mode() should be
	 * adjusted to also account for td->td_proc != p.  For now
	 * this is not implemented because it is not used.
	 */
	KASSERT((mode == SINGLE_ALLPROC && td->td_proc != p) ||
	    (mode != SINGLE_ALLPROC && td->td_proc == p),
	    ("mode %d proc %p curproc %p", mode, p, td->td_proc));
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);

	if ((p->p_flag & P_HADTHREADS) == 0 && mode != SINGLE_ALLPROC)
		return (0);

	/* Is someone already single threading? */
	if (p->p_singlethread != NULL && p->p_singlethread != td)
		return (1);

	if (mode == SINGLE_EXIT) {
		p->p_flag |= P_SINGLE_EXIT;
		p->p_flag &= ~P_SINGLE_BOUNDARY;
	} else {
		p->p_flag &= ~P_SINGLE_EXIT;
		if (mode == SINGLE_BOUNDARY)
			p->p_flag |= P_SINGLE_BOUNDARY;
		else
			p->p_flag &= ~P_SINGLE_BOUNDARY;
	}
	if (mode == SINGLE_ALLPROC)
		p->p_flag |= P_TOTAL_STOP;
	p->p_flag |= P_STOPPED_SINGLE;
	PROC_SLOCK(p);
	p->p_singlethread = td;
	remaining = calc_remaining(p, mode);
	while (remaining != remain_for_mode(mode)) {
		if (P_SHOULDSTOP(p) != P_STOPPED_SINGLE)
			goto stopme;
		wakeup_swapper = 0;
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			thread_lock(td2);
			td2->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
			if (TD_IS_INHIBITED(td2)) {
				wakeup_swapper |= weed_inhib(mode, td2, p);
#ifdef SMP
			} else if (TD_IS_RUNNING(td2) && td != td2) {
				forward_signal(td2);
#endif
			}
			thread_unlock(td2);
		}
		if (wakeup_swapper)
			kick_proc0();
		remaining = calc_remaining(p, mode);

		/*
		 * Maybe we suspended some threads... was it enough?
		 */
		if (remaining == remain_for_mode(mode))
			break;

stopme:
		/*
		 * Wake us up when everyone else has suspended.
		 * In the meantime we suspend as well.
		 */
		thread_suspend_switch(td, p);
		remaining = calc_remaining(p, mode);
	}
	if (mode == SINGLE_EXIT) {
		/*
		 * Convert the process to an unthreaded process.  The
		 * SINGLE_EXIT mode is used by exit1() and execve();
		 * in both cases the other threads must be retired.
		 */
		KASSERT(p->p_numthreads == 1, ("Unthreading with >1 threads"));
		p->p_singlethread = NULL;
		p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_HADTHREADS);

		/*
		 * Wait for any remaining threads to exit cpu_throw().
		 */
		while (p->p_exitthreads != 0) {
			PROC_SUNLOCK(p);
			PROC_UNLOCK(p);
			sched_relinquish(td);
			PROC_LOCK(p);
			PROC_SLOCK(p);
		}
	} else if (mode == SINGLE_BOUNDARY) {
		/*
		 * Wait until all suspended threads are removed from
		 * the processors.  The thread_suspend_check()
		 * increments p_boundary_count while it is still
		 * running, which makes it possible for the execve()
		 * to destroy vmspace while our other threads are
		 * still using the address space.
		 *
		 * We lock the thread, which is only allowed to
		 * succeed after context switch code finished using
		 * the address space.
		 */
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			thread_lock(td2);
			KASSERT((td2->td_flags & TDF_BOUNDARY) != 0,
			    ("td %p not on boundary", td2));
			KASSERT(TD_IS_SUSPENDED(td2),
			    ("td %p is not suspended", td2));
			thread_unlock(td2);
		}
	}
	PROC_SUNLOCK(p);
	return (0);
}
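
/*
 * Illustrative pairing (a sketch, not code from this file): exit1() and
 * execve() single-thread the process roughly as follows, with the proc
 * lock held across the call; the error value is hypothetical:
 *
 *	PROC_LOCK(p);
 *	if (thread_single(p, SINGLE_BOUNDARY) != 0) {
 *		PROC_UNLOCK(p);
 *		return (ERESTART);	// another thread won the race
 *	}
 *	// ...work that requires being the only thread, e.g. swapping
 *	// the vmspace...
 *	thread_single_end(p, SINGLE_BOUNDARY);
 *	PROC_UNLOCK(p);
 */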

bool
thread_suspend_check_needed(void)
{
	struct proc *p;
	struct thread *td;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	return (P_SHOULDSTOP(p) || ((p->p_flag & P_TRACED) != 0 &&
	    (td->td_dbgflags & TDB_SUSPEND) != 0));
}

/*
 * Called from locations that can safely check to see
 * whether we have to suspend or at least throttle for a
 * single-thread event (e.g. fork).
 *
 * Such locations include userret().
 * If the "return_instead" argument is non-zero, the thread must be able to
 * accept 0 (caller may continue) or a non-zero error (caller must abort)
 * as a result.
 *
 * The 'return_instead' argument tells the function if it may do a
 * thread_exit() or suspend, or whether the caller must abort and back
 * out instead.
 *
 * If the thread that set the single_threading request has set the
 * P_SINGLE_EXIT bit in the process flags then this call will never return
 * if 'return_instead' is false, but will exit.
 *
 * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
 *---------------+--------------------+---------------------
 *       0       | returns 0          |   returns 0 or error
 *               | when ST ends       |   immediately
 *---------------+--------------------+---------------------
 *       1       | thread exits       |   returns error
 *               |                    |   immediately
 * 0 = thread_exit() or suspension ok,
 * other = return error instead of stopping the thread.
 *
 * While a full suspension is in effect, even a single-threading
 * thread would be suspended if it made this call (but it shouldn't).
 * This call should only be made from places where
 * thread_exit() would be safe, as that may be the outcome unless
 * return_instead is set.
 */
int
thread_suspend_check(int return_instead)
{
	struct thread *td;
	struct proc *p;
	int wakeup_swapper;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	while (thread_suspend_check_needed()) {
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			KASSERT(p->p_singlethread != NULL,
			    ("singlethread not set"));
			/*
			 * The only suspension in action is a
			 * single-threading. Single threader need not stop.
			 * It is safe to access p->p_singlethread unlocked
			 * because it can only be set to our address by us.
			 */
			if (p->p_singlethread == td)
				return (0);	/* Exempt from stopping. */
		}
		if ((p->p_flag & P_SINGLE_EXIT) && return_instead)
			return (EINTR);

		/* Should we goto user boundary if we didn't come from there? */
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
		    (p->p_flag & P_SINGLE_BOUNDARY) && return_instead)
			return (ERESTART);

		/*
		 * Ignore suspend requests if they are deferred.
		 */
		if ((td->td_flags & TDF_SBDRY) != 0) {
			KASSERT(return_instead,
			    ("TDF_SBDRY set for unsafe thread_suspend_check"));
			return (0);
		}

		/*
		 * If the process is waiting for us to exit,
		 * this thread should just suicide.
		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
		 */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
			PROC_UNLOCK(p);

			/*
			 * Allow Linux emulation layer to do some work
			 * before thread suicide.
			 */
			if (__predict_false(p->p_sysent->sv_thread_detach != NULL))
				(p->p_sysent->sv_thread_detach)(td);
			kern_thr_exit(td);
			panic("stopped thread did not exit");
		}

		PROC_SLOCK(p);
		thread_stopped(p);
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount + 1) {
				thread_lock(p->p_singlethread);
				wakeup_swapper = thread_unsuspend_one(
				    p->p_singlethread, p, false);
				thread_unlock(p->p_singlethread);
				if (wakeup_swapper)
					kick_proc0();
			}
		}
		PROC_UNLOCK(p);
		thread_lock(td);
		/*
		 * When a thread suspends, it just
		 * gets taken off all queues.
		 */
		thread_suspend_one(td);
		if (return_instead == 0) {
			p->p_boundary_count++;
			td->td_flags |= TDF_BOUNDARY;
		}
		PROC_SUNLOCK(p);
		mi_switch(SW_INVOL | SWT_SUSPEND, NULL);
		thread_unlock(td);
		PROC_LOCK(p);
	}
	return (0);
}
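
/*
 * Sketch of the common call pattern (illustrative, simplified from the
 * userret() and sleep paths): a caller that cannot stop here passes
 * return_instead = 1 and must propagate the error, while userret() passes
 * 0 and simply parks in the loop above:
 *
 *	PROC_LOCK(p);
 *	error = thread_suspend_check(1);	// may not suspend here
 *	PROC_UNLOCK(p);
 *	if (error != 0)
 *		return (error);			// EINTR or ERESTART
 */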

void
thread_suspend_switch(struct thread *td, struct proc *p)
{

	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	/*
	 * We implement thread_suspend_one in stages here to avoid
	 * dropping the proc lock while the thread lock is owned.
	 */
	if (p == td->td_proc) {
		thread_stopped(p);
		p->p_suspcount++;
	}
	PROC_UNLOCK(p);
	thread_lock(td);
	td->td_flags &= ~TDF_NEEDSUSPCHK;
	TD_SET_SUSPENDED(td);
	sched_sleep(td, 0);
	PROC_SUNLOCK(p);
	DROP_GIANT();
	mi_switch(SW_VOL | SWT_SUSPEND, NULL);
	thread_unlock(td);
	PICKUP_GIANT();
	PROC_LOCK(p);
	PROC_SLOCK(p);
}

void
thread_suspend_one(struct thread *td)
{
	struct proc *p;

	p = td->td_proc;
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	p->p_suspcount++;
	td->td_flags &= ~TDF_NEEDSUSPCHK;
	TD_SET_SUSPENDED(td);
	sched_sleep(td, 0);
}

static int
thread_unsuspend_one(struct thread *td, struct proc *p, bool boundary)
{

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));
	TD_CLR_SUSPENDED(td);
	td->td_flags &= ~TDF_ALLPROCSUSP;
	if (td->td_proc == p) {
		PROC_SLOCK_ASSERT(p, MA_OWNED);
		p->p_suspcount--;
		if (boundary && (td->td_flags & TDF_BOUNDARY) != 0) {
			td->td_flags &= ~TDF_BOUNDARY;
			p->p_boundary_count--;
		}
	}
	return (setrunnable(td));
}

/*
 * Allow all threads blocked by single threading to continue running.
 */
void
thread_unsuspend(struct proc *p)
{
	struct thread *td;
	int wakeup_swapper;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	wakeup_swapper = 0;
	if (!P_SHOULDSTOP(p)) {
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (TD_IS_SUSPENDED(td)) {
				wakeup_swapper |= thread_unsuspend_one(td, p,
				    true);
			}
			thread_unlock(td);
		}
	} else if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
	    p->p_numthreads == p->p_suspcount) {
		/*
		 * Stopping everything also did the job for the single
		 * threading request. Now we've downgraded to single-threaded;
		 * let it continue.
		 */
		if (p->p_singlethread->td_proc == p) {
			thread_lock(p->p_singlethread);
			wakeup_swapper = thread_unsuspend_one(
			    p->p_singlethread, p, false);
			thread_unlock(p->p_singlethread);
		}
	}
	if (wakeup_swapper)
		kick_proc0();
}

/*
 * End the single-threading mode.
 */
void
thread_single_end(struct proc *p, int mode)
{
	struct thread *td;
	int wakeup_swapper;

	KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY ||
	    mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT,
	    ("invalid mode %d", mode));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT((mode == SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) != 0) ||
	    (mode != SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) == 0),
	    ("mode %d does not match P_TOTAL_STOP", mode));
	KASSERT(mode == SINGLE_ALLPROC || p->p_singlethread == curthread,
	    ("thread_single_end from other thread %p %p",
	    curthread, p->p_singlethread));
	KASSERT(mode != SINGLE_BOUNDARY ||
	    (p->p_flag & P_SINGLE_BOUNDARY) != 0,
	    ("mis-matched SINGLE_BOUNDARY flags %x", p->p_flag));
	p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY |
	    P_TOTAL_STOP);
	PROC_SLOCK(p);
	p->p_singlethread = NULL;
	wakeup_swapper = 0;
	/*
	 * If there are other threads, they may now run,
	 * unless of course there is a blanket 'stop order'
	 * on the process. The single-threader must be allowed
	 * to continue, however, as this is a bad place to stop.
	 */
	if (p->p_numthreads != remain_for_mode(mode) && !P_SHOULDSTOP(p)) {
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (TD_IS_SUSPENDED(td)) {
				wakeup_swapper |= thread_unsuspend_one(td, p,
				    mode == SINGLE_BOUNDARY);
			}
			thread_unlock(td);
		}
	}
	KASSERT(mode != SINGLE_BOUNDARY || p->p_boundary_count == 0,
	    ("inconsistent boundary count %d", p->p_boundary_count));
	PROC_SUNLOCK(p);
	if (wakeup_swapper)
		kick_proc0();
}

struct thread *
thread_find(struct proc *p, lwpid_t tid)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_tid == tid)
			break;
	}
	return (td);
}

/* Locate a thread by number; return with proc lock held. */
struct thread *
tdfind(lwpid_t tid, pid_t pid)
{
#define RUN_THRESH	16
	struct thread *td;
	int run = 0;

	rw_rlock(&tidhash_lock);
	LIST_FOREACH(td, TIDHASH(tid), td_hash) {
		if (td->td_tid == tid) {
			if (pid != -1 && td->td_proc->p_pid != pid) {
				td = NULL;
				break;
			}
			PROC_LOCK(td->td_proc);
			if (td->td_proc->p_state == PRS_NEW) {
				PROC_UNLOCK(td->td_proc);
				td = NULL;
				break;
			}
			if (run > RUN_THRESH) {
				if (rw_try_upgrade(&tidhash_lock)) {
					LIST_REMOVE(td, td_hash);
					LIST_INSERT_HEAD(TIDHASH(td->td_tid),
						td, td_hash);
					rw_wunlock(&tidhash_lock);
					return (td);
				}
			}
			break;
		}
		run++;
	}
	rw_runlock(&tidhash_lock);
	return (td);
}
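
/*
 * Usage sketch (hypothetical caller): on success tdfind() returns with
 * the proc lock held, so the caller owns the unlock:
 *
 *	td = tdfind(tid, -1);		// pid == -1 matches any process
 *	if (td == NULL)
 *		return (ESRCH);
 *	// ...inspect td and td->td_proc...
 *	PROC_UNLOCK(td->td_proc);
 */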

void
tidhash_add(struct thread *td)
{
	rw_wlock(&tidhash_lock);
	LIST_INSERT_HEAD(TIDHASH(td->td_tid), td, td_hash);
	rw_wunlock(&tidhash_lock);
}

void
tidhash_remove(struct thread *td)
{
	rw_wlock(&tidhash_lock);
	LIST_REMOVE(td, td_hash);
	rw_wunlock(&tidhash_lock);
}
