kern_thread.c revision 276272
/*-
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include "opt_witness.h"
#include "opt_kdtrace.h"
#include "opt_hwpmc_hooks.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/kern/kern_thread.c 276272 2014-12-27 00:55:14Z kib $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rangelock.h>
#include <sys/resourcevar.h>
#include <sys/sdt.h>
#include <sys/smp.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/selinfo.h>
#include <sys/turnstile.h>
#include <sys/ktr.h>
#include <sys/rwlock.h>
#include <sys/umtx.h>
#include <sys/cpuset.h>
#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#include <security/audit/audit.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <sys/eventhandler.h>

SDT_PROVIDER_DECLARE(proc);
SDT_PROBE_DEFINE(proc, , , lwp__exit);
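/*
 * Editorial note (not part of the original file): with the kernel's
 * SDT(9) framework, a double underscore in a probe name is rendered
 * as a hyphen, so the probe above is visible to dtrace(1) as
 * proc:::lwp-exit.  As a hedged illustration, it could be observed
 * from userland with something like:
 *
 *	dtrace -n 'proc:::lwp-exit { trace(pid); }'
 */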
/*
 * thread related storage.
 */
static uma_zone_t thread_zone;

TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
static struct mtx zombie_lock;
MTX_SYSINIT(zombie_lock, &zombie_lock, "zombie lock", MTX_SPIN);

static void thread_zombie(struct thread *);

#define TID_BUFFER_SIZE	1024

struct mtx tid_lock;
static struct unrhdr *tid_unrhdr;
static lwpid_t tid_buffer[TID_BUFFER_SIZE];
static int tid_head, tid_tail;
static MALLOC_DEFINE(M_TIDHASH, "tidhash", "thread hash");

struct tidhashhead *tidhashtbl;
u_long tidhash;
struct rwlock tidhash_lock;

static lwpid_t
tid_alloc(void)
{
	lwpid_t tid;

	tid = alloc_unr(tid_unrhdr);
	if (tid != -1)
		return (tid);
	mtx_lock(&tid_lock);
	if (tid_head == tid_tail) {
		mtx_unlock(&tid_lock);
		return (-1);
	}
	tid = tid_buffer[tid_head];
	tid_head = (tid_head + 1) % TID_BUFFER_SIZE;
	mtx_unlock(&tid_lock);
	return (tid);
}

static void
tid_free(lwpid_t tid)
{
	lwpid_t tmp_tid = -1;

	mtx_lock(&tid_lock);
	if ((tid_tail + 1) % TID_BUFFER_SIZE == tid_head) {
		tmp_tid = tid_buffer[tid_head];
		tid_head = (tid_head + 1) % TID_BUFFER_SIZE;
	}
	tid_buffer[tid_tail] = tid;
	tid_tail = (tid_tail + 1) % TID_BUFFER_SIZE;
	mtx_unlock(&tid_lock);
	if (tmp_tid != -1)
		free_unr(tid_unrhdr, tmp_tid);
}

/*
 * Prepare a thread for use.
 */
static int
thread_ctor(void *mem, int size, void *arg, int flags)
{
	struct thread *td;

	td = (struct thread *)mem;
	td->td_state = TDS_INACTIVE;
	td->td_oncpu = NOCPU;

	td->td_tid = tid_alloc();

	/*
	 * Note that td_critnest begins life as 1 because the thread is not
	 * running and is thereby implicitly waiting to be on the receiving
	 * end of a context switch.
	 */
	td->td_critnest = 1;
	td->td_lend_user_pri = PRI_MAX;
	EVENTHANDLER_INVOKE(thread_ctor, td);
#ifdef AUDIT
	audit_thread_alloc(td);
#endif
	umtx_thread_alloc(td);
	return (0);
}

/*
 * Reclaim a thread after use.
 */
static void
thread_dtor(void *mem, int size, void *arg)
{
	struct thread *td;

	td = (struct thread *)mem;

#ifdef INVARIANTS
	/* Verify that this thread is in a safe state to free. */
	switch (td->td_state) {
	case TDS_INHIBITED:
	case TDS_RUNNING:
	case TDS_CAN_RUN:
	case TDS_RUNQ:
		/*
		 * We must never unlink a thread that is in one of
		 * these states, because it is currently active.
		 */
		panic("bad state for thread unlinking");
		/* NOTREACHED */
	case TDS_INACTIVE:
		break;
	default:
		panic("bad thread state");
		/* NOTREACHED */
	}
#endif
#ifdef AUDIT
	audit_thread_free(td);
#endif
	/* Free all OSD associated with this thread. */
	osd_thread_exit(td);

	EVENTHANDLER_INVOKE(thread_dtor, td);
	tid_free(td->td_tid);
}

/*
 * Initialize type-stable parts of a thread (when newly created).
 */
static int
thread_init(void *mem, int size, int flags)
{
	struct thread *td;

	td = (struct thread *)mem;

	td->td_sleepqueue = sleepq_alloc();
	td->td_turnstile = turnstile_alloc();
	td->td_rlqe = NULL;
	EVENTHANDLER_INVOKE(thread_init, td);
	td->td_sched = (struct td_sched *)&td[1];
	umtx_thread_init(td);
	td->td_kstack = 0;
	return (0);
}
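/*
 * Editorial note (not part of the original file) on the UMA callback
 * pairing: thread_ctor()/thread_dtor() run on every uma_zalloc() and
 * uma_zfree() of a thread, while thread_init()/thread_fini() run only
 * when an item enters or leaves the zone's cache.  The sleepqueue and
 * turnstile allocated in thread_init() are therefore "type-stable":
 * they survive thread reuse and are released only in thread_fini()
 * below.
 */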
/*
 * Tear down type-stable parts of a thread (just before being discarded).
 */
static void
thread_fini(void *mem, int size)
{
	struct thread *td;

	td = (struct thread *)mem;
	EVENTHANDLER_INVOKE(thread_fini, td);
	rlqentry_free(td->td_rlqe);
	turnstile_free(td->td_turnstile);
	sleepq_free(td->td_sleepqueue);
	umtx_thread_fini(td);
	seltdfini(td);
}

/*
 * For a newly created process,
 * link up all the structures and its initial threads etc.
 * called from:
 * {arch}/{arch}/machdep.c  ia64_init(), init386() etc.
 * proc_dtor() (should go away)
 * proc_init()
 */
void
proc_linkup0(struct proc *p, struct thread *td)
{
	TAILQ_INIT(&p->p_threads);	/* all threads in proc */
	proc_linkup(p, td);
}

void
proc_linkup(struct proc *p, struct thread *td)
{

	sigqueue_init(&p->p_sigqueue, p);
	p->p_ksi = ksiginfo_alloc(1);
	if (p->p_ksi != NULL) {
		/* XXX p_ksi may be null if ksiginfo zone is not ready */
		p->p_ksi->ksi_flags = KSI_EXT | KSI_INS;
	}
	LIST_INIT(&p->p_mqnotifier);
	p->p_numthreads = 0;
	thread_link(td, p);
}

/*
 * Initialize global thread allocation resources.
 */
void
threadinit(void)
{

	mtx_init(&tid_lock, "TID lock", NULL, MTX_DEF);

	/*
	 * pid_max cannot be greater than PID_MAX.
	 * leave one number for thread0.
	 */
	tid_unrhdr = new_unrhdr(PID_MAX + 2, INT_MAX, &tid_lock);

	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
	    thread_ctor, thread_dtor, thread_init, thread_fini,
	    16 - 1, 0);
	tidhashtbl = hashinit(maxproc / 2, M_TIDHASH, &tidhash);
	rw_init(&tidhash_lock, "tidhash");
}

/*
 * Place an unused thread on the zombie list.
 * Use the slpq as that must be unused by now.
 */
void
thread_zombie(struct thread *td)
{
	mtx_lock_spin(&zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_threads, td, td_slpq);
	mtx_unlock_spin(&zombie_lock);
}

/*
 * Release a thread that has exited after cpu_throw().
 */
void
thread_stash(struct thread *td)
{
	atomic_subtract_rel_int(&td->td_proc->p_exitthreads, 1);
	thread_zombie(td);
}

/*
 * Reap zombie resources.
 */
void
thread_reap(void)
{
	struct thread *td_first, *td_next;

	/*
	 * Don't even bother to lock if none at this instant,
	 * we really don't care about the next instant..
	 */
	if (!TAILQ_EMPTY(&zombie_threads)) {
		mtx_lock_spin(&zombie_lock);
		td_first = TAILQ_FIRST(&zombie_threads);
		if (td_first)
			TAILQ_INIT(&zombie_threads);
		mtx_unlock_spin(&zombie_lock);
		while (td_first) {
			td_next = TAILQ_NEXT(td_first, td_slpq);
			if (td_first->td_ucred)
				crfree(td_first->td_ucred);
			thread_free(td_first);
			td_first = td_next;
		}
	}
}

/*
 * Allocate a thread.
 */
struct thread *
thread_alloc(int pages)
{
	struct thread *td;

	thread_reap();	/* check if any zombies to get */

	td = (struct thread *)uma_zalloc(thread_zone, M_WAITOK);
	KASSERT(td->td_kstack == 0, ("thread_alloc got thread with kstack"));
	if (!vm_thread_new(td, pages)) {
		uma_zfree(thread_zone, td);
		return (NULL);
	}
	cpu_thread_alloc(td);
	return (td);
}

int
thread_alloc_stack(struct thread *td, int pages)
{

	KASSERT(td->td_kstack == 0,
	    ("thread_alloc_stack called on a thread with kstack"));
	if (!vm_thread_new(td, pages))
		return (0);
	cpu_thread_alloc(td);
	return (1);
}
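/*
 * Editorial sketch (hedged, not part of the original file): a typical
 * consumer such as thr_new() pairs the allocator above with
 * thread_link(), roughly:
 *
 *	newtd = thread_alloc(0);	// 0 selects the default kstack size
 *	if (newtd == NULL)
 *		return (ENOMEM);
 *	bcopy(&td->td_startcopy, &newtd->td_startcopy,
 *	    __rangeof(struct thread, td_startcopy, td_endcopy));
 *	...
 *	thread_link(newtd, p);
 */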
/*
 * Deallocate a thread.
 */
void
thread_free(struct thread *td)
{

	lock_profile_thread_exit(td);
	if (td->td_cpuset)
		cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_free(td);
	if (td->td_kstack != 0)
		vm_thread_dispose(td);
	uma_zfree(thread_zone, td);
}

/*
 * Discard the current thread and exit from its context.
 * Always called with scheduler locked.
 *
 * Because we can't free a thread while we're operating under its context,
 * push the current thread into our CPU's deadthread holder. This means
 * we needn't worry about someone else grabbing our context before we
 * do a cpu_throw().
 */
void
thread_exit(void)
{
	uint64_t runtime, new_switchtime;
	struct thread *td;
	struct thread *td2;
	struct proc *p;
	int wakeup_swapper;

	td = curthread;
	p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&Giant, MA_NOTOWNED);

	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(p != NULL, ("thread exiting without a process"));
	CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td,
	    (long)p->p_pid, td->td_name);
	KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending"));

#ifdef AUDIT
	AUDIT_SYSCALL_EXIT(0, td);
#endif
	umtx_thread_exit(td);
	/*
	 * drop FPU & debug register state storage, or any other
	 * architecture specific resources that
	 * would not be on a new untouched process.
	 */
	cpu_thread_exit(td);	/* XXXSMP */

	/*
	 * The last thread is left attached to the process so that the
	 * whole bundle gets recycled. Skip all this stuff if we never
	 * had threads.
	 * EXIT clears all signs of other threads when it goes to single
	 * threading, so the last thread always takes the short path.
	 */
	if (p->p_flag & P_HADTHREADS) {
		if (p->p_numthreads > 1) {
			atomic_add_int(&td->td_proc->p_exitthreads, 1);
			thread_unlink(td);
			td2 = FIRST_THREAD_IN_PROC(p);
			sched_exit_thread(td2, td);

			/*
			 * The test below is NOT true if we are the
			 * sole exiting thread. P_STOPPED_SINGLE is unset
			 * in exit1() after it is the only survivor.
			 */
			if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
				if (p->p_numthreads == p->p_suspcount) {
					thread_lock(p->p_singlethread);
					wakeup_swapper = thread_unsuspend_one(
					    p->p_singlethread, p);
					thread_unlock(p->p_singlethread);
					if (wakeup_swapper)
						kick_proc0();
				}
			}

			PCPU_SET(deadthread, td);
		} else {
			/*
			 * The last thread is exiting.. but not through exit()
			 */
			panic("thread_exit: Last thread exiting on its own");
		}
	}
#ifdef HWPMC_HOOKS
	/*
	 * If this thread is part of a process that is being tracked by
	 * hwpmc(4), inform the module of the thread's impending exit.
	 */
	if (PMC_PROC_IS_USING_PMCS(td->td_proc))
		PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
#endif
	PROC_UNLOCK(p);

	/* Do the same timestamp bookkeeping that mi_switch() would do. */
	new_switchtime = cpu_ticks();
	runtime = new_switchtime - PCPU_GET(switchtime);
	td->td_runtime += runtime;
	td->td_incruntime += runtime;
	PCPU_SET(switchtime, new_switchtime);
	PCPU_SET(switchticks, ticks);
	PCPU_INC(cnt.v_swtch);
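	/*
	 * Editorial note (not part of the original file): the per-thread
	 * counters accumulated above are folded into the process totals
	 * below because this thread's td_ru will not be examined again
	 * once the thread is stashed on the zombie list.
	 */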
	/* Save our resource usage in our process. */
	td->td_ru.ru_nvcsw++;
	ruxagg(p, td);
	rucollect(&p->p_ru, &td->td_ru);

	thread_lock(td);
	PROC_SUNLOCK(p);
	td->td_state = TDS_INACTIVE;
#ifdef WITNESS
	witness_thread_exit(td);
#endif
	CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td);
	sched_throw(td);
	panic("I'm a teapot!");
	/* NOTREACHED */
}

/*
 * Do any thread-specific cleanups that may be needed in wait().
 * Called with Giant, proc and schedlock not held.
 */
void
thread_wait(struct proc *p)
{
	struct thread *td;

	mtx_assert(&Giant, MA_NOTOWNED);
	KASSERT(p->p_numthreads == 1, ("multiple threads in thread_wait()"));
	KASSERT(p->p_exitthreads == 0, ("p_exitthreads leaking"));
	td = FIRST_THREAD_IN_PROC(p);
	/* Lock the last thread so we spin until it exits cpu_throw(). */
	thread_lock(td);
	thread_unlock(td);
	lock_profile_thread_exit(td);
	cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_clean(td);
	crfree(td->td_ucred);
	thread_reap();	/* check for zombie threads etc. */
}

/*
 * Link a thread to a process.
 * Set up anything that needs to be initialized for it to
 * be used by the process.
 */
void
thread_link(struct thread *td, struct proc *p)
{

	/*
	 * XXX This can't be enabled because it's called for proc0 before
	 * its lock has been created.
	 * PROC_LOCK_ASSERT(p, MA_OWNED);
	 */
	td->td_state = TDS_INACTIVE;
	td->td_proc = p;
	td->td_flags = TDF_INMEM;

	LIST_INIT(&td->td_contested);
	LIST_INIT(&td->td_lprof[0]);
	LIST_INIT(&td->td_lprof[1]);
	sigqueue_init(&td->td_sigqueue, p);
	callout_init(&td->td_slpcallout, CALLOUT_MPSAFE);
	TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
	p->p_numthreads++;
}

/*
 * Called from:
 *  thread_exit()
 */
void
thread_unlink(struct thread *td)
{
	struct proc *p = td->td_proc;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	TAILQ_REMOVE(&p->p_threads, td, td_plist);
	p->p_numthreads--;
	/* could clear a few other things here */
	/* Must NOT clear links to proc! */
}

static int
calc_remaining(struct proc *p, int mode)
{
	int remaining;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	if (mode == SINGLE_EXIT)
		remaining = p->p_numthreads;
	else if (mode == SINGLE_BOUNDARY)
		remaining = p->p_numthreads - p->p_boundary_count;
	else if (mode == SINGLE_NO_EXIT || mode == SINGLE_ALLPROC)
		remaining = p->p_numthreads - p->p_suspcount;
	else
		panic("calc_remaining: wrong mode %d", mode);
	return (remaining);
}
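/*
 * Editorial note (not part of the original file): remain_for_mode()
 * gives the number of threads that may legitimately remain unstopped
 * once single-threading succeeds.  For SINGLE_ALLPROC the requesting
 * thread belongs to another process (see the KASSERT in
 * thread_single()), so all of p's threads must stop; in every other
 * mode the caller itself is the one survivor.
 */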
static int
remain_for_mode(int mode)
{

	return (mode == SINGLE_ALLPROC ? 0 : 1);
}

static int
weed_inhib(int mode, struct thread *td2, struct proc *p)
{
	int wakeup_swapper;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td2, MA_OWNED);

	wakeup_swapper = 0;
	switch (mode) {
	case SINGLE_EXIT:
		if (TD_IS_SUSPENDED(td2))
			wakeup_swapper |= thread_unsuspend_one(td2, p);
		if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0)
			wakeup_swapper |= sleepq_abort(td2, EINTR);
		break;
	case SINGLE_BOUNDARY:
		if (TD_IS_SUSPENDED(td2) && (td2->td_flags & TDF_BOUNDARY) == 0)
			wakeup_swapper |= thread_unsuspend_one(td2, p);
		if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0)
			wakeup_swapper |= sleepq_abort(td2, ERESTART);
		break;
	case SINGLE_NO_EXIT:
		if (TD_IS_SUSPENDED(td2) && (td2->td_flags & TDF_BOUNDARY) == 0)
			wakeup_swapper |= thread_unsuspend_one(td2, p);
		if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0)
			wakeup_swapper |= sleepq_abort(td2, ERESTART);
		break;
	case SINGLE_ALLPROC:
		/*
		 * ALLPROC suspend tries to avoid spurious EINTR for
		 * threads sleeping interruptibly, by suspending the
		 * thread directly, similarly to sig_suspend_threads().
		 * Since such sleep is not performed at the user
		 * boundary, TDF_BOUNDARY flag is not set, and TDF_ALLPROCSUSP
		 * is used to avoid immediate un-suspend.
		 */
		if (TD_IS_SUSPENDED(td2) && (td2->td_flags & (TDF_BOUNDARY |
		    TDF_ALLPROCSUSP)) == 0)
			wakeup_swapper |= thread_unsuspend_one(td2, p);
		if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0) {
			if ((td2->td_flags & TDF_SBDRY) == 0) {
				thread_suspend_one(td2);
				td2->td_flags |= TDF_ALLPROCSUSP;
			} else {
				wakeup_swapper |= sleepq_abort(td2, ERESTART);
			}
		}
		break;
	}
	return (wakeup_swapper);
}

/*
 * Enforce single-threading.
 *
 * Returns 1 if the caller must abort (another thread is waiting to
 * exit the process or similar). Process is locked!
 * Returns 0 when you are successfully the only thread running.
 * A process has successfully single threaded in the suspend mode when
 * there are no threads in user mode. Threads in the kernel must be
 * allowed to continue until they get to the user boundary. They may even
 * copy out their return values and data before suspending. They may however be
 * accelerated in reaching the user boundary as we will wake up
 * any sleeping threads that are interruptible. (PCATCH).
 */
int
thread_single(struct proc *p, int mode)
{
	struct thread *td;
	struct thread *td2;
	int remaining, wakeup_swapper;

	td = curthread;
	KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY ||
	    mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT,
	    ("invalid mode %d", mode));
	/*
	 * If allowing non-ALLPROC singlethreading for non-curproc
	 * callers, calc_remaining() and remain_for_mode() should be
	 * adjusted to also account for td->td_proc != p.  For now
	 * this is not implemented because it is not used.
	 */
	KASSERT((mode == SINGLE_ALLPROC && td->td_proc != p) ||
	    (mode != SINGLE_ALLPROC && td->td_proc == p),
	    ("mode %d proc %p curproc %p", mode, p, td->td_proc));
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);

	if ((p->p_flag & P_HADTHREADS) == 0 && mode != SINGLE_ALLPROC)
		return (0);
	/* Is someone already single threading? */
	if (p->p_singlethread != NULL && p->p_singlethread != td)
		return (1);

	if (mode == SINGLE_EXIT) {
		p->p_flag |= P_SINGLE_EXIT;
		p->p_flag &= ~P_SINGLE_BOUNDARY;
	} else {
		p->p_flag &= ~P_SINGLE_EXIT;
		if (mode == SINGLE_BOUNDARY)
			p->p_flag |= P_SINGLE_BOUNDARY;
		else
			p->p_flag &= ~P_SINGLE_BOUNDARY;
	}
	if (mode == SINGLE_ALLPROC)
		p->p_flag |= P_TOTAL_STOP;
	p->p_flag |= P_STOPPED_SINGLE;
	PROC_SLOCK(p);
	p->p_singlethread = td;
	remaining = calc_remaining(p, mode);
	while (remaining != remain_for_mode(mode)) {
		if (P_SHOULDSTOP(p) != P_STOPPED_SINGLE)
			goto stopme;
		wakeup_swapper = 0;
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			thread_lock(td2);
			td2->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
			if (TD_IS_INHIBITED(td2)) {
				wakeup_swapper |= weed_inhib(mode, td2, p);
#ifdef SMP
			} else if (TD_IS_RUNNING(td2) && td != td2) {
				forward_signal(td2);
#endif
			}
			thread_unlock(td2);
		}
		if (wakeup_swapper)
			kick_proc0();
		remaining = calc_remaining(p, mode);

		/*
		 * Maybe we suspended some threads.. was it enough?
		 */
		if (remaining == remain_for_mode(mode))
			break;

stopme:
		/*
		 * Wake us up when everyone else has suspended.
		 * In the meantime we suspend as well.
		 */
		thread_suspend_switch(td, p);
		remaining = calc_remaining(p, mode);
	}
	if (mode == SINGLE_EXIT) {
		/*
		 * Convert the process to an unthreaded process.  The
		 * SINGLE_EXIT is called by exit1() or execve(), in
		 * both cases other threads must be retired.
		 */
		KASSERT(p->p_numthreads == 1, ("Unthreading with >1 threads"));
		p->p_singlethread = NULL;
		p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_HADTHREADS);

		/*
		 * Wait for any remaining threads to exit cpu_throw().
		 */
		while (p->p_exitthreads != 0) {
			PROC_SUNLOCK(p);
			PROC_UNLOCK(p);
			sched_relinquish(td);
			PROC_LOCK(p);
			PROC_SLOCK(p);
		}
	}
	PROC_SUNLOCK(p);
	return (0);
}

bool
thread_suspend_check_needed(void)
{
	struct proc *p;
	struct thread *td;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	return (P_SHOULDSTOP(p) || ((p->p_flag & P_TRACED) != 0 &&
	    (td->td_dbgflags & TDB_SUSPEND) != 0));
}
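/*
 * Editorial sketch (hedged, not part of the original file): sleep
 * paths such as sleepq_catch_signals() call thread_suspend_check()
 * with return_instead != 0 and propagate the result, roughly:
 *
 *	PROC_LOCK(p);
 *	error = thread_suspend_check(1);
 *	PROC_UNLOCK(p);
 *	if (error != 0)
 *		return (error);	// EINTR or ERESTART
 *
 * The AST/userret path instead passes 0 and simply parks here until
 * the single-threading event ends.
 */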
/*
 * Called from locations that can safely check to see
 * whether we have to suspend or at least throttle for a
 * single-thread event (e.g. fork).
 *
 * Such locations include userret().
 * If the "return_instead" argument is non-zero, the thread must be able to
 * accept 0 (caller may continue), or 1 (caller must abort) as a result.
 *
 * The 'return_instead' argument tells the function if it may do a
 * thread_exit() or suspend, or whether the caller must abort and back
 * out instead.
 *
 * If the thread that set the single_threading request has set the
 * P_SINGLE_EXIT bit in the process flags then this call will never return
 * if 'return_instead' is false, but will exit.
 *
 * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
 *---------------+--------------------+---------------------
 *       0       | returns 0          |   returns 0 or 1
 *               | when ST ends       |   immediately
 *---------------+--------------------+---------------------
 *       1       | thread exits       |   returns 1
 *               |                    |   immediately
 * 0 = thread_exit() or suspension ok,
 * other = return error instead of stopping the thread.
 *
 * While a full suspension is under effect, even a single threading
 * thread would be suspended if it made this call (but it shouldn't).
 * This call should only be made from places where
 * thread_exit() would be safe as that may be the outcome unless
 * return_instead is set.
 */
int
thread_suspend_check(int return_instead)
{
	struct thread *td;
	struct proc *p;
	int wakeup_swapper;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	while (thread_suspend_check_needed()) {
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			KASSERT(p->p_singlethread != NULL,
			    ("singlethread not set"));
			/*
			 * The only suspension in action is a
			 * single-threading. Single threader need not stop.
			 * XXX Should be safe to access unlocked
			 * as it can only be set to be true by us.
			 */
			if (p->p_singlethread == td)
				return (0);	/* Exempt from stopping. */
		}
		if ((p->p_flag & P_SINGLE_EXIT) && return_instead)
			return (EINTR);

		/* Should we goto user boundary if we didn't come from there? */
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
		    (p->p_flag & P_SINGLE_BOUNDARY) && return_instead)
			return (ERESTART);

		/*
		 * Ignore suspend requests for stop signals if they
		 * are deferred.
		 */
		if ((P_SHOULDSTOP(p) == P_STOPPED_SIG ||
		    (p->p_flag & P_TOTAL_STOP) != 0) &&
		    (td->td_flags & TDF_SBDRY) != 0) {
			KASSERT(return_instead,
			    ("TDF_SBDRY set for unsafe thread_suspend_check"));
			return (0);
		}

		/*
		 * If the process is waiting for us to exit,
		 * this thread should just suicide.
		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
		 */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
			PROC_UNLOCK(p);
			tidhash_remove(td);
			PROC_LOCK(p);
			tdsigcleanup(td);
			PROC_SLOCK(p);
			thread_stopped(p);
			thread_exit();
		}

		PROC_SLOCK(p);
		thread_stopped(p);
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount + 1) {
				thread_lock(p->p_singlethread);
				wakeup_swapper =
				    thread_unsuspend_one(p->p_singlethread, p);
				thread_unlock(p->p_singlethread);
				if (wakeup_swapper)
					kick_proc0();
			}
		}
		PROC_UNLOCK(p);
		thread_lock(td);
		/*
		 * When a thread suspends, it just
		 * gets taken off all queues.
		 */
		thread_suspend_one(td);
		if (return_instead == 0) {
			p->p_boundary_count++;
			td->td_flags |= TDF_BOUNDARY;
		}
		PROC_SUNLOCK(p);
		mi_switch(SW_INVOL | SWT_SUSPEND, NULL);
		if (return_instead == 0)
			td->td_flags &= ~TDF_BOUNDARY;
		thread_unlock(td);
		PROC_LOCK(p);
		if (return_instead == 0) {
			PROC_SLOCK(p);
			p->p_boundary_count--;
			PROC_SUNLOCK(p);
		}
	}
	return (0);
}
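/*
 * Editorial note (not part of the original file): unlike
 * thread_suspend_one() below, this variant suspends the
 * single-threading requester itself, performs the context switch, and
 * reacquires both the proc lock and the proc spinlock before
 * returning to the wait loop in thread_single().
 */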
void
thread_suspend_switch(struct thread *td, struct proc *p)
{

	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	/*
	 * We implement thread_suspend_one in stages here to avoid
	 * dropping the proc lock while the thread lock is owned.
	 */
	if (p == td->td_proc) {
		thread_stopped(p);
		p->p_suspcount++;
	}
	PROC_UNLOCK(p);
	thread_lock(td);
	td->td_flags &= ~TDF_NEEDSUSPCHK;
	TD_SET_SUSPENDED(td);
	sched_sleep(td, 0);
	PROC_SUNLOCK(p);
	DROP_GIANT();
	mi_switch(SW_VOL | SWT_SUSPEND, NULL);
	thread_unlock(td);
	PICKUP_GIANT();
	PROC_LOCK(p);
	PROC_SLOCK(p);
}

void
thread_suspend_one(struct thread *td)
{
	struct proc *p;

	p = td->td_proc;
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	p->p_suspcount++;
	td->td_flags &= ~TDF_NEEDSUSPCHK;
	TD_SET_SUSPENDED(td);
	sched_sleep(td, 0);
}

int
thread_unsuspend_one(struct thread *td, struct proc *p)
{

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));
	TD_CLR_SUSPENDED(td);
	td->td_flags &= ~TDF_ALLPROCSUSP;
	if (td->td_proc == p) {
		PROC_SLOCK_ASSERT(p, MA_OWNED);
		p->p_suspcount--;
	}
	return (setrunnable(td));
}

/*
 * Allow all threads blocked by single threading to continue running.
 */
void
thread_unsuspend(struct proc *p)
{
	struct thread *td;
	int wakeup_swapper;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	wakeup_swapper = 0;
	if (!P_SHOULDSTOP(p)) {
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (TD_IS_SUSPENDED(td)) {
				wakeup_swapper |= thread_unsuspend_one(td, p);
			}
			thread_unlock(td);
		}
	} else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) &&
	    (p->p_numthreads == p->p_suspcount)) {
		/*
		 * Stopping everything also did the job for the single
		 * threading request. Now we've downgraded to single-threaded,
		 * let it continue.
		 */
		if (p->p_singlethread->td_proc == p) {
			thread_lock(p->p_singlethread);
			wakeup_swapper = thread_unsuspend_one(
			    p->p_singlethread, p);
			thread_unlock(p->p_singlethread);
		}
	}
	if (wakeup_swapper)
		kick_proc0();
}
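/*
 * Editorial sketch (hedged, not part of the original file):
 * thread_single() and thread_single_end() bracket work that needs a
 * quiescent process, and the mode arguments must match (see the
 * KASSERT below), roughly:
 *
 *	PROC_LOCK(p);
 *	if (thread_single(p, SINGLE_BOUNDARY))
 *		... abort, someone else is single-threading ...
 *	... operate with all other threads parked at the boundary ...
 *	thread_single_end(p, SINGLE_BOUNDARY);
 *	PROC_UNLOCK(p);
 *
 * The SINGLE_EXIT mode used by exit1() is typically never undone.
 */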
/*
 * End the single threading mode.
 */
void
thread_single_end(struct proc *p, int mode)
{
	struct thread *td;
	int wakeup_swapper;

	KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY ||
	    mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT,
	    ("invalid mode %d", mode));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT((mode == SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) != 0) ||
	    (mode != SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) == 0),
	    ("mode %d does not match P_TOTAL_STOP", mode));
	p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY |
	    P_TOTAL_STOP);
	PROC_SLOCK(p);
	p->p_singlethread = NULL;
	wakeup_swapper = 0;
	/*
	 * If there are other threads they may now run,
	 * unless of course there is a blanket 'stop order'
	 * on the process. The single threader must be allowed
	 * to continue however as this is a bad place to stop.
	 */
	if (p->p_numthreads != remain_for_mode(mode) && !P_SHOULDSTOP(p)) {
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (TD_IS_SUSPENDED(td)) {
				wakeup_swapper |= thread_unsuspend_one(td, p);
			}
			thread_unlock(td);
		}
	}
	PROC_SUNLOCK(p);
	if (wakeup_swapper)
		kick_proc0();
}

struct thread *
thread_find(struct proc *p, lwpid_t tid)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_tid == tid)
			break;
	}
	return (td);
}

/* Locate a thread by number; return with proc lock held. */
struct thread *
tdfind(lwpid_t tid, pid_t pid)
{
#define RUN_THRESH	16
	struct thread *td;
	int run = 0;

	rw_rlock(&tidhash_lock);
	LIST_FOREACH(td, TIDHASH(tid), td_hash) {
		if (td->td_tid == tid) {
			if (pid != -1 && td->td_proc->p_pid != pid) {
				td = NULL;
				break;
			}
			PROC_LOCK(td->td_proc);
			if (td->td_proc->p_state == PRS_NEW) {
				PROC_UNLOCK(td->td_proc);
				td = NULL;
				break;
			}
			if (run > RUN_THRESH) {
				if (rw_try_upgrade(&tidhash_lock)) {
					LIST_REMOVE(td, td_hash);
					LIST_INSERT_HEAD(TIDHASH(td->td_tid),
					    td, td_hash);
					rw_wunlock(&tidhash_lock);
					return (td);
				}
			}
			break;
		}
		run++;
	}
	rw_runlock(&tidhash_lock);
	return (td);
}

void
tidhash_add(struct thread *td)
{
	rw_wlock(&tidhash_lock);
	LIST_INSERT_HEAD(TIDHASH(td->td_tid), td, td_hash);
	rw_wunlock(&tidhash_lock);
}

void
tidhash_remove(struct thread *td)
{
	rw_wlock(&tidhash_lock);
	LIST_REMOVE(td, td_hash);
	rw_wunlock(&tidhash_lock);
}