kern_thread.c revision 314667
/*-
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 *  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include "opt_witness.h"
#include "opt_kdtrace.h"
#include "opt_hwpmc_hooks.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/kern/kern_thread.c 314667 2017-03-04 13:03:31Z avg $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rangelock.h>
#include <sys/resourcevar.h>
#include <sys/sdt.h>
#include <sys/smp.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/selinfo.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/turnstile.h>
#include <sys/ktr.h>
#include <sys/rwlock.h>
#include <sys/umtx.h>
#include <sys/cpuset.h>
#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#include <security/audit/audit.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <sys/eventhandler.h>

SDT_PROVIDER_DECLARE(proc);
SDT_PROBE_DEFINE(proc, , , lwp__exit);
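
/*
 * Overview: this file implements the machinery shared by all kernel
 * threads: the UMA zone (and its ctor/dtor/init/fini hooks) backing
 * struct thread, thread ID (TID) allocation and the TID hash used by
 * tdfind(), the zombie list used to reap exited threads, and the
 * single-threading protocol used for exit, exec and process stops.
 */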

/*
 * thread related storage.
 */
static uma_zone_t thread_zone;

TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
static struct mtx zombie_lock;
MTX_SYSINIT(zombie_lock, &zombie_lock, "zombie lock", MTX_SPIN);

static void thread_zombie(struct thread *);
static int thread_unsuspend_one(struct thread *td, struct proc *p,
    bool boundary);

#define TID_BUFFER_SIZE	1024

struct mtx tid_lock;
static struct unrhdr *tid_unrhdr;
static lwpid_t tid_buffer[TID_BUFFER_SIZE];
static int tid_head, tid_tail;
static MALLOC_DEFINE(M_TIDHASH, "tidhash", "thread hash");

struct tidhashhead *tidhashtbl;
u_long tidhash;
struct rwlock tidhash_lock;

static lwpid_t
tid_alloc(void)
{
	lwpid_t tid;

	tid = alloc_unr(tid_unrhdr);
	if (tid != -1)
		return (tid);
	mtx_lock(&tid_lock);
	if (tid_head == tid_tail) {
		mtx_unlock(&tid_lock);
		return (-1);
	}
	tid = tid_buffer[tid_head];
	tid_head = (tid_head + 1) % TID_BUFFER_SIZE;
	mtx_unlock(&tid_lock);
	return (tid);
}

static void
tid_free(lwpid_t tid)
{
	lwpid_t tmp_tid = -1;

	mtx_lock(&tid_lock);
	if ((tid_tail + 1) % TID_BUFFER_SIZE == tid_head) {
		tmp_tid = tid_buffer[tid_head];
		tid_head = (tid_head + 1) % TID_BUFFER_SIZE;
	}
	tid_buffer[tid_tail] = tid;
	tid_tail = (tid_tail + 1) % TID_BUFFER_SIZE;
	mtx_unlock(&tid_lock);
	if (tmp_tid != -1)
		free_unr(tid_unrhdr, tmp_tid);
}
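
/*
 * Note on TID recycling: tid_alloc() draws from the unr(9) allocator
 * first and falls back to the tid_buffer ring only when the allocator
 * is exhausted.  tid_free() always parks the freed TID in the ring,
 * which delays reuse of recently freed TIDs; once the ring is full the
 * oldest entry is handed back to the allocator.  That free_unr() call
 * must happen after tid_lock is dropped: tid_unrhdr is created with
 * tid_lock as its mutex (see threadinit() below), so calling free_unr()
 * with tid_lock held would recurse on a non-recursive mutex.
 */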

/*
 * Prepare a thread for use.
 */
static int
thread_ctor(void *mem, int size, void *arg, int flags)
{
	struct thread *td;

	td = (struct thread *)mem;
	td->td_state = TDS_INACTIVE;
	td->td_oncpu = NOCPU;

	td->td_tid = tid_alloc();

	/*
	 * Note that td_critnest begins life as 1 because the thread is not
	 * running and is thereby implicitly waiting to be on the receiving
	 * end of a context switch.
	 */
	td->td_critnest = 1;
	td->td_lend_user_pri = PRI_MAX;
	EVENTHANDLER_INVOKE(thread_ctor, td);
#ifdef AUDIT
	audit_thread_alloc(td);
#endif
	umtx_thread_alloc(td);
	return (0);
}

/*
 * Reclaim a thread after use.
 */
static void
thread_dtor(void *mem, int size, void *arg)
{
	struct thread *td;

	td = (struct thread *)mem;

#ifdef INVARIANTS
	/* Verify that this thread is in a safe state to free. */
	switch (td->td_state) {
	case TDS_INHIBITED:
	case TDS_RUNNING:
	case TDS_CAN_RUN:
	case TDS_RUNQ:
		/*
		 * We must never unlink a thread that is in one of
		 * these states, because it is currently active.
		 */
		panic("bad state for thread unlinking");
		/* NOTREACHED */
	case TDS_INACTIVE:
		break;
	default:
		panic("bad thread state");
		/* NOTREACHED */
	}
#endif
#ifdef AUDIT
	audit_thread_free(td);
#endif
	/* Free all OSD associated with this thread. */
	osd_thread_exit(td);

	EVENTHANDLER_INVOKE(thread_dtor, td);
	tid_free(td->td_tid);
}

/*
 * Initialize type-stable parts of a thread (when newly created).
 */
static int
thread_init(void *mem, int size, int flags)
{
	struct thread *td;

	td = (struct thread *)mem;

	td->td_sleepqueue = sleepq_alloc();
	td->td_turnstile = turnstile_alloc();
	td->td_rlqe = NULL;
	EVENTHANDLER_INVOKE(thread_init, td);
	td->td_sched = (struct td_sched *)&td[1];
	umtx_thread_init(td);
	td->td_kstack = 0;
	td->td_sel = NULL;
	return (0);
}

/*
 * Tear down type-stable parts of a thread (just before being discarded).
 */
static void
thread_fini(void *mem, int size)
{
	struct thread *td;

	td = (struct thread *)mem;
	EVENTHANDLER_INVOKE(thread_fini, td);
	rlqentry_free(td->td_rlqe);
	turnstile_free(td->td_turnstile);
	sleepq_free(td->td_sleepqueue);
	umtx_thread_fini(td);
	seltdfini(td);
}

/*
 * For a newly created process, link up all the structures and its
 * initial thread.
 * Called from:
 *	{arch}/{arch}/machdep.c  ia64_init(), init386() etc.
 *	proc_dtor() (should go away)
 *	proc_init()
 */
void
proc_linkup0(struct proc *p, struct thread *td)
{
	TAILQ_INIT(&p->p_threads);	/* all threads in proc */
	proc_linkup(p, td);
}

void
proc_linkup(struct proc *p, struct thread *td)
{

	sigqueue_init(&p->p_sigqueue, p);
	p->p_ksi = ksiginfo_alloc(1);
	if (p->p_ksi != NULL) {
		/* XXX p_ksi may be null if ksiginfo zone is not ready */
		p->p_ksi->ksi_flags = KSI_EXT | KSI_INS;
	}
	LIST_INIT(&p->p_mqnotifier);
	p->p_numthreads = 0;
	thread_link(td, p);
}

/*
 * Initialize global thread allocation resources.
 */
void
threadinit(void)
{

	mtx_init(&tid_lock, "TID lock", NULL, MTX_DEF);

	/*
	 * pid_max cannot be greater than PID_MAX.
	 * Leave one number for thread0.
	 */
	tid_unrhdr = new_unrhdr(PID_MAX + 2, INT_MAX, &tid_lock);

	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
	    thread_ctor, thread_dtor, thread_init, thread_fini,
	    16 - 1, UMA_ZONE_NOFREE);
	tidhashtbl = hashinit(maxproc / 2, M_TIDHASH, &tidhash);
	rw_init(&tidhash_lock, "tidhash");
}

/*
 * Place an unused thread on the zombie list.
 * Use the slpq as that must be unused by now.
 */
void
thread_zombie(struct thread *td)
{
	mtx_lock_spin(&zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_threads, td, td_slpq);
	mtx_unlock_spin(&zombie_lock);
}

/*
 * Release a thread that has exited after cpu_throw().
 */
void
thread_stash(struct thread *td)
{
	atomic_subtract_rel_int(&td->td_proc->p_exitthreads, 1);
	thread_zombie(td);
}

/*
 * Reap zombie resources.
 */
void
thread_reap(void)
{
	struct thread *td_first, *td_next;

	/*
	 * Don't even bother to lock if none at this instant;
	 * we really don't care about the next instant.
	 */
	if (!TAILQ_EMPTY(&zombie_threads)) {
		mtx_lock_spin(&zombie_lock);
		td_first = TAILQ_FIRST(&zombie_threads);
		if (td_first)
			TAILQ_INIT(&zombie_threads);
		mtx_unlock_spin(&zombie_lock);
		while (td_first) {
			td_next = TAILQ_NEXT(td_first, td_slpq);
			if (td_first->td_ucred)
				crfree(td_first->td_ucred);
			thread_free(td_first);
			td_first = td_next;
		}
	}
}
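
/*
 * Note on the zombie list: an exiting thread cannot free itself while
 * it is still running on its own kernel stack, so thread_exit() parks
 * it in the per-CPU deadthread holder and thread_stash() later moves
 * it onto zombie_threads.  The storage is finally reclaimed from
 * another context by thread_reap(), called from thread_alloc() and
 * thread_wait() below.
 */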

/*
 * Allocate a thread.
 */
struct thread *
thread_alloc(int pages)
{
	struct thread *td;

	thread_reap(); /* check if any zombies to get */

	td = (struct thread *)uma_zalloc(thread_zone, M_WAITOK);
	KASSERT(td->td_kstack == 0, ("thread_alloc got thread with kstack"));
	if (!vm_thread_new(td, pages)) {
		uma_zfree(thread_zone, td);
		return (NULL);
	}
	cpu_thread_alloc(td);
	return (td);
}

int
thread_alloc_stack(struct thread *td, int pages)
{

	KASSERT(td->td_kstack == 0,
	    ("thread_alloc_stack called on a thread with kstack"));
	if (!vm_thread_new(td, pages))
		return (0);
	cpu_thread_alloc(td);
	return (1);
}

/*
 * Deallocate a thread.
 */
void
thread_free(struct thread *td)
{

	lock_profile_thread_exit(td);
	if (td->td_cpuset)
		cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_free(td);
	if (td->td_kstack != 0)
		vm_thread_dispose(td);
	callout_drain(&td->td_slpcallout);
	uma_zfree(thread_zone, td);
}
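
/*
 * Usage sketch (informational; see the callers in kern_thr.c and
 * kern_kthread.c for the real sequences).  A new thread is typically
 * set up roughly like this, where pages == 0 selects the default
 * kernel stack size:
 *
 *	newtd = thread_alloc(0);
 *	if (newtd == NULL)
 *		return (ENOMEM);
 *	...copy startup state into newtd...
 *	PROC_LOCK(p);
 *	thread_link(newtd, p);
 *	PROC_UNLOCK(p);
 */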

/*
 * Discard the current thread and exit from its context.
 * Always called with scheduler locked.
 *
 * Because we can't free a thread while we're operating under its context,
 * push the current thread into our CPU's deadthread holder. This means
 * we needn't worry about someone else grabbing our context before we
 * do a cpu_throw().
 */
void
thread_exit(void)
{
	uint64_t runtime, new_switchtime;
	struct thread *td;
	struct thread *td2;
	struct proc *p;
	int wakeup_swapper;

	td = curthread;
	p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&Giant, MA_NOTOWNED);

	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(p != NULL, ("thread exiting without a process"));
	CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td,
	    (long)p->p_pid, td->td_name);
	KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending"));

#ifdef AUDIT
	AUDIT_SYSCALL_EXIT(0, td);
#endif
	/*
	 * Drop FPU and debug register state storage, and any other
	 * architecture-specific resources that would not be present
	 * in a new, untouched process.
	 */
	cpu_thread_exit(td);

	/*
	 * The last thread is left attached to the process so that the
	 * whole bundle gets recycled. Skip all this stuff if we never
	 * had threads. EXIT clears all signs of other threads when it
	 * goes to single threading, so the last thread always takes
	 * the short path.
	 */
	if (p->p_flag & P_HADTHREADS) {
		if (p->p_numthreads > 1) {
			atomic_add_int(&td->td_proc->p_exitthreads, 1);
			thread_unlink(td);
			td2 = FIRST_THREAD_IN_PROC(p);
			sched_exit_thread(td2, td);

			/*
			 * The test below is NOT true if we are the
			 * sole exiting thread. P_STOPPED_SINGLE is unset
			 * in exit1() after it is the only survivor.
			 */
			if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
				if (p->p_numthreads == p->p_suspcount) {
					thread_lock(p->p_singlethread);
					wakeup_swapper = thread_unsuspend_one(
					    p->p_singlethread, p, false);
					thread_unlock(p->p_singlethread);
					if (wakeup_swapper)
						kick_proc0();
				}
			}

			PCPU_SET(deadthread, td);
		} else {
			/*
			 * The last thread is exiting.. but not through exit().
			 */
			panic("thread_exit: Last thread exiting on its own");
		}
	}
#ifdef HWPMC_HOOKS
	/*
	 * If this thread is part of a process that is being tracked by
	 * hwpmc(4), inform the module of the thread's impending exit.
	 */
	if (PMC_PROC_IS_USING_PMCS(td->td_proc))
		PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
#endif
	PROC_UNLOCK(p);
	PROC_STATLOCK(p);
	thread_lock(td);
	PROC_SUNLOCK(p);

	/* Do the same timestamp bookkeeping that mi_switch() would do. */
	new_switchtime = cpu_ticks();
	runtime = new_switchtime - PCPU_GET(switchtime);
	td->td_runtime += runtime;
	td->td_incruntime += runtime;
	PCPU_SET(switchtime, new_switchtime);
	PCPU_SET(switchticks, ticks);
	PCPU_INC(cnt.v_swtch);

	/* Save our resource usage in our process. */
	td->td_ru.ru_nvcsw++;
	ruxagg(p, td);
	rucollect(&p->p_ru, &td->td_ru);
	PROC_STATUNLOCK(p);

	td->td_state = TDS_INACTIVE;
#ifdef WITNESS
	witness_thread_exit(td);
#endif
	CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td);
	sched_throw(td);
	panic("I'm a teapot!");
	/* NOTREACHED */
}

/*
 * Do any thread-specific cleanups that may be needed in wait().
 * Called with Giant, proc and schedlock not held.
 */
void
thread_wait(struct proc *p)
{
	struct thread *td;

	mtx_assert(&Giant, MA_NOTOWNED);
	KASSERT(p->p_numthreads == 1, ("multiple threads in thread_wait()"));
	KASSERT(p->p_exitthreads == 0, ("p_exitthreads leaking"));
	td = FIRST_THREAD_IN_PROC(p);
	/* Lock the last thread so we spin until it exits cpu_throw(). */
	thread_lock(td);
	thread_unlock(td);
	lock_profile_thread_exit(td);
	cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_clean(td);
	crfree(td->td_ucred);
	callout_drain(&td->td_slpcallout);
	thread_reap();	/* check for zombie threads etc. */
}

/*
 * Link a thread to a process.
 * Set up anything that needs to be initialized for it to
 * be used by the process.
 */
void
thread_link(struct thread *td, struct proc *p)
{

	/*
	 * XXX This can't be enabled because it's called for proc0 before
	 * its lock has been created.
	 * PROC_LOCK_ASSERT(p, MA_OWNED);
	 */
	td->td_state = TDS_INACTIVE;
	td->td_proc = p;
	td->td_flags = TDF_INMEM;

	LIST_INIT(&td->td_contested);
	LIST_INIT(&td->td_lprof[0]);
	LIST_INIT(&td->td_lprof[1]);
	sigqueue_init(&td->td_sigqueue, p);
	callout_init(&td->td_slpcallout, 1);
	TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
	p->p_numthreads++;
}

/*
 * Called from:
 *	thread_exit()
 */
void
thread_unlink(struct thread *td)
{
	struct proc *p = td->td_proc;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	TAILQ_REMOVE(&p->p_threads, td, td_plist);
	p->p_numthreads--;
	/* could clear a few other things here */
	/* Must NOT clear links to proc! */
}
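
/*
 * The single-threading modes, as counted by calc_remaining() and
 * remain_for_mode() below:
 *
 *	SINGLE_EXIT	other threads must exit; done when the caller
 *			is the only thread left (target 1).
 *	SINGLE_BOUNDARY	other threads must suspend at the user
 *			boundary; remaining counts those not yet there.
 *	SINGLE_NO_EXIT	other threads merely need to be suspended.
 *	SINGLE_ALLPROC	like SINGLE_NO_EXIT, but requested by a thread
 *			of a different process, so the caller itself is
 *			not counted and the target is 0.
 */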

static int
calc_remaining(struct proc *p, int mode)
{
	int remaining;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	if (mode == SINGLE_EXIT)
		remaining = p->p_numthreads;
	else if (mode == SINGLE_BOUNDARY)
		remaining = p->p_numthreads - p->p_boundary_count;
	else if (mode == SINGLE_NO_EXIT || mode == SINGLE_ALLPROC)
		remaining = p->p_numthreads - p->p_suspcount;
	else
		panic("calc_remaining: wrong mode %d", mode);
	return (remaining);
}

static int
remain_for_mode(int mode)
{

	return (mode == SINGLE_ALLPROC ? 0 : 1);
}

static int
weed_inhib(int mode, struct thread *td2, struct proc *p)
{
	int wakeup_swapper;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td2, MA_OWNED);

	wakeup_swapper = 0;
	switch (mode) {
	case SINGLE_EXIT:
		if (TD_IS_SUSPENDED(td2))
			wakeup_swapper |= thread_unsuspend_one(td2, p, true);
		if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0)
			wakeup_swapper |= sleepq_abort(td2, EINTR);
		break;
	case SINGLE_BOUNDARY:
	case SINGLE_NO_EXIT:
		if (TD_IS_SUSPENDED(td2) && (td2->td_flags & TDF_BOUNDARY) == 0)
			wakeup_swapper |= thread_unsuspend_one(td2, p, false);
		if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0)
			wakeup_swapper |= sleepq_abort(td2, ERESTART);
		break;
	case SINGLE_ALLPROC:
		/*
		 * ALLPROC suspend tries to avoid spurious EINTR for
		 * threads sleeping interruptibly, by suspending the
		 * thread directly, similarly to sig_suspend_threads().
		 * Since such sleep is not performed at the user
		 * boundary, the TDF_BOUNDARY flag is not set, and
		 * TDF_ALLPROCSUSP is used to avoid immediate un-suspend.
		 */
		if (TD_IS_SUSPENDED(td2) && (td2->td_flags & (TDF_BOUNDARY |
		    TDF_ALLPROCSUSP)) == 0)
			wakeup_swapper |= thread_unsuspend_one(td2, p, false);
		if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0) {
			if ((td2->td_flags & TDF_SBDRY) == 0) {
				thread_suspend_one(td2);
				td2->td_flags |= TDF_ALLPROCSUSP;
			} else {
				wakeup_swapper |= sleepq_abort(td2, ERESTART);
			}
		}
		break;
	}
	return (wakeup_swapper);
}
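
/*
 * Note on the wakeup_swapper convention: thread_unsuspend_one() ends in
 * setrunnable(), which returns non-zero when the target thread is
 * swapped out and the swapper (proc0) must be woken to bring it back
 * in; sleepq_abort() reports the same condition.  Callers accumulate
 * the flag while thread locks are held and call kick_proc0() only
 * after dropping them.
 */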

/*
 * Enforce single-threading.
 *
 * Returns 1 if the caller must abort (another thread is waiting to
 * exit the process or similar). Process is locked!
 * Returns 0 when you are successfully the only thread running.
 * A process has successfully single-threaded in the suspend mode when
 * there are no threads in user mode. Threads in the kernel must be
 * allowed to continue until they get to the user boundary. They may even
 * copy out their return values and data before suspending. They may
 * however be accelerated in reaching the user boundary as we will wake
 * up any sleeping threads that are interruptible (PCATCH).
 */
int
thread_single(struct proc *p, int mode)
{
	struct thread *td;
	struct thread *td2;
	int remaining, wakeup_swapper;

	td = curthread;
	KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY ||
	    mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT,
	    ("invalid mode %d", mode));
	/*
	 * If allowing non-ALLPROC singlethreading for non-curproc
	 * callers, calc_remaining() and remain_for_mode() should be
	 * adjusted to also account for td->td_proc != p. For now
	 * this is not implemented because it is not used.
	 */
	KASSERT((mode == SINGLE_ALLPROC && td->td_proc != p) ||
	    (mode != SINGLE_ALLPROC && td->td_proc == p),
	    ("mode %d proc %p curproc %p", mode, p, td->td_proc));
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);

	if ((p->p_flag & P_HADTHREADS) == 0 && mode != SINGLE_ALLPROC)
		return (0);

	/* Is someone already single threading? */
	if (p->p_singlethread != NULL && p->p_singlethread != td)
		return (1);

	if (mode == SINGLE_EXIT) {
		p->p_flag |= P_SINGLE_EXIT;
		p->p_flag &= ~P_SINGLE_BOUNDARY;
	} else {
		p->p_flag &= ~P_SINGLE_EXIT;
		if (mode == SINGLE_BOUNDARY)
			p->p_flag |= P_SINGLE_BOUNDARY;
		else
			p->p_flag &= ~P_SINGLE_BOUNDARY;
	}
	if (mode == SINGLE_ALLPROC)
		p->p_flag |= P_TOTAL_STOP;
	p->p_flag |= P_STOPPED_SINGLE;
	PROC_SLOCK(p);
	p->p_singlethread = td;
	remaining = calc_remaining(p, mode);
	while (remaining != remain_for_mode(mode)) {
		if (P_SHOULDSTOP(p) != P_STOPPED_SINGLE)
			goto stopme;
		wakeup_swapper = 0;
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			thread_lock(td2);
			td2->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
			if (TD_IS_INHIBITED(td2)) {
				wakeup_swapper |= weed_inhib(mode, td2, p);
#ifdef SMP
			} else if (TD_IS_RUNNING(td2) && td != td2) {
				forward_signal(td2);
#endif
			}
			thread_unlock(td2);
		}
		if (wakeup_swapper)
			kick_proc0();
		remaining = calc_remaining(p, mode);

		/*
		 * Maybe we suspended some threads... was it enough?
		 */
		if (remaining == remain_for_mode(mode))
			break;

stopme:
		/*
		 * Wake us up when everyone else has suspended.
		 * In the meantime we suspend as well.
		 */
		thread_suspend_switch(td, p);
		remaining = calc_remaining(p, mode);
	}
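
	/*
	 * All other threads are now inhibited in a mode-appropriate way:
	 * exited (SINGLE_EXIT), parked at the user boundary
	 * (SINGLE_BOUNDARY), or suspended (SINGLE_NO_EXIT, SINGLE_ALLPROC).
	 */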
	if (mode == SINGLE_EXIT) {
		/*
		 * Convert the process to an unthreaded process. The
		 * SINGLE_EXIT is called by exit1() or execve(), in
		 * both cases other threads must be retired.
		 */
		KASSERT(p->p_numthreads == 1, ("Unthreading with >1 threads"));
		p->p_singlethread = NULL;
		p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_HADTHREADS);

		/*
		 * Wait for any remaining threads to exit cpu_throw().
		 */
		while (p->p_exitthreads != 0) {
			PROC_SUNLOCK(p);
			PROC_UNLOCK(p);
			sched_relinquish(td);
			PROC_LOCK(p);
			PROC_SLOCK(p);
		}
	} else if (mode == SINGLE_BOUNDARY) {
		/*
		 * Wait until all suspended threads are removed from
		 * the processors. The thread_suspend_check()
		 * increments p_boundary_count while it is still
		 * running, which makes it possible for execve()
		 * to destroy the vmspace while our other threads are
		 * still using the address space.
		 *
		 * We lock the thread, which is only allowed to
		 * succeed after the context switch code has finished
		 * using the address space.
		 */
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			thread_lock(td2);
			KASSERT((td2->td_flags & TDF_BOUNDARY) != 0,
			    ("td %p not on boundary", td2));
			KASSERT(TD_IS_SUSPENDED(td2),
			    ("td %p is not suspended", td2));
			thread_unlock(td2);
		}
	}
	PROC_SUNLOCK(p);
	return (0);
}
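
/*
 * Usage sketch (informational): callers wrap their critical region
 * roughly as follows; modes other than SINGLE_EXIT must release the
 * other threads again with thread_single_end():
 *
 *	PROC_LOCK(p);
 *	if (thread_single(p, SINGLE_NO_EXIT))
 *		...abort, someone else is single-threading...
 *	...work while the other threads are held...
 *	thread_single_end(p, SINGLE_NO_EXIT);
 *	PROC_UNLOCK(p);
 */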

bool
thread_suspend_check_needed(void)
{
	struct proc *p;
	struct thread *td;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	return (P_SHOULDSTOP(p) || ((p->p_flag & P_TRACED) != 0 &&
	    (td->td_dbgflags & TDB_SUSPEND) != 0));
}

/*
 * Called from locations that can safely check to see
 * whether we have to suspend or at least throttle for a
 * single-thread event (e.g. fork).
 *
 * Such locations include userret().
 * If the "return_instead" argument is non-zero, the thread must be able to
 * accept 0 (caller may continue), or 1 (caller must abort) as a result.
 *
 * The 'return_instead' argument tells the function if it may do a
 * thread_exit() or suspend, or whether the caller must abort and back
 * out instead.
 *
 * If the thread that set the single_threading request has set the
 * P_SINGLE_EXIT bit in the process flags then this call will never return
 * if 'return_instead' is false, but will exit.
 *
 * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
 *---------------+--------------------+---------------------
 *       0       | returns 0          |  returns 0 or 1
 *               | when ST ends       |   immediately
 *---------------+--------------------+---------------------
 *       1       | thread exits       |  returns 1
 *               |                    |  immediately
 *
 * 0 = thread_exit() or suspension ok,
 * other = return error instead of stopping the thread.
 *
 * While a full suspension is under effect, even a single-threading
 * thread would be suspended if it made this call (but it shouldn't).
 * This call should only be made from places where
 * thread_exit() would be safe as that may be the outcome unless
 * return_instead is set.
 */
int
thread_suspend_check(int return_instead)
{
	struct thread *td;
	struct proc *p;
	int wakeup_swapper;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	while (thread_suspend_check_needed()) {
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			KASSERT(p->p_singlethread != NULL,
			    ("singlethread not set"));
			/*
			 * The only suspension in action is a
			 * single-threading. Single threader need not stop.
			 * It is safe to access p->p_singlethread unlocked
			 * because it can only be set to our address by us.
			 */
			if (p->p_singlethread == td)
				return (0);	/* Exempt from stopping. */
		}
		if ((p->p_flag & P_SINGLE_EXIT) && return_instead)
			return (EINTR);

		/* Should we goto user boundary if we didn't come from there? */
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
		    (p->p_flag & P_SINGLE_BOUNDARY) && return_instead)
			return (ERESTART);

		/*
		 * Ignore suspend requests if they are deferred.
		 */
		if ((td->td_flags & TDF_SBDRY) != 0) {
			KASSERT(return_instead,
			    ("TDF_SBDRY set for unsafe thread_suspend_check"));
			return (0);
		}

		/*
		 * If the process is waiting for us to exit,
		 * this thread should just suicide.
		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
		 */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
			PROC_UNLOCK(p);

			/*
			 * Allow the Linux emulation layer to do some work
			 * before thread suicide.
			 */
			if (__predict_false(p->p_sysent->sv_thread_detach != NULL))
				(p->p_sysent->sv_thread_detach)(td);
			kern_thr_exit(td);
			panic("stopped thread did not exit");
		}

		PROC_SLOCK(p);
		thread_stopped(p);
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount + 1) {
				thread_lock(p->p_singlethread);
				wakeup_swapper = thread_unsuspend_one(
				    p->p_singlethread, p, false);
				thread_unlock(p->p_singlethread);
				if (wakeup_swapper)
					kick_proc0();
			}
		}
		PROC_UNLOCK(p);
		thread_lock(td);
		/*
		 * When a thread suspends, it just
		 * gets taken off all queues.
		 */
		thread_suspend_one(td);
		if (return_instead == 0) {
			p->p_boundary_count++;
			td->td_flags |= TDF_BOUNDARY;
		}
		PROC_SUNLOCK(p);
		mi_switch(SW_INVOL | SWT_SUSPEND, NULL);
		thread_unlock(td);
		PROC_LOCK(p);
	}
	return (0);
}

void
thread_suspend_switch(struct thread *td, struct proc *p)
{

	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	/*
	 * We implement thread_suspend_one in stages here to avoid
	 * dropping the proc lock while the thread lock is owned.
	 */
	if (p == td->td_proc) {
		thread_stopped(p);
		p->p_suspcount++;
	}
	PROC_UNLOCK(p);
	thread_lock(td);
	td->td_flags &= ~TDF_NEEDSUSPCHK;
	TD_SET_SUSPENDED(td);
	sched_sleep(td, 0);
	PROC_SUNLOCK(p);
	DROP_GIANT();
	mi_switch(SW_VOL | SWT_SUSPEND, NULL);
	thread_unlock(td);
	PICKUP_GIANT();
	PROC_LOCK(p);
	PROC_SLOCK(p);
}

void
thread_suspend_one(struct thread *td)
{
	struct proc *p;

	p = td->td_proc;
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	p->p_suspcount++;
	td->td_flags &= ~TDF_NEEDSUSPCHK;
	TD_SET_SUSPENDED(td);
	sched_sleep(td, 0);
}

static int
thread_unsuspend_one(struct thread *td, struct proc *p, bool boundary)
{

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));
	TD_CLR_SUSPENDED(td);
	td->td_flags &= ~TDF_ALLPROCSUSP;
	if (td->td_proc == p) {
		PROC_SLOCK_ASSERT(p, MA_OWNED);
		p->p_suspcount--;
		if (boundary && (td->td_flags & TDF_BOUNDARY) != 0) {
			td->td_flags &= ~TDF_BOUNDARY;
			p->p_boundary_count--;
		}
	}
	return (setrunnable(td));
}
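
/*
 * Accounting note: thread_suspend_one() and thread_unsuspend_one() must
 * stay symmetric.  Every suspension bumps p_suspcount; threads parked
 * at the user boundary additionally carry TDF_BOUNDARY and a reference
 * in p_boundary_count, which only an unsuspend with boundary == true
 * drops.  calc_remaining() compares these counters against
 * p_numthreads to decide when single-threading is complete.
 */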

/*
 * Allow all threads blocked by single threading to continue running.
 */
void
thread_unsuspend(struct proc *p)
{
	struct thread *td;
	int wakeup_swapper;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	wakeup_swapper = 0;
	if (!P_SHOULDSTOP(p)) {
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (TD_IS_SUSPENDED(td)) {
				wakeup_swapper |= thread_unsuspend_one(td, p,
				    true);
			}
			thread_unlock(td);
		}
	} else if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
	    p->p_numthreads == p->p_suspcount) {
		/*
		 * Stopping everything also did the job for the single
		 * threading request. Now we've downgraded to single-threaded,
		 * let it continue.
		 */
		if (p->p_singlethread->td_proc == p) {
			thread_lock(p->p_singlethread);
			wakeup_swapper = thread_unsuspend_one(
			    p->p_singlethread, p, false);
			thread_unlock(p->p_singlethread);
		}
	}
	if (wakeup_swapper)
		kick_proc0();
}

/*
 * End the single-threading mode.
 */
void
thread_single_end(struct proc *p, int mode)
{
	struct thread *td;
	int wakeup_swapper;

	KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY ||
	    mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT,
	    ("invalid mode %d", mode));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT((mode == SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) != 0) ||
	    (mode != SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) == 0),
	    ("mode %d does not match P_TOTAL_STOP", mode));
	KASSERT(mode == SINGLE_ALLPROC || p->p_singlethread == curthread,
	    ("thread_single_end from other thread %p %p",
	    curthread, p->p_singlethread));
	KASSERT(mode != SINGLE_BOUNDARY ||
	    (p->p_flag & P_SINGLE_BOUNDARY) != 0,
	    ("mis-matched SINGLE_BOUNDARY flags %x", p->p_flag));
	p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY |
	    P_TOTAL_STOP);
	PROC_SLOCK(p);
	p->p_singlethread = NULL;
	wakeup_swapper = 0;
	/*
	 * If there are other threads they may now run,
	 * unless of course there is a blanket 'stop order'
	 * on the process. The single threader must be allowed
	 * to continue however as this is a bad place to stop.
	 */
	if (p->p_numthreads != remain_for_mode(mode) && !P_SHOULDSTOP(p)) {
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (TD_IS_SUSPENDED(td)) {
				wakeup_swapper |= thread_unsuspend_one(td, p,
				    mode == SINGLE_BOUNDARY);
			}
			thread_unlock(td);
		}
	}
	KASSERT(mode != SINGLE_BOUNDARY || p->p_boundary_count == 0,
	    ("inconsistent boundary count %d", p->p_boundary_count));
	PROC_SUNLOCK(p);
	if (wakeup_swapper)
		kick_proc0();
}

struct thread *
thread_find(struct proc *p, lwpid_t tid)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_tid == tid)
			break;
	}
	return (td);
}

/* Locate a thread by number; return with proc lock held. */
struct thread *
tdfind(lwpid_t tid, pid_t pid)
{
#define RUN_THRESH	16
	struct thread *td;
	int run = 0;

	rw_rlock(&tidhash_lock);
	LIST_FOREACH(td, TIDHASH(tid), td_hash) {
		if (td->td_tid == tid) {
			if (pid != -1 && td->td_proc->p_pid != pid) {
				td = NULL;
				break;
			}
			PROC_LOCK(td->td_proc);
			if (td->td_proc->p_state == PRS_NEW) {
				PROC_UNLOCK(td->td_proc);
				td = NULL;
				break;
			}
			if (run > RUN_THRESH) {
				if (rw_try_upgrade(&tidhash_lock)) {
					LIST_REMOVE(td, td_hash);
					LIST_INSERT_HEAD(TIDHASH(td->td_tid),
					    td, td_hash);
					rw_wunlock(&tidhash_lock);
					return (td);
				}
			}
			break;
		}
		run++;
	}
	rw_runlock(&tidhash_lock);
	return (td);
}

void
tidhash_add(struct thread *td)
{
	rw_wlock(&tidhash_lock);
	LIST_INSERT_HEAD(TIDHASH(td->td_tid), td, td_hash);
	rw_wunlock(&tidhash_lock);
}

void
tidhash_remove(struct thread *td)
{
	rw_wlock(&tidhash_lock);
	LIST_REMOVE(td, td_hash);
	rw_wunlock(&tidhash_lock);
}
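
/*
 * Usage sketch (informational): tdfind() is how a TID coming from
 * userland is translated into a thread with its process locked, e.g.:
 *
 *	td = tdfind(tid, pid);		pid == -1 matches any process
 *	if (td == NULL)
 *		return (ESRCH);
 *	...use td; td->td_proc is locked...
 *	PROC_UNLOCK(td->td_proc);
 */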