kern_thread.c revision 293490
/*-
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include "opt_witness.h"
#include "opt_kdtrace.h"
#include "opt_hwpmc_hooks.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/kern/kern_thread.c 293490 2016-01-09 14:53:08Z dchagin $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rangelock.h>
#include <sys/resourcevar.h>
#include <sys/sdt.h>
#include <sys/smp.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/selinfo.h>
#include <sys/sysent.h>
#include <sys/turnstile.h>
#include <sys/ktr.h>
#include <sys/rwlock.h>
#include <sys/umtx.h>
#include <sys/cpuset.h>
#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#include <security/audit/audit.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <sys/eventhandler.h>

SDT_PROVIDER_DECLARE(proc);
SDT_PROBE_DEFINE(proc, , , lwp__exit);

/*
 * thread related storage.
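 *
 * Threads live in a type-stable UMA zone (UMA_ZONE_NOFREE).  TIDs come
 * from a unit-number allocator; tid_free() parks freed TIDs in
 * tid_buffer, a small FIFO ring, and hands the oldest entry back to the
 * allocator only once the ring is full, so a just-freed TID is not
 * reused immediately.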
 */
static uma_zone_t thread_zone;

TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
static struct mtx zombie_lock;
MTX_SYSINIT(zombie_lock, &zombie_lock, "zombie lock", MTX_SPIN);

static void thread_zombie(struct thread *);
static int thread_unsuspend_one(struct thread *td, struct proc *p,
    bool boundary);

#define TID_BUFFER_SIZE 1024

struct mtx tid_lock;
static struct unrhdr *tid_unrhdr;
static lwpid_t tid_buffer[TID_BUFFER_SIZE];
static int tid_head, tid_tail;
static MALLOC_DEFINE(M_TIDHASH, "tidhash", "thread hash");

struct tidhashhead *tidhashtbl;
u_long tidhash;
struct rwlock tidhash_lock;

static lwpid_t
tid_alloc(void)
{
        lwpid_t tid;

        tid = alloc_unr(tid_unrhdr);
        if (tid != -1)
                return (tid);
        mtx_lock(&tid_lock);
        if (tid_head == tid_tail) {
                mtx_unlock(&tid_lock);
                return (-1);
        }
        tid = tid_buffer[tid_head];
        tid_head = (tid_head + 1) % TID_BUFFER_SIZE;
        mtx_unlock(&tid_lock);
        return (tid);
}

static void
tid_free(lwpid_t tid)
{
        lwpid_t tmp_tid = -1;

        mtx_lock(&tid_lock);
        if ((tid_tail + 1) % TID_BUFFER_SIZE == tid_head) {
                tmp_tid = tid_buffer[tid_head];
                tid_head = (tid_head + 1) % TID_BUFFER_SIZE;
        }
        tid_buffer[tid_tail] = tid;
        tid_tail = (tid_tail + 1) % TID_BUFFER_SIZE;
        mtx_unlock(&tid_lock);
        if (tmp_tid != -1)
                free_unr(tid_unrhdr, tmp_tid);
}

/*
 * Prepare a thread for use.
 */
static int
thread_ctor(void *mem, int size, void *arg, int flags)
{
        struct thread *td;

        td = (struct thread *)mem;
        td->td_state = TDS_INACTIVE;
        td->td_oncpu = NOCPU;

        td->td_tid = tid_alloc();

        /*
         * Note that td_critnest begins life as 1 because the thread is not
         * running and is thereby implicitly waiting to be on the receiving
         * end of a context switch.
         */
        td->td_critnest = 1;
        td->td_lend_user_pri = PRI_MAX;
        EVENTHANDLER_INVOKE(thread_ctor, td);
#ifdef AUDIT
        audit_thread_alloc(td);
#endif
        umtx_thread_alloc(td);
        return (0);
}

/*
 * Reclaim a thread after use.
 */
static void
thread_dtor(void *mem, int size, void *arg)
{
        struct thread *td;

        td = (struct thread *)mem;

#ifdef INVARIANTS
        /* Verify that this thread is in a safe state to free. */
        switch (td->td_state) {
        case TDS_INHIBITED:
        case TDS_RUNNING:
        case TDS_CAN_RUN:
        case TDS_RUNQ:
                /*
                 * We must never unlink a thread that is in one of
                 * these states, because it is currently active.
                 */
                panic("bad state for thread unlinking");
                /* NOTREACHED */
        case TDS_INACTIVE:
                break;
        default:
                panic("bad thread state");
                /* NOTREACHED */
        }
#endif
#ifdef AUDIT
        audit_thread_free(td);
#endif
        /* Free all OSD associated to this thread. */
        osd_thread_exit(td);

        EVENTHANDLER_INVOKE(thread_dtor, td);
        tid_free(td->td_tid);
}

/*
 * Initialize type-stable parts of a thread (when newly created).
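 *
 * Unlike thread_ctor()/thread_dtor(), which run on every allocation and
 * free, thread_init()/thread_fini() run only when the zone itself creates
 * or destroys an item, so the pieces set up here (sleep queue, turnstile,
 * and so on) persist across reuses of the thread structure.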
 */
static int
thread_init(void *mem, int size, int flags)
{
        struct thread *td;

        td = (struct thread *)mem;

        td->td_sleepqueue = sleepq_alloc();
        td->td_turnstile = turnstile_alloc();
        td->td_rlqe = NULL;
        EVENTHANDLER_INVOKE(thread_init, td);
        td->td_sched = (struct td_sched *)&td[1];
        umtx_thread_init(td);
        td->td_kstack = 0;
        td->td_sel = NULL;
        return (0);
}

/*
 * Tear down type-stable parts of a thread (just before being discarded).
 */
static void
thread_fini(void *mem, int size)
{
        struct thread *td;

        td = (struct thread *)mem;
        EVENTHANDLER_INVOKE(thread_fini, td);
        rlqentry_free(td->td_rlqe);
        turnstile_free(td->td_turnstile);
        sleepq_free(td->td_sleepqueue);
        umtx_thread_fini(td);
        seltdfini(td);
}

/*
 * For a newly created process, link up all the structures and its initial
 * threads etc.
 * Called from:
 *    {arch}/{arch}/machdep.c  ia64_init(), init386() etc.
 *    proc_dtor() (should go away)
 *    proc_init()
 */
void
proc_linkup0(struct proc *p, struct thread *td)
{
        TAILQ_INIT(&p->p_threads);           /* all threads in proc */
        proc_linkup(p, td);
}

void
proc_linkup(struct proc *p, struct thread *td)
{

        sigqueue_init(&p->p_sigqueue, p);
        p->p_ksi = ksiginfo_alloc(1);
        if (p->p_ksi != NULL) {
                /* XXX p_ksi may be null if ksiginfo zone is not ready */
                p->p_ksi->ksi_flags = KSI_EXT | KSI_INS;
        }
        LIST_INIT(&p->p_mqnotifier);
        p->p_numthreads = 0;
        thread_link(td, p);
}

/*
 * Initialize global thread allocation resources.
 */
void
threadinit(void)
{

        mtx_init(&tid_lock, "TID lock", NULL, MTX_DEF);

        /*
         * pid_max cannot be greater than PID_MAX.
         * Leave one number for thread0.
         */
        tid_unrhdr = new_unrhdr(PID_MAX + 2, INT_MAX, &tid_lock);

        thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
            thread_ctor, thread_dtor, thread_init, thread_fini,
            16 - 1, UMA_ZONE_NOFREE);
        tidhashtbl = hashinit(maxproc / 2, M_TIDHASH, &tidhash);
        rw_init(&tidhash_lock, "tidhash");
}

/*
 * Place an unused thread on the zombie list.
 * Use the slpq as that must be unused by now.
 */
void
thread_zombie(struct thread *td)
{
        mtx_lock_spin(&zombie_lock);
        TAILQ_INSERT_HEAD(&zombie_threads, td, td_slpq);
        mtx_unlock_spin(&zombie_lock);
}

/*
 * Release a thread that has exited after cpu_throw().
 */
void
thread_stash(struct thread *td)
{
        atomic_subtract_rel_int(&td->td_proc->p_exitthreads, 1);
        thread_zombie(td);
}

/*
 * Reap zombie resources.
 */
void
thread_reap(void)
{
        struct thread *td_first, *td_next;

        /*
         * Don't even bother to lock if none at this instant;
         * we really don't care about the next instant.
         */
        if (!TAILQ_EMPTY(&zombie_threads)) {
                mtx_lock_spin(&zombie_lock);
                td_first = TAILQ_FIRST(&zombie_threads);
                if (td_first)
                        TAILQ_INIT(&zombie_threads);
                mtx_unlock_spin(&zombie_lock);
                while (td_first) {
                        td_next = TAILQ_NEXT(td_first, td_slpq);
                        if (td_first->td_ucred)
                                crfree(td_first->td_ucred);
                        thread_free(td_first);
                        td_first = td_next;
                }
        }
}

/*
 * Allocate a thread.
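 *
 * Reap the zombie list first so stale stacks and TIDs are recycled
 * promptly, then allocate a zone item and give it a kernel stack of
 * 'pages' pages.  Returns NULL if vm_thread_new() cannot set up the
 * stack.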
 */
struct thread *
thread_alloc(int pages)
{
        struct thread *td;

        thread_reap(); /* check if any zombies to get */

        td = (struct thread *)uma_zalloc(thread_zone, M_WAITOK);
        KASSERT(td->td_kstack == 0, ("thread_alloc got thread with kstack"));
        if (!vm_thread_new(td, pages)) {
                uma_zfree(thread_zone, td);
                return (NULL);
        }
        cpu_thread_alloc(td);
        return (td);
}

int
thread_alloc_stack(struct thread *td, int pages)
{

        KASSERT(td->td_kstack == 0,
            ("thread_alloc_stack called on a thread with kstack"));
        if (!vm_thread_new(td, pages))
                return (0);
        cpu_thread_alloc(td);
        return (1);
}

/*
 * Deallocate a thread.
 */
void
thread_free(struct thread *td)
{

        lock_profile_thread_exit(td);
        if (td->td_cpuset)
                cpuset_rel(td->td_cpuset);
        td->td_cpuset = NULL;
        cpu_thread_free(td);
        if (td->td_kstack != 0)
                vm_thread_dispose(td);
        uma_zfree(thread_zone, td);
}

/*
 * Discard the current thread and exit from its context.
 * Always called with scheduler locked.
 *
 * Because we can't free a thread while we're operating under its context,
 * push the current thread into our CPU's deadthread holder.  This means
 * we needn't worry about someone else grabbing our context before we
 * do a cpu_throw().
 */
void
thread_exit(void)
{
        uint64_t runtime, new_switchtime;
        struct thread *td;
        struct thread *td2;
        struct proc *p;
        int wakeup_swapper;

        td = curthread;
        p = td->td_proc;

        PROC_SLOCK_ASSERT(p, MA_OWNED);
        mtx_assert(&Giant, MA_NOTOWNED);

        PROC_LOCK_ASSERT(p, MA_OWNED);
        KASSERT(p != NULL, ("thread exiting without a process"));
        CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td,
            (long)p->p_pid, td->td_name);
        KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending"));

#ifdef AUDIT
        AUDIT_SYSCALL_EXIT(0, td);
#endif
        /*
         * Drop FPU & debug register state storage, or any other
         * architecture-specific resources that
         * would not be on a new untouched process.
         */
        cpu_thread_exit(td);    /* XXXSMP */

        /*
         * The last thread is left attached to the process
         * so that the whole bundle gets recycled.  Skip
         * all this stuff if we never had threads.
         * EXIT clears all sign of other threads when
         * it goes to single threading, so the last thread always
         * takes the short path.
         */
        if (p->p_flag & P_HADTHREADS) {
                if (p->p_numthreads > 1) {
                        atomic_add_int(&td->td_proc->p_exitthreads, 1);
                        thread_unlink(td);
                        td2 = FIRST_THREAD_IN_PROC(p);
                        sched_exit_thread(td2, td);

                        /*
                         * The test below is NOT true if we are the
                         * sole exiting thread.  P_STOPPED_SINGLE is unset
                         * in exit1() after it is the only survivor.
                         */
                        if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
                                if (p->p_numthreads == p->p_suspcount) {
                                        thread_lock(p->p_singlethread);
                                        wakeup_swapper = thread_unsuspend_one(
                                            p->p_singlethread, p, false);
                                        thread_unlock(p->p_singlethread);
                                        if (wakeup_swapper)
                                                kick_proc0();
                                }
                        }

                        PCPU_SET(deadthread, td);
                } else {
                        /*
                         * The last thread is exiting, but not through exit().
                         */
                        panic("thread_exit: Last thread exiting on its own");
                }
        }
#ifdef HWPMC_HOOKS
        /*
         * If this thread is part of a process that is being tracked by
         * hwpmc(4), inform the module of the thread's impending exit.
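         * The hook runs while this is still the current thread on the
         * CPU, before the final time accounting and sched_throw() below.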
         */
        if (PMC_PROC_IS_USING_PMCS(td->td_proc))
                PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
#endif
        PROC_UNLOCK(p);
        PROC_STATLOCK(p);
        thread_lock(td);
        PROC_SUNLOCK(p);

        /* Do the same timestamp bookkeeping that mi_switch() would do. */
        new_switchtime = cpu_ticks();
        runtime = new_switchtime - PCPU_GET(switchtime);
        td->td_runtime += runtime;
        td->td_incruntime += runtime;
        PCPU_SET(switchtime, new_switchtime);
        PCPU_SET(switchticks, ticks);
        PCPU_INC(cnt.v_swtch);

        /* Save our resource usage in our process. */
        td->td_ru.ru_nvcsw++;
        ruxagg(p, td);
        rucollect(&p->p_ru, &td->td_ru);
        PROC_STATUNLOCK(p);

        td->td_state = TDS_INACTIVE;
#ifdef WITNESS
        witness_thread_exit(td);
#endif
        CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td);
        sched_throw(td);
        panic("I'm a teapot!");
        /* NOTREACHED */
}

/*
 * Do any thread-specific cleanups that may be needed in wait();
 * called with Giant, proc and schedlock not held.
 */
void
thread_wait(struct proc *p)
{
        struct thread *td;

        mtx_assert(&Giant, MA_NOTOWNED);
        KASSERT(p->p_numthreads == 1, ("multiple threads in thread_wait()"));
        KASSERT(p->p_exitthreads == 0, ("p_exitthreads leaking"));
        td = FIRST_THREAD_IN_PROC(p);
        /* Lock the last thread so we spin until it exits cpu_throw(). */
        thread_lock(td);
        thread_unlock(td);
        lock_profile_thread_exit(td);
        cpuset_rel(td->td_cpuset);
        td->td_cpuset = NULL;
        cpu_thread_clean(td);
        crfree(td->td_ucred);
        thread_reap();  /* check for zombie threads etc. */
}

/*
 * Link a thread to a process.
 * Set up anything that needs to be initialized for it to
 * be used by the process.
 */
void
thread_link(struct thread *td, struct proc *p)
{

        /*
         * XXX This can't be enabled because it's called for proc0 before
         * its lock has been created.
         * PROC_LOCK_ASSERT(p, MA_OWNED);
         */
        td->td_state = TDS_INACTIVE;
        td->td_proc = p;
        td->td_flags = TDF_INMEM;

        LIST_INIT(&td->td_contested);
        LIST_INIT(&td->td_lprof[0]);
        LIST_INIT(&td->td_lprof[1]);
        sigqueue_init(&td->td_sigqueue, p);
        callout_init(&td->td_slpcallout, CALLOUT_MPSAFE);
        TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
        p->p_numthreads++;
}

/*
 * Called from:
 *  thread_exit()
 */
void
thread_unlink(struct thread *td)
{
        struct proc *p = td->td_proc;

        PROC_LOCK_ASSERT(p, MA_OWNED);
        TAILQ_REMOVE(&p->p_threads, td, td_plist);
        p->p_numthreads--;
        /* could clear a few other things here */
        /* Must NOT clear links to proc! */
}

static int
calc_remaining(struct proc *p, int mode)
{
        int remaining;

        PROC_LOCK_ASSERT(p, MA_OWNED);
        PROC_SLOCK_ASSERT(p, MA_OWNED);
        if (mode == SINGLE_EXIT)
                remaining = p->p_numthreads;
        else if (mode == SINGLE_BOUNDARY)
                remaining = p->p_numthreads - p->p_boundary_count;
        else if (mode == SINGLE_NO_EXIT || mode == SINGLE_ALLPROC)
                remaining = p->p_numthreads - p->p_suspcount;
        else
                panic("calc_remaining: wrong mode %d", mode);
        return (remaining);
}

static int
remain_for_mode(int mode)
{

        return (mode == SINGLE_ALLPROC ?
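            /*
             * The target for calc_remaining(): SINGLE_ALLPROC requires
             * every thread to park (none left running), while the other
             * modes leave only the single threader itself.
             */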
            0 : 1);
}

static int
weed_inhib(int mode, struct thread *td2, struct proc *p)
{
        int wakeup_swapper;

        PROC_LOCK_ASSERT(p, MA_OWNED);
        PROC_SLOCK_ASSERT(p, MA_OWNED);
        THREAD_LOCK_ASSERT(td2, MA_OWNED);

        wakeup_swapper = 0;
        switch (mode) {
        case SINGLE_EXIT:
                if (TD_IS_SUSPENDED(td2))
                        wakeup_swapper |= thread_unsuspend_one(td2, p, true);
                if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0)
                        wakeup_swapper |= sleepq_abort(td2, EINTR);
                break;
        case SINGLE_BOUNDARY:
                if (TD_IS_SUSPENDED(td2) && (td2->td_flags & TDF_BOUNDARY) == 0)
                        wakeup_swapper |= thread_unsuspend_one(td2, p, false);
                if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0)
                        wakeup_swapper |= sleepq_abort(td2, ERESTART);
                break;
        case SINGLE_NO_EXIT:
                if (TD_IS_SUSPENDED(td2) && (td2->td_flags & TDF_BOUNDARY) == 0)
                        wakeup_swapper |= thread_unsuspend_one(td2, p, false);
                if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0)
                        wakeup_swapper |= sleepq_abort(td2, ERESTART);
                break;
        case SINGLE_ALLPROC:
                /*
                 * ALLPROC suspend tries to avoid spurious EINTR for
                 * threads sleeping interruptibly, by suspending the
                 * thread directly, similarly to sig_suspend_threads().
                 * Since such a sleep is not performed at the user
                 * boundary, the TDF_BOUNDARY flag is not set, and
                 * TDF_ALLPROCSUSP is used to avoid immediate un-suspend.
                 */
                if (TD_IS_SUSPENDED(td2) && (td2->td_flags & (TDF_BOUNDARY |
                    TDF_ALLPROCSUSP)) == 0)
                        wakeup_swapper |= thread_unsuspend_one(td2, p, false);
                if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0) {
                        if ((td2->td_flags & TDF_SBDRY) == 0) {
                                thread_suspend_one(td2);
                                td2->td_flags |= TDF_ALLPROCSUSP;
                        } else {
                                wakeup_swapper |= sleepq_abort(td2, ERESTART);
                        }
                }
                break;
        }
        return (wakeup_swapper);
}

/*
 * Enforce single-threading.
 *
 * Returns 1 if the caller must abort (another thread is waiting to
 * exit the process or similar).  Process is locked!
 * Returns 0 when you are successfully the only thread running.
 * A process has successfully single threaded in the suspend mode when
 * there are no threads in user mode.  Threads in the kernel must be
 * allowed to continue until they get to the user boundary.  They may even
 * copy out their return values and data before suspending.  They may
 * however be accelerated in reaching the user boundary as we will wake
 * up any sleeping threads that are interruptible (PCATCH).
 */
int
thread_single(struct proc *p, int mode)
{
        struct thread *td;
        struct thread *td2;
        int remaining, wakeup_swapper;

        td = curthread;
        KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY ||
            mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT,
            ("invalid mode %d", mode));
        /*
         * If allowing non-ALLPROC singlethreading for non-curproc
         * callers, calc_remaining() and remain_for_mode() should be
         * adjusted to also account for td->td_proc != p.  For now
         * this is not implemented because it is not used.
         */
        KASSERT((mode == SINGLE_ALLPROC && td->td_proc != p) ||
            (mode != SINGLE_ALLPROC && td->td_proc == p),
            ("mode %d proc %p curproc %p", mode, p, td->td_proc));
        mtx_assert(&Giant, MA_NOTOWNED);
        PROC_LOCK_ASSERT(p, MA_OWNED);

        if ((p->p_flag & P_HADTHREADS) == 0 && mode != SINGLE_ALLPROC)
                return (0);

        /* Is someone already single threading?
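         * Only one thread may hold the request at a time; if it is not
         * us, back out and let the caller abort.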
         */
        if (p->p_singlethread != NULL && p->p_singlethread != td)
                return (1);

        if (mode == SINGLE_EXIT) {
                p->p_flag |= P_SINGLE_EXIT;
                p->p_flag &= ~P_SINGLE_BOUNDARY;
        } else {
                p->p_flag &= ~P_SINGLE_EXIT;
                if (mode == SINGLE_BOUNDARY)
                        p->p_flag |= P_SINGLE_BOUNDARY;
                else
                        p->p_flag &= ~P_SINGLE_BOUNDARY;
        }
        if (mode == SINGLE_ALLPROC)
                p->p_flag |= P_TOTAL_STOP;
        p->p_flag |= P_STOPPED_SINGLE;
        PROC_SLOCK(p);
        p->p_singlethread = td;
        remaining = calc_remaining(p, mode);
        while (remaining != remain_for_mode(mode)) {
                if (P_SHOULDSTOP(p) != P_STOPPED_SINGLE)
                        goto stopme;
                wakeup_swapper = 0;
                FOREACH_THREAD_IN_PROC(p, td2) {
                        if (td2 == td)
                                continue;
                        thread_lock(td2);
                        td2->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
                        if (TD_IS_INHIBITED(td2)) {
                                wakeup_swapper |= weed_inhib(mode, td2, p);
#ifdef SMP
                        } else if (TD_IS_RUNNING(td2) && td != td2) {
                                forward_signal(td2);
#endif
                        }
                        thread_unlock(td2);
                }
                if (wakeup_swapper)
                        kick_proc0();
                remaining = calc_remaining(p, mode);

                /*
                 * Maybe we suspended some threads.. was it enough?
                 */
                if (remaining == remain_for_mode(mode))
                        break;

stopme:
                /*
                 * Wake us up when everyone else has suspended.
                 * In the mean time we suspend as well.
                 */
                thread_suspend_switch(td, p);
                remaining = calc_remaining(p, mode);
        }
        if (mode == SINGLE_EXIT) {
                /*
                 * Convert the process to an unthreaded process.  The
                 * SINGLE_EXIT is called by exit1() or execve(), in
                 * both cases other threads must be retired.
                 */
                KASSERT(p->p_numthreads == 1, ("Unthreading with >1 threads"));
                p->p_singlethread = NULL;
                p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_HADTHREADS);

                /*
                 * Wait for any remaining threads to exit cpu_throw().
                 */
                while (p->p_exitthreads != 0) {
                        PROC_SUNLOCK(p);
                        PROC_UNLOCK(p);
                        sched_relinquish(td);
                        PROC_LOCK(p);
                        PROC_SLOCK(p);
                }
        } else if (mode == SINGLE_BOUNDARY) {
                /*
                 * Wait until all suspended threads are removed from
                 * the processors.  The thread_suspend_check()
                 * increments p_boundary_count while it is still
                 * running, which makes it possible for the execve()
                 * to destroy vmspace while our other threads are
                 * still using the address space.
                 *
                 * We lock the thread, which is only allowed to
                 * succeed after context switch code finished using
                 * the address space.
                 */
                FOREACH_THREAD_IN_PROC(p, td2) {
                        if (td2 == td)
                                continue;
                        thread_lock(td2);
                        KASSERT((td2->td_flags & TDF_BOUNDARY) != 0,
                            ("td %p not on boundary", td2));
                        KASSERT(TD_IS_SUSPENDED(td2),
                            ("td %p is not suspended", td2));
                        thread_unlock(td2);
                }
        }
        PROC_SUNLOCK(p);
        return (0);
}

bool
thread_suspend_check_needed(void)
{
        struct proc *p;
        struct thread *td;

        td = curthread;
        p = td->td_proc;
        PROC_LOCK_ASSERT(p, MA_OWNED);
        return (P_SHOULDSTOP(p) || ((p->p_flag & P_TRACED) != 0 &&
            (td->td_dbgflags & TDB_SUSPEND) != 0));
}

/*
 * Called in from locations that can safely check to see
 * whether we have to suspend or at least throttle for a
 * single-thread event (e.g. fork).
 *
 * Such locations include userret().
 * If the "return_instead" argument is non zero, the thread must be able to
 * accept 0 (caller may continue), or 1 (caller must abort) as a result.
 *
 * The 'return_instead' argument tells the function if it may do a
 * thread_exit() or suspend, or whether the caller must abort and back
 * out instead.
 *
 * If the thread that set the single_threading request has set the
 * P_SINGLE_EXIT bit in the process flags then this call will never return
 * if 'return_instead' is false, but will exit.
 *
 * P_SINGLE_EXIT | return_instead == 0 | return_instead != 0
 *---------------+---------------------+---------------------
 *       0       | returns 0           | returns 0 or 1
 *               | when ST ends        | immediately
 *---------------+---------------------+---------------------
 *       1       | thread exits        | returns 1
 *               |                     | immediately
 *
 * 0 = thread_exit() or suspension ok,
 * other = return error instead of stopping the thread.
 *
 * While a full suspension is under effect, even a single threading
 * thread would be suspended if it made this call (but it shouldn't).
 * This call should only be made from places where
 * thread_exit() would be safe as that may be the outcome unless
 * return_instead is set.
 */
int
thread_suspend_check(int return_instead)
{
        struct thread *td;
        struct proc *p;
        int wakeup_swapper;

        td = curthread;
        p = td->td_proc;
        mtx_assert(&Giant, MA_NOTOWNED);
        PROC_LOCK_ASSERT(p, MA_OWNED);
        while (thread_suspend_check_needed()) {
                if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
                        KASSERT(p->p_singlethread != NULL,
                            ("singlethread not set"));
                        /*
                         * The only suspension in action is a
                         * single-threading.  Single threader need not stop.
                         * XXX Should be safe to access unlocked
                         * as it can only be set to be true by us.
                         */
                        if (p->p_singlethread == td)
                                return (0);     /* Exempt from stopping. */
                }
                if ((p->p_flag & P_SINGLE_EXIT) && return_instead)
                        return (EINTR);

                /* Should we goto user boundary if we didn't come from there? */
                if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
                    (p->p_flag & P_SINGLE_BOUNDARY) && return_instead)
                        return (ERESTART);

                /*
                 * Ignore suspend requests if they are deferred.
                 */
                if ((td->td_flags & TDF_SBDRY) != 0) {
                        KASSERT(return_instead,
                            ("TDF_SBDRY set for unsafe thread_suspend_check"));
                        return (0);
                }

                /*
                 * If the process is waiting for us to exit,
                 * this thread should just suicide.
                 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
                 */
                if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
                        PROC_UNLOCK(p);
                        tidhash_remove(td);

                        /*
                         * Allow Linux emulation layer to do some work
                         * before thread suicide.
                         */
                        if (__predict_false(p->p_sysent->sv_thread_detach != NULL))
                                (p->p_sysent->sv_thread_detach)(td);

                        PROC_LOCK(p);
                        tdsigcleanup(td);
                        umtx_thread_exit(td);
                        PROC_SLOCK(p);
                        thread_stopped(p);
                        thread_exit();
                }

                PROC_SLOCK(p);
                thread_stopped(p);
                if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
                        if (p->p_numthreads == p->p_suspcount + 1) {
                                thread_lock(p->p_singlethread);
                                wakeup_swapper = thread_unsuspend_one(
                                    p->p_singlethread, p, false);
                                thread_unlock(p->p_singlethread);
                                if (wakeup_swapper)
                                        kick_proc0();
                        }
                }
                PROC_UNLOCK(p);
                thread_lock(td);
                /*
                 * When a thread suspends, it just
                 * gets taken off all queues.
                 */
                thread_suspend_one(td);
                if (return_instead == 0) {
                        p->p_boundary_count++;
                        td->td_flags |= TDF_BOUNDARY;
                }
                PROC_SUNLOCK(p);
                mi_switch(SW_INVOL | SWT_SUSPEND, NULL);
                thread_unlock(td);
                PROC_LOCK(p);
        }
        return (0);
}

void
thread_suspend_switch(struct thread *td, struct proc *p)
{

        KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
        PROC_LOCK_ASSERT(p, MA_OWNED);
        PROC_SLOCK_ASSERT(p, MA_OWNED);
        /*
         * We implement thread_suspend_one in stages here to avoid
         * dropping the proc lock while the thread lock is owned.
         */
        if (p == td->td_proc) {
                thread_stopped(p);
                p->p_suspcount++;
        }
        PROC_UNLOCK(p);
        thread_lock(td);
        td->td_flags &= ~TDF_NEEDSUSPCHK;
        TD_SET_SUSPENDED(td);
        sched_sleep(td, 0);
        PROC_SUNLOCK(p);
        DROP_GIANT();
        mi_switch(SW_VOL | SWT_SUSPEND, NULL);
        thread_unlock(td);
        PICKUP_GIANT();
        PROC_LOCK(p);
        PROC_SLOCK(p);
}

void
thread_suspend_one(struct thread *td)
{
        struct proc *p;

        p = td->td_proc;
        PROC_SLOCK_ASSERT(p, MA_OWNED);
        THREAD_LOCK_ASSERT(td, MA_OWNED);
        KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
        p->p_suspcount++;
        td->td_flags &= ~TDF_NEEDSUSPCHK;
        TD_SET_SUSPENDED(td);
        sched_sleep(td, 0);
}

static int
thread_unsuspend_one(struct thread *td, struct proc *p, bool boundary)
{

        THREAD_LOCK_ASSERT(td, MA_OWNED);
        KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));
        TD_CLR_SUSPENDED(td);
        td->td_flags &= ~TDF_ALLPROCSUSP;
        if (td->td_proc == p) {
                PROC_SLOCK_ASSERT(p, MA_OWNED);
                p->p_suspcount--;
                if (boundary && (td->td_flags & TDF_BOUNDARY) != 0) {
                        td->td_flags &= ~TDF_BOUNDARY;
                        p->p_boundary_count--;
                }
        }
        return (setrunnable(td));
}

/*
 * Allow all threads blocked by single threading to continue running.
 */
void
thread_unsuspend(struct proc *p)
{
        struct thread *td;
        int wakeup_swapper;

        PROC_LOCK_ASSERT(p, MA_OWNED);
        PROC_SLOCK_ASSERT(p, MA_OWNED);
        wakeup_swapper = 0;
        if (!P_SHOULDSTOP(p)) {
                FOREACH_THREAD_IN_PROC(p, td) {
                        thread_lock(td);
                        if (TD_IS_SUSPENDED(td)) {
                                wakeup_swapper |= thread_unsuspend_one(td, p,
                                    true);
                        }
                        thread_unlock(td);
                }
        } else if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
            p->p_numthreads == p->p_suspcount) {
                /*
                 * Stopping everything also did the job for the single
                 * threading request.  Now we've downgraded to
                 * single-threaded; let it continue.
                 */
                if (p->p_singlethread->td_proc == p) {
                        thread_lock(p->p_singlethread);
                        wakeup_swapper = thread_unsuspend_one(
                            p->p_singlethread, p, false);
                        thread_unlock(p->p_singlethread);
                }
        }
        if (wakeup_swapper)
                kick_proc0();
}

/*
 * End the single threading mode.
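 *
 * The mode must match the one passed to the corresponding thread_single()
 * call; the KASSERTs below cross-check it against P_TOTAL_STOP,
 * P_SINGLE_BOUNDARY and p_singlethread.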
 */
void
thread_single_end(struct proc *p, int mode)
{
        struct thread *td;
        int wakeup_swapper;

        KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY ||
            mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT,
            ("invalid mode %d", mode));
        PROC_LOCK_ASSERT(p, MA_OWNED);
        KASSERT((mode == SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) != 0) ||
            (mode != SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) == 0),
            ("mode %d does not match P_TOTAL_STOP", mode));
        KASSERT(mode == SINGLE_ALLPROC || p->p_singlethread == curthread,
            ("thread_single_end from other thread %p %p",
            curthread, p->p_singlethread));
        KASSERT(mode != SINGLE_BOUNDARY ||
            (p->p_flag & P_SINGLE_BOUNDARY) != 0,
            ("mis-matched SINGLE_BOUNDARY flags %x", p->p_flag));
        p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY |
            P_TOTAL_STOP);
        PROC_SLOCK(p);
        p->p_singlethread = NULL;
        wakeup_swapper = 0;
        /*
         * If there are other threads they may now run,
         * unless of course there is a blanket 'stop order'
         * on the process.  The single threader must be allowed
         * to continue however as this is a bad place to stop.
         */
        if (p->p_numthreads != remain_for_mode(mode) && !P_SHOULDSTOP(p)) {
                FOREACH_THREAD_IN_PROC(p, td) {
                        thread_lock(td);
                        if (TD_IS_SUSPENDED(td)) {
                                wakeup_swapper |= thread_unsuspend_one(td, p,
                                    mode == SINGLE_BOUNDARY);
                        }
                        thread_unlock(td);
                }
        }
        KASSERT(mode != SINGLE_BOUNDARY || p->p_boundary_count == 0,
            ("inconsistent boundary count %d", p->p_boundary_count));
        PROC_SUNLOCK(p);
        if (wakeup_swapper)
                kick_proc0();
}

struct thread *
thread_find(struct proc *p, lwpid_t tid)
{
        struct thread *td;

        PROC_LOCK_ASSERT(p, MA_OWNED);
        FOREACH_THREAD_IN_PROC(p, td) {
                if (td->td_tid == tid)
                        break;
        }
        return (td);
}

/* Locate a thread by number; return with proc lock held. */
struct thread *
tdfind(lwpid_t tid, pid_t pid)
{
#define RUN_THRESH      16
        struct thread *td;
        int run = 0;

        rw_rlock(&tidhash_lock);
        LIST_FOREACH(td, TIDHASH(tid), td_hash) {
                if (td->td_tid == tid) {
                        if (pid != -1 && td->td_proc->p_pid != pid) {
                                td = NULL;
                                break;
                        }
                        PROC_LOCK(td->td_proc);
                        if (td->td_proc->p_state == PRS_NEW) {
                                PROC_UNLOCK(td->td_proc);
                                td = NULL;
                                break;
                        }
                        if (run > RUN_THRESH) {
                                if (rw_try_upgrade(&tidhash_lock)) {
                                        LIST_REMOVE(td, td_hash);
                                        LIST_INSERT_HEAD(TIDHASH(td->td_tid),
                                            td, td_hash);
                                        rw_wunlock(&tidhash_lock);
                                        return (td);
                                }
                        }
                        break;
                }
                run++;
        }
        rw_runlock(&tidhash_lock);
        return (td);
}

void
tidhash_add(struct thread *td)
{
        rw_wlock(&tidhash_lock);
        LIST_INSERT_HEAD(TIDHASH(td->td_tid), td, td_hash);
        rw_wunlock(&tidhash_lock);
}

void
tidhash_remove(struct thread *td)
{
        rw_wlock(&tidhash_lock);
        LIST_REMOVE(td, td_hash);
        rw_wunlock(&tidhash_lock);
}