kern_thread.c revision 275794
/*-
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 *  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include "opt_witness.h"
#include "opt_kdtrace.h"
#include "opt_hwpmc_hooks.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/kern/kern_thread.c 275794 2014-12-15 10:46:07Z kib $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rangelock.h>
#include <sys/resourcevar.h>
#include <sys/sdt.h>
#include <sys/smp.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/selinfo.h>
#include <sys/turnstile.h>
#include <sys/ktr.h>
#include <sys/rwlock.h>
#include <sys/umtx.h>
#include <sys/cpuset.h>
#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#include <security/audit/audit.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <sys/eventhandler.h>

SDT_PROVIDER_DECLARE(proc);
SDT_PROBE_DEFINE(proc, , , lwp__exit);
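/*
 * Editorial note (not part of the original file): the lwp__exit probe
 * defined above surfaces through the DTrace proc provider, so thread
 * exits can be observed from userland with something like:
 *
 *	dtrace -n 'proc:::lwp-exit { printf("pid %d", pid); }'
 *
 * The probe-name spelling is an assumption based on SDT's usual
 * translation of "__" in probe names to "-".
 */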
/*
 * thread related storage.
 */
static uma_zone_t thread_zone;

TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
static struct mtx zombie_lock;
MTX_SYSINIT(zombie_lock, &zombie_lock, "zombie lock", MTX_SPIN);

static void thread_zombie(struct thread *);

#define TID_BUFFER_SIZE	1024

struct mtx tid_lock;
static struct unrhdr *tid_unrhdr;
static lwpid_t tid_buffer[TID_BUFFER_SIZE];
static int tid_head, tid_tail;
static MALLOC_DEFINE(M_TIDHASH, "tidhash", "thread hash");

struct tidhashhead *tidhashtbl;
u_long tidhash;
struct rwlock tidhash_lock;

/*
 * Recently freed TIDs are parked in a small ring buffer before being
 * returned to the unr(9) allocator, so IDs are not recycled
 * immediately; tid_alloc() falls back to that buffer only when the
 * allocator itself is exhausted.
 */
static lwpid_t
tid_alloc(void)
{
	lwpid_t tid;

	tid = alloc_unr(tid_unrhdr);
	if (tid != -1)
		return (tid);
	mtx_lock(&tid_lock);
	if (tid_head == tid_tail) {
		mtx_unlock(&tid_lock);
		return (-1);
	}
	tid = tid_buffer[tid_head];
	tid_head = (tid_head + 1) % TID_BUFFER_SIZE;
	mtx_unlock(&tid_lock);
	return (tid);
}

static void
tid_free(lwpid_t tid)
{
	lwpid_t tmp_tid = -1;

	mtx_lock(&tid_lock);
	if ((tid_tail + 1) % TID_BUFFER_SIZE == tid_head) {
		tmp_tid = tid_buffer[tid_head];
		tid_head = (tid_head + 1) % TID_BUFFER_SIZE;
	}
	tid_buffer[tid_tail] = tid;
	tid_tail = (tid_tail + 1) % TID_BUFFER_SIZE;
	mtx_unlock(&tid_lock);
	if (tmp_tid != -1)
		free_unr(tid_unrhdr, tmp_tid);
}

/*
 * Prepare a thread for use.
 */
static int
thread_ctor(void *mem, int size, void *arg, int flags)
{
	struct thread *td;

	td = (struct thread *)mem;
	td->td_state = TDS_INACTIVE;
	td->td_oncpu = NOCPU;

	td->td_tid = tid_alloc();

	/*
	 * Note that td_critnest begins life as 1 because the thread is not
	 * running and is thereby implicitly waiting to be on the receiving
	 * end of a context switch.
	 */
	td->td_critnest = 1;
	td->td_lend_user_pri = PRI_MAX;
	EVENTHANDLER_INVOKE(thread_ctor, td);
#ifdef AUDIT
	audit_thread_alloc(td);
#endif
	umtx_thread_alloc(td);
	return (0);
}

/*
 * Reclaim a thread after use.
 */
static void
thread_dtor(void *mem, int size, void *arg)
{
	struct thread *td;

	td = (struct thread *)mem;

#ifdef INVARIANTS
	/* Verify that this thread is in a safe state to free. */
	switch (td->td_state) {
	case TDS_INHIBITED:
	case TDS_RUNNING:
	case TDS_CAN_RUN:
	case TDS_RUNQ:
		/*
		 * We must never unlink a thread that is in one of
		 * these states, because it is currently active.
		 */
		panic("bad state for thread unlinking");
		/* NOTREACHED */
	case TDS_INACTIVE:
		break;
	default:
		panic("bad thread state");
		/* NOTREACHED */
	}
#endif
#ifdef AUDIT
	audit_thread_free(td);
#endif
	/* Free all OSD associated to this thread. */
	osd_thread_exit(td);

	EVENTHANDLER_INVOKE(thread_dtor, td);
	tid_free(td->td_tid);
}

/*
 * Initialize type-stable parts of a thread (when newly created).
 */
static int
thread_init(void *mem, int size, int flags)
{
	struct thread *td;

	td = (struct thread *)mem;

	td->td_sleepqueue = sleepq_alloc();
	td->td_turnstile = turnstile_alloc();
	td->td_rlqe = NULL;
	EVENTHANDLER_INVOKE(thread_init, td);
	td->td_sched = (struct td_sched *)&td[1];
	umtx_thread_init(td);
	td->td_kstack = 0;
	return (0);
}
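/*
 * Editorial note on the UMA lifecycle (a summary, not from the original
 * file): thread_init()/thread_fini() run only when an item first enters
 * or finally leaves the zone's backing slabs, so the sleepqueue and
 * turnstile allocated there survive across reuse, while
 * thread_ctor()/thread_dtor() run on every uma_zalloc()/uma_zfree()
 * and handle per-incarnation state such as the TID.
 */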
/*
 * Tear down type-stable parts of a thread (just before being discarded).
 */
static void
thread_fini(void *mem, int size)
{
	struct thread *td;

	td = (struct thread *)mem;
	EVENTHANDLER_INVOKE(thread_fini, td);
	rlqentry_free(td->td_rlqe);
	turnstile_free(td->td_turnstile);
	sleepq_free(td->td_sleepqueue);
	umtx_thread_fini(td);
	seltdfini(td);
}

/*
 * For a newly created process,
 * link up all the structures and its initial threads etc.
 * called from:
 * {arch}/{arch}/machdep.c   ia64_init(), init386() etc.
 * proc_dtor() (should go away)
 * proc_init()
 */
void
proc_linkup0(struct proc *p, struct thread *td)
{
	TAILQ_INIT(&p->p_threads);	/* all threads in proc */
	proc_linkup(p, td);
}

void
proc_linkup(struct proc *p, struct thread *td)
{

	sigqueue_init(&p->p_sigqueue, p);
	p->p_ksi = ksiginfo_alloc(1);
	if (p->p_ksi != NULL) {
		/* XXX p_ksi may be null if ksiginfo zone is not ready */
		p->p_ksi->ksi_flags = KSI_EXT | KSI_INS;
	}
	LIST_INIT(&p->p_mqnotifier);
	p->p_numthreads = 0;
	thread_link(td, p);
}

/*
 * Initialize global thread allocation resources.
 */
void
threadinit(void)
{

	mtx_init(&tid_lock, "TID lock", NULL, MTX_DEF);

	/*
	 * pid_max cannot be greater than PID_MAX.
	 * leave one number for thread0.
	 */
	tid_unrhdr = new_unrhdr(PID_MAX + 2, INT_MAX, &tid_lock);

	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
	    thread_ctor, thread_dtor, thread_init, thread_fini,
	    16 - 1, 0);
	tidhashtbl = hashinit(maxproc / 2, M_TIDHASH, &tidhash);
	rw_init(&tidhash_lock, "tidhash");
}

/*
 * Place an unused thread on the zombie list.
 * Use the slpq as that must be unused by now.
 */
void
thread_zombie(struct thread *td)
{
	mtx_lock_spin(&zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_threads, td, td_slpq);
	mtx_unlock_spin(&zombie_lock);
}

/*
 * Release a thread that has exited after cpu_throw().
 */
void
thread_stash(struct thread *td)
{
	atomic_subtract_rel_int(&td->td_proc->p_exitthreads, 1);
	thread_zombie(td);
}

/*
 * Reap zombie resources.
 */
void
thread_reap(void)
{
	struct thread *td_first, *td_next;

	/*
	 * Don't even bother to lock if none at this instant,
	 * we really don't care about the next instant..
	 */
	if (!TAILQ_EMPTY(&zombie_threads)) {
		mtx_lock_spin(&zombie_lock);
		td_first = TAILQ_FIRST(&zombie_threads);
		if (td_first)
			TAILQ_INIT(&zombie_threads);
		mtx_unlock_spin(&zombie_lock);
		while (td_first) {
			td_next = TAILQ_NEXT(td_first, td_slpq);
			if (td_first->td_ucred)
				crfree(td_first->td_ucred);
			thread_free(td_first);
			td_first = td_next;
		}
	}
}

/*
 * Allocate a thread.
 */
struct thread *
thread_alloc(int pages)
{
	struct thread *td;

	thread_reap();	/* check if any zombies to get */

	td = (struct thread *)uma_zalloc(thread_zone, M_WAITOK);
	KASSERT(td->td_kstack == 0, ("thread_alloc got thread with kstack"));
	if (!vm_thread_new(td, pages)) {
		uma_zfree(thread_zone, td);
		return (NULL);
	}
	cpu_thread_alloc(td);
	return (td);
}

int
thread_alloc_stack(struct thread *td, int pages)
{

	KASSERT(td->td_kstack == 0,
	    ("thread_alloc_stack called on a thread with kstack"));
	if (!vm_thread_new(td, pages))
		return (0);
	cpu_thread_alloc(td);
	return (1);
}
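/*
 * Illustrative usage (an editorial sketch, not from the original file):
 * a consumer such as the thr_new() path allocates with pages == 0 to
 * request the default kernel stack size and must be prepared for
 * failure:
 *
 *	struct thread *newtd;
 *
 *	newtd = thread_alloc(0);
 *	if (newtd == NULL)
 *		return (ENOMEM);
 *
 * and releases the thread with thread_free(newtd) on error paths.
 */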
/*
 * Deallocate a thread.
 */
void
thread_free(struct thread *td)
{

	lock_profile_thread_exit(td);
	if (td->td_cpuset)
		cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_free(td);
	if (td->td_kstack != 0)
		vm_thread_dispose(td);
	uma_zfree(thread_zone, td);
}
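/*
 * Editorial note on the exit path (a summary, not from the original
 * file): an exiting thread cannot free itself because it is still
 * running on its own stack.  thread_exit() therefore parks itself in
 * the per-CPU deadthread holder and switches away via sched_throw();
 * the next thread to run on that CPU stashes the dead thread on
 * zombie_threads via thread_stash(), and a later thread_alloc() or
 * thread_wait() reaps it through thread_reap() into thread_free().
 */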
/*
 * Discard the current thread and exit from its context.
 * Always called with scheduler locked.
 *
 * Because we can't free a thread while we're operating under its context,
 * push the current thread into our CPU's deadthread holder. This means
 * we needn't worry about someone else grabbing our context before we
 * do a cpu_throw().
 */
void
thread_exit(void)
{
	uint64_t runtime, new_switchtime;
	struct thread *td;
	struct thread *td2;
	struct proc *p;
	int wakeup_swapper;

	td = curthread;
	p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&Giant, MA_NOTOWNED);

	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(p != NULL, ("thread exiting without a process"));
	CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td,
	    (long)p->p_pid, td->td_name);
	KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending"));

#ifdef AUDIT
	AUDIT_SYSCALL_EXIT(0, td);
#endif
	umtx_thread_exit(td);
	/*
	 * drop FPU & debug register state storage, or any other
	 * architecture specific resources that
	 * would not be on a new untouched process.
	 */
	cpu_thread_exit(td);	/* XXXSMP */

	/*
	 * The last thread is left attached to the process
	 * So that the whole bundle gets recycled. Skip
	 * all this stuff if we never had threads.
	 * EXIT clears all sign of other threads when
	 * it goes to single threading, so the last thread always
	 * takes the short path.
	 */
	if (p->p_flag & P_HADTHREADS) {
		if (p->p_numthreads > 1) {
			atomic_add_int(&td->td_proc->p_exitthreads, 1);
			thread_unlink(td);
			td2 = FIRST_THREAD_IN_PROC(p);
			sched_exit_thread(td2, td);

			/*
			 * The test below is NOT true if we are the
			 * sole exiting thread. P_STOPPED_SINGLE is unset
			 * in exit1() after it is the only survivor.
			 */
			if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
				if (p->p_numthreads == p->p_suspcount) {
					thread_lock(p->p_singlethread);
					wakeup_swapper = thread_unsuspend_one(
					    p->p_singlethread);
					thread_unlock(p->p_singlethread);
					if (wakeup_swapper)
						kick_proc0();
				}
			}

			PCPU_SET(deadthread, td);
		} else {
			/*
			 * The last thread is exiting.. but not through exit()
			 */
			panic("thread_exit: Last thread exiting on its own");
		}
	}
#ifdef HWPMC_HOOKS
	/*
	 * If this thread is part of a process that is being tracked by
	 * hwpmc(4), inform the module of the thread's impending exit.
	 */
	if (PMC_PROC_IS_USING_PMCS(td->td_proc))
		PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
#endif
	PROC_UNLOCK(p);

	/* Do the same timestamp bookkeeping that mi_switch() would do. */
	new_switchtime = cpu_ticks();
	runtime = new_switchtime - PCPU_GET(switchtime);
	td->td_runtime += runtime;
	td->td_incruntime += runtime;
	PCPU_SET(switchtime, new_switchtime);
	PCPU_SET(switchticks, ticks);
	PCPU_INC(cnt.v_swtch);

	/* Save our resource usage in our process. */
	td->td_ru.ru_nvcsw++;
	ruxagg(p, td);
	rucollect(&p->p_ru, &td->td_ru);

	thread_lock(td);
	PROC_SUNLOCK(p);
	td->td_state = TDS_INACTIVE;
#ifdef WITNESS
	witness_thread_exit(td);
#endif
	CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td);
	sched_throw(td);
	panic("I'm a teapot!");
	/* NOTREACHED */
}

/*
 * Do any thread specific cleanups that may be needed in wait().
 * Called with Giant, proc and schedlock not held.
 */
void
thread_wait(struct proc *p)
{
	struct thread *td;

	mtx_assert(&Giant, MA_NOTOWNED);
	KASSERT(p->p_numthreads == 1, ("multiple threads in thread_wait()"));
	KASSERT(p->p_exitthreads == 0, ("p_exitthreads leaking"));
	td = FIRST_THREAD_IN_PROC(p);
	/* Lock the last thread so we spin until it exits cpu_throw(). */
	thread_lock(td);
	thread_unlock(td);
	lock_profile_thread_exit(td);
	cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_clean(td);
	crfree(td->td_ucred);
	thread_reap();	/* check for zombie threads etc. */
}

/*
 * Link a thread to a process.
 * set up anything that needs to be initialized for it to
 * be used by the process.
 */
void
thread_link(struct thread *td, struct proc *p)
{

	/*
	 * XXX This can't be enabled because it's called for proc0 before
	 * its lock has been created.
	 * PROC_LOCK_ASSERT(p, MA_OWNED);
	 */
	td->td_state = TDS_INACTIVE;
	td->td_proc = p;
	td->td_flags = TDF_INMEM;

	LIST_INIT(&td->td_contested);
	LIST_INIT(&td->td_lprof[0]);
	LIST_INIT(&td->td_lprof[1]);
	sigqueue_init(&td->td_sigqueue, p);
	callout_init(&td->td_slpcallout, CALLOUT_MPSAFE);
	TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
	p->p_numthreads++;
}

/*
 * Called from:
 *  thread_exit()
 */
void
thread_unlink(struct thread *td)
{
	struct proc *p = td->td_proc;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	TAILQ_REMOVE(&p->p_threads, td, td_plist);
	p->p_numthreads--;
	/* could clear a few other things here */
	/* Must NOT clear links to proc! */
}

static int
calc_remaining(struct proc *p, int mode)
{
	int remaining;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	if (mode == SINGLE_EXIT)
		remaining = p->p_numthreads;
	else if (mode == SINGLE_BOUNDARY)
		remaining = p->p_numthreads - p->p_boundary_count;
	else if (mode == SINGLE_NO_EXIT)
		remaining = p->p_numthreads - p->p_suspcount;
	else
		panic("calc_remaining: wrong mode %d", mode);
	return (remaining);
}
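/*
 * Worked example (editorial, not from the original file): with
 * p_numthreads == 4, p_suspcount == 2 and p_boundary_count == 1,
 * calc_remaining() reports 4 for SINGLE_EXIT (every other thread must
 * still exit), 3 for SINGLE_BOUNDARY (one thread is already parked at
 * the user boundary) and 2 for SINGLE_NO_EXIT (two threads are already
 * suspended).  thread_single() below loops until the count drops to 1,
 * i.e. until only the caller remains.
 */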
/*
 * Enforce single-threading.
 *
 * Returns 1 if the caller must abort (another thread is waiting to
 * exit the process or similar). Process is locked!
 * Returns 0 when you are successfully the only thread running.
 * A process has successfully single threaded in the suspend mode when
 * there are no threads in user mode. Threads in the kernel must be
 * allowed to continue until they get to the user boundary. They may even
 * copy out their return values and data before suspending. They may
 * however be accelerated in reaching the user boundary as we will wake
 * up any sleeping threads that are interruptible (PCATCH).
 */
int
thread_single(int mode)
{
	struct thread *td;
	struct thread *td2;
	struct proc *p;
	int remaining, wakeup_swapper;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);

	if ((p->p_flag & P_HADTHREADS) == 0)
		return (0);

	/* Is someone already single threading? */
	if (p->p_singlethread != NULL && p->p_singlethread != td)
		return (1);

	if (mode == SINGLE_EXIT) {
		p->p_flag |= P_SINGLE_EXIT;
		p->p_flag &= ~P_SINGLE_BOUNDARY;
	} else {
		p->p_flag &= ~P_SINGLE_EXIT;
		if (mode == SINGLE_BOUNDARY)
			p->p_flag |= P_SINGLE_BOUNDARY;
		else
			p->p_flag &= ~P_SINGLE_BOUNDARY;
	}
	p->p_flag |= P_STOPPED_SINGLE;
	PROC_SLOCK(p);
	p->p_singlethread = td;
	remaining = calc_remaining(p, mode);
	while (remaining != 1) {
		if (P_SHOULDSTOP(p) != P_STOPPED_SINGLE)
			goto stopme;
		wakeup_swapper = 0;
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			thread_lock(td2);
			td2->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
			if (TD_IS_INHIBITED(td2)) {
				switch (mode) {
				case SINGLE_EXIT:
					if (TD_IS_SUSPENDED(td2))
						wakeup_swapper |=
						    thread_unsuspend_one(td2);
					if (TD_ON_SLEEPQ(td2) &&
					    (td2->td_flags & TDF_SINTR))
						wakeup_swapper |=
						    sleepq_abort(td2, EINTR);
					break;
				case SINGLE_BOUNDARY:
					if (TD_IS_SUSPENDED(td2) &&
					    !(td2->td_flags & TDF_BOUNDARY))
						wakeup_swapper |=
						    thread_unsuspend_one(td2);
					if (TD_ON_SLEEPQ(td2) &&
					    (td2->td_flags & TDF_SINTR))
						wakeup_swapper |=
						    sleepq_abort(td2, ERESTART);
					break;
				case SINGLE_NO_EXIT:
					if (TD_IS_SUSPENDED(td2) &&
					    !(td2->td_flags & TDF_BOUNDARY))
						wakeup_swapper |=
						    thread_unsuspend_one(td2);
					if (TD_ON_SLEEPQ(td2) &&
					    (td2->td_flags & TDF_SINTR))
						wakeup_swapper |=
						    sleepq_abort(td2, ERESTART);
					break;
				default:
					break;
				}
			}
#ifdef SMP
			else if (TD_IS_RUNNING(td2) && td != td2) {
				forward_signal(td2);
			}
#endif
			thread_unlock(td2);
		}
		if (wakeup_swapper)
			kick_proc0();
		remaining = calc_remaining(p, mode);

		/*
		 * Maybe we suspended some threads.. was it enough?
		 */
		if (remaining == 1)
			break;

stopme:
		/*
		 * Wake us up when everyone else has suspended.
		 * In the mean time we suspend as well.
		 */
		thread_suspend_switch(td);
		remaining = calc_remaining(p, mode);
	}
	if (mode == SINGLE_EXIT) {
		/*
		 * Convert the process to an unthreaded process.  The
		 * SINGLE_EXIT is called by exit1() or execve(), in
		 * both cases other threads must be retired.
		 */
		KASSERT(p->p_numthreads == 1, ("Unthreading with >1 threads"));
		p->p_singlethread = NULL;
		p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_HADTHREADS);

		/*
		 * Wait for any remaining threads to exit cpu_throw().
		 */
		while (p->p_exitthreads != 0) {
			PROC_SUNLOCK(p);
			PROC_UNLOCK(p);
			sched_relinquish(td);
			PROC_LOCK(p);
			PROC_SLOCK(p);
		}
	}
	PROC_SUNLOCK(p);
	return (0);
}
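/*
 * Illustrative callers (an editorial summary, not from the original
 * file): as the comment above notes, exit1() and the execve() path use
 * SINGLE_EXIT to retire all other threads for good, while callers of
 * the non-destructive modes later release the other threads with
 * thread_single_end(), along the lines of:
 *
 *	PROC_LOCK(p);
 *	if (thread_single(SINGLE_NO_EXIT))
 *		back out; another thread won the race
 *	... process is now effectively single threaded ...
 *	thread_single_end();
 *	PROC_UNLOCK(p);
 */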
bool
thread_suspend_check_needed(void)
{
	struct proc *p;
	struct thread *td;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	return (P_SHOULDSTOP(p) || ((p->p_flag & P_TRACED) != 0 &&
	    (td->td_dbgflags & TDB_SUSPEND) != 0));
}

/*
 * Called from locations that can safely check to see
 * whether we have to suspend or at least throttle for a
 * single-thread event (e.g. fork).
 *
 * Such locations include userret().
 * If the "return_instead" argument is nonzero, the thread must be able to
 * accept 0 (caller may continue), or 1 (caller must abort) as a result.
 *
 * The 'return_instead' argument tells the function if it may do a
 * thread_exit() or suspend, or whether the caller must abort and back
 * out instead.
 *
 * If the thread that set the single_threading request has set the
 * P_SINGLE_EXIT bit in the process flags then this call will never return
 * if 'return_instead' is false, but will exit.
 *
 * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
 *---------------+--------------------+---------------------
 *       0       | returns 0          |   returns 0 or 1
 *               | when ST ends       |   immediately
 *---------------+--------------------+---------------------
 *       1       | thread exits       |   returns 1
 *               |                    |   immediately
 *
 * 0 = thread_exit() or suspension ok,
 * other = return error instead of stopping the thread.
 *
 * While a full suspension is under effect, even a single threading
 * thread would be suspended if it made this call (but it shouldn't).
 * This call should only be made from places where
 * thread_exit() would be safe as that may be the outcome unless
 * return_instead is set.
 */
int
thread_suspend_check(int return_instead)
{
	struct thread *td;
	struct proc *p;
	int wakeup_swapper;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	while (thread_suspend_check_needed()) {
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			KASSERT(p->p_singlethread != NULL,
			    ("singlethread not set"));
			/*
			 * The only suspension in action is a
			 * single-threading. Single threader need not stop.
			 * XXX Should be safe to access unlocked
			 * as it can only be set to be true by us.
			 */
			if (p->p_singlethread == td)
				return (0);	/* Exempt from stopping. */
		}
		if ((p->p_flag & P_SINGLE_EXIT) && return_instead)
			return (EINTR);

		/* Should we goto user boundary if we didn't come from there? */
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
		    (p->p_flag & P_SINGLE_BOUNDARY) && return_instead)
			return (ERESTART);

		/*
		 * Ignore suspend requests for stop signals if they
		 * are deferred.
		 */
		if (P_SHOULDSTOP(p) == P_STOPPED_SIG &&
		    td->td_flags & TDF_SBDRY) {
			KASSERT(return_instead,
			    ("TDF_SBDRY set for unsafe thread_suspend_check"));
			return (0);
		}

		/*
		 * If the process is waiting for us to exit,
		 * this thread should just suicide.
		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
		 */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
			PROC_UNLOCK(p);
			tidhash_remove(td);
			PROC_LOCK(p);
			tdsigcleanup(td);
			PROC_SLOCK(p);
			thread_stopped(p);
			thread_exit();
		}

		PROC_SLOCK(p);
		thread_stopped(p);
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount + 1) {
				thread_lock(p->p_singlethread);
				wakeup_swapper =
				    thread_unsuspend_one(p->p_singlethread);
				thread_unlock(p->p_singlethread);
				if (wakeup_swapper)
					kick_proc0();
			}
		}
		PROC_UNLOCK(p);
		thread_lock(td);
		/*
		 * When a thread suspends, it just
		 * gets taken off all queues.
		 */
		thread_suspend_one(td);
		if (return_instead == 0) {
			p->p_boundary_count++;
			td->td_flags |= TDF_BOUNDARY;
		}
		PROC_SUNLOCK(p);
		mi_switch(SW_INVOL | SWT_SUSPEND, NULL);
		if (return_instead == 0)
			td->td_flags &= ~TDF_BOUNDARY;
		thread_unlock(td);
		PROC_LOCK(p);
		if (return_instead == 0) {
			PROC_SLOCK(p);
			p->p_boundary_count--;
			PROC_SUNLOCK(p);
		}
	}
	return (0);
}
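/*
 * Illustrative call pattern (editorial, not from the original file):
 * userret() invokes thread_suspend_check(0) and simply parks or exits
 * the thread as required, while interruptible kernel paths that cannot
 * stop safely pass a nonzero return_instead and back out:
 *
 *	if ((error = thread_suspend_check(1)) != 0)
 *		return (error);		expect EINTR or ERESTART here
 */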
void
thread_suspend_switch(struct thread *td)
{
	struct proc *p;

	p = td->td_proc;
	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	/*
	 * We implement thread_suspend_one in stages here to avoid
	 * dropping the proc lock while the thread lock is owned.
	 */
	thread_stopped(p);
	p->p_suspcount++;
	PROC_UNLOCK(p);
	thread_lock(td);
	td->td_flags &= ~TDF_NEEDSUSPCHK;
	TD_SET_SUSPENDED(td);
	sched_sleep(td, 0);
	PROC_SUNLOCK(p);
	DROP_GIANT();
	mi_switch(SW_VOL | SWT_SUSPEND, NULL);
	thread_unlock(td);
	PICKUP_GIANT();
	PROC_LOCK(p);
	PROC_SLOCK(p);
}

void
thread_suspend_one(struct thread *td)
{
	struct proc *p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	p->p_suspcount++;
	td->td_flags &= ~TDF_NEEDSUSPCHK;
	TD_SET_SUSPENDED(td);
	sched_sleep(td, 0);
}

int
thread_unsuspend_one(struct thread *td)
{
	struct proc *p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));
	TD_CLR_SUSPENDED(td);
	p->p_suspcount--;
	return (setrunnable(td));
}

/*
 * Allow all threads blocked by single threading to continue running.
 */
void
thread_unsuspend(struct proc *p)
{
	struct thread *td;
	int wakeup_swapper;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	wakeup_swapper = 0;
	if (!P_SHOULDSTOP(p)) {
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (TD_IS_SUSPENDED(td)) {
				wakeup_swapper |= thread_unsuspend_one(td);
			}
			thread_unlock(td);
		}
	} else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) &&
	    (p->p_numthreads == p->p_suspcount)) {
		/*
		 * Stopping everything also did the job for the single
		 * threading request. Now we've downgraded to single-threaded,
		 * let it continue.
		 */
		thread_lock(p->p_singlethread);
		wakeup_swapper = thread_unsuspend_one(p->p_singlethread);
		thread_unlock(p->p_singlethread);
	}
	if (wakeup_swapper)
		kick_proc0();
}
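/*
 * Editorial note on the wakeup_swapper convention (not from the
 * original file): thread_unsuspend_one() returns the result of
 * setrunnable(), which is nonzero when the thread's stack is swapped
 * out and the swapper (proc0) must run to bring it back in.  Callers
 * therefore OR the results together while the locks are held and call
 * kick_proc0() once, after the thread locks have been dropped.
 */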
/*
 * End the single threading mode.
 */
void
thread_single_end(void)
{
	struct thread *td;
	struct proc *p;
	int wakeup_swapper;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY);
	PROC_SLOCK(p);
	p->p_singlethread = NULL;
	wakeup_swapper = 0;
	/*
	 * If there are other threads they may now run,
	 * unless of course there is a blanket 'stop order'
	 * on the process. The single threader must be allowed
	 * to continue however as this is a bad place to stop.
	 */
	if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) {
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (TD_IS_SUSPENDED(td)) {
				wakeup_swapper |= thread_unsuspend_one(td);
			}
			thread_unlock(td);
		}
	}
	PROC_SUNLOCK(p);
	if (wakeup_swapper)
		kick_proc0();
}

struct thread *
thread_find(struct proc *p, lwpid_t tid)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_tid == tid)
			break;
	}
	return (td);
}

/* Locate a thread by number; return with proc lock held. */
struct thread *
tdfind(lwpid_t tid, pid_t pid)
{
#define RUN_THRESH	16
	struct thread *td;
	int run = 0;

	rw_rlock(&tidhash_lock);
	LIST_FOREACH(td, TIDHASH(tid), td_hash) {
		if (td->td_tid == tid) {
			if (pid != -1 && td->td_proc->p_pid != pid) {
				td = NULL;
				break;
			}
			PROC_LOCK(td->td_proc);
			if (td->td_proc->p_state == PRS_NEW) {
				PROC_UNLOCK(td->td_proc);
				td = NULL;
				break;
			}
			if (run > RUN_THRESH) {
				if (rw_try_upgrade(&tidhash_lock)) {
					LIST_REMOVE(td, td_hash);
					LIST_INSERT_HEAD(TIDHASH(td->td_tid),
					    td, td_hash);
					rw_wunlock(&tidhash_lock);
					return (td);
				}
			}
			break;
		}
		run++;
	}
	rw_runlock(&tidhash_lock);
	return (td);
}

void
tidhash_add(struct thread *td)
{
	rw_wlock(&tidhash_lock);
	LIST_INSERT_HEAD(TIDHASH(td->td_tid), td, td_hash);
	rw_wunlock(&tidhash_lock);
}

void
tidhash_remove(struct thread *td)
{
	rw_wlock(&tidhash_lock);
	LIST_REMOVE(td, td_hash);
	rw_wunlock(&tidhash_lock);
}
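/*
 * Editorial note on tdfind() (not from the original file): a lookup
 * that walks more than RUN_THRESH (16) chain entries opportunistically
 * upgrades the read lock and moves the found thread to the head of its
 * hash chain, a move-to-front heuristic that keeps frequently looked-up
 * TIDs cheap to find.  A typical caller pattern (editorial sketch):
 *
 *	td = tdfind(tid, p->p_pid);
 *	if (td == NULL)
 *		return (ESRCH);
 *	... use td with its process locked ...
 *	PROC_UNLOCK(td->td_proc);
 */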