/*
 * Copyright (c) 2000-2011 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_exit.c	8.7 (Berkeley) 2/12/94
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */

#include <machine/reg.h>
#include <machine/psl.h>

#include "compat_43.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ioctl.h>
#include <sys/proc_internal.h>
#include <sys/proc.h>
#include <sys/kauth.h>
#include <sys/tty.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/kernel.h>
#include <sys/wait.h>
#include <sys/file_internal.h>
#include <sys/vnode_internal.h>
#include <sys/syslog.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/ptrace.h>
#include <sys/user.h>
#include <sys/aio_kern.h>
#include <sys/sysproto.h>
#include <sys/signalvar.h>
#include <sys/kdebug.h>
#include <sys/filedesc.h>	/* fdfree */
#if SYSV_SHM
#include <sys/shm_internal.h>	/* shmexit */
#endif
#include <sys/acct.h>		/* acct_process */

#include <security/audit/audit.h>
#include <bsm/audit_kevents.h>

#include <mach/mach_types.h>

#include <kern/kern_types.h>
#include <kern/kalloc.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/thread_call.h>
#include <kern/sched_prim.h>
#include <kern/assert.h>
#include <sys/codesign.h>

#if VM_PRESSURE_EVENTS
#include <kern/vm_pressure.h>
#endif

#if CONFIG_MEMORYSTATUS
#include <sys/kern_memorystatus.h>
#endif

#if CONFIG_DTRACE
/* Do not include dtrace.h, it redefines kmem_[alloc/free] */
extern void (*dtrace_fasttrap_exit_ptr)(proc_t);
extern void (*dtrace_helpers_cleanup)(proc_t);
extern void dtrace_lazy_dofs_destroy(proc_t);

#include <sys/dtrace_ptss.h>
#endif

#if CONFIG_MACF
#include <security/mac.h>
#include <sys/syscall.h>
#endif

#include <mach/mach_types.h>
#include <mach/task.h>
#include <mach/thread_act.h>

#include <sys/sdt.h>

extern char init_task_failure_data[];
void proc_prepareexit(proc_t p, int rv, boolean_t perf_notify);
void vfork_exit(proc_t p, int rv);
void vproc_exit(proc_t p);
__private_extern__ void munge_user64_rusage(struct rusage *a_rusage_p, struct user64_rusage *a_user_rusage_p);
__private_extern__ void munge_user32_rusage(struct rusage *a_rusage_p, struct user32_rusage *a_user_rusage_p);
static int reap_child_locked(proc_t parent, proc_t child, int deadparent, int reparentedtoinit, int locked, int droplock);

/*
 * Things which should have prototypes in headers, but don't.
 */
void *get_bsduthreadarg(thread_t);
void proc_exit(proc_t p);
int wait1continue(int result);
int waitidcontinue(int result);
int *get_bsduthreadrval(thread_t);
kern_return_t sys_perf_notify(thread_t thread, int pid);
kern_return_t task_exception_notify(exception_type_t exception,
	mach_exception_data_type_t code, mach_exception_data_type_t subcode);
void delay(int);

/*
 * NOTE: Source and target may *NOT* overlap!
 * XXX Should share code with bsd/dev/ppc/unix_signal.c
 */
void
siginfo_user_to_user32(user_siginfo_t *in, user32_siginfo_t *out)
{
	out->si_signo = in->si_signo;
	out->si_errno = in->si_errno;
	out->si_code = in->si_code;
	out->si_pid = in->si_pid;
	out->si_uid = in->si_uid;
	out->si_status = in->si_status;
	out->si_addr = CAST_DOWN_EXPLICIT(user32_addr_t, in->si_addr);
	/* following cast works for sival_int because of padding */
	out->si_value.sival_ptr = CAST_DOWN_EXPLICIT(user32_addr_t, in->si_value.sival_ptr);
	out->si_band = in->si_band;	/* range reduction */
}

void
siginfo_user_to_user64(user_siginfo_t *in, user64_siginfo_t *out)
{
	out->si_signo = in->si_signo;
	out->si_errno = in->si_errno;
	out->si_code = in->si_code;
	out->si_pid = in->si_pid;
	out->si_uid = in->si_uid;
	out->si_status = in->si_status;
	out->si_addr = in->si_addr;
	/* following cast works for sival_int because of padding */
	out->si_value.sival_ptr = in->si_value.sival_ptr;
	out->si_band = in->si_band;	/* range reduction */
}

static int
copyoutsiginfo(user_siginfo_t *native, boolean_t is64, user_addr_t uaddr)
{
	if (is64) {
		user64_siginfo_t sinfo64;

		bzero(&sinfo64, sizeof (sinfo64));
		siginfo_user_to_user64(native, &sinfo64);
		return (copyout(&sinfo64, uaddr, sizeof (sinfo64)));
	} else {
		user32_siginfo_t sinfo32;

		bzero(&sinfo32, sizeof (sinfo32));
		siginfo_user_to_user32(native, &sinfo32);
		return (copyout(&sinfo32, uaddr, sizeof (sinfo32)));
	}
}

/*
 * exit --
 *	Death of process.
 */
void
exit(proc_t p, struct exit_args *uap, int *retval)
{
	exit1(p, W_EXITCODE(uap->rval, 0), retval);

	/* drop funnel before we return */
	thread_exception_return();
	/* NOTREACHED */
	while (TRUE)
		thread_block(THREAD_CONTINUE_NULL);
	/* NOTREACHED */
}

/*
 * Exit: deallocate address space and other resources, change proc state
 * to zombie, and unlink proc from allproc and parent's lists.  Save exit
 * status and rusage for wait().  Check for child processes and orphan them.
 */
int
exit1(proc_t p, int rv, int *retval)
{
	return exit1_internal(p, rv, retval, TRUE, TRUE);
}

int
exit1_internal(proc_t p, int rv, int *retval, boolean_t thread_can_terminate, boolean_t perf_notify)
{
	thread_t self = current_thread();
	struct task *task = p->task;
	struct uthread *ut;
	int error = 0;

	/*
	 * If a thread in this task has already
	 * called exit(), then halt any others
	 * right here.
	 */

	ut = get_bsdthread_info(self);
	if (ut->uu_flag & UT_VFORK) {
		if (!thread_can_terminate) {
			return EINVAL;
		}

		vfork_exit(p, rv);
		vfork_return(p, retval, p->p_pid);
		unix_syscall_return(0);
		/* NOT REACHED */
	}

	/*
	 * The parameter list of audit_syscall_exit() was augmented to
	 * take the Darwin syscall number as the first parameter,
	 * which is currently required by mac_audit_postselect().
	 */

	/*
	 * The BSM token contains two components: an exit status as passed
	 * to exit(), and a return value to indicate what sort of exit it
	 * was.  The exit status is WEXITSTATUS(rv), but it's not clear
	 * what the return value is.
	 */
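	/*
	 * For reference, a sketch of the BSD wait-status encoding assumed
	 * here (see <sys/wait.h>): W_EXITCODE(ret, sig) packs the exit
	 * status into bits 8..15 and the signal number into bits 0..7,
	 * so WEXITSTATUS(rv) == ((rv >> 8) & 0xff).  For example,
	 * W_EXITCODE(1, 0) == 0x0100 and WEXITSTATUS(0x0100) == 1.
	 */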
	AUDIT_ARG(exit, WEXITSTATUS(rv), 0);
	AUDIT_SYSCALL_EXIT(SYS_exit, p, ut, 0); /* Exit is always successful */

	DTRACE_PROC1(exit, int, CLD_EXITED);

	/* mark that the process is going to exit and pull it out of DBG/disk throttle */
	proc_removethrottle(p);

#if CONFIG_MEMORYSTATUS
	memorystatus_list_remove(p->p_pid);
#endif

	proc_lock(p);
	error = proc_transstart(p, 1);
	if (error == EDEADLK) {
		/* Temp: A deadlock error implies that a multithreaded exec
		 * is in progress.  Instead of letting exit continue and
		 * corrupt the freed memory, let the exit thread return.
		 * This avoids the corruption in the remote-termination case.
		 */
		proc_unlock(p);
		if (current_proc() == p) {
			thread_exception_return();
		} else {
			/* external termination like jetsam */
			return(error);
		}
	}

	while (p->exit_thread != self) {
		if (sig_try_locked(p) <= 0) {
			proc_transend(p, 1);
			if (get_threadtask(self) != task) {
				proc_unlock(p);
				return(0);
			}
			proc_unlock(p);

			thread_terminate(self);
			if (!thread_can_terminate) {
				return 0;
			}

			thread_exception_return();
			/* NOTREACHED */
		}
		sig_lock_to_exit(p);
	}
	if (p == initproc) {
		proc_unlock(p);
		printf("pid 1 exited (signal %d, exit %d)",
		    WTERMSIG(rv), WEXITSTATUS(rv));
		panic("%s died\nState at Last Exception:\n\n%s",
			(p->p_comm[0] != '\0' ?
				p->p_comm :
				"launchd"),
			init_task_failure_data);
	}

	p->p_lflag |= P_LEXIT;
	p->p_xstat = rv;

	proc_transend(p, 1);
	proc_unlock(p);

	proc_prepareexit(p, rv, perf_notify);

	/* Last thread to terminate will call proc_exit() */
	task_terminate_internal(task);

	return(0);
}

void
proc_prepareexit(proc_t p, int rv, boolean_t perf_notify)
{
	mach_exception_data_type_t code, subcode;
	struct uthread *ut;
	thread_t self = current_thread();
	ut = get_bsdthread_info(self);

	/* If a core should be generated, notify crash reporter */
	if (hassigprop(WTERMSIG(rv), SA_CORE) || ((p->p_csflags & CS_KILLED) != 0)) {
		/*
		 * Workaround for processes checking up on PT_DENY_ATTACH:
		 * should be backed out post-Leopard (details in 5431025).
		 */
		if ((SIGSEGV == WTERMSIG(rv)) &&
		    (p->p_pptr->p_lflag & P_LNOATTACH)) {
			goto skipcheck;
		}

		/*
		 * Crash Reporter looks for the signal value, original exception
		 * type, and low 20 bits of the original code in code[0]
		 * (8, 4, and 20 bits respectively). code[1] is unmodified.
		 */
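		/*
		 * Worked example (illustrative only): for WTERMSIG(rv) ==
		 * SIGSEGV (11 == 0x0b), an original exception type of 1 and
		 * an original code of 0x2, the packing below yields
		 *	(0x0b << 24) | (0x1 << 20) | 0x00002 == 0x0b100002.
		 */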
		code = ((WTERMSIG(rv) & 0xff) << 24) |
		    ((ut->uu_exception & 0x0f) << 20) |
		    ((int)ut->uu_code & 0xfffff);
		subcode = ut->uu_subcode;
		(void) task_exception_notify(EXC_CRASH, code, subcode);
	}

skipcheck:
	/* Notify the perf server? */
	if (perf_notify) {
		(void)sys_perf_notify(self, p->p_pid);
	}

	/*
	 * Remove proc from allproc queue and from pidhash chain.
	 * Need to do this before we do anything that can block.
	 * Not doing so causes things like mount() to find this proc
	 * on allproc in a partially cleaned state.
	 */

	proc_list_lock();

	LIST_REMOVE(p, p_list);
	LIST_INSERT_HEAD(&zombproc, p, p_list);	/* Place onto zombproc. */
	/* will not be visible via proc_find */
	p->p_listflag |= P_LIST_EXITED;

	proc_list_unlock();


#ifdef PGINPROF
	vmsizmon();
#endif
	/*
	 * If parent is waiting for us to exit or exec,
	 * P_LPPWAIT is set; we will wake up the parent below.
	 */
	proc_lock(p);
	p->p_lflag &= ~(P_LTRACED | P_LPPWAIT);
	p->p_sigignore = ~(sigcantmask);
	ut->uu_siglist = 0;
	proc_unlock(p);
}

void
proc_exit(proc_t p)
{
	proc_t q;
	proc_t pp;
	struct task *task = p->task;
	vnode_t tvp = NULLVP;
	struct pgrp * pg;
	struct session *sessp;
	struct uthread * uth;
	pid_t pid;
	int exitval;
	int knote_hint;

	uth = (struct uthread *)get_bsdthread_info(current_thread());

	proc_lock(p);
	proc_transstart(p, 1);
	if (!(p->p_lflag & P_LEXIT)) {
		/*
		 * This can happen if a thread_terminate() occurs
		 * in a single-threaded process.
		 */
		p->p_lflag |= P_LEXIT;
		proc_transend(p, 1);
		proc_unlock(p);
		proc_prepareexit(p, 0, TRUE);
		(void) task_terminate_internal(task);
		proc_lock(p);
	} else {
		proc_transend(p, 1);
	}

	p->p_lflag |= P_LPEXIT;

	/*
	 * Other kernel threads may be in the middle of signalling this process.
	 * Wait for those threads to wrap it up before making the process
	 * disappear on them.
	 */
	if ((p->p_lflag & P_LINSIGNAL) || (p->p_sigwaitcnt > 0)) {
		p->p_sigwaitcnt++;
		while ((p->p_lflag & P_LINSIGNAL) || (p->p_sigwaitcnt > 1))
			msleep(&p->p_sigmask, &p->p_mlock, PWAIT, "proc_sigdrain", NULL);
		p->p_sigwaitcnt--;
	}

	proc_unlock(p);
	pid = p->p_pid;
	exitval = p->p_xstat;
	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON,
		BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXIT) | DBG_FUNC_START,
		pid, exitval, 0, 0, 0);

#if CONFIG_DTRACE
	/*
	 * Free any outstanding lazy dof entries. It is imperative we
	 * always call dtrace_lazy_dofs_destroy, rather than null check
	 * and call if !NULL. If we NULL test, during lazy dof faulting
	 * we can race with the faulting code and proceed from here to
	 * beyond the helpers cleanup. The lazy dof faulting will then
	 * install new helpers which will never be cleaned up, and leak.
	 */
	dtrace_lazy_dofs_destroy(p);

	/*
	 * Clean up any DTrace helper actions or probes for the process.
	 */
	if (p->p_dtrace_helpers != NULL) {
		(*dtrace_helpers_cleanup)(p);
	}

	/*
	 * Clean up any DTrace probes associated with this process.
	 */
	/*
	 * APPLE NOTE: We release ptss pages/entries in dtrace_fasttrap_exit_ptr(),
	 * call this after dtrace_helpers_cleanup()
	 */
	proc_lock(p);
	if (p->p_dtrace_probes && dtrace_fasttrap_exit_ptr) {
		(*dtrace_fasttrap_exit_ptr)(p);
	}
	proc_unlock(p);
#endif

	/* XXX Zombie allocation may fail, in which case stats get lost */
	MALLOC_ZONE(p->p_ru, struct rusage *,
			sizeof (*p->p_ru), M_ZOMBIE, M_WAITOK);

	nspace_proc_exit(p);

#if VM_PRESSURE_EVENTS
	vm_pressure_proc_cleanup(p);
#endif

	/*
	 * need to cancel async IO requests that can be cancelled and wait for those
	 * already active.  MAY BLOCK!
	 */

	proc_refdrain(p);

	/* if any pending cpu limits action, clear it */
	task_clear_cpuusage(p->task);

	workqueue_mark_exiting(p);
	workqueue_exit(p);

	_aio_exit(p);

	/*
	 * Close open files and release open-file table.
	 * This may block!
	 */
	fdfree(p);

	if (uth->uu_lowpri_window) {
		/*
		 * task is marked as a low priority I/O type
		 * and the I/O we issued while flushing files on close
		 * collided with normal I/O operations...
		 * no need to throttle this thread since it's going away,
		 * but we do need to update our bookkeeping w.r.t. throttled threads
		 */
		throttle_lowpri_io(FALSE);
	}

#if !CONFIG_EMBEDDED
	if (p->p_legacy_behavior & PROC_LEGACY_BEHAVIOR_IOTHROTTLE) {
		throttle_legacy_process_decr();
	}
#endif

#if SYSV_SHM
	/* Close ref SYSV Shared memory */
	if (p->vm_shm)
		shmexit(p);
#endif
#if SYSV_SEM
	/* Release SYSV semaphores */
	semexit(p);
#endif

#if PSYNCH
	pth_proc_hashdelete(p);
#endif /* PSYNCH */

	sessp = proc_session(p);
	if (SESS_LEADER(p, sessp)) {

		if (sessp->s_ttyvp != NULLVP) {
			struct vnode *ttyvp;
			int ttyvid;
			struct vfs_context context;
			struct tty *tp;

			/*
			 * Controlling process.
			 * Signal foreground pgrp,
			 * drain controlling terminal
			 * and revoke access to controlling terminal.
			 */
			session_lock(sessp);
			tp = SESSION_TP(sessp);
			if ((tp != TTY_NULL) && (tp->t_session == sessp)) {
				session_unlock(sessp);

				tty_pgsignal(tp, SIGHUP, 1);

				session_lock(sessp);
				tp = SESSION_TP(sessp);
			}
			ttyvp = sessp->s_ttyvp;
			ttyvid = sessp->s_ttyvid;
			sessp->s_ttyvp = NULLVP;
			sessp->s_ttyvid = 0;
			sessp->s_ttyp = TTY_NULL;
			sessp->s_ttypgrpid = NO_PID;
			session_unlock(sessp);

			if ((ttyvp != NULLVP) && (vnode_getwithvid(ttyvp, ttyvid) == 0)) {
				if (tp != TTY_NULL) {
					tty_lock(tp);
					(void) ttywait(tp);
					tty_unlock(tp);
				}
				context.vc_thread = proc_thread(p); /* XXX */
				context.vc_ucred = kauth_cred_proc_ref(p);
				vnode_rele(ttyvp);
				VNOP_REVOKE(ttyvp, REVOKEALL, &context);
				vnode_put(ttyvp);
				kauth_cred_unref(&context.vc_ucred);
				ttyvp = NULLVP;
			}
			if (ttyvp)
				vnode_rele(ttyvp);
			if (tp)
				ttyfree(tp);
		}
		session_lock(sessp);
		sessp->s_leader = NULL;
		session_unlock(sessp);
	}
	session_rele(sessp);

	pg = proc_pgrp(p);
	fixjobc(p, pg, 0);
	pg_rele(pg);

	p->p_rlimit[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
	(void)acct_process(p);

	proc_list_lock();

	if ((p->p_listflag & P_LIST_EXITCOUNT) == P_LIST_EXITCOUNT) {
		p->p_listflag &= ~P_LIST_EXITCOUNT;
		proc_shutdown_exitcount--;
		if (proc_shutdown_exitcount == 0)
			wakeup(&proc_shutdown_exitcount);
	}

	/* wait till parentrefs are dropped and grant no more */
	proc_childdrainstart(p);
	while ((q = p->p_children.lh_first) != NULL) {
		int reparentedtoinit = (q->p_listflag & P_LIST_DEADPARENT) ? 1 : 0;
		q->p_listflag |= P_LIST_DEADPARENT;
		if (q->p_stat == SZOMB) {
			if (p != q->p_pptr)
				panic("parent child linkage broken");
			/* check for sysctl zomb lookup */
			while ((q->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) {
				msleep(&q->p_stat, proc_list_mlock, PWAIT, "waitcoll", 0);
			}
			q->p_listflag |= P_LIST_WAITING;
			/*
			 * This is a named reference and it is not granted
			 * if the reap is already in progress.  So we get
			 * the reference here exclusively and there can be
			 * no waiters.  So there is no need for a wakeup
			 * after we are done.  Also the reap frees the structure
			 * and the proc struct cannot be used for wakeups as well.
			 * It is safe to use q here as this is a system reap.
			 */
			(void)reap_child_locked(p, q, 1, reparentedtoinit, 1, 0);
		} else {
			proc_reparentlocked(q, initproc, 0, 1);
			/*
			 * Traced processes are killed
			 * since their existence means someone is messing up.
			 */
			if (q->p_lflag & P_LTRACED) {
				/*
				 * Take a reference on the child process to
				 * ensure it doesn't exit and disappear between
				 * the time we drop the list_lock and attempt
				 * to acquire its proc_lock.
				 */
				if (proc_ref_locked(q) != q)
					continue;

				proc_list_unlock();
				proc_lock(q);
				q->p_lflag &= ~P_LTRACED;
				if (q->sigwait_thread) {
					thread_t thread = q->sigwait_thread;

					proc_unlock(q);
					/*
					 * The sigwait_thread could be stopped at a
					 * breakpoint.  Wake it up to kill.
					 * Need to do this as it could be a thread which is not
					 * the first thread in the task.  So any attempts to kill
					 * the process would result in a deadlock on q->sigwait.
					 */
					thread_resume(thread);
					clear_wait(thread, THREAD_INTERRUPTED);
					threadsignal(thread, SIGKILL, 0);
				} else {
					proc_unlock(q);
				}

				psignal(q, SIGKILL);
				proc_list_lock();
				proc_rele_locked(q);
			}
		}
	}

	proc_childdrainend(p);
	proc_list_unlock();

	/*
	 * Release reference to text vnode
	 */
	tvp = p->p_textvp;
	p->p_textvp = NULL;
	if (tvp != NULLVP) {
		vnode_rele(tvp);
	}

	/*
	 * Save exit status and final rusage info, adding in child rusage
	 * info and self times.  If we were unable to allocate a zombie
	 * structure, this information is lost.
	 */
	/* No need for locking here as no one other than this thread can access this */
	if (p->p_ru != NULL) {
		calcru(p, &p->p_stats->p_ru.ru_utime, &p->p_stats->p_ru.ru_stime, NULL);
		*p->p_ru = p->p_stats->p_ru;

		ruadd(p->p_ru, &p->p_stats->p_cru);
	}

	/*
	 * Free up profiling buffers.
	 */
	{
		struct uprof *p0 = &p->p_stats->p_prof, *p1, *pn;

		p1 = p0->pr_next;
		p0->pr_next = NULL;
		p0->pr_scale = 0;

		for (; p1 != NULL; p1 = pn) {
			pn = p1->pr_next;
			kfree(p1, sizeof *p1);
		}
	}

	proc_spinlock(p);
	if (thread_call_cancel(p->p_rcall))
		p->p_ractive--;

	while (p->p_ractive > 0) {
		proc_spinunlock(p);

		delay(1);

		proc_spinlock(p);
	}
	proc_spinunlock(p);

	thread_call_free(p->p_rcall);
	p->p_rcall = NULL;
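	/*
	 * Note (added for clarity): thread_call_cancel() only removes a
	 * still-pending invocation of the resource-limit callout; the
	 * delay(1) loop above then spins until any invocation already
	 * running on another CPU has finished (p_ractive drops to 0),
	 * at which point it is safe to free p_rcall.
	 */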

	/*
	 * Other substructures are freed from wait().
	 */
	FREE_ZONE(p->p_stats, sizeof *p->p_stats, M_PSTATS);
	p->p_stats = NULL;

	FREE_ZONE(p->p_sigacts, sizeof *p->p_sigacts, M_SIGACTS);
	p->p_sigacts = NULL;

	proc_limitdrop(p, 1);
	p->p_limit = NULL;


	/*
	 * Finish up by terminating the task
	 * and halt this thread (only if a
	 * member of the task exiting).
	 */
	p->task = TASK_NULL;
	set_bsdtask_info(task, NULL);

	knote_hint = NOTE_EXIT | (p->p_xstat & 0xffff);
	proc_knote(p, knote_hint);

	/* mark the thread as the one that is doing proc_exit
	 * no need to hold proc lock in uthread_free
	 */
	uth->uu_flag |= UT_PROCEXIT;
	/*
	 * Notify parent that we're gone.
	 */
	pp = proc_parent(p);
	if (pp->p_flag & P_NOCLDWAIT) {

#if 3839178
		/*
		 * If the parent is ignoring SIGCHLD, then POSIX requires
		 * us to not add the resource usage to the parent process -
		 * we are only going to hand it off to init to get reaped.
		 * We should contest the standard in this case on the basis
		 * of RLIMIT_CPU.
		 */
#else	/* !3839178 */
		/*
		 * Add child resource usage to parent before giving
		 * zombie to init.  If we were unable to allocate a
		 * zombie structure, this information is lost.
		 */
		if (p->p_ru != NULL) {
			proc_lock(pp);
			ruadd(&pp->p_stats->p_cru, p->p_ru);
			proc_unlock(pp);
		}
#endif	/* !3839178 */

		/* kernel can reap this one, no need to move it to launchd */
		proc_list_lock();
		p->p_listflag |= P_LIST_DEADPARENT;
		proc_list_unlock();
	}
	if ((p->p_listflag & P_LIST_DEADPARENT) == 0 || p->p_oppid) {
		if (pp != initproc) {
			proc_lock(pp);
			pp->si_pid = p->p_pid;
			pp->si_status = p->p_xstat;
			pp->si_code = CLD_EXITED;
			/*
			 * p_ucred usage is safe as it is an exiting process
			 * and reference is dropped in reap
			 */
			pp->si_uid = kauth_cred_getruid(p->p_ucred);
			proc_unlock(pp);
		}
		/* mark as a zombie */
		/* No need to take proc lock as all refs are drained and
		 * no one except parent (reaping) can look at this.
		 * The write is to an int and is coherent.  Also parent is
		 * keyed off of list lock for reaping
		 */
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON,
			BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXIT) | DBG_FUNC_END,
			pid, exitval, 0, 0, 0);
		p->p_stat = SZOMB;
		/*
		 * The current process can be reaped so, no one
		 * can depend on this
		 */

		psignal(pp, SIGCHLD);

		/* and now wakeup the parent */
		proc_list_lock();
		wakeup((caddr_t)pp);
		proc_list_unlock();
	} else {
		/* should be fine as parent proc would be initproc */
		/* mark as a zombie */
		/* No need to take proc lock as all refs are drained and
		 * no one except parent (reaping) can look at this.
		 * The write is to an int and is coherent.  Also parent is
		 * keyed off of list lock for reaping
		 */
		proc_list_lock();
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON,
			BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXIT) | DBG_FUNC_END,
			pid, exitval, 0, 0, 0);
		/* check for sysctl zomb lookup */
		while ((p->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) {
			msleep(&p->p_stat, proc_list_mlock, PWAIT, "waitcoll", 0);
		}
		/* safe to use p as this is a system reap */
		p->p_stat = SZOMB;
		p->p_listflag |= P_LIST_WAITING;

		/*
		 * This is a named reference and it is not granted
		 * if the reap is already in progress.  So we get
		 * the reference here exclusively and there can be
		 * no waiters.  So there is no need for a wakeup
		 * after we are done.  Also the reap frees the structure
		 * and the proc struct cannot be used for wakeups as well.
		 * It is safe to use p here as this is a system reap.
		 */
		(void)reap_child_locked(pp, p, 1, 0, 1, 1);
		/* list lock dropped by reap_child_locked */
	}
	if (uth->uu_lowpri_window) {
		/*
		 * task is marked as a low priority I/O type and we've
		 * somehow picked up another throttle during exit processing...
		 * no need to throttle this thread since it's going away,
		 * but we do need to update our bookkeeping w.r.t. throttled threads
		 */
		throttle_lowpri_io(FALSE);
	}

	proc_rele(pp);

}


/*
 * reap_child_locked
 *
 * Description:	Given a process from which all status information needed
 *		has already been extracted, if the process is a ptrace
 *		attach process, detach it and give it back to its real
 *		parent, else recover all resources remaining associated
 *		with it.
 *
 * Parameters:	proc_t parent		Parent of process being reaped
 *		proc_t child		Process to reap
 *
 * Returns:	0			Process was not reaped because it
 *					came from an attach
 *		1			Process was reaped
 */
static int
reap_child_locked(proc_t parent, proc_t child, int deadparent, int reparentedtoinit, int locked, int droplock)
{
	proc_t trace_parent = PROC_NULL;	/* Traced parent process, if tracing */

	if (locked == 1)
		proc_list_unlock();

	/*
	 * If we got the child via a ptrace 'attach',
	 * we need to give it back to the old parent.
	 *
	 * Exception: someone who has been reparented to launchd before being
	 * ptraced can simply be reaped, refer to radar 5677288
	 *	p_oppid			 -> ptraced
	 *	trace_parent == initproc -> away from launchd
	 *	reparentedtoinit	 -> came to launchd by reparenting
	 */
	if (child->p_oppid) {
		int knote_hint;
		pid_t oppid;

		proc_lock(child);
		oppid = child->p_oppid;
		child->p_oppid = 0;
		knote_hint = NOTE_EXIT | (child->p_xstat & 0xffff);
		proc_unlock(child);

		if ((trace_parent = proc_find(oppid))
			&& !((trace_parent == initproc) && reparentedtoinit)) {

			if (trace_parent != initproc) {
				/*
				 * proc internal fields and p_ucred usage safe
				 * here as child is dead and is not reaped or
				 * reparented yet
				 */
				proc_lock(trace_parent);
				trace_parent->si_pid = child->p_pid;
				trace_parent->si_status = child->p_xstat;
				trace_parent->si_code = CLD_CONTINUED;
				trace_parent->si_uid = kauth_cred_getruid(child->p_ucred);
				proc_unlock(trace_parent);
			}
			proc_reparentlocked(child, trace_parent, 1, 0);

			/* resend knote to original parent (and others) after reparenting */
			proc_knote(child, knote_hint);

			psignal(trace_parent, SIGCHLD);
			proc_list_lock();
			wakeup((caddr_t)trace_parent);
			child->p_listflag &= ~P_LIST_WAITING;
			wakeup(&child->p_stat);
			proc_list_unlock();
			proc_rele(trace_parent);
			if ((locked == 1) && (droplock == 0))
				proc_list_lock();
			return (0);
		}

		/*
		 * If we can't reparent (e.g. the original parent exited while child was being debugged, or
		 * original parent is the same as the debugger currently exiting), we still need to satisfy
		 * the knote lifecycle for other observers on the system. While the debugger was attached,
		 * the NOTE_EXIT would not have been broadcast during initial child termination.
		 */
		proc_knote(child, knote_hint);

		if (trace_parent != PROC_NULL) {
			proc_rele(trace_parent);
		}
	}

	proc_knote(child, NOTE_REAP);
	proc_knote_drain(child);

	child->p_xstat = 0;
	if (child->p_ru) {
		proc_lock(parent);
#if 3839178
		/*
		 * If the parent is ignoring SIGCHLD, then POSIX requires
		 * us to not add the resource usage to the parent process -
		 * we are only going to hand it off to init to get reaped.
		 * We should contest the standard in this case on the basis
		 * of RLIMIT_CPU.
		 */
		if (!(parent->p_flag & P_NOCLDWAIT))
#endif	/* 3839178 */
			ruadd(&parent->p_stats->p_cru, child->p_ru);
		proc_unlock(parent);
		FREE_ZONE(child->p_ru, sizeof *child->p_ru, M_ZOMBIE);
		child->p_ru = NULL;
	} else {
		printf("Warning : lost p_ru for %s\n", child->p_comm);
	}

	AUDIT_SESSION_PROCEXIT(child);

	/*
	 * Decrement the count of procs running with this uid.
	 * p_ucred usage is safe here as it is an exited process,
	 * and the reference is dropped after these calls down below
	 * (locking protection is provided by list lock held in chgproccnt)
	 */
	(void)chgproccnt(kauth_cred_getruid(child->p_ucred), -1);

#if CONFIG_LCTX
	ALLLCTX_LOCK;
	leavelctx(child);
	ALLLCTX_UNLOCK;
#endif

	/*
	 * Free up credentials.
	 */
	if (IS_VALID_CRED(child->p_ucred)) {
		kauth_cred_unref(&child->p_ucred);
	}

	/* XXXX Note NOT SAFE TO USE p_ucred from this point onwards */

	/*
	 * Finally finished with old proc entry.
	 * Unlink it from its process group and free it.
	 */
	leavepgrp(child);

	proc_list_lock();
	LIST_REMOVE(child, p_list);	/* off zombproc */
	parent->p_childrencnt--;
	LIST_REMOVE(child, p_sibling);
	/* If there are no more children wakeup parent */
	if ((deadparent != 0) && (LIST_EMPTY(&parent->p_children)))
		wakeup((caddr_t)parent);	/* with list lock held */
	child->p_listflag &= ~P_LIST_WAITING;
	wakeup(&child->p_stat);

	/* Take it out of process hash */
	LIST_REMOVE(child, p_hash);
	child->p_listflag &= ~P_LIST_INHASH;
	proc_checkdeadrefs(child);
	nprocs--;

	proc_list_unlock();

#if CONFIG_FINE_LOCK_GROUPS
	lck_mtx_destroy(&child->p_mlock, proc_mlock_grp);
	lck_mtx_destroy(&child->p_fdmlock, proc_fdmlock_grp);
#if CONFIG_DTRACE
	lck_mtx_destroy(&child->p_dtrace_sprlock, proc_lck_grp);
#endif
	lck_spin_destroy(&child->p_slock, proc_slock_grp);
#else /* CONFIG_FINE_LOCK_GROUPS */
	lck_mtx_destroy(&child->p_mlock, proc_lck_grp);
	lck_mtx_destroy(&child->p_fdmlock, proc_lck_grp);
#if CONFIG_DTRACE
	lck_mtx_destroy(&child->p_dtrace_sprlock, proc_lck_grp);
#endif
	lck_spin_destroy(&child->p_slock, proc_lck_grp);
#endif /* CONFIG_FINE_LOCK_GROUPS */
	workqueue_destroy_lock(child);

	FREE_ZONE(child, sizeof *child, M_PROC);
	if ((locked == 1) && (droplock == 0))
		proc_list_lock();

	return (1);
}


int
wait1continue(int result)
{
	void *vt;
	thread_t thread;
	int *retval;
	proc_t p;

	if (result)
		return(result);

	p = current_proc();
	thread = current_thread();
	vt = get_bsduthreadarg(thread);
	retval = get_bsduthreadrval(thread);
	return(wait4(p, (struct wait4_args *)vt, retval));
}

int
wait4(proc_t q, struct wait4_args *uap, int32_t *retval)
{
	__pthread_testcancel(1);
	return(wait4_nocancel(q, (struct wait4_nocancel_args *)uap, retval));
}

int
wait4_nocancel(proc_t q, struct wait4_nocancel_args *uap, int32_t *retval)
{
	int nfound;
	int sibling_count;
	proc_t p;
	int status, error;

	AUDIT_ARG(pid, uap->pid);

	if (uap->pid == 0)
		uap->pid = -q->p_pgrpid;
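	/*
	 * Note (summary of the wait4() convention, for clarity): pid > 0
	 * waits for that specific child; pid == 0 waits for any child in
	 * the caller's process group (normalized to -pgrpid above);
	 * pid < -1 waits for any child in process group abs(pid); and
	 * pid == WAIT_ANY (-1) matches any child.
	 */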

loop:
	proc_list_lock();
loop1:
	nfound = 0;
	sibling_count = 0;

	for (p = q->p_children.lh_first; p != 0; p = p->p_sibling.le_next) {
		if (p->p_sibling.le_next != 0)
			sibling_count++;
		if (uap->pid != WAIT_ANY &&
		    p->p_pid != uap->pid &&
		    p->p_pgrpid != -(uap->pid))
			continue;

		nfound++;

		/* XXX This is racy because we don't get the lock!!!! */

		if (p->p_listflag & P_LIST_WAITING) {
			(void)msleep(&p->p_stat, proc_list_mlock, PWAIT, "waitcoll", 0);
			goto loop1;
		}
		p->p_listflag |= P_LIST_WAITING;	/* only allow single thread to wait() */


		if (p->p_stat == SZOMB) {
			int reparentedtoinit = (p->p_listflag & P_LIST_DEADPARENT) ? 1 : 0;

			proc_list_unlock();
#if CONFIG_MACF
			if ((error = mac_proc_check_wait(q, p)) != 0)
				goto out;
#endif
			retval[0] = p->p_pid;
			if (uap->status) {
				/* Legacy apps expect only 8 bits of status */
				status = 0xffff & p->p_xstat;	/* convert to int */
				error = copyout((caddr_t)&status,
					uap->status,
					sizeof(status));
				if (error)
					goto out;
			}
			if (uap->rusage) {
				if (p->p_ru == NULL) {
					error = ENOMEM;
				} else {
					if (IS_64BIT_PROCESS(q)) {
						struct user64_rusage my_rusage;
						munge_user64_rusage(p->p_ru, &my_rusage);
						error = copyout((caddr_t)&my_rusage,
							uap->rusage,
							sizeof (my_rusage));
					}
					else {
						struct user32_rusage my_rusage;
						munge_user32_rusage(p->p_ru, &my_rusage);
						error = copyout((caddr_t)&my_rusage,
							uap->rusage,
							sizeof (my_rusage));
					}
				}
				/* information unavailable? */
				if (error)
					goto out;
			}

			/* Conformance change for 6577252.
			 * When SIGCHLD is blocked and wait() returns because the status
			 * of a child process is available and there are no other
			 * child processes, then any pending SIGCHLD signal is cleared.
			 */
			if (sibling_count == 0) {
				int mask = sigmask(SIGCHLD);
				uthread_t uth = (struct uthread *)get_bsdthread_info(current_thread());

				if ((uth->uu_sigmask & mask) != 0) {
					/* we are blocking SIGCHLD signals.  clear any pending SIGCHLD.
					 * This locking looks funny but it is protecting access to the
					 * thread via p_uthlist.
					 */
					proc_lock(q);
					uth->uu_siglist &= ~mask;	/* clear pending signal */
					proc_unlock(q);
				}
			}

			/* Clean up */
			(void)reap_child_locked(q, p, 0, reparentedtoinit, 0, 0);

			return (0);
		}
		if (p->p_stat == SSTOP && (p->p_lflag & P_LWAITED) == 0 &&
		    (p->p_lflag & P_LTRACED || uap->options & WUNTRACED)) {
			proc_list_unlock();
#if CONFIG_MACF
			if ((error = mac_proc_check_wait(q, p)) != 0)
				goto out;
#endif
			proc_lock(p);
			p->p_lflag |= P_LWAITED;
			proc_unlock(p);
			retval[0] = p->p_pid;
			if (uap->status) {
				status = W_STOPCODE(p->p_xstat);
				error = copyout((caddr_t)&status,
					uap->status,
					sizeof(status));
			} else
				error = 0;
			goto out;
		}
		/*
		 * If we are waiting for continued processes, and this
		 * process was continued
		 */
		if ((uap->options & WCONTINUED) &&
		    (p->p_flag & P_CONTINUED)) {
			proc_list_unlock();
#if CONFIG_MACF
			if ((error = mac_proc_check_wait(q, p)) != 0)
				goto out;
#endif

			/* Prevent other processes from waiting for this event */
			OSBitAndAtomic(~((uint32_t)P_CONTINUED), &p->p_flag);
			retval[0] = p->p_pid;
			if (uap->status) {
				status = W_STOPCODE(SIGCONT);
				error = copyout((caddr_t)&status,
					uap->status,
					sizeof(status));
			} else
				error = 0;
			goto out;
		}
		p->p_listflag &= ~P_LIST_WAITING;
		wakeup(&p->p_stat);
	}
	/* list lock is held when we get here any which way */
	if (nfound == 0) {
		proc_list_unlock();
		return (ECHILD);
	}

	if (uap->options & WNOHANG) {
		retval[0] = 0;
		proc_list_unlock();
		return (0);
	}

	if ((error = msleep0((caddr_t)q, proc_list_mlock, PWAIT | PCATCH | PDROP, "wait", 0, wait1continue)))
		return (error);

	goto loop;
out:
	proc_list_lock();
	p->p_listflag &= ~P_LIST_WAITING;
	wakeup(&p->p_stat);
	proc_list_unlock();
	return (error);
}

#if DEBUG
#define ASSERT_LCK_MTX_OWNED(lock)	\
				lck_mtx_assert(lock, LCK_MTX_ASSERT_OWNED)
#else
#define ASSERT_LCK_MTX_OWNED(lock)	/* nothing */
#endif

int
waitidcontinue(int result)
{
	void *vt;
	thread_t thread;
	int *retval;

	if (result)
		return (result);

	thread = current_thread();
	vt = get_bsduthreadarg(thread);
	retval = get_bsduthreadrval(thread);
	return (waitid(current_proc(), (struct waitid_args *)vt, retval));
}
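/*
 * Note (added for clarity): wait4() and waitid() block via msleep0()
 * with PDROP and a continuation function.  A thread woken from that
 * sleep does not resume on its old kernel stack; instead the
 * continuation (wait1continue() or waitidcontinue()) runs, re-fetches
 * the syscall arguments from the uthread, and restarts the syscall
 * from the top.
 */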
1332 * 1333 * Parameters: uap->idtype one of P_PID, P_PGID, P_ALL 1334 * uap->id pid_t or gid_t or ignored 1335 * uap->infop Address of siginfo_t struct in 1336 * user space into which to return status 1337 * uap->options flag values 1338 * 1339 * Returns: 0 Success 1340 * !0 Error returning status to user space 1341 */ 1342int 1343waitid(proc_t q, struct waitid_args *uap, int32_t *retval) 1344{ 1345 __pthread_testcancel(1); 1346 return (waitid_nocancel(q, (struct waitid_nocancel_args *)uap, retval)); 1347} 1348 1349int 1350waitid_nocancel(proc_t q, struct waitid_nocancel_args *uap, 1351 __unused int32_t *retval) 1352{ 1353 user_siginfo_t siginfo; /* siginfo data to return to caller */ 1354 boolean_t caller64 = IS_64BIT_PROCESS(q); 1355 int nfound; 1356 proc_t p; 1357 int error; 1358 1359 if (uap->options == 0 || 1360 (uap->options & ~(WNOHANG|WNOWAIT|WCONTINUED|WSTOPPED|WEXITED))) 1361 return (EINVAL); /* bits set that aren't recognized */ 1362 1363 switch (uap->idtype) { 1364 case P_PID: /* child with process ID equal to... */ 1365 case P_PGID: /* child with process group ID equal to... */ 1366 if (((int)uap->id) < 0) 1367 return (EINVAL); 1368 break; 1369 case P_ALL: /* any child */ 1370 break; 1371 } 1372 1373loop: 1374 proc_list_lock(); 1375loop1: 1376 nfound = 0; 1377 for (p = q->p_children.lh_first; p != 0; p = p->p_sibling.le_next) { 1378 1379 switch (uap->idtype) { 1380 case P_PID: /* child with process ID equal to... */ 1381 if (p->p_pid != (pid_t)uap->id) 1382 continue; 1383 break; 1384 case P_PGID: /* child with process group ID equal to... */ 1385 if (p->p_pgrpid != (pid_t)uap->id) 1386 continue; 1387 break; 1388 case P_ALL: /* any child */ 1389 break; 1390 } 1391 1392 /* XXX This is racy because we don't get the lock!!!! */ 1393 1394 /* 1395 * Wait collision; go to sleep and restart; used to maintain 1396 * the single return for waited process guarantee. 1397 */ 1398 if (p->p_listflag & P_LIST_WAITING) { 1399 (void) msleep(&p->p_stat, proc_list_mlock, 1400 PWAIT, "waitidcoll", 0); 1401 goto loop1; 1402 } 1403 p->p_listflag |= P_LIST_WAITING; /* mark busy */ 1404 1405 nfound++; 1406 1407 bzero(&siginfo, sizeof (siginfo)); 1408 1409 switch (p->p_stat) { 1410 case SZOMB: /* Exited */ 1411 if (!(uap->options & WEXITED)) 1412 break; 1413 proc_list_unlock(); 1414#if CONFIG_MACF 1415 if ((error = mac_proc_check_wait(q, p)) != 0) 1416 goto out; 1417#endif 1418 siginfo.si_signo = SIGCHLD; 1419 siginfo.si_pid = p->p_pid; 1420 siginfo.si_status = WEXITSTATUS(p->p_xstat); 1421 if (WIFSIGNALED(p->p_xstat)) { 1422 siginfo.si_code = WCOREDUMP(p->p_xstat) ? 1423 CLD_DUMPED : CLD_KILLED; 1424 } else 1425 siginfo.si_code = CLD_EXITED; 1426 1427 if ((error = copyoutsiginfo(&siginfo, 1428 caller64, uap->infop)) != 0) 1429 goto out; 1430 1431 /* Prevent other process for waiting for this event? */ 1432 if (!(uap->options & WNOWAIT)) { 1433 (void) reap_child_locked(q, p, 0, 0, 0, 0); 1434 return (0); 1435 } 1436 goto out; 1437 1438 case SSTOP: /* Stopped */ 1439 /* 1440 * If we are not interested in stopped processes, then 1441 * ignore this one. 1442 */ 1443 if (!(uap->options & WSTOPPED)) 1444 break; 1445 1446 /* 1447 * If someone has already waited it, we lost a race 1448 * to be the one to return status. 
1449 */ 1450 if ((p->p_lflag & P_LWAITED) != 0) 1451 break; 1452 proc_list_unlock(); 1453#if CONFIG_MACF 1454 if ((error = mac_proc_check_wait(q, p)) != 0) 1455 goto out; 1456#endif 1457 siginfo.si_signo = SIGCHLD; 1458 siginfo.si_pid = p->p_pid; 1459 siginfo.si_status = p->p_xstat; /* signal number */ 1460 siginfo.si_code = CLD_STOPPED; 1461 1462 if ((error = copyoutsiginfo(&siginfo, 1463 caller64, uap->infop)) != 0) 1464 goto out; 1465 1466 /* Prevent other process for waiting for this event? */ 1467 if (!(uap->options & WNOWAIT)) { 1468 proc_lock(p); 1469 p->p_lflag |= P_LWAITED; 1470 proc_unlock(p); 1471 } 1472 goto out; 1473 1474 default: /* All other states => Continued */ 1475 if (!(uap->options & WCONTINUED)) 1476 break; 1477 1478 /* 1479 * If the flag isn't set, then this process has not 1480 * been stopped and continued, or the status has 1481 * already been reaped by another caller of waitid(). 1482 */ 1483 if ((p->p_flag & P_CONTINUED) == 0) 1484 break; 1485 proc_list_unlock(); 1486#if CONFIG_MACF 1487 if ((error = mac_proc_check_wait(q, p)) != 0) 1488 goto out; 1489#endif 1490 siginfo.si_signo = SIGCHLD; 1491 siginfo.si_code = CLD_CONTINUED; 1492 proc_lock(p); 1493 siginfo.si_pid = p->p_contproc; 1494 siginfo.si_status = p->p_xstat; 1495 proc_unlock(p); 1496 1497 if ((error = copyoutsiginfo(&siginfo, 1498 caller64, uap->infop)) != 0) 1499 goto out; 1500 1501 /* Prevent other process for waiting for this event? */ 1502 if (!(uap->options & WNOWAIT)) { 1503 OSBitAndAtomic(~((uint32_t)P_CONTINUED), 1504 &p->p_flag); 1505 } 1506 goto out; 1507 } 1508 ASSERT_LCK_MTX_OWNED(proc_list_mlock); 1509 1510 /* Not a process we are interested in; go on to next child */ 1511 1512 p->p_listflag &= ~P_LIST_WAITING; 1513 wakeup(&p->p_stat); 1514 } 1515 ASSERT_LCK_MTX_OWNED(proc_list_mlock); 1516 1517 /* No child processes that could possibly satisfy the request? */ 1518 1519 if (nfound == 0) { 1520 proc_list_unlock(); 1521 return (ECHILD); 1522 } 1523 1524 if (uap->options & WNOHANG) { 1525 proc_list_unlock(); 1526#if CONFIG_MACF 1527 if ((error = mac_proc_check_wait(q, p)) != 0) 1528 return (error); 1529#endif 1530 /* 1531 * The state of the siginfo structure in this case 1532 * is undefined. Some implementations bzero it, some 1533 * (like here) leave it untouched for efficiency. 1534 * 1535 * Thus the most portable check for "no matching pid with 1536 * WNOHANG" is to store a zero into si_pid before 1537 * invocation, then check for a non-zero value afterwards. 1538 */ 1539 return (0); 1540 } 1541 1542 if ((error = msleep0(q, proc_list_mlock, 1543 PWAIT | PCATCH | PDROP, "waitid", 0, waitidcontinue)) != 0) 1544 return (error); 1545 1546 goto loop; 1547out: 1548 proc_list_lock(); 1549 p->p_listflag &= ~P_LIST_WAITING; 1550 wakeup(&p->p_stat); 1551 proc_list_unlock(); 1552 return (error); 1553} 1554 1555/* 1556 * make process 'parent' the new parent of process 'child'. 

/*
 * make process 'parent' the new parent of process 'child'.
 */
void
proc_reparentlocked(proc_t child, proc_t parent, int cansignal, int locked)
{
	proc_t oldparent = PROC_NULL;

	if (child->p_pptr == parent)
		return;

	if (locked == 0)
		proc_list_lock();

	oldparent = child->p_pptr;
#if __PROC_INTERNAL_DEBUG
	if (oldparent == PROC_NULL)
		panic("proc_reparent: process %p does not have a parent\n", child);
#endif

	LIST_REMOVE(child, p_sibling);
#if __PROC_INTERNAL_DEBUG
	if (oldparent->p_childrencnt == 0)
		panic("process children count already 0\n");
#endif
	oldparent->p_childrencnt--;
#if __PROC_INTERNAL_DEBUG1
	if (oldparent->p_childrencnt < 0)
		panic("process children count -ve\n");
#endif
	LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);
	parent->p_childrencnt++;
	child->p_pptr = parent;
	child->p_ppid = parent->p_pid;

	proc_list_unlock();

	if ((cansignal != 0) && (initproc == parent) && (child->p_stat == SZOMB))
		psignal(initproc, SIGCHLD);
	if (locked == 1)
		proc_list_lock();
}

/*
 * Exit: deallocate address space and other resources, change proc state
 * to zombie, and unlink proc from allproc and parent's lists.  Save exit
 * status and rusage for wait().  Check for child processes and orphan them.
 */

void
vfork_exit(proc_t p, int rv)
{
	vfork_exit_internal(p, rv, 0);
}

void
vfork_exit_internal(proc_t p, int rv, int forceexit)
{
	thread_t self = current_thread();
#ifdef FIXME
	struct task *task = p->task;
#endif
	struct uthread *ut;

	/*
	 * If a thread in this task has already
	 * called exit(), then halt any others
	 * right here.
	 */

	ut = get_bsdthread_info(self);


	proc_lock(p);
	if ((p->p_lflag & P_LPEXIT) == P_LPEXIT) {
		/*
		 * This can happen when a parent exits or is killed while a
		 * vfork is still in progress in other threads, but shutdown
		 * code, for example, has already called exit1().
		 */
		proc_unlock(p);
		return;
	}
	p->p_lflag |= (P_LEXIT | P_LPEXIT);
	proc_unlock(p);

	if (forceexit == 0) {
		/*
		 * parent of a vfork child has already called exit() and the
		 * thread that has vfork in progress terminates.  So there is no
		 * separate address space here and it has already been marked for
		 * termination.  This was never covered before and could cause problems
		 * if we block here for outside code.
		 */
		/* Notify the perf server */
		(void)sys_perf_notify(self, p->p_pid);
	}

	/*
	 * Remove proc from allproc queue and from pidhash chain.
	 * Need to do this before we do anything that can block.
	 * Not doing so causes things like mount() to find this proc
	 * on allproc in a partially cleaned state.
	 */

	proc_list_lock();

	LIST_REMOVE(p, p_list);
	LIST_INSERT_HEAD(&zombproc, p, p_list);	/* Place onto zombproc. */
	/* will not be visible via proc_find */
	p->p_listflag |= P_LIST_EXITED;

	proc_list_unlock();

	proc_lock(p);
	p->p_xstat = rv;
	p->p_lflag &= ~(P_LTRACED | P_LPPWAIT);
	p->p_sigignore = ~0;
	proc_unlock(p);

	proc_spinlock(p);
	if (thread_call_cancel(p->p_rcall))
		p->p_ractive--;

	while (p->p_ractive > 0) {
		proc_spinunlock(p);

		delay(1);

		proc_spinlock(p);
	}
	proc_spinunlock(p);

	thread_call_free(p->p_rcall);
	p->p_rcall = NULL;

	ut->uu_siglist = 0;

	vproc_exit(p);
}

void
vproc_exit(proc_t p)
{
	proc_t q;
	proc_t pp;

	vnode_t tvp;
#ifdef FIXME
	struct task *task = p->task;
#endif
	struct pgrp * pg;
	struct session *sessp;

	/* XXX Zombie allocation may fail, in which case stats get lost */
	MALLOC_ZONE(p->p_ru, struct rusage *,
			sizeof (*p->p_ru), M_ZOMBIE, M_WAITOK);


	proc_refdrain(p);

	/*
	 * Close open files and release open-file table.
	 * This may block!
	 */
	fdfree(p);

#if !CONFIG_EMBEDDED
	if (p->p_legacy_behavior & PROC_LEGACY_BEHAVIOR_IOTHROTTLE) {
		throttle_legacy_process_decr();
	}
#endif

	sessp = proc_session(p);
	if (SESS_LEADER(p, sessp)) {

		if (sessp->s_ttyvp != NULLVP) {
			struct vnode *ttyvp;
			int ttyvid;
			struct vfs_context context;
			struct tty *tp;

			/*
			 * Controlling process.
			 * Signal foreground pgrp,
			 * drain controlling terminal
			 * and revoke access to controlling terminal.
			 */
			session_lock(sessp);
			tp = SESSION_TP(sessp);
			if ((tp != TTY_NULL) && (tp->t_session == sessp)) {
				session_unlock(sessp);

				tty_pgsignal(tp, SIGHUP, 1);

				session_lock(sessp);
				tp = SESSION_TP(sessp);
			}
			ttyvp = sessp->s_ttyvp;
			ttyvid = sessp->s_ttyvid;
			sessp->s_ttyvp = NULL;
			sessp->s_ttyvid = 0;
			sessp->s_ttyp = TTY_NULL;
			sessp->s_ttypgrpid = NO_PID;
			session_unlock(sessp);

			if ((ttyvp != NULLVP) && (vnode_getwithvid(ttyvp, ttyvid) == 0)) {
				if (tp != TTY_NULL) {
					tty_lock(tp);
					(void) ttywait(tp);
					tty_unlock(tp);
				}
				context.vc_thread = proc_thread(p); /* XXX */
				context.vc_ucred = kauth_cred_proc_ref(p);
				vnode_rele(ttyvp);
				VNOP_REVOKE(ttyvp, REVOKEALL, &context);
				vnode_put(ttyvp);
				kauth_cred_unref(&context.vc_ucred);
				ttyvp = NULLVP;
			}
			if (ttyvp)
				vnode_rele(ttyvp);
			if (tp)
				ttyfree(tp);
		}
		session_lock(sessp);
		sessp->s_leader = NULL;
		session_unlock(sessp);
	}
	session_rele(sessp);

	pg = proc_pgrp(p);
	fixjobc(p, pg, 0);
	pg_rele(pg);

	p->p_rlimit[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;

	proc_list_lock();
	proc_childdrainstart(p);
	while ((q = p->p_children.lh_first) != NULL) {
		q->p_listflag |= P_LIST_DEADPARENT;
		if (q->p_stat == SZOMB) {
			if (p != q->p_pptr)
				panic("parent child linkage broken");
			/* check for lookups by zomb sysctl */
			while ((q->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) {
				msleep(&q->p_stat, proc_list_mlock, PWAIT, "waitcoll", 0);
			}
			q->p_listflag |= P_LIST_WAITING;
			/*
			 * This is a named reference and it is not granted
			 * if the reap is already in progress.  So we get
			 * the reference here exclusively and there can be
			 * no waiters.  So there is no need for a wakeup
			 * after we are done.  Also the reap frees the structure
			 * and the proc struct cannot be used for wakeups as well.
			 * It is safe to use q here as this is a system reap.
			 */
			(void)reap_child_locked(p, q, 1, 0, 1, 0);
		} else {
			proc_reparentlocked(q, initproc, 0, 1);
			/*
			 * Traced processes are killed
			 * since their existence means someone is messing up.
			 */
			if (q->p_lflag & P_LTRACED) {
				proc_list_unlock();
				proc_lock(q);
				q->p_lflag &= ~P_LTRACED;
				if (q->sigwait_thread) {
					thread_t thread = q->sigwait_thread;

					proc_unlock(q);
					/*
					 * The sigwait_thread could be stopped at a
					 * breakpoint.  Wake it up to kill.
					 * Need to do this as it could be a thread which is not
					 * the first thread in the task.  So any attempts to kill
					 * the process would result in a deadlock on q->sigwait.
					 */
					thread_resume(thread);
					clear_wait(thread, THREAD_INTERRUPTED);
					threadsignal(thread, SIGKILL, 0);
				} else {
					proc_unlock(q);
				}

				psignal(q, SIGKILL);
				proc_list_lock();
			}
		}
	}

	proc_childdrainend(p);
	proc_list_unlock();

	/*
	 * Release reference to text vnode
	 */
	tvp = p->p_textvp;
	p->p_textvp = NULL;
	if (tvp != NULLVP) {
		vnode_rele(tvp);
	}

	/*
	 * Save exit status and final rusage info, adding in child rusage
	 * info and self times.  If we were unable to allocate a zombie
	 * structure, this information is lost.
	 */
	/* No need for locking here as no one other than this thread can access this */
	if (p->p_ru != NULL) {
		*p->p_ru = p->p_stats->p_ru;
		timerclear(&p->p_ru->ru_utime);
		timerclear(&p->p_ru->ru_stime);

#ifdef FIXME
		if (task) {
			mach_task_basic_info_data_t tinfo;
			task_thread_times_info_data_t ttimesinfo;
			int task_info_stuff, task_ttimes_stuff;
			struct timeval ut,st;

			task_info_stuff	= MACH_TASK_BASIC_INFO_COUNT;
			task_info(task, MACH_TASK_BASIC_INFO,
				  &tinfo, &task_info_stuff);
			p->p_ru->ru_utime.tv_sec = tinfo.user_time.seconds;
			p->p_ru->ru_utime.tv_usec = tinfo.user_time.microseconds;
			p->p_ru->ru_stime.tv_sec = tinfo.system_time.seconds;
			p->p_ru->ru_stime.tv_usec = tinfo.system_time.microseconds;

			task_ttimes_stuff = TASK_THREAD_TIMES_INFO_COUNT;
			task_info(task, TASK_THREAD_TIMES_INFO,
				  &ttimesinfo, &task_ttimes_stuff);

			ut.tv_sec = ttimesinfo.user_time.seconds;
			ut.tv_usec = ttimesinfo.user_time.microseconds;
			st.tv_sec = ttimesinfo.system_time.seconds;
			st.tv_usec = ttimesinfo.system_time.microseconds;
			timeradd(&ut, &p->p_ru->ru_utime, &p->p_ru->ru_utime);
			timeradd(&st, &p->p_ru->ru_stime, &p->p_ru->ru_stime);
		}
#endif /* FIXME */

		ruadd(p->p_ru, &p->p_stats->p_cru);
	}

	/*
	 * Free up profiling buffers.
	 */
	{
		struct uprof *p0 = &p->p_stats->p_prof, *p1, *pn;

		p1 = p0->pr_next;
		p0->pr_next = NULL;
		p0->pr_scale = 0;

		for (; p1 != NULL; p1 = pn) {
			pn = p1->pr_next;
			kfree(p1, sizeof *p1);
		}
	}

#if PSYNCH
	pth_proc_hashdelete(p);
#endif /* PSYNCH */

	/*
	 * Other substructures are freed from wait().
	 */
1923 */ 1924 FREE_ZONE(p->p_stats, sizeof *p->p_stats, M_PSTATS); 1925 p->p_stats = NULL; 1926 1927 FREE_ZONE(p->p_sigacts, sizeof *p->p_sigacts, M_SIGACTS); 1928 p->p_sigacts = NULL; 1929 1930 proc_limitdrop(p, 1); 1931 p->p_limit = NULL; 1932 1933 /* 1934 * Finish up by terminating the task 1935 * and halt this thread (only if a 1936 * member of the task exiting). 1937 */ 1938 p->task = TASK_NULL; 1939 1940 /* 1941 * Notify parent that we're gone. 1942 */ 1943 pp = proc_parent(p); 1944 if ((p->p_listflag & P_LIST_DEADPARENT) == 0) { 1945 if (pp != initproc) { 1946 proc_lock(pp); 1947 pp->si_pid = p->p_pid; 1948 pp->si_status = p->p_xstat; 1949 pp->si_code = CLD_EXITED; 1950 /* 1951 * p_ucred usage is safe as it is an exiting process 1952 * and reference is dropped in reap 1953 */ 1954 pp->si_uid = kauth_cred_getruid(p->p_ucred); 1955 proc_unlock(pp); 1956 } 1957 /* mark as a zombie */ 1958 /* mark as a zombie */ 1959 /* No need to take proc lock as all refs are drained and 1960 * no one except parent (reaping ) can look at this. 1961 * The write is to an int and is coherent. Also parent is 1962 * keyed off of list lock for reaping 1963 */ 1964 p->p_stat = SZOMB; 1965 1966 psignal(pp, SIGCHLD); 1967 1968 /* and now wakeup the parent */ 1969 proc_list_lock(); 1970 wakeup((caddr_t)pp); 1971 proc_list_unlock(); 1972 } else { 1973 proc_list_lock(); 1974 /* check for lookups by zomb sysctl */ 1975 while ((p->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) { 1976 msleep(&p->p_stat, proc_list_mlock, PWAIT, "waitcoll", 0); 1977 } 1978 p->p_stat = SZOMB; 1979 p->p_listflag |= P_LIST_WAITING; 1980 1981 /* 1982 * This is a named reference and it is not granted 1983 * if the reap is already in progress. So we get 1984 * the reference here exclusively and their can be 1985 * no waiters. So there is no need for a wakeup 1986 * after we are done. AlsO the reap frees the structure 1987 * and the proc struct cannot be used for wakeups as well. 1988 * It is safe to use p here as this is system reap 1989 */ 1990 (void)reap_child_locked(pp, p, 0, 0, 1, 1); 1991 /* list lock dropped by reap_child_locked */ 1992 } 1993 proc_rele(pp); 1994} 1995 1996 1997/* 1998 * munge_rusage 1999 * LP64 support - long is 64 bits if we are dealing with a 64 bit user 2000 * process. We munge the kernel version of rusage into the 2001 * 64 bit version. 2002 */ 2003__private_extern__ void 2004munge_user64_rusage(struct rusage *a_rusage_p, struct user64_rusage *a_user_rusage_p) 2005{ 2006 /* timeval changes size, so utime and stime need special handling */ 2007 a_user_rusage_p->ru_utime.tv_sec = a_rusage_p->ru_utime.tv_sec; 2008 a_user_rusage_p->ru_utime.tv_usec = a_rusage_p->ru_utime.tv_usec; 2009 a_user_rusage_p->ru_stime.tv_sec = a_rusage_p->ru_stime.tv_sec; 2010 a_user_rusage_p->ru_stime.tv_usec = a_rusage_p->ru_stime.tv_usec; 2011 /* 2012 * everything else can be a direct assign, since there is no loss 2013 * of precision implied boing 32->64. 
	a_user_rusage_p->ru_maxrss = a_rusage_p->ru_maxrss;
	a_user_rusage_p->ru_ixrss = a_rusage_p->ru_ixrss;
	a_user_rusage_p->ru_idrss = a_rusage_p->ru_idrss;
	a_user_rusage_p->ru_isrss = a_rusage_p->ru_isrss;
	a_user_rusage_p->ru_minflt = a_rusage_p->ru_minflt;
	a_user_rusage_p->ru_majflt = a_rusage_p->ru_majflt;
	a_user_rusage_p->ru_nswap = a_rusage_p->ru_nswap;
	a_user_rusage_p->ru_inblock = a_rusage_p->ru_inblock;
	a_user_rusage_p->ru_oublock = a_rusage_p->ru_oublock;
	a_user_rusage_p->ru_msgsnd = a_rusage_p->ru_msgsnd;
	a_user_rusage_p->ru_msgrcv = a_rusage_p->ru_msgrcv;
	a_user_rusage_p->ru_nsignals = a_rusage_p->ru_nsignals;
	a_user_rusage_p->ru_nvcsw = a_rusage_p->ru_nvcsw;
	a_user_rusage_p->ru_nivcsw = a_rusage_p->ru_nivcsw;
}

/* For a 64-bit kernel and 32-bit userspace, munging may be needed */
__private_extern__ void
munge_user32_rusage(struct rusage *a_rusage_p, struct user32_rusage *a_user_rusage_p)
{
	/* timeval changes size, so utime and stime need special handling */
	a_user_rusage_p->ru_utime.tv_sec = a_rusage_p->ru_utime.tv_sec;
	a_user_rusage_p->ru_utime.tv_usec = a_rusage_p->ru_utime.tv_usec;
	a_user_rusage_p->ru_stime.tv_sec = a_rusage_p->ru_stime.tv_sec;
	a_user_rusage_p->ru_stime.tv_usec = a_rusage_p->ru_stime.tv_usec;
	/*
	 * everything else can be a direct assign.  We currently ignore
	 * the loss of precision.
	 */
	a_user_rusage_p->ru_maxrss = a_rusage_p->ru_maxrss;
	a_user_rusage_p->ru_ixrss = a_rusage_p->ru_ixrss;
	a_user_rusage_p->ru_idrss = a_rusage_p->ru_idrss;
	a_user_rusage_p->ru_isrss = a_rusage_p->ru_isrss;
	a_user_rusage_p->ru_minflt = a_rusage_p->ru_minflt;
	a_user_rusage_p->ru_majflt = a_rusage_p->ru_majflt;
	a_user_rusage_p->ru_nswap = a_rusage_p->ru_nswap;
	a_user_rusage_p->ru_inblock = a_rusage_p->ru_inblock;
	a_user_rusage_p->ru_oublock = a_rusage_p->ru_oublock;
	a_user_rusage_p->ru_msgsnd = a_rusage_p->ru_msgsnd;
	a_user_rusage_p->ru_msgrcv = a_rusage_p->ru_msgrcv;
	a_user_rusage_p->ru_nsignals = a_rusage_p->ru_nsignals;
	a_user_rusage_p->ru_nvcsw = a_rusage_p->ru_nvcsw;
	a_user_rusage_p->ru_nivcsw = a_rusage_p->ru_nivcsw;
}