/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_resource.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/file.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sx.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/time.h>
#include <sys/umtx.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

static MALLOC_DEFINE(M_PLIMIT, "plimit", "plimit structures");
static MALLOC_DEFINE(M_UIDINFO, "uidinfo", "uidinfo structures");
#define	UIHASH(uid)	(&uihashtbl[(uid) & uihash])
static struct rwlock uihashtbl_lock;
static LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
static u_long uihash;		/* size of hash table - 1 */

static void	calcru1(struct proc *p, struct rusage_ext *ruxp,
		    struct timeval *up, struct timeval *sp);
static int	donice(struct thread *td, struct proc *chgp, int n);
static struct uidinfo *uilookup(uid_t uid);
static void	ruxagg_locked(struct rusage_ext *rux, struct thread *td);

/*
 * Resource controls and accounting.
 */
#ifndef _SYS_SYSPROTO_H_
struct getpriority_args {
	int	which;
	int	who;
};
#endif
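/*
 * getpriority(2): report the lowest nice value (i.e., the most
 * favorable scheduling priority) among the processes selected by
 * "which" and "who".  A sketch of the userland view, assuming the
 * standard libc wrapper:
 *
 *	errno = 0;
 *	prio = getpriority(PRIO_PROCESS, 0);
 *	if (prio == -1 && errno != 0)
 *		err(1, "getpriority");
 *
 * The errno dance is needed because -1 is itself a valid nice value.
 */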
int
sys_getpriority(struct thread *td, struct getpriority_args *uap)
{
	struct proc *p;
	struct pgrp *pg;
	int error, low;

	error = 0;
	low = PRIO_MAX + 1;
	switch (uap->which) {
	case PRIO_PROCESS:
		if (uap->who == 0)
			low = td->td_proc->p_nice;
		else {
			p = pfind(uap->who);
			if (p == NULL)
				break;
			if (p_cansee(td, p) == 0)
				low = p->p_nice;
			PROC_UNLOCK(p);
		}
		break;

	case PRIO_PGRP:
		sx_slock(&proctree_lock);
		if (uap->who == 0) {
			pg = td->td_proc->p_pgrp;
			PGRP_LOCK(pg);
		} else {
			pg = pgfind(uap->who);
			if (pg == NULL) {
				sx_sunlock(&proctree_lock);
				break;
			}
		}
		sx_sunlock(&proctree_lock);
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    p_cansee(td, p) == 0) {
				if (p->p_nice < low)
					low = p->p_nice;
			}
			PROC_UNLOCK(p);
		}
		PGRP_UNLOCK(pg);
		break;

	case PRIO_USER:
		if (uap->who == 0)
			uap->who = td->td_ucred->cr_uid;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    p_cansee(td, p) == 0 &&
			    p->p_ucred->cr_uid == uap->who) {
				if (p->p_nice < low)
					low = p->p_nice;
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		break;

	default:
		error = EINVAL;
		break;
	}
	if (low == PRIO_MAX + 1 && error == 0)
		error = ESRCH;
	td->td_retval[0] = low;
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct setpriority_args {
	int	which;
	int	who;
	int	prio;
};
#endif
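/*
 * setpriority(2): set the nice value for every process selected by
 * "which" and "who"; ESRCH is reported only when nothing at all
 * matched.  For example (assuming the standard libc wrapper):
 *
 *	if (setpriority(PRIO_PGRP, 0, 10) == -1)
 *		err(1, "setpriority");
 */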
int
sys_setpriority(struct thread *td, struct setpriority_args *uap)
{
	struct proc *curp, *p;
	struct pgrp *pg;
	int found = 0, error = 0;

	curp = td->td_proc;
	switch (uap->which) {
	case PRIO_PROCESS:
		if (uap->who == 0) {
			PROC_LOCK(curp);
			error = donice(td, curp, uap->prio);
			PROC_UNLOCK(curp);
		} else {
			p = pfind(uap->who);
			if (p == NULL)
				break;
			error = p_cansee(td, p);
			if (error == 0)
				error = donice(td, p, uap->prio);
			PROC_UNLOCK(p);
		}
		found++;
		break;

	case PRIO_PGRP:
		sx_slock(&proctree_lock);
		if (uap->who == 0) {
			pg = curp->p_pgrp;
			PGRP_LOCK(pg);
		} else {
			pg = pgfind(uap->who);
			if (pg == NULL) {
				sx_sunlock(&proctree_lock);
				break;
			}
		}
		sx_sunlock(&proctree_lock);
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    p_cansee(td, p) == 0) {
				error = donice(td, p, uap->prio);
				found++;
			}
			PROC_UNLOCK(p);
		}
		PGRP_UNLOCK(pg);
		break;

	case PRIO_USER:
		if (uap->who == 0)
			uap->who = td->td_ucred->cr_uid;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    p->p_ucred->cr_uid == uap->who &&
			    p_cansee(td, p) == 0) {
				error = donice(td, p, uap->prio);
				found++;
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		break;

	default:
		error = EINVAL;
		break;
	}
	if (found == 0 && error == 0)
		error = ESRCH;
	return (error);
}

/*
 * Set "nice" for a (whole) process.
 */
static int
donice(struct thread *td, struct proc *p, int n)
{
	int error;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if ((error = p_cansched(td, p)))
		return (error);
	if (n > PRIO_MAX)
		n = PRIO_MAX;
	if (n < PRIO_MIN)
		n = PRIO_MIN;
	if (n < p->p_nice && priv_check(td, PRIV_SCHED_SETPRIORITY) != 0)
		return (EACCES);
	sched_nice(p, n);
	return (0);
}

static int unprivileged_idprio;
SYSCTL_INT(_security_bsd, OID_AUTO, unprivileged_idprio, CTLFLAG_RW,
    &unprivileged_idprio, 0, "Allow non-root users to set an idle priority");
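
/*
 * The knob can be flipped at runtime, e.g. (assuming the standard
 * sysctl(8) utility):
 *
 *	sysctl security.bsd.unprivileged_idprio=1
 */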

/*
 * Set realtime priority for LWP.
 */
#ifndef _SYS_SYSPROTO_H_
struct rtprio_thread_args {
	int		function;
	lwpid_t		lwpid;
	struct rtprio	*rtp;
};
#endif
int
sys_rtprio_thread(struct thread *td, struct rtprio_thread_args *uap)
{
	struct proc *p;
	struct rtprio rtp;
	struct thread *td1;
	int cierror, error;

	/* Perform copyin before acquiring locks if needed. */
	if (uap->function == RTP_SET)
		cierror = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
	else
		cierror = 0;

	if (uap->lwpid == 0 || uap->lwpid == td->td_tid) {
		p = td->td_proc;
		td1 = td;
		PROC_LOCK(p);
	} else {
		td1 = tdfind(uap->lwpid, -1);
		if (td1 == NULL)
			return (ESRCH);
		p = td1->td_proc;
	}

	switch (uap->function) {
	case RTP_LOOKUP:
		if ((error = p_cansee(td, p)))
			break;
		pri_to_rtp(td1, &rtp);
		PROC_UNLOCK(p);
		return (copyout(&rtp, uap->rtp, sizeof(struct rtprio)));
	case RTP_SET:
		if ((error = p_cansched(td, p)) || (error = cierror))
			break;

		/* Disallow setting rtprio in most cases if not superuser. */

		/*
		 * Realtime priority has to be restricted for reasons which
		 * should be obvious.  However, for idleprio processes, there is
		 * a potential for system deadlock if an idleprio process gains
		 * a lock on a resource that other processes need (and the
		 * idleprio process can't run due to a CPU-bound normal
		 * process).  Fix me!  XXX
		 *
		 * This problem is not only related to idleprio processes.
		 * A user level program can obtain a file lock and hold it
		 * indefinitely.  Additionally, without idleprio processes it is
		 * still conceivable that a program with low priority will never
		 * get to run.  In short, allowing this feature might make it
		 * easier to lock a resource indefinitely, but it is not the
		 * only thing that makes it possible.
		 */
		if (RTP_PRIO_BASE(rtp.type) == RTP_PRIO_REALTIME ||
		    (RTP_PRIO_BASE(rtp.type) == RTP_PRIO_IDLE &&
		    unprivileged_idprio == 0)) {
			error = priv_check(td, PRIV_SCHED_RTPRIO);
			if (error)
				break;
		}
		error = rtp_to_pri(&rtp, td1);
		break;
	default:
		error = EINVAL;
		break;
	}
	PROC_UNLOCK(p);
	return (error);
}

/*
 * Set realtime priority.
 */
#ifndef _SYS_SYSPROTO_H_
struct rtprio_args {
	int		function;
	pid_t		pid;
	struct rtprio	*rtp;
};
#endif
int
sys_rtprio(struct thread *td, struct rtprio_args *uap)
{
	struct proc *p;
	struct thread *tdp;
	struct rtprio rtp;
	int cierror, error;

	/* Perform copyin before acquiring locks if needed. */
	if (uap->function == RTP_SET)
		cierror = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
	else
		cierror = 0;

	if (uap->pid == 0) {
		p = td->td_proc;
		PROC_LOCK(p);
	} else {
		p = pfind(uap->pid);
		if (p == NULL)
			return (ESRCH);
	}

	switch (uap->function) {
	case RTP_LOOKUP:
		if ((error = p_cansee(td, p)))
			break;
		/*
		 * Return OUR priority if no pid specified,
		 * or if one is, report the highest priority
		 * in the process.  There isn't much more you can do as
		 * there is only room to return a single priority.
		 * Note: specifying our own pid is not the same
		 * as leaving it zero.
		 */
		if (uap->pid == 0) {
			pri_to_rtp(td, &rtp);
		} else {
			struct rtprio rtp2;

			rtp.type = RTP_PRIO_IDLE;
			rtp.prio = RTP_PRIO_MAX;
			FOREACH_THREAD_IN_PROC(p, tdp) {
				pri_to_rtp(tdp, &rtp2);
				if (rtp2.type <  rtp.type ||
				    (rtp2.type == rtp.type &&
				    rtp2.prio < rtp.prio)) {
					rtp.type = rtp2.type;
					rtp.prio = rtp2.prio;
				}
			}
		}
		PROC_UNLOCK(p);
		return (copyout(&rtp, uap->rtp, sizeof(struct rtprio)));
	case RTP_SET:
		if ((error = p_cansched(td, p)) || (error = cierror))
			break;

		/*
		 * Disallow setting rtprio in most cases if not superuser.
		 * See the comment in sys_rtprio_thread about idprio
		 * threads holding a lock.
		 */
		if (RTP_PRIO_BASE(rtp.type) == RTP_PRIO_REALTIME ||
		    (RTP_PRIO_BASE(rtp.type) == RTP_PRIO_IDLE &&
		    !unprivileged_idprio)) {
			error = priv_check(td, PRIV_SCHED_RTPRIO);
			if (error)
				break;
		}

		/*
		 * If we are setting our own priority, set just our
		 * thread; when operating on another process, set all
		 * of its threads.  Note that specifying our own pid
		 * rather than zero also takes the latter path.
		 */
		if (uap->pid == 0) {
			error = rtp_to_pri(&rtp, td);
		} else {
			FOREACH_THREAD_IN_PROC(p, td) {
				if ((error = rtp_to_pri(&rtp, td)) != 0)
					break;
			}
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	PROC_UNLOCK(p);
	return (error);
}

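/*
 * Translate an rtprio request into the kernel's global priority space
 * and apply it to the given thread, returning EINVAL for an unknown
 * class or an out-of-range class-relative priority.
 */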
int
rtp_to_pri(struct rtprio *rtp, struct thread *td)
{
	u_char  newpri, oldclass, oldpri;

	switch (RTP_PRIO_BASE(rtp->type)) {
	case RTP_PRIO_REALTIME:
		if (rtp->prio > RTP_PRIO_MAX)
			return (EINVAL);
		newpri = PRI_MIN_REALTIME + rtp->prio;
		break;
	case RTP_PRIO_NORMAL:
		if (rtp->prio > (PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE))
			return (EINVAL);
		newpri = PRI_MIN_TIMESHARE + rtp->prio;
		break;
	case RTP_PRIO_IDLE:
		if (rtp->prio > RTP_PRIO_MAX)
			return (EINVAL);
		newpri = PRI_MIN_IDLE + rtp->prio;
		break;
	default:
		return (EINVAL);
	}

	thread_lock(td);
	oldclass = td->td_pri_class;
	sched_class(td, rtp->type);	/* XXX fix */
	oldpri = td->td_user_pri;
	sched_user_prio(td, newpri);
	if (td->td_user_pri != oldpri && (oldclass != RTP_PRIO_NORMAL ||
	    td->td_pri_class != RTP_PRIO_NORMAL))
		sched_prio(td, td->td_user_pri);
	if (TD_ON_UPILOCK(td) && oldpri != newpri) {
		critical_enter();
		thread_unlock(td);
		umtx_pi_adjust(td, oldpri);
		critical_exit();
	} else
		thread_unlock(td);
	return (0);
}

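/*
 * Translate a thread's base user priority back into rtprio form; the
 * inverse of rtp_to_pri() for the three known scheduling classes.
 */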
void
pri_to_rtp(struct thread *td, struct rtprio *rtp)
{

	thread_lock(td);
	switch (PRI_BASE(td->td_pri_class)) {
	case PRI_REALTIME:
		rtp->prio = td->td_base_user_pri - PRI_MIN_REALTIME;
		break;
	case PRI_TIMESHARE:
		rtp->prio = td->td_base_user_pri - PRI_MIN_TIMESHARE;
		break;
	case PRI_IDLE:
		rtp->prio = td->td_base_user_pri - PRI_MIN_IDLE;
		break;
	default:
		break;
	}
	rtp->type = td->td_pri_class;
	thread_unlock(td);
}

#if defined(COMPAT_43)
#ifndef _SYS_SYSPROTO_H_
struct osetrlimit_args {
	u_int	which;
	struct	orlimit *rlp;
};
#endif
int
osetrlimit(struct thread *td, struct osetrlimit_args *uap)
{
	struct orlimit olim;
	struct rlimit lim;
	int error;

	if ((error = copyin(uap->rlp, &olim, sizeof(struct orlimit))))
		return (error);
	lim.rlim_cur = olim.rlim_cur;
	lim.rlim_max = olim.rlim_max;
	error = kern_setrlimit(td, uap->which, &lim);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct ogetrlimit_args {
	u_int	which;
	struct	orlimit *rlp;
};
#endif
int
ogetrlimit(struct thread *td, struct ogetrlimit_args *uap)
{
	struct orlimit olim;
	struct rlimit rl;
	int error;

	if (uap->which >= RLIM_NLIMITS)
		return (EINVAL);
	lim_rlimit(td, uap->which, &rl);

	/*
	 * XXX would be more correct to convert only RLIM_INFINITY to the
	 * old RLIM_INFINITY and fail with EOVERFLOW for other larger
	 * values.  Most 64->32 and 32->16 conversions, including not
	 * unimportant ones of uids are even more broken than what we
	 * do here (they blindly truncate).  We don't do this correctly
	 * here since we have little experience with EOVERFLOW yet.
	 * Elsewhere, getuid() can't fail...
	 */
	olim.rlim_cur = rl.rlim_cur > 0x7fffffff ? 0x7fffffff : rl.rlim_cur;
	olim.rlim_max = rl.rlim_max > 0x7fffffff ? 0x7fffffff : rl.rlim_max;
	error = copyout(&olim, uap->rlp, sizeof(olim));
	return (error);
}
#endif /* COMPAT_43 */

#ifndef _SYS_SYSPROTO_H_
struct __setrlimit_args {
	u_int	which;
	struct	rlimit *rlp;
};
#endif
int
sys_setrlimit(struct thread *td, struct __setrlimit_args *uap)
{
	struct rlimit alim;
	int error;

	if ((error = copyin(uap->rlp, &alim, sizeof(struct rlimit))))
		return (error);
	error = kern_setrlimit(td, uap->which, &alim);
	return (error);
}

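/*
 * Callout handler that polices RLIMIT_CPU: it aggregates the runtime
 * of all threads, signals SIGXCPU once the soft limit is exceeded and
 * kills the process at the hard limit.  It is armed from
 * kern_proc_setrlimit() and lim_fork() and re-arms itself every second.
 */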
static void
lim_cb(void *arg)
{
	struct rlimit rlim;
	struct thread *td;
	struct proc *p;

	p = arg;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	/*
	 * Check if the process exceeds its cpu resource allocation.  If
	 * it reaches the max, arrange to kill the process in ast().
	 */
	if (p->p_cpulimit == RLIM_INFINITY)
		return;
	PROC_STATLOCK(p);
	FOREACH_THREAD_IN_PROC(p, td) {
		ruxagg(p, td);
	}
	PROC_STATUNLOCK(p);
	if (p->p_rux.rux_runtime > p->p_cpulimit * cpu_tickrate()) {
		lim_rlimit_proc(p, RLIMIT_CPU, &rlim);
		if (p->p_rux.rux_runtime >= rlim.rlim_max * cpu_tickrate()) {
			killproc(p, "exceeded maximum CPU limit");
		} else {
			if (p->p_cpulimit < rlim.rlim_max)
				p->p_cpulimit += 5;
			kern_psignal(p, SIGXCPU);
		}
	}
	if ((p->p_flag & P_WEXIT) == 0)
		callout_reset_sbt(&p->p_limco, SBT_1S, 0,
		    lim_cb, p, C_PREL(1));
}

int
kern_setrlimit(struct thread *td, u_int which, struct rlimit *limp)
{

	return (kern_proc_setrlimit(td, td->td_proc, which, limp));
}

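/*
 * Set resource limit "which" for process p.  Requests are clamped to
 * the corresponding system-wide ceiling (maxdsiz, maxssiz,
 * maxfilesperproc, ...), and changing RLIMIT_STACK also grows or
 * shrinks the accessible portion of the already-mapped stack.
 */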
int
kern_proc_setrlimit(struct thread *td, struct proc *p, u_int which,
    struct rlimit *limp)
{
	struct plimit *newlim, *oldlim;
	struct rlimit *alimp;
	struct rlimit oldssiz;
	int error;

	if (which >= RLIM_NLIMITS)
		return (EINVAL);

	/*
	 * Preserve historical bugs by treating negative limits as unsigned.
	 */
	if (limp->rlim_cur < 0)
		limp->rlim_cur = RLIM_INFINITY;
	if (limp->rlim_max < 0)
		limp->rlim_max = RLIM_INFINITY;

	oldssiz.rlim_cur = 0;
	newlim = lim_alloc();
	PROC_LOCK(p);
	oldlim = p->p_limit;
	alimp = &oldlim->pl_rlimit[which];
	if (limp->rlim_cur > alimp->rlim_max ||
	    limp->rlim_max > alimp->rlim_max)
		if ((error = priv_check(td, PRIV_PROC_SETRLIMIT))) {
			PROC_UNLOCK(p);
			lim_free(newlim);
			return (error);
		}
	if (limp->rlim_cur > limp->rlim_max)
		limp->rlim_cur = limp->rlim_max;
	lim_copy(newlim, oldlim);
	alimp = &newlim->pl_rlimit[which];

	switch (which) {
	case RLIMIT_CPU:
		if (limp->rlim_cur != RLIM_INFINITY &&
		    p->p_cpulimit == RLIM_INFINITY)
			callout_reset_sbt(&p->p_limco, SBT_1S, 0,
			    lim_cb, p, C_PREL(1));
		p->p_cpulimit = limp->rlim_cur;
		break;
	case RLIMIT_DATA:
		if (limp->rlim_cur > maxdsiz)
			limp->rlim_cur = maxdsiz;
		if (limp->rlim_max > maxdsiz)
			limp->rlim_max = maxdsiz;
		break;

	case RLIMIT_STACK:
		if (limp->rlim_cur > maxssiz)
			limp->rlim_cur = maxssiz;
		if (limp->rlim_max > maxssiz)
			limp->rlim_max = maxssiz;
		oldssiz = *alimp;
		if (p->p_sysent->sv_fixlimit != NULL)
			p->p_sysent->sv_fixlimit(&oldssiz,
			    RLIMIT_STACK);
		break;

	case RLIMIT_NOFILE:
		if (limp->rlim_cur > maxfilesperproc)
			limp->rlim_cur = maxfilesperproc;
		if (limp->rlim_max > maxfilesperproc)
			limp->rlim_max = maxfilesperproc;
		break;

	case RLIMIT_NPROC:
		if (limp->rlim_cur > maxprocperuid)
			limp->rlim_cur = maxprocperuid;
		if (limp->rlim_max > maxprocperuid)
			limp->rlim_max = maxprocperuid;
		if (limp->rlim_cur < 1)
			limp->rlim_cur = 1;
		if (limp->rlim_max < 1)
			limp->rlim_max = 1;
		break;
	}
	if (p->p_sysent->sv_fixlimit != NULL)
		p->p_sysent->sv_fixlimit(limp, which);
	*alimp = *limp;
	p->p_limit = newlim;
	PROC_UPDATE_COW(p);
	PROC_UNLOCK(p);
	lim_free(oldlim);

	if (which == RLIMIT_STACK &&
	    /*
	     * Skip calls from exec_new_vmspace(), done when stack is
	     * not mapped yet.
	     */
	    (td != curthread || (p->p_flag & P_INEXEC) == 0)) {
		/*
		 * Stack is allocated to the max at exec time with only
		 * "rlim_cur" bytes accessible.  If the stack limit is going
		 * up, make more accessible; if going down, make inaccessible.
		 */
		if (limp->rlim_cur != oldssiz.rlim_cur) {
			vm_offset_t addr;
			vm_size_t size;
			vm_prot_t prot;

			if (limp->rlim_cur > oldssiz.rlim_cur) {
				prot = p->p_sysent->sv_stackprot;
				size = limp->rlim_cur - oldssiz.rlim_cur;
				addr = p->p_sysent->sv_usrstack -
				    limp->rlim_cur;
			} else {
				prot = VM_PROT_NONE;
				size = oldssiz.rlim_cur - limp->rlim_cur;
				addr = p->p_sysent->sv_usrstack -
				    oldssiz.rlim_cur;
			}
			addr = trunc_page(addr);
			size = round_page(size);
			(void)vm_map_protect(&p->p_vmspace->vm_map,
			    addr, addr + size, prot, FALSE);
		}
	}

	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct __getrlimit_args {
	u_int	which;
	struct	rlimit *rlp;
};
#endif
/* ARGSUSED */
int
sys_getrlimit(struct thread *td, struct __getrlimit_args *uap)
{
	struct rlimit rlim;
	int error;

	if (uap->which >= RLIM_NLIMITS)
		return (EINVAL);
	lim_rlimit(td, uap->which, &rlim);
	error = copyout(&rlim, uap->rlp, sizeof(struct rlimit));
	return (error);
}

/*
 * Transform the running time and tick information for children of proc p
 * into user and system time usage.
 */
void
calccru(struct proc *p, struct timeval *up, struct timeval *sp)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	calcru1(p, &p->p_crux, up, sp);
}

/*
 * Transform the running time and tick information in proc p into user
 * and system time usage.  If appropriate, include the current time slice
 * on this CPU.
 */
void
calcru(struct proc *p, struct timeval *up, struct timeval *sp)
{
	struct thread *td;
	uint64_t runtime, u;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_STATLOCK_ASSERT(p, MA_OWNED);
	/*
	 * If we are getting stats for the current process, then add in the
	 * stats that this thread has accumulated in its current time slice.
	 * We reset the thread and CPU state as if we had performed a context
	 * switch right here.
	 */
	td = curthread;
	if (td->td_proc == p) {
		u = cpu_ticks();
		runtime = u - PCPU_GET(switchtime);
		td->td_runtime += runtime;
		td->td_incruntime += runtime;
		PCPU_SET(switchtime, u);
	}
	/* Make sure the per-thread stats are current. */
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_incruntime == 0)
			continue;
		ruxagg(p, td);
	}
	calcru1(p, &p->p_rux, up, sp);
}

/* Collect resource usage for a single thread. */
void
rufetchtd(struct thread *td, struct rusage *ru)
{
	struct proc *p;
	uint64_t runtime, u;

	p = td->td_proc;
	PROC_STATLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	/*
	 * If we are getting stats for the current thread, then add in the
	 * stats that this thread has accumulated in its current time slice.
	 * We reset the thread and CPU state as if we had performed a context
	 * switch right here.
	 */
	if (td == curthread) {
		u = cpu_ticks();
		runtime = u - PCPU_GET(switchtime);
		td->td_runtime += runtime;
		td->td_incruntime += runtime;
		PCPU_SET(switchtime, u);
	}
	ruxagg(p, td);
	*ru = td->td_ru;
	calcru1(p, &td->td_rux, &ru->ru_utime, &ru->ru_stime);
}

/* XXX: the MI version is too slow to use: */
#ifndef __HAVE_INLINE_FLSLL
#define	flsll(x)	(fls((x) >> 32) != 0 ? fls((x) >> 32) + 32 : fls(x))
#endif

static uint64_t
mul64_by_fraction(uint64_t a, uint64_t b, uint64_t c)
{
	uint64_t acc, bh, bl;
	int i, s, sa, sb;

	/*
	 * Calculate (a * b) / c accurately enough without overflowing.  c
	 * must be nonzero, and its top bit must be 0.  a or b must be
	 * <= c, and the implementation is tuned for b <= c.
	 *
	 * The comments about times are for use in calcru1() with units of
	 * microseconds for 'a' and stathz ticks at 128 Hz for b and c.
	 *
	 * Let n be the number of top zero bits in c.  Each iteration
	 * either returns, or reduces b by right shifting it by at least n.
	 * The number of iterations is at most 1 + 64 / n, and the error is
	 * at most the number of iterations.
	 *
	 * It is very unusual to need even 2 iterations.  Previous
	 * implementations overflowed essentially by returning early in the
	 * first iteration, with n = 38 giving overflow at 105+ hours and
	 * n = 32 giving overflow at 388+ days despite a more careful
	 * calculation.  388 days is a reasonable uptime, and the calculation
	 * needs to work for the uptime times the number of CPUs since 'a'
	 * is per-process.
	 */
	if (a >= (uint64_t)1 << 63)
		return (0);		/* Unsupported arg -- can't happen. */
	acc = 0;
	for (i = 0; i < 128; i++) {
		sa = flsll(a);
		sb = flsll(b);
		if (sa + sb <= 64)
			/* Up to 105 hours on first iteration. */
			return (acc + (a * b) / c);
		if (a >= c) {
			/*
			 * This reduction is based on a = q * c + r, with the
			 * remainder r < c.  'a' may be large to start, and
			 * moving bits from b into 'a' at the end of the loop
			 * sets the top bit of 'a', so the reduction makes
			 * significant progress.
			 */
			acc += (a / c) * b;
			a %= c;
			sa = flsll(a);
			if (sa + sb <= 64)
				/* Up to 388 days on first iteration. */
				return (acc + (a * b) / c);
		}

		/*
		 * This step writes a * b as a * ((bh << s) + bl) =
		 * a * (bh << s) + a * bl = (a << s) * bh + a * bl.  The 2
		 * additive terms are handled separately.  Splitting in
		 * this way is linear except for rounding errors.
		 *
		 * s = 64 - sa is the maximum such that a << s fits in 64
		 * bits.  Since a < c and c has at least 1 zero top bit,
		 * sa < 64 and s > 0.  Thus this step makes progress by
		 * reducing b (it increases 'a', but taking remainders on
		 * the next iteration completes the reduction).
		 *
		 * Finally, the choice for s is just what is needed to keep
		 * a * bl from overflowing, so we don't need complications
		 * like a recursive call mul64_by_fraction(a, bl, c) to
		 * handle the second additive term.
		 */
		s = 64 - sa;
		bh = b >> s;
		bl = b - (bh << s);
		acc += (a * bl) / c;
		a <<= s;
		b = bh;
	}
	return (0);		/* Algorithm failure -- can't happen. */
}

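/*
 * Convert the raw cputick runtime in *ruxp into user and system
 * timevals, apportioning the total microseconds according to the
 * recorded statclock tick counts, and keep the reported values
 * monotonic across calls.
 */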
static void
calcru1(struct proc *p, struct rusage_ext *ruxp, struct timeval *up,
    struct timeval *sp)
{
	/* {user, system, interrupt, total} {ticks, usec}: */
	uint64_t ut, uu, st, su, it, tt, tu;

	ut = ruxp->rux_uticks;
	st = ruxp->rux_sticks;
	it = ruxp->rux_iticks;
	tt = ut + st + it;
	if (tt == 0) {
		/* Avoid divide by zero */
		st = 1;
		tt = 1;
	}
	tu = cputick2usec(ruxp->rux_runtime);
	if ((int64_t)tu < 0) {
		/* XXX: this should be an assert /phk */
		printf("calcru: negative runtime of %jd usec for pid %d (%s)\n",
		    (intmax_t)tu, p->p_pid, p->p_comm);
		tu = ruxp->rux_tu;
	}

	/* Subdivide tu.  Avoid overflow in the multiplications. */
	if (__predict_true(tu <= ((uint64_t)1 << 38) && tt <= (1 << 26))) {
		/* Up to 76 hours when stathz is 128. */
		uu = (tu * ut) / tt;
		su = (tu * st) / tt;
	} else {
		uu = mul64_by_fraction(tu, ut, tt);
		su = mul64_by_fraction(tu, st, tt);
	}

	if (tu >= ruxp->rux_tu) {
		/*
		 * The normal case, time increased.
		 * Enforce monotonicity of bucketed numbers.
		 */
		if (uu < ruxp->rux_uu)
			uu = ruxp->rux_uu;
		if (su < ruxp->rux_su)
			su = ruxp->rux_su;
	} else if (tu + 3 > ruxp->rux_tu || 101 * tu > 100 * ruxp->rux_tu) {
		/*
		 * When we calibrate the cputicker, it is not uncommon to
		 * see the presumably fixed frequency increase slightly over
		 * time as a result of thermal stabilization and NTP
		 * discipline (of the reference clock).  We therefore ignore
		 * a bit of backwards slop because we expect to catch up
		 * shortly.  We use a 3 microsecond limit to catch low
		 * counts and a 1% limit for high counts.
		 */
		uu = ruxp->rux_uu;
		su = ruxp->rux_su;
		tu = ruxp->rux_tu;
	} else { /* tu < ruxp->rux_tu */
		/*
		 * What happened here was likely that a laptop, which ran at
		 * a reduced clock frequency at boot, kicked into high gear.
		 * The wisdom of spamming this message in that case is
		 * dubious, but it might also be indicative of something
		 * serious, so let's keep it and hope laptops can be made
		 * more truthful about their CPU speed via ACPI.
		 */
		printf("calcru: runtime went backwards from %ju usec "
		    "to %ju usec for pid %d (%s)\n",
		    (uintmax_t)ruxp->rux_tu, (uintmax_t)tu,
		    p->p_pid, p->p_comm);
	}

	ruxp->rux_uu = uu;
	ruxp->rux_su = su;
	ruxp->rux_tu = tu;

	up->tv_sec = uu / 1000000;
	up->tv_usec = uu % 1000000;
	sp->tv_sec = su / 1000000;
	sp->tv_usec = su % 1000000;
}

#ifndef _SYS_SYSPROTO_H_
struct getrusage_args {
	int	who;
	struct	rusage *rusage;
};
#endif
int
sys_getrusage(struct thread *td, struct getrusage_args *uap)
{
	struct rusage ru;
	int error;

	error = kern_getrusage(td, uap->who, &ru);
	if (error == 0)
		error = copyout(&ru, uap->rusage, sizeof(struct rusage));
	return (error);
}

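/*
 * Common back end for getrusage(2): RUSAGE_SELF and RUSAGE_CHILDREN
 * report usage aggregated over the whole process or its reaped
 * children, while RUSAGE_THREAD reports only the calling thread.
 */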
int
kern_getrusage(struct thread *td, int who, struct rusage *rup)
{
	struct proc *p;
	int error;

	error = 0;
	p = td->td_proc;
	PROC_LOCK(p);
	switch (who) {
	case RUSAGE_SELF:
		rufetchcalc(p, rup, &rup->ru_utime,
		    &rup->ru_stime);
		break;

	case RUSAGE_CHILDREN:
		*rup = p->p_stats->p_cru;
		calccru(p, &rup->ru_utime, &rup->ru_stime);
		break;

	case RUSAGE_THREAD:
		PROC_STATLOCK(p);
		thread_lock(td);
		rufetchtd(td, rup);
		thread_unlock(td);
		PROC_STATUNLOCK(p);
		break;

	default:
		error = EINVAL;
	}
	PROC_UNLOCK(p);
	return (error);
}

void
rucollect(struct rusage *ru, struct rusage *ru2)
{
	long *ip, *ip2;
	int i;

	if (ru->ru_maxrss < ru2->ru_maxrss)
		ru->ru_maxrss = ru2->ru_maxrss;
	ip = &ru->ru_first;
	ip2 = &ru2->ru_first;
	for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
		*ip++ += *ip2++;
}

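/*
 * Add both the plain rusage counters and the extended runtime/tick
 * accounting of (ru2, rux2) into (ru, rux).
 */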
void
ruadd(struct rusage *ru, struct rusage_ext *rux, struct rusage *ru2,
    struct rusage_ext *rux2)
{

	rux->rux_runtime += rux2->rux_runtime;
	rux->rux_uticks += rux2->rux_uticks;
	rux->rux_sticks += rux2->rux_sticks;
	rux->rux_iticks += rux2->rux_iticks;
	rux->rux_uu += rux2->rux_uu;
	rux->rux_su += rux2->rux_su;
	rux->rux_tu += rux2->rux_tu;
	rucollect(ru, ru2);
}

/*
 * Aggregate tick counts into the proc's rusage_ext.
 */
static void
ruxagg_locked(struct rusage_ext *rux, struct thread *td)
{

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	PROC_STATLOCK_ASSERT(td->td_proc, MA_OWNED);
	rux->rux_runtime += td->td_incruntime;
	rux->rux_uticks += td->td_uticks;
	rux->rux_sticks += td->td_sticks;
	rux->rux_iticks += td->td_iticks;
}

void
ruxagg(struct proc *p, struct thread *td)
{

	thread_lock(td);
	ruxagg_locked(&p->p_rux, td);
	ruxagg_locked(&td->td_rux, td);
	td->td_incruntime = 0;
	td->td_uticks = 0;
	td->td_iticks = 0;
	td->td_sticks = 0;
	thread_unlock(td);
}

/*
 * Update the rusage_ext structure and fetch a valid aggregate rusage
 * for proc p if storage for one is supplied.
 */
void
rufetch(struct proc *p, struct rusage *ru)
{
	struct thread *td;

	PROC_STATLOCK_ASSERT(p, MA_OWNED);

	*ru = p->p_ru;
	if (p->p_numthreads > 0) {
		FOREACH_THREAD_IN_PROC(p, td) {
			ruxagg(p, td);
			rucollect(ru, &td->td_ru);
		}
	}
}

/*
 * Atomically perform a rufetch and a calcru together.
 * Consumers can safely assume that calcru is executed only once
 * rufetch is completed.
 */
void
rufetchcalc(struct proc *p, struct rusage *ru, struct timeval *up,
    struct timeval *sp)
{

	PROC_STATLOCK(p);
	rufetch(p, ru);
	calcru(p, up, sp);
	PROC_STATUNLOCK(p);
}

/*
 * Allocate a new resource limits structure and initialize its
 * reference count.
 */
struct plimit *
lim_alloc()
{
	struct plimit *limp;

	limp = malloc(sizeof(struct plimit), M_PLIMIT, M_WAITOK);
	refcount_init(&limp->pl_refcnt, 1);
	return (limp);
}

struct plimit *
lim_hold(struct plimit *limp)
{

	refcount_acquire(&limp->pl_refcnt);
	return (limp);
}

void
lim_fork(struct proc *p1, struct proc *p2)
{

	PROC_LOCK_ASSERT(p1, MA_OWNED);
	PROC_LOCK_ASSERT(p2, MA_OWNED);

	p2->p_limit = lim_hold(p1->p_limit);
	callout_init_mtx(&p2->p_limco, &p2->p_mtx, 0);
	if (p1->p_cpulimit != RLIM_INFINITY)
		callout_reset_sbt(&p2->p_limco, SBT_1S, 0,
		    lim_cb, p2, C_PREL(1));
}

void
lim_free(struct plimit *limp)
{

	if (refcount_release(&limp->pl_refcnt))
		free((void *)limp, M_PLIMIT);
}

/*
 * Make a copy of the plimit structure.
 * We share these structures copy-on-write after fork.
 */
void
lim_copy(struct plimit *dst, struct plimit *src)
{

	KASSERT(dst->pl_refcnt <= 1, ("lim_copy to shared limit"));
	bcopy(src->pl_rlimit, dst->pl_rlimit, sizeof(src->pl_rlimit));
}

/*
 * Return the hard limit for a particular system resource.  The
 * which parameter specifies the index into the rlimit array.
 */
rlim_t
lim_max(struct thread *td, int which)
{
	struct rlimit rl;

	lim_rlimit(td, which, &rl);
	return (rl.rlim_max);
}

rlim_t
lim_max_proc(struct proc *p, int which)
{
	struct rlimit rl;

	lim_rlimit_proc(p, which, &rl);
	return (rl.rlim_max);
}

/*
 * Return the current (soft) limit for a particular system resource.
 * The which parameter specifies the index into the rlimit array.
 */
rlim_t
lim_cur(struct thread *td, int which)
{
	struct rlimit rl;

	lim_rlimit(td, which, &rl);
	return (rl.rlim_cur);
}

rlim_t
lim_cur_proc(struct proc *p, int which)
{
	struct rlimit rl;

	lim_rlimit_proc(p, which, &rl);
	return (rl.rlim_cur);
}

/*
 * Return a copy of the entire rlimit structure for the system limit
 * specified by 'which' in the rlimit structure pointed to by 'rlp'.
 */
void
lim_rlimit(struct thread *td, int which, struct rlimit *rlp)
{
	struct proc *p = td->td_proc;

	MPASS(td == curthread);
	KASSERT(which >= 0 && which < RLIM_NLIMITS,
	    ("request for invalid resource limit"));
	*rlp = td->td_limit->pl_rlimit[which];
	if (p->p_sysent->sv_fixlimit != NULL)
		p->p_sysent->sv_fixlimit(rlp, which);
}

void
lim_rlimit_proc(struct proc *p, int which, struct rlimit *rlp)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(which >= 0 && which < RLIM_NLIMITS,
	    ("request for invalid resource limit"));
	*rlp = p->p_limit->pl_rlimit[which];
	if (p->p_sysent->sv_fixlimit != NULL)
		p->p_sysent->sv_fixlimit(rlp, which);
}

void
uihashinit()
{

	uihashtbl = hashinit(maxproc / 16, M_UIDINFO, &uihash);
	rw_init(&uihashtbl_lock, "uidinfo hash");
}

/*
 * Look up a uidinfo struct for the parameter uid.
 * uihashtbl_lock must be locked.
 * Increase refcount on uidinfo struct returned.
 */
static struct uidinfo *
uilookup(uid_t uid)
{
	struct uihashhead *uipp;
	struct uidinfo *uip;

	rw_assert(&uihashtbl_lock, RA_LOCKED);
	uipp = UIHASH(uid);
	LIST_FOREACH(uip, uipp, ui_hash)
		if (uip->ui_uid == uid) {
			uihold(uip);
			break;
		}

	return (uip);
}

/*
 * Find or allocate a struct uidinfo for a particular uid.
 * Returns with uidinfo struct referenced.
 * uifree() should be called on a struct uidinfo when released.
 */
struct uidinfo *
uifind(uid_t uid)
{
	struct uidinfo *new_uip, *uip;
	struct ucred *cred;

	cred = curthread->td_ucred;
	if (cred->cr_uidinfo->ui_uid == uid) {
		uip = cred->cr_uidinfo;
		uihold(uip);
		return (uip);
	} else if (cred->cr_ruidinfo->ui_uid == uid) {
		uip = cred->cr_ruidinfo;
		uihold(uip);
		return (uip);
	}

	rw_rlock(&uihashtbl_lock);
	uip = uilookup(uid);
	rw_runlock(&uihashtbl_lock);
	if (uip != NULL)
		return (uip);

	new_uip = malloc(sizeof(*new_uip), M_UIDINFO, M_WAITOK | M_ZERO);
	racct_create(&new_uip->ui_racct);
	refcount_init(&new_uip->ui_ref, 1);
	new_uip->ui_uid = uid;

	rw_wlock(&uihashtbl_lock);
	/*
	 * There's a chance someone created our uidinfo while we
	 * were in malloc and not holding the lock, so we have to
	 * make sure we don't insert a duplicate uidinfo.
	 */
	if ((uip = uilookup(uid)) == NULL) {
		LIST_INSERT_HEAD(UIHASH(uid), new_uip, ui_hash);
		rw_wunlock(&uihashtbl_lock);
		uip = new_uip;
	} else {
		rw_wunlock(&uihashtbl_lock);
		racct_destroy(&new_uip->ui_racct);
		free(new_uip, M_UIDINFO);
	}
	return (uip);
}

/*
 * Place another refcount on a uidinfo struct.
 */
void
uihold(struct uidinfo *uip)
{

	refcount_acquire(&uip->ui_ref);
}

/*-
 * Since uidinfo structs have a long lifetime, we use an
 * opportunistic refcounting scheme to avoid locking the lookup hash
 * for each release.
 *
 * If the refcount hits 0, we need to free the structure,
 * which means we need to lock the hash.
 * Optimal case:
 *   After locking the struct and lowering the refcount, if we find
 *   that we don't need to free, simply unlock and return.
 * Suboptimal case:
 *   If refcount lowering results in need to free, bump the count
 *   back up, lose the lock and acquire the locks in the proper
 *   order to try again.
 */
void
uifree(struct uidinfo *uip)
{
	int old;

	/* Prepare for optimal case. */
	old = uip->ui_ref;
	if (old > 1 && atomic_cmpset_int(&uip->ui_ref, old, old - 1))
		return;

	/* Prepare for suboptimal case. */
	rw_wlock(&uihashtbl_lock);
	if (refcount_release(&uip->ui_ref) == 0) {
		rw_wunlock(&uihashtbl_lock);
		return;
	}

	racct_destroy(&uip->ui_racct);
	LIST_REMOVE(uip, ui_hash);
	rw_wunlock(&uihashtbl_lock);

	if (uip->ui_sbsize != 0)
		printf("freeing uidinfo: uid = %d, sbsize = %ld\n",
		    uip->ui_uid, uip->ui_sbsize);
	if (uip->ui_proccnt != 0)
		printf("freeing uidinfo: uid = %d, proccnt = %ld\n",
		    uip->ui_uid, uip->ui_proccnt);
	if (uip->ui_vmsize != 0)
		printf("freeing uidinfo: uid = %d, swapuse = %lld\n",
		    uip->ui_uid, (unsigned long long)uip->ui_vmsize);
	free(uip, M_UIDINFO);
}

#ifdef RACCT
void
ui_racct_foreach(void (*callback)(struct racct *racct,
    void *arg2, void *arg3), void (*pre)(void), void (*post)(void),
    void *arg2, void *arg3)
{
	struct uidinfo *uip;
	struct uihashhead *uih;

	rw_rlock(&uihashtbl_lock);
	if (pre != NULL)
		(pre)();
	for (uih = &uihashtbl[uihash]; uih >= uihashtbl; uih--) {
		LIST_FOREACH(uip, uih, ui_hash) {
			(callback)(uip->ui_racct, arg2, arg3);
		}
	}
	if (post != NULL)
		(post)();
	rw_runlock(&uihashtbl_lock);
}
#endif

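/*
 * Atomically adjust one of the per-uid accounting counters by "diff".
 * When "max" is nonzero, an increase that would push the counter past
 * "max" is rolled back and 0 is returned; otherwise 1 is returned.
 */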
static inline int
chglimit(struct uidinfo *uip, long *limit, int diff, rlim_t max,
    const char *name)
{
	long new;

	/* Don't allow them to exceed max, but allow subtraction. */
	new = atomic_fetchadd_long(limit, (long)diff) + diff;
	if (diff > 0 && max != 0) {
		if (new < 0 || new > max) {
			atomic_subtract_long(limit, (long)diff);
			return (0);
		}
	} else if (new < 0)
		printf("negative %s for uid = %d\n", name, uip->ui_uid);
	return (1);
}

/*
 * Change the count associated with the number of processes
 * a given user is using.  When 'max' is 0, don't enforce a limit.
 */
int
chgproccnt(struct uidinfo *uip, int diff, rlim_t max)
{

	return (chglimit(uip, &uip->ui_proccnt, diff, max, "proccnt"));
}

/*
 * Change the total socket buffer size a user has used.
 */
int
chgsbsize(struct uidinfo *uip, u_int *hiwat, u_int to, rlim_t max)
{
	int diff, rv;

	diff = to - *hiwat;
	if (diff > 0 && max == 0) {
		rv = 0;
	} else {
		rv = chglimit(uip, &uip->ui_sbsize, diff, max, "sbsize");
		if (rv != 0)
			*hiwat = to;
	}
	return (rv);
}

/*
 * Change the count associated with the number of pseudo-terminals
 * a given user is using.  When 'max' is 0, don't enforce a limit.
 */
int
chgptscnt(struct uidinfo *uip, int diff, rlim_t max)
{

	return (chglimit(uip, &uip->ui_ptscnt, diff, max, "ptscnt"));
}

int
chgkqcnt(struct uidinfo *uip, int diff, rlim_t max)
{

	return (chglimit(uip, &uip->ui_kqcnt, diff, max, "kqcnt"));
}

int
chgumtxcnt(struct uidinfo *uip, int diff, rlim_t max)
{

	return (chglimit(uip, &uip->ui_umtxcnt, diff, max, "umtxcnt"));
}
