2 * Copyright (c) 1982, 1986, 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the University of
21 * California, Berkeley and its contributors.
22 * 4. Neither the name of the University nor the names of its contributors
23 * may be used to endorse or promote products derived from this software
24 * without specific prior written permission.
26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 * @(#)kern_resource.c 8.5 (Berkeley) 1/21/94
39 * $FreeBSD: src/sys/kern/kern_resource.c,v 1.55.2.5 2001/11/03 01:41:08 ps Exp $
40 * $DragonFly: src/sys/kern/kern_resource.c,v 1.18 2004/01/24 07:55:50 dillon Exp $
43 #include "opt_compat.h"
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/sysproto.h>
49 #include <sys/kern_syscall.h>
50 #include <sys/kernel.h>
51 #include <sys/resourcevar.h>
52 #include <sys/malloc.h>
57 #include <vm/vm_param.h>
60 #include <vm/vm_map.h>
/* Apply a nice-value change to one process (permission-checked). */
62 static int donice (struct proc *chgp, int n);
/* Malloc type tag for per-uid resource-accounting structures. */
64 static MALLOC_DEFINE(M_UIDINFO, "uidinfo", "uidinfo structures");
/* Map a uid to its hash chain head; uihash is the table mask. */
65 #define UIHASH(uid) (&uihashtbl[(uid) & uihash])
66 static LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
67 static u_long uihash; /* size of hash table - 1 */
/* Internal constructors/lookup for uidinfo entries. */
69 static struct uidinfo *uicreate (uid_t uid);
70 static struct uidinfo *uilookup (uid_t uid);
73 * Resource controls and accounting.
/*
 * getpriority() system call: return the lowest (most favorable) nice
 * value among the processes selected by uap->which / uap->who.
 * NOTE(review): this listing is a sampled fragment -- the switch on
 * uap->which, braces, and return paths are not all visible here.
 */
77 getpriority(struct getpriority_args *uap)
79 struct proc *curp = curproc;
/* Sentinel one above PRIO_MAX: "no matching process seen yet". */
81 int low = PRIO_MAX + 1;
/* Skip processes the caller's jail may not see. */
91 if (!PRISON_CHECK(curp->p_ucred, p->p_ucred))
102 else if ((pg = pgfind(uap->who)) == NULL)
/* Process-group case: scan every member of the group. */
104 LIST_FOREACH(p, &pg->pg_members, p_pglist) {
105 if ((PRISON_CHECK(curp->p_ucred, p->p_ucred) && p->p_nice < low))
/* who == 0 means the caller's own effective uid. */
112 uap->who = curp->p_ucred->cr_uid;
113 FOREACH_PROC_IN_SYSTEM(p)
114 if (PRISON_CHECK(curp->p_ucred, p->p_ucred) &&
115 p->p_ucred->cr_uid == uap->who &&
/* Still at the sentinel: no process matched the selector. */
123 if (low == PRIO_MAX + 1)
125 uap->sysmsg_result = low;
/*
 * setpriority() system call: apply uap->prio (via donice()) to every
 * process selected by uap->which / uap->who.
 * NOTE(review): sampled fragment -- case labels, the "found" accounting
 * and the final error selection (ESRCH when found == 0, presumably)
 * are not visible here.
 */
131 setpriority(struct setpriority_args *uap)
133 struct proc *curp = curproc;
135 int found = 0, error = 0;
137 switch (uap->which) {
/* Jail check: only renice processes the caller may see. */
146 if (!PRISON_CHECK(curp->p_ucred, p->p_ucred))
148 error = donice(p, uap->prio);
158 else if ((pg = pgfind(uap->who)) == NULL)
/* Process-group case: renice each visible member. */
160 LIST_FOREACH(p, &pg->pg_members, p_pglist) {
161 if (PRISON_CHECK(curp->p_ucred, p->p_ucred)) {
162 error = donice(p, uap->prio);
/* who == 0 means the caller's own effective uid. */
170 uap->who = curp->p_ucred->cr_uid;
171 FOREACH_PROC_IN_SYSTEM(p)
172 if (p->p_ucred->cr_uid == uap->who &&
173 PRISON_CHECK(curp->p_ucred, p->p_ucred)) {
174 error = donice(p, uap->prio);
/*
 * Change process chgp's nice value to n, enforcing permissions:
 * a non-root caller may only renice processes owned by its own
 * effective or real uid, and only root may lower the nice value
 * (i.e. raise priority).  NOTE(review): sampled fragment -- the
 * clamping of n to [PRIO_MIN, PRIO_MAX], the error returns, and the
 * assignment to chgp->p_nice are not visible here.
 */
188 donice(struct proc *chgp, int n)
190 struct proc *curp = curproc;
191 struct ucred *cr = curp->p_ucred;
/* Non-root (both uids nonzero) may only touch own processes. */
193 if (cr->cr_uid && cr->cr_ruid &&
194 cr->cr_uid != chgp->p_ucred->cr_uid &&
195 cr->cr_ruid != chgp->p_ucred->cr_uid)
/* Decreasing nice raises priority: superuser only. */
201 if (n < chgp->p_nice && suser_cred(cr, 0))
/* Recompute scheduling priority from the new nice value. */
204 (void)resetpriority(chgp);
* Set realtime priority
/*
 * rtprio() system call: get or set the realtime priority of a process.
 * NOTE(review): sampled fragment -- the lookup of target process p,
 * several error returns, and the remaining switch arms are missing.
 */
213 rtprio(struct rtprio_args *uap)
215 struct proc *curp = curproc;
217 struct ucred *cr = curp->p_ucred;
/* Copy the user's rtprio request in before examining it. */
221 error = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
233 switch (uap->function) {
/* RTP_LOOKUP (presumably): report target's current rtprio. */
235 return (copyout(&p->p_rtprio, uap->rtp, sizeof(struct rtprio)));
/* Non-root may only operate on processes with a matching uid. */
237 if (cr->cr_uid && cr->cr_ruid &&
238 cr->cr_uid != p->p_ucred->cr_uid &&
239 cr->cr_ruid != p->p_ucred->cr_uid)
241 /* disallow setting rtprio in most cases if not superuser */
242 if (suser_cred(cr, 0)) {
243 /* can't set someone else's */
246 /* can't set realtime priority */
248 * Realtime priority has to be restricted for reasons which should be
249 * obvious. However, for idle priority, there is a potential for
250 * system deadlock if an idleprio process gains a lock on a resource
251 * that other processes need (and the idleprio process can't run
252 * due to a CPU-bound normal process). Fix me! XXX
254 if (RTP_PRIO_IS_REALTIME(rtp.type))
/* Validate the requested priority class and range. */
261 case RTP_PRIO_REALTIME:
262 case RTP_PRIO_NORMAL:
264 if (rtp.prio > RTP_PRIO_MAX)
/*
 * setrlimit() system call: thin wrapper that copies the rlimit from
 * userspace and hands it to kern_setrlimit().
 * NOTE(review): sampled fragment -- the error check after copyin and
 * the final return are not visible here.
 */
278 setrlimit(struct __setrlimit_args *uap)
283 error = copyin(uap->rlp, &alim, sizeof(alim));
287 error = kern_setrlimit(uap->which, &alim);
/*
 * Kernel-internal setrlimit: validate and install resource limit
 * "which" for the current process, clamping per-resource maxima and
 * adjusting stack protections when RLIMIT_STACK changes.
 * NOTE(review): sampled fragment -- case labels, several returns and
 * the setting of prot (VM_PROT_*) for the stack case are missing.
 */
293 kern_setrlimit(u_int which, struct rlimit *limp)
295 struct proc *p = curproc;
296 struct rlimit *alimp;
/* Reject out-of-range resource indices. */
299 if (which >= RLIM_NLIMITS)
301 alimp = &p->p_rlimit[which];
304 * Preserve historical bugs by treating negative limits as unsigned.
306 if (limp->rlim_cur < 0)
307 limp->rlim_cur = RLIM_INFINITY;
308 if (limp->rlim_max < 0)
309 limp->rlim_max = RLIM_INFINITY;
/* Raising either limit above the current hard limit needs root. */
311 if (limp->rlim_cur > alimp->rlim_max ||
312 limp->rlim_max > alimp->rlim_max)
313 if ((error = suser_cred(p->p_ucred, PRISON_ROOT)))
/* Soft limit may never exceed the hard limit. */
315 if (limp->rlim_cur > limp->rlim_max)
316 limp->rlim_cur = limp->rlim_max;
/* Copy-on-write: unshare the plimit before modifying it. */
317 if (p->p_limit->p_refcnt > 1 &&
318 (p->p_limit->p_lflags & PL_SHAREMOD) == 0) {
319 p->p_limit->p_refcnt--;
320 p->p_limit = limcopy(p->p_limit);
321 alimp = &p->p_rlimit[which];
/* RLIMIT_CPU (presumably): cache the limit in microseconds,
* guarding the seconds->microseconds multiply against overflow. */
327 if (limp->rlim_cur > RLIM_INFINITY / (rlim_t)1000000)
328 p->p_limit->p_cpulimit = RLIM_INFINITY;
330 p->p_limit->p_cpulimit =
331 (rlim_t)1000000 * limp->rlim_cur;
/* Data segment: clamp to the system-wide maximum. */
334 if (limp->rlim_cur > maxdsiz)
335 limp->rlim_cur = maxdsiz;
336 if (limp->rlim_max > maxdsiz)
337 limp->rlim_max = maxdsiz;
/* Stack: clamp to the system-wide maximum. */
341 if (limp->rlim_cur > maxssiz)
342 limp->rlim_cur = maxssiz;
343 if (limp->rlim_max > maxssiz)
344 limp->rlim_max = maxssiz;
346 * Stack is allocated to the max at exec time with only
347 * "rlim_cur" bytes accessible. If stack limit is going
348 * up make more accessible, if going down make inaccessible.
350 if (limp->rlim_cur != alimp->rlim_cur) {
/* Growing: expose the region between old and new top. */
355 if (limp->rlim_cur > alimp->rlim_cur) {
357 size = limp->rlim_cur - alimp->rlim_cur;
358 addr = USRSTACK - limp->rlim_cur;
/* Shrinking: revoke access to the region being removed. */
361 size = alimp->rlim_cur - limp->rlim_cur;
362 addr = USRSTACK - alimp->rlim_cur;
364 addr = trunc_page(addr);
365 size = round_page(size);
366 (void) vm_map_protect(&p->p_vmspace->vm_map,
367 addr, addr+size, prot, FALSE);
/* Open files: clamp to per-process system maximum. */
372 if (limp->rlim_cur > maxfilesperproc)
373 limp->rlim_cur = maxfilesperproc;
374 if (limp->rlim_max > maxfilesperproc)
375 limp->rlim_max = maxfilesperproc;
/* Processes: clamp to per-uid maximum, and never below 1. */
379 if (limp->rlim_cur > maxprocperuid)
380 limp->rlim_cur = maxprocperuid;
381 if (limp->rlim_max > maxprocperuid)
382 limp->rlim_max = maxprocperuid;
383 if (limp->rlim_cur < 1)
385 if (limp->rlim_max < 1)
394 * The rlimit indexed by which is returned in the second argument.
/*
 * Kernel-internal getrlimit: copy the current process's limit for
 * resource "which" into *limp; rejects out-of-range indices.
 * NOTE(review): sampled fragment -- error returns not visible.
 */
397 kern_getrlimit(u_int which, struct rlimit *limp)
399 struct thread *td = curthread;
400 struct proc *p = td->td_proc;
402 if (which >= RLIM_NLIMITS)
405 *limp = p->p_rlimit[which];
/*
 * getrlimit() system call: fetch the limit via kern_getrlimit() and
 * copy it out to userspace.  NOTE(review): sampled fragment -- the
 * error check between the two calls is not visible.
 */
411 getrlimit(struct __getrlimit_args *uap)
416 error = kern_getrlimit(uap->which, &lim);
419 error = copyout(&lim, uap->rlp, sizeof(*uap->rlp));
424 * Transform the running time and tick information in proc p into user,
425 * system, and interrupt time usage.
427 * Since we are limited to statclock tick granularity this is a statistical
428 * calculation which will be correct over the long haul, but should not be
429 * expected to measure fine grained deltas.
/*
 * Convert the thread's accumulated user/system/interrupt tick counts
 * into struct timeval usage figures.  The tick counters are kept in
 * microseconds here, so conversion is a simple div/mod by 1e6.
 * NOTE(review): sampled fragment -- the NULL check guarding ip is
 * presumably between lines 447 and 449; confirm against full source.
 */
432 calcru(struct proc *p, struct timeval *up, struct timeval *sp,
435 struct thread *td = p->p_thread;
439 * Calculate at the statclock level. YYY if the thread is owned by
440 * another cpu we need to forward the request to the other cpu, or
441 * have a token to interlock the information.
444 up->tv_sec = td->td_uticks / 1000000;
445 up->tv_usec = td->td_uticks % 1000000;
446 sp->tv_sec = td->td_sticks / 1000000;
447 sp->tv_usec = td->td_sticks % 1000000;
449 ip->tv_sec = td->td_iticks / 1000000;
450 ip->tv_usec = td->td_iticks % 1000000;
/*
 * getrusage() system call: copy out either the process's own rusage
 * (with times refreshed via calcru()) or the accumulated rusage of
 * its reaped children.  NOTE(review): sampled fragment -- the switch
 * header, RUSAGE_SELF label, and default/EINVAL arm are missing.
 */
457 getrusage(struct getrusage_args *uap)
459 struct proc *p = curproc;
/* Own usage: refresh utime/stime from the tick counters first. */
465 rup = &p->p_stats->p_ru;
466 calcru(p, &rup->ru_utime, &rup->ru_stime, NULL);
469 case RUSAGE_CHILDREN:
470 rup = &p->p_stats->p_cru;
476 return (copyout((caddr_t)rup, (caddr_t)uap->rusage,
477 sizeof (struct rusage)));
/*
 * Accumulate rusage ru2 into ru: times are added, ru_maxrss takes the
 * maximum, and every remaining long field (ru_first..ru_last) is
 * summed element-wise via the two cursor pointers.
 */
481 ruadd(struct rusage *ru, struct rusage *ru2)
486 timevaladd(&ru->ru_utime, &ru2->ru_utime);
487 timevaladd(&ru->ru_stime, &ru2->ru_stime);
/* maxrss is a high-water mark, not additive. */
488 if (ru->ru_maxrss < ru2->ru_maxrss)
489 ru->ru_maxrss = ru2->ru_maxrss;
490 ip = &ru->ru_first; ip2 = &ru2->ru_first;
491 for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
496 * Make a copy of the plimit structure.
497 * We share these structures copy-on-write after fork,
498 * and copy when a limit is changed.
/*
 * NOTE(review): the bcopy length is sizeof(struct plimit) while the
 * source/destination are the pl_rlimit members -- this only copies the
 * right bytes if pl_rlimit is the first member of struct plimit
 * (historical BSD layout); confirm against the struct definition.
 * Sampled fragment: refcount initialization and return are missing.
 */
501 limcopy(struct plimit *lim)
505 MALLOC(copy, struct plimit *, sizeof(struct plimit),
506 M_SUBPROC, M_WAITOK);
507 bcopy(lim->pl_rlimit, copy->pl_rlimit, sizeof(struct plimit));
514 * Find the uidinfo structure for a uid. This structure is used to
515 * track the total resource consumption (process count, socket buffer
516 * size, etc.) for the uid and impose limits.
/* Size the uid hash table relative to the process limit. */
521 uihashtbl = hashinit(maxproc / 16, M_UIDINFO, &uihash);
/*
 * Look up the uidinfo entry for uid in its hash chain; returns the
 * entry or (presumably, per the missing tail) NULL when absent.
 */
524 static struct uidinfo *
527 struct uihashhead *uipp;
531 LIST_FOREACH(uip, uipp, ui_hash) {
532 if (uip->ui_uid == uid)
/*
 * Allocate and insert a uidinfo entry for uid.  Because the M_WAITOK
 * allocation can sleep, the table is re-checked afterwards and the
 * fresh allocation discarded if another thread raced us in.
 * NOTE(review): sampled fragment -- field initialization between
 * insertion and varsymset_init is not visible.
 */
538 static struct uidinfo *
541 struct uidinfo *uip, *norace;
544 * Allocate space and check for a race
546 MALLOC(uip, struct uidinfo *, sizeof(*uip), M_UIDINFO, M_WAITOK);
547 norace = uilookup(uid);
548 if (norace != NULL) {
/* Lost the race: free ours, use the existing entry. */
549 FREE(uip, M_UIDINFO);
554 * Initialize structure and enter it into the hash table
556 LIST_INSERT_HEAD(UIHASH(uid), uip, ui_hash);
561 varsymset_init(&uip->ui_varsymset, NULL);
/*
 * Release a uidinfo entry: warn about leaked accounting (nonzero
 * socket-buffer or process counts), unhook it from the hash table,
 * tear down its varsym set and free it.
 */
578 uifree(struct uidinfo *uip)
580 if (uip->ui_sbsize != 0)
581 /* XXX no %qd in kernel. Truncate. */
582 printf("freeing uidinfo: uid = %d, sbsize = %ld\n",
583 uip->ui_uid, (long)uip->ui_sbsize);
584 if (uip->ui_proccnt != 0)
585 printf("freeing uidinfo: uid = %d, proccnt = %ld\n",
586 uip->ui_uid, uip->ui_proccnt);
587 LIST_REMOVE(uip, ui_hash);
588 varsymset_clean(&uip->ui_varsymset);
589 FREE(uip, M_UIDINFO);
/*
 * Take an additional reference on a uidinfo entry; the caller must
 * already hold one (asserted).  NOTE(review): the increment itself is
 * in a line not visible in this fragment.
 */
593 uihold(struct uidinfo *uip)
596 KKASSERT(uip->ui_ref > 0);
/*
 * Drop a reference on a uidinfo entry, freeing it (presumably via
 * uifree(), in the line not visible here) when the count reaches zero.
 */
600 uidrop(struct uidinfo *uip)
602 KKASSERT(uip->ui_ref > 0);
603 if (--uip->ui_ref == 0)
/*
 * Replace *puip with nuip, dropping the old reference.
 * NOTE(review): body not visible in this fragment.
 */
608 uireplace(struct uidinfo **puip, struct uidinfo *nuip)
615 * Change the count associated with number of processes
616 * a given user is using. When 'max' is 0, don't enforce a limit
619 chgproccnt(struct uidinfo *uip, int diff, int max)
621 /* don't allow them to exceed max, but allow subtraction */
622 if (diff > 0 && uip->ui_proccnt + diff > max && max != 0)
624 uip->ui_proccnt += diff;
/* A negative count indicates an accounting bug; warn loudly. */
625 if (uip->ui_proccnt < 0)
626 printf("negative proccnt for uid = %d\n", uip->ui_uid);
631 * Change the total socket buffer size a user has used.
/*
 * Adjust the uid's socket-buffer accounting from *hiwat to "to",
 * refusing increases that would push the total past max.
 * NOTE(review): sampled fragment -- the failure return and the
 * *hiwat update are in lines not visible here.
 */
634 chgsbsize(struct uidinfo *uip, u_long *hiwat, u_long to, rlim_t max)
640 new = uip->ui_sbsize + to - *hiwat;
641 /* don't allow them to exceed max, but allow subtraction */
642 if (to > *hiwat && new > max) {
646 uip->ui_sbsize = new;
/* A negative total indicates an accounting bug; warn loudly. */
648 if (uip->ui_sbsize < 0)
649 printf("negative sbsize for uid = %d\n", uip->ui_uid);