/*
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_resource.c	8.5 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/kern_resource.c,v 1.55.2.5 2001/11/03 01:41:08 ps Exp $
 * $DragonFly: src/sys/kern/kern_resource.c,v 1.35 2008/05/27 05:25:34 dillon Exp $
 */
#include "opt_compat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kern_syscall.h>
#include <sys/kernel.h>
#include <sys/resourcevar.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/rtprio.h>
#include <sys/jail.h>
#include <sys/varsym.h>
#include <sys/lockf.h>

#include <vm/vm_param.h>
#include <vm/vm_map.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>
static int donice (struct proc *chgp, int n);

static MALLOC_DEFINE(M_UIDINFO, "uidinfo", "uidinfo structures");
#define	UIHASH(uid)	(&uihashtbl[(uid) & uihash])
static struct spinlock uihash_lock;
static LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
static u_long	uihash;		/* size of hash table - 1 */
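
/*
 * Illustrative note (not part of the original file): uihashtbl is sized to
 * a power of two by hashinit(), and uihash stores (number of buckets - 1),
 * so UIHASH() is a mask rather than a modulo.  For example, with a
 * 64-bucket table (uihash == 63), uid 1001 selects bucket 1001 & 63 == 41.
 */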
static struct uidinfo *uicreate (uid_t uid);
static struct uidinfo *uilookup (uid_t uid);
/*
 * Resource controls and accounting.
 */

struct getpriority_info {

static int getpriority_callback(struct proc *p, void *data);
sys_getpriority(struct getpriority_args *uap)
{
	struct getpriority_info info;
	struct proc *curp = curproc;
	int low = PRIO_MAX + 1;

		if (!PRISON_CHECK(curp->p_ucred, p->p_ucred))

		else if ((pg = pgfind(uap->who)) == NULL)

		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			if (PRISON_CHECK(curp->p_ucred, p->p_ucred) &&
			    p->p_nice < low)

		uap->who = curp->p_ucred->cr_uid;

		allproc_scan(getpriority_callback, &info);

	if (low == PRIO_MAX + 1)

	uap->sysmsg_result = low;

/*
 * Figure out the current lowest nice priority for processes owned
 * by the specified user.
 */
getpriority_callback(struct proc *p, void *data)
{
	struct getpriority_info *info = data;

	if (PRISON_CHECK(curproc->p_ucred, p->p_ucred) &&
	    p->p_ucred->cr_uid == info->who &&
	    p->p_nice < info->low) {
		info->low = p->p_nice;
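
/*
 * Illustrative userland usage (not in the original source): because nice
 * values can legitimately be negative, a getpriority(2) caller must clear
 * errno before the call to tell an error apart from a negative result:
 *
 *	errno = 0;
 *	prio = getpriority(PRIO_PROCESS, 0);
 *	if (prio == -1 && errno != 0)
 *		err(1, "getpriority");
 */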
struct setpriority_info {

static int setpriority_callback(struct proc *p, void *data);

sys_setpriority(struct setpriority_args *uap)
{
	struct setpriority_info info;
	struct proc *curp = curproc;
	int found = 0, error = 0;

	switch (uap->which) {
		if (!PRISON_CHECK(curp->p_ucred, p->p_ucred))

		error = donice(p, uap->prio);

		else if ((pg = pgfind(uap->who)) == NULL)

		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			if (PRISON_CHECK(curp->p_ucred, p->p_ucred)) {
				error = donice(p, uap->prio);

		uap->who = curp->p_ucred->cr_uid;
		info.prio = uap->prio;

		allproc_scan(setpriority_callback, &info);

setpriority_callback(struct proc *p, void *data)
{
	struct setpriority_info *info = data;

	if (p->p_ucred->cr_uid == info->who &&
	    PRISON_CHECK(curproc->p_ucred, p->p_ucred)) {
		error = donice(p, info->prio);
donice(struct proc *chgp, int n)
{
	struct proc *curp = curproc;
	struct ucred *cr = curp->p_ucred;

	if (cr->cr_uid && cr->cr_ruid &&
	    cr->cr_uid != chgp->p_ucred->cr_uid &&
	    cr->cr_ruid != chgp->p_ucred->cr_uid)

	if (n < chgp->p_nice && priv_check_cred(cr, PRIV_SCHED_SETPRIORITY, 0))

	FOREACH_LWP_IN_PROC(lp, chgp)
		chgp->p_usched->resetpriority(lp);
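
/*
 * Illustrative example (not in the original source): for a process the
 * caller owns that is currently at p_nice == 0, raising the nice value
 * (making it less favourable) is allowed, but lowering it fails the
 * priv_check_cred(..., PRIV_SCHED_SETPRIORITY, 0) test above unless the
 * caller holds that privilege:
 *
 *	donice(p, 10);	succeeds for the unprivileged owner
 *	donice(p, -5);	requires PRIV_SCHED_SETPRIORITY
 */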
sys_lwp_rtprio(struct lwp_rtprio_args *uap)
{
	struct proc *p = curproc;
	struct ucred *cr = p->p_ucred;

	error = copyin(uap->rtp, &rtp, sizeof(struct rtprio));

	} else if (uap->pid == 0) {
		/* curproc already loaded on p */

	} else if (uap->tid == -1) {
		/*
		 * sadly, tid can be 0 so we can't use 0 here
		 */
		lp = curthread->td_lwp;

		lp = lwp_rb_tree_RB_LOOKUP(&p->p_lwp_tree, uap->tid);

	switch (uap->function) {
		return (copyout(&lp->lwp_rtprio, uap->rtp,
				sizeof(struct rtprio)));

		if (cr->cr_uid && cr->cr_ruid &&
		    cr->cr_uid != p->p_ucred->cr_uid &&
		    cr->cr_ruid != p->p_ucred->cr_uid) {

		/* disallow setting rtprio in most cases if not superuser */
		if (priv_check_cred(cr, PRIV_SCHED_RTPRIO, 0)) {
			/* can't set someone else's */
			if (uap->pid) {	/* XXX */

			/* can't set realtime priority */
			/*
			 * Realtime priority has to be restricted for obvious
			 * reasons: an unprivileged realtime-priority thread
			 * could starve the rest of the system.  However, for
			 * idle priority, there is a potential for system
			 * deadlock if an idleprio process gains a lock on a
			 * resource that other processes need (and the
			 * idleprio process can't run due to a CPU-bound
			 * normal process).  Fix me!  XXX
			 */
			if (RTP_PRIO_IS_REALTIME(rtp.type)) {

	case RTP_PRIO_REALTIME:
	case RTP_PRIO_NORMAL:
		if (rtp.prio > RTP_PRIO_MAX)
		lp->lwp_rtprio = rtp;

	panic("can't get here");
/*
 * Set realtime priority.
 */
sys_rtprio(struct rtprio_args *uap)
{
	struct proc *curp = curproc;
	struct ucred *cr = curp->p_ucred;

	error = copyin(uap->rtp, &rtp, sizeof(struct rtprio));

	lp = FIRST_LWP_IN_PROC(p);
	switch (uap->function) {
		return (copyout(&lp->lwp_rtprio, uap->rtp, sizeof(struct rtprio)));

		if (cr->cr_uid && cr->cr_ruid &&
		    cr->cr_uid != p->p_ucred->cr_uid &&
		    cr->cr_ruid != p->p_ucred->cr_uid)

		/* disallow setting rtprio in most cases if not superuser */
		if (priv_check_cred(cr, PRIV_SCHED_RTPRIO, 0)) {
			/* can't set someone else's */

			/* can't set realtime priority */
			/*
			 * Realtime priority has to be restricted for obvious
			 * reasons: an unprivileged realtime-priority thread
			 * could starve the rest of the system.  However, for
			 * idle priority, there is a potential for system
			 * deadlock if an idleprio process gains a lock on a
			 * resource that other processes need (and the
			 * idleprio process can't run due to a CPU-bound
			 * normal process).  Fix me!  XXX
			 */
			if (RTP_PRIO_IS_REALTIME(rtp.type))

	case RTP_PRIO_REALTIME:
	case RTP_PRIO_NORMAL:
		if (rtp.prio > RTP_PRIO_MAX)
		lp->lwp_rtprio = rtp;
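
/*
 * Illustrative userland usage (not in the original source): querying the
 * current realtime priority of the calling process, where pid 0 means
 * "the current process" as in the lookup code above:
 *
 *	struct rtprio rtp;
 *	if (rtprio(RTP_LOOKUP, 0, &rtp) == 0)
 *		printf("type %u prio %u\n", rtp.type, rtp.prio);
 */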
sys_setrlimit(struct __setrlimit_args *uap)
{
	error = copyin(uap->rlp, &alim, sizeof(alim));

	error = kern_setrlimit(uap->which, &alim);

sys_getrlimit(struct __getrlimit_args *uap)
{
	error = kern_getrlimit(uap->which, &lim);

	error = copyout(&lim, uap->rlp, sizeof(*uap->rlp));
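
/*
 * Illustrative userland usage (not in the original source): raising the
 * soft open-file limit up to the hard limit through these syscalls:
 *
 *	struct rlimit rl;
 *	if (getrlimit(RLIMIT_NOFILE, &rl) == 0) {
 *		rl.rlim_cur = rl.rlim_max;
 *		(void)setrlimit(RLIMIT_NOFILE, &rl);
 *	}
 */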
/*
 * Transform the running time and tick information in lwp lp's thread into
 * user, system, and interrupt time usage.
 *
 * Since we are limited to statclock tick granularity this is a statistical
 * calculation which will be correct over the long haul, but should not be
 * expected to measure fine grained deltas.
 *
 * It is possible to catch an lwp in the midst of being created, so
 * check whether lwp_thread is NULL or not.
 */
calcru(struct lwp *lp, struct timeval *up, struct timeval *sp)
{
	/*
	 * Calculate at the statclock level.  YYY if the thread is owned by
	 * another cpu we need to forward the request to the other cpu, or
	 * have a token to interlock the information in order to avoid racing
	 * thread destruction.
	 */
	if ((td = lp->lwp_thread) != NULL) {
		up->tv_sec = td->td_uticks / 1000000;
		up->tv_usec = td->td_uticks % 1000000;
		sp->tv_sec = td->td_sticks / 1000000;
		sp->tv_usec = td->td_sticks % 1000000;
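
/*
 * Illustrative example (not in the original source): as the division by
 * 1000000 above implies, td_uticks and td_sticks are accumulated in
 * microseconds here, so a thread with td_uticks == 2500000 reports
 * up->tv_sec == 2 and up->tv_usec == 500000, i.e. 2.5 seconds of user time.
 */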
/*
 * Aggregate resource statistics of all lwps of a process.
 *
 * proc.p_ru keeps track of all statistics directly related to a proc.  This
 * consists of RSS usage and nswap information and aggregate numbers for all
 * former lwps of this proc.
 *
 * proc.p_cru is the sum of all stats of reaped children.
 *
 * lwp.lwp_ru contains the stats directly related to one specific lwp, meaning
 * packet, scheduler switch or page fault counts, etc.  This information gets
 * added to lwp.lwp_proc.p_ru when the lwp exits.
 */
calcru_proc(struct proc *p, struct rusage *ru)
{
	struct timeval upt, spt;

	FOREACH_LWP_IN_PROC(lp, p) {
		calcru(lp, &upt, &spt);
		timevaladd(&ru->ru_utime, &upt);
		timevaladd(&ru->ru_stime, &spt);
		for (rip1 = &ru->ru_first, rip2 = &lp->lwp_ru.ru_first;
		     rip1 <= &ru->ru_last;
sys_getrusage(struct getrusage_args *uap)
{
		calcru_proc(curproc, rup);

	case RUSAGE_CHILDREN:
		rup = &curproc->p_cru;

	return (copyout((caddr_t)rup, (caddr_t)uap->rusage,
			sizeof(struct rusage)));
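
/*
 * Illustrative userland usage (not in the original source): reading back
 * the aggregated user time for the calling process:
 *
 *	struct rusage ru;
 *	if (getrusage(RUSAGE_SELF, &ru) == 0)
 *		printf("%ld.%06ld user sec\n",
 *		    (long)ru.ru_utime.tv_sec, (long)ru.ru_utime.tv_usec);
 */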
ruadd(struct rusage *ru, struct rusage *ru2)
{
	timevaladd(&ru->ru_utime, &ru2->ru_utime);
	timevaladd(&ru->ru_stime, &ru2->ru_stime);
	if (ru->ru_maxrss < ru2->ru_maxrss)
		ru->ru_maxrss = ru2->ru_maxrss;
	ip = &ru->ru_first;
	ip2 = &ru2->ru_first;
	for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
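
/*
 * Illustrative note (not in the original source): sys/resource.h defines
 * ru_first and ru_last as aliases for the first (ru_ixrss) and last
 * (ru_nivcsw) of the plain "long" counters in struct rusage, so the loop
 * above treats those members as an array and sums them pairwise.
 */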
/*
 * Find the uidinfo structure for a uid.  This structure is used to
 * track the total resource consumption (process count, socket buffer
 * size, etc.) for the uid and impose limits.
 */
	spin_init(&uihash_lock);
	uihashtbl = hashinit(maxproc / 16, M_UIDINFO, &uihash);
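
/*
 * Illustrative note (not in the original source): hashinit() rounds the
 * element count down to a power of two and returns (buckets - 1) through
 * its last argument.  For example, with maxproc == 6164 the request is
 * 6164 / 16 == 385, the table gets 256 buckets, and uihash becomes 255.
 */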
static struct uidinfo *
uilookup(uid_t uid)
{
	struct uihashhead *uipp;

	LIST_FOREACH(uip, uipp, ui_hash) {
		if (uip->ui_uid == uid)

static struct uidinfo *
uicreate(uid_t uid)
{
	struct uidinfo *uip, *tmp;

	/*
	 * Allocate space and check for a race.
	 */
	MALLOC(uip, struct uidinfo *, sizeof(*uip), M_UIDINFO, M_WAITOK);

	/*
	 * Initialize structure and enter it into the hash table.
	 */
	spin_init(&uip->ui_lock);
	uip->ui_ref = 1;	/* we're returning a ref */
	uip->ui_posixlocks = 0;
	varsymset_init(&uip->ui_varsymset, NULL);

	/*
	 * Somebody may have already created the uidinfo for this
	 * uid.  If so, return that instead.
	 */
	spin_lock_wr(&uihash_lock);

	varsymset_clean(&uip->ui_varsymset);
	spin_uninit(&uip->ui_lock);
	FREE(uip, M_UIDINFO);

	LIST_INSERT_HEAD(UIHASH(uid), uip, ui_hash);

	spin_unlock_wr(&uihash_lock);
	spin_lock_rd(&uihash_lock);

	spin_unlock_rd(&uihash_lock);

	spin_unlock_rd(&uihash_lock);
uifree(struct uidinfo *uip)
{
	spin_lock_wr(&uihash_lock);

	/*
	 * Note that we're taking a read lock even though we
	 * modify the structure, because we know nobody can find
	 * it now that we've locked uihash_lock.  If somebody
	 * can get to it through a stored pointer, the reference
	 * count will not be 0 and in that case we don't modify
	 * it.
	 */
	spin_lock_rd(&uip->ui_lock);
	if (uip->ui_ref != 0) {
		/*
		 * Someone found the uid and got a ref when we
		 * unlocked.  No need to free any more.
		 */
		spin_unlock_rd(&uip->ui_lock);

	if (uip->ui_sbsize != 0)
		/* XXX no %qd in kernel.  Truncate. */
		kprintf("freeing uidinfo: uid = %d, sbsize = %ld\n",
		    uip->ui_uid, (long)uip->ui_sbsize);
	if (uip->ui_proccnt != 0)
		kprintf("freeing uidinfo: uid = %d, proccnt = %ld\n",
		    uip->ui_uid, uip->ui_proccnt);

	LIST_REMOVE(uip, ui_hash);
	spin_unlock_wr(&uihash_lock);
	varsymset_clean(&uip->ui_varsymset);
	lockuninit(&uip->ui_varsymset.vx_lock);
	spin_unlock_rd(&uip->ui_lock);
	spin_uninit(&uip->ui_lock);
	FREE(uip, M_UIDINFO);
uihold(struct uidinfo *uip)
{
	atomic_add_int(&uip->ui_ref, 1);
	KKASSERT(uip->ui_ref > 0);

uidrop(struct uidinfo *uip)
{
	if (atomic_fetchadd_int(&uip->ui_ref, -1) == 1) {

	KKASSERT(uip->ui_ref > 0);

uireplace(struct uidinfo **puip, struct uidinfo *nuip)
/*
 * Change the count of processes a given user is using.  When 'max' is 0,
 * don't enforce a limit.
 */
chgproccnt(struct uidinfo *uip, int diff, int max)
{
	spin_lock_wr(&uip->ui_lock);
	/* don't allow them to exceed max, but allow subtraction */
	if (diff > 0 && uip->ui_proccnt + diff > max && max != 0) {

	uip->ui_proccnt += diff;
	if (uip->ui_proccnt < 0)
		kprintf("negative proccnt for uid = %d\n", uip->ui_uid);

	spin_unlock_wr(&uip->ui_lock);
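
/*
 * Illustrative usage sketch (not in the original source; the caller shown
 * here is hypothetical): a fork path could charge the child against the
 * parent's RLIMIT_NPROC, skipping the limit for root by passing max == 0:
 *
 *	if (!chgproccnt(uip, 1, (uid != 0) ?
 *	    p->p_rlimit[RLIMIT_NPROC].rlim_cur : 0))
 *		return (EAGAIN);
 *
 * This assumes chgproccnt() reports failure (the limit branch above) by
 * returning 0; the return statements themselves are elided here.
 */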
/*
 * Change the total socket buffer size a user has used.
 */
chgsbsize(struct uidinfo *uip, u_long *hiwat, u_long to, rlim_t max)
{
	spin_lock_wr(&uip->ui_lock);
	new = uip->ui_sbsize + to - *hiwat;

	/*
	 * If we are trying to increase the socket buffer size, scale down
	 * the hiwater mark when we exceed the user's allowed socket buffer
	 * space.
	 *
	 * We can't scale down too much or we will blow up atomic packet
	 * operations.
	 */
	if (to > *hiwat && to > MCLBYTES && new > max) {

	uip->ui_sbsize = new;

	spin_unlock_wr(&uip->ui_lock);
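
/*
 * Illustrative usage sketch (not in the original source; the caller shown
 * here is hypothetical): a socket-buffer reservation path would pass the
 * buffer's current high-water mark by reference together with the requested
 * size and the owner's RLIMIT_SBSIZE ceiling, e.g.
 *
 *	chgsbsize(cred->cr_uidinfo, &sb->sb_hiwat, newsize,
 *	    p->p_rlimit[RLIMIT_SBSIZE].rlim_cur);
 *
 * Per the comment above, the intent is that *hiwat ends up at the (possibly
 * scaled-down) new size while ui_sbsize is charged for the difference; the
 * scaling code itself is elided here.
 */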