 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California. All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_resource.c	8.5 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/kern_resource.c,v 1.55.2.5 2001/11/03 01:41:08 ps Exp $
 * $DragonFly: src/sys/kern/kern_resource.c,v 1.35 2008/05/27 05:25:34 dillon Exp $
#include "opt_compat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kern_syscall.h>
#include <sys/kernel.h>
#include <sys/resourcevar.h>
#include <sys/malloc.h>
#include <sys/lockf.h>

#include <vm/vm_param.h>
#include <vm/vm_map.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>
#include <sys/mplock2.h>
static int donice (struct proc *chgp, int n);
static int doionice (struct proc *chgp, int n);

static MALLOC_DEFINE(M_UIDINFO, "uidinfo", "uidinfo structures");

#define	UIHASH(uid)	(&uihashtbl[(uid) & uihash])
static struct spinlock uihash_lock;
static LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
static u_long uihash;		/* size of hash table - 1 */

static struct uidinfo *uicreate (uid_t uid);
static struct uidinfo *uilookup (uid_t uid);
/*
 * Resource controls and accounting.
 */

struct getpriority_info {

static int getpriority_callback(struct proc *p, void *data);

sys_getpriority(struct getpriority_args *uap)
	struct getpriority_info info;
	struct proc *curp = curproc;
	int low = PRIO_MAX + 1;

	switch (uap->which) {
		if (!PRISON_CHECK(curp->p_ucred, p->p_ucred))
		else if ((pg = pgfind(uap->who)) == NULL)
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			if ((PRISON_CHECK(curp->p_ucred, p->p_ucred) && p->p_nice < low))
		uap->who = curp->p_ucred->cr_uid;
		allproc_scan(getpriority_callback, &info);
	if (low == PRIO_MAX + 1) {
	uap->sysmsg_result = low;

/*
 * Figure out the current lowest nice priority for processes owned
 * by the specified user.
 */
getpriority_callback(struct proc *p, void *data)
	struct getpriority_info *info = data;

	if (PRISON_CHECK(curproc->p_ucred, p->p_ucred) &&
	    p->p_ucred->cr_uid == info->who &&
	    p->p_nice < info->low) {
		info->low = p->p_nice;
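/*
 * Illustrative userland counterpart (not part of the original file):
 * the PRIO_USER case above is what makes the syscall return the lowest
 * (most favorable) nice value across all of a user's processes, e.g.
 * for an assumed uid of 1001:
 *
 *	errno = 0;
 *	int low = getpriority(PRIO_USER, 1001);
 *	if (low == -1 && errno != 0)
 *		err(1, "getpriority");
 *
 * The errno dance is required because -1 is a legal nice value and
 * therefore a legal return.
 */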
struct setpriority_info {

static int setpriority_callback(struct proc *p, void *data);

sys_setpriority(struct setpriority_args *uap)
	struct setpriority_info info;
	struct proc *curp = curproc;
	int found = 0, error = 0;

	switch (uap->which) {
		if (!PRISON_CHECK(curp->p_ucred, p->p_ucred))
		error = donice(p, uap->prio);
		else if ((pg = pgfind(uap->who)) == NULL)
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			if (PRISON_CHECK(curp->p_ucred, p->p_ucred)) {
				error = donice(p, uap->prio);
		uap->who = curp->p_ucred->cr_uid;
		info.prio = uap->prio;
		allproc_scan(setpriority_callback, &info);

setpriority_callback(struct proc *p, void *data)
	struct setpriority_info *info = data;

	if (p->p_ucred->cr_uid == info->who &&
	    PRISON_CHECK(curproc->p_ucred, p->p_ucred)) {
		error = donice(p, info->prio);
donice(struct proc *chgp, int n)
	struct proc *curp = curproc;
	struct ucred *cr = curp->p_ucred;

	if (cr->cr_uid && cr->cr_ruid &&
	    cr->cr_uid != chgp->p_ucred->cr_uid &&
	    cr->cr_ruid != chgp->p_ucred->cr_uid)

	if (n < chgp->p_nice && priv_check_cred(cr, PRIV_SCHED_SETPRIORITY, 0))

	FOREACH_LWP_IN_PROC(lp, chgp)
		chgp->p_usched->resetpriority(lp);
struct ioprio_get_info {

static int ioprio_get_callback(struct proc *p, void *data);

sys_ioprio_get(struct ioprio_get_args *uap)
	struct ioprio_get_info info;
	struct proc *curp = curproc;
	int high = IOPRIO_MIN - 2;

	switch (uap->which) {
		if (!PRISON_CHECK(curp->p_ucred, p->p_ucred))
		else if ((pg = pgfind(uap->who)) == NULL)
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			if ((PRISON_CHECK(curp->p_ucred, p->p_ucred) && p->p_ionice > high))
		uap->who = curp->p_ucred->cr_uid;
		allproc_scan(ioprio_get_callback, &info);
	if (high == IOPRIO_MIN - 2) {
	uap->sysmsg_result = high;
/*
 * Figure out the current highest I/O priority (ionice value) for
 * processes owned by the specified user.
 */
ioprio_get_callback(struct proc *p, void *data)
	struct ioprio_get_info *info = data;

	if (PRISON_CHECK(curproc->p_ucred, p->p_ucred) &&
	    p->p_ucred->cr_uid == info->who &&
	    p->p_ionice > info->high) {
		info->high = p->p_ionice;
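/*
 * Illustrative note: the ioprio_get()/ioprio_set() pair deliberately
 * mirrors the getpriority()/setpriority() which/who addressing modes
 * (process, process group, user), but operates on p_ionice, the
 * per-process I/O priority analogue of the nice value, rather than on
 * p_nice.
 */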
struct ioprio_set_info {

static int ioprio_set_callback(struct proc *p, void *data);

sys_ioprio_set(struct ioprio_set_args *uap)
	struct ioprio_set_info info;
	struct proc *curp = curproc;
	int found = 0, error = 0;

	switch (uap->which) {
		if (!PRISON_CHECK(curp->p_ucred, p->p_ucred))
		error = doionice(p, uap->prio);
		else if ((pg = pgfind(uap->who)) == NULL)
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			if (PRISON_CHECK(curp->p_ucred, p->p_ucred)) {
				error = doionice(p, uap->prio);
		uap->who = curp->p_ucred->cr_uid;
		info.prio = uap->prio;
		allproc_scan(ioprio_set_callback, &info);

ioprio_set_callback(struct proc *p, void *data)
	struct ioprio_set_info *info = data;

	if (p->p_ucred->cr_uid == info->who &&
	    PRISON_CHECK(curproc->p_ucred, p->p_ucred)) {
		error = doionice(p, info->prio);
doionice(struct proc *chgp, int n)
	struct proc *curp = curproc;
	struct ucred *cr = curp->p_ucred;

	if (cr->cr_uid && cr->cr_ruid &&
	    cr->cr_uid != chgp->p_ucred->cr_uid &&
	    cr->cr_ruid != chgp->p_ucred->cr_uid)

	if (n < chgp->p_ionice && priv_check_cred(cr, PRIV_SCHED_SETPRIORITY, 0))
sys_lwp_rtprio(struct lwp_rtprio_args *uap)
	struct proc *p = curproc;
	struct ucred *cr = curthread->td_ucred;

	error = copyin(uap->rtp, &rtp, sizeof(struct rtprio));

	/* curproc already loaded on p */

	if (uap->tid == -1) {
		/* sadly, tid can be 0 so we can't use 0 here */
		lp = curthread->td_lwp;
		lp = lwp_rb_tree_RB_LOOKUP(&p->p_lwp_tree, uap->tid);

	switch (uap->function) {
		error = copyout(&lp->lwp_rtprio, uap->rtp,
		    sizeof(struct rtprio));
		if (cr->cr_uid && cr->cr_ruid &&
		    cr->cr_uid != p->p_ucred->cr_uid &&
		    cr->cr_ruid != p->p_ucred->cr_uid) {

		/* disallow setting rtprio in most cases if not superuser */
		if (priv_check_cred(cr, PRIV_SCHED_RTPRIO, 0)) {
			/* can't set someone else's */
			if (uap->pid) { /* XXX */

			/* can't set realtime priority */
			/*
			 * Realtime priority has to be restricted for reasons which should be
			 * obvious. However, for idle priority, there is a potential for
			 * system deadlock if an idleprio process gains a lock on a resource
			 * that other processes need (and the idleprio process can't run
			 * due to a CPU-bound normal process). Fix me! XXX
			 */
			if (RTP_PRIO_IS_REALTIME(rtp.type)) {

		case RTP_PRIO_REALTIME:
		case RTP_PRIO_NORMAL:
			if (rtp.prio > RTP_PRIO_MAX) {
			lp->lwp_rtprio = rtp;
/*
 * Set realtime priority
 */

sys_rtprio(struct rtprio_args *uap)
	struct proc *curp = curproc;
	struct ucred *cr = curthread->td_ucred;

	error = copyin(uap->rtp, &rtp, sizeof(struct rtprio));

	lp = FIRST_LWP_IN_PROC(p);
	switch (uap->function) {
		error = copyout(&lp->lwp_rtprio, uap->rtp,
		    sizeof(struct rtprio));
		if (cr->cr_uid && cr->cr_ruid &&
		    cr->cr_uid != p->p_ucred->cr_uid &&
		    cr->cr_ruid != p->p_ucred->cr_uid) {

		/* disallow setting rtprio in most cases if not superuser */
		if (priv_check_cred(cr, PRIV_SCHED_RTPRIO, 0)) {
			/* can't set someone else's */

			/* can't set realtime priority */
			/*
			 * Realtime priority has to be restricted for reasons which should be
			 * obvious. However, for idle priority, there is a potential for
			 * system deadlock if an idleprio process gains a lock on a resource
			 * that other processes need (and the idleprio process can't run
			 * due to a CPU-bound normal process). Fix me! XXX
			 */
			if (RTP_PRIO_IS_REALTIME(rtp.type)) {

		case RTP_PRIO_REALTIME:
		case RTP_PRIO_NORMAL:
			if (rtp.prio > RTP_PRIO_MAX) {
			lp->lwp_rtprio = rtp;
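/*
 * Illustrative userland sketch (not in the original source; the values
 * are arbitrary examples). The RTP_SET path above would be exercised
 * roughly as follows:
 *
 *	struct rtprio rtp;
 *
 *	rtp.type = RTP_PRIO_NORMAL;
 *	rtp.prio = 0;
 *	if (rtprio(RTP_SET, 0, &rtp) != 0)
 *		err(1, "rtprio");
 *
 * A pid of 0 addresses the calling process.
 */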
sys_setrlimit(struct __setrlimit_args *uap)
	error = copyin(uap->rlp, &alim, sizeof(alim));

	error = kern_setrlimit(uap->which, &alim);

sys_getrlimit(struct __getrlimit_args *uap)
	error = kern_getrlimit(uap->which, &lim);

	error = copyout(&lim, uap->rlp, sizeof(*uap->rlp));
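/*
 * The kern_setrlimit()/kern_getrlimit() split keeps the logic reusable
 * from elsewhere in the kernel; userland reaches it through the usual
 * API. Illustrative sketch (not part of this file):
 *
 *	struct rlimit rl;
 *
 *	if (getrlimit(RLIMIT_NOFILE, &rl) == 0) {
 *		rl.rlim_cur = rl.rlim_max;	(raise soft to hard limit)
 *		(void)setrlimit(RLIMIT_NOFILE, &rl);
 *	}
 */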
/*
 * Transform the running time and tick information in lwp lp's thread
 * into user, system, and interrupt time usage.
 *
 * Since we are limited to statclock tick granularity this is a statistical
 * calculation which will be correct over the long haul, but should not be
 * expected to measure fine grained deltas.
 *
 * It is possible to catch an lwp in the midst of being created, so
 * check whether lwp_thread is NULL or not.
 */
calcru(struct lwp *lp, struct timeval *up, struct timeval *sp)
	/*
	 * Calculate at the statclock level. YYY if the thread is owned by
	 * another cpu we need to forward the request to the other cpu, or
	 * have a token to interlock the information in order to avoid racing
	 * thread destruction.
	 */
	if ((td = lp->lwp_thread) != NULL) {
		up->tv_sec = td->td_uticks / 1000000;
		up->tv_usec = td->td_uticks % 1000000;
		sp->tv_sec = td->td_sticks / 1000000;
		sp->tv_usec = td->td_sticks % 1000000;
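/*
 * Worked example (illustrative): the divisions above show td_uticks
 * and td_sticks are kept in microseconds, so td_uticks = 2345678
 * yields up->tv_sec = 2 and up->tv_usec = 345678, i.e. 2.345678
 * seconds of user time.
 */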
/*
 * Aggregate resource statistics of all lwps of a process.
 *
 * proc.p_ru keeps track of all statistics directly related to a proc. This
 * consists of RSS usage and nswap information and aggregate numbers for all
 * former lwps of this proc.
 *
 * proc.p_cru is the sum of all stats of reaped children.
 *
 * lwp.lwp_ru contains the stats directly related to one specific lwp, meaning
 * packet, scheduler switch or page fault counts, etc. This information gets
 * added to lwp.lwp_proc.p_ru when the lwp exits.
 */
calcru_proc(struct proc *p, struct rusage *ru)
	struct timeval upt, spt;

	FOREACH_LWP_IN_PROC(lp, p) {
		calcru(lp, &upt, &spt);
		timevaladd(&ru->ru_utime, &upt);
		timevaladd(&ru->ru_stime, &spt);
		for (rip1 = &ru->ru_first, rip2 = &lp->lwp_ru.ru_first;
		    rip1 <= &ru->ru_last;
sys_getrusage(struct getrusage_args *uap)
		calcru_proc(curproc, rup);

	case RUSAGE_CHILDREN:
		rup = &curproc->p_cru;

	error = copyout(rup, uap->rusage, sizeof(struct rusage));
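/*
 * Illustrative userland counterpart (not part of this file):
 *
 *	struct rusage ru;
 *
 *	if (getrusage(RUSAGE_SELF, &ru) == 0)
 *		printf("user %lds sys %lds\n",
 *		    (long)ru.ru_utime.tv_sec, (long)ru.ru_stime.tv_sec);
 */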
ruadd(struct rusage *ru, struct rusage *ru2)
	timevaladd(&ru->ru_utime, &ru2->ru_utime);
	timevaladd(&ru->ru_stime, &ru2->ru_stime);
	if (ru->ru_maxrss < ru2->ru_maxrss)
		ru->ru_maxrss = ru2->ru_maxrss;
	ip = &ru->ru_first;
	ip2 = &ru2->ru_first;
	for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
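/*
 * The loop above leans on the ru_first/ru_last convention from
 * <sys/resource.h>: the integral counters of struct rusage are laid
 * out contiguously, so two rusages can be summed as flat arrays of
 * longs (classically with a loop body of *ip++ += *ip2++;).
 */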
/*
 * Find the uidinfo structure for a uid. This structure is used to
 * track the total resource consumption (process count, socket buffer
 * size, etc.) for the uid and impose limits.
 */

	spin_init(&uihash_lock);
	uihashtbl = hashinit(maxproc / 16, M_UIDINFO, &uihash);
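/*
 * Worked example (illustrative numbers): hashinit() rounds its size
 * hint down to a power of two and stores that size minus one in
 * uihash. With maxproc = 1044 the hint is 1044 / 16 = 65, giving 64
 * buckets and uihash = 63, so UIHASH(1000) selects bucket
 * 1000 & 63 == 40.
 */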
/*
 * NOTE: Must be called with uihash_lock held
 */
static struct uidinfo *
	struct uihashhead *uipp;

	LIST_FOREACH(uip, uipp, ui_hash) {
		if (uip->ui_uid == uid)
/*
 * Helper function to create a uid that could not be found.
 * This function will properly deal with races.
 */
static struct uidinfo *
	struct uidinfo *uip, *tmp;

	/*
	 * Allocate space and check for a race
	 */
	uip = kmalloc(sizeof(*uip), M_UIDINFO, M_WAITOK|M_ZERO);

	/*
	 * Initialize structure and enter it into the hash table
	 */
	spin_init(&uip->ui_lock);
	uip->ui_ref = 1;	/* we're returning a ref */
	varsymset_init(&uip->ui_varsymset, NULL);

	/*
	 * Somebody may have already created the uidinfo for this
	 * uid. If so, return that instead.
	 */
	spin_lock(&uihash_lock);
	spin_unlock(&uihash_lock);
	spin_uninit(&uip->ui_lock);
	varsymset_clean(&uip->ui_varsymset);
	FREE(uip, M_UIDINFO);

	LIST_INSERT_HEAD(UIHASH(uid), uip, ui_hash);
	spin_unlock(&uihash_lock);
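/*
 * The sequence above is the classic optimistic-allocation pattern:
 * allocate and fully initialize the new uidinfo before taking
 * uihash_lock, then re-run the lookup under the lock. If another
 * thread won the race, tear the fresh copy back down and return the
 * winner's structure; otherwise hook ours into the hash. This keeps
 * the potentially blocking kmalloc() out of the spinlock section.
 */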
	spin_lock(&uihash_lock);
	spin_unlock(&uihash_lock);
	spin_unlock(&uihash_lock);
/*
 * Helper function to remove a uidinfo whose reference count is
 * transitioning from 1->0. The reference count is 1 on call.
 *
 * Zero is returned on success, otherwise non-zero and the
 * uip has not been removed.
 */
uifree(struct uidinfo *uip)
	/*
	 * If we are still the only holder after acquiring the uihash_lock
	 * we can safely unlink the uip and destroy it. Otherwise we lost
	 * a race and must fail.
	 */
	spin_lock(&uihash_lock);
	if (uip->ui_ref != 1) {
		spin_unlock(&uihash_lock);
	LIST_REMOVE(uip, ui_hash);
	spin_unlock(&uihash_lock);
	/*
	 * The uip is now orphaned and we can destroy it at our
	 * leisure.
	 */
	if (uip->ui_sbsize != 0)
		kprintf("freeing uidinfo: uid = %d, sbsize = %jd\n",
		    uip->ui_uid, (intmax_t)uip->ui_sbsize);
	if (uip->ui_proccnt != 0)
		kprintf("freeing uidinfo: uid = %d, proccnt = %ld\n",
		    uip->ui_uid, uip->ui_proccnt);

	varsymset_clean(&uip->ui_varsymset);
	lockuninit(&uip->ui_varsymset.vx_lock);
	spin_uninit(&uip->ui_lock);
	FREE(uip, M_UIDINFO);
uihold(struct uidinfo *uip)
	atomic_add_int(&uip->ui_ref, 1);
	KKASSERT(uip->ui_ref >= 0);
/*
 * NOTE: It is important for us to not drop the ref count to 0
 *	 because this can cause a 2->0/2->0 race with another
 *	 concurrent dropper. Losing the race in that situation
 *	 can cause uip to become stale for one of the other
 *	 concurrent droppers.
 */

uidrop(struct uidinfo *uip)
	KKASSERT(uip->ui_ref > 0);

	if (uifree(uip) == 0)
	} else if (atomic_cmpset_int(&uip->ui_ref, ref, ref - 1)) {
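/*
 * Sketch of the intended flow (the surrounding retry loop is not shown
 * here): read ui_ref; if it is 1 we may be the last holder, so try
 * uifree(), which revalidates under uihash_lock. Otherwise attempt an
 * atomic ref -> ref - 1 transition; if another cpu changed the count
 * first, the cmpset fails and the operation is retried. The count is
 * never blindly decremented to zero, avoiding the 2->0/2->0 race
 * described above.
 */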
uireplace(struct uidinfo **puip, struct uidinfo *nuip)

/*
 * Change the count associated with number of processes
 * a given user is using. When 'max' is 0, don't enforce a limit.
 */
chgproccnt(struct uidinfo *uip, int diff, int max)
	spin_lock(&uip->ui_lock);
	/* don't allow them to exceed max, but allow subtraction */
	if (diff > 0 && uip->ui_proccnt + diff > max && max != 0) {
	uip->ui_proccnt += diff;
	if (uip->ui_proccnt < 0)
		kprintf("negative proccnt for uid = %d\n", uip->ui_uid);
	spin_unlock(&uip->ui_lock);
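/*
 * Typical usage (illustrative; exact call sites vary): the fork path
 * bumps the count with something like
 *
 *	chgproccnt(uip, 1, p->p_rlimit[RLIMIT_NPROC].rlim_cur);
 *
 * and refuses the fork when the limit check fails, while process
 * teardown calls chgproccnt(uip, -1, 0), which cannot fail since
 * max == 0 disables the limit check.
 */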
/*
 * Change the total socket buffer size a user has used.
 */
chgsbsize(struct uidinfo *uip, u_long *hiwat, u_long to, rlim_t max)
	spin_lock(&uip->ui_lock);
	new = uip->ui_sbsize + to - *hiwat;

	/*
	 * If we are trying to increase the socket buffer size, scale down
	 * the hi water mark when we exceed the user's allowed socket
	 * buffer space.
	 *
	 * We can't scale down too much or we will blow up atomic packet
	 * operations.
	 */
	if (to > *hiwat && to > MCLBYTES && new > max) {
		to = to * max / new;
	uip->ui_sbsize = new;
	spin_unlock(&uip->ui_lock);
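/*
 * Worked example (illustrative numbers): with max = 256 KB and a
 * request that would make new = 512 KB, the scaling to = to * max / new
 * halves the requested high water mark, keeping the user's total near
 * the limit instead of rejecting the buffer outright. The to > MCLBYTES
 * test keeps small buffers from being scaled below what atomic packet
 * operations need.
 */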