priv: Narrow down privileges
/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_resource.c	8.5 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/kern_resource.c,v 1.55.2.5 2001/11/03 01:41:08 ps Exp $
 * $DragonFly: src/sys/kern/kern_resource.c,v 1.35 2008/05/27 05:25:34 dillon Exp $
 */

#include "opt_compat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/file.h>
#include <sys/kern_syscall.h>
#include <sys/kernel.h>
#include <sys/resourcevar.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/time.h>
#include <sys/lockf.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>

static int donice (struct proc *chgp, int n);

static MALLOC_DEFINE(M_UIDINFO, "uidinfo", "uidinfo structures");
#define	UIHASH(uid)	(&uihashtbl[(uid) & uihash])
static struct spinlock uihash_lock;
static LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
static u_long uihash;		/* size of hash table - 1 */

static struct uidinfo *uicreate (uid_t uid);
static struct uidinfo *uilookup (uid_t uid);

/*
 * Resource controls and accounting.
 */

struct getpriority_info {
        int     low;
        int     who;
};

static int getpriority_callback(struct proc *p, void *data);

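/*
 * getpriority() - return the lowest nice value (i.e. the highest
 * scheduling priority) found among the processes selected by
 * (which, who), or ESRCH if no matching process is visible.
 */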
int
sys_getpriority(struct getpriority_args *uap)
{
        struct getpriority_info info;
        struct proc *curp = curproc;
        struct proc *p;
        int low = PRIO_MAX + 1;

        switch (uap->which) {
        case PRIO_PROCESS:
                if (uap->who == 0)
                        p = curp;
                else
                        p = pfind(uap->who);
                if (p == 0)
                        break;
                if (!PRISON_CHECK(curp->p_ucred, p->p_ucred))
                        break;
                low = p->p_nice;
                break;

        case PRIO_PGRP:
        {
                struct pgrp *pg;

                if (uap->who == 0)
                        pg = curp->p_pgrp;
                else if ((pg = pgfind(uap->who)) == NULL)
                        break;
                LIST_FOREACH(p, &pg->pg_members, p_pglist) {
                        if ((PRISON_CHECK(curp->p_ucred, p->p_ucred) && p->p_nice < low))
                                low = p->p_nice;
                }
                break;
        }
        case PRIO_USER:
                if (uap->who == 0)
                        uap->who = curp->p_ucred->cr_uid;
                info.low = low;
                info.who = uap->who;
                allproc_scan(getpriority_callback, &info);
                low = info.low;
                break;

        default:
                return (EINVAL);
        }
        if (low == PRIO_MAX + 1)
                return (ESRCH);
        uap->sysmsg_result = low;
        return (0);
}

/*
 * Figure out the current lowest nice priority for processes owned
 * by the specified user.
 */
static
int
getpriority_callback(struct proc *p, void *data)
{
        struct getpriority_info *info = data;

        if (PRISON_CHECK(curproc->p_ucred, p->p_ucred) &&
            p->p_ucred->cr_uid == info->who &&
            p->p_nice < info->low) {
                info->low = p->p_nice;
        }
        return(0);
}

struct setpriority_info {
        int     prio;
        int     who;
        int     error;
        int     found;
};

static int setpriority_callback(struct proc *p, void *data);

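/*
 * setpriority() - apply a nice value to the processes selected by
 * (which, who) via donice().  Returns ESRCH if no process matched,
 * otherwise the last error (if any) reported by donice().
 */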
int
sys_setpriority(struct setpriority_args *uap)
{
        struct setpriority_info info;
        struct proc *curp = curproc;
        struct proc *p;
        int found = 0, error = 0;

        switch (uap->which) {
        case PRIO_PROCESS:
                if (uap->who == 0)
                        p = curp;
                else
                        p = pfind(uap->who);
                if (p == 0)
                        break;
                if (!PRISON_CHECK(curp->p_ucred, p->p_ucred))
                        break;
                error = donice(p, uap->prio);
                found++;
                break;

        case PRIO_PGRP:
        {
                struct pgrp *pg;

                if (uap->who == 0)
                        pg = curp->p_pgrp;
                else if ((pg = pgfind(uap->who)) == NULL)
                        break;
                LIST_FOREACH(p, &pg->pg_members, p_pglist) {
                        if (PRISON_CHECK(curp->p_ucred, p->p_ucred)) {
                                error = donice(p, uap->prio);
                                found++;
                        }
                }
                break;
        }
        case PRIO_USER:
                if (uap->who == 0)
                        uap->who = curp->p_ucred->cr_uid;
                info.prio = uap->prio;
                info.who = uap->who;
                info.error = 0;
                info.found = 0;
                allproc_scan(setpriority_callback, &info);
                error = info.error;
                found = info.found;
                break;

        default:
                return (EINVAL);
        }
        if (found == 0)
                return (ESRCH);
        return (error);
}

static
int
setpriority_callback(struct proc *p, void *data)
{
        struct setpriority_info *info = data;
        int error;

        if (p->p_ucred->cr_uid == info->who &&
            PRISON_CHECK(curproc->p_ucred, p->p_ucred)) {
                error = donice(p, info->prio);
                if (error)
                        info->error = error;
                ++info->found;
        }
        return(0);
}

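/*
 * Apply nice value 'n' to process 'chgp'.  The value is clamped to
 * [PRIO_MIN, PRIO_MAX]; lowering the nice value requires
 * PRIV_SCHED_SETPRIORITY.  The new value is propagated to every lwp
 * of the process via the userland scheduler.
 */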
static int
donice(struct proc *chgp, int n)
{
        struct proc *curp = curproc;
        struct ucred *cr = curp->p_ucred;
        struct lwp *lp;

        if (cr->cr_uid && cr->cr_ruid &&
            cr->cr_uid != chgp->p_ucred->cr_uid &&
            cr->cr_ruid != chgp->p_ucred->cr_uid)
                return (EPERM);
        if (n > PRIO_MAX)
                n = PRIO_MAX;
        if (n < PRIO_MIN)
                n = PRIO_MIN;
        if (n < chgp->p_nice && priv_check_cred(cr, PRIV_SCHED_SETPRIORITY, 0))
                return (EACCES);
        chgp->p_nice = n;
        FOREACH_LWP_IN_PROC(lp, chgp)
                chgp->p_usched->resetpriority(lp);
        return (0);
}

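/*
 * lwp_rtprio() - look up or set the realtime priority of a single lwp,
 * selected by pid (0 for the current process) and tid (-1 for the
 * calling thread).
 */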
int
sys_lwp_rtprio(struct lwp_rtprio_args *uap)
{
        struct proc *p = curproc;
        struct lwp *lp;
        struct rtprio rtp;
        struct ucred *cr = p->p_ucred;
        int error;

        error = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
        if (error)
                return error;

        if (uap->pid < 0) {
                return EINVAL;
        } else if (uap->pid == 0) {
                /* curproc already loaded on p */
        } else {
                p = pfind(uap->pid);
        }

        if (p == 0) {
                return ESRCH;
        }

        if (uap->tid < -1) {
                return EINVAL;
        } else if (uap->tid == -1) {
                /*
                 * sadly, tid can be 0 so we can't use 0 here
                 * like sys_rtprio()
                 */
                lp = curthread->td_lwp;
        } else {
                lp = lwp_rb_tree_RB_LOOKUP(&p->p_lwp_tree, uap->tid);
                if (lp == NULL)
                        return ESRCH;
        }

        switch (uap->function) {
        case RTP_LOOKUP:
                return (copyout(&lp->lwp_rtprio, uap->rtp,
                                sizeof(struct rtprio)));
        case RTP_SET:
                if (cr->cr_uid && cr->cr_ruid &&
                    cr->cr_uid != p->p_ucred->cr_uid &&
                    cr->cr_ruid != p->p_ucred->cr_uid) {
                        return EPERM;
                }
                /* disallow setting rtprio in most cases if not superuser */
                if (priv_check_cred(cr, PRIV_SCHED_RTPRIO, 0)) {
                        /* can't set someone else's */
                        if (uap->pid) { /* XXX */
                                return EPERM;
                        }
                        /* can't set realtime priority */
/*
 * Realtime priority has to be restricted for reasons which should be
 * obvious.  However, for idle priority, there is a potential for
 * system deadlock if an idleprio process gains a lock on a resource
 * that other processes need (and the idleprio process can't run
 * due to a CPU-bound normal process).  Fix me!  XXX
 */
                        if (RTP_PRIO_IS_REALTIME(rtp.type)) {
                                return EPERM;
                        }
                }
                switch (rtp.type) {
#ifdef RTP_PRIO_FIFO
                case RTP_PRIO_FIFO:
#endif
                case RTP_PRIO_REALTIME:
                case RTP_PRIO_NORMAL:
                case RTP_PRIO_IDLE:
                        if (rtp.prio > RTP_PRIO_MAX)
                                return EINVAL;
                        lp->lwp_rtprio = rtp;
                        return 0;
                default:
                        return EINVAL;
                }
        default:
                return EINVAL;
        }
        panic("can't get here");
}

/*
 * Set realtime priority
 */
/* ARGSUSED */
int
sys_rtprio(struct rtprio_args *uap)
{
        struct proc *curp = curproc;
        struct proc *p;
        struct lwp *lp;
        struct ucred *cr = curp->p_ucred;
        struct rtprio rtp;
        int error;

        error = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
        if (error)
                return (error);

        if (uap->pid == 0)
                p = curp;
        else
                p = pfind(uap->pid);

        if (p == 0)
                return (ESRCH);

        /* XXX lwp */
        lp = FIRST_LWP_IN_PROC(p);
        switch (uap->function) {
        case RTP_LOOKUP:
                return (copyout(&lp->lwp_rtprio, uap->rtp, sizeof(struct rtprio)));
        case RTP_SET:
                if (cr->cr_uid && cr->cr_ruid &&
                    cr->cr_uid != p->p_ucred->cr_uid &&
                    cr->cr_ruid != p->p_ucred->cr_uid)
                        return (EPERM);
                /* disallow setting rtprio in most cases if not superuser */
                if (priv_check_cred(cr, PRIV_SCHED_RTPRIO, 0)) {
                        /* can't set someone else's */
                        if (uap->pid)
                                return (EPERM);
                        /* can't set realtime priority */
/*
 * Realtime priority has to be restricted for reasons which should be
 * obvious.  However, for idle priority, there is a potential for
 * system deadlock if an idleprio process gains a lock on a resource
 * that other processes need (and the idleprio process can't run
 * due to a CPU-bound normal process).  Fix me!  XXX
 */
                        if (RTP_PRIO_IS_REALTIME(rtp.type))
                                return (EPERM);
                }
                switch (rtp.type) {
#ifdef RTP_PRIO_FIFO
                case RTP_PRIO_FIFO:
#endif
                case RTP_PRIO_REALTIME:
                case RTP_PRIO_NORMAL:
                case RTP_PRIO_IDLE:
                        if (rtp.prio > RTP_PRIO_MAX)
                                return (EINVAL);
                        lp->lwp_rtprio = rtp;
                        return (0);
                default:
                        return (EINVAL);
                }

        default:
                return (EINVAL);
        }
}

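/*
 * setrlimit() - copy the new limit in from userland and hand it to
 * kern_setrlimit() for validation and installation.
 */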
int
sys_setrlimit(struct __setrlimit_args *uap)
{
        struct rlimit alim;
        int error;

        error = copyin(uap->rlp, &alim, sizeof(alim));
        if (error)
                return (error);

        error = kern_setrlimit(uap->which, &alim);

        return (error);
}

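/*
 * getrlimit() - fetch the requested resource limit via kern_getrlimit()
 * and copy it out to userland.
 */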
int
sys_getrlimit(struct __getrlimit_args *uap)
{
        struct rlimit lim;
        int error;

        error = kern_getrlimit(uap->which, &lim);

        if (error == 0)
                error = copyout(&lim, uap->rlp, sizeof(*uap->rlp));
        return error;
}

/*
 * Transform the running time and tick information in lwp lp's thread into user,
 * system, and interrupt time usage.
 *
 * Since we are limited to statclock tick granularity this is a statistical
 * calculation which will be correct over the long haul, but should not be
 * expected to measure fine grained deltas.
 *
 * It is possible to catch a lwp in the midst of being created, so
 * check whether lwp_thread is NULL or not.
 */
void
calcru(struct lwp *lp, struct timeval *up, struct timeval *sp)
{
        struct thread *td;

        /*
         * Calculate at the statclock level.  YYY if the thread is owned by
         * another cpu we need to forward the request to the other cpu, or
         * have a token to interlock the information in order to avoid racing
         * thread destruction.
         */
        if ((td = lp->lwp_thread) != NULL) {
                crit_enter();
                up->tv_sec = td->td_uticks / 1000000;
                up->tv_usec = td->td_uticks % 1000000;
                sp->tv_sec = td->td_sticks / 1000000;
                sp->tv_usec = td->td_sticks % 1000000;
                crit_exit();
        }
}

/*
 * Aggregate resource statistics of all lwps of a process.
 *
 * proc.p_ru keeps track of all statistics directly related to a proc.  This
 * consists of RSS usage and nswap information and aggregate numbers for all
 * former lwps of this proc.
 *
 * proc.p_cru is the sum of all stats of reaped children.
 *
 * lwp.lwp_ru contains the stats directly related to one specific lwp, meaning
 * packet, scheduler switch or page fault counts, etc.  This information gets
 * added to lwp.lwp_proc.p_ru when the lwp exits.
 */
void
calcru_proc(struct proc *p, struct rusage *ru)
{
        struct timeval upt, spt;
        long *rip1, *rip2;
        struct lwp *lp;

        *ru = p->p_ru;

        FOREACH_LWP_IN_PROC(lp, p) {
                calcru(lp, &upt, &spt);
                timevaladd(&ru->ru_utime, &upt);
                timevaladd(&ru->ru_stime, &spt);
                for (rip1 = &ru->ru_first, rip2 = &lp->lwp_ru.ru_first;
                     rip1 <= &ru->ru_last;
                     rip1++, rip2++)
                        *rip1 += *rip2;
        }
}


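/*
 * getrusage() - return resource usage for the calling process itself
 * (aggregated over its lwps) or for its reaped children.
 */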
/* ARGSUSED */
int
sys_getrusage(struct getrusage_args *uap)
{
        struct rusage ru;
        struct rusage *rup;

        switch (uap->who) {

        case RUSAGE_SELF:
                rup = &ru;
                calcru_proc(curproc, rup);
                break;

        case RUSAGE_CHILDREN:
                rup = &curproc->p_cru;
                break;

        default:
                return (EINVAL);
        }
        return (copyout((caddr_t)rup, (caddr_t)uap->rusage,
            sizeof (struct rusage)));
}

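/*
 * Add the resource usage accumulated in ru2 into ru, field by field,
 * keeping the larger of the two maxrss values.
 */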
void
ruadd(struct rusage *ru, struct rusage *ru2)
{
        long *ip, *ip2;
        int i;

        timevaladd(&ru->ru_utime, &ru2->ru_utime);
        timevaladd(&ru->ru_stime, &ru2->ru_stime);
        if (ru->ru_maxrss < ru2->ru_maxrss)
                ru->ru_maxrss = ru2->ru_maxrss;
        ip = &ru->ru_first; ip2 = &ru2->ru_first;
        for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
                *ip++ += *ip2++;
}

/*
 * Find the uidinfo structure for a uid.  This structure is used to
 * track the total resource consumption (process count, socket buffer
 * size, etc.) for the uid and impose limits.
 */
void
uihashinit(void)
{
        spin_init(&uihash_lock);
        uihashtbl = hashinit(maxproc / 16, M_UIDINFO, &uihash);
}

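/*
 * Look up the uidinfo for the given uid in the hash table.  The caller
 * must hold uihash_lock.  Returns NULL if no entry exists.
 */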
static struct uidinfo *
uilookup(uid_t uid)
{
        struct uihashhead *uipp;
        struct uidinfo *uip;

        uipp = UIHASH(uid);
        LIST_FOREACH(uip, uipp, ui_hash) {
                if (uip->ui_uid == uid)
                        break;
        }
        return (uip);
}

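/*
 * Allocate and initialize a uidinfo for the given uid and enter it into
 * the hash table.  If another thread beat us to it, the freshly
 * allocated structure is torn down again and the existing entry is
 * returned instead.
 */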
static struct uidinfo *
uicreate(uid_t uid)
{
        struct uidinfo *uip, *tmp;
        /*
         * Allocate space and check for a race
         */
        MALLOC(uip, struct uidinfo *, sizeof(*uip), M_UIDINFO, M_WAITOK);
        /*
         * Initialize structure and enter it into the hash table
         */
        spin_init(&uip->ui_lock);
        uip->ui_uid = uid;
        uip->ui_proccnt = 0;
        uip->ui_sbsize = 0;
        uip->ui_ref = 1;	/* we're returning a ref */
        uip->ui_posixlocks = 0;
        varsymset_init(&uip->ui_varsymset, NULL);

        /*
         * Somebody may have already created the uidinfo for this
         * uid. If so, return that instead.
         */
        spin_lock_wr(&uihash_lock);
        tmp = uilookup(uid);
        if (tmp != NULL) {
                varsymset_clean(&uip->ui_varsymset);
                spin_uninit(&uip->ui_lock);
                FREE(uip, M_UIDINFO);
                uip = tmp;
        } else {
                LIST_INSERT_HEAD(UIHASH(uid), uip, ui_hash);
        }
        spin_unlock_wr(&uihash_lock);

        return (uip);
}

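/*
 * Find the uidinfo for the given uid, creating it if it does not
 * exist yet, and return it referenced.
 */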
struct uidinfo *
uifind(uid_t uid)
{
        struct uidinfo *uip;

        spin_lock_rd(&uihash_lock);
        uip = uilookup(uid);
        if (uip == NULL) {
                spin_unlock_rd(&uihash_lock);
                uip = uicreate(uid);
        } else {
                uihold(uip);
                spin_unlock_rd(&uihash_lock);
        }
        return (uip);
}

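/*
 * Tear down a uidinfo whose reference count has dropped to zero,
 * removing it from the hash table, unless somebody re-found it and
 * gained a new reference in the meantime.
 */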
static __inline void
uifree(struct uidinfo *uip)
{
        spin_lock_wr(&uihash_lock);

        /*
         * Note that we're taking a read lock even though we
         * modify the structure because we know nobody can find
         * it now that we've locked uihash_lock.  If somebody
         * can get to it through a stored pointer, the reference
         * count will not be 0 and in that case we don't modify
         * the struct.
         */
        spin_lock_rd(&uip->ui_lock);
        if (uip->ui_ref != 0) {
                /*
                 * Someone found the uid and got a ref when we
                 * unlocked.  No need to free any more.
                 */
                spin_unlock_rd(&uip->ui_lock);
                return;
        }
        if (uip->ui_sbsize != 0)
                /* XXX no %qd in kernel.  Truncate. */
                kprintf("freeing uidinfo: uid = %d, sbsize = %ld\n",
                    uip->ui_uid, (long)uip->ui_sbsize);
        if (uip->ui_proccnt != 0)
                kprintf("freeing uidinfo: uid = %d, proccnt = %ld\n",
                    uip->ui_uid, uip->ui_proccnt);

        LIST_REMOVE(uip, ui_hash);
        spin_unlock_wr(&uihash_lock);
        varsymset_clean(&uip->ui_varsymset);
        lockuninit(&uip->ui_varsymset.vx_lock);
        spin_unlock_rd(&uip->ui_lock);
        spin_uninit(&uip->ui_lock);
        FREE(uip, M_UIDINFO);
}

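/*
 * Gain a reference on a uidinfo.
 */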
void
uihold(struct uidinfo *uip)
{
        atomic_add_int(&uip->ui_ref, 1);
        KKASSERT(uip->ui_ref > 0);
}

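/*
 * Drop a reference on a uidinfo; the structure is freed when the last
 * reference goes away.
 */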
void
uidrop(struct uidinfo *uip)
{
        if (atomic_fetchadd_int(&uip->ui_ref, -1) == 1) {
                uifree(uip);
        } else {
                KKASSERT(uip->ui_ref > 0);
        }
}

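/*
 * Replace the uidinfo pointed to by *puip with nuip, dropping the
 * reference on the old one.
 */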
void
uireplace(struct uidinfo **puip, struct uidinfo *nuip)
{
        uidrop(*puip);
        *puip = nuip;
}

/*
 * Change the count associated with number of processes
 * a given user is using.  When 'max' is 0, don't enforce a limit
 */
int
chgproccnt(struct uidinfo *uip, int diff, int max)
{
        int ret;
        spin_lock_wr(&uip->ui_lock);
        /* don't allow them to exceed max, but allow subtraction */
        if (diff > 0 && uip->ui_proccnt + diff > max && max != 0) {
                ret = 0;
        } else {
                uip->ui_proccnt += diff;
                if (uip->ui_proccnt < 0)
                        kprintf("negative proccnt for uid = %d\n", uip->ui_uid);
                ret = 1;
        }
        spin_unlock_wr(&uip->ui_lock);
        return ret;
}

/*
 * Change the total socket buffer size a user has used.
 */
int
chgsbsize(struct uidinfo *uip, u_long *hiwat, u_long to, rlim_t max)
{
        rlim_t new;

        spin_lock_wr(&uip->ui_lock);
        new = uip->ui_sbsize + to - *hiwat;
        KKASSERT(new >= 0);

        /*
         * If we are trying to increase the socket buffer size,
         * scale down the hi water mark when we exceed the user's
         * allowed socket buffer space.
         *
         * We can't scale down too much or we will blow up atomic packet
         * operations.
         */
        if (to > *hiwat && to > MCLBYTES && new > max) {
                to = to * max / new;
                if (to < MCLBYTES)
                        to = MCLBYTES;
        }
        uip->ui_sbsize = new;
        *hiwat = to;
        spin_unlock_wr(&uip->ui_lock);
        return (1);
}