sys/kern: Adjust some function declaration vs. definition mismatches.
[dragonfly.git] / sys / kern / kern_resource.c
1/*-
2 * Copyright (c) 1982, 1986, 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
dc71b7ab 18 * 3. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * @(#)kern_resource.c 8.5 (Berkeley) 1/21/94
35 * $FreeBSD: src/sys/kern/kern_resource.c,v 1.55.2.5 2001/11/03 01:41:08 ps Exp $
36 */
37
38#include "opt_compat.h"
39
40#include <sys/param.h>
41#include <sys/systm.h>
42#include <sys/sysproto.h>
43#include <sys/file.h>
9697c509 44#include <sys/kern_syscall.h>
45#include <sys/kernel.h>
46#include <sys/resourcevar.h>
47#include <sys/malloc.h>
48#include <sys/proc.h>
895c1f85 49#include <sys/priv.h>
984263bc 50#include <sys/time.h>
508ceb09 51#include <sys/lockf.h>
52
53#include <vm/vm.h>
54#include <vm/vm_param.h>
55#include <sys/lock.h>
56#include <vm/pmap.h>
57#include <vm/vm_map.h>
58
37af14fe 59#include <sys/thread2.h>
9d7a637e 60#include <sys/spinlock2.h>
37af14fe 61
402ed7e1 62static int donice (struct proc *chgp, int n);
aa166ad1 63static int doionice (struct proc *chgp, int n);
64
65static MALLOC_DEFINE(M_UIDINFO, "uidinfo", "uidinfo structures");
66#define UIHASH(uid) (&uihashtbl[(uid) & uihash])
9d7a637e 67static struct spinlock uihash_lock;
68static LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
69static u_long uihash; /* size of hash table - 1 */
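/*
 * hashinit() sizes the table to a power of two and returns size - 1 in
 * uihash, so UIHASH(uid) simply masks the uid into a bucket index.
 */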
70
71static struct uidinfo *uicreate (uid_t uid);
72static struct uidinfo *uilookup (uid_t uid);
73
74/*
75 * Resource controls and accounting.
76 */
77
78struct getpriority_info {
79 int low;
80 int who;
81};
82
83static int getpriority_callback(struct proc *p, void *data);
84
85/*
86 * MPALMOSTSAFE
87 */
984263bc 88int
753fd850 89sys_getpriority(struct getpriority_args *uap)
984263bc 90{
8fa76237 91 struct getpriority_info info;
b7ebebd1 92 thread_t curtd = curthread;
93 struct proc *curp = curproc;
94 struct proc *p;
b7ebebd1 95 struct pgrp *pg;
41c20dac 96 int low = PRIO_MAX + 1;
97 int error;
98
984263bc 99 switch (uap->which) {
984263bc 100 case PRIO_PROCESS:
58c2553a 101 if (uap->who == 0) {
b7ebebd1 102 low = curp->p_nice;
58c2553a 103 } else {
984263bc 104 p = pfind(uap->who);
105 if (p) {
106 lwkt_gettoken_shared(&p->p_token);
107 if (PRISON_CHECK(curtd->td_ucred, p->p_ucred))
108 low = p->p_nice;
109 lwkt_reltoken(&p->p_token);
110 PRELE(p);
58c2553a 111 }
58c2553a 112 }
984263bc 113 break;
41c20dac 114 case PRIO_PGRP:
58c2553a 115 if (uap->who == 0) {
b7ebebd1 116 lwkt_gettoken_shared(&curp->p_token);
984263bc 117 pg = curp->p_pgrp;
58c2553a 118 pgref(pg);
b7ebebd1 119 lwkt_reltoken(&curp->p_token);
58c2553a 120 } else if ((pg = pgfind(uap->who)) == NULL) {
984263bc 121 break;
122 } /* else ref held from pgfind */
123
b7ebebd1 124 lwkt_gettoken_shared(&pg->pg_token);
984263bc 125 LIST_FOREACH(p, &pg->pg_members, p_pglist) {
b7ebebd1 126 if (PRISON_CHECK(curtd->td_ucred, p->p_ucred) &&
58c2553a 127 p->p_nice < low) {
984263bc 128 low = p->p_nice;
58c2553a 129 }
984263bc 130 }
b7ebebd1 131 lwkt_reltoken(&pg->pg_token);
58c2553a 132 pgrel(pg);
984263bc 133 break;
134 case PRIO_USER:
135 if (uap->who == 0)
b7ebebd1 136 uap->who = curtd->td_ucred->cr_uid;
137 info.low = low;
138 info.who = uap->who;
139 allproc_scan(getpriority_callback, &info);
140 low = info.low;
141 break;
142
143 default:
144 error = EINVAL;
145 goto done;
146 }
147 if (low == PRIO_MAX + 1) {
148 error = ESRCH;
149 goto done;
984263bc 150 }
c7114eea 151 uap->sysmsg_result = low;
152 error = 0;
153done:
3919ced0 154 return (error);
155}
156
157/*
158 * Figure out the current lowest nice priority for processes owned
159 * by the specified user.
160 */
161static
162int
163getpriority_callback(struct proc *p, void *data)
164{
165 struct getpriority_info *info = data;
166
167 lwkt_gettoken_shared(&p->p_token);
168 if (PRISON_CHECK(curthread->td_ucred, p->p_ucred) &&
169 p->p_ucred->cr_uid == info->who &&
170 p->p_nice < info->low) {
171 info->low = p->p_nice;
172 }
b7ebebd1 173 lwkt_reltoken(&p->p_token);
174 return(0);
175}
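/*
 * Note on the PRIO_USER path above: allproc_scan() invokes
 * getpriority_callback() for every process in the system; the
 * getpriority_info structure carries the uid being matched in and the
 * lowest nice value seen back out.
 */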
176
177struct setpriority_info {
178 int prio;
179 int who;
180 int error;
181 int found;
182};
183
184static int setpriority_callback(struct proc *p, void *data);
185
186/*
187 * MPALMOSTSAFE
188 */
984263bc 189int
753fd850 190sys_setpriority(struct setpriority_args *uap)
984263bc 191{
8fa76237 192 struct setpriority_info info;
b7ebebd1 193 thread_t curtd = curthread;
194 struct proc *curp = curproc;
195 struct proc *p;
b7ebebd1 196 struct pgrp *pg;
197 int found = 0, error = 0;
198
199 switch (uap->which) {
984263bc 200 case PRIO_PROCESS:
58c2553a 201 if (uap->who == 0) {
202 lwkt_gettoken(&curp->p_token);
203 error = donice(curp, uap->prio);
204 found++;
205 lwkt_reltoken(&curp->p_token);
58c2553a 206 } else {
984263bc 207 p = pfind(uap->who);
208 if (p) {
209 lwkt_gettoken(&p->p_token);
210 if (PRISON_CHECK(curtd->td_ucred, p->p_ucred)) {
211 error = donice(p, uap->prio);
212 found++;
213 }
214 lwkt_reltoken(&p->p_token);
215 PRELE(p);
58c2553a 216 }
58c2553a 217 }
984263bc 218 break;
41c20dac 219 case PRIO_PGRP:
58c2553a 220 if (uap->who == 0) {
b7ebebd1 221 lwkt_gettoken_shared(&curp->p_token);
984263bc 222 pg = curp->p_pgrp;
58c2553a 223 pgref(pg);
b7ebebd1 224 lwkt_reltoken(&curp->p_token);
58c2553a 225 } else if ((pg = pgfind(uap->who)) == NULL) {
984263bc 226 break;
227 } /* else ref held from pgfind */
228
229 lwkt_gettoken(&pg->pg_token);
230restart:
984263bc 231 LIST_FOREACH(p, &pg->pg_members, p_pglist) {
232 PHOLD(p);
233 lwkt_gettoken(&p->p_token);
234 if (p->p_pgrp == pg &&
235 PRISON_CHECK(curtd->td_ucred, p->p_ucred)) {
41c20dac 236 error = donice(p, uap->prio);
237 found++;
238 }
239 lwkt_reltoken(&p->p_token);
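 /*
 * If the process was moved to another process group while we were
 * acquiring its token, our LIST_FOREACH position in pg_members is
 * no longer trustworthy; drop the hold and restart the scan of
 * this group.
 */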
240 if (p->p_pgrp != pg) {
241 PRELE(p);
242 goto restart;
243 }
244 PRELE(p);
984263bc 245 }
b7ebebd1 246 lwkt_reltoken(&pg->pg_token);
58c2553a 247 pgrel(pg);
984263bc 248 break;
249 case PRIO_USER:
250 if (uap->who == 0)
b7ebebd1 251 uap->who = curtd->td_ucred->cr_uid;
252 info.prio = uap->prio;
253 info.who = uap->who;
254 info.error = 0;
255 info.found = 0;
256 allproc_scan(setpriority_callback, &info);
257 error = info.error;
258 found = info.found;
984263bc 259 break;
984263bc 260 default:
261 error = EINVAL;
262 found = 1;
263 break;
984263bc 264 }
3919ced0 265
984263bc 266 if (found == 0)
3919ced0 267 error = ESRCH;
268 return (error);
269}
270
271static
272int
273setpriority_callback(struct proc *p, void *data)
274{
275 struct setpriority_info *info = data;
276 int error;
277
b7ebebd1 278 lwkt_gettoken(&p->p_token);
8fa76237 279 if (p->p_ucred->cr_uid == info->who &&
b7ebebd1 280 PRISON_CHECK(curthread->td_ucred, p->p_ucred)) {
281 error = donice(p, info->prio);
282 if (error)
283 info->error = error;
284 ++info->found;
285 }
b7ebebd1 286 lwkt_reltoken(&p->p_token);
287 return(0);
288}
289
290/*
291 * Caller must hold chgp->p_token
292 */
984263bc 293static int
41c20dac 294donice(struct proc *chgp, int n)
984263bc 295{
b7ebebd1 296 struct ucred *cr = curthread->td_ucred;
08f2f1bb 297 struct lwp *lp;
984263bc 298
299 if (cr->cr_uid && cr->cr_ruid &&
300 cr->cr_uid != chgp->p_ucred->cr_uid &&
301 cr->cr_ruid != chgp->p_ucred->cr_uid)
302 return (EPERM);
303 if (n > PRIO_MAX)
304 n = PRIO_MAX;
305 if (n < PRIO_MIN)
306 n = PRIO_MIN;
3b1d99e9 307 if (n < chgp->p_nice && priv_check_cred(cr, PRIV_SCHED_SETPRIORITY, 0))
308 return (EACCES);
309 chgp->p_nice = n;
310 FOREACH_LWP_IN_PROC(lp, chgp) {
311 LWPHOLD(lp);
08f2f1bb 312 chgp->p_usched->resetpriority(lp);
313 LWPRELE(lp);
314 }
315 return (0);
316}
317
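/*
 * Illustrative calling pattern for donice() (a sketch, mirroring what the
 * PRIO_PGRP loop in sys_setpriority() above does): the caller holds the
 * target process and its p_token across the call.
 *
 *	PHOLD(p);
 *	lwkt_gettoken(&p->p_token);
 *	error = donice(p, prio);
 *	lwkt_reltoken(&p->p_token);
 *	PRELE(p);
 */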
318
319struct ioprio_get_info {
320 int high;
321 int who;
322};
323
324static int ioprio_get_callback(struct proc *p, void *data);
325
326/*
327 * MPALMOSTSAFE
328 */
329int
330sys_ioprio_get(struct ioprio_get_args *uap)
331{
332 struct ioprio_get_info info;
b7ebebd1 333 thread_t curtd = curthread;
334 struct proc *curp = curproc;
335 struct proc *p;
b7ebebd1 336 struct pgrp *pg;
337 int high = IOPRIO_MIN-2;
338 int error;
339
340 switch (uap->which) {
341 case PRIO_PROCESS:
58c2553a 342 if (uap->who == 0) {
b7ebebd1 343 high = curp->p_ionice;
58c2553a 344 } else {
aa166ad1 345 p = pfind(uap->who);
346 if (p) {
347 lwkt_gettoken_shared(&p->p_token);
348 if (PRISON_CHECK(curtd->td_ucred, p->p_ucred))
349 high = p->p_ionice;
350 lwkt_reltoken(&p->p_token);
351 PRELE(p);
352 }
58c2553a 353 }
aa166ad1 354 break;
aa166ad1 355 case PRIO_PGRP:
58c2553a 356 if (uap->who == 0) {
b7ebebd1 357 lwkt_gettoken_shared(&curp->p_token);
aa166ad1 358 pg = curp->p_pgrp;
58c2553a 359 pgref(pg);
b7ebebd1 360 lwkt_reltoken(&curp->p_token);
58c2553a 361 } else if ((pg = pgfind(uap->who)) == NULL) {
aa166ad1 362 break;
363 } /* else ref held from pgfind */
364
b7ebebd1 365 lwkt_gettoken_shared(&pg->pg_token);
aa166ad1 366 LIST_FOREACH(p, &pg->pg_members, p_pglist) {
b7ebebd1 367 if (PRISON_CHECK(curtd->td_ucred, p->p_ucred) &&
 368 p->p_ionice > high)
369 high = p->p_ionice;
370 }
b7ebebd1 371 lwkt_reltoken(&pg->pg_token);
58c2553a 372 pgrel(pg);
aa166ad1 373 break;
374 case PRIO_USER:
375 if (uap->who == 0)
b7ebebd1 376 uap->who = curtd->td_ucred->cr_uid;
377 info.high = high;
378 info.who = uap->who;
379 allproc_scan(ioprio_get_callback, &info);
380 high = info.high;
381 break;
382 default:
383 error = EINVAL;
384 goto done;
385 }
386 if (high == IOPRIO_MIN-2) {
387 error = ESRCH;
388 goto done;
389 }
390 uap->sysmsg_result = high;
391 error = 0;
392done:
393 return (error);
394}
395
396/*
 397 * Figure out the current highest i/o priority for processes owned
398 * by the specified user.
399 */
400static
401int
402ioprio_get_callback(struct proc *p, void *data)
403{
404 struct ioprio_get_info *info = data;
405
406 lwkt_gettoken_shared(&p->p_token);
407 if (PRISON_CHECK(curthread->td_ucred, p->p_ucred) &&
408 p->p_ucred->cr_uid == info->who &&
409 p->p_ionice > info->high) {
410 info->high = p->p_ionice;
411 }
b7ebebd1 412 lwkt_reltoken(&p->p_token);
413 return(0);
414}
415
416
417struct ioprio_set_info {
418 int prio;
419 int who;
420 int error;
421 int found;
422};
423
424static int ioprio_set_callback(struct proc *p, void *data);
425
426/*
427 * MPALMOSTSAFE
428 */
429int
430sys_ioprio_set(struct ioprio_set_args *uap)
431{
432 struct ioprio_set_info info;
b7ebebd1 433 thread_t curtd = curthread;
434 struct proc *curp = curproc;
435 struct proc *p;
b7ebebd1 436 struct pgrp *pg;
437 int found = 0, error = 0;
438
439 switch (uap->which) {
440 case PRIO_PROCESS:
58c2553a 441 if (uap->who == 0) {
442 lwkt_gettoken(&curp->p_token);
443 error = doionice(curp, uap->prio);
444 lwkt_reltoken(&curp->p_token);
445 found++;
58c2553a 446 } else {
aa166ad1 447 p = pfind(uap->who);
448 if (p) {
449 lwkt_gettoken(&p->p_token);
450 if (PRISON_CHECK(curtd->td_ucred, p->p_ucred)) {
451 error = doionice(p, uap->prio);
452 found++;
453 }
454 lwkt_reltoken(&p->p_token);
455 PRELE(p);
58c2553a 456 }
58c2553a 457 }
aa166ad1 458 break;
aa166ad1 459 case PRIO_PGRP:
58c2553a 460 if (uap->who == 0) {
b7ebebd1 461 lwkt_gettoken_shared(&curp->p_token);
aa166ad1 462 pg = curp->p_pgrp;
58c2553a 463 pgref(pg);
b7ebebd1 464 lwkt_reltoken(&curp->p_token);
58c2553a 465 } else if ((pg = pgfind(uap->who)) == NULL) {
aa166ad1 466 break;
467 } /* else ref held from pgfind */
468
469 lwkt_gettoken(&pg->pg_token);
470restart:
aa166ad1 471 LIST_FOREACH(p, &pg->pg_members, p_pglist) {
472 PHOLD(p);
473 lwkt_gettoken(&p->p_token);
474 if (p->p_pgrp == pg &&
475 PRISON_CHECK(curtd->td_ucred, p->p_ucred)) {
476 error = doionice(p, uap->prio);
477 found++;
478 }
479 lwkt_reltoken(&p->p_token);
480 if (p->p_pgrp != pg) {
481 PRELE(p);
482 goto restart;
483 }
484 PRELE(p);
aa166ad1 485 }
b7ebebd1 486 lwkt_reltoken(&pg->pg_token);
58c2553a 487 pgrel(pg);
aa166ad1 488 break;
489 case PRIO_USER:
490 if (uap->who == 0)
b7ebebd1 491 uap->who = curtd->td_ucred->cr_uid;
492 info.prio = uap->prio;
493 info.who = uap->who;
494 info.error = 0;
495 info.found = 0;
496 allproc_scan(ioprio_set_callback, &info);
497 error = info.error;
498 found = info.found;
499 break;
500 default:
501 error = EINVAL;
502 found = 1;
503 break;
504 }
505
506 if (found == 0)
507 error = ESRCH;
508 return (error);
509}
510
511static
512int
513ioprio_set_callback(struct proc *p, void *data)
514{
515 struct ioprio_set_info *info = data;
516 int error;
517
b7ebebd1 518 lwkt_gettoken(&p->p_token);
aa166ad1 519 if (p->p_ucred->cr_uid == info->who &&
b7ebebd1 520 PRISON_CHECK(curthread->td_ucred, p->p_ucred)) {
521 error = doionice(p, info->prio);
522 if (error)
523 info->error = error;
524 ++info->found;
525 }
b7ebebd1 526 lwkt_reltoken(&p->p_token);
527 return(0);
528}
529
59b728a7 530static int
531doionice(struct proc *chgp, int n)
532{
b7ebebd1 533 struct ucred *cr = curthread->td_ucred;
534
535 if (cr->cr_uid && cr->cr_ruid &&
536 cr->cr_uid != chgp->p_ucred->cr_uid &&
537 cr->cr_ruid != chgp->p_ucred->cr_uid)
538 return (EPERM);
539 if (n > IOPRIO_MAX)
540 n = IOPRIO_MAX;
541 if (n < IOPRIO_MIN)
542 n = IOPRIO_MIN;
543 if (n < chgp->p_ionice &&
544 priv_check_cred(cr, PRIV_SCHED_SETPRIORITY, 0))
545 return (EACCES);
546 chgp->p_ionice = n;
547
548 return (0);
549
550}
551
552/*
553 * MPALMOSTSAFE
554 */
555int
556sys_lwp_rtprio(struct lwp_rtprio_args *uap)
557{
b7ebebd1 558 struct ucred *cr = curthread->td_ucred;
58c2553a 559 struct proc *p;
560 struct lwp *lp;
561 struct rtprio rtp;
562 int error;
563
564 error = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
565 if (error)
566 return error;
3919ced0 567 if (uap->pid < 0)
649d3bd2 568 return EINVAL;
3919ced0 569
3919ced0 570 if (uap->pid == 0) {
571 p = curproc;
572 PHOLD(p);
573 } else {
574 p = pfind(uap->pid);
575 }
576 if (p == NULL) {
577 error = ESRCH;
578 goto done;
649d3bd2 579 }
b7ebebd1 580 lwkt_gettoken(&p->p_token);
581
582 if (uap->tid < -1) {
583 error = EINVAL;
584 goto done;
585 }
586 if (uap->tid == -1) {
587 /*
588 * sadly, tid can be 0 so we can't use 0 here
589 * like sys_rtprio()
590 */
591 lp = curthread->td_lwp;
592 } else {
3e291793 593 lp = lwp_rb_tree_RB_LOOKUP(&p->p_lwp_tree, uap->tid);
594 if (lp == NULL) {
595 error = ESRCH;
596 goto done;
597 }
598 }
599
600 switch (uap->function) {
601 case RTP_LOOKUP:
602 error = copyout(&lp->lwp_rtprio, uap->rtp,
603 sizeof(struct rtprio));
604 break;
605 case RTP_SET:
606 if (cr->cr_uid && cr->cr_ruid &&
607 cr->cr_uid != p->p_ucred->cr_uid &&
608 cr->cr_ruid != p->p_ucred->cr_uid) {
609 error = EPERM;
610 break;
611 }
612 /* disallow setting rtprio in most cases if not superuser */
3b1d99e9 613 if (priv_check_cred(cr, PRIV_SCHED_RTPRIO, 0)) {
614 /* can't set someone else's */
615 if (uap->pid) { /* XXX */
616 error = EPERM;
617 break;
618 }
619 /* can't set realtime priority */
620/*
621 * Realtime priority has to be restricted for reasons which should be
622 * obvious. However, for idle priority, there is a potential for
623 * system deadlock if an idleprio process gains a lock on a resource
624 * that other processes need (and the idleprio process can't run
625 * due to a CPU-bound normal process). Fix me! XXX
626 */
627 if (RTP_PRIO_IS_REALTIME(rtp.type)) {
628 error = EPERM;
629 break;
630 }
631 }
632 switch (rtp.type) {
633#ifdef RTP_PRIO_FIFO
634 case RTP_PRIO_FIFO:
635#endif
636 case RTP_PRIO_REALTIME:
637 case RTP_PRIO_NORMAL:
638 case RTP_PRIO_IDLE:
639 if (rtp.prio > RTP_PRIO_MAX) {
640 error = EINVAL;
641 } else {
642 lp->lwp_rtprio = rtp;
643 error = 0;
644 }
3919ced0 645 break;
649d3bd2 646 default:
647 error = EINVAL;
648 break;
649d3bd2 649 }
3919ced0 650 break;
649d3bd2 651 default:
652 error = EINVAL;
653 break;
649d3bd2 654 }
655
656done:
657 if (p) {
658 lwkt_reltoken(&p->p_token);
58c2553a 659 PRELE(p);
b7ebebd1 660 }
3919ced0 661 return (error);
662}
663
664/*
665 * Set realtime priority
666 *
667 * MPALMOSTSAFE
984263bc 668 */
984263bc 669int
753fd850 670sys_rtprio(struct rtprio_args *uap)
984263bc 671{
b7ebebd1 672 struct ucred *cr = curthread->td_ucred;
41c20dac 673 struct proc *p;
08f2f1bb 674 struct lwp *lp;
675 struct rtprio rtp;
676 int error;
677
678 error = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
679 if (error)
680 return (error);
681
682 if (uap->pid == 0) {
683 p = curproc;
684 PHOLD(p);
685 } else {
984263bc 686 p = pfind(uap->pid);
58c2553a 687 }
984263bc 688
689 if (p == NULL) {
690 error = ESRCH;
691 goto done;
692 }
b7ebebd1 693 lwkt_gettoken(&p->p_token);
984263bc 694
695 /* XXX lwp */
696 lp = FIRST_LWP_IN_PROC(p);
697 switch (uap->function) {
698 case RTP_LOOKUP:
699 error = copyout(&lp->lwp_rtprio, uap->rtp,
700 sizeof(struct rtprio));
701 break;
984263bc 702 case RTP_SET:
703 if (cr->cr_uid && cr->cr_ruid &&
704 cr->cr_uid != p->p_ucred->cr_uid &&
705 cr->cr_ruid != p->p_ucred->cr_uid) {
706 error = EPERM;
707 break;
708 }
984263bc 709 /* disallow setting rtprio in most cases if not superuser */
3b1d99e9 710 if (priv_check_cred(cr, PRIV_SCHED_RTPRIO, 0)) {
984263bc 711 /* can't set someone else's */
712 if (uap->pid) {
713 error = EPERM;
714 break;
715 }
716 /* can't set realtime priority */
717/*
718 * Realtime priority has to be restricted for reasons which should be
719 * obvious. However, for idle priority, there is a potential for
720 * system deadlock if an idleprio process gains a lock on a resource
721 * that other processes need (and the idleprio process can't run
722 * due to a CPU-bound normal process). Fix me! XXX
723 */
724 if (RTP_PRIO_IS_REALTIME(rtp.type)) {
725 error = EPERM;
726 break;
727 }
728 }
729 switch (rtp.type) {
730#ifdef RTP_PRIO_FIFO
731 case RTP_PRIO_FIFO:
732#endif
733 case RTP_PRIO_REALTIME:
734 case RTP_PRIO_NORMAL:
735 case RTP_PRIO_IDLE:
736 if (rtp.prio > RTP_PRIO_MAX) {
737 error = EINVAL;
738 break;
739 }
08f2f1bb 740 lp->lwp_rtprio = rtp;
741 error = 0;
742 break;
984263bc 743 default:
744 error = EINVAL;
745 break;
984263bc 746 }
3919ced0 747 break;
984263bc 748 default:
749 error = EINVAL;
750 break;
984263bc 751 }
3919ced0 752done:
753 if (p) {
754 lwkt_reltoken(&p->p_token);
58c2553a 755 PRELE(p);
b7ebebd1 756 }
58c2553a 757
3919ced0 758 return (error);
759}
760
761/*
762 * MPSAFE
763 */
984263bc 764int
753fd850 765sys_setrlimit(struct __setrlimit_args *uap)
766{
767 struct rlimit alim;
768 int error;
769
770 error = copyin(uap->rlp, &alim, sizeof(alim));
771 if (error)
984263bc 772 return (error);
773
774 error = kern_setrlimit(uap->which, &alim);
775
776 return (error);
777}
778
779/*
780 * MPSAFE
781 */
9697c509 782int
753fd850 783sys_getrlimit(struct __getrlimit_args *uap)
784{
785 struct rlimit lim;
786 int error;
787
788 error = kern_getrlimit(uap->which, &lim);
789
790 if (error == 0)
791 error = copyout(&lim, uap->rlp, sizeof(*uap->rlp));
792 return error;
793}
794
795/*
fde7ac71 796 * Transform the running time and tick information in lwp lp's thread into user,
984263bc 797 * system, and interrupt time usage.
798 *
 799 * Since we are limited to statclock tick granularity, this is a statistical
800 * calculation which will be correct over the long haul, but should not be
801 * expected to measure fine grained deltas.
802 *
803 * It is possible to catch a lwp in the midst of being created, so
804 * check whether lwp_thread is NULL or not.
805 */
806void
fde7ac71 807calcru(struct lwp *lp, struct timeval *up, struct timeval *sp)
984263bc 808{
585aafb6 809 struct thread *td;
984263bc 810
811 /*
812 * Calculate at the statclock level. YYY if the thread is owned by
813 * another cpu we need to forward the request to the other cpu, or
814 * have a token to interlock the information in order to avoid racing
815 * thread destruction.
d16a8831 816 */
817 if ((td = lp->lwp_thread) != NULL) {
818 crit_enter();
819 up->tv_sec = td->td_uticks / 1000000;
820 up->tv_usec = td->td_uticks % 1000000;
821 sp->tv_sec = td->td_sticks / 1000000;
822 sp->tv_usec = td->td_sticks % 1000000;
823 crit_exit();
824 }
825}
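/*
 * Illustrative note on the conversion above: the accumulated td_uticks
 * and td_sticks counters are split as microseconds (x / 1000000 seconds,
 * x % 1000000 microseconds), so e.g. a value of 2500000 becomes
 * { tv_sec = 2, tv_usec = 500000 }.
 */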
826
827/*
828 * Aggregate resource statistics of all lwps of a process.
829 *
830 * proc.p_ru keeps track of all statistics directly related to a proc. This
831 * consists of RSS usage and nswap information and aggregate numbers for all
832 * former lwps of this proc.
833 *
834 * proc.p_cru is the sum of all stats of reaped children.
835 *
836 * lwp.lwp_ru contains the stats directly related to one specific lwp, meaning
837 * packet, scheduler switch or page fault counts, etc. This information gets
838 * added to lwp.lwp_proc.p_ru when the lwp exits.
839 */
840void
841calcru_proc(struct proc *p, struct rusage *ru)
842{
843 struct timeval upt, spt;
844 long *rip1, *rip2;
845 struct lwp *lp;
846
847 *ru = p->p_ru;
848
849 FOREACH_LWP_IN_PROC(lp, p) {
850 calcru(lp, &upt, &spt);
851 timevaladd(&ru->ru_utime, &upt);
852 timevaladd(&ru->ru_stime, &spt);
853 for (rip1 = &ru->ru_first, rip2 = &lp->lwp_ru.ru_first;
854 rip1 <= &ru->ru_last;
855 rip1++, rip2++)
856 *rip1 += *rip2;
857 }
858}
859
860
861/*
862 * MPALMOSTSAFE
863 */
984263bc 864int
753fd850 865sys_getrusage(struct getrusage_args *uap)
984263bc 866{
b7ebebd1 867 struct proc *p = curproc;
fde7ac71 868 struct rusage ru;
41c20dac 869 struct rusage *rup;
3919ced0 870 int error;
984263bc 871
b7ebebd1 872 lwkt_gettoken(&p->p_token);
984263bc 873
3919ced0 874 switch (uap->who) {
984263bc 875 case RUSAGE_SELF:
fde7ac71 876 rup = &ru;
b7ebebd1 877 calcru_proc(p, rup);
3919ced0 878 error = 0;
984263bc 879 break;
984263bc 880 case RUSAGE_CHILDREN:
b7ebebd1 881 rup = &p->p_cru;
3919ced0 882 error = 0;
984263bc 883 break;
984263bc 884 default:
885 error = EINVAL;
886 break;
984263bc 887 }
888 lwkt_reltoken(&p->p_token);
889
890 if (error == 0)
891 error = copyout(rup, uap->rusage, sizeof(struct rusage));
3919ced0 892 return (error);
893}
894
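/*
 * Aggregate ru2 into ru: add the user and system times, keep the larger
 * ru_maxrss, and sum the remaining counters pairwise (every long field
 * from ru_first through ru_last).
 */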
895void
792033e7 896ruadd(struct rusage *ru, struct rusage *ru2)
984263bc 897{
898 long *ip, *ip2;
899 int i;
900
901 timevaladd(&ru->ru_utime, &ru2->ru_utime);
902 timevaladd(&ru->ru_stime, &ru2->ru_stime);
903 if (ru->ru_maxrss < ru2->ru_maxrss)
904 ru->ru_maxrss = ru2->ru_maxrss;
905 ip = &ru->ru_first; ip2 = &ru2->ru_first;
906 for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
907 *ip++ += *ip2++;
908}
909
910/*
911 * Find the uidinfo structure for a uid. This structure is used to
912 * track the total resource consumption (process count, socket buffer
913 * size, etc.) for the uid and impose limits.
914 */
915void
792033e7 916uihashinit(void)
984263bc 917{
ba87a4ab 918 spin_init(&uihash_lock, "uihashinit");
919 uihashtbl = hashinit(maxproc / 16, M_UIDINFO, &uihash);
920}
921
922/*
923 * NOTE: Must be called with uihash_lock held
924 *
925 * MPSAFE
926 */
984263bc 927static struct uidinfo *
792033e7 928uilookup(uid_t uid)
929{
930 struct uihashhead *uipp;
931 struct uidinfo *uip;
932
933 uipp = UIHASH(uid);
792033e7 934 LIST_FOREACH(uip, uipp, ui_hash) {
935 if (uip->ui_uid == uid)
936 break;
792033e7 937 }
938 return (uip);
939}
940
0d355d3b 941/*
 942 * Helper function to create the uidinfo for a uid that could not be found.
943 * This function will properly deal with races.
944 *
945 * MPSAFE
946 */
984263bc 947static struct uidinfo *
792033e7 948uicreate(uid_t uid)
984263bc 949{
9d7a637e 950 struct uidinfo *uip, *tmp;
d37c8f7f 951
952 /*
953 * Allocate space and check for a race
954 */
955 uip = kmalloc(sizeof(*uip), M_UIDINFO, M_WAITOK|M_ZERO);
956
957 /*
958 * Initialize structure and enter it into the hash table
959 */
ba87a4ab 960 spin_init(&uip->ui_lock, "uicreate");
984263bc 961 uip->ui_uid = uid;
9d7a637e 962 uip->ui_ref = 1; /* we're returning a ref */
98a7f915 963 varsymset_init(&uip->ui_varsymset, NULL);
964
965 /*
966 * Somebody may have already created the uidinfo for this
967 * uid. If so, return that instead.
968 */
287a8577 969 spin_lock(&uihash_lock);
970 tmp = uilookup(uid);
971 if (tmp != NULL) {
c9110f4f 972 uihold(tmp);
287a8577 973 spin_unlock(&uihash_lock);
c9110f4f 974
9d7a637e 975 spin_uninit(&uip->ui_lock);
c9110f4f 976 varsymset_clean(&uip->ui_varsymset);
884717e1 977 kfree(uip, M_UIDINFO);
978 uip = tmp;
979 } else {
980 LIST_INSERT_HEAD(UIHASH(uid), uip, ui_hash);
287a8577 981 spin_unlock(&uihash_lock);
9d7a637e 982 }
983 return (uip);
984}
985
0d355d3b 986/*
987 *
988 *
989 * MPSAFE
990 */
984263bc 991struct uidinfo *
792033e7 992uifind(uid_t uid)
993{
994 struct uidinfo *uip;
995
287a8577 996 spin_lock(&uihash_lock);
984263bc 997 uip = uilookup(uid);
9d7a637e 998 if (uip == NULL) {
287a8577 999 spin_unlock(&uihash_lock);
984263bc 1000 uip = uicreate(uid);
1001 } else {
1002 uihold(uip);
287a8577 1003 spin_unlock(&uihash_lock);
9d7a637e 1004 }
1005 return (uip);
1006}
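/*
 * Typical usage of the uidinfo interface (illustrative sketch only):
 * uifind() returns a referenced uidinfo, chgproccnt()/chgsbsize() charge
 * resources against it, and uidrop() releases the reference.
 *
 *	struct uidinfo *uip;
 *
 *	uip = uifind(uid);
 *	if (chgproccnt(uip, 1, lim) == 0)
 *		...		(over the per-uid limit when lim != 0)
 *	...
 *	chgproccnt(uip, -1, 0);
 *	uidrop(uip);
 */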
1007
0d355d3b 1008/*
 1009 * Helper function to remove a uidinfo whose reference count is
1010 * transitioning from 1->0. The reference count is 1 on call.
1011 *
1012 * Zero is returned on success, otherwise non-zero and the
 1013 * uip has not been removed.
1014 *
1015 * MPSAFE
1016 */
c9110f4f 1017static __inline int
792033e7 1018uifree(struct uidinfo *uip)
984263bc 1019{
1020 /*
1021 * If we are still the only holder after acquiring the uihash_lock
1022 * we can safely unlink the uip and destroy it. Otherwise we lost
1023 * a race and must fail.
1024 */
287a8577 1025 spin_lock(&uihash_lock);
c9110f4f 1026 if (uip->ui_ref != 1) {
287a8577 1027 spin_unlock(&uihash_lock);
1028 return(-1);
1029 }
1030 LIST_REMOVE(uip, ui_hash);
287a8577 1031 spin_unlock(&uihash_lock);
1032
1033 /*
1034 * The uip is now orphaned and we can destroy it at our
1035 * leisure.
9d7a637e 1036 */
792033e7 1037 if (uip->ui_sbsize != 0)
1038 kprintf("freeing uidinfo: uid = %d, sbsize = %jd\n",
1039 uip->ui_uid, (intmax_t)uip->ui_sbsize);
792033e7 1040 if (uip->ui_proccnt != 0)
6ea70f76 1041 kprintf("freeing uidinfo: uid = %d, proccnt = %ld\n",
792033e7 1042 uip->ui_uid, uip->ui_proccnt);
9d7a637e 1043
98a7f915 1044 varsymset_clean(&uip->ui_varsymset);
9d7a637e 1045 lockuninit(&uip->ui_varsymset.vx_lock);
9d7a637e 1046 spin_uninit(&uip->ui_lock);
884717e1 1047 kfree(uip, M_UIDINFO);
c9110f4f 1048 return(0);
792033e7 1049}
984263bc 1050
1051/*
1052 * MPSAFE
1053 */
1054void
1055uihold(struct uidinfo *uip)
1056{
9d7a637e 1057 atomic_add_int(&uip->ui_ref, 1);
61f96b6f 1058 KKASSERT(uip->ui_ref >= 0);
1059}
1060
0d355d3b 1061/*
1062 * NOTE: It is important for us to not drop the ref count to 0
1063 * because this can cause a 2->0/2->0 race with another
1064 * concurrent dropper. Losing the race in that situation
1065 * can cause uip to become stale for one of the other
1066 * threads.
1067 *
1068 * MPSAFE
1069 */
1070void
1071uidrop(struct uidinfo *uip)
1072{
1073 int ref;
1074
61f96b6f 1075 KKASSERT(uip->ui_ref > 0);
1076
1077 for (;;) {
1078 ref = uip->ui_ref;
1079 cpu_ccfence();
1080 if (ref == 1) {
1081 if (uifree(uip) == 0)
1082 break;
1083 } else if (atomic_cmpset_int(&uip->ui_ref, ref, ref - 1)) {
1084 break;
1085 }
1086 /* else retry */
9d7a637e 1087 }
1088}
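/*
 * Note on the loop above: the final reference is never released with a
 * bare atomic decrement.  When ui_ref is 1 we go through uifree(), which
 * re-checks the count while holding uihash_lock, so the uidinfo is
 * unhashed atomically with respect to uifind()/uilookup() and a concurrent
 * lookup can never pick up a structure that is being destroyed.
 */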
1089
1090void
1091uireplace(struct uidinfo **puip, struct uidinfo *nuip)
1092{
1093 uidrop(*puip);
1094 *puip = nuip;
1095}
1096
1097/*
1098 * Change the count associated with number of processes
1099 * a given user is using. When 'max' is 0, don't enforce a limit
1100 */
1101int
792033e7 1102chgproccnt(struct uidinfo *uip, int diff, int max)
984263bc 1103{
9d7a637e 1104 int ret;
287a8577 1105 spin_lock(&uip->ui_lock);
984263bc 1106 /* don't allow them to exceed max, but allow subtraction */
1107 if (diff > 0 && uip->ui_proccnt + diff > max && max != 0) {
1108 ret = 0;
1109 } else {
1110 uip->ui_proccnt += diff;
1111 if (uip->ui_proccnt < 0)
1112 kprintf("negative proccnt for uid = %d\n", uip->ui_uid);
1113 ret = 1;
1114 }
287a8577 1115 spin_unlock(&uip->ui_lock);
9d7a637e 1116 return ret;
1117}
1118
1119/*
1120 * Change the total socket buffer size a user has used.
1121 */
1122int
792033e7 1123chgsbsize(struct uidinfo *uip, u_long *hiwat, u_long to, rlim_t max)
1124{
1125 rlim_t new;
984263bc 1126
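 /*
 * On x86_64 ui_sbsize is adjusted with a lock-free 64-bit atomic;
 * other platforms fall back to the per-uidinfo spinlock for the
 * read-modify-write below.
 */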
1127#ifdef __x86_64__
1128 rlim_t sbsize;
1129
1130 sbsize = atomic_fetchadd_long(&uip->ui_sbsize, to - *hiwat);
1131 new = sbsize + to - *hiwat;
1132#else
287a8577 1133 spin_lock(&uip->ui_lock);
984263bc 1134 new = uip->ui_sbsize + to - *hiwat;
1135 uip->ui_sbsize = new;
1136 spin_unlock(&uip->ui_lock);
1137#endif
1138 KKASSERT(new >= 0);
1139
1140 /*
 1141 * If we are trying to increase the socket buffer size, scale down
 1142 * the hi water mark when we exceed the user's allowed socket buffer
 1143 * space.
1144 *
1145 * We can't scale down too much or we will blow up atomic packet
1146 * operations.
1147 */
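 /*
 * For example (illustrative): with max = 256 KB, new = 512 KB and a
 * requested hiwat of 64 KB, "to" is scaled to 64 KB * 256 / 512 = 32 KB,
 * which is still well above the MCLBYTES floor enforced below.
 */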
1148 if (to > *hiwat && to > MCLBYTES && new > max) {
1149 to = to * max / new;
1150 if (to < MCLBYTES)
1151 to = MCLBYTES;
984263bc 1152 }
984263bc 1153 *hiwat = to;
1154 return (1);
1155}