proc->thread stage 4: rework the VFS and DEVICE subsystems to take thread
[dragonfly.git] / sys / kern / kern_resource.c
CommitLineData
984263bc
MD
1/*-
2 * Copyright (c) 1982, 1986, 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the University of
21 * California, Berkeley and its contributors.
22 * 4. Neither the name of the University nor the names of its contributors
23 * may be used to endorse or promote products derived from this software
24 * without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
37 *
38 * @(#)kern_resource.c 8.5 (Berkeley) 1/21/94
39 * $FreeBSD: src/sys/kern/kern_resource.c,v 1.55.2.5 2001/11/03 01:41:08 ps Exp $
dadab5e9 40 * $DragonFly: src/sys/kern/kern_resource.c,v 1.6 2003/06/25 03:55:57 dillon Exp $
984263bc
MD
41 */
42
43#include "opt_compat.h"
44
45#include <sys/param.h>
46#include <sys/systm.h>
47#include <sys/sysproto.h>
48#include <sys/file.h>
49#include <sys/kernel.h>
50#include <sys/resourcevar.h>
51#include <sys/malloc.h>
52#include <sys/proc.h>
53#include <sys/time.h>
54
55#include <vm/vm.h>
56#include <vm/vm_param.h>
57#include <sys/lock.h>
58#include <vm/pmap.h>
59#include <vm/vm_map.h>
60
/* nice-value change back end shared by setpriority() */
static int donice __P((struct proc *chgp, int n));
/* dosetrlimit non-static:  Needed by SysVR4 emulator */
int dosetrlimit __P((u_int which, struct rlimit *limp));

static MALLOC_DEFINE(M_UIDINFO, "uidinfo", "uidinfo structures");
/* per-uid resource accounting: hash bucket head for a given uid */
#define	UIHASH(uid)	(&uihashtbl[(uid) & uihash])
static LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
static u_long uihash;		/* size of hash table - 1 */

static struct uidinfo	*uicreate __P((uid_t uid));
static struct uidinfo	*uilookup __P((uid_t uid));
73/*
74 * Resource controls and accounting.
75 */
76
77#ifndef _SYS_SYSPROTO_H_
78struct getpriority_args {
79 int which;
80 int who;
81};
82#endif
83int
41c20dac 84getpriority(struct getpriority_args *uap)
984263bc 85{
41c20dac
MD
86 struct proc *curp = curproc;
87 struct proc *p;
88 int low = PRIO_MAX + 1;
984263bc
MD
89
90 switch (uap->which) {
984263bc
MD
91 case PRIO_PROCESS:
92 if (uap->who == 0)
93 p = curp;
94 else
95 p = pfind(uap->who);
96 if (p == 0)
97 break;
41c20dac 98 if (!PRISON_CHECK(curp->p_ucred, p->p_ucred))
984263bc
MD
99 break;
100 low = p->p_nice;
101 break;
102
41c20dac
MD
103 case PRIO_PGRP:
104 {
984263bc
MD
105 register struct pgrp *pg;
106
107 if (uap->who == 0)
108 pg = curp->p_pgrp;
109 else if ((pg = pgfind(uap->who)) == NULL)
110 break;
111 LIST_FOREACH(p, &pg->pg_members, p_pglist) {
41c20dac 112 if ((PRISON_CHECK(curp->p_ucred, p->p_ucred) && p->p_nice < low))
984263bc
MD
113 low = p->p_nice;
114 }
115 break;
116 }
984263bc
MD
117 case PRIO_USER:
118 if (uap->who == 0)
119 uap->who = curp->p_ucred->cr_uid;
120 LIST_FOREACH(p, &allproc, p_list)
41c20dac 121 if (PRISON_CHECK(curp->p_ucred, p->p_ucred) &&
984263bc
MD
122 p->p_ucred->cr_uid == uap->who &&
123 p->p_nice < low)
124 low = p->p_nice;
125 break;
126
127 default:
128 return (EINVAL);
129 }
130 if (low == PRIO_MAX + 1)
131 return (ESRCH);
132 curp->p_retval[0] = low;
133 return (0);
134}
135
136#ifndef _SYS_SYSPROTO_H_
137struct setpriority_args {
138 int which;
139 int who;
140 int prio;
141};
142#endif
143/* ARGSUSED */
144int
41c20dac 145setpriority(struct setpriority_args *uap)
984263bc 146{
41c20dac
MD
147 struct proc *curp = curproc;
148 struct proc *p;
984263bc
MD
149 int found = 0, error = 0;
150
151 switch (uap->which) {
152
153 case PRIO_PROCESS:
154 if (uap->who == 0)
155 p = curp;
156 else
157 p = pfind(uap->who);
158 if (p == 0)
159 break;
41c20dac 160 if (!PRISON_CHECK(curp->p_ucred, p->p_ucred))
984263bc 161 break;
41c20dac 162 error = donice(p, uap->prio);
984263bc
MD
163 found++;
164 break;
165
41c20dac
MD
166 case PRIO_PGRP:
167 {
984263bc
MD
168 register struct pgrp *pg;
169
170 if (uap->who == 0)
171 pg = curp->p_pgrp;
172 else if ((pg = pgfind(uap->who)) == NULL)
173 break;
174 LIST_FOREACH(p, &pg->pg_members, p_pglist) {
41c20dac
MD
175 if (PRISON_CHECK(curp->p_ucred, p->p_ucred)) {
176 error = donice(p, uap->prio);
984263bc
MD
177 found++;
178 }
179 }
180 break;
181 }
984263bc
MD
182 case PRIO_USER:
183 if (uap->who == 0)
184 uap->who = curp->p_ucred->cr_uid;
185 LIST_FOREACH(p, &allproc, p_list)
186 if (p->p_ucred->cr_uid == uap->who &&
41c20dac
MD
187 PRISON_CHECK(curp->p_ucred, p->p_ucred)) {
188 error = donice(p, uap->prio);
984263bc
MD
189 found++;
190 }
191 break;
192
193 default:
194 return (EINVAL);
195 }
196 if (found == 0)
197 return (ESRCH);
198 return (error);
199}
200
201static int
41c20dac 202donice(struct proc *chgp, int n)
984263bc 203{
41c20dac
MD
204 struct proc *curp = curproc;
205 struct ucred *cr = curp->p_ucred;
984263bc 206
41c20dac
MD
207 if (cr->cr_uid && cr->cr_ruid &&
208 cr->cr_uid != chgp->p_ucred->cr_uid &&
209 cr->cr_ruid != chgp->p_ucred->cr_uid)
984263bc
MD
210 return (EPERM);
211 if (n > PRIO_MAX)
212 n = PRIO_MAX;
213 if (n < PRIO_MIN)
214 n = PRIO_MIN;
dadab5e9 215 if (n < chgp->p_nice && suser_cred(cr, 0))
984263bc
MD
216 return (EACCES);
217 chgp->p_nice = n;
218 (void)resetpriority(chgp);
219 return (0);
220}
221
/* rtprio system call */
#ifndef _SYS_SYSPROTO_H_
struct rtprio_args {
	int		function;
	pid_t		pid;
	struct rtprio	*rtp;
};
#endif

/*
 * Set realtime priority
 */

/*
 * rtprio() - look up (RTP_LOOKUP) or set (RTP_SET) the realtime
 * priority of the process given by uap->pid (0 == current process).
 * The struct rtprio is always copied in, even for RTP_LOOKUP.
 */
/* ARGSUSED */
int
rtprio(register struct rtprio_args *uap)
{
	struct proc *curp = curproc;
	struct proc *p;
	struct ucred *cr = curp->p_ucred;
	struct rtprio rtp;
	int error;

	error = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
	if (error)
		return (error);

	if (uap->pid == 0)
		p = curp;
	else
		p = pfind(uap->pid);

	if (p == 0)
		return (ESRCH);

	switch (uap->function) {
	case RTP_LOOKUP:
		return (copyout(&p->p_rtprio, uap->rtp, sizeof(struct rtprio)));
	case RTP_SET:
		/* caller's uids must match the target's effective uid */
		if (cr->cr_uid && cr->cr_ruid &&
		    cr->cr_uid != p->p_ucred->cr_uid &&
		    cr->cr_ruid != p->p_ucred->cr_uid)
			return (EPERM);
		/* disallow setting rtprio in most cases if not superuser */
		if (suser_cred(cr, 0)) {
			/* can't set someone else's */
			if (uap->pid)
				return (EPERM);
			/* can't set realtime priority */
/*
 * Realtime priority has to be restricted for reasons which should be
 * obvious.  However, for idle priority, there is a potential for
 * system deadlock if an idleprio process gains a lock on a resource
 * that other processes need (and the idleprio process can't run
 * due to a CPU-bound normal process).  Fix me!  XXX
 */
#if 0
			if (RTP_PRIO_IS_REALTIME(rtp.type))
#endif
			if (rtp.type != RTP_PRIO_NORMAL)
				return (EPERM);
		}
		/* validate the requested class and priority, then install */
		switch (rtp.type) {
#ifdef RTP_PRIO_FIFO
		case RTP_PRIO_FIFO:
#endif
		case RTP_PRIO_REALTIME:
		case RTP_PRIO_NORMAL:
		case RTP_PRIO_IDLE:
			if (rtp.prio > RTP_PRIO_MAX)
				return (EINVAL);
			p->p_rtprio = rtp;
			return (0);
		default:
			return (EINVAL);
		}

	default:
		return (EINVAL);
	}
}
303
304#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
305#ifndef _SYS_SYSPROTO_H_
306struct osetrlimit_args {
307 u_int which;
308 struct orlimit *rlp;
309};
310#endif
311/* ARGSUSED */
312int
41c20dac 313osetrlimit(struct osetrlimit_args *uap)
984263bc
MD
314{
315 struct orlimit olim;
316 struct rlimit lim;
317 int error;
318
319 if ((error =
320 copyin((caddr_t)uap->rlp, (caddr_t)&olim, sizeof(struct orlimit))))
321 return (error);
322 lim.rlim_cur = olim.rlim_cur;
323 lim.rlim_max = olim.rlim_max;
41c20dac 324 return (dosetrlimit(uap->which, &lim));
984263bc
MD
325}
326
327#ifndef _SYS_SYSPROTO_H_
328struct ogetrlimit_args {
329 u_int which;
330 struct orlimit *rlp;
331};
332#endif
333/* ARGSUSED */
334int
41c20dac 335ogetrlimit(struct ogetrlimit_args *uap)
984263bc 336{
41c20dac 337 struct proc *p = curproc;
984263bc
MD
338 struct orlimit olim;
339
340 if (uap->which >= RLIM_NLIMITS)
341 return (EINVAL);
342 olim.rlim_cur = p->p_rlimit[uap->which].rlim_cur;
343 if (olim.rlim_cur == -1)
344 olim.rlim_cur = 0x7fffffff;
345 olim.rlim_max = p->p_rlimit[uap->which].rlim_max;
346 if (olim.rlim_max == -1)
347 olim.rlim_max = 0x7fffffff;
348 return (copyout((caddr_t)&olim, (caddr_t)uap->rlp, sizeof(olim)));
349}
350#endif /* COMPAT_43 || COMPAT_SUNOS */
351
352#ifndef _SYS_SYSPROTO_H_
353struct __setrlimit_args {
354 u_int which;
355 struct rlimit *rlp;
356};
357#endif
358/* ARGSUSED */
359int
41c20dac 360setrlimit(struct __setrlimit_args *uap)
984263bc
MD
361{
362 struct rlimit alim;
363 int error;
364
365 if ((error =
366 copyin((caddr_t)uap->rlp, (caddr_t)&alim, sizeof (struct rlimit))))
367 return (error);
41c20dac 368 return (dosetrlimit(uap->which, &alim));
984263bc
MD
369}
370
/*
 * dosetrlimit() - back end for setrlimit()/osetrlimit(): validate and
 * install resource limit 'which' for the current process.  'limp' may
 * be clamped in place to system maximums.
 *
 * Returns 0 on success, EINVAL for a bad index, or the suser_cred()
 * error when an unprivileged caller tries to exceed the hard limit.
 */
int
dosetrlimit(u_int which, struct rlimit *limp)
{
	struct proc *p = curproc;
	struct rlimit *alimp;		/* currently installed limit */
	int error;

	if (which >= RLIM_NLIMITS)
		return (EINVAL);
	alimp = &p->p_rlimit[which];

	/*
	 * Preserve historical bugs by treating negative limits as unsigned.
	 */
	if (limp->rlim_cur < 0)
		limp->rlim_cur = RLIM_INFINITY;
	if (limp->rlim_max < 0)
		limp->rlim_max = RLIM_INFINITY;

	/* raising either value above the hard limit requires privilege */
	if (limp->rlim_cur > alimp->rlim_max ||
	    limp->rlim_max > alimp->rlim_max)
		if ((error = suser_cred(p->p_ucred, PRISON_ROOT)))
			return (error);
	if (limp->rlim_cur > limp->rlim_max)
		limp->rlim_cur = limp->rlim_max;
	/*
	 * The plimit structure is shared copy-on-write after fork; take a
	 * private copy before modifying it unless sharing modifications
	 * was requested (PL_SHAREMOD).
	 */
	if (p->p_limit->p_refcnt > 1 &&
	    (p->p_limit->p_lflags & PL_SHAREMOD) == 0) {
		p->p_limit->p_refcnt--;
		p->p_limit = limcopy(p->p_limit);
		alimp = &p->p_rlimit[which];	/* re-point at private copy */
	}

	switch (which) {

	case RLIMIT_CPU:
		/* p_cpulimit is in microseconds; avoid overflow on multiply */
		if (limp->rlim_cur > RLIM_INFINITY / (rlim_t)1000000)
			p->p_limit->p_cpulimit = RLIM_INFINITY;
		else
			p->p_limit->p_cpulimit =
			    (rlim_t)1000000 * limp->rlim_cur;
		break;
	case RLIMIT_DATA:
		if (limp->rlim_cur > maxdsiz)
			limp->rlim_cur = maxdsiz;
		if (limp->rlim_max > maxdsiz)
			limp->rlim_max = maxdsiz;
		break;

	case RLIMIT_STACK:
		if (limp->rlim_cur > maxssiz)
			limp->rlim_cur = maxssiz;
		if (limp->rlim_max > maxssiz)
			limp->rlim_max = maxssiz;
		/*
		 * Stack is allocated to the max at exec time with only
		 * "rlim_cur" bytes accessible.  If stack limit is going
		 * up make more accessible, if going down make inaccessible.
		 */
		if (limp->rlim_cur != alimp->rlim_cur) {
			vm_offset_t addr;
			vm_size_t size;
			vm_prot_t prot;

			if (limp->rlim_cur > alimp->rlim_cur) {
				/* growing: open up the new region */
				prot = VM_PROT_ALL;
				size = limp->rlim_cur - alimp->rlim_cur;
				addr = USRSTACK - limp->rlim_cur;
			} else {
				/* shrinking: revoke access to the excess */
				prot = VM_PROT_NONE;
				size = alimp->rlim_cur - limp->rlim_cur;
				addr = USRSTACK - alimp->rlim_cur;
			}
			addr = trunc_page(addr);
			size = round_page(size);
			(void) vm_map_protect(&p->p_vmspace->vm_map,
			    addr, addr+size, prot, FALSE);
		}
		break;

	case RLIMIT_NOFILE:
		if (limp->rlim_cur > maxfilesperproc)
			limp->rlim_cur = maxfilesperproc;
		if (limp->rlim_max > maxfilesperproc)
			limp->rlim_max = maxfilesperproc;
		break;

	case RLIMIT_NPROC:
		if (limp->rlim_cur > maxprocperuid)
			limp->rlim_cur = maxprocperuid;
		if (limp->rlim_max > maxprocperuid)
			limp->rlim_max = maxprocperuid;
		/* never allow less than one process */
		if (limp->rlim_cur < 1)
			limp->rlim_cur = 1;
		if (limp->rlim_max < 1)
			limp->rlim_max = 1;
		break;
	}
	*alimp = *limp;
	return (0);
}
471
472#ifndef _SYS_SYSPROTO_H_
473struct __getrlimit_args {
474 u_int which;
475 struct rlimit *rlp;
476};
477#endif
478/* ARGSUSED */
479int
41c20dac 480getrlimit(struct __getrlimit_args *uap)
984263bc 481{
41c20dac 482 struct proc *p = curproc;
984263bc
MD
483
484 if (uap->which >= RLIM_NLIMITS)
485 return (EINVAL);
486 return (copyout((caddr_t)&p->p_rlimit[uap->which], (caddr_t)uap->rlp,
487 sizeof (struct rlimit)));
488}
489
490/*
491 * Transform the running time and tick information in proc p into user,
492 * system, and interrupt time usage.
d16a8831
MD
493 *
494 * Since we are limited to statclock tick granularity this is a statisical
495 * calculation which will be correct over the long haul, but should not be
496 * expected to measure fine grained deltas.
984263bc
MD
497 */
498void
499calcru(p, up, sp, ip)
500 struct proc *p;
501 struct timeval *up;
502 struct timeval *sp;
503 struct timeval *ip;
504{
d16a8831 505 struct thread *td = p->p_thread;
984263bc 506 int s;
984263bc 507
d16a8831
MD
508 /*
509 * Calculate at the statclock level. YYY if the thread is owned by
510 * another cpu we need to forward the request to the other cpu, or
511 * have a token to interlock the information.
512 */
984263bc 513 s = splstatclock();
d16a8831
MD
514 up->tv_sec = td->td_uticks / 1000000;
515 up->tv_usec = td->td_uticks % 1000000;
516 sp->tv_sec = td->td_sticks / 1000000;
517 sp->tv_usec = td->td_sticks % 1000000;
984263bc 518 if (ip != NULL) {
d16a8831
MD
519 ip->tv_sec = td->td_iticks / 1000000;
520 ip->tv_usec = td->td_iticks % 1000000;
984263bc 521 }
d16a8831 522 splx(s);
984263bc
MD
523}
524
525#ifndef _SYS_SYSPROTO_H_
526struct getrusage_args {
527 int who;
528 struct rusage *rusage;
529};
530#endif
531/* ARGSUSED */
532int
41c20dac 533getrusage(struct getrusage_args *uap)
984263bc 534{
41c20dac
MD
535 struct proc *p = curproc;
536 struct rusage *rup;
984263bc
MD
537
538 switch (uap->who) {
539
540 case RUSAGE_SELF:
541 rup = &p->p_stats->p_ru;
542 calcru(p, &rup->ru_utime, &rup->ru_stime, NULL);
543 break;
544
545 case RUSAGE_CHILDREN:
546 rup = &p->p_stats->p_cru;
547 break;
548
549 default:
550 return (EINVAL);
551 }
552 return (copyout((caddr_t)rup, (caddr_t)uap->rusage,
553 sizeof (struct rusage)));
554}
555
556void
557ruadd(ru, ru2)
558 register struct rusage *ru, *ru2;
559{
560 register long *ip, *ip2;
561 register int i;
562
563 timevaladd(&ru->ru_utime, &ru2->ru_utime);
564 timevaladd(&ru->ru_stime, &ru2->ru_stime);
565 if (ru->ru_maxrss < ru2->ru_maxrss)
566 ru->ru_maxrss = ru2->ru_maxrss;
567 ip = &ru->ru_first; ip2 = &ru2->ru_first;
568 for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
569 *ip++ += *ip2++;
570}
571
/*
 * Make a copy of the plimit structure.
 * We share these structures copy-on-write after fork,
 * and copy when a limit is changed.
 */
struct plimit *
limcopy(lim)
	struct plimit *lim;
{
	register struct plimit *copy;

	MALLOC(copy, struct plimit *, sizeof(struct plimit),
	    M_SUBPROC, M_WAITOK);
	/*
	 * NOTE(review): this copies sizeof(struct plimit) bytes starting
	 * at the pl_rlimit member and then fixes up the bookkeeping fields
	 * below; that is only fully in-bounds if pl_rlimit is the first
	 * member of struct plimit -- confirm against <sys/resourcevar.h>.
	 */
	bcopy(lim->pl_rlimit, copy->pl_rlimit, sizeof(struct plimit));
	copy->p_lflags = 0;	/* fresh copy: not share-modifiable */
	copy->p_refcnt = 1;	/* caller holds the only reference */
	return (copy);
}
590
591/*
592 * Find the uidinfo structure for a uid. This structure is used to
593 * track the total resource consumption (process count, socket buffer
594 * size, etc.) for the uid and impose limits.
595 */
596void
597uihashinit()
598{
599 uihashtbl = hashinit(maxproc / 16, M_UIDINFO, &uihash);
600}
601
602static struct uidinfo *
603uilookup(uid)
604 uid_t uid;
605{
606 struct uihashhead *uipp;
607 struct uidinfo *uip;
608
609 uipp = UIHASH(uid);
610 LIST_FOREACH(uip, uipp, ui_hash)
611 if (uip->ui_uid == uid)
612 break;
613
614 return (uip);
615}
616
/*
 * uicreate() - allocate, initialize and hash a new uidinfo entry for
 * 'uid'.  Tries a non-sleeping allocation first; if it must sleep
 * (M_WAITOK) another entry may have been inserted meanwhile, so the
 * table is re-checked and the duplicate allocation freed.
 * Returned entry has ui_ref == 0; uifind() adds the reference.
 */
static struct uidinfo *
uicreate(uid)
	uid_t uid;
{
	struct uidinfo *uip, *norace;

	MALLOC(uip, struct uidinfo *, sizeof(*uip), M_UIDINFO, M_NOWAIT);
	if (uip == NULL) {
		MALLOC(uip, struct uidinfo *, sizeof(*uip), M_UIDINFO, M_WAITOK);
		/*
		 * if we M_WAITOK we must look afterwards or risk
		 * redundant entries
		 */
		norace = uilookup(uid);
		if (norace != NULL) {
			FREE(uip, M_UIDINFO);
			return (norace);
		}
	}
	LIST_INSERT_HEAD(UIHASH(uid), uip, ui_hash);
	uip->ui_uid = uid;
	uip->ui_proccnt = 0;
	uip->ui_sbsize = 0;
	uip->ui_ref = 0;
	return (uip);
}
643
644struct uidinfo *
645uifind(uid)
646 uid_t uid;
647{
648 struct uidinfo *uip;
649
650 uip = uilookup(uid);
651 if (uip == NULL)
652 uip = uicreate(uid);
653 uip->ui_ref++;
654 return (uip);
655}
656
657int
658uifree(uip)
659 struct uidinfo *uip;
660{
661
662 if (--uip->ui_ref == 0) {
663 if (uip->ui_sbsize != 0)
664 /* XXX no %qd in kernel. Truncate. */
665 printf("freeing uidinfo: uid = %d, sbsize = %ld\n",
666 uip->ui_uid, (long)uip->ui_sbsize);
667 if (uip->ui_proccnt != 0)
668 printf("freeing uidinfo: uid = %d, proccnt = %ld\n",
669 uip->ui_uid, uip->ui_proccnt);
670 LIST_REMOVE(uip, ui_hash);
671 FREE(uip, M_UIDINFO);
672 return (1);
673 }
674 return (0);
675}
676
677/*
678 * Change the count associated with number of processes
679 * a given user is using. When 'max' is 0, don't enforce a limit
680 */
681int
682chgproccnt(uip, diff, max)
683 struct uidinfo *uip;
684 int diff;
685 int max;
686{
687 /* don't allow them to exceed max, but allow subtraction */
688 if (diff > 0 && uip->ui_proccnt + diff > max && max != 0)
689 return (0);
690 uip->ui_proccnt += diff;
691 if (uip->ui_proccnt < 0)
692 printf("negative proccnt for uid = %d\n", uip->ui_uid);
693 return (1);
694}
695
/*
 * Change the total socket buffer size a user has used.
 *
 * Moves *hiwat to 'to', adjusting the uid's total by the difference,
 * under splnet().  Returns 0 if the increase would exceed 'max'
 * (decreases are always allowed), 1 on success.
 */
int
chgsbsize(uip, hiwat, to, max)
	struct uidinfo *uip;
	u_long *hiwat;
	u_long to;
	rlim_t max;
{
	rlim_t new;
	int s;

	s = splnet();
	new = uip->ui_sbsize + to - *hiwat;
	/* don't allow them to exceed max, but allow subtraction */
	if (to > *hiwat && new > max) {
		splx(s);
		return (0);
	}
	uip->ui_sbsize = new;
	*hiwat = to;
	if (uip->ui_sbsize < 0)
		printf("negative sbsize for uid = %d\n", uip->ui_uid);
	splx(s);
	return (1);
}