/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_fork.c	8.6 (Berkeley) 4/8/94
 * $FreeBSD: src/sys/kern/kern_fork.c,v 1.72.2.14 2003/06/26 04:15:10 silby Exp $
 * $DragonFly: src/sys/kern/kern_fork.c,v 1.21 2004/03/30 19:14:11 dillon Exp $
 */

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>
#include <sys/acct.h>
#include <sys/ktrace.h>
#include <sys/unistd.h>
#include <sys/jail.h>
#include <sys/caps.h>

#include <vm/vm.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <sys/vmmeter.h>
#include <sys/user.h>

static MALLOC_DEFINE(M_ATFORK, "atfork", "atfork callback");

/*
 * These are the structures used to create a callout list for things to do
 * when forking a process.
 */
struct forklist {
	forklist_fn function;
	TAILQ_ENTRY(forklist) next;
};

TAILQ_HEAD(forklist_head, forklist);
static struct forklist_head fork_list = TAILQ_HEAD_INITIALIZER(fork_list);

int forksleep;			/* place for fork1() to sleep on */

/* ARGSUSED */
int
fork(struct fork_args *uap)
{
	struct proc *p = curproc;
	struct proc *p2;
	int error;

	error = fork1(p, RFFDG | RFPROC, &p2);
	if (error == 0) {
		start_forked_proc(p, p2);
		uap->sysmsg_fds[0] = p2->p_pid;
		uap->sysmsg_fds[1] = 0;
	}
	return error;
}

/* ARGSUSED */
int
vfork(struct vfork_args *uap)
{
	struct proc *p = curproc;
	struct proc *p2;
	int error;

	error = fork1(p, RFFDG | RFPROC | RFPPWAIT | RFMEM, &p2);
	if (error == 0) {
		start_forked_proc(p, p2);
		uap->sysmsg_fds[0] = p2->p_pid;
		uap->sysmsg_fds[1] = 0;
	}
	return error;
}

int
rfork(struct rfork_args *uap)
{
	struct proc *p = curproc;
	struct proc *p2;
	int error;

	/* Don't allow kernel-only flags. */
	if ((uap->flags & RFKERNELONLY) != 0)
		return (EINVAL);

	error = fork1(p, uap->flags, &p2);
	if (error == 0) {
		start_forked_proc(p, p2);
		uap->sysmsg_fds[0] = p2 ? p2->p_pid : 0;
		uap->sysmsg_fds[1] = 0;
	}
	return error;
}
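
/*
 * Note that, as the wrappers above show, fork(2) and vfork(2) are simply
 * fixed-flag forms of rfork(2):
 *
 *	fork()	== rfork(RFFDG | RFPROC)
 *	vfork()	== rfork(RFFDG | RFPROC | RFPPWAIT | RFMEM)
 *
 * RFMEM shares the parent's address space with the child and RFPPWAIT
 * blocks the parent until the child execs or exits, which together give
 * vfork() its traditional semantics.
 */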

int nprocs = 1;		/* process 0 */
static int nextpid = 0;

/*
 * Random component to nextpid generation.  We mix in a random factor to
 * make it a little harder to predict.  We sanity check the modulus value
 * to avoid doing it in critical paths.  Don't let it be too small or we
 * pointlessly waste entropy, and don't let it be impossibly large.  Using
 * a modulus that is too big causes a LOT more process table scans and
 * slows down fork processing as the pidchecked caching is defeated.
 */
static int randompid = 0;

static int
sysctl_kern_randompid(SYSCTL_HANDLER_ARGS)
{
	int error, pid;

	pid = randompid;
	error = sysctl_handle_int(oidp, &pid, 0, req);
	if (error || !req->newptr)
		return (error);
	if (pid < 0 || pid > PID_MAX - 100)	/* out of range */
		pid = PID_MAX - 100;
	else if (pid < 2)			/* NOP */
		pid = 0;
	else if (pid < 100)			/* make it reasonable */
		pid = 100;
	randompid = pid;
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, randompid, CTLTYPE_INT|CTLFLAG_RW,
    0, 0, sysctl_kern_randompid, "I", "Random PID modulus");
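
/*
 * For example, "sysctl kern.randompid=100" makes each new pid come out
 * as nextpid + (arc4random() % 100).  The handler above clamps accepted
 * non-zero values to the range [100, PID_MAX - 100]; writing 0 or 1
 * disables randomization.
 */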

int
fork1(struct proc *p1, int flags, struct proc **procp)
{
	struct proc *p2, *pptr;
	uid_t uid;
	struct proc *newproc;
	int ok;
	static int curfail = 0, pidchecked = 0;
	static struct timeval lastfail;
	struct forklist *ep;
	struct filedesc_to_leader *fdtol;

	if ((flags & (RFFDG|RFCFDG)) == (RFFDG|RFCFDG))
		return (EINVAL);

	/*
	 * Here we don't create a new process, but we divorce
	 * certain parts of a process from itself.
	 */
	if ((flags & RFPROC) == 0) {
		vm_fork(p1, 0, flags);

		/*
		 * Close all file descriptors.
		 */
		if (flags & RFCFDG) {
			struct filedesc *fdtmp;

			fdtmp = fdinit(p1);
			fdfree(p1);
			p1->p_fd = fdtmp;
		}

		/*
		 * Unshare file descriptors (from parent).
		 */
		if (flags & RFFDG) {
			if (p1->p_fd->fd_refcnt > 1) {
				struct filedesc *newfd;

				newfd = fdcopy(p1);
				fdfree(p1);
				p1->p_fd = newfd;
			}
		}
		*procp = NULL;
		return (0);
	}
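
	/*
	 * For example, rfork(RFCFDG) with RFPROC clear takes the path
	 * above: no child is created, the calling process simply gets a
	 * fresh descriptor table, and *procp comes back NULL so rfork(2)
	 * returns 0 to the caller.
	 */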

	/*
	 * Although process entries are dynamically created, we still keep
	 * a global limit on the maximum number we will create.  Don't allow
	 * a nonprivileged user to use the last ten processes; don't let
	 * root exceed the limit.  The variable nprocs is the current number
	 * of processes, maxproc is the limit.
	 */
	uid = p1->p_ucred->cr_ruid;
	if ((nprocs >= maxproc - 10 && uid != 0) || nprocs >= maxproc) {
		if (ppsratecheck(&lastfail, &curfail, 1))
			printf("maxproc limit exceeded by uid %d, please "
			    "see tuning(7) and login.conf(5).\n", uid);
		tsleep(&forksleep, 0, "fork", hz / 2);
		return (EAGAIN);
	}

	/*
	 * Increment the nprocs resource before blocking can occur.  There
	 * are hard limits as to the number of processes that can run.
	 */
	nprocs++;

	/*
	 * Increment the count of procs running with this uid.  Don't allow
	 * a nonprivileged user to exceed their current limit.
	 */
	ok = chgproccnt(p1->p_ucred->cr_ruidinfo, 1,
	    (uid != 0) ? p1->p_rlimit[RLIMIT_NPROC].rlim_cur : 0);
	if (!ok) {
		/*
		 * Back out the process count.
		 */
		nprocs--;
		if (ppsratecheck(&lastfail, &curfail, 1))
			printf("maxproc limit exceeded by uid %d, please "
			    "see tuning(7) and login.conf(5).\n", uid);
		tsleep(&forksleep, 0, "fork", hz / 2);
		return (EAGAIN);
	}

	/* Allocate new proc. */
	newproc = zalloc(proc_zone);

	/*
	 * Set up linkage for kernel-based threading.
	 */
	if ((flags & RFTHREAD) != 0) {
		newproc->p_peers = p1->p_peers;
		p1->p_peers = newproc;
		newproc->p_leader = p1->p_leader;
	} else {
		newproc->p_peers = 0;
		newproc->p_leader = newproc;
	}

	newproc->p_wakeup = 0;
	newproc->p_vmspace = NULL;

	/*
	 * Find an unused process ID.  We remember a range of unused IDs
	 * ready to use (from nextpid+1 through pidchecked-1).
	 */
	nextpid++;
	if (randompid)
		nextpid += arc4random() % randompid;
retry:
	/*
	 * If the process ID prototype has wrapped around,
	 * restart somewhat above 0, as the low-numbered procs
	 * tend to include daemons that don't exit.
	 */
	if (nextpid >= PID_MAX) {
		nextpid = nextpid % PID_MAX;
		if (nextpid < 100)
			nextpid += 100;
		pidchecked = 0;
	}
	if (nextpid >= pidchecked) {
		int doingzomb = 0;

		pidchecked = PID_MAX;
		/*
		 * Scan the active and zombie procs to check whether this pid
		 * is in use.  Remember the lowest pid that's greater
		 * than nextpid, so we can avoid checking for a while.
		 */
		p2 = LIST_FIRST(&allproc);
again:
		for (; p2 != 0; p2 = LIST_NEXT(p2, p_list)) {
			while (p2->p_pid == nextpid ||
			    p2->p_pgrp->pg_id == nextpid ||
			    p2->p_session->s_sid == nextpid) {
				nextpid++;
				if (nextpid >= pidchecked)
					goto retry;
			}
			if (p2->p_pid > nextpid && pidchecked > p2->p_pid)
				pidchecked = p2->p_pid;
			if (p2->p_pgrp->pg_id > nextpid &&
			    pidchecked > p2->p_pgrp->pg_id)
				pidchecked = p2->p_pgrp->pg_id;
			if (p2->p_session->s_sid > nextpid &&
			    pidchecked > p2->p_session->s_sid)
				pidchecked = p2->p_session->s_sid;
		}
		if (!doingzomb) {
			doingzomb = 1;
			p2 = LIST_FIRST(&zombproc);
			goto again;
		}
	}
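
	/*
	 * For example, if nextpid is 20 and the scan finds 30 as the
	 * smallest pid/pgid/sid above it, pidchecked becomes 30 and pids
	 * 21 through 29 can be handed out by subsequent forks without
	 * rescanning the proc lists.
	 */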

	p2 = newproc;
	p2->p_stat = SIDL;		/* protect against others */
	p2->p_pid = nextpid;
	LIST_INSERT_HEAD(&allproc, p2, p_list);
	LIST_INSERT_HEAD(PIDHASH(p2->p_pid), p2, p_hash);

	/*
	 * Make a proc table entry for the new process.
	 * Start by zeroing the section of proc that is zero-initialized,
	 * then copy the section that is copied directly from the parent.
	 */
	bzero(&p2->p_startzero,
	    (unsigned)((caddr_t)&p2->p_endzero - (caddr_t)&p2->p_startzero));
	bcopy(&p1->p_startcopy, &p2->p_startcopy,
	    (unsigned)((caddr_t)&p2->p_endcopy - (caddr_t)&p2->p_startcopy));

	p2->p_aioinfo = NULL;

	/*
	 * Duplicate sub-structures as needed.
	 * Increase reference counts on shared objects.
	 * The p_stats and p_sigacts substructs are set in vm_fork.
	 *
	 * P_CP_RELEASED indicates that the process is starting out in
	 * the kernel (in the fork trampoline).  The flag will be cleared
	 * when the new process calls userret() and acquires its current
	 * process designation for the return to userland.
	 */
	p2->p_flag = P_INMEM | P_CP_RELEASED;
	if (p1->p_flag & P_PROFIL)
		startprofclock(p2);
	p2->p_ucred = crhold(p1->p_ucred);

	if (p2->p_ucred->cr_prison) {
		p2->p_ucred->cr_prison->pr_ref++;
		p2->p_flag |= P_JAILED;
	}

	if (p2->p_args)
		p2->p_args->ar_ref++;

	if (flags & RFSIGSHARE) {
		p2->p_procsig = p1->p_procsig;
		p2->p_procsig->ps_refcnt++;
		if (p1->p_sigacts == &p1->p_addr->u_sigacts) {
			struct sigacts *newsigacts;
			int s;

			/* Create the shared sigacts structure. */
			MALLOC(newsigacts, struct sigacts *,
			    sizeof(struct sigacts), M_SUBPROC, M_WAITOK);
			s = splhigh();
			/*
			 * Set p_sigacts to the new shared structure.
			 * Note that this is updating p1->p_sigacts at the
			 * same time, since p_sigacts is just a pointer to
			 * the shared p_procsig->ps_sigacts.
			 */
			p2->p_sigacts = newsigacts;
			bcopy(&p1->p_addr->u_sigacts, p2->p_sigacts,
			    sizeof(*p2->p_sigacts));
			splx(s);
		}
	} else {
		MALLOC(p2->p_procsig, struct procsig *, sizeof(struct procsig),
		    M_SUBPROC, M_WAITOK);
		bcopy(p1->p_procsig, p2->p_procsig, sizeof(*p2->p_procsig));
		p2->p_procsig->ps_refcnt = 1;
		p2->p_sigacts = NULL;	/* finished in vm_fork() */
	}
	if (flags & RFLINUXTHPN)
		p2->p_sigparent = SIGUSR1;
	else
		p2->p_sigparent = SIGCHLD;

	/* Bump references to the text vnode (for procfs). */
	p2->p_textvp = p1->p_textvp;
	if (p2->p_textvp)
		VREF(p2->p_textvp);

	if (flags & RFCFDG) {
		p2->p_fd = fdinit(p1);
		fdtol = NULL;
	} else if (flags & RFFDG) {
		p2->p_fd = fdcopy(p1);
		fdtol = NULL;
	} else {
		p2->p_fd = fdshare(p1);
		if (p1->p_fdtol == NULL)
			p1->p_fdtol = filedesc_to_leader_alloc(NULL,
			    p1->p_leader);
		if ((flags & RFTHREAD) != 0) {
			/*
			 * Shared file descriptor table and
			 * shared process leaders.
			 */
			fdtol = p1->p_fdtol;
			fdtol->fdl_refcount++;
		} else {
			/*
			 * Shared file descriptor table, but
			 * different process leaders.
			 */
			fdtol = filedesc_to_leader_alloc(p1->p_fdtol, p2);
		}
	}
	p2->p_fdtol = fdtol;
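
	/*
	 * To summarize the descriptor handling above:
	 *
	 *	RFCFDG	- the child gets a fresh table (fdinit)
	 *	RFFDG	- the child gets a private copy of the parent's
	 *		  table (fdcopy)
	 *	neither	- the child shares the parent's table (fdshare),
	 *		  with the filedesc_to_leader structure recording
	 *		  which process leader the shared table belongs to
	 */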

	/*
	 * If p_limit is still copy-on-write, bump refcnt;
	 * otherwise get a copy that won't be modified.
	 * (If PL_SHAREMOD is clear, the structure is shared
	 * copy-on-write.)
	 */
	if (p1->p_limit->p_lflags & PL_SHAREMOD) {
		p2->p_limit = limcopy(p1->p_limit);
	} else {
		p2->p_limit = p1->p_limit;
		p2->p_limit->p_refcnt++;
	}

	/*
	 * Preserve some more flags in the subprocess.  P_PROFIL has
	 * already been preserved.
	 */
	p2->p_flag |= p1->p_flag & (P_SUGID | P_ALTSTACK);
	if (p1->p_session->s_ttyvp != NULL && (p1->p_flag & P_CONTROLT))
		p2->p_flag |= P_CONTROLT;
	if (flags & RFPPWAIT)
		p2->p_flag |= P_PPWAIT;

	LIST_INSERT_AFTER(p1, p2, p_pglist);

	/*
	 * Attach the new process to its parent.
	 *
	 * If RFNOWAIT is set, the newly created process becomes a child
	 * of init.  This effectively disassociates the child from the
	 * parent.
	 */
	if (flags & RFNOWAIT)
		pptr = initproc;
	else
		pptr = p1;
	p2->p_pptr = pptr;
	LIST_INSERT_HEAD(&pptr->p_children, p2, p_sibling);
	LIST_INIT(&p2->p_children);
	varsymset_init(&p2->p_varsymset, &p1->p_varsymset);

#ifdef KTRACE
	/*
	 * Copy traceflag and tracefile if enabled.  If not inherited,
	 * these were zeroed above, but we still could have a trace race
	 * so make sure p2's p_tracep is NULL.
	 */
	if ((p1->p_traceflag & KTRFAC_INHERIT) && p2->p_tracep == NULL) {
		p2->p_traceflag = p1->p_traceflag;
		if ((p2->p_tracep = p1->p_tracep) != NULL)
			VREF(p2->p_tracep);
	}
#endif

	/*
	 * Give the child process an estcpu skewed towards the batch side
	 * of the parent.  This prevents batch programs from glitching
	 * interactive programs when they are first started.  If the child
	 * is not a batch program its priority will be corrected by the
	 * scheduler.
	 */
	p2->p_estcpu_fork = p2->p_estcpu =
		ESTCPULIM(p1->p_estcpu + ESTCPURAMP);

	/*
	 * This begins the section where we must prevent the parent
	 * from being swapped.
	 */
	PHOLD(p1);

	/*
	 * Finish creating the child process.  It will return via a
	 * different execution path later (i.e., directly into user mode).
	 */
	vm_fork(p1, p2, flags);
	caps_fork(p1, p2, flags);

	if (flags == (RFFDG | RFPROC)) {
		mycpu->gd_cnt.v_forks++;
		mycpu->gd_cnt.v_forkpages += p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize;
	} else if (flags == (RFFDG | RFPROC | RFPPWAIT | RFMEM)) {
		mycpu->gd_cnt.v_vforks++;
		mycpu->gd_cnt.v_vforkpages += p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize;
	} else if (p1 == &proc0) {
		mycpu->gd_cnt.v_kthreads++;
		mycpu->gd_cnt.v_kthreadpages += p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize;
	} else {
		mycpu->gd_cnt.v_rforks++;
		mycpu->gd_cnt.v_rforkpages += p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize;
	}

	/*
	 * Both processes are set up; now check if any loadable modules
	 * want to adjust anything.
	 *   What if they have an error? XXX
	 */
	TAILQ_FOREACH(ep, &fork_list, next) {
		(*ep->function)(p1, p2, flags);
	}

	/*
	 * Record the start time.  The child is made runnable later, via
	 * start_forked_proc().
	 */
	microtime(&(p2->p_stats->p_start));
	p2->p_acflag = AFORK;

	/*
	 * Tell any interested parties about the new process.
	 */
	KNOTE(&p1->p_klist, NOTE_FORK | p2->p_pid);

	/*
	 * Return child proc pointer to parent.
	 */
	*procp = p2;
	return (0);
}

/*
 * The next two functions are general routines to handle adding/deleting
 * items on the fork callout list.
 *
 * at_fork():
 * Take the arguments given and put them onto the fork callout list;
 * first, however, make sure that it is not already there.
 * Returns 0 on success or a standard error number.
 */
int
at_fork(forklist_fn function)
{
	struct forklist *ep;

#ifdef INVARIANTS
	/* Let the programmer know if he's been stupid. */
	if (rm_at_fork(function))
		printf("WARNING: fork callout entry (%p) already present\n",
		    function);
#endif
	ep = malloc(sizeof(*ep), M_ATFORK, M_NOWAIT);
	if (ep == NULL)
		return (ENOMEM);
	ep->function = function;
	TAILQ_INSERT_TAIL(&fork_list, ep, next);
	return (0);
}
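
/*
 * Minimal usage sketch (my_fork_hook is hypothetical); a forklist_fn
 * receives the parent, the child, and the fork1() flags, as in the
 * TAILQ_FOREACH dispatch in fork1() above:
 *
 *	static void
 *	my_fork_hook(struct proc *p1, struct proc *p2, int flags)
 *	{
 *		... adjust p2 here ...
 *	}
 *
 *	error = at_fork(my_fork_hook);
 */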

/*
 * Scan the fork callout list for the given item and remove it.
 * Returns the number of items removed (0 or 1).
 */
int
rm_at_fork(forklist_fn function)
{
	struct forklist *ep;

	TAILQ_FOREACH(ep, &fork_list, next) {
		if (ep->function == function) {
			TAILQ_REMOVE(&fork_list, ep, next);
			free(ep, M_ATFORK);
			return (1);
		}
	}
	return (0);
}

/*
 * Add a forked process to the run queue after any remaining setup, such
 * as setting the fork handler, has been completed.
 */
void
start_forked_proc(struct proc *p1, struct proc *p2)
{
	/*
	 * Move from SIDL to the RUN queue and activate the process's
	 * thread.
	 */
	KASSERT(p2->p_stat == SIDL,
	    ("cannot start forked process, bad status: %p", p2));
	resetpriority(p2);
	(void) splhigh();
	p2->p_stat = SRUN;
	setrunqueue(p2);
	(void) spl0();

	/*
	 * Now can be swapped.
	 */
	PRELE(p1);

	/*
	 * Preserve synchronization semantics of vfork.  If waiting for
	 * the child to exec or exit, set P_PPWAIT on the child and sleep
	 * on our proc (in case of exit).
	 */
	while (p2->p_flag & P_PPWAIT)
		tsleep(p1, 0, "ppwait", 0);
}