[dragonfly.git] / sys / kern / kern_fork.c
/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_fork.c	8.6 (Berkeley) 4/8/94
 * $FreeBSD: src/sys/kern/kern_fork.c,v 1.72.2.14 2003/06/26 04:15:10 silby Exp $
 * $DragonFly: src/sys/kern/kern_fork.c,v 1.35 2005/06/26 04:36:31 dillon Exp $
 */

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>
#include <sys/acct.h>
#include <sys/ktrace.h>
#include <sys/unistd.h>
#include <sys/jail.h>
#include <sys/caps.h>

#include <vm/vm.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <sys/vmmeter.h>
#include <sys/user.h>
#include <sys/thread2.h>

static MALLOC_DEFINE(M_ATFORK, "atfork", "atfork callback");

/*
 * These are the structures used to create a callout list for things to do
 * when forking a process.
 */
struct forklist {
	forklist_fn function;
	TAILQ_ENTRY(forklist) next;
};

TAILQ_HEAD(forklist_head, forklist);
static struct forklist_head fork_list = TAILQ_HEAD_INITIALIZER(fork_list);

int forksleep;			/* Place for fork1() to sleep on. */

/* ARGSUSED */
int
fork(struct fork_args *uap)
{
	struct proc *p = curproc;
	struct proc *p2;
	int error;

	error = fork1(p, RFFDG | RFPROC, &p2);
	if (error == 0) {
		start_forked_proc(p, p2);
		uap->sysmsg_fds[0] = p2->p_pid;
		uap->sysmsg_fds[1] = 0;
	}
	return error;
}

/* ARGSUSED */
int
vfork(struct vfork_args *uap)
{
	struct proc *p = curproc;
	struct proc *p2;
	int error;

	error = fork1(p, RFFDG | RFPROC | RFPPWAIT | RFMEM, &p2);
	if (error == 0) {
		start_forked_proc(p, p2);
		uap->sysmsg_fds[0] = p2->p_pid;
		uap->sysmsg_fds[1] = 0;
	}
	return error;
}

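/*
 * Illustrative sketch (not part of this file): the userland pattern the
 * RFPPWAIT | RFMEM combination above exists for.  The vfork() child borrows
 * the parent's address space, so it should only exec or _exit; the parent
 * sleeps until one of those happens (see start_forked_proc() at the end of
 * this file).  The program fragment is a hypothetical example only.
 *
 *	pid_t pid = vfork();
 *	if (pid == 0) {
 *		execl("/bin/echo", "echo", "hello", (char *)NULL);
 *		_exit(127);	// exec failed; must not return
 *	}
 *	// parent resumes here only after the child has exec'd or exited
 */
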
/*
 * Handle rforks.  An rfork may (1) operate on the current process without
 * creating a new one, (2) create a new process that shares the current
 * process's vmspace, signals, and/or descriptors, or (3) create a new
 * process that does not share these things (a normal fork).
 *
 * Note that we only call start_forked_proc() if a new process is actually
 * created.
 *
 * rfork { int flags }
 */
int
rfork(struct rfork_args *uap)
{
	struct proc *p = curproc;
	struct proc *p2;
	int error;

	if ((uap->flags & RFKERNELONLY) != 0)
		return (EINVAL);

	error = fork1(p, uap->flags, &p2);
	if (error == 0) {
		if (p2)
			start_forked_proc(p, p2);
		uap->sysmsg_fds[0] = p2 ? p2->p_pid : 0;
		uap->sysmsg_fds[1] = 0;
	}
	return error;
}
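
/*
 * Illustrative sketch (not part of this file): how userland might exercise
 * the three rfork() modes described above.  The RF* flag names are from the
 * BSD <unistd.h>; the surrounding fragment is a hypothetical example only.
 *
 *	pid_t pid;
 *
 *	pid = rfork(RFPROC | RFFDG);	// (3) normal fork: new process with
 *					//     a copied descriptor table
 *	pid = rfork(RFPROC | RFMEM);	// (2) new process sharing the parent's
 *					//     vmspace (including the stack, so
 *					//     real callers normally go through
 *					//     a wrapper that sets up a stack)
 *	(void)rfork(RFCFDG);		// (1) no new process; this process
 *					//     gets a fresh descriptor table
 */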

int nprocs = 1;			/* process 0 */
static int nextpid = 0;

/*
 * Random component to nextpid generation.  We mix in a random factor to make
 * it a little harder to predict.  We sanity check the modulus value to avoid
 * doing it in critical paths.  Don't let it be too small or we pointlessly
 * waste randomness entropy, and don't let it be impossibly large.  Using a
 * modulus that is too big causes a LOT more process table scans and slows
 * down fork processing as the pidchecked caching is defeated.
 */
static int randompid = 0;

static int
sysctl_kern_randompid(SYSCTL_HANDLER_ARGS)
{
	int error, pid;

	pid = randompid;
	error = sysctl_handle_int(oidp, &pid, 0, req);
	if (error || !req->newptr)
		return (error);
	if (pid < 0 || pid > PID_MAX - 100)	/* out of range */
		pid = PID_MAX - 100;
	else if (pid < 2)			/* NOP */
		pid = 0;
	else if (pid < 100)			/* Make it reasonable */
		pid = 100;
	randompid = pid;
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, randompid, CTLTYPE_INT|CTLFLAG_RW,
    0, 0, sysctl_kern_randompid, "I", "Random PID modulus");
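
/*
 * Illustrative usage (assumes the standard sysctl(8) utility; not part of
 * this file).  Setting a modulus enables randomized PID selection in fork1()
 * below, and the handler above clamps unreasonable values:
 *
 *	# sysctl kern.randompid=150	-> PIDs advance by up to ~150 per fork
 *	# sysctl kern.randompid=5	-> clamped up to the minimum of 100
 *	# sysctl kern.randompid=0	-> back to sequential PID allocation
 */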

int
fork1(struct proc *p1, int flags, struct proc **procp)
{
	struct proc *p2, *pptr;
	uid_t uid;
	struct proc *newproc;
	int ok;
	static int curfail = 0, pidchecked = 0;
	static struct timeval lastfail;
	struct forklist *ep;
	struct filedesc_to_leader *fdtol;

	if ((flags & (RFFDG|RFCFDG)) == (RFFDG|RFCFDG))
		return (EINVAL);

	/*
	 * Here we don't create a new process, but we divorce
	 * certain parts of a process from itself.
	 */
	if ((flags & RFPROC) == 0) {

		vm_fork(p1, 0, flags);

		/*
		 * Close all file descriptors.
		 */
		if (flags & RFCFDG) {
			struct filedesc *fdtmp;
			fdtmp = fdinit(p1);
			fdfree(p1);
			p1->p_fd = fdtmp;
		}

		/*
		 * Unshare file descriptors (from parent.)
		 */
		if (flags & RFFDG) {
			if (p1->p_fd->fd_refcnt > 1) {
				struct filedesc *newfd;
				newfd = fdcopy(p1);
				fdfree(p1);
				p1->p_fd = newfd;
			}
		}
		*procp = NULL;
		return (0);
	}

	/*
	 * Although process entries are dynamically created, we still keep
	 * a global limit on the maximum number we will create.  Don't allow
	 * a nonprivileged user to use the last ten processes; don't let root
	 * exceed the limit.  The variable nprocs is the current number of
	 * processes, maxproc is the limit.
	 */
	uid = p1->p_ucred->cr_ruid;
	if ((nprocs >= maxproc - 10 && uid != 0) || nprocs >= maxproc) {
		if (ppsratecheck(&lastfail, &curfail, 1))
			printf("maxproc limit exceeded by uid %d, please "
			       "see tuning(7) and login.conf(5).\n", uid);
		tsleep(&forksleep, 0, "fork", hz / 2);
		return (EAGAIN);
	}
	/*
	 * Increment the nprocs resource before blocking can occur.  There
	 * are hard-limits as to the number of processes that can run.
	 */
	nprocs++;

	/*
	 * Increment the count of procs running with this uid.  Don't allow
	 * a nonprivileged user to exceed their current limit.
	 */
	ok = chgproccnt(p1->p_ucred->cr_ruidinfo, 1,
		(uid != 0) ? p1->p_rlimit[RLIMIT_NPROC].rlim_cur : 0);
	if (!ok) {
		/*
		 * Back out the process count
		 */
		nprocs--;
		if (ppsratecheck(&lastfail, &curfail, 1))
			printf("maxproc limit exceeded by uid %d, please "
			       "see tuning(7) and login.conf(5).\n", uid);
		tsleep(&forksleep, 0, "fork", hz / 2);
		return (EAGAIN);
	}

	/* Allocate new proc. */
	newproc = zalloc(proc_zone);

	/*
	 * Setup linkage for kernel based threading
	 */
	if ((flags & RFTHREAD) != 0) {
		newproc->p_peers = p1->p_peers;
		p1->p_peers = newproc;
		newproc->p_leader = p1->p_leader;
	} else {
		newproc->p_peers = 0;
		newproc->p_leader = newproc;
	}

	newproc->p_wakeup = 0;
	newproc->p_vmspace = NULL;
	TAILQ_INIT(&newproc->p_sysmsgq);

	/*
	 * Find an unused process ID.  We remember a range of unused IDs
	 * ready to use (from nextpid+1 through pidchecked-1).
	 */
	nextpid++;
	if (randompid)
		nextpid += arc4random() % randompid;
retry:
	/*
	 * If the process ID prototype has wrapped around,
	 * restart somewhat above 0, as the low-numbered procs
	 * tend to include daemons that don't exit.
	 */
	if (nextpid >= PID_MAX) {
		nextpid = nextpid % PID_MAX;
		if (nextpid < 100)
			nextpid += 100;
		pidchecked = 0;
	}
	if (nextpid >= pidchecked) {
		int doingzomb = 0;

		pidchecked = PID_MAX;
		/*
		 * Scan the active and zombie procs to check whether this pid
		 * is in use.  Remember the lowest pid that's greater
		 * than nextpid, so we can avoid checking for a while.
		 */
		p2 = LIST_FIRST(&allproc);
again:
		for (; p2 != 0; p2 = LIST_NEXT(p2, p_list)) {
			while (p2->p_pid == nextpid ||
			    p2->p_pgrp->pg_id == nextpid ||
			    p2->p_session->s_sid == nextpid) {
				nextpid++;
				if (nextpid >= pidchecked)
					goto retry;
			}
			if (p2->p_pid > nextpid && pidchecked > p2->p_pid)
				pidchecked = p2->p_pid;
			if (p2->p_pgrp->pg_id > nextpid &&
			    pidchecked > p2->p_pgrp->pg_id)
				pidchecked = p2->p_pgrp->pg_id;
			if (p2->p_session->s_sid > nextpid &&
			    pidchecked > p2->p_session->s_sid)
				pidchecked = p2->p_session->s_sid;
		}
		if (!doingzomb) {
			doingzomb = 1;
			p2 = LIST_FIRST(&zombproc);
			goto again;
		}
	}

	p2 = newproc;
	p2->p_stat = SIDL;			/* protect against others */
	p2->p_pid = nextpid;
	LIST_INSERT_HEAD(&allproc, p2, p_list);
	LIST_INSERT_HEAD(PIDHASH(p2->p_pid), p2, p_hash);

	/*
	 * Make a proc table entry for the new process.
	 * Start by zeroing the section of proc that is zero-initialized,
	 * then copy the section that is copied directly from the parent.
	 */
	bzero(&p2->p_startzero,
	    (unsigned) ((caddr_t)&p2->p_endzero - (caddr_t)&p2->p_startzero));
	bcopy(&p1->p_startcopy, &p2->p_startcopy,
	    (unsigned) ((caddr_t)&p2->p_endcopy - (caddr_t)&p2->p_startcopy));

	p2->p_aioinfo = NULL;

	/*
	 * Duplicate sub-structures as needed.
	 * Increase reference counts on shared objects.
	 * The p_stats and p_sigacts substructs are set in vm_fork.
	 */
	p2->p_flag = P_INMEM;
	if (p1->p_flag & P_PROFIL)
		startprofclock(p2);
	p2->p_ucred = crhold(p1->p_ucred);

	if (jailed(p2->p_ucred))
		p2->p_flag |= P_JAILED;

	if (p2->p_args)
		p2->p_args->ar_ref++;

	if (flags & RFSIGSHARE) {
		p2->p_procsig = p1->p_procsig;
		p2->p_procsig->ps_refcnt++;
		if (p1->p_sigacts == &p1->p_addr->u_sigacts) {
			struct sigacts *newsigacts;

			/* Create the shared sigacts structure */
			MALLOC(newsigacts, struct sigacts *,
			    sizeof(struct sigacts), M_SUBPROC, M_WAITOK);
			crit_enter();
			/*
			 * Set p_sigacts to the new shared structure.
			 * Note that this is updating p1->p_sigacts at the
			 * same time, since p_sigacts is just a pointer to
			 * the shared p_procsig->ps_sigacts.
			 */
			p2->p_sigacts = newsigacts;
			bcopy(&p1->p_addr->u_sigacts, p2->p_sigacts,
			    sizeof(*p2->p_sigacts));
			crit_exit();
		}
	} else {
		MALLOC(p2->p_procsig, struct procsig *, sizeof(struct procsig),
		    M_SUBPROC, M_WAITOK);
		bcopy(p1->p_procsig, p2->p_procsig, sizeof(*p2->p_procsig));
		p2->p_procsig->ps_refcnt = 1;
		p2->p_sigacts = NULL;		/* finished in vm_fork() */
	}
	if (flags & RFLINUXTHPN)
		p2->p_sigparent = SIGUSR1;
	else
		p2->p_sigparent = SIGCHLD;

	/* bump references to the text vnode (for procfs) */
	p2->p_textvp = p1->p_textvp;
	if (p2->p_textvp)
		vref(p2->p_textvp);

	if (flags & RFCFDG) {
		p2->p_fd = fdinit(p1);
		fdtol = NULL;
	} else if (flags & RFFDG) {
		p2->p_fd = fdcopy(p1);
		fdtol = NULL;
	} else {
		p2->p_fd = fdshare(p1);
		if (p1->p_fdtol == NULL)
			p1->p_fdtol =
				filedesc_to_leader_alloc(NULL,
							 p1->p_leader);
		if ((flags & RFTHREAD) != 0) {
			/*
			 * Shared file descriptor table and
			 * shared process leaders.
			 */
			fdtol = p1->p_fdtol;
			fdtol->fdl_refcount++;
		} else {
			/*
			 * Shared file descriptor table, and
			 * different process leaders
			 */
			fdtol = filedesc_to_leader_alloc(p1->p_fdtol, p2);
		}
	}
	p2->p_fdtol = fdtol;

	/*
	 * If p_limit is still copy-on-write, bump refcnt,
	 * otherwise get a copy that won't be modified.
	 * (If PL_SHAREMOD is clear, the structure is shared
	 * copy-on-write.)
	 */
	if (p1->p_limit->p_lflags & PL_SHAREMOD) {
		p2->p_limit = limcopy(p1->p_limit);
	} else {
		p2->p_limit = p1->p_limit;
		p2->p_limit->p_refcnt++;
	}

	/*
	 * Preserve some more flags in subprocess.  P_PROFIL has already
	 * been preserved.
	 */
	p2->p_flag |= p1->p_flag & (P_SUGID | P_ALTSTACK);
	if (p1->p_session->s_ttyvp != NULL && p1->p_flag & P_CONTROLT)
		p2->p_flag |= P_CONTROLT;
	if (flags & RFPPWAIT)
		p2->p_flag |= P_PPWAIT;

	/*
	 * Once we are on a pglist we may receive signals.  XXX we might
	 * race a ^C being sent to the process group by not receiving it
	 * at all prior to this line.
	 */
	LIST_INSERT_AFTER(p1, p2, p_pglist);

	/*
	 * Attach the new process to its parent.
	 *
	 * If RFNOWAIT is set, the newly created process becomes a child
	 * of init.  This effectively disassociates the child from the
	 * parent.
	 */
	if (flags & RFNOWAIT)
		pptr = initproc;
	else
		pptr = p1;
	p2->p_pptr = pptr;
	LIST_INSERT_HEAD(&pptr->p_children, p2, p_sibling);
	LIST_INIT(&p2->p_children);
	varsymset_init(&p2->p_varsymset, &p1->p_varsymset);
	callout_init(&p2->p_ithandle);

#ifdef KTRACE
	/*
	 * Copy traceflag and tracefile if enabled.  If not inherited,
	 * these were zeroed above but we still could have a trace race
	 * so make sure p2's p_tracep is NULL.
	 */
	if ((p1->p_traceflag & KTRFAC_INHERIT) && p2->p_tracep == NULL) {
		p2->p_traceflag = p1->p_traceflag;
		if ((p2->p_tracep = p1->p_tracep) != NULL)
			vref(p2->p_tracep);
	}
#endif

	/*
	 * Inherit the scheduler and initialize scheduler-related fields.
	 */
	p2->p_usched = p1->p_usched;
	p2->p_usched->heuristic_forking(p1, p2);

	/*
	 * This begins the section where we must prevent the parent
	 * from being swapped.
	 */
	PHOLD(p1);

	/*
	 * Finish creating the child process.  It will return via a different
	 * execution path later.  (ie: directly into user mode)
	 */
	vm_fork(p1, p2, flags);
	caps_fork(p1, p2, flags);

	if (flags == (RFFDG | RFPROC)) {
		mycpu->gd_cnt.v_forks++;
		mycpu->gd_cnt.v_forkpages += p2->p_vmspace->vm_dsize + p2->p_vmspace->vm_ssize;
	} else if (flags == (RFFDG | RFPROC | RFPPWAIT | RFMEM)) {
		mycpu->gd_cnt.v_vforks++;
		mycpu->gd_cnt.v_vforkpages += p2->p_vmspace->vm_dsize + p2->p_vmspace->vm_ssize;
	} else if (p1 == &proc0) {
		mycpu->gd_cnt.v_kthreads++;
		mycpu->gd_cnt.v_kthreadpages += p2->p_vmspace->vm_dsize + p2->p_vmspace->vm_ssize;
	} else {
		mycpu->gd_cnt.v_rforks++;
		mycpu->gd_cnt.v_rforkpages += p2->p_vmspace->vm_dsize + p2->p_vmspace->vm_ssize;
	}

	/*
	 * Both processes are set up, now check if any loadable modules want
	 * to adjust anything.
	 *   What if they have an error? XXX
	 */
	TAILQ_FOREACH(ep, &fork_list, next) {
		(*ep->function)(p1, p2, flags);
	}

	/*
	 * Set the start time.  Note that the process is not runnable.  The
	 * caller is responsible for making it runnable.
	 */
	microtime(&p2->p_thread->td_start);
	p2->p_acflag = AFORK;

	/*
	 * tell any interested parties about the new process
	 */
	KNOTE(&p1->p_klist, NOTE_FORK | p2->p_pid);

	/*
	 * Return child proc pointer to parent.
	 */
	*procp = p2;
	return (0);
}

/*
 * The next two functions are general routines to handle adding/deleting
 * items on the fork callout list.
 *
 * at_fork():
 * Take the arguments given and put them onto the fork callout list;
 * however, first make sure that it's not already there.
 * Returns 0 on success or a standard error number.
 */
int
at_fork(forklist_fn function)
{
	struct forklist *ep;

#ifdef INVARIANTS
	/* let the programmer know if he's been stupid */
	if (rm_at_fork(function)) {
		printf("WARNING: fork callout entry (%p) already present\n",
		    function);
	}
#endif
	ep = malloc(sizeof(*ep), M_ATFORK, M_WAITOK|M_ZERO);
	ep->function = function;
	TAILQ_INSERT_TAIL(&fork_list, ep, next);
	return (0);
}

/*
 * Scan the fork callout list for the given item and remove it.
 * Returns the number of items removed (0 or 1).
 */
int
rm_at_fork(forklist_fn function)
{
	struct forklist *ep;

	TAILQ_FOREACH(ep, &fork_list, next) {
		if (ep->function == function) {
			TAILQ_REMOVE(&fork_list, ep, next);
			free(ep, M_ATFORK);
			return(1);
		}
	}
	return (0);
}
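
/*
 * Illustrative sketch (not part of this file): how a loadable module might
 * register a fork callout with the list above.  The hook name and body are
 * hypothetical; the parameter list is inferred from the call site in fork1()
 * ((*ep->function)(p1, p2, flags)), and the return type is assumed void.
 *
 *	static void
 *	mymod_fork_hook(struct proc *p1, struct proc *p2, int flags)
 *	{
 *		// runs inside fork1() once both processes are set up
 *	}
 *
 *	// on module load:    at_fork(mymod_fork_hook);
 *	// on module unload:  rm_at_fork(mymod_fork_hook);
 */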

/*
 * Add a forked process to the run queue after any remaining setup, such
 * as setting the fork handler, has been completed.
 */
void
start_forked_proc(struct proc *p1, struct proc *p2)
{
	/*
	 * Move from SIDL to RUN queue, and activate the process's thread.
	 * Activation of the thread effectively makes the process "a"
	 * current process, so we do not setrunqueue().
	 *
	 * YYY setrunqueue works here but we should clean up the trampoline
	 * code so we just schedule the LWKT thread and let the trampoline
	 * deal with the userland scheduler on return to userland.
	 */
	KASSERT(p2 && p2->p_stat == SIDL,
	    ("cannot start forked process, bad status: %p", p2));
	p2->p_usched->resetpriority(p2);
	crit_enter();
	p2->p_stat = SRUN;
	p2->p_usched->setrunqueue(p2);
	crit_exit();

	/*
	 * Now can be swapped.
	 */
	PRELE(p1);

	/*
	 * Preserve synchronization semantics of vfork.  If waiting for
	 * child to exec or exit, set P_PPWAIT on child, and sleep on our
	 * proc (in case of exit).
	 */
	while (p2->p_flag & P_PPWAIT)
		tsleep(p1, 0, "ppwait", 0);
}