/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_fork.c	8.6 (Berkeley) 4/8/94
 * $FreeBSD: src/sys/kern/kern_fork.c,v 1.72.2.14 2003/06/26 04:15:10 silby Exp $
 */
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>
#include <sys/ktrace.h>
#include <sys/unistd.h>

#include <vm/vm_map.h>
#include <vm/vm_extern.h>

#include <sys/vmmeter.h>
#include <sys/refcount.h>
#include <sys/thread2.h>
#include <sys/signal2.h>
#include <sys/spinlock2.h>

#include <sys/dsched.h>

static MALLOC_DEFINE(M_ATFORK, "atfork", "atfork callback");
/*
 * These are the structures used to create a callout list for things to do
 * when forking a process.
 */
struct forklist {
	forklist_fn function;
	TAILQ_ENTRY(forklist) next;
};

TAILQ_HEAD(forklist_head, forklist);
static struct forklist_head fork_list = TAILQ_HEAD_INITIALIZER(fork_list);
static struct lwp *lwp_fork(struct lwp *, struct proc *, int flags);

int forksleep;			/* Place for fork1() to sleep on. */
/*
 * Red-Black tree support for LWPs, keyed on the thread id (tid).
 */
static int
rb_lwp_compare(struct lwp *lp1, struct lwp *lp2)
{
	if (lp1->lwp_tid < lp2->lwp_tid)
		return (-1);
	if (lp1->lwp_tid > lp2->lwp_tid)
		return (1);
	return (0);
}

RB_GENERATE2(lwp_rb_tree, lwp, u.lwp_rbnode, rb_lwp_compare, lwpid_t, lwp_tid);
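/*
 * fork() system call.  Create a fully independent child process with a
 * copied descriptor table and a separate address space.  The child's pid
 * is returned to the parent in sysmsg_fds[0].
 */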
int
sys_fork(struct fork_args *uap)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p2;
	int error;

	error = fork1(lp, RFFDG | RFPROC | RFPGLOCK, &p2);
	if (error == 0) {
		start_forked_proc(lp, p2);
		uap->sysmsg_fds[0] = p2->p_pid;
		uap->sysmsg_fds[1] = 0;
	}
	return (error);
}
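/*
 * vfork() system call.  Like fork() but the child shares the parent's
 * address space (RFMEM) and the parent is suspended (RFPPWAIT) until
 * the child execs or exits.
 */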
int
sys_vfork(struct vfork_args *uap)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p2;
	int error;

	error = fork1(lp, RFFDG | RFPROC | RFPPWAIT | RFMEM | RFPGLOCK, &p2);
	if (error == 0) {
		start_forked_proc(lp, p2);
		uap->sysmsg_fds[0] = p2->p_pid;
		uap->sysmsg_fds[1] = 0;
	}
	return (error);
}
/*
 * Handle rforks.  An rfork may (1) operate on the current process without
 * creating a new one, (2) create a new process that shares the current
 * process's vmspace, signals, and/or descriptors, or (3) create a new
 * process that does not share these things (normal fork).
 *
 * Note that we only call start_forked_proc() if a new process is actually
 * created.
 *
 * rfork { int flags }
 */
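/*
 * For example (hypothetical usage), rfork(RFPROC | RFMEM) creates a new
 * process sharing the parent's address space, while
 * rfork(RFPROC | RFFDG) is essentially a normal fork().
 */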
int
sys_rfork(struct rfork_args *uap)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p2;
	int error;

	if ((uap->flags & RFKERNELONLY) != 0)
		return (EINVAL);

	error = fork1(lp, uap->flags | RFPGLOCK, &p2);
	if (error == 0) {
		if (p2) {
			start_forked_proc(lp, p2);
			uap->sysmsg_fds[0] = p2->p_pid;
			uap->sysmsg_fds[1] = 0;
		} else {
			uap->sysmsg_fds[0] = 0;
			uap->sysmsg_fds[1] = 0;
		}
	}
	return (error);
}
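/*
 * lwp_create() system call.  Create an additional lwp (kernel-scheduled
 * thread) inside the current process.  Unlike fork(), no new process is
 * created; the new lwp shares the process's address space and
 * descriptors with its siblings.
 */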
int
sys_lwp_create(struct lwp_create_args *uap)
{
	struct proc *p = curproc;
	struct lwp *lp;
	struct lwp_params params;
	int error;

	error = copyin(uap->params, &params, sizeof(params));
	if (error)
		goto fail2;

	lwkt_gettoken(&p->p_token);
	plimit_lwp_fork(p);	/* force exclusive access */
	lp = lwp_fork(curthread->td_lwp, p, RFPROC);
	error = cpu_prepare_lwp(lp, &params);
	if (error)
		goto fail;
	if (params.tid1 != NULL &&
	    (error = copyout(&lp->lwp_tid, params.tid1, sizeof(lp->lwp_tid))))
		goto fail;
	if (params.tid2 != NULL &&
	    (error = copyout(&lp->lwp_tid, params.tid2, sizeof(lp->lwp_tid))))
		goto fail;

	/*
	 * Now schedule the new lwp.
	 */
	p->p_usched->resetpriority(lp);
	crit_enter();
	lp->lwp_stat = LSRUN;
	p->p_usched->setrunqueue(lp);
	crit_exit();
	lwkt_reltoken(&p->p_token);

	return (0);

fail:
	lwp_rb_tree_RB_REMOVE(&p->p_lwp_tree, lp);
	--p->p_nthreads;
	/* lwp_dispose expects an exited lwp, and a held proc */
	atomic_set_int(&lp->lwp_mpflags, LWP_MP_WEXIT);
	lp->lwp_thread->td_flags |= TDF_EXITING;
	lwkt_remove_tdallq(lp->lwp_thread);
	PHOLD(p);
	biosched_done(lp->lwp_thread);
	dsched_exit_thread(lp->lwp_thread);
	lwp_dispose(lp);
	lwkt_reltoken(&p->p_token);
fail2:
	return (error);
}
int	nprocs = 1;		/* process 0 */
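/*
 * fork1() - the guts of fork.  Create a new process according to 'flags'
 * and return it, still in the SIDL state, via procp.  If RFPROC is not
 * set, no new process is created; instead parts of the current process
 * are divorced from it.  The caller is responsible for making a newly
 * created process runnable via start_forked_proc().
 */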
int
fork1(struct lwp *lp1, int flags, struct proc **procp)
{
	struct proc *p1 = lp1->lwp_proc;
	struct proc *p2;
	struct proc *pptr;
	struct pgrp *p1grp;
	struct pgrp *plkgrp;
	uid_t uid;
	int ok, error;
	static int curfail = 0;
	static struct timeval lastfail;
	struct forklist *ep;
	struct filedesc_to_leader *fdtol;

	if ((flags & (RFFDG|RFCFDG)) == (RFFDG|RFCFDG))
		return (EINVAL);

	lwkt_gettoken(&p1->p_token);
	plkgrp = NULL;
	p2 = NULL;
	/*
	 * Here we don't create a new process, but we divorce
	 * certain parts of a process from itself.
	 */
	if ((flags & RFPROC) == 0) {
		/*
		 * This kind of stunt does not work anymore if
		 * there are native threads (lwps) running
		 */
		if (p1->p_nthreads != 1) {
			error = EINVAL;
			goto done;
		}

		vm_fork(p1, 0, flags);

		/*
		 * Close all file descriptors.
		 */
		if (flags & RFCFDG) {
			struct filedesc *fdtmp;

			fdtmp = fdinit(p1);
			fdfree(p1, fdtmp);
		}

		/*
		 * Unshare file descriptors (from parent.)
		 */
		if (flags & RFFDG) {
			if (p1->p_fd->fd_refcnt > 1) {
				struct filedesc *newfd;

				error = fdcopy(p1, &newfd);
				if (error != 0) {
					error = ENOMEM;
					goto done;
				}
				fdfree(p1, newfd);
			}
		}
		*procp = NULL;
		error = 0;
		goto done;
	}
	/*
	 * Interlock against process group signal delivery.  If signals
	 * are pending after the interlock is obtained we have to restart
	 * the system call to process the signals.  If we don't the child
	 * can miss a pgsignal (such as ^C) sent during the fork.
	 *
	 * We can't use CURSIG() here because it will process any STOPs
	 * and cause the process group lock to be held indefinitely.  If
	 * a STOP occurs, the fork will be restarted after the CONT.
	 */
	if ((flags & RFPGLOCK) && (plkgrp = p1->p_pgrp) != NULL) {
		lockmgr(&plkgrp->pg_lock, LK_SHARED);
		if (CURSIG_NOBLOCK(lp1)) {
			error = ERESTART;
			goto done;
		}
	}
	/*
	 * Although process entries are dynamically created, we still keep
	 * a global limit on the maximum number we will create.  Don't allow
	 * a nonprivileged user to use the last ten processes; don't let root
	 * exceed the limit.  The variable nprocs is the current number of
	 * processes, maxproc is the limit.
	 */
	uid = lp1->lwp_thread->td_ucred->cr_ruid;
	if ((nprocs >= maxproc - 10 && uid != 0) || nprocs >= maxproc) {
		if (ppsratecheck(&lastfail, &curfail, 1))
			kprintf("maxproc limit exceeded by uid %d, please "
				"see tuning(7) and login.conf(5).\n", uid);
		tsleep(&forksleep, 0, "fork", hz / 2);
		error = EAGAIN;
		goto done;
	}
	/*
	 * Increment the nprocs resource before blocking can occur.  There
	 * are hard-limits as to the number of processes that can run.
	 */
	atomic_add_int(&nprocs, 1);

	/*
	 * Increment the count of procs running with this uid.  Don't allow
	 * a nonprivileged user to exceed their current limit.
	 */
	ok = chgproccnt(lp1->lwp_thread->td_ucred->cr_ruidinfo, 1,
			(uid != 0) ? p1->p_rlimit[RLIMIT_NPROC].rlim_cur : 0);
	if (!ok) {
		/*
		 * Back out the process count
		 */
		atomic_add_int(&nprocs, -1);
		if (ppsratecheck(&lastfail, &curfail, 1))
			kprintf("maxproc limit exceeded by uid %d, please "
				"see tuning(7) and login.conf(5).\n", uid);
		tsleep(&forksleep, 0, "fork", hz / 2);
		error = EAGAIN;
		goto done;
	}
	/*
	 * Allocate a new process, don't get fancy: zero the structure.
	 */
	p2 = kmalloc(sizeof(struct proc), M_PROC, M_WAITOK|M_ZERO);

	/*
	 * Core initialization.  SIDL is a safety state that protects the
	 * partially initialized process once it starts getting hooked
	 * into system structures and becomes addressable.
	 *
	 * We must be sure to acquire p2->p_token as well, we must hold it
	 * once the process is on the allproc list to avoid things such
	 * as competing modifications to p_flags.
	 */
	p2->p_lasttid = -1;	/* first tid will be 0 */
	p2->p_stat = SIDL;

	RB_INIT(&p2->p_lwp_tree);
	spin_init(&p2->p_spin);
	lwkt_token_init(&p2->p_token, "proc");
	lwkt_gettoken(&p2->p_token);
	/*
	 * Setup linkage for kernel based threading XXX lwp.  Also add the
	 * process to the allproclist.
	 *
	 * The process structure is addressable after this point.
	 */
	if (flags & RFTHREAD) {
		p2->p_peers = p1->p_peers;
		p1->p_peers = p2;
		p2->p_leader = p1->p_leader;
	} else {
		p2->p_peers = NULL;
		p2->p_leader = p2;
	}
	proc_add_allproc(p2);
	/*
	 * Initialize the section which is copied verbatim from the parent.
	 */
	bcopy(&p1->p_startcopy, &p2->p_startcopy,
	      ((caddr_t)&p2->p_endcopy - (caddr_t)&p2->p_startcopy));
	/*
	 * Duplicate sub-structures as needed.  Increase reference counts
	 * on shared objects.
	 *
	 * NOTE: because we are now on the allproc list it is possible for
	 *	 other consumers to gain temporary references to p2
	 *	 (p2->p_lock can change).
	 */
	if (p1->p_flags & P_PROFIL)
		startprofclock(p2);
	p2->p_ucred = crhold(lp1->lwp_thread->td_ucred);

	if (jailed(p2->p_ucred))
		p2->p_flags |= P_JAILED;

	if ((p2->p_args = p1->p_args) != NULL)
		refcount_acquire(&p2->p_args->ar_ref);

	p2->p_usched = p1->p_usched;
	/* XXX: verify copy of the secondary iosched stuff */
	if (flags & RFSIGSHARE) {
		p2->p_sigacts = p1->p_sigacts;
		refcount_acquire(&p2->p_sigacts->ps_refcnt);
	} else {
		p2->p_sigacts = kmalloc(sizeof(*p2->p_sigacts),
					M_SUBPROC, M_WAITOK);
		bcopy(p1->p_sigacts, p2->p_sigacts, sizeof(*p2->p_sigacts));
		refcount_init(&p2->p_sigacts->ps_refcnt, 1);
	}
	if (flags & RFLINUXTHPN)
		p2->p_sigparent = SIGUSR1;
	else
		p2->p_sigparent = SIGCHLD;
	/* bump references to the text vnode (for procfs) */
	p2->p_textvp = p1->p_textvp;
	if (p2->p_textvp)
		vref(p2->p_textvp);

	/* copy namecache handle to the text file */
	if (p1->p_textnch.mount)
		cache_copy(&p1->p_textnch, &p2->p_textnch);
	/*
	 * Handle file descriptors
	 */
	if (flags & RFCFDG) {
		p2->p_fd = fdinit(p1);
		fdtol = NULL;
	} else if (flags & RFFDG) {
		error = fdcopy(p1, &p2->p_fd);
		if (error != 0) {
			error = ENOMEM;
			goto done;
		}
		fdtol = NULL;
	} else {
		p2->p_fd = fdshare(p1);
		if (p1->p_fdtol == NULL) {
			p1->p_fdtol = filedesc_to_leader_alloc(NULL,
							       p1->p_leader);
		}
		if ((flags & RFTHREAD) != 0) {
			/*
			 * Shared file descriptor table and
			 * shared process leaders.
			 */
			fdtol = p1->p_fdtol;
			fdtol->fdl_refcount++;
		} else {
			/*
			 * Shared file descriptor table, and
			 * different process leaders
			 */
			fdtol = filedesc_to_leader_alloc(p1->p_fdtol, p2);
		}
	}
	p2->p_fdtol = fdtol;
	p2->p_limit = plimit_fork(p1);
	/*
	 * Preserve some more flags in subprocess.  P_PROFIL has already
	 * been preserved.
	 */
	p2->p_flags |= p1->p_flags & P_SUGID;
	if (p1->p_session->s_ttyvp != NULL && (p1->p_flags & P_CONTROLT))
		p2->p_flags |= P_CONTROLT;
	if (flags & RFPPWAIT)
		p2->p_flags |= P_PPWAIT;
	/*
	 * Inherit the virtual kernel structure (allows a virtual kernel
	 * to fork to simulate multiple cpus).
	 */
	if (p1->p_vkernel)
		vkernel_inherit(p1, p2);
	/*
	 * Once we are on a pglist we may receive signals.  XXX we might
	 * race a ^C being sent to the process group by not receiving it
	 * at all prior to this line.
	 */
	p1grp = p1->p_pgrp;
	lwkt_gettoken(&p1grp->pg_token);
	p2->p_pgrp = p1grp;
	LIST_INSERT_AFTER(p1, p2, p_pglist);
	lwkt_reltoken(&p1grp->pg_token);
	/*
	 * Attach the new process to its parent.
	 *
	 * If RFNOWAIT is set, the newly created process becomes a child
	 * of init.  This effectively disassociates the child from the
	 * parent.
	 */
	if (flags & RFNOWAIT)
		pptr = initproc;
	else
		pptr = p1;
	p2->p_pptr = pptr;
	LIST_INIT(&p2->p_children);

	lwkt_gettoken(&pptr->p_token);
	LIST_INSERT_HEAD(&pptr->p_children, p2, p_sibling);
	lwkt_reltoken(&pptr->p_token);
	varsymset_init(&p2->p_varsymset, &p1->p_varsymset);
	callout_init_mp(&p2->p_ithandle);
#ifdef KTRACE
	/*
	 * Copy traceflag and tracefile if enabled.  If not inherited,
	 * these were zeroed above but we still could have a trace race
	 * so make sure p2's p_tracenode is NULL.
	 */
	if ((p1->p_traceflag & KTRFAC_INHERIT) && p2->p_tracenode == NULL) {
		p2->p_traceflag = p1->p_traceflag;
		p2->p_tracenode = ktrinherit(p1->p_tracenode);
	}
#endif
	/*
	 * This begins the section where we must prevent the parent
	 * from being swapped.
	 *
	 * Gets PRELE'd in the caller in start_forked_proc().
	 */
	PHOLD(p1);

	vm_fork(p1, p2, flags);
	/*
	 * Create the first lwp associated with the new proc.  It will
	 * return via a different execution path later, directly into
	 * userland, after it was put on the runq by start_forked_proc().
	 */
	lwp_fork(lp1, p2, flags);
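	/*
	 * Update fork-rate statistics.  The two explicit flag patterns
	 * below match exactly what sys_fork() and sys_vfork() pass in;
	 * forks of proc0 are kernel threads, and anything else is
	 * accounted as an rfork.
	 */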
	if (flags == (RFFDG | RFPROC | RFPGLOCK)) {
		mycpu->gd_cnt.v_forks++;
		mycpu->gd_cnt.v_forkpages += p2->p_vmspace->vm_dsize +
					     p2->p_vmspace->vm_ssize;
	} else if (flags == (RFFDG | RFPROC | RFPPWAIT | RFMEM | RFPGLOCK)) {
		mycpu->gd_cnt.v_vforks++;
		mycpu->gd_cnt.v_vforkpages += p2->p_vmspace->vm_dsize +
					      p2->p_vmspace->vm_ssize;
	} else if (p1 == &proc0) {
		mycpu->gd_cnt.v_kthreads++;
		mycpu->gd_cnt.v_kthreadpages += p2->p_vmspace->vm_dsize +
						p2->p_vmspace->vm_ssize;
	} else {
		mycpu->gd_cnt.v_rforks++;
		mycpu->gd_cnt.v_rforkpages += p2->p_vmspace->vm_dsize +
					      p2->p_vmspace->vm_ssize;
	}
	/*
	 * Both processes are set up, now check if any loadable modules want
	 * to adjust anything.
	 *   What if they have an error? XXX
	 */
	TAILQ_FOREACH(ep, &fork_list, next) {
		(*ep->function)(p1, p2, flags);
	}
	/*
	 * Set the start time.  Note that the process is not runnable.  The
	 * caller is responsible for making it runnable.
	 */
	microtime(&p2->p_start);
	p2->p_acflag = AFORK;
	/*
	 * tell any interested parties about the new process
	 */
	KNOTE(&p1->p_klist, NOTE_FORK | p2->p_pid);
	/*
	 * Return child proc pointer to parent.
	 */
	*procp = p2;
	error = 0;
done:
	if (p2)
		lwkt_reltoken(&p2->p_token);
	lwkt_reltoken(&p1->p_token);
	if (plkgrp)
		lockmgr(&plkgrp->pg_lock, LK_RELEASE);
	return (error);
}
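/*
 * Create a new lwp in destproc, modeled after origlp: copy the
 * start/endcopy region, inherit scheduler state, and allocate the
 * backing LWKT thread.  The new lwp is returned in LSRUN state but
 * has not been placed on a run queue yet.
 */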
static struct lwp *
lwp_fork(struct lwp *origlp, struct proc *destproc, int flags)
{
	globaldata_t gd = mycpu;
	struct lwp *lp;
	struct thread *td;

	lp = kmalloc(sizeof(struct lwp), M_LWP, M_WAITOK|M_ZERO);

	lp->lwp_proc = destproc;
	lp->lwp_vmspace = destproc->p_vmspace;
	lp->lwp_stat = LSRUN;
	bcopy(&origlp->lwp_startcopy, &lp->lwp_startcopy,
	      (unsigned) ((caddr_t)&lp->lwp_endcopy -
			  (caddr_t)&lp->lwp_startcopy));
	lp->lwp_flags |= origlp->lwp_flags & LWP_ALTSTACK;

	/*
	 * Set cpbase to the last timeout that occurred (not the upcoming
	 * timeout).
	 *
	 * A critical section is required since a timer IPI can update
	 * scheduler specific data.
	 */
	crit_enter();
	lp->lwp_cpbase = gd->gd_schedclock.time - gd->gd_schedclock.periodic;
	destproc->p_usched->heuristic_forking(origlp, lp);
	crit_exit();
	lp->lwp_cpumask &= usched_mastermask;
	lwkt_token_init(&lp->lwp_token, "lwp_token");
	spin_init(&lp->lwp_spin);
	/*
	 * Assign the thread to the current cpu to begin with so we
	 * can manipulate it.
	 */
	td = lwkt_alloc_thread(NULL, LWKT_THREAD_STACK, gd->gd_cpuid, 0);
	lp->lwp_thread = td;
	td->td_proc = destproc;
	td->td_lwp = lp;
	td->td_switch = cpu_heavy_switch;
#ifdef NO_LWKT_SPLIT_USERPRI
	lwkt_setpri(td, TDPRI_USER_NORM);
#else
	lwkt_setpri(td, TDPRI_KERN_USER);
#endif
	lwkt_set_comm(td, "%s", destproc->p_comm);
	/*
	 * cpu_fork will copy and update the pcb, set up the kernel stack,
	 * and make the child ready to run.
	 */
	cpu_fork(origlp, lp, flags);
	kqueue_init(&lp->lwp_kqueue, destproc->p_fd);

	/*
	 * Assign a TID to the lp.  Loop until the insert succeeds (returns
	 * NULL).
	 */
	lp->lwp_tid = destproc->p_lasttid;
	do {
		if (++lp->lwp_tid < 0)
			lp->lwp_tid = 1;
	} while (lwp_rb_tree_RB_INSERT(&destproc->p_lwp_tree, lp) != NULL);
	destproc->p_lasttid = lp->lwp_tid;
	destproc->p_nthreads++;

	return (lp);
}
/*
 * The next two functions are general routines to handle adding/deleting
 * items on the fork callout list.
 *
 * at_fork():
 * Take the arguments given and put them onto the fork callout list,
 * but first make sure that it's not already there.
 * Returns 0 on success or a standard error number.
 */
int
at_fork(forklist_fn function)
{
	struct forklist *ep;

	/* let the programmer know if he's been stupid */
	if (rm_at_fork(function)) {
		kprintf("WARNING: fork callout entry (%p) already present\n",
			function);
	}
	ep = kmalloc(sizeof(*ep), M_ATFORK, M_WAITOK|M_ZERO);
	ep->function = function;
	TAILQ_INSERT_TAIL(&fork_list, ep, next);
	return (0);
}
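/*
 * Typical usage (hypothetical module code): register a hook with
 * at_fork(my_fork_hook) at load time and remove it with
 * rm_at_fork(my_fork_hook) at unload; the hook is then invoked as
 * my_fork_hook(p1, p2, flags) at the tail end of every successful
 * fork1().
 */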
/*
 * Scan the fork callout list for the given item and remove it.
 * Returns the number of items removed (0 or 1).
 */
int
rm_at_fork(forklist_fn function)
{
	struct forklist *ep;

	TAILQ_FOREACH(ep, &fork_list, next) {
		if (ep->function == function) {
			TAILQ_REMOVE(&fork_list, ep, next);
			kfree(ep, M_ATFORK);
			return (1);
		}
	}
	return (0);
}
/*
 * Add a forked process to the run queue after any remaining setup, such
 * as setting the fork handler, has been completed.
 *
 * p2 is held by the caller.
 */
void
start_forked_proc(struct lwp *lp1, struct proc *p2)
{
	struct lwp *lp2 = ONLY_LWP_IN_PROC(p2);

	/*
	 * Move from SIDL to RUN queue, and activate the process's thread.
	 * Activation of the thread effectively makes the process "a"
	 * current process, so we do not setrunqueue().
	 *
	 * YYY setrunqueue works here but we should clean up the trampoline
	 * code so we just schedule the LWKT thread and let the trampoline
	 * deal with the userland scheduler on return to userland.
	 */
	KASSERT(p2->p_stat == SIDL,
	    ("cannot start forked process, bad status: %p", p2));
	p2->p_usched->resetpriority(lp2);
	crit_enter();
	p2->p_stat = SACTIVE;
	lp2->lwp_stat = LSRUN;
	p2->p_usched->setrunqueue(lp2);
	crit_exit();

	/*
	 * Now can be swapped.
	 */
	PRELE(lp1->lwp_proc);
	/*
	 * Preserve synchronization semantics of vfork.  If waiting for
	 * child to exec or exit, set P_PPWAIT on child, and sleep on our
	 * proc (in case of exec or exit).
	 *
	 * We must hold our p_token to interlock the flag/tsleep.
	 */
	lwkt_gettoken(&p2->p_token);
	while (p2->p_flags & P_PPWAIT)
		tsleep(lp1->lwp_proc, 0, "ppwait", 0);
	lwkt_reltoken(&p2->p_token);
}