/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)kern_fork.c 8.6 (Berkeley) 4/8/94
 * $FreeBSD: src/sys/kern/kern_fork.c,v 1.72.2.14 2003/06/26 04:15:10 silby Exp $
 */
38 #include "opt_ktrace.h"
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/sysproto.h>
43 #include <sys/filedesc.h>
44 #include <sys/kernel.h>
45 #include <sys/sysctl.h>
46 #include <sys/malloc.h>
48 #include <sys/resourcevar.h>
49 #include <sys/vnode.h>
51 #include <sys/ktrace.h>
52 #include <sys/unistd.h>
58 #include <vm/vm_map.h>
59 #include <vm/vm_extern.h>
61 #include <sys/vmmeter.h>
62 #include <sys/refcount.h>
63 #include <sys/thread2.h>
64 #include <sys/signal2.h>
65 #include <sys/spinlock2.h>
67 #include <sys/dsched.h>
static MALLOC_DEFINE(M_ATFORK, "atfork", "atfork callback");
static MALLOC_DEFINE(M_REAPER, "reaper", "process reapers");
/*
 * These are the structures used to create a callout list for things to do
 * when forking a process.
 */
struct forklist {
        forklist_fn function;
        TAILQ_ENTRY(forklist) next;
};

TAILQ_HEAD(forklist_head, forklist);
static struct forklist_head fork_list = TAILQ_HEAD_INITIALIZER(fork_list);
static struct lwp *lwp_fork(struct lwp *, struct proc *, int flags);

int forksleep;          /* Place for fork1() to sleep on. */
/*
 * Red-Black tree support for LWPs
 */
static int
rb_lwp_compare(struct lwp *lp1, struct lwp *lp2)
{
        if (lp1->lwp_tid < lp2->lwp_tid)
                return (-1);
        if (lp1->lwp_tid > lp2->lwp_tid)
                return (1);
        return (0);
}

RB_GENERATE2(lwp_rb_tree, lwp, u.lwp_rbnode, rb_lwp_compare, lwpid_t, lwp_tid);
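
/*
 * Illustrative note (a sketch, assuming the standard RB_GENERATE2
 * expansion): besides the insert/remove operations used below, the macro
 * also emits a keyed lookup on lwp_tid.  A caller holding p->p_token
 * could resolve a thread id to an lwp roughly as follows, where "tid"
 * is a hypothetical lwpid_t:
 *
 *      struct lwp *lp;
 *
 *      lp = lwp_rb_tree_RB_LOOKUP(&p->p_lwp_tree, tid);
 *      if (lp != NULL)
 *              // lp remains valid while p->p_token is held
 */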
/*
 * fork() system call
 */
int
sys_fork(struct fork_args *uap)
{
        struct lwp *lp = curthread->td_lwp;
        struct proc *p2;
        int error;

        error = fork1(lp, RFFDG | RFPROC | RFPGLOCK, &p2);
        if (error == 0) {
                PHOLD(p2);
                start_forked_proc(lp, p2);
                uap->sysmsg_fds[0] = p2->p_pid;
                uap->sysmsg_fds[1] = 0;
                PRELE(p2);
        }
        return (error);
}
/*
 * vfork() system call
 */
int
sys_vfork(struct vfork_args *uap)
{
        struct lwp *lp = curthread->td_lwp;
        struct proc *p2;
        int error;

        error = fork1(lp, RFFDG | RFPROC | RFPPWAIT | RFMEM | RFPGLOCK, &p2);
        if (error == 0) {
                PHOLD(p2);
                start_forked_proc(lp, p2);
                uap->sysmsg_fds[0] = p2->p_pid;
                uap->sysmsg_fds[1] = 0;
                PRELE(p2);
        }
        return (error);
}
/*
 * Handle rforks.  An rfork may (1) operate on the current process without
 * creating a new one, (2) create a new process that shares the current
 * process's vmspace, signals, and/or descriptors, or (3) create a new
 * process that does not share these things (normal fork).
 *
 * Note that we only call start_forked_proc() if a new process is actually
 * created.
 *
 * rfork { int flags }
 */
int
sys_rfork(struct rfork_args *uap)
{
        struct lwp *lp = curthread->td_lwp;
        struct proc *p2;
        int error;

        if ((uap->flags & RFKERNELONLY) != 0)
                return (EINVAL);

        error = fork1(lp, uap->flags | RFPGLOCK, &p2);
        if (error == 0) {
                if (p2) {
                        PHOLD(p2);
                        start_forked_proc(lp, p2);
                        uap->sysmsg_fds[0] = p2->p_pid;
                        uap->sysmsg_fds[1] = 0;
                        PRELE(p2);
                } else {
                        uap->sysmsg_fds[0] = 0;
                        uap->sysmsg_fds[1] = 0;
                }
        }
        return (error);
}
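
/*
 * Flag summary for the three entry points above: fork() is equivalent to
 * rfork(RFFDG | RFPROC) and vfork() to rfork(RFFDG | RFPROC | RFPPWAIT |
 * RFMEM), i.e. a child sharing the parent's vmspace whose parent waits
 * until the child execs or exits.  RFPGLOCK is ORed in by the kernel
 * itself in all three paths to request the process group signal
 * interlock in fork1(); it is not meant to be passed in from userland.
 */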
/*
 * Low level thread create used by pthreads.
 */
int
sys_lwp_create(struct lwp_create_args *uap)
{
        struct proc *p = curproc;
        struct lwp *lp;
        struct lwp_params params;
        int error;

        error = copyin(uap->params, &params, sizeof(params));
        if (error)
                goto fail2;

        lwkt_gettoken(&p->p_token);
        plimit_lwp_fork(p);     /* force exclusive access */
        lp = lwp_fork(curthread->td_lwp, p, RFPROC);
        error = cpu_prepare_lwp(lp, &params);
        if (error)
                goto fail;
        if (params.lwp_tid1 != NULL &&
            (error = copyout(&lp->lwp_tid, params.lwp_tid1, sizeof(lp->lwp_tid))))
                goto fail;
        if (params.lwp_tid2 != NULL &&
            (error = copyout(&lp->lwp_tid, params.lwp_tid2, sizeof(lp->lwp_tid))))
                goto fail;

        /*
         * Now schedule the new lwp.
         */
        p->p_usched->resetpriority(lp);
        crit_enter();
        lp->lwp_stat = LSRUN;
        p->p_usched->setrunqueue(lp);
        crit_exit();
        lwkt_reltoken(&p->p_token);

        return (0);

fail:
        /*
         * Make sure no one is using this lwp before it is removed from
         * the tree.  If we didn't wait here, lwp tree iteration with
         * blocking operations would be broken.
         */
        while (lp->lwp_lock > 0)
                tsleep(lp, 0, "lwpfail", 1);
        lwp_rb_tree_RB_REMOVE(&p->p_lwp_tree, lp);
        --p->p_nthreads;

        /* lwp_dispose expects an exited lwp, and a held proc */
        atomic_set_int(&lp->lwp_mpflags, LWP_MP_WEXIT);
        lp->lwp_thread->td_flags |= TDF_EXITING;
        lwkt_remove_tdallq(lp->lwp_thread);
        PHOLD(p);
        biosched_done(lp->lwp_thread);
        dsched_exit_thread(lp->lwp_thread);
        lwp_dispose(lp);
        lwkt_reltoken(&p->p_token);
fail2:
        return (error);
}
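
/*
 * Userland usage sketch (hypothetical, based on the lwp_create(2)
 * interface implied above): a threading library fills in a struct
 * lwp_params before the call; the lwp_tid1/lwp_tid2 copyouts let it
 * learn the new thread id through two independent pointers.
 *
 *      struct lwp_params params;
 *      lwpid_t tid;
 *
 *      params.lwp_tid1 = &tid;   // first copyout target, may be NULL
 *      params.lwp_tid2 = NULL;   // second copyout target, may be NULL
 *      // entry point, argument and stack fields are consumed by
 *      // cpu_prepare_lwp() and are machine-dependent
 *      error = lwp_create(&params);
 */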
int nprocs = 1;         /* process 0 */
int
fork1(struct lwp *lp1, int flags, struct proc **procp)
{
        struct proc *p1 = lp1->lwp_proc;
        struct proc *p2;
        struct proc *pptr;
        struct pgrp *p1grp;
        struct pgrp *plkgrp;
        struct sysreaper *reap;
        uid_t uid;
        int ok, error;
        static int curfail = 0;
        static struct timeval lastfail;
        struct forklist *ep;
        struct filedesc_to_leader *fdtol;

        if ((flags & (RFFDG|RFCFDG)) == (RFFDG|RFCFDG))
                return (EINVAL);

        lwkt_gettoken(&p1->p_token);
        plkgrp = NULL;
        p2 = NULL;

        /*
         * Here we don't create a new process, but we divorce
         * certain parts of a process from itself.
         */
        if ((flags & RFPROC) == 0) {
                /*
                 * This kind of stunt does not work anymore if
                 * there are native threads (lwps) running.
                 */
                if (p1->p_nthreads != 1) {
                        error = EINVAL;
                        goto done;
                }

                vm_fork(p1, 0, flags);

                /*
                 * Close all file descriptors.
                 */
                if (flags & RFCFDG) {
                        struct filedesc *fdtmp;

                        fdtmp = fdinit(p1);
                        fdfree(p1, fdtmp);
                }

                /*
                 * Unshare file descriptors (from parent).
                 */
                if (flags & RFFDG) {
                        if (p1->p_fd->fd_refcnt > 1) {
                                struct filedesc *newfd;

                                error = fdcopy(p1, &newfd);
                                if (error != 0) {
                                        error = ENOMEM;
                                        goto done;
                                }
                                fdfree(p1, newfd);
                        }
                }
                error = 0;
                goto done;
        }
        /*
         * Interlock against process group signal delivery.  If signals
         * are pending after the interlock is obtained we have to restart
         * the system call to process the signals.  If we don't, the child
         * can miss a pgsignal (such as ^C) sent during the fork.
         *
         * We can't use CURSIG() here because it will process any STOPs
         * and cause the process group lock to be held indefinitely.  If
         * a STOP occurs, the fork will be restarted after the CONT.
         */
        if ((flags & RFPGLOCK) && (plkgrp = p1->p_pgrp) != NULL) {
                pgref(plkgrp);
                lockmgr(&plkgrp->pg_lock, LK_SHARED);
                if (CURSIG_NOBLOCK(lp1)) {
                        error = ERESTART;
                        goto done;
                }
        }

        /*
         * Although process entries are dynamically created, we still keep
         * a global limit on the maximum number we will create.  Don't allow
         * a nonprivileged user to use the last ten processes; don't let root
         * exceed the limit.  The variable nprocs is the current number of
         * processes, maxproc is the limit.
         */
        uid = lp1->lwp_thread->td_ucred->cr_ruid;
        if ((nprocs >= maxproc - 10 && uid != 0) || nprocs >= maxproc) {
                if (ppsratecheck(&lastfail, &curfail, 1)) {
                        kprintf("maxproc limit exceeded by uid %d, please "
                                "see tuning(7) and login.conf(5).\n", uid);
                }
                tsleep(&forksleep, 0, "fork", hz / 2);
                error = EAGAIN;
                goto done;
        }
        /*
         * Increment the nprocs resource before blocking can occur.  There
         * are hard-limits as to the number of processes that can run.
         */
        atomic_add_int(&nprocs, 1);

        /*
         * Increment the count of procs running with this uid.  Don't allow
         * a nonprivileged user to exceed their current limit.
         */
        ok = chgproccnt(lp1->lwp_thread->td_ucred->cr_ruidinfo, 1,
                        (uid != 0) ? p1->p_rlimit[RLIMIT_NPROC].rlim_cur : 0);
        if (!ok) {
                /*
                 * Back out the process count.
                 */
                atomic_add_int(&nprocs, -1);
                if (ppsratecheck(&lastfail, &curfail, 1)) {
                        kprintf("maxproc limit exceeded by uid %d, please "
                                "see tuning(7) and login.conf(5).\n", uid);
                }
                tsleep(&forksleep, 0, "fork", hz / 2);
                error = EAGAIN;
                goto done;
        }
        /*
         * Allocate a new process, don't get fancy: zero the structure.
         */
        p2 = kmalloc(sizeof(struct proc), M_PROC, M_WAITOK|M_ZERO);

        /*
         * Core initialization.  SIDL is a safety state that protects the
         * partially initialized process once it starts getting hooked
         * into system structures and becomes addressable.
         *
         * We must be sure to acquire p2->p_token as well; we must hold it
         * once the process is on the allproc list to avoid things such
         * as competing modifications to p_flags.
         */
        mycpu->gd_forkid += ncpus;
        p2->p_forkid = mycpu->gd_forkid + mycpu->gd_cpuid;
        p2->p_lasttid = -1;     /* first tid will be 0 */
        p2->p_stat = SIDL;

        /*
         * NOTE: Process 0 will not have a reaper, but process 1 (init) and
         *       all other processes always will.
         */
        if ((reap = p1->p_reaper) != NULL) {
                reaper_hold(reap);
                p2->p_reaper = reap;
        } else {
                p2->p_reaper = NULL;
        }

        RB_INIT(&p2->p_lwp_tree);
        spin_init(&p2->p_spin, "procfork1");
        lwkt_token_init(&p2->p_token, "proc");
        lwkt_gettoken(&p2->p_token);

        /*
         * Set up linkage for kernel based threading XXX lwp.  Also add the
         * process to the allproc list.
         *
         * The process structure is addressable after this point.
         */
        if (flags & RFTHREAD) {
                p2->p_peers = p1->p_peers;
                p1->p_peers = p2;
                p2->p_leader = p1->p_leader;
        } else {
                p2->p_leader = p2;
        }
        proc_add_allproc(p2);
        /*
         * Initialize the section which is copied verbatim from the parent.
         */
        bcopy(&p1->p_startcopy, &p2->p_startcopy,
              ((caddr_t)&p2->p_endcopy - (caddr_t)&p2->p_startcopy));

        /*
         * Duplicate sub-structures as needed.  Increase reference counts
         * on shared objects.
         *
         * NOTE: because we are now on the allproc list it is possible for
         *       other consumers to gain temporary references to p2
         *       (p2->p_lock can change).
         */
        if (p1->p_flags & P_PROFIL)
                startprofclock(p2);
        p2->p_ucred = crhold(lp1->lwp_thread->td_ucred);

        if (jailed(p2->p_ucred))
                p2->p_flags |= P_JAILED;

        if (p2->p_args)
                refcount_acquire(&p2->p_args->ar_ref);

        p2->p_usched = p1->p_usched;
        /* XXX: verify copy of the secondary iosched stuff */
        dsched_enter_proc(p2);

        if (flags & RFSIGSHARE) {
                p2->p_sigacts = p1->p_sigacts;
                refcount_acquire(&p2->p_sigacts->ps_refcnt);
        } else {
                p2->p_sigacts = kmalloc(sizeof(*p2->p_sigacts),
                                        M_SUBPROC, M_WAITOK);
                bcopy(p1->p_sigacts, p2->p_sigacts, sizeof(*p2->p_sigacts));
                refcount_init(&p2->p_sigacts->ps_refcnt, 1);
        }
        if (flags & RFLINUXTHPN)
                p2->p_sigparent = SIGUSR1;
        else
                p2->p_sigparent = SIGCHLD;

        /* bump references to the text vnode (for procfs) */
        p2->p_textvp = p1->p_textvp;
        if (p2->p_textvp)
                vref(p2->p_textvp);

        /* copy namecache handle to the text file */
        if (p1->p_textnch.mount)
                cache_copy(&p1->p_textnch, &p2->p_textnch);
        /*
         * Handle file descriptors.
         */
        if (flags & RFCFDG) {
                p2->p_fd = fdinit(p1);
                fdtol = NULL;
        } else if (flags & RFFDG) {
                error = fdcopy(p1, &p2->p_fd);
                if (error != 0) {
                        error = ENOMEM;
                        goto done;
                }
                fdtol = NULL;
        } else {
                p2->p_fd = fdshare(p1);
                if (p1->p_fdtol == NULL) {
                        p1->p_fdtol = filedesc_to_leader_alloc(NULL,
                                                               p1->p_leader);
                }
                if ((flags & RFTHREAD) != 0) {
                        /*
                         * Shared file descriptor table and
                         * shared process leaders.
                         */
                        fdtol = p1->p_fdtol;
                        fdtol->fdl_refcount++;
                } else {
                        /*
                         * Shared file descriptor table, and
                         * different process leaders.
                         */
                        fdtol = filedesc_to_leader_alloc(p1->p_fdtol, p2);
                }
        }
        p2->p_fdtol = fdtol;
        p2->p_limit = plimit_fork(p1);
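
        /*
         * Summary of the descriptor-table choices above:
         *
         *      RFCFDG          fdinit()  - begin with a fresh, empty table
         *      RFFDG           fdcopy()  - private copy of the parent's table
         *      (neither)       fdshare() - share one table with the parent
         *
         * Only the shared case needs a filedesc_to_leader association,
         * which is why fdtol is NULL in the first two branches.
         */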
        /*
         * Preserve some more flags in the subprocess.  P_PROFIL has
         * already been preserved.
         */
        p2->p_flags |= p1->p_flags & P_SUGID;
        if (p1->p_session->s_ttyvp != NULL && (p1->p_flags & P_CONTROLT))
                p2->p_flags |= P_CONTROLT;
        if (flags & RFPPWAIT) {
                p2->p_flags |= P_PPWAIT;
                if (p1->p_upmap)
                        p1->p_upmap->invfork = 1;
        }

        /*
         * Inherit the virtual kernel structure (allows a virtual kernel
         * to fork to simulate multiple cpus).
         */
        if (p1->p_vkernel)
                vkernel_inherit(p1, p2);

        /*
         * Once we are on a pglist we may receive signals.  XXX we might
         * race a ^C being sent to the process group by not receiving it
         * at all prior to this line.
         */
        p1grp = p1->p_pgrp;
        lwkt_gettoken(&p1grp->pg_token);
        LIST_INSERT_AFTER(p1, p2, p_pglist);
        lwkt_reltoken(&p1grp->pg_token);
        /*
         * Attach the new process to its parent.
         *
         * If RFNOWAIT is set, the newly created process becomes a child
         * of the reaper (typically init).  This effectively disassociates
         * the child from the parent.
         *
         * Temporarily hold pptr for the RFNOWAIT case to avoid ripouts.
         */
        if (flags & RFNOWAIT) {
                pptr = reaper_get(reap);
                if (pptr == NULL) {
                        pptr = initproc;
                        PHOLD(pptr);
                }
        } else {
                pptr = p1;
        }
        p2->p_pptr = pptr;
        LIST_INIT(&p2->p_children);

        lwkt_gettoken(&pptr->p_token);
        LIST_INSERT_HEAD(&pptr->p_children, p2, p_sibling);
        lwkt_reltoken(&pptr->p_token);

        if (flags & RFNOWAIT)
                PRELE(pptr);

        varsymset_init(&p2->p_varsymset, &p1->p_varsymset);
        callout_init_mp(&p2->p_ithandle);

#ifdef KTRACE
        /*
         * Copy traceflag and tracefile if enabled.  If not inherited,
         * these were zeroed above but we still could have a trace race
         * so make sure p2's p_tracenode is NULL.
         */
        if ((p1->p_traceflag & KTRFAC_INHERIT) && p2->p_tracenode == NULL) {
                p2->p_traceflag = p1->p_traceflag;
                p2->p_tracenode = ktrinherit(p1->p_tracenode);
        }
#endif
        /*
         * This begins the section where we must prevent the parent
         * from being swapped.
         *
         * Gets PRELE'd in the caller in start_forked_proc().
         */
        PHOLD(p1);

        vm_fork(p1, p2, flags);

        /*
         * Create the first lwp associated with the new proc.  It will
         * return via a different execution path later, directly into
         * userland, after it has been put on the runq by
         * start_forked_proc().
         */
        lwp_fork(lp1, p2, flags);

        if (flags == (RFFDG | RFPROC | RFPGLOCK)) {
                mycpu->gd_cnt.v_forks++;
                mycpu->gd_cnt.v_forkpages += p2->p_vmspace->vm_dsize +
                                             p2->p_vmspace->vm_ssize;
        } else if (flags == (RFFDG | RFPROC | RFPPWAIT | RFMEM | RFPGLOCK)) {
                mycpu->gd_cnt.v_vforks++;
                mycpu->gd_cnt.v_vforkpages += p2->p_vmspace->vm_dsize +
                                              p2->p_vmspace->vm_ssize;
        } else if (p1 == &proc0) {
                mycpu->gd_cnt.v_kthreads++;
                mycpu->gd_cnt.v_kthreadpages += p2->p_vmspace->vm_dsize +
                                                p2->p_vmspace->vm_ssize;
        } else {
                mycpu->gd_cnt.v_rforks++;
                mycpu->gd_cnt.v_rforkpages += p2->p_vmspace->vm_dsize +
                                              p2->p_vmspace->vm_ssize;
        }
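
        /*
         * Note (assumption about the userland side): these per-cpu
         * gd_cnt counters are the fork/vfork/rfork totals that a tool
         * such as vmstat -f reports, aggregated across cpus.
         */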
        /*
         * Both processes are set up, now check if any loadable modules
         * want to adjust anything.
         * XXX: what if they have an error?
         */
        TAILQ_FOREACH(ep, &fork_list, next) {
                (*ep->function)(p1, p2, flags);
        }

        /*
         * Set the start time.  Note that the process is not runnable.  The
         * caller is responsible for making it runnable.
         */
        microtime(&p2->p_start);
        p2->p_acflag = AFORK;

        /*
         * Tell any interested parties about the new process.
         */
        KNOTE(&p1->p_klist, NOTE_FORK | p2->p_pid);

        /*
         * Return child proc pointer to parent.
         */
        *procp = p2;
        error = 0;
done:
        if (p2)
                lwkt_reltoken(&p2->p_token);
        lwkt_reltoken(&p1->p_token);
        if (plkgrp) {
                lockmgr(&plkgrp->pg_lock, LK_RELEASE);
                pgrel(plkgrp);
        }
        return (error);
}
static struct lwp *
lwp_fork(struct lwp *origlp, struct proc *destproc, int flags)
{
        globaldata_t gd = mycpu;
        struct lwp *lp;
        struct thread *td;

        lp = kmalloc(sizeof(struct lwp), M_LWP, M_WAITOK|M_ZERO);

        lp->lwp_proc = destproc;
        lp->lwp_vmspace = destproc->p_vmspace;
        lp->lwp_stat = LSRUN;
        bcopy(&origlp->lwp_startcopy, &lp->lwp_startcopy,
              (unsigned) ((caddr_t)&lp->lwp_endcopy -
                          (caddr_t)&lp->lwp_startcopy));
        lp->lwp_flags |= origlp->lwp_flags & LWP_ALTSTACK;

        /*
         * Set cpbase to the last timeout that occurred (not the upcoming
         * timeout).
         *
         * A critical section is required since a timer IPI can update
         * scheduler specific data.
         */
        crit_enter();
        lp->lwp_cpbase = gd->gd_schedclock.time - gd->gd_schedclock.periodic;
        destproc->p_usched->heuristic_forking(origlp, lp);
        crit_exit();
        CPUMASK_ANDMASK(lp->lwp_cpumask, usched_mastermask);
        lwkt_token_init(&lp->lwp_token, "lwp_token");
        spin_init(&lp->lwp_spin, "lwptoken");

        /*
         * Assign the thread to the current cpu to begin with so we
         * can manipulate it.
         */
        td = lwkt_alloc_thread(NULL, LWKT_THREAD_STACK, gd->gd_cpuid, 0);
        lp->lwp_thread = td;
        td->td_ucred = crhold(destproc->p_ucred);
        td->td_proc = destproc;
        td->td_lwp = lp;
        td->td_switch = cpu_heavy_switch;
#ifdef NO_LWKT_SPLIT_USERPRI
        lwkt_setpri(td, TDPRI_USER_NORM);
#else
        lwkt_setpri(td, TDPRI_KERN_USER);
#endif
        lwkt_set_comm(td, "%s", destproc->p_comm);

        /*
         * cpu_fork will copy and update the pcb, set up the kernel stack,
         * and make the child ready to run.
         */
        cpu_fork(origlp, lp, flags);
        kqueue_init(&lp->lwp_kqueue, destproc->p_fd);

        /*
         * Assign a TID to the lp.  Loop until the insert succeeds (returns
         * NULL).
         */
        lp->lwp_tid = destproc->p_lasttid;
        do {
                if (++lp->lwp_tid < 0)
                        lp->lwp_tid = 1;
        } while (lwp_rb_tree_RB_INSERT(&destproc->p_lwp_tree, lp) != NULL);
        destproc->p_lasttid = lp->lwp_tid;
        destproc->p_nthreads++;

        /*
         * This flag is set and never cleared.  It means that the process
         * was threaded at some point.  Used to improve exit performance.
         */
        destproc->p_flags |= P_MAYBETHREADED;

        return (lp);
}
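
/*
 * TID assignment sketch for the loop above: p_lasttid starts at -1, so
 * the first lwp gets tid 0, the next tid 1, and so on.  If the counter
 * ever wraps negative it restarts at 1, and because the RB-tree insert
 * fails for a tid that is still in use, the loop simply advances until
 * it finds a free one.
 */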
/*
 * The next two functions are general routines to handle adding/deleting
 * items on the fork callout list.
 *
 * at_fork():
 * Take the arguments given and put them onto the fork callout list;
 * first make sure that the entry is not already there.
 * Returns 0 on success or a standard error number.
 */
int
at_fork(forklist_fn function)
{
        struct forklist *ep;

        /* let the programmer know if he's been stupid */
        if (rm_at_fork(function)) {
                kprintf("WARNING: fork callout entry (%p) already present\n",
                        function);
        }
        ep = kmalloc(sizeof(*ep), M_ATFORK, M_WAITOK|M_ZERO);
        ep->function = function;
        TAILQ_INSERT_TAIL(&fork_list, ep, next);
        return (0);
}

/*
 * Scan the fork callout list for the given item and remove it.
 * Returns the number of items removed (0 or 1).
 */
int
rm_at_fork(forklist_fn function)
{
        struct forklist *ep;

        TAILQ_FOREACH(ep, &fork_list, next) {
                if (ep->function == function) {
                        TAILQ_REMOVE(&fork_list, ep, next);
                        kfree(ep, M_ATFORK);
                        return (1);
                }
        }
        return (0);
}
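
/*
 * Usage sketch (hypothetical module, not part of this file): a loadable
 * module that wants to run code on every fork registers a forklist_fn,
 * which fork1() invokes as (*ep->function)(p1, p2, flags) once both
 * processes are set up:
 *
 *      static void
 *      mymod_fork_hook(struct proc *p1, struct proc *p2, int flags)
 *      {
 *              // inspect or tag the new process p2 here
 *      }
 *
 *      at_fork(mymod_fork_hook);       // on module load
 *      rm_at_fork(mymod_fork_hook);    // on module unload
 */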
/*
 * Add a forked process to the run queue after any remaining setup, such
 * as setting the fork handler, has been completed.
 *
 * p2 is held by the caller.
 */
void
start_forked_proc(struct lwp *lp1, struct proc *p2)
{
        struct lwp *lp2 = ONLY_LWP_IN_PROC(p2);
        int pflags;

        /*
         * Move from SIDL to RUN queue, and activate the process's thread.
         * Activation of the thread effectively makes the process "a"
         * current process, so we do not setrunqueue().
         *
         * YYY setrunqueue works here but we should clean up the trampoline
         * code so we just schedule the LWKT thread and let the trampoline
         * deal with the userland scheduler on return to userland.
         */
        KASSERT(p2->p_stat == SIDL,
                ("cannot start forked process, bad status: %p", p2));
        p2->p_usched->resetpriority(lp2);
        crit_enter();
        p2->p_stat = SACTIVE;
        lp2->lwp_stat = LSRUN;
        p2->p_usched->setrunqueue(lp2);
        crit_exit();

        /*
         * The parent can now be swapped.
         */
        PRELE(lp1->lwp_proc);

        /*
         * Preserve synchronization semantics of vfork.  P_PPWAIT is set in
         * the child until it has retired the parent's resources.  The parent
         * must wait for the flag to be cleared by the child.
         *
         * Interlock the flag/tsleep with atomic ops to avoid unnecessary
         * wakeups.
         *
         * XXX Is this use of an atomic op on a field that is not normally
         * manipulated with atomic ops ok?
         */
        while ((pflags = p2->p_flags) & P_PPWAIT) {
                cpu_ccfence();
                tsleep_interlock(lp1->lwp_proc, 0);
                if (atomic_cmpset_int(&p2->p_flags, pflags, pflags))
                        tsleep(lp1->lwp_proc, PINTERLOCKED, "ppwait", 0);
        }
}
/*
 * procctl (idtype_t idtype, id_t id, int cmd, void *arg)
 */
int
sys_procctl(struct procctl_args *uap)
{
        struct proc *p = curproc;
        struct proc *p2;
        struct sysreaper *reap;
        union reaper_info udata;
        int error;

        if (uap->idtype != P_PID || uap->id != (id_t)p->p_pid)
                return (EINVAL);

        switch (uap->cmd) {
        case PROC_REAP_ACQUIRE:
                lwkt_gettoken(&p->p_token);
                reap = kmalloc(sizeof(*reap), M_REAPER, M_WAITOK|M_ZERO);
                if (p->p_reaper == NULL || p->p_reaper->p != p) {
                        reaper_init(p, reap);
                        error = 0;
                } else {
                        kfree(reap, M_REAPER);
                        error = EALREADY;
                }
                lwkt_reltoken(&p->p_token);
                break;
        case PROC_REAP_RELEASE:
                lwkt_gettoken(&p->p_token);
release_again:
                reap = p->p_reaper;
                KKASSERT(reap != NULL);
                if (reap->p == p) {
                        reaper_hold(reap);      /* in case of thread race */
                        lockmgr(&reap->lock, LK_EXCLUSIVE);
                        if (reap->p != p) {
                                lockmgr(&reap->lock, LK_RELEASE);
                                reaper_drop(reap);
                                goto release_again;
                        }
                        reap->p = NULL;
                        p->p_reaper = reap->parent;
                        if (p->p_reaper)
                                reaper_hold(p->p_reaper);
                        lockmgr(&reap->lock, LK_RELEASE);
                        reaper_drop(reap);      /* our ref */
                        reaper_drop(reap);      /* old p_reaper ref */
                        error = 0;
                } else {
                        error = ENOTCONN;
                }
                lwkt_reltoken(&p->p_token);
                break;
        case PROC_REAP_STATUS:
                bzero(&udata, sizeof(udata));
                lwkt_gettoken_shared(&p->p_token);
                if ((reap = p->p_reaper) != NULL && reap->p == p) {
                        udata.status.flags = reap->flags;
                        udata.status.refs = reap->refs - 1; /* minus ours */
                }
                p2 = LIST_FIRST(&p->p_children);
                udata.status.pid_head = p2 ? p2->p_pid : -1;
                lwkt_reltoken(&p->p_token);

                if (uap->data) {
                        error = copyout(&udata, uap->data,
                                        sizeof(udata.status));
                } else {
                        error = 0;
                }
                break;
        default:
                error = EINVAL;
                break;
        }
        return (error);
}
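
/*
 * Userland usage sketch (assuming the procctl(2) signature given in the
 * comment above): a service manager can make itself the reaper for its
 * subtree and then query the subtree state.
 *
 *      union reaper_info info;
 *
 *      procctl(P_PID, getpid(), PROC_REAP_ACQUIRE, NULL);
 *      procctl(P_PID, getpid(), PROC_REAP_STATUS, &info);
 *      // info.status.pid_head is the first child, or -1 if none
 *      procctl(P_PID, getpid(), PROC_REAP_RELEASE, NULL);
 */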
/*
 * Bump ref on reaper, preventing destruction
 */
void
reaper_hold(struct sysreaper *reap)
{
        KKASSERT(reap->refs > 0);
        refcount_acquire(&reap->refs);
}

/*
 * Drop ref on reaper, destroy the structure on the 1->0
 * transition and loop on the parent.
 */
void
reaper_drop(struct sysreaper *next)
{
        struct sysreaper *reap;

        while ((reap = next) != NULL) {
                if (refcount_release(&reap->refs)) {
                        next = reap->parent;
                        KKASSERT(reap->p == NULL);
                        lockuninit(&reap->lock);
                        kfree(reap, M_REAPER);
                } else {
                        next = NULL;
                }
        }
}
/*
 * Initialize a static or newly allocated reaper structure
 */
void
reaper_init(struct proc *p, struct sysreaper *reap)
{
        reap->parent = p->p_reaper;
        reap->p = p;
        if (p == initproc)
                reap->flags = REAPER_STAT_OWNED | REAPER_STAT_REALINIT;
        else
                reap->flags = REAPER_STAT_OWNED;
        refcount_init(&reap->refs, 1);
        lockinit(&reap->lock, "subrp", 0, 0);
        spin_lock(&p->p_spin);
        p->p_reaper = reap;
        spin_unlock(&p->p_spin);
}
/*
 * Called with p->p_token held during exit.
 *
 * This is a bit simpler than RELEASE because there are no threads remaining
 * to race.  We only release if we own the reaper; the exit code will handle
 * the final p_reaper release.
 */
void
reaper_exit(struct proc *p)
{
        struct sysreaper *reap;

        /*
         * Release acquired reaper.
         */
        if ((reap = p->p_reaper) != NULL && reap->p == p) {
                lockmgr(&reap->lock, LK_EXCLUSIVE);
                p->p_reaper = reap->parent;
                if (p->p_reaper)
                        reaper_hold(p->p_reaper);
                reap->p = NULL;
                lockmgr(&reap->lock, LK_RELEASE);
                reaper_drop(reap);
        }

        /*
         * Return and clear reaper (caller is holding p_token for us)
         * (reap->p does not equal p).  Caller must drop it.
         */
        if ((reap = p->p_reaper) != NULL) {
                p->p_reaper = NULL;
                reaper_drop(reap);
        }
}
/*
 * Return a held (PHOLD) process representing the reaper for process (p).
 * NULL should not normally be returned.  Caller should PRELE() the returned
 * reaper process when finished.
 *
 * Remove dead internal nodes while we are at it.
 *
 * Process (p)'s token must be held on call.
 * The returned process's token is NOT acquired by this routine.
 */
struct proc *
reaper_get(struct sysreaper *reap)
{
        struct sysreaper *next;
        struct proc *reproc;

        if (reap == NULL)
                return (NULL);

        /*
         * Extra hold for loop
         */
        reaper_hold(reap);

        while (reap) {
                lockmgr(&reap->lock, LK_SHARED);
                if (reap->p) {
                        /*
                         * Success.  Return the held process after
                         * releasing the lock and dropping our loop ref.
                         */
                        reproc = reap->p;
                        PHOLD(reproc);
                        lockmgr(&reap->lock, LK_RELEASE);
                        reaper_drop(reap);
                        return (reproc);
                }
                if (reap->parent == NULL) {
                        lockmgr(&reap->lock, LK_RELEASE);
                        reaper_drop(reap);
                        break;
                }

                /*
                 * Traverse upwards in the reaper topology, destroy
                 * dead internal nodes when possible.
                 *
                 * NOTE: Our ref on next means that a dead node should
                 *       have 2 (ours and reap->parent's).
                 */
                next = reap->parent;
                while (next) {
                        reaper_hold(next);
                        if (next->refs == 2 && next->p == NULL) {
                                lockmgr(&reap->lock, LK_RELEASE);
                                lockmgr(&reap->lock, LK_EXCLUSIVE);
                                if (next->refs == 2 &&
                                    reap->parent == next &&
                                    next->p == NULL) {
                                        /*
                                         * reap->parent inherits ref from next.
                                         */
                                        reap->parent = next->parent;
                                        next->parent = NULL;
                                        reaper_drop(next);      /* ours */
                                        reaper_drop(next);      /* old parent */
                                        next = reap->parent;
                                        continue;       /* possible chain */
                                }
                        }
                        break;
                }
                lockmgr(&reap->lock, LK_RELEASE);
                reaper_drop(reap);
                reap = next;
        }
        return (NULL);
}