/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_exit.c	8.7 (Berkeley) 2/12/94
 * $FreeBSD: src/sys/kern/kern_exit.c,v 1.92.2.11 2003/01/13 22:51:16 dillon Exp $
 */
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/ktrace.h>
#include <sys/pioctl.h>
#include <sys/wait.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/taskqueue.h>
#include <sys/ptrace.h>
#include <sys/acct.h>		/* for acct_process() function prototype */
#include <sys/filedesc.h>
#include <sys/kern_syscall.h>
#include <sys/unistd.h>
#include <sys/eventhandler.h>
#include <sys/dsched.h>

#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

#include <sys/refcount.h>
#include <sys/spinlock2.h>
#include <sys/mplock2.h>

#include <machine/vmm.h>
static void reaplwps(void *context, int dummy);
static void reaplwp(struct lwp *lp);
static void killlwps(struct lwp *lp);

static MALLOC_DEFINE(M_ATEXIT, "atexit", "atexit callback");

/*
 * callout list for things to do at exit time
 */
struct exitlist {
        exitlist_fn function;
        TAILQ_ENTRY(exitlist) next;
};

TAILQ_HEAD(exit_list_head, exitlist);
static struct exit_list_head exit_list = TAILQ_HEAD_INITIALIZER(exit_list);

/*
 * Per-cpu state used to reap dead lwps asynchronously.
 */
static struct task *deadlwp_task[MAXCPU];
static struct lwplist deadlwp_list[MAXCPU];
static struct lwkt_token deadlwp_token[MAXCPU];
/*
 * exit --
 *	Death of process.
 *
 * SYS_EXIT_ARGS(int rval)
 */
int
sys_exit(struct exit_args *uap)
{
        exit1(W_EXITCODE(uap->rval, 0));
        /* NOTREACHED */
}
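/*
 * Note for readers (standard BSD wait encoding, restated here for clarity):
 * W_EXITCODE(ret, sig) packs the exit code above the signal number, so a
 * plain exit(ret) arrives here as W_EXITCODE(ret, 0) and the parent later
 * recovers 'ret' with WEXITSTATUS() after a successful wait*() call.
 */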
/*
 * Extended exit --
 *	Death of a lwp or process with optional bells and whistles.
 */
int
sys_extexit(struct extexit_args *uap)
{
        struct proc *p = curproc;
        int action, who;
        int error;

        action = EXTEXIT_ACTION(uap->how);
        who = EXTEXIT_WHO(uap->how);

        /* Check parameters before we might perform some action */
        switch (who) {
        case EXTEXIT_PROC:
        case EXTEXIT_LWP:
                break;
        default:
                return (EINVAL);
        }

        switch (action) {
        case EXTEXIT_SIMPLE:
                break;
        case EXTEXIT_SETINT:
                error = copyout(&uap->status, uap->addr, sizeof(uap->status));
                if (error)
                        return (error);
                break;
        default:
                return (EINVAL);
        }

        lwkt_gettoken(&p->p_token);

        switch (who) {
        case EXTEXIT_LWP:
                /*
                 * Be sure only to perform a simple lwp exit if there is at
                 * least one more lwp in the proc, which will call exit1()
                 * later, otherwise the proc will be an UNDEAD and not even a
                 * zombie.
                 */
                if (p->p_nthreads > 1) {
                        lwp_exit(0, NULL);	/* called w/ p_token held */
                        /* NOT REACHED */
                }
                /* else last lwp in proc:  do the real thing */
                /* FALLTHROUGH */
        default:	/* to help gcc */
        case EXTEXIT_PROC:
                lwkt_reltoken(&p->p_token);
                exit1(W_EXITCODE(uap->status, 0));
                /* NOT REACHED */
        }

        /* NOT REACHED */
        lwkt_reltoken(&p->p_token);	/* safety */
}
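/*
 * Illustrative userland use of extexit(2) (sketch only, not part of this
 * file): the 'how' argument combines an action with a scope, decoded above
 * by the EXTEXIT_ACTION()/EXTEXIT_WHO() macros.  For example,
 *
 *	extexit(EXTEXIT_LWP, 0, NULL);
 *
 * terminates just the calling lwp while other lwps remain in the process,
 * whereas an EXTEXIT_PROC scope (or the last remaining lwp) funnels into
 * exit1() and terminates the whole process.
 */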
/*
 * Kill all lwps associated with the current process except the
 * current lwp.   Return an error if we race another thread trying to
 * do the same thing and lose the race.
 *
 * If forexec is non-zero the current thread and process flags are
 * cleaned up so they can be reused.
 */
static int
killalllwps(int forexec)
{
        struct lwp *lp = curthread->td_lwp;
        struct proc *p = lp->lwp_proc;
        int fakestop;

        /*
         * Interlock against P_WEXIT.  Only one of the process's thread
         * is allowed to do the master exit.
         */
        lwkt_gettoken(&p->p_token);
        if (p->p_flags & P_WEXIT) {
                lwkt_reltoken(&p->p_token);
                return (EALREADY);
        }
        p->p_flags |= P_WEXIT;
        lwkt_gettoken(&lp->lwp_token);

        /*
         * Set temporary stopped state in case we are racing a coredump.
         * Otherwise the coredump may hang forever.
         */
        if (lp->lwp_mpflags & LWP_MP_WSTOP) {
                fakestop = 0;
        } else {
                atomic_set_int(&lp->lwp_mpflags, LWP_MP_WSTOP);
                ++p->p_nstopped;
                fakestop = 1;
                wakeup(&p->p_nstopped);
        }

        /*
         * Interlock with LWP_MP_WEXIT and kill any remaining LWPs
         */
        atomic_set_int(&lp->lwp_mpflags, LWP_MP_WEXIT);
        if (p->p_nthreads > 1)
                killlwps(lp);

        /*
         * Undo temporary stopped state
         */
        if (fakestop && (lp->lwp_mpflags & LWP_MP_WSTOP)) {
                atomic_clear_int(&lp->lwp_mpflags, LWP_MP_WSTOP);
                --p->p_nstopped;
        }

        /*
         * If doing this for an exec, clean up the remaining thread
         * (us) for continuing operation after all the other threads
         * have been killed.
         */
        if (forexec) {
                atomic_clear_int(&lp->lwp_mpflags, LWP_MP_WEXIT);
                p->p_flags &= ~P_WEXIT;
        }
        lwkt_reltoken(&lp->lwp_token);
        lwkt_reltoken(&p->p_token);

        return (0);
}
/*
 * Kill all LWPs except the current one.  Do not try to signal
 * LWPs which have exited on their own or have already been
 * signaled.
 */
static void
killlwps(struct lwp *lp)
{
        struct proc *p = lp->lwp_proc;
        struct lwp *tlp;

        /*
         * Kill the remaining LWPs.  We must send the signal before setting
         * LWP_MP_WEXIT.  The setting of WEXIT is optional but helps reduce
         * races.  tlp must be held across the call as it might block and
         * allow the target lwp to rip itself out from under our loop.
         */
        FOREACH_LWP_IN_PROC(tlp, p) {
                LWPHOLD(tlp);
                lwkt_gettoken(&tlp->lwp_token);
                if ((tlp->lwp_mpflags & LWP_MP_WEXIT) == 0) {
                        atomic_set_int(&tlp->lwp_mpflags, LWP_MP_WEXIT);
                        lwpsignal(p, tlp, SIGKILL);
                }
                lwkt_reltoken(&tlp->lwp_token);
                LWPRELE(tlp);
        }

        /*
         * Wait for everything to clear out.  Also make sure any tstop()s
         * are signalled (we are holding p_token for the interlock).
         */
        wakeup(p);
        while (p->p_nthreads > 1)
                tsleep(&p->p_nthreads, 0, "killlwps", 0);
}
/*
 * Exit: deallocate address space and other resources, change proc state
 * to zombie, and unlink proc from allproc and parent's lists.  Save exit
 * status and rusage for wait().  Check for child processes and orphan them.
 */
void
exit1(int rv)
{
        struct thread *td = curthread;
        struct proc *p = td->td_proc;
        struct lwp *lp = td->td_lwp;
        struct proc *q;
        struct proc *pp;
        struct proc *reproc;
        struct sysreaper *reap;
        struct vnode *vtmp;
        struct exitlist *ep;
        int error;

        lwkt_gettoken(&p->p_token);

        if (p->p_pid == 1) {
                kprintf("init died (signal %d, exit %d)\n",
                        WTERMSIG(rv), WEXITSTATUS(rv));
                panic("Going nowhere without my init!");
        }
        varsymset_clean(&p->p_varsymset);
        lockuninit(&p->p_varsymset.vx_lock);
        /*
         * Kill all lwps associated with the current process, return an
         * error if we race another thread trying to do the same thing
         * and lose the race.
         */
        error = killalllwps(0);
        if (error) {
                lwp_exit(0, NULL);
                /* NOT REACHED */
        }

        /* are we a task leader? */
        if (p == p->p_leader) {
                struct kill_args killArgs;

                killArgs.signum = SIGKILL;
                q = p->p_peers;
                while (q) {
                        killArgs.pid = q->p_pid;
                        /*
                         * The interface for kill is better
                         * than the internal signal
                         */
                        sys_kill(&killArgs);
                        q = q->p_peers;
                }
                while (p->p_peers)
                        tsleep((caddr_t)p, 0, "exit1", 0);
        }
        STOPEVENT(p, S_EXIT, rv);
        p->p_flags |= P_POSTEXIT;	/* stop procfs stepping */

        /*
         * Check if any loadable modules need anything done at process exit.
         * e.g. SYSV IPC stuff
         * XXX what if one of these generates an error?
         */
        p->p_xstat = rv;
        EVENTHANDLER_INVOKE(process_exit, p);

        /*
         * XXX: imho, the eventhandler stuff is much cleaner than this.
         *	Maybe we should move everything to use eventhandler.
         */
        TAILQ_FOREACH(ep, &exit_list, next)
                (*ep->function)(td);

        if (p->p_flags & P_PROFIL)
                stopprofclock(p);

        SIGEMPTYSET(p->p_siglist);
        SIGEMPTYSET(lp->lwp_siglist);
        if (timevalisset(&p->p_realtimer.it_value))
                callout_terminate(&p->p_ithandle);

        /*
         * Reset any sigio structures pointing to us as a result of
         * F_SETOWN with our pid.
         */
        funsetownlst(&p->p_sigiolst);
        /*
         * Close open files and release open-file table.
         * This may block!
         */
        fdfree(p, NULL);

        if (p->p_leader->p_peers) {
                q = p->p_leader;
                while (q->p_peers != p)
                        q = q->p_peers;
                q->p_peers = p->p_peers;
                wakeup((caddr_t)p->p_leader);
        }

        /*
         * XXX Shutdown SYSV semaphores
         */
        semexit(p);
        /* The next two chunks should probably be moved to vmspace_exit. */

        /*
         * Clean up data related to virtual kernel operation.  Clean up
         * any vkernel context related to the current lwp now so we can
         * destroy p_vkernel.
         */
        vkernel_lwp_exit(lp);
        vkernel_exit(p);

        /*
         * Release the user portion of address space.  The exitbump prevents
         * the vmspace from being completely eradicated (using holdcnt).
         * This releases references to vnodes, which could cause I/O if the
         * file has been unlinked.  We need to do this early enough that
         * we can still sleep.
         *
         * We can't free the entire vmspace as the kernel stack may be mapped
         * within that space also.
         *
         * Processes sharing the same vmspace may exit in one order, and
         * get cleaned up by vmspace_exit() in a different order.  The
         * last exiting process to reach this point releases as much of
         * the environment as it can, and the last process cleaned up
         * by vmspace_exit() (which decrements exitingcnt) cleans up the
         * remainder.
         *
         * NOTE: Releasing p_token around this call is helpful if the
         *	 vmspace had a huge RSS.  Otherwise some other process
         *	 trying to do an allproc or other scan (like 'ps') may
         *	 stall for a long time.
         */
        lwkt_reltoken(&p->p_token);
        vmspace_relexit(p->p_vmspace);
        lwkt_gettoken(&p->p_token);
        if (SESS_LEADER(p)) {
                struct session *sp = p->p_session;

                if (sp->s_ttyvp) {
                        /*
                         * We are the controlling process.  Signal the
                         * foreground process group, drain the controlling
                         * terminal, and revoke access to the controlling
                         * terminal.
                         *
                         * NOTE: while waiting for the process group to exit
                         * it is possible that one of the processes in the
                         * group will revoke the tty, so the ttyclosesession()
                         * function will re-check sp->s_ttyvp.
                         */
                        if (sp->s_ttyp && (sp->s_ttyp->t_session == sp)) {
                                if (sp->s_ttyp->t_pgrp)
                                        pgsignal(sp->s_ttyp->t_pgrp, SIGHUP, 1);
                                ttyclosesession(sp, 1); /* also revoke */
                        }

                        /*
                         * Release the tty.  If someone has it open via
                         * /dev/tty then close it (since they no longer can
                         * once we've NULL'd it out).
                         */
                        ttyclosesession(sp, 0);

                        /*
                         * s_ttyp is not zero'd; we use this to indicate
                         * that the session once had a controlling terminal.
                         * (for logging and informational purposes)
                         */
                }
                sp->s_leader = NULL;
        }
        fixjobc(p, p->p_pgrp, 0);
        (void)acct_process(p);
#ifdef KTRACE
        /*
         * release trace file
         */
        ktrdestroy(&p->p_tracenode);
        p->p_traceflag = 0;
#endif
        /*
         * Release reference to text vnode
         */
        if ((vtmp = p->p_textvp) != NULL) {
                p->p_textvp = NULL;
                vrele(vtmp);
        }

        /* Release namecache handle to text file */
        if (p->p_textnch.ncp)
                cache_drop(&p->p_textnch);
        /*
         * We have to handle PPWAIT here or proc_move_allproc_zombie()
         * will block on the PHOLD() the parent is doing.
         *
         * We are using the flag as an interlock so an atomic op is
         * necessary to synchronize with the parent's cpu.
         */
        if (p->p_flags & P_PPWAIT) {
                if (p->p_pptr && p->p_pptr->p_upmap)
                        atomic_add_int(&p->p_pptr->p_upmap->invfork, -1);
                atomic_clear_int(&p->p_flags, P_PPWAIT);
                wakeup(p->p_pptr);
        }
        /*
         * Move the process to the zombie list.  This will block
         * until the process p_lock count reaches 0.  The process will
         * not be reaped until TDF_EXITING is set by cpu_thread_exit(),
         * which is called from cpu_proc_exit().
         *
         * Interlock against waiters using p_waitgen.  We increment
         * p_waitgen after completing the move of our process to the
         * zombie list.
         *
         * WARNING: pp becomes stale when we block, clear it now as a
         *	    reminder.
         */
        proc_move_allproc_zombie(p);
        pp = p->p_pptr;
        atomic_add_long(&pp->p_waitgen, 1);
        pp = NULL;

        /*
         * release controlled reaper for exit if we own it and return the
         * remaining reaper (the one for us), which we will drop after we
         * are done with it.
         */
        reap = reaper_exit(p);
        /*
         * Reparent all of this process's children to the init process or
         * to the designated reaper.  We must hold the reaper's p_token in
         * order to safely mess with p_children.
         *
         * We already hold p->p_token (to remove the children from our list).
         */
        reproc = NULL;
        q = LIST_FIRST(&p->p_children);
        if (q) {
                reproc = reaper_get(reap);
                lwkt_gettoken(&reproc->p_token);
                while ((q = LIST_FIRST(&p->p_children)) != NULL) {
                        PHOLD(q);
                        lwkt_gettoken(&q->p_token);
                        if (q != LIST_FIRST(&p->p_children)) {
                                lwkt_reltoken(&q->p_token);
                                PRELE(q);
                                continue;
                        }
                        LIST_REMOVE(q, p_sibling);
                        LIST_INSERT_HEAD(&reproc->p_children, q, p_sibling);
                        q->p_pptr = reproc;
                        q->p_ppid = reproc->p_pid;
                        q->p_sigparent = SIGCHLD;

                        /*
                         * Traced processes are killed
                         * since their existence means someone is screwing up.
                         */
                        if (q->p_flags & P_TRACED) {
                                q->p_flags &= ~P_TRACED;
                                ksignal(q, SIGKILL);
                        }
                        lwkt_reltoken(&q->p_token);
                        PRELE(q);
                }
                lwkt_reltoken(&reproc->p_token);
        }
        /*
         * Save exit status and final rusage info.  We no longer add
         * child rusage info into self times, wait4() and kern_wait()
         * handles it in order to properly support wait6().
         */
        calcru_proc(p, &p->p_ru);
        /*ruadd(&p->p_ru, &p->p_cru); REMOVED */

        /*
         * notify interested parties of our demise.
         */
        KNOTE(&p->p_klist, NOTE_EXIT);

        /*
         * Notify parent that we're gone.  If parent has the PS_NOCLDWAIT
         * flag set, or if the handler is set to SIG_IGN, notify the reaper
         * instead (it will handle this situation).
         *
         * NOTE: The reaper can still be the parent process.
         */
        if (p->p_pptr->p_sigacts->ps_flag & (PS_NOCLDWAIT | PS_CLDSIGIGN)) {
                reproc = reaper_get(reap);
                proc_reparent(p, reproc);
        }

        /*
         * Signal (possibly new) parent.
         */
        pp = p->p_pptr;
        if (p->p_sigparent && pp != initproc) {
                int sig = p->p_sigparent;

                if (sig != SIGUSR1 && sig != SIGCHLD)
                        sig = SIGCHLD;
                ksignal(pp, sig);
        } else {
                ksignal(pp, SIGCHLD);
        }

        p->p_flags &= ~P_TRACED;
        /*
         * cpu_exit is responsible for clearing curproc, since
         * it is heavily integrated with the thread/switching sequence.
         *
         * Other substructures are freed from wait().
         */
        if (p->p_limit) {
                struct plimit *rlimit;

                rlimit = p->p_limit;
                p->p_limit = NULL;
                plimit_free(rlimit);
        }

        /*
         * Finally, call machine-dependent code to release as many of the
         * lwp's resources as we can and halt execution of this thread.
         *
         * pp is a wild pointer now but still the correct wakeup() target.
         * lwp_exit() only uses it to send the wakeup() signal to the likely
         * parent.  Any reparenting race that occurs will get a signal
         * automatically and not be an issue.
         */
        lwp_exit(1, pp);
}
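/*
 * Roadmap (a summary of the sequence above, for readers): exit1() kills the
 * process's other lwps via killalllwps(), runs the exit callouts and event
 * handlers, tears down open files, SysV IPC, the user address space and any
 * controlling tty, moves the process onto the zombie list, reparents its
 * children to the designated reaper (or init), notifies the parent, and
 * finally calls lwp_exit(1, pp), which never returns.
 */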
/*
 * Eventually called by every exiting LWP
 *
 * p->p_token must be held.  mplock may be held and will be released.
 */
void
lwp_exit(int masterexit, void *waddr)
{
        struct thread *td = curthread;
        struct lwp *lp = td->td_lwp;
        struct proc *p = lp->lwp_proc;
        int dowake = 0;

        /*
         * Release the current user process designation on the process so
         * the userland scheduler can work in someone else.
         */
        p->p_usched->release_curproc(lp);

        /*
         * lwp_exit() may be called without setting LWP_MP_WEXIT, so
         * make sure it is set here.
         */
        ASSERT_LWKT_TOKEN_HELD(&p->p_token);
        atomic_set_int(&lp->lwp_mpflags, LWP_MP_WEXIT);

        /*
         * Clean up any virtualization
         */
        vkernel_lwp_exit(lp);

        /*
         * Clean up select/poll support
         */
        kqueue_terminate(&lp->lwp_kqueue);

        /*
         * Clean up any syscall-cached ucred or rlimit.
         */
        if (td->td_ucred) {
                crfree(td->td_ucred);
                td->td_ucred = NULL;
        }
        if (td->td_limit) {
                struct plimit *rlimit;

                rlimit = td->td_limit;
                td->td_limit = NULL;
                plimit_free(rlimit);
        }

        /*
         * Cleanup any cached descriptors for this thread
         */
        fexitcache(td);

        /*
         * Nobody actually wakes us when the lock
         * count reaches zero, so just wait one tick.
         */
        while (lp->lwp_lock > 0)
                tsleep(lp, 0, "lwpexit", 1);
        /* Hand down resource usage to our proc */
        ruadd(&p->p_ru, &lp->lwp_ru);

        /*
         * If we don't hold the process until the LWP is reaped wait*()
         * may try to dispose of its vmspace before all the LWPs have
         * actually terminated.
         */
        PHOLD(p);

        /*
         * Do any remaining work that might block on us.  We should be
         * coded such that further blocking is ok after decrementing
         * p_nthreads but don't take the chance.
         */
        dsched_exit_thread(td);
        biosched_done(curthread);

        /*
         * We have to use the reaper for all the LWPs except the one doing
         * the master exit.  The LWP doing the master exit can just be
         * left on p_lwps and the process reaper will deal with it
         * synchronously, which is much faster.
         *
         * Wakeup anyone waiting on p_nthreads to drop to 1 or 0.
         *
         * The process is left held until the reaper calls lwp_dispose() on
         * the lp (after calling lwp_wait()).
         */
        if (masterexit == 0) {
                int cpu = mycpuid;

                lwp_rb_tree_RB_REMOVE(&p->p_lwp_tree, lp);
                --p->p_nthreads;
                if ((p->p_flags & P_MAYBETHREADED) && p->p_nthreads <= 1)
                        dowake = 1;
                lwkt_gettoken(&deadlwp_token[cpu]);
                LIST_INSERT_HEAD(&deadlwp_list[cpu], lp, u.lwp_reap_entry);
                taskqueue_enqueue(taskqueue_thread[cpu], deadlwp_task[cpu]);
                lwkt_reltoken(&deadlwp_token[cpu]);
        } else {
                --p->p_nthreads;
                if ((p->p_flags & P_MAYBETHREADED) && p->p_nthreads <= 1)
                        dowake = 1;
        }

        /*
         * We no longer need p_token.
         *
         * Tell the userland scheduler that we are going away
         */
        lwkt_reltoken(&p->p_token);
        p->p_usched->heuristic_exiting(lp, p);

        /*
         * Issue late wakeups after releasing our token to give us a chance
         * to deschedule and switch away before another cpu in a wait*()
         * reaps us.  This is done as late as possible to reduce contention.
         */
        if (dowake)
                wakeup(&p->p_nthreads);
        if (waddr)
                wakeup(waddr);

        cpu_lwp_exit();
}
/*
 * Wait until a lwp is completely dead.  The final interlock in this drama
 * is when TDF_EXITING is set in cpu_thread_exit() just before the final
 * switchout.
 *
 * At the point TDF_EXITING is set a complete exit is accomplished when
 * TDF_RUNNING and TDF_PREEMPT_LOCK are both clear.  td_mpflags has two
 * post-switch interlock flags that can be used to wait for the TDF_
 * flags to clear.
 *
 * Returns non-zero on success, and zero if the caller needs to retry
 * the lwp_wait().
 */
static int
lwp_wait(struct lwp *lp)
{
        struct thread *td = lp->lwp_thread;
        u_int mpflags;

        KKASSERT(lwkt_preempted_proc() != lp);

        /*
         * This bit of code uses the thread destruction interlock
         * managed by lwkt_switch_return() to wait for the lwp's
         * thread to completely disengage.
         *
         * It is possible for us to race another cpu core so we
         * have to do this correctly.
         */
        for (;;) {
                mpflags = td->td_mpflags;
                if (mpflags & TDF_MP_EXITSIG)
                        break;
                tsleep_interlock(td, 0);
                if (atomic_cmpset_int(&td->td_mpflags, mpflags,
                                      mpflags | TDF_MP_EXITWAIT)) {
                        tsleep(td, PINTERLOCKED, "lwpxt", 0);
                }
        }

        /*
         * We've already waited for the core exit but there can still
         * be other refs from e.g. process scans and such.
         */
        if (lp->lwp_lock > 0) {
                tsleep(lp, 0, "lwpwait1", 1);
                return (0);
        }
        if (td->td_refs) {
                tsleep(td, 0, "lwpwait2", 1);
                return (0);
        }

        /*
         * Now that we have the thread destruction interlock these flags
         * really should already be cleaned up, keep a check for safety.
         *
         * We can't rip its stack out from under it until TDF_EXITING is
         * set and both TDF_RUNNING and TDF_PREEMPT_LOCK are clear.
         * TDF_PREEMPT_LOCK must be checked because TDF_RUNNING
         * will be cleared temporarily if a thread gets preempted.
         */
        while ((td->td_flags & (TDF_RUNNING |
                                TDF_PREEMPT_LOCK |
                                TDF_EXITING)) != TDF_EXITING) {
                tsleep(lp, 0, "lwpwait3", 1);
        }

        KASSERT((td->td_flags & (TDF_RUNQ|TDF_TSLEEPQ)) == 0,
                ("lwp_wait: td %p (%s) still on run or sleep queue",
                 td, td->td_comm));
        return (1);
}
/*
 * Release the resources associated with a lwp.
 * The lwp must be completely dead.
 */
void
lwp_dispose(struct lwp *lp)
{
        struct thread *td = lp->lwp_thread;

        KKASSERT(lwkt_preempted_proc() != lp);
        KKASSERT(lp->lwp_lock == 0);
        KKASSERT(td->td_refs == 0);
        KKASSERT((td->td_flags & (TDF_RUNNING |
                                  TDF_PREEMPT_LOCK |
                                  TDF_EXITING)) == TDF_EXITING);

        PRELE(lp->lwp_proc);
        lp->lwp_proc = NULL;
        lp->lwp_thread = NULL;
        lwkt_free_thread(td);
        kfree(lp, M_LWP);
}
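/*
 * Summary of the lwp teardown flow visible in this file: lwp_exit() queues
 * a non-master lwp on the per-cpu deadlwp_list, the taskqueue then runs
 * reaplwps() -> reaplwp(), which spins on lwp_wait() until the thread has
 * fully switched out and finally calls lwp_dispose() to free the lwp and
 * its thread.  The master lwp is instead reaped synchronously from
 * kern_wait() below.
 */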
int
sys_wait4(struct wait_args *uap)
{
        struct __wrusage wrusage;
        int error;
        int status;
        int options;
        id_t id;
        idtype_t idtype;

        options = uap->options | WEXITED | WTRAPPED;
        id = uap->pid;

        if (id == WAIT_ANY) {
                idtype = P_ALL;
        } else if (id == WAIT_MYPGRP) {
                idtype = P_PGID;
                id = curproc->p_pgid;
        } else if (id < 0) {
                idtype = P_PGID;
                id = -id;
        } else {
                idtype = P_PID;
        }

        error = kern_wait(idtype, id, &status, options, &wrusage,
                          NULL, &uap->sysmsg_result);

        if (error == 0 && uap->status)
                error = copyout(&status, uap->status, sizeof(*uap->status));
        if (error == 0 && uap->rusage) {
                ruadd(&wrusage.wru_self, &wrusage.wru_children);
                error = copyout(&wrusage.wru_self, uap->rusage,
                                sizeof(*uap->rusage));
        }
        return (error);
}
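/*
 * For reference (standard BSD encoding, noted here for clarity): the status
 * word copied out above is decoded in userland with WIFEXITED()/
 * WEXITSTATUS(), WIFSIGNALED()/WTERMSIG() and WIFSTOPPED()/WSTOPSIG(),
 * matching the W_EXITCODE() and W_STOPCODE() values produced elsewhere in
 * this file.
 */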
int
sys_wait6(struct wait6_args *uap)
{
        struct __wrusage wrusage;
        struct __siginfo info;
        struct __siginfo *infop;
        int error;
        int status;
        int options;
        id_t id;
        idtype_t idtype;

        /*
         * NOTE: wait6() requires WEXITED and WTRAPPED to be specified if
         *	 the caller wants to wait for exited or trapped children;
         *	 unlike wait4() they are not added implicitly.
         */
        options = uap->options;
        idtype = uap->idtype;
        id = uap->id;
        infop = uap->info ? &info : NULL;

        if (id == WAIT_MYPGRP) {
                idtype = P_PGID;
                id = curproc->p_pgid;
        }

        /* let kern_wait deal with the remainder */
        error = kern_wait(idtype, id, &status, options,
                          &wrusage, infop, &uap->sysmsg_result);

        if (error == 0 && uap->status)
                error = copyout(&status, uap->status, sizeof(*uap->status));
        if (error == 0 && uap->wrusage)
                error = copyout(&wrusage, uap->wrusage, sizeof(*uap->wrusage));
        if (error == 0 && uap->info)
                error = copyout(&info, uap->info, sizeof(*uap->info));
        return (error);
}
/*
 * kernel wait*() system call support
 */
int
kern_wait(idtype_t idtype, id_t id, int *status, int options,
          struct __wrusage *wrusage, struct __siginfo *info, int *res)
{
        struct thread *td = curthread;
        struct lwp *lp;
        struct proc *q = td->td_proc;
        struct proc *p, *t;
        struct ucred *cr;
        struct pargs *pa;
        struct sigacts *ps;
        int nfound, error;
        long waitgen;

        /*
         * Must not have extraneous options.  Must have at least one
         * matchable option.
         */
        if (options &~ (WUNTRACED|WNOHANG|WCONTINUED|WLINUXCLONE|WSTOPPED|
                        WEXITED|WTRAPPED|WNOWAIT)) {
                return (EINVAL);
        }
        if ((options & (WEXITED | WUNTRACED | WCONTINUED | WTRAPPED)) == 0) {
                return (EINVAL);
        }

        /*
         * Protect the q->p_children list
         */
        lwkt_gettoken(&q->p_token);
loop:
        /*
         * All sorts of things can change due to blocking so we have to loop
         * all the way back up here.
         *
         * The problem is that if a process group is stopped and the parent
         * is doing a wait*(..., WUNTRACED, ...), it will see the STOP
         * of the child and then stop itself when it tries to return from the
         * system call.  When the process group is resumed the parent will
         * then get the STOP status even though the child has now resumed
         * (a followup wait*() will get the CONT status).
         *
         * Previously the CONT would overwrite the STOP because the tstop
         * was handled within tsleep(), and the parent would only see
         * the CONT when both are stopped and continued together.  This little
         * two-line hack restores this effect.
         */
        if (STOPLWP(q, td->td_lwp))
                tstop();
        /*
         * NOTE: We don't want to break q's p_token in the loop for the
         *	 case where no children are found or we risk breaking the
         *	 interlock between child and parent.
         */
        waitgen = atomic_fetchadd_long(&q->p_waitgen, 0x80000000);
        nfound = 0;
        LIST_FOREACH(p, &q->p_children, p_sibling) {
                /*
                 * Filter, (p) will be held on fall-through.  Try to optimize
                 * this to avoid the atomic op until we are pretty sure we
                 * want this process.
                 */
                switch (idtype) {
                case P_ALL:
                        PHOLD(p);
                        break;
                case P_PID:
                        if (p->p_pid != (pid_t)id)
                                continue;
                        PHOLD(p);
                        break;
                case P_PGID:
                        if (p->p_pgid != (pid_t)id)
                                continue;
                        PHOLD(p);
                        break;
                case P_SID:
                        PHOLD(p);
                        if (p->p_session && p->p_session->s_sid != (pid_t)id) {
                                PRELE(p);
                                continue;
                        }
                        break;
                case P_UID:
                        PHOLD(p);
                        if (p->p_ucred->cr_uid != (uid_t)id) {
                                PRELE(p);
                                continue;
                        }
                        break;
                case P_GID:
                        PHOLD(p);
                        if (p->p_ucred->cr_gid != (gid_t)id) {
                                PRELE(p);
                                continue;
                        }
                        break;
                case P_JAILID:
                        PHOLD(p);
                        if (p->p_ucred->cr_prison &&
                            p->p_ucred->cr_prison->pr_id != (int)id) {
                                PRELE(p);
                                continue;
                        }
                        break;
                default:
                        /* unsupported filter */
                        continue;
                }
                /* (p) is held at this point */

                /*
                 * This special case handles a kthread spawned by linux_clone
                 * (see linux_misc.c).  The linux_wait4 and linux_waitpid
                 * functions need to be able to distinguish between waiting
                 * on a process and waiting on a thread.  It is a thread if
                 * p_sigparent is not SIGCHLD, and the WLINUXCLONE option
                 * signifies we want to wait for threads and not processes.
                 */
                if ((p->p_sigparent != SIGCHLD) ^
                    ((options & WLINUXCLONE) != 0)) {
                        PRELE(p);
                        continue;
                }
                nfound++;
                if (p->p_stat == SZOMB && (options & WEXITED)) {
                        /*
                         * We may go into SZOMB with threads still present.
                         * We must wait for them to exit before we can reap
                         * the master thread, otherwise we may race reaping
                         * non-master threads.
                         *
                         * Only this routine can remove a process from
                         * the zombie list and destroy it.
                         */
                        lwkt_gettoken(&p->p_token);
                        if (p->p_pptr != q) {
                                lwkt_reltoken(&p->p_token);
                                PRELE(p);
                                goto loop;
                        }
                        while (p->p_nthreads > 0) {
                                tsleep(&p->p_nthreads, 0, "lwpzomb", hz);
                        }

                        /*
                         * Reap any LWPs left in p->p_lwps.  This is usually
                         * just the last LWP.  This must be done before
                         * we loop on p_lock since the lwps hold a ref on
                         * it as a vmspace interlock.
                         *
                         * Once that is accomplished p_nthreads had better
                         * be zero.
                         */
                        while ((lp = RB_ROOT(&p->p_lwp_tree)) != NULL) {
                                /*
                                 * Make sure no one is using this lwp, before
                                 * it is removed from the tree.  If we didn't
                                 * wait it here, lwp tree iteration with
                                 * blocking operation would be broken.
                                 */
                                while (lp->lwp_lock > 0)
                                        tsleep(lp, 0, "zomblwp", 1);
                                lwp_rb_tree_RB_REMOVE(&p->p_lwp_tree, lp);
                                reaplwp(lp);
                        }
                        KKASSERT(p->p_nthreads == 0);

                        /*
                         * Don't do anything really bad until all references
                         * to the process go away.  This may include other
                         * LWPs which are still in the process of being
                         * reaped.  We can't just pull the rug out from under
                         * them because they may still be using the VM space.
                         *
                         * Certain kernel facilities such as /proc will also
                         * put a hold on the process for short periods of
                         * time.
                         */
                        PRELE(p);		/* from top of loop */
                        PSTALL(p, "reap3", 1);	/* 1 ref (for PZOMBHOLD) */
                        /* Take care of our return values. */
                        *res = p->p_pid;
                        *status = p->p_xstat;
                        wrusage->wru_self = p->p_ru;
                        wrusage->wru_children = p->p_cru;

                        if (info) {
                                bzero(info, sizeof(*info));
                                info->si_signo = SIGCHLD;
                                if (WIFEXITED(p->p_xstat)) {
                                        info->si_code = CLD_EXITED;
                                        info->si_status =
                                                WEXITSTATUS(p->p_xstat);
                                } else {
                                        info->si_code = CLD_KILLED;
                                        info->si_status = WTERMSIG(p->p_xstat);
                                }
                                info->si_pid = p->p_pid;
                                info->si_uid = p->p_ucred->cr_uid;
                        }

                        /*
                         * WNOWAIT shortcuts to done here, leaving the
                         * child on the zombie list.
                         */
                        if (options & WNOWAIT) {
                                lwkt_reltoken(&p->p_token);
                                PRELE(p);
                                error = 0;
                                goto done;
                        }

                        /*
                         * If we got the child via a ptrace 'attach',
                         * we need to give it back to the old parent.
                         */
                        if (p->p_oppid && (t = pfind(p->p_oppid)) != NULL) {
                                p->p_oppid = 0;
                                proc_reparent(p, t);
                                ksignal(t, SIGCHLD);
                                wakeup((caddr_t)t);
                                PRELE(t);
                                lwkt_reltoken(&p->p_token);
                                PRELE(p);
                                error = 0;
                                goto done;
                        }
                        /*
                         * Unlink the proc from its process group so that
                         * the following operations won't lead to an
                         * inconsistent state for processes running down
                         * the zombie list.
                         */
                        proc_remove_zombie(p);
                        lwkt_reltoken(&p->p_token);
                        leavepgrp(p);

                        ruadd(&q->p_cru, &p->p_ru);
                        ruadd(&q->p_cru, &p->p_cru);

                        /*
                         * Decrement the count of procs running with this uid.
                         */
                        chgproccnt(p->p_ucred->cr_ruidinfo, -1, 0);

                        /*
                         * Free up credentials.  p_spin is required to
                         * avoid races against allproc scans.
                         */
                        spin_lock(&p->p_spin);
                        cr = p->p_ucred;
                        p->p_ucred = NULL;
                        spin_unlock(&p->p_spin);
                        if (cr)
                                crfree(cr);

                        /*
                         * Remove unused arguments
                         */
                        pa = p->p_args;
                        p->p_args = NULL;
                        if (pa && refcount_release(&pa->ar_ref)) {
                                kfree(pa, M_PARGS);
                                pa = NULL;
                        }

                        ps = p->p_sigacts;
                        p->p_sigacts = NULL;
                        if (ps && refcount_release(&ps->ps_refcnt)) {
                                kfree(ps, M_SUBPROC);
                                ps = NULL;
                        }

                        /*
                         * Our exitingcount was incremented when the process
                         * became a zombie, now that the process has been
                         * removed from (almost) all lists we should be able
                         * to safely destroy its vmspace.  Wait for any current
                         * holders to go away (so the vmspace remains stable),
                         * then scrap it.
                         *
                         * NOTE: Releasing the parent process (q) p_token
                         *	 across the vmspace_exitfree() call is
                         *	 important here to reduce stalls on
                         *	 interactions with (q) (such as
                         *	 fork/exec/wait or 'ps').
                         */
                        PSTALL(p, "reap4", 1);
                        lwkt_reltoken(&q->p_token);
                        vmspace_exitfree(p);
                        lwkt_gettoken(&q->p_token);
                        PSTALL(p, "reap5", 1);

                        /*
                         * NOTE: We have to officially release ZOMB in order
                         *	 to ensure that a racing thread in kern_wait()
                         *	 which blocked on ZOMB is woken up.
                         */
                        kfree(p->p_uidpcpu, M_SUBPROC);
                        atomic_add_int(&nprocs, -1);
                        error = 0;
                        goto done;
                }
                /*
                 * Process has not yet exited
                 */
                if ((p->p_stat == SSTOP || p->p_stat == SCORE) &&
                    (p->p_flags & P_WAITED) == 0 &&
                    (((p->p_flags & P_TRACED) && (options & WTRAPPED)) ||
                     (options & WSTOPPED))) {
                        lwkt_gettoken(&p->p_token);
                        if (p->p_pptr != q) {
                                lwkt_reltoken(&p->p_token);
                                PRELE(p);
                                goto loop;
                        }
                        if ((p->p_stat != SSTOP && p->p_stat != SCORE) ||
                            (p->p_flags & P_WAITED) != 0 ||
                            ((p->p_flags & P_TRACED) == 0 &&
                             (options & WUNTRACED) == 0)) {
                                lwkt_reltoken(&p->p_token);
                                PRELE(p);
                                goto loop;
                        }

                        /*
                         * Don't set P_WAITED if WNOWAIT specified, leaving
                         * the process in a waitable state.
                         */
                        if ((options & WNOWAIT) == 0)
                                p->p_flags |= P_WAITED;

                        *res = p->p_pid;
                        *status = W_STOPCODE(p->p_xstat);
                        /* Zero rusage so we get something consistent. */
                        bzero(wrusage, sizeof(*wrusage));
                        error = 0;

                        if (info) {
                                bzero(info, sizeof(*info));
                                if (p->p_flags & P_TRACED)
                                        info->si_code = CLD_TRAPPED;
                                else
                                        info->si_code = CLD_STOPPED;
                                info->si_status = WSTOPSIG(p->p_xstat);
                        }

                        lwkt_reltoken(&p->p_token);
                        PRELE(p);
                        goto done;
                }
                if ((options & WCONTINUED) && (p->p_flags & P_CONTINUED)) {
                        lwkt_gettoken(&p->p_token);
                        if (p->p_pptr != q) {
                                lwkt_reltoken(&p->p_token);
                                PRELE(p);
                                goto loop;
                        }
                        if ((p->p_flags & P_CONTINUED) == 0) {
                                lwkt_reltoken(&p->p_token);
                                PRELE(p);
                                goto loop;
                        }

                        *res = p->p_pid;

                        /*
                         * Don't set P_WAITED if WNOWAIT specified, leaving
                         * the process in a waitable state.
                         */
                        if ((options & WNOWAIT) == 0)
                                p->p_flags &= ~P_CONTINUED;

                        *status = SIGCONT;
                        error = 0;

                        if (info) {
                                bzero(info, sizeof(*info));
                                info->si_code = CLD_CONTINUED;
                                info->si_status = WSTOPSIG(p->p_xstat);
                        }

                        lwkt_reltoken(&p->p_token);
                        PRELE(p);
                        goto done;
                }
                PRELE(p);
        }
        /*
         * If we found no interesting processes, fail.
         */
        if (nfound == 0) {
                error = ECHILD;
                goto done;
        }
        if (options & WNOHANG) {
                *res = 0;
                error = 0;
                goto done;
        }

        /*
         * Wait for signal - interlocked using q->p_waitgen.
         */
        error = 0;
        while ((waitgen & 0x7FFFFFFF) == (q->p_waitgen & 0x7FFFFFFF)) {
                tsleep_interlock(q, PCATCH);
                waitgen = atomic_fetchadd_long(&q->p_waitgen, 0x80000000);
                if ((waitgen & 0x7FFFFFFF) == (q->p_waitgen & 0x7FFFFFFF)) {
                        error = tsleep(q, PCATCH | PINTERLOCKED, "wait", 0);
                        break;
                }
        }
        if (error)
                goto done;
        goto loop;

done:
        lwkt_reltoken(&q->p_token);
        return (error);
}
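/*
 * Note on the p_waitgen handshake used above (restating the mechanism for
 * clarity): the low 31 bits of p_waitgen act as a child-exit generation
 * counter which exit1() bumps after moving the exiting process onto the
 * zombie list.  The waiting parent samples those bits before scanning its
 * children; its own atomic_fetchadd of 0x80000000 only touches the upper
 * bits.  If the low bits are unchanged when the parent is about to sleep,
 * no child has exited since the scan and the tsleep_interlock()/
 * PINTERLOCKED pair closes the wakeup race; otherwise the parent rescans
 * immediately.
 */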
/*
 * Change child's parent process to parent.
 *
 * p_children/p_sibling requires the parent's token, and
 * changing pptr requires the child's token, so we have to
 * get three tokens to do this operation.  We also need to
 * hold pointers that might get ripped out from under us to
 * preserve structural integrity.
 *
 * It is possible to race another reparent or disconnect or other
 * similar operation.  We must retry when this situation occurs.
 * Once we successfully reparent the process we no longer care
 * about any races.
 */
void
proc_reparent(struct proc *child, struct proc *parent)
{
        struct proc *opp;

        PHOLD(parent);
        while ((opp = child->p_pptr) != parent) {
                PHOLD(opp);
                lwkt_gettoken(&opp->p_token);
                lwkt_gettoken(&child->p_token);
                lwkt_gettoken(&parent->p_token);
                if (child->p_pptr != opp) {
                        lwkt_reltoken(&parent->p_token);
                        lwkt_reltoken(&child->p_token);
                        lwkt_reltoken(&opp->p_token);
                        PRELE(opp);
                        continue;
                }
                LIST_REMOVE(child, p_sibling);
                LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);
                child->p_pptr = parent;
                child->p_ppid = parent->p_pid;
                lwkt_reltoken(&parent->p_token);
                lwkt_reltoken(&child->p_token);
                lwkt_reltoken(&opp->p_token);
                if (LIST_EMPTY(&opp->p_children))
                        wakeup(opp);
                PRELE(opp);
                break;
        }
        PRELE(parent);
}
/*
 * The next two functions are to handle adding/deleting items on the
 * exit callout list
 *
 * at_exit():
 * Take the arguments given and put them onto the exit callout list,
 * However first make sure that it's not already there.
 * returns 0 on success.
 */
int
at_exit(exitlist_fn function)
{
        struct exitlist *ep;

#ifdef INVARIANTS
        /* Be noisy if the programmer has lost track of things */
        if (rm_at_exit(function))
                kprintf("WARNING: exit callout entry (%p) already present\n",
                        function);
#endif
        ep = kmalloc(sizeof(*ep), M_ATEXIT, M_NOWAIT);
        if (ep == NULL)
                return (ENOMEM);
        ep->function = function;
        TAILQ_INSERT_TAIL(&exit_list, ep, next);
        return (0);
}
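/*
 * Typical use (sketch): a subsystem registers a callback with at_exit()
 * during initialization and removes it with rm_at_exit() when it is
 * unloaded; every registered callback is then invoked for each exiting
 * process from the TAILQ_FOREACH(ep, &exit_list, next) pass in exit1()
 * above.
 */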
/*
 * Scan the exit callout list for the given item and remove it.
 * Returns the number of items removed (0 or 1)
 */
int
rm_at_exit(exitlist_fn function)
{
        struct exitlist *ep;

        TAILQ_FOREACH(ep, &exit_list, next) {
                if (ep->function == function) {
                        TAILQ_REMOVE(&exit_list, ep, next);
                        kfree(ep, M_ATEXIT);
                        return (1);
                }
        }
        return (0);
}
/*
 * LWP reaper related code.
 */
static void
reaplwps(void *context, int dummy)
{
        struct lwplist *lwplist = context;
        struct lwp *lp;
        int cpu = mycpuid;

        lwkt_gettoken(&deadlwp_token[cpu]);
        while ((lp = LIST_FIRST(lwplist))) {
                LIST_REMOVE(lp, u.lwp_reap_entry);
                reaplwp(lp);
        }
        lwkt_reltoken(&deadlwp_token[cpu]);
}

static void
reaplwp(struct lwp *lp)
{
        while (lwp_wait(lp) == 0)
                ;
        lwp_dispose(lp);
}

static void
deadlwp_init(void)
{
        int cpu;

        for (cpu = 0; cpu < ncpus; cpu++) {
                lwkt_token_init(&deadlwp_token[cpu], "deadlwpl");
                LIST_INIT(&deadlwp_list[cpu]);
                deadlwp_task[cpu] = kmalloc(sizeof(*deadlwp_task[cpu]),
                                            M_DEVBUF, M_WAITOK);
                TASK_INIT(deadlwp_task[cpu], 0, reaplwps, &deadlwp_list[cpu]);
        }
}

SYSINIT(deadlwpinit, SI_SUB_CONFIGURE, SI_ORDER_ANY, deadlwp_init, NULL);