2 * Copyright (c) 1982, 1986, 1989, 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the University of
21 * California, Berkeley and its contributors.
22 * 4. Neither the name of the University nor the names of its contributors
23 * may be used to endorse or promote products derived from this software
24 * without specific prior written permission.
26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
38 * @(#)kern_exit.c 8.7 (Berkeley) 2/12/94
39 * $FreeBSD: src/sys/kern/kern_exit.c,v 1.92.2.11 2003/01/13 22:51:16 dillon Exp $
40 * $DragonFly: src/sys/kern/kern_exit.c,v 1.91 2008/05/18 20:02:02 nth Exp $
43 #include "opt_compat.h"
44 #include "opt_ktrace.h"
46 #include <sys/param.h>
47 #include <sys/systm.h>
48 #include <sys/sysproto.h>
49 #include <sys/kernel.h>
50 #include <sys/malloc.h>
52 #include <sys/ktrace.h>
53 #include <sys/pioctl.h>
56 #include <sys/vnode.h>
57 #include <sys/resourcevar.h>
58 #include <sys/signalvar.h>
59 #include <sys/taskqueue.h>
60 #include <sys/ptrace.h>
61 #include <sys/acct.h> /* for acct_process() function prototype */
62 #include <sys/filedesc.h>
67 #include <sys/kern_syscall.h>
68 #include <sys/upcall.h>
70 #include <sys/unistd.h>
71 #include <sys/eventhandler.h>
72 #include <sys/dsched.h>
75 #include <vm/vm_param.h>
78 #include <vm/vm_map.h>
79 #include <vm/vm_extern.h>
82 #include <sys/thread2.h>
83 #include <sys/sysref2.h>
84 #include <sys/mplock2.h>
86 static void reaplwps(void *context, int dummy);
87 static void reaplwp(struct lwp *lp);
88 static void killlwps(struct lwp *lp);
90 static MALLOC_DEFINE(M_ATEXIT, "atexit", "atexit callback");
91 static MALLOC_DEFINE(M_ZOMBIE, "zombie", "zombie proc status");
93 static struct lwkt_token deadlwp_token = LWKT_TOKEN_INITIALIZER(deadlwp_token);
96 * callout list for things to do at exit time
100 TAILQ_ENTRY(exitlist) next;
103 TAILQ_HEAD(exit_list_head, exitlist);
104 static struct exit_list_head exit_list = TAILQ_HEAD_INITIALIZER(exit_list);
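/*
 * Per-cpu lists of exited lwps awaiting final reaping, along with the
 * taskqueue tasks that drain them (see reaplwps() below).  Entries are
 * queued by lwp_exit() under deadlwp_token.
 */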
109 struct task *deadlwp_task[MAXCPU];
110 struct lwplist deadlwp_list[MAXCPU];
116 * SYS_EXIT_ARGS(int rval)
119 sys_exit(struct exit_args *uap)
121 exit1(W_EXITCODE(uap->rval, 0));
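/* NOTREACHED - exit1() does not return */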
127 * Death of a lwp or process with optional bells and whistles.
132 sys_extexit(struct extexit_args *uap)
134 struct proc *p = curproc;
138 action = EXTEXIT_ACTION(uap->how);
139 who = EXTEXIT_WHO(uap->how);
141 /* Check parameters before we might perform some action */
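/*
 * For the EXTEXIT_SETINT action the final status is copied out to the
 * user-supplied address before any teardown begins.
 */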
154 error = copyout(&uap->status, uap->addr, sizeof(uap->status));
162 lwkt_gettoken(&p->p_token);
167 * Be sure only to perform a simple lwp exit if there is at
168 * least one more lwp in the proc, which will call exit1()
169 * later, otherwise the proc will be an UNDEAD and not even a
170 * zombie.
172 if (p->p_nthreads > 1) {
173 lwp_exit(0); /* called w/ p_token held */
176 /* else last lwp in proc: do the real thing */
178 default: /* to help gcc */
180 lwkt_reltoken(&p->p_token);
181 exit1(W_EXITCODE(uap->status, 0));
186 lwkt_reltoken(&p->p_token); /* safety */
190 * Kill all lwps associated with the current process except the
191 * current lwp. Return an error if we race another thread trying to
192 * do the same thing and lose the race.
194 * If forexec is non-zero the current thread and process flags are
195 * cleaned up so they can be reused.
198 killalllwps(int forexec)
200 struct lwp *lp = curthread->td_lwp;
201 struct proc *p = lp->lwp_proc;
204 * Interlock against P_WEXIT. Only one of the process's threads
205 * is allowed to do the master exit.
207 if (p->p_flag & P_WEXIT)
209 p->p_flag |= P_WEXIT;
212 * Interlock with LWP_WEXIT and kill any remaining LWPs
214 lp->lwp_flag |= LWP_WEXIT;
215 if (p->p_nthreads > 1)
219 * If doing this for an exec, clean up the remaining thread
220 * (us) for continuing operation after all the other threads
224 lp->lwp_flag &= ~LWP_WEXIT;
225 p->p_flag &= ~P_WEXIT;
231 * Kill all LWPs except the current one. Do not try to signal
232 * LWPs which have exited on their own or have already been
233 * signaled.
236 killlwps(struct lwp *lp)
238 struct proc *p = lp->lwp_proc;
242 * Kill the remaining LWPs. We must send the signal before setting
243 * LWP_WEXIT. The setting of WEXIT is optional but helps reduce
244 * races. tlp must be held across the call as it might block and
245 * allow the target lwp to rip itself out from under our loop.
247 FOREACH_LWP_IN_PROC(tlp, p) {
249 if ((tlp->lwp_flag & LWP_WEXIT) == 0) {
250 lwpsignal(p, tlp, SIGKILL);
251 tlp->lwp_flag |= LWP_WEXIT;
257 * Wait for everything to clear out.
259 while (p->p_nthreads > 1) {
260 tsleep(&p->p_nthreads, 0, "killlwps", 0);
265 * Exit: deallocate address space and other resources, change proc state
266 * to zombie, and unlink proc from allproc and parent's lists. Save exit
267 * status and rusage for wait(). Check for child processes and orphan them.
272 struct thread *td = curthread;
273 struct proc *p = td->td_proc;
274 struct lwp *lp = td->td_lwp;
281 lwkt_gettoken(&p->p_token);
284 kprintf("init died (signal %d, exit %d)\n",
285 WTERMSIG(rv), WEXITSTATUS(rv));
286 panic("Going nowhere without my init!");
288 varsymset_clean(&p->p_varsymset);
289 lockuninit(&p->p_varsymset.vx_lock);
291 * Kill all lwps associated with the current process, return an
292 * error if we race another thread trying to do the same thing
293 * and lose the race.
295 error = killalllwps(0);
301 caps_exit(lp->lwp_thread);
304 /* are we a task leader? */
305 if (p == p->p_leader) {
306 struct kill_args killArgs;
307 killArgs.signum = SIGKILL;
310 killArgs.pid = q->p_pid;
312 * The interface for kill is better
313 * than the internal signal
320 tsleep((caddr_t)p, 0, "exit1", 0);
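/*
 * The matching wakeup() is issued as each peer exits and unlinks
 * itself from the leader's p_peers list further below in exit1().
 */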
326 STOPEVENT(p, S_EXIT, rv);
327 wakeup(&p->p_stype); /* Wakeup anyone in procfs' PIOCWAIT */
330 * Check if any loadable modules need anything done at process exit.
331 * e.g. SYSV IPC stuff
332 * XXX what if one of these generates an error?
335 EVENTHANDLER_INVOKE(process_exit, p);
338 * XXX: imho, the eventhandler stuff is much cleaner than this.
339 * Maybe we should move everything to use eventhandler.
341 TAILQ_FOREACH(ep, &exit_list, next)
344 if (p->p_flag & P_PROFIL)
347 * If parent is waiting for us to exit or exec,
348 * P_PPWAIT is set; we will wakeup the parent below.
350 p->p_flag &= ~(P_TRACED | P_PPWAIT);
351 SIGEMPTYSET(p->p_siglist);
352 SIGEMPTYSET(lp->lwp_siglist);
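/*
 * If an ITIMER_REAL interval timer is armed, cancel its callout so it
 * cannot fire while the process is being torn down.
 */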
353 if (timevalisset(&p->p_realtimer.it_value))
354 callout_stop(&p->p_ithandle);
357 * Reset any sigio structures pointing to us as a result of
358 * F_SETOWN with our pid.
360 funsetownlst(&p->p_sigiolst);
363 * Close open files and release open-file table.
368 if (p->p_leader->p_peers) {
370 while (q->p_peers != p)
372 q->p_peers = p->p_peers;
373 wakeup((caddr_t)p->p_leader);
377 * XXX Shutdown SYSV semaphores
381 KKASSERT(p->p_numposixlocks == 0);
383 /* The next two chunks should probably be moved to vmspace_exit. */
387 * Release upcalls associated with this process
393 * Clean up data related to virtual kernel operation. Clean up
394 * any vkernel context related to the current lwp now so we can
398 vkernel_lwp_exit(lp);
403 * Release user portion of address space.
404 * This releases references to vnodes,
405 * which could cause I/O if the file has been unlinked.
406 * Need to do this early enough that we can still sleep.
407 * Can't free the entire vmspace as the kernel stack
408 * may be mapped within that space also.
410 * Processes sharing the same vmspace may exit in one order, and
411 * get cleaned up by vmspace_exit() in a different order. The
412 * last exiting process to reach this point releases as much of
413 * the environment as it can, and the last process cleaned up
414 * by vmspace_exit() (which decrements exitingcnt) cleans up the
415 * remainder.
417 vmspace_exitbump(vm);
418 sysref_put(&vm->vm_sysref);
420 if (SESS_LEADER(p)) {
421 struct session *sp = p->p_session;
425 * We are the controlling process. Signal the
426 * foreground process group, drain the controlling
427 * terminal, and revoke access to the controlling
428 * terminal.
430 * NOTE: while waiting for the process group to exit
431 * it is possible that one of the processes in the
432 * group will revoke the tty, so the ttyclosesession()
433 * function will re-check sp->s_ttyvp.
435 if (sp->s_ttyp && (sp->s_ttyp->t_session == sp)) {
436 if (sp->s_ttyp->t_pgrp)
437 pgsignal(sp->s_ttyp->t_pgrp, SIGHUP, 1);
439 ttyclosesession(sp, 1); /* also revoke */
442 * Release the tty. If someone has it open via
443 * /dev/tty then close it (since they no longer can
444 * once we've NULL'd it out).
446 ttyclosesession(sp, 0);
449 * s_ttyp is not zero'd; we use this to indicate
450 * that the session once had a controlling terminal.
451 * (for logging and informational purposes)
456 fixjobc(p, p->p_pgrp, 0);
457 (void)acct_process(p);
463 ktrdestroy(&p->p_tracenode);
467 * Release reference to text vnode
469 if ((vtmp = p->p_textvp) != NULL) {
474 /* Release namecache handle to text file */
475 if (p->p_textnch.ncp)
476 cache_drop(&p->p_textnch);
479 * Move the process to the zombie list. This will block
480 * until the process p_lock count reaches 0. The process will
481 * not be reaped until TDF_EXITING is set by cpu_thread_exit(),
482 * which is called from cpu_proc_exit().
484 proc_move_allproc_zombie(p);
486 q = LIST_FIRST(&p->p_children);
487 if (q) /* only need this if any child is S_ZOMB */
488 wakeup((caddr_t) initproc);
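/*
 * Reparent all of our children to init.  init was woken above, if we
 * have any children at all, since some may already be zombies for it
 * to reap.
 */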
489 for (; q != 0; q = nq) {
490 nq = LIST_NEXT(q, p_sibling);
491 LIST_REMOVE(q, p_sibling);
492 LIST_INSERT_HEAD(&initproc->p_children, q, p_sibling);
493 q->p_pptr = initproc;
494 q->p_sigparent = SIGCHLD;
496 * Traced processes are killed
497 * since their existence means someone is screwing up.
499 if (q->p_flag & P_TRACED) {
500 q->p_flag &= ~P_TRACED;
506 * Save exit status and final rusage info, adding in child rusage
507 * info and self times.
509 calcru_proc(p, &p->p_ru);
510 ruadd(&p->p_ru, &p->p_cru);
513 * notify interested parties of our demise.
515 KNOTE(&p->p_klist, NOTE_EXIT);
518 * Notify parent that we're gone. If parent has the PS_NOCLDWAIT
519 * flag set, notify process 1 instead (and hope it will handle
522 if (p->p_pptr->p_sigacts->ps_flag & PS_NOCLDWAIT) {
523 struct proc *pp = p->p_pptr;
526 lwkt_gettoken(&pp->p_token);
527 proc_reparent(p, initproc);
530 * If this was the last child of our parent, notify
531 * parent, so in case he was wait(2)ing, he will
532 * continue. This function interlocks with pptr->p_token.
534 if (LIST_EMPTY(&pp->p_children))
536 lwkt_reltoken(&pp->p_token);
540 /* lwkt_gettoken(&proc_token); */
543 if (p->p_sigparent && q != initproc) {
544 ksignal(q, p->p_sigparent);
550 /* lwkt_reltoken(&proc_token); */
551 /* NOTE: p->p_pptr can get ripped out */
553 * cpu_exit is responsible for clearing curproc, since
554 * it is heavily integrated with the thread/switching sequence.
556 * Other substructures are freed from wait().
561 * Release the current user process designation on the process so
562 * the userland scheduler can work in someone else.
564 p->p_usched->release_curproc(lp);
567 * Finally, call machine-dependent code to release as many of the
568 * lwp's resources as we can and halt execution of this thread.
574 * Eventually called by every exiting LWP
576 * p->p_token must be held. mplock may be held and will be released.
579 lwp_exit(int masterexit)
581 struct thread *td = curthread;
582 struct lwp *lp = td->td_lwp;
583 struct proc *p = lp->lwp_proc;
586 * lwp_exit() may be called without setting LWP_WEXIT, so
587 * make sure it is set here.
589 ASSERT_LWKT_TOKEN_HELD(&p->p_token);
590 lp->lwp_flag |= LWP_WEXIT;
593 * Clean up any virtualization
596 vkernel_lwp_exit(lp);
599 * Clean up select/poll support
601 kqueue_terminate(&lp->lwp_kqueue);
604 * Clean up any syscall-cached ucred
607 crfree(td->td_ucred);
612 * Nobody actually wakes us when the lock
613 * count reaches zero, so just wait one tick.
615 while (lp->lwp_lock > 0)
616 tsleep(lp, 0, "lwpexit", 1);
618 /* Hand down resource usage to our proc */
619 ruadd(&p->p_ru, &lp->lwp_ru);
622 * If we don't hold the process until the LWP is reaped wait*()
623 * may try to dispose of its vmspace before all the LWPs have
624 * actually terminated.
629 * Do any remaining work that might block on us. We should be
630 * coded such that further blocking is ok after decrementing
631 * p_nthreads but don't take the chance.
633 dsched_exit_thread(td);
634 biosched_done(curthread);
637 * We have to use the reaper for all the LWPs except the one doing
638 * the master exit. The LWP doing the master exit can just be
639 * left on p_lwps and the process reaper will deal with it
640 * synchronously, which is much faster.
642 * Wakeup anyone waiting on p_nthreads to drop to 1 or 0.
644 if (masterexit == 0) {
645 lwp_rb_tree_RB_REMOVE(&p->p_lwp_tree, lp);
647 if (p->p_nthreads <= 1)
648 wakeup(&p->p_nthreads);
649 lwkt_gettoken(&deadlwp_token);
650 LIST_INSERT_HEAD(&deadlwp_list[mycpuid], lp, u.lwp_reap_entry);
651 taskqueue_enqueue(taskqueue_thread[mycpuid],
652 deadlwp_task[mycpuid]);
653 lwkt_reltoken(&deadlwp_token);
656 if (p->p_nthreads <= 1)
657 wakeup(&p->p_nthreads);
661 * Release p_token. The mp_token may also be held and we depend on
662 * the lwkt_switch() code to clean it up.
664 lwkt_reltoken(&p->p_token);
669 * Wait until a lwp is completely dead.
671 * If the thread is still executing, which can't be waited upon,
672 * return failure. The caller is responsible for waiting a little
673 * bit and checking again.
676 * while (!lwp_wait(lp))
677 * tsleep(lp, 0, "lwpwait", 1);
680 lwp_wait(struct lwp *lp)
682 struct thread *td = lp->lwp_thread;
684 KKASSERT(lwkt_preempted_proc() != lp);
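/*
 * Wait for transient holds on the lwp to drain.  As in lwp_exit(), no
 * wakeup is issued when lwp_lock reaches zero, so poll once per tick.
 */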
686 while (lp->lwp_lock > 0)
687 tsleep(lp, 0, "lwpwait1", 1);
692 * The lwp's thread may still be in the middle
693 * of switching away, we can't rip its stack out from
694 * under it until TDF_EXITING is set and both
695 * TDF_RUNNING and TDF_PREEMPT_LOCK are clear.
696 * TDF_PREEMPT_LOCK must be checked because TDF_RUNNING
697 * will be cleared temporarily if a thread gets
698 * preempted.
700 * YYY no wakeup occurs, so we simply return failure
701 * and let the caller deal with sleeping and calling
704 if ((td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK|
705 TDF_EXITING|TDF_RUNQ)) != TDF_EXITING) {
708 KASSERT((td->td_flags & TDF_TSLEEPQ) == 0,
709 ("lwp_wait: td %p (%s) still on sleep queue", td, td->td_comm));
714 * Release the resources associated with a lwp.
715 * The lwp must be completely dead.
718 lwp_dispose(struct lwp *lp)
720 struct thread *td = lp->lwp_thread;
722 KKASSERT(lwkt_preempted_proc() != lp);
723 KKASSERT(td->td_refs == 0);
724 KKASSERT((td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK|TDF_EXITING)) ==
725 TDF_EXITING);
732 lp->lwp_thread = NULL;
733 lwkt_free_thread(td);
742 sys_wait4(struct wait_args *uap)
744 struct rusage rusage;
747 error = kern_wait(uap->pid, (uap->status ? &status : NULL),
748 uap->options, (uap->rusage ? &rusage : NULL),
749 &uap->sysmsg_result);
751 if (error == 0 && uap->status)
752 error = copyout(&status, uap->status, sizeof(*uap->status));
753 if (error == 0 && uap->rusage)
754 error = copyout(&rusage, uap->rusage, sizeof(*uap->rusage));
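/*
 * Status and rusage are copied out only after a successful kern_wait(),
 * so a failing wait4() leaves the user buffers untouched.
 */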
761 * wait_args(int pid, int *status, int options, struct rusage *rusage)
766 kern_wait(pid_t pid, int *status, int options, struct rusage *rusage, int *res)
768 struct thread *td = curthread;
770 struct proc *q = td->td_proc;
776 if (options &~ (WUNTRACED|WNOHANG|WCONTINUED|WLINUXCLONE))
779 lwkt_gettoken(&q->p_token);
782 * All sorts of things can change due to blocking so we have to loop
783 * all the way back up here.
785 * The problem is that if a process group is stopped and the parent
786 * is doing a wait*(..., WUNTRACED, ...), it will see the STOP
787 * of the child and then stop itself when it tries to return from the
788 * system call. When the process group is resumed the parent will
789 * then get the STOP status even though the child has now resumed
790 * (a followup wait*() will get the CONT status).
792 * Previously the CONT would overwrite the STOP because the tstop
793 * was handled within tsleep(), and the parent would only see
794 * the CONT when both are stopped and continued together. This little
795 * two-line hack restores this effect.
797 while (q->p_stat == SSTOP)
802 LIST_FOREACH(p, &q->p_children, p_sibling) {
803 if (pid != WAIT_ANY &&
804 p->p_pid != pid && p->p_pgid != -pid) {
809 * This special case handles a kthread spawned by linux_clone
810 * (see linux_misc.c). The linux_wait4 and linux_waitpid
811 * functions need to be able to distinguish between waiting
812 * on a process and waiting on a thread. It is a thread if
813 * p_sigparent is not SIGCHLD, and the WLINUXCLONE option
814 * signifies we want to wait for threads and not processes.
816 if ((p->p_sigparent != SIGCHLD) ^
817 ((options & WLINUXCLONE) != 0)) {
822 if (p->p_stat == SZOMB) {
824 * We may go into SZOMB with threads still present.
825 * We must wait for them to exit before we can reap
826 * the master thread, otherwise we may race reaping
827 * non-master threads.
829 lwkt_gettoken(&p->p_token);
830 while (p->p_nthreads > 0) {
831 tsleep(&p->p_nthreads, 0, "lwpzomb", hz);
835 * Reap any LWPs left in p->p_lwps. This is usually
836 * just the last LWP. This must be done before
837 * we loop on p_lock since the lwps hold a ref on
838 * it as a vmspace interlock.
840 * Once that is accomplished p_nthreads had better
841 * be zero.
843 while ((lp = RB_ROOT(&p->p_lwp_tree)) != NULL) {
844 lwp_rb_tree_RB_REMOVE(&p->p_lwp_tree, lp);
847 KKASSERT(p->p_nthreads == 0);
848 lwkt_reltoken(&p->p_token);
851 * Don't do anything really bad until all references
852 * to the process go away. This may include other
853 * LWPs which are still in the process of being
854 * reaped. We can't just pull the rug out from under
855 * them because they may still be using the VM space.
857 * Certain kernel facilities such as /proc will also
858 * put a hold on the process for short periods of
859 * time.
862 tsleep(p, 0, "reap3", hz);
864 /* Take care of our return values. */
867 *status = p->p_xstat;
871 * If we got the child via a ptrace 'attach',
872 * we need to give it back to the old parent.
874 if (p->p_oppid && (t = pfind(p->p_oppid)) != NULL) {
885 * Unlink the proc from its process group so that
886 * the following operations won't lead to an
887 * inconsistent state for processes running down
888 * the zombie list.
890 KKASSERT(p->p_lock == 0);
891 proc_remove_zombie(p);
895 ruadd(&q->p_cru, &p->p_ru);
898 * Decrement the count of procs running with this uid.
900 chgproccnt(p->p_ucred->cr_ruidinfo, -1, 0);
903 * Free up credentials.
909 * Remove unused arguments
911 if (p->p_args && --p->p_args->ar_ref == 0)
912 FREE(p->p_args, M_PARGS);
914 if (--p->p_sigacts->ps_refcnt == 0) {
915 kfree(p->p_sigacts, M_SUBPROC);
925 if (p->p_stat == SSTOP && (p->p_flag & P_WAITED) == 0 &&
926 (p->p_flag & P_TRACED || options & WUNTRACED)) {
927 p->p_flag |= P_WAITED;
931 *status = W_STOPCODE(p->p_xstat);
932 /* Zero rusage so we get something consistent. */
934 bzero(rusage, sizeof(*rusage));
938 if (options & WCONTINUED && (p->p_flag & P_CONTINUED)) {
940 p->p_flag &= ~P_CONTINUED;
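/*
 * P_CONTINUED is cleared above so the continue event is reported to a
 * waiter only once.
 */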
952 if (options & WNOHANG) {
959 * Wait for signal - interlocked using q->p_token.
961 error = tsleep(q, PCATCH, "wait", 0);
964 lwkt_reltoken(&q->p_token);
971 * Make process 'parent' the new parent of process 'child'.
974 proc_reparent(struct proc *child, struct proc *parent)
976 if (child->p_pptr == parent)
979 lwkt_gettoken(&child->p_token);
980 lwkt_gettoken(&parent->p_token);
981 LIST_REMOVE(child, p_sibling);
982 LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);
983 child->p_pptr = parent;
984 lwkt_reltoken(&parent->p_token);
985 lwkt_reltoken(&child->p_token);
990 * The next two functions are to handle adding/deleting items on the
991 * exit callout list.
994 * Take the arguments given and put them onto the exit callout list;
995 * however, first make sure that it's not already there.
996 * Returns 0 on success.
1000 at_exit(exitlist_fn function)
1002 struct exitlist *ep;
1005 /* Be noisy if the programmer has lost track of things */
1006 if (rm_at_exit(function))
1007 kprintf("WARNING: exit callout entry (%p) already present\n",
1010 ep = kmalloc(sizeof(*ep), M_ATEXIT, M_NOWAIT);
1013 ep->function = function;
1014 TAILQ_INSERT_TAIL(&exit_list, ep, next);
1019 * Scan the exit callout list for the given item and remove it.
1020 * Returns the number of items removed (0 or 1)
1023 rm_at_exit(exitlist_fn function)
1025 struct exitlist *ep;
1027 TAILQ_FOREACH(ep, &exit_list, next) {
1028 if (ep->function == function) {
1029 TAILQ_REMOVE(&exit_list, ep, next);
1030 kfree(ep, M_ATEXIT);
1038 * LWP reaper related code.
1041 reaplwps(void *context, int dummy)
1043 struct lwplist *lwplist = context;
1046 lwkt_gettoken(&deadlwp_token);
1047 while ((lp = LIST_FIRST(lwplist))) {
1048 LIST_REMOVE(lp, u.lwp_reap_entry);
1051 lwkt_reltoken(&deadlwp_token);
1055 reaplwp(struct lwp *lp)
1057 while (lwp_wait(lp) == 0)
1058 tsleep(lp, 0, "lwpreap", 1);
1067 for (cpu = 0; cpu < ncpus; cpu++) {
1068 LIST_INIT(&deadlwp_list[cpu]);
1069 deadlwp_task[cpu] = kmalloc(sizeof(*deadlwp_task[cpu]), M_DEVBUF, M_WAITOK);
1070 TASK_INIT(deadlwp_task[cpu], 0, reaplwps, &deadlwp_list[cpu]);
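/*
 * The SYSINIT below runs deadlwp_init() during system configuration so
 * the per-cpu reaper lists and tasks are set up before they are needed.
 */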
1074 SYSINIT(deadlwpinit, SI_SUB_CONFIGURE, SI_ORDER_ANY, deadlwp_init, NULL);