| 1 | /* |
| 2 | * Copyright (c) 1982, 1986, 1989, 1991, 1993 |
| 3 | * The Regents of the University of California. All rights reserved. |
| 4 | * (c) UNIX System Laboratories, Inc. |
| 5 | * All or some portions of this file are derived from material licensed |
| 6 | * to the University of California by American Telephone and Telegraph |
| 7 | * Co. or Unix System Laboratories, Inc. and are reproduced herein with |
| 8 | * the permission of UNIX System Laboratories, Inc. |
| 9 | * |
| 10 | * Redistribution and use in source and binary forms, with or without |
| 11 | * modification, are permitted provided that the following conditions |
| 12 | * are met: |
| 13 | * 1. Redistributions of source code must retain the above copyright |
| 14 | * notice, this list of conditions and the following disclaimer. |
| 15 | * 2. Redistributions in binary form must reproduce the above copyright |
| 16 | * notice, this list of conditions and the following disclaimer in the |
| 17 | * documentation and/or other materials provided with the distribution. |
| 18 | * 3. All advertising materials mentioning features or use of this software |
| 19 | * must display the following acknowledgement: |
| 20 | * This product includes software developed by the University of |
| 21 | * California, Berkeley and its contributors. |
| 22 | * 4. Neither the name of the University nor the names of its contributors |
| 23 | * may be used to endorse or promote products derived from this software |
| 24 | * without specific prior written permission. |
| 25 | * |
| 26 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND |
| 27 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
| 28 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
| 29 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE |
| 30 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
| 31 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
| 32 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
| 33 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
| 34 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
| 35 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
| 36 | * SUCH DAMAGE. |
| 37 | * |
| 38 | * @(#)kern_exit.c 8.7 (Berkeley) 2/12/94 |
| 39 | * $FreeBSD: src/sys/kern/kern_exit.c,v 1.92.2.11 2003/01/13 22:51:16 dillon Exp $ |
| 40 | * $DragonFly: src/sys/kern/kern_exit.c,v 1.91 2008/05/18 20:02:02 nth Exp $ |
| 41 | */ |
| 42 | |
| 43 | #include "opt_compat.h" |
| 44 | #include "opt_ktrace.h" |
| 45 | |
| 46 | #include <sys/param.h> |
| 47 | #include <sys/systm.h> |
| 48 | #include <sys/sysproto.h> |
| 49 | #include <sys/kernel.h> |
| 50 | #include <sys/malloc.h> |
| 51 | #include <sys/proc.h> |
| 52 | #include <sys/ktrace.h> |
| 53 | #include <sys/pioctl.h> |
| 54 | #include <sys/tty.h> |
| 55 | #include <sys/wait.h> |
| 56 | #include <sys/vnode.h> |
| 57 | #include <sys/resourcevar.h> |
| 58 | #include <sys/signalvar.h> |
| 59 | #include <sys/taskqueue.h> |
| 60 | #include <sys/ptrace.h> |
| 61 | #include <sys/acct.h> /* for acct_process() function prototype */ |
| 62 | #include <sys/filedesc.h> |
| 63 | #include <sys/shm.h> |
| 64 | #include <sys/sem.h> |
| 65 | #include <sys/jail.h> |
| 66 | #include <sys/kern_syscall.h> |
| 67 | #include <sys/upcall.h> |
| 68 | #include <sys/caps.h> |
| 69 | #include <sys/unistd.h> |
| 70 | #include <sys/eventhandler.h> |
| 71 | #include <sys/dsched.h> |
| 72 | |
| 73 | #include <vm/vm.h> |
| 74 | #include <vm/vm_param.h> |
| 75 | #include <sys/lock.h> |
| 76 | #include <vm/pmap.h> |
| 77 | #include <vm/vm_map.h> |
| 78 | #include <vm/vm_extern.h> |
| 79 | #include <sys/user.h> |
| 80 | |
| 81 | #include <sys/refcount.h> |
| 82 | #include <sys/thread2.h> |
| 83 | #include <sys/sysref2.h> |
| 84 | #include <sys/mplock2.h> |
| 85 | |
| 86 | static void reaplwps(void *context, int dummy); |
| 87 | static void reaplwp(struct lwp *lp); |
| 88 | static void killlwps(struct lwp *lp); |
| 89 | |
| 90 | static MALLOC_DEFINE(M_ATEXIT, "atexit", "atexit callback"); |
| 91 | static MALLOC_DEFINE(M_ZOMBIE, "zombie", "zombie proc status"); |
| 92 | |
| 93 | static struct lwkt_token deadlwp_token = LWKT_TOKEN_INITIALIZER(deadlwp_token); |
| 94 | |
| 95 | /* |
| 96 | * callout list for things to do at exit time |
| 97 | */ |
| 98 | struct exitlist { |
| 99 | exitlist_fn function; |
| 100 | TAILQ_ENTRY(exitlist) next; |
| 101 | }; |
| 102 | |
| 103 | TAILQ_HEAD(exit_list_head, exitlist); |
| 104 | static struct exit_list_head exit_list = TAILQ_HEAD_INITIALIZER(exit_list); |
| 105 | |
| 106 | /* |
| 107 | * LWP reaper data |
| 108 | */ |
| 109 | struct task *deadlwp_task[MAXCPU]; |
| 110 | struct lwplist deadlwp_list[MAXCPU]; |
| 111 | |
| 112 | /* |
| 113 | * exit -- |
| 114 | * Death of process. |
| 115 | * |
| 116 | * SYS_EXIT_ARGS(int rval) |
| 117 | */ |
| 118 | int |
| 119 | sys_exit(struct exit_args *uap) |
| 120 | { |
| 121 | exit1(W_EXITCODE(uap->rval, 0)); |
| 122 | /* NOTREACHED */ |
| 123 | } |
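|  | |
|  | /* |
|  | * Aside: W_EXITCODE(rval, 0) packs the user-supplied return value |
|  | * into the traditional BSD wait status word (return value in the |
|  | * high byte, terminating signal in the low byte), which kern_wait() |
|  | * later hands back to the parent via p_xstat. A minimal sketch of |
|  | * the encoding, assuming the classic <sys/wait.h> layout: |
|  | */ |
|  | #if 0 |
|  | int status = W_EXITCODE(2, 0); /* 2 << 8 under the classic layout */ |
|  | KKASSERT(WIFEXITED(status) && WEXITSTATUS(status) == 2); |
|  | #endif |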
| 124 | |
| 125 | /* |
| 126 | * Extended exit -- |
| 127 | * Death of a lwp or process with optional bells and whistles. |
| 128 | * |
| 129 | * MPALMOSTSAFE |
| 130 | */ |
| 131 | int |
| 132 | sys_extexit(struct extexit_args *uap) |
| 133 | { |
| 134 | struct proc *p = curproc; |
| 135 | int action, who; |
| 136 | int error; |
| 137 | |
| 138 | action = EXTEXIT_ACTION(uap->how); |
| 139 | who = EXTEXIT_WHO(uap->how); |
| 140 | |
| 141 | /* Check parameters before we might perform some action */ |
| 142 | switch (who) { |
| 143 | case EXTEXIT_PROC: |
| 144 | case EXTEXIT_LWP: |
| 145 | break; |
| 146 | default: |
| 147 | return (EINVAL); |
| 148 | } |
| 149 | |
| 150 | switch (action) { |
| 151 | case EXTEXIT_SIMPLE: |
| 152 | break; |
| 153 | case EXTEXIT_SETINT: |
| 154 | error = copyout(&uap->status, uap->addr, sizeof(uap->status)); |
| 155 | if (error) |
| 156 | return (error); |
| 157 | break; |
| 158 | default: |
| 159 | return (EINVAL); |
| 160 | } |
| 161 | |
| 162 | lwkt_gettoken(&p->p_token); |
| 163 | |
| 164 | switch (who) { |
| 165 | case EXTEXIT_LWP: |
| 166 | /* |
| 167 | * Only perform a simple lwp exit if there is at least one other |
| 168 | * lwp remaining in the proc; that lwp will call exit1() later. |
| 169 | * Otherwise the proc would be left an UNDEAD and never even |
| 170 | * become a SZOMB! |
| 171 | */ |
| 172 | if (p->p_nthreads > 1) { |
| 173 | lwp_exit(0); /* called w/ p_token held */ |
| 174 | /* NOT REACHED */ |
| 175 | } |
| 176 | /* else last lwp in proc: do the real thing */ |
| 177 | /* FALLTHROUGH */ |
| 178 | default: /* to help gcc */ |
| 179 | case EXTEXIT_PROC: |
| 180 | lwkt_reltoken(&p->p_token); |
| 181 | exit1(W_EXITCODE(uap->status, 0)); |
| 182 | /* NOTREACHED */ |
| 183 | } |
| 184 | |
| 185 | /* NOTREACHED */ |
| 186 | lwkt_reltoken(&p->p_token); /* safety */ |
| 187 | } |
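|  | |
|  | /* |
|  | * Illustrative sketch (not part of the kernel build): how userland |
|  | * might drive the dispatch above. The exact userland prototype and |
|  | * the way the EXTEXIT_* action and scope values are combined into |
|  | * 'how' are assumptions here; see <sys/unistd.h> and the extexit(2) |
|  | * manual page for the authoritative definitions. |
|  | */ |
|  | #if 0 |
|  | static int exit_slot; /* hypothetical status slot */ |
|  | |
|  | /* Exit only the calling lwp, storing 0 in exit_slot first. */ |
|  | extexit(EXTEXIT_LWP | EXTEXIT_SETINT, 0, &exit_slot); |
|  | |
|  | /* Exit the whole process; ends up in exit1(W_EXITCODE(0, 0)). */ |
|  | extexit(EXTEXIT_PROC | EXTEXIT_SIMPLE, 0, NULL); |
|  | #endif |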
| 188 | |
| 189 | /* |
| 190 | * Kill all lwps associated with the current process except the |
| 191 | * current lwp. Return an error if we race another thread trying to |
| 192 | * do the same thing and lose the race. |
| 193 | * |
| 194 | * If forexec is non-zero the current thread and process flags are |
| 195 | * cleaned up so they can be reused. |
| 196 | * |
| 197 | * Caller must hold curproc->p_token |
| 198 | */ |
| 199 | int |
| 200 | killalllwps(int forexec) |
| 201 | { |
| 202 | struct lwp *lp = curthread->td_lwp; |
| 203 | struct proc *p = lp->lwp_proc; |
| 204 | |
| 205 | /* |
| 206 | * Interlock against P_WEXIT. Only one of the process's threads |
| 207 | * is allowed to do the master exit. |
| 208 | */ |
| 209 | if (p->p_flags & P_WEXIT) |
| 210 | return (EALREADY); |
| 211 | p->p_flags |= P_WEXIT; |
| 212 | |
| 213 | /* |
| 214 | * Interlock with LWP_MP_WEXIT and kill any remaining LWPs |
| 215 | */ |
| 216 | atomic_set_int(&lp->lwp_mpflags, LWP_MP_WEXIT); |
| 217 | if (p->p_nthreads > 1) |
| 218 | killlwps(lp); |
| 219 | |
| 220 | /* |
| 221 | * If doing this for an exec, clean up the remaining thread |
| 222 | * (us) for continuing operation after all the other threads |
| 223 | * have been killed. |
| 224 | */ |
| 225 | if (forexec) { |
| 226 | atomic_clear_int(&lp->lwp_mpflags, LWP_MP_WEXIT); |
| 227 | p->p_flags &= ~P_WEXIT; |
| 228 | } |
| 229 | return(0); |
| 230 | } |
| 231 | |
| 232 | /* |
| 233 | * Kill all LWPs except the current one. Do not try to signal |
| 234 | * LWPs which have exited on their own or have already been |
| 235 | * signaled. |
| 236 | */ |
| 237 | static void |
| 238 | killlwps(struct lwp *lp) |
| 239 | { |
| 240 | struct proc *p = lp->lwp_proc; |
| 241 | struct lwp *tlp; |
| 242 | |
| 243 | /* |
| 244 | * Kill the remaining LWPs. We must send the signal before setting |
| 245 | * LWP_MP_WEXIT. The setting of WEXIT is optional but helps reduce |
| 246 | * races. tlp must be held across the call as it might block and |
| 247 | * allow the target lwp to rip itself out from under our loop. |
| 248 | */ |
| 249 | FOREACH_LWP_IN_PROC(tlp, p) { |
| 250 | LWPHOLD(tlp); |
| 251 | lwkt_gettoken(&tlp->lwp_token); |
| 252 | if ((tlp->lwp_mpflags & LWP_MP_WEXIT) == 0) { |
| 253 | lwpsignal(p, tlp, SIGKILL); |
| 254 | atomic_set_int(&tlp->lwp_mpflags, LWP_MP_WEXIT); |
| 255 | } |
| 256 | lwkt_reltoken(&tlp->lwp_token); |
| 257 | LWPRELE(tlp); |
| 258 | } |
| 259 | |
| 260 | /* |
| 261 | * Wait for everything to clear out. |
| 262 | */ |
| 263 | while (p->p_nthreads > 1) { |
| 264 | tsleep(&p->p_nthreads, 0, "killlwps", 0); |
| 265 | } |
| 266 | } |
| 267 | |
| 268 | /* |
| 269 | * Exit: deallocate address space and other resources, change proc state |
| 270 | * to zombie, and unlink proc from allproc and parent's lists. Save exit |
| 271 | * status and rusage for wait(). Check for child processes and orphan them. |
| 272 | */ |
| 273 | void |
| 274 | exit1(int rv) |
| 275 | { |
| 276 | struct thread *td = curthread; |
| 277 | struct proc *p = td->td_proc; |
| 278 | struct lwp *lp = td->td_lwp; |
| 279 | struct proc *q, *nq; |
| 280 | struct vmspace *vm; |
| 281 | struct vnode *vtmp; |
| 282 | struct exitlist *ep; |
| 283 | int error; |
| 284 | |
| 285 | lwkt_gettoken(&p->p_token); |
| 286 | |
| 287 | if (p->p_pid == 1) { |
| 288 | kprintf("init died (signal %d, exit %d)\n", |
| 289 | WTERMSIG(rv), WEXITSTATUS(rv)); |
| 290 | panic("Going nowhere without my init!"); |
| 291 | } |
| 292 | varsymset_clean(&p->p_varsymset); |
| 293 | lockuninit(&p->p_varsymset.vx_lock); |
| 294 | |
| 295 | /* |
| 296 | * Kill all lwps associated with the current process, return an |
| 297 | * error if we race another thread trying to do the same thing |
| 298 | * and lose the race. |
| 299 | */ |
| 300 | error = killalllwps(0); |
| 301 | if (error) { |
| 302 | lwp_exit(0); |
| 303 | /* NOT REACHED */ |
| 304 | } |
| 305 | |
| 306 | caps_exit(lp->lwp_thread); |
| 307 | |
| 308 | /* are we a task leader? */ |
| 309 | if (p == p->p_leader) { |
| 310 | struct kill_args killArgs; |
| 311 | killArgs.signum = SIGKILL; |
| 312 | q = p->p_peers; |
| 313 | while (q) { |
| 314 | killArgs.pid = q->p_pid; |
| 315 | /* |
| 316 | * The interface for kill is better |
| 317 | * than the internal signal |
| 318 | */ |
| 319 | sys_kill(&killArgs); |
| 320 | nq = q; |
| 321 | q = q->p_peers; |
| 322 | } |
| 323 | while (p->p_peers) |
| 324 | tsleep((caddr_t)p, 0, "exit1", 0); |
| 325 | } |
| 326 | |
| 327 | #ifdef PGINPROF |
| 328 | vmsizmon(); |
| 329 | #endif |
| 330 | STOPEVENT(p, S_EXIT, rv); |
| 331 | p->p_flags |= P_POSTEXIT; /* stop procfs stepping */ |
| 332 | |
| 333 | /* |
| 334 | * Check if any loadable modules need anything done at process exit. |
| 335 | * e.g. SYSV IPC stuff |
| 336 | * XXX what if one of these generates an error? |
| 337 | */ |
| 338 | p->p_xstat = rv; |
| 339 | EVENTHANDLER_INVOKE(process_exit, p); |
| 340 | |
| 341 | /* |
| 342 | * XXX: imho, the eventhandler stuff is much cleaner than this. |
| 343 | * Maybe we should move everything to use eventhandler. |
| 344 | */ |
| 345 | TAILQ_FOREACH(ep, &exit_list, next) |
| 346 | (*ep->function)(td); |
| 347 | |
| 348 | if (p->p_flags & P_PROFIL) |
| 349 | stopprofclock(p); |
| 350 | |
| 351 | SIGEMPTYSET(p->p_siglist); |
| 352 | SIGEMPTYSET(lp->lwp_siglist); |
| 353 | if (timevalisset(&p->p_realtimer.it_value)) |
| 354 | callout_stop_sync(&p->p_ithandle); |
| 355 | |
| 356 | /* |
| 357 | * Reset any sigio structures pointing to us as a result of |
| 358 | * F_SETOWN with our pid. |
| 359 | */ |
| 360 | funsetownlst(&p->p_sigiolst); |
| 361 | |
| 362 | /* |
| 363 | * Close open files and release open-file table. |
| 364 | * This may block! |
| 365 | */ |
| 366 | fdfree(p, NULL); |
| 367 | |
| 368 | if (p->p_leader->p_peers) { |
| 369 | q = p->p_leader; |
| 370 | while (q->p_peers != p) |
| 371 | q = q->p_peers; |
| 372 | q->p_peers = p->p_peers; |
| 373 | wakeup((caddr_t)p->p_leader); |
| 374 | } |
| 375 | |
| 376 | /* |
| 377 | * XXX Shutdown SYSV semaphores |
| 378 | */ |
| 379 | semexit(p); |
| 380 | |
| 381 | KKASSERT(p->p_numposixlocks == 0); |
| 382 | |
| 383 | /* The next two chunks should probably be moved to vmspace_exit. */ |
| 384 | vm = p->p_vmspace; |
| 385 | |
| 386 | /* |
| 387 | * Release upcalls associated with this process |
| 388 | */ |
| 389 | if (vm->vm_upcalls) |
| 390 | upc_release(vm, lp); |
| 391 | |
| 392 | /* |
| 393 | * Clean up data related to virtual kernel operation. Clean up |
| 394 | * any vkernel context related to the current lwp now so we can |
| 395 | * destroy p_vkernel. |
| 396 | */ |
| 397 | if (p->p_vkernel) { |
| 398 | vkernel_lwp_exit(lp); |
| 399 | vkernel_exit(p); |
| 400 | } |
| 401 | |
| 402 | /* |
| 403 | * Release user portion of address space. |
| 404 | * This releases references to vnodes, |
| 405 | * which could cause I/O if the file has been unlinked. |
| 406 | * Need to do this early enough that we can still sleep. |
| 407 | * Can't free the entire vmspace as the kernel stack |
| 408 | * may be mapped within that space also. |
| 409 | * |
| 410 | * Processes sharing the same vmspace may exit in one order, and |
| 411 | * get cleaned up by vmspace_exit() in a different order. The |
| 412 | * last exiting process to reach this point releases as much of |
| 413 | * the environment as it can, and the last process cleaned up |
| 414 | * by vmspace_exit() (which decrements exitingcnt) cleans up the |
| 415 | * remainder. |
| 416 | */ |
| 417 | vmspace_exitbump(vm); |
| 418 | sysref_put(&vm->vm_sysref); |
| 419 | |
| 420 | if (SESS_LEADER(p)) { |
| 421 | struct session *sp = p->p_session; |
| 422 | |
| 423 | if (sp->s_ttyvp) { |
| 424 | /* |
| 425 | * We are the controlling process. Signal the |
| 426 | * foreground process group, drain the controlling |
| 427 | * terminal, and revoke access to the controlling |
| 428 | * terminal. |
| 429 | * |
| 430 | * NOTE: while waiting for the process group to exit |
| 431 | * it is possible that one of the processes in the |
| 432 | * group will revoke the tty, so the ttyclosesession() |
| 433 | * function will re-check sp->s_ttyvp. |
| 434 | */ |
| 435 | if (sp->s_ttyp && (sp->s_ttyp->t_session == sp)) { |
| 436 | if (sp->s_ttyp->t_pgrp) |
| 437 | pgsignal(sp->s_ttyp->t_pgrp, SIGHUP, 1); |
| 438 | ttywait(sp->s_ttyp); |
| 439 | ttyclosesession(sp, 1); /* also revoke */ |
| 440 | } |
| 441 | /* |
| 442 | * Release the tty. If someone has it open via |
| 443 | * /dev/tty then close it (since they no longer can |
| 444 | * once we've NULL'd it out). |
| 445 | */ |
| 446 | ttyclosesession(sp, 0); |
| 447 | |
| 448 | /* |
| 449 | * s_ttyp is not zero'd; we use this to indicate |
| 450 | * that the session once had a controlling terminal. |
| 451 | * (for logging and informational purposes) |
| 452 | */ |
| 453 | } |
| 454 | sp->s_leader = NULL; |
| 455 | } |
| 456 | fixjobc(p, p->p_pgrp, 0); |
| 457 | (void)acct_process(p); |
| 458 | #ifdef KTRACE |
| 459 | /* |
| 460 | * release trace file |
| 461 | */ |
| 462 | if (p->p_tracenode) |
| 463 | ktrdestroy(&p->p_tracenode); |
| 464 | p->p_traceflag = 0; |
| 465 | #endif |
| 466 | /* |
| 467 | * Release reference to text vnode |
| 468 | */ |
| 469 | if ((vtmp = p->p_textvp) != NULL) { |
| 470 | p->p_textvp = NULL; |
| 471 | vrele(vtmp); |
| 472 | } |
| 473 | |
| 474 | /* Release namecache handle to text file */ |
| 475 | if (p->p_textnch.ncp) |
| 476 | cache_drop(&p->p_textnch); |
| 477 | |
| 478 | /* |
| 479 | * We have to handle PPWAIT here or proc_move_allproc_zombie() |
| 480 | * will block on the PHOLD() the parent is doing. |
| 481 | */ |
| 482 | if (p->p_flags & P_PPWAIT) { |
| 483 | p->p_flags &= ~P_PPWAIT; |
| 484 | wakeup(p->p_pptr); |
| 485 | } |
| 486 | |
| 487 | /* |
| 488 | * Move the process to the zombie list. This will block |
| 489 | * until the process p_lock count reaches 0. The process will |
| 490 | * not be reaped until TDF_EXITING is set by cpu_thread_exit(), |
| 491 | * which is called from cpu_proc_exit(). |
| 492 | */ |
| 493 | proc_move_allproc_zombie(p); |
| 494 | |
| 495 | /* |
| 496 | * Reparent all of this process's children to the init process. |
| 497 | * We must hold initproc->p_token in order to mess with |
| 498 | * initproc->p_children. We already hold p->p_token (to remove |
| 499 | * the children from our list). |
| 500 | */ |
| 501 | q = LIST_FIRST(&p->p_children); |
| 502 | if (q) { |
| 503 | lwkt_gettoken(&initproc->p_token); |
| 504 | while ((q = LIST_FIRST(&p->p_children)) != NULL) { |
| 505 | PHOLD(q); |
| 506 | lwkt_gettoken(&q->p_token); |
| 507 | if (q != LIST_FIRST(&p->p_children)) { |
| 508 | lwkt_reltoken(&q->p_token); |
| 509 | PRELE(q); |
| 510 | continue; |
| 511 | } |
| 512 | LIST_REMOVE(q, p_sibling); |
| 513 | LIST_INSERT_HEAD(&initproc->p_children, q, p_sibling); |
| 514 | q->p_pptr = initproc; |
| 515 | q->p_sigparent = SIGCHLD; |
| 516 | |
| 517 | /* |
| 518 | * Traced processes are killed |
| 519 | * since their existence means someone is screwing up. |
| 520 | */ |
| 521 | if (q->p_flags & P_TRACED) { |
| 522 | q->p_flags &= ~P_TRACED; |
| 523 | ksignal(q, SIGKILL); |
| 524 | } |
| 525 | lwkt_reltoken(&q->p_token); |
| 526 | PRELE(q); |
| 527 | } |
| 528 | lwkt_reltoken(&initproc->p_token); |
| 529 | wakeup(initproc); |
| 530 | } |
| 531 | |
| 532 | /* |
| 533 | * Save exit status and final rusage info, adding in child rusage |
| 534 | * info and self times. |
| 535 | */ |
| 536 | calcru_proc(p, &p->p_ru); |
| 537 | ruadd(&p->p_ru, &p->p_cru); |
| 538 | |
| 539 | /* |
| 540 | * notify interested parties of our demise. |
| 541 | */ |
| 542 | KNOTE(&p->p_klist, NOTE_EXIT); |
| 543 | |
| 544 | /* |
| 545 | * Notify parent that we're gone. If parent has the PS_NOCLDWAIT |
| 546 | * flag set, or if the handler is set to SIG_IGN, notify process 1 |
| 547 | * instead (and hope it will handle this situation). |
| 548 | */ |
| 549 | if (p->p_pptr->p_sigacts->ps_flag & |
| 550 | (PS_NOCLDWAIT | PS_CLDSIGIGN)) { |
| 551 | struct proc *pp = p->p_pptr; |
| 552 | |
| 553 | PHOLD(pp); |
| 554 | proc_reparent(p, initproc); |
| 555 | |
| 556 | /* |
| 557 | * If this was the last child of our parent, notify the |
| 558 | * parent so that, if it is blocked in wait(2), it will |
| 559 | * continue. This function interlocks with pptr->p_token. |
| 560 | */ |
| 561 | if (LIST_EMPTY(&pp->p_children)) |
| 562 | wakeup((caddr_t)pp); |
| 563 | PRELE(pp); |
| 564 | } |
| 565 | |
| 566 | /* lwkt_gettoken(&proc_token); */ |
| 567 | q = p->p_pptr; |
| 568 | PHOLD(q); |
| 569 | if (p->p_sigparent && q != initproc) { |
| 570 | ksignal(q, p->p_sigparent); |
| 571 | } else { |
| 572 | ksignal(q, SIGCHLD); |
| 573 | } |
| 574 | |
| 575 | p->p_flags &= ~P_TRACED; |
| 576 | wakeup(p->p_pptr); |
| 577 | |
| 578 | PRELE(q); |
| 579 | /* lwkt_reltoken(&proc_token); */ |
| 580 | /* NOTE: p->p_pptr can get ripped out */ |
| 581 | /* |
| 582 | * cpu_exit is responsible for clearing curproc, since |
| 583 | * it is heavily integrated with the thread/switching sequence. |
| 584 | * |
| 585 | * Other substructures are freed from wait(). |
| 586 | */ |
| 587 | plimit_free(p); |
| 588 | |
| 589 | /* |
| 590 | * Release the current user process designation on the process so |
| 591 | * the userland scheduler can select someone else to run. |
| 592 | */ |
| 593 | p->p_usched->release_curproc(lp); |
| 594 | |
| 595 | /* |
| 596 | * Finally, call machine-dependent code to release as many of the |
| 597 | * lwp's resources as we can and halt execution of this thread. |
| 598 | */ |
| 599 | lwp_exit(1); |
| 600 | } |
| 601 | |
| 602 | /* |
| 603 | * Eventually called by every exiting LWP |
| 604 | * |
| 605 | * p->p_token must be held. mplock may be held and will be released. |
| 606 | */ |
| 607 | void |
| 608 | lwp_exit(int masterexit) |
| 609 | { |
| 610 | struct thread *td = curthread; |
| 611 | struct lwp *lp = td->td_lwp; |
| 612 | struct proc *p = lp->lwp_proc; |
| 613 | int dowake = 0; |
| 614 | |
| 615 | /* |
| 616 | * lwp_exit() may be called without setting LWP_MP_WEXIT, so |
| 617 | * make sure it is set here. |
| 618 | */ |
| 619 | ASSERT_LWKT_TOKEN_HELD(&p->p_token); |
| 620 | atomic_set_int(&lp->lwp_mpflags, LWP_MP_WEXIT); |
| 621 | |
| 622 | /* |
| 623 | * Clean up any virtualization |
| 624 | */ |
| 625 | if (lp->lwp_vkernel) |
| 626 | vkernel_lwp_exit(lp); |
| 627 | |
| 628 | /* |
| 629 | * Clean up select/poll support |
| 630 | */ |
| 631 | kqueue_terminate(&lp->lwp_kqueue); |
| 632 | |
| 633 | /* |
| 634 | * Clean up any syscall-cached ucred |
| 635 | */ |
| 636 | if (td->td_ucred) { |
| 637 | crfree(td->td_ucred); |
| 638 | td->td_ucred = NULL; |
| 639 | } |
| 640 | |
| 641 | /* |
| 642 | * Nobody actually wakes us when the lock |
| 643 | * count reaches zero, so just wait one tick. |
| 644 | */ |
| 645 | while (lp->lwp_lock > 0) |
| 646 | tsleep(lp, 0, "lwpexit", 1); |
| 647 | |
| 648 | /* Hand down resource usage to our proc */ |
| 649 | ruadd(&p->p_ru, &lp->lwp_ru); |
| 650 | |
| 651 | /* |
| 652 | * If we don't hold the process until the LWP is reaped wait*() |
| 653 | * may try to dispose of its vmspace before all the LWPs have |
| 654 | * actually terminated. |
| 655 | */ |
| 656 | PHOLD(p); |
| 657 | |
| 658 | /* |
| 659 | * Do any remaining work that might block on us. The code should |
| 660 | * be such that further blocking is ok after decrementing |
| 661 | * p_nthreads, but don't take the chance. |
| 662 | */ |
| 663 | dsched_exit_thread(td); |
| 664 | biosched_done(curthread); |
| 665 | |
| 666 | /* |
| 667 | * We have to use the reaper for all the LWPs except the one doing |
| 668 | * the master exit. The LWP doing the master exit can just be |
| 669 | * left on p_lwps and the process reaper will deal with it |
| 670 | * synchronously, which is much faster. |
| 671 | * |
| 672 | * Wakeup anyone waiting on p_nthreads to drop to 1 or 0. |
| 673 | * |
| 674 | * The process is left held until the reaper calls lwp_dispose() on |
| 675 | * the lp (after calling lwp_wait()). |
| 676 | */ |
| 677 | if (masterexit == 0) { |
| 678 | lwp_rb_tree_RB_REMOVE(&p->p_lwp_tree, lp); |
| 679 | --p->p_nthreads; |
| 680 | if (p->p_nthreads <= 1) |
| 681 | dowake = 1; |
| 682 | lwkt_gettoken(&deadlwp_token); |
| 683 | LIST_INSERT_HEAD(&deadlwp_list[mycpuid], lp, u.lwp_reap_entry); |
| 684 | taskqueue_enqueue(taskqueue_thread[mycpuid], |
| 685 | deadlwp_task[mycpuid]); |
| 686 | lwkt_reltoken(&deadlwp_token); |
| 687 | } else { |
| 688 | --p->p_nthreads; |
| 689 | if (p->p_nthreads <= 1) |
| 690 | dowake = 1; |
| 691 | } |
| 692 | |
| 693 | /* |
| 694 | * Release p_token. Issue the wakeup() on p_nthreads if necessary, |
| 695 | * as late as possible to give us a chance to actually deschedule and |
| 696 | * switch away before another cpu core hits reaplwp(). |
| 697 | */ |
| 698 | lwkt_reltoken(&p->p_token); |
| 699 | if (dowake) |
| 700 | wakeup(&p->p_nthreads); |
| 701 | cpu_lwp_exit(); |
| 702 | } |
| 703 | |
| 704 | /* |
| 705 | * Wait until a lwp is completely dead. The final interlock in this drama |
| 706 | * is when TDF_EXITING is set in cpu_thread_exit() just before the final |
| 707 | * switchout. |
| 708 | * |
| 709 | * At the point TDF_EXITING is set a complete exit is accomplished when |
| 710 | * TDF_RUNNING and TDF_PREEMPT_LOCK are both clear. |
| 711 | * |
| 712 | * Returns non-zero on success, and zero if the caller needs to retry |
| 713 | * the lwp_wait(). |
| 714 | */ |
| 715 | static int |
| 716 | lwp_wait(struct lwp *lp) |
| 717 | { |
| 718 | struct thread *td = lp->lwp_thread; |
| 719 | |
| 720 | KKASSERT(lwkt_preempted_proc() != lp); |
| 721 | |
| 722 | /* |
| 723 | * Wait until the lp has entered its low level exit and wait |
| 724 | * until other cores with refs on the lp (e.g. for ps or signaling) |
| 725 | * release them. |
| 726 | */ |
| 727 | if (lp->lwp_lock > 0) { |
| 728 | tsleep(lp, 0, "lwpwait1", 1); |
| 729 | return(0); |
| 730 | } |
| 731 | |
| 732 | /* |
| 733 | * Wait until the thread is no longer referenced and no longer |
| 734 | * runnable or preempted (i.e. finishes its low level exit). |
| 735 | */ |
| 736 | if (td->td_refs) { |
| 737 | tsleep(td, 0, "lwpwait2", 1); |
| 738 | return(0); |
| 739 | } |
| 740 | |
| 741 | /* |
| 742 | * The lwp's thread may still be in the middle |
| 743 | * of switching away, we can't rip its stack out from |
| 744 | * under it until TDF_EXITING is set and both |
| 745 | * TDF_RUNNING and TDF_PREEMPT_LOCK are clear. |
| 746 | * TDF_PREEMPT_LOCK must be checked because TDF_RUNNING |
| 747 | * will be cleared temporarily if a thread gets |
| 748 | * preempted. |
| 749 | * |
| 750 | * YYY no wakeup occurs, so we simply return failure |
| 751 | * and let the caller deal with sleeping and calling |
| 752 | * us again. |
| 753 | */ |
| 754 | if ((td->td_flags & (TDF_RUNNING | |
| 755 | TDF_PREEMPT_LOCK | |
| 756 | TDF_EXITING)) != TDF_EXITING) { |
| 757 | tsleep(lp, 0, "lwpwait2", 1); |
| 758 | return (0); |
| 759 | } |
| 760 | KASSERT((td->td_flags & (TDF_RUNQ|TDF_TSLEEPQ)) == 0, |
| 761 | ("lwp_wait: td %p (%s) still on run or sleep queue", |
| 762 | td, td->td_comm)); |
| 763 | return (1); |
| 764 | } |
| 765 | |
| 766 | /* |
| 767 | * Release the resources associated with a lwp. |
| 768 | * The lwp must be completely dead. |
| 769 | */ |
| 770 | void |
| 771 | lwp_dispose(struct lwp *lp) |
| 772 | { |
| 773 | struct thread *td = lp->lwp_thread; |
| 774 | |
| 775 | KKASSERT(lwkt_preempted_proc() != lp); |
| 776 | KKASSERT(td->td_refs == 0); |
| 777 | KKASSERT((td->td_flags & (TDF_RUNNING | |
| 778 | TDF_PREEMPT_LOCK | |
| 779 | TDF_EXITING)) == TDF_EXITING); |
| 780 | |
| 781 | PRELE(lp->lwp_proc); |
| 782 | lp->lwp_proc = NULL; |
| 783 | if (td != NULL) { |
| 784 | td->td_proc = NULL; |
| 785 | td->td_lwp = NULL; |
| 786 | lp->lwp_thread = NULL; |
| 787 | lwkt_free_thread(td); |
| 788 | } |
| 789 | kfree(lp, M_LWP); |
| 790 | } |
| 791 | |
| 792 | /* |
| 793 | * MPSAFE |
| 794 | */ |
| 795 | int |
| 796 | sys_wait4(struct wait_args *uap) |
| 797 | { |
| 798 | struct rusage rusage; |
| 799 | int error, status; |
| 800 | |
| 801 | error = kern_wait(uap->pid, (uap->status ? &status : NULL), |
| 802 | uap->options, (uap->rusage ? &rusage : NULL), |
| 803 | &uap->sysmsg_result); |
| 804 | |
| 805 | if (error == 0 && uap->status) |
| 806 | error = copyout(&status, uap->status, sizeof(*uap->status)); |
| 807 | if (error == 0 && uap->rusage) |
| 808 | error = copyout(&rusage, uap->rusage, sizeof(*uap->rusage)); |
| 809 | return (error); |
| 810 | } |
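|  | |
|  | /* |
|  | * Illustrative userland usage (not part of the kernel build, and |
|  | * assuming the usual <sys/wait.h>, <sys/resource.h> and <stdio.h> |
|  | * includes): the status word copied out above is decoded with the |
|  | * standard <sys/wait.h> macros. A minimal sketch: |
|  | */ |
|  | #if 0 |
|  | int status; |
|  | struct rusage ru; |
|  | pid_t pid = wait4(-1, &status, WUNTRACED | WCONTINUED, &ru); |
|  | |
|  | if (pid > 0) { |
|  | if (WIFEXITED(status)) |
|  | printf("%d exited with %d\n", (int)pid, WEXITSTATUS(status)); |
|  | else if (WIFSIGNALED(status)) |
|  | printf("%d killed by signal %d\n", (int)pid, WTERMSIG(status)); |
|  | else if (WIFSTOPPED(status)) |
|  | printf("%d stopped by signal %d\n", (int)pid, WSTOPSIG(status)); |
|  | } |
|  | #endif |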
| 811 | |
| 812 | /* |
| 813 | * kern_wait() -- backend for wait4() and friends. |
| 814 | * |
| 815 | * kern_wait(pid_t pid, int *status, int options, struct rusage *rusage, int *res) |
| 816 | * |
| 817 | * MPALMOSTSAFE |
| 818 | */ |
| 819 | int |
| 820 | kern_wait(pid_t pid, int *status, int options, struct rusage *rusage, int *res) |
| 821 | { |
| 822 | struct thread *td = curthread; |
| 823 | struct lwp *lp; |
| 824 | struct proc *q = td->td_proc; |
| 825 | struct proc *p, *t; |
| 826 | struct pargs *pa; |
| 827 | struct sigacts *ps; |
| 828 | int nfound, error; |
| 829 | |
| 830 | if (pid == 0) |
| 831 | pid = -q->p_pgid; |
| 832 | if (options &~ (WUNTRACED|WNOHANG|WCONTINUED|WLINUXCLONE)) |
| 833 | return (EINVAL); |
| 834 | |
| 835 | lwkt_gettoken(&q->p_token); |
| 836 | loop: |
| 837 | /* |
| 838 | * All sorts of things can change due to blocking so we have to loop |
| 839 | * all the way back up here. |
| 840 | * |
| 841 | * The problem is that if a process group is stopped and the parent |
| 842 | * is doing a wait*(..., WUNTRACED, ...), it will see the STOP |
| 843 | * of the child and then stop itself when it tries to return from the |
| 844 | * system call. When the process group is resumed the parent will |
| 845 | * then get the STOP status even though the child has now resumed |
| 846 | * (a followup wait*() will get the CONT status). |
| 847 | * |
| 848 | * Previously the CONT would overwrite the STOP because the tstop |
| 849 | * was handled within tsleep(), and the parent would only see |
| 850 | * the CONT when both are stopped and continued together. This little |
| 851 | * two-line hack restores this effect. |
| 852 | */ |
| 853 | while (q->p_stat == SSTOP) |
| 854 | tstop(); |
| 855 | |
| 856 | nfound = 0; |
| 857 | |
| 858 | /* |
| 859 | * Loop on children. |
| 860 | * |
| 861 | * NOTE: We don't want to break q's p_token in the loop for the |
| 862 | * case where no children are found or we risk breaking the |
| 863 | * interlock between child and parent. |
| 864 | */ |
| 865 | LIST_FOREACH(p, &q->p_children, p_sibling) { |
| 866 | if (pid != WAIT_ANY && |
| 867 | p->p_pid != pid && p->p_pgid != -pid) { |
| 868 | continue; |
| 869 | } |
| 870 | |
| 871 | /* |
| 872 | * This special case handles a kthread spawned by linux_clone |
| 873 | * (see linux_misc.c). The linux_wait4 and linux_waitpid |
| 874 | * functions need to be able to distinguish between waiting |
| 875 | * on a process and waiting on a thread. It is a thread if |
| 876 | * p_sigparent is not SIGCHLD, and the WLINUXCLONE option |
| 877 | * signifies we want to wait for threads and not processes. |
| 878 | */ |
| 879 | if ((p->p_sigparent != SIGCHLD) ^ |
| 880 | ((options & WLINUXCLONE) != 0)) { |
| 881 | continue; |
| 882 | } |
| 883 | |
| 884 | nfound++; |
| 885 | if (p->p_stat == SZOMB) { |
| 886 | /* |
| 887 | * We may go into SZOMB with threads still present. |
| 888 | * We must wait for them to exit before we can reap |
| 889 | * the master thread, otherwise we may race reaping |
| 890 | * non-master threads. |
| 891 | * |
| 892 | * Only this routine can remove a process from |
| 893 | * the zombie list and destroy it; use PHOLDZOMB() |
| 894 | * to serialize us and loop if it blocks (interlocked |
| 895 | * by the parent's q->p_token). |
| 896 | * |
| 897 | * WARNING! (p) can be invalid when PHOLDZOMB(p) |
| 898 | * returns non-zero. Be sure not to |
| 899 | * mess with it. |
| 900 | */ |
| 901 | if (PHOLDZOMB(p)) |
| 902 | goto loop; |
| 903 | lwkt_gettoken(&p->p_token); |
| 904 | if (p->p_pptr != q) { |
| 905 | lwkt_reltoken(&p->p_token); |
| 906 | PRELEZOMB(p); |
| 907 | goto loop; |
| 908 | } |
| 909 | while (p->p_nthreads > 0) { |
| 910 | tsleep(&p->p_nthreads, 0, "lwpzomb", hz); |
| 911 | } |
| 912 | |
| 913 | /* |
| 914 | * Reap any LWPs left in p->p_lwps. This is usually |
| 915 | * just the last LWP. This must be done before |
| 916 | * we loop on p_lock since the lwps hold a ref on |
| 917 | * it as a vmspace interlock. |
| 918 | * |
| 919 | * Once that is accomplished p_nthreads had better |
| 920 | * be zero. |
| 921 | */ |
| 922 | while ((lp = RB_ROOT(&p->p_lwp_tree)) != NULL) { |
| 923 | lwp_rb_tree_RB_REMOVE(&p->p_lwp_tree, lp); |
| 924 | reaplwp(lp); |
| 925 | } |
| 926 | KKASSERT(p->p_nthreads == 0); |
| 927 | |
| 928 | /* |
| 929 | * Don't do anything really bad until all references |
| 930 | * to the process go away. This may include other |
| 931 | * LWPs which are still in the process of being |
| 932 | * reaped. We can't just pull the rug out from under |
| 933 | * them because they may still be using the VM space. |
| 934 | * |
| 935 | * Certain kernel facilities such as /proc will also |
| 936 | * put a hold on the process for short periods of |
| 937 | * time. |
| 938 | */ |
| 939 | PRELE(p); |
| 940 | PSTALL(p, "reap3", 0); |
| 941 | |
| 942 | /* Take care of our return values. */ |
| 943 | *res = p->p_pid; |
| 944 | p->p_usched->heuristic_exiting(td->td_lwp, p); |
| 945 | |
| 946 | if (status) |
| 947 | *status = p->p_xstat; |
| 948 | if (rusage) |
| 949 | *rusage = p->p_ru; |
| 950 | /* |
| 951 | * If we got the child via a ptrace 'attach', |
| 952 | * we need to give it back to the old parent. |
| 953 | */ |
| 954 | if (p->p_oppid && (t = pfind(p->p_oppid)) != NULL) { |
| 955 | PHOLD(p); |
| 956 | p->p_oppid = 0; |
| 957 | proc_reparent(p, t); |
| 958 | ksignal(t, SIGCHLD); |
| 959 | wakeup((caddr_t)t); |
| 960 | error = 0; |
| 961 | PRELE(t); |
| 962 | lwkt_reltoken(&p->p_token); |
| 963 | PRELEZOMB(p); |
| 964 | goto done; |
| 965 | } |
| 966 | |
| 967 | /* |
| 968 | * Unlink the proc from its process group so that |
| 969 | * the following operations won't lead to an |
| 970 | * inconsistent state for processes running down |
| 971 | * the zombie list. |
| 972 | */ |
| 973 | proc_remove_zombie(p); |
| 974 | lwkt_reltoken(&p->p_token); |
| 975 | leavepgrp(p); |
| 976 | |
| 977 | p->p_xstat = 0; |
| 978 | ruadd(&q->p_cru, &p->p_ru); |
| 979 | |
| 980 | /* |
| 981 | * Decrement the count of procs running with this uid. |
| 982 | */ |
| 983 | chgproccnt(p->p_ucred->cr_ruidinfo, -1, 0); |
| 984 | |
| 985 | /* |
| 986 | * Free up credentials. |
| 987 | */ |
| 988 | crfree(p->p_ucred); |
| 989 | p->p_ucred = NULL; |
| 990 | |
| 991 | /* |
| 992 | * Remove unused arguments |
| 993 | */ |
| 994 | pa = p->p_args; |
| 995 | p->p_args = NULL; |
| 996 | if (pa && refcount_release(&pa->ar_ref)) { |
| 997 | kfree(pa, M_PARGS); |
| 998 | pa = NULL; |
| 999 | } |
| 1000 | |
| 1001 | ps = p->p_sigacts; |
| 1002 | p->p_sigacts = NULL; |
| 1003 | if (ps && refcount_release(&ps->ps_refcnt)) { |
| 1004 | kfree(ps, M_SUBPROC); |
| 1005 | ps = NULL; |
| 1006 | } |
| 1007 | |
| 1008 | /* |
| 1009 | * Our exitingcount was incremented when the process |
| 1010 | * became a zombie, now that the process has been |
| 1011 | * removed from (almost) all lists we should be able |
| 1012 | * to safely destroy its vmspace. Wait for any current |
| 1013 | * holders to go away (so the vmspace remains stable), |
| 1014 | * then scrap it. |
| 1015 | */ |
| 1016 | PSTALL(p, "reap4", 0); |
| 1017 | vmspace_exitfree(p); |
| 1018 | PSTALL(p, "reap5", 0); |
| 1019 | |
| 1020 | /* |
| 1021 | * NOTE: We have to officially release ZOMB in order |
| 1022 | * to ensure that a racing thread in kern_wait() |
| 1023 | * which blocked on ZOMB is woken up. |
| 1024 | */ |
| 1025 | PHOLD(p); |
| 1026 | PRELEZOMB(p); |
| 1027 | kfree(p, M_PROC); |
| 1028 | atomic_add_int(&nprocs, -1); |
| 1029 | error = 0; |
| 1030 | goto done; |
| 1031 | } |
| 1032 | if (p->p_stat == SSTOP && (p->p_flags & P_WAITED) == 0 && |
| 1033 | ((p->p_flags & P_TRACED) || (options & WUNTRACED))) { |
| 1034 | PHOLD(p); |
| 1035 | lwkt_gettoken(&p->p_token); |
| 1036 | if (p->p_pptr != q) { |
| 1037 | lwkt_reltoken(&p->p_token); |
| 1038 | PRELE(p); |
| 1039 | goto loop; |
| 1040 | } |
| 1041 | if (p->p_stat != SSTOP || |
| 1042 | (p->p_flags & P_WAITED) != 0 || |
| 1043 | ((p->p_flags & P_TRACED) == 0 && |
| 1044 | (options & WUNTRACED) == 0)) { |
| 1045 | lwkt_reltoken(&p->p_token); |
| 1046 | PRELE(p); |
| 1047 | goto loop; |
| 1048 | } |
| 1049 | |
| 1050 | p->p_flags |= P_WAITED; |
| 1051 | |
| 1052 | *res = p->p_pid; |
| 1053 | p->p_usched->heuristic_exiting(td->td_lwp, p); |
| 1054 | if (status) |
| 1055 | *status = W_STOPCODE(p->p_xstat); |
| 1056 | /* Zero rusage so we get something consistent. */ |
| 1057 | if (rusage) |
| 1058 | bzero(rusage, sizeof(*rusage)); |
| 1059 | error = 0; |
| 1060 | lwkt_reltoken(&p->p_token); |
| 1061 | PRELE(p); |
| 1062 | goto done; |
| 1063 | } |
| 1064 | if ((options & WCONTINUED) && (p->p_flags & P_CONTINUED)) { |
| 1065 | PHOLD(p); |
| 1066 | lwkt_gettoken(&p->p_token); |
| 1067 | if (p->p_pptr != q) { |
| 1068 | lwkt_reltoken(&p->p_token); |
| 1069 | PRELE(p); |
| 1070 | goto loop; |
| 1071 | } |
| 1072 | if ((p->p_flags & P_CONTINUED) == 0) { |
| 1073 | lwkt_reltoken(&p->p_token); |
| 1074 | PRELE(p); |
| 1075 | goto loop; |
| 1076 | } |
| 1077 | |
| 1078 | *res = p->p_pid; |
| 1079 | p->p_usched->heuristic_exiting(td->td_lwp, p); |
| 1080 | p->p_flags &= ~P_CONTINUED; |
| 1081 | |
| 1082 | if (status) |
| 1083 | *status = SIGCONT; |
| 1084 | error = 0; |
| 1085 | lwkt_reltoken(&p->p_token); |
| 1086 | PRELE(p); |
| 1087 | goto done; |
| 1088 | } |
| 1089 | } |
| 1090 | if (nfound == 0) { |
| 1091 | error = ECHILD; |
| 1092 | goto done; |
| 1093 | } |
| 1094 | if (options & WNOHANG) { |
| 1095 | *res = 0; |
| 1096 | error = 0; |
| 1097 | goto done; |
| 1098 | } |
| 1099 | |
| 1100 | /* |
| 1101 | * Wait for signal - interlocked using q->p_token. |
| 1102 | */ |
| 1103 | error = tsleep(q, PCATCH, "wait", 0); |
| 1104 | if (error) { |
| 1105 | done: |
| 1106 | lwkt_reltoken(&q->p_token); |
| 1107 | return (error); |
| 1108 | } |
| 1109 | goto loop; |
| 1110 | } |
| 1111 | |
| 1112 | /* |
| 1113 | * Make process 'parent' the new parent of process 'child'. |
| 1114 | * |
| 1115 | * The p_children/p_sibling lists require the old and new parents' |
| 1116 | * tokens, and changing p_pptr requires the child's token, so we |
| 1117 | * have to get three tokens to do this operation. |
| 1118 | */ |
| 1119 | void |
| 1120 | proc_reparent(struct proc *child, struct proc *parent) |
| 1121 | { |
| 1122 | struct proc *opp = child->p_pptr; |
| 1123 | |
| 1124 | if (opp == parent) |
| 1125 | return; |
| 1126 | PHOLD(opp); |
| 1127 | PHOLD(parent); |
| 1128 | lwkt_gettoken(&opp->p_token); |
| 1129 | lwkt_gettoken(&child->p_token); |
| 1130 | lwkt_gettoken(&parent->p_token); |
| 1131 | KKASSERT(child->p_pptr == opp); |
| 1132 | LIST_REMOVE(child, p_sibling); |
| 1133 | LIST_INSERT_HEAD(&parent->p_children, child, p_sibling); |
| 1134 | child->p_pptr = parent; |
| 1135 | lwkt_reltoken(&parent->p_token); |
| 1136 | lwkt_reltoken(&child->p_token); |
| 1137 | lwkt_reltoken(&opp->p_token); |
| 1138 | PRELE(parent); |
| 1139 | PRELE(opp); |
| 1140 | } |
| 1141 | |
| 1142 | /* |
| 1143 | * The next two functions handle adding/deleting items on the |
| 1144 | * exit callout list. |
| 1145 | * |
| 1146 | * at_exit(): |
| 1147 | * Add the given function to the exit callout list, but first make |
| 1148 | * sure that it is not already there. |
| 1149 | * Returns 0 on success. |
| 1150 | */ |
| 1151 | |
| 1152 | int |
| 1153 | at_exit(exitlist_fn function) |
| 1154 | { |
| 1155 | struct exitlist *ep; |
| 1156 | |
| 1157 | #ifdef INVARIANTS |
| 1158 | /* Be noisy if the programmer has lost track of things */ |
| 1159 | if (rm_at_exit(function)) |
| 1160 | kprintf("WARNING: exit callout entry (%p) already present\n", |
| 1161 | function); |
| 1162 | #endif |
| 1163 | ep = kmalloc(sizeof(*ep), M_ATEXIT, M_NOWAIT); |
| 1164 | if (ep == NULL) |
| 1165 | return (ENOMEM); |
| 1166 | ep->function = function; |
| 1167 | TAILQ_INSERT_TAIL(&exit_list, ep, next); |
| 1168 | return (0); |
| 1169 | } |
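|  | |
|  | /* |
|  | * Illustrative usage (hypothetical module code, not part of this |
|  | * file): a callout registered with at_exit() is invoked with the |
|  | * exiting thread by the TAILQ_FOREACH() loop in exit1() above, and |
|  | * should be removed with rm_at_exit() before its module unloads. |
|  | */ |
|  | #if 0 |
|  | /* Hypothetical callback; signature follows the exit1() invocation. */ |
|  | static void |
|  | mymod_exit_hook(struct thread *td) |
|  | { |
|  | /* release any per-process state tied to td->td_proc here */ |
|  | } |
|  | |
|  | /* On module load and unload respectively: */ |
|  | error = at_exit(mymod_exit_hook); |
|  | rm_at_exit(mymod_exit_hook); |
|  | #endif |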
| 1170 | |
| 1171 | /* |
| 1172 | * Scan the exit callout list for the given item and remove it. |
| 1173 | * Returns the number of items removed (0 or 1). |
| 1174 | */ |
| 1175 | int |
| 1176 | rm_at_exit(exitlist_fn function) |
| 1177 | { |
| 1178 | struct exitlist *ep; |
| 1179 | |
| 1180 | TAILQ_FOREACH(ep, &exit_list, next) { |
| 1181 | if (ep->function == function) { |
| 1182 | TAILQ_REMOVE(&exit_list, ep, next); |
| 1183 | kfree(ep, M_ATEXIT); |
| 1184 | return(1); |
| 1185 | } |
| 1186 | } |
| 1187 | return (0); |
| 1188 | } |
| 1189 | |
| 1190 | /* |
| 1191 | * LWP reaper related code. |
| 1192 | */ |
| 1193 | static void |
| 1194 | reaplwps(void *context, int dummy) |
| 1195 | { |
| 1196 | struct lwplist *lwplist = context; |
| 1197 | struct lwp *lp; |
| 1198 | |
| 1199 | lwkt_gettoken(&deadlwp_token); |
| 1200 | while ((lp = LIST_FIRST(lwplist))) { |
| 1201 | LIST_REMOVE(lp, u.lwp_reap_entry); |
| 1202 | reaplwp(lp); |
| 1203 | } |
| 1204 | lwkt_reltoken(&deadlwp_token); |
| 1205 | } |
| 1206 | |
| 1207 | static void |
| 1208 | reaplwp(struct lwp *lp) |
| 1209 | { |
| 1210 | while (lwp_wait(lp) == 0) |
| 1211 | ; |
| 1212 | lwp_dispose(lp); |
| 1213 | } |
| 1214 | |
| 1215 | static void |
| 1216 | deadlwp_init(void) |
| 1217 | { |
| 1218 | int cpu; |
| 1219 | |
| 1220 | for (cpu = 0; cpu < ncpus; cpu++) { |
| 1221 | LIST_INIT(&deadlwp_list[cpu]); |
| 1222 | deadlwp_task[cpu] = kmalloc(sizeof(*deadlwp_task[cpu]), |
| 1223 | M_DEVBUF, M_WAITOK); |
| 1224 | TASK_INIT(deadlwp_task[cpu], 0, reaplwps, &deadlwp_list[cpu]); |
| 1225 | } |
| 1226 | } |
| 1227 | |
| 1228 | SYSINIT(deadlwpinit, SI_SUB_CONFIGURE, SI_ORDER_ANY, deadlwp_init, NULL); |