proc->thread stage 3.5: Add an IO_CORE flag so coda doesn't have to dig
[dragonfly.git] sys/kern/kern_fork.c

/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_fork.c	8.6 (Berkeley) 4/8/94
 * $FreeBSD: src/sys/kern/kern_fork.c,v 1.72.2.13 2003/06/06 20:21:32 tegge Exp $
 * $DragonFly: src/sys/kern/kern_fork.c,v 1.7 2003/06/23 17:55:41 dillon Exp $
 */

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>
#include <sys/acct.h>
#include <sys/ktrace.h>
#include <sys/unistd.h>
#include <sys/jail.h>

#include <vm/vm.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <sys/vmmeter.h>
#include <sys/user.h>

static MALLOC_DEFINE(M_ATFORK, "atfork", "atfork callback");

/*
 * These are the structures used to create a callout list for things to do
 * when forking a process.
 */
struct forklist {
	forklist_fn function;
	TAILQ_ENTRY(forklist) next;
};

TAILQ_HEAD(forklist_head, forklist);
static struct forklist_head fork_list = TAILQ_HEAD_INITIALIZER(fork_list);

#ifndef _SYS_SYSPROTO_H_
struct fork_args {
	int	dummy;
};
#endif

int	forksleep;	/* Place for fork1() to sleep on. */

/* ARGSUSED */
int
fork(struct fork_args *uap)
{
	struct proc *p = curproc;
	struct proc *p2;
	int error;

	error = fork1(p, RFFDG | RFPROC, &p2);
	if (error == 0) {
		start_forked_proc(p, p2);
		p->p_retval[0] = p2->p_pid;
		p->p_retval[1] = 0;
	}
	return error;
}

/* ARGSUSED */
int
vfork(struct vfork_args *uap)
{
	struct proc *p = curproc;
	struct proc *p2;
	int error;

	error = fork1(p, RFFDG | RFPROC | RFPPWAIT | RFMEM, &p2);
	if (error == 0) {
		start_forked_proc(p, p2);
		p->p_retval[0] = p2->p_pid;
		p->p_retval[1] = 0;
	}
	return error;
}

int
rfork(struct rfork_args *uap)
{
	struct proc *p = curproc;
	struct proc *p2;
	int error;

	error = fork1(p, uap->flags, &p2);
	if (error == 0) {
		/*
		 * fork1() sets *procp to NULL when RFPROC was not
		 * specified (no new process is created), so only start
		 * the child if one actually exists.
		 */
		if (p2 != NULL)
			start_forked_proc(p, p2);
		p->p_retval[0] = p2 ? p2->p_pid : 0;
		p->p_retval[1] = 0;
	}
	return error;
}
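
/*
 * Illustrative note (editorial sketch, not in the original source): the
 * wrappers above are just fixed flag combinations passed to fork1(), and
 * the same combinations are reachable from userland through rfork(2):
 *
 *	pid = rfork(RFFDG | RFPROC);				fork() equivalent
 *	pid = rfork(RFFDG | RFPROC | RFPPWAIT | RFMEM);		vfork() equivalent
 *
 * RFFDG copies the file descriptor table, RFMEM shares the parent's
 * address space instead of copying it, and RFPPWAIT makes the parent
 * sleep until the child execs or exits.
 */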

int	nprocs = 1;		/* process 0 */
static int nextpid = 0;

/*
 * Random component to nextpid generation.  We mix in a random factor to make
 * it a little harder to predict.  We sanity check the modulus value to avoid
 * doing it in critical paths.  Don't let it be too small or we pointlessly
 * waste randomness entropy, and don't let it be impossibly large.  Using a
 * modulus that is too big causes a LOT more process table scans and slows
 * down fork processing as the pidchecked caching is defeated.
 */
static int randompid = 0;

static int
sysctl_kern_randompid(SYSCTL_HANDLER_ARGS)
{
	int error, pid;

	pid = randompid;
	error = sysctl_handle_int(oidp, &pid, 0, req);
	if (error || !req->newptr)
		return (error);
	if (pid < 0 || pid > PID_MAX - 100)	/* out of range */
		pid = PID_MAX - 100;
	else if (pid < 2)			/* NOP */
		pid = 0;
	else if (pid < 100)			/* Make it reasonable */
		pid = 100;
	randompid = pid;
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, randompid, CTLTYPE_INT|CTLFLAG_RW,
    0, 0, sysctl_kern_randompid, "I", "Random PID modulus");

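/*
 * Usage note (editorial sketch, not in the original source): the modulus
 * is tuned at runtime through the sysctl defined above, e.g.
 *
 *	sysctl kern.randompid=100
 *
 * after which fork1() advances nextpid by an extra arc4random() % 100 on
 * each fork (see the "nextpid += arc4random() % randompid" step below).
 * Setting it back to 0 restores purely sequential pid allocation.
 */
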
int
fork1(struct proc *p1, int flags, struct proc **procp)
{
	struct proc *p2, *pptr;
	uid_t uid;
	struct proc *newproc;
	int ok;
	static int pidchecked = 0;
	struct forklist *ep;
	struct filedesc_to_leader *fdtol;

	if ((flags & (RFFDG|RFCFDG)) == (RFFDG|RFCFDG))
		return (EINVAL);

	/*
	 * Here we don't create a new process, but we divorce
	 * certain parts of a process from itself.
	 */
	if ((flags & RFPROC) == 0) {

		vm_fork(p1, 0, flags);

		/*
		 * Close all file descriptors.
		 */
		if (flags & RFCFDG) {
			struct filedesc *fdtmp;
			fdtmp = fdinit(p1);
			fdfree(p1);
			p1->p_fd = fdtmp;
		}

		/*
		 * Unshare file descriptors (from parent).
		 */
		if (flags & RFFDG) {
			if (p1->p_fd->fd_refcnt > 1) {
				struct filedesc *newfd;
				newfd = fdcopy(p1);
				fdfree(p1);
				p1->p_fd = newfd;
			}
		}
		*procp = NULL;
		return (0);
	}

	/*
	 * Although process entries are dynamically created, we still keep
	 * a global limit on the maximum number we will create.  Don't allow
	 * a nonprivileged user to use the last ten processes; don't let root
	 * exceed the limit.  The variable nprocs is the current number of
	 * processes, maxproc is the limit.
	 */
	uid = p1->p_ucred->cr_ruid;
	if ((nprocs >= maxproc - 10 && uid != 0) || nprocs >= maxproc) {
		tsleep(&forksleep, PUSER, "fork", hz / 2);
		return (EAGAIN);
	}
	/*
	 * Increment the nprocs resource before blocking can occur.  There
	 * are hard-limits as to the number of processes that can run.
	 */
	nprocs++;

	/*
	 * Increment the count of procs running with this uid.  Don't allow
	 * a nonprivileged user to exceed their current limit.
	 */
	ok = chgproccnt(p1->p_ucred->cr_ruidinfo, 1,
		(uid != 0) ? p1->p_rlimit[RLIMIT_NPROC].rlim_cur : 0);
	if (!ok) {
		/*
		 * Back out the process count
		 */
		nprocs--;
		tsleep(&forksleep, PUSER, "fork", hz / 2);
		return (EAGAIN);
	}

	/* Allocate new proc. */
	newproc = zalloc(proc_zone);

	/*
	 * Setup linkage for kernel based threading
	 */
	if ((flags & RFTHREAD) != 0) {
		newproc->p_peers = p1->p_peers;
		p1->p_peers = newproc;
		newproc->p_leader = p1->p_leader;
	} else {
		newproc->p_peers = 0;
		newproc->p_leader = newproc;
	}

	newproc->p_wakeup = 0;
	newproc->p_vmspace = NULL;

	/*
	 * Find an unused process ID.  We remember a range of unused IDs
	 * ready to use (from nextpid+1 through pidchecked-1).
	 */
	nextpid++;
	if (randompid)
		nextpid += arc4random() % randompid;
retry:
	/*
	 * If the process ID prototype has wrapped around,
	 * restart somewhat above 0, as the low-numbered procs
	 * tend to include daemons that don't exit.
	 */
	if (nextpid >= PID_MAX) {
		nextpid = nextpid % PID_MAX;
		if (nextpid < 100)
			nextpid += 100;
		pidchecked = 0;
	}
	if (nextpid >= pidchecked) {
		int doingzomb = 0;

		pidchecked = PID_MAX;
		/*
		 * Scan the active and zombie procs to check whether this pid
		 * is in use.  Remember the lowest pid that's greater
		 * than nextpid, so we can avoid checking for a while.
		 */
		p2 = LIST_FIRST(&allproc);
again:
		for (; p2 != 0; p2 = LIST_NEXT(p2, p_list)) {
			while (p2->p_pid == nextpid ||
			    p2->p_pgrp->pg_id == nextpid ||
			    p2->p_session->s_sid == nextpid) {
				nextpid++;
				if (nextpid >= pidchecked)
					goto retry;
			}
			if (p2->p_pid > nextpid && pidchecked > p2->p_pid)
				pidchecked = p2->p_pid;
			if (p2->p_pgrp->pg_id > nextpid &&
			    pidchecked > p2->p_pgrp->pg_id)
				pidchecked = p2->p_pgrp->pg_id;
			if (p2->p_session->s_sid > nextpid &&
			    pidchecked > p2->p_session->s_sid)
				pidchecked = p2->p_session->s_sid;
		}
		if (!doingzomb) {
			doingzomb = 1;
			p2 = LIST_FIRST(&zombproc);
			goto again;
		}
	}

	p2 = newproc;
	p2->p_stat = SIDL;			/* protect against others */
	p2->p_pid = nextpid;
	LIST_INSERT_HEAD(&allproc, p2, p_list);
	LIST_INSERT_HEAD(PIDHASH(p2->p_pid), p2, p_hash);

	/*
	 * Make a proc table entry for the new process.
	 * Start by zeroing the section of proc that is zero-initialized,
	 * then copy the section that is copied directly from the parent.
	 */
	bzero(&p2->p_startzero,
	    (unsigned) ((caddr_t)&p2->p_endzero - (caddr_t)&p2->p_startzero));
	bcopy(&p1->p_startcopy, &p2->p_startcopy,
	    (unsigned) ((caddr_t)&p2->p_endcopy - (caddr_t)&p2->p_startcopy));

	p2->p_aioinfo = NULL;

	/*
	 * Duplicate sub-structures as needed.
	 * Increase reference counts on shared objects.
	 * The p_stats and p_sigacts substructs are set in vm_fork.
	 */
	p2->p_flag = P_INMEM;
	if (p1->p_flag & P_PROFIL)
		startprofclock(p2);
	crhold(p1->p_ucred);
	p2->p_ucred = p1->p_ucred;

	if (p2->p_ucred->cr_prison) {
		p2->p_ucred->cr_prison->pr_ref++;
		p2->p_flag |= P_JAILED;
	}

	if (p2->p_args)
		p2->p_args->ar_ref++;

	if (flags & RFSIGSHARE) {
		p2->p_procsig = p1->p_procsig;
		p2->p_procsig->ps_refcnt++;
		if (p1->p_sigacts == &p1->p_addr->u_sigacts) {
			struct sigacts *newsigacts;
			int s;

			/* Create the shared sigacts structure */
			MALLOC(newsigacts, struct sigacts *,
			    sizeof(struct sigacts), M_SUBPROC, M_WAITOK);
			s = splhigh();
			/*
			 * Set p_sigacts to the new shared structure.
			 * Note that this is updating p1->p_sigacts at the
			 * same time, since p_sigacts is just a pointer to
			 * the shared p_procsig->ps_sigacts.
			 */
			p2->p_sigacts = newsigacts;
			*p2->p_sigacts = p1->p_addr->u_sigacts;
			splx(s);
		}
	} else {
		MALLOC(p2->p_procsig, struct procsig *, sizeof(struct procsig),
		    M_SUBPROC, M_WAITOK);
		bcopy(p1->p_procsig, p2->p_procsig, sizeof(*p2->p_procsig));
		p2->p_procsig->ps_refcnt = 1;
		p2->p_sigacts = NULL;	/* finished in vm_fork() */
	}
	if (flags & RFLINUXTHPN)
		p2->p_sigparent = SIGUSR1;
	else
		p2->p_sigparent = SIGCHLD;

	/* bump references to the text vnode (for procfs) */
	p2->p_textvp = p1->p_textvp;
	if (p2->p_textvp)
		VREF(p2->p_textvp);

	if (flags & RFCFDG) {
		p2->p_fd = fdinit(p1);
		fdtol = NULL;
	} else if (flags & RFFDG) {
		p2->p_fd = fdcopy(p1);
		fdtol = NULL;
	} else {
		p2->p_fd = fdshare(p1);
		if (p1->p_fdtol == NULL)
			p1->p_fdtol =
				filedesc_to_leader_alloc(NULL, p1->p_leader);
		if ((flags & RFTHREAD) != 0) {
			/*
			 * Shared file descriptor table and
			 * shared process leaders.
			 */
			fdtol = p1->p_fdtol;
			fdtol->fdl_refcount++;
		} else {
			/*
			 * Shared file descriptor table, and
			 * different process leaders
			 */
			fdtol = filedesc_to_leader_alloc(p1->p_fdtol, p2);
		}
	}
	p2->p_fdtol = fdtol;

	/*
	 * If p_limit is still copy-on-write, bump refcnt,
	 * otherwise get a copy that won't be modified.
	 * (If PL_SHAREMOD is clear, the structure is shared
	 * copy-on-write.)
	 */
	if (p1->p_limit->p_lflags & PL_SHAREMOD)
		p2->p_limit = limcopy(p1->p_limit);
	else {
		p2->p_limit = p1->p_limit;
		p2->p_limit->p_refcnt++;
	}

	/*
	 * Preserve some more flags in subprocess.  P_PROFIL has already
	 * been preserved.
	 */
	p2->p_flag |= p1->p_flag & (P_SUGID | P_ALTSTACK);
	if (p1->p_session->s_ttyvp != NULL && p1->p_flag & P_CONTROLT)
		p2->p_flag |= P_CONTROLT;
	if (flags & RFPPWAIT)
		p2->p_flag |= P_PPWAIT;

	LIST_INSERT_AFTER(p1, p2, p_pglist);

	/*
	 * Attach the new process to its parent.
	 *
	 * If RFNOWAIT is set, the newly created process becomes a child
	 * of init.  This effectively disassociates the child from the
	 * parent.
	 */
	if (flags & RFNOWAIT)
		pptr = initproc;
	else
		pptr = p1;
	p2->p_pptr = pptr;
	LIST_INSERT_HEAD(&pptr->p_children, p2, p_sibling);
	LIST_INIT(&p2->p_children);

#ifdef KTRACE
	/*
	 * Copy traceflag and tracefile if enabled.  If not inherited,
	 * these were zeroed above but we still could have a trace race
	 * so make sure p2's p_tracep is NULL.
	 */
	if ((p1->p_traceflag & KTRFAC_INHERIT) && p2->p_tracep == NULL) {
		p2->p_traceflag = p1->p_traceflag;
		if ((p2->p_tracep = p1->p_tracep) != NULL)
			VREF(p2->p_tracep);
	}
#endif

	/*
	 * set priority of child to be that of parent
	 */
	p2->p_estcpu = p1->p_estcpu;

	/*
	 * This begins the section where we must prevent the parent
	 * from being swapped.
	 */
	PHOLD(p1);

	/*
	 * Finish creating the child process.  It will return via a different
	 * execution path later.  (ie: directly into user mode)
	 */
	vm_fork(p1, p2, flags);

	if (flags == (RFFDG | RFPROC)) {
		cnt.v_forks++;
		cnt.v_forkpages += p2->p_vmspace->vm_dsize + p2->p_vmspace->vm_ssize;
	} else if (flags == (RFFDG | RFPROC | RFPPWAIT | RFMEM)) {
		cnt.v_vforks++;
		cnt.v_vforkpages += p2->p_vmspace->vm_dsize + p2->p_vmspace->vm_ssize;
	} else if (p1 == &proc0) {
		cnt.v_kthreads++;
		cnt.v_kthreadpages += p2->p_vmspace->vm_dsize + p2->p_vmspace->vm_ssize;
	} else {
		cnt.v_rforks++;
		cnt.v_rforkpages += p2->p_vmspace->vm_dsize + p2->p_vmspace->vm_ssize;
	}

	/*
	 * Both processes are set up, now check if any loadable modules want
	 * to adjust anything.
	 *   What if they have an error? XXX
	 */
	TAILQ_FOREACH(ep, &fork_list, next) {
		(*ep->function)(p1, p2, flags);
	}

	/*
	 * Set the start time.  Note that the child is not made runnable
	 * here; it stays in SIDL until the caller invokes
	 * start_forked_proc().
	 */
	microtime(&(p2->p_stats->p_start));
	p2->p_acflag = AFORK;

	/*
	 * tell any interested parties about the new process
	 */
	KNOTE(&p1->p_klist, NOTE_FORK | p2->p_pid);

	/*
	 * Return child proc pointer to parent.
	 */
	*procp = p2;
	return (0);
}

/*
 * The next two functions are general routines to handle adding/deleting
 * items on the fork callout list.
 *
 * at_fork():
 * Take the arguments given and put them onto the fork callout list,
 * but first make sure that it's not already there.
 * Returns 0 on success or a standard error number.
 */
int
at_fork(forklist_fn function)
{
	struct forklist *ep;

#ifdef INVARIANTS
	/* let the programmer know if he's been stupid */
	if (rm_at_fork(function))
		printf("WARNING: fork callout entry (%p) already present\n",
		    function);
#endif
	ep = malloc(sizeof(*ep), M_ATFORK, M_NOWAIT);
	if (ep == NULL)
		return (ENOMEM);
	ep->function = function;
	TAILQ_INSERT_TAIL(&fork_list, ep, next);
	return (0);
}

/*
 * Scan the fork callout list for the given item and remove it.
 * Returns the number of items removed (0 or 1).
 */
int
rm_at_fork(forklist_fn function)
{
	struct forklist *ep;

	TAILQ_FOREACH(ep, &fork_list, next) {
		if (ep->function == function) {
			TAILQ_REMOVE(&fork_list, ep, next);
			free(ep, M_ATFORK);
			return (1);
		}
	}
	return (0);
}
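
/*
 * Illustrative sketch (editorial addition, not in the original source):
 * how a loadable module would typically use the callout list above.  The
 * hook signature matches the (*ep->function)(p1, p2, flags) invocation
 * in fork1(); the module and hook names are hypothetical.
 *
 *	static void
 *	mymod_fork_hook(struct proc *p1, struct proc *p2, int flags)
 *	{
 *		per-child bookkeeping goes here; this runs inside fork1()
 *		after both processes are fully set up
 *	}
 *
 *	on module load:   at_fork(mymod_fork_hook);
 *	on module unload: rm_at_fork(mymod_fork_hook);
 */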

/*
 * Add a forked process to the run queue after any remaining setup, such
 * as setting the fork handler, has been completed.
 */
void
start_forked_proc(struct proc *p1, struct proc *p2)
{
	/*
	 * Move from SIDL to RUN queue
	 */
	KASSERT(p2->p_stat == SIDL,
	    ("cannot start forked process, bad status: %p", p2));
	(void) splhigh();
	p2->p_stat = SRUN;
	setrunqueue(p2);
	(void) spl0();

	/*
	 * Now can be swapped.
	 */
	PRELE(p1);

	/*
	 * Preserve synchronization semantics of vfork.  If waiting for
	 * child to exec or exit, set P_PPWAIT on child, and sleep on our
	 * proc (in case of exit).
	 */
	while (p2->p_flag & P_PPWAIT)
		tsleep(p1, PWAIT, "ppwait", 0);
}
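
/*
 * Illustrative sketch (editorial addition, not in the original source):
 * the intended calling pattern, as used by fork()/vfork()/rfork() above,
 * is a two-step sequence so the caller can finish any per-child setup
 * while the new process is still parked in SIDL:
 *
 *	struct proc *p2;
 *	int error;
 *
 *	error = fork1(p1, RFFDG | RFPROC, &p2);
 *	if (error == 0) {
 *		any additional child setup goes here, e.g. setting
 *		the fork handler mentioned in the comment above
 *		start_forked_proc(p1, p2);
 *	}
 */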