From 4643740aa6f3eac7d3dfab05967ec55b5d8ba984 Mon Sep 17 00:00:00 2001 From: Matthew Dillon Date: Tue, 15 Nov 2011 15:23:41 -0800 Subject: [PATCH] kernel - Major signal path adjustments to fix races, tsleep race fixes, +more * Refactor the signal code to properly hold the lp->lwp_token. In particular the ksignal() and lwp_signotify() paths. * The tsleep() path must also hold lp->lwp_token to properly handle lp->lwp_stat states and interlocks. * Refactor the timeout code in tsleep() to ensure that endtsleep() is only called from the proper context, and fix races between endtsleep() and lwkt_switch(). * Rename proc->p_flag to proc->p_flags * Rename lwp->lwp_flag to lwp->lwp_flags * Add lwp->lwp_mpflags and move flags which require atomic ops (are adjusted when not the current thread) to the new field. * Add td->td_mpflags and move flags which require atomic ops (are adjusted when not the current thread) to the new field. * Add some freeze testing code to the x86-64 trap code (default disabled). --- lib/libkvm/kvm_proc.c | 4 +- sys/ddb/db_ps.c | 6 +- sys/dev/raid/vinum/.gdbinit.kernel | 12 +- sys/dev/raid/vinum/vinumdaemon.c | 2 +- sys/emulation/43bsd/43bsd_signal.c | 2 +- .../linux/i386/linprocfs/linprocfs_vnops.c | 2 +- sys/emulation/linux/i386/linux_ptrace.c | 6 +- sys/emulation/linux/i386/linux_sysvec.c | 6 +- sys/emulation/linux/linux_file.c | 2 +- sys/emulation/ndis/ntoskrnl_var.h | 2 +- sys/kern/init_main.c | 4 +- sys/kern/kern_acct.c | 2 +- sys/kern/kern_checkpoint.c | 2 +- sys/kern/kern_clock.c | 14 +- sys/kern/kern_descrip.c | 16 +- sys/kern/kern_exec.c | 18 +- sys/kern/kern_exit.c | 40 +-- sys/kern/kern_fork.c | 18 +- sys/kern/kern_intr.c | 4 +- sys/kern/kern_jail.c | 2 +- sys/kern/kern_kinfo.c | 8 +- sys/kern/kern_kthread.c | 15 +- sys/kern/kern_ktrace.c | 2 +- sys/kern/kern_proc.c | 4 +- sys/kern/kern_prot.c | 6 +- sys/kern/kern_resource.c | 5 +- sys/kern/kern_sig.c | 230 ++++++++---------- sys/kern/kern_synch.c | 179 ++++++-------- sys/kern/kern_threads.c | 4 +- sys/kern/kern_time.c | 4 +- sys/kern/kern_timeout.c | 2 +- sys/kern/kern_upcall.c | 8 +- sys/kern/lwkt_thread.c | 18 +- sys/kern/subr_prf.c | 5 +- sys/kern/subr_prof.c | 2 +- sys/kern/subr_taskqueue.c | 4 +- sys/kern/sys_generic.c | 2 +- sys/kern/sys_process.c | 19 +- sys/kern/tty.c | 16 +- sys/kern/tty_pty.c | 3 +- sys/kern/tty_tty.c | 5 +- sys/kern/usched_bsd4.c | 35 +-- sys/kern/usched_dummy.c | 16 +- sys/kern/vfs_journal.c | 4 +- sys/net/if.c | 2 +- sys/net/netisr.c | 2 +- sys/netproto/smb/smb_subr.c | 2 +- sys/platform/pc32/i386/db_trace.c | 2 +- sys/platform/pc32/i386/machdep.c | 2 +- sys/platform/pc32/i386/trap.c | 40 +-- sys/platform/pc64/x86_64/machdep.c | 2 +- sys/platform/pc64/x86_64/trap.c | 62 +++-- sys/platform/vkernel/i386/cpu_regs.c | 2 +- sys/platform/vkernel/i386/trap.c | 40 +-- sys/platform/vkernel64/x86_64/cpu_regs.c | 2 +- sys/platform/vkernel64/x86_64/trap.c | 40 +-- sys/sys/proc.h | 91 +++---- sys/sys/signal2.h | 2 +- sys/sys/thread.h | 41 ++-- sys/sys/tty.h | 2 +- sys/vfs/procfs/procfs.h | 2 +- sys/vfs/procfs/procfs_ctl.c | 24 +- sys/vfs/procfs/procfs_dbregs.c | 4 +- sys/vfs/procfs/procfs_fpregs.c | 4 +- sys/vfs/procfs/procfs_map.c | 2 +- sys/vfs/procfs/procfs_mem.c | 4 +- sys/vfs/procfs/procfs_regs.c | 4 +- sys/vfs/procfs/procfs_status.c | 7 +- sys/vfs/procfs/procfs_type.c | 2 +- sys/vfs/procfs/procfs_vnops.c | 8 +- sys/vm/vm_fault.c | 11 +- sys/vm/vm_glue.c | 10 +- sys/vm/vm_meter.c | 10 +- sys/vm/vm_pageout.c | 6 +- 74 files changed, 587 insertions(+), 605 deletions(-) diff --git 
a/lib/libkvm/kvm_proc.c b/lib/libkvm/kvm_proc.c index 08d8aac470..95f64ef418 100644 --- a/lib/libkvm/kvm_proc.c +++ b/lib/libkvm/kvm_proc.c @@ -294,7 +294,7 @@ kvm_proclist(kvm_t *kd, int what, int arg, struct proc *p, } pgrp.pg_session = &sess; - if ((proc.p_flag & P_CONTROLT) && sess.s_ttyp != NULL) { + if ((proc.p_flags & P_CONTROLT) && sess.s_ttyp != NULL) { if (KREAD(kd, (u_long)sess.s_ttyp, &tty)) { _kvm_err(kd, kd->program, "can't read tty at %p", sess.s_ttyp); @@ -351,7 +351,7 @@ kvm_proclist(kvm_t *kd, int what, int arg, struct proc *p, break; case KERN_PROC_TTY: - if ((proc.p_flag & P_CONTROLT) == 0 || + if ((proc.p_flags & P_CONTROLT) == 0 || dev2udev(proc.p_pgrp->pg_session->s_ttyp->t_dev) != (dev_t)arg) continue; diff --git a/sys/ddb/db_ps.c b/sys/ddb/db_ps.c index d00196161b..16b31feb3a 100644 --- a/sys/ddb/db_ps.c +++ b/sys/ddb/db_ps.c @@ -61,7 +61,7 @@ db_ps(db_expr_t dummy1, boolean_t dummy2, db_expr_t dummy3, char *dummy4) if (db_more(&nl) < 0) return; - db_printf(" pid lwp uid ppid pgrp pflag lflag stat wmesg wchan cmd\n"); + db_printf(" pid lwp uid ppid pgrp pflags lflags stat wmesg wchan cmd\n"); for (;;) { while (lp == NULL) { --np; @@ -88,8 +88,8 @@ db_ps(db_expr_t dummy1, boolean_t dummy2, db_expr_t dummy3, char *dummy4) db_printf("%5d %8p %4d %5d %5d %06x %06x %d %d", p->p_pid, (volatile void *)lp, p->p_ucred ? p->p_ucred->cr_ruid : 0, pp->p_pid, - p->p_pgrp ? p->p_pgrp->pg_id : 0, p->p_flag, - lp->lwp_flag, p->p_stat, lp->lwp_stat); + p->p_pgrp ? p->p_pgrp->pg_id : 0, p->p_flags, + lp->lwp_flags, p->p_stat, lp->lwp_stat); if (lp->lwp_wchan) { db_printf(" %6s %8p", (lp->lwp_wmesg ? lp->lwp_wmesg : "?"), diff --git a/sys/dev/raid/vinum/.gdbinit.kernel b/sys/dev/raid/vinum/.gdbinit.kernel index 1cba58da4d..2b76e08e2f 100644 --- a/sys/dev/raid/vinum/.gdbinit.kernel +++ b/sys/dev/raid/vinum/.gdbinit.kernel @@ -405,7 +405,7 @@ define ps printf "%5d %08x %4d %5d %5d %06x %d %-10s ", \ $proc.p_pid, $aproc, \ $proc.p_cred->p_ruid, $pptr->p_pid, \ - $proc.p_pgrp->pg_id, $proc.p_flag, $proc.p_stat, \ + $proc.p_pgrp->pg_id, $proc.p_flags, $proc.p_stat, \ &$proc.p_comm[0] if ($proc.p_wchan) if ($proc.p_wmesg) @@ -497,7 +497,7 @@ define btpa printf "%5d %08x %4d %5d %5d %06x %d %-10s ", \ $proc.p_pid, $aproc, \ $proc.p_cred->p_ruid, $pptr->p_pid, \ - $proc.p_pgrp->pg_id, $proc.p_flag, $proc.p_stat, \ + $proc.p_pgrp->pg_id, $proc.p_flags, $proc.p_stat, \ &$proc.p_comm[0] if ($proc.p_wchan) if ($proc.p_wmesg) @@ -506,7 +506,7 @@ define btpa printf "%x", $proc.p_wchan end printf "\n" - if ($proc->p_flag & 4) + if ($proc->p_flags & 4) btr $proc->p_addr->u_pcb->pcb_ebp else echo (not loaded)\n @@ -523,7 +523,7 @@ document btpa Show backtraces for all processes in the system. 
end define btpp - if ($myvectorproc->p_flag & 4) + if ($myvectorproc->p_flags & 4) btr $myvectorproc->p_addr->u_pcb->pcb_ebp else echo (not loaded)\n @@ -547,7 +547,7 @@ define defproc printf "%5d %08x %4d %5d %5d %06x %d %-10s ", \ $proc.p_pid, $aproc, \ $proc.p_cred->p_ruid, $pptr->p_pid, \ - $proc.p_pgrp->pg_id, $proc.p_flag, $proc.p_stat, \ + $proc.p_pgrp->pg_id, $proc.p_flags, $proc.p_stat, \ &$proc.p_comm[0] if ($proc.p_wchan) if ($proc.p_wmesg) @@ -570,7 +570,7 @@ end define fr set $fno = 0 set $searching = 1 -if ($myvectorproc->p_flag & 4) +if ($myvectorproc->p_flags & 4) set $frame = $myvectorproc->p_addr->u_pcb->pcb_ebp while (($searching == 1) && (*(int *) $frame > 0xc0000000)) set $myebp = *(int *) $frame diff --git a/sys/dev/raid/vinum/vinumdaemon.c b/sys/dev/raid/vinum/vinumdaemon.c index d37abe2398..6750c264bb 100644 --- a/sys/dev/raid/vinum/vinumdaemon.c +++ b/sys/dev/raid/vinum/vinumdaemon.c @@ -69,7 +69,7 @@ vinum_daemon(void) { struct daemonq *request; - curproc->p_flag |= P_SYSTEM; /* we're a system process */ + curproc->p_flags |= P_SYSTEM; /* we're a system process */ daemon_save_config(); /* start by saving the configuration */ daemonpid = curproc->p_pid; /* mark our territory */ while (1) { diff --git a/sys/emulation/43bsd/43bsd_signal.c b/sys/emulation/43bsd/43bsd_signal.c index ff95e0ba87..fbe4994600 100644 --- a/sys/emulation/43bsd/43bsd_signal.c +++ b/sys/emulation/43bsd/43bsd_signal.c @@ -168,7 +168,7 @@ sys_osigstack(struct osigstack_args *uap) lp->lwp_sigstk.ss_sp = ss.ss_sp; lp->lwp_sigstk.ss_size = 0; lp->lwp_sigstk.ss_flags |= ss.ss_onstack & SS_ONSTACK; - lp->lwp_flag |= LWP_ALTSTACK; + lp->lwp_flags |= LWP_ALTSTACK; } return (error); } diff --git a/sys/emulation/linux/i386/linprocfs/linprocfs_vnops.c b/sys/emulation/linux/i386/linprocfs/linprocfs_vnops.c index e385198929..73dc1e6096 100644 --- a/sys/emulation/linux/i386/linprocfs/linprocfs_vnops.c +++ b/sys/emulation/linux/i386/linprocfs/linprocfs_vnops.c @@ -597,7 +597,7 @@ linprocfs_getattr(struct vop_getattr_args *ap) * change the owner to root - otherwise 'ps' and friends * will break even though they are setgid kmem. *SIGH* */ - if (procp->p_flag & P_SUGID) + if (procp->p_flags & P_SUGID) vap->va_uid = 0; else vap->va_uid = procp->p_ucred->cr_uid; diff --git a/sys/emulation/linux/i386/linux_ptrace.c b/sys/emulation/linux/i386/linux_ptrace.c index 21622f70b4..71d70f2531 100644 --- a/sys/emulation/linux/i386/linux_ptrace.c +++ b/sys/emulation/linux/i386/linux_ptrace.c @@ -381,13 +381,13 @@ sys_linux_ptrace(struct linux_ptrace_args *uap) } /* System processes can't be debugged. */ - if ((p->p_flag & P_SYSTEM) != 0) { + if ((p->p_flags & P_SYSTEM) != 0) { error = EINVAL; goto fail; } /* not being traced... */ - if ((p->p_flag & P_TRACED) == 0) { + if ((p->p_flags & P_TRACED) == 0) { error = EPERM; goto fail; } @@ -399,7 +399,7 @@ sys_linux_ptrace(struct linux_ptrace_args *uap) } /* not currently stopped */ - if ((p->p_flag & (P_TRACED|P_WAITED)) == 0) { + if ((p->p_flags & (P_TRACED|P_WAITED)) == 0) { error = EBUSY; goto fail; } diff --git a/sys/emulation/linux/i386/linux_sysvec.c b/sys/emulation/linux/i386/linux_sysvec.c index 469e7a6e5c..0d836101c0 100644 --- a/sys/emulation/linux/i386/linux_sysvec.c +++ b/sys/emulation/linux/i386/linux_sysvec.c @@ -269,7 +269,7 @@ linux_rt_sendsig(sig_t catcher, int sig, sigset_t *mask, u_long code) /* * Allocate space for the signal handler context. 
*/ - if ((lp->lwp_flag & LWP_ALTSTACK) && !oonstack && + if ((lp->lwp_flags & LWP_ALTSTACK) && !oonstack && SIGISMEMBER(p->p_sigacts->ps_sigonstack, sig)) { fp = (struct l_rt_sigframe *)(lp->lwp_sigstk.ss_sp + lp->lwp_sigstk.ss_size - sizeof(struct l_rt_sigframe)); @@ -327,7 +327,7 @@ linux_rt_sendsig(sig_t catcher, int sig, sigset_t *mask, u_long code) frame.sf_sc.uc_stack.ss_sp = lp->lwp_sigstk.ss_sp; frame.sf_sc.uc_stack.ss_size = lp->lwp_sigstk.ss_size; - frame.sf_sc.uc_stack.ss_flags = (lp->lwp_flag & LWP_ALTSTACK) + frame.sf_sc.uc_stack.ss_flags = (lp->lwp_flags & LWP_ALTSTACK) ? ((oonstack) ? LINUX_SS_ONSTACK : 0) : LINUX_SS_DISABLE; bsd_to_linux_sigset(mask, &frame.sf_sc.uc_sigmask); @@ -433,7 +433,7 @@ linux_sendsig(sig_t catcher, int sig, sigset_t *mask, u_long code) /* * Allocate space for the signal handler context. */ - if ((lp->lwp_flag & LWP_ALTSTACK) && !oonstack && + if ((lp->lwp_flags & LWP_ALTSTACK) && !oonstack && SIGISMEMBER(p->p_sigacts->ps_sigonstack, sig)) { fp = (struct l_sigframe *)(lp->lwp_sigstk.ss_sp + lp->lwp_sigstk.ss_size - sizeof(struct l_sigframe)); diff --git a/sys/emulation/linux/linux_file.c b/sys/emulation/linux/linux_file.c index c641dbe186..6544c63859 100644 --- a/sys/emulation/linux/linux_file.c +++ b/sys/emulation/linux/linux_file.c @@ -141,7 +141,7 @@ linux_open_common(int dfd, char *lpath, int lflags, int mode, int *iresult) nlookup_done_at(&nd, fp); if (error == 0 && !(flags & O_NOCTTY) && - SESS_LEADER(p) && !(p->p_flag & P_CONTROLT)) { + SESS_LEADER(p) && !(p->p_flags & P_CONTROLT)) { struct file *fp; fp = holdfp(p->p_fd, *iresult, -1); diff --git a/sys/emulation/ndis/ntoskrnl_var.h b/sys/emulation/ndis/ntoskrnl_var.h index d2c78c3ef8..caee82ee07 100644 --- a/sys/emulation/ndis/ntoskrnl_var.h +++ b/sys/emulation/ndis/ntoskrnl_var.h @@ -337,7 +337,7 @@ typedef struct nt_dispatch_header nt_dispatch_header; #define SYNC_LEVEL_MP (IPI_LEVEL - 1) #define AT_PASSIVE_LEVEL(td) \ - ((td)->td_proc->p_flag & P_KTHREAD == FALSE) + ((td)->td_proc->p_flags & P_KTHREAD == FALSE) #define AT_DISPATCH_LEVEL(td) \ (lwkt_getpri(td) == TDPRI_INT_HIGH) diff --git a/sys/kern/init_main.c b/sys/kern/init_main.c index 97b985d4fd..b0fdb82a5f 100644 --- a/sys/kern/init_main.c +++ b/sys/kern/init_main.c @@ -392,7 +392,7 @@ proc0_init(void *dummy __unused) p->p_sysent = &aout_sysvec; - p->p_flag = P_SYSTEM; + p->p_flags = P_SYSTEM; p->p_stat = SACTIVE; lp->lwp_stat = LSRUN; p->p_nice = NZERO; @@ -687,7 +687,7 @@ create_init(const void *udata __unused) error = fork1(&lwp0, RFFDG | RFPROC, &initproc); if (error) panic("cannot fork init: %d", error); - initproc->p_flag |= P_SYSTEM; + initproc->p_flags |= P_SYSTEM; lp = ONLY_LWP_IN_PROC(initproc); cpu_set_fork_handler(lp, start_init, NULL); crit_exit(); diff --git a/sys/kern/kern_acct.c b/sys/kern/kern_acct.c index 44502e9bb6..e70c7fd3ed 100644 --- a/sys/kern/kern_acct.c +++ b/sys/kern/kern_acct.c @@ -249,7 +249,7 @@ acct_process(struct proc *p) acct.ac_gid = p->p_ucred->cr_rgid; /* (7) The terminal from which the process was started */ - if ((p->p_flag & P_CONTROLT) && p->p_pgrp->pg_session->s_ttyp) + if ((p->p_flags & P_CONTROLT) && p->p_pgrp->pg_session->s_ttyp) acct.ac_tty = dev2udev(p->p_pgrp->pg_session->s_ttyp->t_dev); else acct.ac_tty = NOUDEV; diff --git a/sys/kern/kern_checkpoint.c b/sys/kern/kern_checkpoint.c index 3b604922bd..3aa33d8cbb 100644 --- a/sys/kern/kern_checkpoint.c +++ b/sys/kern/kern_checkpoint.c @@ -783,7 +783,7 @@ checkpoint_signal_handler(struct lwp *lp) * Being able to checkpoint an suid or sgid 
program is not a good * idea. */ - if (sugid_coredump == 0 && (p->p_flag & P_SUGID)) { + if (sugid_coredump == 0 && (p->p_flags & P_SUGID)) { chptinuse--; return (EPERM); } diff --git a/sys/kern/kern_clock.c b/sys/kern/kern_clock.c index dab7b6d8a4..60408ce3fc 100644 --- a/sys/kern/kern_clock.c +++ b/sys/kern/kern_clock.c @@ -561,12 +561,12 @@ hardclock(systimer_t info, int in_ipi __unused, struct intrframe *frame) if (frame && CLKF_USERMODE(frame) && timevalisset(&p->p_timer[ITIMER_VIRTUAL].it_value) && itimerdecr(&p->p_timer[ITIMER_VIRTUAL], ustick) == 0) { - p->p_flag |= P_SIGVTALRM; + p->p_flags |= P_SIGVTALRM; need_user_resched(); } if (timevalisset(&p->p_timer[ITIMER_PROF].it_value) && itimerdecr(&p->p_timer[ITIMER_PROF], ustick) == 0) { - p->p_flag |= P_SIGPROF; + p->p_flags |= P_SIGPROF; need_user_resched(); } crit_exit_hard(); @@ -629,7 +629,7 @@ statclock(systimer_t info, int in_ipi, struct intrframe *frame) * Came from userland, handle user time and deal with * possible process. */ - if (p && (p->p_flag & P_PROFIL)) + if (p && (p->p_flags & P_PROFIL)) addupc_intr(p, CLKF_PC(frame), 1); td->td_uticks += bump; @@ -926,8 +926,8 @@ tstohz_low(struct timespec *ts) void startprofclock(struct proc *p) { - if ((p->p_flag & P_PROFIL) == 0) { - p->p_flag |= P_PROFIL; + if ((p->p_flags & P_PROFIL) == 0) { + p->p_flags |= P_PROFIL; #if 0 /* XXX */ if (++profprocs == 1 && stathz != 0) { crit_enter(); @@ -947,8 +947,8 @@ startprofclock(struct proc *p) void stopprofclock(struct proc *p) { - if (p->p_flag & P_PROFIL) { - p->p_flag &= ~P_PROFIL; + if (p->p_flags & P_PROFIL) { + p->p_flags &= ~P_PROFIL; #if 0 /* XXX */ if (--profprocs == 0 && stathz != 0) { crit_enter(); diff --git a/sys/kern/kern_descrip.c b/sys/kern/kern_descrip.c index 796f15ec49..a967e33021 100644 --- a/sys/kern/kern_descrip.c +++ b/sys/kern/kern_descrip.c @@ -323,9 +323,9 @@ kern_fcntl(int fd, int cmd, union fcntl_dat *dat, struct ucred *cred) error = EBADF; break; } - if ((p->p_leader->p_flag & P_ADVLOCK) == 0) { + if ((p->p_leader->p_flags & P_ADVLOCK) == 0) { lwkt_gettoken(&p->p_leader->p_token); - p->p_leader->p_flag |= P_ADVLOCK; + p->p_leader->p_flags |= P_ADVLOCK; lwkt_reltoken(&p->p_leader->p_token); } error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK, @@ -336,9 +336,9 @@ kern_fcntl(int fd, int cmd, union fcntl_dat *dat, struct ucred *cred) error = EBADF; break; } - if ((p->p_leader->p_flag & P_ADVLOCK) == 0) { + if ((p->p_leader->p_flags & P_ADVLOCK) == 0) { lwkt_gettoken(&p->p_leader->p_token); - p->p_leader->p_flag |= P_ADVLOCK; + p->p_leader->p_flags |= P_ADVLOCK; lwkt_reltoken(&p->p_leader->p_token); } error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK, @@ -1978,7 +1978,7 @@ fdfree(struct proc *p, struct filedesc *repl) ("filedesc_to_refcount botch: fdl_refcount=%d", fdtol->fdl_refcount)); if (fdtol->fdl_refcount == 1 && - (p->p_leader->p_flag & P_ADVLOCK) != 0) { + (p->p_leader->p_flags & P_ADVLOCK) != 0) { for (i = 0; i <= fdp->fd_lastfile; ++i) { fdnode = &fdp->fd_files[i]; if (fdnode->fp == NULL || @@ -2006,7 +2006,7 @@ fdfree(struct proc *p, struct filedesc *repl) retry: if (fdtol->fdl_refcount == 1) { if (fdp->fd_holdleaderscount > 0 && - (p->p_leader->p_flag & P_ADVLOCK) != 0) { + (p->p_leader->p_flags & P_ADVLOCK) != 0) { /* * close() or do_dup() has cleared a reference * in a shared file descriptor table. 
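[Note: the P_ADVLOCK hunks in kern_descrip.c above repeat one idiom this commit relies on throughout: p_flags is a plain integer field, so a bit set or cleared on a process other than curproc must be serialized by that process's p_token. A minimal sketch of the idiom follows; it assumes DragonFly's lwkt token API as used in the hunks, and set_proc_flag() is a hypothetical helper, not a function added by this commit.

	#include <sys/param.h>
	#include <sys/proc.h>
	#include <sys/thread.h>

	/*
	 * Hypothetical sketch: set a p_flags bit on a process that may
	 * not be curproc.  The exclusive p_token serializes the plain
	 * read-modify-write; lwkt_gettoken() may block.
	 */
	static void
	set_proc_flag(struct proc *p, int flag)
	{
		lwkt_gettoken(&p->p_token);
		p->p_flags |= flag;
		lwkt_reltoken(&p->p_token);
	}

Bits that must be flipped from contexts which cannot take the token, or that belong to a thread other than the current one, instead move to the new lwp_mpflags/td_mpflags fields and are manipulated with atomic ops; see the kern_exit.c and kern_kthread.c hunks below.]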
@@ -2382,7 +2382,7 @@ closef(struct file *fp, struct proc *p) if (p != NULL && fp->f_type == DTYPE_VNODE && (((struct vnode *)fp->f_data)->v_flag & VMAYHAVELOCKS) ) { - if ((p->p_leader->p_flag & P_ADVLOCK) != 0) { + if ((p->p_leader->p_flags & P_ADVLOCK) != 0) { lf.l_whence = SEEK_SET; lf.l_start = 0; lf.l_len = 0; @@ -2401,7 +2401,7 @@ closef(struct file *fp, struct proc *p) for (fdtol = fdtol->fdl_next; fdtol != p->p_fdtol; fdtol = fdtol->fdl_next) { - if ((fdtol->fdl_leader->p_flag & + if ((fdtol->fdl_leader->p_flags & P_ADVLOCK) == 0) continue; fdtol->fdl_holdcount++; diff --git a/sys/kern/kern_exec.c b/sys/kern/kern_exec.c index a3984e8f59..ed3be80ffa 100644 --- a/sys/kern/kern_exec.c +++ b/sys/kern/kern_exec.c @@ -415,9 +415,9 @@ interpret: * mark as execed, wakeup the process that vforked (if any) and tell * it that it now has its own resources back */ - p->p_flag |= P_EXEC; - if (p->p_pptr && (p->p_flag & P_PPWAIT)) { - p->p_flag &= ~P_PPWAIT; + p->p_flags |= P_EXEC; + if (p->p_pptr && (p->p_flags & P_PPWAIT)) { + p->p_flags &= ~P_PPWAIT; wakeup((caddr_t)p->p_pptr); } @@ -430,7 +430,7 @@ interpret: if ((((attr.va_mode & VSUID) && p->p_ucred->cr_uid != attr.va_uid) || ((attr.va_mode & VSGID) && p->p_ucred->cr_gid != attr.va_gid)) && (imgp->vp->v_mount->mnt_flag & MNT_NOSUID) == 0 && - (p->p_flag & P_TRACED) == 0) { + (p->p_flags & P_TRACED) == 0) { /* * Turn off syscall tracing for set-id programs, except for * root. Record any set-id flags first to make sure that @@ -464,7 +464,7 @@ interpret: } else { if (p->p_ucred->cr_uid == p->p_ucred->cr_ruid && p->p_ucred->cr_gid == p->p_ucred->cr_rgid) - p->p_flag &= ~P_SUGID; + p->p_flags &= ~P_SUGID; } /* @@ -497,7 +497,7 @@ interpret: * as we're now a bona fide freshly-execed process. */ KNOTE(&p->p_klist, NOTE_EXEC); - p->p_flag &= ~P_INEXEC; + p->p_flags &= ~P_INEXEC; /* * If tracing the process, trap to debugger so breakpoints @@ -505,7 +505,7 @@ interpret: */ STOPEVENT(p, S_EXEC, 0); - if (p->p_flag & P_TRACED) + if (p->p_flags & P_TRACED) ksignal(p, SIGTRAP); /* clear "fork but no exec" flag, as we _are_ execing */ @@ -571,7 +571,7 @@ exec_fail: * clearing it. */ if (imgp->vmspace_destroyed & 2) - p->p_flag &= ~P_INEXEC; + p->p_flags &= ~P_INEXEC; lwkt_reltoken(&p->p_token); if (imgp->vmspace_destroyed) { /* @@ -758,7 +758,7 @@ exec_new_vmspace(struct image_params *imgp, struct vmspace *vmcopy) return (error); } imgp->vmspace_destroyed |= 2; /* we are responsible for P_INEXEC */ - p->p_flag |= P_INEXEC; + p->p_flags |= P_INEXEC; /* * Blow away entire process VM, if address space not shared, diff --git a/sys/kern/kern_exit.c b/sys/kern/kern_exit.c index e01d662121..f01755916e 100644 --- a/sys/kern/kern_exit.c +++ b/sys/kern/kern_exit.c @@ -206,14 +206,14 @@ killalllwps(int forexec) * Interlock against P_WEXIT. Only one of the process's thread * is allowed to do the master exit. */ - if (p->p_flag & P_WEXIT) + if (p->p_flags & P_WEXIT) return (EALREADY); - p->p_flag |= P_WEXIT; + p->p_flags |= P_WEXIT; /* - * Interlock with LWP_WEXIT and kill any remaining LWPs + * Interlock with LWP_MP_WEXIT and kill any remaining LWPs */ - lp->lwp_flag |= LWP_WEXIT; + atomic_set_int(&lp->lwp_mpflags, LWP_MP_WEXIT); if (p->p_nthreads > 1) killlwps(lp); @@ -223,8 +223,8 @@ killalllwps(int forexec) * have been killed. 
*/ if (forexec) { - lp->lwp_flag &= ~LWP_WEXIT; - p->p_flag &= ~P_WEXIT; + atomic_clear_int(&lp->lwp_mpflags, LWP_MP_WEXIT); + p->p_flags &= ~P_WEXIT; } return(0); } @@ -242,16 +242,16 @@ killlwps(struct lwp *lp) /* * Kill the remaining LWPs. We must send the signal before setting - * LWP_WEXIT. The setting of WEXIT is optional but helps reduce + * LWP_MP_WEXIT. The setting of WEXIT is optional but helps reduce * races. tlp must be held across the call as it might block and * allow the target lwp to rip itself out from under our loop. */ FOREACH_LWP_IN_PROC(tlp, p) { LWPHOLD(tlp); lwkt_gettoken(&tlp->lwp_token); - if ((tlp->lwp_flag & LWP_WEXIT) == 0) { + if ((tlp->lwp_mpflags & LWP_MP_WEXIT) == 0) { lwpsignal(p, tlp, SIGKILL); - tlp->lwp_flag |= LWP_WEXIT; + atomic_set_int(&tlp->lwp_mpflags, LWP_MP_WEXIT); } lwkt_reltoken(&tlp->lwp_token); LWPRELE(tlp); @@ -344,13 +344,13 @@ exit1(int rv) TAILQ_FOREACH(ep, &exit_list, next) (*ep->function)(td); - if (p->p_flag & P_PROFIL) + if (p->p_flags & P_PROFIL) stopprofclock(p); /* * If parent is waiting for us to exit or exec, * P_PPWAIT is set; we will wakeup the parent below. */ - p->p_flag &= ~(P_TRACED | P_PPWAIT); + p->p_flags &= ~(P_TRACED | P_PPWAIT); SIGEMPTYSET(p->p_siglist); SIGEMPTYSET(lp->lwp_siglist); if (timevalisset(&p->p_realtimer.it_value)) @@ -505,8 +505,8 @@ exit1(int rv) * Traced processes are killed * since their existence means someone is screwing up. */ - if (q->p_flag & P_TRACED) { - q->p_flag &= ~P_TRACED; + if (q->p_flags & P_TRACED) { + q->p_flags &= ~P_TRACED; ksignal(q, SIGKILL); } q = nq; @@ -595,11 +595,11 @@ lwp_exit(int masterexit) int dowake = 0; /* - * lwp_exit() may be called without setting LWP_WEXIT, so + * lwp_exit() may be called without setting LWP_MP_WEXIT, so * make sure it is set here. */ ASSERT_LWKT_TOKEN_HELD(&p->p_token); - lp->lwp_flag |= LWP_WEXIT; + atomic_set_int(&lp->lwp_mpflags, LWP_MP_WEXIT); /* * Clean up any virtualization @@ -966,10 +966,10 @@ loop: error = 0; goto done; } - if (p->p_stat == SSTOP && (p->p_flag & P_WAITED) == 0 && - ((p->p_flag & P_TRACED) || (options & WUNTRACED))) { + if (p->p_stat == SSTOP && (p->p_flags & P_WAITED) == 0 && + ((p->p_flags & P_TRACED) || (options & WUNTRACED))) { lwkt_gettoken(&p->p_token); - p->p_flag |= P_WAITED; + p->p_flags |= P_WAITED; *res = p->p_pid; p->p_usched->heuristic_exiting(td->td_lwp, p); @@ -982,11 +982,11 @@ loop: lwkt_reltoken(&p->p_token); goto done; } - if ((options & WCONTINUED) && (p->p_flag & P_CONTINUED)) { + if ((options & WCONTINUED) && (p->p_flags & P_CONTINUED)) { lwkt_gettoken(&p->p_token); *res = p->p_pid; p->p_usched->heuristic_exiting(td->td_lwp, p); - p->p_flag &= ~P_CONTINUED; + p->p_flags &= ~P_CONTINUED; if (status) *status = SIGCONT; diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c index fe3c96a8f2..f5c8f2820b 100644 --- a/sys/kern/kern_fork.c +++ b/sys/kern/kern_fork.c @@ -223,7 +223,7 @@ fail: lwp_rb_tree_RB_REMOVE(&p->p_lwp_tree, lp); --p->p_nthreads; /* lwp_dispose expects an exited lwp, and a held proc */ - lp->lwp_flag |= LWP_WEXIT; + atomic_set_int(&lp->lwp_mpflags, LWP_MP_WEXIT); lp->lwp_thread->td_flags |= TDF_EXITING; lwkt_remove_tdallq(lp->lwp_thread); PHOLD(p); @@ -405,12 +405,12 @@ fork1(struct lwp *lp1, int flags, struct proc **procp) * other consumers to gain temporary references to p2 * (p2->p_lock can change). 
*/ - if (p1->p_flag & P_PROFIL) + if (p1->p_flags & P_PROFIL) startprofclock(p2); p2->p_ucred = crhold(lp1->lwp_thread->td_ucred); if (jailed(p2->p_ucred)) - p2->p_flag |= P_JAILED; + p2->p_flags |= P_JAILED; if (p2->p_args) refcount_acquire(&p2->p_args->ar_ref); @@ -486,11 +486,11 @@ fork1(struct lwp *lp1, int flags, struct proc **procp) * Preserve some more flags in subprocess. P_PROFIL has already * been preserved. */ - p2->p_flag |= p1->p_flag & P_SUGID; - if (p1->p_session->s_ttyvp != NULL && p1->p_flag & P_CONTROLT) - p2->p_flag |= P_CONTROLT; + p2->p_flags |= p1->p_flags & P_SUGID; + if (p1->p_session->s_ttyvp != NULL && p1->p_flags & P_CONTROLT) + p2->p_flags |= P_CONTROLT; if (flags & RFPPWAIT) - p2->p_flag |= P_PPWAIT; + p2->p_flags |= P_PPWAIT; /* * Inherit the virtual kernel structure (allows a virtual kernel @@ -624,7 +624,7 @@ lwp_fork(struct lwp *origlp, struct proc *destproc, int flags) bcopy(&origlp->lwp_startcopy, &lp->lwp_startcopy, (unsigned) ((caddr_t)&lp->lwp_endcopy - (caddr_t)&lp->lwp_startcopy)); - lp->lwp_flag |= origlp->lwp_flag & LWP_ALTSTACK; + lp->lwp_flags |= origlp->lwp_flags & LWP_ALTSTACK; /* * Set cpbase to the last timeout that occured (not the upcoming * timeout). @@ -761,7 +761,7 @@ start_forked_proc(struct lwp *lp1, struct proc *p2) * We must hold our p_token to interlock the flag/tsleep */ lwkt_gettoken(&p2->p_token); - while (p2->p_flag & P_PPWAIT) + while (p2->p_flags & P_PPWAIT) tsleep(lp1->lwp_proc, 0, "ppwait", 0); lwkt_reltoken(&p2->p_token); } diff --git a/sys/kern/kern_intr.c b/sys/kern/kern_intr.c index 286f05f0d0..aa1c79bb6d 100644 --- a/sys/kern/kern_intr.c +++ b/sys/kern/kern_intr.c @@ -263,7 +263,7 @@ register_int(int intr, inthand2_t *handler, void *arg, const char *name, */ if (emergency_intr_thread.td_kstack == NULL) { lwkt_create(ithread_emergency, NULL, NULL, &emergency_intr_thread, - TDF_STOPREQ | TDF_INTTHREAD, ncpus - 1, "ithread emerg"); + TDF_NOSTART | TDF_INTTHREAD, ncpus - 1, "ithread emerg"); systimer_init_periodic_nq(&emergency_intr_timer, emergency_intr_timer_callback, &emergency_intr_thread, (emergency_intr_enable ? 
emergency_intr_freq : 1)); @@ -278,7 +278,7 @@ register_int(int intr, inthand2_t *handler, void *arg, const char *name, if (info->i_state == ISTATE_NOTHREAD) { info->i_state = ISTATE_NORMAL; lwkt_create(ithread_handler, (void *)(intptr_t)intr, NULL, - &info->i_thread, TDF_STOPREQ | TDF_INTTHREAD, cpuid, + &info->i_thread, TDF_NOSTART | TDF_INTTHREAD, cpuid, "ithread %d", intr); if (intr >= FIRST_SOFTINT) lwkt_setpri(&info->i_thread, TDPRI_SOFT_NORM); diff --git a/sys/kern/kern_jail.c b/sys/kern/kern_jail.c index 4842b6079a..fad701a161 100644 --- a/sys/kern/kern_jail.c +++ b/sys/kern/kern_jail.c @@ -120,7 +120,7 @@ kern_jail_attach(int jid) lwkt_gettoken(&p->p_token); cratom(&p->p_ucred); p->p_ucred->cr_prison = pr; - p->p_flag |= P_JAILED; + p->p_flags |= P_JAILED; lwkt_reltoken(&p->p_token); return(0); diff --git a/sys/kern/kern_kinfo.c b/sys/kern/kern_kinfo.c index 6adeab7f15..850e7c8f56 100644 --- a/sys/kern/kern_kinfo.c +++ b/sys/kern/kern_kinfo.c @@ -86,7 +86,7 @@ fill_kinfo_proc(struct proc *p, struct kinfo_proc *kp) kp->kp_paddr = (uintptr_t)p; kp->kp_fd = (uintptr_t)p->p_fd; - kp->kp_flags = p->p_flag; + kp->kp_flags = p->p_flags; kp->kp_stat = p->p_stat; kp->kp_lock = p->p_lock; kp->kp_acflag = p->p_acflag; @@ -132,7 +132,7 @@ fill_kinfo_proc(struct proc *p, struct kinfo_proc *kp) if ((p->p_session != NULL) && SESS_LEADER(p)) kp->kp_auxflags |= KI_SLEADER; } - if (sess && (p->p_flag & P_CONTROLT) != 0 && sess->s_ttyp != NULL) { + if (sess && (p->p_flags & P_CONTROLT) != 0 && sess->s_ttyp != NULL) { kp->kp_tdev = dev2udev(sess->s_ttyp->t_dev); if (sess->s_ttyp->t_pgrp != NULL) kp->kp_tpgid = sess->s_ttyp->t_pgrp->pg_id; @@ -189,7 +189,7 @@ fill_kinfo_lwp(struct lwp *lwp, struct kinfo_lwp *kl) kl->kl_pid = lwp->lwp_proc->p_pid; kl->kl_tid = lwp->lwp_tid; - kl->kl_flags = lwp->lwp_flag; + kl->kl_flags = lwp->lwp_flags; kl->kl_stat = lwp->lwp_stat; kl->kl_lock = lwp->lwp_lock; kl->kl_tdflags = lwp->lwp_thread->td_flags; @@ -203,7 +203,7 @@ fill_kinfo_lwp(struct lwp *lwp, struct kinfo_lwp *kl) */ if (kl->kl_stat == LSRUN) { if ((kl->kl_tdflags & TDF_RUNQ) == 0 && - (lwp->lwp_flag & LWP_ONRUNQ) == 0) { + (lwp->lwp_mpflags & LWP_MP_ONRUNQ) == 0) { kl->kl_stat = LSSLEEP; } } diff --git a/sys/kern/kern_kthread.c b/sys/kern/kern_kthread.c index a3b6e6c0ed..8a69cc23b3 100644 --- a/sys/kern/kern_kthread.c +++ b/sys/kern/kern_kthread.c @@ -173,14 +173,15 @@ suspend_kproc(struct thread *td, int timo) { if (td->td_proc == NULL) { lwkt_gettoken(&kpsus_token); - td->td_flags |= TDF_STOPREQ; /* request thread pause */ + /* request thread pause */ + atomic_set_int(&td->td_mpflags, TDF_MP_STOPREQ); wakeup(td); - while (td->td_flags & TDF_STOPREQ) { + while (td->td_mpflags & TDF_MP_STOPREQ) { int error = tsleep(td, 0, "suspkp", timo); if (error == EWOULDBLOCK) break; } - td->td_flags &= ~TDF_STOPREQ; + atomic_clear_int(&td->td_mpflags, TDF_MP_STOPREQ); lwkt_reltoken(&kpsus_token); return(0); } else { @@ -193,14 +194,14 @@ kproc_suspend_loop(void) { struct thread *td = curthread; - if (td->td_flags & TDF_STOPREQ) { + if (td->td_mpflags & TDF_MP_STOPREQ) { lwkt_gettoken(&kpsus_token); - td->td_flags &= ~TDF_STOPREQ; - while ((td->td_flags & TDF_WAKEREQ) == 0) { + atomic_clear_int(&td->td_mpflags, TDF_MP_STOPREQ); + while ((td->td_mpflags & TDF_MP_WAKEREQ) == 0) { wakeup(td); tsleep(td, 0, "kpsusp", 0); } - td->td_flags &= ~TDF_WAKEREQ; + atomic_clear_int(&td->td_mpflags, TDF_MP_WAKEREQ); wakeup(td); lwkt_reltoken(&kpsus_token); } diff --git a/sys/kern/kern_ktrace.c b/sys/kern/kern_ktrace.c index 
71b3d9071f..6ccd577ff8 100644 --- a/sys/kern/kern_ktrace.c +++ b/sys/kern/kern_ktrace.c @@ -613,7 +613,7 @@ ktrcanset(struct thread *calltd, struct proc *targetp) caller->cr_rgid == target->cr_rgid && /* XXX */ target->cr_rgid == target->cr_svgid && (targetp->p_traceflag & KTRFAC_ROOT) == 0 && - (targetp->p_flag & P_SUGID) == 0) || + (targetp->p_flags & P_SUGID) == 0) || caller->cr_uid == 0) return (1); diff --git a/sys/kern/kern_proc.c b/sys/kern/kern_proc.c index 4aa83b2c7a..080d8bff4c 100644 --- a/sys/kern/kern_proc.c +++ b/sys/kern/kern_proc.c @@ -295,7 +295,7 @@ enterpgrp(struct proc *p, pid_t pgid, int mksess) KASSERT(p == curproc, ("enterpgrp: mksession and p != curproc")); lwkt_gettoken(&p->p_token); - p->p_flag &= ~P_CONTROLT; + p->p_flags &= ~P_CONTROLT; lwkt_reltoken(&p->p_token); } else { pgrp->pg_session = p->p_session; @@ -939,7 +939,7 @@ sysctl_kern_proc(SYSCTL_HANDLER_ARGS) break; case KERN_PROC_TTY: - if ((p->p_flag & P_CONTROLT) == 0 || + if ((p->p_flags & P_CONTROLT) == 0 || p->p_session == NULL || p->p_session->s_ttyp == NULL || dev2udev(p->p_session->s_ttyp->t_dev) != diff --git a/sys/kern/kern_prot.c b/sys/kern/kern_prot.c index b77b0cc228..5916f1c541 100644 --- a/sys/kern/kern_prot.c +++ b/sys/kern/kern_prot.c @@ -324,7 +324,7 @@ sys_setpgid(struct setpgid_args *uap) error = EPERM; goto done; } - if (targp->p_flag & P_EXEC) { + if (targp->p_flags & P_EXEC) { error = EACCES; goto done; } @@ -873,7 +873,7 @@ sys_getresgid(struct getresgid_args *uap) int sys_issetugid(struct issetugid_args *uap) { - uap->sysmsg_result = (curproc->p_flag & P_SUGID) ? 1 : 0; + uap->sysmsg_result = (curproc->p_flags & P_SUGID) ? 1 : 0; return (0); } @@ -1211,7 +1211,7 @@ setsugid(void) KKASSERT(p != NULL); lwkt_gettoken(&p->p_token); - p->p_flag |= P_SUGID; + p->p_flags |= P_SUGID; if (!(p->p_pfsflags & PF_ISUGID)) p->p_stops = 0; lwkt_reltoken(&p->p_token); diff --git a/sys/kern/kern_resource.c b/sys/kern/kern_resource.c index afee45fa82..2cbd2177ca 100644 --- a/sys/kern/kern_resource.c +++ b/sys/kern/kern_resource.c @@ -295,8 +295,11 @@ donice(struct proc *chgp, int n) if (n < chgp->p_nice && priv_check_cred(cr, PRIV_SCHED_SETPRIORITY, 0)) return (EACCES); chgp->p_nice = n; - FOREACH_LWP_IN_PROC(lp, chgp) + FOREACH_LWP_IN_PROC(lp, chgp) { + LWPHOLD(lp); chgp->p_usched->resetpriority(lp); + LWPRELE(lp); + } return (0); } diff --git a/sys/kern/kern_sig.c b/sys/kern/kern_sig.c index 4687e956ef..a13c73976d 100644 --- a/sys/kern/kern_sig.c +++ b/sys/kern/kern_sig.c @@ -81,7 +81,7 @@ static int sig_ffs(sigset_t *set); static int sigprop(int sig); static void lwp_signotify(struct lwp *lp); #ifdef SMP -static void signotify_remote(void *arg); +static void lwp_signotify_remote(void *arg); #endif static int kern_sigtimedwait(sigset_t set, siginfo_t *info, struct timespec *timeout); @@ -433,7 +433,7 @@ execsigs(struct proc *p) lp->lwp_sigstk.ss_flags = SS_DISABLE; lp->lwp_sigstk.ss_size = 0; lp->lwp_sigstk.ss_sp = 0; - lp->lwp_flag &= ~LWP_ALTSTACK; + lp->lwp_flags &= ~LWP_ALTSTACK; /* * Reset no zombies if child dies flag as Solaris does. */ @@ -561,7 +561,7 @@ kern_sigsuspend(struct __sigset *set) * to indicate this. 
*/ lp->lwp_oldsigmask = lp->lwp_sigmask; - lp->lwp_flag |= LWP_OLDMASK; + lp->lwp_flags |= LWP_OLDMASK; SIG_CANTMASK(*set); lp->lwp_sigmask = *set; @@ -602,7 +602,7 @@ kern_sigaltstack(struct sigaltstack *ss, struct sigaltstack *oss) struct lwp *lp = td->td_lwp; struct proc *p = td->td_proc; - if ((lp->lwp_flag & LWP_ALTSTACK) == 0) + if ((lp->lwp_flags & LWP_ALTSTACK) == 0) lp->lwp_sigstk.ss_flags |= SS_DISABLE; if (oss) @@ -612,12 +612,12 @@ kern_sigaltstack(struct sigaltstack *ss, struct sigaltstack *oss) if (ss->ss_flags & SS_DISABLE) { if (lp->lwp_sigstk.ss_flags & SS_ONSTACK) return (EINVAL); - lp->lwp_flag &= ~LWP_ALTSTACK; + lp->lwp_flags &= ~LWP_ALTSTACK; lp->lwp_sigstk.ss_flags = ss->ss_flags; } else { if (ss->ss_size < p->p_sysent->sv_minsigstksz) return (ENOMEM); - lp->lwp_flag |= LWP_ALTSTACK; + lp->lwp_flags |= LWP_ALTSTACK; lp->lwp_sigstk = *ss; } } @@ -695,7 +695,7 @@ dokillpg(int sig, int pgid, int all) LIST_FOREACH(p, &pgrp->pg_members, p_pglist) { if (p->p_pid <= 1 || p->p_stat == SZOMB || - (p->p_flag & P_SYSTEM) || + (p->p_flags & P_SYSTEM) || !CANSIGNAL(p, sig)) { continue; } @@ -714,7 +714,7 @@ killpg_all_callback(struct proc *p, void *data) { struct killpg_info *info = data; - if (p->p_pid <= 1 || (p->p_flag & P_SYSTEM) || + if (p->p_pid <= 1 || (p->p_flags & P_SYSTEM) || p == curproc || !CANSIGNAL(p, info->sig)) { return (0); } @@ -762,7 +762,7 @@ kern_kill(int sig, pid_t pid, lwpid_t tid) * called directly with P_WEXIT set to kill individual LWPs * during exit, which is allowed. */ - if (p->p_flag & P_WEXIT) { + if (p->p_flags & P_WEXIT) { lwkt_reltoken(&p->p_token); PRELE(p); lwkt_reltoken(&proc_token); @@ -872,7 +872,7 @@ pgsignal(struct pgrp *pgrp, int sig, int checkctty) pgref(pgrp); lockmgr(&pgrp->pg_lock, LK_EXCLUSIVE); LIST_FOREACH(p, &pgrp->pg_members, p_pglist) { - if (checkctty == 0 || p->p_flag & P_CONTROLT) + if (checkctty == 0 || p->p_flags & P_CONTROLT) ksignal(p, sig); } lockmgr(&pgrp->pg_lock, LK_RELEASE); @@ -905,7 +905,7 @@ trapsignal(struct lwp *lp, int sig, u_long code) } - if ((p->p_flag & P_TRACED) == 0 && SIGISMEMBER(p->p_sigcatch, sig) && + if ((p->p_flags & P_TRACED) == 0 && SIGISMEMBER(p->p_sigcatch, sig) && !SIGISMEMBER(lp->lwp_sigmask, sig)) { lp->lwp_ru.ru_nsignals++; #ifdef KTRACE @@ -1003,7 +1003,7 @@ find_lwp_for_signal(struct proc *p, int sig) } break; case LSSLEEP: - if (lp->lwp_flag & LWP_SINTR) { + if (lp->lwp_flags & LWP_SINTR) { if (sleep) { lwkt_reltoken(&lp->lwp_token); LWPRELE(lp); @@ -1106,7 +1106,7 @@ lwpsignal(struct proc *p, struct lwp *lp, int sig) * if signal event is tracked by procfs, give *that* * a chance, as well. */ - if ((p->p_flag & P_TRACED) || (p->p_stops & S_SIG)) { + if ((p->p_flags & P_TRACED) || (p->p_stops & S_SIG)) { action = SIG_DFL; } else { /* @@ -1114,7 +1114,7 @@ lwpsignal(struct proc *p, struct lwp *lp, int sig) * that we must still deliver the signal if P_WEXIT is set * in the process flags. */ - if (lp && (lp->lwp_flag & LWP_WEXIT)) { + if (lp && (lp->lwp_mpflags & LWP_MP_WEXIT)) { if (lp) { lwkt_reltoken(&lp->lwp_token); LWPRELE(lp); @@ -1169,7 +1169,7 @@ lwpsignal(struct proc *p, struct lwp *lp, int sig) return; } SIG_CONTSIGMASK(p->p_siglist); - p->p_flag &= ~P_CONTINUED; + p->p_flags &= ~P_CONTINUED; } crit_enter(); @@ -1191,7 +1191,7 @@ lwpsignal(struct proc *p, struct lwp *lp, int sig) * If the process is stopped and is being traced, then no * further action is necessary. 
*/ - if (p->p_flag & P_TRACED) + if (p->p_flags & P_TRACED) goto out; /* @@ -1225,7 +1225,7 @@ lwpsignal(struct proc *p, struct lwp *lp, int sig) q = p->p_pptr; PHOLD(q); lwkt_gettoken(&q->p_token); - p->p_flag |= P_CONTINUED; + p->p_flags |= P_CONTINUED; wakeup(q); if (action == SIG_DFL) SIGDELSET(p->p_siglist, sig); @@ -1255,19 +1255,14 @@ lwpsignal(struct proc *p, struct lwp *lp, int sig) /* * Otherwise the process is stopped and it received some - * signal, which does not change its stopped state. - * - * We have to select one thread to set LWP_BREAKTSLEEP, - * so that the current signal will break the sleep - * as soon as a SA_CONT signal will unstop the process. + * signal, which does not change its stopped state. When + * the process is continued a wakeup(p) will be issued which + * will wakeup any threads sleeping in tstop(). */ if (lp == NULL) { /* NOTE: returns lp w/ token held */ lp = find_lwp_for_signal(p, sig); } - if (lp != NULL && - (lp->lwp_stat == LSSLEEP || lp->lwp_stat == LSSTOP)) - lp->lwp_flag |= LWP_BREAKTSLEEP; goto out; /* NOTREACHED */ @@ -1305,7 +1300,7 @@ active_process: * not be dispatched if masked but we must still deliver it. */ if (p->p_nice > NZERO && action == SIG_DFL && (prop & SA_KILL) && - (p->p_flag & P_TRACED) == 0) { + (p->p_flags & P_TRACED) == 0) { p->p_nice = NZERO; } @@ -1320,7 +1315,7 @@ active_process: * could cause deadlock. Take no action at this * time. */ - if (p->p_flag & P_PPWAIT) { + if (p->p_flags & P_PPWAIT) { SIGADDSET(p->p_siglist, sig); goto out; } @@ -1360,7 +1355,10 @@ out: } /* - * p->p_token must be held + * Notify the LWP that a signal has arrived. The LWP does not have to be + * sleeping on the current cpu. + * + * p->p_token and lp->lwp_token must be held on call. */ static void lwp_signotify(struct lwp *lp) @@ -1368,73 +1366,38 @@ lwp_signotify(struct lwp *lp) ASSERT_LWKT_TOKEN_HELD(&lp->lwp_proc->p_token); crit_enter(); - if (lp->lwp_stat == LSSLEEP || lp->lwp_stat == LSSTOP) { + if (lp == lwkt_preempted_proc()) { /* - * Thread is in tsleep. + * lwp is on the current cpu AND it is currently running + * (we preempted it). */ - + signotify(); + } else if (lp->lwp_thread->td_gd == mycpu) { /* - * If the thread is sleeping uninterruptibly - * we can't interrupt the sleep... the signal will - * be noticed when the lwp returns through - * trap() or syscall(). - * - * Otherwise the signal can interrupt the sleep. - * - * If the process is traced, the lwp will handle the - * tracing in issignal() when it returns to userland. + * lwp is on the current cpu, we can safely call + * setrunnable() */ - if (lp->lwp_flag & LWP_SINTR) { - /* - * Make runnable and break out of any tsleep as well. - */ - lp->lwp_flag |= LWP_BREAKTSLEEP; - setrunnable(lp); - } - } else { + setrunnable(lp); + } else +#ifdef SMP + if (lp->lwp_flags & LWP_SINTR) { /* - * Otherwise the thread is running - * - * LSRUN does nothing with the signal, other than kicking - * ourselves if we are running. - * SZOMB and SIDL mean that it will either never be noticed, - * or noticed very soon. - * - * Note that lwp_thread may be NULL or may not be completely - * initialized if the process is in the SIDL or SZOMB state. + * The lwp is on some other cpu but sitting in a tsleep, + * we have to hold it to prevent it from going away and + * chase after the cpu it is sitting on. * - * For SMP we may have to forward the request to another cpu. 
- YYY the MP lock prevents the target process from moving - * to another cpu, see kern/kern_switch.c - * - * If the target thread is waiting on its message port, - * wakeup the target thread so it can check (or ignore) - * the new signal. YYY needs cleanup. + * The lwp_token interlocks LWP_SINTR. */ - if (lp == lwkt_preempted_proc()) { - signotify(); - } else if (lp->lwp_stat == LSRUN) { - struct thread *td = lp->lwp_thread; - struct proc *p __debugvar = lp->lwp_proc; - - KASSERT(td != NULL, - ("pid %d/%d NULL lwp_thread stat %d flags %08x/%08x", - p->p_pid, lp->lwp_tid, lp->lwp_stat, - p->p_flag, lp->lwp_flag)); - - /* - * To prevent a MP race with TDF_SINTR we must - * schedule the thread on the correct cpu. - */ -#ifdef SMP - if (td->td_gd != mycpu) { - LWPHOLD(lp); - lwkt_send_ipiq(td->td_gd, signotify_remote, lp); - } else + LWPHOLD(lp); + lwkt_send_ipiq(lp->lwp_thread->td_gd, lwp_signotify_remote, lp); + } else #endif - if (td->td_flags & TDF_SINTR) - lwkt_schedule(td); - } + { + /* + * Otherwise the lwp is either in some uninterruptible state + * or it is on the userland scheduler's runqueue waiting to + * be scheduled to a cpu. + */ } crit_exit(); } @@ -1442,39 +1405,32 @@ #ifdef SMP /* - * This function is called via an IPI. We will be in a critical section but - * the MP lock will NOT be held. The passed lp will be held. + * This function is called via an IPI so we cannot call setrunnable() here + * (because while we hold the lp we don't own its token, and can't get it + * from an IPI). - * - * We must essentially repeat the code at the end of lwp_signotify(), - * in particular rechecking all races. If we are still not on the - * correct cpu we leave the lwp ref intact and continue the chase. - * - * XXX this may still not be entirely correct, since we are checking - * lwp_stat asynchronously. + * We are interlocked by virtue of being on the same cpu as the target. If + * we still are and LWP_SINTR is set we can schedule the target thread. */ static void -signotify_remote(void *arg) +lwp_signotify_remote(void *arg) { struct lwp *lp = arg; - thread_t td; + thread_t td = lp->lwp_thread; if (lp == lwkt_preempted_proc()) { signotify(); - } else if (lp->lwp_stat == LSRUN) { - /* - * To prevent a MP race with TDF_SINTR we must - * schedule the thread on the correct cpu. - */ - td = lp->lwp_thread; - if (td->td_gd != mycpu) { - lwkt_send_ipiq(td->td_gd, signotify_remote, lp); - return; - /* NOT REACHED */ - } + LWPRELE(lp); + } else if (td->td_gd == mycpu) { + if (lp->lwp_flags & LWP_SINTR) + lwkt_schedule(td); if (td->td_flags & TDF_SINTR) lwkt_schedule(td); + LWPRELE(lp); + } else { + lwkt_send_ipiq(td->td_gd, lwp_signotify_remote, lp); + /* LWPHOLD() is forwarded to the target cpu */ } - LWPRELE(lp); } #endif @@ -1514,12 +1470,14 @@ proc_stop(struct proc *p) /* * We're sleeping, but we will stop before * returning to userspace, so count us - * as stopped as well. We set LWP_WSTOP + * as stopped as well. We set LWP_MP_WSTOP * to signal the lwp that it should not * increase p_nstopped when reaching tstop(). + * + * LWP_MP_WSTOP is protected by lp->lwp_token.
*/ - if ((lp->lwp_flag & LWP_WSTOP) == 0) { - lp->lwp_flag |= LWP_WSTOP; + if ((lp->lwp_mpflags & LWP_MP_WSTOP) == 0) { + atomic_set_int(&lp->lwp_mpflags, LWP_MP_WSTOP); ++p->p_nstopped; } break; @@ -1544,7 +1502,7 @@ q = p->p_pptr; PHOLD(q); lwkt_gettoken(&q->p_token); - p->p_flag &= ~P_WAITED; + p->p_flags &= ~P_WAITED; wakeup(q); if ((q->p_sigacts->ps_flag & PS_NOCLDSTOP) == 0) ksignal(p->p_pptr, SIGCHLD); @@ -1595,9 +1553,12 @@ proc_unstop(struct proc *p) * Nevertheless we call setrunnable() so that it * will wake up in case a signal or timeout arrived * in the meantime. + * + * LWP_MP_WSTOP is protected by lp->lwp_token. */ - if (lp->lwp_flag & LWP_WSTOP) { - lp->lwp_flag &= ~LWP_WSTOP; + if (lp->lwp_mpflags & LWP_MP_WSTOP) { + atomic_clear_int(&lp->lwp_mpflags, + LWP_MP_WSTOP); --p->p_nstopped; } else { if (bootverbose) @@ -1607,13 +1568,24 @@ /* FALLTHROUGH */ case LSSTOP: - setrunnable(lp); + /* + * This handles any lwp's waiting in a tsleep with + * PCATCH. + */ + lwp_signotify(lp); break; } lwkt_reltoken(&lp->lwp_token); LWPRELE(lp); } + + /* + * This handles any lwp's waiting in tstop(). We have interlocked + * the setting of p_stat by acquiring and releasing each lwp's + * token. + */ + wakeup(p); crit_exit(); } @@ -1856,7 +1828,7 @@ issignal(struct lwp *lp, int maytrace) lwkt_gettoken(&p->p_token); for (;;) { - int traced = (p->p_flag & P_TRACED) || (p->p_stops & S_SIG); + int traced = (p->p_flags & P_TRACED) || (p->p_stops & S_SIG); /* * If this process is supposed to stop, stop this thread. @@ -1866,7 +1838,7 @@ mask = lwp_sigpend(lp); SIGSETNAND(mask, lp->lwp_sigmask); - if (p->p_flag & P_PPWAIT) + if (p->p_flags & P_PPWAIT) SIG_STOPSIGMASK(mask); if (SIGISEMPTY(mask)) { /* no signal to send */ lwkt_reltoken(&p->p_token); @@ -1886,7 +1858,9 @@ spin_unlock(&lp->lwp_spin); continue; } - if (maytrace && (p->p_flag & P_TRACED) && (p->p_flag & P_PPWAIT) == 0) { + if (maytrace && + (p->p_flags & P_TRACED) && + (p->p_flags & P_PPWAIT) == 0) { /* * If traced, always stop, and stay stopped until * released by the parent. @@ -1902,7 +1876,7 @@ proc_stop(p); do { tstop(); - } while (!trace_req(p) && (p->p_flag & P_TRACED)); + } while (!trace_req(p) && (p->p_flags & P_TRACED)); /* * If parent wants us to take the signal, @@ -1931,7 +1905,7 @@ * to the top to rescan signals. This ensures * that p_sig* and ps_sigact are consistent. */ - if ((p->p_flag & P_TRACED) == 0) + if ((p->p_flags & P_TRACED) == 0) continue; } @@ -1975,7 +1949,7 @@ * process group, ignore tty stop signals. */ if (prop & SA_STOP) { - if (p->p_flag & P_TRACED || + if (p->p_flags & P_TRACED || (p->p_pgrp->pg_jobc == 0 && prop & SA_TTYSTOP)) break; /* == ignore */ @@ -2003,7 +1977,7 @@ * than SIGCONT, unless process is traced. */ if ((prop & SA_CONT) == 0 && - (p->p_flag & P_TRACED) == 0) + (p->p_flags & P_TRACED) == 0) kprintf("issignal\n"); break; /* == ignore */ @@ -2059,7 +2033,7 @@ postsig(int sig) action = ps->ps_sigact[_SIG_IDX(sig)]; #ifdef KTRACE if (KTRPOINT(lp->lwp_thread, KTR_PSIG)) - ktrpsig(lp, sig, action, lp->lwp_flag & LWP_OLDMASK ? + ktrpsig(lp, sig, action, lp->lwp_flags & LWP_OLDMASK ?
&lp->lwp_oldsigmask : &lp->lwp_sigmask, 0); #endif STOPEVENT(p, S_SIG, sig); @@ -2103,9 +2077,9 @@ postsig(int sig) * mask from before the sigsuspend is what we want * restored after the signal processing is completed. */ - if (lp->lwp_flag & LWP_OLDMASK) { + if (lp->lwp_flags & LWP_OLDMASK) { returnmask = lp->lwp_oldsigmask; - lp->lwp_flag &= ~LWP_OLDMASK; + lp->lwp_flags &= ~LWP_OLDMASK; } else { returnmask = lp->lwp_sigmask; } @@ -2286,7 +2260,7 @@ coredump(struct lwp *lp, int sig) STOPEVENT(p, S_CORE, 0); - if (((sugid_coredump == 0) && p->p_flag & P_SUGID) || do_coredump == 0) + if (((sugid_coredump == 0) && p->p_flags & P_SUGID) || do_coredump == 0) return (EFAULT); /* @@ -2397,7 +2371,7 @@ pgsigio(struct sigio *sigio, int sig, int checkctty) lockmgr(&pg->pg_lock, LK_EXCLUSIVE); LIST_FOREACH(p, &pg->pg_members, p_pglist) { if (CANSIGIO(sigio->sio_ruid, sigio->sio_ucred, p) && - (checkctty == 0 || (p->p_flag & P_CONTROLT))) + (checkctty == 0 || (p->p_flags & P_CONTROLT))) ksignal(p, sig); } lockmgr(&pg->pg_lock, LK_RELEASE); diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c index 6d182a97f7..3513c64e17 100644 --- a/sys/kern/kern_synch.c +++ b/sys/kern/kern_synch.c @@ -117,9 +117,6 @@ static fixpt_t cexp[3] = { static void endtsleep (void *); static void loadav (void *arg); static void schedcpu (void *arg); -#ifdef SMP -static void tsleep_wakeup_remote(struct thread *td); -#endif /* * Adjust the scheduler quantum. The quantum is specified in microseconds. @@ -281,8 +278,8 @@ schedcpu_resource(struct proc *p, void *data __unused) killproc(p, "exceeded maximum CPU limit"); break; case PLIMIT_TESTCPU_XCPU: - if ((p->p_flag & P_XCPU) == 0) { - p->p_flag |= P_XCPU; + if ((p->p_flags & P_XCPU) == 0) { + p->p_flags |= P_XCPU; ksignal(p, SIGXCPU); } break; @@ -402,6 +399,7 @@ tsleep_interlock(const volatile void *ident, int flags) /* * Remove thread from sleepq. Must be called with a critical section held. + * The thread must not be migrating. */ static __inline void _tsleep_remove(thread_t td) @@ -410,6 +408,7 @@ _tsleep_remove(thread_t td) int id; KKASSERT(td->td_gd == gd && IN_CRITICAL_SECT(td)); + KKASSERT((td->td_flags & TDF_MIGRATING) == 0); if (td->td_flags & TDF_TSLEEPQ) { td->td_flags &= ~TDF_TSLEEPQ; id = LOOKUP(td->td_wchan); @@ -427,51 +426,6 @@ tsleep_remove(thread_t td) _tsleep_remove(td); } -/* - * This function removes a thread from the tsleep queue and schedules - * it. This function may act asynchronously. The target thread may be - * sleeping on a different cpu. - * - * This function mus be called while in a critical section but if the - * target thread is sleeping on a different cpu we cannot safely probe - * td_flags. - * - * This function is only called from a different cpu via setrunnable() - * when the thread is in a known sleep. However, multiple wakeups are - * possible and we must hold the td to prevent a race against the thread - * exiting. - */ -static __inline -void -_tsleep_wakeup(struct thread *td) -{ -#ifdef SMP - globaldata_t gd = mycpu; - - if (td->td_gd != gd) { - lwkt_hold(td); - lwkt_send_ipiq(td->td_gd, (ipifunc1_t)tsleep_wakeup_remote, td); - return; - } -#endif - _tsleep_remove(td); - if (td->td_flags & TDF_TSLEEP_DESCHEDULED) { - td->td_flags &= ~TDF_TSLEEP_DESCHEDULED; - lwkt_schedule(td); - } -} - -#ifdef SMP -static -void -tsleep_wakeup_remote(struct thread *td) -{ - _tsleep_wakeup(td); - lwkt_rele(td); -} -#endif - - /* * General sleep call. Suspends the current process until a wakeup is * performed on the specified identifier. 
The process will then be made @@ -559,12 +513,12 @@ tsleep(const volatile void *ident, int flags, const char *wmesg, int timo) } /* - * Setup for the current process (if this is a process). - * - * We hold the process token if lp && catch. The resume - * code will release it. + * Setup for the current process (if this is a process). We must + * interlock with lwp_token to avoid remote wakeup races via + * setrunnable() */ if (lp) { + lwkt_gettoken(&lp->lwp_token); if (catch) { /* * Early termination if PCATCH was set and a @@ -581,7 +535,7 @@ tsleep(const volatile void *ident, int flags, const char *wmesg, int timo) * Causes ksignal to wake us up if a signal is * received (interlocked with p->p_token). */ - lp->lwp_flag |= LWP_SINTR; + lp->lwp_flags |= LWP_SINTR; } } else { KKASSERT(p == NULL); @@ -641,7 +595,7 @@ tsleep(const volatile void *ident, int flags, const char *wmesg, int timo) /* * Ok, we are sleeping. Place us in the SSLEEP state. */ - KKASSERT((lp->lwp_flag & LWP_ONRUNQ) == 0); + KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0); /* * tstop() sets LSSTOP, so don't fiddle with that. */ @@ -649,7 +603,6 @@ tsleep(const volatile void *ident, int flags, const char *wmesg, int timo) lp->lwp_stat = LSSLEEP; lp->lwp_ru.ru_nvcsw++; lwkt_switch(); - td->td_flags &= ~TDF_TSLEEP_DESCHEDULED; /* * And when we are woken up, put us back in LSRUN. If we @@ -661,7 +614,6 @@ tsleep(const volatile void *ident, int flags, const char *wmesg, int timo) lp->lwp_slptime = 0; } else { lwkt_switch(); - td->td_flags &= ~TDF_TSLEEP_DESCHEDULED; } /* @@ -672,9 +624,17 @@ tsleep(const volatile void *ident, int flags, const char *wmesg, int timo) /* * Cleanup the timeout. If the timeout has already occured thandle - * has already been stopped, otherwise stop thandle. + * has already been stopped, otherwise stop thandle. If the timeout + * is running (the callout thread must be blocked trying to get + * lwp_token) then wait for us to get scheduled. */ if (timo) { + while (td->td_flags & TDF_TIMEOUT_RUNNING) { + lwkt_deschedule_self(td); + td->td_wmesg = "tsrace"; + lwkt_switch(); + kprintf("td %p %s: timeout race\n", td, td->td_comm); + } if (td->td_flags & TDF_TIMEOUT) { td->td_flags &= ~TDF_TIMEOUT; error = EWOULDBLOCK; @@ -683,6 +643,7 @@ tsleep(const volatile void *ident, int flags, const char *wmesg, int timo) callout_stop(&thandle); } } + td->td_flags &= ~TDF_TSLEEP_DESCHEDULED; /* * Make sure we have been removed from the sleepq. In most @@ -700,7 +661,7 @@ tsleep(const volatile void *ident, int flags, const char *wmesg, int timo) * signal we want to return EINTR or ERESTART. */ resume: - if (p) { + if (lp) { if (catch && error == 0) { if (sig != 0 || (sig = CURSIG(lp))) { if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig)) @@ -709,7 +670,8 @@ resume: error = ERESTART; } } - lp->lwp_flag &= ~(LWP_BREAKTSLEEP | LWP_SINTR); + lp->lwp_flags &= ~LWP_SINTR; + lwkt_reltoken(&lp->lwp_token); } logtsleep1(tsleep_end); crit_exit_quick(td); @@ -841,9 +803,6 @@ lwkt_sleep(const char *wmesg, int flags) /* * Implement the timeout for tsleep. * - * We set LWP_BREAKTSLEEP to indicate that an event has occured, but - * we only call setrunnable if the process is not stopped. - * * This type of callout timeout is scheduled on the same cpu the process * is sleeping on. Also, at the moment, the MP lock is held. */ @@ -853,41 +812,39 @@ endtsleep(void *arg) thread_t td = arg; struct lwp *lp; - KKASSERT(td->td_gd == mycpu); - crit_enter(); - /* - * Do this before we potentially block acquiring the token. 
Setting - * TDF_TIMEOUT tells tsleep that we have already stopped the callout. + * We are going to have to get the lwp_token, which means we might + * block. This can race a tsleep getting woken up by other means + * so set TDF_TIMEOUT_RUNNING to force the tsleep to wait for our + * processing to complete (sorry tsleep!). + * + * We can safely set td_flags because td MUST be on the same cpu + * as we are. */ - lwkt_hold(td); - td->td_flags |= TDF_TIMEOUT; + KKASSERT(td->td_gd == mycpu); + crit_enter(); + td->td_flags |= TDF_TIMEOUT_RUNNING | TDF_TIMEOUT; /* - * This can block + * This can block but TDF_TIMEOUT_RUNNING will prevent the thread + * from exiting the tsleep on us. The flag is interlocked by virtue + * of lp being on the same cpu as we are. */ if ((lp = td->td_lwp) != NULL) lwkt_gettoken(&lp->lwp_token); - /* - * Only do nominal wakeup processing if TDF_TIMEOUT and - * TDF_TSLEEP_DESCHEDULED are both still set. Otherwise - * we raced a wakeup or we began executing and raced due to - * blocking in the token above, and should do nothing. - */ - if ((td->td_flags & (TDF_TIMEOUT | TDF_TSLEEP_DESCHEDULED)) == - (TDF_TIMEOUT | TDF_TSLEEP_DESCHEDULED)) { - if (lp) { - lp->lwp_flag |= LWP_BREAKTSLEEP; - if (lp->lwp_proc->p_stat != SSTOP) - setrunnable(lp); - } else { - _tsleep_wakeup(td); - } - } - if (lp) + KKASSERT(td->td_flags & TDF_TSLEEP_DESCHEDULED); + + if (lp) { + if (lp->lwp_proc->p_stat != SSTOP) + setrunnable(lp); lwkt_reltoken(&lp->lwp_token); - lwkt_rele(td); + } else { + _tsleep_remove(td); + lwkt_schedule(td); + } + KKASSERT(td->td_gd == mycpu); + td->td_flags &= ~TDF_TIMEOUT_RUNNING; crit_exit(); } @@ -931,7 +888,6 @@ restart: KKASSERT(td->td_gd == gd); _tsleep_remove(td); if (td->td_flags & TDF_TSLEEP_DESCHEDULED) { - td->td_flags &= ~TDF_TSLEEP_DESCHEDULED; lwkt_schedule(td); if (domain & PWAKEUP_ONE) goto done; @@ -1071,22 +1027,27 @@ wakeup_domain_one(const volatile void *ident, int domain) /* * setrunnable() * - * Make a process runnable. lp->lwp_proc->p_token must be held on call. - * This only has an effect if we are in SSLEEP. We only break out of the - * tsleep if LWP_BREAKTSLEEP is set, otherwise we just fix-up the state. + * Make a process runnable. lp->lwp_token must be held on call and this + * function must be called from the cpu owning lp. * - * NOTE: With p_token held we can only safely manipulate the process - * structure and the lp's lwp_stat. + * This only has an effect if we are in LSSTOP or LSSLEEP. */ void setrunnable(struct lwp *lp) { + thread_t td = lp->lwp_thread; + ASSERT_LWKT_TOKEN_HELD(&lp->lwp_token); + KKASSERT(td->td_gd == mycpu); crit_enter(); if (lp->lwp_stat == LSSTOP) lp->lwp_stat = LSSLEEP; - if (lp->lwp_stat == LSSLEEP && (lp->lwp_flag & LWP_BREAKTSLEEP)) - _tsleep_wakeup(lp->lwp_thread); + if (lp->lwp_stat == LSSLEEP) { + _tsleep_remove(td); + lwkt_schedule(td); + } else if (td->td_flags & TDF_SINTR) { + lwkt_schedule(td); + } crit_exit(); } @@ -1094,15 +1055,15 @@ setrunnable(struct lwp *lp) * The process is stopped due to some condition, usually because p_stat is * set to SSTOP, but also possibly due to being traced. * + * Caller must hold p->p_token + * * NOTE! If the caller sets SSTOP, the caller must also clear P_WAITED * because the parent may check the child's status before the child actually * gets to this routine. * * This routine is called with the current lwp only, typically just - * before returning to userland. 
- * - * Setting LWP_BREAKTSLEEP before entering the tsleep will cause a passive - * SIGCONT to break out of the tsleep. + * before returning to userland if the process state is detected as + * possibly being in a stopped state. */ void tstop(void) @@ -1111,19 +1072,21 @@ tstop(void) struct proc *p = lp->lwp_proc; struct proc *q; + lwkt_gettoken(&lp->lwp_token); crit_enter(); + /* - * If LWP_WSTOP is set, we were sleeping + * If LWP_MP_WSTOP is set, we were sleeping * while our process was stopped. At this point * we were already counted as stopped. */ - if ((lp->lwp_flag & LWP_WSTOP) == 0) { + if ((lp->lwp_mpflags & LWP_MP_WSTOP) == 0) { /* * If we're the last thread to stop, signal * our parent. */ p->p_nstopped++; - lp->lwp_flag |= LWP_WSTOP; + atomic_set_int(&lp->lwp_mpflags, LWP_MP_WSTOP); wakeup(&p->p_nstopped); if (p->p_nstopped == p->p_nthreads) { /* @@ -1132,7 +1095,7 @@ tstop(void) q = p->p_pptr; PHOLD(q); lwkt_gettoken(&q->p_token); - p->p_flag &= ~P_WAITED; + p->p_flags &= ~P_WAITED; wakeup(p->p_pptr); if ((q->p_sigacts->ps_flag & PS_NOCLDSTOP) == 0) ksignal(q, SIGCHLD); @@ -1141,13 +1104,13 @@ tstop(void) } } while (p->p_stat == SSTOP) { - lp->lwp_flag |= LWP_BREAKTSLEEP; lp->lwp_stat = LSSTOP; tsleep(p, 0, "stop", 0); } p->p_nstopped--; - lp->lwp_flag &= ~LWP_WSTOP; + atomic_clear_int(&lp->lwp_mpflags, LWP_MP_WSTOP); crit_exit(); + lwkt_reltoken(&lp->lwp_token); } /* diff --git a/sys/kern/kern_threads.c b/sys/kern/kern_threads.c index 581cace982..c99f8e932b 100644 --- a/sys/kern/kern_threads.c +++ b/sys/kern/kern_threads.c @@ -111,9 +111,9 @@ sys_thr_sleep(struct thr_sleep_args *uap) uap->sysmsg_result = 0; if (p->p_wakeup == 0) { sleepstart = ticks; - lp->lwp_flag |= LWP_SINTR; + lp->lwp_flags |= LWP_SINTR; error = tsleep(p, 0, "thrslp", timo); - lp->lwp_flag &= ~LWP_SINTR; + lp->lwp_flags &= ~LWP_SINTR; if (error == EWOULDBLOCK) { p->p_wakeup = 0; uap->sysmsg_result = EAGAIN; diff --git a/sys/kern/kern_time.c b/sys/kern/kern_time.c index abbd5db2cd..3058bf043a 100644 --- a/sys/kern/kern_time.c +++ b/sys/kern/kern_time.c @@ -770,10 +770,10 @@ sys_setitimer(struct setitimer_args *uap) p->p_timer[uap->which] = aitv; switch(uap->which) { case ITIMER_VIRTUAL: - p->p_flag &= ~P_SIGVTALRM; + p->p_flags &= ~P_SIGVTALRM; break; case ITIMER_PROF: - p->p_flag &= ~P_SIGPROF; + p->p_flags &= ~P_SIGPROF; break; } } diff --git a/sys/kern/kern_timeout.c b/sys/kern/kern_timeout.c index 8eed720f19..728b9a900b 100644 --- a/sys/kern/kern_timeout.c +++ b/sys/kern/kern_timeout.c @@ -178,7 +178,7 @@ swi_softclock_setup(void *arg) * the cpu they were scheduled on. 
*/ lwkt_create(softclock_handler, sc, NULL, - &sc->thread, TDF_STOPREQ | TDF_INTTHREAD, + &sc->thread, TDF_NOSTART | TDF_INTTHREAD, cpu, "softclock %d", cpu); } } diff --git a/sys/kern/kern_upcall.c b/sys/kern/kern_upcall.c index ee53a25a90..623a124d15 100644 --- a/sys/kern/kern_upcall.c +++ b/sys/kern/kern_upcall.c @@ -154,8 +154,8 @@ sys_upc_control(struct upc_control_args *uap) vu->vu_pending = (int)(intptr_t)uap->data; error = 0; targlp = vu->vu_lwp; - targlp->lwp_proc->p_flag |= P_UPCALLPEND; /* XXX lwp flags */ - if (targlp->lwp_proc->p_flag & P_UPCALLWAIT) + targlp->lwp_proc->p_flags |= P_UPCALLPEND; /* XXX lwp flags */ + if (targlp->lwp_proc->p_flags & P_UPCALLWAIT) wakeup(&targlp->lwp_upcall); #ifdef SMP if (targlp->lwp_thread->td_gd != mycpu) @@ -252,9 +252,9 @@ sys_upc_control(struct upc_control_args *uap) } } if (uap->cmd == UPC_CONTROL_WAIT && vu == NULL) { - lp->lwp_proc->p_flag |= P_UPCALLWAIT; /* XXX lwp flags */ + lp->lwp_proc->p_flags |= P_UPCALLWAIT; /* XXX lwp flags */ tsleep(&lp->lwp_upcall, PCATCH, "wupcall", 0); - lp->lwp_proc->p_flag &= ~P_UPCALLWAIT; /* XXX lwp flags */ + lp->lwp_proc->p_flags &= ~P_UPCALLWAIT; /* XXX lwp flags */ } break; default: diff --git a/sys/kern/lwkt_thread.c b/sys/kern/lwkt_thread.c index 419a162fb5..d741f135d5 100644 --- a/sys/kern/lwkt_thread.c +++ b/sys/kern/lwkt_thread.c @@ -291,6 +291,7 @@ _lwkt_thread_ctor(void *obj, void *privdata, int ocflags) td->td_kstack = NULL; td->td_kstack_size = 0; td->td_flags = TDF_ALLOCATED_THREAD; + td->td_mpflags = 0; return (1); } @@ -336,7 +337,8 @@ lwkt_schedule_self(thread_t td) crit_enter_quick(td); KASSERT(td != &td->td_gd->gd_idlethread, ("lwkt_schedule_self(): scheduling gd_idlethread is illegal!")); - KKASSERT(td->td_lwp == NULL || (td->td_lwp->lwp_flag & LWP_ONRUNQ) == 0); + KKASSERT(td->td_lwp == NULL || + (td->td_lwp->lwp_mpflags & LWP_MP_ONRUNQ) == 0); _lwkt_enqueue(td); crit_exit_quick(td); } @@ -470,6 +472,7 @@ lwkt_init_thread(thread_t td, void *stack, int stksize, int flags, td->td_kstack = stack; td->td_kstack_size = stksize; td->td_flags = flags; + td->td_mpflags = 0; td->td_gd = gd; td->td_pri = TDPRI_KERN_DAEMON; td->td_critcount = 1; @@ -1268,7 +1271,9 @@ _lwkt_schedule(thread_t td) ("lwkt_schedule(): scheduling gd_idlethread is illegal!")); KKASSERT((td->td_flags & TDF_MIGRATING) == 0); crit_enter_gd(mygd); - KKASSERT(td->td_lwp == NULL || (td->td_lwp->lwp_flag & LWP_ONRUNQ) == 0); + KKASSERT(td->td_lwp == NULL || + (td->td_lwp->lwp_mpflags & LWP_MP_ONRUNQ) == 0); + if (td == mygd->gd_curthread) { _lwkt_enqueue(td); } else { @@ -1604,7 +1609,8 @@ lwkt_setcpu_remote(void *arg) cpu_mfence(); td->td_flags &= ~TDF_MIGRATING; KKASSERT(td->td_migrate_gd == NULL); - KKASSERT(td->td_lwp == NULL || (td->td_lwp->lwp_flag & LWP_ONRUNQ) == 0); + KKASSERT(td->td_lwp == NULL || + (td->td_lwp->lwp_mpflags & LWP_MP_ONRUNQ) == 0); _lwkt_enqueue(td); } #endif @@ -1649,10 +1655,10 @@ lwkt_create(void (*func)(void *), void *arg, struct thread **tdp, /* * Schedule the thread to run */ - if ((td->td_flags & TDF_STOPREQ) == 0) - lwkt_schedule(td); + if (td->td_flags & TDF_NOSTART) + td->td_flags &= ~TDF_NOSTART; else - td->td_flags &= ~TDF_STOPREQ; + lwkt_schedule(td); return 0; } diff --git a/sys/kern/subr_prf.c b/sys/kern/subr_prf.c index 3859b3f32a..1c4d17a9a5 100644 --- a/sys/kern/subr_prf.c +++ b/sys/kern/subr_prf.c @@ -143,8 +143,7 @@ uprintf(const char *fmt, ...) 
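The TDF_NOSTART rename in lwkt_create() above also straightens out the old
inverted TDF_STOPREQ logic: the flag is now consumed by lwkt_create() itself
instead of being cleared on the branch that did not schedule. Consumers follow
a create/configure/schedule idiom, as vfs_journal.c does further down;
schematically (worker_func, worker_arg and worker_td are placeholders, the
calls are as used elsewhere in this patch):

    /* create the thread descheduled so it can be configured first */
    lwkt_create(worker_func, worker_arg, NULL, &worker_td,
                TDF_NOSTART, -1, "worker");
    lwkt_setpri(&worker_td, TDPRI_KERN_DAEMON);  /* safe: not running yet */
    lwkt_schedule(&worker_td);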
struct putchar_arg pca; int retval = 0; - if (p && p->p_flag & P_CONTROLT && - p->p_session->s_ttyvp) { + if (p && (p->p_flags & P_CONTROLT) && p->p_session->s_ttyvp) { __va_start(ap, fmt); pca.tty = p->p_session->s_ttyp; pca.flags = TOTTY; @@ -158,7 +157,7 @@ uprintf(const char *fmt, ...) tpr_t tprintf_open(struct proc *p) { - if ((p->p_flag & P_CONTROLT) && p->p_session->s_ttyvp) { + if ((p->p_flags & P_CONTROLT) && p->p_session->s_ttyvp) { sess_hold(p->p_session); return ((tpr_t) p->p_session); } diff --git a/sys/kern/subr_prof.c b/sys/kern/subr_prof.c index dc9c956e86..0e72a90b8e 100644 --- a/sys/kern/subr_prof.c +++ b/sys/kern/subr_prof.c @@ -432,7 +432,7 @@ addupc_task(struct proc *p, u_long pc, u_int ticks) u_short v; /* Testing P_PROFIL may be unnecessary, but is certainly safe. */ - if ((p->p_flag & P_PROFIL) == 0 || ticks == 0) + if ((p->p_flags & P_PROFIL) == 0 || ticks == 0) return; prof = &p->p_prof; diff --git a/sys/kern/subr_taskqueue.c b/sys/kern/subr_taskqueue.c index 33a30f2db8..aced90443d 100644 --- a/sys/kern/subr_taskqueue.c +++ b/sys/kern/subr_taskqueue.c @@ -339,12 +339,12 @@ taskqueue_start_threads(struct taskqueue **tqp, int count, int pri, int ncpu, if (count == 1) { error = lwkt_create(taskqueue_thread_loop, tqp, &tq->tq_threads[i], NULL, - TDF_STOPREQ, cpu, + TDF_NOSTART, cpu, "%s", ktname); } else { error = lwkt_create(taskqueue_thread_loop, tqp, &tq->tq_threads[i], NULL, - TDF_STOPREQ, cpu, + TDF_NOSTART, cpu, "%s_%d", ktname, i); } if (error) { diff --git a/sys/kern/sys_generic.c b/sys/kern/sys_generic.c index 1d93e251d4..9e46223c6c 100644 --- a/sys/kern/sys_generic.c +++ b/sys/kern/sys_generic.c @@ -891,7 +891,7 @@ sys_pselect(struct pselect_args *uap) * us. So make a note to restore it after executing * the handler. */ - lp->lwp_flag |= LWP_OLDMASK; + lp->lwp_flags |= LWP_OLDMASK; } else { /* * No handler to run. Restore previous mask immediately. diff --git a/sys/kern/sys_process.c b/sys/kern/sys_process.c index bbef297bf2..cb45aca5bf 100644 --- a/sys/kern/sys_process.c +++ b/sys/kern/sys_process.c @@ -304,7 +304,7 @@ kern_ptrace(struct proc *curp, int req, pid_t pid, void *addr, lwkt_gettoken(&p->p_token); /* Can't trace a process that's currently exec'ing. */ - if ((p->p_flag & P_INEXEC) != 0) { + if ((p->p_flags & P_INEXEC) != 0) { lwkt_reltoken(&p->p_token); PRELE(p); lwkt_reltoken(&proc_token); @@ -329,14 +329,14 @@ kern_ptrace(struct proc *curp, int req, pid_t pid, void *addr, } /* Already traced */ - if (p->p_flag & P_TRACED) { + if (p->p_flags & P_TRACED) { lwkt_reltoken(&p->p_token); PRELE(p); lwkt_reltoken(&proc_token); return EBUSY; } - if (curp->p_flag & P_TRACED) + if (curp->p_flags & P_TRACED) for (pp = curp->p_pptr; pp != NULL; pp = pp->p_pptr) if (pp == p) { lwkt_reltoken(&p->p_token); @@ -347,7 +347,7 @@ kern_ptrace(struct proc *curp, int req, pid_t pid, void *addr, /* not owned by you, has done setuid (unless you're root) */ if ((p->p_ucred->cr_ruid != curp->p_ucred->cr_ruid) || - (p->p_flag & P_SUGID)) { + (p->p_flags & P_SUGID)) { if ((error = priv_check_cred(curp->p_ucred, PRIV_ROOT, 0)) != 0) { lwkt_reltoken(&p->p_token); PRELE(p); @@ -395,7 +395,7 @@ kern_ptrace(struct proc *curp, int req, pid_t pid, void *addr, case PT_SETDBREGS: #endif /* not being traced... 
*/ - if ((p->p_flag & P_TRACED) == 0) { + if ((p->p_flags & P_TRACED) == 0) { lwkt_reltoken(&p->p_token); PRELE(p); lwkt_reltoken(&proc_token); @@ -412,7 +412,7 @@ kern_ptrace(struct proc *curp, int req, pid_t pid, void *addr, /* not currently stopped */ if (p->p_stat != SSTOP || - (p->p_flag & P_WAITED) == 0) { + (p->p_flags & P_WAITED) == 0) { lwkt_reltoken(&p->p_token); PRELE(p); lwkt_reltoken(&proc_token); @@ -447,7 +447,7 @@ kern_ptrace(struct proc *curp, int req, pid_t pid, void *addr, switch (req) { case PT_TRACE_ME: /* set my trace flag and "owner" so it can read/write me */ - p->p_flag |= P_TRACED; + p->p_flags |= P_TRACED; p->p_oppid = p->p_pptr->p_pid; lwkt_reltoken(&p->p_token); PRELE(p); @@ -456,7 +456,7 @@ kern_ptrace(struct proc *curp, int req, pid_t pid, void *addr, case PT_ATTACH: /* security check done above */ - p->p_flag |= P_TRACED; + p->p_flags |= P_TRACED; p->p_oppid = p->p_pptr->p_pid; if (p->p_pptr != curp) proc_reparent(p, curp); @@ -509,7 +509,7 @@ kern_ptrace(struct proc *curp, int req, pid_t pid, void *addr, PRELE(pp); } - p->p_flag &= ~(P_TRACED | P_WAITED); + p->p_flags &= ~(P_TRACED | P_WAITED); p->p_oppid = 0; /* should we send SIGCHLD? */ @@ -523,7 +523,6 @@ kern_ptrace(struct proc *curp, int req, pid_t pid, void *addr, crit_enter(); if (p->p_stat == SSTOP) { p->p_xstat = data; - lp->lwp_flag |= LWP_BREAKTSLEEP; proc_unstop(p); } else if (data) { ksignal(p, data); diff --git a/sys/kern/tty.c b/sys/kern/tty.c index 1d67899c13..7ef2bc23ea 100644 --- a/sys/kern/tty.c +++ b/sys/kern/tty.c @@ -895,7 +895,7 @@ ttioctl(struct tty *tp, u_long cmd, void *data, int flag) case TIOCSETP: case TIOCSLTC: #endif - while (isbackground(p, tp) && !(p->p_flag & P_PPWAIT) && + while (isbackground(p, tp) && !(p->p_flags & P_PPWAIT) && !SIGISMEMBER(p->p_sigignore, SIGTTOU) && !SIGISMEMBER(lp->lwp_sigmask, SIGTTOU)) { if (p->p_pgrp->pg_jobc == 0) { @@ -1238,7 +1238,7 @@ ttioctl(struct tty *tp, u_long cmd, void *data, int flag) tp->t_pgrp = p->p_pgrp; otp = p->p_session->s_ttyp; p->p_session->s_ttyp = tp; - p->p_flag |= P_CONTROLT; + p->p_flags |= P_CONTROLT; if (otp) ttyunhold(otp); if (opgrp) { @@ -1795,7 +1795,7 @@ loop: crit_exit(); if (SIGISMEMBER(pp->p_sigignore, SIGTTIN) || SIGISMEMBER(lp->lwp_sigmask, SIGTTIN) || - (pp->p_flag & P_PPWAIT) || pp->p_pgrp->pg_jobc == 0) { + (pp->p_flags & P_PPWAIT) || pp->p_pgrp->pg_jobc == 0) { lwkt_reltoken(&proc_token); lwkt_reltoken(&tty_token); return (EIO); @@ -2124,7 +2124,7 @@ loop: */ lwkt_gettoken(&proc_token); if ((pp = curproc) && isbackground(pp, tp) && - ISSET(tp->t_lflag, TOSTOP) && !(pp->p_flag & P_PPWAIT) && + ISSET(tp->t_lflag, TOSTOP) && !(pp->p_flags & P_PPWAIT) && !SIGISMEMBER(pp->p_sigignore, SIGTTOU) && !SIGISMEMBER(lp->lwp_sigmask, SIGTTOU)) { if (pp->p_pgrp->pg_jobc == 0) { @@ -2642,7 +2642,7 @@ ttyinfo(struct tty *tp) * XXX lwp This is a horrible mixture. We need to rework this * as soon as lwps have their own runnable status. */ - if (pick->p_flag & P_WEXIT) + if (pick->p_flags & P_WEXIT) str = "exiting"; else if (lp->lwp_stat == LSRUN) str = "running"; @@ -2661,7 +2661,7 @@ ttyinfo(struct tty *tp) * 'pick' becomes invalid the moment we exit the critical * section. 
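A small but recurring cleanup rides along with the rename in this area: tests
like p->p_flag & P_CONTROLT && ... relied on & binding tighter than &&, which
is correct C but easy to misread, so the converted sites spell the grouping
out. Both forms below are equivalent; the second is what the patch
standardizes on (do_tty_output() is a hypothetical stand-in):

    if (p->p_flags & P_CONTROLT && p->p_session->s_ttyvp)      /* old style */
            do_tty_output();
    if ((p->p_flags & P_CONTROLT) && p->p_session->s_ttyvp)    /* new style */
            do_tty_output();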
*/ - if (lp->lwp_thread && (pick->p_flag & P_SWAPPEDOUT) == 0) + if (lp->lwp_thread && (pick->p_flags & P_SWAPPEDOUT) == 0) calcru_proc(pick, &ru); pctcpu = (lp->lwp_pctcpu * 10000 + FSCALE / 2) >> FSHIFT; @@ -2766,9 +2766,9 @@ proc_compare(struct proc *p1, struct proc *p2) /* * favor one sleeping in a non-interruptible sleep */ - if (lp1->lwp_flag & LWP_SINTR && (lp2->lwp_flag & LWP_SINTR) == 0) + if (lp1->lwp_flags & LWP_SINTR && (lp2->lwp_flags & LWP_SINTR) == 0) return (1); - if (lp2->lwp_flag & LWP_SINTR && (lp1->lwp_flag & LWP_SINTR) == 0) + if (lp2->lwp_flags & LWP_SINTR && (lp1->lwp_flags & LWP_SINTR) == 0) return (0); return (p2->p_pid > p1->p_pid); /* tie - return highest pid */ } diff --git a/sys/kern/tty_pty.c b/sys/kern/tty_pty.c index 99337f1c7d..b5e503c33c 100644 --- a/sys/kern/tty_pty.c +++ b/sys/kern/tty_pty.c @@ -490,7 +490,8 @@ again: while (isbackground(p, tp)) { if (SIGISMEMBER(p->p_sigignore, SIGTTIN) || SIGISMEMBER(lp->lwp_sigmask, SIGTTIN) || - p->p_pgrp->pg_jobc == 0 || p->p_flag & P_PPWAIT) { + p->p_pgrp->pg_jobc == 0 || + (p->p_flags & P_PPWAIT)) { lwkt_reltoken(&tty_token); return (EIO); } diff --git a/sys/kern/tty_tty.c b/sys/kern/tty_tty.c index 5f44928e66..f98f43cb3d 100644 --- a/sys/kern/tty_tty.c +++ b/sys/kern/tty_tty.c @@ -75,7 +75,8 @@ static struct dev_ops ctty_ops = { .d_kqfilter = cttykqfilter }; -#define cttyvp(p) ((p)->p_flag & P_CONTROLT ? (p)->p_session->s_ttyvp : NULL) +#define cttyvp(p) (((p)->p_flags & P_CONTROLT) ? \ + (p)->p_session->s_ttyvp : NULL) /* * This opens /dev/tty. Because multiple opens of /dev/tty only @@ -238,7 +239,7 @@ cttyioctl(struct dev_ioctl_args *ap) } if (ap->a_cmd == TIOCNOTTY) { if (!SESS_LEADER(p)) { - p->p_flag &= ~P_CONTROLT; + p->p_flags &= ~P_CONTROLT; lwkt_reltoken(&proc_token); lwkt_reltoken(&p->p_token); return (0); diff --git a/sys/kern/usched_bsd4.c b/sys/kern/usched_bsd4.c index 36bf827311..96e557af7f 100644 --- a/sys/kern/usched_bsd4.c +++ b/sys/kern/usched_bsd4.c @@ -227,11 +227,18 @@ bsd4_acquire_curproc(struct lwp *lp) { globaldata_t gd; bsd4_pcpu_t dd; + thread_t td; #if 0 struct lwp *olp; #endif - crit_enter(); + /* + * Make sure we aren't sitting on a tsleep queue. + */ + td = lp->lwp_thread; + crit_enter_quick(td); + if (td->td_flags & TDF_TSLEEPQ) + tsleep_remove(td); bsd4_recalculate_estcpu(lp); /* @@ -307,8 +314,8 @@ bsd4_acquire_curproc(struct lwp *lp) } } while (dd->uschedcp != lp); - crit_exit(); - KKASSERT((lp->lwp_flag & LWP_ONRUNQ) == 0); + crit_exit_quick(td); + KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0); } /* @@ -339,7 +346,7 @@ bsd4_release_curproc(struct lwp *lp) if (dd->uschedcp == lp) { crit_enter(); - KKASSERT((lp->lwp_flag & LWP_ONRUNQ) == 0); + KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0); dd->uschedcp = NULL; /* don't let lp be selected */ dd->upri = PRIBASE_NULL; atomic_clear_cpumask(&bsd4_curprocmask, gd->gd_cpumask); @@ -426,9 +433,9 @@ bsd4_setrunqueue(struct lwp *lp) */ crit_enter(); KASSERT(lp->lwp_stat == LSRUN, ("setrunqueue: lwp not LSRUN")); - KASSERT((lp->lwp_flag & LWP_ONRUNQ) == 0, + KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0, ("lwp %d/%d already on runq! 
flag %08x/%08x", lp->lwp_proc->p_pid, - lp->lwp_tid, lp->lwp_proc->p_flag, lp->lwp_flag)); + lp->lwp_tid, lp->lwp_proc->p_flags, lp->lwp_flags)); KKASSERT((lp->lwp_thread->td_flags & TDF_RUNQ) == 0); /* @@ -838,7 +845,7 @@ bsd4_resetpriority(struct lwp *lp) */ if ((lp->lwp_priority ^ newpriority) & ~PPQMASK) { lp->lwp_priority = newpriority; - if (lp->lwp_flag & LWP_ONRUNQ) { + if (lp->lwp_mpflags & LWP_MP_ONRUNQ) { bsd4_remrunqueue_locked(lp); lp->lwp_rqtype = newrqtype; lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ; @@ -1065,8 +1072,8 @@ again: --bsd4_runqcount; if (TAILQ_EMPTY(q)) *which &= ~(1 << pri); - KASSERT((lp->lwp_flag & LWP_ONRUNQ) != 0, ("not on runq6!")); - lp->lwp_flag &= ~LWP_ONRUNQ; + KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) != 0, ("not on runq6!")); + atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ); return lp; } @@ -1103,8 +1110,8 @@ bsd4_remrunqueue_locked(struct lwp *lp) u_int32_t *which; u_int8_t pri; - KKASSERT(lp->lwp_flag & LWP_ONRUNQ); - lp->lwp_flag &= ~LWP_ONRUNQ; + KKASSERT(lp->lwp_mpflags & LWP_MP_ONRUNQ); + atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ); --bsd4_runqcount; KKASSERT(bsd4_runqcount >= 0); @@ -1153,8 +1160,8 @@ bsd4_setrunqueue_locked(struct lwp *lp) u_int32_t *which; int pri; - KKASSERT((lp->lwp_flag & LWP_ONRUNQ) == 0); - lp->lwp_flag |= LWP_ONRUNQ; + KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0); + atomic_set_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ); ++bsd4_runqcount; pri = lp->lwp_rqindex; @@ -1334,7 +1341,7 @@ sched_thread_cpu_init(void) kprintf(" %d", i); lwkt_create(sched_thread, NULL, NULL, &dd->helper_thread, - TDF_STOPREQ, i, "usched %d", i); + TDF_NOSTART, i, "usched %d", i); /* * Allow user scheduling on the target cpu. cpu #0 has already diff --git a/sys/kern/usched_dummy.c b/sys/kern/usched_dummy.c index 1cecb235fa..4a1efb6d88 100644 --- a/sys/kern/usched_dummy.c +++ b/sys/kern/usched_dummy.c @@ -183,7 +183,7 @@ dummy_acquire_curproc(struct lwp *lp) crit_exit(); gd = mycpu; dd = &dummy_pcpu[gd->gd_cpuid]; - KKASSERT((lp->lwp_flag & LWP_ONRUNQ) == 0); + KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0); } while (dd->uschedcp != lp); } @@ -205,7 +205,7 @@ dummy_release_curproc(struct lwp *lp) globaldata_t gd = mycpu; dummy_pcpu_t dd = &dummy_pcpu[gd->gd_cpuid]; - KKASSERT((lp->lwp_flag & LWP_ONRUNQ) == 0); + KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0); if (dd->uschedcp == lp) { dummy_select_curproc(gd); } @@ -240,7 +240,7 @@ dummy_select_curproc(globaldata_t gd) } else { --dummy_runqcount; TAILQ_REMOVE(&dummy_runq, lp, lwp_procq); - lp->lwp_flag &= ~LWP_ONRUNQ; + atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ); dd->uschedcp = lp; atomic_set_cpumask(&dummy_curprocmask, gd->gd_cpumask); spin_unlock(&dummy_spin); @@ -259,7 +259,7 @@ dummy_select_curproc(globaldata_t gd) * the current process on the userland scheduler's run queue prior * to calling dummy_select_curproc(). * - * The caller may set LWP_PASSIVE_ACQ in lwp_flag to indicate that we should + * The caller may set LWP_PASSIVE_ACQ in lwp_flags to indicate that we should * attempt to leave the thread on the current cpu. 
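The ONRUNQ conversion in the scheduler above is the clearest motivation for
the new lwp_mpflags word: the userland scheduler may queue or dequeue an lwp
from a cpu that does not own it, so plain |= and &= on a shared flag word
would be a lost-update race. The resulting discipline is simple, and the
calls are the ones used throughout this patch: writes always go through the
atomic ops, while plain reads remain acceptable for tests and assertions.

    /* modification: always atomic, the field is touched cross-cpu */
    atomic_set_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
    atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);

    /* read: a plain test is fine, e.g. in KKASSERT()s */
    KKASSERT(lp->lwp_mpflags & LWP_MP_ONRUNQ);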
* * MPSAFE @@ -280,11 +280,11 @@ dummy_setrunqueue(struct lwp *lp) /* * Add to our global runq */ - KKASSERT((lp->lwp_flag & LWP_ONRUNQ) == 0); + KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0); spin_lock(&dummy_spin); ++dummy_runqcount; TAILQ_INSERT_TAIL(&dummy_runq, lp, lwp_procq); - lp->lwp_flag |= LWP_ONRUNQ; + atomic_set_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ); #ifdef SMP lwkt_giveaway(lp->lwp_thread); #endif @@ -493,7 +493,7 @@ dummy_sched_thread(void *dummy) } else if ((lp = TAILQ_FIRST(&dummy_runq)) != NULL) { --dummy_runqcount; TAILQ_REMOVE(&dummy_runq, lp, lwp_procq); - lp->lwp_flag &= ~LWP_ONRUNQ; + atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ); dd->uschedcp = lp; atomic_set_cpumask(&dummy_curprocmask, cpumask); spin_unlock(&dummy_spin); @@ -531,7 +531,7 @@ dummy_sched_thread_cpu_init(void) kprintf(" %d", i); lwkt_create(dummy_sched_thread, NULL, NULL, &dd->helper_thread, - TDF_STOPREQ, i, "dsched %d", i); + TDF_NOSTART, i, "dsched %d", i); /* * Allow user scheduling on the target cpu. cpu #0 has already diff --git a/sys/kern/vfs_journal.c b/sys/kern/vfs_journal.c index 99d3456799..a8083d7665 100644 --- a/sys/kern/vfs_journal.c +++ b/sys/kern/vfs_journal.c @@ -120,7 +120,7 @@ journal_create_threads(struct journal *jo) jo->flags &= ~(MC_JOURNAL_STOP_REQ | MC_JOURNAL_STOP_IMM); jo->flags |= MC_JOURNAL_WACTIVE; lwkt_create(journal_wthread, jo, NULL, &jo->wthread, - TDF_STOPREQ, -1, + TDF_NOSTART, -1, "journal w:%.*s", JIDMAX, jo->id); lwkt_setpri(&jo->wthread, TDPRI_KERN_DAEMON); lwkt_schedule(&jo->wthread); @@ -128,7 +128,7 @@ journal_create_threads(struct journal *jo) if (jo->flags & MC_JOURNAL_WANT_FULLDUPLEX) { jo->flags |= MC_JOURNAL_RACTIVE; lwkt_create(journal_rthread, jo, NULL, &jo->rthread, - TDF_STOPREQ, -1, + TDF_NOSTART, -1, "journal r:%.*s", JIDMAX, jo->id); lwkt_setpri(&jo->rthread, TDPRI_KERN_DAEMON); lwkt_schedule(&jo->rthread); diff --git a/sys/net/if.c b/sys/net/if.c index 9619eb557a..bb1bcf5685 100644 --- a/sys/net/if.c +++ b/sys/net/if.c @@ -2753,7 +2753,7 @@ ifnetinit(void *dummy __unused) struct thread *thr = &ifnet_threads[i]; lwkt_create(ifnet_service_loop, NULL, NULL, - thr, TDF_STOPREQ|TDF_FORCE_SPINPORT, + thr, TDF_NOSTART|TDF_FORCE_SPINPORT, i, "ifnet %d", i); netmsg_service_port_init(&thr->td_msgport); lwkt_schedule(thr); diff --git a/sys/net/netisr.c b/sys/net/netisr.c index 945ebef679..ab3f7eba8e 100644 --- a/sys/net/netisr.c +++ b/sys/net/netisr.c @@ -181,7 +181,7 @@ netisr_init(void) */ for (i = 0; i < ncpus; ++i) { lwkt_create(netmsg_service_loop, NULL, NULL, - &netisr_cpu[i], TDF_STOPREQ|TDF_FORCE_SPINPORT, + &netisr_cpu[i], TDF_NOSTART|TDF_FORCE_SPINPORT, i, "netisr_cpu %d", i); netmsg_service_port_init(&netisr_cpu[i].td_msgport); lwkt_schedule(&netisr_cpu[i]); diff --git a/sys/netproto/smb/smb_subr.c b/sys/netproto/smb/smb_subr.c index f36c7b82cc..5c9f39bd73 100644 --- a/sys/netproto/smb/smb_subr.c +++ b/sys/netproto/smb/smb_subr.c @@ -386,7 +386,7 @@ smb_kthread_create(void (*func)(void *), void *arg, *newpp = p2; /* this is a non-swapped system process */ - p2->p_flag |= P_SYSTEM; + p2->p_flags |= P_SYSTEM; p2->p_sigacts->ps_flag |= PS_NOCLDWAIT; lp2 = ONLY_LWP_IN_PROC(p2); diff --git a/sys/platform/pc32/i386/db_trace.c b/sys/platform/pc32/i386/db_trace.c index 7dbdf60f6f..18a484bc2d 100644 --- a/sys/platform/pc32/i386/db_trace.c +++ b/sys/platform/pc32/i386/db_trace.c @@ -297,7 +297,7 @@ db_stack_trace_cmd(db_expr_t addr, boolean_t have_addr, db_expr_t count, db_printf("pid %d not found\n", pid); return; } - if ((p->p_flag & P_SWAPPEDOUT)) 
{ + if ((p->p_flags & P_SWAPPEDOUT)) { db_printf("pid %d swapped out\n", pid); return; } diff --git a/sys/platform/pc32/i386/machdep.c b/sys/platform/pc32/i386/machdep.c index 5297aac57f..cb866eb145 100644 --- a/sys/platform/pc32/i386/machdep.c +++ b/sys/platform/pc32/i386/machdep.c @@ -424,7 +424,7 @@ sendsig(sig_t catcher, int sig, sigset_t *mask, u_long code) sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext); /* Allocate and validate space for the signal handler context. */ - if ((lp->lwp_flag & LWP_ALTSTACK) != 0 && !oonstack && + if ((lp->lwp_flags & LWP_ALTSTACK) != 0 && !oonstack && SIGISMEMBER(psp->ps_sigonstack, sig)) { sfp = (struct sigframe *)(lp->lwp_sigstk.ss_sp + lp->lwp_sigstk.ss_size - sizeof(struct sigframe)); diff --git a/sys/platform/pc32/i386/trap.c b/sys/platform/pc32/i386/trap.c index 4676fbdf7c..35d1e2450c 100644 --- a/sys/platform/pc32/i386/trap.c +++ b/sys/platform/pc32/i386/trap.c @@ -244,7 +244,7 @@ userret(struct lwp *lp, struct trapframe *frame, int sticks) * This may do a copyout and block, so do it first even though it * means some system time will be charged as user time. */ - if (p->p_flag & P_PROFIL) { + if (p->p_flags & P_PROFIL) { addupc_task(p, frame->tf_eip, (u_int)((int)lp->lwp_thread->td_sticks - sticks)); } @@ -253,7 +253,7 @@ recheck: /* * If the jungle wants us dead, so be it. */ - if (lp->lwp_flag & LWP_WEXIT) { + if (lp->lwp_mpflags & LWP_MP_WEXIT) { lwkt_gettoken(&p->p_token); lwp_exit(0); lwkt_reltoken(&p->p_token); /* NOT REACHED */ @@ -263,9 +263,9 @@ recheck: * Block here if we are in a stopped state. */ if (p->p_stat == SSTOP || dump_stop_usertds) { - get_mplock(); + lwkt_gettoken(&p->p_token); tstop(); - rel_mplock(); + lwkt_reltoken(&p->p_token); goto recheck; } @@ -273,18 +273,18 @@ recheck: * Post any pending upcalls. If running a virtual kernel be sure * to restore the virtual kernel's vmspace before posting the upcall. */ - if (p->p_flag & (P_SIGVTALRM | P_SIGPROF | P_UPCALLPEND)) { + if (p->p_flags & (P_SIGVTALRM | P_SIGPROF | P_UPCALLPEND)) { lwkt_gettoken(&p->p_token); - if (p->p_flag & P_SIGVTALRM) { - p->p_flag &= ~P_SIGVTALRM; + if (p->p_flags & P_SIGVTALRM) { + p->p_flags &= ~P_SIGVTALRM; ksignal(p, SIGVTALRM); } - if (p->p_flag & P_SIGPROF) { - p->p_flag &= ~P_SIGPROF; + if (p->p_flags & P_SIGPROF) { + p->p_flags &= ~P_SIGPROF; ksignal(p, SIGPROF); } - if (p->p_flag & P_UPCALLPEND) { - p->p_flag &= ~P_UPCALLPEND; + if (p->p_flags & P_UPCALLPEND) { + p->p_flags &= ~P_UPCALLPEND; postupcall(lp); } lwkt_reltoken(&p->p_token); @@ -309,14 +309,14 @@ recheck: * (such as SIGKILL). proc0 (the swapin scheduler) is already * aware of our situation, we do not have to wake it up. */ - if (p->p_flag & P_SWAPPEDOUT) { + if (p->p_flags & P_SWAPPEDOUT) { lwkt_gettoken(&p->p_token); get_mplock(); - p->p_flag |= P_SWAPWAIT; + p->p_flags |= P_SWAPWAIT; swapin_request(); - if (p->p_flag & P_SWAPWAIT) + if (p->p_flags & P_SWAPWAIT) tsleep(p, PCATCH, "SWOUT", 0); - p->p_flag &= ~P_SWAPWAIT; + p->p_flags &= ~P_SWAPWAIT; rel_mplock(); lwkt_reltoken(&p->p_token); goto recheck; @@ -326,7 +326,7 @@ recheck: * Make sure postsig() handled request to restore old signal mask after * running signal handler. */ - KKASSERT((lp->lwp_flag & LWP_OLDMASK) == 0); + KKASSERT((lp->lwp_flags & LWP_OLDMASK) == 0); } /* @@ -345,9 +345,9 @@ userexit(struct lwp *lp) * after this loop will generate another AST. 
*/ while (lp->lwp_proc->p_stat == SSTOP) { - get_mplock(); + lwkt_gettoken(&lp->lwp_proc->p_token); tstop(); - rel_mplock(); + lwkt_reltoken(&lp->lwp_proc->p_token); } /* @@ -1466,9 +1466,9 @@ generic_lwp_return(struct lwp *lp, struct trapframe *frame) if (KTRPOINT(lp->lwp_thread, KTR_SYSRET)) ktrsysret(lp, SYS_fork, 0, 0); #endif - lp->lwp_flag |= LWP_PASSIVE_ACQ; + lp->lwp_flags |= LWP_PASSIVE_ACQ; userexit(lp); - lp->lwp_flag &= ~LWP_PASSIVE_ACQ; + lp->lwp_flags &= ~LWP_PASSIVE_ACQ; } /* diff --git a/sys/platform/pc64/x86_64/machdep.c b/sys/platform/pc64/x86_64/machdep.c index b765c5a73a..adceb132ff 100644 --- a/sys/platform/pc64/x86_64/machdep.c +++ b/sys/platform/pc64/x86_64/machdep.c @@ -448,7 +448,7 @@ sendsig(sig_t catcher, int sig, sigset_t *mask, u_long code) sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext); /* Allocate and validate space for the signal handler context. */ - if ((lp->lwp_flag & LWP_ALTSTACK) != 0 && !oonstack && + if ((lp->lwp_flags & LWP_ALTSTACK) != 0 && !oonstack && SIGISMEMBER(psp->ps_sigonstack, sig)) { sp = (char *)(lp->lwp_sigstk.ss_sp + lp->lwp_sigstk.ss_size - sizeof(struct sigframe)); diff --git a/sys/platform/pc64/x86_64/trap.c b/sys/platform/pc64/x86_64/trap.c index f45271262c..3a936d10bb 100644 --- a/sys/platform/pc64/x86_64/trap.c +++ b/sys/platform/pc64/x86_64/trap.c @@ -200,6 +200,15 @@ userenter(struct thread *curtd, struct proc *curp) if (ocred) crfree(ocred); } + + /* + * Debugging, remove top two user stack pages to catch kernel faults + */ + if (freeze_on_seg_fault > 1 && curtd->td_lwp) { + pmap_remove(vmspace_pmap(curtd->td_lwp->lwp_vmspace), + 0x00007FFFFFFFD000LU, + 0x0000800000000000LU); + } } /* @@ -221,7 +230,7 @@ userret(struct lwp *lp, struct trapframe *frame, int sticks) * This may do a copyout and block, so do it first even though it * means some system time will be charged as user time. */ - if (p->p_flag & P_PROFIL) { + if (p->p_flags & P_PROFIL) { addupc_task(p, frame->tf_rip, (u_int)((int)lp->lwp_thread->td_sticks - sticks)); } @@ -230,7 +239,7 @@ recheck: /* * If the jungle wants us dead, so be it. */ - if (lp->lwp_flag & LWP_WEXIT) { + if (lp->lwp_mpflags & LWP_MP_WEXIT) { lwkt_gettoken(&p->p_token); lwp_exit(0); lwkt_reltoken(&p->p_token); /* NOT REACHED */ @@ -240,9 +249,9 @@ recheck: * Block here if we are in a stopped state. */ if (p->p_stat == SSTOP || dump_stop_usertds) { - get_mplock(); + lwkt_gettoken(&p->p_token); tstop(); - rel_mplock(); + lwkt_reltoken(&p->p_token); goto recheck; } @@ -250,18 +259,18 @@ recheck: * Post any pending upcalls. If running a virtual kernel be sure * to restore the virtual kernel's vmspace before posting the upcall. */ - if (p->p_flag & (P_SIGVTALRM | P_SIGPROF | P_UPCALLPEND)) { + if (p->p_flags & (P_SIGVTALRM | P_SIGPROF | P_UPCALLPEND)) { lwkt_gettoken(&p->p_token); - if (p->p_flag & P_SIGVTALRM) { - p->p_flag &= ~P_SIGVTALRM; + if (p->p_flags & P_SIGVTALRM) { + p->p_flags &= ~P_SIGVTALRM; ksignal(p, SIGVTALRM); } - if (p->p_flag & P_SIGPROF) { - p->p_flag &= ~P_SIGPROF; + if (p->p_flags & P_SIGPROF) { + p->p_flags &= ~P_SIGPROF; ksignal(p, SIGPROF); } - if (p->p_flag & P_UPCALLPEND) { - p->p_flag &= ~P_UPCALLPEND; + if (p->p_flags & P_UPCALLPEND) { + p->p_flags &= ~P_UPCALLPEND; postupcall(lp); } lwkt_reltoken(&p->p_token); @@ -286,14 +295,14 @@ recheck: * (such as SIGKILL). proc0 (the swapin scheduler) is already * aware of our situation, we do not have to wake it up. 
*/ - if (p->p_flag & P_SWAPPEDOUT) { + if (p->p_flags & P_SWAPPEDOUT) { lwkt_gettoken(&p->p_token); get_mplock(); - p->p_flag |= P_SWAPWAIT; + p->p_flags |= P_SWAPWAIT; swapin_request(); - if (p->p_flag & P_SWAPWAIT) + if (p->p_flags & P_SWAPWAIT) tsleep(p, PCATCH, "SWOUT", 0); - p->p_flag &= ~P_SWAPWAIT; + p->p_flags &= ~P_SWAPWAIT; rel_mplock(); lwkt_reltoken(&p->p_token); goto recheck; @@ -303,7 +312,7 @@ recheck: * Make sure postsig() handled request to restore old signal mask after * running signal handler. */ - KKASSERT((lp->lwp_flag & LWP_OLDMASK) == 0); + KKASSERT((lp->lwp_flags & LWP_OLDMASK) == 0); } /* @@ -322,9 +331,9 @@ userexit(struct lwp *lp) * after this loop will generate another AST. */ while (lp->lwp_proc->p_stat == SSTOP) { - get_mplock(); + lwkt_gettoken(&lp->lwp_proc->p_token); tstop(); - rel_mplock(); + lwkt_reltoken(&lp->lwp_proc->p_token); } /* @@ -849,6 +858,19 @@ trap_pfault(struct trapframe *frame, int usermode) goto nogo; } + /* + * Debugging, try to catch kernel faults on the user address space when not inside + * on onfault (e.g. copyin/copyout) routine. + */ + if (usermode == 0 && (td->td_pcb == NULL || td->td_pcb->pcb_onfault == NULL)) { + if (freeze_on_seg_fault) { + kprintf("trap_pfault: user address fault from kernel mode " + "%016lx\n", (long)frame->tf_addr); + while (freeze_on_seg_fault) { + tsleep(&freeze_on_seg_fault, 0, "frzseg", hz * 20); + } + } + } map = &vm->vm_map; } @@ -1365,9 +1387,9 @@ generic_lwp_return(struct lwp *lp, struct trapframe *frame) if (KTRPOINT(lp->lwp_thread, KTR_SYSRET)) ktrsysret(lp, SYS_fork, 0, 0); #endif - lp->lwp_flag |= LWP_PASSIVE_ACQ; + lp->lwp_flags |= LWP_PASSIVE_ACQ; userexit(lp); - lp->lwp_flag &= ~LWP_PASSIVE_ACQ; + lp->lwp_flags &= ~LWP_PASSIVE_ACQ; } /* diff --git a/sys/platform/vkernel/i386/cpu_regs.c b/sys/platform/vkernel/i386/cpu_regs.c index 9b6443da47..10aceb017b 100644 --- a/sys/platform/vkernel/i386/cpu_regs.c +++ b/sys/platform/vkernel/i386/cpu_regs.c @@ -236,7 +236,7 @@ sendsig(sig_t catcher, int sig, sigset_t *mask, u_long code) sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext); /* Allocate and validate space for the signal handler context. */ - if ((lp->lwp_flag & LWP_ALTSTACK) != 0 && !oonstack && + if ((lp->lwp_flags & LWP_ALTSTACK) != 0 && !oonstack && SIGISMEMBER(psp->ps_sigonstack, sig)) { sfp = (struct sigframe *)(lp->lwp_sigstk.ss_sp + lp->lwp_sigstk.ss_size - sizeof(struct sigframe)); diff --git a/sys/platform/vkernel/i386/trap.c b/sys/platform/vkernel/i386/trap.c index 9af511073f..1a614a6247 100644 --- a/sys/platform/vkernel/i386/trap.c +++ b/sys/platform/vkernel/i386/trap.c @@ -218,7 +218,7 @@ userret(struct lwp *lp, struct trapframe *frame, int sticks) * This may do a copyout and block, so do it first even though it * means some system time will be charged as user time. */ - if (p->p_flag & P_PROFIL) { + if (p->p_flags & P_PROFIL) { addupc_task(p, frame->tf_eip, (u_int)((int)lp->lwp_thread->td_sticks - sticks)); } @@ -227,7 +227,7 @@ recheck: /* * If the jungle wants us dead, so be it. */ - if (lp->lwp_flag & LWP_WEXIT) { + if (lp->lwp_mpflags & LWP_MP_WEXIT) { lwkt_gettoken(&p->p_token); lwp_exit(0); lwkt_reltoken(&p->p_token); /* NOT REACHED */ @@ -237,9 +237,9 @@ recheck: * Block here if we are in a stopped state. */ if (p->p_stat == SSTOP) { - get_mplock(); + lwkt_gettoken(&p->p_token); tstop(); - rel_mplock(); + lwkt_reltoken(&p->p_token); goto recheck; } @@ -247,18 +247,18 @@ recheck: * Post any pending upcalls. 
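The freeze hooks added to the pc64 trap code earlier in this patch are a
debugging aid, default disabled: when the knob is set, a kernel-mode fault on
a user address parks the thread in a tsleep loop instead of panicking, so the
machine stays inspectable. The general shape of such a knob is sketched below;
where the sysctl actually lives is not shown in this diff, so the declaration
here is only an assumption:

    static int freeze_on_seg_fault = 0;     /* default disabled */
    SYSCTL_INT(_debug, OID_AUTO, freeze_on_seg_fault, CTLFLAG_RW,
               &freeze_on_seg_fault, 0, "freeze faulting threads");

            /* in the fault path: park until an operator clears the knob */
            while (freeze_on_seg_fault)
                    tsleep(&freeze_on_seg_fault, 0, "frzseg", hz * 20);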
If running a virtual kernel be sure * to restore the virtual kernel's vmspace before posting the upcall. */ - if (p->p_flag & (P_SIGVTALRM | P_SIGPROF | P_UPCALLPEND)) { + if (p->p_flags & (P_SIGVTALRM | P_SIGPROF | P_UPCALLPEND)) { lwkt_gettoken(&p->p_token); - if (p->p_flag & P_SIGVTALRM) { - p->p_flag &= ~P_SIGVTALRM; + if (p->p_flags & P_SIGVTALRM) { + p->p_flags &= ~P_SIGVTALRM; ksignal(p, SIGVTALRM); } - if (p->p_flag & P_SIGPROF) { - p->p_flag &= ~P_SIGPROF; + if (p->p_flags & P_SIGPROF) { + p->p_flags &= ~P_SIGPROF; ksignal(p, SIGPROF); } - if (p->p_flag & P_UPCALLPEND) { - p->p_flag &= ~P_UPCALLPEND; + if (p->p_flags & P_UPCALLPEND) { + p->p_flags &= ~P_UPCALLPEND; postupcall(lp); } lwkt_reltoken(&p->p_token); @@ -282,14 +282,14 @@ recheck: * (such as SIGKILL). proc0 (the swapin scheduler) is already * aware of our situation, we do not have to wake it up. */ - if (p->p_flag & P_SWAPPEDOUT) { + if (p->p_flags & P_SWAPPEDOUT) { lwkt_gettoken(&p->p_token); get_mplock(); - p->p_flag |= P_SWAPWAIT; + p->p_flags |= P_SWAPWAIT; swapin_request(); - if (p->p_flag & P_SWAPWAIT) + if (p->p_flags & P_SWAPWAIT) tsleep(p, PCATCH, "SWOUT", 0); - p->p_flag &= ~P_SWAPWAIT; + p->p_flags &= ~P_SWAPWAIT; rel_mplock(); lwkt_reltoken(&p->p_token); goto recheck; @@ -299,7 +299,7 @@ recheck: * Make sure postsig() handled request to restore old signal mask after * running signal handler. */ - KKASSERT((lp->lwp_flag & LWP_OLDMASK) == 0); + KKASSERT((lp->lwp_flags & LWP_OLDMASK) == 0); } /* @@ -318,9 +318,9 @@ userexit(struct lwp *lp) * after this loop will generate another AST. */ while (lp->lwp_proc->p_stat == SSTOP) { - get_mplock(); + lwkt_gettoken(&lp->lwp_proc->p_token); tstop(); - rel_mplock(); + lwkt_reltoken(&lp->lwp_proc->p_token); } /* @@ -1342,9 +1342,9 @@ generic_lwp_return(struct lwp *lp, struct trapframe *frame) if (KTRPOINT(lp->lwp_thread, KTR_SYSRET)) ktrsysret(lp, SYS_fork, 0, 0); #endif - lp->lwp_flag |= LWP_PASSIVE_ACQ; + lp->lwp_flags |= LWP_PASSIVE_ACQ; userexit(lp); - lp->lwp_flag &= ~LWP_PASSIVE_ACQ; + lp->lwp_flags &= ~LWP_PASSIVE_ACQ; } /* diff --git a/sys/platform/vkernel64/x86_64/cpu_regs.c b/sys/platform/vkernel64/x86_64/cpu_regs.c index 426dccfbcf..a517088d44 100644 --- a/sys/platform/vkernel64/x86_64/cpu_regs.c +++ b/sys/platform/vkernel64/x86_64/cpu_regs.c @@ -237,7 +237,7 @@ sendsig(sig_t catcher, int sig, sigset_t *mask, u_long code) sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext); /* Allocate and validate space for the signal handler context. */ - if ((lp->lwp_flag & LWP_ALTSTACK) != 0 && !oonstack && + if ((lp->lwp_flags & LWP_ALTSTACK) != 0 && !oonstack && SIGISMEMBER(psp->ps_sigonstack, sig)) { sp = (char *)(lp->lwp_sigstk.ss_sp + lp->lwp_sigstk.ss_size - sizeof(struct sigframe)); diff --git a/sys/platform/vkernel64/x86_64/trap.c b/sys/platform/vkernel64/x86_64/trap.c index c7ffa5f46a..e5b31cb033 100644 --- a/sys/platform/vkernel64/x86_64/trap.c +++ b/sys/platform/vkernel64/x86_64/trap.c @@ -218,7 +218,7 @@ userret(struct lwp *lp, struct trapframe *frame, int sticks) * This may do a copyout and block, so do it first even though it * means some system time will be charged as user time. */ - if (p->p_flag & P_PROFIL) { + if (p->p_flags & P_PROFIL) { addupc_task(p, frame->tf_rip, (u_int)((int)lp->lwp_thread->td_sticks - sticks)); } @@ -227,7 +227,7 @@ recheck: /* * If the jungle wants us dead, so be it. 
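Why LWP_MP_WEXIT, tested in all of these userret() variants, had to leave the
plain flags word: a non-atomic read-modify-write from two cpus can silently
drop a bit, and an exit request is exactly the bit you cannot afford to drop.
A self-contained C11 illustration of the failure mode, with made-up flag
values:

    #include <stdatomic.h>

    #define FLAG_A  0x1             /* e.g. a cross-cpu exit request */
    #define FLAG_B  0x2             /* e.g. an unrelated state bit */

    static unsigned int plain_flags;        /* racy if shared between cpus */
    static _Atomic unsigned int mp_flags;   /* safe between cpus */

    /* plain |= compiles to load/or/store; two concurrent callers can */
    /* interleave so that one of the two bits is silently lost        */
    static void set_a_racy(void) { plain_flags |= FLAG_A; }
    static void set_b_racy(void) { plain_flags |= FLAG_B; }

    /* the atomic read-modify-write cannot lose either bit */
    static void set_a_safe(void) { atomic_fetch_or(&mp_flags, FLAG_A); }
    static void set_b_safe(void) { atomic_fetch_or(&mp_flags, FLAG_B); }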
*/ - if (lp->lwp_flag & LWP_WEXIT) { + if (lp->lwp_mpflags & LWP_MP_WEXIT) { lwkt_gettoken(&p->p_token); lwp_exit(0); lwkt_reltoken(&p->p_token); /* NOT REACHED */ @@ -237,9 +237,9 @@ recheck: * Block here if we are in a stopped state. */ if (p->p_stat == SSTOP) { - get_mplock(); + lwkt_gettoken(&p->p_token); tstop(); - rel_mplock(); + lwkt_reltoken(&p->p_token); goto recheck; } @@ -247,18 +247,18 @@ recheck: * Post any pending upcalls. If running a virtual kernel be sure * to restore the virtual kernel's vmspace before posting the upcall. */ - if (p->p_flag & (P_SIGVTALRM | P_SIGPROF | P_UPCALLPEND)) { + if (p->p_flags & (P_SIGVTALRM | P_SIGPROF | P_UPCALLPEND)) { lwkt_gettoken(&p->p_token); - if (p->p_flag & P_SIGVTALRM) { - p->p_flag &= ~P_SIGVTALRM; + if (p->p_flags & P_SIGVTALRM) { + p->p_flags &= ~P_SIGVTALRM; ksignal(p, SIGVTALRM); } - if (p->p_flag & P_SIGPROF) { - p->p_flag &= ~P_SIGPROF; + if (p->p_flags & P_SIGPROF) { + p->p_flags &= ~P_SIGPROF; ksignal(p, SIGPROF); } - if (p->p_flag & P_UPCALLPEND) { - p->p_flag &= ~P_UPCALLPEND; + if (p->p_flags & P_UPCALLPEND) { + p->p_flags &= ~P_UPCALLPEND; postupcall(lp); } lwkt_reltoken(&p->p_token); @@ -282,14 +282,14 @@ recheck: * (such as SIGKILL). proc0 (the swapin scheduler) is already * aware of our situation, we do not have to wake it up. */ - if (p->p_flag & P_SWAPPEDOUT) { + if (p->p_flags & P_SWAPPEDOUT) { lwkt_gettoken(&p->p_token); get_mplock(); - p->p_flag |= P_SWAPWAIT; + p->p_flags |= P_SWAPWAIT; swapin_request(); - if (p->p_flag & P_SWAPWAIT) + if (p->p_flags & P_SWAPWAIT) tsleep(p, PCATCH, "SWOUT", 0); - p->p_flag &= ~P_SWAPWAIT; + p->p_flags &= ~P_SWAPWAIT; rel_mplock(); lwkt_reltoken(&p->p_token); goto recheck; @@ -299,7 +299,7 @@ recheck: * Make sure postsig() handled request to restore old signal mask after * running signal handler. */ - KKASSERT((lp->lwp_flag & LWP_OLDMASK) == 0); + KKASSERT((lp->lwp_flags & LWP_OLDMASK) == 0); } /* @@ -318,9 +318,9 @@ userexit(struct lwp *lp) * after this loop will generate another AST. */ while (lp->lwp_proc->p_stat == SSTOP) { - get_mplock(); + lwkt_gettoken(&lp->lwp_proc->p_token); tstop(); - rel_mplock(); + lwkt_reltoken(&lp->lwp_proc->p_token); } /* @@ -1392,9 +1392,9 @@ generic_lwp_return(struct lwp *lp, struct trapframe *frame) if (KTRPOINT(lp->lwp_thread, KTR_SYSRET)) ktrsysret(lp, SYS_fork, 0, 0); #endif - lp->lwp_flag |= LWP_PASSIVE_ACQ; + lp->lwp_flags |= LWP_PASSIVE_ACQ; userexit(lp); - lp->lwp_flag &= ~LWP_PASSIVE_ACQ; + lp->lwp_flags &= ~LWP_PASSIVE_ACQ; } /* diff --git a/sys/sys/proc.h b/sys/sys/proc.h index 0d28f974e3..6f4fddf620 100644 --- a/sys/sys/proc.h +++ b/sys/sys/proc.h @@ -169,13 +169,14 @@ struct lwp { struct vmspace *lwp_vmspace; /* Inherited from p_vmspace */ struct vkernel_lwp *lwp_vkernel;/* VKernel support, lwp part */ - lwpid_t lwp_tid; /* Our thread id . */ + lwpid_t lwp_tid; /* Our thread id */ - int lwp_flag; /* LWP_* flags. */ - enum lwpstat lwp_stat; /* LS* lwp status. */ + u_int lwp_flags; /* LWP_* flags */ + u_int lwp_mpflags; /* LWP_MP_* flags */ + enum lwpstat lwp_stat; /* LS* lwp status */ int lwp_lock; /* lwp lock (prevent destruct) count */ - int lwp_dupfd; /* Sideways return value from fdopen. XXX */ + int lwp_dupfd; /* Sideways return value from fdopen */ /* * The following two fields are marked XXX since (at least) the @@ -240,7 +241,7 @@ struct proc { #define p_sigcatch p_sigacts->ps_sigcatch #define p_rlimit p_limit->pl_rlimit - int p_flag; /* P_* flags. */ + int p_flags; /* P_* flags. */ enum procstat p_stat; /* S* process status. 
*/ char p_pad1[3]; @@ -335,64 +336,64 @@ struct proc { #define p_pgid p_pgrp->pg_id /* These flags are kept in p_flags. */ -#define P_ADVLOCK 0x00001 /* Process may hold a POSIX advisory lock. */ -#define P_CONTROLT 0x00002 /* Has a controlling terminal. */ +#define P_ADVLOCK 0x00001 /* Process may hold a POSIX advisory lock */ +#define P_CONTROLT 0x00002 /* Has a controlling terminal */ #define P_SWAPPEDOUT 0x00004 /* Swapped out of memory */ -#define P_UNUSED3 0x00008 /* was: Event pending, break tsleep on sigcont */ -#define P_PPWAIT 0x00010 /* Parent is waiting for child to exec/exit. */ -#define P_PROFIL 0x00020 /* Has started profiling. */ -#define P_UNUSED5 0x00040 /* was: Selecting; wakeup/waiting danger. */ -#define P_UNUSED4 0x00080 /* was: Sleep is interruptible. */ -#define P_SUGID 0x00100 /* Had set id privileges since last exec. */ -#define P_SYSTEM 0x00200 /* System proc: no sigs, stats or swapping. */ +#define P_UNUSED3 0x00008 +#define P_PPWAIT 0x00010 /* Parent is waiting for child to exec/exit */ +#define P_PROFIL 0x00020 /* Has started profiling */ +#define P_UNUSED5 0x00040 /* was: Selecting; wakeup/waiting danger */ +#define P_UNUSED4 0x00080 /* was: Sleep is interruptible */ +#define P_SUGID 0x00100 /* Had set id privileges since last exec */ +#define P_SYSTEM 0x00200 /* System proc: no sigs, stats or swapping */ #define P_UNUSED2 0x00400 /* was: SIGSTOP status */ -#define P_TRACED 0x00800 /* Debugged process being traced. */ +#define P_TRACED 0x00800 /* Debugged process being traced */ #define P_WAITED 0x01000 /* SIGSTOP status was returned by wait3/4 */ #define P_WEXIT 0x02000 /* Working on exiting (master exit) */ -#define P_EXEC 0x04000 /* Process called exec. */ -#define P_CONTINUED 0x08000 /* Proc has continued from a stopped state. */ +#define P_EXEC 0x04000 /* Process called exec */ +#define P_CONTINUED 0x08000 /* Proc has continued from a stopped state */ -/* Should probably be changed into a hold count. */ -/* was P_NOSWAP 0x08000 was: Do not swap upages; p->p_hold */ -#define P_UNUSED7 0x10000 +#define P_UNUSED16 0x00010000 +#define P_UPCALLPEND 0x00020000 /* an upcall is pending */ -#define P_UPCALLPEND 0x20000 /* an upcall is pending */ - -#define P_SWAPWAIT 0x40000 /* Waiting for a swapin */ -#define P_UNUSED6 0x80000 /* was: Now in a zombied state */ +#define P_SWAPWAIT 0x00040000 /* Waiting for a swapin */ +#define P_UNUSED19 0x00080000 /* was: Now in a zombied state */ /* Marked a kernel thread */ -#define P_UNUSED07 0x100000 /* was: on a user scheduling run queue */ -#define P_KTHREADP 0x200000 /* Process is really a kernel thread */ -#define P_IDLESWAP 0x400000 /* Swapout was due to idleswap, not load */ - -#define P_JAILED 0x1000000 /* Process is in jail */ -#define P_SIGVTALRM 0x2000000 /* signal SIGVTALRM pending due to itimer */ -#define P_SIGPROF 0x4000000 /* signal SIGPROF pending due to itimer */ -#define P_INEXEC 0x8000000 /* Process is in execve(). */ -#define P_UNUSED1000 0x10000000 +#define P_UNUSED20 0x00100000 /* was: on a user scheduling run queue */ +#define P_KTHREADP 0x00200000 /* Process is really a kernel thread */ +#define P_IDLESWAP 0x00400000 /* Swapout was due to idleswap, not load */ + +#define P_JAILED 0x01000000 /* Process is in jail */ +#define P_SIGVTALRM 0x02000000 /* signal SIGVTALRM pending due to itimer */ +#define P_SIGPROF 0x04000000 /* signal SIGPROF pending due to itimer */ +#define P_INEXEC 0x08000000 /* Process is in execve(). 
*/ +#define P_UNUSED28 0x10000000 #define P_UPCALLWAIT 0x20000000 /* Wait for upcall or signal */ #define P_XCPU 0x40000000 /* SIGXCPU */ +#define LWP_ALTSTACK 0x0000001 /* have alternate signal stack */ +#define LWP_OLDMASK 0x0000002 /* need to restore mask before pause */ +#define LWP_SINTR 0x0000008 /* Sleep is interruptible. */ +#define LWP_SELECT 0x0000010 /* Selecting; wakeup/waiting danger. */ +#define LWP_UNUSED20 0x0000020 +#define LWP_UNUSED40 0x0000040 +#define LWP_UNUSED80 0x0000080 +#define LWP_PASSIVE_ACQ 0x0000100 /* Passive acquire cpu (see kern_switch) */ +#define LWP_PAGING 0x0000200 /* Currently in vm_fault */ + /* - * LWP_WSTOP: When set the thread will stop prior to return to userland + * LWP_MP_WSTOP: When set the thread will stop prior to return to userland * and has been counted in the process stop-threads-count, but * may still be running in kernel-land. * - * LWP_WEXIT: When set the thread has been asked to exit and will not return + * LWP_MP_WEXIT: When set the thread has been asked to exit and will not return * to userland. p_nthreads will not be decremented until the * thread has actually exited. */ -#define LWP_ALTSTACK 0x0000001 /* have alternate signal stack */ -#define LWP_OLDMASK 0x0000002 /* need to restore mask before pause */ -#define LWP_BREAKTSLEEP 0x0000004 /* Event pending, break tsleep on sigcont */ -#define LWP_SINTR 0x0000008 /* Sleep is interruptible. */ -#define LWP_SELECT 0x0000010 /* Selecting; wakeup/waiting danger. */ -#define LWP_ONRUNQ 0x0000020 /* on a user scheduling run queue */ -#define LWP_WEXIT 0x0000040 /* working on exiting */ -#define LWP_WSTOP 0x0000080 /* working on stopping */ -#define LWP_PASSIVE_ACQ 0x0000100 /* Passive acquire cpu (see kern_switch) */ -#define LWP_PAGING 0x0000200 /* Currently in vm_fault */ +#define LWP_MP_ONRUNQ 0x0000001 /* on a user scheduling run queue */ +#define LWP_MP_WEXIT 0x0000002 /* working on exiting */ +#define LWP_MP_WSTOP 0x0000004 /* working on stopping */ #define FIRST_LWP_IN_PROC(p) RB_FIRST(lwp_rb_tree, &(p)->p_lwp_tree) #define FOREACH_LWP_IN_PROC(lp, p) \ diff --git a/sys/sys/signal2.h b/sys/sys/signal2.h index 701c1798d4..2ec37660cf 100644 --- a/sys/sys/signal2.h +++ b/sys/sys/signal2.h @@ -103,7 +103,7 @@ __cursig(struct lwp *lp, int mayblock, int maytrace) * a) we may block and * b) somebody is tracing us. */ - if (!(mayblock && (p->p_flag & P_TRACED))) + if (!(mayblock && (p->p_flags & P_TRACED))) return (0); } diff --git a/sys/sys/thread.h b/sys/sys/thread.h index 55d379d12c..9c97487533 100644 --- a/sys/sys/thread.h +++ b/sys/sys/thread.h @@ -258,7 +258,7 @@ struct thread { const volatile void *td_wchan; /* waiting on channel */ int td_pri; /* 0-31, 31=highest priority (note 1) */ int td_critcount; /* critical section priority */ - int td_flags; /* TDF flags */ + u_int td_flags; /* TDF flags */ int td_wdomain; /* domain for wchan address (typ 0) */ void (*td_preemptable)(struct thread *td, int critcount); void (*td_release)(struct thread *td); @@ -274,7 +274,7 @@ struct thread { int td_refs; /* hold position in gd_tdallq / hold free */ int td_nest_count; /* prevent splz nesting */ int td_contended; /* token contention count */ - int td_unused01[1]; /* for future fields */ + u_int td_mpflags; /* flags can be set by foreign cpus */ #ifdef SMP int td_cscount; /* cpu synchronization master */ #else @@ -340,23 +340,23 @@ struct thread { * does not allow a thread to be scheduled if it already resides on some * queue. 
*/ -#define TDF_RUNNING 0x0001 /* thread still active */ -#define TDF_RUNQ 0x0002 /* on an LWKT run queue */ -#define TDF_PREEMPT_LOCK 0x0004 /* I have been preempted */ -#define TDF_PREEMPT_DONE 0x0008 /* acknowledge preemption complete */ -#define TDF_UNUSED00000010 0x0010 -#define TDF_MIGRATING 0x0020 /* thread is being migrated */ -#define TDF_SINTR 0x0040 /* interruptability hint for 'ps' */ -#define TDF_TSLEEPQ 0x0080 /* on a tsleep wait queue */ - -#define TDF_SYSTHREAD 0x0100 /* allocations may use reserve */ -#define TDF_ALLOCATED_THREAD 0x0200 /* objcache allocated thread */ -#define TDF_ALLOCATED_STACK 0x0400 /* objcache allocated stack */ -#define TDF_VERBOSE 0x0800 /* verbose on exit */ -#define TDF_DEADLKTREAT 0x1000 /* special lockmgr deadlock treatment */ -#define TDF_STOPREQ 0x2000 /* suspend_kproc */ -#define TDF_WAKEREQ 0x4000 /* resume_kproc */ -#define TDF_TIMEOUT 0x8000 /* tsleep timeout */ +#define TDF_RUNNING 0x00000001 /* thread still active */ +#define TDF_RUNQ 0x00000002 /* on an LWKT run queue */ +#define TDF_PREEMPT_LOCK 0x00000004 /* I have been preempted */ +#define TDF_PREEMPT_DONE 0x00000008 /* ac preemption complete */ +#define TDF_NOSTART 0x00000010 /* do not schedule on create */ +#define TDF_MIGRATING 0x00000020 /* thread is being migrated */ +#define TDF_SINTR 0x00000040 /* interruptability for 'ps' */ +#define TDF_TSLEEPQ 0x00000080 /* on a tsleep wait queue */ + +#define TDF_SYSTHREAD 0x00000100 /* reserve memory may be used */ +#define TDF_ALLOCATED_THREAD 0x00000200 /* objcache allocated thread */ +#define TDF_ALLOCATED_STACK 0x00000400 /* objcache allocated stack */ +#define TDF_VERBOSE 0x00000800 /* verbose on exit */ +#define TDF_DEADLKTREAT 0x00001000 /* special lockmgr treatment */ +#define TDF_UNUSED2000 0x00002000 +#define TDF_TIMEOUT_RUNNING 0x00004000 /* tsleep timeout race */ +#define TDF_TIMEOUT 0x00008000 /* tsleep timeout */ #define TDF_INTTHREAD 0x00010000 /* interrupt thread */ #define TDF_TSLEEP_DESCHEDULED 0x00020000 /* tsleep core deschedule */ #define TDF_BLOCKED 0x00040000 /* Thread is blocked */ @@ -369,6 +369,9 @@ struct thread { #define TDF_UNUSED02000000 0x02000000 #define TDF_CRYPTO 0x04000000 /* crypto thread */ +#define TDF_MP_STOPREQ 0x00000001 /* suspend_kproc */ +#define TDF_MP_WAKEREQ 0x00000002 /* resume_kproc */ + /* * Thread priorities. Typically only one thread from any given * user process scheduling queue is on the LWKT run queue at a time. diff --git a/sys/sys/tty.h b/sys/sys/tty.h index c59cb644a2..d18ff0b17c 100644 --- a/sys/sys/tty.h +++ b/sys/sys/tty.h @@ -221,7 +221,7 @@ struct speedtab { /* Is tp controlling terminal for p? */ #define isctty(p, tp) \ - ((p)->p_session == (tp)->t_session && (p)->p_flag & P_CONTROLT) + ((p)->p_session == (tp)->t_session && ((p)->p_flags & P_CONTROLT)) /* Is p in background of tp? 
*/ #define isbackground(p, tp) \ diff --git a/sys/vfs/procfs/procfs.h b/sys/vfs/procfs/procfs.h index 071d2ab830..40067b947a 100644 --- a/sys/vfs/procfs/procfs.h +++ b/sys/vfs/procfs/procfs.h @@ -99,7 +99,7 @@ struct pfsnode { ((((p1)->p_ucred->cr_uid == (p2)->p_ucred->cr_ruid) && \ ((p1)->p_ucred->cr_ruid == (p2)->p_ucred->cr_ruid) && \ ((p1)->p_ucred->cr_svuid == (p2)->p_ucred->cr_ruid) && \ - ((p2)->p_flag & (P_SUGID|P_INEXEC)) == 0) || \ + ((p2)->p_flags & (P_SUGID|P_INEXEC)) == 0) || \ (priv_check_cred((p1)->p_ucred, PRIV_DEBUG_UNPRIV, 0) == 0)) /* diff --git a/sys/vfs/procfs/procfs_ctl.c b/sys/vfs/procfs/procfs_ctl.c index a6f0c0b9ee..69e709f43d 100644 --- a/sys/vfs/procfs/procfs_ctl.c +++ b/sys/vfs/procfs/procfs_ctl.c @@ -68,7 +68,7 @@ #define TRACE_WAIT_P(curp, p) \ (((p)->p_stat == SSTOP) && \ (p)->p_pptr == (curp) && \ - ((p)->p_flag & P_TRACED)) + ((p)->p_flags & P_TRACED)) #define PROCFS_CTL_ATTACH 1 #define PROCFS_CTL_DETACH 2 @@ -119,7 +119,7 @@ procfs_control(struct proc *curp, struct lwp *lp, int op) ASSERT_LWKT_TOKEN_HELD(&proc_token); /* Can't trace a process that's currently exec'ing. */ - if ((p->p_flag & P_INEXEC) != 0) + if ((p->p_flags & P_INEXEC) != 0) return EAGAIN; /* * Authorization check: rely on normal debugging protection, except @@ -141,7 +141,7 @@ procfs_control(struct proc *curp, struct lwp *lp, int op) */ if (op == PROCFS_CTL_ATTACH) { /* check whether already being traced */ - if (p->p_flag & P_TRACED) + if (p->p_flags & P_TRACED) return (EBUSY); /* can't trace yourself! */ @@ -156,7 +156,7 @@ procfs_control(struct proc *curp, struct lwp *lp, int op) * proc gets to see all the action. * Stop the target. */ - p->p_flag |= P_TRACED; + p->p_flags |= P_TRACED; faultin(p); p->p_xstat = 0; /* XXX ? */ if (p->p_pptr != curp) { @@ -205,11 +205,11 @@ procfs_control(struct proc *curp, struct lwp *lp, int op) */ case PROCFS_CTL_DETACH: /* if not being traced, then this is a painless no-op */ - if ((p->p_flag & P_TRACED) == 0) + if ((p->p_flags & P_TRACED) == 0) return (0); /* not being traced any more */ - p->p_flag &= ~P_TRACED; + p->p_flags &= ~P_TRACED; /* remove pending SIGTRAP, else the process will die */ spin_lock(&lp->lwp_spin); @@ -228,8 +228,8 @@ procfs_control(struct proc *curp, struct lwp *lp, int op) } p->p_oppid = 0; - p->p_flag &= ~P_WAITED; /* XXX ? */ - wakeup((caddr_t) curp); /* XXX for CTL_WAIT below ? */ + p->p_flags &= ~P_WAITED; /* XXX ? */ + wakeup((caddr_t) curp); /* XXX for CTL_WAIT below ? */ break; @@ -258,10 +258,10 @@ procfs_control(struct proc *curp, struct lwp *lp, int op) */ case PROCFS_CTL_WAIT: error = 0; - if (p->p_flag & P_TRACED) { + if (p->p_flags & P_TRACED) { while (error == 0 && p->p_stat != SSTOP && - (p->p_flag & P_TRACED) && + (p->p_flags & P_TRACED) && (p->p_pptr == curp)) { error = tsleep((caddr_t) p, PCATCH, "procfsx", 0); @@ -282,8 +282,8 @@ procfs_control(struct proc *curp, struct lwp *lp, int op) /* * If the process is in a stopped state, make it runnable again. - * Do not set LWP_BREAKTSLEEP - that is, do not break a tsleep that - * might be in progress. + * Do not set LWP_MP_BREAKTSLEEP - that is, do not break a tsleep + * that might be in progress. 
*/ if (p->p_stat == SSTOP) proc_unstop(p); diff --git a/sys/vfs/procfs/procfs_dbregs.c b/sys/vfs/procfs/procfs_dbregs.c index 939a6b5327..2fb21621a5 100644 --- a/sys/vfs/procfs/procfs_dbregs.c +++ b/sys/vfs/procfs/procfs_dbregs.c @@ -63,7 +63,7 @@ procfs_dodbregs(struct proc *curp, struct lwp *lp, struct pfsnode *pfs, int kl; /* Can't trace a process that's currently exec'ing. */ - if ((p->p_flag & P_INEXEC) != 0) + if ((p->p_flags & P_INEXEC) != 0) return EAGAIN; if (!CHECKIO(curp, p) || p_trespass(curp->p_ucred, p->p_ucred)) return (EPERM); @@ -94,5 +94,5 @@ procfs_dodbregs(struct proc *curp, struct lwp *lp, struct pfsnode *pfs, int procfs_validdbregs(struct lwp *lp) { - return ((lp->lwp_proc->p_flag & P_SYSTEM) == 0); + return ((lp->lwp_proc->p_flags & P_SYSTEM) == 0); } diff --git a/sys/vfs/procfs/procfs_fpregs.c b/sys/vfs/procfs/procfs_fpregs.c index 7692af3d82..a9acf20ef4 100644 --- a/sys/vfs/procfs/procfs_fpregs.c +++ b/sys/vfs/procfs/procfs_fpregs.c @@ -60,7 +60,7 @@ procfs_dofpregs(struct proc *curp, struct lwp *lp, struct pfsnode *pfs, int kl; /* Can't trace a process that's currently exec'ing. */ - if ((p->p_flag & P_INEXEC) != 0) + if ((p->p_flags & P_INEXEC) != 0) return EAGAIN; if (!CHECKIO(curp, p) || p_trespass(curp->p_ucred, p->p_ucred)) return EPERM; @@ -91,5 +91,5 @@ procfs_dofpregs(struct proc *curp, struct lwp *lp, struct pfsnode *pfs, int procfs_validfpregs(struct lwp *lp) { - return ((lp->lwp_proc->p_flag & P_SYSTEM) == 0); + return ((lp->lwp_proc->p_flags & P_SYSTEM) == 0); } diff --git a/sys/vfs/procfs/procfs_map.c b/sys/vfs/procfs/procfs_map.c index f75df879e1..1fb423b556 100644 --- a/sys/vfs/procfs/procfs_map.c +++ b/sys/vfs/procfs/procfs_map.c @@ -245,5 +245,5 @@ procfs_domap(struct proc *curp, struct lwp *lp, struct pfsnode *pfs, int procfs_validmap(struct lwp *lp) { - return ((lp->lwp_proc->p_flag & P_SYSTEM) == 0); + return ((lp->lwp_proc->p_flags & P_SYSTEM) == 0); } diff --git a/sys/vfs/procfs/procfs_mem.c b/sys/vfs/procfs/procfs_mem.c index d5da3d92e6..84ecdf8b09 100644 --- a/sys/vfs/procfs/procfs_mem.c +++ b/sys/vfs/procfs/procfs_mem.c @@ -90,7 +90,7 @@ procfs_rwmem(struct proc *curp, struct proc *p, struct uio *uio) * page table usage in that process may be messed up. */ vm = p->p_vmspace; - if ((p->p_flag & P_WEXIT) || sysref_isinactive(&vm->vm_sysref)) { + if ((p->p_flags & P_WEXIT) || sysref_isinactive(&vm->vm_sysref)) { return EFAULT; } @@ -175,7 +175,7 @@ procfs_domem(struct proc *curp, struct lwp *lp, struct pfsnode *pfs, return (0); lwkt_gettoken(&p->p_token); - if ((p->p_flag & P_INEXEC) != 0) { + if ((p->p_flags & P_INEXEC) != 0) { /* * Can't trace a process that's currently exec'ing. */ diff --git a/sys/vfs/procfs/procfs_regs.c b/sys/vfs/procfs/procfs_regs.c index 5e51c71b83..52d743dbaa 100644 --- a/sys/vfs/procfs/procfs_regs.c +++ b/sys/vfs/procfs/procfs_regs.c @@ -61,7 +61,7 @@ procfs_doregs(struct proc *curp, struct lwp *lp, struct pfsnode *pfs, int kl; /* Can't trace a process that's currently exec'ing. 
*/ - if ((p->p_flag & P_INEXEC) != 0) + if ((p->p_flags & P_INEXEC) != 0) return EAGAIN; if (!CHECKIO(curp, p) || p_trespass(curp->p_ucred, p->p_ucred)) return EPERM; @@ -93,5 +93,5 @@ procfs_doregs(struct proc *curp, struct lwp *lp, struct pfsnode *pfs, int procfs_validregs(struct lwp *lp) { - return ((lp->lwp_proc->p_flag & P_SYSTEM) == 0); + return ((lp->lwp_proc->p_flags & P_SYSTEM) == 0); } diff --git a/sys/vfs/procfs/procfs_status.c b/sys/vfs/procfs/procfs_status.c index a246e004c3..6f782b205b 100644 --- a/sys/vfs/procfs/procfs_status.c +++ b/sys/vfs/procfs/procfs_status.c @@ -97,7 +97,7 @@ procfs_dostatus(struct proc *curp, struct lwp *lp, struct pfsnode *pfs, ps += ksnprintf(ps, psbuf + sizeof(psbuf) - ps, " %d %d %d %d ", pid, ppid, pgid, sid); DOCHECK(); - if ((p->p_flag&P_CONTROLT) && (tp = sess->s_ttyp)) + if ((p->p_flags & P_CONTROLT) && (tp = sess->s_ttyp)) ps += ksnprintf(ps, psbuf + sizeof(psbuf) - ps, "%d,%d ", major(tp->t_dev), minor(tp->t_dev)); else @@ -121,7 +121,7 @@ procfs_dostatus(struct proc *curp, struct lwp *lp, struct pfsnode *pfs, DOCHECK(); } - if (p->p_flag & P_SWAPPEDOUT) { + if (p->p_flags & P_SWAPPEDOUT) { ps += ksnprintf(ps, psbuf + sizeof(psbuf) - ps, " -1,-1 -1,-1 -1,-1"); } else { @@ -211,7 +211,8 @@ procfs_docmdline(struct proc *curp, struct lwp *lp, struct pfsnode *pfs, if (p->p_args && (ps_argsopen || (CHECKIO(curp, p) && - (p->p_flag & P_INEXEC) == 0 && !p_trespass(curp->p_ucred, p->p_ucred))) + (p->p_flags & P_INEXEC) == 0 && + !p_trespass(curp->p_ucred, p->p_ucred))) ) { bp = p->p_args->ar_args; buflen = p->p_args->ar_length; diff --git a/sys/vfs/procfs/procfs_type.c b/sys/vfs/procfs/procfs_type.c index 7c9c265aec..2600175f5e 100644 --- a/sys/vfs/procfs/procfs_type.c +++ b/sys/vfs/procfs/procfs_type.c @@ -79,5 +79,5 @@ procfs_dotype(struct proc *curp, struct lwp *lp, struct pfsnode *pfs, int procfs_validtype(struct lwp *lp) { - return ((lp->lwp_proc->p_flag & P_SYSTEM) == 0); + return ((lp->lwp_proc->p_flags & P_SYSTEM) == 0); } diff --git a/sys/vfs/procfs/procfs_vnops.c b/sys/vfs/procfs/procfs_vnops.c index 593a020b6e..0613945c0b 100644 --- a/sys/vfs/procfs/procfs_vnops.c +++ b/sys/vfs/procfs/procfs_vnops.c @@ -193,7 +193,7 @@ procfs_open(struct vop_open_args *ap) p1 = curproc; KKASSERT(p1); /* Can't trace a process that's currently exec'ing. */ - if ((p2->p_flag & P_INEXEC) != 0) { + if ((p2->p_flags & P_INEXEC) != 0) { error = EAGAIN; goto done; } @@ -291,7 +291,7 @@ procfs_ioctl(struct vop_ioctl_args *ap) } /* Can't trace a process that's currently exec'ing. */ - if ((procp->p_flag & P_INEXEC) != 0) { + if ((procp->p_flags & P_INEXEC) != 0) { error = EAGAIN; goto done; } @@ -556,7 +556,7 @@ procfs_getattr(struct vop_getattr_args *ap) case Pfpregs: case Pdbregs: case Pmem: - if (procp->p_flag & P_SUGID) + if (procp->p_flags & P_SUGID) vap->va_mode &= ~((VREAD|VWRITE)| ((VREAD|VWRITE)>>3)| ((VREAD|VWRITE)>>6)); @@ -626,7 +626,7 @@ procfs_getattr(struct vop_getattr_args *ap) * change the owner to root - otherwise 'ps' and friends * will break even though they are setgid kmem. 
*SIGH* */ - if (procp->p_flag & P_SUGID) + if (procp->p_flags & P_SUGID) vap->va_uid = 0; else vap->va_uid = procp->p_ucred->cr_uid; diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c index 13ee444178..8f847804ec 100644 --- a/sys/vm/vm_fault.c +++ b/sys/vm/vm_fault.c @@ -124,7 +124,7 @@ struct faultstate { static int debug_cluster = 0; SYSCTL_INT(_vm, OID_AUTO, debug_cluster, CTLFLAG_RW, &debug_cluster, 0, ""); -static int vm_shared_fault = 1; +static int vm_shared_fault = 0; SYSCTL_INT(_vm, OID_AUTO, shared_fault, CTLFLAG_RW, &vm_shared_fault, 0, "Allow shared token on vm_object"); static long vm_shared_hit = 0; @@ -271,7 +271,7 @@ vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type, int fault_flags) growstack = 1; if ((lp = curthread->td_lwp) != NULL) - lp->lwp_flag |= LWP_PAGING; + lp->lwp_flags |= LWP_PAGING; lwkt_gettoken(&map->token); @@ -393,12 +393,13 @@ RetryFault: * short-cut a quick mapping. * * WARNING! We cannot call swap_pager_unswapped() - * with a shared token! + * with a shared token! Note that we + * have to test fs.first_prot here. */ vm_page_activate(fs.m); if (fs.m->valid == VM_PAGE_BITS_ALL && ((fs.m->flags & PG_SWAPPED) == 0 || - (fs.prot & VM_PROT_WRITE) == 0 || + (fs.first_prot & VM_PROT_WRITE) == 0 || (fs.fault_flags & VM_FAULT_DIRTY) == 0)) { fs.lookup_still_valid = TRUE; fs.first_m = NULL; @@ -565,7 +566,7 @@ done: vm_object_drop(fs.first_object); lwkt_reltoken(&map->token); if (lp) - lp->lwp_flag &= ~LWP_PAGING; + lp->lwp_flags &= ~LWP_PAGING; return (result); } diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c index 6c2ac8caf3..526cdbfb46 100644 --- a/sys/vm/vm_glue.c +++ b/sys/vm/vm_glue.c @@ -302,13 +302,13 @@ vm_init_limits(struct proc *p) void faultin(struct proc *p) { - if (p->p_flag & P_SWAPPEDOUT) { + if (p->p_flags & P_SWAPPEDOUT) { /* * The process is waiting in the kernel to return to user * mode but cannot until P_SWAPPEDOUT gets cleared. 
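faultin() is the other half of the handshake visible in the userret() hunks
earlier: the thread trying to return to userland flags itself, asks the swapin
scheduler for service, and sleeps on its proc pointer until the flags are
cleared. Condensed from the code in this patch; the wakeup on p is implied by
the tsleep channel rather than shown in these hunks:

    /* userret() side */
    p->p_flags |= P_SWAPWAIT;
    swapin_request();
    if (p->p_flags & P_SWAPWAIT)
            tsleep(p, PCATCH, "SWOUT", 0);
    p->p_flags &= ~P_SWAPWAIT;

    /* faultin() side */
    p->p_flags &= ~(P_SWAPPEDOUT | P_SWAPWAIT);
    wakeup(p);      /* assumed: lets the tsleep above return */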
*/ lwkt_gettoken(&p->p_token); - p->p_flag &= ~(P_SWAPPEDOUT | P_SWAPWAIT); + p->p_flags &= ~(P_SWAPPEDOUT | P_SWAPWAIT); #ifdef INVARIANTS if (swap_debug) kprintf("swapping in %d (%s)\n", p->p_pid, p->p_comm); @@ -397,7 +397,7 @@ scheduler_callback(struct proc *p, void *data) segsz_t pgs; int pri; - if (p->p_flag & P_SWAPWAIT) { + if (p->p_flags & P_SWAPWAIT) { pri = 0; FOREACH_LWP_IN_PROC(lp, p) { /* XXX lwp might need a different metric */ @@ -453,7 +453,7 @@ swapin_request(void) #define swappable(p) \ (((p)->p_lock == 0) && \ - ((p)->p_flag & (P_TRACED|P_SYSTEM|P_SWAPPEDOUT|P_WEXIT)) == 0) + ((p)->p_flags & (P_TRACED|P_SYSTEM|P_SWAPPEDOUT|P_WEXIT)) == 0) /* @@ -586,7 +586,7 @@ swapout(struct proc *p) * remember the process resident count */ p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace); - p->p_flag |= P_SWAPPEDOUT; + p->p_flags |= P_SWAPPEDOUT; p->p_swtime = 0; } diff --git a/sys/vm/vm_meter.c b/sys/vm/vm_meter.c index 1f49f41b13..672998d909 100644 --- a/sys/vm/vm_meter.c +++ b/sys/vm/vm_meter.c @@ -176,15 +176,15 @@ do_vmtotal_callback(struct proc *p, void *data) struct vmtotal *totalp = data; struct lwp *lp; - if (p->p_flag & P_SYSTEM) + if (p->p_flags & P_SYSTEM) return(0); FOREACH_LWP_IN_PROC(lp, p) { switch (lp->lwp_stat) { case LSSTOP: case LSSLEEP: - if ((p->p_flag & P_SWAPPEDOUT) == 0) { - if ((lp->lwp_flag & LWP_SINTR) == 0) + if ((p->p_flags & P_SWAPPEDOUT) == 0) { + if ((lp->lwp_flags & LWP_SINTR) == 0) totalp->t_dw++; else if (lp->lwp_slptime < maxslp) totalp->t_sl++; @@ -196,7 +196,7 @@ do_vmtotal_callback(struct proc *p, void *data) break; case LSRUN: - if (p->p_flag & P_SWAPPEDOUT) + if (p->p_flags & P_SWAPPEDOUT) totalp->t_sw++; else totalp->t_rq++; @@ -211,7 +211,7 @@ do_vmtotal_callback(struct proc *p, void *data) /* * Set while in vm_fault() */ - if (lp->lwp_flag & LWP_PAGING) + if (lp->lwp_flags & LWP_PAGING) totalp->t_pw++; } return(0); diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c index ba1922d4d0..5e691d96e7 100644 --- a/sys/vm/vm_pageout.c +++ b/sys/vm/vm_pageout.c @@ -1501,7 +1501,7 @@ vm_pageout_scan_callback(struct proc *p, void *data) * Never kill system processes or init. If we have configured swap * then try to avoid killing low-numbered pids. */ - if ((p->p_flag & P_SYSTEM) || (p->p_pid == 1) || + if ((p->p_flags & P_SYSTEM) || (p->p_pid == 1) || ((p->p_pid < 48) && (vm_swap_size != 0))) { return (0); } @@ -2066,7 +2066,7 @@ vm_daemon_callback(struct proc *p, void *data __unused) * if this is a system process or if we have already * looked at this process, skip it. */ - if (p->p_flag & (P_SYSTEM | P_WEXIT)) + if (p->p_flags & (P_SYSTEM | P_WEXIT)) return (0); /* @@ -2087,7 +2087,7 @@ vm_daemon_callback(struct proc *p, void *data __unused) * swapped out. Set the limit to nothing to get as * many pages out to swap as possible. */ - if (p->p_flag & P_SWAPPEDOUT) + if (p->p_flags & P_SWAPPEDOUT) limit = 0; lwkt_gettoken(&p->p_vmspace->vm_map.token); -- 2.41.0