From dda969a89b4bd6732395ed1e9b793e905afb0b59 Mon Sep 17 00:00:00 2001
From: Matthew Dillon
Date: Fri, 18 Nov 2011 08:08:24 -0800
Subject: [PATCH] kernel - Cleanup and document

* Cleanup and document various bits of code.
---
 sys/kern/kern_lock.c   | 12 ++++++++----
 sys/kern/sys_process.c |  8 ++++----
 sys/vm/vm_fault.c      |  3 +++
 3 files changed, 15 insertions(+), 8 deletions(-)

diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
index 4593cba907..10f13ec2d4 100644
--- a/sys/kern/kern_lock.c
+++ b/sys/kern/kern_lock.c
@@ -75,12 +75,13 @@
 #endif
 
 #define LK_ALL (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | \
-	LK_SHARE_NONZERO | LK_WAIT_NONZERO)
+		LK_SHARE_NONZERO | LK_WAIT_NONZERO)
 
 static int acquire(struct lock *lkp, int extflags, int wanted);
 
 static LOCK_INLINE void
-sharelock(struct lock *lkp, int incr) {
+sharelock(struct lock *lkp, int incr)
+{
 	lkp->lk_flags |= LK_SHARE_NONZERO;
 	lkp->lk_sharecount += incr;
 }
@@ -368,8 +369,8 @@ lkmatch2:
 		 * lock, awaken upgrade requestor if we are the last shared
 		 * lock, then request an exclusive lock.
 		 */
-		if ( (lkp->lk_flags & (LK_SHARE_NONZERO|LK_WAIT_NONZERO)) ==
-			LK_WAIT_NONZERO) {
+		if ((lkp->lk_flags & (LK_SHARE_NONZERO|LK_WAIT_NONZERO)) ==
+		    LK_WAIT_NONZERO) {
 			++dowakeup;
 		}
 		/* fall into exclusive request */
@@ -528,6 +529,9 @@ lockmgr_kernproc(struct lock *lp)
  * Set the lock to be exclusively held.  The caller is holding the lock's
  * spinlock and the spinlock remains held on return.  A panic will occur
  * if the lock cannot be set to exclusive.
+ *
+ * XXX not only unused but these functions also break EXCLUPGRADE's
+ *     atomicity.
  */
 void
 lockmgr_setexclusive_interlocked(struct lock *lkp)
diff --git a/sys/kern/sys_process.c b/sys/kern/sys_process.c
index cb45aca5bf..2b78addd28 100644
--- a/sys/kern/sys_process.c
+++ b/sys/kern/sys_process.c
@@ -76,8 +76,8 @@ pread (struct proc *procp, unsigned int addr, unsigned int *retval) {
 	pageno = trunc_page(addr);
 
 	tmap = map;
-	rv = vm_map_lookup (&tmap, pageno, VM_PROT_READ, &out_entry,
-		&object, &pindex, &out_prot, &wired);
+	rv = vm_map_lookup(&tmap, pageno, VM_PROT_READ, &out_entry,
+			   &object, &pindex, &out_prot, &wired);
 
 	if (rv != KERN_SUCCESS)
 		return EINVAL;
@@ -154,8 +154,8 @@ pwrite (struct proc *procp, unsigned int addr, unsigned int datum) {
 	 */
 
 	tmap = map;
-	rv = vm_map_lookup (&tmap, pageno, VM_PROT_WRITE, &out_entry,
-		&object, &pindex, &out_prot, &wired);
+	rv = vm_map_lookup(&tmap, pageno, VM_PROT_WRITE, &out_entry,
+			   &object, &pindex, &out_prot, &wired);
 
 	if (rv != KERN_SUCCESS)
 		return EINVAL;
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index fd2f87ccd5..1c010585b9 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -338,6 +338,9 @@ RetryFault:
 		 * If we don't COW now, on a user wire, the user will never
 		 * be able to write to the mapping.  If we don't make this
 		 * restriction, the bookkeeping would be nearly impossible.
+		 *
+		 * XXX We have a shared lock, this will have a MP race but
+		 *     I don't see how it can hurt anything.
 		 */
 		if ((fs.entry->protection & VM_PROT_WRITE) == 0)
 			fs.entry->max_protection &= ~VM_PROT_WRITE;
-- 
2.41.0