From bb36d905d2bf2569ce40fc3ebdab1877ae6342bf Mon Sep 17 00:00:00 2001
From: Matthew Dillon
Date: Sun, 1 Oct 2017 11:18:49 -0700
Subject: [PATCH] kernel - Fix rare lockmgr() state transition (2)

* Fix two lock timeout cases for LK_EXCLUPGRADE and LK_UPGRADE, and
  fix a bug in undo_upreq().

* A tsleep failure (such as the LK_TIMELOCK case via
  vm_map_lock_read_to()) was not properly backing-out a LKC_UPREQ,
  resulting in a situation where the lock becomes exclusively owned
  by nobody and deadlocks against all-comers.  Fix by properly
  calling undo_upreq().

* Fix a bug in undo_upreq() itself.  When undoing a granted UPREQ,
  the lockholder must be set prior to releasing the now-granted
  exclusive lock in order to avoid an assertion panic.

* While we are at it, replace a weird cmpset(count, count) with a
  fetchadd(count, 0).
---
 share/man/man9/lock.9 | 22 ++++++++++++++++------
 sys/kern/kern_lock.c  | 13 +++++++++----
 2 files changed, 25 insertions(+), 10 deletions(-)

diff --git a/share/man/man9/lock.9 b/share/man/man9/lock.9
index 09b1d635c7..b03776a2ba 100644
--- a/share/man/man9/lock.9
+++ b/share/man/man9/lock.9
@@ -138,14 +138,24 @@ Upgrade a shared lock to an exclusive lock.
 Fails with
 .Er EBUSY
 if there is someone ahead of you in line waiting for an upgrade.
-If this call fails, the shared lock is lost.
-Attempts to upgrade an exclusive lock will cause a
-.Xr panic 9 .
+If this call fails for any reason, the shared lock is lost.
+Attempts to upgrade an exclusive lock not already
+owned by the caller will cause a
+.Xr panic 9 ,
+but otherwise will always succeed.
+NOTE! When this operation succeeds, it guarantees that no other
+exclusive lock was able to acquire the lock ahead of you, but
+as indicated above, if it fails your current shared lock is lost.
 .It Dv LK_UPGRADE
 Upgrade a shared lock to an exclusive lock.
-If this call fails, the shared lock is lost.
-Attempts to upgrade an exclusive lock will cause a
-.Xr panic 9 .
+If this call fails for any reason, the shared lock is lost.
+Attempts to upgrade an exclusive lock not already
+owned by the caller will cause a
+.Xr panic 9 ,
+but otherwise will always succeed.
+WARNING! This operation can block with the current lock temporarily
+released, and other exclusive or shared lock holders can inject before
+the lock is acquired on your behalf.
 .It Dv LK_RELEASE
 Release the lock.
 Releasing a lock that is not held can cause a
diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
index 92f6523a74..d512a862c2 100644
--- a/sys/kern/kern_lock.c
+++ b/sys/kern/kern_lock.c
@@ -431,7 +431,7 @@ again:
 		if ((count & (LKC_UPREQ|LKC_MASK)) == (LKC_UPREQ | 1)) {
 			wflags |= LKC_EXCL | LKC_UPGRANT;
 			wflags |= count;
-			wflags &= ~LKC_UPREQ;
+			wflags &= ~LKC_UPREQ;	/* was set from count */
 		} else {
 			wflags |= (count - 1);
 		}
@@ -453,9 +453,14 @@ again:
 
 			error = tsleep(lkp, pflags | PINTERLOCKED,
 				       lkp->lk_wmesg, timo);
-			if (error)
+			if (error) {
+				if ((count & LKC_UPREQ) == 0)
+					undo_upreq(lkp);
 				break;
+			}
 			if (extflags & LK_SLEEPFAIL) {
+				if ((count & LKC_UPREQ) == 0)
+					undo_upreq(lkp);
 				error = ENOLCK;
 				break;
 			}
@@ -497,8 +502,7 @@ again:
 		pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
 		timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;
 		tsleep_interlock(lkp, pflags);
-		if (atomic_cmpset_int(&lkp->lk_count, count, count)) {
-
+		if (atomic_fetchadd_int(&lkp->lk_count, 0) == count) {
 			mycpu->gd_cnt.v_lock_name[0] = 'U';
 			strncpy(mycpu->gd_cnt.v_lock_name + 1,
 				lkp->lk_wmesg,
@@ -692,6 +696,7 @@ undo_upreq(struct lock *lkp)
 		 */
 		if (atomic_cmpset_int(&lkp->lk_count, count,
 				      count & ~LKC_UPGRANT)) {
+			lkp->lk_lockholder = curthread;
 			lockmgr(lkp, LK_RELEASE);
 			break;
 		}
-- 
2.41.0
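
A note on the fetchadd(count, 0) idiom in the third kern_lock.c hunk, for
readers following along: atomic_fetchadd_int(&lkp->lk_count, 0) atomically
adds zero and returns the old value, i.e. it is simply an atomic read,
whereas the old atomic_cmpset_int(&lkp->lk_count, count, count) performed a
full locked compare-and-store just to test that the lock word still equals
the previously sampled count.  The sketch below is a minimal userland
approximation of that verify-before-sleep pattern, not the lockmgr
implementation: it uses GCC __atomic builtins in place of the kernel's
atomic_*_int() routines, and atomic_read_u32()/lk_count are illustrative
names, not kernel API.

#include <stdio.h>
#include <stdint.h>

static uint32_t lk_count;	/* stand-in for lkp->lk_count */

/* fetchadd of 0 is an atomic read: add nothing, return the old value */
static uint32_t
atomic_read_u32(uint32_t *p)
{
	return __atomic_fetch_add(p, 0, __ATOMIC_SEQ_CST);
}

int
main(void)
{
	/* sample the lock word, then decide whether it is safe to sleep */
	uint32_t count = atomic_read_u32(&lk_count);

	/* ... the kernel calls tsleep_interlock() at this point ... */

	/*
	 * Re-verify that the lock word is unchanged before sleeping.
	 * Expressing this as cmpset(count, count) issues a needless
	 * locked store when it succeeds; an atomic read plus an
	 * ordinary compare says the same thing more cheaply.
	 */
	if (atomic_read_u32(&lk_count) == count)
		printf("unchanged: would tsleep() and wait for a wakeup\n");
	else
		printf("changed: would rerun the lock state machine\n");
	return 0;
}

The tsleep_interlock()/re-check/tsleep() sequence closes the race where a
wakeup arrives between the check and the sleep; the patch's new
undo_upreq() calls handle the complementary failure, backing the UPREQ out
when the sleep itself fails.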