From c40689e9b134213e4a3e29c2548d0ec47c7dd40b Mon Sep 17 00:00:00 2001
From: Matthew Dillon
Date: Tue, 19 Oct 2010 23:19:29 -0700
Subject: [PATCH] kernel - revamp mtx_spinlock()

* Revamp mtx_spinlock(), add mtx_spinlock_try(), and add mtx_spinunlock().

* Enter a proper hard critical section when using mtx_spinlock*(), just
  like the normal spinlock() code does.  The difference is that mtx
  spinlocks have a ref count and thus are reentrant.

* mtx_spinlock*() is not used in the code yet.  A followup commit will
  begin using it for the syscons lock.
---
 sys/kern/kern_mutex.c | 50 +++++++++++++++++++++++++-
 sys/sys/mutex.h       |  4 +--
 sys/sys/mutex2.h      | 81 +++++++++++++++++++++++++++++++++----------
 3 files changed, 113 insertions(+), 22 deletions(-)

diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index 58259add33..f58b98ebe4 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -275,8 +275,11 @@ _mtx_lock_sh_quick(mtx_t mtx, const char *ident)
         return (__mtx_lock_sh(mtx, ident, 0, 0));
 }
 
+/*
+ * Get an exclusive spinlock the hard way.
+ */
 void
-_mtx_spinlock_ex(mtx_t mtx)
+_mtx_spinlock(mtx_t mtx)
 {
         u_int lock;
         u_int nlock;
@@ -311,6 +314,49 @@ _mtx_spinlock_ex(mtx_t mtx)
         }
 }
 
+/*
+ * Attempt to acquire a spinlock, if we fail we must undo the
+ * gd->gd_spinlocks_wr/gd->gd_curthread->td_critcount predisposition.
+ *
+ * Returns 0 on success, EAGAIN on failure.
+ */
+int
+_mtx_spinlock_try(mtx_t mtx)
+{
+        globaldata_t gd = mycpu;
+        u_int lock;
+        u_int nlock;
+        int res = 0;
+
+        for (;;) {
+                lock = mtx->mtx_lock;
+                if (lock == 0) {
+                        nlock = MTX_EXCLUSIVE | 1;
+                        if (atomic_cmpset_int(&mtx->mtx_lock, 0, nlock)) {
+                                mtx->mtx_owner = gd->gd_curthread;
+                                break;
+                        }
+                } else if ((lock & MTX_EXCLUSIVE) &&
+                           mtx->mtx_owner == gd->gd_curthread) {
+                        KKASSERT((lock & MTX_MASK) != MTX_MASK);
+                        nlock = lock + 1;
+                        if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
+                                break;
+                } else {
+                        --gd->gd_spinlocks_wr;
+                        cpu_ccfence();
+                        --gd->gd_curthread->td_critcount;
+                        res = EAGAIN;
+                        break;
+                }
+                cpu_pause();
+                ++mtx_collision_count;
+        }
+        return res;
+}
+
+#if 0
+
 void
 _mtx_spinlock_sh(mtx_t mtx)
 {
@@ -340,6 +386,8 @@ _mtx_spinlock_sh(mtx_t mtx)
         }
 }
 
+#endif
+
 int
 _mtx_lock_ex_try(mtx_t mtx)
 {
diff --git a/sys/sys/mutex.h b/sys/sys/mutex.h
index b17eacd89d..6139993980 100644
--- a/sys/sys/mutex.h
+++ b/sys/sys/mutex.h
@@ -105,8 +105,8 @@ int _mtx_lock_ex(mtx_t mtx, const char *ident, int flags, int to);
 int _mtx_lock_sh(mtx_t mtx, const char *ident, int flags, int to);
 int _mtx_lock_ex_quick(mtx_t mtx, const char *ident);
 int _mtx_lock_sh_quick(mtx_t mtx, const char *ident);
-void _mtx_spinlock_ex(mtx_t mtx);
-void _mtx_spinlock_sh(mtx_t mtx);
+void _mtx_spinlock(mtx_t mtx);
+int _mtx_spinlock_try(mtx_t mtx);
 int _mtx_lock_ex_try(mtx_t mtx);
 int _mtx_lock_sh_try(mtx_t mtx);
 void _mtx_downgrade(mtx_t mtx);
diff --git a/sys/sys/mutex2.h b/sys/sys/mutex2.h
index 00676047d3..9185d74037 100644
--- a/sys/sys/mutex2.h
+++ b/sys/sys/mutex2.h
@@ -38,6 +38,12 @@
 #ifndef _SYS_MUTEX_H_
 #include <sys/mutex.h>
 #endif
+#ifndef _SYS_THREAD2_H_
+#include <sys/thread2.h>
+#endif
+#ifndef _SYS_GLOBALDATA_H_
+#include <sys/globaldata.h>
+#endif
 #ifndef _MACHINE_ATOMIC_H_
 #include <machine/atomic.h>
 #endif
@@ -151,37 +157,59 @@ mtx_lock_sh_quick(mtx_t mtx, const char *ident)
 }
 
 /*
- * Short-form exclusive-lock a mutex, spin until acquired.  Recursion is
- * allowed.  This form is identical to mtx_spinlock_ex().
+ * Short-form exclusive spinlock a mutex.  Must be paired with
+ * mtx_spinunlock().
  */
 static __inline void
 mtx_spinlock(mtx_t mtx)
 {
+        globaldata_t gd = mycpu;
+
+        /*
+         * Predispose a hard critical section
+         */
+        ++gd->gd_curthread->td_critcount;
+        cpu_ccfence();
+        ++gd->gd_spinlocks_wr;
+
+        /*
+         * If we cannot get it trivially get it the hard way.
+         *
+         * Note that mtx_owner will be set twice if we fail to get it
+         * trivially, but there's no point conditionalizing it as a
+         * conditional will be slower.
+         */
         if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
-                _mtx_spinlock_ex(mtx);
+                _mtx_spinlock(mtx);
+        mtx->mtx_owner = gd->gd_curthread;
 }
 
-/*
- * Exclusive-lock a mutex, spin until acquired.  Recursion is allowed.
- */
-static __inline void
-mtx_spinlock_ex(mtx_t mtx)
+static __inline int
+mtx_spinlock_try(mtx_t mtx)
 {
+        globaldata_t gd = mycpu;
+
+        /*
+         * Predispose a hard critical section
+         */
+        ++gd->gd_curthread->td_critcount;
+        cpu_ccfence();
+        ++gd->gd_spinlocks_wr;
+
+        /*
+         * If we cannot get it trivially call _mtx_spinlock_try().  This
+         * function will clean up the hard critical section if it fails.
+         */
         if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
-                _mtx_spinlock_ex(mtx);
-}
-
-/*
- * Share-lock a mutex, spin until acquired.  Recursion is allowed.
- */
-static __inline void
-mtx_spinlock_sh(mtx_t mtx)
-{
-        if (atomic_cmpset_int(&mtx->mtx_lock, 0, 1) == 0)
-                _mtx_spinlock_sh(mtx);
+                return(_mtx_spinlock_try(mtx));
+        mtx->mtx_owner = gd->gd_curthread;
+        return (0);
 }
 
 /*
+ * Short-form exclusive-lock a mutex, spin until acquired.  Recursion is
+ * allowed.  This form is identical to mtx_spinlock_ex().
+ *
  * Attempt to exclusive-lock a mutex, return 0 on success and
  * EAGAIN on failure.
  */
@@ -289,6 +317,21 @@ mtx_unlock_sh(mtx_t mtx)
         _mtx_unlock(mtx);
 }
 
+/*
+ * NOTE: spinlocks are exclusive-only
+ */
+static __inline void
+mtx_spinunlock(mtx_t mtx)
+{
+        globaldata_t gd = mycpu;
+
+        mtx_unlock(mtx);
+
+        --gd->gd_spinlocks_wr;
+        cpu_ccfence();
+        --gd->gd_curthread->td_critcount;
+}
+
 /*
  * Return TRUE (non-zero) if the mutex is locked shared or exclusive by
  * anyone, including the owner.
-- 
2.41.0
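
Illustrative usage sketch (not part of the commit): the snippet below shows how a
hypothetical consumer could use the new API once it lands, in the spirit of the
syscons conversion mentioned above.  The names example_lock, example_update() and
example_update_try() are invented for illustration, and the lock is assumed to
have been initialized elsewhere (e.g. with mtx_init() at attach time).

/*
 * Hypothetical consumer of the reentrant mtx spinlock API (illustrative
 * names only; the lock is assumed to be mtx_init()'d during attach).
 */
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/mutex.h>
#include <sys/mutex2.h>

static struct mtx example_lock;

static void
example_update(void)
{
        /*
         * Enters a hard critical section and spins until exclusive
         * ownership is obtained.  Because mtx spinlocks carry a ref
         * count this also succeeds if the current thread already holds
         * example_lock (reentrant), unlike a plain spinlock().
         */
        mtx_spinlock(&example_lock);
        /* ... modify state shared with other cpus ... */
        mtx_spinunlock(&example_lock);  /* one unlock per acquisition */
}

static int
example_update_try(void)
{
        /*
         * Non-blocking variant: returns 0 on success, EAGAIN on failure.
         * On failure the td_critcount/gd_spinlocks_wr predisposition has
         * already been undone by _mtx_spinlock_try().
         */
        if (mtx_spinlock_try(&example_lock) != 0)
                return (EAGAIN);
        /* ... */
        mtx_spinunlock(&example_lock);
        return (0);
}

Every successful mtx_spinlock()/mtx_spinlock_try() must be paired with
mtx_spinunlock(), which drops the exclusive count and then leaves the hard
critical section in the reverse order it was entered.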