From 83c891d5d2a4a437dbd464ff4a4bdf9602807e07 Mon Sep 17 00:00:00 2001
From: Matthew Dillon
Date: Thu, 19 Oct 2017 13:27:22 -0700
Subject: [PATCH] kernel - Cleanup token code, add simple exclusive priority

* Cleanup the token code and bring the comments up to date.

* Implement exclusive priority for the situation where a thread is
  acquiring only a single shared token. We cannot implement exclusive
  priority when multiple tokens are held because that can lead to
  deadlocks. The token code guarantees no deadlocks.
---
 sys/kern/lwkt_token.c | 53 ++++++++++++++++---------------------------
 sys/sys/thread.h      | 28 +++++++++++++++++------
 2 files changed, 40 insertions(+), 41 deletions(-)

diff --git a/sys/kern/lwkt_token.c b/sys/kern/lwkt_token.c
index 8f272ef5ff..2752c30da2 100644
--- a/sys/kern/lwkt_token.c
+++ b/sys/kern/lwkt_token.c
@@ -144,9 +144,6 @@ struct lwkt_token vnode_token = LWKT_TOKEN_INITIALIZER(vnode_token);
 static int lwkt_token_spin = 5;
 SYSCTL_INT(_lwkt, OID_AUTO, token_spin, CTLFLAG_RW,
     &lwkt_token_spin, 0, "Decontention spin loops");
-static int lwkt_token_delay = 0;
-SYSCTL_INT(_lwkt, OID_AUTO, token_delay, CTLFLAG_RW,
-    &lwkt_token_delay, 0, "Decontention spin delay in ns");
 
 /*
  * The collision count is bumped every time the LWKT scheduler fails
@@ -176,17 +173,6 @@ int tokens_debug_output;
 SYSCTL_INT(_lwkt, OID_AUTO, tokens_debug_output, CTLFLAG_RW,
     &tokens_debug_output, 0, "Generate stack trace N times");
 
-
-#ifdef DEBUG_LOCKS_LATENCY
-
-static long tokens_add_latency;
-SYSCTL_LONG(_debug, OID_AUTO, tokens_add_latency, CTLFLAG_RW,
-    &tokens_add_latency, 0,
-    "Add spinlock latency");
-
-#endif
-
-
 static int _lwkt_getalltokens_sorted(thread_t td);
 
 /*
@@ -284,7 +270,9 @@ _lwkt_trytokref(lwkt_tokref_t ref, thread_t td, long mode)
             * Our thread already holds the exclusive
             * bit, we treat this tokref as a shared
             * token (sorta) to make the token release
-            * code easier.
+            * code easier. Treating this as a shared
+            * token allows us to simply increment the
+            * count field.
             *
             * NOTE: oref cannot race above if it
             *       happens to be ours, so we're good.
@@ -326,13 +314,21 @@ _lwkt_trytokref(lwkt_tokref_t ref, thread_t td, long mode)
     * Attempt to get a shared token. Note that TOK_EXCLREQ
     * for shared tokens simply means the caller intends to
     * block. We never actually set the bit in tok->t_count.
+    *
+    * Due to the token's no-deadlock guarantee, and complications
+    * created by the sorted reacquisition code, we can only
+    * give exclusive requests priority over shared requests
+    * in situations where the thread holds only one token.
     */
    count = tok->t_count;
    for (;;) {
        oref = tok->t_ref;  /* can be NULL */
        cpu_ccfence();
-       if ((count & (TOK_EXCLUSIVE/*|TOK_EXCLREQ*/)) == 0) {
+       if ((count & (TOK_EXCLUSIVE|TOK_EXCLREQ)) == 0 ||
+           ((count & TOK_EXCLUSIVE) == 0 &&
+            td->td_toks_stop != &td->td_toks_base + 1)
+       ) {
            /*
             * It may be possible to get the token shared.
             */
@@ -369,27 +365,13 @@ _lwkt_trytokref_spin(lwkt_tokref_t ref, thread_t td, long mode)
 {
    int spin;
 
-   if (_lwkt_trytokref(ref, td, mode)) {
-#ifdef DEBUG_LOCKS_LATENCY
-       long j;
-       for (j = tokens_add_latency; j > 0; --j)
-           cpu_ccfence();
-#endif
+   if (_lwkt_trytokref(ref, td, mode))
        return TRUE;
-   }
    for (spin = lwkt_token_spin; spin > 0; --spin) {
-       if (lwkt_token_delay)
-           tsc_delay(lwkt_token_delay);
-       else
-           cpu_pause();
-       if (_lwkt_trytokref(ref, td, mode)) {
-#ifdef DEBUG_LOCKS_LATENCY
-           long j;
-           for (j = tokens_add_latency; j > 0; --j)
-               cpu_ccfence();
-#endif
+       cpu_pause();
+       cpu_pause();
+       if (_lwkt_trytokref(ref, td, mode))
            return TRUE;
-       }
    }
    return FALSE;
 }
@@ -557,6 +539,9 @@ _lwkt_getalltokens_sorted(thread_t td)
     *
     * NOTE: Recursively acquired tokens are ordered the same as in the
     *       td_toks_array so we can always get the earliest one first.
+    *       This is particularly important when a token is acquired
+    *       exclusively multiple times, as only the first acquisition
+    *       is treated as an exclusive token.
     */
    i = 0;
    scan = &td->td_toks_base;
diff --git a/sys/sys/thread.h b/sys/sys/thread.h
index 052373106b..e5f2b4f6a3 100644
--- a/sys/sys/thread.h
+++ b/sys/sys/thread.h
@@ -82,6 +82,10 @@ struct intrframe;
  * running. If the thread blocks, other threads can run holding the same
  * token(s). The tokens are reacquired when the original thread resumes.
  *
+ * Tokens guarantee that no deadlock can happen regardless of type or
+ * ordering. However, obtaining the same token first shared, then
+ * stacking exclusive, is not allowed and will panic.
+ *
  * A thread can depend on its serialization remaining intact through a
  * preemption. An interrupt which attempts to use the same token as the
  * thread being preempted will reschedule itself for non-preemptive
@@ -93,13 +97,23 @@ struct intrframe;
  * thread has a stack of tokref's to keep track of acquired tokens. Multiple
  * tokref's may reference the same token.
  *
- * Tokens can be held shared or exclusive. An exclusive holder is able
- * to set the TOK_EXCLUSIVE bit in t_count as long as no bit in the count
- * mask is set. If unable to accomplish this TOK_EXCLREQ can be set instead
- * which prevents any new shared acquisitions while the exclusive requestor
- * spins in the scheduler. A shared holder can bump t_count by the increment
- * value as long as neither TOK_EXCLUSIVE or TOK_EXCLREQ is set, else spin
- * in the scheduler.
+ * EXCLUSIVE TOKENS
+ *      Acquiring an exclusive token requires acquiring the EXCLUSIVE bit
+ *      with count == 0. If the exclusive bit cannot be acquired, EXCLREQ
+ *      is set. Once acquired, EXCLREQ is cleared (but could get set by
+ *      another thread also trying for an exclusive lock at any time).
+ *
+ * SHARED TOKENS
+ *      Acquiring a shared token requires waiting for the EXCLUSIVE bit
+ *      to be cleared and then acquiring a count. A shared lock request
+ *      can temporarily acquire a count and then back it out if it is
+ *      unable to obtain the EXCLUSIVE bit, allowing fetchadd to be used.
+ *
+ *      A thread attempting to get a single shared token will defer to
+ *      pending exclusive requesters. However, a thread already holding
+ *      one or more tokens and trying to get an additional shared token
+ *      cannot defer to exclusive requesters because doing so can lead
+ *      to a deadlock.
  *
  * Multiple exclusive tokens are handled by treating the additional tokens
  * as a special case of the shared token, incrementing the count value. This
-- 
2.41.0
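
For readers following the new shared-acquisition rule, the sketch below (not part of the patch or the DragonFly sources) models the decision the patch adds to the shared path of _lwkt_trytokref(): a shared grab defers to a pending exclusive request only when the acquiring thread holds no other tokens, because deferring while other tokens are held could deadlock. The names MY_TOK_EXCLUSIVE, MY_TOK_EXCLREQ, shared_grab_allowed() and holds_only_this_token are hypothetical stand-ins for the real TOK_* bits in tok->t_count and the td->td_toks_stop / td->td_toks_base bookkeeping.

/*
 * Hedged sketch, assuming simplified stand-in names for the real
 * kernel state; it only models the new two-arm test, not token
 * acquisition itself.
 */
#include <stdbool.h>
#include <stdio.h>

#define MY_TOK_EXCLUSIVE  0x00000001UL  /* token is held exclusively */
#define MY_TOK_EXCLREQ    0x00000002UL  /* an exclusive request is pending */

/*
 * Decide whether a shared acquisition may proceed for a given count word.
 * holds_only_this_token corresponds to the patch's
 * (td->td_toks_stop == &td->td_toks_base + 1) test: the tokref being
 * acquired is the only one on the thread's token stack.
 */
static bool
shared_grab_allowed(unsigned long count, bool holds_only_this_token)
{
    if ((count & (MY_TOK_EXCLUSIVE | MY_TOK_EXCLREQ)) == 0) {
        /* No exclusive holder and no pending exclusive request. */
        return true;
    }
    if ((count & MY_TOK_EXCLUSIVE) == 0 && !holds_only_this_token) {
        /*
         * An exclusive request is pending, but this thread already
         * holds other tokens. Deferring here could deadlock, so the
         * shared grab is allowed to proceed anyway.
         */
        return true;
    }
    /*
     * Either the token is held exclusively, or this thread holds no
     * other tokens and therefore defers to the pending exclusive
     * request (the "simple exclusive priority" the patch describes).
     */
    return false;
}

int
main(void)
{
    /* Uncontended token: shared grab allowed. */
    printf("%d\n", shared_grab_allowed(0, true));                   /* 1 */
    /* Pending exclusive request, only token held: defer. */
    printf("%d\n", shared_grab_allowed(MY_TOK_EXCLREQ, true));      /* 0 */
    /* Pending exclusive request, other tokens held: proceed. */
    printf("%d\n", shared_grab_allowed(MY_TOK_EXCLREQ, false));     /* 1 */
    /* Token held exclusively by someone else: cannot grab shared. */
    printf("%d\n", shared_grab_allowed(MY_TOK_EXCLUSIVE, false));   /* 0 */
    return 0;
}

The first arm is the deferential path (no exclusive holder, no pending request); the second is the multi-token escape hatch that preserves the no-deadlock guarantee described in the commit message.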