/*
 * Rune locking implementation. The inlines try the easy cases and punt to
 * a real procedure for the harder cases.
 *
 * NOTE: lklist_t defined in thread.h
 */
void _RuneLockEx(runelock_t *lock, int hard);
void _RuneUnlockEx(runelock_t *lock, int hard);
void _RuneLockReacquire(runethr_t *td);
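
/*
 * Illustrative sketch only (assumptions, not the real definitions, which
 * live in thread.h): the fields the inlines below rely on.  The integer
 * type used for the count fields is a placeholder.
 */
#if 0
typedef struct runetlc {
	struct runetlc	*next;		/* next entry on the thread's locklist */
	runelock_t	*lock;		/* lock this entry records */
	uintptr_t	count;		/* recursion count recorded at insert */
} runetlc_t;

typedef struct runelock {
	uintptr_t	rl_Count;	/* recursion count plus EXREQ/DEAD flags */
	runethr_t	*rl_Owner;	/* owning thread, NULL when unowned */
	int		rl_Hard;	/* hard-lock flag supplied on acquisition */
	lklist_t	rl_Blocked;	/* blocked-thread list */
} runelock_t;
#endif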
/*
 * Initialize a lock. The caller must have already pre-zeroed the lock.
 */
static __inline void
initLock(runelock_t *lock __unused)
{
	/* RUNE_INIT(&lock->rl_Blocked); */
}
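
/*
 * Usage sketch (assumption): the lock memory is zero'd first, e.g. with
 * bzero() or by a zeroing allocator, and then handed to initLock().
 */
#if 0
	runelock_t lock;

	bzero(&lock, sizeof(lock));
	initLock(&lock);
#endif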
/*
 * Insert the lock onto the thread's locklist using the supplied tlc.
 */
static __inline void
RuneInsertTLC(runethr_t *td, runelock_t *lock, runetlc_t *tlc)
{
	tlc->lock = lock;
	tlc->count = lock->rl_Count & RLCOUNTF_MASK;
	tlc->next = td->td_TLC;
	td->td_TLC = tlc;
}
/*
 * The idle lock is pre-initialized and locked to the specified thread, which
 * might not be the current thread.
 */
static __inline void
initInitialLock(runethr_t *td, runelock_t *lock, runetlc_t *tlc)
{
	lock->rl_Owner = td;
	lock->rl_Count = RLCOUNT_INCR;
	RuneInsertTLC(td, lock, tlc);
}
/*
 * Dispose of a TLC after its lock has been released.
 *
 * Asserts if the tlc crosses a semantic (lock == NULL) boundary.
 */
static __inline void
RuneDisposeTLC(rgd_t *rgd, runethr_t *td, runelock_t *lock)
{
	if (tlc->lock == lock)
	dassert(tlc->lock != NULL);
	tlc->next = rgd->FreeTLC;
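
/*
 * Minimal sketch (an assumption, not the original body above, which is only
 * partially shown): walk the thread's TLC list for the entry recording this
 * lock, asserting that no semantic (lock == NULL) boundary is crossed, then
 * unlink the entry and return it to the per-rgd free list.
 */
#if 0
	runetlc_t **ptlc = &td->td_TLC;
	runetlc_t *tlc;

	while ((tlc = *ptlc) != NULL) {
		if (tlc->lock == lock)
			break;
		dassert(tlc->lock != NULL);	/* boundary check */
		ptlc = &tlc->next;
	}
	dassert(tlc != NULL);			/* the lock must be on the list */
	*ptlc = tlc->next;
	tlc->next = rgd->FreeTLC;
	rgd->FreeTLC = tlc;
#endif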
static __inline void
RuneLockEx(runelock_t *lock, int hard)
{
	rgd_t *rgd = getrgd();
	runethr_t *td = rgd->CurThread;
	runetlc_t *tlc = rgd->FreeTLC;

	/*
	 * Try to acquire the lock quickly
	 */
	if (lock->rl_Owner == td) {
		dassert(lock->rl_Count & RLCOUNTF_MASK);
		atomic_add_rune(&lock->rl_Count, RLCOUNT_INCR);
		rgd->FreeTLC = tlc->next;
		RuneInsertTLC(td, lock, tlc);

	count = lock->rl_Count;
	if (atomic_cmpset_rune(&lock->rl_Count, 0, RLCOUNT_INCR)) {
		dassert(lock->rl_Owner == NULL);
		lock->rl_Owner = td;
		lock->rl_Hard = hard;
		rgd->FreeTLC = tlc->next;
		RuneInsertTLC(td, lock, tlc);

	_RuneLockEx(lock, hard);
/*
 * Release a Rune lock
 */
static __inline void
RuneUnlockEx(runelock_t *lock, int hard)
{
	rgd_t *rgd = getrgd();
	runethr_t *td = rgd->CurThread;

	dassert(lock->rl_Owner == td);

	count = lock->rl_Count;

	if (count > (RLCOUNT_INCR | ~RLCOUNTF_MASK)) {
		atomic_add_rune(&lock->rl_Count, -RLCOUNT_INCR);
		RuneDisposeTLC(rgd, td, lock);

	/*
	 * If waiters are pending, punt to the real procedure.
	 */
	if (count & RLCOUNTF_EXREQ)

	lock->rl_Owner = NULL;

	dassert(lock->rl_Hard == 0);

	if (atomic_cmpset_rune(&lock->rl_Count, count, 0)) {
		RuneDisposeTLC(rgd, td, lock);

	_RuneUnlockEx(lock, hard);
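
/*
 * Usage sketch (assumption): the owning thread may acquire a lock
 * recursively; every RuneLockEx() is paired with a RuneUnlockEx().
 * 'lock' and 'hard' are placeholders for the caller's values.
 */
#if 0
	RuneLockEx(lock, hard);		/* first acquisition, owner set */
	RuneLockEx(lock, hard);		/* recursive, owner fast path bumps count */
	RuneUnlockEx(lock, hard);	/* drops the recursion count */
	RuneUnlockEx(lock, hard);	/* final release, owner cleared */
#endif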
/*
 * The lock is dead (the memory is gone); this causes an assertion if anyone
 * attempts to obtain it.
 */
static __inline void
RuneLockDead(runelock_t *lock)
{
	atomic_set_rune(&lock->rl_Count, RLCOUNTF_DEAD);
	atomic_add_rune(&lock->rl_Count, RLCOUNT_INCR);
}
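
/*
 * Usage sketch (assumption): mark a lock dead immediately before the memory
 * containing it is released so that any later attempt to obtain it asserts
 * instead of touching freed memory.  'obj' and objFree() are hypothetical
 * placeholders.
 */
#if 0
	RuneLockDead(&obj->lock);
	objFree(obj);			/* hypothetical release of the container */
#endif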
/*
 * Push a semantic layer context lock. This is performed on any stack
 * layer for which an element address is taken.
 */
	rgd_t *rgd = getrgd();
	runethr_t *td = rgd->CurThread;

	if ((rs = rgd->FreeRS) == NULL) {
		rs = zalloc(sizeof(*rs));
		initRefStor(rs, NULL, RSOP_THREAD);
	}
	rgd->FreeRS = rs->rs_Parent;

	RuneLockEx(&rs->rs_Lock, hard);
	rs->rs_Parent = td->td_StackRS;
	rs->rs_Op = RSOP_THREAD;
	td->td_StackRS = rs;
/*
 * Pop a semantic layer context lock.
 */
	rgd_t *rgd = getrgd();
	runethr_t *td = rgd->CurThread;

	rs = td->td_StackRS;
	td->td_StackRS = rs->rs_Parent;
	RuneUnlockEx(&rs->rs_Lock, hard);
	if (rs->rs_Refs != 1)
		dpanic("Stack context popped while still in use!");
	rs->rs_Parent = rgd->FreeRS;
	rgd->FreeRS = rs;
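
/*
 * Usage sketch: runePushStackContext()/runePopStackContext() are
 * hypothetical stand-ins for the push/pop routines above, whose names are
 * not shown in this fragment.  A stack layer that lets element addresses
 * escape brackets itself with the pair; every reference taken inside the
 * layer must be dropped before the pop, otherwise the pop panics.
 */
#if 0
	runePushStackContext(hard);	/* hypothetical name */
	/* ... run the semantic layer; element addresses may be taken ... */
	runePopStackContext(hard);	/* hypothetical name */
#endif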