Rune - Further Object abstraction work
[rune.git] / libruntime / lock2.h
1 /*
2  * LOCK2.H
3  *
4  * Rune locking implementation.  The inlines try the easy cases and punt to a
5  * real procedure for the harder cases.
6  *
7  * NOTE: lklist_t defined in thread.h
8  */
9
/*
 * Slow-path procedures.  The inlines below handle the easy cases and
 * punt to these for the harder ones (contention, pending waiters,
 * no free tlc).
 */
void    _RuneLockEx(runelock_t *lock, int hard);
void    _RuneUnlockEx(runelock_t *lock, int hard);
void    _RuneLockReacquire(runethr_t *td);
void    _RuneAutostealLock(runethr_t *otd, runethr_t *ntd, runelock_t *lock);
14
/*
 * Initialize a lock.  The caller must have already pre-zeroed the lock.
 *
 * The all-zero state is the fully-initialized, unowned state, so there
 * is nothing further to do here; the commented-out list initialization
 * is retained for reference.
 */
static __inline
void
initLock(runelock_t *lock __unused)
{
    /* RUNE_INIT(&lock->rl_Blocked); */
}
24
25 /*
26  * Insert the lock onto the thread's locklist using the supplied tlc.
27  */
28 static __inline
29 void
30 RuneInsertTLC(runethr_t *td, runelock_t *lock, runetlc_t *tlc)
31 {
32     tlc->lock = lock;
33     tlc->count = lock->rl_Count & RLCOUNTF_MASK;
34     tlc->next = td->td_TLC;
35     td->td_TLC = tlc;
36 }
37
38 /*
39  * The idle lock is pre-initialized and locked to the specified thread, which
40  * might not be the current thread.
41  */
42 static __inline
43 void
44 initInitialLock(runethr_t *td, runelock_t *lock, runetlc_t *tlc)
45 {
46     lock->rl_Owner = td;
47     lock->rl_Count = RLCOUNT_INCR;
48     RuneInsertTLC(td, lock, tlc);
49 }
50
51
52 /*
53  * Dispose of a TLC after its lock has been released.
54  *
55  * Asserts if the tlc crosses a semantic (lock == NULL) boundary.
56  */
57 static __inline
58 void
59 RuneDisposeTLC(rgd_t *rgd, runethr_t *td, runelock_t *lock)
60 {
61     runetlc_t *tlc;
62     runetlc_t **tlcp;
63
64     tlcp = &td->td_TLC;
65     for (;;) {
66         tlc = *tlcp;
67         if (tlc->lock == lock)
68             break;
69         dassert(tlc->lock != NULL);
70         tlcp = &tlc->next;
71     }
72     *tlcp = tlc->next;
73     tlc->next = rgd->FreeTLC;
74     rgd->FreeTLC = tlc;
75 }
76
77 /*
78  * Obtain a Rune lock
79  */
80 static __inline
81 void
82 RuneLockEx(runelock_t *lock, int hard)
83 {
84     rgd_t  *rgd = getrgd();
85     runethr_t *td = rgd->CurThread;
86     runetlc_t *tlc = rgd->FreeTLC;
87
88     if (tlc) {
89         /*
90          * Try to acquire the lock quickly
91          */
92         if (lock->rl_Owner == td) {
93             dassert(lock->rl_Count & RLCOUNTF_MASK);
94             atomic_add_rune(&lock->rl_Count, RLCOUNT_INCR);
95             lock->rl_Hard += hard;
96             rgd->FreeTLC = tlc->next;
97             RuneInsertTLC(td, lock, tlc);
98 #ifdef DEBUG_RUNELOCKS
99             printf("%p LOCK  %p by %p\n", rgd, lock, td);
100 #endif
101 #if 0
102             if (lock == (void *)(uintptr_t) 0x8018b0180LLU)
103                 *(int *)0 = 1;
104 #endif
105             return;
106         }
107
108         for (;;) {
109             srunesize_t count;
110
111             count = lock->rl_Count;
112             cpu_ccfence();
113             if (count)
114                 break;
115             if (atomic_cmpset_rune(&lock->rl_Count, 0, RLCOUNT_INCR)) {
116                 dassert(lock->rl_Owner == NULL &&
117                         lock->rl_Hard == 0);
118                 lock->rl_Owner = td;
119                 lock->rl_Hard = hard;
120                 rgd->FreeTLC = tlc->next;
121                 RuneInsertTLC(td, lock, tlc);
122 #ifdef DEBUG_RUNELOCKS
123                 printf("%p LOCK  %p by %p\n", rgd, lock, td);
124 #endif
125                 return;
126             }
127             /* retry */
128         }
129     }
130
131     /*
132      * More complex
133      */
134     _RuneLockEx(lock, hard);
135     /* rgd invalid */
136 }
137
/*
 * Release a Rune lock (inline fast path).
 *
 * Handles the recursive-release and nominal last-release cases inline;
 * punts to _RuneUnlockEx() when exclusive waiters are pending
 * (RLCOUNTF_EXREQ).  (hard) is subtracted from rl_Hard.
 */
static __inline
void
RuneUnlockEx(runelock_t *lock, int hard)
{
    rgd_t  *rgd = getrgd();
    runethr_t *td = rgd->CurThread;

    /* Only the owner may release, and no more hardness than acquired */
    dassert(lock->rl_Owner == td && lock->rl_Hard >= hard);
    for (;;) {
        srunesize_t count;

        count = lock->rl_Count;
        cpu_ccfence();

        /*
         * Multiple locks (recursive release).  The comparison against
         * (RLCOUNT_INCR | ~RLCOUNTF_MASK) is true only when the count
         * field holds more than one increment, independent of any
         * flag bits.  We remain the owner so a plain atomic decrement
         * suffices.
         */
        if (count > (RLCOUNT_INCR | ~RLCOUNTF_MASK)) {
            lock->rl_Hard -= hard;
            atomic_add_rune(&lock->rl_Count, -RLCOUNT_INCR);
            RuneDisposeTLC(rgd, td, lock);
#ifdef DEBUG_RUNELOCKS
            printf("%p UNLCK %p(%08lx) by %p\n",
                   rgd, lock, lock->rl_Count, td);
#endif
            return;
        }

        /*
         * If waiters pending punt to the real procedure.
         */
        if (count & RLCOUNTF_EXREQ)
            break;

        /*
         * Nominal last lock.  Speculatively clear the ownership
         * fields, then try to atomically zero the count.  If the
         * cmpset fails (the count changed under us) ownership is
         * restored below and we retry.
         */
        lock->rl_Owner = NULL;
        lock->rl_Hard -= hard;
        dassert(lock->rl_Hard == 0);
        if (atomic_cmpset_rune(&lock->rl_Count, count, 0)) {
            RuneDisposeTLC(rgd, td, lock);
#ifdef DEBUG_RUNELOCKS
            printf("%p UNLCK %p(%08lx) by %p\n",
                   rgd, lock, lock->rl_Count, td);
#endif
            return;
        }
        lock->rl_Hard += hard;
        lock->rl_Owner = td;
    }

    /*
     * Else more complex: waiters are pending, let the real procedure
     * deal with them.
     */
    _RuneUnlockEx(lock, hard);
    /* rgd invalid */
}
199
/*
 * The lock is dead (the memory is gone), causes an assertion if anyone
 * attempts to obtain it.
 *
 * Sets RLCOUNTF_DEAD and adds one increment so the count can never
 * again read 0 (free) to the inline fast path.
 */
static __inline
void
RuneLockDead(runelock_t *lock)
{
    atomic_set_rune(&lock->rl_Count, RLCOUNTF_DEAD);
    atomic_add_rune(&lock->rl_Count, RLCOUNT_INCR);
}