Rune - Implement hard-locking model feature
rune.git: libruntime/lock2.h
/*
 * LOCK2.H
 *
 * Rune locking implementation.  The inlines try the easy cases and punt to
 * a real procedure for the harder cases.
 *
 * NOTE: lklist_t defined in thread.h
 */

void _RuneLockEx(runelock_t *lock, int hard);
void _RuneUnlockEx(runelock_t *lock, int hard);
void _RuneLockReacquire(runethr_t *td);

/*
 * Initialize a lock.  The caller must have already pre-zeroed the lock.
 */
static __inline
void
initLock(runelock_t *lock __unused)
{
        /* RUNE_INIT(&lock->rl_Blocked); */
}

/*
 * Insert the lock onto the thread's locklist using the supplied tlc.
 */
static __inline
void
RuneInsertTLC(runethr_t *td, runelock_t *lock, runetlc_t *tlc)
{
        tlc->lock = lock;
        tlc->count = lock->rl_Count & RLCOUNTF_MASK;
        tlc->next = td->td_TLC;
        td->td_TLC = tlc;
}

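/*
 * Illustrative sketch (assumed usage, not part of the original header):
 * TLC entries push LIFO onto td_TLC, so nested acquires stack newest
 * first.  lockA and lockB are hypothetical locks.
 */
#if 0
        RuneLockEx(&lockA, 0);
        RuneLockEx(&lockB, 0);
        /* td->td_TLC -> (lockB tlc) -> (lockA tlc) -> ... */
        RuneUnlockEx(&lockB, 0);
        RuneUnlockEx(&lockA, 0);
#endif
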
/*
 * The idle lock is pre-initialized and locked to the specified thread, which
 * might not be the current thread.
 */
static __inline
void
initInitialLock(runethr_t *td, runelock_t *lock, runetlc_t *tlc)
{
        lock->rl_Owner = td;
        lock->rl_Count = RLCOUNT_INCR;
        RuneInsertTLC(td, lock, tlc);
}

/*
 * Dispose of a TLC after its lock has been released.
 *
 * Asserts if the scan crosses a semantic (lock == NULL) boundary.
 */
static __inline
void
RuneDisposeTLC(rgd_t *rgd, runethr_t *td, runelock_t *lock)
{
        runetlc_t *tlc;
        runetlc_t **tlcp;

        tlcp = &td->td_TLC;
        for (;;) {
                tlc = *tlcp;
                if (tlc->lock == lock)
                        break;
                dassert(tlc->lock != NULL);
                tlcp = &tlc->next;
        }
        *tlcp = tlc->next;
        tlc->next = rgd->FreeTLC;
        rgd->FreeTLC = tlc;
}

/*
 * Obtain a Rune lock
 */
static __inline
void
RuneLockEx(runelock_t *lock, int hard)
{
        rgd_t *rgd = getrgd();
        runethr_t *td = rgd->CurThread;
        runetlc_t *tlc = rgd->FreeTLC;

        if (tlc) {
                /*
                 * Try to acquire the lock quickly
                 */
                if (lock->rl_Owner == td) {
                        dassert(lock->rl_Count & RLCOUNTF_MASK);
                        atomic_add_rune(&lock->rl_Count, RLCOUNT_INCR);
                        if (hard)
                                ++lock->rl_Hard;
                        rgd->FreeTLC = tlc->next;
                        RuneInsertTLC(td, lock, tlc);
                        return;
                }

                for (;;) {
                        runesize_t count;

                        count = lock->rl_Count;
                        cpu_ccfence();
                        if (count)
                                break;
                        if (atomic_cmpset_rune(&lock->rl_Count, 0, RLCOUNT_INCR)) {
                                dassert(lock->rl_Owner == NULL);
                                lock->rl_Owner = td;
                                lock->rl_Hard = hard;
                                rgd->FreeTLC = tlc->next;
                                RuneInsertTLC(td, lock, tlc);
                                return;
                        }
                        /* retry */
                }
        }

        /*
         * More complex case, punt to the real procedure.
         */
        _RuneLockEx(lock, hard);
        /* rgd invalid */
}

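/*
 * Illustrative sketch (assumed usage, not part of the original header):
 * the rl_Owner == td fast path above makes the lock recursive, so a
 * thread may re-acquire a lock it already holds as long as every
 * acquire is paired with a release.  'lock' is a hypothetical lock.
 */
#if 0
        RuneLockEx(lock, 0);    /* first acquire, takes ownership */
        RuneLockEx(lock, 0);    /* recursive acquire, bumps rl_Count */
        RuneUnlockEx(lock, 0);
        RuneUnlockEx(lock, 0);  /* final release clears rl_Owner */
#endif
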
/*
 * Release a Rune lock
 */
static __inline
void
RuneUnlockEx(runelock_t *lock, int hard)
{
        rgd_t *rgd = getrgd();
        runethr_t *td = rgd->CurThread;

        dassert(lock->rl_Owner == td);
        for (;;) {
                runesize_t count;

                count = lock->rl_Count;
                cpu_ccfence();

                /*
                 * Multiple lock counts held, just drop one.
                 */
                if (count > (RLCOUNT_INCR | ~RLCOUNTF_MASK)) {
                        if (hard)
                                --lock->rl_Hard;
                        atomic_add_rune(&lock->rl_Count, -RLCOUNT_INCR);
                        RuneDisposeTLC(rgd, td, lock);
                        return;
                }

                /*
                 * If waiters are pending, punt to the real procedure.
                 */
                if (count & RLCOUNTF_EXREQ)
                        break;

                /*
                 * Nominal last lock.
                 */
                lock->rl_Owner = NULL;
                if (hard) {
                        --lock->rl_Hard;
                        dassert(lock->rl_Hard == 0);
                }
                if (atomic_cmpset_rune(&lock->rl_Count, count, 0)) {
                        RuneDisposeTLC(rgd, td, lock);
                        return;
                }
                if (hard)
                        ++lock->rl_Hard;
                lock->rl_Owner = td;
        }

        /*
         * Else more complex, punt to the real procedure.
         */
        _RuneUnlockEx(lock, hard);
        /* rgd invalid */
}

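/*
 * Illustrative sketch (assumed usage, not part of the original header):
 * a hard acquire must be matched by a hard release so rl_Hard stays
 * balanced.  'lock' is a hypothetical lock.
 */
#if 0
        RuneLockEx(lock, 1);    /* hard-lock, bumps rl_Hard */
        /* ... critical section ... */
        RuneUnlockEx(lock, 1);  /* matching hard release */
#endif
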
/*
 * Mark the lock dead (the memory is going away); anyone who subsequently
 * attempts to obtain it causes an assertion.
 */
static __inline
void
RuneLockDead(runelock_t *lock)
{
        atomic_set_rune(&lock->rl_Count, RLCOUNTF_DEAD);
        atomic_add_rune(&lock->rl_Count, RLCOUNT_INCR);
}

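/*
 * Illustrative sketch (assumed teardown ordering, not part of the
 * original header): mark an embedded lock dead before its container is
 * freed so a late RuneLockEx() trips the assertion rather than racing
 * the teardown.  'obj' and objFree() are hypothetical.
 */
#if 0
        RuneLockDead(&obj->lock);
        objFree(obj);           /* hypothetical release of the container */
#endif
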
/*
 * Push a semantic layer context lock.  This is performed on any stack
 * layer for which an element address is taken.
 */
static __inline
void
RuneSRSGet(int hard)
{
        rgd_t *rgd = getrgd();
        runethr_t *td = rgd->CurThread;
        RefStor *rs;

        if ((rs = rgd->FreeRS) == NULL) {
                rs = zalloc(sizeof(*rs));
                initRefStor(rs, NULL, RSOP_THREAD);
                rs->rs_Refs = 1;
        } else {
                rgd->FreeRS = rs->rs_Parent;
        }
        RuneLockEx(&rs->rs_Lock, hard);
        rs->rs_Parent = td->td_StackRS;
        rs->rs_Op = RSOP_THREAD;
        td->td_StackRS = rs;
}

/*
 * Pop a semantic layer context lock.
 */
static __inline
void
RuneSRSPut(int hard)
{
        rgd_t *rgd = getrgd();
        runethr_t *td = rgd->CurThread;
        RefStor *rs;

        rs = td->td_StackRS;
        dassert(rs);
        td->td_StackRS = rs->rs_Parent;
        RuneUnlockEx(&rs->rs_Lock, hard);
        if (rs->rs_Refs != 1)
                dpanic("Stack context popped while still in use!");
        rs->rs_Parent = rgd->FreeRS;
        rgd->FreeRS = rs;
}
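
/*
 * Illustrative sketch (assumed usage, not part of the original header):
 * SRS push/pop brackets a stack layer whose element addresses escape;
 * the pop panics if references to the layer remain.
 */
#if 0
        RuneSRSGet(0);          /* push context lock for this stack layer */
        /* ... element addresses within the layer may be taken ... */
        RuneSRSPut(0);          /* pop; panics if still referenced */
#endif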