libm & rtld: Ansify some remaining functions.
[dragonfly.git] / libexec / rtld-elf / rtld_lock.c
/*-
 * Copyright 1999, 2000 John D. Polstra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: FreeBSD: src/libexec/rtld-elf/sparc64/lockdflt.c,v 1.3 2002/10/09
 * $FreeBSD$
 */

/*
 * Thread locking implementation for the dynamic linker.
 *
 * We use the "simple, non-scalable reader-preference lock" from:
 *
 * J. M. Mellor-Crummey and M. L. Scott. "Scalable Reader-Writer
 * Synchronization for Shared-Memory Multiprocessors." 3rd ACM Symp. on
 * Principles and Practice of Parallel Programming, April 1991.
 *
 * In this algorithm the lock is a single word.  Its low-order bit is
 * set when a writer holds the lock.  The remaining high-order bits
 * contain a count of readers desiring the lock.  The algorithm requires
 * atomic "compare_and_store" and "add" operations, which we implement
 * using assembly language sequences in "rtld_start.S".
 */
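
A worked illustration of the lock-word states may help; the values below follow directly from the WAFLAG and RC_INCR definitions that come next in the file:

/*
 * Lock-word states (illustrative):
 *	0x0	lock free
 *	0x1	a writer holds the lock (WAFLAG)
 *	0x6	three readers hold the lock (3 * RC_INCR)
 *	0x7	a writer holds the lock while three readers spin
 *
 * A reader adds RC_INCR and then spins until WAFLAG clears; a writer
 * compare-and-sets the word from 0 to WAFLAG, so it can only enter
 * when no readers are present or waiting (reader preference).
 */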

#include <sys/param.h>
#include <signal.h>
#include <stdlib.h>
#include <time.h>

#include "debug.h"
#include "rtld.h"
#include "rtld_machdep.h"

#define	WAFLAG		0x1	/* A writer holds the lock */
#define	RC_INCR		0x2	/* Adjusts count of readers desiring lock */

typedef struct Struct_Lock {
	volatile u_int lock;
	void *base;
} Lock;

static sigset_t fullsigmask, oldsigmask;
static int thread_flag;

static void *
def_lock_create(void)
{
	void *base;
	char *p;
	uintptr_t r;
	Lock *l;

	/*
	 * Arrange for the lock to occupy its own cache line.  First, we
	 * optimistically allocate just a cache line, hoping that malloc
	 * will give us a well-aligned block of memory.  If that doesn't
	 * work, we allocate a larger block and take a well-aligned cache
	 * line from it.
	 */
	base = xmalloc(CACHE_LINE_SIZE);
	p = (char *)base;
	if ((uintptr_t)p % CACHE_LINE_SIZE != 0) {
		free(base);
		base = xmalloc(2 * CACHE_LINE_SIZE);
		p = (char *)base;
		if ((r = (uintptr_t)p % CACHE_LINE_SIZE) != 0)
			p += CACHE_LINE_SIZE - r;
	}
	l = (Lock *)p;
	l->base = base;
	l->lock = 0;
	return l;
}
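
The fallback arithmetic is easiest to see with concrete numbers. A sketch assuming CACHE_LINE_SIZE is 64 and a hypothetical malloc result (the real value comes from rtld_machdep.h):

/*
 * Example with CACHE_LINE_SIZE == 64:
 *	base = xmalloc(128) returns p = 0x800f10
 *	r = 0x800f10 % 64 = 16
 *	p += 64 - 16	->  p = 0x800f40, which is 64-byte aligned
 * The Lock lives at 0x800f40 with 80 of the 128 bytes still ahead of
 * the block's end, and l->base keeps 0x800f10 so free() later gets
 * the pointer malloc originally returned.
 */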

static void
def_lock_destroy(void *lock)
{
	Lock *l = (Lock *)lock;

	free(l->base);
}

static void
def_rlock_acquire(void *lock)
{
	Lock *l = (Lock *)lock;

	atomic_add_acq_int(&l->lock, RC_INCR);
	while (l->lock & WAFLAG)
		;		/* Spin */
}

static void
def_wlock_acquire(void *lock)
{
	Lock *l = (Lock *)lock;
	sigset_t tmp_oldsigmask;

	for ( ; ; ) {
		sigprocmask(SIG_BLOCK, &fullsigmask, &tmp_oldsigmask);
		if (atomic_cmpset_acq_int(&l->lock, 0, WAFLAG))
			break;
		sigprocmask(SIG_SETMASK, &tmp_oldsigmask, NULL);
	}
	oldsigmask = tmp_oldsigmask;
}

static void
def_lock_release(void *lock)
{
	Lock *l = (Lock *)lock;

	if ((l->lock & WAFLAG) == 0)
		atomic_add_rel_int(&l->lock, -RC_INCR);
	else {
		atomic_add_rel_int(&l->lock, -WAFLAG);
		sigprocmask(SIG_SETMASK, &oldsigmask, NULL);
	}
}
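
Note why def_wlock_acquire() blocks signals for as long as the write lock is held: a signal handler may re-enter the dynamic linker (for example, its first call through an unresolved PLT entry takes the bind lock as a reader), and these default locks are not recursive. A sketch of the self-deadlock the mask prevents, assuming no signal blocking:

/*
 * Thread T:	def_wlock_acquire(l)	-> l->lock = WAFLAG
 * Signal:	handler calls a not-yet-resolved function
 * Lazy bind:	def_rlock_acquire(l)	-> adds RC_INCR, then
 *		while (l->lock & WAFLAG) ;	spins forever
 *
 * Blocking fullsigmask before taking WAFLAG, and restoring oldsigmask
 * in def_lock_release(), closes this window for the default locks.
 */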

static int
def_thread_set_flag(int mask)
{
	int old_val = thread_flag;
	thread_flag |= mask;
	return (old_val);
}

static int
def_thread_clr_flag(int mask)
{
	int old_val = thread_flag;
	thread_flag &= ~mask;
	return (old_val);
}

/*
 * Public interface exposed to the rest of the dynamic linker.
 */
static struct RtldLockInfo lockinfo;
static struct RtldLockInfo deflockinfo;

static __inline int
thread_mask_set(int mask)
{
	return lockinfo.thread_set_flag(mask);
}

static __inline void
thread_mask_clear(int mask)
{
	lockinfo.thread_clr_flag(mask);
}

#define	RTLD_LOCK_CNT	3
struct rtld_lock {
	void	*handle;
	int	mask;
} rtld_locks[RTLD_LOCK_CNT];

rtld_lock_t	rtld_bind_lock = &rtld_locks[0];
rtld_lock_t	rtld_libc_lock = &rtld_locks[1];
rtld_lock_t	rtld_phdr_lock = &rtld_locks[2];

void
rlock_acquire(rtld_lock_t lock, RtldLockState *lockstate)
{

	if (lockstate == NULL)
		return;

	if (thread_mask_set(lock->mask) & lock->mask) {
		dbg("rlock_acquire: recursed");
		lockstate->lockstate = RTLD_LOCK_UNLOCKED;
		return;
	}
	lockinfo.rlock_acquire(lock->handle);
	lockstate->lockstate = RTLD_LOCK_RLOCKED;
}

void
wlock_acquire(rtld_lock_t lock, RtldLockState *lockstate)
{

	if (lockstate == NULL)
		return;

	if (thread_mask_set(lock->mask) & lock->mask) {
		dbg("wlock_acquire: recursed");
		lockstate->lockstate = RTLD_LOCK_UNLOCKED;
		return;
	}
	lockinfo.wlock_acquire(lock->handle);
	lockstate->lockstate = RTLD_LOCK_WLOCKED;
}

void
lock_release(rtld_lock_t lock, RtldLockState *lockstate)
{

	if (lockstate == NULL)
		return;

	switch (lockstate->lockstate) {
	case RTLD_LOCK_UNLOCKED:
		break;
	case RTLD_LOCK_RLOCKED:
	case RTLD_LOCK_WLOCKED:
		thread_mask_clear(lock->mask);
		lockinfo.lock_release(lock->handle);
		break;
	default:
		assert(0);
	}
}
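
A minimal sketch of how the rest of rtld is expected to drive this interface; the function and its body are hypothetical, but the acquire/release pairing mirrors rlock_acquire() and lock_release() above:

static void
example_locked_walk(void)	/* hypothetical caller */
{
	RtldLockState lockstate;

	rlock_acquire(rtld_bind_lock, &lockstate);
	/* ... read-only walk of the shared object list ... */
	lock_release(rtld_bind_lock, &lockstate);
}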

void
lock_upgrade(rtld_lock_t lock, RtldLockState *lockstate)
{

	if (lockstate == NULL)
		return;

	lock_release(lock, lockstate);
	wlock_acquire(lock, lockstate);
}

void
lock_restart_for_upgrade(RtldLockState *lockstate)
{

	if (lockstate == NULL)
		return;

	switch (lockstate->lockstate) {
	case RTLD_LOCK_UNLOCKED:
	case RTLD_LOCK_WLOCKED:
		break;
	case RTLD_LOCK_RLOCKED:
		siglongjmp(lockstate->env, 1);
		break;
	default:
		assert(0);
	}
}
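
lock_restart_for_upgrade() only works if the reader set up a siglongjmp target first. A sketch of the intended pairing, assuming the caller saved its context in lockstate->env with sigsetjmp():

	RtldLockState lockstate;

	rlock_acquire(rtld_bind_lock, &lockstate);
	if (sigsetjmp(lockstate.env, 0) != 0)
		lock_upgrade(rtld_bind_lock, &lockstate);
	/*
	 * Read-mostly work.  If a deeper function discovers it must
	 * modify shared state, it calls
	 * lock_restart_for_upgrade(&lockstate); the siglongjmp() lands
	 * back at sigsetjmp() above, which now returns nonzero and
	 * upgrades to the write lock before the work is retried.
	 */
	lock_release(rtld_bind_lock, &lockstate);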

void
lockdflt_init(void)
{
	int i;

	deflockinfo.rtli_version = RTLI_VERSION;
	deflockinfo.lock_create = def_lock_create;
	deflockinfo.lock_destroy = def_lock_destroy;
	deflockinfo.rlock_acquire = def_rlock_acquire;
	deflockinfo.wlock_acquire = def_wlock_acquire;
	deflockinfo.lock_release = def_lock_release;
	deflockinfo.thread_set_flag = def_thread_set_flag;
	deflockinfo.thread_clr_flag = def_thread_clr_flag;
	deflockinfo.at_fork = NULL;

	for (i = 0; i < RTLD_LOCK_CNT; i++) {
		rtld_locks[i].mask = (1 << i);
		rtld_locks[i].handle = NULL;
	}

	memcpy(&lockinfo, &deflockinfo, sizeof(lockinfo));
	_rtld_thread_init(NULL);
	/*
	 * Construct a mask to block all signals except traps which might
	 * conceivably be generated within the dynamic linker itself.
	 */
	sigfillset(&fullsigmask);
	sigdelset(&fullsigmask, SIGILL);
	sigdelset(&fullsigmask, SIGTRAP);
	sigdelset(&fullsigmask, SIGABRT);
	sigdelset(&fullsigmask, SIGEMT);
	sigdelset(&fullsigmask, SIGFPE);
	sigdelset(&fullsigmask, SIGBUS);
	sigdelset(&fullsigmask, SIGSEGV);
	sigdelset(&fullsigmask, SIGSYS);
}

/*
 * Callback function that allows a threads implementation to
 * register its own locking primitives if the default ones are
 * not suitable.  The calling thread should be the only thread
 * executing at the time of invocation.
 */
void
_rtld_thread_init(struct RtldLockInfo *pli)
{
	int flags, i;
	void *locks[RTLD_LOCK_CNT];

	/* disable all locking while this function is running */
	flags = thread_mask_set(~0);

	if (pli == NULL)
		pli = &deflockinfo;

	for (i = 0; i < RTLD_LOCK_CNT; i++)
		if ((locks[i] = pli->lock_create()) == NULL)
			break;

	if (i < RTLD_LOCK_CNT) {
		while (--i >= 0)
			pli->lock_destroy(locks[i]);
		abort();
	}

	for (i = 0; i < RTLD_LOCK_CNT; i++) {
		if (rtld_locks[i].handle == NULL)
			continue;
		if (flags & rtld_locks[i].mask)
			lockinfo.lock_release(rtld_locks[i].handle);
		lockinfo.lock_destroy(rtld_locks[i].handle);
	}

	for (i = 0; i < RTLD_LOCK_CNT; i++) {
		rtld_locks[i].handle = locks[i];
		if (flags & rtld_locks[i].mask)
			pli->wlock_acquire(rtld_locks[i].handle);
	}

	lockinfo.lock_create = pli->lock_create;
	lockinfo.lock_destroy = pli->lock_destroy;
	lockinfo.rlock_acquire = pli->rlock_acquire;
	lockinfo.wlock_acquire = pli->wlock_acquire;
	lockinfo.lock_release = pli->lock_release;
	lockinfo.thread_set_flag = pli->thread_set_flag;
	lockinfo.thread_clr_flag = pli->thread_clr_flag;
	lockinfo.at_fork = pli->at_fork;

	/* restore thread locking state, this time with new locks */
	thread_mask_clear(~0);
	thread_mask_set(flags);
	dbg("_rtld_thread_init: done");
}
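
For completeness, a hedged sketch of the registration a threading library performs at startup; every thr_* name below is hypothetical (the real libthread_xu code is more involved), but the RtldLockInfo fields match the structure consumed above:

/* Hypothetical sketch of a threads library registering its locks. */
static struct RtldLockInfo thr_lockinfo;

void
example_thr_rtld_init(void)
{
	thr_lockinfo.rtli_version = RTLI_VERSION;
	thr_lockinfo.lock_create = thr_lock_create;	/* hypothetical */
	thr_lockinfo.lock_destroy = thr_lock_destroy;
	thr_lockinfo.rlock_acquire = thr_rlock_acquire;
	thr_lockinfo.wlock_acquire = thr_wlock_acquire;
	thr_lockinfo.lock_release = thr_lock_release;
	thr_lockinfo.thread_set_flag = thr_set_flag;
	thr_lockinfo.thread_clr_flag = thr_clr_flag;
	thr_lockinfo.at_fork = NULL;
	_rtld_thread_init(&thr_lockinfo);	/* single-threaded at this point */
}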