Commit: Clean up the TLS implementation.
File: dragonfly.git — lib/libthread_xu/thread/thr_rwlock.c
1 /*-
2  * Copyright (c) 1998 Alex Nash
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  * $FreeBSD: src/lib/libpthread/thread/thr_rwlock.c,v 1.14 2004/01/08 15:37:09 deischen Exp $
27  * $DragonFly: src/lib/libthread_xu/thread/thr_rwlock.c,v 1.2 2005/03/29 19:26:20 joerg Exp $
28  */
29
30 #include <machine/tls.h>
31
32 #include <errno.h>
33 #include <limits.h>
34 #include <stdlib.h>
35
36 #include <pthread.h>
37 #include "thr_private.h"
38
/*
 * Maximum number of times a read lock may be obtained.  The rwlock's
 * 'state' field counts readers (>0) or marks a writer (-1); capping one
 * below INT_MAX keeps the increment in the rdlock paths from overflowing.
 */
#define MAX_READ_LOCKS          (INT_MAX - 1)

/*
 * Export the standard pthread_rwlock_* names as weak aliases of the
 * internal _pthread_rwlock_* implementations so applications may
 * interpose their own versions.
 */
__weak_reference(_pthread_rwlock_destroy, pthread_rwlock_destroy);
__weak_reference(_pthread_rwlock_init, pthread_rwlock_init);
__weak_reference(_pthread_rwlock_rdlock, pthread_rwlock_rdlock);
__weak_reference(_pthread_rwlock_timedrdlock, pthread_rwlock_timedrdlock);
__weak_reference(_pthread_rwlock_tryrdlock, pthread_rwlock_tryrdlock);
__weak_reference(_pthread_rwlock_trywrlock, pthread_rwlock_trywrlock);
__weak_reference(_pthread_rwlock_unlock, pthread_rwlock_unlock);
__weak_reference(_pthread_rwlock_wrlock, pthread_rwlock_wrlock);
__weak_reference(_pthread_rwlock_timedwrlock, pthread_rwlock_timedwrlock);
51
52 /*
53  * Prototypes
54  */
55
56 static int
57 rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
58 {
59         pthread_rwlock_t prwlock;
60         int ret;
61
62         /* allocate rwlock object */
63         prwlock = (pthread_rwlock_t)malloc(sizeof(struct pthread_rwlock));
64
65         if (prwlock == NULL)
66                 return (ENOMEM);
67
68         /* initialize the lock */
69         if ((ret = _pthread_mutex_init(&prwlock->lock, NULL)) != 0)
70                 free(prwlock);
71         else {
72                 /* initialize the read condition signal */
73                 ret = _pthread_cond_init(&prwlock->read_signal, NULL);
74
75                 if (ret != 0) {
76                         _pthread_mutex_destroy(&prwlock->lock);
77                         free(prwlock);
78                 } else {
79                         /* initialize the write condition signal */
80                         ret = _pthread_cond_init(&prwlock->write_signal, NULL);
81
82                         if (ret != 0) {
83                                 _pthread_cond_destroy(&prwlock->read_signal);
84                                 _pthread_mutex_destroy(&prwlock->lock);
85                                 free(prwlock);
86                         } else {
87                                 /* success */
88                                 prwlock->state = 0;
89                                 prwlock->blocked_writers = 0;
90                                 *rwlock = prwlock;
91                         }
92                 }
93         }
94
95         return (ret);
96 }
97
98 int
99 _pthread_rwlock_destroy (pthread_rwlock_t *rwlock)
100 {
101         int ret;
102
103         if (rwlock == NULL)
104                 ret = EINVAL;
105         else {
106                 pthread_rwlock_t prwlock;
107
108                 prwlock = *rwlock;
109
110                 _pthread_mutex_destroy(&prwlock->lock);
111                 _pthread_cond_destroy(&prwlock->read_signal);
112                 _pthread_cond_destroy(&prwlock->write_signal);
113                 free(prwlock);
114
115                 *rwlock = NULL;
116
117                 ret = 0;
118         }
119         return (ret);
120 }
121
122 static int
123 init_static(struct pthread *thread, pthread_rwlock_t *rwlock)
124 {
125         int ret;
126
127         THR_LOCK_ACQUIRE(thread, &_rwlock_static_lock);
128
129         if (*rwlock == NULL)
130                 ret = rwlock_init(rwlock, NULL);
131         else
132                 ret = 0;
133
134         THR_LOCK_RELEASE(thread, &_rwlock_static_lock);
135
136         return (ret);
137 }
138
139 int
140 _pthread_rwlock_init (pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
141 {
142         *rwlock = NULL;
143         return (rwlock_init(rwlock, attr));
144 }
145
146 static int
147 rwlock_rdlock_common (pthread_rwlock_t *rwlock, const struct timespec *abstime)
148 {
149         struct pthread *curthread = tls_get_curthread();
150         pthread_rwlock_t prwlock;
151         int ret;
152
153         if (rwlock == NULL)
154                 return (EINVAL);
155
156         prwlock = *rwlock;
157
158         /* check for static initialization */
159         if (prwlock == NULL) {
160                 if ((ret = init_static(curthread, rwlock)) != 0)
161                         return (ret);
162
163                 prwlock = *rwlock;
164         }
165
166         /* grab the monitor lock */
167         if ((ret = _thr_mutex_lock(&prwlock->lock)) != 0)
168                 return (ret);
169
170         /* check lock count */
171         if (prwlock->state == MAX_READ_LOCKS) {
172                 _thr_mutex_unlock(&prwlock->lock);
173                 return (EAGAIN);
174         }
175
176         curthread = tls_get_curthread();
177         if ((curthread->rdlock_count > 0) && (prwlock->state > 0)) {
178                 /*
179                  * To avoid having to track all the rdlocks held by
180                  * a thread or all of the threads that hold a rdlock,
181                  * we keep a simple count of all the rdlocks held by
182                  * a thread.  If a thread holds any rdlocks it is
183                  * possible that it is attempting to take a recursive
184                  * rdlock.  If there are blocked writers and precedence
185                  * is given to them, then that would result in the thread
186                  * deadlocking.  So allowing a thread to take the rdlock
187                  * when it already has one or more rdlocks avoids the
188                  * deadlock.  I hope the reader can follow that logic ;-)
189                  */
190                 ;       /* nothing needed */
191         } else {
192                 /* give writers priority over readers */
193                 while (prwlock->blocked_writers || prwlock->state < 0) {
194                         if (abstime)
195                                 ret = _pthread_cond_timedwait
196                                     (&prwlock->read_signal,
197                                     &prwlock->lock, abstime);
198                         else
199                                 ret = _thr_cond_wait(&prwlock->read_signal,
200                             &prwlock->lock);
201                         if (ret != 0) {
202                                 /* can't do a whole lot if this fails */
203                                 _thr_mutex_unlock(&prwlock->lock);
204                                 return (ret);
205                         }
206                 }
207         }
208
209         curthread->rdlock_count++;
210         prwlock->state++; /* indicate we are locked for reading */
211
212         /*
213          * Something is really wrong if this call fails.  Returning
214          * error won't do because we've already obtained the read
215          * lock.  Decrementing 'state' is no good because we probably
216          * don't have the monitor lock.
217          */
218         _thr_mutex_unlock(&prwlock->lock);
219
220         return (ret);
221 }
222
223 int
224 _pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
225 {
226         return (rwlock_rdlock_common(rwlock, NULL));
227 }
228
229 __strong_reference(_pthread_rwlock_rdlock, _thr_rwlock_rdlock);
230
231 int
232 _pthread_rwlock_timedrdlock (pthread_rwlock_t *rwlock,
233          const struct timespec *abstime)
234 {
235         return (rwlock_rdlock_common(rwlock, abstime));
236 }
237
238 int
239 _pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
240 {
241         struct pthread *curthread = tls_get_curthread();
242         pthread_rwlock_t prwlock;
243         int ret;
244
245         if (rwlock == NULL)
246                 return (EINVAL);
247
248         prwlock = *rwlock;
249
250         /* check for static initialization */
251         if (prwlock == NULL) {
252                 if ((ret = init_static(curthread, rwlock)) != 0)
253                         return (ret);
254
255                 prwlock = *rwlock;
256         }
257
258         /* grab the monitor lock */
259         if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
260                 return (ret);
261
262         curthread = tls_get_curthread();
263         if (prwlock->state == MAX_READ_LOCKS)
264                 ret = EAGAIN;
265         else if ((curthread->rdlock_count > 0) && (prwlock->state > 0)) {
266                 /* see comment for pthread_rwlock_rdlock() */
267                 curthread->rdlock_count++;
268                 prwlock->state++;
269         }
270         /* give writers priority over readers */
271         else if (prwlock->blocked_writers || prwlock->state < 0)
272                 ret = EBUSY;
273         else {
274                 curthread->rdlock_count++;
275                 prwlock->state++; /* indicate we are locked for reading */
276         }
277
278         /* see the comment on this in pthread_rwlock_rdlock */
279         _pthread_mutex_unlock(&prwlock->lock);
280
281         return (ret);
282 }
283
284 int
285 _pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock)
286 {
287         struct pthread *curthread = tls_get_curthread();
288         pthread_rwlock_t prwlock;
289         int ret;
290
291         if (rwlock == NULL)
292                 return (EINVAL);
293
294         prwlock = *rwlock;
295
296         /* check for static initialization */
297         if (prwlock == NULL) {
298                 if ((ret = init_static(curthread, rwlock)) != 0)
299                         return (ret);
300
301                 prwlock = *rwlock;
302         }
303
304         /* grab the monitor lock */
305         if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
306                 return (ret);
307
308         if (prwlock->state != 0)
309                 ret = EBUSY;
310         else
311                 /* indicate we are locked for writing */
312                 prwlock->state = -1;
313
314         /* see the comment on this in pthread_rwlock_rdlock */
315         _pthread_mutex_unlock(&prwlock->lock);
316
317         return (ret);
318 }
319
320 int
321 _pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
322 {
323         struct pthread *curthread;
324         pthread_rwlock_t prwlock;
325         int ret;
326
327         if (rwlock == NULL)
328                 return (EINVAL);
329
330         prwlock = *rwlock;
331
332         if (prwlock == NULL)
333                 return (EINVAL);
334
335         /* grab the monitor lock */
336         if ((ret = _thr_mutex_lock(&prwlock->lock)) != 0)
337                 return (ret);
338
339         curthread = tls_get_curthread();
340         if (prwlock->state > 0) {
341                 curthread->rdlock_count--;
342                 prwlock->state--;
343                 if (prwlock->state == 0 && prwlock->blocked_writers)
344                         ret = _thr_cond_signal(&prwlock->write_signal);
345         } else if (prwlock->state < 0) {
346                 prwlock->state = 0;
347
348                 if (prwlock->blocked_writers)
349                         ret = _thr_cond_signal(&prwlock->write_signal);
350                 else
351                         ret = _thr_cond_broadcast(&prwlock->read_signal);
352         } else
353                 ret = EINVAL;
354
355         /* see the comment on this in pthread_rwlock_rdlock */
356         _thr_mutex_unlock(&prwlock->lock);
357
358         return (ret);
359 }
360
361 __strong_reference(_pthread_rwlock_unlock, _thr_rwlock_unlock);
362
363 static int
364 rwlock_wrlock_common (pthread_rwlock_t *rwlock, const struct timespec *abstime)
365 {
366         struct pthread *curthread = tls_get_curthread();
367         pthread_rwlock_t prwlock;
368         int ret;
369
370         if (rwlock == NULL)
371                 return (EINVAL);
372
373         prwlock = *rwlock;
374
375         /* check for static initialization */
376         if (prwlock == NULL) {
377                 if ((ret = init_static(curthread, rwlock)) != 0)
378                         return (ret);
379
380                 prwlock = *rwlock;
381         }
382
383         /* grab the monitor lock */
384         if ((ret = _thr_mutex_lock(&prwlock->lock)) != 0)
385                 return (ret);
386
387         while (prwlock->state != 0) {
388                 prwlock->blocked_writers++;
389
390                 if (abstime != NULL)
391                         ret = _pthread_cond_timedwait(&prwlock->write_signal,
392                             &prwlock->lock, abstime);
393                 else
394                         ret = _thr_cond_wait(&prwlock->write_signal,
395                             &prwlock->lock);
396                 if (ret != 0) {
397                         prwlock->blocked_writers--;
398                         _thr_mutex_unlock(&prwlock->lock);
399                         return (ret);
400                 }
401
402                 prwlock->blocked_writers--;
403         }
404
405         /* indicate we are locked for writing */
406         prwlock->state = -1;
407
408         /* see the comment on this in pthread_rwlock_rdlock */
409         _thr_mutex_unlock(&prwlock->lock);
410
411         return (ret);
412 }
413
414 int
415 _pthread_rwlock_wrlock (pthread_rwlock_t *rwlock)
416 {
417         return (rwlock_wrlock_common (rwlock, NULL));
418 }
419 __strong_reference(_pthread_rwlock_wrlock, _thr_rwlock_wrlock);
420
421 int
422 _pthread_rwlock_timedwrlock (pthread_rwlock_t *rwlock,
423     const struct timespec *abstime)
424 {
425         return (rwlock_wrlock_common (rwlock, abstime));
426 }