2 * Copyright (c) 1998 Alex Nash
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * $FreeBSD: src/lib/libpthread/thread/thr_rwlock.c,v 1.14 2004/01/08 15:37:09 deischen Exp $
27 * $DragonFly: src/lib/libthread_xu/thread/thr_rwlock.c,v 1.2 2005/03/29 19:26:20 joerg Exp $
30 #include <machine/tls.h>
37 #include "thr_private.h"
/*
 * Maximum number of times a read lock may be obtained: one below
 * INT_MAX so the reader count kept in the lock's 'state' field can
 * never overflow a signed int.
 */
#define MAX_READ_LOCKS (INT_MAX - 1)
/*
 * Export the implementation under the standard pthread_* names as
 * weak symbols; the library itself calls the strong _pthread_* entry
 * points, while applications may interpose the weak names.
 */
__weak_reference(_pthread_rwlock_destroy, pthread_rwlock_destroy);
__weak_reference(_pthread_rwlock_init, pthread_rwlock_init);
__weak_reference(_pthread_rwlock_rdlock, pthread_rwlock_rdlock);
__weak_reference(_pthread_rwlock_timedrdlock, pthread_rwlock_timedrdlock);
__weak_reference(_pthread_rwlock_tryrdlock, pthread_rwlock_tryrdlock);
__weak_reference(_pthread_rwlock_trywrlock, pthread_rwlock_trywrlock);
__weak_reference(_pthread_rwlock_unlock, pthread_rwlock_unlock);
__weak_reference(_pthread_rwlock_wrlock, pthread_rwlock_wrlock);
__weak_reference(_pthread_rwlock_timedwrlock, pthread_rwlock_timedwrlock);
/*
 * rwlock_init() -- allocate and initialize a read/write lock object.
 *
 * Builds the monitor: one mutex ('lock') plus the two condition
 * variables readers and writers block on.  On a partial failure the
 * members initialized so far are torn down in reverse order.
 *
 * NOTE(review): this excerpt elides the malloc NULL check, the error
 * returns, the remaining field initialization and the store of the
 * new object through *rwlock -- confirm against the full source.
 */
rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
	pthread_rwlock_t prwlock;

	/* allocate rwlock object */
	prwlock = (pthread_rwlock_t)malloc(sizeof(struct pthread_rwlock));

	/* initialize the lock */
	if ((ret = _pthread_mutex_init(&prwlock->lock, NULL)) != 0)

	/* initialize the read condition signal */
	ret = _pthread_cond_init(&prwlock->read_signal, NULL);
		/* on failure, undo the mutex init */
		_pthread_mutex_destroy(&prwlock->lock);

	/* initialize the write condition signal */
	ret = _pthread_cond_init(&prwlock->write_signal, NULL);
		/* on failure, undo both earlier initializations */
		_pthread_cond_destroy(&prwlock->read_signal);
		_pthread_mutex_destroy(&prwlock->lock);

	/* no writers are queued on a fresh lock */
	prwlock->blocked_writers = 0;
/*
 * _pthread_rwlock_destroy() -- release the resources of a rwlock.
 *
 * Tears down the monitor mutex and both condition variables.
 * NOTE(review): the argument validation, the free() of the object
 * and the return value are not visible in this excerpt -- confirm
 * against the full source.
 */
_pthread_rwlock_destroy (pthread_rwlock_t *rwlock)
	pthread_rwlock_t prwlock;

	_pthread_mutex_destroy(&prwlock->lock);
	_pthread_cond_destroy(&prwlock->read_signal);
	_pthread_cond_destroy(&prwlock->write_signal);
/*
 * init_static() -- lazily initialize a statically-allocated rwlock.
 *
 * Runs rwlock_init() under the global _rwlock_static_lock so that
 * two threads racing on the same statically-initialized lock cannot
 * both initialize it.  NOTE(review): presumably *rwlock is re-checked
 * for NULL while the lock is held -- that check is not visible here.
 */
init_static(struct pthread *thread, pthread_rwlock_t *rwlock)
	THR_LOCK_ACQUIRE(thread, &_rwlock_static_lock);

	ret = rwlock_init(rwlock, NULL);

	THR_LOCK_RELEASE(thread, &_rwlock_static_lock);
/*
 * _pthread_rwlock_init() -- public initializer; defers to
 * rwlock_init() with the caller-supplied attributes.
 * NOTE(review): one body line before the return is elided in this
 * excerpt (likely resetting *rwlock) -- confirm against the full
 * source.
 */
_pthread_rwlock_init (pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
	return (rwlock_init(rwlock, attr));
/*
 * rwlock_rdlock_common() -- acquire a read lock; common path for the
 * blocking, timed and try-style rdlock entry points.
 *
 * abstime == NULL means wait forever; otherwise it is the absolute
 * timeout handed to _pthread_cond_timedwait().
 *
 * Readers are admitted while no writer holds the lock (state >= 0),
 * but normally defer to queued writers to avoid writer starvation --
 * except when this thread already holds rdlocks (see the comment in
 * the body).  NOTE(review): this excerpt elides the *rwlock load into
 * prwlock, several closing braces and the error-return statements --
 * confirm against the full source.
 */
rwlock_rdlock_common (pthread_rwlock_t *rwlock, const struct timespec *abstime)
	struct pthread *curthread = tls_get_curthread();
	pthread_rwlock_t prwlock;

	/* check for static initialization */
	if (prwlock == NULL) {
		if ((ret = init_static(curthread, rwlock)) != 0)

	/* grab the monitor lock */
	if ((ret = _thr_mutex_lock(&prwlock->lock)) != 0)

	/* check lock count -- another reader would overflow 'state' */
	if (prwlock->state == MAX_READ_LOCKS) {
		_thr_mutex_unlock(&prwlock->lock);

	curthread = tls_get_curthread();
	if ((curthread->rdlock_count > 0) && (prwlock->state > 0)) {
		/*
		 * To avoid having to track all the rdlocks held by
		 * a thread or all of the threads that hold a rdlock,
		 * we keep a simple count of all the rdlocks held by
		 * a thread.  If a thread holds any rdlocks it is
		 * possible that it is attempting to take a recursive
		 * rdlock.  If there are blocked writers and precedence
		 * is given to them, then that would result in the thread
		 * deadlocking.  So allowing a thread to take the rdlock
		 * when it already has one or more rdlocks avoids the
		 * deadlock.  I hope the reader can follow that logic ;-)
		 */
		; /* nothing needed */

	/* give writers priority over readers */
	while (prwlock->blocked_writers || prwlock->state < 0) {
			/* timed wait when an absolute timeout was given */
			ret = _pthread_cond_timedwait
			    (&prwlock->read_signal,
			    &prwlock->lock, abstime);
			/* untimed wait otherwise */
			ret = _thr_cond_wait(&prwlock->read_signal,
			/* can't do a whole lot if this fails */
			_thr_mutex_unlock(&prwlock->lock);

	/* admitted: account for the recursive-rdlock heuristic above */
	curthread->rdlock_count++;
	prwlock->state++; /* indicate we are locked for reading */

	/*
	 * Something is really wrong if this call fails.  Returning
	 * error won't do because we've already obtained the read
	 * lock.  Decrementing 'state' is no good because we probably
	 * don't have the monitor lock.
	 */
	_thr_mutex_unlock(&prwlock->lock);
224 _pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
226 return (rwlock_rdlock_common(rwlock, NULL));
229 __strong_reference(_pthread_rwlock_rdlock, _thr_rwlock_rdlock);
232 _pthread_rwlock_timedrdlock (pthread_rwlock_t *rwlock,
233 const struct timespec *abstime)
235 return (rwlock_rdlock_common(rwlock, abstime));
/*
 * _pthread_rwlock_tryrdlock() -- try to acquire a read lock without
 * blocking.  Mirrors the admission rules of rwlock_rdlock_common():
 * fail at MAX_READ_LOCKS, admit recursive readers, otherwise defer
 * to queued or active writers.
 *
 * NOTE(review): this excerpt elides the *rwlock load, the
 * EAGAIN/EBUSY result assignments and several braces -- confirm
 * against the full source.
 */
_pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
	struct pthread *curthread = tls_get_curthread();
	pthread_rwlock_t prwlock;

	/* check for static initialization */
	if (prwlock == NULL) {
		if ((ret = init_static(curthread, rwlock)) != 0)

	/* grab the monitor lock */
	if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)

	curthread = tls_get_curthread();
	/* one more reader would overflow 'state' */
	if (prwlock->state == MAX_READ_LOCKS)
	else if ((curthread->rdlock_count > 0) && (prwlock->state > 0)) {
		/* see comment for pthread_rwlock_rdlock() */
		curthread->rdlock_count++;
	/* give writers priority over readers */
	else if (prwlock->blocked_writers || prwlock->state < 0)
	curthread->rdlock_count++;
	prwlock->state++; /* indicate we are locked for reading */

	/* see the comment on this in pthread_rwlock_rdlock */
	_pthread_mutex_unlock(&prwlock->lock);
/*
 * _pthread_rwlock_trywrlock() -- try to acquire the write lock
 * without blocking; fails while any reader or writer holds the lock
 * (state != 0).
 *
 * NOTE(review): this excerpt elides the *rwlock load, the EBUSY
 * result, the write-ownership assignment under "locked for writing"
 * and several braces -- confirm against the full source.
 */
_pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock)
	struct pthread *curthread = tls_get_curthread();
	pthread_rwlock_t prwlock;

	/* check for static initialization */
	if (prwlock == NULL) {
		if ((ret = init_static(curthread, rwlock)) != 0)

	/* grab the monitor lock */
	if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)

	/* any holder at all means the write lock is unavailable */
	if (prwlock->state != 0)

	/* indicate we are locked for writing */

	/* see the comment on this in pthread_rwlock_rdlock */
	_pthread_mutex_unlock(&prwlock->lock);
/*
 * _pthread_rwlock_unlock() -- release a read or write lock.
 *
 * state > 0: the caller holds one of 'state' read locks; when the
 * last reader leaves and writers are queued, one writer is woken.
 * state < 0: the caller holds the write lock; a queued writer is
 * preferred, otherwise all waiting readers are woken.
 *
 * NOTE(review): this excerpt elides the *rwlock load, the 'state'
 * decrement/reset lines, the 'else' before the broadcast and the
 * return statements -- confirm against the full source.
 */
_pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
	struct pthread *curthread;
	pthread_rwlock_t prwlock;

	/* grab the monitor lock */
	if ((ret = _thr_mutex_lock(&prwlock->lock)) != 0)

	curthread = tls_get_curthread();
	if (prwlock->state > 0) {
		/* releasing a read lock */
		curthread->rdlock_count--;
		/* last reader out: hand the lock to a queued writer */
		if (prwlock->state == 0 && prwlock->blocked_writers)
			ret = _thr_cond_signal(&prwlock->write_signal);
	} else if (prwlock->state < 0) {
		/* releasing the write lock: prefer a queued writer */
		if (prwlock->blocked_writers)
			ret = _thr_cond_signal(&prwlock->write_signal);
			/* (elided 'else') no writers: wake all readers */
			ret = _thr_cond_broadcast(&prwlock->read_signal);

	/* see the comment on this in pthread_rwlock_rdlock */
	_thr_mutex_unlock(&prwlock->lock);

/* Internal strong alias used by other parts of the library. */
__strong_reference(_pthread_rwlock_unlock, _thr_rwlock_unlock);
/*
 * rwlock_wrlock_common() -- acquire the write lock; common path for
 * the blocking and timed wrlock entry points.
 *
 * abstime == NULL waits forever; otherwise it is the absolute
 * timeout passed to _pthread_cond_timedwait().  While any holder
 * exists (state != 0) the caller is counted in blocked_writers so
 * readers and unlockers know a writer is queued.
 *
 * NOTE(review): this excerpt elides the *rwlock load, the
 * write-ownership assignment under "locked for writing", the
 * 'if (abstime)'/'else' around the two waits and the return
 * statements -- confirm against the full source.
 */
rwlock_wrlock_common (pthread_rwlock_t *rwlock, const struct timespec *abstime)
	struct pthread *curthread = tls_get_curthread();
	pthread_rwlock_t prwlock;

	/* check for static initialization */
	if (prwlock == NULL) {
		if ((ret = init_static(curthread, rwlock)) != 0)

	/* grab the monitor lock */
	if ((ret = _thr_mutex_lock(&prwlock->lock)) != 0)

	while (prwlock->state != 0) {
		/* advertise ourselves so readers defer to us */
		prwlock->blocked_writers++;

			ret = _pthread_cond_timedwait(&prwlock->write_signal,
			    &prwlock->lock, abstime);
			ret = _thr_cond_wait(&prwlock->write_signal,
			/* wait failed (e.g. timed out): dequeue and bail */
			prwlock->blocked_writers--;
			_thr_mutex_unlock(&prwlock->lock);

		/* woken and retrying: we are no longer queued */
		prwlock->blocked_writers--;

	/* indicate we are locked for writing */

	/* see the comment on this in pthread_rwlock_rdlock */
	_thr_mutex_unlock(&prwlock->lock);
415 _pthread_rwlock_wrlock (pthread_rwlock_t *rwlock)
417 return (rwlock_wrlock_common (rwlock, NULL));
419 __strong_reference(_pthread_rwlock_wrlock, _thr_rwlock_wrlock);
/*
 * _pthread_rwlock_timedwrlock() -- acquire the write lock, giving up
 * once the absolute timeout 'abstime' has passed; thin wrapper
 * around rwlock_wrlock_common().
 */
_pthread_rwlock_timedwrlock (pthread_rwlock_t *rwlock,
    const struct timespec *abstime)
	return (rwlock_wrlock_common (rwlock, abstime));