2 * Copyright (c) 1998 Alex Nash
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * $FreeBSD: src/lib/libpthread/thread/thr_rwlock.c,v 1.14 2004/01/08 15:37:09 deischen Exp $
29 #include "namespace.h"
30 #include <machine/tls.h>
35 #include "un-namespace.h"
36 #include "thr_private.h"
/* Debug tracing support; the guarded content is elided in this view. */
38 #ifdef _PTHREADS_DEBUGGING
47 /* maximum number of times a read lock may be obtained */
48 #define MAX_READ_LOCKS (INT_MAX - 1)
/*
 * Global lock serializing lazy initialization of statically-initialized
 * rwlocks (taken via THR_LOCK_ACQUIRE in init_static() below).
 */
50 umtx_t _rwlock_static_lock;
/*
 * rwlock_log() - printf-style debug trace helper.  When
 * _PTHREADS_DEBUGGING is defined the arguments are formatted with
 * vsnprintf(); otherwise a no-op stub (note the __unused parameter)
 * is compiled instead.  Both bodies are partially elided in this view.
 */
52 #ifdef _PTHREADS_DEBUGGING
56 rwlock_log(const char *ctl, ...)
63 	len = vsnprintf(buf, sizeof(buf), ctl, va);
72 rwlock_log(const char *ctl __unused, ...)
/*
 * rwlock_init() - allocate and initialize a rwlock object: the monitor
 * mutex, the reader condvar, the writer condvar, and the counters.
 * The attr argument is accepted but unused.  On a failed condvar init
 * the previously constructed objects are destroyed again (unwind calls
 * visible below); the return statements and remaining field setup are
 * elided in this view.
 */
79 rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr __unused)
81 	pthread_rwlock_t prwlock;
84 	/* allocate rwlock object */
85 	prwlock = (pthread_rwlock_t)malloc(sizeof(struct pthread_rwlock));
90 	/* initialize the lock */
91 	if ((ret = _pthread_mutex_init(&prwlock->lock, NULL)) != 0) {
94 	/* initialize the read condition signal */
95 	ret = _pthread_cond_init(&prwlock->read_signal, NULL);
	/* unwind: destroy the mutex if the read condvar could not be made */
98 		_pthread_mutex_destroy(&prwlock->lock);
101 	/* initialize the write condition signal */
102 	ret = _pthread_cond_init(&prwlock->write_signal, NULL);
	/* unwind: tear down both earlier objects on failure */
105 		_pthread_cond_destroy(&prwlock->read_signal);
106 		_pthread_mutex_destroy(&prwlock->lock);
111 	prwlock->blocked_writers = 0;
/*
 * _rwlock_reinit() - forcibly re-initialize an existing rwlock's
 * internal mutex and condvars and reset its counters.
 * NOTE(review): presumably used to reset locks after fork() — confirm
 * with the caller.  A matching _cond_reinit() of write_signal appears
 * to be elided from this view (line 126); verify against the full file.
 */
122 _rwlock_reinit(pthread_rwlock_t prwlock)
124 	_mutex_reinit(&prwlock->lock);
125 	_cond_reinit(prwlock->read_signal);
127 	prwlock->blocked_writers = 0;
/*
 * _pthread_rwlock_destroy() - destroy a rwlock: tear down the monitor
 * mutex and both condvars.  NULL rwlock and NULL *rwlock are handled
 * as distinct cases (error returns elided in this view); the free() of
 * the object itself is also elided here.
 */
132 _pthread_rwlock_destroy (pthread_rwlock_t *rwlock)
136 	if (rwlock == NULL) {
138 	} else if (*rwlock == NULL) {
141 		pthread_rwlock_t prwlock;
144 		rwlock_log("rwlock_destroy %p\n", prwlock);
146 		_pthread_mutex_destroy(&prwlock->lock);
147 		_pthread_cond_destroy(&prwlock->read_signal);
148 		_pthread_cond_destroy(&prwlock->write_signal);
/*
 * init_static() - lazily initialize a statically-allocated rwlock
 * (PTHREAD_RWLOCK_INITIALIZER leaves *rwlock NULL).  The global
 * _rwlock_static_lock serializes concurrent first-use so only one
 * thread performs rwlock_init(); the re-check of *rwlock under the
 * lock is elided in this view.
 */
159 init_static(struct pthread *thread, pthread_rwlock_t *rwlock)
163 	THR_LOCK_ACQUIRE(thread, &_rwlock_static_lock);
166 	ret = rwlock_init(rwlock, NULL);
170 	THR_LOCK_RELEASE(thread, &_rwlock_static_lock);
/*
 * _pthread_rwlock_init() - public initializer; thin wrapper around
 * rwlock_init() passing the caller's attributes through.
 */
176 _pthread_rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
179 	return (rwlock_init(rwlock, attr));
/*
 * rwlock_rdlock_common() - backend shared by pthread_rwlock_rdlock()
 * and pthread_rwlock_timedrdlock().  abstime == NULL waits forever;
 * otherwise _pthread_cond_timedwait() bounds the wait.
 *
 * prwlock->state counts active readers when > 0; a negative state
 * apparently marks a write lock (see _pthread_rwlock_unlock).
 * Several control-flow lines (returns, closing braces) are elided in
 * this view.
 */
183 rwlock_rdlock_common(pthread_rwlock_t *rwlock, const struct timespec *abstime)
185 	struct pthread *curthread = tls_get_curthread();
186 	pthread_rwlock_t prwlock;
194 	/* check for static initialization */
195 	if (prwlock == NULL) {
196 		if ((ret = init_static(curthread, rwlock)) != 0)
201 	rwlock_log("rwlock_rdlock_common %p\n", prwlock);
203 	/* grab the monitor lock */
204 	if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0) {
205 		rwlock_log("rwlock_rdlock_common %p (failedA)\n", prwlock);
209 	/* check lock count */
210 	if (prwlock->state == MAX_READ_LOCKS) {
211 		_pthread_mutex_unlock(&prwlock->lock);
212 		rwlock_log("rwlock_rdlock_common %p (failedB)\n", prwlock);
216 	curthread = tls_get_curthread();
217 	if ((curthread->rdlock_count > 0) && (prwlock->state > 0)) {
219 		 * To avoid having to track all the rdlocks held by
220 		 * a thread or all of the threads that hold a rdlock,
221 		 * we keep a simple count of all the rdlocks held by
222 		 * a thread.  If a thread holds any rdlocks it is
223 		 * possible that it is attempting to take a recursive
224 		 * rdlock.  If there are blocked writers and precedence
225 		 * is given to them, then that would result in the thread
226 		 * deadlocking.  So allowing a thread to take the rdlock
227 		 * when it already has one or more rdlocks avoids the
228 		 * deadlock.  I hope the reader can follow that logic ;-)
230 		; /* nothing needed */
233 		 * Give writers priority over readers
235 		 * WARNING: pthread_cond*() temporarily releases the
	/* block while any writer is waiting or a writer holds the lock */
238 		while (prwlock->blocked_writers || prwlock->state < 0) {
240 				ret = _pthread_cond_timedwait(
241 					&prwlock->read_signal,
242 					&prwlock->lock, abstime);
244 				ret = _pthread_cond_wait(
245 					&prwlock->read_signal,
249 				/* can't do a whole lot if this fails */
250 				_pthread_mutex_unlock(&prwlock->lock);
251 				rwlock_log("rwlock_rdlock_common %p "
252 					"(failedC)\n", prwlock);
	/* success: account the read lock on both the thread and the rwlock */
258 	curthread->rdlock_count++;
259 	prwlock->state++; /* indicate we are locked for reading */
262 	 * Something is really wrong if this call fails.  Returning
263 	 * error won't do because we've already obtained the read
264 	 * lock.  Decrementing 'state' is no good because we probably
265 	 * don't have the monitor lock.
267 	_pthread_mutex_unlock(&prwlock->lock);
268 	rwlock_log("rwlock_rdlock_common %p (return %d)\n", prwlock, ret);
/*
 * _pthread_rwlock_rdlock() - blocking read lock; common backend with
 * no timeout (abstime == NULL).
 */
274 _pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
276 	return (rwlock_rdlock_common(rwlock, NULL));
/*
 * _pthread_rwlock_timedrdlock() - read lock bounded by an absolute
 * timeout, forwarded to the common backend.
 */
280 _pthread_rwlock_timedrdlock (pthread_rwlock_t * __restrict rwlock,
281     const struct timespec * __restrict abstime)
283 	return (rwlock_rdlock_common(rwlock, abstime));
/*
 * _pthread_rwlock_tryrdlock() - non-blocking read lock.  Mirrors the
 * checks of rwlock_rdlock_common(): lazy static init, reader-count
 * overflow, recursive-rdlock allowance, and writer priority — but
 * fails immediately instead of waiting (the EBUSY-style error
 * assignments are elided in this view).
 */
287 _pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
289 	struct pthread *curthread = tls_get_curthread();
290 	pthread_rwlock_t prwlock;
298 	/* check for static initialization */
299 	if (prwlock == NULL) {
300 		if ((ret = init_static(curthread, rwlock)) != 0)
306 	/* grab the monitor lock */
307 	if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
310 	curthread = tls_get_curthread();
311 	if (prwlock->state == MAX_READ_LOCKS)
313 	else if ((curthread->rdlock_count > 0) && (prwlock->state > 0)) {
314 		/* see comment for pthread_rwlock_rdlock() */
315 		curthread->rdlock_count++;
318 	/* give writers priority over readers */
319 	else if (prwlock->blocked_writers || prwlock->state < 0)
322 		curthread->rdlock_count++;
323 		prwlock->state++; /* indicate we are locked for reading */
326 	/* see the comment on this in pthread_rwlock_rdlock */
327 	_pthread_mutex_unlock(&prwlock->lock);
/*
 * _pthread_rwlock_trywrlock() - non-blocking write lock.  Succeeds
 * only when state == 0 (no readers, no writer); otherwise fails
 * immediately (error assignment and the state update marking the
 * write lock are elided in this view).
 */
333 _pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock)
335 	struct pthread *curthread = tls_get_curthread();
336 	pthread_rwlock_t prwlock;
344 	/* check for static initialization */
345 	if (prwlock == NULL) {
346 		if ((ret = init_static(curthread, rwlock)) != 0)
352 	/* grab the monitor lock */
353 	if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
356 	if (prwlock->state != 0)
359 	/* indicate we are locked for writing */
362 	/* see the comment on this in pthread_rwlock_rdlock */
363 	_pthread_mutex_unlock(&prwlock->lock);
/*
 * _pthread_rwlock_unlock() - release a read or write lock.
 * state > 0: a reader unlocks — the per-thread rdlock_count is
 * decremented (the matching state decrement is elided in this view)
 * and, once the last reader leaves, a blocked writer is signalled.
 * state < 0: the writer unlocks; preference goes to a blocked writer,
 * otherwise all waiting readers are woken with a broadcast.
 */
369 _pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
371 	struct pthread *curthread;
372 	pthread_rwlock_t prwlock;
383 	rwlock_log("rwlock_unlock %p\n", prwlock);
385 	/* grab the monitor lock */
386 	if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
389 	curthread = tls_get_curthread();
390 	if (prwlock->state > 0) {
394 		curthread->rdlock_count--;
	/* last reader out: hand the lock to a waiting writer, if any */
396 		if (prwlock->state == 0 && prwlock->blocked_writers)
397 			ret = _pthread_cond_signal(&prwlock->write_signal);
398 	} else if (prwlock->state < 0) {
	/* writer unlock: writers first, else wake every waiting reader */
404 		if (prwlock->blocked_writers)
405 			ret = _pthread_cond_signal(&prwlock->write_signal);
407 			ret = _pthread_cond_broadcast(&prwlock->read_signal);
412 	/* see the comment on this in pthread_rwlock_rdlock */
413 	_pthread_mutex_unlock(&prwlock->lock);
414 	rwlock_log("rwlock_unlock %p (return %d)\n", prwlock, ret);
/*
 * rwlock_wrlock_common() - backend shared by pthread_rwlock_wrlock()
 * and pthread_rwlock_timedwrlock().  Waits on write_signal while
 * state != 0 (readers active or another writer holds the lock),
 * keeping blocked_writers accurate across the wait so readers know a
 * writer is queued.  abstime == NULL waits forever.  Several
 * control-flow lines (returns, braces, the final state update) are
 * elided in this view.
 */
420 rwlock_wrlock_common (pthread_rwlock_t *rwlock, const struct timespec *abstime)
422 	struct pthread *curthread = tls_get_curthread();
423 	pthread_rwlock_t prwlock;
431 	/* check for static initialization */
432 	if (prwlock == NULL) {
433 		if ((ret = init_static(curthread, rwlock)) != 0)
438 	rwlock_log("rwlock_wrlock_common %p\n", prwlock);
440 	/* grab the monitor lock */
441 	if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0) {
442 		rwlock_log("rwlock_wrlock_common %p (failedA)\n", prwlock);
446 	while (prwlock->state != 0) {
447 		prwlock->blocked_writers++;
450 		 * WARNING: pthread_cond*() temporarily releases the
453 		if (abstime != NULL) {
454 			ret = _pthread_cond_timedwait(&prwlock->write_signal,
458 			ret = _pthread_cond_wait(&prwlock->write_signal,
463 		 * Undo on failure.  When the blocked_writers count drops
464 		 * to 0 we may have to wakeup blocked readers.
467 			prwlock->blocked_writers--;
	/* no writers left queued: release any readers held back by them */
468 			if (prwlock->blocked_writers == 0 &&
469 			    prwlock->state >= 0) {
470 				_pthread_cond_broadcast(&prwlock->read_signal);
472 			_pthread_mutex_unlock(&prwlock->lock);
473 			rwlock_log("rwlock_wrlock_common %p (failedB %d)\n",
478 		prwlock->blocked_writers--;
481 	/* indicate we are locked for writing */
484 	/* see the comment on this in pthread_rwlock_rdlock */
485 	_pthread_mutex_unlock(&prwlock->lock);
486 	rwlock_log("rwlock_wrlock_common %p (returns %d)\n", prwlock, ret);
/*
 * _pthread_rwlock_wrlock() - blocking write lock; common backend with
 * no timeout (abstime == NULL).
 */
492 _pthread_rwlock_wrlock (pthread_rwlock_t *rwlock)
494 	return (rwlock_wrlock_common (rwlock, NULL));
/*
 * _pthread_rwlock_timedwrlock() - write lock bounded by an absolute
 * timeout, forwarded to the common backend.
 */
498 _pthread_rwlock_timedwrlock (pthread_rwlock_t * __restrict rwlock,
499     const struct timespec * __restrict abstime)
501 	return (rwlock_wrlock_common (rwlock, abstime));
/*
 * Export the internal _pthread_* implementations under the public
 * pthread_* names via strong aliases.
 */
504 __strong_reference(_pthread_rwlock_destroy, pthread_rwlock_destroy);
505 __strong_reference(_pthread_rwlock_init, pthread_rwlock_init);
506 __strong_reference(_pthread_rwlock_rdlock, pthread_rwlock_rdlock);
507 __strong_reference(_pthread_rwlock_timedrdlock, pthread_rwlock_timedrdlock);
508 __strong_reference(_pthread_rwlock_tryrdlock, pthread_rwlock_tryrdlock);
509 __strong_reference(_pthread_rwlock_trywrlock, pthread_rwlock_trywrlock);
510 __strong_reference(_pthread_rwlock_unlock, pthread_rwlock_unlock);
511 __strong_reference(_pthread_rwlock_wrlock, pthread_rwlock_wrlock);
512 __strong_reference(_pthread_rwlock_timedwrlock, pthread_rwlock_timedwrlock);