Import initial version of 1:1 pthread library.
lib/libthread_xu/thread/thr_rwlock.c
/*-
 * Copyright (c) 1998 Alex Nash
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/lib/libpthread/thread/thr_rwlock.c,v 1.14 2004/01/08 15:37:09 deischen Exp $
 * $DragonFly: src/lib/libthread_xu/thread/thr_rwlock.c,v 1.1 2005/02/01 12:38:27 davidxu Exp $
 */

#include <errno.h>
#include <limits.h>
#include <stdlib.h>

#include <pthread.h>
#include "thr_private.h"

/* maximum number of times a read lock may be obtained */
#define MAX_READ_LOCKS          (INT_MAX - 1)

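/*
 * State convention for the struct pthread_rwlock objects managed below:
 * 'state' is zero when the lock is unowned, a positive count of
 * outstanding read locks when held shared, and -1 when held exclusively
 * by a writer; 'blocked_writers' counts the threads currently sleeping
 * on 'write_signal'.
 */
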
__weak_reference(_pthread_rwlock_destroy, pthread_rwlock_destroy);
__weak_reference(_pthread_rwlock_init, pthread_rwlock_init);
__weak_reference(_pthread_rwlock_rdlock, pthread_rwlock_rdlock);
__weak_reference(_pthread_rwlock_timedrdlock, pthread_rwlock_timedrdlock);
__weak_reference(_pthread_rwlock_tryrdlock, pthread_rwlock_tryrdlock);
__weak_reference(_pthread_rwlock_trywrlock, pthread_rwlock_trywrlock);
__weak_reference(_pthread_rwlock_unlock, pthread_rwlock_unlock);
__weak_reference(_pthread_rwlock_wrlock, pthread_rwlock_wrlock);
__weak_reference(_pthread_rwlock_timedwrlock, pthread_rwlock_timedwrlock);

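/*
 * The __weak_reference() entries above export each standard
 * pthread_rwlock_* name as a weak alias of the corresponding
 * _pthread_rwlock_* function defined in this file, so stronger
 * definitions of the standard names can still override them at
 * link time.
 */
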
/*
 * Prototypes
 */

static int
rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
{
        pthread_rwlock_t prwlock;
        int ret;

        /* allocate rwlock object */
        prwlock = (pthread_rwlock_t)malloc(sizeof(struct pthread_rwlock));

        if (prwlock == NULL)
                return (ENOMEM);

        /* initialize the lock */
        if ((ret = _pthread_mutex_init(&prwlock->lock, NULL)) != 0)
                free(prwlock);
        else {
                /* initialize the read condition signal */
                ret = _pthread_cond_init(&prwlock->read_signal, NULL);

                if (ret != 0) {
                        _pthread_mutex_destroy(&prwlock->lock);
                        free(prwlock);
                } else {
                        /* initialize the write condition signal */
                        ret = _pthread_cond_init(&prwlock->write_signal, NULL);

                        if (ret != 0) {
                                _pthread_cond_destroy(&prwlock->read_signal);
                                _pthread_mutex_destroy(&prwlock->lock);
                                free(prwlock);
                        } else {
                                /* success */
                                prwlock->state = 0;
                                prwlock->blocked_writers = 0;
                                *rwlock = prwlock;
                        }
                }
        }

        return (ret);
}

int
_pthread_rwlock_destroy (pthread_rwlock_t *rwlock)
{
        int ret;

        if (rwlock == NULL)
                ret = EINVAL;
        else {
                pthread_rwlock_t prwlock;

                prwlock = *rwlock;

                _pthread_mutex_destroy(&prwlock->lock);
                _pthread_cond_destroy(&prwlock->read_signal);
                _pthread_cond_destroy(&prwlock->write_signal);
                free(prwlock);

                *rwlock = NULL;

                ret = 0;
        }
        return (ret);
}

static int
init_static(struct pthread *thread, pthread_rwlock_t *rwlock)
{
        int ret;

        THR_LOCK_ACQUIRE(thread, &_rwlock_static_lock);

        if (*rwlock == NULL)
                ret = rwlock_init(rwlock, NULL);
        else
                ret = 0;

        THR_LOCK_RELEASE(thread, &_rwlock_static_lock);

        return (ret);
}

int
_pthread_rwlock_init (pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
{
        *rwlock = NULL;
        return (rwlock_init(rwlock, attr));
}

static int
rwlock_rdlock_common (pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
        struct pthread *curthread = _get_curthread();
        pthread_rwlock_t prwlock;
        int ret;

        if (rwlock == NULL)
                return (EINVAL);

        prwlock = *rwlock;

        /* check for static initialization */
        if (prwlock == NULL) {
                if ((ret = init_static(curthread, rwlock)) != 0)
                        return (ret);

                prwlock = *rwlock;
        }

        /* grab the monitor lock */
        if ((ret = _thr_mutex_lock(&prwlock->lock)) != 0)
                return (ret);

        /* check lock count */
        if (prwlock->state == MAX_READ_LOCKS) {
                _thr_mutex_unlock(&prwlock->lock);
                return (EAGAIN);
        }

        curthread = _get_curthread();
        if ((curthread->rdlock_count > 0) && (prwlock->state > 0)) {
                /*
                 * To avoid having to track all the rdlocks held by
                 * a thread or all of the threads that hold a rdlock,
                 * we keep a simple count of all the rdlocks held by
                 * a thread.  If a thread holds any rdlocks it is
                 * possible that it is attempting to take a recursive
                 * rdlock.  If there are blocked writers and precedence
                 * is given to them, then that would result in the thread
                 * deadlocking.  So allowing a thread to take the rdlock
                 * when it already has one or more rdlocks avoids the
                 * deadlock.  I hope the reader can follow that logic ;-)
                 */
                ;       /* nothing needed */
        } else {
                /* give writers priority over readers */
                while (prwlock->blocked_writers || prwlock->state < 0) {
                        if (abstime)
                                ret = _pthread_cond_timedwait
                                    (&prwlock->read_signal,
                                    &prwlock->lock, abstime);
                        else
                                ret = _thr_cond_wait(&prwlock->read_signal,
                                    &prwlock->lock);
                        if (ret != 0) {
                                /* can't do a whole lot if this fails */
                                _thr_mutex_unlock(&prwlock->lock);
                                return (ret);
                        }
                }
        }

        curthread->rdlock_count++;
        prwlock->state++; /* indicate we are locked for reading */

        /*
         * Something is really wrong if this call fails.  Returning
         * error won't do because we've already obtained the read
         * lock.  Decrementing 'state' is no good because we probably
         * don't have the monitor lock.
         */
        _thr_mutex_unlock(&prwlock->lock);

        return (ret);
}
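
/*
 * Concrete illustration of the recursion case tolerated above: thread A
 * already holds a read lock when thread B blocks in a write-lock request
 * (raising blocked_writers).  If A's second read-lock request deferred to
 * the blocked writer, A would wait on B while B waits for A to release
 * its first read lock, and neither could proceed.  Because A's
 * rdlock_count is non-zero, the second request is granted immediately
 * instead.
 */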

int
_pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
{
        return (rwlock_rdlock_common(rwlock, NULL));
}

__strong_reference(_pthread_rwlock_rdlock, _thr_rwlock_rdlock);

int
_pthread_rwlock_timedrdlock (pthread_rwlock_t *rwlock,
    const struct timespec *abstime)
{
        return (rwlock_rdlock_common(rwlock, abstime));
}

int
_pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
{
        struct pthread *curthread = _get_curthread();
        pthread_rwlock_t prwlock;
        int ret;

        if (rwlock == NULL)
                return (EINVAL);

        prwlock = *rwlock;

        /* check for static initialization */
        if (prwlock == NULL) {
                if ((ret = init_static(curthread, rwlock)) != 0)
                        return (ret);

                prwlock = *rwlock;
        }

        /* grab the monitor lock */
        if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
                return (ret);

        curthread = _get_curthread();
        if (prwlock->state == MAX_READ_LOCKS)
                ret = EAGAIN;
        else if ((curthread->rdlock_count > 0) && (prwlock->state > 0)) {
                /* see comment for pthread_rwlock_rdlock() */
                curthread->rdlock_count++;
                prwlock->state++;
        }
        /* give writers priority over readers */
        else if (prwlock->blocked_writers || prwlock->state < 0)
                ret = EBUSY;
        else {
                curthread->rdlock_count++;
                prwlock->state++; /* indicate we are locked for reading */
        }

        /* see the comment on this in pthread_rwlock_rdlock */
        _pthread_mutex_unlock(&prwlock->lock);

        return (ret);
}

int
_pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock)
{
        struct pthread *curthread = _get_curthread();
        pthread_rwlock_t prwlock;
        int ret;

        if (rwlock == NULL)
                return (EINVAL);

        prwlock = *rwlock;

        /* check for static initialization */
        if (prwlock == NULL) {
                if ((ret = init_static(curthread, rwlock)) != 0)
                        return (ret);

                prwlock = *rwlock;
        }

        /* grab the monitor lock */
        if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
                return (ret);

        if (prwlock->state != 0)
                ret = EBUSY;
        else
                /* indicate we are locked for writing */
                prwlock->state = -1;

        /* see the comment on this in pthread_rwlock_rdlock */
        _pthread_mutex_unlock(&prwlock->lock);

        return (ret);
}

int
_pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
{
        struct pthread *curthread;
        pthread_rwlock_t prwlock;
        int ret;

        if (rwlock == NULL)
                return (EINVAL);

        prwlock = *rwlock;

        if (prwlock == NULL)
                return (EINVAL);

        /* grab the monitor lock */
        if ((ret = _thr_mutex_lock(&prwlock->lock)) != 0)
                return (ret);

        curthread = _get_curthread();
        if (prwlock->state > 0) {
                curthread->rdlock_count--;
                prwlock->state--;
                if (prwlock->state == 0 && prwlock->blocked_writers)
                        ret = _thr_cond_signal(&prwlock->write_signal);
        } else if (prwlock->state < 0) {
                prwlock->state = 0;

                if (prwlock->blocked_writers)
                        ret = _thr_cond_signal(&prwlock->write_signal);
                else
                        ret = _thr_cond_broadcast(&prwlock->read_signal);
        } else
                ret = EINVAL;

        /* see the comment on this in pthread_rwlock_rdlock */
        _thr_mutex_unlock(&prwlock->lock);

        return (ret);
}

__strong_reference(_pthread_rwlock_unlock, _thr_rwlock_unlock);

static int
rwlock_wrlock_common (pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
        struct pthread *curthread = _get_curthread();
        pthread_rwlock_t prwlock;
        int ret;

        if (rwlock == NULL)
                return (EINVAL);

        prwlock = *rwlock;

        /* check for static initialization */
        if (prwlock == NULL) {
                if ((ret = init_static(curthread, rwlock)) != 0)
                        return (ret);

                prwlock = *rwlock;
        }

        /* grab the monitor lock */
        if ((ret = _thr_mutex_lock(&prwlock->lock)) != 0)
                return (ret);

        while (prwlock->state != 0) {
                prwlock->blocked_writers++;

                if (abstime != NULL)
                        ret = _pthread_cond_timedwait(&prwlock->write_signal,
                            &prwlock->lock, abstime);
                else
                        ret = _thr_cond_wait(&prwlock->write_signal,
                            &prwlock->lock);
                if (ret != 0) {
                        prwlock->blocked_writers--;
                        _thr_mutex_unlock(&prwlock->lock);
                        return (ret);
                }

                prwlock->blocked_writers--;
        }

        /* indicate we are locked for writing */
        prwlock->state = -1;

        /* see the comment on this in pthread_rwlock_rdlock */
        _thr_mutex_unlock(&prwlock->lock);

        return (ret);
}

int
_pthread_rwlock_wrlock (pthread_rwlock_t *rwlock)
{
        return (rwlock_wrlock_common (rwlock, NULL));
}
__strong_reference(_pthread_rwlock_wrlock, _thr_rwlock_wrlock);

int
_pthread_rwlock_timedwrlock (pthread_rwlock_t *rwlock,
    const struct timespec *abstime)
{
        return (rwlock_wrlock_common (rwlock, abstime));
}
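
/*
 * Typical application-level use of the interfaces implemented in this
 * file, shown only as an illustrative sketch (it is not part of the
 * library itself):
 *
 *      pthread_rwlock_t lock;
 *      struct timespec ts;
 *
 *      pthread_rwlock_init(&lock, NULL);
 *
 *      Shared (read) access:
 *              pthread_rwlock_rdlock(&lock);
 *              pthread_rwlock_unlock(&lock);
 *
 *      Exclusive (write) access:
 *              pthread_rwlock_wrlock(&lock);
 *              pthread_rwlock_unlock(&lock);
 *
 *      The timed variants take an absolute CLOCK_REALTIME deadline and
 *      return ETIMEDOUT if it passes before the lock is acquired:
 *              clock_gettime(CLOCK_REALTIME, &ts);
 *              ts.tv_sec += 1;
 *              (void)pthread_rwlock_timedwrlock(&lock, &ts);
 *
 *      pthread_rwlock_destroy(&lock);
 */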