lib/libthread_xu/thread/thr_rwlock.c
/*-
 * Copyright (c) 1998 Alex Nash
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/lib/libpthread/thread/thr_rwlock.c,v 1.14 2004/01/08 15:37:09 deischen Exp $
 * $DragonFly: src/lib/libthread_xu/thread/thr_rwlock.c,v 1.4 2005/04/12 14:01:31 davidxu Exp $
 */

#include <machine/tls.h>

#include <errno.h>
#include <limits.h>
#include <stdlib.h>

#include <pthread.h>
#include "thr_private.h"

/* maximum number of times a read lock may be obtained */
#define MAX_READ_LOCKS          (INT_MAX - 1)
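
/*
 * Lock state encoding (prwlock->state): a positive value is the number
 * of read locks currently held, -1 means a single writer holds the lock,
 * and 0 means the lock is free.
 */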

__weak_reference(_pthread_rwlock_destroy, pthread_rwlock_destroy);
__weak_reference(_pthread_rwlock_init, pthread_rwlock_init);
__weak_reference(_pthread_rwlock_rdlock, pthread_rwlock_rdlock);
__weak_reference(_pthread_rwlock_timedrdlock, pthread_rwlock_timedrdlock);
__weak_reference(_pthread_rwlock_tryrdlock, pthread_rwlock_tryrdlock);
__weak_reference(_pthread_rwlock_trywrlock, pthread_rwlock_trywrlock);
__weak_reference(_pthread_rwlock_unlock, pthread_rwlock_unlock);
__weak_reference(_pthread_rwlock_wrlock, pthread_rwlock_wrlock);
__weak_reference(_pthread_rwlock_timedwrlock, pthread_rwlock_timedwrlock);

/*
 * Prototypes
 */

static int
rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
{
        pthread_rwlock_t prwlock;
        int ret;

        /* allocate rwlock object */
        prwlock = (pthread_rwlock_t)malloc(sizeof(struct pthread_rwlock));

        if (prwlock == NULL)
                return (ENOMEM);

        /* initialize the lock */
        if ((ret = _pthread_mutex_init(&prwlock->lock, NULL)) != 0)
                free(prwlock);
        else {
                /* initialize the read condition signal */
                ret = _pthread_cond_init(&prwlock->read_signal, NULL);

                if (ret != 0) {
                        _pthread_mutex_destroy(&prwlock->lock);
                        free(prwlock);
                } else {
                        /* initialize the write condition signal */
                        ret = _pthread_cond_init(&prwlock->write_signal, NULL);

                        if (ret != 0) {
                                _pthread_cond_destroy(&prwlock->read_signal);
                                _pthread_mutex_destroy(&prwlock->lock);
                                free(prwlock);
                        } else {
                                /* success */
                                prwlock->state = 0;
                                prwlock->blocked_writers = 0;
                                *rwlock = prwlock;
                        }
                }
        }

        return (ret);
}

int
_pthread_rwlock_destroy (pthread_rwlock_t *rwlock)
{
        int ret;

        if (rwlock == NULL)
                ret = EINVAL;
        else {
                pthread_rwlock_t prwlock;

                prwlock = *rwlock;

                _pthread_mutex_destroy(&prwlock->lock);
                _pthread_cond_destroy(&prwlock->read_signal);
                _pthread_cond_destroy(&prwlock->write_signal);
                free(prwlock);

                *rwlock = NULL;

                ret = 0;
        }
        return (ret);
}

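/*
 * Lazily initialize a statically allocated rwlock (one whose value is
 * still NULL).  _rwlock_static_lock serializes concurrent first uses so
 * that only one thread performs the allocation.
 */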
static int
init_static(struct pthread *thread, pthread_rwlock_t *rwlock)
{
        int ret;

        THR_LOCK_ACQUIRE(thread, &_rwlock_static_lock);

        if (*rwlock == NULL)
                ret = rwlock_init(rwlock, NULL);
        else
                ret = 0;

        THR_LOCK_RELEASE(thread, &_rwlock_static_lock);

        return (ret);
}

int
_pthread_rwlock_init (pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
{
        *rwlock = NULL;
        return (rwlock_init(rwlock, attr));
}

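/*
 * Common read-lock path shared by _pthread_rwlock_rdlock() and
 * _pthread_rwlock_timedrdlock(); a NULL abstime means wait indefinitely.
 */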
static int
rwlock_rdlock_common (pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
        struct pthread *curthread = tls_get_curthread();
        pthread_rwlock_t prwlock;
        int ret;

        if (rwlock == NULL)
                return (EINVAL);

        prwlock = *rwlock;

        /* check for static initialization */
        if (prwlock == NULL) {
                if ((ret = init_static(curthread, rwlock)) != 0)
                        return (ret);

                prwlock = *rwlock;
        }

        /* grab the monitor lock */
        if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
                return (ret);

        /* check lock count */
        if (prwlock->state == MAX_READ_LOCKS) {
                _pthread_mutex_unlock(&prwlock->lock);
                return (EAGAIN);
        }

        curthread = tls_get_curthread();
        if ((curthread->rdlock_count > 0) && (prwlock->state > 0)) {
                /*
                 * To avoid having to track all the rdlocks held by
                 * a thread or all of the threads that hold a rdlock,
                 * we keep a simple count of all the rdlocks held by
                 * a thread.  If a thread holds any rdlocks it is
                 * possible that it is attempting to take a recursive
                 * rdlock.  If there are blocked writers and precedence
                 * is given to them, then that would result in the thread
                 * deadlocking.  So allowing a thread to take the rdlock
                 * when it already has one or more rdlocks avoids the
                 * deadlock.  I hope the reader can follow that logic ;-)
                 */
                ;       /* nothing needed */
        } else {
                /* give writers priority over readers */
                while (prwlock->blocked_writers || prwlock->state < 0) {
                        if (abstime)
                                ret = _pthread_cond_timedwait(
                                    &prwlock->read_signal,
                                    &prwlock->lock, abstime);
                        else
                                ret = _pthread_cond_wait(&prwlock->read_signal,
                                    &prwlock->lock);
                        if (ret != 0) {
                                /* can't do a whole lot if this fails */
                                _pthread_mutex_unlock(&prwlock->lock);
                                return (ret);
                        }
                }
        }

        curthread->rdlock_count++;
        prwlock->state++; /* indicate we are locked for reading */

        /*
         * Something is really wrong if this call fails.  Returning
         * error won't do because we've already obtained the read
         * lock.  Decrementing 'state' is no good because we probably
         * don't have the monitor lock.
         */
        _pthread_mutex_unlock(&prwlock->lock);

        return (ret);
}

int
_pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
{
        return (rwlock_rdlock_common(rwlock, NULL));
}

int
_pthread_rwlock_timedrdlock (pthread_rwlock_t *rwlock,
    const struct timespec *abstime)
{
        return (rwlock_rdlock_common(rwlock, abstime));
}

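/*
 * Non-blocking read lock: fails with EBUSY instead of waiting when a
 * writer holds the lock or writers are queued (unless this thread
 * already holds read locks; see rwlock_rdlock_common()).
 */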
int
_pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
{
        struct pthread *curthread = tls_get_curthread();
        pthread_rwlock_t prwlock;
        int ret;

        if (rwlock == NULL)
                return (EINVAL);

        prwlock = *rwlock;

        /* check for static initialization */
        if (prwlock == NULL) {
                if ((ret = init_static(curthread, rwlock)) != 0)
                        return (ret);

                prwlock = *rwlock;
        }

        /* grab the monitor lock */
        if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
                return (ret);

        curthread = tls_get_curthread();
        if (prwlock->state == MAX_READ_LOCKS)
                ret = EAGAIN;
        else if ((curthread->rdlock_count > 0) && (prwlock->state > 0)) {
                /* see comment for pthread_rwlock_rdlock() */
                curthread->rdlock_count++;
                prwlock->state++;
        }
        /* give writers priority over readers */
        else if (prwlock->blocked_writers || prwlock->state < 0)
                ret = EBUSY;
        else {
                curthread->rdlock_count++;
                prwlock->state++; /* indicate we are locked for reading */
        }

        /* see the comment on this in pthread_rwlock_rdlock */
        _pthread_mutex_unlock(&prwlock->lock);

        return (ret);
}

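/*
 * Non-blocking write lock: succeeds only when the lock is completely
 * free, otherwise fails with EBUSY.
 */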
int
_pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock)
{
        struct pthread *curthread = tls_get_curthread();
        pthread_rwlock_t prwlock;
        int ret;

        if (rwlock == NULL)
                return (EINVAL);

        prwlock = *rwlock;

        /* check for static initialization */
        if (prwlock == NULL) {
                if ((ret = init_static(curthread, rwlock)) != 0)
                        return (ret);

                prwlock = *rwlock;
        }

        /* grab the monitor lock */
        if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
                return (ret);

        if (prwlock->state != 0)
                ret = EBUSY;
        else
                /* indicate we are locked for writing */
                prwlock->state = -1;

        /* see the comment on this in pthread_rwlock_rdlock */
        _pthread_mutex_unlock(&prwlock->lock);

        return (ret);
}

int
_pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
{
        struct pthread *curthread;
        pthread_rwlock_t prwlock;
        int ret;

        if (rwlock == NULL)
                return (EINVAL);

        prwlock = *rwlock;

        if (prwlock == NULL)
                return (EINVAL);

        /* grab the monitor lock */
        if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
                return (ret);

        curthread = tls_get_curthread();
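        /*
         * state > 0: drop one read lock and wake a waiting writer once the
         * last reader leaves.  state < 0: release the write lock, preferring
         * a blocked writer over waiting readers.  state == 0: nothing held.
         */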
        if (prwlock->state > 0) {
                curthread->rdlock_count--;
                prwlock->state--;
                if (prwlock->state == 0 && prwlock->blocked_writers)
                        ret = _pthread_cond_signal(&prwlock->write_signal);
        } else if (prwlock->state < 0) {
                prwlock->state = 0;

                if (prwlock->blocked_writers)
                        ret = _pthread_cond_signal(&prwlock->write_signal);
                else
                        ret = _pthread_cond_broadcast(&prwlock->read_signal);
        } else
                ret = EINVAL;

        /* see the comment on this in pthread_rwlock_rdlock */
        _pthread_mutex_unlock(&prwlock->lock);

        return (ret);
}

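/*
 * Common write-lock path shared by _pthread_rwlock_wrlock() and
 * _pthread_rwlock_timedwrlock().  The blocked_writers count is what makes
 * new readers yield to queued writers; a NULL abstime means wait
 * indefinitely.
 */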
static int
rwlock_wrlock_common (pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
        struct pthread *curthread = tls_get_curthread();
        pthread_rwlock_t prwlock;
        int ret;

        if (rwlock == NULL)
                return (EINVAL);

        prwlock = *rwlock;

        /* check for static initialization */
        if (prwlock == NULL) {
                if ((ret = init_static(curthread, rwlock)) != 0)
                        return (ret);

                prwlock = *rwlock;
        }

        /* grab the monitor lock */
        if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
                return (ret);

        while (prwlock->state != 0) {
                prwlock->blocked_writers++;

                if (abstime != NULL)
                        ret = _pthread_cond_timedwait(&prwlock->write_signal,
                            &prwlock->lock, abstime);
                else
                        ret = _pthread_cond_wait(&prwlock->write_signal,
                            &prwlock->lock);
                if (ret != 0) {
                        prwlock->blocked_writers--;
                        _pthread_mutex_unlock(&prwlock->lock);
                        return (ret);
                }

                prwlock->blocked_writers--;
        }

        /* indicate we are locked for writing */
        prwlock->state = -1;

        /* see the comment on this in pthread_rwlock_rdlock */
        _pthread_mutex_unlock(&prwlock->lock);

        return (ret);
}

int
_pthread_rwlock_wrlock (pthread_rwlock_t *rwlock)
{
        return (rwlock_wrlock_common (rwlock, NULL));
}

int
_pthread_rwlock_timedwrlock (pthread_rwlock_t *rwlock,
    const struct timespec *abstime)
{
        return (rwlock_wrlock_common (rwlock, abstime));
}
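
/*
 * Usage sketch (illustrative only, not part of this file): the functions
 * above back the standard POSIX read-write lock interface declared in
 * <pthread.h>.  reader() takes the lock shared, writer() takes it
 * exclusive, and both release it with pthread_rwlock_unlock().  The
 * identifiers counter, counter_lock, setup, reader and writer below are
 * hypothetical.
 *
 *	static pthread_rwlock_t counter_lock;
 *	static int counter;
 *
 *	static void
 *	setup(void)
 *	{
 *		pthread_rwlock_init(&counter_lock, NULL);
 *	}
 *
 *	static int
 *	reader(void)
 *	{
 *		int v;
 *
 *		pthread_rwlock_rdlock(&counter_lock);
 *		v = counter;
 *		pthread_rwlock_unlock(&counter_lock);
 *		return (v);
 *	}
 *
 *	static void
 *	writer(void)
 *	{
 *		pthread_rwlock_wrlock(&counter_lock);
 *		counter++;
 *		pthread_rwlock_unlock(&counter_lock);
 *	}
 */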