Merge branch 'vendor/OPENRESOLV'
[dragonfly.git] / lib / libthread_xu / thread / thr_rwlock.c
1 /*-
2  * Copyright (c) 1998 Alex Nash
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  * $FreeBSD: src/lib/libpthread/thread/thr_rwlock.c,v 1.14 2004/01/08 15:37:09 deischen Exp $
27  */
28
29 #include "namespace.h"
30 #include <machine/tls.h>
31 #include <errno.h>
32 #include <limits.h>
33 #include <stdlib.h>
34 #include <pthread.h>
35 #include "un-namespace.h"
36 #include "thr_private.h"
37
38 #ifdef _PTHREADS_DEBUGGING
39
40 #include <stdio.h>
41 #include <stdarg.h>
42 #include <string.h>
43 #include <sys/file.h>
44
45 #endif
46
/* maximum number of times a read lock may be obtained */
#define MAX_READ_LOCKS          (INT_MAX - 1)

/* serializes first-use initialization of statically allocated rwlocks */
umtx_t  _rwlock_static_lock;
51
52 #ifdef _PTHREADS_DEBUGGING
53
/*
 * Format a debug message and hand it to the thread-library logger.
 *
 * vsnprintf() returns the length the fully formatted string WOULD have
 * had (which may exceed the buffer) or a negative value on encoding
 * error.  The original code passed that value straight to _thr_log(),
 * which could read past 'buf' for long messages; clamp it to the number
 * of bytes actually stored.
 */
static
void
rwlock_log(const char *ctl, ...)
{
	char buf[256];
	va_list va;
	int len;

	va_start(va, ctl);
	len = vsnprintf(buf, sizeof(buf), ctl, va);
	va_end(va);

	if (len < 0)
		return;			/* formatting error, nothing usable */
	if ((size_t)len >= sizeof(buf))
		len = sizeof(buf) - 1;	/* output was truncated */
	_thr_log(buf, (size_t)len);
}
67
68 #else
69
/* no-op stub compiled in when _PTHREADS_DEBUGGING is not defined */
static __inline
void
rwlock_log(const char *ctl __unused, ...)
{
}
76 #endif
77
78 static int
79 rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr __unused)
80 {
81         pthread_rwlock_t prwlock;
82         int ret;
83
84         /* allocate rwlock object */
85         prwlock = (pthread_rwlock_t)malloc(sizeof(struct pthread_rwlock));
86
87         if (prwlock == NULL)
88                 return (ENOMEM);
89
90         /* initialize the lock */
91         if ((ret = _pthread_mutex_init(&prwlock->lock, NULL)) != 0) {
92                 free(prwlock);
93         } else {
94                 /* initialize the read condition signal */
95                 ret = _pthread_cond_init(&prwlock->read_signal, NULL);
96
97                 if (ret != 0) {
98                         _pthread_mutex_destroy(&prwlock->lock);
99                         free(prwlock);
100                 } else {
101                         /* initialize the write condition signal */
102                         ret = _pthread_cond_init(&prwlock->write_signal, NULL);
103
104                         if (ret != 0) {
105                                 _pthread_cond_destroy(&prwlock->read_signal);
106                                 _pthread_mutex_destroy(&prwlock->lock);
107                                 free(prwlock);
108                         } else {
109                                 /* success */
110                                 prwlock->state = 0;
111                                 prwlock->blocked_writers = 0;
112                                 *rwlock = prwlock;
113                         }
114                 }
115         }
116
117         return (ret);
118 }
119
#if 0
/*
 * Reinitialize a rwlock in place -- currently compiled out.
 *
 * NOTE(review): unlike rwlock_init() this never touches write_signal,
 * and read_signal is passed by value while _mutex_reinit() takes a
 * pointer -- confirm _cond_reinit()'s signature and add the missing
 * write_signal reinit before enabling this code.
 */
void
_rwlock_reinit(pthread_rwlock_t prwlock)
{
	_mutex_reinit(&prwlock->lock);
	_cond_reinit(prwlock->read_signal);
	prwlock->state = 0;
	prwlock->blocked_writers = 0;
}
#endif
130
131 int
132 _pthread_rwlock_destroy (pthread_rwlock_t *rwlock)
133 {
134         int ret;
135
136         if (rwlock == NULL) {
137                 ret = EINVAL;
138         } else if (*rwlock == NULL) {
139                 ret = 0;
140         } else {
141                 pthread_rwlock_t prwlock;
142
143                 prwlock = *rwlock;
144                 rwlock_log("rwlock_destroy %p\n", prwlock);
145
146                 _pthread_mutex_destroy(&prwlock->lock);
147                 _pthread_cond_destroy(&prwlock->read_signal);
148                 _pthread_cond_destroy(&prwlock->write_signal);
149                 free(prwlock);
150
151                 *rwlock = NULL;
152
153                 ret = 0;
154         }
155         return (ret);
156 }
157
158 static int
159 init_static(struct pthread *thread, pthread_rwlock_t *rwlock)
160 {
161         int ret;
162
163         THR_LOCK_ACQUIRE(thread, &_rwlock_static_lock);
164
165         if (*rwlock == NULL)
166                 ret = rwlock_init(rwlock, NULL);
167         else
168                 ret = 0;
169
170         THR_LOCK_RELEASE(thread, &_rwlock_static_lock);
171
172         return (ret);
173 }
174
175 int
176 _pthread_rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
177 {
178         *rwlock = NULL;
179         return (rwlock_init(rwlock, attr));
180 }
181
/*
 * Common path for pthread_rwlock_rdlock() and the timed variant:
 * acquire a read lock, blocking (until 'abstime' if non-NULL) while a
 * writer holds the lock or writers are queued.  Returns 0 or an errno
 * value (EINVAL, EAGAIN on reader-count overflow, or a cond-wait error
 * such as ETIMEDOUT).
 */
static int
rwlock_rdlock_common(pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
	struct pthread *curthread = tls_get_curthread();
	pthread_rwlock_t prwlock;
	int ret;

	if (rwlock == NULL)
		return (EINVAL);

	prwlock = *rwlock;

	/* check for static initialization */
	if (prwlock == NULL) {
		if ((ret = init_static(curthread, rwlock)) != 0)
			return (ret);

		prwlock = *rwlock;
	}
	rwlock_log("rwlock_rdlock_common %p\n", prwlock);

	/* grab the monitor lock */
	if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0) {
		rwlock_log("rwlock_rdlock_common %p (failedA)\n", prwlock);
		return (ret);
	}

	/* check lock count (state counts readers when positive) */
	if (prwlock->state == MAX_READ_LOCKS) {
		_pthread_mutex_unlock(&prwlock->lock);
		rwlock_log("rwlock_rdlock_common %p (failedB)\n", prwlock);
		return (EAGAIN);
	}

	/* NOTE(review): curthread was already fetched above; this re-fetch
	 * looks redundant but is kept as-is -- confirm before removing. */
	curthread = tls_get_curthread();
	if ((curthread->rdlock_count > 0) && (prwlock->state > 0)) {
		/*
		 * To avoid having to track all the rdlocks held by
		 * a thread or all of the threads that hold a rdlock,
		 * we keep a simple count of all the rdlocks held by
		 * a thread.  If a thread holds any rdlocks it is
		 * possible that it is attempting to take a recursive
		 * rdlock.  If there are blocked writers and precedence
		 * is given to them, then that would result in the thread
		 * deadlocking.  So allowing a thread to take the rdlock
		 * when it already has one or more rdlocks avoids the
		 * deadlock.  I hope the reader can follow that logic ;-)
		 */
		;	/* nothing needed */
	} else {
		/*
		 * Give writers priority over readers
		 *
		 * WARNING: pthread_cond*() temporarily releases the
		 *          mutex.
		 */
		while (prwlock->blocked_writers || prwlock->state < 0) {
			if (abstime) {
				ret = _pthread_cond_timedwait(
					    &prwlock->read_signal,
					    &prwlock->lock, abstime);
			} else {
				ret = _pthread_cond_wait(
					    &prwlock->read_signal,
					    &prwlock->lock);
			}
			if (ret != 0) {
				/* can't do a whole lot if this fails */
				_pthread_mutex_unlock(&prwlock->lock);
				rwlock_log("rwlock_rdlock_common %p "
					   "(failedC)\n", prwlock);
				return (ret);
			}
		}
	}

	/* grant the read lock: bump this thread's count and the shared state */
	curthread->rdlock_count++;
	prwlock->state++; /* indicate we are locked for reading */

	/*
	 * Something is really wrong if this call fails.  Returning
	 * error won't do because we've already obtained the read
	 * lock.  Decrementing 'state' is no good because we probably
	 * don't have the monitor lock.
	 */
	_pthread_mutex_unlock(&prwlock->lock);
	rwlock_log("rwlock_rdlock_common %p (return %d)\n", prwlock, ret);

	return (ret);
}
272
273 int
274 _pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
275 {
276         return (rwlock_rdlock_common(rwlock, NULL));
277 }
278
279 int
280 _pthread_rwlock_timedrdlock (pthread_rwlock_t * __restrict rwlock,
281     const struct timespec * __restrict abstime)
282 {
283         return (rwlock_rdlock_common(rwlock, abstime));
284 }
285
286 int
287 _pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
288 {
289         struct pthread *curthread = tls_get_curthread();
290         pthread_rwlock_t prwlock;
291         int ret;
292
293         if (rwlock == NULL)
294                 return (EINVAL);
295
296         prwlock = *rwlock;
297
298         /* check for static initialization */
299         if (prwlock == NULL) {
300                 if ((ret = init_static(curthread, rwlock)) != 0)
301                         return (ret);
302
303                 prwlock = *rwlock;
304         }
305
306         /* grab the monitor lock */
307         if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
308                 return (ret);
309
310         curthread = tls_get_curthread();
311         if (prwlock->state == MAX_READ_LOCKS)
312                 ret = EAGAIN;
313         else if ((curthread->rdlock_count > 0) && (prwlock->state > 0)) {
314                 /* see comment for pthread_rwlock_rdlock() */
315                 curthread->rdlock_count++;
316                 prwlock->state++;
317         }
318         /* give writers priority over readers */
319         else if (prwlock->blocked_writers || prwlock->state < 0)
320                 ret = EBUSY;
321         else {
322                 curthread->rdlock_count++;
323                 prwlock->state++; /* indicate we are locked for reading */
324         }
325
326         /* see the comment on this in pthread_rwlock_rdlock */
327         _pthread_mutex_unlock(&prwlock->lock);
328
329         return (ret);
330 }
331
332 int
333 _pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock)
334 {
335         struct pthread *curthread = tls_get_curthread();
336         pthread_rwlock_t prwlock;
337         int ret;
338
339         if (rwlock == NULL)
340                 return (EINVAL);
341
342         prwlock = *rwlock;
343
344         /* check for static initialization */
345         if (prwlock == NULL) {
346                 if ((ret = init_static(curthread, rwlock)) != 0)
347                         return (ret);
348
349                 prwlock = *rwlock;
350         }
351
352         /* grab the monitor lock */
353         if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
354                 return (ret);
355
356         if (prwlock->state != 0)
357                 ret = EBUSY;
358         else
359                 /* indicate we are locked for writing */
360                 prwlock->state = -1;
361
362         /* see the comment on this in pthread_rwlock_rdlock */
363         _pthread_mutex_unlock(&prwlock->lock);
364
365         return (ret);
366 }
367
/*
 * Release a read or write lock.  The sign of 'state' tells which kind
 * is held: >0 means readers, -1 means a writer, 0 means the lock is not
 * held at all (EINVAL).  Wakes waiters according to writer priority.
 */
int
_pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
{
	struct pthread *curthread;
	pthread_rwlock_t prwlock;
	int ret;

	if (rwlock == NULL)
		return (EINVAL);

	prwlock = *rwlock;

	if (prwlock == NULL)
		return (EINVAL);

	rwlock_log("rwlock_unlock %p\n", prwlock);

	/* grab the monitor lock */
	if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
		return (ret);

	curthread = tls_get_curthread();
	if (prwlock->state > 0) {
		/*
		 * Unlock reader
		 */
		curthread->rdlock_count--;
		prwlock->state--;
		/* last reader out hands the lock to one waiting writer */
		if (prwlock->state == 0 && prwlock->blocked_writers)
			ret = _pthread_cond_signal(&prwlock->write_signal);
	} else if (prwlock->state < 0) {
		/*
		 * unlock writer
		 */
		prwlock->state = 0;

		/* prefer a queued writer; otherwise release all readers */
		if (prwlock->blocked_writers)
			ret = _pthread_cond_signal(&prwlock->write_signal);
		else
			ret = _pthread_cond_broadcast(&prwlock->read_signal);
	} else {
		/* state == 0: unlocking a lock that is not held */
		ret = EINVAL;
	}

	/* see the comment on this in pthread_rwlock_rdlock */
	_pthread_mutex_unlock(&prwlock->lock);
	rwlock_log("rwlock_unlock %p (return %d)\n", prwlock, ret);

	return (ret);
}
418
/*
 * Common path for pthread_rwlock_wrlock() and the timed variant:
 * acquire the write lock, blocking (until 'abstime' if non-NULL) while
 * the lock is held by anyone.  'blocked_writers' is raised around each
 * wait so readers and unlockers know writers are queued.  Returns 0 or
 * an errno value.
 */
static int
rwlock_wrlock_common (pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
	struct pthread *curthread = tls_get_curthread();
	pthread_rwlock_t prwlock;
	int ret;

	if (rwlock == NULL)
		return (EINVAL);

	prwlock = *rwlock;

	/* check for static initialization */
	if (prwlock == NULL) {
		if ((ret = init_static(curthread, rwlock)) != 0)
			return (ret);

		prwlock = *rwlock;
	}
	rwlock_log("rwlock_wrlock_common %p\n", prwlock);

	/* grab the monitor lock */
	if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0) {
		rwlock_log("rwlock_wrlock_common %p (failedA)\n", prwlock);
		return (ret);
	}

	/* wait until nobody (readers or writer) holds the lock */
	while (prwlock->state != 0) {
		prwlock->blocked_writers++;

		/*
		 * WARNING: pthread_cond*() temporarily releases the
		 *          mutex.
		 */
		if (abstime != NULL) {
			ret = _pthread_cond_timedwait(&prwlock->write_signal,
						      &prwlock->lock,
						      abstime);
		} else {
			ret = _pthread_cond_wait(&prwlock->write_signal,
						 &prwlock->lock);
		}

		/*
		 * Undo on failure.  When the blocked_writers count drops
		 * to 0 we may have to wakeup blocked readers.
		 */
		if (ret != 0) {
			prwlock->blocked_writers--;
			if (prwlock->blocked_writers == 0 &&
			    prwlock->state >= 0) {
				_pthread_cond_broadcast(&prwlock->read_signal);
			}
			_pthread_mutex_unlock(&prwlock->lock);
			rwlock_log("rwlock_wrlock_common %p (failedB %d)\n",
				   prwlock, ret);
			return (ret);
		}

		prwlock->blocked_writers--;
	}

	/* indicate we are locked for writing */
	prwlock->state = -1;

	/* see the comment on this in pthread_rwlock_rdlock */
	_pthread_mutex_unlock(&prwlock->lock);
	rwlock_log("rwlock_wrlock_common %p (returns %d)\n", prwlock, ret);

	return (ret);
}
490
491 int
492 _pthread_rwlock_wrlock (pthread_rwlock_t *rwlock)
493 {
494         return (rwlock_wrlock_common (rwlock, NULL));
495 }
496
497 int
498 _pthread_rwlock_timedwrlock (pthread_rwlock_t * __restrict rwlock,
499     const struct timespec * __restrict abstime)
500 {
501         return (rwlock_wrlock_common (rwlock, abstime));
502 }
503
/* export the standard pthread_rwlock_* names as aliases of the _pthread_ set */
__strong_reference(_pthread_rwlock_destroy, pthread_rwlock_destroy);
__strong_reference(_pthread_rwlock_init, pthread_rwlock_init);
__strong_reference(_pthread_rwlock_rdlock, pthread_rwlock_rdlock);
__strong_reference(_pthread_rwlock_timedrdlock, pthread_rwlock_timedrdlock);
__strong_reference(_pthread_rwlock_tryrdlock, pthread_rwlock_tryrdlock);
__strong_reference(_pthread_rwlock_trywrlock, pthread_rwlock_trywrlock);
__strong_reference(_pthread_rwlock_unlock, pthread_rwlock_unlock);
__strong_reference(_pthread_rwlock_wrlock, pthread_rwlock_wrlock);
__strong_reference(_pthread_rwlock_timedwrlock, pthread_rwlock_timedwrlock);