/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 1998 Alex Nash
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <errno.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>

#include "namespace.h"
#include <pthread.h>
#include "un-namespace.h"
#include "thr_private.h"

_Static_assert(sizeof(struct pthread_rwlock) <= THR_PAGE_SIZE_MIN,
    "pthread_rwlock is too large for off-page");
__weak_reference(_thr_rwlock_destroy, pthread_rwlock_destroy);
__weak_reference(_thr_rwlock_destroy, _pthread_rwlock_destroy);
__weak_reference(_thr_rwlock_init, pthread_rwlock_init);
__weak_reference(_thr_rwlock_init, _pthread_rwlock_init);
__weak_reference(_Tthr_rwlock_rdlock, pthread_rwlock_rdlock);
__weak_reference(_Tthr_rwlock_rdlock, _pthread_rwlock_rdlock);
__weak_reference(_pthread_rwlock_timedrdlock, pthread_rwlock_timedrdlock);
__weak_reference(_Tthr_rwlock_tryrdlock, pthread_rwlock_tryrdlock);
__weak_reference(_Tthr_rwlock_tryrdlock, _pthread_rwlock_tryrdlock);
__weak_reference(_Tthr_rwlock_trywrlock, pthread_rwlock_trywrlock);
__weak_reference(_Tthr_rwlock_trywrlock, _pthread_rwlock_trywrlock);
__weak_reference(_Tthr_rwlock_unlock, pthread_rwlock_unlock);
__weak_reference(_Tthr_rwlock_unlock, _pthread_rwlock_unlock);
__weak_reference(_Tthr_rwlock_wrlock, pthread_rwlock_wrlock);
__weak_reference(_Tthr_rwlock_wrlock, _pthread_rwlock_wrlock);
__weak_reference(_pthread_rwlock_timedwrlock, pthread_rwlock_timedwrlock);

static int init_static(struct pthread *thread, pthread_rwlock_t *rwlock);
static int init_rwlock(pthread_rwlock_t *rwlock, pthread_rwlock_t *rwlock_out);
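
/*
 * Resolve *rwlock to the backing lock structure.  The common case, an
 * already-initialized process-private lock, stays inline; pshared
 * pointers and static-initializer/destroyed values take the
 * out-of-line init_rwlock() path.
 */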
static int __always_inline
check_and_init_rwlock(pthread_rwlock_t *rwlock, pthread_rwlock_t *rwlock_out)
{
	if (__predict_false(*rwlock == THR_PSHARED_PTR ||
	    *rwlock <= THR_RWLOCK_DESTROYED))
		return (init_rwlock(rwlock, rwlock_out));
	*rwlock_out = *rwlock;
	return (0);
}
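
/*
 * Slow path of check_and_init_rwlock(): look up the off-page structure
 * of a process-shared lock, initialize a statically-initialized lock on
 * first use, and reject a lock that has already been destroyed.
 */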
static int __noinline
init_rwlock(pthread_rwlock_t *rwlock, pthread_rwlock_t *rwlock_out)
{
	pthread_rwlock_t prwlock;
	int ret;

	if (*rwlock == THR_PSHARED_PTR) {
		prwlock = __thr_pshared_offpage(rwlock, 0);
		if (prwlock == NULL)
			return (EINVAL);
	} else if ((prwlock = *rwlock) <= THR_RWLOCK_DESTROYED) {
		if (prwlock == THR_RWLOCK_INITIALIZER) {
			ret = init_static(_get_curthread(), rwlock);
			if (ret != 0)
				return (ret);
		} else if (prwlock == THR_RWLOCK_DESTROYED) {
			return (EINVAL);
		}
		prwlock = *rwlock;
	}
	*rwlock_out = prwlock;
	return (0);
}
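
/*
 * Allocate and zero the lock structure.  A process-private lock gets a
 * cache-line-aligned allocation of its own; a process-shared lock lives
 * in the off-page and is flagged USYNC_PROCESS_SHARED so the kernel
 * treats it accordingly.
 */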
static int
rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
{
	pthread_rwlock_t prwlock;

	if (attr == NULL || *attr == NULL ||
	    (*attr)->pshared == PTHREAD_PROCESS_PRIVATE) {
		prwlock = aligned_alloc(CACHE_LINE_SIZE,
		    roundup(sizeof(struct pthread_rwlock), CACHE_LINE_SIZE));
		if (prwlock == NULL)
			return (ENOMEM);
		memset(prwlock, 0, sizeof(struct pthread_rwlock));
		*rwlock = prwlock;
	} else {
		prwlock = __thr_pshared_offpage(rwlock, 1);
		if (prwlock == NULL)
			return (EFAULT);
		prwlock->lock.rw_flags |= USYNC_PROCESS_SHARED;
		*rwlock = THR_PSHARED_PTR;
	}
	return (0);
}
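
/*
 * Destroy a rwlock.  A lock still holding the static initializer was
 * never actually allocated, so there is nothing to free; in every
 * successful case *rwlock is poisoned with THR_RWLOCK_DESTROYED so
 * that later use can be detected.
 */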
int
_thr_rwlock_destroy(pthread_rwlock_t *rwlock)
{
	pthread_rwlock_t prwlock;
	int ret;

	prwlock = *rwlock;
	if (prwlock == THR_RWLOCK_INITIALIZER)
		ret = 0;
	else if (prwlock == THR_RWLOCK_DESTROYED)
		ret = EINVAL;
	else if (prwlock == THR_PSHARED_PTR) {
		*rwlock = THR_RWLOCK_DESTROYED;
		__thr_pshared_destroy(rwlock);
		ret = 0;
	} else {
		*rwlock = THR_RWLOCK_DESTROYED;
		free(prwlock);
		ret = 0;
	}
	return (ret);
}
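
/*
 * First use of a statically-initialized rwlock: several threads may get
 * here at once, so initialization is serialized by the global
 * _rwlock_static_lock and only the winner calls rwlock_init().
 */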
static int
init_static(struct pthread *thread, pthread_rwlock_t *rwlock)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_rwlock_static_lock);

	if (*rwlock == THR_RWLOCK_INITIALIZER)
		ret = rwlock_init(rwlock, NULL);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_rwlock_static_lock);

	return (ret);
}
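
/* pthread_rwlock_init(3) entry point. */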
int
_thr_rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
{

	*rwlock = NULL;
	return (rwlock_init(rwlock, attr));
}
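
/*
 * Common read-lock path: try the userland fast path first, then wait in
 * the kernel, retrying the fast path whenever the sleep is interrupted.
 */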
static int
rwlock_rdlock_common(pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int flags;
	int ret;

	ret = check_and_init_rwlock(rwlock, &prwlock);
	if (ret != 0)
		return (ret);

	if (curthread->rdlock_count) {
		/*
		 * To avoid having to track all the rdlocks held by
		 * a thread, or all of the threads that hold a rdlock,
		 * we keep a simple count of all the rdlocks held by
		 * a thread.  If a thread holds any rdlocks, it is
		 * possible that it is attempting to take a recursive
		 * rdlock.  If there are blocked writers and precedence
		 * were given to them, the thread would deadlock, so
		 * allowing a thread to take the rdlock when it already
		 * holds one or more rdlocks avoids the deadlock.
		 */
		flags = URWLOCK_PREFER_READER;
	} else {
		flags = 0;
	}

	/*
	 * POSIX says the validity of the abstime parameter need not be
	 * checked if the lock can be acquired immediately.
	 */
	ret = _thr_rwlock_tryrdlock(&prwlock->lock, flags);
	if (ret == 0) {
		curthread->rdlock_count++;
		return (ret);
	}

	if (__predict_false(abstime &&
	    (abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0)))
		return (EINVAL);

	for (;;) {
		/* Ask the kernel to acquire the lock. */
		ret = __thr_rwlock_rdlock(&prwlock->lock, flags, abstime);
		if (ret != EINTR)
			break;

		/* If interrupted, retry the userland fast path. */
		if (_thr_rwlock_tryrdlock(&prwlock->lock, flags) == 0) {
			ret = 0;
			break;
		}
	}
	if (ret == 0)
		curthread->rdlock_count++;
	return (ret);
}

int
_Tthr_rwlock_rdlock(pthread_rwlock_t *rwlock)
{

	return (rwlock_rdlock_common(rwlock, NULL));
}

int
_pthread_rwlock_timedrdlock(pthread_rwlock_t * __restrict rwlock,
    const struct timespec * __restrict abstime)
{

	return (rwlock_rdlock_common(rwlock, abstime));
}
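
/*
 * Non-blocking read lock; only the userland fast path is attempted.
 * See the deadlock-avoidance comment in rwlock_rdlock_common() for why
 * URWLOCK_PREFER_READER is used when rdlocks are already held.
 */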
int
_Tthr_rwlock_tryrdlock(pthread_rwlock_t *rwlock)
{
	struct pthread *curthread;
	pthread_rwlock_t prwlock;
	int flags;
	int ret;

	ret = check_and_init_rwlock(rwlock, &prwlock);
	if (ret != 0)
		return (ret);

	curthread = _get_curthread();
	if (curthread->rdlock_count)
		flags = URWLOCK_PREFER_READER;
	else
		flags = 0;

	ret = _thr_rwlock_tryrdlock(&prwlock->lock, flags);
	if (ret == 0)
		curthread->rdlock_count++;
	return (ret);
}
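
/*
 * Non-blocking write lock; on success the owner TID is recorded so that
 * unlock can verify write ownership.
 */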
int
_Tthr_rwlock_trywrlock(pthread_rwlock_t *rwlock)
{
	struct pthread *curthread;
	pthread_rwlock_t prwlock;
	int ret;

	ret = check_and_init_rwlock(rwlock, &prwlock);
	if (ret != 0)
		return (ret);

	curthread = _get_curthread();
	ret = _thr_rwlock_trywrlock(&prwlock->lock);
	if (ret == 0)
		prwlock->owner = TID(curthread);
	return (ret);
}
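
/*
 * Common write-lock path, the write-side counterpart of
 * rwlock_rdlock_common() above.
 */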
static int
rwlock_wrlock_common(pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int ret;

	ret = check_and_init_rwlock(rwlock, &prwlock);
	if (ret != 0)
		return (ret);

	/*
	 * POSIX says the validity of the abstime parameter need not be
	 * checked if the lock can be acquired immediately.
	 */
	ret = _thr_rwlock_trywrlock(&prwlock->lock);
	if (ret == 0) {
		prwlock->owner = TID(curthread);
		return (ret);
	}

	if (__predict_false(abstime &&
	    (abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0)))
		return (EINVAL);

	for (;;) {
		/* Ask the kernel to acquire the lock. */
		ret = __thr_rwlock_wrlock(&prwlock->lock, abstime);
		if (ret == 0) {
			prwlock->owner = TID(curthread);
			break;
		}

		if (ret != EINTR)
			break;

		/* If interrupted, retry the userland fast path. */
		if (_thr_rwlock_trywrlock(&prwlock->lock) == 0) {
			ret = 0;
			prwlock->owner = TID(curthread);
			break;
		}
	}
	return (ret);
}

int
_Tthr_rwlock_wrlock(pthread_rwlock_t *rwlock)
{

	return (rwlock_wrlock_common(rwlock, NULL));
}

int
_pthread_rwlock_timedwrlock(pthread_rwlock_t * __restrict rwlock,
    const struct timespec * __restrict abstime)
{

	return (rwlock_wrlock_common(rwlock, abstime));
}
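
/*
 * Unlock.  A write lock may only be released by its owner.  For a read
 * lock, the per-thread rdlock count is dropped so the
 * URWLOCK_PREFER_READER logic stays accurate.
 */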
int
_Tthr_rwlock_unlock(pthread_rwlock_t *rwlock)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int ret;
	int32_t state;

	if (*rwlock == THR_PSHARED_PTR) {
		prwlock = __thr_pshared_offpage(rwlock, 0);
		if (prwlock == NULL)
			return (EINVAL);
	} else {
		prwlock = *rwlock;
	}

	if (__predict_false(prwlock <= THR_RWLOCK_DESTROYED))
		return (EINVAL);

	state = prwlock->lock.rw_state;
	if (state & URWLOCK_WRITE_OWNER) {
		if (__predict_false(prwlock->owner != TID(curthread)))
			return (EPERM);
		prwlock->owner = 0;
	}

	ret = _thr_rwlock_unlock(&prwlock->lock);
	if (ret == 0 && (state & URWLOCK_WRITE_OWNER) == 0)
		curthread->rdlock_count--;

	return (ret);
}