/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * Copyright (c) 2006 David Xu <davidxu@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "namespace.h"
#include <machine/tls.h>

#include <errno.h>
#include <stdlib.h>
#include <sys/queue.h>
#include <pthread.h>
#include "un-namespace.h"

#include "thr_private.h"

#if defined(_PTHREADS_INVARIANTS)
#define MUTEX_INIT_LINK(m) do {                 \
    (m)->m_qe.tqe_prev = NULL;                  \
    (m)->m_qe.tqe_next = NULL;                  \
} while (0)
#define MUTEX_ASSERT_IS_OWNED(m) do {           \
    if ((m)->m_qe.tqe_prev == NULL)             \
        PANIC("mutex is not on list");          \
} while (0)
#define MUTEX_ASSERT_NOT_OWNED(m) do {          \
    if (((m)->m_qe.tqe_prev != NULL) ||         \
        ((m)->m_qe.tqe_next != NULL))           \
        PANIC("mutex is on list");              \
} while (0)
#define THR_ASSERT_NOT_IN_SYNCQ(thr) do {       \
    THR_ASSERT(((thr)->sflags & THR_FLAGS_IN_SYNCQ) == 0, \
        "thread in syncq when it shouldn't be."); \
} while (0)
#else
#define MUTEX_INIT_LINK(m)
#define MUTEX_ASSERT_IS_OWNED(m)
#define MUTEX_ASSERT_NOT_OWNED(m)
#define THR_ASSERT_NOT_IN_SYNCQ(thr)
#endif

#define THR_IN_MUTEXQ(thr) (((thr)->sflags & THR_FLAGS_IN_SYNCQ) != 0)
#define MUTEX_DESTROY(m) do {                   \
    free(m);                                    \
} while (0)

umtx_t _mutex_static_lock;

/*
 * Prototypes
 */
static int mutex_self_trylock(pthread_mutex_t);
static int mutex_self_lock(pthread_mutex_t,
        const struct timespec *abstime);
static int mutex_unlock_common(pthread_mutex_t *);

int __pthread_mutex_init(pthread_mutex_t *mutex,
        const pthread_mutexattr_t *mutex_attr);
int __pthread_mutex_trylock(pthread_mutex_t *mutex);
int __pthread_mutex_lock(pthread_mutex_t *mutex);
int __pthread_mutex_timedlock(pthread_mutex_t *mutex,
        const struct timespec *abs_timeout);

static int
mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr, int private)
{
    const struct pthread_mutex_attr *attr;
    struct pthread_mutex *pmutex;

    if (mutex_attr == NULL) {
        attr = &_pthread_mutexattr_default;
    } else {
        attr = *mutex_attr;
        if (attr->m_type < PTHREAD_MUTEX_ERRORCHECK ||
            attr->m_type >= PTHREAD_MUTEX_TYPE_MAX)
            return (EINVAL);
        if (attr->m_protocol < PTHREAD_PRIO_NONE ||
            attr->m_protocol > PTHREAD_PRIO_PROTECT)
            return (EINVAL);
    }

    if ((pmutex = (pthread_mutex_t)
        malloc(sizeof(struct pthread_mutex))) == NULL)
        return (ENOMEM);

    _thr_umtx_init(&pmutex->m_lock);
    pmutex->m_type = attr->m_type;
    pmutex->m_protocol = attr->m_protocol;
    TAILQ_INIT(&pmutex->m_queue);
    pmutex->m_owner = NULL;
    pmutex->m_flags = attr->m_flags | MUTEX_FLAGS_INITED;
    if (private)
        pmutex->m_flags |= MUTEX_FLAGS_PRIVATE;
    pmutex->m_count = 0;
    pmutex->m_refcount = 0;
    if (attr->m_protocol == PTHREAD_PRIO_PROTECT)
        pmutex->m_prio = attr->m_ceiling;
    else
        pmutex->m_prio = -1;
    pmutex->m_saved_prio = 0;
    MUTEX_INIT_LINK(pmutex);
    *mutex = pmutex;
    return (0);
}
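
/*
 * A minimal caller-side sketch (illustrative only, assuming the standard
 * pthread_mutexattr_setprotocol()/pthread_mutexattr_setprioceiling()
 * interfaces are available): selecting PTHREAD_PRIO_PROTECT is what makes
 * mutex_init() above store the ceiling in m_prio.
 *
 *    pthread_mutexattr_t attr;
 *    pthread_mutex_t mtx;
 *
 *    pthread_mutexattr_init(&attr);
 *    pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_PROTECT);
 *    pthread_mutexattr_setprioceiling(&attr, 10);
 *    pthread_mutex_init(&mtx, &attr);
 */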

static int
init_static(struct pthread *thread, pthread_mutex_t *mutex)
{
    int ret;

    THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

    if (*mutex == NULL)
        ret = mutex_init(mutex, NULL, 0);
    else
        ret = 0;

    THR_LOCK_RELEASE(thread, &_mutex_static_lock);

    return (ret);
}

static int
init_static_private(struct pthread *thread, pthread_mutex_t *mutex)
{
    int ret;

    THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

    if (*mutex == NULL)
        ret = mutex_init(mutex, NULL, 1);
    else
        ret = 0;

    THR_LOCK_RELEASE(thread, &_mutex_static_lock);

    return (ret);
}

int
_pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
    return mutex_init(mutex, mutex_attr, 1);
}

int
__pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
    return mutex_init(mutex, mutex_attr, 0);
}
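
/*
 * The only difference between the two entry points above is the `private'
 * argument: _pthread_mutex_init() creates a MUTEX_FLAGS_PRIVATE ("delete
 * safe") mutex for libc-internal use, while __pthread_mutex_init() creates
 * an ordinary application mutex.
 */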

int
_mutex_reinit(pthread_mutex_t *mutex)
{
    _thr_umtx_init(&(*mutex)->m_lock);
    TAILQ_INIT(&(*mutex)->m_queue);
    MUTEX_INIT_LINK(*mutex);
    (*mutex)->m_owner = NULL;
    (*mutex)->m_count = 0;
    (*mutex)->m_refcount = 0;
    (*mutex)->m_prio = 0;
    (*mutex)->m_saved_prio = 0;
    return (0);
}

void
_mutex_fork(struct pthread *curthread)
{
    struct pthread_mutex *m;

    TAILQ_FOREACH(m, &curthread->mutexq, m_qe)
        m->m_lock = UMTX_LOCKED;
}
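
/*
 * Presumably this is needed because only the thread that called fork()
 * survives in the child: each mutex it owns is reset to a plain
 * UMTX_LOCKED value so the child can keep using, and later unlock, the
 * mutexes it already held.
 */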

int
_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
    struct pthread *curthread = tls_get_curthread();
    pthread_mutex_t m;
    int ret = 0;

    if (__predict_false(mutex == NULL))
        ret = EINVAL;
    else if (*mutex == NULL)
        ret = 0;
    else {
        /*
         * Try to lock the mutex structure; we only need to
         * try once.  If that fails, the mutex is in use.
         */
        ret = THR_UMTX_TRYLOCK(curthread, &(*mutex)->m_lock);
        if (ret)
            return (ret);

        /*
         * Check the mutex's other fields to see if it is
         * in use.  This matters mostly for priority mutex types,
         * or when condition variables are still referencing it.
         */
        if (((*mutex)->m_owner != NULL) ||
            (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) ||
            ((*mutex)->m_refcount != 0)) {
            THR_UMTX_UNLOCK(curthread, &(*mutex)->m_lock);
            ret = EBUSY;
        } else {
            /*
             * Save a pointer to the mutex so it can be freed
             * and set the caller's pointer to NULL:
             */
            m = *mutex;
            *mutex = NULL;

            /* Unlock the mutex structure: */
            THR_UMTX_UNLOCK(curthread, &m->m_lock);

            /*
             * Free the memory allocated for the mutex
             * structure:
             */
            MUTEX_ASSERT_NOT_OWNED(m);
            MUTEX_DESTROY(m);
        }
    }

    /* Return the completion status: */
    return (ret);
}

static int
mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
{
    struct pthread_mutex *m;
    int ret;

    m = *mutex;
    ret = THR_UMTX_TRYLOCK(curthread, &m->m_lock);
    if (ret == 0) {
        m->m_owner = curthread;
        /* Add to the list of owned mutexes: */
        MUTEX_ASSERT_NOT_OWNED(m);
        TAILQ_INSERT_TAIL(&curthread->mutexq,
            m, m_qe);
    } else if (m->m_owner == curthread) {
        ret = mutex_self_trylock(m);
    }
    return (ret);
}

int
__pthread_mutex_trylock(pthread_mutex_t *m)
{
    struct pthread *curthread = tls_get_curthread();
    int ret;

    if (__predict_false(m == NULL))
        return (EINVAL);

    /*
     * If the mutex is statically initialized, perform the dynamic
     * initialization:
     */
    if (__predict_false(*m == NULL)) {
        ret = init_static(curthread, m);
        if (__predict_false(ret != 0))
            return (ret);
    }
    return (mutex_trylock_common(curthread, m));
}
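
/*
 * Illustrative sketch of the static-initializer path above (assuming
 * PTHREAD_MUTEX_INITIALIZER expands to a NULL pointer in this
 * implementation, as the "*m == NULL" checks suggest):
 *
 *    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
 *
 *    pthread_mutex_trylock(&lock);   // *m == NULL -> init_static()
 *    pthread_mutex_unlock(&lock);
 */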

int
_pthread_mutex_trylock(pthread_mutex_t *m)
{
    struct pthread *curthread = tls_get_curthread();
    int ret;

    /*
     * If the mutex is statically initialized, perform the dynamic
     * initialization marking the mutex private (delete safe):
     */
    if (__predict_false(*m == NULL)) {
        ret = init_static_private(curthread, m);
        if (__predict_false(ret != 0))
            return (ret);
    }
    return (mutex_trylock_common(curthread, m));
}

static int
mutex_lock_common(struct pthread *curthread, pthread_mutex_t *mutex,
    const struct timespec *abstime)
{
    struct timespec ts, ts2;
    struct pthread_mutex *m;
    int ret;

    m = *mutex;
    ret = THR_UMTX_TRYLOCK(curthread, &m->m_lock);
    if (ret == 0) {
        m->m_owner = curthread;
        /* Add to the list of owned mutexes: */
        MUTEX_ASSERT_NOT_OWNED(m);
        TAILQ_INSERT_TAIL(&curthread->mutexq,
            m, m_qe);
    } else if (m->m_owner == curthread) {
        ret = mutex_self_lock(m, abstime);
    } else {
        if (abstime == NULL) {
            THR_UMTX_LOCK(curthread, &m->m_lock);
            ret = 0;
        } else if (__predict_false(
            abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
            abstime->tv_nsec >= 1000000000)) {
            ret = EINVAL;
        } else {
            clock_gettime(CLOCK_REALTIME, &ts);
            TIMESPEC_SUB(&ts2, abstime, &ts);
            ret = THR_UMTX_TIMEDLOCK(curthread,
                &m->m_lock, &ts2);
            /*
             * A timed-out wait is not restarted if it was
             * interrupted; it is not worth doing.
             */
            if (ret == EINTR)
                ret = ETIMEDOUT;
        }
        if (ret == 0) {
            m->m_owner = curthread;
            /* Add to the list of owned mutexes: */
            MUTEX_ASSERT_NOT_OWNED(m);
            TAILQ_INSERT_TAIL(&curthread->mutexq,
                m, m_qe);
        }
    }
    return (ret);
}
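
/*
 * The abstime argument is an absolute CLOCK_REALTIME deadline, while the
 * umtx timed lock wants a relative interval, hence the clock_gettime()/
 * TIMESPEC_SUB() pair above.  A minimal caller-side sketch (mtx is
 * illustrative only):
 *
 *    struct timespec abst;
 *
 *    clock_gettime(CLOCK_REALTIME, &abst);
 *    abst.tv_sec += 1;               // give up after about one second
 *    pthread_mutex_timedlock(&mtx, &abst);
 */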

int
__pthread_mutex_lock(pthread_mutex_t *m)
{
    struct pthread *curthread;
    int ret;

    if (__predict_false(m == NULL))
        return (EINVAL);

    /*
     * If the mutex is statically initialized, perform the dynamic
     * initialization:
     */
    curthread = tls_get_curthread();
    if (__predict_false(*m == NULL)) {
        ret = init_static(curthread, m);
        if (__predict_false(ret))
            return (ret);
    }
    return (mutex_lock_common(curthread, m, NULL));
}

int
_pthread_mutex_lock(pthread_mutex_t *m)
{
    struct pthread *curthread;
    int ret;

    if (__predict_false(m == NULL))
        return (EINVAL);

    /*
     * If the mutex is statically initialized, perform the dynamic
     * initialization marking it private (delete safe):
     */
    curthread = tls_get_curthread();
    if (__predict_false(*m == NULL)) {
        ret = init_static_private(curthread, m);
        if (__predict_false(ret))
            return (ret);
    }
    return (mutex_lock_common(curthread, m, NULL));
}

int
__pthread_mutex_timedlock(pthread_mutex_t *m,
    const struct timespec *abs_timeout)
{
    struct pthread *curthread;
    int ret;

    if (__predict_false(m == NULL))
        return (EINVAL);

    /*
     * If the mutex is statically initialized, perform the dynamic
     * initialization:
     */
    curthread = tls_get_curthread();
    if (__predict_false(*m == NULL)) {
        ret = init_static(curthread, m);
        if (__predict_false(ret))
            return (ret);
    }
    return (mutex_lock_common(curthread, m, abs_timeout));
}

int
_pthread_mutex_timedlock(pthread_mutex_t *m,
    const struct timespec *abs_timeout)
{
    struct pthread *curthread;
    int ret;

    if (__predict_false(m == NULL))
        return (EINVAL);

    curthread = tls_get_curthread();

    /*
     * If the mutex is statically initialized, perform the dynamic
     * initialization marking it private (delete safe):
     */
    if (__predict_false(*m == NULL)) {
        ret = init_static_private(curthread, m);
        if (__predict_false(ret))
            return (ret);
    }
    return (mutex_lock_common(curthread, m, abs_timeout));
}

int
_pthread_mutex_unlock(pthread_mutex_t *m)
{
    if (__predict_false(m == NULL))
        return (EINVAL);
    return (mutex_unlock_common(m));
}

static int
mutex_self_trylock(pthread_mutex_t m)
{
    int ret;

    switch (m->m_type) {
    /* case PTHREAD_MUTEX_DEFAULT: */
    case PTHREAD_MUTEX_ERRORCHECK:
    case PTHREAD_MUTEX_NORMAL:
        ret = EBUSY;
        break;

    case PTHREAD_MUTEX_RECURSIVE:
        /* Increment the lock count: */
        if (m->m_count + 1 > 0) {
            m->m_count++;
            ret = 0;
        } else
            ret = EAGAIN;
        break;

    default:
        /* Trap invalid mutex types; */
        ret = EINVAL;
    }

    return (ret);
}

static int
mutex_self_lock(pthread_mutex_t m, const struct timespec *abstime)
{
    struct timespec ts1, ts2;
    int ret;

    switch (m->m_type) {
    /* case PTHREAD_MUTEX_DEFAULT: */
    case PTHREAD_MUTEX_ERRORCHECK:
        if (abstime) {
            clock_gettime(CLOCK_REALTIME, &ts1);
            TIMESPEC_SUB(&ts2, abstime, &ts1);
            __sys_nanosleep(&ts2, NULL);
            ret = ETIMEDOUT;
        } else {
            /*
             * POSIX specifies that mutexes should return
             * EDEADLK if a recursive lock is detected.
             */
            ret = EDEADLK;
        }
        break;

    case PTHREAD_MUTEX_NORMAL:
        /*
         * What SS2 defines as a 'normal' mutex.  Intentionally
         * deadlock on attempts to get a lock you already own.
         */
        ret = 0;
        if (abstime) {
            clock_gettime(CLOCK_REALTIME, &ts1);
            TIMESPEC_SUB(&ts2, abstime, &ts1);
            __sys_nanosleep(&ts2, NULL);
            ret = ETIMEDOUT;
        } else {
            ts1.tv_sec = 30;
            ts1.tv_nsec = 0;
            for (;;)
                __sys_nanosleep(&ts1, NULL);
        }
        break;

    case PTHREAD_MUTEX_RECURSIVE:
        /* Increment the lock count: */
        if (m->m_count + 1 > 0) {
            m->m_count++;
            ret = 0;
        } else
            ret = EAGAIN;
        break;

    default:
        /* Trap invalid mutex types; */
        ret = EINVAL;
    }

    return (ret);
}
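
/*
 * Relocking a mutex the caller already owns therefore depends on its type:
 * ERRORCHECK fails with EDEADLK (or sleeps out the timeout and reports
 * ETIMEDOUT), NORMAL intentionally blocks forever, and RECURSIVE just
 * bumps m_count.  Illustrative sketch for a PTHREAD_MUTEX_RECURSIVE mutex:
 *
 *    pthread_mutex_lock(&rmtx);      // owner set, m_count == 0
 *    pthread_mutex_lock(&rmtx);      // self lock, m_count == 1
 *    pthread_mutex_unlock(&rmtx);    // m_count back to 0, still owned
 *    pthread_mutex_unlock(&rmtx);    // lock handed off / released
 */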

static int
mutex_unlock_common(pthread_mutex_t *mutex)
{
    struct pthread *curthread = tls_get_curthread();
    struct pthread_mutex *m;

    if (__predict_false((m = *mutex) == NULL))
        return (EINVAL);
    if (__predict_false(m->m_owner != curthread))
        return (EPERM);

    if (__predict_false(
        m->m_type == PTHREAD_MUTEX_RECURSIVE &&
        m->m_count > 0)) {
        m->m_count--;
    } else {
        /*
         * Clear the count in case this is a recursive mutex.
         */
        m->m_count = 0;
        m->m_owner = NULL;
        /* Remove the mutex from the thread's queue. */
        MUTEX_ASSERT_IS_OWNED(m);
        TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
        MUTEX_INIT_LINK(m);
        /*
         * Hand off the mutex to the next waiting thread.
         */
        THR_UMTX_UNLOCK(curthread, &m->m_lock);
    }
    return (0);
}

int
_mutex_cv_lock(pthread_mutex_t *m, int count)
{
    int ret;

    if ((ret = _pthread_mutex_lock(m)) == 0) {
        (*m)->m_refcount--;
        (*m)->m_count += count;
    }
    return (ret);
}

int
_mutex_cv_unlock(pthread_mutex_t *mutex, int *count)
{
    struct pthread *curthread = tls_get_curthread();
    struct pthread_mutex *m;

    if (__predict_false(mutex == NULL))
        return (EINVAL);
    if (__predict_false((m = *mutex) == NULL))
        return (EINVAL);
    if (__predict_false(m->m_owner != curthread))
        return (EPERM);

    *count = m->m_count;
    m->m_count = 0;
    m->m_refcount++;
    m->m_owner = NULL;
    /* Remove the mutex from the thread's queue. */
    MUTEX_ASSERT_IS_OWNED(m);
    TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
    MUTEX_INIT_LINK(m);
    THR_UMTX_UNLOCK(curthread, &m->m_lock);
    return (0);
}
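
/*
 * _mutex_cv_lock()/_mutex_cv_unlock() are the condition-variable helpers:
 * unlocking saves the recursion count and bumps m_refcount (which also
 * makes _pthread_mutex_destroy() return EBUSY while a condition variable
 * still references the mutex), and relocking restores the saved count once
 * the wait is over.
 */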

void
_mutex_unlock_private(pthread_t pthread)
{
    struct pthread_mutex *m, *m_next;

    for (m = TAILQ_FIRST(&pthread->mutexq); m != NULL; m = m_next) {
        m_next = TAILQ_NEXT(m, m_qe);
        if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
            _pthread_mutex_unlock(&m);
    }
}

__strong_reference(__pthread_mutex_init, pthread_mutex_init);
__strong_reference(__pthread_mutex_lock, pthread_mutex_lock);
__strong_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
__strong_reference(__pthread_mutex_trylock, pthread_mutex_trylock);

/* Single underscore versions provided for libc internal usage: */
/* No difference between libc and application usage of these: */
__strong_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__strong_reference(_pthread_mutex_unlock, pthread_mutex_unlock);