2 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by John Birrell.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * $FreeBSD: src/lib/libpthread/thread/thr_mutex.c,v 1.46 2004/10/31 05:03:50 green Exp $
33 * $DragonFly: src/lib/libthread_xu/thread/thr_mutex.c,v 1.2 2005/03/15 11:24:23 davidxu Exp $
38 #include <sys/param.h>
39 #include <sys/queue.h>
41 #include "thr_private.h"
/*
 * Invariant-checking helper macros. When _PTHREADS_INVARIANTS is
 * defined, the ownership-queue linkage of each mutex is tracked and
 * asserted via PANIC(); otherwise the macros compile away to nothing.
 *
 * NOTE(review): this chunk was extracted with the original file's line
 * numbers fused into the text and with some lines dropped; the macro
 * bodies below are reconstructed to match the visible fragments.
 */
#if defined(_PTHREADS_INVARIANTS)
/* Mark a mutex as linked onto no owner's queue. */
#define MUTEX_INIT_LINK(m)		do {		\
	(m)->m_qe.tqe_prev = NULL;			\
	(m)->m_qe.tqe_next = NULL;			\
} while (0)
/* Panic unless the mutex is on an owner's queue. */
#define MUTEX_ASSERT_IS_OWNED(m)	do {		\
	if ((m)->m_qe.tqe_prev == NULL)			\
		PANIC("mutex is not on list");		\
} while (0)
/* Panic if the mutex is still on an owner's queue. */
#define MUTEX_ASSERT_NOT_OWNED(m)	do {		\
	if (((m)->m_qe.tqe_prev != NULL) ||		\
	    ((m)->m_qe.tqe_next != NULL))		\
		PANIC("mutex is on list");		\
} while (0)
/* Panic if the thread is still queued on a synchronization object. */
#define THR_ASSERT_NOT_IN_SYNCQ(thr)	do {		\
	THR_ASSERT(((thr)->sflags & THR_FLAGS_IN_SYNCQ) == 0, \
	    "thread in syncq when it shouldn't be.");	\
} while (0)
#else
#define MUTEX_INIT_LINK(m)
#define MUTEX_ASSERT_IS_OWNED(m)
#define MUTEX_ASSERT_NOT_OWNED(m)
#define THR_ASSERT_NOT_IN_SYNCQ(thr)
#endif

/* True if the thread is waiting on some synchronization queue. */
#define THR_IN_MUTEXQ(thr)	(((thr)->sflags & THR_FLAGS_IN_SYNCQ) != 0)

/* Release the storage backing a mutex. */
#define MUTEX_DESTROY(m) do {		\
	free(m);			\
} while (0)
/*
 * Prototypes for the file-local helpers implemented below, followed by
 * the weak symbols that map the double-underscore (application) and
 * single-underscore (libc-internal) entry points onto the public
 * pthread_mutex_* names.
 *
 * NOTE(review): this extraction has the original file's line numbers
 * fused into each line and some lines dropped; the stray leading
 * numbers below are extraction damage, not code.
 */
77 static long mutex_handoff(struct pthread *, struct pthread_mutex *);
78 static int mutex_self_trylock(struct pthread *, pthread_mutex_t);
79 static int mutex_self_lock(struct pthread *, pthread_mutex_t,
80 const struct timespec *abstime);
81 static int mutex_unlock_common(pthread_mutex_t *, int);
82 static void mutex_priority_adjust(struct pthread *, pthread_mutex_t);
83 static void mutex_rescan_owned (struct pthread *, struct pthread *,
84 struct pthread_mutex *);
86 static pthread_t mutex_queue_deq(pthread_mutex_t);
88 static void mutex_queue_remove(pthread_mutex_t, pthread_t);
89 static void mutex_queue_enq(pthread_mutex_t, pthread_t);
/* Application entry points (double underscore) for the lock family: */
91 __weak_reference(__pthread_mutex_init, pthread_mutex_init);
92 __weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
93 __weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
94 __weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);
96 /* Single underscore versions provided for libc internal usage: */
97 /* No difference between libc and application usage of these: */
98 __weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
99 __weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);
/*
 * Common mutex creation routine backing both the libc-internal and
 * application pthread_mutex_init() entry points.
 *
 * Validates the attributes (type and protocol ranges), allocates and
 * initializes a struct pthread_mutex, and stores it through *mutex.
 * `private` selects delete-safe libc-internal behavior (sets
 * MUTEX_FLAGS_PRIVATE — presumably; the flag assignment line is not
 * visible in this extraction).
 *
 * NOTE(review): this chunk is garbled — original line numbers are fused
 * into the text and many lines (braces, returns, some statements) are
 * missing. Do not treat the visible body as complete.
 */
102 mutex_init(pthread_mutex_t *mutex,
103 const pthread_mutexattr_t *mutex_attr, int private)
105 struct pthread_mutex *pmutex;
106 enum pthread_mutextype type;
112 /* Check if default mutex attributes: */
113 if (mutex_attr == NULL || *mutex_attr == NULL) {
114 /* Default to a (error checking) POSIX mutex: */
115 type = PTHREAD_MUTEX_ERRORCHECK;
116 protocol = PTHREAD_PRIO_NONE;
117 ceiling = THR_MAX_PRIORITY;
121 /* Check mutex type: */
122 else if (((*mutex_attr)->m_type < PTHREAD_MUTEX_ERRORCHECK) ||
123 ((*mutex_attr)->m_type >= MUTEX_TYPE_MAX))
124 /* Return an invalid argument error: */
127 /* Check mutex protocol: */
128 else if (((*mutex_attr)->m_protocol < PTHREAD_PRIO_NONE) ||
129 ((*mutex_attr)->m_protocol > PTHREAD_PRIO_PROTECT))
130 /* Return an invalid argument error: */
134 /* Use the requested mutex type and protocol: */
135 type = (*mutex_attr)->m_type;
136 protocol = (*mutex_attr)->m_protocol;
137 ceiling = (*mutex_attr)->m_ceiling;
138 flags = (*mutex_attr)->m_flags;
141 /* Check no errors so far: */
/* NOTE(review): allocation failure path (ENOMEM return) is not visible here. */
143 if ((pmutex = (pthread_mutex_t)
144 malloc(sizeof(struct pthread_mutex))) == NULL) {
147 _thr_umtx_init(&pmutex->m_lock);
148 /* Set the mutex flags: */
149 pmutex->m_flags = flags;
151 /* Process according to mutex type: */
153 /* case PTHREAD_MUTEX_DEFAULT: */
154 case PTHREAD_MUTEX_ERRORCHECK:
155 case PTHREAD_MUTEX_NORMAL:
156 /* Nothing to do here. */
159 /* Single UNIX Spec 2 recursive mutex: */
160 case PTHREAD_MUTEX_RECURSIVE:
161 /* Reset the mutex count: */
165 /* Trap invalid mutex types: */
167 /* Return an invalid argument error: */
172 /* Initialise the rest of the mutex: */
173 TAILQ_INIT(&pmutex->m_queue);
174 pmutex->m_flags |= MUTEX_FLAGS_INITED;
176 pmutex->m_flags |= MUTEX_FLAGS_PRIVATE;
177 pmutex->m_owner = NULL;
178 pmutex->m_type = type;
179 pmutex->m_protocol = protocol;
180 pmutex->m_refcount = 0;
/* Priority-protect mutexes start at their ceiling priority. */
181 if (protocol == PTHREAD_PRIO_PROTECT)
182 pmutex->m_prio = ceiling;
185 pmutex->m_saved_prio = 0;
186 MUTEX_INIT_LINK(pmutex);
189 /* Free the mutex lock structure: */
190 MUTEX_DESTROY(pmutex);
195 /* Return the completion status: */
200 init_static(struct pthread *thread, pthread_mutex_t *mutex)
204 THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);
207 ret = mutex_init(mutex, NULL, 0);
211 THR_LOCK_RELEASE(thread, &_mutex_static_lock);
217 init_static_private(struct pthread *thread, pthread_mutex_t *mutex)
221 THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);
224 ret = mutex_init(mutex, NULL, 1);
228 THR_LOCK_RELEASE(thread, &_mutex_static_lock);
234 _pthread_mutex_init(pthread_mutex_t *mutex,
235 const pthread_mutexattr_t *mutex_attr)
237 return mutex_init(mutex, mutex_attr, 1);
241 __pthread_mutex_init(pthread_mutex_t *mutex,
242 const pthread_mutexattr_t *mutex_attr)
244 return mutex_init(mutex, mutex_attr, 0);
248 _mutex_reinit(pthread_mutex_t *mutex)
250 _thr_umtx_init(&(*mutex)->m_lock);
251 TAILQ_INIT(&(*mutex)->m_queue);
252 MUTEX_INIT_LINK(*mutex);
253 (*mutex)->m_owner = NULL;
254 (*mutex)->m_count = 0;
255 (*mutex)->m_refcount = 0;
256 (*mutex)->m_prio = 0;
257 (*mutex)->m_saved_prio = 0;
/*
 * Fix up the mutexes owned by the current thread in the child of a
 * fork(): simple (PRIO_NONE) mutexes get their lock word forced to
 * UMTX_LOCKED, and priority mutexes get a fresh umtx and an emptied
 * waiter queue (waiters from the parent's other threads do not exist
 * in the child).
 *
 * NOTE(review): extraction is garbled (fused line numbers, dropped
 * lines); the tail of the second loop is not visible here.
 */
262 _mutex_fork(struct pthread *curthread)
264 struct pthread_mutex *m;
266 TAILQ_FOREACH(m, &curthread->mutexq, m_qe)
267 m->m_lock = UMTX_LOCKED;
269 /* Clear contender for priority mutexes */
270 TAILQ_FOREACH(m, &curthread->pri_mutexq, m_qe) {
271 /* clear another thread locked us */
272 _thr_umtx_init(&m->m_lock);
273 TAILQ_INIT(&m->m_queue);
/*
 * pthread_mutex_destroy(): tear down a mutex that is not in use.
 *
 * Tries the structure lock exactly once (an already-held lock means the
 * mutex is busy), then rejects destruction if the mutex has an owner,
 * queued waiters, or outstanding condition-variable references
 * (m_refcount). On success the caller's pointer is cleared and the
 * storage freed.
 *
 * NOTE(review): extraction is garbled (fused line numbers, dropped
 * lines) — the EBUSY/EINVAL returns and the free() call are among the
 * missing lines.
 */
278 _pthread_mutex_destroy(pthread_mutex_t *mutex)
280 struct pthread *curthread = _get_curthread();
284 if (mutex == NULL || *mutex == NULL)
288 * Try to lock the mutex structure, we only need to
289 * try once, if failed, the mutex is in used.
291 ret = THR_UMTX_TRYLOCK(curthread, &(*mutex)->m_lock);
296 * Check mutex other fields to see if this mutex is
297 * in use. Mostly for prority mutex types, or there
298 * are condition variables referencing it.
300 if (((*mutex)->m_owner != NULL) ||
301 (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) ||
302 ((*mutex)->m_refcount != 0)) {
/* Busy: drop the structure lock and refuse to destroy. */
303 THR_UMTX_UNLOCK(curthread, &(*mutex)->m_lock);
307 * Save a pointer to the mutex so it can be free'd
308 * and set the caller's pointer to NULL:
313 /* Unlock the mutex structure: */
314 _thr_umtx_unlock(&m->m_lock, curthread->tid);
317 * Free the memory allocated for the mutex
320 MUTEX_ASSERT_NOT_OWNED(m);
325 /* Return the completion status: */
/*
 * Common non-blocking acquire path for pthread_mutex_trylock().
 *
 * Fast path: PRIO_NONE mutexes are taken with a raw umtx trylock and
 * appended to curthread->mutexq on success; recursive acquisition is
 * routed through mutex_self_trylock(). Priority mutexes take the
 * structure lock and handle PRIO_INHERIT / PRIO_PROTECT, tracking
 * priority_mutex_count and the inherited/active priorities, and
 * appending to curthread->pri_mutexq.
 *
 * NOTE(review): extraction is garbled (fused line numbers, dropped
 * lines) — EBUSY/EINVAL returns, THR_LOCK(curthread) acquisitions
 * paired with the visible THR_UNLOCK()s, and several closing braces
 * are among the missing lines.
 */
330 mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
334 THR_ASSERT((mutex != NULL) && (*mutex != NULL),
335 "Uninitialized mutex in mutex_trylock_common");
337 /* Short cut for simple mutex. */
338 if ((*mutex)->m_protocol == PTHREAD_PRIO_NONE) {
339 ret = THR_UMTX_TRYLOCK(curthread, &(*mutex)->m_lock);
341 (*mutex)->m_owner = curthread;
342 /* Add to the list of owned mutexes: */
343 MUTEX_ASSERT_NOT_OWNED(*mutex);
344 TAILQ_INSERT_TAIL(&curthread->mutexq,
346 } else if ((*mutex)->m_owner == curthread) {
347 ret = mutex_self_trylock(curthread, *mutex);
353 /* Code for priority mutex */
355 /* Lock the mutex structure: */
356 THR_LOCK_ACQUIRE(curthread, &(*mutex)->m_lock);
359 * If the mutex was statically allocated, properly
360 * initialize the tail queue.
362 if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
363 TAILQ_INIT(&(*mutex)->m_queue);
364 MUTEX_INIT_LINK(*mutex);
365 (*mutex)->m_flags |= MUTEX_FLAGS_INITED;
368 /* Process according to mutex type: */
369 switch ((*mutex)->m_protocol) {
370 /* POSIX priority inheritence mutex: */
371 case PTHREAD_PRIO_INHERIT:
372 /* Check if this mutex is not locked: */
373 if ((*mutex)->m_owner == NULL) {
374 /* Lock the mutex for the running thread: */
375 (*mutex)->m_owner = curthread;
378 /* Track number of priority mutexes owned: */
379 curthread->priority_mutex_count++;
382 * The mutex takes on the attributes of the
383 * running thread when there are no waiters.
385 (*mutex)->m_prio = curthread->active_priority;
386 (*mutex)->m_saved_prio =
387 curthread->inherited_priority;
388 curthread->inherited_priority = (*mutex)->m_prio;
389 THR_UNLOCK(curthread);
391 /* Add to the list of owned mutexes: */
392 MUTEX_ASSERT_NOT_OWNED(*mutex);
393 TAILQ_INSERT_TAIL(&curthread->pri_mutexq,
395 } else if ((*mutex)->m_owner == curthread)
396 ret = mutex_self_trylock(curthread, *mutex);
398 /* Return a busy error: */
402 /* POSIX priority protection mutex: */
403 case PTHREAD_PRIO_PROTECT:
404 /* Check for a priority ceiling violation: */
405 if (curthread->active_priority > (*mutex)->m_prio)
408 /* Check if this mutex is not locked: */
409 else if ((*mutex)->m_owner == NULL) {
410 /* Lock the mutex for the running thread: */
411 (*mutex)->m_owner = curthread;
414 /* Track number of priority mutexes owned: */
415 curthread->priority_mutex_count++;
418 * The running thread inherits the ceiling
419 * priority of the mutex and executes at that
422 curthread->active_priority = (*mutex)->m_prio;
423 (*mutex)->m_saved_prio =
424 curthread->inherited_priority;
425 curthread->inherited_priority =
427 THR_UNLOCK(curthread);
428 /* Add to the list of owned mutexes: */
429 MUTEX_ASSERT_NOT_OWNED(*mutex);
430 TAILQ_INSERT_TAIL(&curthread->pri_mutexq,
432 } else if ((*mutex)->m_owner == curthread)
433 ret = mutex_self_trylock(curthread, *mutex);
435 /* Return a busy error: */
439 /* Trap invalid mutex types: */
441 /* Return an invalid argument error: */
446 /* Unlock the mutex structure: */
447 THR_LOCK_RELEASE(curthread, &(*mutex)->m_lock);
449 /* Return the completion status: */
454 __pthread_mutex_trylock(pthread_mutex_t *mutex)
456 struct pthread *curthread = _get_curthread();
460 * If the mutex is statically initialized, perform the dynamic
463 if ((*mutex != NULL) ||
464 ((ret = init_static(curthread, mutex)) == 0))
465 ret = mutex_trylock_common(curthread, mutex);
471 _pthread_mutex_trylock(pthread_mutex_t *mutex)
473 struct pthread *curthread = _get_curthread();
477 * If the mutex is statically initialized, perform the dynamic
478 * initialization marking the mutex private (delete safe):
480 if ((*mutex != NULL) ||
481 ((ret = init_static_private(curthread, mutex)) == 0))
482 ret = mutex_trylock_common(curthread, mutex);
/*
 * Common blocking acquire path for pthread_mutex_lock() and
 * pthread_mutex_timedlock() (abstime == NULL means block forever).
 *
 * Validates abstime, then: PRIO_NONE mutexes are taken via umtx
 * trylock / (timed)lock, with recursive acquisition routed through
 * mutex_self_lock(). Priority mutexes loop until ownership is gained
 * (or an error/timeout occurs), joining the mutex waiter queue,
 * sleeping on curthread->cycle with _thr_umtx_wait(), and re-checking
 * after wakeup; PRIO_INHERIT propagates priority via
 * mutex_priority_adjust(), PRIO_PROTECT enforces the ceiling and
 * reports ceiling violations through curthread->error.
 *
 * NOTE(review): extraction is garbled (fused line numbers, dropped
 * lines) — error returns (EINVAL/EDEADLK), several THR_LOCK()
 * acquisitions paired with visible THR_UNLOCK()s, the untimed wait
 * branches, and many closing braces are among the missing lines.
 */
488 mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m,
489 const struct timespec * abstime)
491 struct timespec ts, ts2;
495 THR_ASSERT((m != NULL) && (*m != NULL),
496 "Uninitialized mutex in mutex_lock_common");
/* Reject malformed timeouts up front (POSIX: EINVAL). */
498 if (abstime != NULL && (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
499 abstime->tv_nsec >= 1000000000))
502 /* Short cut for simple mutex. */
504 if ((*m)->m_protocol == PTHREAD_PRIO_NONE) {
505 /* Default POSIX mutex: */
506 ret = THR_UMTX_TRYLOCK(curthread, &(*m)->m_lock);
508 (*m)->m_owner = curthread;
509 /* Add to the list of owned mutexes: */
510 MUTEX_ASSERT_NOT_OWNED(*m);
511 TAILQ_INSERT_TAIL(&curthread->mutexq,
513 } else if ((*m)->m_owner == curthread) {
514 ret = mutex_self_lock(curthread, *m, abstime);
516 if (abstime == NULL) {
517 THR_UMTX_LOCK(curthread, &(*m)->m_lock);
/* Timed path: convert absolute deadline to a relative wait. */
520 clock_gettime(CLOCK_REALTIME, &ts);
521 TIMESPEC_SUB(&ts2, abstime, &ts);
522 ret = THR_UMTX_TIMEDLOCK(curthread,
523 &(*m)->m_lock, &ts2);
525 * Timed out wait is not restarted if
526 * it was interrupted, not worth to do it.
532 (*m)->m_owner = curthread;
533 /* Add to the list of owned mutexes: */
534 MUTEX_ASSERT_NOT_OWNED(*m);
535 TAILQ_INSERT_TAIL(&curthread->mutexq,
542 /* Code for priority mutex */
545 * Enter a loop waiting to become the mutex owner. We need a
546 * loop in case the waiting thread is interrupted by a signal
547 * to execute a signal handler. It is not (currently) possible
548 * to remain in the waiting queue while running a handler.
549 * Instead, the thread is interrupted and backed out of the
550 * waiting queue prior to executing the signal handler.
553 /* Lock the mutex structure: */
554 THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
557 * If the mutex was statically allocated, properly
558 * initialize the tail queue.
560 if (((*m)->m_flags & MUTEX_FLAGS_INITED) == 0) {
561 TAILQ_INIT(&(*m)->m_queue);
562 (*m)->m_flags |= MUTEX_FLAGS_INITED;
566 /* Process according to mutex type: */
567 switch ((*m)->m_protocol) {
568 /* POSIX priority inheritence mutex: */
569 case PTHREAD_PRIO_INHERIT:
570 /* Check if this mutex is not locked: */
571 if ((*m)->m_owner == NULL) {
572 /* Lock the mutex for this thread: */
573 (*m)->m_owner = curthread;
576 /* Track number of priority mutexes owned: */
577 curthread->priority_mutex_count++;
580 * The mutex takes on attributes of the
581 * running thread when there are no waiters.
582 * Make sure the thread's scheduling lock is
583 * held while priorities are adjusted.
585 (*m)->m_prio = curthread->active_priority;
587 curthread->inherited_priority;
588 curthread->inherited_priority = (*m)->m_prio;
589 THR_UNLOCK(curthread);
591 /* Add to the list of owned mutexes: */
592 MUTEX_ASSERT_NOT_OWNED(*m);
593 TAILQ_INSERT_TAIL(&curthread->pri_mutexq,
596 /* Unlock the mutex structure: */
597 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
598 } else if ((*m)->m_owner == curthread) {
599 ret = mutex_self_lock(curthread, *m, abstime);
601 /* Unlock the mutex structure: */
602 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
605 * Join the queue of threads waiting to lock
606 * the mutex and save a pointer to the mutex.
608 mutex_queue_enq(*m, curthread);
609 curthread->data.mutex = *m;
611 if (curthread->active_priority > (*m)->m_prio)
612 /* Adjust priorities: */
613 mutex_priority_adjust(curthread, *m);
/* Snapshot the wakeup generation before sleeping. */
616 cycle = curthread->cycle;
617 THR_UNLOCK(curthread);
619 /* Unlock the mutex structure: */
620 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
622 clock_gettime(CLOCK_REALTIME, &ts);
623 TIMESPEC_SUB(&ts2, abstime, &ts);
624 ret = _thr_umtx_wait(&curthread->cycle, cycle,
625 &ts2, CLOCK_REALTIME);
/* If still queued after wakeup, back out of the waiter queue. */
629 if (THR_IN_MUTEXQ(curthread)) {
630 THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
631 mutex_queue_remove(*m, curthread);
632 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
635 * Only clear these after assuring the
636 * thread is dequeued.
638 curthread->data.mutex = NULL;
642 /* POSIX priority protection mutex: */
643 case PTHREAD_PRIO_PROTECT:
644 /* Check for a priority ceiling violation: */
645 if (curthread->active_priority > (*m)->m_prio) {
646 /* Unlock the mutex structure: */
647 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
650 /* Check if this mutex is not locked: */
651 else if ((*m)->m_owner == NULL) {
653 * Lock the mutex for the running
656 (*m)->m_owner = curthread;
659 /* Track number of priority mutexes owned: */
660 curthread->priority_mutex_count++;
663 * The running thread inherits the ceiling
664 * priority of the mutex and executes at that
665 * priority. Make sure the thread's
666 * scheduling lock is held while priorities
669 curthread->active_priority = (*m)->m_prio;
671 curthread->inherited_priority;
672 curthread->inherited_priority = (*m)->m_prio;
673 THR_UNLOCK(curthread);
675 /* Add to the list of owned mutexes: */
676 MUTEX_ASSERT_NOT_OWNED(*m);
677 TAILQ_INSERT_TAIL(&curthread->pri_mutexq,
680 /* Unlock the mutex structure: */
681 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
682 } else if ((*m)->m_owner == curthread) {
683 ret = mutex_self_lock(curthread, *m, abstime);
685 /* Unlock the mutex structure: */
686 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
689 * Join the queue of threads waiting to lock
690 * the mutex and save a pointer to the mutex.
692 mutex_queue_enq(*m, curthread);
693 curthread->data.mutex = *m;
695 /* Clear any previous error: */
696 curthread->error = 0;
699 cycle = curthread->cycle;
700 THR_UNLOCK(curthread);
702 /* Unlock the mutex structure: */
703 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
705 clock_gettime(CLOCK_REALTIME, &ts);
706 TIMESPEC_SUB(&ts2, abstime, &ts);
707 ret = _thr_umtx_wait(&curthread->cycle, cycle,
708 &ts2, CLOCK_REALTIME);
712 curthread->data.mutex = NULL;
713 if (THR_IN_MUTEXQ(curthread)) {
714 THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
715 mutex_queue_remove(*m, curthread);
716 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
719 * Only clear these after assuring the
720 * thread is dequeued.
722 curthread->data.mutex = NULL;
725 * The threads priority may have changed while
726 * waiting for the mutex causing a ceiling
729 ret = curthread->error;
730 curthread->error = 0;
734 /* Trap invalid mutex types: */
736 /* Unlock the mutex structure: */
737 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
739 /* Return an invalid argument error: */
/* Loop until we own the mutex or an error/timeout occurred. */
744 } while (((*m)->m_owner != curthread) && (ret == 0));
746 /* Return the completion status: */
/*
 * Application pthread_mutex_lock() entry point: dynamically initialize
 * a statically allocated mutex on first use (non-private), then block
 * until acquired via mutex_lock_common() with no timeout.
 *
 * NOTE(review): extraction is garbled — the declarations/initialization
 * between the visible lines (e.g. `int ret`) are missing.
 */
751 __pthread_mutex_lock(pthread_mutex_t *m)
753 struct pthread *curthread;
758 curthread = _get_curthread();
761 * If the mutex is statically initialized, perform the dynamic
764 if ((*m != NULL) || ((ret = init_static(curthread, m)) == 0))
765 ret = mutex_lock_common(curthread, m, NULL);
/* Also exported under the libc-internal alias _thr_mutex_lock. */
770 __strong_reference(__pthread_mutex_lock, _thr_mutex_lock);
/*
 * libc-internal pthread_mutex_lock() entry point: like the application
 * version but first-use initialization marks the mutex private
 * (delete safe).
 *
 * NOTE(review): extraction is garbled — the `*m != NULL` half of the
 * condition and the return are among the missing lines.
 */
773 _pthread_mutex_lock(pthread_mutex_t *m)
775 struct pthread *curthread;
780 curthread = _get_curthread();
783 * If the mutex is statically initialized, perform the dynamic
784 * initialization marking it private (delete safe):
787 ((ret = init_static_private(curthread, m)) == 0))
788 ret = mutex_lock_common(curthread, m, NULL);
/*
 * Application pthread_mutex_timedlock() entry point: first-use dynamic
 * initialization (non-private), then mutex_lock_common() with the
 * caller's absolute CLOCK_REALTIME deadline.
 *
 * NOTE(review): extraction is garbled — declarations and the return
 * statement are among the missing lines.
 */
794 __pthread_mutex_timedlock(pthread_mutex_t *m,
795 const struct timespec *abs_timeout)
797 struct pthread *curthread;
802 curthread = _get_curthread();
805 * If the mutex is statically initialized, perform the dynamic
808 if ((*m != NULL) || ((ret = init_static(curthread, m)) == 0))
809 ret = mutex_lock_common(curthread, m, abs_timeout);
/*
 * libc-internal pthread_mutex_timedlock() entry point: like the
 * application version but first-use initialization marks the mutex
 * private (delete safe).
 *
 * NOTE(review): extraction is garbled — the `*m != NULL` half of the
 * condition and the return are among the missing lines.
 */
815 _pthread_mutex_timedlock(pthread_mutex_t *m,
816 const struct timespec *abs_timeout)
818 struct pthread *curthread;
823 curthread = _get_curthread();
826 * If the mutex is statically initialized, perform the dynamic
827 * initialization marking it private (delete safe):
830 ((ret = init_static_private(curthread, m)) == 0))
831 ret = mutex_lock_common(curthread, m, abs_timeout);
837 _pthread_mutex_unlock(pthread_mutex_t *m)
839 return (mutex_unlock_common(m, /* add reference */ 0));
842 __strong_reference(_pthread_mutex_unlock, _thr_mutex_unlock);
845 _mutex_cv_unlock(pthread_mutex_t *m)
847 return (mutex_unlock_common(m, /* add reference */ 1));
/*
 * Reacquire a mutex after a condition-variable wait. On a successful
 * lock the reference taken by _mutex_cv_unlock() is presumably dropped
 * in the (not visible) body of the if — TODO confirm against the full
 * source; this extraction is garbled with fused line numbers and
 * missing lines.
 */
851 _mutex_cv_lock(pthread_mutex_t *m)
853 struct pthread *curthread;
856 curthread = _get_curthread();
857 if ((ret = _pthread_mutex_lock(m)) == 0)
/*
 * Handle a trylock by the thread that already owns the mutex,
 * dispatching on m_type: error-checking/normal mutexes refuse
 * (EBUSY/EDEADLK returns not visible here), recursive mutexes bump
 * m_count with overflow protection.
 *
 * NOTE(review): extraction is garbled — the switch statement line,
 * return values and closing braces are among the missing lines.
 */
863 mutex_self_trylock(struct pthread *curthread, pthread_mutex_t m)
868 /* case PTHREAD_MUTEX_DEFAULT: */
869 case PTHREAD_MUTEX_ERRORCHECK:
870 case PTHREAD_MUTEX_NORMAL:
874 case PTHREAD_MUTEX_RECURSIVE:
875 /* Increment the lock count: */
/* Guard against m_count overflow before incrementing. */
876 if (m->m_count + 1 > 0) {
884 /* Trap invalid mutex types; */
/*
 * Handle a blocking lock by the thread that already owns the mutex.
 *
 * ERRORCHECK: with a timeout, sleep until the deadline and then report
 * (POSIX mandates EDEADLK for recursive locking). NORMAL: deliberately
 * deadlock — sleep forever (or until the deadline) via nanosleep,
 * dropping the structure lock first for priority mutexes. RECURSIVE:
 * bump m_count with overflow protection.
 *
 * NOTE(review): extraction is garbled — the switch line, error returns,
 * and the infinite-sleep loop around the bare nanosleep are among the
 * missing lines.
 */
892 mutex_self_lock(struct pthread *curthread, pthread_mutex_t m,
893 const struct timespec *abstime)
895 struct timespec ts1, ts2;
899 /* case PTHREAD_MUTEX_DEFAULT: */
900 case PTHREAD_MUTEX_ERRORCHECK:
/* Timed variant: sleep out the remaining time, then fail. */
902 clock_gettime(CLOCK_REALTIME, &ts1);
903 TIMESPEC_SUB(&ts2, abstime, &ts1);
904 __sys_nanosleep(&ts2, NULL);
908 * POSIX specifies that mutexes should return
909 * EDEADLK if a recursive lock is detected.
915 case PTHREAD_MUTEX_NORMAL:
917 * What SS2 define as a 'normal' mutex. Intentionally
918 * deadlock on attempts to get a lock you already own.
921 if (m->m_protocol != PTHREAD_PRIO_NONE) {
922 /* Unlock the mutex structure: */
923 THR_LOCK_RELEASE(curthread, &m->m_lock);
926 clock_gettime(CLOCK_REALTIME, &ts1);
927 TIMESPEC_SUB(&ts2, abstime, &ts1);
928 __sys_nanosleep(&ts2, NULL);
934 __sys_nanosleep(&ts1, NULL);
938 case PTHREAD_MUTEX_RECURSIVE:
939 /* Increment the lock count: */
940 if (m->m_count + 1 > 0) {
948 /* Trap invalid mutex types; */
/*
 * Common release path for pthread_mutex_unlock() and _mutex_cv_unlock()
 * (add_reference != 0 bumps m_refcount so a condition-variable waiter
 * can safely reacquire the mutex later).
 *
 * Fast path: PRIO_NONE mutexes check ownership (EPERM return not
 * visible), unwind one recursion level if recursive, otherwise clear
 * the owner, unlink from curthread->mutexq and release the umtx word.
 * Priority mutexes additionally restore the thread's inherited/active
 * priorities, decrement priority_mutex_count, and hand the mutex
 * directly to the next waiter via mutex_handoff().
 *
 * NOTE(review): extraction is garbled — error returns, the recursive
 * early-outs, and several closing braces are among the missing lines.
 */
956 mutex_unlock_common(pthread_mutex_t *m, int add_reference)
958 struct pthread *curthread = _get_curthread();
962 if (m == NULL || *m == NULL)
965 /* Short cut for simple mutex. */
967 if ((*m)->m_protocol == PTHREAD_PRIO_NONE) {
969 * Check if the running thread is not the owner of the
972 if (__predict_false((*m)->m_owner != curthread)) {
974 } else if (__predict_false(
975 (*m)->m_type == PTHREAD_MUTEX_RECURSIVE &&
976 (*m)->m_count > 0)) {
977 /* Decrement the count: */
983 * Clear the count in case this is a recursive
987 (*m)->m_owner = NULL;
988 /* Remove the mutex from the threads queue. */
989 MUTEX_ASSERT_IS_OWNED(*m);
990 TAILQ_REMOVE(&curthread->mutexq, (*m), m_qe);
995 * Hand off the mutex to the next waiting
998 _thr_umtx_unlock(&(*m)->m_lock, curthread->tid);
1003 /* Code for priority mutex */
1005 /* Lock the mutex structure: */
1006 THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
1008 /* Process according to mutex type: */
1009 switch ((*m)->m_protocol) {
1010 /* POSIX priority inheritence mutex: */
1011 case PTHREAD_PRIO_INHERIT:
1013 * Check if the running thread is not the owner of the
1016 if ((*m)->m_owner != curthread)
1018 else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
1019 ((*m)->m_count > 0))
1020 /* Decrement the count: */
1024 * Clear the count in case this is recursive
1030 * Restore the threads inherited priority and
1031 * recompute the active priority (being careful
1032 * not to override changes in the threads base
1033 * priority subsequent to locking the mutex).
1035 THR_LOCK(curthread);
1036 curthread->inherited_priority =
1038 curthread->active_priority =
1039 MAX(curthread->inherited_priority,
1040 curthread->base_priority);
1043 * This thread now owns one less priority mutex.
1045 curthread->priority_mutex_count--;
1046 THR_UNLOCK(curthread);
1048 /* Remove the mutex from the threads queue. */
1049 MUTEX_ASSERT_IS_OWNED(*m);
1050 TAILQ_REMOVE(&(*m)->m_owner->pri_mutexq,
1052 MUTEX_INIT_LINK(*m);
1055 * Hand off the mutex to the next waiting
1058 tid = mutex_handoff(curthread, *m);
1062 /* POSIX priority ceiling mutex: */
1063 case PTHREAD_PRIO_PROTECT:
1065 * Check if the running thread is not the owner of the
1068 if ((*m)->m_owner != curthread)
1070 else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
1071 ((*m)->m_count > 0))
1072 /* Decrement the count: */
1076 * Clear the count in case this is a recursive
1082 * Restore the threads inherited priority and
1083 * recompute the active priority (being careful
1084 * not to override changes in the threads base
1085 * priority subsequent to locking the mutex).
1087 THR_LOCK(curthread);
1088 curthread->inherited_priority =
1090 curthread->active_priority =
1091 MAX(curthread->inherited_priority,
1092 curthread->base_priority);
1095 * This thread now owns one less priority mutex.
1097 curthread->priority_mutex_count--;
1098 THR_UNLOCK(curthread);
1100 /* Remove the mutex from the threads queue. */
1101 MUTEX_ASSERT_IS_OWNED(*m);
1102 TAILQ_REMOVE(&(*m)->m_owner->pri_mutexq,
1104 MUTEX_INIT_LINK(*m);
1107 * Hand off the mutex to the next waiting
1110 tid = mutex_handoff(curthread, *m);
1114 /* Trap invalid mutex types: */
1116 /* Return an invalid argument error: */
1121 if ((ret == 0) && (add_reference != 0))
1122 /* Increment the reference count: */
1125 /* Unlock the mutex structure: */
1126 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
1129 /* Return the completion status: */
1135 * This function is called when a change in base priority occurs for
1136 * a thread that is holding or waiting for a priority protection or
1137 * inheritence mutex. A change in a threads base priority can effect
1138 * changes to active priorities of other threads and to the ordering
1139 * of mutex locking by waiting threads.
1141 * This must be called without the target thread's scheduling lock held.
/*
 * NOTE(review): extraction is garbled — original line numbers are fused
 * into the text and lines (signature tail, returns, braces) are
 * missing. Two phases are visible: (1) rescan the mutexes `pthread`
 * owns so their priorities reflect its new base priority; (2) if
 * propagate_prio is set and the thread is blocked on a mutex, requeue
 * it in priority order and propagate for PRIO_INHERIT.
 */
1144 _mutex_notify_priochange(struct pthread *curthread, struct pthread *pthread,
1147 struct pthread_mutex *m;
1149 /* Adjust the priorites of any owned priority mutexes: */
1150 if (pthread->priority_mutex_count > 0) {
1152 * Rescan the mutexes owned by this thread and correct
1153 * their priorities to account for this threads change
1154 * in priority. This has the side effect of changing
1155 * the threads active priority.
1157 * Be sure to lock the first mutex in the list of owned
1158 * mutexes. This acts as a barrier against another
1159 * simultaneous call to change the threads priority
1160 * and from the owning thread releasing the mutex.
1162 m = TAILQ_FIRST(&pthread->pri_mutexq);
1164 THR_LOCK_ACQUIRE(curthread, &m->m_lock);
1166 * Make sure the thread still owns the lock.
1168 if (m == TAILQ_FIRST(&pthread->pri_mutexq))
1169 mutex_rescan_owned(curthread, pthread,
1170 /* rescan all owned */ NULL);
1171 THR_LOCK_RELEASE(curthread, &m->m_lock);
1176 * If this thread is waiting on a priority inheritence mutex,
1177 * check for priority adjustments. A change in priority can
1178 * also cause a ceiling violation(*) for a thread waiting on
1179 * a priority protection mutex; we don't perform the check here
1180 * as it is done in pthread_mutex_unlock.
1182 * (*) It should be noted that a priority change to a thread
1183 * _after_ taking and owning a priority ceiling mutex
1184 * does not affect ownership of that mutex; the ceiling
1185 * priority is only checked before mutex ownership occurs.
1187 if (propagate_prio != 0) {
1189 * Lock the thread's scheduling queue. This is a bit
1190 * convoluted; the "in synchronization queue flag" can
1191 * only be cleared with both the thread's scheduling and
1192 * mutex locks held. The thread's pointer to the wanted
1193 * mutex is guaranteed to be valid during this time.
1195 THR_THREAD_LOCK(curthread, pthread);
1197 if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) == 0) ||
1198 ((m = pthread->data.mutex) == NULL))
1199 THR_THREAD_UNLOCK(curthread, pthread);
1202 * This thread is currently waiting on a mutex; unlock
1203 * the scheduling queue lock and lock the mutex. We
1204 * can't hold both at the same time because the locking
1205 * order could cause a deadlock.
1207 THR_THREAD_UNLOCK(curthread, pthread);
1208 THR_LOCK_ACQUIRE(curthread, &m->m_lock);
1211 * Check to make sure this thread is still in the
1212 * same state (the lock above can yield the CPU to
1213 * another thread or the thread may be running on
1216 if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
1217 (pthread->data.mutex == m)) {
1219 * Remove and reinsert this thread into
1220 * the list of waiting threads to preserve
1221 * decreasing priority order.
1223 mutex_queue_remove(m, pthread);
1224 mutex_queue_enq(m, pthread);
1226 if (m->m_protocol == PTHREAD_PRIO_INHERIT)
1227 /* Adjust priorities: */
1228 mutex_priority_adjust(curthread, m);
1231 /* Unlock the mutex structure: */
1232 THR_LOCK_RELEASE(curthread, &m->m_lock);
1238 * Called when a new thread is added to the mutex waiting queue or
1239 * when a threads priority changes that is already in the mutex
1242 * This must be called with the mutex locked by the current thread.
/*
 * NOTE(review): extraction is garbled — original line numbers are fused
 * into the text and lines (braces, some statements) are missing. The
 * visible logic walks the chain of PRIO_INHERIT mutexes: recompute this
 * mutex's priority from its highest-priority waiter and its owner,
 * rescan the owner's held mutexes, then follow the owner's own wait to
 * the next mutex in the chain until no priority changes propagate.
 */
1245 mutex_priority_adjust(struct pthread *curthread, pthread_mutex_t mutex)
1247 pthread_mutex_t m = mutex;
1248 struct pthread *pthread_next, *pthread = mutex->m_owner;
1249 int done, temp_prio;
1252 * Calculate the mutex priority as the maximum of the highest
1253 * active priority of any waiting threads and the owning threads
1254 * active priority(*).
1256 * (*) Because the owning threads current active priority may
1257 * reflect priority inherited from this mutex (and the mutex
1258 * priority may have changed) we must recalculate the active
1259 * priority based on the threads saved inherited priority
1260 * and its base priority.
1262 pthread_next = TAILQ_FIRST(&m->m_queue); /* should never be NULL */
1263 temp_prio = MAX(pthread_next->active_priority,
1264 MAX(m->m_saved_prio, pthread->base_priority));
1266 /* See if this mutex really needs adjusting: */
1267 if (temp_prio == m->m_prio)
1268 /* No need to propagate the priority: */
1271 /* Set new priority of the mutex: */
1272 m->m_prio = temp_prio;
1275 * Don't unlock the mutex passed in as an argument. It is
1276 * expected to be locked and unlocked by the caller.
1281 * Save the threads priority before rescanning the
1284 temp_prio = pthread->active_priority;
1287 * Fix the priorities for all mutexes held by the owning
1288 * thread since taking this mutex. This also has a
1289 * potential side-effect of changing the threads priority.
1291 * At this point the mutex is locked by the current thread.
1292 * The owning thread can't release the mutex until it is
1293 * unlocked, so we should be able to safely walk its list
1296 mutex_rescan_owned(curthread, pthread, m);
1299 * If this isn't the first time through the loop,
1300 * the current mutex needs to be unlocked.
1303 THR_LOCK_RELEASE(curthread, &m->m_lock);
1305 /* Assume we're done unless told otherwise: */
1309 * If the thread is currently waiting on a mutex, check
1310 * to see if the threads new priority has affected the
1311 * priority of the mutex.
1313 if ((temp_prio != pthread->active_priority) &&
1314 ((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
1315 ((m = pthread->data.mutex) != NULL) &&
1316 (m->m_protocol == PTHREAD_PRIO_INHERIT)) {
1317 /* Lock the mutex structure: */
1318 THR_LOCK_ACQUIRE(curthread, &m->m_lock);
1321 * Make sure the thread is still waiting on the
1324 if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
1325 (m == pthread->data.mutex)) {
1327 * The priority for this thread has changed.
1328 * Remove and reinsert this thread into the
1329 * list of waiting threads to preserve
1330 * decreasing priority order.
1332 mutex_queue_remove(m, pthread);
1333 mutex_queue_enq(m, pthread);
1336 * Grab the waiting thread with highest
1339 pthread_next = TAILQ_FIRST(&m->m_queue);
1342 * Calculate the mutex priority as the maximum
1343 * of the highest active priority of any
1344 * waiting threads and the owning threads
1347 temp_prio = MAX(pthread_next->active_priority,
1348 MAX(m->m_saved_prio,
1349 m->m_owner->base_priority));
1351 if (temp_prio != m->m_prio) {
1353 * The priority needs to be propagated
1354 * to the mutex this thread is waiting
1355 * on and up to the owner of that mutex.
1357 m->m_prio = temp_prio;
1358 pthread = m->m_owner;
1360 /* We're not done yet: */
1364 /* Only release the mutex if we're done: */
1366 THR_LOCK_RELEASE(curthread, &m->m_lock);
1368 } while (done == 0);
/*
 * Walk the mutexes owned by `pthread' (starting either at the head of
 * its owned-mutex list, or just after `mutex' when one is given) and
 * recompute the priority of each PRIO_INHERIT mutex, then update the
 * thread's inherited and active priority to the result.  `curthread'
 * is used only for lock-ownership bookkeeping in THR_THREAD_LOCK/UNLOCK.
 *
 * NOTE(review): this chunk is a lossy line-numbered extraction -- the
 * leading integer on each line is the original file's line number, and
 * blank/brace-only/`else' lines were dropped.  Code tokens are left
 * byte-identical; only comments are added.
 */
1372 mutex_rescan_owned(struct pthread *curthread, struct pthread *pthread,
1373     struct pthread_mutex *mutex)
1375 struct pthread_mutex *m;
1376 struct pthread *pthread_next;
1377 int active_prio, inherited_prio;
1380 * Start walking the mutexes the thread has taken since
1381 * taking this mutex.
1383 if (mutex == NULL) {
1385 * A null mutex means start at the beginning of the owned
1388 m = TAILQ_FIRST(&pthread->pri_mutexq);
1390 /* There is no inherited priority yet. */
/* NOTE(review): a dropped line here presumably zeroes inherited_prio
 * before the walk -- confirm against the full file. */
1394 * The caller wants to start after a specific mutex. It
1395 * is assumed that this mutex is a priority inheritence
1396 * mutex and that its priority has been correctly
1399 m = TAILQ_NEXT(mutex, m_qe);
1401 /* Start inheriting priority from the specified mutex. */
1402 inherited_prio = mutex->m_prio;
1404 active_prio = MAX(inherited_prio, pthread->base_priority);
/* For every owned mutex after the starting point: */
1406 for (; m != NULL; m = TAILQ_NEXT(m, m_qe)) {
1408 * We only want to deal with priority inheritence
1409 * mutexes. This might be optimized by only placing
1410 * priority inheritence mutexes into the owned mutex
1411 * list, but it may prove to be useful having all
1412 * owned mutexes in this list. Consider a thread
1413 * exiting while holding mutexes...
1415 if (m->m_protocol == PTHREAD_PRIO_INHERIT) {
1417 * Fix the owners saved (inherited) priority to
1418 * reflect the priority of the previous mutex.
1420 m->m_saved_prio = inherited_prio;
/* Mutex priority becomes the max of the priority inherited so
 * far and the highest waiter's active priority (if any). */
1422 if ((pthread_next = TAILQ_FIRST(&m->m_queue)) != NULL)
1423 /* Recalculate the priority of the mutex: */
1424 m->m_prio = MAX(active_prio,
1425 pthread_next->active_priority);
/* (dropped `else' line here -- the no-waiters case) */
1427 m->m_prio = active_prio;
1429 /* Recalculate new inherited and active priorities: */
1430 inherited_prio = m->m_prio;
1431 active_prio = MAX(m->m_prio, pthread->base_priority);
1436 * Fix the threads inherited priority and recalculate its
1439 pthread->inherited_priority = inherited_prio;
1440 active_prio = MAX(inherited_prio, pthread->base_priority);
1442 if (active_prio != pthread->active_priority) {
1443 /* Lock the thread's scheduling queue: */
1444 THR_THREAD_LOCK(curthread, pthread);
1446 /* if ((pthread->flags & THR_FLAGS_IN_RUNQ) == 0) */
1449 * This thread is not in a run queue. Just set
1450 * its active priority.
1452 pthread->active_priority = active_prio;
/* NOTE(review): the run-queue manipulation below is commented out
 * in this port; only the active priority is actually updated. */
1456 * This thread is in a run queue. Remove it from
1457 * the queue before changing its priority:
1459 /* THR_RUNQ_REMOVE(pthread);*/
1461 * POSIX states that if the priority is being
1462 * lowered, the thread must be inserted at the
1463 * head of the queue for its priority if it owns
1464 * any priority protection or inheritence mutexes.
1466 if ((active_prio < pthread->active_priority) &&
1467 (pthread->priority_mutex_count > 0)) {
1468 /* Set the new active priority. */
1469 pthread->active_priority = active_prio;
1470 /* THR_RUNQ_INSERT_HEAD(pthread); */
/* (dropped `else' line here) */
1472 /* Set the new active priority. */
1473 pthread->active_priority = active_prio;
1474 /* THR_RUNQ_INSERT_TAIL(pthread);*/
1477 THR_THREAD_UNLOCK(curthread, pthread);
1482 _mutex_unlock_private(pthread_t pthread)
1484 struct pthread_mutex *m, *m_next;
1486 for (m = TAILQ_FIRST(&pthread->pri_mutexq); m != NULL; m = m_next) {
1487 m_next = TAILQ_NEXT(m, m_qe);
1488 if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
1489 pthread_mutex_unlock(&m);
1494 * Dequeue a waiting thread from the head of a mutex queue in descending priority order.
1497 * In order to properly dequeue a thread from the mutex queue and
1498 * make it runnable without the possibility of errant wakeups, it
1499 * is necessary to lock the thread's scheduling queue while also
1500 * holding the mutex lock.
/*
 * Hand a released mutex off to the highest-priority waiter.  Dequeues
 * threads from the wait queue (kept in descending priority order)
 * until one can validly take ownership, performs the protocol-specific
 * priority bookkeeping (PRIO_NONE / PRIO_INHERIT / PRIO_PROTECT),
 * wakes the new owner via the umtx on its `cycle' word, and leaves
 * m_owner NULL when no eligible waiter remains.  The caller must hold
 * the mutex lock; each candidate's scheduling lock is taken in turn.
 *
 * NOTE(review): lossy line-numbered extraction -- leading integers are
 * original file line numbers; blank/brace/`break'/return lines were
 * dropped.  Code tokens untouched; comments only.
 */
1503 mutex_handoff(struct pthread *curthread, struct pthread_mutex *mutex)
1505 struct pthread *pthread;
1508 /* Keep dequeueing until we find a valid thread: */
1509 mutex->m_owner = NULL;
1510 pthread = TAILQ_FIRST(&mutex->m_queue);
1511 while (pthread != NULL) {
1512 /* Take the thread's scheduling lock: */
1513 THR_THREAD_LOCK(curthread, pthread);
1515 /* Remove the thread from the mutex queue: */
1516 TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
1517 pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;
1520 * Only exit the loop if the thread hasn't been
1523 switch (mutex->m_protocol) {
1524 case PTHREAD_PRIO_NONE:
1526 * Assign the new owner and add the mutex to the
1527 * thread's list of owned mutexes.
1529 mutex->m_owner = pthread;
1530 TAILQ_INSERT_TAIL(&pthread->pri_mutexq, mutex, m_qe);
/* (dropped `break' presumably here -- end of PRIO_NONE case) */
1533 case PTHREAD_PRIO_INHERIT:
1535 * Assign the new owner and add the mutex to the
1536 * thread's list of owned mutexes.
1538 mutex->m_owner = pthread;
1539 TAILQ_INSERT_TAIL(&pthread->pri_mutexq, mutex, m_qe);
1541 /* Track number of priority mutexes owned: */
1542 pthread->priority_mutex_count++;
1545 * Set the priority of the mutex. Since our waiting
1546 * threads are in descending priority order, the
1547 * priority of the mutex becomes the active priority
1548 * of the thread we just dequeued.
1550 mutex->m_prio = pthread->active_priority;
1552 /* Save the owning threads inherited priority: */
1553 mutex->m_saved_prio = pthread->inherited_priority;
1556 * The owning threads inherited priority now becomes
1557 * his active priority (the priority of the mutex).
1559 pthread->inherited_priority = mutex->m_prio;
/* (dropped `break' presumably here -- end of PRIO_INHERIT case) */
1562 case PTHREAD_PRIO_PROTECT:
/* A waiter whose active priority exceeds the ceiling cannot take
 * ownership; it is woken with EINVAL instead (m_owner stays NULL,
 * so the loop advances to the next waiter). */
1563 if (pthread->active_priority > mutex->m_prio) {
1565 * Either the mutex ceiling priority has
1566 * been lowered and/or this threads priority
1567 * has been raised subsequent to the thread
1568 * being queued on the waiting list.
1570 pthread->error = EINVAL;
/* (dropped `} else {' here -- the valid-ownership path follows) */
1574 * Assign the new owner and add the mutex
1575 * to the thread's list of owned mutexes.
1577 mutex->m_owner = pthread;
1578 TAILQ_INSERT_TAIL(&pthread->pri_mutexq,
1581 /* Track number of priority mutexes owned: */
1582 pthread->priority_mutex_count++;
1585 * Save the owning threads inherited
1588 mutex->m_saved_prio =
1589 pthread->inherited_priority;
1592 * The owning thread inherits the ceiling
1593 * priority of the mutex and executes at
1596 pthread->inherited_priority = mutex->m_prio;
1597 pthread->active_priority = mutex->m_prio;
1603 /* Make the thread runnable and unlock the scheduling queue: */
1605 _thr_umtx_wake(&pthread->cycle, 1);
1607 THR_THREAD_UNLOCK(curthread, pthread);
/* Ownership assignment above doubles as the loop-exit test. */
1608 if (mutex->m_owner == pthread)
1609 /* We're done; a valid owner was found. */
1612 /* Get the next thread from the waiting queue: */
1613 pthread = TAILQ_NEXT(pthread, sqe);
/* With no waiters left, a PRIO_INHERIT mutex reverts to "no
 * priority" (the dropped line after this presumably resets m_prio). */
1616 if ((pthread == NULL) && (mutex->m_protocol == PTHREAD_PRIO_INHERIT))
1617 /* This mutex has no priority: */
1624 * Dequeue a waiting thread from the head of a mutex queue in descending priority order.
/*
 * Pop a waiter off the head of `mutex''s wait queue (highest priority
 * first), clearing the thread's IN_SYNCQ membership flag.
 *
 * NOTE(review): the loop tail and the return statement fall in lines
 * (1635-1641) dropped by this extraction -- consult the full file for
 * the exit condition and return value.  Code tokens untouched.
 */
1628 mutex_queue_deq(struct pthread_mutex *mutex)
1632 while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) {
1633 TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
1634 pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;
1642 * Remove a waiting thread from a mutex queue in descending priority order.
1645 mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread)
1647 if ((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) {
1648 TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
1649 pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;
1654 * Enqueue a waiting thread to a queue in descending priority order.
1657 mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
1659 pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head);
1661 THR_ASSERT_NOT_IN_SYNCQ(pthread);
1663 * For the common case of all threads having equal priority,
1664 * we perform a quick check against the priority of the thread
1665 * at the tail of the queue.
1667 if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
1668 TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, sqe);
1670 tid = TAILQ_FIRST(&mutex->m_queue);
1671 while (pthread->active_priority <= tid->active_priority)
1672 tid = TAILQ_NEXT(tid, sqe);
1673 TAILQ_INSERT_BEFORE(tid, pthread, sqe);
1675 pthread->sflags |= THR_FLAGS_IN_SYNCQ;