2 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by John Birrell.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * $FreeBSD: src/lib/libc_r/uthread/uthread_mutex.c,v 1.20.2.8 2002/10/22 14:44:03 fjoe Exp $
33 * $DragonFly: src/lib/libc_r/uthread/uthread_mutex.c,v 1.4 2005/05/30 20:50:53 joerg Exp $
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <pthread.h>
#include "pthread_private.h"
/*
 * Invariant-checking helpers.  When _PTHREADS_INVARIANTS is defined the
 * macros verify the mutex's membership on its owner's mutexq via the
 * TAILQ link fields; otherwise they compile to nothing.
 */
#if defined(_PTHREADS_INVARIANTS)
#define _MUTEX_INIT_LINK(m) 		do {		\
	(m)->m_qe.tqe_prev = NULL;			\
	(m)->m_qe.tqe_next = NULL;			\
} while (0)
#define _MUTEX_ASSERT_IS_OWNED(m)	do {		\
	if ((m)->m_qe.tqe_prev == NULL)			\
		PANIC("mutex is not on list");		\
} while (0)
#define _MUTEX_ASSERT_NOT_OWNED(m)	do {		\
	if (((m)->m_qe.tqe_prev != NULL) ||		\
	    ((m)->m_qe.tqe_next != NULL))		\
		PANIC("mutex is on list");		\
} while (0)
#else
#define _MUTEX_INIT_LINK(m)
#define _MUTEX_ASSERT_IS_OWNED(m)
#define _MUTEX_ASSERT_NOT_OWNED(m)
#endif
66 static inline int mutex_self_trylock(pthread_mutex_t);
67 static inline int mutex_self_lock(pthread_mutex_t);
68 static inline int mutex_unlock_common(pthread_mutex_t *, int);
69 static void mutex_priority_adjust(pthread_mutex_t);
70 static void mutex_rescan_owned (pthread_t, pthread_mutex_t);
71 static inline pthread_t mutex_queue_deq(pthread_mutex_t);
72 static inline void mutex_queue_remove(pthread_mutex_t, pthread_t);
73 static inline void mutex_queue_enq(pthread_mutex_t, pthread_t);
76 static spinlock_t static_init_lock = _SPINLOCK_INITIALIZER;
78 /* Reinitialize a mutex to defaults. */
80 _mutex_reinit(pthread_mutex_t * mutex)
86 else if (*mutex == NULL)
87 ret = pthread_mutex_init(mutex, NULL);
90 * Initialize the mutex structure:
92 (*mutex)->m_type = PTHREAD_MUTEX_DEFAULT;
93 (*mutex)->m_protocol = PTHREAD_PRIO_NONE;
94 TAILQ_INIT(&(*mutex)->m_queue);
95 (*mutex)->m_owner = NULL;
96 (*mutex)->m_data.m_count = 0;
97 (*mutex)->m_flags &= MUTEX_FLAGS_PRIVATE;
98 (*mutex)->m_flags |= MUTEX_FLAGS_INITED;
99 (*mutex)->m_refcount = 0;
100 (*mutex)->m_prio = 0;
101 (*mutex)->m_saved_prio = 0;
102 _MUTEX_INIT_LINK(*mutex);
103 memset(&(*mutex)->lock, 0, sizeof((*mutex)->lock));
109 _pthread_mutex_init(pthread_mutex_t * mutex,
110 const pthread_mutexattr_t * mutex_attr)
112 enum pthread_mutextype type;
115 pthread_mutex_t pmutex;
121 /* Check if default mutex attributes: */
122 else if (mutex_attr == NULL || *mutex_attr == NULL) {
123 /* Default to a (error checking) POSIX mutex: */
124 type = PTHREAD_MUTEX_ERRORCHECK;
125 protocol = PTHREAD_PRIO_NONE;
126 ceiling = PTHREAD_MAX_PRIORITY;
129 /* Check mutex type: */
130 else if (((*mutex_attr)->m_type < PTHREAD_MUTEX_ERRORCHECK) ||
131 ((*mutex_attr)->m_type >= MUTEX_TYPE_MAX))
132 /* Return an invalid argument error: */
135 /* Check mutex protocol: */
136 else if (((*mutex_attr)->m_protocol < PTHREAD_PRIO_NONE) ||
137 ((*mutex_attr)->m_protocol > PTHREAD_MUTEX_RECURSIVE))
138 /* Return an invalid argument error: */
142 /* Use the requested mutex type and protocol: */
143 type = (*mutex_attr)->m_type;
144 protocol = (*mutex_attr)->m_protocol;
145 ceiling = (*mutex_attr)->m_ceiling;
148 /* Check no errors so far: */
150 if ((pmutex = (pthread_mutex_t)
151 malloc(sizeof(struct pthread_mutex))) == NULL)
154 /* Reset the mutex flags: */
157 /* Process according to mutex type: */
159 /* case PTHREAD_MUTEX_DEFAULT: */
160 case PTHREAD_MUTEX_ERRORCHECK:
161 case PTHREAD_MUTEX_NORMAL:
162 /* Nothing to do here. */
165 /* Single UNIX Spec 2 recursive mutex: */
166 case PTHREAD_MUTEX_RECURSIVE:
167 /* Reset the mutex count: */
168 pmutex->m_data.m_count = 0;
171 /* Trap invalid mutex types: */
173 /* Return an invalid argument error: */
178 /* Initialise the rest of the mutex: */
179 TAILQ_INIT(&pmutex->m_queue);
180 pmutex->m_flags |= MUTEX_FLAGS_INITED;
181 pmutex->m_owner = NULL;
182 pmutex->m_type = type;
183 pmutex->m_protocol = protocol;
184 pmutex->m_refcount = 0;
185 if (protocol == PTHREAD_PRIO_PROTECT)
186 pmutex->m_prio = ceiling;
189 pmutex->m_saved_prio = 0;
190 _MUTEX_INIT_LINK(pmutex);
191 memset(&pmutex->lock, 0, sizeof(pmutex->lock));
199 /* Return the completion status: */
204 _pthread_mutex_destroy(pthread_mutex_t * mutex)
208 if (mutex == NULL || *mutex == NULL)
211 /* Lock the mutex structure: */
212 _SPINLOCK(&(*mutex)->lock);
215 * Check to see if this mutex is in use:
217 if (((*mutex)->m_owner != NULL) ||
218 (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) ||
219 ((*mutex)->m_refcount != 0)) {
222 /* Unlock the mutex structure: */
223 _SPINUNLOCK(&(*mutex)->lock);
227 * Free the memory allocated for the mutex
230 _MUTEX_ASSERT_NOT_OWNED(*mutex);
234 * Leave the caller's pointer NULL now that
235 * the mutex has been destroyed:
241 /* Return the completion status: */
246 init_static(pthread_mutex_t *mutex)
250 _SPINLOCK(&static_init_lock);
253 ret = pthread_mutex_init(mutex, NULL);
257 _SPINUNLOCK(&static_init_lock);
263 _pthread_mutex_trylock(pthread_mutex_t * mutex)
265 struct pthread *curthread = _get_curthread();
272 * If the mutex is statically initialized, perform the dynamic
275 else if (*mutex != NULL || (ret = init_static(mutex)) == 0) {
277 * Defer signals to protect the scheduling queues from
278 * access by the signal handler:
280 _thread_kern_sig_defer();
282 /* Lock the mutex structure: */
283 _SPINLOCK(&(*mutex)->lock);
286 * If the mutex was statically allocated, properly
287 * initialize the tail queue.
289 if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
290 TAILQ_INIT(&(*mutex)->m_queue);
291 _MUTEX_INIT_LINK(*mutex);
292 (*mutex)->m_flags |= MUTEX_FLAGS_INITED;
295 /* Process according to mutex type: */
296 switch ((*mutex)->m_protocol) {
297 /* Default POSIX mutex: */
298 case PTHREAD_PRIO_NONE:
299 /* Check if this mutex is not locked: */
300 if ((*mutex)->m_owner == NULL) {
301 /* Lock the mutex for the running thread: */
302 (*mutex)->m_owner = curthread;
304 /* Add to the list of owned mutexes: */
305 _MUTEX_ASSERT_NOT_OWNED(*mutex);
306 TAILQ_INSERT_TAIL(&curthread->mutexq,
308 } else if ((*mutex)->m_owner == curthread)
309 ret = mutex_self_trylock(*mutex);
311 /* Return a busy error: */
315 /* POSIX priority inheritence mutex: */
316 case PTHREAD_PRIO_INHERIT:
317 /* Check if this mutex is not locked: */
318 if ((*mutex)->m_owner == NULL) {
319 /* Lock the mutex for the running thread: */
320 (*mutex)->m_owner = curthread;
322 /* Track number of priority mutexes owned: */
323 curthread->priority_mutex_count++;
326 * The mutex takes on the attributes of the
327 * running thread when there are no waiters.
329 (*mutex)->m_prio = curthread->active_priority;
330 (*mutex)->m_saved_prio =
331 curthread->inherited_priority;
333 /* Add to the list of owned mutexes: */
334 _MUTEX_ASSERT_NOT_OWNED(*mutex);
335 TAILQ_INSERT_TAIL(&curthread->mutexq,
337 } else if ((*mutex)->m_owner == curthread)
338 ret = mutex_self_trylock(*mutex);
340 /* Return a busy error: */
344 /* POSIX priority protection mutex: */
345 case PTHREAD_PRIO_PROTECT:
346 /* Check for a priority ceiling violation: */
347 if (curthread->active_priority > (*mutex)->m_prio)
350 /* Check if this mutex is not locked: */
351 else if ((*mutex)->m_owner == NULL) {
352 /* Lock the mutex for the running thread: */
353 (*mutex)->m_owner = curthread;
355 /* Track number of priority mutexes owned: */
356 curthread->priority_mutex_count++;
359 * The running thread inherits the ceiling
360 * priority of the mutex and executes at that
363 curthread->active_priority = (*mutex)->m_prio;
364 (*mutex)->m_saved_prio =
365 curthread->inherited_priority;
366 curthread->inherited_priority =
369 /* Add to the list of owned mutexes: */
370 _MUTEX_ASSERT_NOT_OWNED(*mutex);
371 TAILQ_INSERT_TAIL(&curthread->mutexq,
373 } else if ((*mutex)->m_owner == curthread)
374 ret = mutex_self_trylock(*mutex);
376 /* Return a busy error: */
380 /* Trap invalid mutex types: */
382 /* Return an invalid argument error: */
387 /* Unlock the mutex structure: */
388 _SPINUNLOCK(&(*mutex)->lock);
391 * Undefer and handle pending signals, yielding if
394 _thread_kern_sig_undefer();
397 /* Return the completion status: */
402 _pthread_mutex_lock(pthread_mutex_t * mutex)
404 struct pthread *curthread = _get_curthread();
407 if (_thread_initial == NULL)
414 * If the mutex is statically initialized, perform the dynamic
417 if ((*mutex == NULL) &&
418 ((ret = init_static(mutex)) != 0))
421 /* Reset the interrupted flag: */
422 curthread->interrupted = 0;
425 * Enter a loop waiting to become the mutex owner. We need a
426 * loop in case the waiting thread is interrupted by a signal
427 * to execute a signal handler. It is not (currently) possible
428 * to remain in the waiting queue while running a handler.
429 * Instead, the thread is interrupted and backed out of the
430 * waiting queue prior to executing the signal handler.
434 * Defer signals to protect the scheduling queues from
435 * access by the signal handler:
437 _thread_kern_sig_defer();
439 /* Lock the mutex structure: */
440 _SPINLOCK(&(*mutex)->lock);
443 * If the mutex was statically allocated, properly
444 * initialize the tail queue.
446 if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
447 TAILQ_INIT(&(*mutex)->m_queue);
448 (*mutex)->m_flags |= MUTEX_FLAGS_INITED;
449 _MUTEX_INIT_LINK(*mutex);
452 /* Process according to mutex type: */
453 switch ((*mutex)->m_protocol) {
454 /* Default POSIX mutex: */
455 case PTHREAD_PRIO_NONE:
456 if ((*mutex)->m_owner == NULL) {
457 /* Lock the mutex for this thread: */
458 (*mutex)->m_owner = curthread;
460 /* Add to the list of owned mutexes: */
461 _MUTEX_ASSERT_NOT_OWNED(*mutex);
462 TAILQ_INSERT_TAIL(&curthread->mutexq,
465 } else if ((*mutex)->m_owner == curthread)
466 ret = mutex_self_lock(*mutex);
469 * Join the queue of threads waiting to lock
472 mutex_queue_enq(*mutex, curthread);
475 * Keep a pointer to the mutex this thread
478 curthread->data.mutex = *mutex;
481 * Unlock the mutex structure and schedule the
484 _thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
485 &(*mutex)->lock, __FILE__, __LINE__);
487 /* Lock the mutex structure again: */
488 _SPINLOCK(&(*mutex)->lock);
492 /* POSIX priority inheritence mutex: */
493 case PTHREAD_PRIO_INHERIT:
494 /* Check if this mutex is not locked: */
495 if ((*mutex)->m_owner == NULL) {
496 /* Lock the mutex for this thread: */
497 (*mutex)->m_owner = curthread;
499 /* Track number of priority mutexes owned: */
500 curthread->priority_mutex_count++;
503 * The mutex takes on attributes of the
504 * running thread when there are no waiters.
506 (*mutex)->m_prio = curthread->active_priority;
507 (*mutex)->m_saved_prio =
508 curthread->inherited_priority;
509 curthread->inherited_priority =
512 /* Add to the list of owned mutexes: */
513 _MUTEX_ASSERT_NOT_OWNED(*mutex);
514 TAILQ_INSERT_TAIL(&curthread->mutexq,
517 } else if ((*mutex)->m_owner == curthread)
518 ret = mutex_self_lock(*mutex);
521 * Join the queue of threads waiting to lock
524 mutex_queue_enq(*mutex, curthread);
527 * Keep a pointer to the mutex this thread
530 curthread->data.mutex = *mutex;
532 if (curthread->active_priority >
534 /* Adjust priorities: */
535 mutex_priority_adjust(*mutex);
538 * Unlock the mutex structure and schedule the
541 _thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
542 &(*mutex)->lock, __FILE__, __LINE__);
544 /* Lock the mutex structure again: */
545 _SPINLOCK(&(*mutex)->lock);
549 /* POSIX priority protection mutex: */
550 case PTHREAD_PRIO_PROTECT:
551 /* Check for a priority ceiling violation: */
552 if (curthread->active_priority > (*mutex)->m_prio)
555 /* Check if this mutex is not locked: */
556 else if ((*mutex)->m_owner == NULL) {
558 * Lock the mutex for the running
561 (*mutex)->m_owner = curthread;
563 /* Track number of priority mutexes owned: */
564 curthread->priority_mutex_count++;
567 * The running thread inherits the ceiling
568 * priority of the mutex and executes at that
571 curthread->active_priority = (*mutex)->m_prio;
572 (*mutex)->m_saved_prio =
573 curthread->inherited_priority;
574 curthread->inherited_priority =
577 /* Add to the list of owned mutexes: */
578 _MUTEX_ASSERT_NOT_OWNED(*mutex);
579 TAILQ_INSERT_TAIL(&curthread->mutexq,
581 } else if ((*mutex)->m_owner == curthread)
582 ret = mutex_self_lock(*mutex);
585 * Join the queue of threads waiting to lock
588 mutex_queue_enq(*mutex, curthread);
591 * Keep a pointer to the mutex this thread
594 curthread->data.mutex = *mutex;
596 /* Clear any previous error: */
600 * Unlock the mutex structure and schedule the
603 _thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
604 &(*mutex)->lock, __FILE__, __LINE__);
606 /* Lock the mutex structure again: */
607 _SPINLOCK(&(*mutex)->lock);
610 * The threads priority may have changed while
611 * waiting for the mutex causing a ceiling
619 /* Trap invalid mutex types: */
621 /* Return an invalid argument error: */
627 * Check to see if this thread was interrupted and
628 * is still in the mutex queue of waiting threads:
630 if (curthread->interrupted != 0)
631 mutex_queue_remove(*mutex, curthread);
633 /* Unlock the mutex structure: */
634 _SPINUNLOCK(&(*mutex)->lock);
637 * Undefer and handle pending signals, yielding if
640 _thread_kern_sig_undefer();
641 } while (((*mutex)->m_owner != curthread) && (ret == 0) &&
642 (curthread->interrupted == 0));
644 if (curthread->interrupted != 0 &&
645 curthread->continuation != NULL)
646 curthread->continuation((void *) curthread);
648 /* Return the completion status: */
653 _pthread_mutex_unlock(pthread_mutex_t * mutex)
655 return (mutex_unlock_common(mutex, /* add reference */ 0));
659 _mutex_cv_unlock(pthread_mutex_t * mutex)
661 return (mutex_unlock_common(mutex, /* add reference */ 1));
665 _mutex_cv_lock(pthread_mutex_t * mutex)
668 if ((ret = pthread_mutex_lock(mutex)) == 0)
669 (*mutex)->m_refcount--;
674 mutex_self_trylock(pthread_mutex_t mutex)
678 switch (mutex->m_type) {
680 /* case PTHREAD_MUTEX_DEFAULT: */
681 case PTHREAD_MUTEX_ERRORCHECK:
682 case PTHREAD_MUTEX_NORMAL:
684 * POSIX specifies that mutexes should return EDEADLK if a
685 * recursive lock is detected.
690 case PTHREAD_MUTEX_RECURSIVE:
691 /* Increment the lock count: */
692 mutex->m_data.m_count++;
696 /* Trap invalid mutex types; */
704 mutex_self_lock(pthread_mutex_t mutex)
708 switch (mutex->m_type) {
709 /* case PTHREAD_MUTEX_DEFAULT: */
710 case PTHREAD_MUTEX_ERRORCHECK:
712 * POSIX specifies that mutexes should return EDEADLK if a
713 * recursive lock is detected.
718 case PTHREAD_MUTEX_NORMAL:
720 * What SS2 define as a 'normal' mutex. Intentionally
721 * deadlock on attempts to get a lock you already own.
723 _thread_kern_sched_state_unlock(PS_DEADLOCK,
724 &mutex->lock, __FILE__, __LINE__);
727 case PTHREAD_MUTEX_RECURSIVE:
728 /* Increment the lock count: */
729 mutex->m_data.m_count++;
733 /* Trap invalid mutex types; */
741 mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
743 struct pthread *curthread = _get_curthread();
746 if (mutex == NULL || *mutex == NULL) {
750 * Defer signals to protect the scheduling queues from
751 * access by the signal handler:
753 _thread_kern_sig_defer();
755 /* Lock the mutex structure: */
756 _SPINLOCK(&(*mutex)->lock);
758 /* Process according to mutex type: */
759 switch ((*mutex)->m_protocol) {
760 /* Default POSIX mutex: */
761 case PTHREAD_PRIO_NONE:
763 * Check if the running thread is not the owner of the
766 if ((*mutex)->m_owner != curthread) {
768 * Return an invalid argument error for no
769 * owner and a permission error otherwise:
771 ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
773 else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
774 ((*mutex)->m_data.m_count > 0)) {
775 /* Decrement the count: */
776 (*mutex)->m_data.m_count--;
779 * Clear the count in case this is recursive
782 (*mutex)->m_data.m_count = 0;
784 /* Remove the mutex from the threads queue. */
785 _MUTEX_ASSERT_IS_OWNED(*mutex);
786 TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
788 _MUTEX_INIT_LINK(*mutex);
791 * Get the next thread from the queue of
792 * threads waiting on the mutex:
794 if (((*mutex)->m_owner =
795 mutex_queue_deq(*mutex)) != NULL) {
796 /* Make the new owner runnable: */
797 PTHREAD_NEW_STATE((*mutex)->m_owner,
801 * Add the mutex to the threads list of
804 TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
808 * The owner is no longer waiting for
811 (*mutex)->m_owner->data.mutex = NULL;
816 /* POSIX priority inheritence mutex: */
817 case PTHREAD_PRIO_INHERIT:
819 * Check if the running thread is not the owner of the
822 if ((*mutex)->m_owner != curthread) {
824 * Return an invalid argument error for no
825 * owner and a permission error otherwise:
827 ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
829 else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
830 ((*mutex)->m_data.m_count > 0)) {
831 /* Decrement the count: */
832 (*mutex)->m_data.m_count--;
835 * Clear the count in case this is recursive
838 (*mutex)->m_data.m_count = 0;
841 * Restore the threads inherited priority and
842 * recompute the active priority (being careful
843 * not to override changes in the threads base
844 * priority subsequent to locking the mutex).
846 curthread->inherited_priority =
847 (*mutex)->m_saved_prio;
848 curthread->active_priority =
849 MAX(curthread->inherited_priority,
850 curthread->base_priority);
853 * This thread now owns one less priority mutex.
855 curthread->priority_mutex_count--;
857 /* Remove the mutex from the threads queue. */
858 _MUTEX_ASSERT_IS_OWNED(*mutex);
859 TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
861 _MUTEX_INIT_LINK(*mutex);
864 * Get the next thread from the queue of threads
865 * waiting on the mutex:
867 if (((*mutex)->m_owner =
868 mutex_queue_deq(*mutex)) == NULL)
869 /* This mutex has no priority. */
870 (*mutex)->m_prio = 0;
873 * Track number of priority mutexes owned:
875 (*mutex)->m_owner->priority_mutex_count++;
878 * Add the mutex to the threads list
881 TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
885 * The owner is no longer waiting for
888 (*mutex)->m_owner->data.mutex = NULL;
891 * Set the priority of the mutex. Since
892 * our waiting threads are in descending
893 * priority order, the priority of the
894 * mutex becomes the active priority of
895 * the thread we just dequeued.
898 (*mutex)->m_owner->active_priority;
901 * Save the owning threads inherited
904 (*mutex)->m_saved_prio =
905 (*mutex)->m_owner->inherited_priority;
908 * The owning threads inherited priority
909 * now becomes his active priority (the
910 * priority of the mutex).
912 (*mutex)->m_owner->inherited_priority =
916 * Make the new owner runnable:
918 PTHREAD_NEW_STATE((*mutex)->m_owner,
924 /* POSIX priority ceiling mutex: */
925 case PTHREAD_PRIO_PROTECT:
927 * Check if the running thread is not the owner of the
930 if ((*mutex)->m_owner != curthread) {
932 * Return an invalid argument error for no
933 * owner and a permission error otherwise:
935 ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
937 else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
938 ((*mutex)->m_data.m_count > 0)) {
939 /* Decrement the count: */
940 (*mutex)->m_data.m_count--;
943 * Clear the count in case this is recursive
946 (*mutex)->m_data.m_count = 0;
949 * Restore the threads inherited priority and
950 * recompute the active priority (being careful
951 * not to override changes in the threads base
952 * priority subsequent to locking the mutex).
954 curthread->inherited_priority =
955 (*mutex)->m_saved_prio;
956 curthread->active_priority =
957 MAX(curthread->inherited_priority,
958 curthread->base_priority);
961 * This thread now owns one less priority mutex.
963 curthread->priority_mutex_count--;
965 /* Remove the mutex from the threads queue. */
966 _MUTEX_ASSERT_IS_OWNED(*mutex);
967 TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
969 _MUTEX_INIT_LINK(*mutex);
972 * Enter a loop to find a waiting thread whose
973 * active priority will not cause a ceiling
976 while ((((*mutex)->m_owner =
977 mutex_queue_deq(*mutex)) != NULL) &&
978 ((*mutex)->m_owner->active_priority >
981 * Either the mutex ceiling priority
982 * been lowered and/or this threads
983 * priority has been raised subsequent
984 * to this thread being queued on the
987 tls_set_tcb((*mutex)->m_owner->tcb);
989 tls_set_tcb(curthread->tcb);
990 PTHREAD_NEW_STATE((*mutex)->m_owner,
993 * The thread is no longer waiting for
996 (*mutex)->m_owner->data.mutex = NULL;
999 /* Check for a new owner: */
1000 if ((*mutex)->m_owner != NULL) {
1002 * Track number of priority mutexes owned:
1004 (*mutex)->m_owner->priority_mutex_count++;
1007 * Add the mutex to the threads list
1010 TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
1014 * The owner is no longer waiting for
1017 (*mutex)->m_owner->data.mutex = NULL;
1020 * Save the owning threads inherited
1023 (*mutex)->m_saved_prio =
1024 (*mutex)->m_owner->inherited_priority;
1027 * The owning thread inherits the
1028 * ceiling priority of the mutex and
1029 * executes at that priority:
1031 (*mutex)->m_owner->inherited_priority =
1033 (*mutex)->m_owner->active_priority =
1037 * Make the new owner runnable:
1039 PTHREAD_NEW_STATE((*mutex)->m_owner,
1045 /* Trap invalid mutex types: */
1047 /* Return an invalid argument error: */
1052 if ((ret == 0) && (add_reference != 0)) {
1053 /* Increment the reference count: */
1054 (*mutex)->m_refcount++;
1057 /* Unlock the mutex structure: */
1058 _SPINUNLOCK(&(*mutex)->lock);
1061 * Undefer and handle pending signals, yielding if
1064 _thread_kern_sig_undefer();
1067 /* Return the completion status: */
1073 * This function is called when a change in base priority occurs for
1074 * a thread that is holding or waiting for a priority protection or
1075 * inheritence mutex. A change in a threads base priority can effect
1076 * changes to active priorities of other threads and to the ordering
1077 * of mutex locking by waiting threads.
1079 * This must be called while thread scheduling is deferred.
1082 _mutex_notify_priochange(pthread_t pthread)
1084 /* Adjust the priorites of any owned priority mutexes: */
1085 if (pthread->priority_mutex_count > 0) {
1087 * Rescan the mutexes owned by this thread and correct
1088 * their priorities to account for this threads change
1089 * in priority. This has the side effect of changing
1090 * the threads active priority.
1092 mutex_rescan_owned(pthread, /* rescan all owned */ NULL);
1096 * If this thread is waiting on a priority inheritence mutex,
1097 * check for priority adjustments. A change in priority can
1098 * also effect a ceiling violation(*) for a thread waiting on
1099 * a priority protection mutex; we don't perform the check here
1100 * as it is done in pthread_mutex_unlock.
1102 * (*) It should be noted that a priority change to a thread
1103 * _after_ taking and owning a priority ceiling mutex
1104 * does not affect ownership of that mutex; the ceiling
1105 * priority is only checked before mutex ownership occurs.
1107 if (pthread->state == PS_MUTEX_WAIT) {
1108 /* Lock the mutex structure: */
1109 _SPINLOCK(&pthread->data.mutex->lock);
1112 * Check to make sure this thread is still in the same state
1113 * (the spinlock above can yield the CPU to another thread):
1115 if (pthread->state == PS_MUTEX_WAIT) {
1117 * Remove and reinsert this thread into the list of
1118 * waiting threads to preserve decreasing priority
1121 mutex_queue_remove(pthread->data.mutex, pthread);
1122 mutex_queue_enq(pthread->data.mutex, pthread);
1124 if (pthread->data.mutex->m_protocol ==
1125 PTHREAD_PRIO_INHERIT) {
1126 /* Adjust priorities: */
1127 mutex_priority_adjust(pthread->data.mutex);
1131 /* Unlock the mutex structure: */
1132 _SPINUNLOCK(&pthread->data.mutex->lock);
1137 * Called when a new thread is added to the mutex waiting queue or
1138 * when a threads priority changes that is already in the mutex
1142 mutex_priority_adjust(pthread_mutex_t mutex)
1144 pthread_t pthread_next, pthread = mutex->m_owner;
1146 pthread_mutex_t m = mutex;
1149 * Calculate the mutex priority as the maximum of the highest
1150 * active priority of any waiting threads and the owning threads
1151 * active priority(*).
1153 * (*) Because the owning threads current active priority may
1154 * reflect priority inherited from this mutex (and the mutex
1155 * priority may have changed) we must recalculate the active
1156 * priority based on the threads saved inherited priority
1157 * and its base priority.
1159 pthread_next = TAILQ_FIRST(&m->m_queue); /* should never be NULL */
1160 temp_prio = MAX(pthread_next->active_priority,
1161 MAX(m->m_saved_prio, pthread->base_priority));
1163 /* See if this mutex really needs adjusting: */
1164 if (temp_prio == m->m_prio)
1165 /* No need to propagate the priority: */
1168 /* Set new priority of the mutex: */
1169 m->m_prio = temp_prio;
1173 * Save the threads priority before rescanning the
1176 temp_prio = pthread->active_priority;
1179 * Fix the priorities for all the mutexes this thread has
1180 * locked since taking this mutex. This also has a
1181 * potential side-effect of changing the threads priority.
1183 mutex_rescan_owned(pthread, m);
1186 * If the thread is currently waiting on a mutex, check
1187 * to see if the threads new priority has affected the
1188 * priority of the mutex.
1190 if ((temp_prio != pthread->active_priority) &&
1191 (pthread->state == PS_MUTEX_WAIT) &&
1192 (pthread->data.mutex->m_protocol == PTHREAD_PRIO_INHERIT)) {
1193 /* Grab the mutex this thread is waiting on: */
1194 m = pthread->data.mutex;
1197 * The priority for this thread has changed. Remove
1198 * and reinsert this thread into the list of waiting
1199 * threads to preserve decreasing priority order.
1201 mutex_queue_remove(m, pthread);
1202 mutex_queue_enq(m, pthread);
1204 /* Grab the waiting thread with highest priority: */
1205 pthread_next = TAILQ_FIRST(&m->m_queue);
1208 * Calculate the mutex priority as the maximum of the
1209 * highest active priority of any waiting threads and
1210 * the owning threads active priority.
1212 temp_prio = MAX(pthread_next->active_priority,
1213 MAX(m->m_saved_prio, m->m_owner->base_priority));
1215 if (temp_prio != m->m_prio) {
1217 * The priority needs to be propagated to the
1218 * mutex this thread is waiting on and up to
1219 * the owner of that mutex.
1221 m->m_prio = temp_prio;
1222 pthread = m->m_owner;
1236 mutex_rescan_owned(pthread_t pthread, pthread_mutex_t mutex)
1238 int active_prio, inherited_prio;
1240 pthread_t pthread_next;
1243 * Start walking the mutexes the thread has taken since
1244 * taking this mutex.
1246 if (mutex == NULL) {
1248 * A null mutex means start at the beginning of the owned
1251 m = TAILQ_FIRST(&pthread->mutexq);
1253 /* There is no inherited priority yet. */
1258 * The caller wants to start after a specific mutex. It
1259 * is assumed that this mutex is a priority inheritence
1260 * mutex and that its priority has been correctly
1263 m = TAILQ_NEXT(mutex, m_qe);
1265 /* Start inheriting priority from the specified mutex. */
1266 inherited_prio = mutex->m_prio;
1268 active_prio = MAX(inherited_prio, pthread->base_priority);
1272 * We only want to deal with priority inheritence
1273 * mutexes. This might be optimized by only placing
1274 * priority inheritence mutexes into the owned mutex
1275 * list, but it may prove to be useful having all
1276 * owned mutexes in this list. Consider a thread
1277 * exiting while holding mutexes...
1279 if (m->m_protocol == PTHREAD_PRIO_INHERIT) {
1281 * Fix the owners saved (inherited) priority to
1282 * reflect the priority of the previous mutex.
1284 m->m_saved_prio = inherited_prio;
1286 if ((pthread_next = TAILQ_FIRST(&m->m_queue)) != NULL)
1287 /* Recalculate the priority of the mutex: */
1288 m->m_prio = MAX(active_prio,
1289 pthread_next->active_priority);
1291 m->m_prio = active_prio;
1293 /* Recalculate new inherited and active priorities: */
1294 inherited_prio = m->m_prio;
1295 active_prio = MAX(m->m_prio, pthread->base_priority);
1298 /* Advance to the next mutex owned by this thread: */
1299 m = TAILQ_NEXT(m, m_qe);
1303 * Fix the threads inherited priority and recalculate its
1306 pthread->inherited_priority = inherited_prio;
1307 active_prio = MAX(inherited_prio, pthread->base_priority);
1309 if (active_prio != pthread->active_priority) {
1311 * If this thread is in the priority queue, it must be
1312 * removed and reinserted for its new priority.
1314 if (pthread->flags & PTHREAD_FLAGS_IN_PRIOQ) {
1316 * Remove the thread from the priority queue
1317 * before changing its priority:
1319 PTHREAD_PRIOQ_REMOVE(pthread);
1322 * POSIX states that if the priority is being
1323 * lowered, the thread must be inserted at the
1324 * head of the queue for its priority if it owns
1325 * any priority protection or inheritence mutexes.
1327 if ((active_prio < pthread->active_priority) &&
1328 (pthread->priority_mutex_count > 0)) {
1329 /* Set the new active priority. */
1330 pthread->active_priority = active_prio;
1332 PTHREAD_PRIOQ_INSERT_HEAD(pthread);
1335 /* Set the new active priority. */
1336 pthread->active_priority = active_prio;
1338 PTHREAD_PRIOQ_INSERT_TAIL(pthread);
1342 /* Set the new active priority. */
1343 pthread->active_priority = active_prio;
1349 _mutex_unlock_private(pthread_t pthread)
1351 struct pthread_mutex *m, *m_next;
1353 for (m = TAILQ_FIRST(&pthread->mutexq); m != NULL; m = m_next) {
1354 m_next = TAILQ_NEXT(m, m_qe);
1355 if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
1356 pthread_mutex_unlock(&m);
1361 _mutex_lock_backout(pthread_t pthread)
1363 struct pthread_mutex *mutex;
1366 * Defer signals to protect the scheduling queues from
1367 * access by the signal handler:
1369 _thread_kern_sig_defer();
1370 if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {
1371 mutex = pthread->data.mutex;
1373 /* Lock the mutex structure: */
1374 _SPINLOCK(&mutex->lock);
1376 mutex_queue_remove(mutex, pthread);
1378 /* This thread is no longer waiting for the mutex: */
1379 pthread->data.mutex = NULL;
1381 /* Unlock the mutex structure: */
1382 _SPINUNLOCK(&mutex->lock);
1386 * Undefer and handle pending signals, yielding if
1389 _thread_kern_sig_undefer();
1393 * Dequeue a waiting thread from the head of a mutex queue in descending
1396 static inline pthread_t
1397 mutex_queue_deq(pthread_mutex_t mutex)
1401 while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) {
1402 TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
1403 pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;
1406 * Only exit the loop if the thread hasn't been
1409 if (pthread->interrupted == 0)
1417 * Remove a waiting thread from a mutex queue in descending priority order.
1420 mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread)
1422 if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {
1423 TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
1424 pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;
1429 * Enqueue a waiting thread to a queue in descending priority order.
1432 mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
1434 pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head);
1436 PTHREAD_ASSERT_NOT_IN_SYNCQ(pthread);
1438 * For the common case of all threads having equal priority,
1439 * we perform a quick check against the priority of the thread
1440 * at the tail of the queue.
1442 if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
1443 TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, sqe);
1445 tid = TAILQ_FIRST(&mutex->m_queue);
1446 while (pthread->active_priority <= tid->active_priority)
1447 tid = TAILQ_NEXT(tid, sqe);
1448 TAILQ_INSERT_BEFORE(tid, pthread, sqe);
1450 pthread->flags |= PTHREAD_FLAGS_IN_MUTEXQ;
/* Export the public pthread_* names as strong aliases of the _pthread_*
 * implementations. */
__strong_reference(_pthread_mutex_init, pthread_mutex_init);
__strong_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__strong_reference(_pthread_mutex_trylock, pthread_mutex_trylock);
__strong_reference(_pthread_mutex_lock, pthread_mutex_lock);
__strong_reference(_pthread_mutex_unlock, pthread_mutex_unlock);