2 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by John Birrell.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * $FreeBSD: src/lib/libc_r/uthread/uthread_mutex.c,v 1.20.2.8 2002/10/22 14:44:03 fjoe Exp $
33 * $DragonFly: src/lib/libc_r/uthread/uthread_mutex.c,v 1.2 2003/06/17 04:26:48 dillon Exp $
38 #include <sys/param.h>
39 #include <sys/queue.h>
41 #include "pthread_private.h"
/*
 * Invariant-checking macros.  When built with _PTHREADS_INVARIANTS, a
 * mutex's TAILQ entry pointers are cleared on init and the two assert
 * macros PANIC() if the mutex's presence/absence on an owner's mutexq
 * does not match the caller's expectation.  In normal builds all three
 * macros expand to nothing.
 * NOTE(review): this listing appears truncated -- the "} while (0)"
 * terminators, the "#else" and the "#endif" lines are not visible here;
 * confirm against the pristine source before compiling.
 */
43 #if defined(_PTHREADS_INVARIANTS)
44 #define _MUTEX_INIT_LINK(m) do { \
45 (m)->m_qe.tqe_prev = NULL; \
46 (m)->m_qe.tqe_next = NULL; \
48 #define _MUTEX_ASSERT_IS_OWNED(m) do { \
49 if ((m)->m_qe.tqe_prev == NULL) \
50 PANIC("mutex is not on list"); \
52 #define _MUTEX_ASSERT_NOT_OWNED(m) do { \
53 if (((m)->m_qe.tqe_prev != NULL) || \
54 ((m)->m_qe.tqe_next != NULL)) \
55 PANIC("mutex is on list"); \
58 #define _MUTEX_INIT_LINK(m)
59 #define _MUTEX_ASSERT_IS_OWNED(m)
60 #define _MUTEX_ASSERT_NOT_OWNED(m)
/*
 * Forward declarations for the file-local helpers, followed by the
 * spinlock that serializes lazy initialization of statically-allocated
 * (PTHREAD_MUTEX_INITIALIZER) mutexes in init_static().
 */
66 static inline int mutex_self_trylock(pthread_mutex_t);
67 static inline int mutex_self_lock(pthread_mutex_t);
68 static inline int mutex_unlock_common(pthread_mutex_t *, int);
69 static void mutex_priority_adjust(pthread_mutex_t);
70 static void mutex_rescan_owned (pthread_t, pthread_mutex_t);
71 static inline pthread_t mutex_queue_deq(pthread_mutex_t);
72 static inline void mutex_queue_remove(pthread_mutex_t, pthread_t);
73 static inline void mutex_queue_enq(pthread_mutex_t, pthread_t);
76 static spinlock_t static_init_lock = _SPINLOCK_INITIALIZER;
/*
 * Export the public pthread_mutex_* entry points as weak aliases of the
 * leading-underscore implementations, so applications may interpose
 * their own strong definitions.
 */
78 __weak_reference(_pthread_mutex_init, pthread_mutex_init);
79 __weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
80 __weak_reference(_pthread_mutex_trylock, pthread_mutex_trylock);
81 __weak_reference(_pthread_mutex_lock, pthread_mutex_lock);
82 __weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);
84 /* Reinitialize a mutex to defaults. */
/*
 * _mutex_reinit: reset an existing mutex to its default state (used by
 * the library rather than applications).  If *mutex is NULL a fresh
 * mutex is allocated via pthread_mutex_init(); otherwise the structure
 * is reinitialized field by field: default type, no protocol, empty
 * wait queue, no owner, zero recursion count, zero priorities, and a
 * cleared spinlock.  Only MUTEX_FLAGS_PRIVATE survives in m_flags.
 * NOTE(review): the return type line, braces and the final return are
 * not visible in this truncated listing.
 */
86 _mutex_reinit(pthread_mutex_t * mutex)
92 else if (*mutex == NULL)
93 ret = pthread_mutex_init(mutex, NULL);
96 * Initialize the mutex structure:
98 (*mutex)->m_type = PTHREAD_MUTEX_DEFAULT;
99 (*mutex)->m_protocol = PTHREAD_PRIO_NONE;
100 TAILQ_INIT(&(*mutex)->m_queue);
101 (*mutex)->m_owner = NULL;
102 (*mutex)->m_data.m_count = 0;
103 (*mutex)->m_flags &= MUTEX_FLAGS_PRIVATE;
104 (*mutex)->m_flags |= MUTEX_FLAGS_INITED;
105 (*mutex)->m_refcount = 0;
106 (*mutex)->m_prio = 0;
107 (*mutex)->m_saved_prio = 0;
108 _MUTEX_INIT_LINK(*mutex);
109 memset(&(*mutex)->lock, 0, sizeof((*mutex)->lock));
/*
 * _pthread_mutex_init: allocate and initialize a mutex.
 * With NULL attributes the mutex defaults to PTHREAD_MUTEX_ERRORCHECK /
 * PTHREAD_PRIO_NONE with the maximum priority ceiling; otherwise the
 * attribute object supplies type, protocol and ceiling after range
 * validation (EINVAL on failure).  On success a struct pthread_mutex is
 * malloc'd and its queue, flags, owner, priorities and spinlock are
 * initialized before *mutex is set.
 * NOTE(review): the protocol range check compares against
 * PTHREAD_MUTEX_RECURSIVE, which is a mutex *type* constant, not a
 * protocol; the upper bound was presumably meant to be
 * PTHREAD_PRIO_PROTECT -- confirm against the pristine source.
 * NOTE(review): several lines (return type, braces, ENOMEM/EINVAL
 * assignments, final assignment of pmutex to *mutex, return) are
 * missing from this truncated listing.
 */
115 _pthread_mutex_init(pthread_mutex_t * mutex,
116 const pthread_mutexattr_t * mutex_attr)
118 enum pthread_mutextype type;
121 pthread_mutex_t pmutex;
127 /* Check if default mutex attributes: */
128 else if (mutex_attr == NULL || *mutex_attr == NULL) {
129 /* Default to a (error checking) POSIX mutex: */
130 type = PTHREAD_MUTEX_ERRORCHECK;
131 protocol = PTHREAD_PRIO_NONE;
132 ceiling = PTHREAD_MAX_PRIORITY;
135 /* Check mutex type: */
136 else if (((*mutex_attr)->m_type < PTHREAD_MUTEX_ERRORCHECK) ||
137 ((*mutex_attr)->m_type >= MUTEX_TYPE_MAX))
138 /* Return an invalid argument error: */
141 /* Check mutex protocol: */
142 else if (((*mutex_attr)->m_protocol < PTHREAD_PRIO_NONE) ||
143 ((*mutex_attr)->m_protocol > PTHREAD_MUTEX_RECURSIVE))
144 /* Return an invalid argument error: */
148 /* Use the requested mutex type and protocol: */
149 type = (*mutex_attr)->m_type;
150 protocol = (*mutex_attr)->m_protocol;
151 ceiling = (*mutex_attr)->m_ceiling;
154 /* Check no errors so far: */
156 if ((pmutex = (pthread_mutex_t)
157 malloc(sizeof(struct pthread_mutex))) == NULL)
160 /* Reset the mutex flags: */
163 /* Process according to mutex type: */
165 /* case PTHREAD_MUTEX_DEFAULT: */
166 case PTHREAD_MUTEX_ERRORCHECK:
167 case PTHREAD_MUTEX_NORMAL:
168 /* Nothing to do here. */
171 /* Single UNIX Spec 2 recursive mutex: */
172 case PTHREAD_MUTEX_RECURSIVE:
173 /* Reset the mutex count: */
174 pmutex->m_data.m_count = 0;
177 /* Trap invalid mutex types: */
179 /* Return an invalid argument error: */
184 /* Initialise the rest of the mutex: */
185 TAILQ_INIT(&pmutex->m_queue);
186 pmutex->m_flags |= MUTEX_FLAGS_INITED;
187 pmutex->m_owner = NULL;
188 pmutex->m_type = type;
189 pmutex->m_protocol = protocol;
190 pmutex->m_refcount = 0;
191 if (protocol == PTHREAD_PRIO_PROTECT)
192 pmutex->m_prio = ceiling;
195 pmutex->m_saved_prio = 0;
196 _MUTEX_INIT_LINK(pmutex);
197 memset(&pmutex->lock, 0, sizeof(pmutex->lock));
205 /* Return the completion status: */
/*
 * _pthread_mutex_destroy: free a mutex that is no longer in use.
 * Returns EINVAL (presumably -- the assignment line is not visible) for
 * a NULL handle.  Under the structure spinlock, a mutex that still has
 * an owner, queued waiters, or a nonzero condition-variable reference
 * count is busy and cannot be destroyed; otherwise the storage is freed
 * and the caller's pointer is set to NULL.
 * NOTE(review): the free()/NULL-assignment and return lines are missing
 * from this truncated listing.
 */
210 _pthread_mutex_destroy(pthread_mutex_t * mutex)
214 if (mutex == NULL || *mutex == NULL)
217 /* Lock the mutex structure: */
218 _SPINLOCK(&(*mutex)->lock);
221 * Check to see if this mutex is in use:
223 if (((*mutex)->m_owner != NULL) ||
224 (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) ||
225 ((*mutex)->m_refcount != 0)) {
228 /* Unlock the mutex structure: */
229 _SPINUNLOCK(&(*mutex)->lock);
233 * Free the memory allocated for the mutex
236 _MUTEX_ASSERT_NOT_OWNED(*mutex);
240 * Leave the caller's pointer NULL now that
241 * the mutex has been destroyed:
247 /* Return the completion status: */
/*
 * init_static: perform the deferred (lazy) initialization of a mutex
 * that was statically initialized with PTHREAD_MUTEX_INITIALIZER.  The
 * global static_init_lock spinlock serializes concurrent first-use so
 * only one thread allocates the underlying structure.
 */
252 init_static(pthread_mutex_t *mutex)
256 _SPINLOCK(&static_init_lock);
259 ret = pthread_mutex_init(mutex, NULL);
263 _SPINUNLOCK(&static_init_lock);
/*
 * _pthread_mutex_trylock: attempt to acquire a mutex without blocking.
 * Statically-initialized mutexes are lazily set up via init_static().
 * With signals deferred and the structure spinlock held, the behavior
 * depends on the mutex protocol:
 *   PTHREAD_PRIO_NONE    - take ownership if free, delegate to
 *                          mutex_self_trylock() on relock, else EBUSY
 *                          (the assignment line is not visible here).
 *   PTHREAD_PRIO_INHERIT - as above, additionally recording the
 *                          owner's active/inherited priorities in the
 *                          mutex and bumping priority_mutex_count.
 *   PTHREAD_PRIO_PROTECT - additionally rejects a ceiling violation
 *                          when the caller's active priority exceeds
 *                          the mutex priority, and on success raises
 *                          the caller to the ceiling priority.
 * On success the mutex is appended to the thread's owned-mutex list.
 * NOTE(review): braces, break statements, EBUSY/EINVAL assignments and
 * the final return are missing from this truncated listing.
 */
269 _pthread_mutex_trylock(pthread_mutex_t * mutex)
271 struct pthread *curthread = _get_curthread();
278 * If the mutex is statically initialized, perform the dynamic
281 else if (*mutex != NULL || (ret = init_static(mutex)) == 0) {
283 * Defer signals to protect the scheduling queues from
284 * access by the signal handler:
286 _thread_kern_sig_defer();
288 /* Lock the mutex structure: */
289 _SPINLOCK(&(*mutex)->lock);
292 * If the mutex was statically allocated, properly
293 * initialize the tail queue.
295 if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
296 TAILQ_INIT(&(*mutex)->m_queue);
297 _MUTEX_INIT_LINK(*mutex);
298 (*mutex)->m_flags |= MUTEX_FLAGS_INITED;
301 /* Process according to mutex type: */
302 switch ((*mutex)->m_protocol) {
303 /* Default POSIX mutex: */
304 case PTHREAD_PRIO_NONE:
305 /* Check if this mutex is not locked: */
306 if ((*mutex)->m_owner == NULL) {
307 /* Lock the mutex for the running thread: */
308 (*mutex)->m_owner = curthread;
310 /* Add to the list of owned mutexes: */
311 _MUTEX_ASSERT_NOT_OWNED(*mutex);
312 TAILQ_INSERT_TAIL(&curthread->mutexq,
314 } else if ((*mutex)->m_owner == curthread)
315 ret = mutex_self_trylock(*mutex);
317 /* Return a busy error: */
321 /* POSIX priority inheritence mutex: */
322 case PTHREAD_PRIO_INHERIT:
323 /* Check if this mutex is not locked: */
324 if ((*mutex)->m_owner == NULL) {
325 /* Lock the mutex for the running thread: */
326 (*mutex)->m_owner = curthread;
328 /* Track number of priority mutexes owned: */
329 curthread->priority_mutex_count++;
332 * The mutex takes on the attributes of the
333 * running thread when there are no waiters.
335 (*mutex)->m_prio = curthread->active_priority;
336 (*mutex)->m_saved_prio =
337 curthread->inherited_priority;
339 /* Add to the list of owned mutexes: */
340 _MUTEX_ASSERT_NOT_OWNED(*mutex);
341 TAILQ_INSERT_TAIL(&curthread->mutexq,
343 } else if ((*mutex)->m_owner == curthread)
344 ret = mutex_self_trylock(*mutex);
346 /* Return a busy error: */
350 /* POSIX priority protection mutex: */
351 case PTHREAD_PRIO_PROTECT:
352 /* Check for a priority ceiling violation: */
353 if (curthread->active_priority > (*mutex)->m_prio)
356 /* Check if this mutex is not locked: */
357 else if ((*mutex)->m_owner == NULL) {
358 /* Lock the mutex for the running thread: */
359 (*mutex)->m_owner = curthread;
361 /* Track number of priority mutexes owned: */
362 curthread->priority_mutex_count++;
365 * The running thread inherits the ceiling
366 * priority of the mutex and executes at that
369 curthread->active_priority = (*mutex)->m_prio;
370 (*mutex)->m_saved_prio =
371 curthread->inherited_priority;
372 curthread->inherited_priority =
375 /* Add to the list of owned mutexes: */
376 _MUTEX_ASSERT_NOT_OWNED(*mutex);
377 TAILQ_INSERT_TAIL(&curthread->mutexq,
379 } else if ((*mutex)->m_owner == curthread)
380 ret = mutex_self_trylock(*mutex);
382 /* Return a busy error: */
386 /* Trap invalid mutex types: */
388 /* Return an invalid argument error: */
393 /* Unlock the mutex structure: */
394 _SPINUNLOCK(&(*mutex)->lock);
397 * Undefer and handle pending signals, yielding if
400 _thread_kern_sig_undefer();
403 /* Return the completion status: */
/*
 * _pthread_mutex_lock: acquire a mutex, blocking until it is available.
 * The body is a retry loop: a waiter parked in PS_MUTEX_WAIT can be
 * interrupted by a signal, backed out of the wait queue, run a handler,
 * and must then re-attempt acquisition -- hence the do/while that spins
 * until this thread owns the mutex, an error occurs, or the thread was
 * interrupted.  Per protocol:
 *   PTHREAD_PRIO_NONE    - plain queueing via mutex_queue_enq() and a
 *                          reschedule through
 *                          _thread_kern_sched_state_unlock().
 *   PTHREAD_PRIO_INHERIT - additionally propagates the waiter's
 *                          priority to the owner chain through
 *                          mutex_priority_adjust() when the waiter
 *                          outranks the mutex.
 *   PTHREAD_PRIO_PROTECT - rejects ceiling violations up front, and
 *                          re-checks curthread->error after waking in
 *                          case the ceiling was violated while queued.
 * Relocking by the owner is routed to mutex_self_lock(), which encodes
 * the per-type relock semantics (EDEADLK / deadlock / recursion).
 * NOTE(review): braces, break statements, error-code assignments and
 * the final return are missing from this truncated listing.
 */
408 _pthread_mutex_lock(pthread_mutex_t * mutex)
410 struct pthread *curthread = _get_curthread();
413 if (_thread_initial == NULL)
420 * If the mutex is statically initialized, perform the dynamic
423 if ((*mutex == NULL) &&
424 ((ret = init_static(mutex)) != 0))
427 /* Reset the interrupted flag: */
428 curthread->interrupted = 0;
431 * Enter a loop waiting to become the mutex owner. We need a
432 * loop in case the waiting thread is interrupted by a signal
433 * to execute a signal handler. It is not (currently) possible
434 * to remain in the waiting queue while running a handler.
435 * Instead, the thread is interrupted and backed out of the
436 * waiting queue prior to executing the signal handler.
440 * Defer signals to protect the scheduling queues from
441 * access by the signal handler:
443 _thread_kern_sig_defer();
445 /* Lock the mutex structure: */
446 _SPINLOCK(&(*mutex)->lock);
449 * If the mutex was statically allocated, properly
450 * initialize the tail queue.
452 if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
453 TAILQ_INIT(&(*mutex)->m_queue);
454 (*mutex)->m_flags |= MUTEX_FLAGS_INITED;
455 _MUTEX_INIT_LINK(*mutex);
458 /* Process according to mutex type: */
459 switch ((*mutex)->m_protocol) {
460 /* Default POSIX mutex: */
461 case PTHREAD_PRIO_NONE:
462 if ((*mutex)->m_owner == NULL) {
463 /* Lock the mutex for this thread: */
464 (*mutex)->m_owner = curthread;
466 /* Add to the list of owned mutexes: */
467 _MUTEX_ASSERT_NOT_OWNED(*mutex);
468 TAILQ_INSERT_TAIL(&curthread->mutexq,
471 } else if ((*mutex)->m_owner == curthread)
472 ret = mutex_self_lock(*mutex);
475 * Join the queue of threads waiting to lock
478 mutex_queue_enq(*mutex, curthread);
481 * Keep a pointer to the mutex this thread
484 curthread->data.mutex = *mutex;
487 * Unlock the mutex structure and schedule the
490 _thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
491 &(*mutex)->lock, __FILE__, __LINE__);
493 /* Lock the mutex structure again: */
494 _SPINLOCK(&(*mutex)->lock);
498 /* POSIX priority inheritence mutex: */
499 case PTHREAD_PRIO_INHERIT:
500 /* Check if this mutex is not locked: */
501 if ((*mutex)->m_owner == NULL) {
502 /* Lock the mutex for this thread: */
503 (*mutex)->m_owner = curthread;
505 /* Track number of priority mutexes owned: */
506 curthread->priority_mutex_count++;
509 * The mutex takes on attributes of the
510 * running thread when there are no waiters.
512 (*mutex)->m_prio = curthread->active_priority;
513 (*mutex)->m_saved_prio =
514 curthread->inherited_priority;
515 curthread->inherited_priority =
518 /* Add to the list of owned mutexes: */
519 _MUTEX_ASSERT_NOT_OWNED(*mutex);
520 TAILQ_INSERT_TAIL(&curthread->mutexq,
523 } else if ((*mutex)->m_owner == curthread)
524 ret = mutex_self_lock(*mutex);
527 * Join the queue of threads waiting to lock
530 mutex_queue_enq(*mutex, curthread);
533 * Keep a pointer to the mutex this thread
536 curthread->data.mutex = *mutex;
538 if (curthread->active_priority >
540 /* Adjust priorities: */
541 mutex_priority_adjust(*mutex);
544 * Unlock the mutex structure and schedule the
547 _thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
548 &(*mutex)->lock, __FILE__, __LINE__);
550 /* Lock the mutex structure again: */
551 _SPINLOCK(&(*mutex)->lock);
555 /* POSIX priority protection mutex: */
556 case PTHREAD_PRIO_PROTECT:
557 /* Check for a priority ceiling violation: */
558 if (curthread->active_priority > (*mutex)->m_prio)
561 /* Check if this mutex is not locked: */
562 else if ((*mutex)->m_owner == NULL) {
564 * Lock the mutex for the running
567 (*mutex)->m_owner = curthread;
569 /* Track number of priority mutexes owned: */
570 curthread->priority_mutex_count++;
573 * The running thread inherits the ceiling
574 * priority of the mutex and executes at that
577 curthread->active_priority = (*mutex)->m_prio;
578 (*mutex)->m_saved_prio =
579 curthread->inherited_priority;
580 curthread->inherited_priority =
583 /* Add to the list of owned mutexes: */
584 _MUTEX_ASSERT_NOT_OWNED(*mutex);
585 TAILQ_INSERT_TAIL(&curthread->mutexq,
587 } else if ((*mutex)->m_owner == curthread)
588 ret = mutex_self_lock(*mutex);
591 * Join the queue of threads waiting to lock
594 mutex_queue_enq(*mutex, curthread);
597 * Keep a pointer to the mutex this thread
600 curthread->data.mutex = *mutex;
602 /* Clear any previous error: */
603 curthread->error = 0;
606 * Unlock the mutex structure and schedule the
609 _thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
610 &(*mutex)->lock, __FILE__, __LINE__);
612 /* Lock the mutex structure again: */
613 _SPINLOCK(&(*mutex)->lock);
616 * The threads priority may have changed while
617 * waiting for the mutex causing a ceiling
620 ret = curthread->error;
621 curthread->error = 0;
625 /* Trap invalid mutex types: */
627 /* Return an invalid argument error: */
633 * Check to see if this thread was interrupted and
634 * is still in the mutex queue of waiting threads:
636 if (curthread->interrupted != 0)
637 mutex_queue_remove(*mutex, curthread);
639 /* Unlock the mutex structure: */
640 _SPINUNLOCK(&(*mutex)->lock);
643 * Undefer and handle pending signals, yielding if
646 _thread_kern_sig_undefer();
647 } while (((*mutex)->m_owner != curthread) && (ret == 0) &&
648 (curthread->interrupted == 0));
650 if (curthread->interrupted != 0 &&
651 curthread->continuation != NULL)
652 curthread->continuation((void *) curthread);
654 /* Return the completion status: */
/*
 * _pthread_mutex_unlock: public unlock entry point; delegates to
 * mutex_unlock_common() without bumping the reference count.
 */
659 _pthread_mutex_unlock(pthread_mutex_t * mutex)
661 return (mutex_unlock_common(mutex, /* add reference */ 0));
/*
 * _mutex_cv_unlock: unlock on behalf of a condition-variable wait,
 * incrementing m_refcount so the mutex cannot be destroyed while the
 * waiter expects to reacquire it.
 */
665 _mutex_cv_unlock(pthread_mutex_t * mutex)
667 return (mutex_unlock_common(mutex, /* add reference */ 1));
/*
 * _mutex_cv_lock: reacquire a mutex after a condition-variable wait,
 * dropping the reference taken by _mutex_cv_unlock() on success.
 */
671 _mutex_cv_lock(pthread_mutex_t * mutex)
674 if ((ret = pthread_mutex_lock(mutex)) == 0)
675 (*mutex)->m_refcount--;
/*
 * mutex_self_trylock: handle trylock by the thread that already owns
 * the mutex.  ERRORCHECK/NORMAL types fail (the EDEADLK/EBUSY
 * assignment lines are not visible in this listing); RECURSIVE types
 * simply bump the recursion count and succeed.
 */
680 mutex_self_trylock(pthread_mutex_t mutex)
684 switch (mutex->m_type) {
686 /* case PTHREAD_MUTEX_DEFAULT: */
687 case PTHREAD_MUTEX_ERRORCHECK:
688 case PTHREAD_MUTEX_NORMAL:
690 * POSIX specifies that mutexes should return EDEADLK if a
691 * recursive lock is detected.
696 case PTHREAD_MUTEX_RECURSIVE:
697 /* Increment the lock count: */
698 mutex->m_data.m_count++;
702 /* Trap invalid mutex types; */
/*
 * mutex_self_lock: handle a blocking relock by the current owner.
 * ERRORCHECK reports the recursion (EDEADLK per the comment; the
 * assignment line is not visible); NORMAL deliberately deadlocks the
 * caller by scheduling it into PS_DEADLOCK, per SUSv2 semantics;
 * RECURSIVE increments the recursion count.
 */
710 mutex_self_lock(pthread_mutex_t mutex)
714 switch (mutex->m_type) {
715 /* case PTHREAD_MUTEX_DEFAULT: */
716 case PTHREAD_MUTEX_ERRORCHECK:
718 * POSIX specifies that mutexes should return EDEADLK if a
719 * recursive lock is detected.
724 case PTHREAD_MUTEX_NORMAL:
726 * What SS2 define as a 'normal' mutex. Intentionally
727 * deadlock on attempts to get a lock you already own.
729 _thread_kern_sched_state_unlock(PS_DEADLOCK,
730 &mutex->lock, __FILE__, __LINE__);
733 case PTHREAD_MUTEX_RECURSIVE:
734 /* Increment the lock count: */
735 mutex->m_data.m_count++;
739 /* Trap invalid mutex types; */
/*
 * mutex_unlock_common: shared implementation behind
 * _pthread_mutex_unlock() and _mutex_cv_unlock().
 *
 * Validates ownership (EINVAL when unowned, EPERM when owned by another
 * thread), unwinds one level of recursion for RECURSIVE mutexes, and
 * otherwise releases the mutex: it is removed from the owner's mutexq
 * and the next eligible waiter is dequeued and made runnable.  Per
 * protocol the release additionally:
 *   PTHREAD_PRIO_INHERIT - restores the releasing thread's inherited/
 *                          active priority from m_saved_prio, hands the
 *                          mutex priority and saved priority over to
 *                          the new owner, and maintains each side's
 *                          priority_mutex_count.
 *   PTHREAD_PRIO_PROTECT - same restoration, but waiters whose active
 *                          priority now exceeds the ceiling are woken
 *                          with error = EINVAL rather than given the
 *                          mutex; the loop continues until a conforming
 *                          waiter (or none) is found.
 * When add_reference is nonzero and no error occurred, m_refcount is
 * incremented (condition-variable path).
 * NOTE(review): many closing braces, break statements and error-code
 * assignment lines are missing from this truncated listing.
 */
747 mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
749 struct pthread *curthread = _get_curthread();
752 if (mutex == NULL || *mutex == NULL) {
756 * Defer signals to protect the scheduling queues from
757 * access by the signal handler:
759 _thread_kern_sig_defer();
761 /* Lock the mutex structure: */
762 _SPINLOCK(&(*mutex)->lock);
764 /* Process according to mutex type: */
765 switch ((*mutex)->m_protocol) {
766 /* Default POSIX mutex: */
767 case PTHREAD_PRIO_NONE:
769 * Check if the running thread is not the owner of the
772 if ((*mutex)->m_owner != curthread) {
774 * Return an invalid argument error for no
775 * owner and a permission error otherwise:
777 ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
779 else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
780 ((*mutex)->m_data.m_count > 0)) {
781 /* Decrement the count: */
782 (*mutex)->m_data.m_count--;
785 * Clear the count in case this is recursive
788 (*mutex)->m_data.m_count = 0;
790 /* Remove the mutex from the threads queue. */
791 _MUTEX_ASSERT_IS_OWNED(*mutex);
792 TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
794 _MUTEX_INIT_LINK(*mutex);
797 * Get the next thread from the queue of
798 * threads waiting on the mutex:
800 if (((*mutex)->m_owner =
801 mutex_queue_deq(*mutex)) != NULL) {
802 /* Make the new owner runnable: */
803 PTHREAD_NEW_STATE((*mutex)->m_owner,
807 * Add the mutex to the threads list of
810 TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
814 * The owner is no longer waiting for
817 (*mutex)->m_owner->data.mutex = NULL;
822 /* POSIX priority inheritence mutex: */
823 case PTHREAD_PRIO_INHERIT:
825 * Check if the running thread is not the owner of the
828 if ((*mutex)->m_owner != curthread) {
830 * Return an invalid argument error for no
831 * owner and a permission error otherwise:
833 ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
835 else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
836 ((*mutex)->m_data.m_count > 0)) {
837 /* Decrement the count: */
838 (*mutex)->m_data.m_count--;
841 * Clear the count in case this is recursive
844 (*mutex)->m_data.m_count = 0;
847 * Restore the threads inherited priority and
848 * recompute the active priority (being careful
849 * not to override changes in the threads base
850 * priority subsequent to locking the mutex).
852 curthread->inherited_priority =
853 (*mutex)->m_saved_prio;
854 curthread->active_priority =
855 MAX(curthread->inherited_priority,
856 curthread->base_priority);
859 * This thread now owns one less priority mutex.
861 curthread->priority_mutex_count--;
863 /* Remove the mutex from the threads queue. */
864 _MUTEX_ASSERT_IS_OWNED(*mutex);
865 TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
867 _MUTEX_INIT_LINK(*mutex);
870 * Get the next thread from the queue of threads
871 * waiting on the mutex:
873 if (((*mutex)->m_owner =
874 mutex_queue_deq(*mutex)) == NULL)
875 /* This mutex has no priority. */
876 (*mutex)->m_prio = 0;
879 * Track number of priority mutexes owned:
881 (*mutex)->m_owner->priority_mutex_count++;
884 * Add the mutex to the threads list
887 TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
891 * The owner is no longer waiting for
894 (*mutex)->m_owner->data.mutex = NULL;
897 * Set the priority of the mutex. Since
898 * our waiting threads are in descending
899 * priority order, the priority of the
900 * mutex becomes the active priority of
901 * the thread we just dequeued.
904 (*mutex)->m_owner->active_priority;
907 * Save the owning threads inherited
910 (*mutex)->m_saved_prio =
911 (*mutex)->m_owner->inherited_priority;
914 * The owning threads inherited priority
915 * now becomes his active priority (the
916 * priority of the mutex).
918 (*mutex)->m_owner->inherited_priority =
922 * Make the new owner runnable:
924 PTHREAD_NEW_STATE((*mutex)->m_owner,
930 /* POSIX priority ceiling mutex: */
931 case PTHREAD_PRIO_PROTECT:
933 * Check if the running thread is not the owner of the
936 if ((*mutex)->m_owner != curthread) {
938 * Return an invalid argument error for no
939 * owner and a permission error otherwise:
941 ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
943 else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
944 ((*mutex)->m_data.m_count > 0)) {
945 /* Decrement the count: */
946 (*mutex)->m_data.m_count--;
949 * Clear the count in case this is recursive
952 (*mutex)->m_data.m_count = 0;
955 * Restore the threads inherited priority and
956 * recompute the active priority (being careful
957 * not to override changes in the threads base
958 * priority subsequent to locking the mutex).
960 curthread->inherited_priority =
961 (*mutex)->m_saved_prio;
962 curthread->active_priority =
963 MAX(curthread->inherited_priority,
964 curthread->base_priority);
967 * This thread now owns one less priority mutex.
969 curthread->priority_mutex_count--;
971 /* Remove the mutex from the threads queue. */
972 _MUTEX_ASSERT_IS_OWNED(*mutex);
973 TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
975 _MUTEX_INIT_LINK(*mutex);
978 * Enter a loop to find a waiting thread whose
979 * active priority will not cause a ceiling
982 while ((((*mutex)->m_owner =
983 mutex_queue_deq(*mutex)) != NULL) &&
984 ((*mutex)->m_owner->active_priority >
987 * Either the mutex ceiling priority
988 * been lowered and/or this threads
989 * priority has been raised subsequent
990 * to this thread being queued on the
993 (*mutex)->m_owner->error = EINVAL;
994 PTHREAD_NEW_STATE((*mutex)->m_owner,
997 * The thread is no longer waiting for
1000 (*mutex)->m_owner->data.mutex = NULL;
1003 /* Check for a new owner: */
1004 if ((*mutex)->m_owner != NULL) {
1006 * Track number of priority mutexes owned:
1008 (*mutex)->m_owner->priority_mutex_count++;
1011 * Add the mutex to the threads list
1014 TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
1018 * The owner is no longer waiting for
1021 (*mutex)->m_owner->data.mutex = NULL;
1024 * Save the owning threads inherited
1027 (*mutex)->m_saved_prio =
1028 (*mutex)->m_owner->inherited_priority;
1031 * The owning thread inherits the
1032 * ceiling priority of the mutex and
1033 * executes at that priority:
1035 (*mutex)->m_owner->inherited_priority =
1037 (*mutex)->m_owner->active_priority =
1041 * Make the new owner runnable:
1043 PTHREAD_NEW_STATE((*mutex)->m_owner,
1049 /* Trap invalid mutex types: */
1051 /* Return an invalid argument error: */
1056 if ((ret == 0) && (add_reference != 0)) {
1057 /* Increment the reference count: */
1058 (*mutex)->m_refcount++;
1061 /* Unlock the mutex structure: */
1062 _SPINUNLOCK(&(*mutex)->lock);
1065 * Undefer and handle pending signals, yielding if
1068 _thread_kern_sig_undefer();
1071 /* Return the completion status: */
1077 * This function is called when a change in base priority occurs for
1078 * a thread that is holding or waiting for a priority protection or
1079 * inheritence mutex. A change in a threads base priority can effect
1080 * changes to active priorities of other threads and to the ordering
1081 * of mutex locking by waiting threads.
1083 * This must be called while thread scheduling is deferred.
/*
 * _mutex_notify_priochange: react to a change in a thread's base
 * priority.  If the thread owns any priority mutexes, rescan them all
 * (mutex_rescan_owned with a NULL starting mutex) to recompute mutex
 * and thread priorities.  If the thread is blocked in PS_MUTEX_WAIT,
 * re-queue it on its mutex's wait queue to restore descending-priority
 * order -- re-checking the state after taking the spinlock, since
 * spinning may yield the CPU -- and, for PTHREAD_PRIO_INHERIT mutexes,
 * propagate the new priority up the owner chain.
 * Caller must have thread scheduling deferred (see header comment).
 */
1086 _mutex_notify_priochange(pthread_t pthread)
1088 /* Adjust the priorites of any owned priority mutexes: */
1089 if (pthread->priority_mutex_count > 0) {
1091 * Rescan the mutexes owned by this thread and correct
1092 * their priorities to account for this threads change
1093 * in priority. This has the side effect of changing
1094 * the threads active priority.
1096 mutex_rescan_owned(pthread, /* rescan all owned */ NULL);
1100 * If this thread is waiting on a priority inheritence mutex,
1101 * check for priority adjustments. A change in priority can
1102 * also effect a ceiling violation(*) for a thread waiting on
1103 * a priority protection mutex; we don't perform the check here
1104 * as it is done in pthread_mutex_unlock.
1106 * (*) It should be noted that a priority change to a thread
1107 * _after_ taking and owning a priority ceiling mutex
1108 * does not affect ownership of that mutex; the ceiling
1109 * priority is only checked before mutex ownership occurs.
1111 if (pthread->state == PS_MUTEX_WAIT) {
1112 /* Lock the mutex structure: */
1113 _SPINLOCK(&pthread->data.mutex->lock);
1116 * Check to make sure this thread is still in the same state
1117 * (the spinlock above can yield the CPU to another thread):
1119 if (pthread->state == PS_MUTEX_WAIT) {
1121 * Remove and reinsert this thread into the list of
1122 * waiting threads to preserve decreasing priority
1125 mutex_queue_remove(pthread->data.mutex, pthread);
1126 mutex_queue_enq(pthread->data.mutex, pthread);
1128 if (pthread->data.mutex->m_protocol ==
1129 PTHREAD_PRIO_INHERIT) {
1130 /* Adjust priorities: */
1131 mutex_priority_adjust(pthread->data.mutex);
1135 /* Unlock the mutex structure: */
1136 _SPINUNLOCK(&pthread->data.mutex->lock);
1141 * Called when a new thread is added to the mutex waiting queue or
1142 * when a threads priority changes that is already in the mutex
/*
 * mutex_priority_adjust: propagate priority inheritance along a chain
 * of PTHREAD_PRIO_INHERIT mutexes.  The mutex priority is recomputed as
 * the maximum of its highest-priority waiter, its saved priority, and
 * the owner's base priority.  If that changed, the owner's owned-mutex
 * list is rescanned (mutex_rescan_owned starting at this mutex), and if
 * the owner is itself blocked on another PRIO_INHERIT mutex whose
 * priority is thereby affected, the walk continues up to that mutex's
 * owner -- the body is evidently a loop, though its loop header and
 * closing lines are not visible in this truncated listing.
 * Called with scheduling deferred and the mutex spinlock held by the
 * callers visible in this file.
 */
1146 mutex_priority_adjust(pthread_mutex_t mutex)
1148 pthread_t pthread_next, pthread = mutex->m_owner;
1150 pthread_mutex_t m = mutex;
1153 * Calculate the mutex priority as the maximum of the highest
1154 * active priority of any waiting threads and the owning threads
1155 * active priority(*).
1157 * (*) Because the owning threads current active priority may
1158 * reflect priority inherited from this mutex (and the mutex
1159 * priority may have changed) we must recalculate the active
1160 * priority based on the threads saved inherited priority
1161 * and its base priority.
1163 pthread_next = TAILQ_FIRST(&m->m_queue); /* should never be NULL */
1164 temp_prio = MAX(pthread_next->active_priority,
1165 MAX(m->m_saved_prio, pthread->base_priority));
1167 /* See if this mutex really needs adjusting: */
1168 if (temp_prio == m->m_prio)
1169 /* No need to propagate the priority: */
1172 /* Set new priority of the mutex: */
1173 m->m_prio = temp_prio;
1177 * Save the threads priority before rescanning the
1180 temp_prio = pthread->active_priority;
1183 * Fix the priorities for all the mutexes this thread has
1184 * locked since taking this mutex. This also has a
1185 * potential side-effect of changing the threads priority.
1187 mutex_rescan_owned(pthread, m);
1190 * If the thread is currently waiting on a mutex, check
1191 * to see if the threads new priority has affected the
1192 * priority of the mutex.
1194 if ((temp_prio != pthread->active_priority) &&
1195 (pthread->state == PS_MUTEX_WAIT) &&
1196 (pthread->data.mutex->m_protocol == PTHREAD_PRIO_INHERIT)) {
1197 /* Grab the mutex this thread is waiting on: */
1198 m = pthread->data.mutex;
1201 * The priority for this thread has changed. Remove
1202 * and reinsert this thread into the list of waiting
1203 * threads to preserve decreasing priority order.
1205 mutex_queue_remove(m, pthread);
1206 mutex_queue_enq(m, pthread);
1208 /* Grab the waiting thread with highest priority: */
1209 pthread_next = TAILQ_FIRST(&m->m_queue);
1212 * Calculate the mutex priority as the maximum of the
1213 * highest active priority of any waiting threads and
1214 * the owning threads active priority.
1216 temp_prio = MAX(pthread_next->active_priority,
1217 MAX(m->m_saved_prio, m->m_owner->base_priority));
1219 if (temp_prio != m->m_prio) {
1221 * The priority needs to be propagated to the
1222 * mutex this thread is waiting on and up to
1223 * the owner of that mutex.
1225 m->m_prio = temp_prio;
1226 pthread = m->m_owner;
/*
 * mutex_rescan_owned: walk a thread's owned-mutex list (from the start
 * when `mutex` is NULL, or from the mutex after `mutex` otherwise) and
 * recompute the saved/inherited priority carried through each
 * PTHREAD_PRIO_INHERIT mutex, accumulating the thread's new inherited
 * and active priorities.  Finally, if the active priority changed and
 * the thread sits in the run priority queue, it is removed and
 * reinserted -- at the head when its priority dropped while it still
 * owns priority mutexes (per the POSIX note in the comments below), at
 * the tail otherwise.
 * NOTE(review): the loop header, else branches and closing braces are
 * not visible in this truncated listing; inherited_prio's
 * initialization for the NULL-mutex case is also elided.
 */
1240 mutex_rescan_owned(pthread_t pthread, pthread_mutex_t mutex)
1242 int active_prio, inherited_prio;
1244 pthread_t pthread_next;
1247 * Start walking the mutexes the thread has taken since
1248 * taking this mutex.
1250 if (mutex == NULL) {
1252 * A null mutex means start at the beginning of the owned
1255 m = TAILQ_FIRST(&pthread->mutexq);
1257 /* There is no inherited priority yet. */
1262 * The caller wants to start after a specific mutex. It
1263 * is assumed that this mutex is a priority inheritence
1264 * mutex and that its priority has been correctly
1267 m = TAILQ_NEXT(mutex, m_qe);
1269 /* Start inheriting priority from the specified mutex. */
1270 inherited_prio = mutex->m_prio;
1272 active_prio = MAX(inherited_prio, pthread->base_priority);
1276 * We only want to deal with priority inheritence
1277 * mutexes. This might be optimized by only placing
1278 * priority inheritence mutexes into the owned mutex
1279 * list, but it may prove to be useful having all
1280 * owned mutexes in this list. Consider a thread
1281 * exiting while holding mutexes...
1283 if (m->m_protocol == PTHREAD_PRIO_INHERIT) {
1285 * Fix the owners saved (inherited) priority to
1286 * reflect the priority of the previous mutex.
1288 m->m_saved_prio = inherited_prio;
1290 if ((pthread_next = TAILQ_FIRST(&m->m_queue)) != NULL)
1291 /* Recalculate the priority of the mutex: */
1292 m->m_prio = MAX(active_prio,
1293 pthread_next->active_priority);
1295 m->m_prio = active_prio;
1297 /* Recalculate new inherited and active priorities: */
1298 inherited_prio = m->m_prio;
1299 active_prio = MAX(m->m_prio, pthread->base_priority);
1302 /* Advance to the next mutex owned by this thread: */
1303 m = TAILQ_NEXT(m, m_qe);
1307 * Fix the threads inherited priority and recalculate its
1310 pthread->inherited_priority = inherited_prio;
1311 active_prio = MAX(inherited_prio, pthread->base_priority);
1313 if (active_prio != pthread->active_priority) {
1315 * If this thread is in the priority queue, it must be
1316 * removed and reinserted for its new priority.
1318 if (pthread->flags & PTHREAD_FLAGS_IN_PRIOQ) {
1320 * Remove the thread from the priority queue
1321 * before changing its priority:
1323 PTHREAD_PRIOQ_REMOVE(pthread);
1326 * POSIX states that if the priority is being
1327 * lowered, the thread must be inserted at the
1328 * head of the queue for its priority if it owns
1329 * any priority protection or inheritence mutexes.
1331 if ((active_prio < pthread->active_priority) &&
1332 (pthread->priority_mutex_count > 0)) {
1333 /* Set the new active priority. */
1334 pthread->active_priority = active_prio;
1336 PTHREAD_PRIOQ_INSERT_HEAD(pthread);
1339 /* Set the new active priority. */
1340 pthread->active_priority = active_prio;
1342 PTHREAD_PRIOQ_INSERT_TAIL(pthread);
1346 /* Set the new active priority. */
1347 pthread->active_priority = active_prio;
/*
 * _mutex_unlock_private: release every library-private mutex held by
 * `pthread` (those flagged MUTEX_FLAGS_PRIVATE).  m_next is captured
 * before unlocking because a release removes m from the mutexq.
 * Note: pthread_mutex_unlock(&m) takes the address of the local cursor;
 * this works because pthread_mutex_t is a pointer to struct
 * pthread_mutex in libc_r -- presumably intentional, confirm against
 * pthread_private.h.
 */
1353 _mutex_unlock_private(pthread_t pthread)
1355 struct pthread_mutex *m, *m_next;
1357 for (m = TAILQ_FIRST(&pthread->mutexq); m != NULL; m = m_next) {
1358 m_next = TAILQ_NEXT(m, m_qe);
1359 if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
1360 pthread_mutex_unlock(&m);
/*
 * _mutex_lock_backout: back an interrupted thread out of the wait queue
 * of the mutex it was blocked on (e.g. so it can run a signal handler;
 * see the retry loop in _pthread_mutex_lock).  With signals deferred
 * and the mutex spinlock held, the thread is removed from the queue and
 * its data.mutex pointer cleared.  A no-op unless the thread carries
 * PTHREAD_FLAGS_IN_MUTEXQ.
 */
1365 _mutex_lock_backout(pthread_t pthread)
1367 struct pthread_mutex *mutex;
1370 * Defer signals to protect the scheduling queues from
1371 * access by the signal handler:
1373 _thread_kern_sig_defer();
1374 if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {
1375 mutex = pthread->data.mutex;
1377 /* Lock the mutex structure: */
1378 _SPINLOCK(&mutex->lock);
1380 mutex_queue_remove(mutex, pthread);
1382 /* This thread is no longer waiting for the mutex: */
1383 pthread->data.mutex = NULL;
1385 /* Unlock the mutex structure: */
1386 _SPINUNLOCK(&mutex->lock);
1390 * Undefer and handle pending signals, yielding if
1393 _thread_kern_sig_undefer();
1397 * Dequeue a waiting thread from the head of a mutex queue in descending
/*
 * mutex_queue_deq: pop the highest-priority waiter off the mutex wait
 * queue (the queue is kept in descending priority order, so the head is
 * the winner).  Waiters whose interrupted flag is set are discarded --
 * they have been backed out and will retry the lock themselves.
 * Returns the dequeued thread, or NULL when no eligible waiter remains
 * (the return line is not visible in this truncated listing).
 */
1400 static inline pthread_t
1401 mutex_queue_deq(pthread_mutex_t mutex)
1405 while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) {
1406 TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
1407 pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;
1410 * Only exit the loop if the thread hasn't been
1413 if (pthread->interrupted == 0)
1421 * Remove a waiting thread from a mutex queue in descending priority order.
/*
 * mutex_queue_remove: unlink a specific thread from a mutex's wait
 * queue, guarded by PTHREAD_FLAGS_IN_MUTEXQ so the call is a safe no-op
 * if the thread is not actually queued.
 */
1424 mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread)
1426 if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {
1427 TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
1428 pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;
1433 * Enqueue a waiting thread to a queue in descending priority order.
/*
 * mutex_queue_enq: insert a thread into a mutex's wait queue while
 * keeping it sorted in descending active-priority order.  The common
 * equal-priority case is handled by a cheap comparison against the
 * current tail; otherwise the queue is scanned from the head for the
 * first lower-priority waiter and the thread is inserted before it.
 * The thread must not already be on any sync queue (asserted), and is
 * marked PTHREAD_FLAGS_IN_MUTEXQ on insertion.
 */
1436 mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
1438 pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head);
1440 PTHREAD_ASSERT_NOT_IN_SYNCQ(pthread);
1442 * For the common case of all threads having equal priority,
1443 * we perform a quick check against the priority of the thread
1444 * at the tail of the queue.
1446 if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
1447 TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, sqe);
1449 tid = TAILQ_FIRST(&mutex->m_queue);
1450 while (pthread->active_priority <= tid->active_priority)
1451 tid = TAILQ_NEXT(tid, sqe);
1452 TAILQ_INSERT_BEFORE(tid, pthread, sqe);
1454 pthread->flags |= PTHREAD_FLAGS_IN_MUTEXQ;