2 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by John Birrell.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * $FreeBSD: src/lib/libpthread/thread/thr_mutex.c,v 1.46 2004/10/31 05:03:50 green Exp $
33 * $DragonFly: src/lib/libthread_xu/thread/thr_mutex.c,v 1.6 2005/05/07 07:39:14 davidxu Exp $
36 #include <machine/tls.h>
41 #include <sys/param.h>
42 #include <sys/queue.h>
44 #include "thr_private.h"
/*
 * Invariant/debug macros.  When _PTHREADS_INVARIANTS is defined these
 * initialize and sanity-check a mutex's owned-list linkage (m_qe) and
 * assert that a thread is not on a synchronization queue; otherwise
 * they expand to nothing.
 * NOTE(review): this excerpt omits some original lines (the
 * "} while (0)" closers and the #else/#endif), so the macro bodies
 * appear truncated here.
 */
46 #if defined(_PTHREADS_INVARIANTS)
47 #define MUTEX_INIT_LINK(m) do { \
48 (m)->m_qe.tqe_prev = NULL; \
49 (m)->m_qe.tqe_next = NULL; \
51 #define MUTEX_ASSERT_IS_OWNED(m) do { \
52 if ((m)->m_qe.tqe_prev == NULL) \
53 PANIC("mutex is not on list"); \
55 #define MUTEX_ASSERT_NOT_OWNED(m) do { \
56 if (((m)->m_qe.tqe_prev != NULL) || \
57 ((m)->m_qe.tqe_next != NULL)) \
58 PANIC("mutex is on list"); \
60 #define THR_ASSERT_NOT_IN_SYNCQ(thr) do { \
61 THR_ASSERT(((thr)->sflags & THR_FLAGS_IN_SYNCQ) == 0, \
62 "thread in syncq when it shouldn't be."); \
65 #define MUTEX_INIT_LINK(m)
66 #define MUTEX_ASSERT_IS_OWNED(m)
67 #define MUTEX_ASSERT_NOT_OWNED(m)
68 #define THR_ASSERT_NOT_IN_SYNCQ(thr)
/* True when the thread is linked on a mutex wait (sync) queue. */
71 #define THR_IN_MUTEXQ(thr) (((thr)->sflags & THR_FLAGS_IN_SYNCQ) != 0)
/* Tear down and free a mutex; body omitted from this excerpt. */
72 #define MUTEX_DESTROY(m) do { \
80 static long mutex_handoff(struct pthread *, struct pthread_mutex *);
81 static int mutex_self_trylock(struct pthread *, pthread_mutex_t);
82 static int mutex_self_lock(struct pthread *, pthread_mutex_t,
83 const struct timespec *abstime);
84 static int mutex_unlock_common(pthread_mutex_t *, int);
85 static void mutex_priority_adjust(struct pthread *, pthread_mutex_t);
86 static void mutex_rescan_owned (struct pthread *, struct pthread *,
87 struct pthread_mutex *);
89 static pthread_t mutex_queue_deq(pthread_mutex_t);
91 static void mutex_queue_remove(pthread_mutex_t, pthread_t);
92 static void mutex_queue_enq(pthread_mutex_t, pthread_t);
94 __weak_reference(__pthread_mutex_init, pthread_mutex_init);
95 __weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
96 __weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
97 __weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);
99 /* Single underscore versions provided for libc internal usage: */
100 /* No difference between libc and application usage of these: */
101 __weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
102 __weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);
/*
 * mutex_init() - allocate and initialize a mutex object.
 *
 * A NULL mutex_attr (or NULL *mutex_attr) selects the defaults:
 * PTHREAD_MUTEX_ERRORCHECK type, PTHREAD_PRIO_NONE protocol and a
 * THR_MAX_PRIORITY ceiling.  Otherwise the attribute's type and
 * protocol are range-checked and its type/protocol/ceiling/flags are
 * used.  'private' requests a library-private mutex (the
 * MUTEX_FLAGS_PRIVATE assignment below is presumably guarded by an
 * "if (private)" on a line omitted from this excerpt - TODO confirm).
 * Returns 0 on success or an error number (EINVAL/ENOMEM paths are
 * implied by the checks; their return lines are omitted here).
 */
105 mutex_init(pthread_mutex_t *mutex,
106 const pthread_mutexattr_t *mutex_attr, int private)
108 struct pthread_mutex *pmutex;
109 enum pthread_mutextype type;
115 /* Check if default mutex attributes: */
116 if (mutex_attr == NULL || *mutex_attr == NULL) {
117 /* Default to a (error checking) POSIX mutex: */
118 type = PTHREAD_MUTEX_ERRORCHECK;
119 protocol = PTHREAD_PRIO_NONE;
120 ceiling = THR_MAX_PRIORITY;
124 /* Check mutex type: */
125 else if (((*mutex_attr)->m_type < PTHREAD_MUTEX_ERRORCHECK) ||
126 ((*mutex_attr)->m_type >= MUTEX_TYPE_MAX))
127 /* Return an invalid argument error: */
130 /* Check mutex protocol: */
131 else if (((*mutex_attr)->m_protocol < PTHREAD_PRIO_NONE) ||
132 ((*mutex_attr)->m_protocol > PTHREAD_PRIO_PROTECT))
133 /* Return an invalid argument error: */
137 /* Use the requested mutex type and protocol: */
138 type = (*mutex_attr)->m_type;
139 protocol = (*mutex_attr)->m_protocol;
140 ceiling = (*mutex_attr)->m_ceiling;
141 flags = (*mutex_attr)->m_flags;
144 /* Check no errors so far: */
146 if ((pmutex = (pthread_mutex_t)
147 malloc(sizeof(struct pthread_mutex))) == NULL) {
150 _thr_umtx_init(&pmutex->m_lock);
151 /* Set the mutex flags: */
152 pmutex->m_flags = flags;
154 /* Process according to mutex type: */
156 /* case PTHREAD_MUTEX_DEFAULT: */
157 case PTHREAD_MUTEX_ERRORCHECK:
158 case PTHREAD_MUTEX_NORMAL:
159 /* Nothing to do here. */
162 /* Single UNIX Spec 2 recursive mutex: */
163 case PTHREAD_MUTEX_RECURSIVE:
164 /* Reset the mutex count: */
168 /* Trap invalid mutex types: */
170 /* Return an invalid argument error: */
175 /* Initialise the rest of the mutex: */
176 TAILQ_INIT(&pmutex->m_queue);
177 pmutex->m_flags |= MUTEX_FLAGS_INITED;
/* NOTE(review): likely guarded by "if (private)" on an omitted line. */
179 pmutex->m_flags |= MUTEX_FLAGS_PRIVATE;
180 pmutex->m_owner = NULL;
181 pmutex->m_type = type;
182 pmutex->m_protocol = protocol;
183 pmutex->m_refcount = 0;
184 if (protocol == PTHREAD_PRIO_PROTECT)
185 pmutex->m_prio = ceiling;
188 pmutex->m_saved_prio = 0;
189 MUTEX_INIT_LINK(pmutex);
192 /* Free the mutex lock structure: */
193 MUTEX_DESTROY(pmutex);
198 /* Return the completion status: */
/*
 * init_static() - lazily initialize a statically-allocated,
 * non-private mutex.  Serialized by the global _mutex_static_lock so
 * concurrent first-lockers initialize it exactly once.
 */
203 init_static(struct pthread *thread, pthread_mutex_t *mutex)
207 THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);
210 ret = mutex_init(mutex, NULL, 0);
214 THR_LOCK_RELEASE(thread, &_mutex_static_lock);
/*
 * init_static_private() - same as init_static(), but initializes the
 * mutex as library-private (delete safe): mutex_init() is called with
 * private == 1.
 */
220 init_static_private(struct pthread *thread, pthread_mutex_t *mutex)
224 THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);
227 ret = mutex_init(mutex, NULL, 1);
231 THR_LOCK_RELEASE(thread, &_mutex_static_lock);
/*
 * _pthread_mutex_init() - libc-internal entry point; creates the
 * mutex as private (delete safe).
 */
237 _pthread_mutex_init(pthread_mutex_t *mutex,
238 const pthread_mutexattr_t *mutex_attr)
240 return mutex_init(mutex, mutex_attr, 1);
/*
 * __pthread_mutex_init() - application-visible entry point (see the
 * __weak_reference above); creates the mutex as non-private.
 */
244 __pthread_mutex_init(pthread_mutex_t *mutex,
245 const pthread_mutexattr_t *mutex_attr)
247 return mutex_init(mutex, mutex_attr, 0);
/*
 * _mutex_reinit() - reset an existing mutex to its pristine unlocked
 * state: fresh umtx lock, empty wait queue, cleared owner/count/
 * refcount and priority bookkeeping.  Presumably used to reconstruct
 * library mutexes (e.g. across fork) - TODO confirm against callers.
 */
251 _mutex_reinit(pthread_mutex_t *mutex)
253 _thr_umtx_init(&(*mutex)->m_lock);
254 TAILQ_INIT(&(*mutex)->m_queue);
255 MUTEX_INIT_LINK(*mutex);
256 (*mutex)->m_owner = NULL;
257 (*mutex)->m_count = 0;
258 (*mutex)->m_refcount = 0;
259 (*mutex)->m_prio = 0;
260 (*mutex)->m_saved_prio = 0;
/*
 * _mutex_fork() - repair mutex state in the child after fork().
 * Simple (PRIO_NONE) mutexes held by the forking thread are forced to
 * the locked state; priority mutexes get their structure lock and
 * wait queue reset, since any contending threads existed only in the
 * parent.
 */
265 _mutex_fork(struct pthread *curthread)
267 struct pthread_mutex *m;
269 TAILQ_FOREACH(m, &curthread->mutexq, m_qe)
270 m->m_lock = UMTX_LOCKED;
272 /* Clear contender for priority mutexes */
273 TAILQ_FOREACH(m, &curthread->pri_mutexq, m_qe) {
274 /* clear another thread locked us */
275 _thr_umtx_init(&m->m_lock);
276 TAILQ_INIT(&m->m_queue);
/*
 * _pthread_mutex_destroy() - destroy a mutex.
 *
 * The structure lock is probed with a single trylock: failure means
 * the mutex is in use.  Even with the lock held, a non-NULL owner, a
 * non-empty wait queue, or a non-zero refcount (condition variables
 * referencing it) keeps the mutex alive (presumably EBUSY is returned
 * on an omitted line - TODO confirm).  Otherwise the caller's pointer
 * is cleared and the storage freed.
 * NOTE(review): 'm' is assigned from *mutex on a line omitted from
 * this excerpt (see the "Save a pointer" comment).
 */
281 _pthread_mutex_destroy(pthread_mutex_t *mutex)
283 struct pthread *curthread = tls_get_curthread();
287 if (mutex == NULL || *mutex == NULL)
291 * Try to lock the mutex structure, we only need to
292 * try once, if failed, the mutex is in used.
294 ret = THR_UMTX_TRYLOCK(curthread, &(*mutex)->m_lock);
299 * Check mutex other fields to see if this mutex is
300 * in use. Mostly for prority mutex types, or there
301 * are condition variables referencing it.
303 if (((*mutex)->m_owner != NULL) ||
304 (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) ||
305 ((*mutex)->m_refcount != 0)) {
306 THR_UMTX_UNLOCK(curthread, &(*mutex)->m_lock);
310 * Save a pointer to the mutex so it can be free'd
311 * and set the caller's pointer to NULL:
316 /* Unlock the mutex structure: */
317 _thr_umtx_unlock(&m->m_lock, curthread->tid);
320 * Free the memory allocated for the mutex
323 MUTEX_ASSERT_NOT_OWNED(m);
328 /* Return the completion status: */
/*
 * mutex_trylock_common() - non-blocking lock attempt, shared by the
 * public trylock entry points.
 *
 * Fast path: PTHREAD_PRIO_NONE mutexes use a bare umtx trylock; on
 * success the mutex is recorded on curthread->mutexq.  A re-lock by
 * the owner is delegated to mutex_self_trylock().
 *
 * Priority mutexes take the structure lock, lazily finish static
 * initialization, then handle PRIO_INHERIT / PRIO_PROTECT: an
 * unowned mutex is claimed and its priority bookkeeping updated under
 * the thread lock; a ceiling violation or contention returns an error
 * (the EINVAL/EBUSY return lines are omitted from this excerpt).
 */
333 mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
337 THR_ASSERT((mutex != NULL) && (*mutex != NULL),
338 "Uninitialized mutex in mutex_trylock_common");
340 /* Short cut for simple mutex. */
341 if ((*mutex)->m_protocol == PTHREAD_PRIO_NONE) {
342 ret = THR_UMTX_TRYLOCK(curthread, &(*mutex)->m_lock);
344 (*mutex)->m_owner = curthread;
345 /* Add to the list of owned mutexes: */
346 MUTEX_ASSERT_NOT_OWNED(*mutex);
347 TAILQ_INSERT_TAIL(&curthread->mutexq,
349 } else if ((*mutex)->m_owner == curthread) {
350 ret = mutex_self_trylock(curthread, *mutex);
356 /* Code for priority mutex */
358 /* Lock the mutex structure: */
359 THR_LOCK_ACQUIRE(curthread, &(*mutex)->m_lock);
362 * If the mutex was statically allocated, properly
363 * initialize the tail queue.
365 if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
366 TAILQ_INIT(&(*mutex)->m_queue);
367 MUTEX_INIT_LINK(*mutex);
368 (*mutex)->m_flags |= MUTEX_FLAGS_INITED;
371 /* Process according to mutex type: */
372 switch ((*mutex)->m_protocol) {
373 /* POSIX priority inheritence mutex: */
374 case PTHREAD_PRIO_INHERIT:
375 /* Check if this mutex is not locked: */
376 if ((*mutex)->m_owner == NULL) {
377 /* Lock the mutex for the running thread: */
378 (*mutex)->m_owner = curthread;
381 /* Track number of priority mutexes owned: */
382 curthread->priority_mutex_count++;
385 * The mutex takes on the attributes of the
386 * running thread when there are no waiters.
388 (*mutex)->m_prio = curthread->active_priority;
389 (*mutex)->m_saved_prio =
390 curthread->inherited_priority;
391 curthread->inherited_priority = (*mutex)->m_prio;
392 THR_UNLOCK(curthread);
394 /* Add to the list of owned mutexes: */
395 MUTEX_ASSERT_NOT_OWNED(*mutex);
396 TAILQ_INSERT_TAIL(&curthread->pri_mutexq,
398 } else if ((*mutex)->m_owner == curthread)
399 ret = mutex_self_trylock(curthread, *mutex);
401 /* Return a busy error: */
405 /* POSIX priority protection mutex: */
406 case PTHREAD_PRIO_PROTECT:
407 /* Check for a priority ceiling violation: */
408 if (curthread->active_priority > (*mutex)->m_prio)
411 /* Check if this mutex is not locked: */
412 else if ((*mutex)->m_owner == NULL) {
413 /* Lock the mutex for the running thread: */
414 (*mutex)->m_owner = curthread;
417 /* Track number of priority mutexes owned: */
418 curthread->priority_mutex_count++;
421 * The running thread inherits the ceiling
422 * priority of the mutex and executes at that
425 curthread->active_priority = (*mutex)->m_prio;
426 (*mutex)->m_saved_prio =
427 curthread->inherited_priority;
428 curthread->inherited_priority =
430 THR_UNLOCK(curthread);
431 /* Add to the list of owned mutexes: */
432 MUTEX_ASSERT_NOT_OWNED(*mutex);
433 TAILQ_INSERT_TAIL(&curthread->pri_mutexq,
435 } else if ((*mutex)->m_owner == curthread)
436 ret = mutex_self_trylock(curthread, *mutex);
438 /* Return a busy error: */
442 /* Trap invalid mutex types: */
444 /* Return an invalid argument error: */
449 /* Unlock the mutex structure: */
450 THR_LOCK_RELEASE(curthread, &(*mutex)->m_lock);
452 /* Return the completion status: */
/*
 * __pthread_mutex_trylock() - application entry point: finish static
 * initialization if needed (non-private), then try the lock.
 */
457 __pthread_mutex_trylock(pthread_mutex_t *mutex)
459 struct pthread *curthread = tls_get_curthread();
463 * If the mutex is statically initialized, perform the dynamic
466 if ((*mutex != NULL) ||
467 ((ret = init_static(curthread, mutex)) == 0))
468 ret = mutex_trylock_common(curthread, mutex);
/*
 * _pthread_mutex_trylock() - libc-internal entry point: identical to
 * the application version except static initialization marks the
 * mutex private (delete safe).
 */
474 _pthread_mutex_trylock(pthread_mutex_t *mutex)
476 struct pthread *curthread = tls_get_curthread();
480 * If the mutex is statically initialized, perform the dynamic
481 * initialization marking the mutex private (delete safe):
483 if ((*mutex != NULL) ||
484 ((ret = init_static_private(curthread, mutex)) == 0))
485 ret = mutex_trylock_common(curthread, mutex);
/*
 * mutex_lock_common() - blocking (optionally timed) lock, shared by
 * the lock and timedlock entry points.
 *
 * abstime == NULL means block forever; otherwise it is validated
 * (non-negative, tv_nsec < 1e9) and converted to a relative timeout
 * with TIMESPEC_SUB before each sleep.
 *
 * Fast path: PTHREAD_PRIO_NONE mutexes go straight to the umtx
 * (trylock, then lock/timedlock), and ownership is recorded on
 * curthread->mutexq.  Priority mutexes loop (see the do/while tail at
 * the bottom) until this thread becomes the owner: contenders enqueue
 * themselves on the mutex wait queue, snapshot curthread->cycle, drop
 * both locks and sleep in _thr_umtx_wait() on the cycle word — a
 * handoff that changes the cycle wakes them.  On wakeup the thread
 * removes itself from the queue if still there before retrying.
 */
491 mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m,
492 const struct timespec * abstime)
494 struct timespec ts, ts2;
498 THR_ASSERT((m != NULL) && (*m != NULL),
499 "Uninitialized mutex in mutex_lock_common");
501 if (abstime != NULL && (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
502 abstime->tv_nsec >= 1000000000))
505 /* Short cut for simple mutex. */
507 if ((*m)->m_protocol == PTHREAD_PRIO_NONE) {
508 /* Default POSIX mutex: */
509 ret = THR_UMTX_TRYLOCK(curthread, &(*m)->m_lock);
511 (*m)->m_owner = curthread;
512 /* Add to the list of owned mutexes: */
513 MUTEX_ASSERT_NOT_OWNED(*m);
514 TAILQ_INSERT_TAIL(&curthread->mutexq,
516 } else if ((*m)->m_owner == curthread) {
517 ret = mutex_self_lock(curthread, *m, abstime);
519 if (abstime == NULL) {
520 THR_UMTX_LOCK(curthread, &(*m)->m_lock);
523 clock_gettime(CLOCK_REALTIME, &ts);
524 TIMESPEC_SUB(&ts2, abstime, &ts);
525 ret = THR_UMTX_TIMEDLOCK(curthread,
526 &(*m)->m_lock, &ts2);
528 * Timed out wait is not restarted if
529 * it was interrupted, not worth to do it.
535 (*m)->m_owner = curthread;
536 /* Add to the list of owned mutexes: */
537 MUTEX_ASSERT_NOT_OWNED(*m);
538 TAILQ_INSERT_TAIL(&curthread->mutexq,
545 /* Code for priority mutex */
548 * Enter a loop waiting to become the mutex owner. We need a
549 * loop in case the waiting thread is interrupted by a signal
550 * to execute a signal handler. It is not (currently) possible
551 * to remain in the waiting queue while running a handler.
552 * Instead, the thread is interrupted and backed out of the
553 * waiting queue prior to executing the signal handler.
556 /* Lock the mutex structure: */
557 THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
560 * If the mutex was statically allocated, properly
561 * initialize the tail queue.
563 if (((*m)->m_flags & MUTEX_FLAGS_INITED) == 0) {
564 TAILQ_INIT(&(*m)->m_queue);
565 (*m)->m_flags |= MUTEX_FLAGS_INITED;
569 /* Process according to mutex type: */
570 switch ((*m)->m_protocol) {
571 /* POSIX priority inheritence mutex: */
572 case PTHREAD_PRIO_INHERIT:
573 /* Check if this mutex is not locked: */
574 if ((*m)->m_owner == NULL) {
575 /* Lock the mutex for this thread: */
576 (*m)->m_owner = curthread;
579 /* Track number of priority mutexes owned: */
580 curthread->priority_mutex_count++;
583 * The mutex takes on attributes of the
584 * running thread when there are no waiters.
585 * Make sure the thread's scheduling lock is
586 * held while priorities are adjusted.
588 (*m)->m_prio = curthread->active_priority;
590 curthread->inherited_priority;
591 curthread->inherited_priority = (*m)->m_prio;
592 THR_UNLOCK(curthread);
594 /* Add to the list of owned mutexes: */
595 MUTEX_ASSERT_NOT_OWNED(*m);
596 TAILQ_INSERT_TAIL(&curthread->pri_mutexq,
599 /* Unlock the mutex structure: */
600 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
601 } else if ((*m)->m_owner == curthread) {
602 ret = mutex_self_lock(curthread, *m, abstime);
604 /* Unlock the mutex structure: */
605 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
608 * Join the queue of threads waiting to lock
609 * the mutex and save a pointer to the mutex.
611 mutex_queue_enq(*m, curthread);
612 curthread->data.mutex = *m;
614 if (curthread->active_priority > (*m)->m_prio)
615 /* Adjust priorities: */
616 mutex_priority_adjust(curthread, *m);
/* Snapshot the wakeup generation; the umtx wait below sleeps only while it is unchanged. */
619 cycle = curthread->cycle;
620 THR_UNLOCK(curthread);
622 /* Unlock the mutex structure: */
623 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
625 clock_gettime(CLOCK_REALTIME, &ts);
626 TIMESPEC_SUB(&ts2, abstime, &ts);
627 ret = _thr_umtx_wait(&curthread->cycle, cycle,
628 &ts2, CLOCK_REALTIME);
632 if (THR_IN_MUTEXQ(curthread)) {
633 THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
634 mutex_queue_remove(*m, curthread);
635 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
638 * Only clear these after assuring the
639 * thread is dequeued.
641 curthread->data.mutex = NULL;
645 /* POSIX priority protection mutex: */
646 case PTHREAD_PRIO_PROTECT:
647 /* Check for a priority ceiling violation: */
648 if (curthread->active_priority > (*m)->m_prio) {
649 /* Unlock the mutex structure: */
650 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
653 /* Check if this mutex is not locked: */
654 else if ((*m)->m_owner == NULL) {
656 * Lock the mutex for the running
659 (*m)->m_owner = curthread;
662 /* Track number of priority mutexes owned: */
663 curthread->priority_mutex_count++;
666 * The running thread inherits the ceiling
667 * priority of the mutex and executes at that
668 * priority. Make sure the thread's
669 * scheduling lock is held while priorities
672 curthread->active_priority = (*m)->m_prio;
674 curthread->inherited_priority;
675 curthread->inherited_priority = (*m)->m_prio;
676 THR_UNLOCK(curthread);
678 /* Add to the list of owned mutexes: */
679 MUTEX_ASSERT_NOT_OWNED(*m);
680 TAILQ_INSERT_TAIL(&curthread->pri_mutexq,
683 /* Unlock the mutex structure: */
684 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
685 } else if ((*m)->m_owner == curthread) {
686 ret = mutex_self_lock(curthread, *m, abstime);
688 /* Unlock the mutex structure: */
689 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
692 * Join the queue of threads waiting to lock
693 * the mutex and save a pointer to the mutex.
695 mutex_queue_enq(*m, curthread);
696 curthread->data.mutex = *m;
698 /* Clear any previous error: */
699 curthread->error = 0;
702 cycle = curthread->cycle;
703 THR_UNLOCK(curthread);
705 /* Unlock the mutex structure: */
706 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
708 clock_gettime(CLOCK_REALTIME, &ts);
709 TIMESPEC_SUB(&ts2, abstime, &ts);
710 ret = _thr_umtx_wait(&curthread->cycle, cycle,
711 &ts2, CLOCK_REALTIME);
715 curthread->data.mutex = NULL;
716 if (THR_IN_MUTEXQ(curthread)) {
717 THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
718 mutex_queue_remove(*m, curthread);
719 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
722 * Only clear these after assuring the
723 * thread is dequeued.
725 curthread->data.mutex = NULL;
728 * The threads priority may have changed while
729 * waiting for the mutex causing a ceiling
732 ret = curthread->error;
733 curthread->error = 0;
737 /* Trap invalid mutex types: */
739 /* Unlock the mutex structure: */
740 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
742 /* Return an invalid argument error: */
747 } while (((*m)->m_owner != curthread) && (ret == 0));
749 /* Return the completion status: */
/*
 * __pthread_mutex_lock() - application entry point: finish static
 * initialization if needed (non-private), then block until locked.
 */
754 __pthread_mutex_lock(pthread_mutex_t *m)
756 struct pthread *curthread;
761 curthread = tls_get_curthread();
764 * If the mutex is statically initialized, perform the dynamic
767 if ((*m != NULL) || ((ret = init_static(curthread, m)) == 0))
768 ret = mutex_lock_common(curthread, m, NULL);
/*
 * _pthread_mutex_lock() - libc-internal entry point: same as the
 * application version but static initialization marks the mutex
 * private (delete safe).
 */
774 _pthread_mutex_lock(pthread_mutex_t *m)
776 struct pthread *curthread;
781 curthread = tls_get_curthread();
784 * If the mutex is statically initialized, perform the dynamic
785 * initialization marking it private (delete safe):
788 ((ret = init_static_private(curthread, m)) == 0))
789 ret = mutex_lock_common(curthread, m, NULL);
/*
 * __pthread_mutex_timedlock() - application entry point: like
 * __pthread_mutex_lock() but gives up at the absolute CLOCK_REALTIME
 * deadline abs_timeout.
 */
795 __pthread_mutex_timedlock(pthread_mutex_t *m,
796 const struct timespec *abs_timeout)
798 struct pthread *curthread;
803 curthread = tls_get_curthread();
806 * If the mutex is statically initialized, perform the dynamic
809 if ((*m != NULL) || ((ret = init_static(curthread, m)) == 0))
810 ret = mutex_lock_common(curthread, m, abs_timeout);
/*
 * _pthread_mutex_timedlock() - libc-internal timed lock; static
 * initialization marks the mutex private (delete safe).
 */
816 _pthread_mutex_timedlock(pthread_mutex_t *m,
817 const struct timespec *abs_timeout)
819 struct pthread *curthread;
824 curthread = tls_get_curthread();
827 * If the mutex is statically initialized, perform the dynamic
828 * initialization marking it private (delete safe):
831 ((ret = init_static_private(curthread, m)) == 0))
832 ret = mutex_lock_common(curthread, m, abs_timeout);
/*
 * _pthread_mutex_unlock() - public unlock: release without touching
 * the reference count.
 */
838 _pthread_mutex_unlock(pthread_mutex_t *m)
840 return (mutex_unlock_common(m, /* add reference */ 0));
/*
 * _mutex_cv_unlock() - unlock on behalf of a condition-variable wait;
 * bumps m_refcount so the mutex cannot be destroyed while the waiter
 * still references it.
 */
844 _mutex_cv_unlock(pthread_mutex_t *m)
846 return (mutex_unlock_common(m, /* add reference */ 1));
/*
 * _mutex_cv_lock() - re-acquire the mutex after a condition-variable
 * wait.  On success the refcount taken by _mutex_cv_unlock() is
 * presumably dropped on a line omitted from this excerpt - TODO
 * confirm.
 */
850 _mutex_cv_lock(pthread_mutex_t *m)
852 struct pthread *curthread;
855 curthread = tls_get_curthread();
856 if ((ret = _pthread_mutex_lock(m)) == 0)
/*
 * mutex_self_trylock() - trylock when the caller already owns the
 * mutex.  ERRORCHECK/NORMAL fail (EBUSY, per POSIX trylock semantics;
 * the return line is omitted from this excerpt); RECURSIVE bumps the
 * lock count after guarding against counter overflow.
 */
862 mutex_self_trylock(struct pthread *curthread, pthread_mutex_t m)
867 /* case PTHREAD_MUTEX_DEFAULT: */
868 case PTHREAD_MUTEX_ERRORCHECK:
869 case PTHREAD_MUTEX_NORMAL:
873 case PTHREAD_MUTEX_RECURSIVE:
874 /* Increment the lock count: */
875 if (m->m_count + 1 > 0) {
883 /* Trap invalid mutex types; */
/*
 * mutex_self_lock() - blocking lock when the caller already owns the
 * mutex.
 *
 * ERRORCHECK: with a timeout, sleep out the remaining time (then
 * presumably ETIMEDOUT); without one, POSIX mandates EDEADLK (see the
 * comment below; the return lines are omitted from this excerpt).
 * NORMAL: deliberately deadlock by sleeping forever in a nanosleep
 * loop (releasing the structure lock first for priority mutexes), or
 * sleep out the timeout when one was given.
 * RECURSIVE: bump the count, guarding against overflow.
 */
891 mutex_self_lock(struct pthread *curthread, pthread_mutex_t m,
892 const struct timespec *abstime)
894 struct timespec ts1, ts2;
898 /* case PTHREAD_MUTEX_DEFAULT: */
899 case PTHREAD_MUTEX_ERRORCHECK:
901 clock_gettime(CLOCK_REALTIME, &ts1);
902 TIMESPEC_SUB(&ts2, abstime, &ts1);
903 __sys_nanosleep(&ts2, NULL);
907 * POSIX specifies that mutexes should return
908 * EDEADLK if a recursive lock is detected.
914 case PTHREAD_MUTEX_NORMAL:
916 * What SS2 define as a 'normal' mutex. Intentionally
917 * deadlock on attempts to get a lock you already own.
920 if (m->m_protocol != PTHREAD_PRIO_NONE) {
921 /* Unlock the mutex structure: */
922 THR_LOCK_RELEASE(curthread, &m->m_lock);
925 clock_gettime(CLOCK_REALTIME, &ts1);
926 TIMESPEC_SUB(&ts2, abstime, &ts1);
927 __sys_nanosleep(&ts2, NULL);
933 __sys_nanosleep(&ts1, NULL);
937 case PTHREAD_MUTEX_RECURSIVE:
938 /* Increment the lock count: */
939 if (m->m_count + 1 > 0) {
947 /* Trap invalid mutex types; */
/*
 * mutex_unlock_common() - release a mutex; shared by the public
 * unlock and the condvar helper.  add_reference != 0 additionally
 * bumps m_refcount (condvar keeps the mutex alive across the wait).
 *
 * Fast path: PTHREAD_PRIO_NONE checks ownership (EPERM is implied;
 * the return line is omitted from this excerpt), unwinds one level of
 * a recursive lock if m_count > 0, otherwise clears the owner,
 * unlinks from curthread->mutexq and releases the umtx, waking the
 * next waiter.
 *
 * Priority mutexes do the same under the structure lock, but also
 * restore the thread's inherited/active priority under its scheduling
 * lock, decrement priority_mutex_count, and hand ownership directly
 * to the highest-priority waiter via mutex_handoff() (the returned
 * tid is presumably used to wake that thread on an omitted line).
 */
955 mutex_unlock_common(pthread_mutex_t *m, int add_reference)
957 struct pthread *curthread = tls_get_curthread();
961 if (m == NULL || *m == NULL)
964 /* Short cut for simple mutex. */
966 if ((*m)->m_protocol == PTHREAD_PRIO_NONE) {
968 * Check if the running thread is not the owner of the
971 if (__predict_false((*m)->m_owner != curthread)) {
973 } else if (__predict_false(
974 (*m)->m_type == PTHREAD_MUTEX_RECURSIVE &&
975 (*m)->m_count > 0)) {
976 /* Decrement the count: */
982 * Clear the count in case this is a recursive
986 (*m)->m_owner = NULL;
987 /* Remove the mutex from the threads queue. */
988 MUTEX_ASSERT_IS_OWNED(*m);
989 TAILQ_REMOVE(&curthread->mutexq, (*m), m_qe);
994 * Hand off the mutex to the next waiting
997 _thr_umtx_unlock(&(*m)->m_lock, curthread->tid);
1002 /* Code for priority mutex */
1004 /* Lock the mutex structure: */
1005 THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
1007 /* Process according to mutex type: */
1008 switch ((*m)->m_protocol) {
1009 /* POSIX priority inheritence mutex: */
1010 case PTHREAD_PRIO_INHERIT:
1012 * Check if the running thread is not the owner of the
1015 if ((*m)->m_owner != curthread)
1017 else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
1018 ((*m)->m_count > 0))
1019 /* Decrement the count: */
1023 * Clear the count in case this is recursive
1029 * Restore the threads inherited priority and
1030 * recompute the active priority (being careful
1031 * not to override changes in the threads base
1032 * priority subsequent to locking the mutex).
1034 THR_LOCK(curthread);
1035 curthread->inherited_priority =
1037 curthread->active_priority =
1038 MAX(curthread->inherited_priority,
1039 curthread->base_priority);
1042 * This thread now owns one less priority mutex.
1044 curthread->priority_mutex_count--;
1045 THR_UNLOCK(curthread);
1047 /* Remove the mutex from the threads queue. */
1048 MUTEX_ASSERT_IS_OWNED(*m);
1049 TAILQ_REMOVE(&(*m)->m_owner->pri_mutexq,
1051 MUTEX_INIT_LINK(*m);
1054 * Hand off the mutex to the next waiting
1057 tid = mutex_handoff(curthread, *m);
1061 /* POSIX priority ceiling mutex: */
1062 case PTHREAD_PRIO_PROTECT:
1064 * Check if the running thread is not the owner of the
1067 if ((*m)->m_owner != curthread)
1069 else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
1070 ((*m)->m_count > 0))
1071 /* Decrement the count: */
1075 * Clear the count in case this is a recursive
1081 * Restore the threads inherited priority and
1082 * recompute the active priority (being careful
1083 * not to override changes in the threads base
1084 * priority subsequent to locking the mutex).
1086 THR_LOCK(curthread);
1087 curthread->inherited_priority =
1089 curthread->active_priority =
1090 MAX(curthread->inherited_priority,
1091 curthread->base_priority);
1094 * This thread now owns one less priority mutex.
1096 curthread->priority_mutex_count--;
1097 THR_UNLOCK(curthread);
1099 /* Remove the mutex from the threads queue. */
1100 MUTEX_ASSERT_IS_OWNED(*m);
1101 TAILQ_REMOVE(&(*m)->m_owner->pri_mutexq,
1103 MUTEX_INIT_LINK(*m);
1106 * Hand off the mutex to the next waiting
1109 tid = mutex_handoff(curthread, *m);
1113 /* Trap invalid mutex types: */
1115 /* Return an invalid argument error: */
1120 if ((ret == 0) && (add_reference != 0))
1121 /* Increment the reference count: */
1124 /* Unlock the mutex structure: */
1125 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
1128 /* Return the completion status: */
/*
 * _mutex_notify_priochange() - propagate a base-priority change of
 * 'pthread' into the priority-mutex machinery (owned mutexes are
 * rescanned; if propagate_prio, a mutex the thread is waiting on is
 * re-sorted and its priority adjusted).  Original documentation
 * follows.
 */
1134 * This function is called when a change in base priority occurs for
1135 * a thread that is holding or waiting for a priority protection or
1136 * inheritence mutex. A change in a threads base priority can effect
1137 * changes to active priorities of other threads and to the ordering
1138 * of mutex locking by waiting threads.
1140 * This must be called without the target thread's scheduling lock held.
1143 _mutex_notify_priochange(struct pthread *curthread, struct pthread *pthread,
1146 struct pthread_mutex *m;
1148 /* Adjust the priorites of any owned priority mutexes: */
1149 if (pthread->priority_mutex_count > 0) {
1151 * Rescan the mutexes owned by this thread and correct
1152 * their priorities to account for this threads change
1153 * in priority. This has the side effect of changing
1154 * the threads active priority.
1156 * Be sure to lock the first mutex in the list of owned
1157 * mutexes. This acts as a barrier against another
1158 * simultaneous call to change the threads priority
1159 * and from the owning thread releasing the mutex.
1161 m = TAILQ_FIRST(&pthread->pri_mutexq);
/* Re-check under the lock: the list head may have changed while blocking. */
1163 THR_LOCK_ACQUIRE(curthread, &m->m_lock);
1165 * Make sure the thread still owns the lock.
1167 if (m == TAILQ_FIRST(&pthread->pri_mutexq))
1168 mutex_rescan_owned(curthread, pthread,
1169 /* rescan all owned */ NULL);
1170 THR_LOCK_RELEASE(curthread, &m->m_lock);
1175 * If this thread is waiting on a priority inheritence mutex,
1176 * check for priority adjustments. A change in priority can
1177 * also cause a ceiling violation(*) for a thread waiting on
1178 * a priority protection mutex; we don't perform the check here
1179 * as it is done in pthread_mutex_unlock.
1181 * (*) It should be noted that a priority change to a thread
1182 * _after_ taking and owning a priority ceiling mutex
1183 * does not affect ownership of that mutex; the ceiling
1184 * priority is only checked before mutex ownership occurs.
1186 if (propagate_prio != 0) {
1188 * Lock the thread's scheduling queue. This is a bit
1189 * convoluted; the "in synchronization queue flag" can
1190 * only be cleared with both the thread's scheduling and
1191 * mutex locks held. The thread's pointer to the wanted
1192 * mutex is guaranteed to be valid during this time.
1194 THR_THREAD_LOCK(curthread, pthread);
1196 if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) == 0) ||
1197 ((m = pthread->data.mutex) == NULL))
1198 THR_THREAD_UNLOCK(curthread, pthread);
1201 * This thread is currently waiting on a mutex; unlock
1202 * the scheduling queue lock and lock the mutex. We
1203 * can't hold both at the same time because the locking
1204 * order could cause a deadlock.
1206 THR_THREAD_UNLOCK(curthread, pthread);
1207 THR_LOCK_ACQUIRE(curthread, &m->m_lock);
1210 * Check to make sure this thread is still in the
1211 * same state (the lock above can yield the CPU to
1212 * another thread or the thread may be running on
1215 if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
1216 (pthread->data.mutex == m)) {
1218 * Remove and reinsert this thread into
1219 * the list of waiting threads to preserve
1220 * decreasing priority order.
1222 mutex_queue_remove(m, pthread);
1223 mutex_queue_enq(m, pthread);
1225 if (m->m_protocol == PTHREAD_PRIO_INHERIT)
1226 /* Adjust priorities: */
1227 mutex_priority_adjust(curthread, m);
1230 /* Unlock the mutex structure: */
1231 THR_LOCK_RELEASE(curthread, &m->m_lock);
/*
 * mutex_priority_adjust() - recompute a PRIO_INHERIT mutex's priority
 * and walk the ownership chain, boosting each successive owner until
 * no further propagation is needed.  Original documentation follows.
 */
1237 * Called when a new thread is added to the mutex waiting queue or
1238 * when a threads priority changes that is already in the mutex
1241 * This must be called with the mutex locked by the current thread.
1244 mutex_priority_adjust(struct pthread *curthread, pthread_mutex_t mutex)
1246 pthread_mutex_t m = mutex;
1247 struct pthread *pthread_next, *pthread = mutex->m_owner;
1248 int done, temp_prio;
1251 * Calculate the mutex priority as the maximum of the highest
1252 * active priority of any waiting threads and the owning threads
1253 * active priority(*).
1255 * (*) Because the owning threads current active priority may
1256 * reflect priority inherited from this mutex (and the mutex
1257 * priority may have changed) we must recalculate the active
1258 * priority based on the threads saved inherited priority
1259 * and its base priority.
1261 pthread_next = TAILQ_FIRST(&m->m_queue); /* should never be NULL */
1262 temp_prio = MAX(pthread_next->active_priority,
1263 MAX(m->m_saved_prio, pthread->base_priority));
1265 /* See if this mutex really needs adjusting: */
1266 if (temp_prio == m->m_prio)
1267 /* No need to propagate the priority: */
1270 /* Set new priority of the mutex: */
1271 m->m_prio = temp_prio;
1274 * Don't unlock the mutex passed in as an argument. It is
1275 * expected to be locked and unlocked by the caller.
1280 * Save the threads priority before rescanning the
1283 temp_prio = pthread->active_priority;
1286 * Fix the priorities for all mutexes held by the owning
1287 * thread since taking this mutex. This also has a
1288 * potential side-effect of changing the threads priority.
1290 * At this point the mutex is locked by the current thread.
1291 * The owning thread can't release the mutex until it is
1292 * unlocked, so we should be able to safely walk its list
1295 mutex_rescan_owned(curthread, pthread, m);
1298 * If this isn't the first time through the loop,
1299 * the current mutex needs to be unlocked.
1302 THR_LOCK_RELEASE(curthread, &m->m_lock);
1304 /* Assume we're done unless told otherwise: */
1308 * If the thread is currently waiting on a mutex, check
1309 * to see if the threads new priority has affected the
1310 * priority of the mutex.
1312 if ((temp_prio != pthread->active_priority) &&
1313 ((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
1314 ((m = pthread->data.mutex) != NULL) &&
1315 (m->m_protocol == PTHREAD_PRIO_INHERIT)) {
1316 /* Lock the mutex structure: */
1317 THR_LOCK_ACQUIRE(curthread, &m->m_lock);
1320 * Make sure the thread is still waiting on the
1323 if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
1324 (m == pthread->data.mutex)) {
1326 * The priority for this thread has changed.
1327 * Remove and reinsert this thread into the
1328 * list of waiting threads to preserve
1329 * decreasing priority order.
1331 mutex_queue_remove(m, pthread);
1332 mutex_queue_enq(m, pthread);
1335 * Grab the waiting thread with highest
1338 pthread_next = TAILQ_FIRST(&m->m_queue);
1341 * Calculate the mutex priority as the maximum
1342 * of the highest active priority of any
1343 * waiting threads and the owning threads
1346 temp_prio = MAX(pthread_next->active_priority,
1347 MAX(m->m_saved_prio,
1348 m->m_owner->base_priority));
1350 if (temp_prio != m->m_prio) {
1352 * The priority needs to be propagated
1353 * to the mutex this thread is waiting
1354 * on and up to the owner of that mutex.
1356 m->m_prio = temp_prio;
1357 pthread = m->m_owner;
1359 /* We're not done yet: */
1363 /* Only release the mutex if we're done: */
1365 THR_LOCK_RELEASE(curthread, &m->m_lock);
1367 } while (done == 0);
1371 mutex_rescan_owned(struct pthread *curthread, struct pthread *pthread,
1372 struct pthread_mutex *mutex)
1374 struct pthread_mutex *m;
1375 struct pthread *pthread_next;
1376 int active_prio, inherited_prio;
1379 * Start walking the mutexes the thread has taken since
1380 * taking this mutex.
1382 if (mutex == NULL) {
1384 * A null mutex means start at the beginning of the owned
1387 m = TAILQ_FIRST(&pthread->pri_mutexq);
1389 /* There is no inherited priority yet. */
1393 * The caller wants to start after a specific mutex. It
1394 * is assumed that this mutex is a priority inheritence
1395 * mutex and that its priority has been correctly
1398 m = TAILQ_NEXT(mutex, m_qe);
1400 /* Start inheriting priority from the specified mutex. */
1401 inherited_prio = mutex->m_prio;
1403 active_prio = MAX(inherited_prio, pthread->base_priority);
1405 for (; m != NULL; m = TAILQ_NEXT(m, m_qe)) {
1407 * We only want to deal with priority inheritence
1408 * mutexes. This might be optimized by only placing
1409 * priority inheritence mutexes into the owned mutex
1410 * list, but it may prove to be useful having all
1411 * owned mutexes in this list. Consider a thread
1412 * exiting while holding mutexes...
1414 if (m->m_protocol == PTHREAD_PRIO_INHERIT) {
1416 * Fix the owners saved (inherited) priority to
1417 * reflect the priority of the previous mutex.
1419 m->m_saved_prio = inherited_prio;
1421 if ((pthread_next = TAILQ_FIRST(&m->m_queue)) != NULL)
1422 /* Recalculate the priority of the mutex: */
1423 m->m_prio = MAX(active_prio,
1424 pthread_next->active_priority);
1426 m->m_prio = active_prio;
1428 /* Recalculate new inherited and active priorities: */
1429 inherited_prio = m->m_prio;
1430 active_prio = MAX(m->m_prio, pthread->base_priority);
1435 * Fix the threads inherited priority and recalculate its
1438 pthread->inherited_priority = inherited_prio;
1439 active_prio = MAX(inherited_prio, pthread->base_priority);
1441 if (active_prio != pthread->active_priority) {
1442 /* Lock the thread's scheduling queue: */
1443 THR_THREAD_LOCK(curthread, pthread);
1445 /* if ((pthread->flags & THR_FLAGS_IN_RUNQ) == 0) */
1448 * This thread is not in a run queue. Just set
1449 * its active priority.
1451 pthread->active_priority = active_prio;
1455 * This thread is in a run queue. Remove it from
1456 * the queue before changing its priority:
1458 /* THR_RUNQ_REMOVE(pthread);*/
1460 * POSIX states that if the priority is being
1461 * lowered, the thread must be inserted at the
1462 * head of the queue for its priority if it owns
1463 * any priority protection or inheritence mutexes.
1465 if ((active_prio < pthread->active_priority) &&
1466 (pthread->priority_mutex_count > 0)) {
1467 /* Set the new active priority. */
1468 pthread->active_priority = active_prio;
1469 /* THR_RUNQ_INSERT_HEAD(pthread); */
1471 /* Set the new active priority. */
1472 pthread->active_priority = active_prio;
1473 /* THR_RUNQ_INSERT_TAIL(pthread);*/
1476 THR_THREAD_UNLOCK(curthread, pthread);
1481 _mutex_unlock_private(pthread_t pthread)
1483 struct pthread_mutex *m, *m_next;
1485 for (m = TAILQ_FIRST(&pthread->pri_mutexq); m != NULL; m = m_next) {
1486 m_next = TAILQ_NEXT(m, m_qe);
1487 if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
1488 pthread_mutex_unlock(&m);
1493 * Dequeue a waiting thread from the head of a mutex queue in descending
1496 * In order to properly dequeue a thread from the mutex queue and
1497 * make it runnable without the possibility of errant wakeups, it
1498 * is necessary to lock the thread's scheduling queue while also
1499 * holding the mutex lock.
1502 mutex_handoff(struct pthread *curthread, struct pthread_mutex *mutex)
1504 struct pthread *pthread;
1507 /* Keep dequeueing until we find a valid thread: */
1508 mutex->m_owner = NULL;
1509 pthread = TAILQ_FIRST(&mutex->m_queue);
1510 while (pthread != NULL) {
1511 /* Take the thread's scheduling lock: */
1512 THR_THREAD_LOCK(curthread, pthread);
1514 /* Remove the thread from the mutex queue: */
1515 TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
1516 pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;
1519 * Only exit the loop if the thread hasn't been
1522 switch (mutex->m_protocol) {
1523 case PTHREAD_PRIO_NONE:
1525 * Assign the new owner and add the mutex to the
1526 * thread's list of owned mutexes.
1528 mutex->m_owner = pthread;
1529 TAILQ_INSERT_TAIL(&pthread->pri_mutexq, mutex, m_qe);
1532 case PTHREAD_PRIO_INHERIT:
1534 * Assign the new owner and add the mutex to the
1535 * thread's list of owned mutexes.
1537 mutex->m_owner = pthread;
1538 TAILQ_INSERT_TAIL(&pthread->pri_mutexq, mutex, m_qe);
1540 /* Track number of priority mutexes owned: */
1541 pthread->priority_mutex_count++;
1544 * Set the priority of the mutex. Since our waiting
1545 * threads are in descending priority order, the
1546 * priority of the mutex becomes the active priority
1547 * of the thread we just dequeued.
1549 mutex->m_prio = pthread->active_priority;
1551 /* Save the owning threads inherited priority: */
1552 mutex->m_saved_prio = pthread->inherited_priority;
1555 * The owning threads inherited priority now becomes
1556 * his active priority (the priority of the mutex).
1558 pthread->inherited_priority = mutex->m_prio;
1561 case PTHREAD_PRIO_PROTECT:
1562 if (pthread->active_priority > mutex->m_prio) {
1564 * Either the mutex ceiling priority has
1565 * been lowered and/or this threads priority
1566 * has been raised subsequent to the thread
1567 * being queued on the waiting list.
1569 pthread->error = EINVAL;
1573 * Assign the new owner and add the mutex
1574 * to the thread's list of owned mutexes.
1576 mutex->m_owner = pthread;
1577 TAILQ_INSERT_TAIL(&pthread->pri_mutexq,
1580 /* Track number of priority mutexes owned: */
1581 pthread->priority_mutex_count++;
1584 * Save the owning threads inherited
1587 mutex->m_saved_prio =
1588 pthread->inherited_priority;
1591 * The owning thread inherits the ceiling
1592 * priority of the mutex and executes at
1595 pthread->inherited_priority = mutex->m_prio;
1596 pthread->active_priority = mutex->m_prio;
1602 /* Make the thread runnable and unlock the scheduling queue: */
1604 _thr_umtx_wake(&pthread->cycle, 1);
1606 THR_THREAD_UNLOCK(curthread, pthread);
1607 if (mutex->m_owner == pthread)
1608 /* We're done; a valid owner was found. */
1611 /* Get the next thread from the waiting queue: */
1612 pthread = TAILQ_NEXT(pthread, sqe);
1615 if ((pthread == NULL) && (mutex->m_protocol == PTHREAD_PRIO_INHERIT))
1616 /* This mutex has no priority: */
1623 * Dequeue a waiting thread from the head of a mutex queue in descending
1627 mutex_queue_deq(struct pthread_mutex *mutex)
1631 while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) {
1632 TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
1633 pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;
1641 * Remove a waiting thread from a mutex queue in descending priority order.
1644 mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread)
1646 if ((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) {
1647 TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
1648 pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;
1653 * Enqueue a waiting thread to a queue in descending priority order.
1656 mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
1658 pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head);
1660 THR_ASSERT_NOT_IN_SYNCQ(pthread);
1662 * For the common case of all threads having equal priority,
1663 * we perform a quick check against the priority of the thread
1664 * at the tail of the queue.
1666 if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
1667 TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, sqe);
1669 tid = TAILQ_FIRST(&mutex->m_queue);
1670 while (pthread->active_priority <= tid->active_priority)
1671 tid = TAILQ_NEXT(tid, sqe);
1672 TAILQ_INSERT_BEFORE(tid, pthread, sqe);
1674 pthread->sflags |= THR_FLAGS_IN_SYNCQ;