lib/libc_r/uthread/uthread_mutex.c
/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/lib/libc_r/uthread/uthread_mutex.c,v 1.20.2.8 2002/10/22 14:44:03 fjoe Exp $
 */
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <pthread.h>
#include "pthread_private.h"

#if defined(_PTHREADS_INVARIANTS)
#define _MUTEX_INIT_LINK(m)             do {            \
        (m)->m_qe.tqe_prev = NULL;                      \
        (m)->m_qe.tqe_next = NULL;                      \
} while (0)
#define _MUTEX_ASSERT_IS_OWNED(m)       do {            \
        if ((m)->m_qe.tqe_prev == NULL)                 \
                PANIC("mutex is not on list");          \
} while (0)
#define _MUTEX_ASSERT_NOT_OWNED(m)      do {            \
        if (((m)->m_qe.tqe_prev != NULL) ||             \
            ((m)->m_qe.tqe_next != NULL))               \
                PANIC("mutex is on list");              \
} while (0)
#else
#define _MUTEX_INIT_LINK(m)
#define _MUTEX_ASSERT_IS_OWNED(m)
#define _MUTEX_ASSERT_NOT_OWNED(m)
#endif

/*
 * Prototypes
 */
static inline int       mutex_self_trylock(pthread_mutex_t);
static inline int       mutex_self_lock(pthread_mutex_t);
static inline int       mutex_unlock_common(pthread_mutex_t *, int);
static void             mutex_priority_adjust(pthread_mutex_t);
static void             mutex_rescan_owned (pthread_t, pthread_mutex_t);
static inline pthread_t mutex_queue_deq(pthread_mutex_t);
static inline void      mutex_queue_remove(pthread_mutex_t, pthread_t);
static inline void      mutex_queue_enq(pthread_mutex_t, pthread_t);


static spinlock_t static_init_lock = _SPINLOCK_INITIALIZER;

/* Reinitialize a mutex to defaults. */
int
_mutex_reinit(pthread_mutex_t * mutex)
{
        int     ret = 0;

        if (mutex == NULL)
                ret = EINVAL;
        else if (*mutex == NULL)
                ret = pthread_mutex_init(mutex, NULL);
        else {
                /*
                 * Initialize the mutex structure:
                 */
                (*mutex)->m_type = PTHREAD_MUTEX_DEFAULT;
                (*mutex)->m_protocol = PTHREAD_PRIO_NONE;
                TAILQ_INIT(&(*mutex)->m_queue);
                (*mutex)->m_owner = NULL;
                (*mutex)->m_data.m_count = 0;
                (*mutex)->m_flags &= MUTEX_FLAGS_PRIVATE;
                (*mutex)->m_flags |= MUTEX_FLAGS_INITED;
                (*mutex)->m_refcount = 0;
                (*mutex)->m_prio = 0;
                (*mutex)->m_saved_prio = 0;
                _MUTEX_INIT_LINK(*mutex);
                memset(&(*mutex)->lock, 0, sizeof((*mutex)->lock));
        }
        return (ret);
}

int
_pthread_mutex_init(pthread_mutex_t * __restrict mutex,
    const pthread_mutexattr_t * __restrict mutex_attr)
{
        enum pthread_mutextype  type;
        int             protocol;
        int             ceiling;
        pthread_mutex_t pmutex;
        int             ret = 0;

        if (mutex == NULL)
                ret = EINVAL;

        /* Check if default mutex attributes: */
        else if (mutex_attr == NULL || *mutex_attr == NULL) {
                /* Default to an (error checking) POSIX mutex: */
                type = PTHREAD_MUTEX_ERRORCHECK;
                protocol = PTHREAD_PRIO_NONE;
                ceiling = PTHREAD_MAX_PRIORITY;
        }

        /* Check mutex type: */
        else if (((*mutex_attr)->m_type < PTHREAD_MUTEX_ERRORCHECK) ||
            ((*mutex_attr)->m_type >= PTHREAD_MUTEX_TYPE_MAX))
                /* Return an invalid argument error: */
                ret = EINVAL;

        /* Check mutex protocol: */
        else if (((*mutex_attr)->m_protocol < PTHREAD_PRIO_NONE) ||
            ((*mutex_attr)->m_protocol > PTHREAD_MUTEX_RECURSIVE))
                /* Return an invalid argument error: */
                ret = EINVAL;

        else {
                /* Use the requested mutex type and protocol: */
                type = (*mutex_attr)->m_type;
                protocol = (*mutex_attr)->m_protocol;
                ceiling = (*mutex_attr)->m_ceiling;
        }

        /* Check no errors so far: */
        if (ret == 0) {
                if ((pmutex = (pthread_mutex_t)
                    malloc(sizeof(struct pthread_mutex))) == NULL)
                        ret = ENOMEM;
                else {
                        /* Reset the mutex flags: */
                        pmutex->m_flags = 0;

                        /* Process according to mutex type: */
                        switch (type) {
                        /* case PTHREAD_MUTEX_DEFAULT: */
                        case PTHREAD_MUTEX_ERRORCHECK:
                        case PTHREAD_MUTEX_NORMAL:
                                /* Nothing to do here. */
                                break;

                        /* Single UNIX Spec 2 recursive mutex: */
                        case PTHREAD_MUTEX_RECURSIVE:
                                /* Reset the mutex count: */
                                pmutex->m_data.m_count = 0;
                                break;

                        /* Trap invalid mutex types: */
                        default:
                                /* Return an invalid argument error: */
                                ret = EINVAL;
                                break;
                        }
                        if (ret == 0) {
                                /* Initialise the rest of the mutex: */
                                TAILQ_INIT(&pmutex->m_queue);
                                pmutex->m_flags |= MUTEX_FLAGS_INITED;
                                pmutex->m_owner = NULL;
                                pmutex->m_type = type;
                                pmutex->m_protocol = protocol;
                                pmutex->m_refcount = 0;
                                if (protocol == PTHREAD_PRIO_PROTECT)
                                        pmutex->m_prio = ceiling;
                                else
                                        pmutex->m_prio = 0;
                                pmutex->m_saved_prio = 0;
                                _MUTEX_INIT_LINK(pmutex);
                                memset(&pmutex->lock, 0, sizeof(pmutex->lock));
                                *mutex = pmutex;
                        } else {
                                free(pmutex);
                                *mutex = NULL;
                        }
                }
        }
        /* Return the completion status: */
        return(ret);
}
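
/*
 * Illustrative usage, not part of this file: an application normally
 * reaches _pthread_mutex_init() through the standard pthread attribute
 * API.  A minimal sketch, assuming only the portable interfaces:
 *
 *	pthread_mutexattr_t attr;
 *	pthread_mutex_t m;
 *
 *	pthread_mutexattr_init(&attr);
 *	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
 *	pthread_mutex_init(&m, &attr);
 *	pthread_mutexattr_destroy(&attr);
 *	...
 *	pthread_mutex_destroy(&m);
 */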

int
_pthread_mutex_destroy(pthread_mutex_t * mutex)
{
        int     ret = 0;

        if (mutex == NULL || *mutex == NULL)
                ret = EINVAL;
        else {
                /* Lock the mutex structure: */
                _SPINLOCK(&(*mutex)->lock);

                /*
                 * Check to see if this mutex is in use:
                 */
                if (((*mutex)->m_owner != NULL) ||
                    (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) ||
                    ((*mutex)->m_refcount != 0)) {
                        ret = EBUSY;

                        /* Unlock the mutex structure: */
                        _SPINUNLOCK(&(*mutex)->lock);
                }
                else {
                        /*
                         * Free the memory allocated for the mutex
                         * structure:
                         */
                        _MUTEX_ASSERT_NOT_OWNED(*mutex);
                        free(*mutex);

                        /*
                         * Leave the caller's pointer NULL now that
                         * the mutex has been destroyed:
                         */
                        *mutex = NULL;
                }
        }

        /* Return the completion status: */
        return (ret);
}

static int
init_static(pthread_mutex_t *mutex)
{
        int     ret;

        _SPINLOCK(&static_init_lock);

        if (*mutex == NULL)
                ret = pthread_mutex_init(mutex, NULL);
        else
                ret = 0;

        _SPINUNLOCK(&static_init_lock);

        return(ret);
}
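
/*
 * Note on static initialization (a sketch, assuming the usual libc_r
 * convention that PTHREAD_MUTEX_INITIALIZER leaves the pointer NULL):
 *
 *	static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
 *
 * The first lock or trylock of such a mutex sees *mutex == NULL and
 * funnels through init_static() above, which allocates and initializes
 * the mutex under static_init_lock so that concurrent first lockers
 * do not race.
 */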

int
_pthread_mutex_trylock(pthread_mutex_t * mutex)
{
        struct pthread  *curthread = _get_curthread();
        int     ret = 0;

        if (mutex == NULL)
                ret = EINVAL;

        /*
         * If the mutex is statically initialized, perform the dynamic
         * initialization:
         */
        else if (*mutex != NULL || (ret = init_static(mutex)) == 0) {
                /*
                 * Defer signals to protect the scheduling queues from
                 * access by the signal handler:
                 */
                _thread_kern_sig_defer();

                /* Lock the mutex structure: */
                _SPINLOCK(&(*mutex)->lock);

                /*
                 * If the mutex was statically allocated, properly
                 * initialize the tail queue.
                 */
                if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
                        TAILQ_INIT(&(*mutex)->m_queue);
                        _MUTEX_INIT_LINK(*mutex);
                        (*mutex)->m_flags |= MUTEX_FLAGS_INITED;
                }

                /* Process according to mutex protocol: */
                switch ((*mutex)->m_protocol) {
                /* Default POSIX mutex: */
                case PTHREAD_PRIO_NONE:
                        /* Check if this mutex is not locked: */
                        if ((*mutex)->m_owner == NULL) {
                                /* Lock the mutex for the running thread: */
                                (*mutex)->m_owner = curthread;

                                /* Add to the list of owned mutexes: */
                                _MUTEX_ASSERT_NOT_OWNED(*mutex);
                                TAILQ_INSERT_TAIL(&curthread->mutexq,
                                    (*mutex), m_qe);
                        } else if ((*mutex)->m_owner == curthread)
                                ret = mutex_self_trylock(*mutex);
                        else
                                /* Return a busy error: */
                                ret = EBUSY;
                        break;

                /* POSIX priority inheritance mutex: */
                case PTHREAD_PRIO_INHERIT:
                        /* Check if this mutex is not locked: */
                        if ((*mutex)->m_owner == NULL) {
                                /* Lock the mutex for the running thread: */
                                (*mutex)->m_owner = curthread;

                                /* Track number of priority mutexes owned: */
                                curthread->priority_mutex_count++;

                                /*
                                 * The mutex takes on the attributes of the
                                 * running thread when there are no waiters.
                                 */
                                (*mutex)->m_prio = curthread->active_priority;
                                (*mutex)->m_saved_prio =
                                    curthread->inherited_priority;

                                /* Add to the list of owned mutexes: */
                                _MUTEX_ASSERT_NOT_OWNED(*mutex);
                                TAILQ_INSERT_TAIL(&curthread->mutexq,
                                    (*mutex), m_qe);
                        } else if ((*mutex)->m_owner == curthread)
                                ret = mutex_self_trylock(*mutex);
                        else
                                /* Return a busy error: */
                                ret = EBUSY;
                        break;

                /* POSIX priority protection mutex: */
                case PTHREAD_PRIO_PROTECT:
                        /* Check for a priority ceiling violation: */
                        if (curthread->active_priority > (*mutex)->m_prio)
                                ret = EINVAL;

                        /* Check if this mutex is not locked: */
                        else if ((*mutex)->m_owner == NULL) {
                                /* Lock the mutex for the running thread: */
                                (*mutex)->m_owner = curthread;

                                /* Track number of priority mutexes owned: */
                                curthread->priority_mutex_count++;

                                /*
                                 * The running thread inherits the ceiling
                                 * priority of the mutex and executes at that
                                 * priority.
                                 */
                                curthread->active_priority = (*mutex)->m_prio;
                                (*mutex)->m_saved_prio =
                                    curthread->inherited_priority;
                                curthread->inherited_priority =
                                    (*mutex)->m_prio;

                                /* Add to the list of owned mutexes: */
                                _MUTEX_ASSERT_NOT_OWNED(*mutex);
                                TAILQ_INSERT_TAIL(&curthread->mutexq,
                                    (*mutex), m_qe);
                        } else if ((*mutex)->m_owner == curthread)
                                ret = mutex_self_trylock(*mutex);
                        else
                                /* Return a busy error: */
                                ret = EBUSY;
                        break;

                /* Trap invalid mutex protocols: */
                default:
                        /* Return an invalid argument error: */
                        ret = EINVAL;
                        break;
                }

                /* Unlock the mutex structure: */
                _SPINUNLOCK(&(*mutex)->lock);

                /*
                 * Undefer and handle pending signals, yielding if
                 * necessary:
                 */
                _thread_kern_sig_undefer();
        }

        /* Return the completion status: */
        return (ret);
}
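
/*
 * Illustrative caller pattern, not part of this file: trylock never
 * blocks, so a contended mutex is reported as EBUSY and the caller can
 * fall back to other work.  A sketch:
 *
 *	if (pthread_mutex_trylock(&m) == 0) {
 *		... critical section ...
 *		pthread_mutex_unlock(&m);
 *	} else {
 *		... EBUSY: another thread holds m; do something else ...
 *	}
 */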

int
_pthread_mutex_lock(pthread_mutex_t * mutex)
{
        struct pthread  *curthread = _get_curthread();
        int     ret = 0;

        if (_thread_initial == NULL)
                _thread_init();

        if (mutex == NULL)
                return (EINVAL);

        /*
         * If the mutex is statically initialized, perform the dynamic
         * initialization:
         */
        if ((*mutex == NULL) &&
            ((ret = init_static(mutex)) != 0))
                return (ret);

        /* Reset the interrupted flag: */
        curthread->interrupted = 0;

        /*
         * Enter a loop waiting to become the mutex owner.  We need a
         * loop in case the waiting thread is interrupted by a signal
         * to execute a signal handler.  It is not (currently) possible
         * to remain in the waiting queue while running a handler.
         * Instead, the thread is interrupted and backed out of the
         * waiting queue prior to executing the signal handler.
         */
        do {
                /*
                 * Defer signals to protect the scheduling queues from
                 * access by the signal handler:
                 */
                _thread_kern_sig_defer();

                /* Lock the mutex structure: */
                _SPINLOCK(&(*mutex)->lock);

                /*
                 * If the mutex was statically allocated, properly
                 * initialize the tail queue.
                 */
                if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
                        TAILQ_INIT(&(*mutex)->m_queue);
                        (*mutex)->m_flags |= MUTEX_FLAGS_INITED;
                        _MUTEX_INIT_LINK(*mutex);
                }

                /* Process according to mutex protocol: */
                switch ((*mutex)->m_protocol) {
                /* Default POSIX mutex: */
                case PTHREAD_PRIO_NONE:
                        if ((*mutex)->m_owner == NULL) {
                                /* Lock the mutex for this thread: */
                                (*mutex)->m_owner = curthread;

                                /* Add to the list of owned mutexes: */
                                _MUTEX_ASSERT_NOT_OWNED(*mutex);
                                TAILQ_INSERT_TAIL(&curthread->mutexq,
                                    (*mutex), m_qe);

                        } else if ((*mutex)->m_owner == curthread)
                                ret = mutex_self_lock(*mutex);
                        else {
                                /*
                                 * Join the queue of threads waiting to lock
                                 * the mutex:
                                 */
                                mutex_queue_enq(*mutex, curthread);

                                /*
                                 * Keep a pointer to the mutex this thread
                                 * is waiting on:
                                 */
                                curthread->data.mutex = *mutex;

                                /*
                                 * Unlock the mutex structure and schedule the
                                 * next thread:
                                 */
                                _thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
                                    &(*mutex)->lock, __FILE__, __LINE__);

                                /* Lock the mutex structure again: */
                                _SPINLOCK(&(*mutex)->lock);
                        }
                        break;

                /* POSIX priority inheritance mutex: */
                case PTHREAD_PRIO_INHERIT:
                        /* Check if this mutex is not locked: */
                        if ((*mutex)->m_owner == NULL) {
                                /* Lock the mutex for this thread: */
                                (*mutex)->m_owner = curthread;

                                /* Track number of priority mutexes owned: */
                                curthread->priority_mutex_count++;

                                /*
                                 * The mutex takes on attributes of the
                                 * running thread when there are no waiters.
                                 */
                                (*mutex)->m_prio = curthread->active_priority;
                                (*mutex)->m_saved_prio =
                                    curthread->inherited_priority;
                                curthread->inherited_priority =
                                    (*mutex)->m_prio;

                                /* Add to the list of owned mutexes: */
                                _MUTEX_ASSERT_NOT_OWNED(*mutex);
                                TAILQ_INSERT_TAIL(&curthread->mutexq,
                                    (*mutex), m_qe);

                        } else if ((*mutex)->m_owner == curthread)
                                ret = mutex_self_lock(*mutex);
                        else {
                                /*
                                 * Join the queue of threads waiting to lock
                                 * the mutex:
                                 */
                                mutex_queue_enq(*mutex, curthread);

                                /*
                                 * Keep a pointer to the mutex this thread
                                 * is waiting on:
                                 */
                                curthread->data.mutex = *mutex;

                                if (curthread->active_priority >
                                    (*mutex)->m_prio)
                                        /* Adjust priorities: */
                                        mutex_priority_adjust(*mutex);

                                /*
                                 * Unlock the mutex structure and schedule the
                                 * next thread:
                                 */
                                _thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
                                    &(*mutex)->lock, __FILE__, __LINE__);

                                /* Lock the mutex structure again: */
                                _SPINLOCK(&(*mutex)->lock);
                        }
                        break;

                /* POSIX priority protection mutex: */
                case PTHREAD_PRIO_PROTECT:
                        /* Check for a priority ceiling violation: */
                        if (curthread->active_priority > (*mutex)->m_prio)
                                ret = EINVAL;

                        /* Check if this mutex is not locked: */
                        else if ((*mutex)->m_owner == NULL) {
                                /*
                                 * Lock the mutex for the running
                                 * thread:
                                 */
                                (*mutex)->m_owner = curthread;

                                /* Track number of priority mutexes owned: */
                                curthread->priority_mutex_count++;

                                /*
                                 * The running thread inherits the ceiling
                                 * priority of the mutex and executes at that
                                 * priority:
                                 */
                                curthread->active_priority = (*mutex)->m_prio;
                                (*mutex)->m_saved_prio =
                                    curthread->inherited_priority;
                                curthread->inherited_priority =
                                    (*mutex)->m_prio;

                                /* Add to the list of owned mutexes: */
                                _MUTEX_ASSERT_NOT_OWNED(*mutex);
                                TAILQ_INSERT_TAIL(&curthread->mutexq,
                                    (*mutex), m_qe);
                        } else if ((*mutex)->m_owner == curthread)
                                ret = mutex_self_lock(*mutex);
                        else {
                                /*
                                 * Join the queue of threads waiting to lock
                                 * the mutex:
                                 */
                                mutex_queue_enq(*mutex, curthread);

                                /*
                                 * Keep a pointer to the mutex this thread
                                 * is waiting on:
                                 */
                                curthread->data.mutex = *mutex;

                                /* Clear any previous error: */
                                errno = 0;

                                /*
                                 * Unlock the mutex structure and schedule the
                                 * next thread:
                                 */
                                _thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
                                    &(*mutex)->lock, __FILE__, __LINE__);

                                /* Lock the mutex structure again: */
                                _SPINLOCK(&(*mutex)->lock);

                                /*
                                 * The thread's priority may have changed while
                                 * waiting for the mutex, causing a ceiling
                                 * violation.
                                 */
                                ret = errno;
                                errno = 0;
                        }
                        break;

                /* Trap invalid mutex protocols: */
                default:
                        /* Return an invalid argument error: */
                        ret = EINVAL;
                        break;
                }

                /*
                 * Check to see if this thread was interrupted and
                 * is still in the mutex queue of waiting threads:
                 */
                if (curthread->interrupted != 0)
                        mutex_queue_remove(*mutex, curthread);

                /* Unlock the mutex structure: */
                _SPINUNLOCK(&(*mutex)->lock);

                /*
                 * Undefer and handle pending signals, yielding if
                 * necessary:
                 */
                _thread_kern_sig_undefer();
        } while (((*mutex)->m_owner != curthread) && (ret == 0) &&
            (curthread->interrupted == 0));

        if (curthread->interrupted != 0 &&
            curthread->continuation != NULL)
                curthread->continuation((void *) curthread);

        /* Return the completion status: */
        return (ret);
}
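
/*
 * Illustrative usage of the priority ceiling protocol handled above,
 * not part of this file; the attribute calls are the standard POSIX
 * ones and the ceiling value is arbitrary:
 *
 *	pthread_mutexattr_t attr;
 *	pthread_mutex_t m;
 *
 *	pthread_mutexattr_init(&attr);
 *	pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_PROTECT);
 *	pthread_mutexattr_setprioceiling(&attr, 20);
 *	pthread_mutex_init(&m, &attr);
 *
 * A thread whose active priority is above the ceiling gets EINVAL from
 * pthread_mutex_lock(); otherwise it runs at the ceiling priority for
 * as long as it holds the mutex, as implemented above.
 */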

int
_pthread_mutex_unlock(pthread_mutex_t * mutex)
{
        return (mutex_unlock_common(mutex, /* add reference */ 0));
}

int
_mutex_cv_unlock(pthread_mutex_t * mutex)
{
        return (mutex_unlock_common(mutex, /* add reference */ 1));
}

int
_mutex_cv_lock(pthread_mutex_t * mutex)
{
        int     ret;
        if ((ret = pthread_mutex_lock(mutex)) == 0)
                (*mutex)->m_refcount--;
        return (ret);
}
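
/*
 * Descriptive note: the two _mutex_cv_*() helpers above are internal
 * entry points evidently intended for the condition variable code.  A
 * waiter releases the mutex with _mutex_cv_unlock(), which bumps
 * m_refcount so that pthread_mutex_destroy() reports EBUSY while the
 * wait is in progress, and reacquires it with _mutex_cv_lock(), which
 * drops the reference again.
 */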

static inline int
mutex_self_trylock(pthread_mutex_t mutex)
{
        int     ret = 0;

        switch (mutex->m_type) {

        /* case PTHREAD_MUTEX_DEFAULT: */
        case PTHREAD_MUTEX_ERRORCHECK:
        case PTHREAD_MUTEX_NORMAL:
                /*
                 * POSIX specifies that mutexes should return EDEADLK if a
                 * recursive lock is detected.  A trylock, however, simply
                 * fails to acquire the mutex, so report EBUSY.
                 */
                ret = EBUSY;
                break;

        case PTHREAD_MUTEX_RECURSIVE:
                /* Increment the lock count: */
                mutex->m_data.m_count++;
                break;

        default:
                /* Trap invalid mutex types: */
                ret = EINVAL;
        }

        return(ret);
}

static inline int
mutex_self_lock(pthread_mutex_t mutex)
{
        int ret = 0;

        switch (mutex->m_type) {
        /* case PTHREAD_MUTEX_DEFAULT: */
        case PTHREAD_MUTEX_ERRORCHECK:
                /*
                 * POSIX specifies that mutexes should return EDEADLK if a
                 * recursive lock is detected.
                 */
                ret = EDEADLK;
                break;

        case PTHREAD_MUTEX_NORMAL:
                /*
                 * What SS2 defines as a 'normal' mutex.  Intentionally
                 * deadlock on attempts to get a lock you already own.
                 */
                _thread_kern_sched_state_unlock(PS_DEADLOCK,
                    &mutex->lock, __FILE__, __LINE__);
                break;

        case PTHREAD_MUTEX_RECURSIVE:
                /* Increment the lock count: */
                mutex->m_data.m_count++;
                break;

        default:
                /* Trap invalid mutex types: */
                ret = EINVAL;
        }

        return(ret);
}
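
/*
 * Summary of the self-lock behavior implemented above (descriptive
 * only): relocking a mutex you already own yields EDEADLK for
 * PTHREAD_MUTEX_ERRORCHECK, a deliberate deadlock (PS_DEADLOCK) for
 * PTHREAD_MUTEX_NORMAL, and simply bumps m_count for
 * PTHREAD_MUTEX_RECURSIVE, which must then be unlocked the same number
 * of times before the mutex is actually released.
 */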

static inline int
mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
{
        struct pthread  *curthread = _get_curthread();
        int     ret = 0;

        if (mutex == NULL || *mutex == NULL) {
                ret = EINVAL;
        } else {
                /*
                 * Defer signals to protect the scheduling queues from
                 * access by the signal handler:
                 */
                _thread_kern_sig_defer();

                /* Lock the mutex structure: */
                _SPINLOCK(&(*mutex)->lock);

                /* Process according to mutex protocol: */
                switch ((*mutex)->m_protocol) {
                /* Default POSIX mutex: */
                case PTHREAD_PRIO_NONE:
                        /*
                         * Check if the running thread is not the owner of the
                         * mutex:
                         */
                        if ((*mutex)->m_owner != curthread) {
                                /*
                                 * Return an invalid argument error for no
                                 * owner and a permission error otherwise:
                                 */
                                ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
                        }
                        else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
                            ((*mutex)->m_data.m_count > 0)) {
                                /* Decrement the count: */
                                (*mutex)->m_data.m_count--;
                        } else {
                                /*
                                 * Clear the count in case this is a recursive
                                 * mutex.
                                 */
                                (*mutex)->m_data.m_count = 0;

                                /* Remove the mutex from the thread's queue. */
                                _MUTEX_ASSERT_IS_OWNED(*mutex);
                                TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
                                    (*mutex), m_qe);
                                _MUTEX_INIT_LINK(*mutex);

                                /*
                                 * Get the next thread from the queue of
                                 * threads waiting on the mutex:
                                 */
                                if (((*mutex)->m_owner =
                                    mutex_queue_deq(*mutex)) != NULL) {
                                        /* Make the new owner runnable: */
                                        PTHREAD_NEW_STATE((*mutex)->m_owner,
                                            PS_RUNNING);

                                        /*
                                         * Add the mutex to the thread's list of
                                         * owned mutexes:
                                         */
                                        TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
                                            (*mutex), m_qe);

                                        /*
                                         * The owner is no longer waiting for
                                         * this mutex:
                                         */
                                        (*mutex)->m_owner->data.mutex = NULL;
                                }
                        }
                        break;

                /* POSIX priority inheritance mutex: */
                case PTHREAD_PRIO_INHERIT:
                        /*
                         * Check if the running thread is not the owner of the
                         * mutex:
                         */
                        if ((*mutex)->m_owner != curthread) {
                                /*
                                 * Return an invalid argument error for no
                                 * owner and a permission error otherwise:
                                 */
                                ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
                        }
                        else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
                            ((*mutex)->m_data.m_count > 0)) {
                                /* Decrement the count: */
                                (*mutex)->m_data.m_count--;
                        } else {
                                /*
                                 * Clear the count in case this is a recursive
                                 * mutex.
                                 */
                                (*mutex)->m_data.m_count = 0;

                                /*
                                 * Restore the thread's inherited priority and
                                 * recompute the active priority (being careful
                                 * not to override changes in the thread's base
                                 * priority subsequent to locking the mutex).
                                 */
                                curthread->inherited_priority =
                                        (*mutex)->m_saved_prio;
                                curthread->active_priority =
                                    MAX(curthread->inherited_priority,
                                    curthread->base_priority);

                                /*
                                 * This thread now owns one less priority mutex.
                                 */
                                curthread->priority_mutex_count--;

                                /* Remove the mutex from the thread's queue. */
                                _MUTEX_ASSERT_IS_OWNED(*mutex);
                                TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
                                    (*mutex), m_qe);
                                _MUTEX_INIT_LINK(*mutex);

                                /*
                                 * Get the next thread from the queue of threads
                                 * waiting on the mutex:
                                 */
                                if (((*mutex)->m_owner =
                                    mutex_queue_deq(*mutex)) == NULL)
                                        /* This mutex has no priority. */
                                        (*mutex)->m_prio = 0;
                                else {
                                        /*
                                         * Track number of priority mutexes owned:
                                         */
                                        (*mutex)->m_owner->priority_mutex_count++;

                                        /*
                                         * Add the mutex to the thread's list
                                         * of owned mutexes:
                                         */
                                        TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
                                            (*mutex), m_qe);

                                        /*
                                         * The owner is no longer waiting for
                                         * this mutex:
                                         */
                                        (*mutex)->m_owner->data.mutex = NULL;

                                        /*
                                         * Set the priority of the mutex.  Since
                                         * our waiting threads are in descending
                                         * priority order, the priority of the
                                         * mutex becomes the active priority of
                                         * the thread we just dequeued.
                                         */
                                        (*mutex)->m_prio =
                                            (*mutex)->m_owner->active_priority;

                                        /*
                                         * Save the owning thread's inherited
                                         * priority:
                                         */
                                        (*mutex)->m_saved_prio =
                                                (*mutex)->m_owner->inherited_priority;

                                        /*
                                         * The owning thread's inherited priority
                                         * now becomes its active priority (the
                                         * priority of the mutex).
                                         */
                                        (*mutex)->m_owner->inherited_priority =
                                                (*mutex)->m_prio;

                                        /*
                                         * Make the new owner runnable:
                                         */
                                        PTHREAD_NEW_STATE((*mutex)->m_owner,
                                            PS_RUNNING);
                                }
                        }
                        break;

                /* POSIX priority ceiling mutex: */
                case PTHREAD_PRIO_PROTECT:
                        /*
                         * Check if the running thread is not the owner of the
                         * mutex:
                         */
                        if ((*mutex)->m_owner != curthread) {
                                /*
                                 * Return an invalid argument error for no
                                 * owner and a permission error otherwise:
                                 */
                                ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
                        }
                        else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
                            ((*mutex)->m_data.m_count > 0)) {
                                /* Decrement the count: */
                                (*mutex)->m_data.m_count--;
                        } else {
                                /*
                                 * Clear the count in case this is a recursive
                                 * mutex.
                                 */
                                (*mutex)->m_data.m_count = 0;

                                /*
                                 * Restore the thread's inherited priority and
                                 * recompute the active priority (being careful
                                 * not to override changes in the thread's base
                                 * priority subsequent to locking the mutex).
                                 */
                                curthread->inherited_priority =
                                        (*mutex)->m_saved_prio;
                                curthread->active_priority =
                                    MAX(curthread->inherited_priority,
                                    curthread->base_priority);

                                /*
                                 * This thread now owns one less priority mutex.
                                 */
                                curthread->priority_mutex_count--;

                                /* Remove the mutex from the thread's queue. */
                                _MUTEX_ASSERT_IS_OWNED(*mutex);
                                TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
                                    (*mutex), m_qe);
                                _MUTEX_INIT_LINK(*mutex);

                                /*
                                 * Enter a loop to find a waiting thread whose
                                 * active priority will not cause a ceiling
                                 * violation:
                                 */
                                while ((((*mutex)->m_owner =
                                    mutex_queue_deq(*mutex)) != NULL) &&
                                    ((*mutex)->m_owner->active_priority >
                                     (*mutex)->m_prio)) {
                                        /*
                                         * Either the mutex ceiling priority has
                                         * been lowered and/or this thread's
                                         * priority has been raised subsequent
                                         * to the thread being queued on the
                                         * waiting list.
                                         */
                                        tls_set_tcb((*mutex)->m_owner->tcb);
                                        errno = EINVAL;
                                        tls_set_tcb(curthread->tcb);
                                        PTHREAD_NEW_STATE((*mutex)->m_owner,
                                            PS_RUNNING);
                                        /*
                                         * The thread is no longer waiting for
                                         * this mutex:
                                         */
                                        (*mutex)->m_owner->data.mutex = NULL;
                                }

                                /* Check for a new owner: */
                                if ((*mutex)->m_owner != NULL) {
                                        /*
                                         * Track number of priority mutexes owned:
                                         */
                                        (*mutex)->m_owner->priority_mutex_count++;

                                        /*
                                         * Add the mutex to the thread's list
                                         * of owned mutexes:
                                         */
                                        TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
                                            (*mutex), m_qe);

                                        /*
                                         * The owner is no longer waiting for
                                         * this mutex:
                                         */
                                        (*mutex)->m_owner->data.mutex = NULL;

                                        /*
                                         * Save the owning thread's inherited
                                         * priority:
                                         */
                                        (*mutex)->m_saved_prio =
                                                (*mutex)->m_owner->inherited_priority;

                                        /*
                                         * The owning thread inherits the
                                         * ceiling priority of the mutex and
                                         * executes at that priority:
                                         */
                                        (*mutex)->m_owner->inherited_priority =
                                            (*mutex)->m_prio;
                                        (*mutex)->m_owner->active_priority =
                                            (*mutex)->m_prio;

                                        /*
                                         * Make the new owner runnable:
                                         */
                                        PTHREAD_NEW_STATE((*mutex)->m_owner,
                                            PS_RUNNING);
                                }
                        }
                        break;

                /* Trap invalid mutex protocols: */
                default:
                        /* Return an invalid argument error: */
                        ret = EINVAL;
                        break;
                }

                if ((ret == 0) && (add_reference != 0)) {
                        /* Increment the reference count: */
                        (*mutex)->m_refcount++;
                }

                /* Unlock the mutex structure: */
                _SPINUNLOCK(&(*mutex)->lock);

                /*
                 * Undefer and handle pending signals, yielding if
                 * necessary:
                 */
                _thread_kern_sig_undefer();
        }

        /* Return the completion status: */
        return (ret);
}


/*
 * This function is called when a change in base priority occurs for
 * a thread that is holding or waiting for a priority protection or
 * inheritance mutex.  A change in a thread's base priority can effect
 * changes to the active priorities of other threads and to the ordering
 * of mutex locking by waiting threads.
 *
 * This must be called while thread scheduling is deferred.
 */
void
_mutex_notify_priochange(pthread_t pthread)
{
        /* Adjust the priorities of any owned priority mutexes: */
        if (pthread->priority_mutex_count > 0) {
                /*
                 * Rescan the mutexes owned by this thread and correct
                 * their priorities to account for this thread's change
                 * in priority.  This has the side effect of changing
                 * the thread's active priority.
                 */
                mutex_rescan_owned(pthread, /* rescan all owned */ NULL);
        }

        /*
         * If this thread is waiting on a priority inheritance mutex,
         * check for priority adjustments.  A change in priority can
         * also effect a ceiling violation(*) for a thread waiting on
         * a priority protection mutex; we don't perform the check here
         * as it is done in pthread_mutex_unlock.
         *
         * (*) It should be noted that a priority change to a thread
         *     _after_ taking and owning a priority ceiling mutex
         *     does not affect ownership of that mutex; the ceiling
         *     priority is only checked before mutex ownership occurs.
         */
        if (pthread->state == PS_MUTEX_WAIT) {
                /* Lock the mutex structure: */
                _SPINLOCK(&pthread->data.mutex->lock);

                /*
                 * Check to make sure this thread is still in the same state
                 * (the spinlock above can yield the CPU to another thread):
                 */
                if (pthread->state == PS_MUTEX_WAIT) {
                        /*
                         * Remove and reinsert this thread into the list of
                         * waiting threads to preserve decreasing priority
                         * order.
                         */
                        mutex_queue_remove(pthread->data.mutex, pthread);
                        mutex_queue_enq(pthread->data.mutex, pthread);

                        if (pthread->data.mutex->m_protocol ==
                             PTHREAD_PRIO_INHERIT) {
                                /* Adjust priorities: */
                                mutex_priority_adjust(pthread->data.mutex);
                        }
                }

                /* Unlock the mutex structure: */
                _SPINUNLOCK(&pthread->data.mutex->lock);
        }
}
1134
1135 /*
1136  * Called when a new thread is added to the mutex waiting queue or
1137  * when a threads priority changes that is already in the mutex
1138  * waiting queue.
1139  */
1140 static void
1141 mutex_priority_adjust(pthread_mutex_t mutex)
1142 {
1143         pthread_t       pthread_next, pthread = mutex->m_owner;
1144         int             temp_prio;
1145         pthread_mutex_t m = mutex;
1146
1147         /*
1148          * Calculate the mutex priority as the maximum of the highest
1149          * active priority of any waiting threads and the owning threads
1150          * active priority(*).
1151          *
1152          * (*) Because the owning threads current active priority may
1153          *     reflect priority inherited from this mutex (and the mutex
1154          *     priority may have changed) we must recalculate the active
1155          *     priority based on the threads saved inherited priority
1156          *     and its base priority.
1157          */
1158         pthread_next = TAILQ_FIRST(&m->m_queue);  /* should never be NULL */
1159         temp_prio = MAX(pthread_next->active_priority,
1160             MAX(m->m_saved_prio, pthread->base_priority));
1161
1162         /* See if this mutex really needs adjusting: */
1163         if (temp_prio == m->m_prio)
1164                 /* No need to propagate the priority: */
1165                 return;
1166
1167         /* Set new priority of the mutex: */
1168         m->m_prio = temp_prio;
1169
1170         while (m != NULL) {
1171                 /*
1172                  * Save the threads priority before rescanning the
1173                  * owned mutexes:
1174                  */
1175                 temp_prio = pthread->active_priority;
1176
1177                 /*
1178                  * Fix the priorities for all the mutexes this thread has
1179                  * locked since taking this mutex.  This also has a
1180                  * potential side-effect of changing the threads priority.
1181                  */
1182                 mutex_rescan_owned(pthread, m);
1183
1184                 /*
1185                  * If the thread is currently waiting on a mutex, check
1186                  * to see if the threads new priority has affected the
1187                  * priority of the mutex.
1188                  */
1189                 if ((temp_prio != pthread->active_priority) &&
1190                     (pthread->state == PS_MUTEX_WAIT) &&
1191                     (pthread->data.mutex->m_protocol == PTHREAD_PRIO_INHERIT)) {
1192                         /* Grab the mutex this thread is waiting on: */
1193                         m = pthread->data.mutex;
1194
1195                         /*
1196                          * The priority for this thread has changed.  Remove
1197                          * and reinsert this thread into the list of waiting
1198                          * threads to preserve decreasing priority order.
1199                          */
1200                         mutex_queue_remove(m, pthread);
1201                         mutex_queue_enq(m, pthread);
1202
1203                         /* Grab the waiting thread with highest priority: */
1204                         pthread_next = TAILQ_FIRST(&m->m_queue);
1205
1206                         /*
1207                          * Calculate the mutex priority as the maximum of the
1208                          * highest active priority of any waiting threads and
1209                          * the owning threads active priority.
1210                          */
1211                         temp_prio = MAX(pthread_next->active_priority,
1212                             MAX(m->m_saved_prio, m->m_owner->base_priority));
1213
1214                         if (temp_prio != m->m_prio) {
1215                                 /*
1216                                  * The priority needs to be propagated to the
1217                                  * mutex this thread is waiting on and up to
1218                                  * the owner of that mutex.
1219                                  */
1220                                 m->m_prio = temp_prio;
1221                                 pthread = m->m_owner;
1222                         }
1223                         else
1224                                 /* We're done: */
1225                                 m = NULL;
1226
1227                 }
1228                 else
1229                         /* We're done: */
1230                         m = NULL;
1231         }
1232 }
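
/*
 * The propagation above only comes into play for mutexes created with the
 * PTHREAD_PRIO_INHERIT protocol.  As a rough, illustrative sketch (error
 * handling omitted), an application would request that protocol through the
 * standard POSIX attribute interface:
 *
 *	pthread_mutexattr_t	attr;
 *	pthread_mutex_t		pi_mutex;
 *
 *	pthread_mutexattr_init(&attr);
 *	pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_INHERIT);
 *	pthread_mutex_init(&pi_mutex, &attr);
 *	pthread_mutexattr_destroy(&attr);
 */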
1233
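/*
 * Walk the mutexes owned by 'pthread', starting either at the head of its
 * owned-mutex list (mutex == NULL) or just after 'mutex', recomputing the
 * priority of each priority inheritance mutex and, finally, the thread's
 * own inherited and active priorities.
 *
 * Informally, for a full rescan this re-establishes something like the
 * following relationship (a sketch only; the real walk also folds in the
 * priorities of threads waiting on each mutex):
 *
 *	inherited = 0;
 *	TAILQ_FOREACH(m, &pthread->mutexq, m_qe)
 *		if (m->m_protocol == PTHREAD_PRIO_INHERIT)
 *			inherited = MAX(inherited, m->m_prio);
 *	pthread->active_priority = MAX(inherited, pthread->base_priority);
 */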
1234 static void
1235 mutex_rescan_owned(pthread_t pthread, pthread_mutex_t mutex)
1236 {
1237         int             active_prio, inherited_prio;
1238         pthread_mutex_t m;
1239         pthread_t       pthread_next;
1240
1241         /*
1242          * Start walking the mutexes the thread has taken since
1243          * taking this mutex.
1244          */
1245         if (mutex == NULL) {
1246                 /*
1247                  * A null mutex means start at the beginning of the owned
1248                  * mutex list.
1249                  */
1250                 m = TAILQ_FIRST(&pthread->mutexq);
1251
1252                 /* There is no inherited priority yet. */
1253                 inherited_prio = 0;
1254         }
1255         else {
1256                 /*
1257                  * The caller wants to start after a specific mutex.  It
1258                  * is assumed that this mutex is a priority inheritance
1259                  * mutex and that its priority has been correctly
1260                  * calculated.
1261                  */
1262                 m = TAILQ_NEXT(mutex, m_qe);
1263
1264                 /* Start inheriting priority from the specified mutex. */
1265                 inherited_prio = mutex->m_prio;
1266         }
1267         active_prio = MAX(inherited_prio, pthread->base_priority);
1268
1269         while (m != NULL) {
1270                 /*
1271                  * We only want to deal with priority inheritance
1272                  * mutexes.  This might be optimized by only placing
1273                  * priority inheritance mutexes into the owned mutex
1274                  * list, but it may prove useful to have all
1275                  * owned mutexes in this list.  Consider a thread
1276                  * exiting while holding mutexes...
1277                  */
1278                 if (m->m_protocol == PTHREAD_PRIO_INHERIT) {
1279                         /*
1280                          * Fix the owner's saved (inherited) priority to
1281                          * reflect the priority of the previous mutex.
1282                          */
1283                         m->m_saved_prio = inherited_prio;
1284
1285                         if ((pthread_next = TAILQ_FIRST(&m->m_queue)) != NULL)
1286                                 /* Recalculate the priority of the mutex: */
1287                                 m->m_prio = MAX(active_prio,
1288                                      pthread_next->active_priority);
1289                         else
1290                                 m->m_prio = active_prio;
1291
1292                         /* Recalculate new inherited and active priorities: */
1293                         inherited_prio = m->m_prio;
1294                         active_prio = MAX(m->m_prio, pthread->base_priority);
1295                 }
1296
1297                 /* Advance to the next mutex owned by this thread: */
1298                 m = TAILQ_NEXT(m, m_qe);
1299         }
1300
1301         /*
1302          * Fix the thread's inherited priority and recalculate its
1303          * active priority.
1304          */
1305         pthread->inherited_priority = inherited_prio;
1306         active_prio = MAX(inherited_prio, pthread->base_priority);
1307
1308         if (active_prio != pthread->active_priority) {
1309                 /*
1310                  * If this thread is in the priority queue, it must be
1311                  * removed and reinserted for its new priority.
1312                  */
1313                 if (pthread->flags & PTHREAD_FLAGS_IN_PRIOQ) {
1314                         /*
1315                          * Remove the thread from the priority queue
1316                          * before changing its priority:
1317                          */
1318                         PTHREAD_PRIOQ_REMOVE(pthread);
1319
1320                         /*
1321                          * POSIX states that if the priority is being
1322                          * lowered, the thread must be inserted at the
1323                          * head of the queue for its priority if it owns
1324                          * any priority protection or inheritance mutexes.
1325                          */
1326                         if ((active_prio < pthread->active_priority) &&
1327                             (pthread->priority_mutex_count > 0)) {
1328                                 /* Set the new active priority. */
1329                                 pthread->active_priority = active_prio;
1330
1331                                 PTHREAD_PRIOQ_INSERT_HEAD(pthread);
1332                         }
1333                         else {
1334                                 /* Set the new active priority. */
1335                                 pthread->active_priority = active_prio;
1336
1337                                 PTHREAD_PRIOQ_INSERT_TAIL(pthread);
1338                         }
1339                 }
1340                 else {
1341                         /* Set the new active priority. */
1342                         pthread->active_priority = active_prio;
1343                 }
1344         }
1345 }
1346
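/*
 * Release every library-private mutex still held by the given thread.
 * Only mutexes flagged MUTEX_FLAGS_PRIVATE are unlocked; mutexes locked
 * by the application itself are left untouched.
 */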
1347 void
1348 _mutex_unlock_private(pthread_t pthread)
1349 {
1350         struct pthread_mutex    *m, *m_next;
1351
1352         for (m = TAILQ_FIRST(&pthread->mutexq); m != NULL; m = m_next) {
1353                 m_next = TAILQ_NEXT(m, m_qe);
1354                 if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
1355                         pthread_mutex_unlock(&m);
1356         }
1357 }
1358
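/*
 * Back the given thread out of a pending mutex lock: if it is still queued
 * on a mutex's wait list, remove it and clear its reference to that mutex.
 * Signals are deferred around the queue manipulation so a signal handler
 * cannot observe the queues in an inconsistent state.
 */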
1359 void
1360 _mutex_lock_backout(pthread_t pthread)
1361 {
1362         struct pthread_mutex    *mutex;
1363
1364         /*
1365          * Defer signals to protect the scheduling queues from
1366          * access by the signal handler:
1367          */
1368         _thread_kern_sig_defer();
1369         if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {
1370                 mutex = pthread->data.mutex;
1371
1372                 /* Lock the mutex structure: */
1373                 _SPINLOCK(&mutex->lock);
1374
1375                 mutex_queue_remove(mutex, pthread);
1376
1377                 /* This thread is no longer waiting for the mutex: */
1378                 pthread->data.mutex = NULL;
1379
1380                 /* Unlock the mutex structure: */
1381                 _SPINUNLOCK(&mutex->lock);
1382
1383         }
1384         /*
1385          * Undefer and handle pending signals, yielding if
1386          * necessary:
1387          */
1388         _thread_kern_sig_undefer();
1389 }
1390
1391 /*
1392  * Dequeue a waiting thread from the head of a mutex queue in descending
1393  * priority order.
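 * Waiters found to have been interrupted (cancelled) are removed from the
 * queue but skipped; NULL is returned if no eligible waiter remains.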
1394  */
1395 static inline pthread_t
1396 mutex_queue_deq(pthread_mutex_t mutex)
1397 {
1398         pthread_t pthread;
1399
1400         while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) {
1401                 TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
1402                 pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;
1403
1404                 /*
1405                  * Only exit the loop if the thread hasn't been
1406                  * cancelled.
1407                  */
1408                 if (pthread->interrupted == 0)
1409                         break;
1410         }
1411
1412         return(pthread);
1413 }
1414
1415 /*
1416  * Remove a waiting thread from a mutex queue in descending priority order.
1417  */
1418 static inline void
1419 mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread)
1420 {
1421         if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {
1422                 TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
1423                 pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;
1424         }
1425 }
1426
1427 /*
1428  * Enqueue a waiting thread on a mutex queue in descending priority order.
1429  */
1430 static inline void
1431 mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
1432 {
1433         pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head);
1434
1435         PTHREAD_ASSERT_NOT_IN_SYNCQ(pthread);
1436         /*
1437          * For the common case of all threads having equal priority,
1438          * we perform a quick check against the priority of the thread
1439          * at the tail of the queue.
1440          */
1441         if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
1442                 TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, sqe);
1443         else {
1444                 tid = TAILQ_FIRST(&mutex->m_queue);
1445                 while (pthread->active_priority <= tid->active_priority)
1446                         tid = TAILQ_NEXT(tid, sqe);
1447                 TAILQ_INSERT_BEFORE(tid, pthread, sqe);
1448         }
1449         pthread->flags |= PTHREAD_FLAGS_IN_MUTEXQ;
1450 }
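
/*
 * A small worked example of the ordering maintained above: with waiters of
 * active priority { 20, 15, 15 } queued head to tail, a new waiter of
 * priority 15 is appended after the existing 15s (FIFO among equal
 * priorities, caught by the tail check), while a subsequent waiter of
 * priority 17 is inserted before the first 15, giving { 20, 17, 15, 15, 15 }.
 * Note that the scan in the else branch cannot run off the end of the list:
 * it is only reached when the new waiter's priority exceeds the tail's, so
 * the loop always stops at or before the tail.
 */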
1451
1452 __strong_reference(_pthread_mutex_init, pthread_mutex_init);
1453 __strong_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
1454 __strong_reference(_pthread_mutex_trylock, pthread_mutex_trylock);
1455 __strong_reference(_pthread_mutex_lock, pthread_mutex_lock);
1456 __strong_reference(_pthread_mutex_unlock, pthread_mutex_unlock);
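
/*
 * The __strong_reference() entries above publish the standard POSIX names
 * as strong aliases of the _pthread_-prefixed implementations defined in
 * this file.  As a sketch (the exact definition lives in <sys/cdefs.h> and
 * may differ in detail), the macro expands to roughly:
 *
 *	#define __strong_reference(sym, aliassym) \
 *		extern __typeof(sym) aliassym __attribute__((__alias__(#sym)))
 */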