1 /*
2  * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *      This product includes software developed by John Birrell.
16  * 4. Neither the name of the author nor the names of any co-contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  * $FreeBSD: src/lib/libc_r/uthread/uthread_mutex.c,v 1.20.2.8 2002/10/22 14:44:03 fjoe Exp $
33  * $DragonFly: src/lib/libc_r/uthread/uthread_mutex.c,v 1.4 2005/05/30 20:50:53 joerg Exp $
34  */
35 #include <stdlib.h>
36 #include <errno.h>
37 #include <string.h>
38 #include <sys/param.h>
39 #include <sys/queue.h>
40 #include <pthread.h>
41 #include "pthread_private.h"
42
43 #if defined(_PTHREADS_INVARIANTS)
44 #define _MUTEX_INIT_LINK(m)             do {            \
45         (m)->m_qe.tqe_prev = NULL;                      \
46         (m)->m_qe.tqe_next = NULL;                      \
47 } while (0)
48 #define _MUTEX_ASSERT_IS_OWNED(m)       do {            \
49         if ((m)->m_qe.tqe_prev == NULL)                 \
50                 PANIC("mutex is not on list");          \
51 } while (0)
52 #define _MUTEX_ASSERT_NOT_OWNED(m)      do {            \
53         if (((m)->m_qe.tqe_prev != NULL) ||             \
54             ((m)->m_qe.tqe_next != NULL))               \
55                 PANIC("mutex is on list");              \
56 } while (0)
57 #else
58 #define _MUTEX_INIT_LINK(m)
59 #define _MUTEX_ASSERT_IS_OWNED(m)
60 #define _MUTEX_ASSERT_NOT_OWNED(m)
61 #endif
62
63 /*
64  * Prototypes
65  */
66 static inline int       mutex_self_trylock(pthread_mutex_t);
67 static inline int       mutex_self_lock(pthread_mutex_t);
68 static inline int       mutex_unlock_common(pthread_mutex_t *, int);
69 static void             mutex_priority_adjust(pthread_mutex_t);
70 static void             mutex_rescan_owned (pthread_t, pthread_mutex_t);
71 static inline pthread_t mutex_queue_deq(pthread_mutex_t);
72 static inline void      mutex_queue_remove(pthread_mutex_t, pthread_t);
73 static inline void      mutex_queue_enq(pthread_mutex_t, pthread_t);
74
75
76 static spinlock_t static_init_lock = _SPINLOCK_INITIALIZER;
77
78 /* Reinitialize a mutex to defaults. */
79 int
80 _mutex_reinit(pthread_mutex_t * mutex)
81 {
82         int     ret = 0;
83
84         if (mutex == NULL)
85                 ret = EINVAL;
86         else if (*mutex == NULL)
87                 ret = pthread_mutex_init(mutex, NULL);
88         else {
89                 /*
90                  * Initialize the mutex structure:
91                  */
92                 (*mutex)->m_type = PTHREAD_MUTEX_DEFAULT;
93                 (*mutex)->m_protocol = PTHREAD_PRIO_NONE;
94                 TAILQ_INIT(&(*mutex)->m_queue);
95                 (*mutex)->m_owner = NULL;
96                 (*mutex)->m_data.m_count = 0;
97                 (*mutex)->m_flags &= MUTEX_FLAGS_PRIVATE;
98                 (*mutex)->m_flags |= MUTEX_FLAGS_INITED;
99                 (*mutex)->m_refcount = 0;
100                 (*mutex)->m_prio = 0;
101                 (*mutex)->m_saved_prio = 0;
102                 _MUTEX_INIT_LINK(*mutex);
103                 memset(&(*mutex)->lock, 0, sizeof((*mutex)->lock));
104         }
105         return (ret);
106 }
107
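/*
 * Allocate and initialize a mutex.  With no attributes the mutex
 * defaults to an error-checking type using the PTHREAD_PRIO_NONE
 * protocol.
 */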
108 int
109 _pthread_mutex_init(pthread_mutex_t * mutex,
110                    const pthread_mutexattr_t * mutex_attr)
111 {
112         enum pthread_mutextype  type;
113         int             protocol;
114         int             ceiling;
115         pthread_mutex_t pmutex;
116         int             ret = 0;
117
118         if (mutex == NULL)
119                 ret = EINVAL;
120
121         /* Check if default mutex attributes: */
122         else if (mutex_attr == NULL || *mutex_attr == NULL) {
123                 /* Default to an (error checking) POSIX mutex: */
124                 type = PTHREAD_MUTEX_ERRORCHECK;
125                 protocol = PTHREAD_PRIO_NONE;
126                 ceiling = PTHREAD_MAX_PRIORITY;
127         }
128
129         /* Check mutex type: */
130         else if (((*mutex_attr)->m_type < PTHREAD_MUTEX_ERRORCHECK) ||
131             ((*mutex_attr)->m_type >= MUTEX_TYPE_MAX))
132                 /* Return an invalid argument error: */
133                 ret = EINVAL;
134
135         /* Check mutex protocol: */
136         else if (((*mutex_attr)->m_protocol < PTHREAD_PRIO_NONE) ||
137             ((*mutex_attr)->m_protocol > PTHREAD_PRIO_PROTECT))
138                 /* Return an invalid argument error: */
139                 ret = EINVAL;
140
141         else {
142                 /* Use the requested mutex type and protocol: */
143                 type = (*mutex_attr)->m_type;
144                 protocol = (*mutex_attr)->m_protocol;
145                 ceiling = (*mutex_attr)->m_ceiling;
146         }
147
148         /* Check no errors so far: */
149         if (ret == 0) {
150                 if ((pmutex = (pthread_mutex_t)
151                     malloc(sizeof(struct pthread_mutex))) == NULL)
152                         ret = ENOMEM;
153                 else {
154                         /* Reset the mutex flags: */
155                         pmutex->m_flags = 0;
156
157                         /* Process according to mutex type: */
158                         switch (type) {
159                         /* case PTHREAD_MUTEX_DEFAULT: */
160                         case PTHREAD_MUTEX_ERRORCHECK:
161                         case PTHREAD_MUTEX_NORMAL:
162                                 /* Nothing to do here. */
163                                 break;
164
165                         /* Single UNIX Spec 2 recursive mutex: */
166                         case PTHREAD_MUTEX_RECURSIVE:
167                                 /* Reset the mutex count: */
168                                 pmutex->m_data.m_count = 0;
169                                 break;
170
171                         /* Trap invalid mutex types: */
172                         default:
173                                 /* Return an invalid argument error: */
174                                 ret = EINVAL;
175                                 break;
176                         }
177                         if (ret == 0) {
178                                 /* Initialise the rest of the mutex: */
179                                 TAILQ_INIT(&pmutex->m_queue);
180                                 pmutex->m_flags |= MUTEX_FLAGS_INITED;
181                                 pmutex->m_owner = NULL;
182                                 pmutex->m_type = type;
183                                 pmutex->m_protocol = protocol;
184                                 pmutex->m_refcount = 0;
185                                 if (protocol == PTHREAD_PRIO_PROTECT)
186                                         pmutex->m_prio = ceiling;
187                                 else
188                                         pmutex->m_prio = 0;
189                                 pmutex->m_saved_prio = 0;
190                                 _MUTEX_INIT_LINK(pmutex);
191                                 memset(&pmutex->lock, 0, sizeof(pmutex->lock));
192                                 *mutex = pmutex;
193                         } else {
194                                 free(pmutex);
195                                 *mutex = NULL;
196                         }
197                 }
198         }
199         /* Return the completion status: */
200         return(ret);
201 }
202
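/*
 * Destroy a mutex and free its storage.  Fails with EBUSY if the mutex
 * is locked, has waiters, or is still referenced by a condition variable.
 */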
203 int
204 _pthread_mutex_destroy(pthread_mutex_t * mutex)
205 {
206         int     ret = 0;
207
208         if (mutex == NULL || *mutex == NULL)
209                 ret = EINVAL;
210         else {
211                 /* Lock the mutex structure: */
212                 _SPINLOCK(&(*mutex)->lock);
213
214                 /*
215                  * Check to see if this mutex is in use:
216                  */
217                 if (((*mutex)->m_owner != NULL) ||
218                     (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) ||
219                     ((*mutex)->m_refcount != 0)) {
220                         ret = EBUSY;
221
222                         /* Unlock the mutex structure: */
223                         _SPINUNLOCK(&(*mutex)->lock);
224                 }
225                 else {
226                         /*
227                          * Free the memory allocated for the mutex
228                          * structure:
229                          */
230                         _MUTEX_ASSERT_NOT_OWNED(*mutex);
231                         free(*mutex);
232
233                         /*
234                          * Leave the caller's pointer NULL now that
235                          * the mutex has been destroyed:
236                          */
237                         *mutex = NULL;
238                 }
239         }
240
241         /* Return the completion status: */
242         return (ret);
243 }
244
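/*
 * Perform the deferred initialization of a statically initialized mutex;
 * the static_init_lock spinlock serializes concurrent first use.
 */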
245 static int
246 init_static(pthread_mutex_t *mutex)
247 {
248         int     ret;
249
250         _SPINLOCK(&static_init_lock);
251
252         if (*mutex == NULL)
253                 ret = pthread_mutex_init(mutex, NULL);
254         else
255                 ret = 0;
256
257         _SPINUNLOCK(&static_init_lock);
258
259         return(ret);
260 }
261
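/*
 * Try to lock a mutex without blocking.  An already locked mutex is
 * reported with EBUSY (a recursive mutex owned by the caller simply
 * has its count bumped).
 */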
262 int
263 _pthread_mutex_trylock(pthread_mutex_t * mutex)
264 {
265         struct pthread  *curthread = _get_curthread();
266         int     ret = 0;
267
268         if (mutex == NULL)
269                 ret = EINVAL;
270
271         /*
272          * If the mutex is statically initialized, perform the dynamic
273          * initialization:
274          */
275         else if (*mutex != NULL || (ret = init_static(mutex)) == 0) {
276                 /*
277                  * Defer signals to protect the scheduling queues from
278                  * access by the signal handler:
279                  */
280                 _thread_kern_sig_defer();
281
282                 /* Lock the mutex structure: */
283                 _SPINLOCK(&(*mutex)->lock);
284
285                 /*
286                  * If the mutex was statically allocated, properly
287                  * initialize the tail queue.
288                  */
289                 if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
290                         TAILQ_INIT(&(*mutex)->m_queue);
291                         _MUTEX_INIT_LINK(*mutex);
292                         (*mutex)->m_flags |= MUTEX_FLAGS_INITED;
293                 }
294
295                 /* Process according to mutex protocol: */
296                 switch ((*mutex)->m_protocol) {
297                 /* Default POSIX mutex: */
298                 case PTHREAD_PRIO_NONE: 
299                         /* Check if this mutex is not locked: */
300                         if ((*mutex)->m_owner == NULL) {
301                                 /* Lock the mutex for the running thread: */
302                                 (*mutex)->m_owner = curthread;
303
304                                 /* Add to the list of owned mutexes: */
305                                 _MUTEX_ASSERT_NOT_OWNED(*mutex);
306                                 TAILQ_INSERT_TAIL(&curthread->mutexq,
307                                     (*mutex), m_qe);
308                         } else if ((*mutex)->m_owner == curthread)
309                                 ret = mutex_self_trylock(*mutex);
310                         else
311                                 /* Return a busy error: */
312                                 ret = EBUSY;
313                         break;
314
315                 /* POSIX priority inheritance mutex: */
316                 case PTHREAD_PRIO_INHERIT:
317                         /* Check if this mutex is not locked: */
318                         if ((*mutex)->m_owner == NULL) {
319                                 /* Lock the mutex for the running thread: */
320                                 (*mutex)->m_owner = curthread;
321
322                                 /* Track number of priority mutexes owned: */
323                                 curthread->priority_mutex_count++;
324
325                                 /*
326                                  * The mutex takes on the attributes of the
327                                  * running thread when there are no waiters.
328                                  */
329                                 (*mutex)->m_prio = curthread->active_priority;
330                                 (*mutex)->m_saved_prio =
331                                     curthread->inherited_priority;
332
333                                 /* Add to the list of owned mutexes: */
334                                 _MUTEX_ASSERT_NOT_OWNED(*mutex);
335                                 TAILQ_INSERT_TAIL(&curthread->mutexq,
336                                     (*mutex), m_qe);
337                         } else if ((*mutex)->m_owner == curthread)
338                                 ret = mutex_self_trylock(*mutex);
339                         else
340                                 /* Return a busy error: */
341                                 ret = EBUSY;
342                         break;
343
344                 /* POSIX priority protection mutex: */
345                 case PTHREAD_PRIO_PROTECT:
346                         /* Check for a priority ceiling violation: */
347                         if (curthread->active_priority > (*mutex)->m_prio)
348                                 ret = EINVAL;
349
350                         /* Check if this mutex is not locked: */
351                         else if ((*mutex)->m_owner == NULL) {
352                                 /* Lock the mutex for the running thread: */
353                                 (*mutex)->m_owner = curthread;
354
355                                 /* Track number of priority mutexes owned: */
356                                 curthread->priority_mutex_count++;
357
358                                 /*
359                                  * The running thread inherits the ceiling
360                                  * priority of the mutex and executes at that
361                                  * priority.
362                                  */
363                                 curthread->active_priority = (*mutex)->m_prio;
364                                 (*mutex)->m_saved_prio =
365                                     curthread->inherited_priority;
366                                 curthread->inherited_priority =
367                                     (*mutex)->m_prio;
368
369                                 /* Add to the list of owned mutexes: */
370                                 _MUTEX_ASSERT_NOT_OWNED(*mutex);
371                                 TAILQ_INSERT_TAIL(&curthread->mutexq,
372                                     (*mutex), m_qe);
373                         } else if ((*mutex)->m_owner == curthread)
374                                 ret = mutex_self_trylock(*mutex);
375                         else
376                                 /* Return a busy error: */
377                                 ret = EBUSY;
378                         break;
379
380                 /* Trap invalid mutex types: */
381                 default:
382                         /* Return an invalid argument error: */
383                         ret = EINVAL;
384                         break;
385                 }
386
387                 /* Unlock the mutex structure: */
388                 _SPINUNLOCK(&(*mutex)->lock);
389
390                 /*
391                  * Undefer and handle pending signals, yielding if
392                  * necessary:
393                  */
394                 _thread_kern_sig_undefer();
395         }
396
397         /* Return the completion status: */
398         return (ret);
399 }
400
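/*
 * Lock a mutex, blocking in the PS_MUTEX_WAIT state until it can be
 * acquired.  Signals may interrupt the wait, so the loop below re-queues
 * the thread after any handler has run.
 */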
401 int
402 _pthread_mutex_lock(pthread_mutex_t * mutex)
403 {
404         struct pthread  *curthread = _get_curthread();
405         int     ret = 0;
406
407         if (_thread_initial == NULL)
408                 _thread_init();
409
410         if (mutex == NULL)
411                 return (EINVAL);
412
413         /*
414          * If the mutex is statically initialized, perform the dynamic
415          * initialization:
416          */
417         if ((*mutex == NULL) &&
418             ((ret = init_static(mutex)) != 0))
419                 return (ret);
420
421         /* Reset the interrupted flag: */
422         curthread->interrupted = 0;
423
424         /*
425          * Enter a loop waiting to become the mutex owner.  We need a
426          * loop in case the waiting thread is interrupted by a signal
427          * to execute a signal handler.  It is not (currently) possible
428          * to remain in the waiting queue while running a handler.
429          * Instead, the thread is interrupted and backed out of the
430          * waiting queue prior to executing the signal handler.
431          */
432         do {
433                 /*
434                  * Defer signals to protect the scheduling queues from
435                  * access by the signal handler:
436                  */
437                 _thread_kern_sig_defer();
438
439                 /* Lock the mutex structure: */
440                 _SPINLOCK(&(*mutex)->lock);
441
442                 /*
443                  * If the mutex was statically allocated, properly
444                  * initialize the tail queue.
445                  */
446                 if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
447                         TAILQ_INIT(&(*mutex)->m_queue);
448                         (*mutex)->m_flags |= MUTEX_FLAGS_INITED;
449                         _MUTEX_INIT_LINK(*mutex);
450                 }
451
452                 /* Process according to mutex protocol: */
453                 switch ((*mutex)->m_protocol) {
454                 /* Default POSIX mutex: */
455                 case PTHREAD_PRIO_NONE:
456                         if ((*mutex)->m_owner == NULL) {
457                                 /* Lock the mutex for this thread: */
458                                 (*mutex)->m_owner = curthread;
459
460                                 /* Add to the list of owned mutexes: */
461                                 _MUTEX_ASSERT_NOT_OWNED(*mutex);
462                                 TAILQ_INSERT_TAIL(&curthread->mutexq,
463                                     (*mutex), m_qe);
464
465                         } else if ((*mutex)->m_owner == curthread)
466                                 ret = mutex_self_lock(*mutex);
467                         else {
468                                 /*
469                                  * Join the queue of threads waiting to lock
470                                  * the mutex: 
471                                  */
472                                 mutex_queue_enq(*mutex, curthread);
473
474                                 /*
475                                  * Keep a pointer to the mutex this thread
476                                  * is waiting on:
477                                  */
478                                 curthread->data.mutex = *mutex;
479
480                                 /*
481                                  * Unlock the mutex structure and schedule the
482                                  * next thread:
483                                  */
484                                 _thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
485                                     &(*mutex)->lock, __FILE__, __LINE__);
486
487                                 /* Lock the mutex structure again: */
488                                 _SPINLOCK(&(*mutex)->lock);
489                         }
490                         break;
491
492                 /* POSIX priority inheritance mutex: */
493                 case PTHREAD_PRIO_INHERIT:
494                         /* Check if this mutex is not locked: */
495                         if ((*mutex)->m_owner == NULL) {
496                                 /* Lock the mutex for this thread: */
497                                 (*mutex)->m_owner = curthread;
498
499                                 /* Track number of priority mutexes owned: */
500                                 curthread->priority_mutex_count++;
501
502                                 /*
503                                  * The mutex takes on attributes of the
504                                  * running thread when there are no waiters.
505                                  */
506                                 (*mutex)->m_prio = curthread->active_priority;
507                                 (*mutex)->m_saved_prio =
508                                     curthread->inherited_priority;
509                                 curthread->inherited_priority =
510                                     (*mutex)->m_prio;
511
512                                 /* Add to the list of owned mutexes: */
513                                 _MUTEX_ASSERT_NOT_OWNED(*mutex);
514                                 TAILQ_INSERT_TAIL(&curthread->mutexq,
515                                     (*mutex), m_qe);
516
517                         } else if ((*mutex)->m_owner == curthread)
518                                 ret = mutex_self_lock(*mutex);
519                         else {
520                                 /*
521                                  * Join the queue of threads waiting to lock
522                                  * the mutex: 
523                                  */
524                                 mutex_queue_enq(*mutex, curthread);
525
526                                 /*
527                                  * Keep a pointer to the mutex this thread
528                                  * is waiting on:
529                                  */
530                                 curthread->data.mutex = *mutex;
531
532                                 if (curthread->active_priority >
533                                     (*mutex)->m_prio)
534                                         /* Adjust priorities: */
535                                         mutex_priority_adjust(*mutex);
536
537                                 /*
538                                  * Unlock the mutex structure and schedule the
539                                  * next thread:
540                                  */
541                                 _thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
542                                     &(*mutex)->lock, __FILE__, __LINE__);
543
544                                 /* Lock the mutex structure again: */
545                                 _SPINLOCK(&(*mutex)->lock);
546                         }
547                         break;
548
549                 /* POSIX priority protection mutex: */
550                 case PTHREAD_PRIO_PROTECT:
551                         /* Check for a priority ceiling violation: */
552                         if (curthread->active_priority > (*mutex)->m_prio)
553                                 ret = EINVAL;
554
555                         /* Check if this mutex is not locked: */
556                         else if ((*mutex)->m_owner == NULL) {
557                                 /*
558                                  * Lock the mutex for the running
559                                  * thread:
560                                  */
561                                 (*mutex)->m_owner = curthread;
562
563                                 /* Track number of priority mutexes owned: */
564                                 curthread->priority_mutex_count++;
565
566                                 /*
567                                  * The running thread inherits the ceiling
568                                  * priority of the mutex and executes at that
569                                  * priority:
570                                  */
571                                 curthread->active_priority = (*mutex)->m_prio;
572                                 (*mutex)->m_saved_prio =
573                                     curthread->inherited_priority;
574                                 curthread->inherited_priority =
575                                     (*mutex)->m_prio;
576
577                                 /* Add to the list of owned mutexes: */
578                                 _MUTEX_ASSERT_NOT_OWNED(*mutex);
579                                 TAILQ_INSERT_TAIL(&curthread->mutexq,
580                                     (*mutex), m_qe);
581                         } else if ((*mutex)->m_owner == curthread)
582                                 ret = mutex_self_lock(*mutex);
583                         else {
584                                 /*
585                                  * Join the queue of threads waiting to lock
586                                  * the mutex: 
587                                  */
588                                 mutex_queue_enq(*mutex, curthread);
589
590                                 /*
591                                  * Keep a pointer to the mutex this thread
592                                  * is waiting on:
593                                  */
594                                 curthread->data.mutex = *mutex;
595
596                                 /* Clear any previous error: */
597                                 errno = 0;
598
599                                 /*
600                                  * Unlock the mutex structure and schedule the
601                                  * next thread:
602                                  */
603                                 _thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
604                                     &(*mutex)->lock, __FILE__, __LINE__);
605
606                                 /* Lock the mutex structure again: */
607                                 _SPINLOCK(&(*mutex)->lock);
608
609                                 /*
610                  * The thread's priority may have changed while
611                  * waiting for the mutex, causing a ceiling
612                                  * violation.
613                                  */
614                                 ret = errno;
615                                 errno = 0;
616                         }
617                         break;
618
619                 /* Trap invalid mutex types: */
620                 default:
621                         /* Return an invalid argument error: */
622                         ret = EINVAL;
623                         break;
624                 }
625
626                 /*
627                  * Check to see if this thread was interrupted and
628                  * is still in the mutex queue of waiting threads:
629                  */
630                 if (curthread->interrupted != 0)
631                         mutex_queue_remove(*mutex, curthread);
632
633                 /* Unlock the mutex structure: */
634                 _SPINUNLOCK(&(*mutex)->lock);
635
636                 /*
637                  * Undefer and handle pending signals, yielding if
638                  * necessary:
639                  */
640                 _thread_kern_sig_undefer();
641         } while (((*mutex)->m_owner != curthread) && (ret == 0) &&
642             (curthread->interrupted == 0));
643
644         if (curthread->interrupted != 0 &&
645             curthread->continuation != NULL)
646                 curthread->continuation((void *) curthread);
647
648         /* Return the completion status: */
649         return (ret);
650 }
651
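/* Unlock a mutex held by the calling thread. */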
652 int
653 _pthread_mutex_unlock(pthread_mutex_t * mutex)
654 {
655         return (mutex_unlock_common(mutex, /* add reference */ 0));
656 }
657
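/*
 * Unlock a mutex for a condition-variable wait, taking a reference so
 * the mutex cannot be destroyed while the waiter sleeps.
 */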
658 int
659 _mutex_cv_unlock(pthread_mutex_t * mutex)
660 {
661         return (mutex_unlock_common(mutex, /* add reference */ 1));
662 }
663
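/* Reacquire a mutex after a condition-variable wait and drop the reference. */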
664 int
665 _mutex_cv_lock(pthread_mutex_t * mutex)
666 {
667         int     ret;
668         if ((ret = pthread_mutex_lock(mutex)) == 0)
669                 (*mutex)->m_refcount--;
670         return (ret);
671 }
672
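/* Handle a trylock on a mutex the calling thread already owns. */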
673 static inline int
674 mutex_self_trylock(pthread_mutex_t mutex)
675 {
676         int     ret = 0;
677
678         switch (mutex->m_type) {
679
680         /* case PTHREAD_MUTEX_DEFAULT: */
681         case PTHREAD_MUTEX_ERRORCHECK:
682         case PTHREAD_MUTEX_NORMAL:
683                 /*
684                  * POSIX specifies that pthread_mutex_trylock() fail with
685                  * EBUSY when the mutex is already locked, even by the caller.
686                  */
687                 ret = EBUSY; 
688                 break;
689
690         case PTHREAD_MUTEX_RECURSIVE:
691                 /* Increment the lock count: */
692                 mutex->m_data.m_count++;
693                 break;
694
695         default:
696                 /* Trap invalid mutex types: */
697                 ret = EINVAL;
698         }
699
700         return(ret);
701 }
702
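/* Handle a blocking lock on a mutex the calling thread already owns. */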
703 static inline int
704 mutex_self_lock(pthread_mutex_t mutex)
705 {
706         int ret = 0;
707
708         switch (mutex->m_type) {
709         /* case PTHREAD_MUTEX_DEFAULT: */
710         case PTHREAD_MUTEX_ERRORCHECK:
711                 /*
712                  * POSIX specifies that mutexes should return EDEADLK if a
713                  * recursive lock is detected.
714                  */
715                 ret = EDEADLK; 
716                 break;
717
718         case PTHREAD_MUTEX_NORMAL:
719                 /*
720                  * What SS2 defines as a 'normal' mutex.  Intentionally
721                  * deadlock on attempts to get a lock you already own.
722                  */
723                 _thread_kern_sched_state_unlock(PS_DEADLOCK,
724                     &mutex->lock, __FILE__, __LINE__);
725                 break;
726
727         case PTHREAD_MUTEX_RECURSIVE:
728                 /* Increment the lock count: */
729                 mutex->m_data.m_count++;
730                 break;
731
732         default:
733                 /* Trap invalid mutex types: */
734                 ret = EINVAL;
735         }
736
737         return(ret);
738 }
739
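/*
 * Common unlock path for pthread_mutex_unlock() and the condition
 * variable code; a non-zero add_reference bumps the mutex reference
 * count (used by _mutex_cv_unlock).
 */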
740 static inline int
741 mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
742 {
743         struct pthread  *curthread = _get_curthread();
744         int     ret = 0;
745
746         if (mutex == NULL || *mutex == NULL) {
747                 ret = EINVAL;
748         } else {
749                 /*
750                  * Defer signals to protect the scheduling queues from
751                  * access by the signal handler:
752                  */
753                 _thread_kern_sig_defer();
754
755                 /* Lock the mutex structure: */
756                 _SPINLOCK(&(*mutex)->lock);
757
758                 /* Process according to mutex protocol: */
759                 switch ((*mutex)->m_protocol) {
760                 /* Default POSIX mutex: */
761                 case PTHREAD_PRIO_NONE:
762                         /*
763                          * Check if the running thread is not the owner of the
764                          * mutex:
765                          */
766                         if ((*mutex)->m_owner != curthread) {
767                                 /*
768                                  * Return an invalid argument error for no
769                                  * owner and a permission error otherwise:
770                                  */
771                                 ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
772                         }
773                         else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
774                             ((*mutex)->m_data.m_count > 0)) {
775                                 /* Decrement the count: */
776                                 (*mutex)->m_data.m_count--;
777                         } else {
778                                 /*
779                                  * Clear the count in case this is a recursive
780                                  * mutex.
781                                  */
782                                 (*mutex)->m_data.m_count = 0;
783
784                                 /* Remove the mutex from the thread's queue. */
785                                 _MUTEX_ASSERT_IS_OWNED(*mutex);
786                                 TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
787                                     (*mutex), m_qe);
788                                 _MUTEX_INIT_LINK(*mutex);
789
790                                 /*
791                                  * Get the next thread from the queue of
792                                  * threads waiting on the mutex: 
793                                  */
794                                 if (((*mutex)->m_owner =
795                                     mutex_queue_deq(*mutex)) != NULL) {
796                                         /* Make the new owner runnable: */
797                                         PTHREAD_NEW_STATE((*mutex)->m_owner,
798                                             PS_RUNNING);
799
800                                         /*
801                                          * Add the mutex to the thread's list of
802                                          * owned mutexes:
803                                          */
804                                         TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
805                                             (*mutex), m_qe);
806
807                                         /*
808                                          * The owner is no longer waiting for
809                                          * this mutex:
810                                          */
811                                         (*mutex)->m_owner->data.mutex = NULL;
812                                 }
813                         }
814                         break;
815
816                 /* POSIX priority inheritance mutex: */
817                 case PTHREAD_PRIO_INHERIT:
818                         /*
819                          * Check if the running thread is not the owner of the
820                          * mutex:
821                          */
822                         if ((*mutex)->m_owner != curthread) {
823                                 /*
824                                  * Return an invalid argument error for no
825                                  * owner and a permission error otherwise:
826                                  */
827                                 ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
828                         }
829                         else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
830                             ((*mutex)->m_data.m_count > 0)) {
831                                 /* Decrement the count: */
832                                 (*mutex)->m_data.m_count--;
833                         } else {
834                                 /*
835                                  * Clear the count in case this is a recursive
836                                  * mutex.
837                                  */
838                                 (*mutex)->m_data.m_count = 0;
839
840                                 /*
841                                  * Restore the thread's inherited priority and
842                                  * recompute the active priority (being careful
843                                  * not to override changes in the thread's base
844                                  * priority subsequent to locking the mutex).
845                                  */
846                                 curthread->inherited_priority =
847                                         (*mutex)->m_saved_prio;
848                                 curthread->active_priority =
849                                     MAX(curthread->inherited_priority,
850                                     curthread->base_priority);
851
852                                 /*
853                                  * This thread now owns one less priority mutex.
854                                  */
855                                 curthread->priority_mutex_count--;
856
857                                 /* Remove the mutex from the thread's queue. */
858                                 _MUTEX_ASSERT_IS_OWNED(*mutex);
859                                 TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
860                                     (*mutex), m_qe);
861                                 _MUTEX_INIT_LINK(*mutex);
862
863                                 /*
864                                  * Get the next thread from the queue of threads
865                                  * waiting on the mutex: 
866                                  */
867                                 if (((*mutex)->m_owner = 
868                                     mutex_queue_deq(*mutex)) == NULL)
869                                         /* This mutex has no priority. */
870                                         (*mutex)->m_prio = 0;
871                                 else {
872                                         /*
873                                          * Track number of priority mutexes owned:
874                                          */
875                                         (*mutex)->m_owner->priority_mutex_count++;
876
877                                         /*
878                                          * Add the mutex to the thread's list
879                                          * of owned mutexes:
880                                          */
881                                         TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
882                                             (*mutex), m_qe);
883
884                                         /*
885                                          * The owner is no longer waiting for
886                                          * this mutex:
887                                          */
888                                         (*mutex)->m_owner->data.mutex = NULL;
889
890                                         /*
891                                          * Set the priority of the mutex.  Since
892                                          * our waiting threads are in descending
893                                          * priority order, the priority of the
894                                          * mutex becomes the active priority of
895                                          * the thread we just dequeued.
896                                          */
897                                         (*mutex)->m_prio =
898                                             (*mutex)->m_owner->active_priority;
899
900                                         /*
901                                          * Save the owning thread's inherited
902                                          * priority:
903                                          */
904                                         (*mutex)->m_saved_prio =
905                                                 (*mutex)->m_owner->inherited_priority;
906
907                                         /*
908                                          * The owning thread's inherited priority
909                                          * now becomes its active priority (the
910                                          * priority of the mutex).
911                                          */
912                                         (*mutex)->m_owner->inherited_priority =
913                                                 (*mutex)->m_prio;
914
915                                         /*
916                                          * Make the new owner runnable:
917                                          */
918                                         PTHREAD_NEW_STATE((*mutex)->m_owner,
919                                             PS_RUNNING);
920                                 }
921                         }
922                         break;
923
924                 /* POSIX priority ceiling mutex: */
925                 case PTHREAD_PRIO_PROTECT:
926                         /*
927                          * Check if the running thread is not the owner of the
928                          * mutex:
929                          */
930                         if ((*mutex)->m_owner != curthread) {
931                                 /*
932                                  * Return an invalid argument error for no
933                                  * owner and a permission error otherwise:
934                                  */
935                                 ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
936                         }
937                         else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
938                             ((*mutex)->m_data.m_count > 0)) {
939                                 /* Decrement the count: */
940                                 (*mutex)->m_data.m_count--;
941                         } else {
942                                 /*
943                                  * Clear the count in case this is a recursive
944                                  * mutex.
945                                  */
946                                 (*mutex)->m_data.m_count = 0;
947
948                                 /*
949                                  * Restore the thread's inherited priority and
950                                  * recompute the active priority (being careful
951                                  * not to override changes in the thread's base
952                                  * priority subsequent to locking the mutex).
953                                  */
954                                 curthread->inherited_priority =
955                                         (*mutex)->m_saved_prio;
956                                 curthread->active_priority =
957                                     MAX(curthread->inherited_priority,
958                                     curthread->base_priority);
959
960                                 /*
961                                  * This thread now owns one less priority mutex.
962                                  */
963                                 curthread->priority_mutex_count--;
964
965                                 /* Remove the mutex from the thread's queue. */
966                                 _MUTEX_ASSERT_IS_OWNED(*mutex);
967                                 TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
968                                     (*mutex), m_qe);
969                                 _MUTEX_INIT_LINK(*mutex);
970
971                                 /*
972                                  * Enter a loop to find a waiting thread whose
973                                  * active priority will not cause a ceiling
974                                  * violation:
975                                  */
976                                 while ((((*mutex)->m_owner =
977                                     mutex_queue_deq(*mutex)) != NULL) &&
978                                     ((*mutex)->m_owner->active_priority >
979                                      (*mutex)->m_prio)) {
980                                         /*
981                                          * Either the mutex ceiling priority
982                                          * has been lowered and/or this thread's
983                                          * priority has been raised subsequent
984                                          * to this thread being queued on the
985                                          * waiting list.
986                                          */
987                                         tls_set_tcb((*mutex)->m_owner->tcb);
988                                         errno = EINVAL;
989                                         tls_set_tcb(curthread->tcb);
990                                         PTHREAD_NEW_STATE((*mutex)->m_owner,
991                                             PS_RUNNING);
992                                         /*
993                                          * The thread is no longer waiting for
994                                          * this mutex:
995                                          */
996                                         (*mutex)->m_owner->data.mutex = NULL;
997                                 }
998
999                                 /* Check for a new owner: */
1000                                 if ((*mutex)->m_owner != NULL) {
1001                                         /*
1002                                          * Track number of priority mutexes owned:
1003                                          */
1004                                         (*mutex)->m_owner->priority_mutex_count++;
1005
1006                                         /*
1007                                          * Add the mutex to the thread's list
1008                                          * of owned mutexes:
1009                                          */
1010                                         TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
1011                                             (*mutex), m_qe);
1012
1013                                         /*
1014                                          * The owner is no longer waiting for
1015                                          * this mutex:
1016                                          */
1017                                         (*mutex)->m_owner->data.mutex = NULL;
1018
1019                                         /*
1020                                          * Save the owning thread's inherited
1021                                          * priority:
1022                                          */
1023                                         (*mutex)->m_saved_prio =
1024                                                 (*mutex)->m_owner->inherited_priority;
1025
1026                                         /*
1027                                          * The owning thread inherits the
1028                                          * ceiling priority of the mutex and
1029                                          * executes at that priority:
1030                                          */
1031                                         (*mutex)->m_owner->inherited_priority =
1032                                             (*mutex)->m_prio;
1033                                         (*mutex)->m_owner->active_priority =
1034                                             (*mutex)->m_prio;
1035
1036                                         /*
1037                                          * Make the new owner runnable:
1038                                          */
1039                                         PTHREAD_NEW_STATE((*mutex)->m_owner,
1040                                             PS_RUNNING);
1041                                 }
1042                         }
1043                         break;
1044
1045                 /* Trap invalid mutex types: */
1046                 default:
1047                         /* Return an invalid argument error: */
1048                         ret = EINVAL;
1049                         break;
1050                 }
1051
1052                 if ((ret == 0) && (add_reference != 0)) {
1053                         /* Increment the reference count: */
1054                         (*mutex)->m_refcount++;
1055                 }
1056
1057                 /* Unlock the mutex structure: */
1058                 _SPINUNLOCK(&(*mutex)->lock);
1059
1060                 /*
1061                  * Undefer and handle pending signals, yielding if
1062                  * necessary:
1063                  */
1064                 _thread_kern_sig_undefer();
1065         }
1066
1067         /* Return the completion status: */
1068         return (ret);
1069 }
1070
1071
1072 /*
1073  * This function is called when a change in base priority occurs for
1074  * a thread that is holding or waiting for a priority protection or
1075  * inheritance mutex.  A change in a thread's base priority can effect
1076  * changes to active priorities of other threads and to the ordering
1077  * of mutex locking by waiting threads.
1078  *
1079  * This must be called while thread scheduling is deferred.
1080  */
1081 void
1082 _mutex_notify_priochange(pthread_t pthread)
1083 {
1084         /* Adjust the priorities of any owned priority mutexes: */
1085         if (pthread->priority_mutex_count > 0) {
1086                 /*
1087                  * Rescan the mutexes owned by this thread and correct
1088                  * their priorities to account for this thread's change
1089                  * in priority.  This has the side effect of changing
1090                  * the thread's active priority.
1091                  */
1092                 mutex_rescan_owned(pthread, /* rescan all owned */ NULL);
1093         }
1094
1095         /*
1096          * If this thread is waiting on a priority inheritance mutex,
1097          * check for priority adjustments.  A change in priority can
1098          * also effect a ceiling violation(*) for a thread waiting on
1099          * a priority protection mutex; we don't perform the check here
1100          * as it is done in pthread_mutex_unlock.
1101          *
1102          * (*) It should be noted that a priority change to a thread
1103          *     _after_ taking and owning a priority ceiling mutex
1104          *     does not affect ownership of that mutex; the ceiling
1105          *     priority is only checked before mutex ownership occurs.
1106          */
1107         if (pthread->state == PS_MUTEX_WAIT) {
1108                 /* Lock the mutex structure: */
1109                 _SPINLOCK(&pthread->data.mutex->lock);
1110
1111                 /*
1112                  * Check to make sure this thread is still in the same state
1113                  * (the spinlock above can yield the CPU to another thread):
1114                  */
1115                 if (pthread->state == PS_MUTEX_WAIT) {
1116                         /*
1117                          * Remove and reinsert this thread into the list of
1118                          * waiting threads to preserve decreasing priority
1119                          * order.
1120                          */
1121                         mutex_queue_remove(pthread->data.mutex, pthread);
1122                         mutex_queue_enq(pthread->data.mutex, pthread);
1123
1124                         if (pthread->data.mutex->m_protocol ==
1125                              PTHREAD_PRIO_INHERIT) {
1126                                 /* Adjust priorities: */
1127                                 mutex_priority_adjust(pthread->data.mutex);
1128                         }
1129                 }
1130
1131                 /* Unlock the mutex structure: */
1132                 _SPINUNLOCK(&pthread->data.mutex->lock);
1133         }
1134 }
1135
1136 /*
1137  * Called when a new thread is added to the mutex waiting queue or
1138  * when the priority of a thread already in the mutex waiting
1139  * queue changes.
1140  */
1141 static void
1142 mutex_priority_adjust(pthread_mutex_t mutex)
1143 {
1144         pthread_t       pthread_next, pthread = mutex->m_owner;
1145         int             temp_prio;
1146         pthread_mutex_t m = mutex;
1147
1148         /*
1149          * Calculate the mutex priority as the maximum of the highest
1150          * active priority of any waiting threads and the owning thread's
1151          * active priority(*).
1152          *
1153          * (*) Because the owning thread's current active priority may
1154          *     reflect priority inherited from this mutex (and the mutex
1155          *     priority may have changed) we must recalculate the active
1156          *     priority based on the thread's saved inherited priority
1157          *     and its base priority.
1158          */
1159         pthread_next = TAILQ_FIRST(&m->m_queue);  /* should never be NULL */
1160         temp_prio = MAX(pthread_next->active_priority,
1161             MAX(m->m_saved_prio, pthread->base_priority));
1162
1163         /* See if this mutex really needs adjusting: */
1164         if (temp_prio == m->m_prio)
1165                 /* No need to propagate the priority: */
1166                 return;
1167
1168         /* Set new priority of the mutex: */
1169         m->m_prio = temp_prio;
1170
1171         while (m != NULL) {
1172                 /*
1173                  * Save the thread's priority before rescanning the
1174                  * owned mutexes:
1175                  */
1176                 temp_prio = pthread->active_priority;
1177
1178                 /*
1179                  * Fix the priorities for all the mutexes this thread has
1180                  * locked since taking this mutex.  This also has a
1181                  * potential side-effect of changing the thread's priority.
1182                  */
1183                 mutex_rescan_owned(pthread, m);
1184
1185                 /*
1186                  * If the thread is currently waiting on a mutex, check
1187                  * to see if the thread's new priority has affected the
1188                  * priority of the mutex.
1189                  */
1190                 if ((temp_prio != pthread->active_priority) &&
1191                     (pthread->state == PS_MUTEX_WAIT) &&
1192                     (pthread->data.mutex->m_protocol == PTHREAD_PRIO_INHERIT)) {
1193                         /* Grab the mutex this thread is waiting on: */
1194                         m = pthread->data.mutex;
1195
1196                         /*
1197                          * The priority for this thread has changed.  Remove
1198                          * and reinsert this thread into the list of waiting
1199                          * threads to preserve decreasing priority order.
1200                          */
1201                         mutex_queue_remove(m, pthread);
1202                         mutex_queue_enq(m, pthread);
1203
1204                         /* Grab the waiting thread with highest priority: */
1205                         pthread_next = TAILQ_FIRST(&m->m_queue);
1206
1207                         /*
1208                          * Calculate the mutex priority as the maximum of the
1209                          * highest active priority of any waiting threads and
1210                          * the owning thread's active priority.
1211                          */
1212                         temp_prio = MAX(pthread_next->active_priority,
1213                             MAX(m->m_saved_prio, m->m_owner->base_priority));
1214
1215                         if (temp_prio != m->m_prio) {
1216                                 /*
1217                                  * The priority needs to be propagated to the
1218                                  * mutex this thread is waiting on and up to
1219                                  * the owner of that mutex.
1220                                  */
1221                                 m->m_prio = temp_prio;
1222                                 pthread = m->m_owner;
1223                         }
1224                         else
1225                                 /* We're done: */
1226                                 m = NULL;
1227
1228                 }
1229                 else
1230                         /* We're done: */
1231                         m = NULL;
1232         }
1233 }
1234
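/*
 * Descriptive summary of the function below, based on what the code does:
 * rescan the mutexes owned by "pthread", starting at the head of its
 * owned-mutex list (mutex == NULL) or just after "mutex".  For each
 * priority inheritance mutex, the saved and current mutex priorities are
 * recomputed from the waiting threads and the accumulated inherited
 * priority.  Finally the thread's own inherited and active priorities are
 * recalculated and, if its active priority changed while it is in the
 * priority queue, the thread is requeued at its new priority.
 */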
1235 static void
1236 mutex_rescan_owned(pthread_t pthread, pthread_mutex_t mutex)
1237 {
1238         int             active_prio, inherited_prio;
1239         pthread_mutex_t m;
1240         pthread_t       pthread_next;
1241
1242         /*
1243          * Start walking the mutexes the thread has taken since
1244          * taking this mutex.
1245          */
1246         if (mutex == NULL) {
1247                 /*
1248                  * A null mutex means start at the beginning of the owned
1249                  * mutex list.
1250                  */
1251                 m = TAILQ_FIRST(&pthread->mutexq);
1252
1253                 /* There is no inherited priority yet. */
1254                 inherited_prio = 0;
1255         }
1256         else {
1257                 /*
1258                  * The caller wants to start after a specific mutex.  It
1259                  * is assumed that this mutex is a priority inheritance
1260                  * mutex and that its priority has been correctly
1261                  * calculated.
1262                  */
1263                 m = TAILQ_NEXT(mutex, m_qe);
1264
1265                 /* Start inheriting priority from the specified mutex. */
1266                 inherited_prio = mutex->m_prio;
1267         }
1268         active_prio = MAX(inherited_prio, pthread->base_priority);
1269
1270         while (m != NULL) {
1271                 /*
1272                  * We only want to deal with priority inheritance
1273                  * mutexes.  This might be optimized by only placing
1274                  * priority inheritance mutexes into the owned mutex
1275                  * list, but it may prove useful to have all
1276                  * owned mutexes in this list.  Consider a thread
1277                  * exiting while holding mutexes...
1278                  */
1279                 if (m->m_protocol == PTHREAD_PRIO_INHERIT) {
1280                         /*
1281                          * Fix the owner's saved (inherited) priority to
1282                          * reflect the priority of the previous mutex.
1283                          */
1284                         m->m_saved_prio = inherited_prio;
1285
1286                         if ((pthread_next = TAILQ_FIRST(&m->m_queue)) != NULL)
1287                                 /* Recalculate the priority of the mutex: */
1288                                 m->m_prio = MAX(active_prio,
1289                                      pthread_next->active_priority);
1290                         else
1291                                 m->m_prio = active_prio;
1292
1293                         /* Recalculate new inherited and active priorities: */
1294                         inherited_prio = m->m_prio;
1295                         active_prio = MAX(m->m_prio, pthread->base_priority);
1296                 }
1297
1298                 /* Advance to the next mutex owned by this thread: */
1299                 m = TAILQ_NEXT(m, m_qe);
1300         }
1301
1302         /*
1303          * Fix the thread's inherited priority and recalculate its
1304          * active priority.
1305          */
1306         pthread->inherited_priority = inherited_prio;
1307         active_prio = MAX(inherited_prio, pthread->base_priority);
1308
1309         if (active_prio != pthread->active_priority) {
1310                 /*
1311                  * If this thread is in the priority queue, it must be
1312                  * removed and reinserted for its new priority.
1313                  */
1314                 if (pthread->flags & PTHREAD_FLAGS_IN_PRIOQ) {
1315                         /*
1316                          * Remove the thread from the priority queue
1317                          * before changing its priority:
1318                          */
1319                         PTHREAD_PRIOQ_REMOVE(pthread);
1320
1321                         /*
1322                          * POSIX states that if the priority is being
1323                          * lowered, the thread must be inserted at the
1324                          * head of the queue for its priority if it owns
1325                          * any priority protection or inheritance mutexes.
1326                          */
1327                         if ((active_prio < pthread->active_priority) &&
1328                             (pthread->priority_mutex_count > 0)) {
1329                                 /* Set the new active priority. */
1330                                 pthread->active_priority = active_prio;
1331
1332                                 PTHREAD_PRIOQ_INSERT_HEAD(pthread);
1333                         }
1334                         else {
1335                                 /* Set the new active priority. */
1336                                 pthread->active_priority = active_prio;
1337
1338                                 PTHREAD_PRIOQ_INSERT_TAIL(pthread);
1339                         }
1340                 }
1341                 else {
1342                         /* Set the new active priority. */
1343                         pthread->active_priority = active_prio;
1344                 }
1345         }
1346 }
1347
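/*
 * Unlock every mutex flagged MUTEX_FLAGS_PRIVATE on the thread's
 * owned-mutex list.  (Presumably used to release library-internal
 * locks, e.g. around fork handling.)
 */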
1348 void
1349 _mutex_unlock_private(pthread_t pthread)
1350 {
1351         struct pthread_mutex    *m, *m_next;
1352
1353         for (m = TAILQ_FIRST(&pthread->mutexq); m != NULL; m = m_next) {
1354                 m_next = TAILQ_NEXT(m, m_qe);
1355                 if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
1356                         pthread_mutex_unlock(&m);
1357         }
1358 }
1359
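/*
 * Back a thread out of a pending mutex lock: if the thread is still
 * queued on the mutex it was blocked on, remove it from that wait queue
 * and clear its reference to the mutex.  (Presumably used when a blocked
 * lock attempt is interrupted, e.g. by a signal or cancellation.)
 */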
1360 void
1361 _mutex_lock_backout(pthread_t pthread)
1362 {
1363         struct pthread_mutex    *mutex;
1364
1365         /*
1366          * Defer signals to protect the scheduling queues from
1367          * access by the signal handler:
1368          */
1369         _thread_kern_sig_defer();
1370         if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {
1371                 mutex = pthread->data.mutex;
1372
1373                 /* Lock the mutex structure: */
1374                 _SPINLOCK(&mutex->lock);
1375
1376                 mutex_queue_remove(mutex, pthread);
1377
1378                 /* This thread is no longer waiting for the mutex: */
1379                 pthread->data.mutex = NULL;
1380
1381                 /* Unlock the mutex structure: */
1382                 _SPINUNLOCK(&mutex->lock);
1383
1384         }
1385         /*
1386          * Undefer and handle pending signals, yielding if
1387          * necessary:
1388          */
1389         _thread_kern_sig_undefer();
1390 }
1391
1392 /*
1393  * Dequeue a waiting thread from the head of a mutex queue in descending
1394  * priority order.
1395  */
1396 static inline pthread_t
1397 mutex_queue_deq(pthread_mutex_t mutex)
1398 {
1399         pthread_t pthread;
1400
1401         while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) {
1402                 TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
1403                 pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;
1404
1405                 /*
1406                  * Only exit the loop if the thread hasn't been
1407                  * cancelled.
1408                  */
1409                 if (pthread->interrupted == 0)
1410                         break;
1411         }
1412
1413         return(pthread);
1414 }
1415
1416 /*
1417  * Remove a waiting thread from a mutex queue in descending priority order.
1418  */
1419 static inline void
1420 mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread)
1421 {
1422         if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {
1423                 TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
1424                 pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;
1425         }
1426 }
1427
1428 /*
1429  * Enqueue a waiting thread to a queue in descending priority order.
1430  */
1431 static inline void
1432 mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
1433 {
1434         pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head);
1435
1436         PTHREAD_ASSERT_NOT_IN_SYNCQ(pthread);
1437         /*
1438          * For the common case of all threads having equal priority,
1439          * we perform a quick check against the priority of the thread
1440          * at the tail of the queue.
1441          */
1442         if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
1443                 TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, sqe);
1444         else {
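                /*
                 * Otherwise scan from the head for the first thread with
                 * a strictly lower priority and insert in front of it;
                 * equal-priority threads therefore remain in FIFO order.
                 */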
1445                 tid = TAILQ_FIRST(&mutex->m_queue);
1446                 while (pthread->active_priority <= tid->active_priority)
1447                         tid = TAILQ_NEXT(tid, sqe);
1448                 TAILQ_INSERT_BEFORE(tid, pthread, sqe);
1449         }
1450         pthread->flags |= PTHREAD_FLAGS_IN_MUTEXQ;
1451 }
1452
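/*
 * Export the _pthread_mutex_*() implementations under their standard
 * POSIX names as strong aliases.
 */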
1453 __strong_reference(_pthread_mutex_init, pthread_mutex_init);
1454 __strong_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
1455 __strong_reference(_pthread_mutex_trylock, pthread_mutex_trylock);
1456 __strong_reference(_pthread_mutex_lock, pthread_mutex_lock);
1457 __strong_reference(_pthread_mutex_unlock, pthread_mutex_unlock);