/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/lib/libc_r/uthread/uthread_mutex.c,v 1.20.2.8 2002/10/22 14:44:03 fjoe Exp $
 * $DragonFly: src/lib/libc_r/uthread/uthread_mutex.c,v 1.2 2003/06/17 04:26:48 dillon Exp $
 */
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <pthread.h>
#include "pthread_private.h"

#if defined(_PTHREADS_INVARIANTS)
#define _MUTEX_INIT_LINK(m)             do {            \
        (m)->m_qe.tqe_prev = NULL;                      \
        (m)->m_qe.tqe_next = NULL;                      \
} while (0)
#define _MUTEX_ASSERT_IS_OWNED(m)       do {            \
        if ((m)->m_qe.tqe_prev == NULL)                 \
                PANIC("mutex is not on list");          \
} while (0)
#define _MUTEX_ASSERT_NOT_OWNED(m)      do {            \
        if (((m)->m_qe.tqe_prev != NULL) ||             \
            ((m)->m_qe.tqe_next != NULL))               \
                PANIC("mutex is on list");              \
} while (0)
#else
#define _MUTEX_INIT_LINK(m)
#define _MUTEX_ASSERT_IS_OWNED(m)
#define _MUTEX_ASSERT_NOT_OWNED(m)
#endif

/*
 * Prototypes
 */
static inline int       mutex_self_trylock(pthread_mutex_t);
static inline int       mutex_self_lock(pthread_mutex_t);
static inline int       mutex_unlock_common(pthread_mutex_t *, int);
static void             mutex_priority_adjust(pthread_mutex_t);
static void             mutex_rescan_owned (pthread_t, pthread_mutex_t);
static inline pthread_t mutex_queue_deq(pthread_mutex_t);
static inline void      mutex_queue_remove(pthread_mutex_t, pthread_t);
static inline void      mutex_queue_enq(pthread_mutex_t, pthread_t);


static spinlock_t static_init_lock = _SPINLOCK_INITIALIZER;

__weak_reference(_pthread_mutex_init, pthread_mutex_init);
__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__weak_reference(_pthread_mutex_trylock, pthread_mutex_trylock);
__weak_reference(_pthread_mutex_lock, pthread_mutex_lock);
__weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);

/* Reinitialize a mutex to defaults. */
int
_mutex_reinit(pthread_mutex_t * mutex)
{
        int     ret = 0;

        if (mutex == NULL)
                ret = EINVAL;
        else if (*mutex == NULL)
                ret = pthread_mutex_init(mutex, NULL);
        else {
                /*
                 * Initialize the mutex structure:
                 */
                (*mutex)->m_type = PTHREAD_MUTEX_DEFAULT;
                (*mutex)->m_protocol = PTHREAD_PRIO_NONE;
                TAILQ_INIT(&(*mutex)->m_queue);
                (*mutex)->m_owner = NULL;
                (*mutex)->m_data.m_count = 0;
                (*mutex)->m_flags &= MUTEX_FLAGS_PRIVATE;
                (*mutex)->m_flags |= MUTEX_FLAGS_INITED;
                (*mutex)->m_refcount = 0;
                (*mutex)->m_prio = 0;
                (*mutex)->m_saved_prio = 0;
                _MUTEX_INIT_LINK(*mutex);
                memset(&(*mutex)->lock, 0, sizeof((*mutex)->lock));
        }
        return (ret);
}

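/*
 * Create and initialize a mutex, honoring the type, protocol and
 * priority ceiling requested in the (optional) attribute object.
 */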
int
_pthread_mutex_init(pthread_mutex_t * mutex,
                   const pthread_mutexattr_t * mutex_attr)
{
        enum pthread_mutextype  type;
        int             protocol;
        int             ceiling;
        pthread_mutex_t pmutex;
        int             ret = 0;

        if (mutex == NULL)
                ret = EINVAL;

        /* Check for default mutex attributes: */
        else if (mutex_attr == NULL || *mutex_attr == NULL) {
                /* Default to an error-checking POSIX mutex: */
                type = PTHREAD_MUTEX_ERRORCHECK;
                protocol = PTHREAD_PRIO_NONE;
                ceiling = PTHREAD_MAX_PRIORITY;
        }

        /* Check mutex type: */
        else if (((*mutex_attr)->m_type < PTHREAD_MUTEX_ERRORCHECK) ||
            ((*mutex_attr)->m_type >= MUTEX_TYPE_MAX))
                /* Return an invalid argument error: */
                ret = EINVAL;

        /* Check mutex protocol: */
        else if (((*mutex_attr)->m_protocol < PTHREAD_PRIO_NONE) ||
            ((*mutex_attr)->m_protocol > PTHREAD_PRIO_PROTECT))
                /* Return an invalid argument error: */
                ret = EINVAL;

        else {
                /* Use the requested mutex type and protocol: */
                type = (*mutex_attr)->m_type;
                protocol = (*mutex_attr)->m_protocol;
                ceiling = (*mutex_attr)->m_ceiling;
        }

        /* Check no errors so far: */
        if (ret == 0) {
                if ((pmutex = (pthread_mutex_t)
                    malloc(sizeof(struct pthread_mutex))) == NULL)
                        ret = ENOMEM;
                else {
                        /* Reset the mutex flags: */
                        pmutex->m_flags = 0;

                        /* Process according to mutex type: */
                        switch (type) {
                        /* case PTHREAD_MUTEX_DEFAULT: */
                        case PTHREAD_MUTEX_ERRORCHECK:
                        case PTHREAD_MUTEX_NORMAL:
                                /* Nothing to do here. */
                                break;

                        /* Single UNIX Spec 2 recursive mutex: */
                        case PTHREAD_MUTEX_RECURSIVE:
                                /* Reset the mutex count: */
                                pmutex->m_data.m_count = 0;
                                break;

                        /* Trap invalid mutex types: */
                        default:
                                /* Return an invalid argument error: */
                                ret = EINVAL;
                                break;
                        }
                        if (ret == 0) {
                                /* Initialise the rest of the mutex: */
                                TAILQ_INIT(&pmutex->m_queue);
                                pmutex->m_flags |= MUTEX_FLAGS_INITED;
                                pmutex->m_owner = NULL;
                                pmutex->m_type = type;
                                pmutex->m_protocol = protocol;
                                pmutex->m_refcount = 0;
                                if (protocol == PTHREAD_PRIO_PROTECT)
                                        pmutex->m_prio = ceiling;
                                else
                                        pmutex->m_prio = 0;
                                pmutex->m_saved_prio = 0;
                                _MUTEX_INIT_LINK(pmutex);
                                memset(&pmutex->lock, 0, sizeof(pmutex->lock));
                                *mutex = pmutex;
                        } else {
                                free(pmutex);
                                *mutex = NULL;
                        }
                }
        }
        /* Return the completion status: */
        return(ret);
}

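/*
 * Destroy a mutex and release its storage.  Fails with EBUSY if the
 * mutex is still locked, still referenced, or has threads waiting on it.
 */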
int
_pthread_mutex_destroy(pthread_mutex_t * mutex)
{
        int     ret = 0;

        if (mutex == NULL || *mutex == NULL)
                ret = EINVAL;
        else {
                /* Lock the mutex structure: */
                _SPINLOCK(&(*mutex)->lock);

                /*
                 * Check to see if this mutex is in use:
                 */
                if (((*mutex)->m_owner != NULL) ||
                    (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) ||
                    ((*mutex)->m_refcount != 0)) {
                        ret = EBUSY;

                        /* Unlock the mutex structure: */
                        _SPINUNLOCK(&(*mutex)->lock);
                }
                else {
                        /*
                         * Free the memory allocated for the mutex
                         * structure:
                         */
                        _MUTEX_ASSERT_NOT_OWNED(*mutex);
                        free(*mutex);

                        /*
                         * Leave the caller's pointer NULL now that
                         * the mutex has been destroyed:
                         */
                        *mutex = NULL;
                }
        }

        /* Return the completion status: */
        return (ret);
}

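/*
 * Perform the deferred initialization of a statically initialized
 * mutex.  The spinlock serializes racing first-use initializations.
 */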
static int
init_static(pthread_mutex_t *mutex)
{
        int     ret;

        _SPINLOCK(&static_init_lock);

        if (*mutex == NULL)
                ret = pthread_mutex_init(mutex, NULL);
        else
                ret = 0;

        _SPINUNLOCK(&static_init_lock);

        return(ret);
}

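/*
 * Try to lock a mutex without blocking; a mutex already owned by
 * another thread is reported as EBUSY.
 */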
int
_pthread_mutex_trylock(pthread_mutex_t * mutex)
{
        struct pthread  *curthread = _get_curthread();
        int     ret = 0;

        if (mutex == NULL)
                ret = EINVAL;

        /*
         * If the mutex is statically initialized, perform the dynamic
         * initialization:
         */
        else if (*mutex != NULL || (ret = init_static(mutex)) == 0) {
                /*
                 * Defer signals to protect the scheduling queues from
                 * access by the signal handler:
                 */
                _thread_kern_sig_defer();

                /* Lock the mutex structure: */
                _SPINLOCK(&(*mutex)->lock);

                /*
                 * If the mutex was statically allocated, properly
                 * initialize the tail queue.
                 */
                if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
                        TAILQ_INIT(&(*mutex)->m_queue);
                        _MUTEX_INIT_LINK(*mutex);
                        (*mutex)->m_flags |= MUTEX_FLAGS_INITED;
                }

                /* Process according to mutex type: */
                switch ((*mutex)->m_protocol) {
                /* Default POSIX mutex: */
                case PTHREAD_PRIO_NONE:
                        /* Check if this mutex is not locked: */
                        if ((*mutex)->m_owner == NULL) {
                                /* Lock the mutex for the running thread: */
                                (*mutex)->m_owner = curthread;

                                /* Add to the list of owned mutexes: */
                                _MUTEX_ASSERT_NOT_OWNED(*mutex);
                                TAILQ_INSERT_TAIL(&curthread->mutexq,
                                    (*mutex), m_qe);
                        } else if ((*mutex)->m_owner == curthread)
                                ret = mutex_self_trylock(*mutex);
                        else
                                /* Return a busy error: */
                                ret = EBUSY;
                        break;

                /* POSIX priority inheritance mutex: */
                case PTHREAD_PRIO_INHERIT:
                        /* Check if this mutex is not locked: */
                        if ((*mutex)->m_owner == NULL) {
                                /* Lock the mutex for the running thread: */
                                (*mutex)->m_owner = curthread;

                                /* Track number of priority mutexes owned: */
                                curthread->priority_mutex_count++;

                                /*
                                 * The mutex takes on the attributes of the
                                 * running thread when there are no waiters.
                                 */
                                (*mutex)->m_prio = curthread->active_priority;
                                (*mutex)->m_saved_prio =
                                    curthread->inherited_priority;

                                /* Add to the list of owned mutexes: */
                                _MUTEX_ASSERT_NOT_OWNED(*mutex);
                                TAILQ_INSERT_TAIL(&curthread->mutexq,
                                    (*mutex), m_qe);
                        } else if ((*mutex)->m_owner == curthread)
                                ret = mutex_self_trylock(*mutex);
                        else
                                /* Return a busy error: */
                                ret = EBUSY;
                        break;

                /* POSIX priority protection mutex: */
                case PTHREAD_PRIO_PROTECT:
                        /* Check for a priority ceiling violation: */
                        if (curthread->active_priority > (*mutex)->m_prio)
                                ret = EINVAL;

                        /* Check if this mutex is not locked: */
                        else if ((*mutex)->m_owner == NULL) {
                                /* Lock the mutex for the running thread: */
                                (*mutex)->m_owner = curthread;

                                /* Track number of priority mutexes owned: */
                                curthread->priority_mutex_count++;

                                /*
                                 * The running thread inherits the ceiling
                                 * priority of the mutex and executes at that
                                 * priority.
                                 */
                                curthread->active_priority = (*mutex)->m_prio;
                                (*mutex)->m_saved_prio =
                                    curthread->inherited_priority;
                                curthread->inherited_priority =
                                    (*mutex)->m_prio;

                                /* Add to the list of owned mutexes: */
                                _MUTEX_ASSERT_NOT_OWNED(*mutex);
                                TAILQ_INSERT_TAIL(&curthread->mutexq,
                                    (*mutex), m_qe);
                        } else if ((*mutex)->m_owner == curthread)
                                ret = mutex_self_trylock(*mutex);
                        else
                                /* Return a busy error: */
                                ret = EBUSY;
                        break;

                /* Trap invalid mutex types: */
                default:
                        /* Return an invalid argument error: */
                        ret = EINVAL;
                        break;
                }

                /* Unlock the mutex structure: */
                _SPINUNLOCK(&(*mutex)->lock);

                /*
                 * Undefer and handle pending signals, yielding if
                 * necessary:
                 */
                _thread_kern_sig_undefer();
        }

        /* Return the completion status: */
        return (ret);
}

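/*
 * Lock a mutex, blocking in the thread scheduler until it becomes
 * available.  See the comment above the acquisition loop below for
 * how interruption by signal handlers is handled.
 */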
int
_pthread_mutex_lock(pthread_mutex_t * mutex)
{
        struct pthread  *curthread = _get_curthread();
        int     ret = 0;

        if (_thread_initial == NULL)
                _thread_init();

        if (mutex == NULL)
                return (EINVAL);

        /*
         * If the mutex is statically initialized, perform the dynamic
         * initialization:
         */
        if ((*mutex == NULL) &&
            ((ret = init_static(mutex)) != 0))
                return (ret);

        /* Reset the interrupted flag: */
        curthread->interrupted = 0;

        /*
         * Enter a loop waiting to become the mutex owner.  We need a
         * loop in case the waiting thread is interrupted by a signal
         * to execute a signal handler.  It is not (currently) possible
         * to remain in the waiting queue while running a handler.
         * Instead, the thread is interrupted and backed out of the
         * waiting queue prior to executing the signal handler.
         */
        do {
                /*
                 * Defer signals to protect the scheduling queues from
                 * access by the signal handler:
                 */
                _thread_kern_sig_defer();

                /* Lock the mutex structure: */
                _SPINLOCK(&(*mutex)->lock);

                /*
                 * If the mutex was statically allocated, properly
                 * initialize the tail queue.
                 */
                if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
                        TAILQ_INIT(&(*mutex)->m_queue);
                        (*mutex)->m_flags |= MUTEX_FLAGS_INITED;
                        _MUTEX_INIT_LINK(*mutex);
                }

                /* Process according to mutex type: */
                switch ((*mutex)->m_protocol) {
                /* Default POSIX mutex: */
                case PTHREAD_PRIO_NONE:
                        if ((*mutex)->m_owner == NULL) {
                                /* Lock the mutex for this thread: */
                                (*mutex)->m_owner = curthread;

                                /* Add to the list of owned mutexes: */
                                _MUTEX_ASSERT_NOT_OWNED(*mutex);
                                TAILQ_INSERT_TAIL(&curthread->mutexq,
                                    (*mutex), m_qe);

                        } else if ((*mutex)->m_owner == curthread)
                                ret = mutex_self_lock(*mutex);
                        else {
                                /*
                                 * Join the queue of threads waiting to lock
                                 * the mutex:
                                 */
                                mutex_queue_enq(*mutex, curthread);

                                /*
                                 * Keep a pointer to the mutex this thread
                                 * is waiting on:
                                 */
                                curthread->data.mutex = *mutex;

                                /*
                                 * Unlock the mutex structure and schedule the
                                 * next thread:
                                 */
                                _thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
                                    &(*mutex)->lock, __FILE__, __LINE__);

                                /* Lock the mutex structure again: */
                                _SPINLOCK(&(*mutex)->lock);
                        }
                        break;

                /* POSIX priority inheritance mutex: */
                case PTHREAD_PRIO_INHERIT:
                        /* Check if this mutex is not locked: */
                        if ((*mutex)->m_owner == NULL) {
                                /* Lock the mutex for this thread: */
                                (*mutex)->m_owner = curthread;

                                /* Track number of priority mutexes owned: */
                                curthread->priority_mutex_count++;

                                /*
                                 * The mutex takes on attributes of the
                                 * running thread when there are no waiters.
                                 */
                                (*mutex)->m_prio = curthread->active_priority;
                                (*mutex)->m_saved_prio =
                                    curthread->inherited_priority;
                                curthread->inherited_priority =
                                    (*mutex)->m_prio;

                                /* Add to the list of owned mutexes: */
                                _MUTEX_ASSERT_NOT_OWNED(*mutex);
                                TAILQ_INSERT_TAIL(&curthread->mutexq,
                                    (*mutex), m_qe);

                        } else if ((*mutex)->m_owner == curthread)
                                ret = mutex_self_lock(*mutex);
                        else {
                                /*
                                 * Join the queue of threads waiting to lock
                                 * the mutex:
                                 */
                                mutex_queue_enq(*mutex, curthread);

                                /*
                                 * Keep a pointer to the mutex this thread
                                 * is waiting on:
                                 */
                                curthread->data.mutex = *mutex;

                                if (curthread->active_priority >
                                    (*mutex)->m_prio)
                                        /* Adjust priorities: */
                                        mutex_priority_adjust(*mutex);

                                /*
                                 * Unlock the mutex structure and schedule the
                                 * next thread:
                                 */
                                _thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
                                    &(*mutex)->lock, __FILE__, __LINE__);

                                /* Lock the mutex structure again: */
                                _SPINLOCK(&(*mutex)->lock);
                        }
                        break;

                /* POSIX priority protection mutex: */
                case PTHREAD_PRIO_PROTECT:
                        /* Check for a priority ceiling violation: */
                        if (curthread->active_priority > (*mutex)->m_prio)
                                ret = EINVAL;

                        /* Check if this mutex is not locked: */
                        else if ((*mutex)->m_owner == NULL) {
                                /*
                                 * Lock the mutex for the running
                                 * thread:
                                 */
                                (*mutex)->m_owner = curthread;

                                /* Track number of priority mutexes owned: */
                                curthread->priority_mutex_count++;

                                /*
                                 * The running thread inherits the ceiling
                                 * priority of the mutex and executes at that
                                 * priority:
                                 */
                                curthread->active_priority = (*mutex)->m_prio;
                                (*mutex)->m_saved_prio =
                                    curthread->inherited_priority;
                                curthread->inherited_priority =
                                    (*mutex)->m_prio;

                                /* Add to the list of owned mutexes: */
                                _MUTEX_ASSERT_NOT_OWNED(*mutex);
                                TAILQ_INSERT_TAIL(&curthread->mutexq,
                                    (*mutex), m_qe);
                        } else if ((*mutex)->m_owner == curthread)
                                ret = mutex_self_lock(*mutex);
                        else {
                                /*
                                 * Join the queue of threads waiting to lock
                                 * the mutex:
                                 */
                                mutex_queue_enq(*mutex, curthread);

                                /*
                                 * Keep a pointer to the mutex this thread
                                 * is waiting on:
                                 */
                                curthread->data.mutex = *mutex;

                                /* Clear any previous error: */
                                curthread->error = 0;

                                /*
                                 * Unlock the mutex structure and schedule the
                                 * next thread:
                                 */
                                _thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
                                    &(*mutex)->lock, __FILE__, __LINE__);

                                /* Lock the mutex structure again: */
                                _SPINLOCK(&(*mutex)->lock);

                                /*
                                 * The thread's priority may have changed while
                                 * it was waiting for the mutex, causing a
                                 * ceiling violation.
                                 */
                                ret = curthread->error;
                                curthread->error = 0;
                        }
                        break;

                /* Trap invalid mutex types: */
                default:
                        /* Return an invalid argument error: */
                        ret = EINVAL;
                        break;
                }

                /*
                 * Check to see if this thread was interrupted and
                 * is still in the mutex queue of waiting threads:
                 */
                if (curthread->interrupted != 0)
                        mutex_queue_remove(*mutex, curthread);

                /* Unlock the mutex structure: */
                _SPINUNLOCK(&(*mutex)->lock);

                /*
                 * Undefer and handle pending signals, yielding if
                 * necessary:
                 */
                _thread_kern_sig_undefer();
        } while (((*mutex)->m_owner != curthread) && (ret == 0) &&
            (curthread->interrupted == 0));

        if (curthread->interrupted != 0 &&
            curthread->continuation != NULL)
                curthread->continuation((void *) curthread);

        /* Return the completion status: */
        return (ret);
}

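/*
 * Unlock a mutex.  This is the public entry point; it does not add a
 * reference to the mutex.
 */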
int
_pthread_mutex_unlock(pthread_mutex_t * mutex)
{
        return (mutex_unlock_common(mutex, /* add reference */ 0));
}

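/*
 * Unlock a mutex on behalf of the condition variable code, bumping the
 * reference count so that pthread_mutex_destroy() reports EBUSY while
 * a waiter still holds the reference.
 */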
int
_mutex_cv_unlock(pthread_mutex_t * mutex)
{
        return (mutex_unlock_common(mutex, /* add reference */ 1));
}

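/*
 * Reacquire a mutex for the condition variable code and drop the
 * reference taken by _mutex_cv_unlock().
 */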
int
_mutex_cv_lock(pthread_mutex_t * mutex)
{
        int     ret;
        if ((ret = pthread_mutex_lock(mutex)) == 0)
                (*mutex)->m_refcount--;
        return (ret);
}

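/*
 * Handle a trylock on a mutex that the calling thread already owns.
 */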
static inline int
mutex_self_trylock(pthread_mutex_t mutex)
{
        int     ret = 0;

        switch (mutex->m_type) {

        /* case PTHREAD_MUTEX_DEFAULT: */
        case PTHREAD_MUTEX_ERRORCHECK:
        case PTHREAD_MUTEX_NORMAL:
                /*
                 * A trylock on a mutex that the thread already owns is
                 * simply reported as busy; EDEADLK is only returned by
                 * the blocking lock path (see mutex_self_lock()).
                 */
                ret = EBUSY;
                break;

        case PTHREAD_MUTEX_RECURSIVE:
                /* Increment the lock count: */
                mutex->m_data.m_count++;
                break;

        default:
                /* Trap invalid mutex types: */
                ret = EINVAL;
        }

        return(ret);
}

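/*
 * Handle a blocking lock on a mutex that the calling thread already
 * owns.
 */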
static inline int
mutex_self_lock(pthread_mutex_t mutex)
{
        int ret = 0;

        switch (mutex->m_type) {
        /* case PTHREAD_MUTEX_DEFAULT: */
        case PTHREAD_MUTEX_ERRORCHECK:
                /*
                 * POSIX specifies that mutexes should return EDEADLK if a
                 * recursive lock is detected.
                 */
                ret = EDEADLK;
                break;

        case PTHREAD_MUTEX_NORMAL:
                /*
                 * What SS2 defines as a 'normal' mutex: intentionally
                 * deadlock on attempts to get a lock you already own.
                 */
                _thread_kern_sched_state_unlock(PS_DEADLOCK,
                    &mutex->lock, __FILE__, __LINE__);
                break;

        case PTHREAD_MUTEX_RECURSIVE:
                /* Increment the lock count: */
                mutex->m_data.m_count++;
                break;

        default:
                /* Trap invalid mutex types: */
                ret = EINVAL;
        }

        return(ret);
}

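/*
 * Common unlock path for pthread_mutex_unlock() and the condition
 * variable code.  If add_reference is non-zero the mutex reference
 * count is incremented after a successful unlock.
 */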
static inline int
mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
{
        struct pthread  *curthread = _get_curthread();
        int     ret = 0;

        if (mutex == NULL || *mutex == NULL) {
                ret = EINVAL;
        } else {
                /*
                 * Defer signals to protect the scheduling queues from
                 * access by the signal handler:
                 */
                _thread_kern_sig_defer();

                /* Lock the mutex structure: */
                _SPINLOCK(&(*mutex)->lock);

                /* Process according to mutex type: */
                switch ((*mutex)->m_protocol) {
                /* Default POSIX mutex: */
                case PTHREAD_PRIO_NONE:
                        /*
                         * Check if the running thread is not the owner of the
                         * mutex:
                         */
                        if ((*mutex)->m_owner != curthread) {
                                /*
                                 * Return an invalid argument error for no
                                 * owner and a permission error otherwise:
                                 */
                                ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
                        }
                        else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
                            ((*mutex)->m_data.m_count > 0)) {
                                /* Decrement the count: */
                                (*mutex)->m_data.m_count--;
                        } else {
                                /*
                                 * Clear the count in case this is a recursive
                                 * mutex.
                                 */
                                (*mutex)->m_data.m_count = 0;

                                /* Remove the mutex from the thread's queue. */
                                _MUTEX_ASSERT_IS_OWNED(*mutex);
                                TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
                                    (*mutex), m_qe);
                                _MUTEX_INIT_LINK(*mutex);

                                /*
                                 * Get the next thread from the queue of
                                 * threads waiting on the mutex:
                                 */
                                if (((*mutex)->m_owner =
                                    mutex_queue_deq(*mutex)) != NULL) {
                                        /* Make the new owner runnable: */
                                        PTHREAD_NEW_STATE((*mutex)->m_owner,
                                            PS_RUNNING);

                                        /*
                                         * Add the mutex to the thread's list of
                                         * owned mutexes:
                                         */
                                        TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
                                            (*mutex), m_qe);

                                        /*
                                         * The owner is no longer waiting for
                                         * this mutex:
                                         */
                                        (*mutex)->m_owner->data.mutex = NULL;
                                }
                        }
                        break;

                /* POSIX priority inheritance mutex: */
                case PTHREAD_PRIO_INHERIT:
                        /*
                         * Check if the running thread is not the owner of the
                         * mutex:
                         */
                        if ((*mutex)->m_owner != curthread) {
                                /*
                                 * Return an invalid argument error for no
                                 * owner and a permission error otherwise:
                                 */
                                ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
                        }
                        else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
                            ((*mutex)->m_data.m_count > 0)) {
                                /* Decrement the count: */
                                (*mutex)->m_data.m_count--;
                        } else {
                                /*
                                 * Clear the count in case this is a recursive
                                 * mutex.
                                 */
                                (*mutex)->m_data.m_count = 0;

                                /*
                                 * Restore the thread's inherited priority and
                                 * recompute the active priority (being careful
                                 * not to override changes in the thread's base
                                 * priority subsequent to locking the mutex).
                                 */
                                curthread->inherited_priority =
                                        (*mutex)->m_saved_prio;
                                curthread->active_priority =
                                    MAX(curthread->inherited_priority,
                                    curthread->base_priority);

                                /*
                                 * This thread now owns one less priority mutex.
                                 */
                                curthread->priority_mutex_count--;

                                /* Remove the mutex from the thread's queue. */
                                _MUTEX_ASSERT_IS_OWNED(*mutex);
                                TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
                                    (*mutex), m_qe);
                                _MUTEX_INIT_LINK(*mutex);

                                /*
                                 * Get the next thread from the queue of threads
                                 * waiting on the mutex:
                                 */
                                if (((*mutex)->m_owner =
                                    mutex_queue_deq(*mutex)) == NULL)
                                        /* This mutex has no priority. */
                                        (*mutex)->m_prio = 0;
                                else {
                                        /*
                                         * Track number of priority mutexes owned:
                                         */
                                        (*mutex)->m_owner->priority_mutex_count++;

                                        /*
                                         * Add the mutex to the thread's list
                                         * of owned mutexes:
                                         */
                                        TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
                                            (*mutex), m_qe);

                                        /*
                                         * The owner is no longer waiting for
                                         * this mutex:
                                         */
                                        (*mutex)->m_owner->data.mutex = NULL;

                                        /*
                                         * Set the priority of the mutex.  Since
                                         * our waiting threads are in descending
                                         * priority order, the priority of the
                                         * mutex becomes the active priority of
                                         * the thread we just dequeued.
                                         */
                                        (*mutex)->m_prio =
                                            (*mutex)->m_owner->active_priority;

                                        /*
                                         * Save the owning thread's inherited
                                         * priority:
                                         */
                                        (*mutex)->m_saved_prio =
                                                (*mutex)->m_owner->inherited_priority;

                                        /*
                                         * The owning thread's inherited priority
                                         * now becomes its active priority (the
                                         * priority of the mutex).
                                         */
                                        (*mutex)->m_owner->inherited_priority =
                                                (*mutex)->m_prio;

                                        /*
                                         * Make the new owner runnable:
                                         */
                                        PTHREAD_NEW_STATE((*mutex)->m_owner,
                                            PS_RUNNING);
                                }
                        }
                        break;

                /* POSIX priority ceiling mutex: */
                case PTHREAD_PRIO_PROTECT:
                        /*
                         * Check if the running thread is not the owner of the
                         * mutex:
                         */
                        if ((*mutex)->m_owner != curthread) {
                                /*
                                 * Return an invalid argument error for no
                                 * owner and a permission error otherwise:
                                 */
                                ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
                        }
                        else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
                            ((*mutex)->m_data.m_count > 0)) {
                                /* Decrement the count: */
                                (*mutex)->m_data.m_count--;
                        } else {
                                /*
                                 * Clear the count in case this is a recursive
                                 * mutex.
                                 */
                                (*mutex)->m_data.m_count = 0;

                                /*
                                 * Restore the thread's inherited priority and
                                 * recompute the active priority (being careful
                                 * not to override changes in the thread's base
                                 * priority subsequent to locking the mutex).
                                 */
                                curthread->inherited_priority =
                                        (*mutex)->m_saved_prio;
                                curthread->active_priority =
                                    MAX(curthread->inherited_priority,
                                    curthread->base_priority);

                                /*
                                 * This thread now owns one less priority mutex.
                                 */
                                curthread->priority_mutex_count--;

                                /* Remove the mutex from the thread's queue. */
                                _MUTEX_ASSERT_IS_OWNED(*mutex);
                                TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
                                    (*mutex), m_qe);
                                _MUTEX_INIT_LINK(*mutex);

                                /*
                                 * Enter a loop to find a waiting thread whose
                                 * active priority will not cause a ceiling
                                 * violation:
                                 */
                                while ((((*mutex)->m_owner =
                                    mutex_queue_deq(*mutex)) != NULL) &&
                                    ((*mutex)->m_owner->active_priority >
                                     (*mutex)->m_prio)) {
                                        /*
                                         * Either the mutex ceiling priority has
                                         * been lowered and/or this thread's
                                         * priority has been raised subsequent
                                         * to the thread being queued on the
                                         * waiting list.
                                         */
                                        (*mutex)->m_owner->error = EINVAL;
                                        PTHREAD_NEW_STATE((*mutex)->m_owner,
                                            PS_RUNNING);
                                        /*
                                         * The thread is no longer waiting for
                                         * this mutex:
                                         */
                                        (*mutex)->m_owner->data.mutex = NULL;
                                }

                                /* Check for a new owner: */
                                if ((*mutex)->m_owner != NULL) {
                                        /*
                                         * Track number of priority mutexes owned:
                                         */
                                        (*mutex)->m_owner->priority_mutex_count++;

                                        /*
                                         * Add the mutex to the thread's list
                                         * of owned mutexes:
                                         */
                                        TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
                                            (*mutex), m_qe);

                                        /*
                                         * The owner is no longer waiting for
                                         * this mutex:
                                         */
                                        (*mutex)->m_owner->data.mutex = NULL;

                                        /*
                                         * Save the owning thread's inherited
                                         * priority:
                                         */
                                        (*mutex)->m_saved_prio =
                                                (*mutex)->m_owner->inherited_priority;

                                        /*
                                         * The owning thread inherits the
                                         * ceiling priority of the mutex and
                                         * executes at that priority:
                                         */
                                        (*mutex)->m_owner->inherited_priority =
                                            (*mutex)->m_prio;
                                        (*mutex)->m_owner->active_priority =
                                            (*mutex)->m_prio;

                                        /*
                                         * Make the new owner runnable:
                                         */
                                        PTHREAD_NEW_STATE((*mutex)->m_owner,
                                            PS_RUNNING);
                                }
                        }
                        break;

                /* Trap invalid mutex types: */
                default:
                        /* Return an invalid argument error: */
                        ret = EINVAL;
                        break;
                }

                if ((ret == 0) && (add_reference != 0)) {
                        /* Increment the reference count: */
                        (*mutex)->m_refcount++;
                }

                /* Unlock the mutex structure: */
                _SPINUNLOCK(&(*mutex)->lock);

                /*
                 * Undefer and handle pending signals, yielding if
                 * necessary:
                 */
                _thread_kern_sig_undefer();
        }

        /* Return the completion status: */
        return (ret);
}


/*
 * This function is called when a change in base priority occurs for
 * a thread that is holding or waiting for a priority protection or
 * inheritance mutex.  A change in a thread's base priority can effect
 * changes to the active priorities of other threads and to the ordering
 * of mutex locking by waiting threads.
 *
 * This must be called while thread scheduling is deferred.
 */
void
_mutex_notify_priochange(pthread_t pthread)
{
        /* Adjust the priorities of any owned priority mutexes: */
        if (pthread->priority_mutex_count > 0) {
                /*
                 * Rescan the mutexes owned by this thread and correct
                 * their priorities to account for this thread's change
                 * in priority.  This has the side effect of changing
                 * the thread's active priority.
                 */
                mutex_rescan_owned(pthread, /* rescan all owned */ NULL);
        }

        /*
         * If this thread is waiting on a priority inheritance mutex,
         * check for priority adjustments.  A change in priority can
         * also effect a ceiling violation(*) for a thread waiting on
         * a priority protection mutex; we don't perform the check here
         * as it is done in pthread_mutex_unlock.
         *
         * (*) It should be noted that a priority change to a thread
         *     _after_ taking and owning a priority ceiling mutex
         *     does not affect ownership of that mutex; the ceiling
         *     priority is only checked before mutex ownership occurs.
         */
        if (pthread->state == PS_MUTEX_WAIT) {
                /* Lock the mutex structure: */
                _SPINLOCK(&pthread->data.mutex->lock);

                /*
                 * Check to make sure this thread is still in the same state
                 * (the spinlock above can yield the CPU to another thread):
                 */
                if (pthread->state == PS_MUTEX_WAIT) {
                        /*
                         * Remove and reinsert this thread into the list of
                         * waiting threads to preserve decreasing priority
                         * order.
                         */
                        mutex_queue_remove(pthread->data.mutex, pthread);
                        mutex_queue_enq(pthread->data.mutex, pthread);

                        if (pthread->data.mutex->m_protocol ==
                             PTHREAD_PRIO_INHERIT) {
                                /* Adjust priorities: */
                                mutex_priority_adjust(pthread->data.mutex);
                        }
                }

                /* Unlock the mutex structure: */
                _SPINUNLOCK(&pthread->data.mutex->lock);
        }
}
1139
1140 /*
1141  * Called when a new thread is added to the mutex waiting queue or
1142  * when a threads priority changes that is already in the mutex
1143  * waiting queue.
1144  */
1145 static void
1146 mutex_priority_adjust(pthread_mutex_t mutex)
1147 {
1148         pthread_t       pthread_next, pthread = mutex->m_owner;
1149         int             temp_prio;
1150         pthread_mutex_t m = mutex;
1151
1152         /*
1153          * Calculate the mutex priority as the maximum of the highest
1154          * active priority of any waiting threads and the owning threads
1155          * active priority(*).
1156          *
1157          * (*) Because the owning threads current active priority may
1158          *     reflect priority inherited from this mutex (and the mutex
1159          *     priority may have changed) we must recalculate the active
1160          *     priority based on the threads saved inherited priority
1161          *     and its base priority.
1162          */
1163         pthread_next = TAILQ_FIRST(&m->m_queue);  /* should never be NULL */
1164         temp_prio = MAX(pthread_next->active_priority,
1165             MAX(m->m_saved_prio, pthread->base_priority));
1166
1167         /* See if this mutex really needs adjusting: */
1168         if (temp_prio == m->m_prio)
1169                 /* No need to propagate the priority: */
1170                 return;
1171
1172         /* Set new priority of the mutex: */
1173         m->m_prio = temp_prio;
1174
1175         while (m != NULL) {
1176                 /*
1177                  * Save the threads priority before rescanning the
1178                  * owned mutexes:
1179                  */
1180                 temp_prio = pthread->active_priority;
1181
1182                 /*
1183                  * Fix the priorities for all the mutexes this thread has
1184                  * locked since taking this mutex.  This also has a
1185                  * potential side-effect of changing the threads priority.
1186                  */
1187                 mutex_rescan_owned(pthread, m);
1188
1189                 /*
1190                  * If the thread is currently waiting on a mutex, check
1191                  * to see if the threads new priority has affected the
1192                  * priority of the mutex.
1193                  */
1194                 if ((temp_prio != pthread->active_priority) &&
1195                     (pthread->state == PS_MUTEX_WAIT) &&
1196                     (pthread->data.mutex->m_protocol == PTHREAD_PRIO_INHERIT)) {
1197                         /* Grab the mutex this thread is waiting on: */
1198                         m = pthread->data.mutex;
1199
1200                         /*
1201                          * The priority for this thread has changed.  Remove
1202                          * and reinsert this thread into the list of waiting
1203                          * threads to preserve decreasing priority order.
1204                          */
1205                         mutex_queue_remove(m, pthread);
1206                         mutex_queue_enq(m, pthread);
1207
1208                         /* Grab the waiting thread with highest priority: */
1209                         pthread_next = TAILQ_FIRST(&m->m_queue);
1210
1211                         /*
1212                          * Calculate the mutex priority as the maximum of the
1213                          * highest active priority of any waiting threads and
1214                          * the owning threads active priority.
1215                          */
1216                         temp_prio = MAX(pthread_next->active_priority,
1217                             MAX(m->m_saved_prio, m->m_owner->base_priority));
1218
1219                         if (temp_prio != m->m_prio) {
1220                                 /*
1221                                  * The priority needs to be propagated to the
1222                                  * mutex this thread is waiting on and up to
1223                                  * the owner of that mutex.
1224                                  */
1225                                 m->m_prio = temp_prio;
1226                                 pthread = m->m_owner;
1227                         }
1228                         else
1229                                 /* We're done: */
1230                                 m = NULL;
1231
1232                 }
1233                 else
1234                         /* We're done: */
1235                         m = NULL;
1236         }
1237 }
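/*
 * Editorial sketch (hypothetical scenario, not part of the original
 * source): how the loop above walks priority up a chain of
 * PTHREAD_PRIO_INHERIT mutexes.  Assume thread A (active priority 10)
 * blocks on mutex M1 owned by thread B (active priority 5), and B is
 * itself blocked on mutex M2 owned by thread C (active priority 5).
 *
 *	Pass 1: m = M1, pthread = B.  mutex_rescan_owned(B, M1) raises
 *		B's active priority to 10.  B is in PS_MUTEX_WAIT on the
 *		inheritance mutex M2, so B is re-queued on M2, M2->m_prio
 *		is recalculated to 10, and pthread becomes C.
 *	Pass 2: m = M2, pthread = C.  mutex_rescan_owned(C, M2) raises
 *		C's active priority to 10.  C is not waiting on a mutex,
 *		so m is set to NULL and the loop ends.
 *
 * Such chains come from ordinary POSIX use of inheritance mutexes,
 * e.g. (standard pthread calls, shown for context only):
 *
 *	pthread_mutexattr_t attr;
 *	pthread_mutex_t     m;
 *
 *	pthread_mutexattr_init(&attr);
 *	pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_INHERIT);
 *	pthread_mutex_init(&m, &attr);
 */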
1238
1239 static void
1240 mutex_rescan_owned(pthread_t pthread, pthread_mutex_t mutex)
1241 {
1242         int             active_prio, inherited_prio;
1243         pthread_mutex_t m;
1244         pthread_t       pthread_next;
1245
1246         /*
1247          * Start walking the mutexes the thread has taken since
1248          * taking this mutex.
1249          */
1250         if (mutex == NULL) {
1251                 /*
1252                  * A null mutex means start at the beginning of the owned
1253                  * mutex list.
1254                  */
1255                 m = TAILQ_FIRST(&pthread->mutexq);
1256
1257                 /* There is no inherited priority yet. */
1258                 inherited_prio = 0;
1259         }
1260         else {
1261                 /*
1262                  * The caller wants to start after a specific mutex.  It
1263                  * is assumed that this mutex is a priority inheritance
1264                  * mutex and that its priority has been correctly
1265                  * calculated.
1266                  */
1267                 m = TAILQ_NEXT(mutex, m_qe);
1268
1269                 /* Start inheriting priority from the specified mutex. */
1270                 inherited_prio = mutex->m_prio;
1271         }
1272         active_prio = MAX(inherited_prio, pthread->base_priority);
1273
1274         while (m != NULL) {
1275                 /*
1276                  * We only want to deal with priority inheritance
1277                  * mutexes.  This might be optimized by only placing
1278                  * priority inheritance mutexes into the owned mutex
1279                  * list, but it may prove to be useful having all
1280                  * owned mutexes in this list.  Consider a thread
1281                  * exiting while holding mutexes...
1282                  */
1283                 if (m->m_protocol == PTHREAD_PRIO_INHERIT) {
1284                         /*
1285                          * Fix the owner's saved (inherited) priority to
1286                          * reflect the priority of the previous mutex.
1287                          */
1288                         m->m_saved_prio = inherited_prio;
1289
1290                         if ((pthread_next = TAILQ_FIRST(&m->m_queue)) != NULL)
1291                                 /* Recalculate the priority of the mutex: */
1292                                 m->m_prio = MAX(active_prio,
1293                                      pthread_next->active_priority);
1294                         else
1295                                 m->m_prio = active_prio;
1296
1297                         /* Recalculate new inherited and active priorities: */
1298                         inherited_prio = m->m_prio;
1299                         active_prio = MAX(m->m_prio, pthread->base_priority);
1300                 }
1301
1302                 /* Advance to the next mutex owned by this thread: */
1303                 m = TAILQ_NEXT(m, m_qe);
1304         }
1305
1306         /*
1307          * Fix the thread's inherited priority and recalculate its
1308          * active priority.
1309          */
1310         pthread->inherited_priority = inherited_prio;
1311         active_prio = MAX(inherited_prio, pthread->base_priority);
1312
1313         if (active_prio != pthread->active_priority) {
1314                 /*
1315                  * If this thread is in the priority queue, it must be
1316                  * removed and reinserted for its new priority.
1317                  */
1318                 if (pthread->flags & PTHREAD_FLAGS_IN_PRIOQ) {
1319                         /*
1320                          * Remove the thread from the priority queue
1321                          * before changing its priority:
1322                          */
1323                         PTHREAD_PRIOQ_REMOVE(pthread);
1324
1325                         /*
1326                          * POSIX states that if the priority is being
1327                          * lowered, the thread must be inserted at the
1328                          * head of the queue for its priority if it owns
1329                          * any priority protection or inheritance mutexes.
1330                          */
1331                         if ((active_prio < pthread->active_priority) &&
1332                             (pthread->priority_mutex_count > 0)) {
1333                                 /* Set the new active priority. */
1334                                 pthread->active_priority = active_prio;
1335
1336                                 PTHREAD_PRIOQ_INSERT_HEAD(pthread);
1337                         }
1338                         else {
1339                                 /* Set the new active priority. */
1340                                 pthread->active_priority = active_prio;
1341
1342                                 PTHREAD_PRIOQ_INSERT_TAIL(pthread);
1343                         }
1344                 }
1345                 else {
1346                         /* Set the new active priority. */
1347                         pthread->active_priority = active_prio;
1348                 }
1349         }
1350 }
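/*
 * Editorial sketch (assumed values, not part of the original source):
 * one pass of the rescan above.  Suppose a thread with base priority 5
 * owns, in acquisition order, PTHREAD_PRIO_INHERIT mutexes M1 and M2,
 * M1 has no waiters, and M2 has a waiter with active priority 12.
 * A rescan started with mutex == NULL proceeds as:
 *
 *	M1: m_saved_prio = 0; no waiters, so m_prio = active_prio = 5;
 *	    inherited_prio becomes 5.
 *	M2: m_saved_prio = 5; m_prio = MAX(5, 12) = 12;
 *	    inherited_prio becomes 12; active_prio = MAX(12, 5) = 12.
 *
 * The thread ends up with inherited_priority = 12 and, if it is on the
 * run-time priority queue, is removed and reinserted at its new active
 * priority.
 */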
1351
1352 void
1353 _mutex_unlock_private(pthread_t pthread)
1354 {
1355         struct pthread_mutex    *m, *m_next;
1356
1357         for (m = TAILQ_FIRST(&pthread->mutexq); m != NULL; m = m_next) {
1358                 m_next = TAILQ_NEXT(m, m_qe);
1359                 if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
1360                         pthread_mutex_unlock(&m);
1361         }
1362 }
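/*
 * Editorial note: the next pointer is fetched before the call to
 * pthread_mutex_unlock() because the unlock path removes the mutex
 * from the owner's mutexq, invalidating m's m_qe linkage.  The same
 * "cache the next entry before removal" idiom in generic form
 * (illustrative only; `head', `entries' and `should_remove' are
 * placeholders):
 *
 *	for (m = TAILQ_FIRST(&head); m != NULL; m = next) {
 *		next = TAILQ_NEXT(m, entries);
 *		if (should_remove(m))
 *			TAILQ_REMOVE(&head, m, entries);
 *	}
 */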
1363
1364 void
1365 _mutex_lock_backout(pthread_t pthread)
1366 {
1367         struct pthread_mutex    *mutex;
1368
1369         /*
1370          * Defer signals to protect the scheduling queues from
1371          * access by the signal handler:
1372          */
1373         _thread_kern_sig_defer();
1374         if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {
1375                 mutex = pthread->data.mutex;
1376
1377                 /* Lock the mutex structure: */
1378                 _SPINLOCK(&mutex->lock);
1379
1380                 mutex_queue_remove(mutex, pthread);
1381
1382                 /* This thread is no longer waiting for the mutex: */
1383                 pthread->data.mutex = NULL;
1384
1385                 /* Unlock the mutex structure: */
1386                 _SPINUNLOCK(&mutex->lock);
1387
1388         }
1389         /*
1390          * Undefer and handle pending signals, yielding if
1391          * necessary:
1392          */
1393         _thread_kern_sig_undefer();
1394 }
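/*
 * Editorial note: this backout only removes the thread from the wait
 * queue of the mutex it was blocked on and clears pthread->data.mutex;
 * it deliberately leaves m_owner and m_count alone.  It is presumably
 * used when a pending mutex wait must be abandoned (for example so a
 * signal can be handled), which is why signal deferral brackets the
 * queue manipulation.
 */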
1395
1396 /*
1397  * Dequeue a waiting thread from the head of a mutex queue in descending
1398  * priority order.
1399  */
1400 static inline pthread_t
1401 mutex_queue_deq(pthread_mutex_t mutex)
1402 {
1403         pthread_t pthread;
1404
1405         while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) {
1406                 TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
1407                 pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;
1408
1409                 /*
1410                  * Only exit the loop if the thread hasn't been
1411                  * cancelled.
1412                  */
1413                 if (pthread->interrupted == 0)
1414                         break;
1415         }
1416
1417         return(pthread);
1418 }
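/*
 * Editorial note: waiters whose wait was interrupted (for example by
 * cancellation, per the comment above) are removed from the queue but
 * never returned, so ownership passes to the highest-priority waiter
 * that is still genuinely blocked; NULL is returned if no such waiter
 * remains.
 */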
1419
1420 /*
1421  * Remove a waiting thread from a mutex queue in descending priority order.
1422  */
1423 static inline void
1424 mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread)
1425 {
1426         if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {
1427                 TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
1428                 pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;
1429         }
1430 }
1431
1432 /*
1433  * Enqueue a waiting thread to a queue in descending priority order.
1434  */
1435 static inline void
1436 mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
1437 {
1438         pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head);
1439
1440         PTHREAD_ASSERT_NOT_IN_SYNCQ(pthread);
1441         /*
1442          * For the common case of all threads having equal priority,
1443          * we perform a quick check against the priority of the thread
1444          * at the tail of the queue.
1445          */
1446         if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
1447                 TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, sqe);
1448         else {
1449                 tid = TAILQ_FIRST(&mutex->m_queue);
1450                 while (pthread->active_priority <= tid->active_priority)
1451                         tid = TAILQ_NEXT(tid, sqe);
1452                 TAILQ_INSERT_BEFORE(tid, pthread, sqe);
1453         }
1454         pthread->flags |= PTHREAD_FLAGS_IN_MUTEXQ;
1455 }
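/*
 * Editorial sketch (assumed priorities, not part of the original
 * source): because both comparisons above use `<=', threads of equal
 * priority keep FIFO order.  With waiters of active priorities
 * [7, 5, 5] already queued:
 *
 *	enqueue 5: 5 <= 5 at the tail, so insert at the tail
 *	           -> [7, 5, 5, 5]
 *	enqueue 6: 6 > 5 at the tail, so scan from the head; 6 <= 7
 *	           skips the first entry, 6 > 5 stops the scan, and the
 *	           thread is inserted before that entry -> [7, 6, 5, 5, 5]
 */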
1456