/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/lib/libpthread/thread/thr_mutex.c,v 1.46 2004/10/31 05:03:50 green Exp $
 * $DragonFly: src/lib/libthread_xu/thread/thr_mutex.c,v 1.2 2005/03/15 11:24:23 davidxu Exp $
 */
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <pthread.h>
#include "thr_private.h"

#if defined(_PTHREADS_INVARIANTS)
#define MUTEX_INIT_LINK(m)		do {		\
	(m)->m_qe.tqe_prev = NULL;			\
	(m)->m_qe.tqe_next = NULL;			\
} while (0)
#define MUTEX_ASSERT_IS_OWNED(m)	do {		\
	if ((m)->m_qe.tqe_prev == NULL)			\
		PANIC("mutex is not on list");		\
} while (0)
#define MUTEX_ASSERT_NOT_OWNED(m)	do {		\
	if (((m)->m_qe.tqe_prev != NULL) ||		\
	    ((m)->m_qe.tqe_next != NULL))		\
		PANIC("mutex is on list");		\
} while (0)
#define THR_ASSERT_NOT_IN_SYNCQ(thr)	do {		\
	THR_ASSERT(((thr)->sflags & THR_FLAGS_IN_SYNCQ) == 0, \
	    "thread in syncq when it shouldn't be.");	\
} while (0)
#else
#define MUTEX_INIT_LINK(m)
#define MUTEX_ASSERT_IS_OWNED(m)
#define MUTEX_ASSERT_NOT_OWNED(m)
#define THR_ASSERT_NOT_IN_SYNCQ(thr)
#endif

#define THR_IN_MUTEXQ(thr)	(((thr)->sflags & THR_FLAGS_IN_SYNCQ) != 0)
#define MUTEX_DESTROY(m)	do {		\
	free(m);				\
} while (0)


/*
 * Prototypes
 */
static long	mutex_handoff(struct pthread *, struct pthread_mutex *);
static int	mutex_self_trylock(struct pthread *, pthread_mutex_t);
static int	mutex_self_lock(struct pthread *, pthread_mutex_t,
		    const struct timespec *abstime);
static int	mutex_unlock_common(pthread_mutex_t *, int);
static void	mutex_priority_adjust(struct pthread *, pthread_mutex_t);
static void	mutex_rescan_owned(struct pthread *, struct pthread *,
		    struct pthread_mutex *);
#if 0
static pthread_t	mutex_queue_deq(pthread_mutex_t);
#endif
static void	mutex_queue_remove(pthread_mutex_t, pthread_t);
static void	mutex_queue_enq(pthread_mutex_t, pthread_t);

__weak_reference(__pthread_mutex_init, pthread_mutex_init);
__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
__weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);

/* Single underscore versions provided for libc internal usage: */
/* No difference between libc and application usage of these: */
__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);

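/*
 * The double-underscore entry points are what applications reach through
 * the weak references above; the single-underscore versions used inside
 * libc create their mutexes "private" (delete safe), which is the
 * "private" argument to mutex_init() below.
 */
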
static int
mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr, int private)
{
	struct pthread_mutex *pmutex;
	enum pthread_mutextype type;
	int protocol;
	int ceiling;
	int flags;
	int ret = 0;

	/* Check if default mutex attributes: */
	if (mutex_attr == NULL || *mutex_attr == NULL) {
		/* Default to an (error checking) POSIX mutex: */
		type = PTHREAD_MUTEX_ERRORCHECK;
		protocol = PTHREAD_PRIO_NONE;
		ceiling = THR_MAX_PRIORITY;
		flags = 0;
	}

	/* Check mutex type: */
	else if (((*mutex_attr)->m_type < PTHREAD_MUTEX_ERRORCHECK) ||
	    ((*mutex_attr)->m_type >= MUTEX_TYPE_MAX))
		/* Return an invalid argument error: */
		ret = EINVAL;

	/* Check mutex protocol: */
	else if (((*mutex_attr)->m_protocol < PTHREAD_PRIO_NONE) ||
	    ((*mutex_attr)->m_protocol > PTHREAD_PRIO_PROTECT))
		/* Return an invalid argument error: */
		ret = EINVAL;

	else {
		/* Use the requested mutex type and protocol: */
		type = (*mutex_attr)->m_type;
		protocol = (*mutex_attr)->m_protocol;
		ceiling = (*mutex_attr)->m_ceiling;
		flags = (*mutex_attr)->m_flags;
	}

	/* Check no errors so far: */
	if (ret == 0) {
		if ((pmutex = (pthread_mutex_t)
		    malloc(sizeof(struct pthread_mutex))) == NULL) {
			ret = ENOMEM;
		} else {
			_thr_umtx_init(&pmutex->m_lock);
			/* Set the mutex flags: */
			pmutex->m_flags = flags;

			/* Process according to mutex type: */
			switch (type) {
			/* case PTHREAD_MUTEX_DEFAULT: */
			case PTHREAD_MUTEX_ERRORCHECK:
			case PTHREAD_MUTEX_NORMAL:
				/* Nothing to do here. */
				break;

			/* Single UNIX Spec 2 recursive mutex: */
			case PTHREAD_MUTEX_RECURSIVE:
				/* Reset the mutex count: */
				pmutex->m_count = 0;
				break;

			/* Trap invalid mutex types: */
			default:
				/* Return an invalid argument error: */
				ret = EINVAL;
				break;
			}
			if (ret == 0) {
				/* Initialise the rest of the mutex: */
				TAILQ_INIT(&pmutex->m_queue);
				pmutex->m_flags |= MUTEX_FLAGS_INITED;
				if (private)
					pmutex->m_flags |= MUTEX_FLAGS_PRIVATE;
				pmutex->m_owner = NULL;
				pmutex->m_type = type;
				pmutex->m_protocol = protocol;
				pmutex->m_refcount = 0;
				if (protocol == PTHREAD_PRIO_PROTECT)
					pmutex->m_prio = ceiling;
				else
					pmutex->m_prio = -1;
				pmutex->m_saved_prio = 0;
				MUTEX_INIT_LINK(pmutex);
				*mutex = pmutex;
			} else {
				/* Free the mutex lock structure: */
				MUTEX_DESTROY(pmutex);
				*mutex = NULL;
			}
		}
	}
	/* Return the completion status: */
	return (ret);
}

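/*
 * Typical application-level usage that lands in mutex_init() above via
 * the pthread_mutex_init() weak reference (illustrative only):
 *
 *	pthread_mutexattr_t attr;
 *	pthread_mutex_t m;
 *
 *	pthread_mutexattr_init(&attr);
 *	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
 *	pthread_mutex_init(&m, &attr);
 *	pthread_mutexattr_destroy(&attr);
 */
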
static int
init_static(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == NULL)
		ret = mutex_init(mutex, NULL, 0);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}

static int
init_static_private(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == NULL)
		ret = mutex_init(mutex, NULL, 1);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}

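/*
 * Statically initialized mutexes begin life as a NULL pointer (hence the
 * *mutex == NULL tests above), so the first lock attempt is routed
 * through one of these helpers; _mutex_static_lock serializes racing
 * first lockers so the underlying structure is allocated exactly once.
 */
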
int
_pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return mutex_init(mutex, mutex_attr, 1);
}

int
__pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return mutex_init(mutex, mutex_attr, 0);
}

int
_mutex_reinit(pthread_mutex_t *mutex)
{
	_thr_umtx_init(&(*mutex)->m_lock);
	TAILQ_INIT(&(*mutex)->m_queue);
	MUTEX_INIT_LINK(*mutex);
	(*mutex)->m_owner = NULL;
	(*mutex)->m_count = 0;
	(*mutex)->m_refcount = 0;
	(*mutex)->m_prio = 0;
	(*mutex)->m_saved_prio = 0;
	return (0);
}

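/*
 * Fork handler.  In the child, simple mutexes the forking thread holds
 * are re-marked as plainly locked (contention state is meaningless there,
 * since waiters do not exist in the child), and the locks and wait queues
 * of owned priority mutexes are reinitialized.
 */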
void
_mutex_fork(struct pthread *curthread)
{
	struct pthread_mutex *m;

	TAILQ_FOREACH(m, &curthread->mutexq, m_qe)
		m->m_lock = UMTX_LOCKED;

	/* Clear contenders for the priority mutexes: */
	TAILQ_FOREACH(m, &curthread->pri_mutexq, m_qe) {
		/* Clear the lock; another thread may have held it at fork time. */
		_thr_umtx_init(&m->m_lock);
		TAILQ_INIT(&m->m_queue);
	}
}

int
_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	pthread_mutex_t m;
	int ret = 0;

	if (mutex == NULL || *mutex == NULL)
		ret = EINVAL;
	else {
		/*
		 * Try to lock the mutex structure; we only need to
		 * try once.  If that fails, the mutex is in use.
		 */
		ret = THR_UMTX_TRYLOCK(curthread, &(*mutex)->m_lock);
		if (ret)
			return (ret);

		/*
		 * Check the mutex's other fields to see if it is
		 * in use, mostly for priority mutex types, or whether
		 * there are condition variables referencing it.
		 */
		if (((*mutex)->m_owner != NULL) ||
		    (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) ||
		    ((*mutex)->m_refcount != 0)) {
			THR_UMTX_UNLOCK(curthread, &(*mutex)->m_lock);
			ret = EBUSY;
		} else {
			/*
			 * Save a pointer to the mutex so it can be freed
			 * and set the caller's pointer to NULL:
			 */
			m = *mutex;
			*mutex = NULL;

			/* Unlock the mutex structure: */
			_thr_umtx_unlock(&m->m_lock, curthread->tid);

			/*
			 * Free the memory allocated for the mutex
			 * structure:
			 */
			MUTEX_ASSERT_NOT_OWNED(m);
			MUTEX_DESTROY(m);
		}
	}

	/* Return the completion status: */
	return (ret);
}

static int
mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
{
	int ret = 0;

	THR_ASSERT((mutex != NULL) && (*mutex != NULL),
	    "Uninitialized mutex in mutex_trylock_common");

	/* Short cut for simple mutex. */
	if ((*mutex)->m_protocol == PTHREAD_PRIO_NONE) {
		ret = THR_UMTX_TRYLOCK(curthread, &(*mutex)->m_lock);
		if (ret == 0) {
			(*mutex)->m_owner = curthread;
			/* Add to the list of owned mutexes: */
			MUTEX_ASSERT_NOT_OWNED(*mutex);
			TAILQ_INSERT_TAIL(&curthread->mutexq,
			    (*mutex), m_qe);
		} else if ((*mutex)->m_owner == curthread) {
			ret = mutex_self_trylock(curthread, *mutex);
		} /* else {} */

		return (ret);
	}

	/* Code for priority mutex */

	/* Lock the mutex structure: */
	THR_LOCK_ACQUIRE(curthread, &(*mutex)->m_lock);

	/*
	 * If the mutex was statically allocated, properly
	 * initialize the tail queue.
	 */
	if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
		TAILQ_INIT(&(*mutex)->m_queue);
		MUTEX_INIT_LINK(*mutex);
		(*mutex)->m_flags |= MUTEX_FLAGS_INITED;
	}

	/* Process according to mutex type: */
	switch ((*mutex)->m_protocol) {
	/* POSIX priority inheritance mutex: */
	case PTHREAD_PRIO_INHERIT:
		/* Check if this mutex is not locked: */
		if ((*mutex)->m_owner == NULL) {
			/* Lock the mutex for the running thread: */
			(*mutex)->m_owner = curthread;

			THR_LOCK(curthread);
			/* Track number of priority mutexes owned: */
			curthread->priority_mutex_count++;

			/*
			 * The mutex takes on the attributes of the
			 * running thread when there are no waiters.
			 */
			(*mutex)->m_prio = curthread->active_priority;
			(*mutex)->m_saved_prio =
			    curthread->inherited_priority;
			curthread->inherited_priority = (*mutex)->m_prio;
			THR_UNLOCK(curthread);

			/* Add to the list of owned mutexes: */
			MUTEX_ASSERT_NOT_OWNED(*mutex);
			TAILQ_INSERT_TAIL(&curthread->pri_mutexq,
			    (*mutex), m_qe);
		} else if ((*mutex)->m_owner == curthread)
			ret = mutex_self_trylock(curthread, *mutex);
		else
			/* Return a busy error: */
			ret = EBUSY;
		break;

	/* POSIX priority protection mutex: */
	case PTHREAD_PRIO_PROTECT:
		/* Check for a priority ceiling violation: */
		if (curthread->active_priority > (*mutex)->m_prio)
			ret = EINVAL;

		/* Check if this mutex is not locked: */
		else if ((*mutex)->m_owner == NULL) {
			/* Lock the mutex for the running thread: */
			(*mutex)->m_owner = curthread;

			THR_LOCK(curthread);
			/* Track number of priority mutexes owned: */
			curthread->priority_mutex_count++;

			/*
			 * The running thread inherits the ceiling
			 * priority of the mutex and executes at that
			 * priority.
			 */
			curthread->active_priority = (*mutex)->m_prio;
			(*mutex)->m_saved_prio =
			    curthread->inherited_priority;
			curthread->inherited_priority =
			    (*mutex)->m_prio;
			THR_UNLOCK(curthread);
			/* Add to the list of owned mutexes: */
			MUTEX_ASSERT_NOT_OWNED(*mutex);
			TAILQ_INSERT_TAIL(&curthread->pri_mutexq,
			    (*mutex), m_qe);
		} else if ((*mutex)->m_owner == curthread)
			ret = mutex_self_trylock(curthread, *mutex);
		else
			/* Return a busy error: */
			ret = EBUSY;
		break;

	/* Trap invalid mutex types: */
	default:
		/* Return an invalid argument error: */
		ret = EINVAL;
		break;
	}

	/* Unlock the mutex structure: */
	THR_LOCK_RELEASE(curthread, &(*mutex)->m_lock);

	/* Return the completion status: */
	return (ret);
}

int
__pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	int ret = 0;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if ((*mutex != NULL) ||
	    ((ret = init_static(curthread, mutex)) == 0))
		ret = mutex_trylock_common(curthread, mutex);

	return (ret);
}

int
_pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	int ret = 0;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking the mutex private (delete safe):
	 */
	if ((*mutex != NULL) ||
	    ((ret = init_static_private(curthread, mutex)) == 0))
		ret = mutex_trylock_common(curthread, mutex);

	return (ret);
}

static int
mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m,
    const struct timespec *abstime)
{
	struct timespec ts, ts2;
	long cycle;
	int ret = 0;

	THR_ASSERT((m != NULL) && (*m != NULL),
	    "Uninitialized mutex in mutex_lock_common");

	if (abstime != NULL && (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
	    abstime->tv_nsec >= 1000000000))
		return (EINVAL);
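
	/*
	 * When given, abstime is an absolute CLOCK_REALTIME deadline; each
	 * wait below first converts it to a relative interval with
	 * TIMESPEC_SUB (e.g. a deadline 2.5s in the future becomes
	 * ts2 = {2, 500000000}).
	 */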

	/* Short cut for simple mutex. */

	if ((*m)->m_protocol == PTHREAD_PRIO_NONE) {
		/* Default POSIX mutex: */
		ret = THR_UMTX_TRYLOCK(curthread, &(*m)->m_lock);
		if (ret == 0) {
			(*m)->m_owner = curthread;
			/* Add to the list of owned mutexes: */
			MUTEX_ASSERT_NOT_OWNED(*m);
			TAILQ_INSERT_TAIL(&curthread->mutexq,
			    (*m), m_qe);
		} else if ((*m)->m_owner == curthread) {
			ret = mutex_self_lock(curthread, *m, abstime);
		} else {
			if (abstime == NULL) {
				THR_UMTX_LOCK(curthread, &(*m)->m_lock);
				ret = 0;
			} else {
				clock_gettime(CLOCK_REALTIME, &ts);
				TIMESPEC_SUB(&ts2, abstime, &ts);
				ret = THR_UMTX_TIMEDLOCK(curthread,
				    &(*m)->m_lock, &ts2);
				/*
				 * A timed-out wait is not restarted if
				 * it was interrupted; restarting is not
				 * worth the trouble.
				 */
				if (ret == EINTR)
					ret = ETIMEDOUT;
			}
			if (ret == 0) {
				(*m)->m_owner = curthread;
				/* Add to the list of owned mutexes: */
				MUTEX_ASSERT_NOT_OWNED(*m);
				TAILQ_INSERT_TAIL(&curthread->mutexq,
				    (*m), m_qe);
			}
		}
		return (ret);
	}

	/* Code for priority mutex */

	/*
	 * Enter a loop waiting to become the mutex owner.  We need a
	 * loop in case the waiting thread is interrupted by a signal
	 * to execute a signal handler.  It is not (currently) possible
	 * to remain in the waiting queue while running a handler.
	 * Instead, the thread is interrupted and backed out of the
	 * waiting queue prior to executing the signal handler.
	 */
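	/*
	 * Wakeup protocol: before sleeping, a contender snapshots its own
	 * ->cycle field under its thread lock and then waits on that
	 * address with _thr_umtx_wait(); mutex_handoff() increments
	 * ->cycle and calls _thr_umtx_wake(), so a handoff that races the
	 * snapshot simply makes the wait return immediately.
	 */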
	do {
		/* Lock the mutex structure: */
		THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);

		/*
		 * If the mutex was statically allocated, properly
		 * initialize the tail queue.
		 */
		if (((*m)->m_flags & MUTEX_FLAGS_INITED) == 0) {
			TAILQ_INIT(&(*m)->m_queue);
			(*m)->m_flags |= MUTEX_FLAGS_INITED;
			MUTEX_INIT_LINK(*m);
		}

		/* Process according to mutex type: */
		switch ((*m)->m_protocol) {
		/* POSIX priority inheritance mutex: */
		case PTHREAD_PRIO_INHERIT:
			/* Check if this mutex is not locked: */
			if ((*m)->m_owner == NULL) {
				/* Lock the mutex for this thread: */
				(*m)->m_owner = curthread;

				THR_LOCK(curthread);
				/* Track number of priority mutexes owned: */
				curthread->priority_mutex_count++;

				/*
				 * The mutex takes on attributes of the
				 * running thread when there are no waiters.
				 * Make sure the thread's scheduling lock is
				 * held while priorities are adjusted.
				 */
				(*m)->m_prio = curthread->active_priority;
				(*m)->m_saved_prio =
				    curthread->inherited_priority;
				curthread->inherited_priority = (*m)->m_prio;
				THR_UNLOCK(curthread);

				/* Add to the list of owned mutexes: */
				MUTEX_ASSERT_NOT_OWNED(*m);
				TAILQ_INSERT_TAIL(&curthread->pri_mutexq,
				    (*m), m_qe);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
			} else if ((*m)->m_owner == curthread) {
				ret = mutex_self_lock(curthread, *m, abstime);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
			} else {
				/*
				 * Join the queue of threads waiting to lock
				 * the mutex and save a pointer to the mutex.
				 */
				mutex_queue_enq(*m, curthread);
				curthread->data.mutex = *m;

				if (curthread->active_priority > (*m)->m_prio)
					/* Adjust priorities: */
					mutex_priority_adjust(curthread, *m);

				THR_LOCK(curthread);
				cycle = curthread->cycle;
				THR_UNLOCK(curthread);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);

				/*
				 * Convert the deadline, if any, to a
				 * relative timeout; with no deadline,
				 * wait indefinitely (NULL timeout).
				 */
				if (abstime != NULL) {
					clock_gettime(CLOCK_REALTIME, &ts);
					TIMESPEC_SUB(&ts2, abstime, &ts);
				}
				ret = _thr_umtx_wait(&curthread->cycle, cycle,
				    (abstime != NULL) ? &ts2 : NULL,
				    CLOCK_REALTIME);
				if (ret == EINTR)
					ret = 0;

				if (THR_IN_MUTEXQ(curthread)) {
					THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
					mutex_queue_remove(*m, curthread);
					THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
				}
				/*
				 * Only clear these after assuring the
				 * thread is dequeued.
				 */
				curthread->data.mutex = NULL;
			}
			break;

		/* POSIX priority protection mutex: */
		case PTHREAD_PRIO_PROTECT:
			/* Check for a priority ceiling violation: */
			if (curthread->active_priority > (*m)->m_prio) {
				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
				ret = EINVAL;
			}
			/* Check if this mutex is not locked: */
			else if ((*m)->m_owner == NULL) {
				/*
				 * Lock the mutex for the running
				 * thread:
				 */
				(*m)->m_owner = curthread;

				THR_LOCK(curthread);
				/* Track number of priority mutexes owned: */
				curthread->priority_mutex_count++;

				/*
				 * The running thread inherits the ceiling
				 * priority of the mutex and executes at that
				 * priority.  Make sure the thread's
				 * scheduling lock is held while priorities
				 * are adjusted.
				 */
				curthread->active_priority = (*m)->m_prio;
				(*m)->m_saved_prio =
				    curthread->inherited_priority;
				curthread->inherited_priority = (*m)->m_prio;
				THR_UNLOCK(curthread);

				/* Add to the list of owned mutexes: */
				MUTEX_ASSERT_NOT_OWNED(*m);
				TAILQ_INSERT_TAIL(&curthread->pri_mutexq,
				    (*m), m_qe);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
			} else if ((*m)->m_owner == curthread) {
				ret = mutex_self_lock(curthread, *m, abstime);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
			} else {
				/*
				 * Join the queue of threads waiting to lock
				 * the mutex and save a pointer to the mutex.
				 */
				mutex_queue_enq(*m, curthread);
				curthread->data.mutex = *m;

				/* Clear any previous error: */
				curthread->error = 0;

				THR_LOCK(curthread);
				cycle = curthread->cycle;
				THR_UNLOCK(curthread);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);

				/*
				 * Convert the deadline, if any, to a
				 * relative timeout; with no deadline,
				 * wait indefinitely (NULL timeout).
				 */
				if (abstime != NULL) {
					clock_gettime(CLOCK_REALTIME, &ts);
					TIMESPEC_SUB(&ts2, abstime, &ts);
				}
				ret = _thr_umtx_wait(&curthread->cycle, cycle,
				    (abstime != NULL) ? &ts2 : NULL,
				    CLOCK_REALTIME);
				if (ret == EINTR)
					ret = 0;

				if (THR_IN_MUTEXQ(curthread)) {
					THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
					mutex_queue_remove(*m, curthread);
					THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
				}
				/*
				 * Only clear these after assuring the
				 * thread is dequeued.
				 */
				curthread->data.mutex = NULL;

				/*
				 * The thread's priority may have changed
				 * while waiting for the mutex, causing a
				 * ceiling violation.
				 */
				ret = curthread->error;
				curthread->error = 0;
			}
			break;

		/* Trap invalid mutex types: */
		default:
			/* Unlock the mutex structure: */
			THR_LOCK_RELEASE(curthread, &(*m)->m_lock);

			/* Return an invalid argument error: */
			ret = EINVAL;
			break;
		}

	} while (((*m)->m_owner != curthread) && (ret == 0));

	/* Return the completion status: */
	return (ret);
}

int
__pthread_mutex_lock(pthread_mutex_t *m)
{
	struct pthread *curthread;
	int ret = 0;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if ((*m != NULL) || ((ret = init_static(curthread, m)) == 0))
		ret = mutex_lock_common(curthread, m, NULL);

	return (ret);
}

__strong_reference(__pthread_mutex_lock, _thr_mutex_lock);

int
_pthread_mutex_lock(pthread_mutex_t *m)
{
	struct pthread *curthread;
	int ret = 0;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking it private (delete safe):
	 */
	if ((*m != NULL) ||
	    ((ret = init_static_private(curthread, m)) == 0))
		ret = mutex_lock_common(curthread, m, NULL);

	return (ret);
}

int
__pthread_mutex_timedlock(pthread_mutex_t *m,
    const struct timespec *abs_timeout)
{
	struct pthread *curthread;
	int ret = 0;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if ((*m != NULL) || ((ret = init_static(curthread, m)) == 0))
		ret = mutex_lock_common(curthread, m, abs_timeout);

	return (ret);
}

int
_pthread_mutex_timedlock(pthread_mutex_t *m,
    const struct timespec *abs_timeout)
{
	struct pthread *curthread;
	int ret = 0;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking it private (delete safe):
	 */
	if ((*m != NULL) ||
	    ((ret = init_static_private(curthread, m)) == 0))
		ret = mutex_lock_common(curthread, m, abs_timeout);

	return (ret);
}

int
_pthread_mutex_unlock(pthread_mutex_t *m)
{
	return (mutex_unlock_common(m, /* add reference */ 0));
}

__strong_reference(_pthread_mutex_unlock, _thr_mutex_unlock);

int
_mutex_cv_unlock(pthread_mutex_t *m)
{
	return (mutex_unlock_common(m, /* add reference */ 1));
}

int
_mutex_cv_lock(pthread_mutex_t *m)
{
	struct pthread *curthread;
	int ret;

	curthread = _get_curthread();
	if ((ret = _pthread_mutex_lock(m)) == 0)
		(*m)->m_refcount--;
	return (ret);
}

static int
mutex_self_trylock(struct pthread *curthread, pthread_mutex_t m)
{
	int ret;

	switch (m->m_type) {
	/* case PTHREAD_MUTEX_DEFAULT: */
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_NORMAL:
		ret = EBUSY;
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}

static int
mutex_self_lock(struct pthread *curthread, pthread_mutex_t m,
    const struct timespec *abstime)
{
	struct timespec ts1, ts2;
	int ret;

	switch (m->m_type) {
	/* case PTHREAD_MUTEX_DEFAULT: */
	case PTHREAD_MUTEX_ERRORCHECK:
		if (abstime) {
			clock_gettime(CLOCK_REALTIME, &ts1);
			TIMESPEC_SUB(&ts2, abstime, &ts1);
			__sys_nanosleep(&ts2, NULL);
			ret = ETIMEDOUT;
		} else {
			/*
			 * POSIX specifies that mutexes should return
			 * EDEADLK if a recursive lock is detected.
			 */
			ret = EDEADLK;
		}
		break;

	case PTHREAD_MUTEX_NORMAL:
		/*
		 * What SS2 defines as a 'normal' mutex.  Intentionally
		 * deadlock on attempts to get a lock you already own.
		 */
		ret = 0;
		if (m->m_protocol != PTHREAD_PRIO_NONE) {
			/* Unlock the mutex structure: */
			THR_LOCK_RELEASE(curthread, &m->m_lock);
		}
		if (abstime) {
			clock_gettime(CLOCK_REALTIME, &ts1);
			TIMESPEC_SUB(&ts2, abstime, &ts1);
			__sys_nanosleep(&ts2, NULL);
			ret = ETIMEDOUT;
		} else {
			ts1.tv_sec = 30;
			ts1.tv_nsec = 0;
			for (;;)
				__sys_nanosleep(&ts1, NULL);
		}
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}

static int
mutex_unlock_common(pthread_mutex_t *m, int add_reference)
{
	struct pthread *curthread = _get_curthread();
	long tid = -1;
	int ret = 0;

	if (m == NULL || *m == NULL)
		ret = EINVAL;
	else {
		/* Short cut for simple mutex. */

		if ((*m)->m_protocol == PTHREAD_PRIO_NONE) {
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if (__predict_false((*m)->m_owner != curthread)) {
				ret = EPERM;
			} else if (__predict_false(
			    (*m)->m_type == PTHREAD_MUTEX_RECURSIVE &&
			    (*m)->m_count > 0)) {
				/* Decrement the count: */
				(*m)->m_count--;
				if (add_reference)
					(*m)->m_refcount++;
			} else {
				/*
				 * Clear the count in case this is a recursive
				 * mutex.
				 */
				(*m)->m_count = 0;
				(*m)->m_owner = NULL;
				/* Remove the mutex from the thread's queue. */
				MUTEX_ASSERT_IS_OWNED(*m);
				TAILQ_REMOVE(&curthread->mutexq, (*m), m_qe);
				MUTEX_INIT_LINK(*m);
				if (add_reference)
					(*m)->m_refcount++;
				/*
				 * Hand off the mutex to the next waiting
				 * thread.
				 */
				_thr_umtx_unlock(&(*m)->m_lock, curthread->tid);
			}
			return (ret);
		}

		/* Code for priority mutex */

		/* Lock the mutex structure: */
		THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);

		/* Process according to mutex type: */
		switch ((*m)->m_protocol) {
		/* POSIX priority inheritance mutex: */
		case PTHREAD_PRIO_INHERIT:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*m)->m_owner != curthread)
				ret = EPERM;
			else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*m)->m_count > 0))
				/* Decrement the count: */
				(*m)->m_count--;
			else {
				/*
				 * Clear the count in case this is a recursive
				 * mutex.
				 */
				(*m)->m_count = 0;

				/*
				 * Restore the thread's inherited priority and
				 * recompute the active priority (being careful
				 * not to override changes in the thread's base
				 * priority subsequent to locking the mutex).
				 */
				THR_LOCK(curthread);
				curthread->inherited_priority =
				    (*m)->m_saved_prio;
				curthread->active_priority =
				    MAX(curthread->inherited_priority,
				    curthread->base_priority);

				/*
				 * This thread now owns one less priority mutex.
				 */
				curthread->priority_mutex_count--;
				THR_UNLOCK(curthread);

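				/*
				 * Worked example with hypothetical numbers:
				 * an owner with base priority 10 that was
				 * boosted to 20 by a waiter restores
				 * m_saved_prio (say 0) and ends up with
				 * active_priority = MAX(0, 10) = 10 again.
				 */
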
				/* Remove the mutex from the thread's queue. */
				MUTEX_ASSERT_IS_OWNED(*m);
				TAILQ_REMOVE(&(*m)->m_owner->pri_mutexq,
				    (*m), m_qe);
				MUTEX_INIT_LINK(*m);

				/*
				 * Hand off the mutex to the next waiting
				 * thread:
				 */
				tid = mutex_handoff(curthread, *m);
			}
			break;

		/* POSIX priority ceiling mutex: */
		case PTHREAD_PRIO_PROTECT:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*m)->m_owner != curthread)
				ret = EPERM;
			else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*m)->m_count > 0))
				/* Decrement the count: */
				(*m)->m_count--;
			else {
				/*
				 * Clear the count in case this is a recursive
				 * mutex.
				 */
				(*m)->m_count = 0;

				/*
				 * Restore the thread's inherited priority and
				 * recompute the active priority (being careful
				 * not to override changes in the thread's base
				 * priority subsequent to locking the mutex).
				 */
				THR_LOCK(curthread);
				curthread->inherited_priority =
				    (*m)->m_saved_prio;
				curthread->active_priority =
				    MAX(curthread->inherited_priority,
				    curthread->base_priority);

				/*
				 * This thread now owns one less priority mutex.
				 */
				curthread->priority_mutex_count--;
				THR_UNLOCK(curthread);

				/* Remove the mutex from the thread's queue. */
				MUTEX_ASSERT_IS_OWNED(*m);
				TAILQ_REMOVE(&(*m)->m_owner->pri_mutexq,
				    (*m), m_qe);
				MUTEX_INIT_LINK(*m);

				/*
				 * Hand off the mutex to the next waiting
				 * thread:
				 */
				tid = mutex_handoff(curthread, *m);
			}
			break;

		/* Trap invalid mutex types: */
		default:
			/* Return an invalid argument error: */
			ret = EINVAL;
			break;
		}

		if ((ret == 0) && (add_reference != 0))
			/* Increment the reference count: */
			(*m)->m_refcount++;

		/* Unlock the mutex structure: */
		THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
	}

	/* Return the completion status: */
	return (ret);
}


/*
 * This function is called when a change in base priority occurs for
 * a thread that is holding or waiting for a priority protection or
 * inheritance mutex.  A change in a thread's base priority can effect
 * changes to the active priorities of other threads and to the ordering
 * of mutex locking by waiting threads.
 *
 * This must be called without the target thread's scheduling lock held.
 */
void
_mutex_notify_priochange(struct pthread *curthread, struct pthread *pthread,
    int propagate_prio)
{
	struct pthread_mutex *m;

	/* Adjust the priorities of any owned priority mutexes: */
	if (pthread->priority_mutex_count > 0) {
		/*
		 * Rescan the mutexes owned by this thread and correct
		 * their priorities to account for this thread's change
		 * in priority.  This has the side effect of changing
		 * the thread's active priority.
		 *
		 * Be sure to lock the first mutex in the list of owned
		 * mutexes.  This acts as a barrier against another
		 * simultaneous call to change the thread's priority
		 * and from the owning thread releasing the mutex.
		 */
		m = TAILQ_FIRST(&pthread->pri_mutexq);
		if (m != NULL) {
			THR_LOCK_ACQUIRE(curthread, &m->m_lock);
			/*
			 * Make sure the thread still owns the lock.
			 */
			if (m == TAILQ_FIRST(&pthread->pri_mutexq))
				mutex_rescan_owned(curthread, pthread,
				    /* rescan all owned */ NULL);
			THR_LOCK_RELEASE(curthread, &m->m_lock);
		}
	}

	/*
	 * If this thread is waiting on a priority inheritance mutex,
	 * check for priority adjustments.  A change in priority can
	 * also cause a ceiling violation(*) for a thread waiting on
	 * a priority protection mutex; we don't perform the check here
	 * as it is done in pthread_mutex_unlock.
	 *
	 * (*) It should be noted that a priority change to a thread
	 *     _after_ taking and owning a priority ceiling mutex
	 *     does not affect ownership of that mutex; the ceiling
	 *     priority is only checked before mutex ownership occurs.
	 */
	if (propagate_prio != 0) {
		/*
		 * Lock the thread's scheduling queue.  This is a bit
		 * convoluted; the "in synchronization queue flag" can
		 * only be cleared with both the thread's scheduling and
		 * mutex locks held.  The thread's pointer to the wanted
		 * mutex is guaranteed to be valid during this time.
		 */
		THR_THREAD_LOCK(curthread, pthread);

		if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) == 0) ||
		    ((m = pthread->data.mutex) == NULL))
			THR_THREAD_UNLOCK(curthread, pthread);
		else {
			/*
			 * This thread is currently waiting on a mutex; unlock
			 * the scheduling queue lock and lock the mutex.  We
			 * can't hold both at the same time because the locking
			 * order could cause a deadlock.
			 */
			THR_THREAD_UNLOCK(curthread, pthread);
			THR_LOCK_ACQUIRE(curthread, &m->m_lock);

			/*
			 * Check to make sure this thread is still in the
			 * same state (the lock above can yield the CPU to
			 * another thread or the thread may be running on
			 * another CPU).
			 */
			if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
			    (pthread->data.mutex == m)) {
				/*
				 * Remove and reinsert this thread into
				 * the list of waiting threads to preserve
				 * decreasing priority order.
				 */
				mutex_queue_remove(m, pthread);
				mutex_queue_enq(m, pthread);

				if (m->m_protocol == PTHREAD_PRIO_INHERIT)
					/* Adjust priorities: */
					mutex_priority_adjust(curthread, m);
			}

			/* Unlock the mutex structure: */
			THR_LOCK_RELEASE(curthread, &m->m_lock);
		}
	}
}

/*
 * Called when a new thread is added to the mutex waiting queue or
 * when the priority of a thread already in the mutex waiting queue
 * changes.
 *
 * This must be called with the mutex locked by the current thread.
 */
static void
mutex_priority_adjust(struct pthread *curthread, pthread_mutex_t mutex)
{
	pthread_mutex_t m = mutex;
	struct pthread *pthread_next, *pthread = mutex->m_owner;
	int done, temp_prio;

	/*
	 * Calculate the mutex priority as the maximum of the highest
	 * active priority of any waiting threads and the owning thread's
	 * active priority(*).
	 *
	 * (*) Because the owning thread's current active priority may
	 *     reflect priority inherited from this mutex (and the mutex
	 *     priority may have changed) we must recalculate the active
	 *     priority based on the thread's saved inherited priority
	 *     and its base priority.
	 */
	pthread_next = TAILQ_FIRST(&m->m_queue);	/* should never be NULL */
	temp_prio = MAX(pthread_next->active_priority,
	    MAX(m->m_saved_prio, pthread->base_priority));
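
	/*
	 * Worked example with hypothetical numbers: if the highest-priority
	 * waiter runs at 20 while m_saved_prio is 10 and the owner's base
	 * priority is 15, the mutex priority becomes
	 * MAX(20, MAX(10, 15)) = 20.
	 */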

	/* See if this mutex really needs adjusting: */
	if (temp_prio == m->m_prio)
		/* No need to propagate the priority: */
		return;

	/* Set new priority of the mutex: */
	m->m_prio = temp_prio;

	/*
	 * Don't unlock the mutex passed in as an argument.  It is
	 * expected to be locked and unlocked by the caller.
	 */
	done = 1;
	do {
		/*
		 * Save the thread's priority before rescanning the
		 * owned mutexes:
		 */
		temp_prio = pthread->active_priority;

		/*
		 * Fix the priorities for all mutexes held by the owning
		 * thread since taking this mutex.  This also has a
		 * potential side effect of changing the thread's priority.
		 *
		 * At this point the mutex is locked by the current thread.
		 * The owning thread can't release the mutex until it is
		 * unlocked, so we should be able to safely walk its list
		 * of owned mutexes.
		 */
		mutex_rescan_owned(curthread, pthread, m);

		/*
		 * If this isn't the first time through the loop,
		 * the current mutex needs to be unlocked.
		 */
		if (done == 0)
			THR_LOCK_RELEASE(curthread, &m->m_lock);

		/* Assume we're done unless told otherwise: */
		done = 1;

		/*
		 * If the thread is currently waiting on a mutex, check
		 * to see if the thread's new priority has affected the
		 * priority of the mutex.
		 */
		if ((temp_prio != pthread->active_priority) &&
		    ((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
		    ((m = pthread->data.mutex) != NULL) &&
		    (m->m_protocol == PTHREAD_PRIO_INHERIT)) {
			/* Lock the mutex structure: */
			THR_LOCK_ACQUIRE(curthread, &m->m_lock);

			/*
			 * Make sure the thread is still waiting on the
			 * mutex:
			 */
			if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
			    (m == pthread->data.mutex)) {
				/*
				 * The priority for this thread has changed.
				 * Remove and reinsert this thread into the
				 * list of waiting threads to preserve
				 * decreasing priority order.
				 */
				mutex_queue_remove(m, pthread);
				mutex_queue_enq(m, pthread);

				/*
				 * Grab the waiting thread with highest
				 * priority:
				 */
				pthread_next = TAILQ_FIRST(&m->m_queue);

				/*
				 * Calculate the mutex priority as the maximum
				 * of the highest active priority of any
				 * waiting threads and the owning thread's
				 * active priority.
				 */
				temp_prio = MAX(pthread_next->active_priority,
				    MAX(m->m_saved_prio,
				    m->m_owner->base_priority));

				if (temp_prio != m->m_prio) {
					/*
					 * The priority needs to be propagated
					 * to the mutex this thread is waiting
					 * on and up to the owner of that mutex.
					 */
					m->m_prio = temp_prio;
					pthread = m->m_owner;

					/* We're not done yet: */
					done = 0;
				}
			}
			/* Only release the mutex if we're done: */
			if (done != 0)
				THR_LOCK_RELEASE(curthread, &m->m_lock);
		}
	} while (done == 0);
}

static void
mutex_rescan_owned(struct pthread *curthread, struct pthread *pthread,
    struct pthread_mutex *mutex)
{
	struct pthread_mutex *m;
	struct pthread *pthread_next;
	int active_prio, inherited_prio;

	/*
	 * Start walking the mutexes the thread has taken since
	 * taking this mutex.
	 */
	if (mutex == NULL) {
		/*
		 * A null mutex means start at the beginning of the owned
		 * mutex list.
		 */
		m = TAILQ_FIRST(&pthread->pri_mutexq);

		/* There is no inherited priority yet. */
		inherited_prio = 0;
	} else {
		/*
		 * The caller wants to start after a specific mutex.  It
		 * is assumed that this mutex is a priority inheritance
		 * mutex and that its priority has been correctly
		 * calculated.
		 */
		m = TAILQ_NEXT(mutex, m_qe);

		/* Start inheriting priority from the specified mutex. */
		inherited_prio = mutex->m_prio;
	}
	active_prio = MAX(inherited_prio, pthread->base_priority);

	for (; m != NULL; m = TAILQ_NEXT(m, m_qe)) {
		/*
		 * We only want to deal with priority inheritance
		 * mutexes.  This might be optimized by only placing
		 * priority inheritance mutexes into the owned mutex
		 * list, but it may prove to be useful having all
		 * owned mutexes in this list.  Consider a thread
		 * exiting while holding mutexes...
		 */
		if (m->m_protocol == PTHREAD_PRIO_INHERIT) {
			/*
			 * Fix the owner's saved (inherited) priority to
			 * reflect the priority of the previous mutex.
			 */
			m->m_saved_prio = inherited_prio;

			if ((pthread_next = TAILQ_FIRST(&m->m_queue)) != NULL)
				/* Recalculate the priority of the mutex: */
				m->m_prio = MAX(active_prio,
				    pthread_next->active_priority);
			else
				m->m_prio = active_prio;

			/* Recalculate new inherited and active priorities: */
			inherited_prio = m->m_prio;
			active_prio = MAX(m->m_prio, pthread->base_priority);
		}
	}

	/*
	 * Fix the thread's inherited priority and recalculate its
	 * active priority.
	 */
	pthread->inherited_priority = inherited_prio;
	active_prio = MAX(inherited_prio, pthread->base_priority);

	if (active_prio != pthread->active_priority) {
		/* Lock the thread's scheduling queue: */
		THR_THREAD_LOCK(curthread, pthread);

		/* if ((pthread->flags & THR_FLAGS_IN_RUNQ) == 0) */
		if (1) {
			/*
			 * This thread is not in a run queue.  Just set
			 * its active priority.
			 */
			pthread->active_priority = active_prio;
		}
		else {
			/*
			 * This thread is in a run queue.  Remove it from
			 * the queue before changing its priority:
			 */
			/* THR_RUNQ_REMOVE(pthread); */
			/*
			 * POSIX states that if the priority is being
			 * lowered, the thread must be inserted at the
			 * head of the queue for its priority if it owns
			 * any priority protection or inheritance mutexes.
			 */
			if ((active_prio < pthread->active_priority) &&
			    (pthread->priority_mutex_count > 0)) {
				/* Set the new active priority. */
				pthread->active_priority = active_prio;
				/* THR_RUNQ_INSERT_HEAD(pthread); */
			} else {
				/* Set the new active priority. */
				pthread->active_priority = active_prio;
				/* THR_RUNQ_INSERT_TAIL(pthread); */
			}
		}
		THR_THREAD_UNLOCK(curthread, pthread);
	}
}

void
_mutex_unlock_private(pthread_t pthread)
{
	struct pthread_mutex *m, *m_next;

	for (m = TAILQ_FIRST(&pthread->pri_mutexq); m != NULL; m = m_next) {
		m_next = TAILQ_NEXT(m, m_qe);
		if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
			pthread_mutex_unlock(&m);
	}
}

/*
 * Dequeue a waiting thread from the head of a mutex queue in descending
 * priority order.
 *
 * In order to properly dequeue a thread from the mutex queue and
 * make it runnable without the possibility of errant wakeups, it
 * is necessary to lock the thread's scheduling queue while also
 * holding the mutex lock.
 */
static long
mutex_handoff(struct pthread *curthread, struct pthread_mutex *mutex)
{
	struct pthread *pthread;
	long tid = -1;

	/* Keep dequeueing until we find a valid thread: */
	mutex->m_owner = NULL;
	pthread = TAILQ_FIRST(&mutex->m_queue);
	while (pthread != NULL) {
		/* Take the thread's scheduling lock: */
		THR_THREAD_LOCK(curthread, pthread);

		/* Remove the thread from the mutex queue: */
		TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
		pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;

		/*
		 * Set the new owner according to the mutex protocol;
		 * the loop exits once a valid owner has been assigned.
		 */
		switch (mutex->m_protocol) {
		case PTHREAD_PRIO_NONE:
			/*
			 * Assign the new owner and add the mutex to the
			 * thread's list of owned mutexes.
			 */
			mutex->m_owner = pthread;
			TAILQ_INSERT_TAIL(&pthread->pri_mutexq, mutex, m_qe);
			break;

		case PTHREAD_PRIO_INHERIT:
			/*
			 * Assign the new owner and add the mutex to the
			 * thread's list of owned mutexes.
			 */
			mutex->m_owner = pthread;
			TAILQ_INSERT_TAIL(&pthread->pri_mutexq, mutex, m_qe);

			/* Track number of priority mutexes owned: */
			pthread->priority_mutex_count++;

			/*
			 * Set the priority of the mutex.  Since our waiting
			 * threads are in descending priority order, the
			 * priority of the mutex becomes the active priority
			 * of the thread we just dequeued.
			 */
			mutex->m_prio = pthread->active_priority;

			/* Save the owning thread's inherited priority: */
			mutex->m_saved_prio = pthread->inherited_priority;

			/*
			 * The owning thread's inherited priority now becomes
			 * its active priority (the priority of the mutex).
			 */
			pthread->inherited_priority = mutex->m_prio;
			break;

		case PTHREAD_PRIO_PROTECT:
			if (pthread->active_priority > mutex->m_prio) {
				/*
				 * Either the mutex ceiling priority has
				 * been lowered and/or this thread's priority
				 * has been raised subsequent to the thread
				 * being queued on the waiting list.
				 */
				pthread->error = EINVAL;
			}
			else {
				/*
				 * Assign the new owner and add the mutex
				 * to the thread's list of owned mutexes.
				 */
				mutex->m_owner = pthread;
				TAILQ_INSERT_TAIL(&pthread->pri_mutexq,
				    mutex, m_qe);

				/* Track number of priority mutexes owned: */
				pthread->priority_mutex_count++;

				/*
				 * Save the owning thread's inherited
				 * priority:
				 */
				mutex->m_saved_prio =
				    pthread->inherited_priority;

				/*
				 * The owning thread inherits the ceiling
				 * priority of the mutex and executes at
				 * that priority:
				 */
				pthread->inherited_priority = mutex->m_prio;
				pthread->active_priority = mutex->m_prio;

			}
			break;
		}

		/* Make the thread runnable and unlock the scheduling queue: */
		pthread->cycle++;
		_thr_umtx_wake(&pthread->cycle, 1);

		THR_THREAD_UNLOCK(curthread, pthread);
		if (mutex->m_owner == pthread)
			/* We're done; a valid owner was found. */
			break;
		else
			/* Get the next thread from the waiting queue: */
			pthread = TAILQ_NEXT(pthread, sqe);
	}

	if ((pthread == NULL) && (mutex->m_protocol == PTHREAD_PRIO_INHERIT))
		/* This mutex has no priority: */
		mutex->m_prio = 0;
	return (tid);
}

#if 0
/*
 * Dequeue a waiting thread from the head of a mutex queue in descending
 * priority order.
 */
static pthread_t
mutex_queue_deq(struct pthread_mutex *mutex)
{
	pthread_t pthread;

	while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) {
		TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
		pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;
	}

	return (pthread);
}
#endif

/*
 * Remove a waiting thread from a mutex queue in descending priority order.
 */
static void
mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread)
{
	if ((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) {
		TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
		pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;
	}
}

/*
 * Enqueue a waiting thread to a queue in descending priority order.
 */
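/*
 * Ordering example with hypothetical priorities: enqueueing a thread of
 * priority 12 onto a queue holding threads at [15, 12, 10] walks past the
 * 15 and the existing 12 and inserts before the 10, giving
 * [15, 12, 12 (new), 10]; threads of equal priority therefore queue in
 * FIFO order.
 */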
static void
mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
{
	pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head);

	THR_ASSERT_NOT_IN_SYNCQ(pthread);
	/*
	 * For the common case of all threads having equal priority,
	 * we perform a quick check against the priority of the thread
	 * at the tail of the queue.
	 */
	if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
		TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, sqe);
	else {
		tid = TAILQ_FIRST(&mutex->m_queue);
		while (pthread->active_priority <= tid->active_priority)
			tid = TAILQ_NEXT(tid, sqe);
		TAILQ_INSERT_BEFORE(tid, pthread, sqe);
	}
	pthread->sflags |= THR_FLAGS_IN_SYNCQ;
}