Cleanup the TLS implementation:
[dragonfly.git] / lib / libthread_xu / thread / thr_mutex.c
CommitLineData
71b3fa15
DX
1/*
2 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by John Birrell.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 *
32 * $FreeBSD: src/lib/libpthread/thread/thr_mutex.c,v 1.46 2004/10/31 05:03:50 green Exp $
9e2ee207 33 * $DragonFly: src/lib/libthread_xu/thread/thr_mutex.c,v 1.3 2005/03/29 19:26:20 joerg Exp $
71b3fa15 34 */
9e2ee207
JS
35
36#include <machine/tls.h>
37
71b3fa15
DX
38#include <stdlib.h>
39#include <errno.h>
40#include <string.h>
41#include <sys/param.h>
42#include <sys/queue.h>
43#include <pthread.h>
44#include "thr_private.h"
45
/*
 * Debugging instrumentation, compiled in only when the library is built
 * with _PTHREADS_INVARIANTS.  These macros validate the m_qe list
 * linkage of a mutex and a thread's sync-queue membership, panicking
 * the process on violation.  In normal builds they expand to nothing.
 */
#if defined(_PTHREADS_INVARIANTS)
#define MUTEX_INIT_LINK(m) 		do {		\
	(m)->m_qe.tqe_prev = NULL;			\
	(m)->m_qe.tqe_next = NULL;			\
} while (0)
#define MUTEX_ASSERT_IS_OWNED(m)	do {		\
	if ((m)->m_qe.tqe_prev == NULL)			\
		PANIC("mutex is not on list");		\
} while (0)
#define MUTEX_ASSERT_NOT_OWNED(m)	do {		\
	if (((m)->m_qe.tqe_prev != NULL) ||		\
	    ((m)->m_qe.tqe_next != NULL))		\
		PANIC("mutex is on list");		\
} while (0)
/*
 * Note: no trailing ';' after "while (0)" -- the previous version had
 * one, which breaks the single-statement guarantee of the do/while(0)
 * idiom when the macro is used in an if/else body (the semicolon must
 * come from the call site).
 */
#define THR_ASSERT_NOT_IN_SYNCQ(thr)	do {		\
	THR_ASSERT(((thr)->sflags & THR_FLAGS_IN_SYNCQ) == 0, \
	    "thread in syncq when it shouldn't be.");	\
} while (0)
#else
#define MUTEX_INIT_LINK(m)
#define MUTEX_ASSERT_IS_OWNED(m)
#define MUTEX_ASSERT_NOT_OWNED(m)
#define THR_ASSERT_NOT_IN_SYNCQ(thr)
#endif

/* True if the thread is queued on some synchronization queue. */
#define THR_IN_MUTEXQ(thr)	(((thr)->sflags & THR_FLAGS_IN_SYNCQ) != 0)
#define MUTEX_DESTROY(m) do {		\
	free(m);			\
} while (0)
75
76
77/*
78 * Prototypes
79 */
80static long mutex_handoff(struct pthread *, struct pthread_mutex *);
81static int mutex_self_trylock(struct pthread *, pthread_mutex_t);
82static int mutex_self_lock(struct pthread *, pthread_mutex_t,
83 const struct timespec *abstime);
84static int mutex_unlock_common(pthread_mutex_t *, int);
85static void mutex_priority_adjust(struct pthread *, pthread_mutex_t);
86static void mutex_rescan_owned (struct pthread *, struct pthread *,
87 struct pthread_mutex *);
88#if 0
89static pthread_t mutex_queue_deq(pthread_mutex_t);
90#endif
91static void mutex_queue_remove(pthread_mutex_t, pthread_t);
92static void mutex_queue_enq(pthread_mutex_t, pthread_t);
93
94__weak_reference(__pthread_mutex_init, pthread_mutex_init);
95__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
96__weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
97__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);
98
99/* Single underscore versions provided for libc internal usage: */
100/* No difference between libc and application usage of these: */
101__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
102__weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);
103
104static int
105mutex_init(pthread_mutex_t *mutex,
106 const pthread_mutexattr_t *mutex_attr, int private)
107{
108 struct pthread_mutex *pmutex;
109 enum pthread_mutextype type;
110 int protocol;
111 int ceiling;
112 int flags;
113 int ret = 0;
114
115 /* Check if default mutex attributes: */
116 if (mutex_attr == NULL || *mutex_attr == NULL) {
117 /* Default to a (error checking) POSIX mutex: */
118 type = PTHREAD_MUTEX_ERRORCHECK;
119 protocol = PTHREAD_PRIO_NONE;
120 ceiling = THR_MAX_PRIORITY;
121 flags = 0;
122 }
123
124 /* Check mutex type: */
125 else if (((*mutex_attr)->m_type < PTHREAD_MUTEX_ERRORCHECK) ||
126 ((*mutex_attr)->m_type >= MUTEX_TYPE_MAX))
127 /* Return an invalid argument error: */
128 ret = EINVAL;
129
130 /* Check mutex protocol: */
131 else if (((*mutex_attr)->m_protocol < PTHREAD_PRIO_NONE) ||
132 ((*mutex_attr)->m_protocol > PTHREAD_PRIO_PROTECT))
133 /* Return an invalid argument error: */
134 ret = EINVAL;
135
136 else {
137 /* Use the requested mutex type and protocol: */
138 type = (*mutex_attr)->m_type;
139 protocol = (*mutex_attr)->m_protocol;
140 ceiling = (*mutex_attr)->m_ceiling;
141 flags = (*mutex_attr)->m_flags;
142 }
143
144 /* Check no errors so far: */
145 if (ret == 0) {
146 if ((pmutex = (pthread_mutex_t)
147 malloc(sizeof(struct pthread_mutex))) == NULL) {
148 ret = ENOMEM;
149 } else {
150 _thr_umtx_init(&pmutex->m_lock);
151 /* Set the mutex flags: */
152 pmutex->m_flags = flags;
153
154 /* Process according to mutex type: */
155 switch (type) {
156 /* case PTHREAD_MUTEX_DEFAULT: */
157 case PTHREAD_MUTEX_ERRORCHECK:
158 case PTHREAD_MUTEX_NORMAL:
159 /* Nothing to do here. */
160 break;
161
162 /* Single UNIX Spec 2 recursive mutex: */
163 case PTHREAD_MUTEX_RECURSIVE:
164 /* Reset the mutex count: */
165 pmutex->m_count = 0;
166 break;
167
168 /* Trap invalid mutex types: */
169 default:
170 /* Return an invalid argument error: */
171 ret = EINVAL;
172 break;
173 }
174 if (ret == 0) {
175 /* Initialise the rest of the mutex: */
176 TAILQ_INIT(&pmutex->m_queue);
177 pmutex->m_flags |= MUTEX_FLAGS_INITED;
178 if (private)
179 pmutex->m_flags |= MUTEX_FLAGS_PRIVATE;
180 pmutex->m_owner = NULL;
181 pmutex->m_type = type;
182 pmutex->m_protocol = protocol;
183 pmutex->m_refcount = 0;
184 if (protocol == PTHREAD_PRIO_PROTECT)
185 pmutex->m_prio = ceiling;
186 else
187 pmutex->m_prio = -1;
188 pmutex->m_saved_prio = 0;
189 MUTEX_INIT_LINK(pmutex);
190 *mutex = pmutex;
191 } else {
192 /* Free the mutex lock structure: */
193 MUTEX_DESTROY(pmutex);
194 *mutex = NULL;
195 }
196 }
197 }
198 /* Return the completion status: */
199 return (ret);
200}
201
202static int
203init_static(struct pthread *thread, pthread_mutex_t *mutex)
204{
205 int ret;
206
207 THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);
208
209 if (*mutex == NULL)
210 ret = mutex_init(mutex, NULL, 0);
211 else
212 ret = 0;
213
214 THR_LOCK_RELEASE(thread, &_mutex_static_lock);
215
216 return (ret);
217}
218
219static int
220init_static_private(struct pthread *thread, pthread_mutex_t *mutex)
221{
222 int ret;
223
224 THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);
225
226 if (*mutex == NULL)
227 ret = mutex_init(mutex, NULL, 1);
228 else
229 ret = 0;
230
231 THR_LOCK_RELEASE(thread, &_mutex_static_lock);
232
233 return (ret);
234}
235
/*
 * pthread_mutex_init() entry point used inside libc; the resulting
 * mutex is marked private (delete safe).
 */
int
_pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return (mutex_init(mutex, mutex_attr, 1));
}
242
/*
 * pthread_mutex_init() entry point for applications; the resulting
 * mutex is not private.
 */
int
__pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return (mutex_init(mutex, mutex_attr, 0));
}
249
250int
251_mutex_reinit(pthread_mutex_t *mutex)
252{
253 _thr_umtx_init(&(*mutex)->m_lock);
254 TAILQ_INIT(&(*mutex)->m_queue);
255 MUTEX_INIT_LINK(*mutex);
256 (*mutex)->m_owner = NULL;
257 (*mutex)->m_count = 0;
258 (*mutex)->m_refcount = 0;
259 (*mutex)->m_prio = 0;
260 (*mutex)->m_saved_prio = 0;
261 return (0);
262}
263
/*
 * Repair mutex state in the child after fork().  Only the forking
 * thread survives, so no locking is taken here.
 */
void
_mutex_fork(struct pthread *curthread)
{
	struct pthread_mutex *m;

	/*
	 * Mutexes the surviving thread owns are re-marked as plainly
	 * locked; any waiter/contention state recorded in the umtx word
	 * belonged to threads that no longer exist in the child.
	 */
	TAILQ_FOREACH(m, &curthread->mutexq, m_qe)
		m->m_lock = UMTX_LOCKED;

	/* Clear contender for priority mutexes */
	TAILQ_FOREACH(m, &curthread->pri_mutexq, m_qe) {
		/* clear another thread locked us */
		_thr_umtx_init(&m->m_lock);
		TAILQ_INIT(&m->m_queue);
	}
}
279
/*
 * Destroy a mutex and free its storage.
 *
 * Returns 0 on success, EINVAL for a NULL mutex pointer, EBUSY when
 * the mutex is locked, has waiters, or is still referenced by a
 * condition variable.  On success the caller's pointer is set to NULL.
 */
int
_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
	struct pthread *curthread = tls_get_curthread();
	pthread_mutex_t m;
	int ret = 0;

	if (mutex == NULL || *mutex == NULL)
		ret = EINVAL;
	else {
		/*
		 * Try to lock the mutex structure, we only need to
		 * try once, if failed, the mutex is in use.
		 */
		ret = THR_UMTX_TRYLOCK(curthread, &(*mutex)->m_lock);
		if (ret)
			return (ret);

		/*
		 * Check mutex other fields to see if this mutex is
		 * in use.  Mostly for priority mutex types, or there
		 * are condition variables referencing it.
		 */
		if (((*mutex)->m_owner != NULL) ||
		    (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) ||
		    ((*mutex)->m_refcount != 0)) {
			THR_UMTX_UNLOCK(curthread, &(*mutex)->m_lock);
			ret = EBUSY;
		} else {
			/*
			 * Save a pointer to the mutex so it can be free'd
			 * and set the caller's pointer to NULL:
			 */
			m = *mutex;
			*mutex = NULL;

			/* Unlock the mutex structure: */
			_thr_umtx_unlock(&m->m_lock, curthread->tid);

			/*
			 * Free the memory allocated for the mutex
			 * structure:
			 */
			MUTEX_ASSERT_NOT_OWNED(m);
			MUTEX_DESTROY(m);
		}
	}

	/* Return the completion status: */
	return (ret);
}
331
/*
 * Common implementation of pthread_mutex_trylock(): attempt to acquire
 * *mutex without blocking.
 *
 * Simple (PTHREAD_PRIO_NONE) mutexes take a fast path using a single
 * umtx trylock.  Priority-protocol mutexes take the structure lock and
 * update the thread's priority bookkeeping while holding it.
 *
 * Returns 0 on success, EBUSY if held by another thread, EINVAL for a
 * bad protocol or (for PTHREAD_PRIO_PROTECT) a ceiling violation, or
 * the mutex_self_trylock() result when the caller already owns it.
 */
static int
mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
{
	int ret = 0;

	THR_ASSERT((mutex != NULL) && (*mutex != NULL),
	    "Uninitialized mutex in mutex_trylock_common");

	/* Short cut for simple mutex. */
	if ((*mutex)->m_protocol == PTHREAD_PRIO_NONE) {
		ret = THR_UMTX_TRYLOCK(curthread, &(*mutex)->m_lock);
		if (ret == 0) {
			(*mutex)->m_owner = curthread;
			/* Add to the list of owned mutexes: */
			MUTEX_ASSERT_NOT_OWNED(*mutex);
			TAILQ_INSERT_TAIL(&curthread->mutexq,
			    (*mutex), m_qe);
		} else if ((*mutex)->m_owner == curthread) {
			/* Relock by the owner: type-dependent result. */
			ret = mutex_self_trylock(curthread, *mutex);
		} /* else {} */

		return (ret);
	}

	/* Code for priority mutex */

	/* Lock the mutex structure: */
	THR_LOCK_ACQUIRE(curthread, &(*mutex)->m_lock);

	/*
	 * If the mutex was statically allocated, properly
	 * initialize the tail queue.
	 */
	if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
		TAILQ_INIT(&(*mutex)->m_queue);
		MUTEX_INIT_LINK(*mutex);
		(*mutex)->m_flags |= MUTEX_FLAGS_INITED;
	}

	/* Process according to mutex type: */
	switch ((*mutex)->m_protocol) {
	/* POSIX priority inheritence mutex: */
	case PTHREAD_PRIO_INHERIT:
		/* Check if this mutex is not locked: */
		if ((*mutex)->m_owner == NULL) {
			/* Lock the mutex for the running thread: */
			(*mutex)->m_owner = curthread;

			THR_LOCK(curthread);
			/* Track number of priority mutexes owned: */
			curthread->priority_mutex_count++;

			/*
			 * The mutex takes on the attributes of the
			 * running thread when there are no waiters.
			 */
			(*mutex)->m_prio = curthread->active_priority;
			(*mutex)->m_saved_prio =
			    curthread->inherited_priority;
			curthread->inherited_priority = (*mutex)->m_prio;
			THR_UNLOCK(curthread);

			/* Add to the list of owned mutexes: */
			MUTEX_ASSERT_NOT_OWNED(*mutex);
			TAILQ_INSERT_TAIL(&curthread->pri_mutexq,
			    (*mutex), m_qe);
		} else if ((*mutex)->m_owner == curthread)
			ret = mutex_self_trylock(curthread, *mutex);
		else
			/* Return a busy error: */
			ret = EBUSY;
		break;

	/* POSIX priority protection mutex: */
	case PTHREAD_PRIO_PROTECT:
		/* Check for a priority ceiling violation: */
		if (curthread->active_priority > (*mutex)->m_prio)
			ret = EINVAL;

		/* Check if this mutex is not locked: */
		else if ((*mutex)->m_owner == NULL) {
			/* Lock the mutex for the running thread: */
			(*mutex)->m_owner = curthread;

			THR_LOCK(curthread);
			/* Track number of priority mutexes owned: */
			curthread->priority_mutex_count++;

			/*
			 * The running thread inherits the ceiling
			 * priority of the mutex and executes at that
			 * priority.
			 */
			curthread->active_priority = (*mutex)->m_prio;
			(*mutex)->m_saved_prio =
			    curthread->inherited_priority;
			curthread->inherited_priority =
			    (*mutex)->m_prio;
			THR_UNLOCK(curthread);
			/* Add to the list of owned mutexes: */
			MUTEX_ASSERT_NOT_OWNED(*mutex);
			TAILQ_INSERT_TAIL(&curthread->pri_mutexq,
			    (*mutex), m_qe);
		} else if ((*mutex)->m_owner == curthread)
			ret = mutex_self_trylock(curthread, *mutex);
		else
			/* Return a busy error: */
			ret = EBUSY;
		break;

	/* Trap invalid mutex types: */
	default:
		/* Return an invalid argument error: */
		ret = EINVAL;
		break;
	}

	/* Unlock the mutex structure: */
	THR_LOCK_RELEASE(curthread, &(*mutex)->m_lock);

	/* Return the completion status: */
	return (ret);
}
455
456int
457__pthread_mutex_trylock(pthread_mutex_t *mutex)
458{
9e2ee207 459 struct pthread *curthread = tls_get_curthread();
71b3fa15
DX
460 int ret = 0;
461
462 /*
463 * If the mutex is statically initialized, perform the dynamic
464 * initialization:
465 */
466 if ((*mutex != NULL) ||
467 ((ret = init_static(curthread, mutex)) == 0))
468 ret = mutex_trylock_common(curthread, mutex);
469
470 return (ret);
471}
472
473int
474_pthread_mutex_trylock(pthread_mutex_t *mutex)
475{
9e2ee207 476 struct pthread *curthread = tls_get_curthread();
71b3fa15
DX
477 int ret = 0;
478
479 /*
480 * If the mutex is statically initialized, perform the dynamic
481 * initialization marking the mutex private (delete safe):
482 */
483 if ((*mutex != NULL) ||
484 ((ret = init_static_private(curthread, mutex)) == 0))
485 ret = mutex_trylock_common(curthread, mutex);
486
487 return (ret);
488}
489
490static int
491mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m,
492 const struct timespec * abstime)
493{
494 struct timespec ts, ts2;
495 long cycle;
496 int ret = 0;
497
498 THR_ASSERT((m != NULL) && (*m != NULL),
499 "Uninitialized mutex in mutex_lock_common");
500
501 if (abstime != NULL && (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
502 abstime->tv_nsec >= 1000000000))
503 return (EINVAL);
504
505 /* Short cut for simple mutex. */
506
507 if ((*m)->m_protocol == PTHREAD_PRIO_NONE) {
508 /* Default POSIX mutex: */
509 ret = THR_UMTX_TRYLOCK(curthread, &(*m)->m_lock);
510 if (ret == 0) {
511 (*m)->m_owner = curthread;
512 /* Add to the list of owned mutexes: */
513 MUTEX_ASSERT_NOT_OWNED(*m);
514 TAILQ_INSERT_TAIL(&curthread->mutexq,
515 (*m), m_qe);
516 } else if ((*m)->m_owner == curthread) {
517 ret = mutex_self_lock(curthread, *m, abstime);
518 } else {
519 if (abstime == NULL) {
520 THR_UMTX_LOCK(curthread, &(*m)->m_lock);
521 ret = 0;
522 } else {
523 clock_gettime(CLOCK_REALTIME, &ts);
524 TIMESPEC_SUB(&ts2, abstime, &ts);
525 ret = THR_UMTX_TIMEDLOCK(curthread,
526 &(*m)->m_lock, &ts2);
527 /*
528 * Timed out wait is not restarted if
529 * it was interrupted, not worth to do it.
530 */
531 if (ret == EINTR)
532 ret = ETIMEDOUT;
533 }
534 if (ret == 0) {
535 (*m)->m_owner = curthread;
536 /* Add to the list of owned mutexes: */
537 MUTEX_ASSERT_NOT_OWNED(*m);
538 TAILQ_INSERT_TAIL(&curthread->mutexq,
539 (*m), m_qe);
540 }
541 }
542 return (ret);
543 }
544
545 /* Code for priority mutex */
546
547 /*
548 * Enter a loop waiting to become the mutex owner. We need a
549 * loop in case the waiting thread is interrupted by a signal
550 * to execute a signal handler. It is not (currently) possible
551 * to remain in the waiting queue while running a handler.
552 * Instead, the thread is interrupted and backed out of the
553 * waiting queue prior to executing the signal handler.
554 */
555 do {
556 /* Lock the mutex structure: */
557 THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
558
559 /*
560 * If the mutex was statically allocated, properly
561 * initialize the tail queue.
562 */
563 if (((*m)->m_flags & MUTEX_FLAGS_INITED) == 0) {
564 TAILQ_INIT(&(*m)->m_queue);
565 (*m)->m_flags |= MUTEX_FLAGS_INITED;
566 MUTEX_INIT_LINK(*m);
567 }
568
569 /* Process according to mutex type: */
570 switch ((*m)->m_protocol) {
571 /* POSIX priority inheritence mutex: */
572 case PTHREAD_PRIO_INHERIT:
573 /* Check if this mutex is not locked: */
574 if ((*m)->m_owner == NULL) {
575 /* Lock the mutex for this thread: */
576 (*m)->m_owner = curthread;
577
578 THR_LOCK(curthread);
579 /* Track number of priority mutexes owned: */
580 curthread->priority_mutex_count++;
581
582 /*
583 * The mutex takes on attributes of the
584 * running thread when there are no waiters.
585 * Make sure the thread's scheduling lock is
586 * held while priorities are adjusted.
587 */
588 (*m)->m_prio = curthread->active_priority;
589 (*m)->m_saved_prio =
590 curthread->inherited_priority;
591 curthread->inherited_priority = (*m)->m_prio;
592 THR_UNLOCK(curthread);
593
594 /* Add to the list of owned mutexes: */
595 MUTEX_ASSERT_NOT_OWNED(*m);
596 TAILQ_INSERT_TAIL(&curthread->pri_mutexq,
597 (*m), m_qe);
598
599 /* Unlock the mutex structure: */
600 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
601 } else if ((*m)->m_owner == curthread) {
602 ret = mutex_self_lock(curthread, *m, abstime);
603
604 /* Unlock the mutex structure: */
605 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
606 } else {
607 /*
608 * Join the queue of threads waiting to lock
609 * the mutex and save a pointer to the mutex.
610 */
611 mutex_queue_enq(*m, curthread);
612 curthread->data.mutex = *m;
613
614 if (curthread->active_priority > (*m)->m_prio)
615 /* Adjust priorities: */
616 mutex_priority_adjust(curthread, *m);
617
618 THR_LOCK(curthread);
619 cycle = curthread->cycle;
620 THR_UNLOCK(curthread);
621
622 /* Unlock the mutex structure: */
623 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
624
625 clock_gettime(CLOCK_REALTIME, &ts);
626 TIMESPEC_SUB(&ts2, abstime, &ts);
627 ret = _thr_umtx_wait(&curthread->cycle, cycle,
9219c44c 628 &ts2, CLOCK_REALTIME);
71b3fa15
DX
629 if (ret == EINTR)
630 ret = 0;
631
632 if (THR_IN_MUTEXQ(curthread)) {
633 THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
634 mutex_queue_remove(*m, curthread);
635 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
636 }
637 /*
638 * Only clear these after assuring the
639 * thread is dequeued.
640 */
641 curthread->data.mutex = NULL;
642 }
643 break;
644
645 /* POSIX priority protection mutex: */
646 case PTHREAD_PRIO_PROTECT:
647 /* Check for a priority ceiling violation: */
648 if (curthread->active_priority > (*m)->m_prio) {
649 /* Unlock the mutex structure: */
650 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
651 ret = EINVAL;
652 }
653 /* Check if this mutex is not locked: */
654 else if ((*m)->m_owner == NULL) {
655 /*
656 * Lock the mutex for the running
657 * thread:
658 */
659 (*m)->m_owner = curthread;
660
661 THR_LOCK(curthread);
662 /* Track number of priority mutexes owned: */
663 curthread->priority_mutex_count++;
664
665 /*
666 * The running thread inherits the ceiling
667 * priority of the mutex and executes at that
668 * priority. Make sure the thread's
669 * scheduling lock is held while priorities
670 * are adjusted.
671 */
672 curthread->active_priority = (*m)->m_prio;
673 (*m)->m_saved_prio =
674 curthread->inherited_priority;
675 curthread->inherited_priority = (*m)->m_prio;
676 THR_UNLOCK(curthread);
677
678 /* Add to the list of owned mutexes: */
679 MUTEX_ASSERT_NOT_OWNED(*m);
680 TAILQ_INSERT_TAIL(&curthread->pri_mutexq,
681 (*m), m_qe);
682
683 /* Unlock the mutex structure: */
684 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
685 } else if ((*m)->m_owner == curthread) {
686 ret = mutex_self_lock(curthread, *m, abstime);
687
688 /* Unlock the mutex structure: */
689 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
690 } else {
691 /*
692 * Join the queue of threads waiting to lock
693 * the mutex and save a pointer to the mutex.
694 */
695 mutex_queue_enq(*m, curthread);
696 curthread->data.mutex = *m;
697
698 /* Clear any previous error: */
699 curthread->error = 0;
700
701 THR_LOCK(curthread);
702 cycle = curthread->cycle;
703 THR_UNLOCK(curthread);
704
705 /* Unlock the mutex structure: */
706 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
707
708 clock_gettime(CLOCK_REALTIME, &ts);
709 TIMESPEC_SUB(&ts2, abstime, &ts);
710 ret = _thr_umtx_wait(&curthread->cycle, cycle,
9219c44c 711 &ts2, CLOCK_REALTIME);
71b3fa15
DX
712 if (ret == EINTR)
713 ret = 0;
714
715 curthread->data.mutex = NULL;
716 if (THR_IN_MUTEXQ(curthread)) {
717 THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
718 mutex_queue_remove(*m, curthread);
719 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
720 }
721 /*
722 * Only clear these after assuring the
723 * thread is dequeued.
724 */
725 curthread->data.mutex = NULL;
726
727 /*
728 * The threads priority may have changed while
729 * waiting for the mutex causing a ceiling
730 * violation.
731 */
732 ret = curthread->error;
733 curthread->error = 0;
734 }
735 break;
736
737 /* Trap invalid mutex types: */
738 default:
739 /* Unlock the mutex structure: */
740 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
741
742 /* Return an invalid argument error: */
743 ret = EINVAL;
744 break;
745 }
746
747 } while (((*m)->m_owner != curthread) && (ret == 0));
748
749 /* Return the completion status: */
750 return (ret);
751}
752
753int
754__pthread_mutex_lock(pthread_mutex_t *m)
755{
756 struct pthread *curthread;
757 int ret = 0;
758
759 _thr_check_init();
760
9e2ee207 761 curthread = tls_get_curthread();
71b3fa15
DX
762
763 /*
764 * If the mutex is statically initialized, perform the dynamic
765 * initialization:
766 */
767 if ((*m != NULL) || ((ret = init_static(curthread, m)) == 0))
768 ret = mutex_lock_common(curthread, m, NULL);
769
770 return (ret);
771}
772
773__strong_reference(__pthread_mutex_lock, _thr_mutex_lock);
774
775int
776_pthread_mutex_lock(pthread_mutex_t *m)
777{
778 struct pthread *curthread;
779 int ret = 0;
780
781 _thr_check_init();
782
9e2ee207 783 curthread = tls_get_curthread();
71b3fa15
DX
784
785 /*
786 * If the mutex is statically initialized, perform the dynamic
787 * initialization marking it private (delete safe):
788 */
789 if ((*m != NULL) ||
790 ((ret = init_static_private(curthread, m)) == 0))
791 ret = mutex_lock_common(curthread, m, NULL);
792
793 return (ret);
794}
795
796int
797__pthread_mutex_timedlock(pthread_mutex_t *m,
798 const struct timespec *abs_timeout)
799{
800 struct pthread *curthread;
801 int ret = 0;
802
803 _thr_check_init();
804
9e2ee207 805 curthread = tls_get_curthread();
71b3fa15
DX
806
807 /*
808 * If the mutex is statically initialized, perform the dynamic
809 * initialization:
810 */
811 if ((*m != NULL) || ((ret = init_static(curthread, m)) == 0))
812 ret = mutex_lock_common(curthread, m, abs_timeout);
813
814 return (ret);
815}
816
817int
818_pthread_mutex_timedlock(pthread_mutex_t *m,
819 const struct timespec *abs_timeout)
820{
821 struct pthread *curthread;
822 int ret = 0;
823
824 _thr_check_init();
825
9e2ee207 826 curthread = tls_get_curthread();
71b3fa15
DX
827
828 /*
829 * If the mutex is statically initialized, perform the dynamic
830 * initialization marking it private (delete safe):
831 */
832 if ((*m != NULL) ||
833 ((ret = init_static_private(curthread, m)) == 0))
834 ret = mutex_lock_common(curthread, m, abs_timeout);
835
836 return (ret);
837}
838
/*
 * Release a mutex.  No condition-variable reference is taken.
 */
int
_pthread_mutex_unlock(pthread_mutex_t *m)
{
	/* 0: do not add a condvar reference. */
	return (mutex_unlock_common(m, 0));
}
844
845__strong_reference(_pthread_mutex_unlock, _thr_mutex_unlock);
846
/*
 * Release a mutex on behalf of the condition variable code, taking a
 * reference on it so it cannot be destroyed while the condvar waits.
 */
int
_mutex_cv_unlock(pthread_mutex_t *m)
{
	/* 1: add a condvar reference while unlocked. */
	return (mutex_unlock_common(m, 1));
}
852
853int
854_mutex_cv_lock(pthread_mutex_t *m)
855{
856 struct pthread *curthread;
857 int ret;
858
9e2ee207 859 curthread = tls_get_curthread();
71b3fa15
DX
860 if ((ret = _pthread_mutex_lock(m)) == 0)
861 (*m)->m_refcount--;
862 return (ret);
863}
864
865static int
866mutex_self_trylock(struct pthread *curthread, pthread_mutex_t m)
867{
868 int ret;
869
870 switch (m->m_type) {
871 /* case PTHREAD_MUTEX_DEFAULT: */
872 case PTHREAD_MUTEX_ERRORCHECK:
873 case PTHREAD_MUTEX_NORMAL:
874 ret = EBUSY;
875 break;
876
877 case PTHREAD_MUTEX_RECURSIVE:
878 /* Increment the lock count: */
879 if (m->m_count + 1 > 0) {
880 m->m_count++;
881 ret = 0;
882 } else
883 ret = EAGAIN;
884 break;
885
886 default:
887 /* Trap invalid mutex types; */
888 ret = EINVAL;
889 }
890
891 return (ret);
892}
893
/*
 * Handle a blocking lock on a mutex the calling thread already owns.
 *
 * ERRORCHECK: EDEADLK, or sleep out the timeout and return ETIMEDOUT.
 * NORMAL: deliberately deadlock (sleep forever), or sleep out the
 * timeout.  RECURSIVE: bump the count (EAGAIN on overflow).
 *
 * For priority-protocol mutexes the caller holds the mutex structure
 * lock; the NORMAL case releases it before sleeping.
 */
static int
mutex_self_lock(struct pthread *curthread, pthread_mutex_t m,
	const struct timespec *abstime)
{
	struct timespec	ts1, ts2;
	int	ret;

	switch (m->m_type) {
	/* case PTHREAD_MUTEX_DEFAULT: */
	case PTHREAD_MUTEX_ERRORCHECK:
		if (abstime) {
			/* Sleep until the deadline, then report expiry. */
			clock_gettime(CLOCK_REALTIME, &ts1);
			TIMESPEC_SUB(&ts2, abstime, &ts1);
			__sys_nanosleep(&ts2, NULL);
			ret = ETIMEDOUT;
		} else {
			/*
			 * POSIX specifies that mutexes should return
			 * EDEADLK if a recursive lock is detected.
			 */
			ret = EDEADLK;
		}
		break;

	case PTHREAD_MUTEX_NORMAL:
		/*
		 * What SS2 define as a 'normal' mutex.  Intentionally
		 * deadlock on attempts to get a lock you already own.
		 */
		ret = 0;
		if (m->m_protocol != PTHREAD_PRIO_NONE) {
			/* Unlock the mutex structure: */
			THR_LOCK_RELEASE(curthread, &m->m_lock);
		}
		if (abstime) {
			clock_gettime(CLOCK_REALTIME, &ts1);
			TIMESPEC_SUB(&ts2, abstime, &ts1);
			__sys_nanosleep(&ts2, NULL);
			ret = ETIMEDOUT;
		} else {
			/* Simulate the deadlock: sleep forever. */
			ts1.tv_sec = 30;
			ts1.tv_nsec = 0;
			for (;;)
				__sys_nanosleep(&ts1, NULL);
		}
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count, guarding against overflow: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types; */
		ret = EINVAL;
	}

	return (ret);
}
957
/*
 * Common implementation of pthread_mutex_unlock() and
 * _mutex_cv_unlock().  When add_reference is non-zero a condvar
 * reference is taken on the mutex as it is released.
 *
 * Simple (PTHREAD_PRIO_NONE) mutexes take a fast path; priority
 * mutexes restore the owner's inherited/active priority and hand the
 * mutex to the highest-priority waiter via mutex_handoff().
 *
 * Returns 0 on success, EINVAL for a NULL mutex or bad protocol,
 * EPERM if the caller is not the owner.
 */
static int
mutex_unlock_common(pthread_mutex_t *m, int add_reference)
{
	struct pthread *curthread = tls_get_curthread();
	/*
	 * NOTE(review): tid receives the handed-off waiter's id from
	 * mutex_handoff() but is not otherwise used in this function --
	 * possibly intended for a umtx wakeup; confirm against callers
	 * of mutex_handoff().
	 */
	long tid = -1;
	int ret = 0;

	if (m == NULL || *m == NULL)
		ret = EINVAL;
	else {
		/* Short cut for simple mutex. */

		if ((*m)->m_protocol == PTHREAD_PRIO_NONE) {
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if (__predict_false((*m)->m_owner != curthread)) {
				ret = EPERM;
			} else if (__predict_false(
				(*m)->m_type == PTHREAD_MUTEX_RECURSIVE &&
				(*m)->m_count > 0)) {
				/* Decrement the count: */
				(*m)->m_count--;
				if (add_reference)
					(*m)->m_refcount++;
			} else {
				/*
				 * Clear the count in case this is a recursive
				 * mutex.
				 */
				(*m)->m_count = 0;
				(*m)->m_owner = NULL;
				/* Remove the mutex from the threads queue. */
				MUTEX_ASSERT_IS_OWNED(*m);
				TAILQ_REMOVE(&curthread->mutexq, (*m), m_qe);
				MUTEX_INIT_LINK(*m);
				if (add_reference)
					(*m)->m_refcount++;
				/*
				 * Hand off the mutex to the next waiting
				 * thread.
				 */
				_thr_umtx_unlock(&(*m)->m_lock, curthread->tid);
			}
			return (ret);
		}

		/* Code for priority mutex */

		/* Lock the mutex structure: */
		THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);

		/* Process according to mutex type: */
		switch ((*m)->m_protocol) {
		/* POSIX priority inheritence mutex: */
		case PTHREAD_PRIO_INHERIT:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*m)->m_owner != curthread)
				ret = EPERM;
			else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*m)->m_count > 0))
				/* Decrement the count: */
				(*m)->m_count--;
			else {
				/*
				 * Clear the count in case this is recursive
				 * mutex.
				 */
				(*m)->m_count = 0;

				/*
				 * Restore the threads inherited priority and
				 * recompute the active priority (being careful
				 * not to override changes in the threads base
				 * priority subsequent to locking the mutex).
				 */
				THR_LOCK(curthread);
				curthread->inherited_priority =
					(*m)->m_saved_prio;
				curthread->active_priority =
				    MAX(curthread->inherited_priority,
				    curthread->base_priority);

				/*
				 * This thread now owns one less priority mutex.
				 */
				curthread->priority_mutex_count--;
				THR_UNLOCK(curthread);

				/* Remove the mutex from the threads queue. */
				MUTEX_ASSERT_IS_OWNED(*m);
				TAILQ_REMOVE(&(*m)->m_owner->pri_mutexq,
				    (*m), m_qe);
				MUTEX_INIT_LINK(*m);

				/*
				 * Hand off the mutex to the next waiting
				 * thread:
				 */
				tid = mutex_handoff(curthread, *m);
			}
			break;

		/* POSIX priority ceiling mutex: */
		case PTHREAD_PRIO_PROTECT:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*m)->m_owner != curthread)
				ret = EPERM;
			else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*m)->m_count > 0))
				/* Decrement the count: */
				(*m)->m_count--;
			else {
				/*
				 * Clear the count in case this is a recursive
				 * mutex.
				 */
				(*m)->m_count = 0;

				/*
				 * Restore the threads inherited priority and
				 * recompute the active priority (being careful
				 * not to override changes in the threads base
				 * priority subsequent to locking the mutex).
				 */
				THR_LOCK(curthread);
				curthread->inherited_priority =
					(*m)->m_saved_prio;
				curthread->active_priority =
				    MAX(curthread->inherited_priority,
				    curthread->base_priority);

				/*
				 * This thread now owns one less priority mutex.
				 */
				curthread->priority_mutex_count--;
				THR_UNLOCK(curthread);

				/* Remove the mutex from the threads queue. */
				MUTEX_ASSERT_IS_OWNED(*m);
				TAILQ_REMOVE(&(*m)->m_owner->pri_mutexq,
				    (*m), m_qe);
				MUTEX_INIT_LINK(*m);

				/*
				 * Hand off the mutex to the next waiting
				 * thread:
				 */
				tid = mutex_handoff(curthread, *m);
			}
			break;

		/* Trap invalid mutex types: */
		default:
			/* Return an invalid argument error: */
			ret = EINVAL;
			break;
		}

		if ((ret == 0) && (add_reference != 0))
			/* Increment the reference count: */
			(*m)->m_refcount++;

		/* Unlock the mutex structure: */
		THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
	}

	/* Return the completion status: */
	return (ret);
}
1135
1136
/*
 * This function is called when a change in base priority occurs for
 * a thread that is holding or waiting for a priority protection or
 * inheritance mutex.  A change in a thread's base priority can effect
 * changes to active priorities of other threads and to the ordering
 * of mutex locking by waiting threads.
 *
 * This must be called without the target thread's scheduling lock held.
 */
void
_mutex_notify_priochange(struct pthread *curthread, struct pthread *pthread,
    int propagate_prio)
{
	struct pthread_mutex *m;

	/* Adjust the priorities of any owned priority mutexes: */
	if (pthread->priority_mutex_count > 0) {
		/*
		 * Rescan the mutexes owned by this thread and correct
		 * their priorities to account for this thread's change
		 * in priority.  This has the side effect of changing
		 * the thread's active priority.
		 *
		 * Be sure to lock the first mutex in the list of owned
		 * mutexes.  This acts as a barrier against another
		 * simultaneous call to change the thread's priority
		 * and from the owning thread releasing the mutex.
		 */
		m = TAILQ_FIRST(&pthread->pri_mutexq);
		if (m != NULL) {
			THR_LOCK_ACQUIRE(curthread, &m->m_lock);
			/*
			 * Make sure the thread still owns the lock; the
			 * list head may have changed while we blocked on
			 * the mutex's low-level lock above.
			 */
			if (m == TAILQ_FIRST(&pthread->pri_mutexq))
				mutex_rescan_owned(curthread, pthread,
				    /* rescan all owned */ NULL);
			THR_LOCK_RELEASE(curthread, &m->m_lock);
		}
	}

	/*
	 * If this thread is waiting on a priority inheritance mutex,
	 * check for priority adjustments.  A change in priority can
	 * also cause a ceiling violation(*) for a thread waiting on
	 * a priority protection mutex; we don't perform the check here
	 * as it is done in pthread_mutex_unlock.
	 *
	 * (*) It should be noted that a priority change to a thread
	 *     _after_ taking and owning a priority ceiling mutex
	 *     does not affect ownership of that mutex; the ceiling
	 *     priority is only checked before mutex ownership occurs.
	 */
	if (propagate_prio != 0) {
		/*
		 * Lock the thread's scheduling queue.  This is a bit
		 * convoluted; the "in synchronization queue flag" can
		 * only be cleared with both the thread's scheduling and
		 * mutex locks held.  The thread's pointer to the wanted
		 * mutex is guaranteed to be valid during this time.
		 */
		THR_THREAD_LOCK(curthread, pthread);

		if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) == 0) ||
		    ((m = pthread->data.mutex) == NULL))
			THR_THREAD_UNLOCK(curthread, pthread);
		else {
			/*
			 * This thread is currently waiting on a mutex; unlock
			 * the scheduling queue lock and lock the mutex.  We
			 * can't hold both at the same time because the locking
			 * order could cause a deadlock.
			 */
			THR_THREAD_UNLOCK(curthread, pthread);
			THR_LOCK_ACQUIRE(curthread, &m->m_lock);

			/*
			 * Check to make sure this thread is still in the
			 * same state (the lock above can yield the CPU to
			 * another thread or the thread may be running on
			 * another CPU).
			 */
			if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
			    (pthread->data.mutex == m)) {
				/*
				 * Remove and reinsert this thread into
				 * the list of waiting threads to preserve
				 * decreasing priority order.
				 */
				mutex_queue_remove(m, pthread);
				mutex_queue_enq(m, pthread);

				if (m->m_protocol == PTHREAD_PRIO_INHERIT)
					/* Adjust priorities: */
					mutex_priority_adjust(curthread, m);
			}

			/* Unlock the mutex structure: */
			THR_LOCK_RELEASE(curthread, &m->m_lock);
		}
	}
}
1239
/*
 * Called when a new thread is added to the mutex waiting queue or
 * when a thread's priority changes that is already in the mutex
 * waiting queue.
 *
 * This must be called with the mutex locked by the current thread.
 */
static void
mutex_priority_adjust(struct pthread *curthread, pthread_mutex_t mutex)
{
	pthread_mutex_t m = mutex;
	struct pthread *pthread_next, *pthread = mutex->m_owner;
	int done, temp_prio;

	/*
	 * Calculate the mutex priority as the maximum of the highest
	 * active priority of any waiting threads and the owning thread's
	 * active priority(*).
	 *
	 * (*) Because the owning thread's current active priority may
	 *     reflect priority inherited from this mutex (and the mutex
	 *     priority may have changed) we must recalculate the active
	 *     priority based on the thread's saved inherited priority
	 *     and its base priority.
	 */
	pthread_next = TAILQ_FIRST(&m->m_queue);	/* should never be NULL */
	temp_prio = MAX(pthread_next->active_priority,
	    MAX(m->m_saved_prio, pthread->base_priority));

	/* See if this mutex really needs adjusting: */
	if (temp_prio == m->m_prio)
		/* No need to propagate the priority: */
		return;

	/* Set new priority of the mutex: */
	m->m_prio = temp_prio;

	/*
	 * Don't unlock the mutex passed in as an argument.  It is
	 * expected to be locked and unlocked by the caller.  The
	 * `done' flag starts out set so the first loop iteration
	 * skips the THR_LOCK_RELEASE below.
	 */
	done = 1;
	do {
		/*
		 * Save the thread's priority before rescanning the
		 * owned mutexes so we can detect whether the rescan
		 * changed it:
		 */
		temp_prio = pthread->active_priority;

		/*
		 * Fix the priorities for all mutexes held by the owning
		 * thread since taking this mutex.  This also has a
		 * potential side-effect of changing the thread's priority.
		 *
		 * At this point the mutex is locked by the current thread.
		 * The owning thread can't release the mutex until it is
		 * unlocked, so we should be able to safely walk its list
		 * of owned mutexes.
		 */
		mutex_rescan_owned(curthread, pthread, m);

		/*
		 * If this isn't the first time through the loop,
		 * the current mutex needs to be unlocked.
		 */
		if (done == 0)
			THR_LOCK_RELEASE(curthread, &m->m_lock);

		/* Assume we're done unless told otherwise: */
		done = 1;

		/*
		 * If the thread is currently waiting on a mutex, check
		 * to see if the thread's new priority has affected the
		 * priority of the mutex.
		 */
		if ((temp_prio != pthread->active_priority) &&
		    ((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
		    ((m = pthread->data.mutex) != NULL) &&
		    (m->m_protocol == PTHREAD_PRIO_INHERIT)) {
			/* Lock the mutex structure: */
			THR_LOCK_ACQUIRE(curthread, &m->m_lock);

			/*
			 * Make sure the thread is still waiting on the
			 * mutex (acquiring m_lock may have blocked and
			 * the thread's state may have changed meanwhile):
			 */
			if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
			    (m == pthread->data.mutex)) {
				/*
				 * The priority for this thread has changed.
				 * Remove and reinsert this thread into the
				 * list of waiting threads to preserve
				 * decreasing priority order.
				 */
				mutex_queue_remove(m, pthread);
				mutex_queue_enq(m, pthread);

				/*
				 * Grab the waiting thread with highest
				 * priority:
				 */
				pthread_next = TAILQ_FIRST(&m->m_queue);

				/*
				 * Calculate the mutex priority as the maximum
				 * of the highest active priority of any
				 * waiting threads and the owning thread's
				 * active priority.
				 */
				temp_prio = MAX(pthread_next->active_priority,
				    MAX(m->m_saved_prio,
				    m->m_owner->base_priority));

				if (temp_prio != m->m_prio) {
					/*
					 * The priority needs to be propagated
					 * to the mutex this thread is waiting
					 * on and up to the owner of that mutex.
					 */
					m->m_prio = temp_prio;
					pthread = m->m_owner;

					/* We're not done yet: */
					done = 0;
				}
			}
			/* Only release the mutex if we're done: */
			if (done != 0)
				THR_LOCK_RELEASE(curthread, &m->m_lock);
		}
	} while (done == 0);
}
1373
1374static void
1375mutex_rescan_owned(struct pthread *curthread, struct pthread *pthread,
1376 struct pthread_mutex *mutex)
1377{
1378 struct pthread_mutex *m;
1379 struct pthread *pthread_next;
1380 int active_prio, inherited_prio;
1381
1382 /*
1383 * Start walking the mutexes the thread has taken since
1384 * taking this mutex.
1385 */
1386 if (mutex == NULL) {
1387 /*
1388 * A null mutex means start at the beginning of the owned
1389 * mutex list.
1390 */
1391 m = TAILQ_FIRST(&pthread->pri_mutexq);
1392
1393 /* There is no inherited priority yet. */
1394 inherited_prio = 0;
1395 } else {
1396 /*
1397 * The caller wants to start after a specific mutex. It
1398 * is assumed that this mutex is a priority inheritence
1399 * mutex and that its priority has been correctly
1400 * calculated.
1401 */
1402 m = TAILQ_NEXT(mutex, m_qe);
1403
1404 /* Start inheriting priority from the specified mutex. */
1405 inherited_prio = mutex->m_prio;
1406 }
1407 active_prio = MAX(inherited_prio, pthread->base_priority);
1408
1409 for (; m != NULL; m = TAILQ_NEXT(m, m_qe)) {
1410 /*
1411 * We only want to deal with priority inheritence
1412 * mutexes. This might be optimized by only placing
1413 * priority inheritence mutexes into the owned mutex
1414 * list, but it may prove to be useful having all
1415 * owned mutexes in this list. Consider a thread
1416 * exiting while holding mutexes...
1417 */
1418 if (m->m_protocol == PTHREAD_PRIO_INHERIT) {
1419 /*
1420 * Fix the owners saved (inherited) priority to
1421 * reflect the priority of the previous mutex.
1422 */
1423 m->m_saved_prio = inherited_prio;
1424
1425 if ((pthread_next = TAILQ_FIRST(&m->m_queue)) != NULL)
1426 /* Recalculate the priority of the mutex: */
1427 m->m_prio = MAX(active_prio,
1428 pthread_next->active_priority);
1429 else
1430 m->m_prio = active_prio;
1431
1432 /* Recalculate new inherited and active priorities: */
1433 inherited_prio = m->m_prio;
1434 active_prio = MAX(m->m_prio, pthread->base_priority);
1435 }
1436 }
1437
1438 /*
1439 * Fix the threads inherited priority and recalculate its
1440 * active priority.
1441 */
1442 pthread->inherited_priority = inherited_prio;
1443 active_prio = MAX(inherited_prio, pthread->base_priority);
1444
1445 if (active_prio != pthread->active_priority) {
1446 /* Lock the thread's scheduling queue: */
1447 THR_THREAD_LOCK(curthread, pthread);
1448
1449 /* if ((pthread->flags & THR_FLAGS_IN_RUNQ) == 0) */
1450 if (1) {
1451 /*
1452 * This thread is not in a run queue. Just set
1453 * its active priority.
1454 */
1455 pthread->active_priority = active_prio;
1456 }
1457 else {
1458 /*
1459 * This thread is in a run queue. Remove it from
1460 * the queue before changing its priority:
1461 */
1462 /* THR_RUNQ_REMOVE(pthread);*/
1463 /*
1464 * POSIX states that if the priority is being
1465 * lowered, the thread must be inserted at the
1466 * head of the queue for its priority if it owns
1467 * any priority protection or inheritence mutexes.
1468 */
1469 if ((active_prio < pthread->active_priority) &&
1470 (pthread->priority_mutex_count > 0)) {
1471 /* Set the new active priority. */
1472 pthread->active_priority = active_prio;
1473 /* THR_RUNQ_INSERT_HEAD(pthread); */
1474 } else {
1475 /* Set the new active priority. */
1476 pthread->active_priority = active_prio;
1477 /* THR_RUNQ_INSERT_TAIL(pthread);*/
1478 }
1479 }
1480 THR_THREAD_UNLOCK(curthread, pthread);
1481 }
1482}
1483
1484void
1485_mutex_unlock_private(pthread_t pthread)
1486{
1487 struct pthread_mutex *m, *m_next;
1488
1489 for (m = TAILQ_FIRST(&pthread->pri_mutexq); m != NULL; m = m_next) {
1490 m_next = TAILQ_NEXT(m, m_qe);
1491 if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
1492 pthread_mutex_unlock(&m);
1493 }
1494}
1495
/*
 * Dequeue a waiting thread from the head of a mutex queue in descending
 * priority order.
 *
 * In order to properly dequeue a thread from the mutex queue and
 * make it runnable without the possibility of errant wakeups, it
 * is necessary to lock the thread's scheduling queue while also
 * holding the mutex lock.
 *
 * NOTE(review): the return value `tid' is initialized to -1 and never
 * reassigned below, so this function currently always returns -1.
 * Verify against callers whether the tid is still meaningful.
 */
static long
mutex_handoff(struct pthread *curthread, struct pthread_mutex *mutex)
{
	struct pthread *pthread;
	long tid = -1;

	/* Keep dequeueing until we find a valid thread: */
	mutex->m_owner = NULL;
	pthread = TAILQ_FIRST(&mutex->m_queue);
	while (pthread != NULL) {
		/* Take the thread's scheduling lock: */
		THR_THREAD_LOCK(curthread, pthread);

		/* Remove the thread from the mutex queue: */
		TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
		pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;

		/*
		 * Only exit the loop if the thread hasn't been
		 * cancelled.
		 */
		/* NOTE(review): no default case; unknown protocols leave
		 * m_owner NULL and the loop simply tries the next waiter. */
		switch (mutex->m_protocol) {
		case PTHREAD_PRIO_NONE:
			/*
			 * Assign the new owner and add the mutex to the
			 * thread's list of owned mutexes.
			 */
			mutex->m_owner = pthread;
			TAILQ_INSERT_TAIL(&pthread->pri_mutexq, mutex, m_qe);
			break;

		case PTHREAD_PRIO_INHERIT:
			/*
			 * Assign the new owner and add the mutex to the
			 * thread's list of owned mutexes.
			 */
			mutex->m_owner = pthread;
			TAILQ_INSERT_TAIL(&pthread->pri_mutexq, mutex, m_qe);

			/* Track number of priority mutexes owned: */
			pthread->priority_mutex_count++;

			/*
			 * Set the priority of the mutex.  Since our waiting
			 * threads are in descending priority order, the
			 * priority of the mutex becomes the active priority
			 * of the thread we just dequeued.
			 */
			mutex->m_prio = pthread->active_priority;

			/* Save the owning thread's inherited priority: */
			mutex->m_saved_prio = pthread->inherited_priority;

			/*
			 * The owning thread's inherited priority now becomes
			 * its active priority (the priority of the mutex).
			 */
			pthread->inherited_priority = mutex->m_prio;
			break;

		case PTHREAD_PRIO_PROTECT:
			if (pthread->active_priority > mutex->m_prio) {
				/*
				 * Either the mutex ceiling priority has
				 * been lowered and/or this thread's priority
				 * has been raised subsequent to the thread
				 * being queued on the waiting list.
				 */
				pthread->error = EINVAL;
			}
			else {
				/*
				 * Assign the new owner and add the mutex
				 * to the thread's list of owned mutexes.
				 */
				mutex->m_owner = pthread;
				TAILQ_INSERT_TAIL(&pthread->pri_mutexq,
				    mutex, m_qe);

				/* Track number of priority mutexes owned: */
				pthread->priority_mutex_count++;

				/*
				 * Save the owning thread's inherited
				 * priority:
				 */
				mutex->m_saved_prio =
				    pthread->inherited_priority;

				/*
				 * The owning thread inherits the ceiling
				 * priority of the mutex and executes at
				 * that priority:
				 */
				pthread->inherited_priority = mutex->m_prio;
				pthread->active_priority = mutex->m_prio;

			}
			break;
		}

		/* Make the thread runnable and unlock the scheduling queue: */
		pthread->cycle++;
		_thr_umtx_wake(&pthread->cycle, 1);

		THR_THREAD_UNLOCK(curthread, pthread);
		if (mutex->m_owner == pthread)
			/* We're done; a valid owner was found. */
			break;
		else
			/* Get the next thread from the waiting queue: */
			pthread = TAILQ_NEXT(pthread, sqe);
	}

	if ((pthread == NULL) && (mutex->m_protocol == PTHREAD_PRIO_INHERIT))
		/* This mutex has no priority: */
		mutex->m_prio = 0;
	return (tid);
}
1624
#if 0
/*
 * Dequeue a waiting thread from the head of a mutex queue in descending
 * priority order.
 *
 * NOTE(review): this is compiled out.  As written, the loop drains the
 * entire queue and exits only when TAILQ_FIRST() is NULL, so the function
 * always returns NULL -- presumably a break/validity check was lost when
 * this was simplified.  Retained for reference only; fix before re-enabling.
 */
static pthread_t
mutex_queue_deq(struct pthread_mutex *mutex)
{
	pthread_t pthread;

	while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) {
		TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
		pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;
	}

	return (pthread);
}
#endif
1643
1644/*
1645 * Remove a waiting thread from a mutex queue in descending priority order.
1646 */
1647static void
1648mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread)
1649{
1650 if ((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) {
1651 TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
1652 pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;
1653 }
1654}
1655
1656/*
1657 * Enqueue a waiting thread to a queue in descending priority order.
1658 */
1659static void
1660mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
1661{
1662 pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head);
1663
1664 THR_ASSERT_NOT_IN_SYNCQ(pthread);
1665 /*
1666 * For the common case of all threads having equal priority,
1667 * we perform a quick check against the priority of the thread
1668 * at the tail of the queue.
1669 */
1670 if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
1671 TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, sqe);
1672 else {
1673 tid = TAILQ_FIRST(&mutex->m_queue);
1674 while (pthread->active_priority <= tid->active_priority)
1675 tid = TAILQ_NEXT(tid, sqe);
1676 TAILQ_INSERT_BEFORE(tid, pthread, sqe);
1677 }
1678 pthread->sflags |= THR_FLAGS_IN_SYNCQ;
1679}