2 * Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by John Birrell.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * $FreeBSD: src/lib/libc_r/uthread/uthread_kern.c,v 1.28.2.13 2002/10/22 14:44:03 fjoe Exp $
33 * $DragonFly: src/lib/libc_r/uthread/uthread_kern.c,v 1.2 2003/06/17 04:26:48 dillon Exp $
43 #include <sys/param.h>
44 #include <sys/types.h>
45 #include <sys/signalvar.h>
48 #include <sys/socket.h>
50 #include <sys/syscall.h>
53 #include "pthread_private.h"
55 /* #define DEBUG_THREAD_KERN */
56 #ifdef DEBUG_THREAD_KERN
57 #define DBG_MSG stdout_debug
62 /* Static function prototype definitions: */
64 thread_kern_poll(int wait_reqd);
67 dequeue_signals(void);
70 thread_run_switch_hook(pthread_t thread_out, pthread_t thread_in);
72 /* Static variables: */
73 static int last_tick = 0;
74 static int called_from_handler = 0;
77 * This is called when a signal handler finishes and wants to
78 * return to a previous frame.
81 _thread_kern_sched_frame(struct pthread_signal_frame *psf)
83 struct pthread *curthread = _get_curthread();
86 * Flag the pthread kernel as executing scheduler code
87 * to avoid a signal from interrupting this execution and
88 * corrupting the (soon-to-be) current frame.
90 _thread_kern_in_sched = 1;
92 /* Restore the signal frame: */
93 _thread_sigframe_restore(curthread, psf);
95 /* The signal mask was restored; check for any pending signals: */
96 curthread->check_pending = 1;
98 /* Switch to the thread scheduler: */
99 ___longjmp(_thread_kern_sched_jb, 1);
104 _thread_kern_sched(ucontext_t *ucp)
106 struct pthread *curthread = _get_curthread();
109 * Flag the pthread kernel as executing scheduler code
110 * to avoid a scheduler signal from interrupting this
111 * execution and calling the scheduler again.
113 _thread_kern_in_sched = 1;
115 /* Check if this function was called from the signal handler: */
117 /* XXX - Save FP registers? */
119 called_from_handler = 1;
120 DBG_MSG("Entering scheduler due to signal\n");
123 /* Save the state of the current thread: */
124 if (_setjmp(curthread->ctx.jb) != 0) {
125 DBG_MSG("Returned from ___longjmp, thread %p\n",
128 * This point is reached when a longjmp() is called
129 * to restore the state of a thread.
131 * This is the normal way out of the scheduler.
133 _thread_kern_in_sched = 0;
135 if (curthread->sig_defer_count == 0) {
136 if (((curthread->cancelflags &
137 PTHREAD_AT_CANCEL_POINT) == 0) &&
138 ((curthread->cancelflags &
139 PTHREAD_CANCEL_ASYNCHRONOUS) != 0))
141 * Cancellations override signals.
143 * Stick a cancellation point at the
144 * start of each async-cancellable
145 * thread's resumption.
147 * We allow threads woken at cancel
148 * points to do their own checks.
150 pthread_testcancel();
153 if (_sched_switch_hook != NULL) {
154 /* Run the installed switch hook: */
155 thread_run_switch_hook(_last_user_thread, curthread);
160 /* XXX - Restore FP registers? */
164 * Set the process signal mask in the context; it
165 * could have changed by the handler.
167 ucp->uc_sigmask = _process_sigmask;
169 /* Resume the interrupted thread: */
173 /* Switch to the thread scheduler: */
174 ___longjmp(_thread_kern_sched_jb, 1);
178 _thread_kern_sched_sig(void)
180 struct pthread *curthread = _get_curthread();
182 curthread->check_pending = 1;
183 _thread_kern_sched(NULL);
188 _thread_kern_scheduler(void)
192 struct pthread *curthread = _get_curthread();
193 pthread_t pthread, pthread_h;
194 unsigned int current_tick;
197 /* If the currently running thread is a user thread, save it: */
198 if ((curthread->flags & PTHREAD_FLAGS_PRIVATE) == 0)
199 _last_user_thread = curthread;
201 if (called_from_handler != 0) {
202 called_from_handler = 0;
205 * We were called from a signal handler; restore the process
208 if (__sys_sigprocmask(SIG_SETMASK,
209 &_process_sigmask, NULL) != 0)
210 PANIC("Unable to restore process mask after signal");
214 * Enter a scheduling loop that finds the next thread that is
215 * ready to run. This loop completes when there are no more threads
216 * in the global list or when a thread has its state restored by
217 * either a sigreturn (if the state was saved as a sigcontext) or a
218 * longjmp (if the state was saved by a setjmp).
220 while (!(TAILQ_EMPTY(&_thread_list))) {
221 /* Get the current time of day: */
223 TIMEVAL_TO_TIMESPEC(&tv, &ts);
224 current_tick = _sched_ticks;
227 * Protect the scheduling queues from access by the signal
233 if (curthread != &_thread_kern_thread) {
235 * This thread no longer needs to yield the CPU.
237 curthread->yield_on_sig_undefer = 0;
239 if (curthread->state != PS_RUNNING) {
241 * Save the current time as the time that the
242 * thread became inactive:
244 curthread->last_inactive = (long)current_tick;
245 if (curthread->last_inactive <
246 curthread->last_active) {
247 /* Account for a rollover: */
248 curthread->last_inactive =+
254 * Place the currently running thread into the
255 * appropriate queue(s).
257 switch (curthread->state) {
259 case PS_STATE_MAX: /* to silence -Wall */
262 * Dead and suspended threads are not placed
269 * Runnable threads can't be placed in the
270 * priority queue until after waiting threads
271 * are polled (to preserve round-robin
278 * States which do not depend on file descriptor I/O
279 * operations or timeouts:
291 /* No timeouts for these states: */
292 curthread->wakeup_time.tv_sec = -1;
293 curthread->wakeup_time.tv_nsec = -1;
295 /* Restart the time slice: */
296 curthread->slice_usec = -1;
298 /* Insert into the waiting queue: */
299 PTHREAD_WAITQ_INSERT(curthread);
302 /* States which can timeout: */
305 /* Restart the time slice: */
306 curthread->slice_usec = -1;
308 /* Insert into the waiting queue: */
309 PTHREAD_WAITQ_INSERT(curthread);
312 /* States that require periodic work: */
314 /* No timeouts for this state: */
315 curthread->wakeup_time.tv_sec = -1;
316 curthread->wakeup_time.tv_nsec = -1;
318 /* Increment spinblock count: */
326 /* Restart the time slice: */
327 curthread->slice_usec = -1;
329 /* Insert into the waiting queue: */
330 PTHREAD_WAITQ_INSERT(curthread);
332 /* Insert into the work queue: */
333 PTHREAD_WORKQ_INSERT(curthread);
338 * Are there pending signals for this thread?
340 * This check has to be performed after the thread
341 * has been placed in the queue(s) appropriate for
342 * its state. The process of adding pending signals
343 * can change a threads state, which in turn will
344 * attempt to add or remove the thread from any
345 * scheduling queue to which it belongs.
347 if (curthread->check_pending != 0) {
348 curthread->check_pending = 0;
349 _thread_sig_check_pending(curthread);
354 * Avoid polling file descriptors if there are none
357 if (TAILQ_EMPTY(&_workq) != 0) {
360 * Poll file descriptors only if a new scheduling signal
361 * has occurred or if we have no more runnable threads.
363 else if (((current_tick = _sched_ticks) != last_tick) ||
364 ((curthread->state != PS_RUNNING) &&
365 (PTHREAD_PRIOQ_FIRST() == NULL))) {
366 /* Unprotect the scheduling queues: */
370 * Poll file descriptors to update the state of threads
371 * waiting on file I/O where data may be available:
375 /* Protect the scheduling queues: */
378 last_tick = current_tick;
381 * Wake up threads that have timedout. This has to be
382 * done after polling in case a thread does a poll or
383 * select with zero time.
385 PTHREAD_WAITQ_SETACTIVE();
386 while (((pthread = TAILQ_FIRST(&_waitingq)) != NULL) &&
387 (pthread->wakeup_time.tv_sec != -1) &&
388 (((pthread->wakeup_time.tv_sec == 0) &&
389 (pthread->wakeup_time.tv_nsec == 0)) ||
390 (pthread->wakeup_time.tv_sec < ts.tv_sec) ||
391 ((pthread->wakeup_time.tv_sec == ts.tv_sec) &&
392 (pthread->wakeup_time.tv_nsec <= ts.tv_nsec)))) {
393 switch (pthread->state) {
396 /* Return zero file descriptors ready: */
397 pthread->data.poll_data->nfds = 0;
401 * Remove this thread from the waiting queue
402 * (and work queue if necessary) and place it
403 * in the ready queue.
405 PTHREAD_WAITQ_CLEARACTIVE();
406 if (pthread->flags & PTHREAD_FLAGS_IN_WORKQ)
407 PTHREAD_WORKQ_REMOVE(pthread);
408 PTHREAD_NEW_STATE(pthread, PS_RUNNING);
409 PTHREAD_WAITQ_SETACTIVE();
413 * Flag the timeout in the thread structure:
415 pthread->timeout = 1;
417 PTHREAD_WAITQ_CLEARACTIVE();
420 * Check to see if the current thread needs to be added
421 * to the priority queue:
423 if (add_to_prioq != 0) {
425 * Save the current time as the time that the
426 * thread became inactive:
428 current_tick = _sched_ticks;
429 curthread->last_inactive = (long)current_tick;
430 if (curthread->last_inactive <
431 curthread->last_active) {
432 /* Account for a rollover: */
433 curthread->last_inactive =+ UINT_MAX + 1;
436 if ((curthread->slice_usec != -1) &&
437 (curthread->attr.sched_policy != SCHED_FIFO)) {
439 * Accumulate the number of microseconds for
440 * which the current thread has run:
442 curthread->slice_usec +=
443 (curthread->last_inactive -
444 curthread->last_active) *
445 (long)_clock_res_usec;
446 /* Check for time quantum exceeded: */
447 if (curthread->slice_usec > TIMESLICE_USEC)
448 curthread->slice_usec = -1;
451 if (curthread->slice_usec == -1) {
453 * The thread exceeded its time
454 * quantum or it yielded the CPU;
455 * place it at the tail of the
456 * queue for its priority.
458 PTHREAD_PRIOQ_INSERT_TAIL(curthread);
461 * The thread hasn't exceeded its
462 * interval. Place it at the head
463 * of the queue for its priority.
465 PTHREAD_PRIOQ_INSERT_HEAD(curthread);
470 * Get the highest priority thread in the ready queue.
472 pthread_h = PTHREAD_PRIOQ_FIRST();
474 /* Check if there are no threads ready to run: */
475 if (pthread_h == NULL) {
477 * Lock the pthread kernel by changing the pointer to
478 * the running thread to point to the global kernel
481 _set_curthread(&_thread_kern_thread);
482 curthread = &_thread_kern_thread;
484 DBG_MSG("No runnable threads, using kernel thread %p\n",
487 /* Unprotect the scheduling queues: */
491 * There are no threads ready to run, so wait until
492 * something happens that changes this condition:
497 * This process' usage will likely be very small
498 * while waiting in a poll. Since the scheduling
499 * clock is based on the profiling timer, it is
500 * unlikely that the profiling timer will fire
501 * and update the time of day. To account for this,
502 * get the time of day after polling with a timeout.
504 gettimeofday((struct timeval *) &_sched_tod, NULL);
506 /* Check once more for a runnable thread: */
508 pthread_h = PTHREAD_PRIOQ_FIRST();
512 if (pthread_h != NULL) {
513 /* Remove the thread from the ready queue: */
514 PTHREAD_PRIOQ_REMOVE(pthread_h);
516 /* Unprotect the scheduling queues: */
520 * Check for signals queued while the scheduling
521 * queues were protected:
523 while (_sigq_check_reqd != 0) {
524 /* Clear before handling queued signals: */
525 _sigq_check_reqd = 0;
527 /* Protect the scheduling queues again: */
533 * Check for a higher priority thread that
534 * became runnable due to signal handling.
536 if (((pthread = PTHREAD_PRIOQ_FIRST()) != NULL) &&
537 (pthread->active_priority > pthread_h->active_priority)) {
538 /* Remove the thread from the ready queue: */
539 PTHREAD_PRIOQ_REMOVE(pthread);
542 * Insert the lower priority thread
543 * at the head of its priority list:
545 PTHREAD_PRIOQ_INSERT_HEAD(pthread_h);
547 /* There's a new thread in town: */
551 /* Unprotect the scheduling queues: */
555 /* Make the selected thread the current thread: */
556 _set_curthread(pthread_h);
557 curthread = pthread_h;
560 * Save the current time as the time that the thread
563 current_tick = _sched_ticks;
564 curthread->last_active = (long) current_tick;
567 * Check if this thread is running for the first time
568 * or running again after using its full time slice
571 if (curthread->slice_usec == -1) {
572 /* Reset the accumulated time slice period: */
573 curthread->slice_usec = 0;
577 * If we had a context switch, run any
578 * installed switch hooks.
580 if ((_sched_switch_hook != NULL) &&
581 (_last_user_thread != curthread)) {
582 thread_run_switch_hook(_last_user_thread,
586 * Continue the thread at its current frame:
589 _setcontext(&curthread->ctx.uc);
591 ___longjmp(curthread->ctx.jb, 1);
593 /* This point should not be reached. */
594 PANIC("Thread has returned from sigreturn or longjmp");
598 /* There are no more threads, so exit this process: */
603 _thread_kern_sched_state(enum pthread_state state, char *fname, int lineno)
605 struct pthread *curthread = _get_curthread();
608 * Flag the pthread kernel as executing scheduler code
609 * to avoid a scheduler signal from interrupting this
610 * execution and calling the scheduler again.
612 _thread_kern_in_sched = 1;
615 * Prevent the signal handler from fiddling with this thread
616 * before its state is set and is placed into the proper queue.
620 /* Change the state of the current thread: */
621 curthread->state = state;
622 curthread->fname = fname;
623 curthread->lineno = lineno;
625 /* Schedule the next thread that is ready: */
626 _thread_kern_sched(NULL);
630 _thread_kern_sched_state_unlock(enum pthread_state state,
631 spinlock_t *lock, char *fname, int lineno)
633 struct pthread *curthread = _get_curthread();
636 * Flag the pthread kernel as executing scheduler code
637 * to avoid a scheduler signal from interrupting this
638 * execution and calling the scheduler again.
640 _thread_kern_in_sched = 1;
643 * Prevent the signal handler from fiddling with this thread
644 * before its state is set and it is placed into the proper
649 /* Change the state of the current thread: */
650 curthread->state = state;
651 curthread->fname = fname;
652 curthread->lineno = lineno;
656 /* Schedule the next thread that is ready: */
657 _thread_kern_sched(NULL);
661 thread_kern_poll(int wait_reqd)
665 int kern_pipe_added = 0;
668 struct pthread *pthread;
672 /* Check if the caller wants to wait: */
673 if (wait_reqd == 0) {
677 /* Get the current time of day: */
679 TIMEVAL_TO_TIMESPEC(&tv, &ts);
682 pthread = TAILQ_FIRST(&_waitingq);
685 if ((pthread == NULL) || (pthread->wakeup_time.tv_sec == -1)) {
687 * Either there are no threads in the waiting queue,
688 * or there are no threads that can timeout.
692 else if (pthread->wakeup_time.tv_sec - ts.tv_sec > 60000)
693 /* Limit maximum timeout to prevent rollover. */
697 * Calculate the time left for the next thread to
700 timeout_ms = ((pthread->wakeup_time.tv_sec - ts.tv_sec) *
701 1000) + ((pthread->wakeup_time.tv_nsec - ts.tv_nsec) /
704 * Don't allow negative timeouts:
711 /* Protect the scheduling queues: */
715 * Check to see if the signal queue needs to be walked to look
716 * for threads awoken by a signal while in the scheduler.
718 if (_sigq_check_reqd != 0) {
719 /* Reset flag before handling queued signals: */
720 _sigq_check_reqd = 0;
726 * Check for a thread that became runnable due to a signal:
728 if (PTHREAD_PRIOQ_FIRST() != NULL) {
730 * Since there is at least one runnable thread,
737 * Form the poll table:
740 if (timeout_ms != 0) {
741 /* Add the kernel pipe to the poll table: */
742 _thread_pfd_table[nfds].fd = _thread_kern_pipe[0];
743 _thread_pfd_table[nfds].events = POLLRDNORM;
744 _thread_pfd_table[nfds].revents = 0;
749 PTHREAD_WAITQ_SETACTIVE();
750 TAILQ_FOREACH(pthread, &_workq, qe) {
751 switch (pthread->state) {
754 * If the lock is available, let the thread run.
756 if (pthread->data.spinlock->access_lock == 0) {
757 PTHREAD_WAITQ_CLEARACTIVE();
758 PTHREAD_WORKQ_REMOVE(pthread);
759 PTHREAD_NEW_STATE(pthread,PS_RUNNING);
760 PTHREAD_WAITQ_SETACTIVE();
761 /* One less thread in a spinblock state: */
764 * Since there is at least one runnable
765 * thread, disable the wait.
771 /* File descriptor read wait: */
773 /* Limit number of polled files to table size: */
774 if (nfds < _thread_dtablesize) {
775 _thread_pfd_table[nfds].events = POLLRDNORM;
776 _thread_pfd_table[nfds].fd = pthread->data.fd.fd;
781 /* File descriptor write wait: */
783 /* Limit number of polled files to table size: */
784 if (nfds < _thread_dtablesize) {
785 _thread_pfd_table[nfds].events = POLLWRNORM;
786 _thread_pfd_table[nfds].fd = pthread->data.fd.fd;
791 /* File descriptor poll or select wait: */
794 /* Limit number of polled files to table size: */
795 if (pthread->data.poll_data->nfds + nfds <
796 _thread_dtablesize) {
797 for (i = 0; i < pthread->data.poll_data->nfds; i++) {
798 _thread_pfd_table[nfds + i].fd =
799 pthread->data.poll_data->fds[i].fd;
800 _thread_pfd_table[nfds + i].events =
801 pthread->data.poll_data->fds[i].events;
803 nfds += pthread->data.poll_data->nfds;
807 /* Other states do not depend on file I/O. */
812 PTHREAD_WAITQ_CLEARACTIVE();
815 * Wait for a file descriptor to be ready for read, write, or
816 * an exception, or a timeout to occur:
818 count = __sys_poll(_thread_pfd_table, nfds, timeout_ms);
820 if (kern_pipe_added != 0)
822 * Remove the pthread kernel pipe file descriptor
823 * from the pollfd table:
830 * Check if it is possible that there are bytes in the kernel
831 * read pipe waiting to be read:
833 if (count < 0 || ((kern_pipe_added != 0) &&
834 (_thread_pfd_table[0].revents & POLLRDNORM))) {
836 * If the kernel read pipe was included in the
840 /* Decrement the count of file descriptors: */
844 if (_sigq_check_reqd != 0) {
845 /* Reset flag before handling signals: */
846 _sigq_check_reqd = 0;
853 * Check if any file descriptors are ready:
857 * Enter a loop to look for threads waiting on file
858 * descriptors that are flagged as available by the
861 PTHREAD_WAITQ_SETACTIVE();
862 TAILQ_FOREACH(pthread, &_workq, qe) {
863 switch (pthread->state) {
866 * If the lock is available, let the thread run.
868 if (pthread->data.spinlock->access_lock == 0) {
869 PTHREAD_WAITQ_CLEARACTIVE();
870 PTHREAD_WORKQ_REMOVE(pthread);
871 PTHREAD_NEW_STATE(pthread,PS_RUNNING);
872 PTHREAD_WAITQ_SETACTIVE();
875 * One less thread in a spinblock state:
881 /* File descriptor read wait: */
883 if ((nfds < _thread_dtablesize) &&
884 ((_thread_pfd_table[nfds].revents
885 & (POLLRDNORM | POLLHUP
886 | POLLERR | POLLNVAL)) != 0)) {
887 PTHREAD_WAITQ_CLEARACTIVE();
888 PTHREAD_WORKQ_REMOVE(pthread);
889 PTHREAD_NEW_STATE(pthread,PS_RUNNING);
890 PTHREAD_WAITQ_SETACTIVE();
895 /* File descriptor write wait: */
897 if ((nfds < _thread_dtablesize) &&
898 ((_thread_pfd_table[nfds].revents
899 & (POLLWRNORM | POLLHUP
900 | POLLERR | POLLNVAL)) != 0)) {
901 PTHREAD_WAITQ_CLEARACTIVE();
902 PTHREAD_WORKQ_REMOVE(pthread);
903 PTHREAD_NEW_STATE(pthread,PS_RUNNING);
904 PTHREAD_WAITQ_SETACTIVE();
909 /* File descriptor poll or select wait: */
912 if (pthread->data.poll_data->nfds + nfds <
913 _thread_dtablesize) {
915 * Enter a loop looking for I/O
919 for (i = 0; i < pthread->data.poll_data->nfds; i++) {
920 if (_thread_pfd_table[nfds + i].revents != 0) {
921 pthread->data.poll_data->fds[i].revents =
922 _thread_pfd_table[nfds + i].revents;
927 /* Increment before destroying: */
928 nfds += pthread->data.poll_data->nfds;
931 pthread->data.poll_data->nfds = found;
932 PTHREAD_WAITQ_CLEARACTIVE();
933 PTHREAD_WORKQ_REMOVE(pthread);
934 PTHREAD_NEW_STATE(pthread,PS_RUNNING);
935 PTHREAD_WAITQ_SETACTIVE();
939 nfds += pthread->data.poll_data->nfds;
942 /* Other states do not depend on file I/O. */
947 PTHREAD_WAITQ_CLEARACTIVE();
949 else if (_spinblock_count != 0) {
951 * Enter a loop to look for threads waiting on a spinlock
952 * that is now available.
954 PTHREAD_WAITQ_SETACTIVE();
955 TAILQ_FOREACH(pthread, &_workq, qe) {
956 if (pthread->state == PS_SPINBLOCK) {
958 * If the lock is available, let the thread run.
960 if (pthread->data.spinlock->access_lock == 0) {
961 PTHREAD_WAITQ_CLEARACTIVE();
962 PTHREAD_WORKQ_REMOVE(pthread);
963 PTHREAD_NEW_STATE(pthread,PS_RUNNING);
964 PTHREAD_WAITQ_SETACTIVE();
967 * One less thread in a spinblock state:
973 PTHREAD_WAITQ_CLEARACTIVE();
976 /* Unprotect the scheduling queues: */
979 while (_sigq_check_reqd != 0) {
980 /* Handle queued signals: */
981 _sigq_check_reqd = 0;
983 /* Protect the scheduling queues: */
988 /* Unprotect the scheduling queues: */
994 _thread_kern_set_timeout(const struct timespec * timeout)
996 struct pthread *curthread = _get_curthread();
997 struct timespec current_time;
1000 /* Reset the timeout flag for the running thread: */
1001 curthread->timeout = 0;
1003 /* Check if the thread is to wait forever: */
1004 if (timeout == NULL) {
1006 * Set the wakeup time to something that can be recognised as
1007 * different to an actual time of day:
1009 curthread->wakeup_time.tv_sec = -1;
1010 curthread->wakeup_time.tv_nsec = -1;
1012 /* Check if no waiting is required: */
1013 else if (timeout->tv_sec == 0 && timeout->tv_nsec == 0) {
1014 /* Set the wake up time to 'immediately': */
1015 curthread->wakeup_time.tv_sec = 0;
1016 curthread->wakeup_time.tv_nsec = 0;
1018 /* Get the current time: */
1019 GET_CURRENT_TOD(tv);
1020 TIMEVAL_TO_TIMESPEC(&tv, ¤t_time);
1022 /* Calculate the time for the current thread to wake up: */
1023 curthread->wakeup_time.tv_sec = current_time.tv_sec + timeout->tv_sec;
1024 curthread->wakeup_time.tv_nsec = current_time.tv_nsec + timeout->tv_nsec;
1026 /* Check if the nanosecond field needs to wrap: */
1027 if (curthread->wakeup_time.tv_nsec >= 1000000000) {
1028 /* Wrap the nanosecond field: */
1029 curthread->wakeup_time.tv_sec += 1;
1030 curthread->wakeup_time.tv_nsec -= 1000000000;
1036 _thread_kern_sig_defer(void)
1038 struct pthread *curthread = _get_curthread();
1040 /* Allow signal deferral to be recursive. */
1041 curthread->sig_defer_count++;
1045 _thread_kern_sig_undefer(void)
1047 struct pthread *curthread = _get_curthread();
1050 * Perform checks to yield only if we are about to undefer
1053 if (curthread->sig_defer_count > 1) {
1054 /* Decrement the signal deferral count. */
1055 curthread->sig_defer_count--;
1057 else if (curthread->sig_defer_count == 1) {
1058 /* Reenable signals: */
1059 curthread->sig_defer_count = 0;
1062 * Check if there are queued signals:
1064 if (_sigq_check_reqd != 0)
1065 _thread_kern_sched(NULL);
1068 * Check for asynchronous cancellation before delivering any
1071 if (((curthread->cancelflags & PTHREAD_AT_CANCEL_POINT) == 0) &&
1072 ((curthread->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) != 0))
1073 pthread_testcancel();
1076 * If there are pending signals or this thread has
1077 * to yield the CPU, call the kernel scheduler:
1079 * XXX - Come back and revisit the pending signal problem
1081 if ((curthread->yield_on_sig_undefer != 0) ||
1082 SIGNOTEMPTY(curthread->sigpend)) {
1083 curthread->yield_on_sig_undefer = 0;
1084 _thread_kern_sched(NULL);
1090 dequeue_signals(void)
1096 * Enter a loop to clear the pthread kernel pipe:
1098 while (((num = __sys_read(_thread_kern_pipe[0], bufr,
1099 sizeof(bufr))) > 0) || (num == -1 && errno == EINTR)) {
1101 if ((num < 0) && (errno != EAGAIN)) {
1103 * The only error we should expect is if there is
1106 PANIC("Unable to read from thread kernel pipe");
1108 /* Handle any pending signals: */
1109 _thread_sig_handle_pending();
1113 thread_run_switch_hook(pthread_t thread_out, pthread_t thread_in)
1115 pthread_t tid_out = thread_out;
1116 pthread_t tid_in = thread_in;
1118 if ((tid_out != NULL) &&
1119 (tid_out->flags & PTHREAD_FLAGS_PRIVATE) != 0)
1121 if ((tid_in != NULL) &&
1122 (tid_in->flags & PTHREAD_FLAGS_PRIVATE) != 0)
1125 if ((_sched_switch_hook != NULL) && (tid_out != tid_in)) {
1126 /* Run the scheduler switch hook: */
1127 _sched_switch_hook(tid_out, tid_in);
1132 _get_curthread(void)
1134 if (_thread_initial == NULL)
1137 return (_thread_run);
1141 _set_curthread(struct pthread *newthread)
1143 _thread_run = newthread;