2 * Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by John Birrell.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * $FreeBSD: src/lib/libc_r/uthread/uthread_fd.c,v 1.16.2.7 2002/10/22 14:44:03 fjoe Exp $
40 #include "pthread_private.h"
/*
 * FDQ_INSERT(q, p): append thread p to fd wait queue q and set
 * PTHREAD_FLAGS_IN_FDQ so a later FDQ_REMOVE knows p is queued.
 * NOTE(review): the macro's do/while wrapper lines are elided from this
 * excerpt; do not insert anything between the continuation lines below.
 */
42 #define FDQ_INSERT(q,p) \
44 TAILQ_INSERT_TAIL(q,p,qe); \
45 p->flags |= PTHREAD_FLAGS_IN_FDQ; \
/*
 * FDQ_REMOVE(q, p): unlink thread p from fd wait queue q, but only when
 * PTHREAD_FLAGS_IN_FDQ shows it was actually inserted; clears the flag
 * afterwards.  This makes removal idempotent and safe for threads that
 * were never queued.
 */
48 #define FDQ_REMOVE(q,p) \
50 if ((p->flags & PTHREAD_FLAGS_IN_FDQ) != 0) { \
51 TAILQ_REMOVE(q,p,qe); \
52 p->flags &= ~PTHREAD_FLAGS_IN_FDQ; \
57 /* Static variables: */
/*
 * Serialises concurrent allocation of _thread_fd_table[] entries; taken
 * only in _thread_fd_table_init() around the winner-decides check.
 */
58 static spinlock_t fd_table_lock = _SPINLOCK_INITIALIZER;
61 #ifdef _FDLOCKS_ENABLED
/* Select the next non-interrupted waiter on an fd's read/write queue. */
62 static inline pthread_t fd_next_reader(int fd);
63 static inline pthread_t fd_next_writer(int fd);
68 * This function *must* return -1 and set the thread specific errno
69 * as a system call. This is because the error return from this
70 * function is propagated directly back from thread-wrapped system
/*
 * Lazily create and initialise the fd_table_entry for fd: zero its
 * spinlock, clear owners/queues, cache the file's fcntl flags, and make
 * the descriptor non-blocking.  Allocation happens with fd_table_lock
 * released (fcntl may recurse into this module); a second lock-protected
 * check decides which of two racing threads installs its entry.
 * NOTE(review): the return type line, error-return statements and several
 * closing braces fall on lines elided from this excerpt.
 */
75 _thread_fd_table_init(int fd)
78 struct fd_table_entry *entry;
81 if (_thread_initial == NULL)
84 /* Check if the file descriptor is out of range: */
85 if (fd < 0 || fd >= _thread_dtablesize) {
86 /* Return a bad file descriptor error: */
92 * Check if memory has already been allocated for this file
95 else if (_thread_fd_table[fd] != NULL) {
96 /* Memory has already been allocated. */
98 /* Allocate memory for the file descriptor table entry: */
99 } else if ((entry = (struct fd_table_entry *)
100 malloc(sizeof(struct fd_table_entry))) == NULL) {
101 /* Return an insufficient memory error: */
105 /* Initialise the file locks: */
106 memset(&entry->lock, 0, sizeof(entry->lock));
107 entry->r_owner = NULL;
108 entry->w_owner = NULL;
109 entry->r_fname = NULL;
110 entry->w_fname = NULL;
113 entry->r_lockcount = 0;
114 entry->w_lockcount = 0;
116 /* Initialise the read/write queues: */
117 TAILQ_INIT(&entry->r_queue);
118 TAILQ_INIT(&entry->w_queue);
120 /* Get the flags for the file: */
/*
 * For fds 0-2, prefer the flags snapshotted at startup when available;
 * querying F_GETFL here would see O_NONBLOCK leak across all stdio fds.
 */
121 if (((fd >= 3) || (_pthread_stdio_flags[fd] == -1)) &&
122 (entry->flags = __sys_fcntl(fd, F_GETFL, 0)) == -1) {
126 /* Check if a stdio descriptor: */
127 if ((fd < 3) && (_pthread_stdio_flags[fd] != -1))
129 * Use the stdio flags read by
130 * _pthread_init() to avoid
131 * mistaking the non-blocking
132 * flag that, when set on one
133 * stdio fd, is set on all stdio
136 entry->flags = _pthread_stdio_flags[fd];
139 * Make the file descriptor non-blocking.
140 * This might fail if the device driver does
141 * not support non-blocking calls, or if the
142 * driver is naturally non-blocking.
/* Return value deliberately ignored -- failure here is tolerated. */
145 __sys_fcntl(fd, F_SETFL,
146 entry->flags | O_NONBLOCK);
149 /* Lock the file descriptor table: */
150 _SPINLOCK(&fd_table_lock);
153 * Check if another thread allocated the
154 * file descriptor entry while this thread
155 * was doing the same thing. The table wasn't
156 * kept locked during this operation because
157 * it has the potential to recurse.
159 if (_thread_fd_table[fd] == NULL) {
160 /* This thread wins: */
161 _thread_fd_table[fd] = entry;
165 /* Unlock the file descriptor table: */
166 _SPINUNLOCK(&fd_table_lock);
170 * Check if another thread initialised the table entry
171 * before this one could:
175 * Throw away the table entry that this thread
176 * prepared. The other thread wins.
181 /* Return the completion status: */
/*
 * Return the cached file status flags for fd.
 * NOTE(review): the branch taken when the table entry is uninitialised
 * is elided from this excerpt -- confirm its error value upstream.
 */
186 _thread_fd_getflags(int fd)
188 if (_thread_fd_table[fd] != NULL)
189 return (_thread_fd_table[fd]->flags);
/*
 * Update the cached file status flags for fd; silently a no-op when the
 * table entry has not been initialised.
 */
195 _thread_fd_setflags(int fd, int flags)
197 if (_thread_fd_table[fd] != NULL)
198 _thread_fd_table[fd]->flags = flags;
201 #ifdef _FDLOCKS_ENABLED
/*
 * Release the read and/or write lock (per lock_type) that the current
 * thread holds on fd, and hand ownership to the next queued waiter,
 * waking it with PS_RUNNING.  Signals are deferred for the duration to
 * keep the scheduling queues consistent.
 * NOTE(review): in this excerpt the `== NULL` arms below sit directly
 * above the FDQ_REMOVE/wakeup code, but the intervening lines (the empty
 * NULL-arm body and the `else {`) are elided -- the dequeue belongs to
 * the non-NULL branch.  Confirm against the full source before editing.
 */
203 _thread_fd_unlock(int fd, int lock_type)
205 struct pthread *curthread = _get_curthread();
209 * Check that the file descriptor table is initialised for this
212 if ((ret = _thread_fd_table_init(fd)) == 0) {
214 * Defer signals to protect the scheduling queues from
215 * access by the signal handler:
217 _thread_kern_sig_defer();
220 * Lock the file descriptor table entry to prevent
221 * other threads for clashing with the current
224 _SPINLOCK(&_thread_fd_table[fd]->lock);
226 /* Check if the running thread owns the read lock: */
227 if (_thread_fd_table[fd]->r_owner == curthread) {
228 /* Check the file descriptor and lock types: */
229 if (lock_type == FD_READ || lock_type == FD_RDWR) {
231 * Decrement the read lock count for the
234 _thread_fd_table[fd]->r_lockcount--;
237 * Check if the running thread still has read
238 * locks on this file descriptor:
240 if (_thread_fd_table[fd]->r_lockcount != 0) {
243 * Get the next thread in the queue for a
244 * read lock on this file descriptor:
246 else if ((_thread_fd_table[fd]->r_owner = fd_next_reader(fd)) == NULL) {
248 /* Remove this thread from the queue: */
249 FDQ_REMOVE(&_thread_fd_table[fd]->r_queue,
250 _thread_fd_table[fd]->r_owner);
253 * Set the state of the new owner of
254 * the thread to running:
256 PTHREAD_NEW_STATE(_thread_fd_table[fd]->r_owner,PS_RUNNING);
259 * Reset the number of read locks.
260 * This will be incremented by the
261 * new owner of the lock when it sees
262 * that it has the lock.
264 _thread_fd_table[fd]->r_lockcount = 0;
268 /* Check if the running thread owns the write lock: */
269 if (_thread_fd_table[fd]->w_owner == curthread) {
270 /* Check the file descriptor and lock types: */
271 if (lock_type == FD_WRITE || lock_type == FD_RDWR) {
273 * Decrement the write lock count for the
276 _thread_fd_table[fd]->w_lockcount--;
279 * Check if the running thread still has
280 * write locks on this file descriptor:
282 if (_thread_fd_table[fd]->w_lockcount != 0) {
285 * Get the next thread in the queue for a
286 * write lock on this file descriptor:
288 else if ((_thread_fd_table[fd]->w_owner = fd_next_writer(fd)) == NULL) {
290 /* Remove this thread from the queue: */
291 FDQ_REMOVE(&_thread_fd_table[fd]->w_queue,
292 _thread_fd_table[fd]->w_owner);
295 * Set the state of the new owner of
296 * the thread to running:
298 PTHREAD_NEW_STATE(_thread_fd_table[fd]->w_owner,PS_RUNNING);
301 * Reset the number of write locks.
302 * This will be incremented by the
303 * new owner of the lock when it
304 * sees that it has the lock.
306 _thread_fd_table[fd]->w_lockcount = 0;
311 /* Unlock the file descriptor table entry: */
312 _SPINUNLOCK(&_thread_fd_table[fd]->lock);
315 * Undefer and handle pending signals, yielding if
318 _thread_kern_sig_undefer();
/*
 * Acquire a read and/or write lock (per lock_type) on fd for the current
 * thread, optionally bounded by timeout (NULL appears to mean wait
 * forever -- confirm in _thread_kern_set_timeout).  While another thread
 * owns the lock, the caller is queued and parked in PS_FDLR_WAIT /
 * PS_FDLW_WAIT; the unlocking thread grants ownership and wakes it.
 * Recursive acquisition by the current owner just bumps the lock count.
 * An interruption (signal/cancel) aborts the wait, removes the thread
 * from the queue, and runs any pending continuation.
 * NOTE(review): the return type, `ret` declaration and error-return lines
 * are elided from this excerpt.
 */
323 _thread_fd_lock(int fd, int lock_type, struct timespec * timeout)
325 struct pthread *curthread = _get_curthread();
329 * Check that the file descriptor table is initialised for this
332 if ((ret = _thread_fd_table_init(fd)) == 0) {
333 /* Clear the interrupted flag: */
334 curthread->interrupted = 0;
337 * Lock the file descriptor table entry to prevent
338 * other threads for clashing with the current
341 _SPINLOCK(&_thread_fd_table[fd]->lock);
343 /* Check the file descriptor and lock types: */
344 if (lock_type == FD_READ || lock_type == FD_RDWR) {
346 * Wait for the file descriptor to be locked
347 * for read for the current thread:
349 while ((_thread_fd_table[fd]->r_owner != curthread) &&
350 (curthread->interrupted == 0)) {
352 * Check if the file descriptor is locked by
355 if (_thread_fd_table[fd]->r_owner != NULL) {
357 * Another thread has locked the file
358 * descriptor for read, so join the
359 * queue of threads waiting for a
360 * read lock on this file descriptor:
362 FDQ_INSERT(&_thread_fd_table[fd]->r_queue, curthread);
365 * Save the file descriptor details
366 * in the thread structure for the
369 curthread->data.fd.fd = fd;
371 /* Set the timeout: */
372 _thread_kern_set_timeout(timeout);
375 * Unlock the file descriptor
/* Entry lock must be dropped before blocking in the scheduler. */
378 _SPINUNLOCK(&_thread_fd_table[fd]->lock);
381 * Schedule this thread to wait on
382 * the read lock. It will only be
383 * woken when it becomes the next in
384 * the queue and is granted access
385 * to the lock by the thread
386 * that is unlocking the file
389 _thread_kern_sched_state(PS_FDLR_WAIT, __FILE__, __LINE__);
392 * Lock the file descriptor
395 _SPINLOCK(&_thread_fd_table[fd]->lock);
397 if (curthread->interrupted != 0) {
398 FDQ_REMOVE(&_thread_fd_table[fd]->r_queue,
403 * The running thread now owns the
404 * read lock on this file descriptor:
406 _thread_fd_table[fd]->r_owner = curthread;
409 * Reset the number of read locks for
410 * this file descriptor:
412 _thread_fd_table[fd]->r_lockcount = 0;
416 if (_thread_fd_table[fd]->r_owner == curthread)
417 /* Increment the read lock count: */
418 _thread_fd_table[fd]->r_lockcount++;
421 /* Check the file descriptor and lock types: */
422 if (curthread->interrupted == 0 &&
423 (lock_type == FD_WRITE || lock_type == FD_RDWR)) {
425 * Wait for the file descriptor to be locked
426 * for write for the current thread:
428 while ((_thread_fd_table[fd]->w_owner != curthread) &&
429 (curthread->interrupted == 0)) {
431 * Check if the file descriptor is locked by
434 if (_thread_fd_table[fd]->w_owner != NULL) {
436 * Another thread has locked the file
437 * descriptor for write, so join the
438 * queue of threads waiting for a
439 * write lock on this file
442 FDQ_INSERT(&_thread_fd_table[fd]->w_queue, curthread);
445 * Save the file descriptor details
446 * in the thread structure for the
449 curthread->data.fd.fd = fd;
451 /* Set the timeout: */
452 _thread_kern_set_timeout(timeout);
455 * Unlock the file descriptor
458 _SPINUNLOCK(&_thread_fd_table[fd]->lock);
461 * Schedule this thread to wait on
462 * the write lock. It will only be
463 * woken when it becomes the next in
464 * the queue and is granted access to
465 * the lock by the thread that is
466 * unlocking the file descriptor.
468 _thread_kern_sched_state(PS_FDLW_WAIT, __FILE__, __LINE__);
471 * Lock the file descriptor
474 _SPINLOCK(&_thread_fd_table[fd]->lock);
476 if (curthread->interrupted != 0) {
477 FDQ_REMOVE(&_thread_fd_table[fd]->w_queue,
482 * The running thread now owns the
483 * write lock on this file
486 _thread_fd_table[fd]->w_owner = curthread;
489 * Reset the number of write locks
490 * for this file descriptor:
492 _thread_fd_table[fd]->w_lockcount = 0;
496 if (_thread_fd_table[fd]->w_owner == curthread)
497 /* Increment the write lock count: */
498 _thread_fd_table[fd]->w_lockcount++;
501 /* Unlock the file descriptor table entry: */
502 _SPINUNLOCK(&_thread_fd_table[fd]->lock);
504 if (curthread->interrupted != 0) {
/* An interrupted wait runs the thread's pending continuation (e.g. cancellation). */
507 if (curthread->continuation != NULL)
508 curthread->continuation((void *)curthread);
512 /* Return the completion status: */
/*
 * Debug variant of _thread_fd_unlock(): identical release/handoff logic,
 * but records the caller's source file and line by taking the entry lock
 * through _spinlock_debug(fname, lineno) instead of _SPINLOCK.
 * NOTE(review): as in _thread_fd_unlock(), the `== NULL` arms below are
 * separated from the FDQ_REMOVE/wakeup code by elided lines; the dequeue
 * belongs to the non-NULL (else) branch in the full source.
 */
517 _thread_fd_unlock_debug(int fd, int lock_type, char *fname, int lineno)
519 struct pthread *curthread = _get_curthread();
523 * Check that the file descriptor table is initialised for this
526 if ((ret = _thread_fd_table_init(fd)) == 0) {
528 * Defer signals to protect the scheduling queues from
529 * access by the signal handler:
531 _thread_kern_sig_defer();
534 * Lock the file descriptor table entry to prevent
535 * other threads for clashing with the current
538 _spinlock_debug(&_thread_fd_table[fd]->lock, fname, lineno);
540 /* Check if the running thread owns the read lock: */
541 if (_thread_fd_table[fd]->r_owner == curthread) {
542 /* Check the file descriptor and lock types: */
543 if (lock_type == FD_READ || lock_type == FD_RDWR) {
545 * Decrement the read lock count for the
548 _thread_fd_table[fd]->r_lockcount--;
551 * Check if the running thread still has read
552 * locks on this file descriptor:
554 if (_thread_fd_table[fd]->r_lockcount != 0) {
557 * Get the next thread in the queue for a
558 * read lock on this file descriptor:
560 else if ((_thread_fd_table[fd]->r_owner = fd_next_reader(fd)) == NULL) {
562 /* Remove this thread from the queue: */
563 FDQ_REMOVE(&_thread_fd_table[fd]->r_queue,
564 _thread_fd_table[fd]->r_owner);
567 * Set the state of the new owner of
568 * the thread to running:
570 PTHREAD_NEW_STATE(_thread_fd_table[fd]->r_owner,PS_RUNNING);
573 * Reset the number of read locks.
574 * This will be incremented by the
575 * new owner of the lock when it sees
576 * that it has the lock.
578 _thread_fd_table[fd]->r_lockcount = 0;
582 /* Check if the running thread owns the write lock: */
583 if (_thread_fd_table[fd]->w_owner == curthread) {
584 /* Check the file descriptor and lock types: */
585 if (lock_type == FD_WRITE || lock_type == FD_RDWR) {
587 * Decrement the write lock count for the
590 _thread_fd_table[fd]->w_lockcount--;
593 * Check if the running thread still has
594 * write locks on this file descriptor:
596 if (_thread_fd_table[fd]->w_lockcount != 0) {
599 * Get the next thread in the queue for a
600 * write lock on this file descriptor:
602 else if ((_thread_fd_table[fd]->w_owner = fd_next_writer(fd)) == NULL) {
604 /* Remove this thread from the queue: */
605 FDQ_REMOVE(&_thread_fd_table[fd]->w_queue,
606 _thread_fd_table[fd]->w_owner);
609 * Set the state of the new owner of
610 * the thread to running:
612 PTHREAD_NEW_STATE(_thread_fd_table[fd]->w_owner,PS_RUNNING);
615 * Reset the number of write locks.
616 * This will be incremented by the
617 * new owner of the lock when it
618 * sees that it has the lock.
620 _thread_fd_table[fd]->w_lockcount = 0;
625 /* Unlock the file descriptor table entry: */
626 _SPINUNLOCK(&_thread_fd_table[fd]->lock);
629 * Undefer and handle pending signals, yielding if
632 _thread_kern_sig_undefer();
/*
 * Debug variant of _thread_fd_lock(): same acquisition/queueing logic,
 * additionally recording the caller's file/line in the thread's fd data
 * (data.fd.branch / data.fd.fname) and in the table entry's
 * r_fname/r_lineno and w_fname/w_lineno fields for lock diagnostics.
 * NOTE(review): the return type, `ret` declaration and error-return lines
 * are elided from this excerpt.
 */
637 _thread_fd_lock_debug(int fd, int lock_type, struct timespec * timeout,
638 char *fname, int lineno)
640 struct pthread *curthread = _get_curthread();
644 * Check that the file descriptor table is initialised for this
647 if ((ret = _thread_fd_table_init(fd)) == 0) {
648 /* Clear the interrupted flag: */
649 curthread->interrupted = 0;
652 * Lock the file descriptor table entry to prevent
653 * other threads for clashing with the current
656 _spinlock_debug(&_thread_fd_table[fd]->lock, fname, lineno);
658 /* Check the file descriptor and lock types: */
659 if (lock_type == FD_READ || lock_type == FD_RDWR) {
661 * Wait for the file descriptor to be locked
662 * for read for the current thread:
664 while ((_thread_fd_table[fd]->r_owner != curthread) &&
665 (curthread->interrupted == 0)) {
667 * Check if the file descriptor is locked by
670 if (_thread_fd_table[fd]->r_owner != NULL) {
672 * Another thread has locked the file
673 * descriptor for read, so join the
674 * queue of threads waiting for a
675 * read lock on this file descriptor:
677 FDQ_INSERT(&_thread_fd_table[fd]->r_queue, curthread);
680 * Save the file descriptor details
681 * in the thread structure for the
684 curthread->data.fd.fd = fd;
685 curthread->data.fd.branch = lineno;
686 curthread->data.fd.fname = fname;
688 /* Set the timeout: */
689 _thread_kern_set_timeout(timeout);
692 * Unlock the file descriptor
/* Entry lock must be dropped before blocking in the scheduler. */
695 _SPINUNLOCK(&_thread_fd_table[fd]->lock);
698 * Schedule this thread to wait on
699 * the read lock. It will only be
700 * woken when it becomes the next in
701 * the queue and is granted access
702 * to the lock by the thread
703 * that is unlocking the file
706 _thread_kern_sched_state(PS_FDLR_WAIT, __FILE__, __LINE__);
709 * Lock the file descriptor
712 _SPINLOCK(&_thread_fd_table[fd]->lock);
714 if (curthread->interrupted != 0) {
715 FDQ_REMOVE(&_thread_fd_table[fd]->r_queue,
720 * The running thread now owns the
721 * read lock on this file descriptor:
723 _thread_fd_table[fd]->r_owner = curthread;
726 * Reset the number of read locks for
727 * this file descriptor:
729 _thread_fd_table[fd]->r_lockcount = 0;
732 * Save the source file details for
735 _thread_fd_table[fd]->r_fname = fname;
736 _thread_fd_table[fd]->r_lineno = lineno;
740 if (_thread_fd_table[fd]->r_owner == curthread)
741 /* Increment the read lock count: */
742 _thread_fd_table[fd]->r_lockcount++;
745 /* Check the file descriptor and lock types: */
746 if (curthread->interrupted == 0 &&
747 (lock_type == FD_WRITE || lock_type == FD_RDWR)) {
749 * Wait for the file descriptor to be locked
750 * for write for the current thread:
752 while ((_thread_fd_table[fd]->w_owner != curthread) &&
753 (curthread->interrupted == 0)) {
755 * Check if the file descriptor is locked by
758 if (_thread_fd_table[fd]->w_owner != NULL) {
760 * Another thread has locked the file
761 * descriptor for write, so join the
762 * queue of threads waiting for a
763 * write lock on this file
766 FDQ_INSERT(&_thread_fd_table[fd]->w_queue, curthread);
769 * Save the file descriptor details
770 * in the thread structure for the
773 curthread->data.fd.fd = fd;
774 curthread->data.fd.branch = lineno;
775 curthread->data.fd.fname = fname;
777 /* Set the timeout: */
778 _thread_kern_set_timeout(timeout);
781 * Unlock the file descriptor
784 _SPINUNLOCK(&_thread_fd_table[fd]->lock);
787 * Schedule this thread to wait on
788 * the write lock. It will only be
789 * woken when it becomes the next in
790 * the queue and is granted access to
791 * the lock by the thread that is
792 * unlocking the file descriptor.
794 _thread_kern_sched_state(PS_FDLW_WAIT, __FILE__, __LINE__);
797 * Lock the file descriptor
800 _SPINLOCK(&_thread_fd_table[fd]->lock);
802 if (curthread->interrupted != 0) {
803 FDQ_REMOVE(&_thread_fd_table[fd]->w_queue,
808 * The running thread now owns the
809 * write lock on this file
812 _thread_fd_table[fd]->w_owner = curthread;
815 * Reset the number of write locks
816 * for this file descriptor:
818 _thread_fd_table[fd]->w_lockcount = 0;
821 * Save the source file details for
824 _thread_fd_table[fd]->w_fname = fname;
825 _thread_fd_table[fd]->w_lineno = lineno;
829 if (_thread_fd_table[fd]->w_owner == curthread)
830 /* Increment the write lock count: */
831 _thread_fd_table[fd]->w_lockcount++;
834 /* Unlock the file descriptor table entry: */
835 _SPINUNLOCK(&_thread_fd_table[fd]->lock);
837 if (curthread->interrupted != 0) {
/* An interrupted wait runs the thread's pending continuation (e.g. cancellation). */
840 if (curthread->continuation != NULL)
841 curthread->continuation((void *)curthread);
845 /* Return the completion status: */
/*
 * Strip pthread of every fd read/write lock it owns (used when a thread
 * exits or is cancelled): scan the whole descriptor table and, for each
 * entry owned by pthread, zero the lock count and hand ownership to the
 * next queued waiter, waking it.  Note the handoff test here is
 * `!= NULL` -- the opposite polarity from _thread_fd_unlock(), because
 * this loop has no recursive-count bookkeeping to short-circuit.
 */
850 _thread_fd_unlock_owned(pthread_t pthread)
854 for (fd = 0; fd < _thread_dtablesize; fd++) {
855 if ((_thread_fd_table[fd] != NULL) &&
856 ((_thread_fd_table[fd]->r_owner == pthread) ||
857 (_thread_fd_table[fd]->w_owner == pthread))) {
859 * Defer signals to protect the scheduling queues
860 * from access by the signal handler:
862 _thread_kern_sig_defer();
865 * Lock the file descriptor table entry to prevent
866 * other threads for clashing with the current
869 _SPINLOCK(&_thread_fd_table[fd]->lock);
871 /* Check if the thread owns the read lock: */
872 if (_thread_fd_table[fd]->r_owner == pthread) {
873 /* Clear the read lock count: */
874 _thread_fd_table[fd]->r_lockcount = 0;
877 * Get the next thread in the queue for a
878 * read lock on this file descriptor:
880 if ((_thread_fd_table[fd]->r_owner = fd_next_reader(fd)) != NULL) {
881 /* Remove this thread from the queue: */
882 FDQ_REMOVE(&_thread_fd_table[fd]->r_queue,
883 _thread_fd_table[fd]->r_owner);
886 * Set the state of the new owner of
887 * the thread to running:
889 PTHREAD_NEW_STATE(_thread_fd_table[fd]->r_owner,PS_RUNNING);
893 /* Check if the thread owns the write lock: */
894 if (_thread_fd_table[fd]->w_owner == pthread) {
895 /* Clear the write lock count: */
896 _thread_fd_table[fd]->w_lockcount = 0;
899 * Get the next thread in the queue for a
900 * write lock on this file descriptor:
902 if ((_thread_fd_table[fd]->w_owner = fd_next_writer(fd)) != NULL) {
903 /* Remove this thread from the queue: */
904 FDQ_REMOVE(&_thread_fd_table[fd]->w_queue,
905 _thread_fd_table[fd]->w_owner);
908 * Set the state of the new owner of
909 * the thread to running:
911 PTHREAD_NEW_STATE(_thread_fd_table[fd]->w_owner,PS_RUNNING);
916 /* Unlock the file descriptor table entry: */
917 _SPINUNLOCK(&_thread_fd_table[fd]->lock);
920 * Undefer and handle pending signals, yielding if
923 _thread_kern_sig_undefer();
/*
 * Back a thread out of an fd-lock wait: if pthread is blocked in
 * PS_FDLR_WAIT or PS_FDLW_WAIT (the case labels fall on elided lines),
 * remove it from the corresponding read or write wait queue under the
 * entry's spinlock.  FDQ_REMOVE is a no-op if the thread is not queued.
 */
929 _fd_lock_backout(pthread_t pthread)
934 * Defer signals to protect the scheduling queues
935 * from access by the signal handler:
937 _thread_kern_sig_defer();
939 switch (pthread->state) {
942 fd = pthread->data.fd.fd;
945 * Lock the file descriptor table entry to prevent
946 * other threads for clashing with the current
949 _SPINLOCK(&_thread_fd_table[fd]->lock);
951 /* Remove the thread from the waiting queue: */
952 FDQ_REMOVE(&_thread_fd_table[fd]->r_queue, pthread);
956 fd = pthread->data.fd.fd;
959 * Lock the file descriptor table entry to prevent
960 * other threads from clashing with the current
963 _SPINLOCK(&_thread_fd_table[fd]->lock);
965 /* Remove the thread from the waiting queue: */
966 FDQ_REMOVE(&_thread_fd_table[fd]->w_queue, pthread);
974 * Undefer and handle pending signals, yielding if
977 _thread_kern_sig_undefer();
/*
 * Return the first thread on fd's read-wait queue that has not been
 * interrupted/cancelled, discarding interrupted entries from the queue
 * as it goes.  Returns NULL (on an elided line) when the queue drains.
 */
980 static inline pthread_t
981 fd_next_reader(int fd)
985 while (((pthread = TAILQ_FIRST(&_thread_fd_table[fd]->r_queue)) != NULL) &&
986 (pthread->interrupted != 0)) {
988 * This thread has either been interrupted by a signal or
989 * it has been canceled. Remove it from the queue.
991 FDQ_REMOVE(&_thread_fd_table[fd]->r_queue, pthread);
/*
 * Write-queue counterpart of fd_next_reader(): return the first
 * non-interrupted waiter on fd's write-wait queue, pruning interrupted
 * entries along the way.
 */
997 static inline pthread_t
998 fd_next_writer(int fd)
1002 while (((pthread = TAILQ_FIRST(&_thread_fd_table[fd]->w_queue)) != NULL) &&
1003 (pthread->interrupted != 0)) {
1005 * This thread has either been interrupted by a signal or
1006 * it has been canceled. Remove it from the queue.
1008 FDQ_REMOVE(&_thread_fd_table[fd]->w_queue, pthread);
/*
 * !_FDLOCKS_ENABLED stubs: with per-fd locking compiled out, unlock
 * becomes a no-op and lock reduces to ensuring the table entry exists
 * via _thread_fd_table_init().  The surrounding #else/#endif and the
 * empty unlock bodies fall on elided lines.
 */
1017 _thread_fd_unlock(int fd, int lock_type)
1022 _thread_fd_lock(int fd, int lock_type, struct timespec * timeout)
1025 * Insure that the file descriptor table is initialized for this
1028 return (_thread_fd_table_init(fd));
1032 _thread_fd_unlock_debug(int fd, int lock_type, char *fname, int lineno)
1037 _thread_fd_lock_debug(int fd, int lock_type, struct timespec * timeout,
1038 char *fname, int lineno)
1041 * Insure that the file descriptor table is initialized for this
1044 return (_thread_fd_table_init(fd));
1048 _thread_fd_unlock_owned(pthread_t pthread)
1053 _fd_lock_backout(pthread_t pthread)