2 * Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by John Birrell.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * $FreeBSD: src/lib/libc_r/uthread/uthread_fd.c,v 1.16.2.7 2002/10/22 14:44:03 fjoe Exp $
33 * $DragonFly: src/lib/libc_r/uthread/uthread_fd.c,v 1.2 2003/06/17 04:26:48 dillon Exp $
41 #include "pthread_private.h"
/*
 * Append thread p to fd wait queue q and flag it as queued
 * (PTHREAD_FLAGS_IN_FDQ) so FDQ_REMOVE can tell whether it is
 * actually on a queue.  NOTE(review): the surrounding do/while(0)
 * wrapper lines are not visible in this view of the file.
 */
43 #define FDQ_INSERT(q,p) \
45 TAILQ_INSERT_TAIL(q,p,qe); \
46 p->flags |= PTHREAD_FLAGS_IN_FDQ; \
/*
 * Remove thread p from fd wait queue q, but only if the
 * PTHREAD_FLAGS_IN_FDQ flag says it is actually queued — this makes
 * the macro safe to call unconditionally (e.g. from backout paths).
 * NOTE(review): the closing brace / do-while wrapper lines are not
 * visible in this view of the file.
 */
49 #define FDQ_REMOVE(q,p) \
51 if ((p->flags & PTHREAD_FLAGS_IN_FDQ) != 0) { \
52 TAILQ_REMOVE(q,p,qe); \
53 p->flags &= ~PTHREAD_FLAGS_IN_FDQ; \
58 /* Static variables: */
/* Spinlock serializing installation of entries into _thread_fd_table. */
59 static spinlock_t fd_table_lock = _SPINLOCK_INITIALIZER;
62 #ifdef _FDLOCKS_ENABLED
/*
 * Forward declarations: scan an fd's read/write wait queue for the
 * next thread that has not been interrupted/canceled.
 */
63 static inline pthread_t fd_next_reader(int fd);
64 static inline pthread_t fd_next_writer(int fd);
69 * This function *must* return -1 and set the thread specific errno
70 * as a system call. This is because the error return from this
71 * function is propagated directly back from thread-wrapped system
/*
 * Lazily allocate and initialise the fd_table_entry for descriptor fd.
 * Safe against concurrent callers: the entry is built unlocked (the
 * work can recurse), then installed under fd_table_lock; the loser
 * discards its copy.  NOTE(review): the return-type line, braces and
 * the errno/return statements are missing from this view of the file.
 */
76 _thread_fd_table_init(int fd)
79 struct fd_table_entry *entry;
/* Nothing to do before the thread library has been initialised. */
82 if (_thread_initial == NULL)
85 /* Check if the file descriptor is out of range: */
86 if (fd < 0 || fd >= _thread_dtablesize) {
87 /* Return a bad file descriptor error: */
93 * Check if memory has already been allocated for this file
96 else if (_thread_fd_table[fd] != NULL) {
97 /* Memory has already been allocated. */
99 /* Allocate memory for the file descriptor table entry: */
100 } else if ((entry = (struct fd_table_entry *)
101 malloc(sizeof(struct fd_table_entry))) == NULL) {
102 /* Return an insufficient memory error: */
106 /* Initialise the file locks: */
107 memset(&entry->lock, 0, sizeof(entry->lock));
108 entry->r_owner = NULL;
109 entry->w_owner = NULL;
110 entry->r_fname = NULL;
111 entry->w_fname = NULL;
114 entry->r_lockcount = 0;
115 entry->w_lockcount = 0;
117 /* Initialise the read/write queues: */
118 TAILQ_INIT(&entry->r_queue);
119 TAILQ_INIT(&entry->w_queue);
121 /* Get the flags for the file: */
122 if (((fd >= 3) || (_pthread_stdio_flags[fd] == -1)) &&
123 (entry->flags = __sys_fcntl(fd, F_GETFL, 0)) == -1) {
127 /* Check if a stdio descriptor: */
128 if ((fd < 3) && (_pthread_stdio_flags[fd] != -1))
130 * Use the stdio flags read by
131 * _pthread_init() to avoid
132 * mistaking the non-blocking
133 * flag that, when set on one
134 * stdio fd, is set on all stdio
137 entry->flags = _pthread_stdio_flags[fd];
140 * Make the file descriptor non-blocking.
141 * This might fail if the device driver does
142 * not support non-blocking calls, or if the
143 * driver is naturally non-blocking.
/* Return value deliberately ignored — best effort (see above). */
146 __sys_fcntl(fd, F_SETFL,
147 entry->flags | O_NONBLOCK);
150 /* Lock the file descriptor table: */
151 _SPINLOCK(&fd_table_lock);
154 * Check if another thread allocated the
155 * file descriptor entry while this thread
156 * was doing the same thing. The table wasn't
157 * kept locked during this operation because
158 * it has the potential to recurse.
160 if (_thread_fd_table[fd] == NULL) {
161 /* This thread wins: */
162 _thread_fd_table[fd] = entry;
166 /* Unlock the file descriptor table: */
167 _SPINUNLOCK(&fd_table_lock);
171 * Check if another thread initialised the table entry
172 * before this one could:
176 * Throw away the table entry that this thread
177 * prepared. The other thread wins.
182 /* Return the completion status: */
/*
 * Return the cached fcntl-style flags for fd from the fd table.
 * NOTE(review): the error path for an uninitialised entry (and the
 * function's return-type line) is missing from this view of the file.
 */
187 _thread_fd_getflags(int fd)
189 if (_thread_fd_table[fd] != NULL)
190 return (_thread_fd_table[fd]->flags);
/*
 * Update the cached flags for fd; silently a no-op when the table
 * entry has not been initialised.  NOTE(review): return-type line and
 * braces are missing from this view of the file.
 */
196 _thread_fd_setflags(int fd, int flags)
198 if (_thread_fd_table[fd] != NULL)
199 _thread_fd_table[fd]->flags = flags;
202 #ifdef _FDLOCKS_ENABLED
/*
 * Release the calling thread's read and/or write lock (per lock_type:
 * FD_READ, FD_WRITE or FD_RDWR) on fd.  When the recursive lock count
 * drops, ownership is handed to the next queued waiter, which is made
 * runnable.  Signals are deferred for the duration to keep the
 * scheduling queues consistent.  NOTE(review): return type, braces and
 * several statements are missing from this view of the file.
 */
204 _thread_fd_unlock(int fd, int lock_type)
206 struct pthread *curthread = _get_curthread();
210 * Check that the file descriptor table is initialised for this
213 if ((ret = _thread_fd_table_init(fd)) == 0) {
215 * Defer signals to protect the scheduling queues from
216 * access by the signal handler:
218 _thread_kern_sig_defer();
221 * Lock the file descriptor table entry to prevent
222 * other threads for clashing with the current
225 _SPINLOCK(&_thread_fd_table[fd]->lock);
227 /* Check if the running thread owns the read lock: */
228 if (_thread_fd_table[fd]->r_owner == curthread) {
229 /* Check the file descriptor and lock types: */
230 if (lock_type == FD_READ || lock_type == FD_RDWR) {
232 * Decrement the read lock count for the
235 _thread_fd_table[fd]->r_lockcount--;
238 * Check if the running thread still has read
239 * locks on this file descriptor:
241 if (_thread_fd_table[fd]->r_lockcount != 0) {
244 * Get the next thread in the queue for a
245 * read lock on this file descriptor:
247 else if ((_thread_fd_table[fd]->r_owner = fd_next_reader(fd)) == NULL) {
249 /* Remove this thread from the queue: */
250 FDQ_REMOVE(&_thread_fd_table[fd]->r_queue,
251 _thread_fd_table[fd]->r_owner);
254 * Set the state of the new owner of
255 * the thread to running:
257 PTHREAD_NEW_STATE(_thread_fd_table[fd]->r_owner,PS_RUNNING);
260 * Reset the number of read locks.
261 * This will be incremented by the
262 * new owner of the lock when it sees
263 * that it has the lock.
265 _thread_fd_table[fd]->r_lockcount = 0;
269 /* Check if the running thread owns the write lock: */
270 if (_thread_fd_table[fd]->w_owner == curthread) {
271 /* Check the file descriptor and lock types: */
272 if (lock_type == FD_WRITE || lock_type == FD_RDWR) {
274 * Decrement the write lock count for the
277 _thread_fd_table[fd]->w_lockcount--;
280 * Check if the running thread still has
281 * write locks on this file descriptor:
283 if (_thread_fd_table[fd]->w_lockcount != 0) {
286 * Get the next thread in the queue for a
287 * write lock on this file descriptor:
289 else if ((_thread_fd_table[fd]->w_owner = fd_next_writer(fd)) == NULL) {
291 /* Remove this thread from the queue: */
292 FDQ_REMOVE(&_thread_fd_table[fd]->w_queue,
293 _thread_fd_table[fd]->w_owner);
296 * Set the state of the new owner of
297 * the thread to running:
299 PTHREAD_NEW_STATE(_thread_fd_table[fd]->w_owner,PS_RUNNING);
302 * Reset the number of write locks.
303 * This will be incremented by the
304 * new owner of the lock when it
305 * sees that it has the lock.
307 _thread_fd_table[fd]->w_lockcount = 0;
312 /* Unlock the file descriptor table entry: */
313 _SPINUNLOCK(&_thread_fd_table[fd]->lock);
316 * Undefer and handle pending signals, yielding if
319 _thread_kern_sig_undefer();
/*
 * Acquire a read and/or write lock (per lock_type) on fd for the
 * calling thread, optionally bounded by timeout (NULL = wait forever).
 * If another thread owns the lock, the caller joins the fd's wait
 * queue and blocks in PS_FDLR_WAIT / PS_FDLW_WAIT until handed the
 * lock by the unlocking thread, or until interrupted.  Lock counts are
 * recursive.  NOTE(review): return type, braces and the interrupted /
 * error return statements are missing from this view of the file.
 */
324 _thread_fd_lock(int fd, int lock_type, struct timespec * timeout)
326 struct pthread *curthread = _get_curthread();
330 * Check that the file descriptor table is initialised for this
333 if ((ret = _thread_fd_table_init(fd)) == 0) {
334 /* Clear the interrupted flag: */
335 curthread->interrupted = 0;
338 * Lock the file descriptor table entry to prevent
339 * other threads for clashing with the current
342 _SPINLOCK(&_thread_fd_table[fd]->lock);
344 /* Check the file descriptor and lock types: */
345 if (lock_type == FD_READ || lock_type == FD_RDWR) {
347 * Wait for the file descriptor to be locked
348 * for read for the current thread:
350 while ((_thread_fd_table[fd]->r_owner != curthread) &&
351 (curthread->interrupted == 0)) {
353 * Check if the file descriptor is locked by
356 if (_thread_fd_table[fd]->r_owner != NULL) {
358 * Another thread has locked the file
359 * descriptor for read, so join the
360 * queue of threads waiting for a
361 * read lock on this file descriptor:
363 FDQ_INSERT(&_thread_fd_table[fd]->r_queue, curthread);
366 * Save the file descriptor details
367 * in the thread structure for the
370 curthread->data.fd.fd = fd;
372 /* Set the timeout: */
373 _thread_kern_set_timeout(timeout);
376 * Unlock the file descriptor
/* (entry spinlock must be dropped before sleeping) */
379 _SPINUNLOCK(&_thread_fd_table[fd]->lock);
382 * Schedule this thread to wait on
383 * the read lock. It will only be
384 * woken when it becomes the next in
385 * the queue and is granted access
386 * to the lock by the thread
387 * that is unlocking the file
390 _thread_kern_sched_state(PS_FDLR_WAIT, __FILE__, __LINE__);
393 * Lock the file descriptor
396 _SPINLOCK(&_thread_fd_table[fd]->lock);
/* Interrupted while waiting: leave the read queue. */
398 if (curthread->interrupted != 0) {
399 FDQ_REMOVE(&_thread_fd_table[fd]->r_queue,
404 * The running thread now owns the
405 * read lock on this file descriptor:
407 _thread_fd_table[fd]->r_owner = curthread;
410 * Reset the number of read locks for
411 * this file descriptor:
413 _thread_fd_table[fd]->r_lockcount = 0;
417 if (_thread_fd_table[fd]->r_owner == curthread)
418 /* Increment the read lock count: */
419 _thread_fd_table[fd]->r_lockcount++;
422 /* Check the file descriptor and lock types: */
423 if (curthread->interrupted == 0 &&
424 (lock_type == FD_WRITE || lock_type == FD_RDWR)) {
426 * Wait for the file descriptor to be locked
427 * for write for the current thread:
429 while ((_thread_fd_table[fd]->w_owner != curthread) &&
430 (curthread->interrupted == 0)) {
432 * Check if the file descriptor is locked by
435 if (_thread_fd_table[fd]->w_owner != NULL) {
437 * Another thread has locked the file
438 * descriptor for write, so join the
439 * queue of threads waiting for a
440 * write lock on this file
443 FDQ_INSERT(&_thread_fd_table[fd]->w_queue, curthread);
446 * Save the file descriptor details
447 * in the thread structure for the
450 curthread->data.fd.fd = fd;
452 /* Set the timeout: */
453 _thread_kern_set_timeout(timeout);
456 * Unlock the file descriptor
459 _SPINUNLOCK(&_thread_fd_table[fd]->lock);
462 * Schedule this thread to wait on
463 * the write lock. It will only be
464 * woken when it becomes the next in
465 * the queue and is granted access to
466 * the lock by the thread that is
467 * unlocking the file descriptor.
469 _thread_kern_sched_state(PS_FDLW_WAIT, __FILE__, __LINE__);
472 * Lock the file descriptor
475 _SPINLOCK(&_thread_fd_table[fd]->lock);
/* Interrupted while waiting: leave the write queue. */
477 if (curthread->interrupted != 0) {
478 FDQ_REMOVE(&_thread_fd_table[fd]->w_queue,
483 * The running thread now owns the
484 * write lock on this file
487 _thread_fd_table[fd]->w_owner = curthread;
490 * Reset the number of write locks
491 * for this file descriptor:
493 _thread_fd_table[fd]->w_lockcount = 0;
497 if (_thread_fd_table[fd]->w_owner == curthread)
498 /* Increment the write lock count: */
499 _thread_fd_table[fd]->w_lockcount++;
502 /* Unlock the file descriptor table entry: */
503 _SPINUNLOCK(&_thread_fd_table[fd]->lock);
/* Run any cancellation/interrupt continuation before returning. */
505 if (curthread->interrupted != 0) {
508 if (curthread->continuation != NULL)
509 curthread->continuation((void *)curthread);
513 /* Return the completion status: */
/*
 * Debug variant of _thread_fd_unlock(): identical lock-release and
 * ownership-handoff logic, but takes the entry spinlock via
 * _spinlock_debug() so the caller's fname/lineno are recorded for
 * deadlock diagnosis.  NOTE(review): return type, braces and several
 * statements are missing from this view of the file.
 */
518 _thread_fd_unlock_debug(int fd, int lock_type, char *fname, int lineno)
520 struct pthread *curthread = _get_curthread();
524 * Check that the file descriptor table is initialised for this
527 if ((ret = _thread_fd_table_init(fd)) == 0) {
529 * Defer signals to protect the scheduling queues from
530 * access by the signal handler:
532 _thread_kern_sig_defer();
535 * Lock the file descriptor table entry to prevent
536 * other threads for clashing with the current
539 _spinlock_debug(&_thread_fd_table[fd]->lock, fname, lineno);
541 /* Check if the running thread owns the read lock: */
542 if (_thread_fd_table[fd]->r_owner == curthread) {
543 /* Check the file descriptor and lock types: */
544 if (lock_type == FD_READ || lock_type == FD_RDWR) {
546 * Decrement the read lock count for the
549 _thread_fd_table[fd]->r_lockcount--;
552 * Check if the running thread still has read
553 * locks on this file descriptor:
555 if (_thread_fd_table[fd]->r_lockcount != 0) {
558 * Get the next thread in the queue for a
559 * read lock on this file descriptor:
561 else if ((_thread_fd_table[fd]->r_owner = fd_next_reader(fd)) == NULL) {
563 /* Remove this thread from the queue: */
564 FDQ_REMOVE(&_thread_fd_table[fd]->r_queue,
565 _thread_fd_table[fd]->r_owner);
568 * Set the state of the new owner of
569 * the thread to running:
571 PTHREAD_NEW_STATE(_thread_fd_table[fd]->r_owner,PS_RUNNING);
574 * Reset the number of read locks.
575 * This will be incremented by the
576 * new owner of the lock when it sees
577 * that it has the lock.
579 _thread_fd_table[fd]->r_lockcount = 0;
583 /* Check if the running thread owns the write lock: */
584 if (_thread_fd_table[fd]->w_owner == curthread) {
585 /* Check the file descriptor and lock types: */
586 if (lock_type == FD_WRITE || lock_type == FD_RDWR) {
588 * Decrement the write lock count for the
591 _thread_fd_table[fd]->w_lockcount--;
594 * Check if the running thread still has
595 * write locks on this file descriptor:
597 if (_thread_fd_table[fd]->w_lockcount != 0) {
600 * Get the next thread in the queue for a
601 * write lock on this file descriptor:
603 else if ((_thread_fd_table[fd]->w_owner = fd_next_writer(fd)) == NULL) {
605 /* Remove this thread from the queue: */
606 FDQ_REMOVE(&_thread_fd_table[fd]->w_queue,
607 _thread_fd_table[fd]->w_owner);
610 * Set the state of the new owner of
611 * the thread to running:
613 PTHREAD_NEW_STATE(_thread_fd_table[fd]->w_owner,PS_RUNNING);
616 * Reset the number of write locks.
617 * This will be incremented by the
618 * new owner of the lock when it
619 * sees that it has the lock.
621 _thread_fd_table[fd]->w_lockcount = 0;
626 /* Unlock the file descriptor table entry: */
627 _SPINUNLOCK(&_thread_fd_table[fd]->lock);
630 * Undefer and handle pending signals, yielding if
633 _thread_kern_sig_undefer();
/*
 * Debug variant of _thread_fd_lock(): same acquire/queue/sleep logic,
 * but additionally records the caller's fname/lineno in the thread's
 * fd data and in the table entry's r_fname/r_lineno (w_fname/w_lineno)
 * for diagnosis.  NOTE(review): return type, braces and the
 * interrupted / error return statements are missing from this view of
 * the file.
 */
638 _thread_fd_lock_debug(int fd, int lock_type, struct timespec * timeout,
639 char *fname, int lineno)
641 struct pthread *curthread = _get_curthread();
645 * Check that the file descriptor table is initialised for this
648 if ((ret = _thread_fd_table_init(fd)) == 0) {
649 /* Clear the interrupted flag: */
650 curthread->interrupted = 0;
653 * Lock the file descriptor table entry to prevent
654 * other threads for clashing with the current
657 _spinlock_debug(&_thread_fd_table[fd]->lock, fname, lineno);
659 /* Check the file descriptor and lock types: */
660 if (lock_type == FD_READ || lock_type == FD_RDWR) {
662 * Wait for the file descriptor to be locked
663 * for read for the current thread:
665 while ((_thread_fd_table[fd]->r_owner != curthread) &&
666 (curthread->interrupted == 0)) {
668 * Check if the file descriptor is locked by
671 if (_thread_fd_table[fd]->r_owner != NULL) {
673 * Another thread has locked the file
674 * descriptor for read, so join the
675 * queue of threads waiting for a
676 * read lock on this file descriptor:
678 FDQ_INSERT(&_thread_fd_table[fd]->r_queue, curthread);
681 * Save the file descriptor details
682 * in the thread structure for the
685 curthread->data.fd.fd = fd;
686 curthread->data.fd.branch = lineno;
687 curthread->data.fd.fname = fname;
689 /* Set the timeout: */
690 _thread_kern_set_timeout(timeout);
693 * Unlock the file descriptor
696 _SPINUNLOCK(&_thread_fd_table[fd]->lock);
699 * Schedule this thread to wait on
700 * the read lock. It will only be
701 * woken when it becomes the next in
702 * the queue and is granted access
703 * to the lock by the thread
704 * that is unlocking the file
707 _thread_kern_sched_state(PS_FDLR_WAIT, __FILE__, __LINE__);
710 * Lock the file descriptor
713 _SPINLOCK(&_thread_fd_table[fd]->lock);
/* Interrupted while waiting: leave the read queue. */
715 if (curthread->interrupted != 0) {
716 FDQ_REMOVE(&_thread_fd_table[fd]->r_queue,
721 * The running thread now owns the
722 * read lock on this file descriptor:
724 _thread_fd_table[fd]->r_owner = curthread;
727 * Reset the number of read locks for
728 * this file descriptor:
730 _thread_fd_table[fd]->r_lockcount = 0;
733 * Save the source file details for
736 _thread_fd_table[fd]->r_fname = fname;
737 _thread_fd_table[fd]->r_lineno = lineno;
741 if (_thread_fd_table[fd]->r_owner == curthread)
742 /* Increment the read lock count: */
743 _thread_fd_table[fd]->r_lockcount++;
746 /* Check the file descriptor and lock types: */
747 if (curthread->interrupted == 0 &&
748 (lock_type == FD_WRITE || lock_type == FD_RDWR)) {
750 * Wait for the file descriptor to be locked
751 * for write for the current thread:
753 while ((_thread_fd_table[fd]->w_owner != curthread) &&
754 (curthread->interrupted == 0)) {
756 * Check if the file descriptor is locked by
759 if (_thread_fd_table[fd]->w_owner != NULL) {
761 * Another thread has locked the file
762 * descriptor for write, so join the
763 * queue of threads waiting for a
764 * write lock on this file
767 FDQ_INSERT(&_thread_fd_table[fd]->w_queue, curthread);
770 * Save the file descriptor details
771 * in the thread structure for the
774 curthread->data.fd.fd = fd;
775 curthread->data.fd.branch = lineno;
776 curthread->data.fd.fname = fname;
778 /* Set the timeout: */
779 _thread_kern_set_timeout(timeout);
782 * Unlock the file descriptor
785 _SPINUNLOCK(&_thread_fd_table[fd]->lock);
788 * Schedule this thread to wait on
789 * the write lock. It will only be
790 * woken when it becomes the next in
791 * the queue and is granted access to
792 * the lock by the thread that is
793 * unlocking the file descriptor.
795 _thread_kern_sched_state(PS_FDLW_WAIT, __FILE__, __LINE__);
798 * Lock the file descriptor
801 _SPINLOCK(&_thread_fd_table[fd]->lock);
/* Interrupted while waiting: leave the write queue. */
803 if (curthread->interrupted != 0) {
804 FDQ_REMOVE(&_thread_fd_table[fd]->w_queue,
809 * The running thread now owns the
810 * write lock on this file
813 _thread_fd_table[fd]->w_owner = curthread;
816 * Reset the number of write locks
817 * for this file descriptor:
819 _thread_fd_table[fd]->w_lockcount = 0;
822 * Save the source file details for
825 _thread_fd_table[fd]->w_fname = fname;
826 _thread_fd_table[fd]->w_lineno = lineno;
830 if (_thread_fd_table[fd]->w_owner == curthread)
831 /* Increment the write lock count: */
832 _thread_fd_table[fd]->w_lockcount++;
835 /* Unlock the file descriptor table entry: */
836 _SPINUNLOCK(&_thread_fd_table[fd]->lock);
/* Run any cancellation/interrupt continuation before returning. */
838 if (curthread->interrupted != 0) {
841 if (curthread->continuation != NULL)
842 curthread->continuation((void *)curthread);
846 /* Return the completion status: */
/*
 * Release every fd read/write lock owned by pthread — used when a
 * thread exits or is canceled while holding fd locks.  Scans the whole
 * fd table; for each entry owned by pthread it zeroes the lock count
 * and hands ownership to the next eligible waiter, making it runnable.
 * NOTE(review): return type, braces and loop-local declarations are
 * missing from this view of the file.
 */
851 _thread_fd_unlock_owned(pthread_t pthread)
855 for (fd = 0; fd < _thread_dtablesize; fd++) {
856 if ((_thread_fd_table[fd] != NULL) &&
857 ((_thread_fd_table[fd]->r_owner == pthread) ||
858 (_thread_fd_table[fd]->w_owner == pthread))) {
860 * Defer signals to protect the scheduling queues
861 * from access by the signal handler:
863 _thread_kern_sig_defer();
866 * Lock the file descriptor table entry to prevent
867 * other threads for clashing with the current
870 _SPINLOCK(&_thread_fd_table[fd]->lock);
872 /* Check if the thread owns the read lock: */
873 if (_thread_fd_table[fd]->r_owner == pthread) {
874 /* Clear the read lock count: */
875 _thread_fd_table[fd]->r_lockcount = 0;
878 * Get the next thread in the queue for a
879 * read lock on this file descriptor:
881 if ((_thread_fd_table[fd]->r_owner = fd_next_reader(fd)) != NULL) {
882 /* Remove this thread from the queue: */
883 FDQ_REMOVE(&_thread_fd_table[fd]->r_queue,
884 _thread_fd_table[fd]->r_owner);
887 * Set the state of the new owner of
888 * the thread to running:
890 PTHREAD_NEW_STATE(_thread_fd_table[fd]->r_owner,PS_RUNNING);
894 /* Check if the thread owns the write lock: */
895 if (_thread_fd_table[fd]->w_owner == pthread) {
896 /* Clear the write lock count: */
897 _thread_fd_table[fd]->w_lockcount = 0;
900 * Get the next thread in the queue for a
901 * write lock on this file descriptor:
903 if ((_thread_fd_table[fd]->w_owner = fd_next_writer(fd)) != NULL) {
904 /* Remove this thread from the queue: */
905 FDQ_REMOVE(&_thread_fd_table[fd]->w_queue,
906 _thread_fd_table[fd]->w_owner);
909 * Set the state of the new owner of
910 * the thread to running:
912 PTHREAD_NEW_STATE(_thread_fd_table[fd]->w_owner,PS_RUNNING);
917 /* Unlock the file descriptor table entry: */
918 _SPINUNLOCK(&_thread_fd_table[fd]->lock);
921 * Undefer and handle pending signals, yielding if
924 _thread_kern_sig_undefer();
/*
 * Back a thread out of a pending fd-lock wait: if pthread is blocked
 * in an fd read- or write-lock wait state, remove it from the
 * corresponding wait queue so it can be interrupted/canceled safely.
 * NOTE(review): the switch-case labels (PS_FDLR_WAIT / PS_FDLW_WAIT,
 * per the queues touched), matching unlocks and braces are missing
 * from this view of the file.
 */
930 _fd_lock_backout(pthread_t pthread)
935 * Defer signals to protect the scheduling queues
936 * from access by the signal handler:
938 _thread_kern_sig_defer();
940 switch (pthread->state) {
943 fd = pthread->data.fd.fd;
946 * Lock the file descriptor table entry to prevent
947 * other threads for clashing with the current
950 _SPINLOCK(&_thread_fd_table[fd]->lock);
952 /* Remove the thread from the waiting queue: */
953 FDQ_REMOVE(&_thread_fd_table[fd]->r_queue, pthread);
957 fd = pthread->data.fd.fd;
960 * Lock the file descriptor table entry to prevent
961 * other threads from clashing with the current
964 _SPINLOCK(&_thread_fd_table[fd]->lock);
966 /* Remove the thread from the waiting queue: */
967 FDQ_REMOVE(&_thread_fd_table[fd]->w_queue, pthread);
975 * Undefer and handle pending signals, yielding if
978 _thread_kern_sig_undefer();
/*
 * Return the first thread on fd's read wait queue that has not been
 * interrupted/canceled, discarding interrupted waiters as it scans.
 * NOTE(review): the final return statement and braces are missing from
 * this view of the file; caller is expected to hold the entry lock.
 */
981 static inline pthread_t
982 fd_next_reader(int fd)
986 while (((pthread = TAILQ_FIRST(&_thread_fd_table[fd]->r_queue)) != NULL) &&
987 (pthread->interrupted != 0)) {
989 * This thread has either been interrupted by a signal or
990 * it has been canceled. Remove it from the queue.
992 FDQ_REMOVE(&_thread_fd_table[fd]->r_queue, pthread);
/*
 * Write-queue counterpart of fd_next_reader(): return the first
 * non-interrupted waiter on fd's write queue, pruning interrupted
 * threads.  NOTE(review): the final return statement and braces are
 * missing from this view of the file.
 */
998 static inline pthread_t
999 fd_next_writer(int fd)
1003 while (((pthread = TAILQ_FIRST(&_thread_fd_table[fd]->w_queue)) != NULL) &&
1004 (pthread->interrupted != 0)) {
1006 * This thread has either been interrupted by a signal or
1007 * it has been canceled. Remove it from the queue.
1009 FDQ_REMOVE(&_thread_fd_table[fd]->w_queue, pthread);
/*
 * Stubs used when _FDLOCKS_ENABLED is not defined: fd locking becomes
 * a no-op, but the lock entry points still ensure the fd table entry
 * exists (so cached flags etc. remain available).  NOTE(review): the
 * #else/#endif directives, return types and function braces are
 * missing from this view of the file.
 */
1018 _thread_fd_unlock(int fd, int lock_type)
1023 _thread_fd_lock(int fd, int lock_type, struct timespec * timeout)
1026 * Insure that the file descriptor table is initialized for this
1029 return (_thread_fd_table_init(fd));
1033 _thread_fd_unlock_debug(int fd, int lock_type, char *fname, int lineno)
1038 _thread_fd_lock_debug(int fd, int lock_type, struct timespec * timeout,
1039 char *fname, int lineno)
1042 * Insure that the file descriptor table is initialized for this
1045 return (_thread_fd_table_init(fd));
1049 _thread_fd_unlock_owned(pthread_t pthread)
1054 _fd_lock_backout(pthread_t pthread)