2 * Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by John Birrell.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * $FreeBSD: src/lib/libc_r/uthread/uthread_fd.c,v 1.16.2.7 2002/10/22 14:44:03 fjoe Exp $
33 * $DragonFly: src/lib/libc_r/uthread/uthread_fd.c,v 1.3 2006/06/14 01:45:28 dillon Exp $
41 #include "pthread_private.h"
/*
 * FDQ_INSERT(q,p): append thread p to fd wait queue q and mark it as
 * queued via PTHREAD_FLAGS_IN_FDQ so FDQ_REMOVE can tell whether it
 * is actually on a queue.
 * NOTE(review): the do/while(0) wrapper lines appear to be elided
 * from this extract (embedded-numbering gap at 44 and 47-48).
 */
43 #define FDQ_INSERT(q,p) \
45 TAILQ_INSERT_TAIL(q,p,qe); \
46 p->flags |= PTHREAD_FLAGS_IN_FDQ; \
/*
 * FDQ_REMOVE(q,p): take thread p off fd wait queue q, but only if the
 * PTHREAD_FLAGS_IN_FDQ flag says it is queued -- this makes the macro
 * safe to call on a thread that was never inserted (or was already
 * removed), which the unlock/backout paths rely on.
 */
49 #define FDQ_REMOVE(q,p) \
51 if ((p->flags & PTHREAD_FLAGS_IN_FDQ) != 0) { \
52 TAILQ_REMOVE(q,p,qe); \
53 p->flags &= ~PTHREAD_FLAGS_IN_FDQ; \
58 /* Static variables: */
/*
 * Serializes publication of new _thread_fd_table[] entries in
 * _thread_fd_table_init(); per-entry state is protected by each
 * entry's own spinlock, not by this one.
 */
59 static spinlock_t fd_table_lock = _SPINLOCK_INITIALIZER;
62 #ifdef _FDLOCKS_ENABLED
/*
 * Forward declarations: scan an entry's read/write wait queue for the
 * next waiter that has not been interrupted/canceled (definitions at
 * the bottom of this file).
 */
63 static inline pthread_t fd_next_reader(int fd);
64 static inline pthread_t fd_next_writer(int fd);
69 * This function *must* return -1 and set the thread specific errno
70 * as a system call. This is because the error return from this
71 * function is propagated directly back from thread-wrapped system
/*
 * _thread_fd_table_init(fd):
 * Lazily allocate and initialise the fd_table entry for file
 * descriptor fd. Returns 0 on success; must return -1 with the
 * thread-specific errno set on failure, because callers propagate the
 * result directly as a system-call error (see the comment above).
 *
 * NOTE(review): this listing is a partial extract (gaps in the
 * embedded numbering); the error-return statements, the free() of the
 * losing entry, and several closing braces fall in elided lines.
 */
76 _thread_fd_table_init(int fd)
79 struct fd_table_entry *entry;
/* Nothing to do before the thread library itself is initialised. */
82 if (_thread_initial == NULL)
85 /* Check if the file descriptor is out of range: */
86 if (fd < 0 || fd >= _thread_dtablesize) {
87 /* Return a bad file descriptor error: */
93 * Check if memory has already been allocated for this file
96 else if (_thread_fd_table[fd] != NULL) {
97 /* Memory has already been allocated. */
99 /* Allocate memory for the file descriptor table entry: */
100 } else if ((entry = (struct fd_table_entry *)
101 malloc(sizeof(struct fd_table_entry))) == NULL) {
102 /* Return an insufficient memory error: */
106 /* Initialise the file locks: */
107 memset(&entry->lock, 0, sizeof(entry->lock));
108 entry->r_owner = NULL;
109 entry->w_owner = NULL;
110 entry->r_fname = NULL;
111 entry->w_fname = NULL;
114 entry->r_lockcount = 0;
115 entry->w_lockcount = 0;
117 /* Initialise the read/write queues: */
118 TAILQ_INIT(&entry->r_queue);
119 TAILQ_INIT(&entry->w_queue);
121 /* Get the flags for the file: */
/*
 * stdio descriptors (0-2) with a cached flag value skip the fcntl;
 * everything else queries the kernel with F_GETFL.
 */
122 if (((fd >= 3) || (_pthread_stdio_flags[fd] == -1)) &&
123 (entry->flags = __sys_fcntl(fd, F_GETFL, 0)) == -1) {
126 /* Check if a stdio descriptor: */
127 if ((fd < 3) && (_pthread_stdio_flags[fd] != -1)) {
129 * Use the stdio flags read by
130 * _pthread_init() to avoid
131 * mistaking the non-blocking
132 * flag that, when set on one
133 * stdio fd, is set on all stdio
136 entry->flags = _pthread_stdio_flags[fd];
140 * NOTE: We now use new system calls which allow
141 * the non-blocking mode to be set on a per-I/O
142 * basis, we no longer have to mess with the
143 * file pointer (which can have unexpected side
144 * effects since it might be shared with parent
145 * processes such as, oh, gmake).
148 /* Lock the file descriptor table: */
149 _SPINLOCK(&fd_table_lock);
152 * Check if another thread allocated the
153 * file descriptor entry while this thread
154 * was doing the same thing. The table wasn't
155 * kept locked during this operation because
156 * it has the potential to recurse.
158 if (_thread_fd_table[fd] == NULL) {
159 /* This thread wins: */
160 _thread_fd_table[fd] = entry;
164 /* Unlock the file descriptor table: */
165 _SPINUNLOCK(&fd_table_lock);
169 * Check if another thread initialised the table entry
170 * before this one could:
174 * Throw away the table entry that this thread
175 * prepared. The other thread wins.
180 /* Return the completion status: */
/*
 * _thread_fd_getflags(fd):
 * Return the cached file-status flags for fd if its table entry
 * exists. No bounds check on fd here -- callers are expected to have
 * gone through _thread_fd_table_init() first. The no-entry return
 * path is elided from this extract.
 */
185 _thread_fd_getflags(int fd)
187 if (_thread_fd_table[fd] != NULL)
188 return (_thread_fd_table[fd]->flags);
/*
 * _thread_fd_setflags(fd, flags):
 * Update the cached file-status flags for fd; silently does nothing
 * when no table entry exists for fd.
 */
194 _thread_fd_setflags(int fd, int flags)
196 if (_thread_fd_table[fd] != NULL)
197 _thread_fd_table[fd]->flags = flags;
200 #ifdef _FDLOCKS_ENABLED
/*
 * _thread_fd_unlock(fd, lock_type):
 * Release the calling thread's read and/or write lock on fd
 * (lock_type is FD_READ, FD_WRITE or FD_RDWR). When the lock count
 * drops to zero, ownership is handed directly to the next
 * non-interrupted waiter, which is dequeued and made runnable.
 * Signals are deferred around the whole operation to keep the
 * scheduling queues consistent.
 *
 * NOTE(review): partial extract -- 'int ret;' declarations, AT_FDCWD
 * early-return code, and closing braces fall in elided lines.
 */
202 _thread_fd_unlock(int fd, int lock_type)
204 struct pthread *curthread = _get_curthread();
208 * Early return if magic descriptor used by "at" family of syscalls.
214 * Check that the file descriptor table is initialised for this
217 if ((ret = _thread_fd_table_init(fd)) == 0) {
219 * Defer signals to protect the scheduling queues from
220 * access by the signal handler:
222 _thread_kern_sig_defer();
225 * Lock the file descriptor table entry to prevent
226 * other threads for clashing with the current
229 _SPINLOCK(&_thread_fd_table[fd]->lock);
231 /* Check if the running thread owns the read lock: */
232 if (_thread_fd_table[fd]->r_owner == curthread) {
233 /* Check the file descriptor and lock types: */
234 if (lock_type == FD_READ || lock_type == FD_RDWR) {
236 * Decrement the read lock count for the
239 _thread_fd_table[fd]->r_lockcount--;
242 * Check if the running thread still has read
243 * locks on this file descriptor:
245 if (_thread_fd_table[fd]->r_lockcount != 0) {
248 * Get the next thread in the queue for a
249 * read lock on this file descriptor:
/*
 * NOTE(review): the '== NULL' test below reads inverted for this
 * comment's intent; in the upstream source this line is '!= NULL'
 * (hand the lock to the next reader when one exists). Presumably an
 * extraction artifact -- verify against the repository.
 */
251 else if ((_thread_fd_table[fd]->r_owner = fd_next_reader(fd)) == NULL) {
253 /* Remove this thread from the queue: */
254 FDQ_REMOVE(&_thread_fd_table[fd]->r_queue,
255 _thread_fd_table[fd]->r_owner);
258 * Set the state of the new owner of
259 * the thread to running:
261 PTHREAD_NEW_STATE(_thread_fd_table[fd]->r_owner,PS_RUNNING);
264 * Reset the number of read locks.
265 * This will be incremented by the
266 * new owner of the lock when it sees
267 * that it has the lock.
269 _thread_fd_table[fd]->r_lockcount = 0;
273 /* Check if the running thread owns the write lock: */
274 if (_thread_fd_table[fd]->w_owner == curthread) {
275 /* Check the file descriptor and lock types: */
276 if (lock_type == FD_WRITE || lock_type == FD_RDWR) {
278 * Decrement the write lock count for the
281 _thread_fd_table[fd]->w_lockcount--;
284 * Check if the running thread still has
285 * write locks on this file descriptor:
287 if (_thread_fd_table[fd]->w_lockcount != 0) {
290 * Get the next thread in the queue for a
291 * write lock on this file descriptor:
/* NOTE(review): same suspected inversion as the reader case above. */
293 else if ((_thread_fd_table[fd]->w_owner = fd_next_writer(fd)) == NULL) {
295 /* Remove this thread from the queue: */
296 FDQ_REMOVE(&_thread_fd_table[fd]->w_queue,
297 _thread_fd_table[fd]->w_owner);
300 * Set the state of the new owner of
301 * the thread to running:
303 PTHREAD_NEW_STATE(_thread_fd_table[fd]->w_owner,PS_RUNNING);
306 * Reset the number of write locks.
307 * This will be incremented by the
308 * new owner of the lock when it
309 * sees that it has the lock.
311 _thread_fd_table[fd]->w_lockcount = 0;
316 /* Unlock the file descriptor table entry: */
317 _SPINUNLOCK(&_thread_fd_table[fd]->lock);
320 * Undefer and handle pending signals, yielding if
323 _thread_kern_sig_undefer();
/*
 * _thread_fd_lock(fd, lock_type, timeout):
 * Acquire a read and/or write lock on fd for the calling thread,
 * blocking in PS_FDLR_WAIT / PS_FDLW_WAIT until ownership is granted,
 * the optional timeout expires, or the thread is interrupted.
 * Lock handoff is direct: the unlocking thread assigns r_owner /
 * w_owner, and this thread increments the lock count once it sees
 * itself as owner. Returns the _thread_fd_table_init() status; the
 * interrupted path runs the thread's continuation (cancellation
 * support) before returning.
 *
 * NOTE(review): partial extract -- 'int ret;', AT_FDCWD early return,
 * the FDQ_REMOVE second arguments at the interrupted checks, and
 * closing braces fall in elided lines.
 */
328 _thread_fd_lock(int fd, int lock_type, struct timespec * timeout)
330 struct pthread *curthread = _get_curthread();
334 * Early return if magic descriptor used by "at" family of syscalls.
340 * Check that the file descriptor table is initialised for this
343 if ((ret = _thread_fd_table_init(fd)) == 0) {
344 /* Clear the interrupted flag: */
345 curthread->interrupted = 0;
348 * Lock the file descriptor table entry to prevent
349 * other threads for clashing with the current
352 _SPINLOCK(&_thread_fd_table[fd]->lock);
354 /* Check the file descriptor and lock types: */
355 if (lock_type == FD_READ || lock_type == FD_RDWR) {
357 * Wait for the file descriptor to be locked
358 * for read for the current thread:
360 while ((_thread_fd_table[fd]->r_owner != curthread) &&
361 (curthread->interrupted == 0)) {
363 * Check if the file descriptor is locked by
366 if (_thread_fd_table[fd]->r_owner != NULL) {
368 * Another thread has locked the file
369 * descriptor for read, so join the
370 * queue of threads waiting for a
371 * read lock on this file descriptor:
373 FDQ_INSERT(&_thread_fd_table[fd]->r_queue, curthread);
376 * Save the file descriptor details
377 * in the thread structure for the
380 curthread->data.fd.fd = fd;
382 /* Set the timeout: */
383 _thread_kern_set_timeout(timeout);
386 * Unlock the file descriptor
/* Entry lock must be dropped before sleeping in the scheduler. */
389 _SPINUNLOCK(&_thread_fd_table[fd]->lock);
392 * Schedule this thread to wait on
393 * the read lock. It will only be
394 * woken when it becomes the next in
395 * the queue and is granted access
396 * to the lock by the thread
397 * that is unlocking the file
400 _thread_kern_sched_state(PS_FDLR_WAIT, __FILE__, __LINE__);
403 * Lock the file descriptor
406 _SPINLOCK(&_thread_fd_table[fd]->lock);
408 if (curthread->interrupted != 0) {
409 FDQ_REMOVE(&_thread_fd_table[fd]->r_queue,
414 * The running thread now owns the
415 * read lock on this file descriptor:
/* Uncontended case: claim the read lock immediately. */
417 _thread_fd_table[fd]->r_owner = curthread;
420 * Reset the number of read locks for
421 * this file descriptor:
423 _thread_fd_table[fd]->r_lockcount = 0;
427 if (_thread_fd_table[fd]->r_owner == curthread)
428 /* Increment the read lock count: */
429 _thread_fd_table[fd]->r_lockcount++;
432 /* Check the file descriptor and lock types: */
433 if (curthread->interrupted == 0 &&
434 (lock_type == FD_WRITE || lock_type == FD_RDWR)) {
436 * Wait for the file descriptor to be locked
437 * for write for the current thread:
439 while ((_thread_fd_table[fd]->w_owner != curthread) &&
440 (curthread->interrupted == 0)) {
442 * Check if the file descriptor is locked by
445 if (_thread_fd_table[fd]->w_owner != NULL) {
447 * Another thread has locked the file
448 * descriptor for write, so join the
449 * queue of threads waiting for a
450 * write lock on this file
453 FDQ_INSERT(&_thread_fd_table[fd]->w_queue, curthread);
456 * Save the file descriptor details
457 * in the thread structure for the
460 curthread->data.fd.fd = fd;
462 /* Set the timeout: */
463 _thread_kern_set_timeout(timeout);
466 * Unlock the file descriptor
469 _SPINUNLOCK(&_thread_fd_table[fd]->lock);
472 * Schedule this thread to wait on
473 * the write lock. It will only be
474 * woken when it becomes the next in
475 * the queue and is granted access to
476 * the lock by the thread that is
477 * unlocking the file descriptor.
479 _thread_kern_sched_state(PS_FDLW_WAIT, __FILE__, __LINE__);
482 * Lock the file descriptor
485 _SPINLOCK(&_thread_fd_table[fd]->lock);
487 if (curthread->interrupted != 0) {
488 FDQ_REMOVE(&_thread_fd_table[fd]->w_queue,
493 * The running thread now owns the
494 * write lock on this file
497 _thread_fd_table[fd]->w_owner = curthread;
500 * Reset the number of write locks
501 * for this file descriptor:
503 _thread_fd_table[fd]->w_lockcount = 0;
507 if (_thread_fd_table[fd]->w_owner == curthread)
508 /* Increment the write lock count: */
509 _thread_fd_table[fd]->w_lockcount++;
512 /* Unlock the file descriptor table entry: */
513 _SPINUNLOCK(&_thread_fd_table[fd]->lock);
515 if (curthread->interrupted != 0) {
/* Interrupted (e.g. canceled): run the deferred continuation. */
518 if (curthread->continuation != NULL)
519 curthread->continuation((void *)curthread);
523 /* Return the completion status: */
/*
 * _thread_fd_unlock_debug(fd, lock_type, fname, lineno):
 * Debug variant of _thread_fd_unlock(): identical handoff logic, but
 * the entry spinlock is taken with _spinlock_debug() so the caller's
 * source file/line are recorded for deadlock diagnosis.
 *
 * NOTE(review): partial extract -- declarations, AT_FDCWD handling
 * and closing braces are in elided lines; see _thread_fd_unlock for
 * the same suspected '== NULL' / '!= NULL' extraction artifacts.
 */
528 _thread_fd_unlock_debug(int fd, int lock_type, char *fname, int lineno)
530 struct pthread *curthread = _get_curthread();
534 * Early return if magic descriptor used by "at" family of syscalls.
540 * Check that the file descriptor table is initialised for this
543 if ((ret = _thread_fd_table_init(fd)) == 0) {
545 * Defer signals to protect the scheduling queues from
546 * access by the signal handler:
548 _thread_kern_sig_defer();
551 * Lock the file descriptor table entry to prevent
552 * other threads for clashing with the current
555 _spinlock_debug(&_thread_fd_table[fd]->lock, fname, lineno);
557 /* Check if the running thread owns the read lock: */
558 if (_thread_fd_table[fd]->r_owner == curthread) {
559 /* Check the file descriptor and lock types: */
560 if (lock_type == FD_READ || lock_type == FD_RDWR) {
562 * Decrement the read lock count for the
565 _thread_fd_table[fd]->r_lockcount--;
568 * Check if the running thread still has read
569 * locks on this file descriptor:
571 if (_thread_fd_table[fd]->r_lockcount != 0) {
574 * Get the next thread in the queue for a
575 * read lock on this file descriptor:
577 else if ((_thread_fd_table[fd]->r_owner = fd_next_reader(fd)) == NULL) {
579 /* Remove this thread from the queue: */
580 FDQ_REMOVE(&_thread_fd_table[fd]->r_queue,
581 _thread_fd_table[fd]->r_owner);
584 * Set the state of the new owner of
585 * the thread to running:
587 PTHREAD_NEW_STATE(_thread_fd_table[fd]->r_owner,PS_RUNNING);
590 * Reset the number of read locks.
591 * This will be incremented by the
592 * new owner of the lock when it sees
593 * that it has the lock.
595 _thread_fd_table[fd]->r_lockcount = 0;
599 /* Check if the running thread owns the write lock: */
600 if (_thread_fd_table[fd]->w_owner == curthread) {
601 /* Check the file descriptor and lock types: */
602 if (lock_type == FD_WRITE || lock_type == FD_RDWR) {
604 * Decrement the write lock count for the
607 _thread_fd_table[fd]->w_lockcount--;
610 * Check if the running thread still has
611 * write locks on this file descriptor:
613 if (_thread_fd_table[fd]->w_lockcount != 0) {
616 * Get the next thread in the queue for a
617 * write lock on this file descriptor:
619 else if ((_thread_fd_table[fd]->w_owner = fd_next_writer(fd)) == NULL) {
621 /* Remove this thread from the queue: */
622 FDQ_REMOVE(&_thread_fd_table[fd]->w_queue,
623 _thread_fd_table[fd]->w_owner);
626 * Set the state of the new owner of
627 * the thread to running:
629 PTHREAD_NEW_STATE(_thread_fd_table[fd]->w_owner,PS_RUNNING);
632 * Reset the number of write locks.
633 * This will be incremented by the
634 * new owner of the lock when it
635 * sees that it has the lock.
637 _thread_fd_table[fd]->w_lockcount = 0;
642 /* Unlock the file descriptor table entry: */
643 _SPINUNLOCK(&_thread_fd_table[fd]->lock);
646 * Undefer and handle pending signals, yielding if
649 _thread_kern_sig_undefer();
/*
 * _thread_fd_lock_debug(fd, lock_type, timeout, fname, lineno):
 * Debug variant of _thread_fd_lock(): same blocking acquisition
 * protocol, but additionally records the caller's source location in
 * curthread->data.fd and in the entry's r_fname/r_lineno and
 * w_fname/w_lineno fields, and takes the entry spinlock via
 * _spinlock_debug().
 *
 * NOTE(review): partial extract -- declarations, AT_FDCWD handling,
 * FDQ_REMOVE second arguments at the interrupted checks, and closing
 * braces fall in elided lines.
 */
654 _thread_fd_lock_debug(int fd, int lock_type, struct timespec * timeout,
655 char *fname, int lineno)
657 struct pthread *curthread = _get_curthread();
661 * Early return if magic descriptor used by "at" family of syscalls.
667 * Check that the file descriptor table is initialised for this
670 if ((ret = _thread_fd_table_init(fd)) == 0) {
671 /* Clear the interrupted flag: */
672 curthread->interrupted = 0;
675 * Lock the file descriptor table entry to prevent
676 * other threads for clashing with the current
679 _spinlock_debug(&_thread_fd_table[fd]->lock, fname, lineno);
681 /* Check the file descriptor and lock types: */
682 if (lock_type == FD_READ || lock_type == FD_RDWR) {
684 * Wait for the file descriptor to be locked
685 * for read for the current thread:
687 while ((_thread_fd_table[fd]->r_owner != curthread) &&
688 (curthread->interrupted == 0)) {
690 * Check if the file descriptor is locked by
693 if (_thread_fd_table[fd]->r_owner != NULL) {
695 * Another thread has locked the file
696 * descriptor for read, so join the
697 * queue of threads waiting for a
698 * read lock on this file descriptor:
700 FDQ_INSERT(&_thread_fd_table[fd]->r_queue, curthread);
703 * Save the file descriptor details
704 * in the thread structure for the
707 curthread->data.fd.fd = fd;
/* Record where the wait originated, for debugger inspection. */
708 curthread->data.fd.branch = lineno;
709 curthread->data.fd.fname = fname;
711 /* Set the timeout: */
712 _thread_kern_set_timeout(timeout);
715 * Unlock the file descriptor
718 _SPINUNLOCK(&_thread_fd_table[fd]->lock);
721 * Schedule this thread to wait on
722 * the read lock. It will only be
723 * woken when it becomes the next in
724 * the queue and is granted access
725 * to the lock by the thread
726 * that is unlocking the file
729 _thread_kern_sched_state(PS_FDLR_WAIT, __FILE__, __LINE__);
732 * Lock the file descriptor
735 _SPINLOCK(&_thread_fd_table[fd]->lock);
737 if (curthread->interrupted != 0) {
738 FDQ_REMOVE(&_thread_fd_table[fd]->r_queue,
743 * The running thread now owns the
744 * read lock on this file descriptor:
746 _thread_fd_table[fd]->r_owner = curthread;
749 * Reset the number of read locks for
750 * this file descriptor:
752 _thread_fd_table[fd]->r_lockcount = 0;
755 * Save the source file details for
758 _thread_fd_table[fd]->r_fname = fname;
759 _thread_fd_table[fd]->r_lineno = lineno;
763 if (_thread_fd_table[fd]->r_owner == curthread)
764 /* Increment the read lock count: */
765 _thread_fd_table[fd]->r_lockcount++;
768 /* Check the file descriptor and lock types: */
769 if (curthread->interrupted == 0 &&
770 (lock_type == FD_WRITE || lock_type == FD_RDWR)) {
772 * Wait for the file descriptor to be locked
773 * for write for the current thread:
775 while ((_thread_fd_table[fd]->w_owner != curthread) &&
776 (curthread->interrupted == 0)) {
778 * Check if the file descriptor is locked by
781 if (_thread_fd_table[fd]->w_owner != NULL) {
783 * Another thread has locked the file
784 * descriptor for write, so join the
785 * queue of threads waiting for a
786 * write lock on this file
789 FDQ_INSERT(&_thread_fd_table[fd]->w_queue, curthread);
792 * Save the file descriptor details
793 * in the thread structure for the
796 curthread->data.fd.fd = fd;
797 curthread->data.fd.branch = lineno;
798 curthread->data.fd.fname = fname;
800 /* Set the timeout: */
801 _thread_kern_set_timeout(timeout);
804 * Unlock the file descriptor
807 _SPINUNLOCK(&_thread_fd_table[fd]->lock);
810 * Schedule this thread to wait on
811 * the write lock. It will only be
812 * woken when it becomes the next in
813 * the queue and is granted access to
814 * the lock by the thread that is
815 * unlocking the file descriptor.
817 _thread_kern_sched_state(PS_FDLW_WAIT, __FILE__, __LINE__);
820 * Lock the file descriptor
823 _SPINLOCK(&_thread_fd_table[fd]->lock);
825 if (curthread->interrupted != 0) {
826 FDQ_REMOVE(&_thread_fd_table[fd]->w_queue,
831 * The running thread now owns the
832 * write lock on this file
835 _thread_fd_table[fd]->w_owner = curthread;
838 * Reset the number of write locks
839 * for this file descriptor:
841 _thread_fd_table[fd]->w_lockcount = 0;
844 * Save the source file details for
847 _thread_fd_table[fd]->w_fname = fname;
848 _thread_fd_table[fd]->w_lineno = lineno;
852 if (_thread_fd_table[fd]->w_owner == curthread)
853 /* Increment the write lock count: */
854 _thread_fd_table[fd]->w_lockcount++;
857 /* Unlock the file descriptor table entry: */
858 _SPINUNLOCK(&_thread_fd_table[fd]->lock);
860 if (curthread->interrupted != 0) {
/* Interrupted (e.g. canceled): run the deferred continuation. */
863 if (curthread->continuation != NULL)
864 curthread->continuation((void *)curthread);
868 /* Return the completion status: */
/*
 * _thread_fd_unlock_owned(pthread):
 * Release every fd read/write lock held by 'pthread' -- used when a
 * thread exits or is canceled while still owning fd locks. Walks the
 * whole fd table; for each entry owned by the thread it zeroes the
 * lock count and hands ownership to the next eligible waiter
 * (dequeued and made runnable). Note the handoff tests here use
 * '!= NULL', the expected sense (contrast the unlock functions above).
 *
 * NOTE(review): partial extract -- 'int fd;' and closing braces fall
 * in elided lines.
 */
873 _thread_fd_unlock_owned(pthread_t pthread)
877 for (fd = 0; fd < _thread_dtablesize; fd++) {
878 if ((_thread_fd_table[fd] != NULL) &&
879 ((_thread_fd_table[fd]->r_owner == pthread) ||
880 (_thread_fd_table[fd]->w_owner == pthread))) {
882 * Defer signals to protect the scheduling queues
883 * from access by the signal handler:
885 _thread_kern_sig_defer();
888 * Lock the file descriptor table entry to prevent
889 * other threads for clashing with the current
892 _SPINLOCK(&_thread_fd_table[fd]->lock);
894 /* Check if the thread owns the read lock: */
895 if (_thread_fd_table[fd]->r_owner == pthread) {
896 /* Clear the read lock count: */
897 _thread_fd_table[fd]->r_lockcount = 0;
900 * Get the next thread in the queue for a
901 * read lock on this file descriptor:
903 if ((_thread_fd_table[fd]->r_owner = fd_next_reader(fd)) != NULL) {
904 /* Remove this thread from the queue: */
905 FDQ_REMOVE(&_thread_fd_table[fd]->r_queue,
906 _thread_fd_table[fd]->r_owner);
909 * Set the state of the new owner of
910 * the thread to running:
912 PTHREAD_NEW_STATE(_thread_fd_table[fd]->r_owner,PS_RUNNING);
916 /* Check if the thread owns the write lock: */
917 if (_thread_fd_table[fd]->w_owner == pthread) {
918 /* Clear the write lock count: */
919 _thread_fd_table[fd]->w_lockcount = 0;
922 * Get the next thread in the queue for a
923 * write lock on this file descriptor:
925 if ((_thread_fd_table[fd]->w_owner = fd_next_writer(fd)) != NULL) {
926 /* Remove this thread from the queue: */
927 FDQ_REMOVE(&_thread_fd_table[fd]->w_queue,
928 _thread_fd_table[fd]->w_owner);
931 * Set the state of the new owner of
932 * the thread to running:
934 PTHREAD_NEW_STATE(_thread_fd_table[fd]->w_owner,PS_RUNNING);
939 /* Unlock the file descriptor table entry: */
940 _SPINUNLOCK(&_thread_fd_table[fd]->lock);
943 * Undefer and handle pending signals, yielding if
946 _thread_kern_sig_undefer();
/*
 * _fd_lock_backout(pthread):
 * Back a thread out of an fd-lock wait after a signal interruption:
 * if it is blocked in PS_FDLR_WAIT or PS_FDLW_WAIT (the case labels
 * are in elided lines), remove it from the corresponding read or
 * write wait queue under the entry spinlock. FDQ_REMOVE's flag test
 * makes this safe even if the thread was already dequeued.
 */
952 _fd_lock_backout(pthread_t pthread)
957 * Defer signals to protect the scheduling queues
958 * from access by the signal handler:
960 _thread_kern_sig_defer();
962 switch (pthread->state) {
965 fd = pthread->data.fd.fd;
968 * Lock the file descriptor table entry to prevent
969 * other threads for clashing with the current
972 _SPINLOCK(&_thread_fd_table[fd]->lock);
974 /* Remove the thread from the waiting queue: */
975 FDQ_REMOVE(&_thread_fd_table[fd]->r_queue, pthread);
979 fd = pthread->data.fd.fd;
982 * Lock the file descriptor table entry to prevent
983 * other threads from clashing with the current
986 _SPINLOCK(&_thread_fd_table[fd]->lock);
988 /* Remove the thread from the waiting queue: */
989 FDQ_REMOVE(&_thread_fd_table[fd]->w_queue, pthread);
997 * Undefer and handle pending signals, yielding if
1000 _thread_kern_sig_undefer();
/*
 * fd_next_reader(fd): return the first thread on fd's read wait queue
 * that has not been interrupted/canceled, discarding interrupted
 * waiters from the queue head as it goes. The 'pthread_t pthread;'
 * declaration and final return are in elided lines; caller must hold
 * the entry spinlock (all call sites above do).
 */
1003 static inline pthread_t
1004 fd_next_reader(int fd)
1008 while (((pthread = TAILQ_FIRST(&_thread_fd_table[fd]->r_queue)) != NULL) &&
1009 (pthread->interrupted != 0)) {
1011 * This thread has either been interrupted by a signal or
1012 * it has been canceled. Remove it from the queue.
1014 FDQ_REMOVE(&_thread_fd_table[fd]->r_queue, pthread);
/*
 * fd_next_writer(fd): write-queue counterpart of fd_next_reader() --
 * skip and dequeue interrupted waiters, yielding the first live one.
 * Declaration and return statement are in elided lines; caller must
 * hold the entry spinlock.
 */
1020 static inline pthread_t
1021 fd_next_writer(int fd)
1025 while (((pthread = TAILQ_FIRST(&_thread_fd_table[fd]->w_queue)) != NULL) &&
1026 (pthread->interrupted != 0)) {
1028 * This thread has either been interrupted by a signal or
1029 * it has been canceled. Remove it from the queue.
1031 FDQ_REMOVE(&_thread_fd_table[fd]->w_queue, pthread);
/*
 * Stub implementations compiled when _FDLOCKS_ENABLED is not defined
 * (this region sits under the matching #else; the preprocessor lines
 * are elided from this extract). Locking becomes a no-op: the lock
 * entry points only make sure the fd table entry exists, and the
 * unlock/backout entry points do nothing.
 */
1040 _thread_fd_unlock(int fd, int lock_type)
1045 _thread_fd_lock(int fd, int lock_type, struct timespec * timeout)
1048 * Insure that the file descriptor table is initialized for this
1049 * entry except if magic descriptor used by "at" family of syscalls.
1051 return ((fd != AT_FDCWD) ? _thread_fd_table_init(fd) : 0);
1055 _thread_fd_unlock_debug(int fd, int lock_type, char *fname, int lineno)
1060 _thread_fd_lock_debug(int fd, int lock_type, struct timespec * timeout,
1061 char *fname, int lineno)
1064 * Insure that the file descriptor table is initialized for this
1065 * entry except if magic descriptor used by "at" family of syscalls.
1067 return ((fd != AT_FDCWD) ? _thread_fd_table_init(fd) : 0);
1071 _thread_fd_unlock_owned(pthread_t pthread)
1076 _fd_lock_backout(pthread_t pthread)