2 * Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by John Birrell.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * $FreeBSD: src/lib/libc_r/uthread/uthread_select.c,v 1.16.2.5 2002/10/22 14:44:03 fjoe Exp $
33 * $DragonFly: src/lib/libc_r/uthread/uthread_select.c,v 1.3 2008/01/10 22:30:27 nth Exp $
#include <unistd.h>
#include <errno.h>
#include <poll.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/time.h>
#include <sys/select.h>
#include <sys/fcntl.h>
#include "pthread_private.h"
/*
 * _select() -- libc_r's user-thread implementation of select(2).
 *
 * Translates the caller's fd_sets into a pollfd array, probes the
 * kernel with a zero-timeout __sys_poll(), and if nothing is ready
 * (and waiting is permitted) parks the thread in the user scheduler
 * in PS_SELECT_WAIT state.  On wakeup the poll results are folded
 * back into the caller's fd_sets.
 *
 * NOTE(review): this extract is missing interior lines (closing
 * braces, counters, errno assignments, returns); the leading number
 * on each line is the original file's line number.  Comments below
 * describe only what the visible lines establish; missing-line
 * behavior is explicitly hedged.
 */
48 _select(int numfds, fd_set * readfds, fd_set * writefds, fd_set * exceptfds,
49 struct timeval * timeout)
/* Per-thread state: holds the reusable poll buffer and wait data. */
51 struct pthread *curthread = _get_curthread();
53 int i, ret = 0, f_wait = 1;
54 int pfd_index, got_events = 0, fd_count = 0;
55 struct pthread_poll_data data;
/* Clamp numfds to the thread library's descriptor-table size. */
57 if (numfds > _thread_dtablesize) {
58 numfds = _thread_dtablesize;
60 /* Check if a timeout was specified: */
/*
 * Reject a negative or out-of-range timeout (presumably EINVAL is
 * set on the missing lines -- TODO confirm against upstream).
 */
62 if (timeout->tv_sec < 0 ||
63 timeout->tv_usec < 0 || timeout->tv_usec >= 1000000) {
68 /* Convert the timeval to a timespec: */
69 TIMEVAL_TO_TIMESPEC(timeout, &ts);
71 /* Set the wake up time: */
72 _thread_kern_set_timeout(&ts);
/*
 * A zero timeout means poll without blocking (f_wait is presumably
 * cleared on the missing next line -- TODO confirm).
 */
73 if (ts.tv_sec == 0 && ts.tv_nsec == 0)
/* No timeout supplied: arm an indefinite wait. */
77 _thread_kern_set_timeout(NULL);
80 /* Count the number of file descriptors to be polled: */
81 if (readfds || writefds || exceptfds) {
82 for (i = 0; i < numfds; i++) {
83 if ((readfds && FD_ISSET(i, readfds)) ||
84 (exceptfds && FD_ISSET(i, exceptfds)) ||
85 (writefds && FD_ISSET(i, writefds))) {
/*
92 * Allocate memory for poll data if it hasn't already been
93 * allocated or if previously allocated memory is insufficient.
 */
/*
 * The buffer never shrinks below 128 entries so later calls can
 * usually reuse it without reallocating.
 */
95 if ((curthread->poll_data.fds == NULL) ||
96 (curthread->poll_data.nfds < fd_count)) {
97 data.fds = (struct pollfd *) realloc(curthread->poll_data.fds,
98 sizeof(struct pollfd) * MAX(128, fd_count));
/*
 * realloc failure: the old buffer in curthread->poll_data stays
 * valid -- it is only replaced on success below.
 */
99 if (data.fds == NULL) {
/*
105 * Note that the threads poll data always
106 * indicates what is allocated, not what is
107 * currently being polled.
 */
109 curthread->poll_data.fds = data.fds;
110 curthread->poll_data.nfds = MAX(128, fd_count);
114 /* Setup the wait data. */
115 data.fds = curthread->poll_data.fds;
116 data.nfds = fd_count;
/*
119 * Setup the array of pollfds. Optimize this by
120 * running the loop in reverse and stopping when
121 * the number of selected file descriptors is reached.
 */
123 for (i = numfds - 1, pfd_index = fd_count - 1;
124 (i >= 0) && (pfd_index >= 0); i--) {
125 data.fds[pfd_index].events = 0;
/* select "readable" maps to POLLRDNORM... */
126 if (readfds && FD_ISSET(i, readfds)) {
127 data.fds[pfd_index].events = POLLRDNORM;
/* ...exceptional conditions map to POLLRDBAND... */
129 if (exceptfds && FD_ISSET(i, exceptfds)) {
130 data.fds[pfd_index].events |= POLLRDBAND;
/* ...and "writable" maps to POLLWRNORM. */
132 if (writefds && FD_ISSET(i, writefds)) {
133 data.fds[pfd_index].events |= POLLWRNORM;
135 if (data.fds[pfd_index].events != 0) {
/*
137 * Set the file descriptor to be polled and
138 * clear revents in case of a timeout which
139 * leaves fds unchanged:
 */
141 data.fds[pfd_index].fd = i;
142 data.fds[pfd_index].revents = 0;
/*
 * Probe with a zero timeout; only block in the scheduler when the
 * probe found nothing ready and waiting is allowed (the second
 * operand of the && is on a missing line -- presumably f_wait).
 */
146 if (((ret = __sys_poll(data.fds, data.nfds, 0)) == 0) &&
148 curthread->data.poll_data = &data;
149 curthread->interrupted = 0;
/*
 * Sleep until a polled fd becomes ready, the timeout fires, or a
 * signal interrupts the thread.
 */
150 _thread_kern_sched_state(PS_SELECT_WAIT, __FILE__, __LINE__);
/*
 * Interrupted wait: presumably EINTR is reported on the missing
 * lines -- TODO confirm.
 */
151 if (curthread->interrupted) {
/* Fold the poll results back into the caller's fd_sets. */
162 for (i = 0; i < fd_count; i++) {
/*
164 * Check the results of the poll and clear
165 * this file descriptor from the fdset if
166 * the requested event wasn't ready.
 */
/*
170 * First check for invalid descriptor.
171 * If found, set errno and return -1.
 */
173 if (data.fds[i].revents & POLLNVAL) {
/*
 * Keep the read bit only when the descriptor is readable or in an
 * error/hangup state (both count as "ready" for select); otherwise
 * clear it from the set.
 */
179 if (readfds != NULL) {
180 if (FD_ISSET(data.fds[i].fd, readfds)) {
181 if ((data.fds[i].revents & (POLLIN
182 | POLLRDNORM | POLLERR
183 | POLLHUP | POLLNVAL)) != 0)
186 FD_CLR(data.fds[i].fd, readfds);
/* Same treatment for the write bit. */
189 if (writefds != NULL) {
190 if (FD_ISSET(data.fds[i].fd, writefds)) {
191 if ((data.fds[i].revents & (POLLOUT
192 | POLLWRNORM | POLLWRBAND | POLLERR
193 | POLLHUP | POLLNVAL)) != 0)
196 FD_CLR(data.fds[i].fd,
/* And for the exception bit (out-of-band/priority data). */
200 if (exceptfds != NULL) {
201 if (FD_ISSET(data.fds[i].fd, exceptfds)) {
202 if (data.fds[i].revents & (POLLRDBAND |
206 FD_CLR(data.fds[i].fd,
/*
 * select() -- cancellation-point wrapper around _select().
 *
 * Enters a thread cancellation point, performs the wait via the
 * internal _select() implementation, then leaves the cancellation
 * point before handing the result back to the caller.
 *
 * Returns the number of ready descriptors, or -1 with errno set on
 * error (whatever _select() reported).  The visible fragment dropped
 * the result variable and the return statement; both are restored
 * here so the computed value actually reaches the caller.
 */
int
select(int numfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds,
    struct timeval *timeout)
{
	int	ret;

	_thread_enter_cancellation_point();
	ret = _select(numfds, readfds, writefds, exceptfds, timeout);
	_thread_leave_cancellation_point();

	return (ret);
}
234 pselect(int numfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds,
235 const struct timespec *timeout, const sigset_t *mask)
242 _thread_enter_cancellation_point();
244 if (timeout != NULL) {
245 tv.tv_sec = timeout->tv_sec;
246 tv.tv_usec = (timeout->tv_nsec + 999) / 1000;
253 * XXX The masking/select/unmasking sequence below is not atomic. See
256 * The Right Thing would be to mask/unmask signals kernel-side. We do
257 * this for single-threaded and libthread_xu processes but this is far
258 * from trivial for libc_r because select() is actually a poll()
259 * wrapper there and not using poll() would involve complex changes in
260 * the user thread scheduler. We're deprecating libc_r in favor of
261 * libthread_xu so the usefulness of such a change is questionable.
265 (void) sigprocmask(SIG_SETMASK, mask, &omask);
267 ret = _select(numfds, readfds, writefds, exceptfds, tvp);
270 (void) sigprocmask(SIG_SETMASK, &omask, NULL);
272 _thread_leave_cancellation_point();