/* Stub variant of mono_poll: reaching it at runtime is a programming
 * error, so abort via the glib assertion machinery.
 * NOTE(review): the guarding preprocessor conditional, braces, and any
 * return statement are outside this excerpt — presumably this is the
 * DISABLE_SOCKETS build; confirm against the full file. */
13 mono_poll (mono_pollfd *ufds, unsigned int nfds, int timeout)
15 g_assert_not_reached ();
20 #if defined(HAVE_POLL) && !defined(__APPLE__)
/* Native poll(2) is available (and we are not on Apple, which is
 * explicitly excluded above — reason not visible in this excerpt;
 * presumably historical poll() bugs on that platform, TODO confirm):
 * mono_poll is a thin pass-through to the system call. */
22 mono_poll (mono_pollfd *ufds, unsigned int nfds, int timeout)
24 return poll (ufds, nfds, timeout);
/* poll(2) emulation built on select(): converts the pollfd array into
 * read/write/exception fd_sets, calls select(), then translates set
 * membership back into revents bits.
 * NOTE(review): many lines of the original body are elided from this
 * excerpt (tvptr assignment, FD_SET calls, loop/brace closers, the
 * count bookkeeping); comments below cover only the visible code. */
29 mono_poll (mono_pollfd *ufds, unsigned int nfds, int timeout)
31 struct timeval tv, *tvptr;
32 int i, fd, events, affected, count;
33 fd_set rfds, wfds, efds;
/* Millisecond timeout split into the seconds/microseconds fields of
 * a struct timeval for select().  (The branch selecting tvptr = &tv
 * vs. NULL for an infinite wait is not visible here — confirm.) */
40 tv.tv_sec = timeout / 1000;
41 tv.tv_usec = (timeout % 1000) * 1000;
/* First pass: build the three fd_sets from each entry's requested
 * events. */
49 for (i = 0; i < nfds; i++) {
/* Too many descriptors for a fd_set: mark the entry invalid instead
 * of overflowing the set.  (Where nexc is incremented is elided.) */
56 if (nexc >= FD_SETSIZE) {
57 ufds [i].revents = MONO_POLLNVAL;
/* NOTE(review): valid fd_set indices are 0..FD_SETSIZE-1, so this
 * check looks off-by-one — a descriptor equal to FD_SETSIZE slips
 * through; presumably it should be `fd >= FD_SETSIZE`.  Kept as-is
 * in this documentation-only pass; confirm before changing. */
61 if (fd > FD_SETSIZE) {
62 ufds [i].revents = MONO_POLLNVAL;
67 events = ufds [i].events;
68 if ((events & MONO_POLLIN) != 0)
71 if ((events & MONO_POLLOUT) != 0)
81 affected = select (maxfd + 1, &rfds, &wfds, &efds, tvptr);
/* Windows: select() reports failure via WSAGetLastError(); translate
 * the WSA codes into the closest POSIX errno values so callers see a
 * uniform error interface. */
84 int error = WSAGetLastError ();
86 case WSAEFAULT: errno = EFAULT; break;
87 case WSAEINVAL: errno = EINVAL; break;
88 case WSAEINTR: errno = EINTR; break;
89 /* case WSAEINPROGRESS: errno = EINPROGRESS; break; */
/* Deliberate override of the natural mapping (kept as the comment
 * on the line above): WSAEINPROGRESS is surfaced as EINTR.  The
 * rationale is not visible in this excerpt — confirm with callers. */
90 case WSAEINPROGRESS: errno = EINTR; break;
91 case WSAENOTSOCK: errno = EBADF; break;
93 case WSAENETDOWN: errno = ENOSR; break;
/* Second pass: walk the pollfd array translating fd_set membership
 * back into revents, stopping early once all `affected` descriptors
 * reported by select() are accounted for.  (The decrement of
 * `affected` is in elided lines.) */
103 for (i = 0; i < nfds && affected > 0; i++) {
108 events = ufds [i].events;
109 if ((events & MONO_POLLIN) != 0 && FD_ISSET (fd, &rfds)) {
110 ufds [i].revents |= MONO_POLLIN;
114 if ((events & MONO_POLLOUT) != 0 && FD_ISSET (fd, &wfds)) {
115 ufds [i].revents |= MONO_POLLOUT;
/* An exceptional condition is reported as POLLERR regardless of the
 * events the caller asked for. */
119 if (FD_ISSET (fd, &efds)) {
120 ufds [i].revents |= MONO_POLLERR;
/* Entries with any revents set presumably contribute to the returned
 * count (the increment itself is elided). */
124 if (ufds [i].revents != 0)
133 #endif /* #ifndef DISABLE_SOCKETS */