/* Number of pollfd slots allocated at init and per growth step of the table. */
2 #define POLL_NEVENTS 1024

/* File-scope state for the poll()-based I/O backend:
 * poll_fds           — heap-allocated array of pollfd slots (slot 0 is the
 *                      wakeup pipe, see poll_init); unused slots have fd == -1.
 * poll_fds_capacity  — number of allocated slots in poll_fds.
 * poll_fds_size      — 1 + index of the highest slot ever used; the count
 *                      passed to mono_poll (see poll_event_wait).
 * NOTE(review): no locking is visible in this chunk — presumably the caller
 * serializes access to this state; confirm against the enclosing file. */
4 static mono_pollfd *poll_fds;
5 static guint poll_fds_capacity;
6 static guint poll_fds_size;
/* Initialize one pollfd slot with the given descriptor and event mask.
 * Callers use fd == -1, events == 0 to mark a slot as unused.
 * NOTE(review): this view is elided — the statement storing `fd` into
 * poll_fd->fd (and the function's braces/return type) is not visible here. */
9 POLL_INIT_FD (mono_pollfd *poll_fd, gint fd, gint events)
12 poll_fd->events = events;
/* Allocate the initial pollfd table (POLL_NEVENTS slots, zeroed).
 * Slot 0 is reserved for the wakeup pipe, watched for MONO_POLLIN;
 * every other slot is marked unused (fd == -1, no events).
 * NOTE(review): elided view — the declaration of `i`, the braces, and any
 * return value are not visible in this chunk. */
17 poll_init (gint wakeup_pipe_fd)
22 poll_fds_capacity = POLL_NEVENTS;
23 poll_fds = g_new0 (mono_pollfd, poll_fds_capacity);
25 POLL_INIT_FD (poll_fds, wakeup_pipe_fd, MONO_POLLIN);
26 for (i = 1; i < poll_fds_capacity; ++i)
27 POLL_INIT_FD (poll_fds + i, -1, 0);
/* After mono_poll failed with EBADF, probe each descriptor individually
 * (mono_poll on a single fd with zero timeout) to find the bad one(s) and
 * flag them with MONO_POLLNVAL in revents, so they surface as error events.
 * Unused slots (fd == -1) are skipped.
 * NOTE(review): the parameters intentionally shadow the file-scope
 * poll_fds/poll_fds_size globals — callers pass the globals in
 * (see poll_event_wait).
 * NOTE(review): elided view — the errno/EBADF branch, the #else pairing for
 * the Windows WSAGetLastError path, the ready-count bookkeeping, and the
 * return statement are not visible here; the WSAEBADF check below is
 * presumably inside the HOST_WIN32 branch of the missing #else. Confirm
 * against the full file. */
39 poll_mark_bad_fds (mono_pollfd *poll_fds, gint poll_fds_size)
46 for (i = 0; i < poll_fds_size; i++) {
47 poll_fd = poll_fds + i;
48 if (poll_fd->fd == -1)
51 ret = mono_poll (poll_fd, 1, 0);
55 #if !defined(HOST_WIN32)
58 if (WSAGetLastError () == WSAEBADF)
61 poll_fd->revents |= MONO_POLLNVAL;
/* Register (or re-register) `fd` with the given event mask.
 * Scans occupied slots [1, poll_fds_size) for an existing entry for `fd`
 * (slot 0 is the wakeup pipe and is never scanned); otherwise takes the
 * first free slot (fd == -1) in [1, poll_fds_capacity). If the table is
 * full, grows it by POLL_NEVENTS slots and marks the new tail unused.
 * Finally writes the slot and bumps poll_fds_size when the chosen slot
 * extends the used range.
 * NOTE(review): elided view — the `break`s that set `found`/`j`, the use of
 * the `is_new` flag, the declarations of j/k, and the braces are not
 * visible in this chunk. */
71 poll_update_add (gint fd, gint events, gboolean is_new)
73 gboolean found = FALSE;
76 for (j = 1; j < poll_fds_size; ++j) {
77 mono_pollfd *poll_fd = poll_fds + j;
78 if (poll_fd->fd == fd) {
85 for (j = 1; j < poll_fds_capacity; ++j) {
86 mono_pollfd *poll_fd = poll_fds + j;
87 if (poll_fd->fd == -1)
92 if (j == poll_fds_capacity) {
93 poll_fds_capacity += POLL_NEVENTS;
94 poll_fds = g_renew (mono_pollfd, poll_fds, poll_fds_capacity);
95 for (k = j; k < poll_fds_capacity; ++k)
96 POLL_INIT_FD (poll_fds + k, -1, 0);
99 POLL_INIT_FD (poll_fds + j, fd, events);
101 if (j >= poll_fds_size)
102 poll_fds_size = j + 1;
/* Block in mono_poll (infinite timeout) over the whole used table and
 * return the number of ready descriptors.
 * Error handling (per the original comment): only EINTR and EBADF are
 * acted on — EINTR triggers a thread-interruption check, EBADF rescans
 * the table via poll_mark_bad_fds to flag the bad descriptor(s); anything
 * else is logged. The #if !defined(HOST_WIN32) blocks select errno vs.
 * WSAGetLastError as the error source per platform.
 * NOTE(review): elided view — the switch/case labels, the matching #else
 * branches for the preprocessor conditionals, and the return statement
 * are not visible in this chunk; the WSAGetLastError switch at line 128
 * is presumably in the HOST_WIN32 branch of a missing #else. */
106 poll_event_wait (void)
110 ready = mono_poll (poll_fds, poll_fds_size, -1);
113 * Apart from EINTR, we only check EBADF, for the rest:
114 * EINVAL: mono_poll() 'protects' us from descriptor
115 * numbers above the limit if using select() by marking
116 * then as MONO_POLLERR. If a system poll() is being
117 * used, the number of descriptor we're passing will not
118 * be over sysconf(_SC_OPEN_MAX), as the error would have
119 * happened when opening.
121 * EFAULT: we own the memory pointed by pfds.
122 * ENOMEM: we're doomed anyway
125 #if !defined(HOST_WIN32)
128 switch (WSAGetLastError ())
131 #if !defined(HOST_WIN32)
136 mono_thread_internal_check_for_interruption_critical (mono_thread_internal_current ());
139 #if !defined(HOST_WIN32)
144 ready = poll_mark_bad_fds (poll_fds, poll_fds_size);
147 #if !defined(HOST_WIN32)
148 g_warning ("poll_event_wait: mono_poll () failed, error (%d) %s", errno, g_strerror (errno));
150 g_warning ("poll_event_wait: mono_poll () failed, error (%d)\n", WSAGetLastError ());
/* Report the descriptor and readiness at slot `i` after a wait.
 * Collapses revents into the caller's two-bit view: MONO_POLLIN if any of
 * IN/ERR/HUP/NVAL fired, MONO_POLLOUT if any of OUT/ERR/HUP/NVAL fired —
 * i.e. error conditions are surfaced as both readable and writable so both
 * directions get serviced. Returns -1 when nothing happened on the slot so
 * the caller discards it, otherwise the slot's fd. */
160 poll_event_get_fd_at (gint i, gint *events)
164 *events = ((poll_fds [i].revents & (MONO_POLLIN | MONO_POLLERR | MONO_POLLHUP | MONO_POLLNVAL)) ? MONO_POLLIN : 0)
165 | ((poll_fds [i].revents & (MONO_POLLOUT | MONO_POLLERR | MONO_POLLHUP | MONO_POLLNVAL)) ? MONO_POLLOUT : 0);
167 /* if nothing happened on the fd, then just return
168 * an invalid fd number so it is discarded */
169 return poll_fds [i].revents == 0 ? -1 : poll_fds [i].fd;
/* Upper bound (exclusive) for slot indices the caller may pass to
 * poll_event_get_fd_at / poll_event_reset_fd_at: the used table size. */
173 poll_event_get_fd_max (void)
175 return poll_fds_size;
/* Re-arm slot `i` with a new event mask after its event was consumed.
 * Precondition (asserted): the slot is occupied and actually fired.
 * Passing events == 0 frees the slot by resetting its fd to -1;
 * otherwise the fd is kept and only the interest mask is replaced. */
179 poll_event_reset_fd_at (gint i, gint events)
181 g_assert (poll_fds [i].fd != -1);
182 g_assert (poll_fds [i].revents != 0);
184 POLL_INIT_FD (&poll_fds [i], events == 0 ? -1 : poll_fds [i].fd, events);
/* Vtable wiring this poll()-based implementation into the generic
 * ThreadPoolIOBackend interface, using designated initializers.
 * NOTE(review): elided view — the .init member (line 188, presumably
 * poll_init), the closing brace, and any other members are not visible
 * in this chunk. */
187 static ThreadPoolIOBackend backend_poll = {
189 .cleanup = poll_cleanup,
190 .update_add = poll_update_add,
191 .event_wait = poll_event_wait,
192 .event_get_fd_max = poll_event_get_fd_max,
193 .event_get_fd_at = poll_event_get_fd_at,
194 .event_reset_fd_at = poll_event_reset_fd_at,