/*
 * Select the platform descriptor type used with poll(2)/WSAPoll:
 * `struct pollfd` on POSIX systems, `WSAPOLLFD` on Windows.
 * NOTE(review): this listing is truncated — the #include lines, the
 * HAVE_POLL_H-branch typedef, and the closing #else/#endif of this
 * conditional are not visible here; confirm against the full file.
 */
4 #if defined(HAVE_POLL_H)
6 #elif defined(HAVE_SYS_POLL_H)
10 typedef struct pollfd mono_pollfd;
12 #elif defined(HOST_WIN32)
16 typedef WSAPOLLFD mono_pollfd;
19 /* poll is not defined */
/*
 * Backend state: a growable array of descriptors handed to poll().
 * poll_fds_capacity is the allocated slot count, poll_fds_size the
 * number of slots currently in use. Slots freed by poll_remove_fd are
 * marked with fd == -1 and reused by poll_register_fd (see below).
 * NOTE(review): no locking is visible in this listing — presumably the
 * caller serializes access; confirm against the enclosing module.
 */
23 static mono_pollfd *poll_fds;
24 static guint poll_fds_capacity;
25 static guint poll_fds_size;
/*
 * Initialize a single pollfd slot with the given descriptor and the
 * poll event mask to watch for.
 * NOTE(review): the body is truncated in this listing — only the
 * `events` assignment is visible; the fd (and likely revents)
 * assignments are missing here. Confirm against the full file.
 */
28 POLL_INIT_FD (mono_pollfd *poll_fd, gint fd, gint events)
31 poll_fd->events = events;
/*
 * Backend init: allocate the initial zeroed poll_fds array (capacity
 * 64) and install the wakeup pipe's read end in slot 0, watched for
 * POLLIN, so other threads can interrupt a blocking poll() by writing
 * to the pipe.
 * NOTE(review): truncated here — the return type/statement and the
 * poll_fds_size initialization are not visible in this listing.
 */
36 poll_init (gint wakeup_pipe_fd)
/* the wakeup pipe must already be a valid descriptor */
38 g_assert (wakeup_pipe_fd >= 0);
41 poll_fds_capacity = 64;
43 poll_fds = g_new0 (mono_pollfd, poll_fds_capacity);
45 POLL_INIT_FD (&poll_fds [0], wakeup_pipe_fd, POLLIN);
/*
 * Register (or re-register) a descriptor with the poll set.
 * `events` is a mask of EVENT_IN/EVENT_OUT which is translated to
 * POLLIN/POLLOUT. Resolution order, as visible below:
 *   1. if fd is already present, its slot is re-initialized in place;
 *   2. otherwise a slot previously freed (fd == -1) is reused;
 *   3. otherwise the array grows (capacity doubling via g_renew) and
 *      the fd is appended at the end.
 * NOTE(review): truncated listing — local declarations (i, poll_event),
 * the poll_event initialization, POLLIN |= branch body, `return`s after
 * the in-place updates, the poll_fds_size increment, and the use of
 * `is_new` are not visible here. Confirm against the full file.
 */
57 poll_register_fd (gint fd, gint events, gboolean is_new)
/* size must never have outrun capacity */
63 g_assert (poll_fds_size <= poll_fds_capacity);
/* only EVENT_IN / EVENT_OUT may be requested */
65 g_assert ((events & ~(EVENT_IN | EVENT_OUT)) == 0);
/* translate backend-neutral EVENT_* flags into poll(2) event bits */
68 if (events & EVENT_IN)
70 if (events & EVENT_OUT)
71 poll_event |= POLLOUT;
/* case 1: fd already registered — update its event mask in place */
73 for (i = 0; i < poll_fds_size; ++i) {
74 if (poll_fds [i].fd == fd) {
76 POLL_INIT_FD (&poll_fds [i], fd, poll_event);
/* case 2: reuse a slot freed by poll_remove_fd (marked fd == -1) */
83 for (i = 0; i < poll_fds_size; ++i) {
84 if (poll_fds [i].fd == -1) {
85 POLL_INIT_FD (&poll_fds [i], fd, poll_event);
/* case 3: no free slot — grow geometrically and append */
92 if (poll_fds_size > poll_fds_capacity) {
93 poll_fds_capacity *= 2;
94 g_assert (poll_fds_size <= poll_fds_capacity);
96 poll_fds = g_renew (mono_pollfd, poll_fds, poll_fds_capacity);
99 POLL_INIT_FD (&poll_fds [poll_fds_size - 1], fd, poll_event);
/*
 * Remove a descriptor from the poll set: its slot is marked free
 * (fd = -1, no events) rather than compacted, so indices of other
 * registered fds stay stable. Asserts catch double-remove and
 * double-add, then poll_fds_size is trimmed while trailing slots are
 * free (slot 0 — the wakeup pipe — is always kept).
 * NOTE(review): truncated listing — local declaration of `i`, the
 * `break` after the slot is cleared, and the `poll_fds_size -= 1` loop
 * body are not visible here. Confirm against the full file.
 */
103 poll_remove_fd (gint fd)
/* find the fd's slot and mark it unused */
109 for (i = 0; i < poll_fds_size; ++i) {
110 if (poll_fds [i].fd == fd) {
111 POLL_INIT_FD (&poll_fds [i], -1, 0);
116 /* if we don't find the fd in poll_fds,
117 * it means we try to delete it twice */
118 g_assert (i < poll_fds_size);
120 /* if we find it again, it means we added
 * it twice */
122 for (; i < poll_fds_size; ++i)
123 g_assert (poll_fds [i].fd != fd);
125 /* reduce the value of poll_fds_size so we
126 * do not keep it too big */
127 while (poll_fds_size > 1 && poll_fds [poll_fds_size - 1].fd == -1)
/*
 * Block in poll()/WSAPoll() (infinite timeout) until at least one
 * registered fd is ready, then invoke `callback` once per ready fd with
 * a backend-neutral EVENT_IN/EVENT_OUT/... mask derived from revents.
 * The thread is flagged with mono_gc_set_skip_thread(TRUE) around the
 * blocking call so the GC does not wait on it while it sleeps in the
 * kernel.
 * NOTE(review): heavily truncated listing — local declarations
 * (i, ready, fd, events), the #else/#endif lines pairing the
 * HOST_WIN32 conditionals, the errno switch cases (EINTR/EBADF paths),
 * `continue` statements, the EVENT_* accumulation statements, and the
 * return statements are not visible here. Confirm against the full file.
 */
132 poll_event_wait (void (*callback) (gint fd, gint events, gpointer user_data), gpointer user_data)
/* clear stale results from the previous wait */
136 for (i = 0; i < poll_fds_size; ++i)
137 poll_fds [i].revents = 0;
/* let the GC skip this thread while it blocks in the kernel */
139 mono_gc_set_skip_thread (TRUE);
141 #if !defined(HOST_WIN32)
142 ready = poll (poll_fds, poll_fds_size, -1);
144 ready = WSAPoll(poll_fds, poll_fds_size, -1);
/* normalize Windows' SOCKET_ERROR to the POSIX-style -1 convention */
145 if (ready == SOCKET_ERROR)
149 mono_gc_set_skip_thread (FALSE);
/* error-handling rationale (original comment, opening/closing
 * delimiters lost to truncation):
153 * Apart from EINTR, we only check EBADF, for the rest:
154 * EINVAL: mono_poll() 'protects' us from descriptor
155 * numbers above the limit if using select() by marking
156 * then as POLLERR. If a system poll() is being
157 * used, the number of descriptor we're passing will not
158 * be over sysconf(_SC_OPEN_MAX), as the error would have
159 * happened when opening.
161 * EFAULT: we own the memory pointed by pfds.
162 * ENOMEM: we're doomed anyway
 */
165 #if !defined(HOST_WIN32)
168 switch (WSAGetLastError ())
171 #if !defined(HOST_WIN32)
/* presumably the EINTR/WSAEINTR case: give the runtime a chance to
 * deliver a pending thread interruption — TODO confirm */
177 mono_thread_internal_check_for_interruption_critical (mono_thread_internal_current ());
/* unrecoverable poll failure: abort with the OS error */
182 #if !defined(HOST_WIN32)
183 g_error ("poll_event_wait: mono_poll () failed, error (%d) %s", errno, g_strerror (errno));
185 g_error ("poll_event_wait: mono_poll () failed, error (%d)\n", WSAGetLastError ());
196 g_assert (ready > 0);
/* dispatch one callback per fd that reported events */
198 for (i = 0; i < poll_fds_size; ++i) {
/* skip freed slots and fds with nothing to report */
201 if (poll_fds [i].fd == -1)
203 if (poll_fds [i].revents == 0)
206 fd = poll_fds [i].fd;
/* error-ish conditions (ERR/HUP/NVAL) are surfaced on both the IN and
 * OUT masks so waiters in either direction get woken */
207 if (poll_fds [i].revents & (POLLIN | POLLERR | POLLHUP | POLLNVAL))
209 if (poll_fds [i].revents & (POLLOUT | POLLERR | POLLHUP | POLLNVAL))
211 if (poll_fds [i].revents & (POLLERR | POLLHUP | POLLNVAL))
214 callback (fd, events, user_data);
/*
 * poll(2)-based ThreadPoolIOBackend vtable wiring the functions above.
 * NOTE(review): truncated — the .init member (presumably poll_init)
 * and the closing brace of this initializer are not visible here.
 */
223 static ThreadPoolIOBackend backend_poll = {
225 .cleanup = poll_cleanup,
226 .register_fd = poll_register_fd,
227 .remove_fd = poll_remove_fd,
228 .event_wait = poll_event_wait,