/*
 * File-scope state for the poll()-based threadpool I/O backend.
 * NOTE(review): this listing is an excerpt with original line numbers
 * embedded; several interior lines of the file are omitted.
 */
2 #define POLL_NEVENTS 1024 /* initial capacity of poll_fds and the growth increment used by poll_update_add */
4 static mono_pollfd *poll_fds; /* slot array; slot 0 holds the wakeup pipe, free slots have fd == -1 */
5 static guint poll_fds_capacity; /* allocated length of poll_fds */
6 static guint poll_fds_size; /* 1 + index of the highest slot in use (see poll_update_add) */
/*
 * (Re)initialize one pollfd slot for descriptor `fd`, watching `events`.
 * NOTE(review): the surrounding lines (return type, braces, and the
 * assignments other than `events` — presumably fd and revents) are not
 * visible in this excerpt; confirm against the full source.
 */
9 POLL_INIT_FD (mono_pollfd *poll_fd, gint fd, gint events)
12 poll_fd->events = events;
/*
 * One-time initialization of the poll backend: allocate a zeroed array
 * of POLL_NEVENTS slots, put the wakeup pipe in slot 0 (watching for
 * input), and mark every other slot free (fd == -1).
 */
17 poll_init (gint wakeup_pipe_fd)
22 poll_fds_capacity = POLL_NEVENTS;
23 poll_fds = g_new0 (mono_pollfd, poll_fds_capacity);
25 POLL_INIT_FD (poll_fds, wakeup_pipe_fd, MONO_POLLIN); /* slot 0 is reserved for the wakeup pipe */
26 for (i = 1; i < poll_fds_capacity; ++i)
27 POLL_INIT_FD (poll_fds + i, -1, 0); /* remaining slots start out free */
/*
 * After a failed poll, probe each registered descriptor individually
 * (mono_poll on a single slot with zero timeout) and flag descriptors
 * the OS rejects by setting MONO_POLLNVAL in revents.  Free slots
 * (fd == -1) are skipped.  The parameters deliberately shadow the
 * file-scope globals of the same names.
 * NOTE(review): this excerpt shows a WSAGetLastError ()/WSAEBADF check
 * directly under a !HOST_WIN32 guard; the omitted lines presumably
 * carry the errno == EBADF branch and the matching #else — confirm
 * against the full source.
 */
39 poll_mark_bad_fds (mono_pollfd *poll_fds, gint poll_fds_size)
46 for (i = 0; i < poll_fds_size; i++) {
47 poll_fd = poll_fds + i;
48 if (poll_fd->fd == -1)
51 ret = mono_poll (poll_fd, 1, 0); /* probe this single fd without blocking */
55 #if !defined(HOST_WIN32)
58 if (WSAGetLastError () == WSAEBADF)
61 poll_fd->revents |= MONO_POLLNVAL;
/*
 * Register interest in update->fd.  Pass 1 scans the occupied slots
 * [1, poll_fds_size) for an existing entry for this fd; pass 2 scans
 * [1, poll_fds_capacity) for a free slot (fd == -1), growing the array
 * by POLL_NEVENTS and marking the new tail slots free when none is
 * available.  The chosen slot j is then (re)initialized with the
 * update's fd/events and poll_fds_size is extended to cover it.  Slot 0
 * (the wakeup pipe) is never considered.
 * NOTE(review): the statements that terminate the two scan loops (and
 * the use of `found`) are omitted from this excerpt; confirm against
 * the full source.
 */
71 poll_update_add (ThreadPoolIOUpdate *update)
73 gboolean found = FALSE;
76 for (j = 1; j < poll_fds_size; ++j) { /* pass 1: is this fd already registered? */
77 mono_pollfd *poll_fd = poll_fds + j;
78 if (poll_fd->fd == update->fd) {
85 for (j = 1; j < poll_fds_capacity; ++j) { /* pass 2: find the first free slot */
86 mono_pollfd *poll_fd = poll_fds + j;
87 if (poll_fd->fd == -1)
92 if (j == poll_fds_capacity) { /* no free slot: grow by one chunk */
93 poll_fds_capacity += POLL_NEVENTS;
94 poll_fds = g_renew (mono_pollfd, poll_fds, poll_fds_capacity);
95 for (k = j; k < poll_fds_capacity; ++k)
96 POLL_INIT_FD (poll_fds + k, -1, 0); /* newly added tail slots start free */
99 POLL_INIT_FD (poll_fds + j, update->fd, update->events);
101 if (j >= poll_fds_size)
102 poll_fds_size = j + 1; /* size tracks 1 + highest slot in use */
/*
 * Block until at least one registered descriptor is ready (infinite
 * timeout).  On failure: an interruption request runs
 * check_for_interruption_critical (), EBADF re-validates the whole set
 * via poll_mark_bad_fds (), and any other error is logged via
 * g_warning.  NOTE(review): the switch case labels, the #else branches
 * of the HOST_WIN32 guards, and the return statement are omitted from
 * this excerpt — confirm the exact error dispatch and the return value
 * against the full source.
 */
106 poll_event_wait (void)
110 ready = mono_poll (poll_fds, poll_fds_size, -1); /* timeout -1: wait indefinitely */
113 * Apart from EINTR, we only check EBADF, for the rest:
114 * EINVAL: mono_poll() 'protects' us from descriptor
115 * numbers above the limit if using select() by marking
116 * them as MONO_POLLERR. If a system poll() is being
117 * used, the number of descriptors we're passing will not
118 * be over sysconf(_SC_OPEN_MAX), as the error would have
119 * happened when opening.
121 * EFAULT: we own the memory pointed by pfds.
122 * ENOMEM: we're doomed anyway
125 #if !defined(HOST_WIN32)
128 switch (WSAGetLastError ())
131 #if !defined(HOST_WIN32)
136 check_for_interruption_critical ();
139 #if !defined(HOST_WIN32)
144 ready = poll_mark_bad_fds (poll_fds, poll_fds_size);
147 #if !defined(HOST_WIN32)
148 g_warning ("poll_event_wait: mono_poll () failed, error (%d) %s", errno, g_strerror (errno));
150 g_warning ("poll_event_wait: mono_poll () failed, error (%d)\n", WSAGetLastError ());
/* Accessor: the descriptor stored in poll slot i (-1 if the slot is free). */
160 poll_event_fd_at (guint i)
162 return poll_fds [i].fd;
/* Upper bound for event iteration: number of poll slots currently in use. */
166 poll_event_max (void)
168 return poll_fds_size;
/*
 * Dispatch the events reported on slot i for descriptor fd.  When the
 * slot reports readable (or error/hangup/invalid), the pending
 * MONO_POLLIN sockares is taken from *list and enqueued as a work item
 * in its domain; likewise MONO_POLLOUT for writable.  Afterwards the
 * slot is either rearmed with the events still wanted by the remaining
 * list entries, or released (fd = -1, events = 0).
 * NOTE(review): several lines (guard bodies, any NULL checks on
 * io_event, and the if/else between original lines 198 and 200) are
 * omitted from this excerpt — confirm the exact rearm-vs-release
 * condition in the full source.
 */
172 poll_event_create_sockares_at (guint i, gint fd, MonoMList **list)
174 mono_pollfd *poll_fd;
178 poll_fd = &poll_fds [i];
181 g_assert (fd == poll_fd->fd); /* slot must still belong to this fd */
183 if (fd == -1 || poll_fd->revents == 0)
186 if (*list && (poll_fd->revents & (MONO_POLLIN | MONO_POLLERR | MONO_POLLHUP | MONO_POLLNVAL)) != 0) {
187 MonoSocketAsyncResult *io_event = get_sockares_for_event (list, MONO_POLLIN);
189 mono_threadpool_ms_enqueue_work_item (((MonoObject*) io_event)->vtable->domain, (MonoObject*) io_event);
191 if (*list && (poll_fd->revents & (MONO_POLLOUT | MONO_POLLERR | MONO_POLLHUP | MONO_POLLNVAL)) != 0) {
192 MonoSocketAsyncResult *io_event = get_sockares_for_event (list, MONO_POLLOUT);
194 mono_threadpool_ms_enqueue_work_item (((MonoObject*) io_event)->vtable->domain, (MonoObject*) io_event);
198 poll_fd->events = get_events (*list); /* rearm with the events still wanted */
200 POLL_INIT_FD (poll_fd, -1, 0); /* release the slot */
/*
 * Vtable plugging this poll implementation into the generic threadpool
 * I/O layer (designated initializers).  NOTE(review): the excerpt omits
 * at least one member (original line 206 — presumably .init) and the
 * closing brace of the initializer.
 */
205 static ThreadPoolIOBackend backend_poll = {
207 .cleanup = poll_cleanup,
208 .update_add = poll_update_add,
209 .event_wait = poll_event_wait,
210 .event_max = poll_event_max,
211 .event_fd_at = poll_event_fd_at,
212 .event_create_sockares_at = poll_event_create_sockares_at,