2 * handles.c: Generic and internal operations on handles
5 * Dick Porter (dick@ximian.com)
7 * (C) 2002-2011 Novell, Inc.
8 * Copyright 2011 Xamarin Inc
18 #include <sys/types.h>
19 #ifdef HAVE_SYS_SOCKET_H
20 # include <sys/socket.h>
25 #ifdef HAVE_SYS_MMAN_H
26 # include <sys/mman.h>
33 #include <mono/io-layer/wapi.h>
34 #include <mono/io-layer/wapi-private.h>
35 #include <mono/io-layer/handles-private.h>
36 #include <mono/io-layer/misc-private.h>
37 #include <mono/io-layer/shared.h>
38 #include <mono/io-layer/collection.h>
39 #include <mono/io-layer/process-private.h>
40 #include <mono/io-layer/critical-section-private.h>
42 #include <mono/utils/mono-mutex.h>
46 #define DEBUG(...) g_message(__VA_ARGS__)
/* Forward declaration: fetch the per-type close callback (defined below). */
51 static void (*_wapi_handle_ops_get_close_func (WapiHandleType type))(gpointer, gpointer);
/* Per-type capability bits, registered via _wapi_handle_register_capabilities. */
53 static WapiHandleCapability handle_caps[WAPI_HANDLE_COUNT]={0};
/* Dispatch table of per-type operations (close/signal/own/...); indexed by WapiHandleType. */
54 static struct _WapiHandleOps *handle_ops[WAPI_HANDLE_COUNT]={
62 #ifndef DISABLE_SOCKETS
68 &_wapi_namedmutex_ops,
70 &_wapi_namedevent_ops,
/* Debug pretty-printers, one per handle type (NULL = nothing useful to show). */
73 static void _wapi_shared_details (gpointer handle_info);
75 static void (*handle_details[WAPI_HANDLE_COUNT])(gpointer) = {
78 _wapi_console_details,
79 _wapi_shared_details, /* thread */
83 NULL, /* Nothing useful to see in a socket handle */
84 NULL, /* Nothing useful to see in a find handle */
85 _wapi_shared_details, /* process */
87 _wapi_shared_details, /* namedmutex */
88 _wapi_shared_details, /* namedsem */
89 _wapi_shared_details, /* namedevent */
/* Human-readable type names, used by the DEBUG traces throughout this file. */
92 const char *_wapi_handle_typename[] = {
111 * We can hold _WAPI_PRIVATE_MAX_SLOTS * _WAPI_HANDLE_INITIAL_COUNT handles.
112 * If 4M handles are not enough... Oh, well... we will crash.
/* Map a flat handle index to (slot, offset) in the two-level private-handle array. */
114 #define SLOT_INDEX(x) (x / _WAPI_HANDLE_INITIAL_COUNT)
115 #define SLOT_OFFSET(x) (x % _WAPI_HANDLE_INITIAL_COUNT)
/* Two-level array of process-private handle data; slots are allocated lazily. */
117 struct _WapiHandleUnshared *_wapi_private_handles [_WAPI_PRIVATE_MAX_SLOTS];
118 static guint32 _wapi_private_handle_count = 0;
119 static guint32 _wapi_private_handle_slot_count = 0;
/* Cross-process shared handle table, attached via _wapi_shm_attach (WAPI_SHM_DATA). */
121 struct _WapiHandleSharedLayout *_wapi_shared_layout = NULL;
124 * If SHM is enabled, this will point to shared memory, otherwise it will be NULL.
126 struct _WapiFileShareLayout *_wapi_fileshare_layout = NULL;
129 * If SHM is disabled, this will point to a hash of _WapiFileShare structures, otherwise
130 * it will be NULL. We use this instead of _wapi_fileshare_layout to avoid allocating a
133 static GHashTable *file_share_hash;
134 static CRITICAL_SECTION file_share_hash_mutex;
136 #define file_share_hash_lock() EnterCriticalSection (&file_share_hash_mutex)
137 #define file_share_hash_unlock() LeaveCriticalSection (&file_share_hash_mutex)
/* Number of low handle indices reserved so fds can map 1:1 onto handles. */
139 guint32 _wapi_fd_reserve;
142 * This is an internal handle which is used for handling waiting for multiple handles.
143 * Threads which wait for multiple handles wait on this one handle, and when a handle
144 * is signalled, this handle is signalled too.
146 static gpointer _wapi_global_signal_handle;
148 /* Point to the mutex/cond inside _wapi_global_signal_handle */
149 mono_mutex_t *_wapi_global_signal_mutex;
150 pthread_cond_t *_wapi_global_signal_cond;
/* Set once at shutdown; asserted FALSE on every handle-creating path. */
153 gboolean _wapi_has_shut_down = FALSE;
155 /* Use this instead of getpid(), to cope with linuxthreads. It's a
156 * function rather than a variable lookup because we need to get at
157 * this before share_init() might have been called.
159 static pid_t _wapi_pid;
160 static mono_once_t pid_init_once = MONO_ONCE_INIT;
/* Forward declaration; the "full" variant can ignore EBUSY on private mutexes at exit. */
162 static void _wapi_handle_unref_full (gpointer handle, gboolean ignore_private_busy_handles);
162 static void _wapi_handle_unref_full (gpointer handle, gboolean ignore_private_busy_handles);
/* One-time initializer (run via mono_once): cache the process id. */
164 static void pid_init (void)
166 _wapi_pid = getpid ();
/* Return the cached pid, initializing it exactly once via mono_once.
 * Used instead of getpid() directly (see comment above pid_init). */
169 pid_t _wapi_getpid (void)
171 mono_once (&pid_init_once, pid_init);
/* Protects the _wapi_private_handles array during scans and (re)allocation. */
177 static mono_mutex_t scan_mutex;
/* atexit handler (registered in the init function below): forcibly unref
 * every remaining handle, tear down shared memory and the file-share hash,
 * and free the private handle slots. */
179 static void handle_cleanup (void)
183 	/* Every shared handle we were using ought really to be closed
184 	 * by now, but to make sure just blow them all away. The
185 	 * exiting finalizer thread in particular races us to the
186 	 * program exit and doesn't always win, so it can be left
187 	 * cluttering up the shared file. Anything else left over is
/* Walk every allocated slot and every handle within it. */
190 for(i = SLOT_INDEX (0); _wapi_private_handles[i] != NULL; i++) {
191 for(j = SLOT_OFFSET (0); j < _WAPI_HANDLE_INITIAL_COUNT; j++) {
192 struct _WapiHandleUnshared *handle_data = &_wapi_private_handles[i][j];
193 int type = handle_data->type;
/* The handle "pointer" is just the flat index re-cast. */
194 gpointer handle = GINT_TO_POINTER (i*_WAPI_HANDLE_INITIAL_COUNT+j);
196 if (_WAPI_SHARED_HANDLE (type)) {
197 if (type == WAPI_HANDLE_THREAD) {
198 /* Special-case thread handles
199 * because they need extra
200 * cleanup. This also avoids
201 * a race condition between
202 * the application exit and
203 * the finalizer thread - if
204 * it finishes up between now
205 * and actual app termination
206 * it will find all its handle
207 * details have been blown
208 * away, so this sets those
211 g_assert (0); /*This condition is freaking impossible*/
212 _wapi_thread_set_termination_details (handle, 0);
/* Drop every outstanding reference; TRUE = tolerate EBUSY on private mutexes. */
216 for(k = handle_data->ref; k > 0; k--) {
217 DEBUG ("%s: unreffing %s handle %p", __func__, _wapi_handle_typename[type], handle);
219 _wapi_handle_unref_full (handle, TRUE);
/* Tear down cross-process semaphores and detach both shared segments. */
224 _wapi_shm_semaphores_remove ();
226 _wapi_shm_detach (WAPI_SHM_DATA);
227 _wapi_shm_detach (WAPI_SHM_FILESHARE);
229 if (file_share_hash) {
230 g_hash_table_destroy (file_share_hash);
231 DeleteCriticalSection (&file_share_hash_mutex);
/* Free every private slot; g_free(NULL) is a no-op for unallocated ones. */
234 for (i = 0; i < _WAPI_PRIVATE_MAX_SLOTS; ++i)
235 g_free (_wapi_private_handles [i]);
241 * Initialize the io-layer.
/* NOTE(review): the function signature line is not visible in this chunk;
 * presumably this is the io-layer init entry point — confirm against the
 * full file. */
/* Sanity check: the ops table must have one entry per handle type. */
246 g_assert ((sizeof (handle_ops) / sizeof (handle_ops[0]))
247 == WAPI_HANDLE_COUNT);
/* Reserve low handle indices for 1:1 fd mapping. */
249 _wapi_fd_reserve = getdtablesize();
251 /* This is needed by the code in _wapi_handle_new_internal */
/* Round the reserve up to a whole number of slots. */
252 _wapi_fd_reserve = (_wapi_fd_reserve + (_WAPI_HANDLE_INITIAL_COUNT - 1)) & ~(_WAPI_HANDLE_INITIAL_COUNT - 1);
256 * The entries in _wapi_private_handles reserved for fds are allocated lazily to
/* Allocate enough zeroed slots to cover the fd-reserved range. */
260 _wapi_private_handles [idx++] = g_new0 (struct _WapiHandleUnshared,
261 _WAPI_HANDLE_INITIAL_COUNT);
264 _wapi_private_handle_count += _WAPI_HANDLE_INITIAL_COUNT;
265 _wapi_private_handle_slot_count ++;
266 } while(_wapi_fd_reserve > _wapi_private_handle_count);
/* Bring up cross-process semaphores and attach the shared handle table. */
268 _wapi_shm_semaphores_init ();
270 _wapi_shared_layout = _wapi_shm_attach (WAPI_SHM_DATA);
271 g_assert (_wapi_shared_layout != NULL);
273 if (_wapi_shm_enabled ()) {
274 /* This allocates a 4mb array, so do it only if SHM is enabled */
275 _wapi_fileshare_layout = _wapi_shm_attach (WAPI_SHM_FILESHARE);
276 g_assert (_wapi_fileshare_layout != NULL);
279 #if !defined (DISABLE_SHARED_HANDLES)
280 if (_wapi_shm_enabled ())
281 _wapi_collection_init ();
284 mono_mutex_init (&scan_mutex);
/* Create the single internal event used for multi-handle waits, and cache
 * pointers to its cond/mutex for the wait code. */
286 _wapi_global_signal_handle = _wapi_handle_new (WAPI_HANDLE_EVENT, NULL);
288 _wapi_global_signal_cond = &_WAPI_PRIVATE_HANDLES (GPOINTER_TO_UINT (_wapi_global_signal_handle)).signal_cond;
289 _wapi_global_signal_mutex = &_WAPI_PRIVATE_HANDLES (GPOINTER_TO_UINT (_wapi_global_signal_handle)).signal_mutex;
292 /* Using g_atexit here instead of an explicit function call in
293 * a cleanup routine lets us cope when a third-party library
294 * calls exit (eg if an X client loses the connection to its
297 g_atexit (handle_cleanup);
/* Shutdown path: mark the io-layer as shut down (must only happen once)
 * and clean up the error and thread subsystems.
 * NOTE(review): the signature line is not visible in this chunk — confirm
 * the function name against the full file. */
303 g_assert (_wapi_has_shut_down == FALSE);
305 _wapi_has_shut_down = TRUE;
307 _wapi_error_cleanup ();
308 _wapi_thread_cleanup ();
/* Initialize a freshly-claimed shared-table entry: stamp it with the
 * current time (collection uses timestamps to expire stale entries),
 * mark it unsignalled with one reference, and copy in any type-specific
 * payload. */
311 static void _wapi_handle_init_shared (struct _WapiHandleShared *handle,
313 gpointer handle_specific)
315 g_assert (_wapi_has_shut_down == FALSE);
/* Truncate time() to 32 bits; only used for relative staleness checks. */
318 handle->timestamp = (guint32)(time (NULL) & 0xFFFFFFFF);
319 handle->signalled = FALSE;
320 handle->handle_refs = 1;
322 if (handle_specific != NULL) {
323 memcpy (&handle->u, handle_specific, sizeof (handle->u));
/* Initialize a private (per-process) handle entry. For non-shared types
 * this also creates the per-handle condvar/mutex pair used for signalling;
 * shared types use the cross-process machinery instead. */
327 static void _wapi_handle_init (struct _WapiHandleUnshared *handle,
328 WapiHandleType type, gpointer handle_specific)
332 g_assert (_wapi_has_shut_down == FALSE);
335 handle->signalled = FALSE;
338 if (!_WAPI_SHARED_HANDLE(type)) {
339 thr_ret = pthread_cond_init (&handle->signal_cond, NULL);
340 g_assert (thr_ret == 0);
342 thr_ret = mono_mutex_init (&handle->signal_mutex);
343 g_assert (thr_ret == 0);
345 if (handle_specific != NULL) {
346 memcpy (&handle->u, handle_specific,
/* Find a free entry in the shared-memory handle table, claim it with a
 * CAS on its type field, and initialize it. Returns the table offset, or
 * falls through when the table is full (caller handles expansion).
 * `last` remembers where the previous allocation ended to speed the scan. */
352 static guint32 _wapi_handle_new_shared (WapiHandleType type,
353 gpointer handle_specific)
356 static guint32 last = 1;
359 g_assert (_wapi_has_shut_down == FALSE);
361 /* Leave the first slot empty as a guard */
363 /* FIXME: expandable array */
364 for(offset = last; offset <_WAPI_HANDLE_INITIAL_COUNT; offset++) {
365 struct _WapiHandleShared *handle = &_wapi_shared_layout->handles[offset];
367 if(handle->type == WAPI_HANDLE_UNUSED) {
368 thr_ret = _wapi_handle_lock_shared_handles ();
369 g_assert (thr_ret == 0);
/* CAS claims the slot atomically against other processes. */
371 if (InterlockedCompareExchange ((gint32 *)&handle->type, type, WAPI_HANDLE_UNUSED) == WAPI_HANDLE_UNUSED) {
374 _wapi_handle_init_shared (handle, type,
377 _wapi_handle_unlock_shared_handles ();
381 /* Someone else beat us to it, just
386 _wapi_handle_unlock_shared_handles ();
391 /* Try again from the beginning */
396 /* Will need to expand the array. The caller will sort it out */
402 * _wapi_handle_new_internal:
403 * @type: Init handle to this type
405 * Search for a free handle and initialize it. Return the handle on
406 * success and 0 on failure. This is only called from
407 * _wapi_handle_new, and scan_mutex must be held.
409 static guint32 _wapi_handle_new_internal (WapiHandleType type,
410 gpointer handle_specific)
/* `last` persists across calls so the scan resumes after the previous
 * allocation; `retry` allows one wrap-around pass. */
413 static guint32 last = 0;
414 gboolean retry = FALSE;
416 g_assert (_wapi_has_shut_down == FALSE);
418 /* A linear scan should be fast enough. Start from the last
419 * allocation, assuming that handles are allocated more often
420 * than they're freed. Leave the space reserved for file
/* Never hand out indices below the fd-reserved range. */
424 if (last < _wapi_fd_reserve) {
425 last = _wapi_fd_reserve;
432 for(i = SLOT_INDEX (count); i < _wapi_private_handle_slot_count; i++) {
433 if (_wapi_private_handles [i]) {
434 for (k = SLOT_OFFSET (count); k < _WAPI_HANDLE_INITIAL_COUNT; k++) {
435 struct _WapiHandleUnshared *handle = &_wapi_private_handles [i][k];
437 if(handle->type == WAPI_HANDLE_UNUSED) {
440 _wapi_handle_init (handle, type, handle_specific);
/* First pass exhausted: wrap around once and rescan from the reserve line. */
448 if(retry && last > _wapi_fd_reserve) {
449 /* Try again from the beginning */
450 last = _wapi_fd_reserve;
454 /* Will need to expand the array. The caller will sort it out */
/* Public allocator for non-fd handles: take scan_mutex, find/init a free
 * private entry (growing the slot array on demand), and for shared types
 * also allocate a shared-table entry and link the private handle to it.
 * Returns _WAPI_HANDLE_INVALID when all slots are exhausted. */
460 _wapi_handle_new (WapiHandleType type, gpointer handle_specific)
462 guint32 handle_idx = 0;
466 g_assert (_wapi_has_shut_down == FALSE);
468 DEBUG ("%s: Creating new handle of type %s", __func__,
469 _wapi_handle_typename[type]);
/* fd-mapped types go through _wapi_handle_new_fd instead. */
471 g_assert(!_WAPI_FD_HANDLE(type));
/* Cleanup handler guarantees scan_mutex is released on thread cancel. */
473 pthread_cleanup_push ((void(*)(void *))mono_mutex_unlock_in_cleanup,
474 (void *)&scan_mutex);
475 thr_ret = mono_mutex_lock (&scan_mutex);
476 g_assert (thr_ret == 0);
478 while ((handle_idx = _wapi_handle_new_internal (type, handle_specific)) == 0) {
479 /* Try and expand the array, and have another go */
480 int idx = SLOT_INDEX (_wapi_private_handle_count);
481 if (idx >= _WAPI_PRIVATE_MAX_SLOTS) {
485 _wapi_private_handles [idx] = g_new0 (struct _WapiHandleUnshared,
486 _WAPI_HANDLE_INITIAL_COUNT);
488 _wapi_private_handle_count += _WAPI_HANDLE_INITIAL_COUNT;
489 _wapi_private_handle_slot_count ++;
492 thr_ret = mono_mutex_unlock (&scan_mutex);
493 g_assert (thr_ret == 0);
494 pthread_cleanup_pop (0);
496 if (handle_idx == 0) {
497 /* We ran out of slots */
498 handle = _WAPI_HANDLE_INVALID;
502 /* Make sure we left the space for fd mappings */
503 g_assert (handle_idx >= _wapi_fd_reserve);
505 handle = GUINT_TO_POINTER (handle_idx);
507 DEBUG ("%s: Allocated new handle %p", __func__, handle);
509 if (_WAPI_SHARED_HANDLE(type)) {
510 /* Add the shared section too */
/* If the shared table is full, run a collection and retry once. */
513 ref = _wapi_handle_new_shared (type, handle_specific);
515 _wapi_handle_collect ();
516 ref = _wapi_handle_new_shared (type, handle_specific);
518 /* FIXME: grow the arrays */
519 handle = _WAPI_HANDLE_INVALID;
/* Record which shared-table offset this private handle refers to. */
524 _WAPI_PRIVATE_HANDLES(handle_idx).u.shared.offset = ref;
525 DEBUG ("%s: New shared handle at offset 0x%x", __func__,
/* Open a private handle onto an existing shared-table entry at @offset.
 * First pass: if this process already has a private handle for that
 * offset, ref and return it. Otherwise validate the shared entry (still
 * in use, right type) under the shared lock, allocate a fresh private
 * entry, link it to @offset and bump the shared refcount.
 * Returns INVALID_HANDLE_VALUE if the entry vanished or is the wrong type. */
533 gpointer _wapi_handle_new_from_offset (WapiHandleType type, guint32 offset,
536 guint32 handle_idx = 0;
537 gpointer handle = INVALID_HANDLE_VALUE;
539 struct _WapiHandleShared *shared;
541 g_assert (_wapi_has_shut_down == FALSE);
543 DEBUG ("%s: Creating new handle of type %s to offset %d", __func__,
544 _wapi_handle_typename[type], offset);
546 g_assert(!_WAPI_FD_HANDLE(type));
547 g_assert(_WAPI_SHARED_HANDLE(type));
/* Offset 0 is the guard slot and never valid. */
548 g_assert(offset != 0);
550 shared = &_wapi_shared_layout->handles[offset];
552 guint32 now = (guint32)(time (NULL) & 0xFFFFFFFF);
553 /* Bump up the timestamp for this offset */
/* Keeps the collector from expiring the entry while we work. */
554 InterlockedExchange ((gint32 *)&shared->timestamp, now);
557 pthread_cleanup_push ((void(*)(void *))mono_mutex_unlock_in_cleanup,
558 (void *)&scan_mutex);
559 thr_ret = mono_mutex_lock (&scan_mutex);
560 g_assert (thr_ret == 0);
/* First pass: look for an existing private handle referencing @offset. */
562 for (i = SLOT_INDEX (0); i < _wapi_private_handle_slot_count; i++) {
563 if (_wapi_private_handles [i]) {
564 for (k = SLOT_OFFSET (0); k < _WAPI_HANDLE_INITIAL_COUNT; k++) {
565 struct _WapiHandleUnshared *handle_data = &_wapi_private_handles [i][k];
567 if (handle_data->type == type &&
568 handle_data->u.shared.offset == offset) {
569 handle = GUINT_TO_POINTER (i * _WAPI_HANDLE_INITIAL_COUNT + k);
570 goto first_pass_done;
577 thr_ret = mono_mutex_unlock (&scan_mutex);
578 g_assert (thr_ret == 0);
579 pthread_cleanup_pop (0);
581 if (handle != INVALID_HANDLE_VALUE) {
582 _wapi_handle_ref (handle);
584 DEBUG ("%s: Returning old handle %p referencing 0x%x",
585 __func__, handle, offset);
589 /* Prevent entries expiring under us as we search */
590 thr_ret = _wapi_handle_lock_shared_handles ();
591 g_assert (thr_ret == 0);
593 if (shared->type == WAPI_HANDLE_UNUSED) {
594 /* Someone deleted this handle while we were working */
595 DEBUG ("%s: Handle at 0x%x unused", __func__, offset);
599 if (shared->type != type) {
600 DEBUG ("%s: Wrong type at %d 0x%x! Found %s wanted %s",
601 __func__, offset, offset,
602 _wapi_handle_typename[shared->type],
603 _wapi_handle_typename[type]);
/* Allocate a new private entry to back this shared handle. */
607 pthread_cleanup_push ((void(*)(void *))mono_mutex_unlock_in_cleanup,
608 (void *)&scan_mutex);
609 thr_ret = mono_mutex_lock (&scan_mutex);
610 g_assert (thr_ret == 0);
612 while ((handle_idx = _wapi_handle_new_internal (type, NULL)) == 0) {
613 /* Try and expand the array, and have another go */
614 int idx = SLOT_INDEX (_wapi_private_handle_count);
615 _wapi_private_handles [idx] = g_new0 (struct _WapiHandleUnshared,
616 _WAPI_HANDLE_INITIAL_COUNT);
618 _wapi_private_handle_count += _WAPI_HANDLE_INITIAL_COUNT;
619 _wapi_private_handle_slot_count ++;
622 thr_ret = mono_mutex_unlock (&scan_mutex);
623 g_assert (thr_ret == 0);
624 pthread_cleanup_pop (0);
626 /* Make sure we left the space for fd mappings */
627 g_assert (handle_idx >= _wapi_fd_reserve);
629 handle = GUINT_TO_POINTER (handle_idx);
/* Link private -> shared and account for the new cross-process reference. */
631 _WAPI_PRIVATE_HANDLES(handle_idx).u.shared.offset = offset;
632 InterlockedIncrement ((gint32 *)&shared->handle_refs);
634 DEBUG ("%s: Allocated new handle %p referencing 0x%x (shared refs %d)", __func__, handle, offset, shared->handle_refs);
637 _wapi_handle_unlock_shared_handles ();
/* Lazily allocate private-handle slot @idx under scan_mutex.
 * Safe to call when the slot already exists (checked inside the lock). */
643 init_handles_slot (int idx)
647 pthread_cleanup_push ((void(*)(void *))mono_mutex_unlock_in_cleanup,
648 (void *)&scan_mutex);
649 thr_ret = mono_mutex_lock (&scan_mutex);
650 g_assert (thr_ret == 0);
652 if (_wapi_private_handles [idx] == NULL) {
653 _wapi_private_handles [idx] = g_new0 (struct _WapiHandleUnshared,
654 _WAPI_HANDLE_INITIAL_COUNT);
655 g_assert (_wapi_private_handles [idx]);
658 thr_ret = mono_mutex_unlock (&scan_mutex);
659 g_assert (thr_ret == 0);
660 pthread_cleanup_pop (0);
/* Create a handle for file descriptor @fd: the handle index IS the fd
 * (hence the reserved low range). Fails if fd exceeds the reserve or the
 * slot is already occupied. Initialization is done under the fileshare
 * semaphore so half-initialized handles aren't observed by share-entry
 * code. */
663 gpointer _wapi_handle_new_fd (WapiHandleType type, int fd,
664 gpointer handle_specific)
666 struct _WapiHandleUnshared *handle;
669 g_assert (_wapi_has_shut_down == FALSE);
671 DEBUG ("%s: Creating new handle of type %s", __func__,
672 _wapi_handle_typename[type]);
674 g_assert(_WAPI_FD_HANDLE(type));
675 g_assert(!_WAPI_SHARED_HANDLE(type));
677 if (fd >= _wapi_fd_reserve) {
678 DEBUG ("%s: fd %d is too big", __func__, fd);
680 return(GUINT_TO_POINTER (_WAPI_HANDLE_INVALID));
683 /* Initialize the array entries on demand */
684 if (_wapi_private_handles [SLOT_INDEX (fd)] == NULL)
685 init_handles_slot (SLOT_INDEX (fd));
687 handle = &_WAPI_PRIVATE_HANDLES(fd);
689 if (handle->type != WAPI_HANDLE_UNUSED) {
690 DEBUG ("%s: fd %d is already in use!", __func__, fd);
691 /* FIXME: clean up this handle? We can't do anything
692 * with the fd, cos thats the new one
696 DEBUG ("%s: Assigning new fd handle %d", __func__, fd);
698 /* Prevent file share entries racing with us, when the file
699 * handle is only half initialised
701 thr_ret = _wapi_shm_sem_lock (_WAPI_SHARED_SEM_FILESHARE);
702 g_assert(thr_ret == 0);
704 _wapi_handle_init (handle, type, handle_specific);
706 thr_ret = _wapi_shm_sem_unlock (_WAPI_SHARED_SEM_FILESHARE);
/* The returned "handle" is simply the fd value. */
708 return(GUINT_TO_POINTER(fd));
/* Validate @handle against @type and, if @handle_specific is non-NULL,
 * return a pointer to the type-specific payload — the shared-table copy
 * for shared types (after re-checking it hasn't been deleted), otherwise
 * the private copy. */
711 gboolean _wapi_lookup_handle (gpointer handle, WapiHandleType type,
712 gpointer *handle_specific)
714 struct _WapiHandleUnshared *handle_data;
715 guint32 handle_idx = GPOINTER_TO_UINT(handle);
717 if (!_WAPI_PRIVATE_VALID_SLOT (handle_idx)) {
721 /* Initialize the array entries on demand */
722 if (_wapi_private_handles [SLOT_INDEX (handle_idx)] == NULL)
723 init_handles_slot (SLOT_INDEX (handle_idx));
725 handle_data = &_WAPI_PRIVATE_HANDLES(handle_idx);
727 if (handle_data->type != type) {
731 if (handle_specific == NULL) {
735 if (_WAPI_SHARED_HANDLE(type)) {
736 struct _WapiHandle_shared_ref *ref;
737 struct _WapiHandleShared *shared_handle_data;
739 ref = &handle_data->u.shared;
740 shared_handle_data = &_wapi_shared_layout->handles[ref->offset];
742 if (shared_handle_data->type != type) {
743 /* The handle must have been deleted on us
748 *handle_specific = &shared_handle_data->u;
750 *handle_specific = &handle_data->u;
/* Iterate every private handle of @type under scan_mutex, invoking
 * @on_each with the handle and @user_data; the callback returning TRUE
 * stops the iteration (per the visible test on line 778). */
757 _wapi_handle_foreach (WapiHandleType type,
758 gboolean (*on_each)(gpointer test, gpointer user),
761 struct _WapiHandleUnshared *handle_data = NULL;
766 pthread_cleanup_push ((void(*)(void *))mono_mutex_unlock_in_cleanup,
767 (void *)&scan_mutex);
768 thr_ret = mono_mutex_lock (&scan_mutex);
769 g_assert (thr_ret == 0);
771 for (i = SLOT_INDEX (0); i < _wapi_private_handle_slot_count; i++) {
772 if (_wapi_private_handles [i]) {
773 for (k = SLOT_OFFSET (0); k < _WAPI_HANDLE_INITIAL_COUNT; k++) {
774 handle_data = &_wapi_private_handles [i][k];
776 if (handle_data->type == type) {
/* Reconstruct the flat handle value from (slot, offset). */
777 ret = GUINT_TO_POINTER (i * _WAPI_HANDLE_INITIAL_COUNT + k);
778 if (on_each (ret, user_data) == TRUE)
785 thr_ret = mono_mutex_unlock (&scan_mutex);
786 g_assert (thr_ret == 0);
787 pthread_cleanup_pop (0);
790 /* This might list some shared handles twice if they are already
791 * opened by this process, and the check function returns FALSE the
792 * first time. Shared handles that are created during the search are
793 * unreffed if the check function returns FALSE, so callers must not
794 * rely on the handle persisting (unless the check function returns
796 * The caller owns the returned handle.
798 gpointer _wapi_search_handle (WapiHandleType type,
799 gboolean (*check)(gpointer test, gpointer user),
801 gpointer *handle_specific,
802 gboolean search_shared)
804 struct _WapiHandleUnshared *handle_data = NULL;
805 struct _WapiHandleShared *shared = NULL;
808 gboolean found = FALSE;
/* Pass 1: scan this process's private handles under scan_mutex. */
811 pthread_cleanup_push ((void(*)(void *))mono_mutex_unlock_in_cleanup,
812 (void *)&scan_mutex);
813 thr_ret = mono_mutex_lock (&scan_mutex);
814 g_assert (thr_ret == 0);
816 for (i = SLOT_INDEX (0); !found && i < _wapi_private_handle_slot_count; i++) {
817 if (_wapi_private_handles [i]) {
818 for (k = SLOT_OFFSET (0); k < _WAPI_HANDLE_INITIAL_COUNT; k++) {
819 handle_data = &_wapi_private_handles [i][k];
821 if (handle_data->type == type) {
822 ret = GUINT_TO_POINTER (i * _WAPI_HANDLE_INITIAL_COUNT + k);
823 if (check (ret, user_data) == TRUE) {
/* Match: take the reference the caller will own. */
824 _wapi_handle_ref (ret);
827 if (_WAPI_SHARED_HANDLE (type)) {
828 shared = &_wapi_shared_layout->handles[i];
838 thr_ret = mono_mutex_unlock (&scan_mutex);
839 g_assert (thr_ret == 0);
840 pthread_cleanup_pop (0);
/* Pass 2 (optional): walk the cross-process shared table. */
842 if (!found && search_shared && _WAPI_SHARED_HANDLE (type)) {
843 /* Not found yet, so search the shared memory too */
844 DEBUG ("%s: Looking at other shared handles...", __func__);
846 for (i = 0; i < _WAPI_HANDLE_INITIAL_COUNT; i++) {
847 shared = &_wapi_shared_layout->handles[i];
849 if (shared->type == type) {
850 /* Tell new_from_offset to not
851 * timestamp this handle, because
852 * otherwise it will ping every handle
853 * in the list and they will never
856 ret = _wapi_handle_new_from_offset (type, i,
858 if (ret == INVALID_HANDLE_VALUE) {
859 /* This handle was deleted
860 * while we were looking at it
865 DEBUG ("%s: Opened tmp handle %p (type %s) from offset %d", __func__, ret, _wapi_handle_typename[type], i);
867 /* It's possible that the shared part
868 * of this handle has now been blown
869 * away (after new_from_offset
870 * successfully opened it,) if its
871 * timestamp is too old. The check
872 * function needs to be aware of this,
873 * and cope if the handle has
876 if (check (ret, user_data) == TRUE) {
877 /* Timestamp this handle, but make
878 * sure it still exists first
880 thr_ret = _wapi_handle_lock_shared_handles ();
881 g_assert (thr_ret == 0);
883 if (shared->type == type) {
884 guint32 now = (guint32)(time (NULL) & 0xFFFFFFFF);
885 InterlockedExchange ((gint32 *)&shared->timestamp, now);
888 handle_data = &_WAPI_PRIVATE_HANDLES(GPOINTER_TO_UINT(ret));
890 _wapi_handle_unlock_shared_handles ();
893 /* It's been deleted,
897 _wapi_handle_unlock_shared_handles ();
901 /* This isn't the handle we're looking
902 * for, so drop the reference we took
903 * in _wapi_handle_new_from_offset ()
905 _wapi_handle_unref (ret);
/* Hand back a pointer to the matched handle's payload, if requested. */
915 if(handle_specific != NULL) {
916 if (_WAPI_SHARED_HANDLE(type)) {
917 g_assert(shared->type == type);
919 *handle_specific = &shared->u;
921 *handle_specific = &handle_data->u;
929 /* Returns the offset of the metadata array, or -1 on error, or 0 for
930 * not found (0 is not a valid offset)
932 gint32 _wapi_search_handle_namespace (WapiHandleType type,
935 struct _WapiHandleShared *shared_handle_data;
940 g_assert(_WAPI_SHARED_HANDLE(type));
942 DEBUG ("%s: Lookup for handle named [%s] type %s", __func__,
943 utf8_name, _wapi_handle_typename[type]);
945 /* Do a handle collection before starting to look, so that any
946 * stale cruft gets removed
948 _wapi_handle_collect ();
950 thr_ret = _wapi_handle_lock_shared_handles ();
951 g_assert (thr_ret == 0);
/* Offset 0 is the guard slot, so start scanning at 1. */
953 for(i = 1; i < _WAPI_HANDLE_INITIAL_COUNT; i++) {
954 WapiSharedNamespace *sharedns;
956 shared_handle_data = &_wapi_shared_layout->handles[i];
958 /* Check mutex, event, semaphore, timer, job and
959 * file-mapping object names. So far only mutex,
960 * semaphore and event are implemented.
/* Skip types that don't participate in the shared namespace. */
962 if (!_WAPI_SHARED_NAMESPACE (shared_handle_data->type)) {
966 DEBUG ("%s: found a shared namespace handle at 0x%x (type %s)", __func__, i, _wapi_handle_typename[shared_handle_data->type]);
968 sharedns=(WapiSharedNamespace *)&shared_handle_data->u;
970 DEBUG ("%s: name is [%s]", __func__, sharedns->name);
972 if (strcmp (sharedns->name, utf8_name) == 0) {
973 if (shared_handle_data->type != type) {
974 /* Its the wrong type, so fail now */
975 DEBUG ("%s: handle 0x%x matches name but is wrong type: %s", __func__, i, _wapi_handle_typename[shared_handle_data->type]);
979 DEBUG ("%s: handle 0x%x matches name and type", __func__, i);
987 _wapi_handle_unlock_shared_handles ();
/* Atomically increment @handle's private refcount. Warns (rather than
 * asserting) on attempts to ref an unused handle. For shared types the
 * shared-table timestamp is also refreshed so the collector doesn't
 * expire a handle that is actively being used. */
992 void _wapi_handle_ref (gpointer handle)
994 guint32 idx = GPOINTER_TO_UINT(handle);
995 struct _WapiHandleUnshared *handle_data;
997 if (!_WAPI_PRIVATE_VALID_SLOT (idx)) {
1001 if (_wapi_handle_type (handle) == WAPI_HANDLE_UNUSED) {
1002 g_warning ("%s: Attempting to ref unused handle %p", __func__,
1007 handle_data = &_WAPI_PRIVATE_HANDLES(idx);
1009 InterlockedIncrement ((gint32 *)&handle_data->ref);
1011 /* It's possible for processes to exit before getting around
1012 * to updating timestamps in the collection thread, so if a
1013 * shared handle is reffed do the timestamp here as well just
1016 if (_WAPI_SHARED_HANDLE(handle_data->type)) {
1017 struct _WapiHandleShared *shared_data = &_wapi_shared_layout->handles[handle_data->u.shared.offset];
1018 guint32 now = (guint32)(time (NULL) & 0xFFFFFFFF);
1019 InterlockedExchange ((gint32 *)&shared_data->timestamp, now);
1023 g_message ("%s: %s handle %p ref now %d", __func__,
1024 _wapi_handle_typename[_WAPI_PRIVATE_HANDLES (idx).type],
1026 _WAPI_PRIVATE_HANDLES(idx).ref);
1030 /* The handle must not be locked on entry to this function */
/* Drop one reference; when it hits zero, destroy the handle: snapshot its
 * data, reset the slot to UNUSED, destroy its cond/mutex, decrement the
 * shared-table refcount for shared types, and finally invoke the per-type
 * close callback OUTSIDE the locks. @ignore_private_busy_handles lets the
 * exit path tolerate EBUSY from mutex/cond destruction. */
1031 static void _wapi_handle_unref_full (gpointer handle, gboolean ignore_private_busy_handles)
1033 guint32 idx = GPOINTER_TO_UINT(handle);
1034 gboolean destroy = FALSE, early_exit = FALSE;
1037 if (!_WAPI_PRIVATE_VALID_SLOT (idx)) {
1041 if (_wapi_handle_type (handle) == WAPI_HANDLE_UNUSED) {
1042 g_warning ("%s: Attempting to unref unused handle %p",
1047 /* Possible race condition here if another thread refs the
1048 * handle between here and setting the type to UNUSED. I
1049 * could lock a mutex, but I'm not sure that allowing a handle
1050 * reference to reach 0 isn't an application bug anyway.
1052 destroy = (InterlockedDecrement ((gint32 *)&_WAPI_PRIVATE_HANDLES(idx).ref) ==0);
1055 g_message ("%s: %s handle %p ref now %d (destroy %s)", __func__,
1056 _wapi_handle_typename[_WAPI_PRIVATE_HANDLES (idx).type],
1058 _WAPI_PRIVATE_HANDLES(idx).ref, destroy?"TRUE":"FALSE");
1062 /* Need to copy the handle info, reset the slot in the
1063 * array, and _only then_ call the close function to
1064 * avoid race conditions (eg file descriptors being
1065 * closed, and another file being opened getting the
1066 * same fd racing the memset())
1068 struct _WapiHandleUnshared handle_data;
1069 struct _WapiHandleShared shared_handle_data;
1070 WapiHandleType type = _WAPI_PRIVATE_HANDLES(idx).type;
1071 void (*close_func)(gpointer, gpointer) = _wapi_handle_ops_get_close_func (type);
1072 gboolean is_shared = _WAPI_SHARED_HANDLE(type);
1075 /* If this is a shared handle we need to take
1076 * the shared lock outside of the scan_mutex
1077 * lock to avoid deadlocks
1079 thr_ret = _wapi_handle_lock_shared_handles ();
1080 g_assert (thr_ret == 0);
1083 pthread_cleanup_push ((void(*)(void *))mono_mutex_unlock_in_cleanup, (void *)&scan_mutex);
1084 thr_ret = mono_mutex_lock (&scan_mutex);
1086 DEBUG ("%s: Destroying handle %p", __func__, handle);
/* Snapshot before wiping, so close_func sees the old payload. */
1088 memcpy (&handle_data, &_WAPI_PRIVATE_HANDLES(idx),
1089 sizeof (struct _WapiHandleUnshared));
1091 memset (&_WAPI_PRIVATE_HANDLES(idx).u, '\0',
1092 sizeof(_WAPI_PRIVATE_HANDLES(idx).u));
1094 _WAPI_PRIVATE_HANDLES(idx).type = WAPI_HANDLE_UNUSED;
1097 /* Destroy the mutex and cond var. We hope nobody
1098 * tried to grab them between the handle unlock and
1099 * now, but pthreads doesn't have a
1100 * "unlock_and_destroy" atomic function.
1102 thr_ret = mono_mutex_destroy (&_WAPI_PRIVATE_HANDLES(idx).signal_mutex);
1103 /*WARNING gross hack to make cleanup not crash when exiting without the whole runtime teardown.*/
1104 if (thr_ret == EBUSY && ignore_private_busy_handles) {
1108 g_error ("Error destroying handle %p mutex due to %d\n", handle, thr_ret);
1110 thr_ret = pthread_cond_destroy (&_WAPI_PRIVATE_HANDLES(idx).signal_cond);
1111 if (thr_ret == EBUSY && ignore_private_busy_handles)
1113 else if (thr_ret != 0)
1114 g_error ("Error destroying handle %p cond var due to %d\n", handle, thr_ret);
1117 struct _WapiHandleShared *shared = &_wapi_shared_layout->handles[handle_data.u.shared.offset];
1119 memcpy (&shared_handle_data, shared,
1120 sizeof (struct _WapiHandleShared));
1122 /* It's possible that this handle is already
1123 * pointing at a deleted shared section
1126 g_message ("%s: %s handle %p shared refs before dec %d", __func__, _wapi_handle_typename[type], handle, shared->handle_refs);
/* Last cross-process reference gone: wipe the shared entry too. */
1129 if (shared->handle_refs > 0) {
1130 shared->handle_refs--;
1131 if (shared->handle_refs == 0) {
1132 memset (shared, '\0', sizeof (struct _WapiHandleShared));
1137 thr_ret = mono_mutex_unlock (&scan_mutex);
1138 g_assert (thr_ret == 0);
1139 pthread_cleanup_pop (0);
1144 _wapi_handle_unlock_shared_handles ();
/* Close callback runs lock-free, on the snapshot taken above. */
1147 if (close_func != NULL) {
1149 close_func (handle, &shared_handle_data.u);
1151 close_func (handle, &handle_data.u);
/* Public unref: never ignores EBUSY on private mutex/cond destruction. */
1157 void _wapi_handle_unref (gpointer handle)
1159 _wapi_handle_unref_full (handle, FALSE);
/* Record the capability bits supported by handles of @type. */
1162 void _wapi_handle_register_capabilities (WapiHandleType type,
1163 WapiHandleCapability caps)
1165 handle_caps[type] = caps;
/* TRUE iff @handle's type has any of the requested capability bits. */
1168 gboolean _wapi_handle_test_capabilities (gpointer handle,
1169 WapiHandleCapability caps)
1171 guint32 idx = GPOINTER_TO_UINT(handle);
1172 WapiHandleType type;
1174 if (!_WAPI_PRIVATE_VALID_SLOT (idx)) {
1178 type = _WAPI_PRIVATE_HANDLES(idx).type;
1180 DEBUG ("%s: testing 0x%x against 0x%x (%d)", __func__,
1181 handle_caps[type], caps, handle_caps[type] & caps);
1183 return((handle_caps[type] & caps) != 0);
/* Return @type's close callback from the ops table, if registered. */
1186 static void (*_wapi_handle_ops_get_close_func (WapiHandleType type))(gpointer, gpointer)
1188 if (handle_ops[type] != NULL &&
1189 handle_ops[type]->close != NULL) {
1190 return (handle_ops[type]->close);
/* Dispatch the per-type close operation for @handle, if one exists. */
1196 void _wapi_handle_ops_close (gpointer handle, gpointer data)
1198 guint32 idx = GPOINTER_TO_UINT(handle);
1199 WapiHandleType type;
1201 if (!_WAPI_PRIVATE_VALID_SLOT (idx)) {
1205 type = _WAPI_PRIVATE_HANDLES(idx).type;
1207 if (handle_ops[type] != NULL &&
1208 handle_ops[type]->close != NULL) {
1209 handle_ops[type]->close (handle, data);
/* Dispatch the per-type signal operation for @handle, if one exists. */
1213 void _wapi_handle_ops_signal (gpointer handle)
1215 guint32 idx = GPOINTER_TO_UINT(handle);
1216 WapiHandleType type;
1218 if (!_WAPI_PRIVATE_VALID_SLOT (idx)) {
1222 type = _WAPI_PRIVATE_HANDLES(idx).type;
1224 if (handle_ops[type] != NULL && handle_ops[type]->signal != NULL) {
1225 handle_ops[type]->signal (handle);
/* Dispatch the per-type own_handle operation; returns its result when
 * defined (fallthrough behavior for undefined ops not visible here). */
1229 gboolean _wapi_handle_ops_own (gpointer handle)
1231 guint32 idx = GPOINTER_TO_UINT(handle);
1232 WapiHandleType type;
1234 if (!_WAPI_PRIVATE_VALID_SLOT (idx)) {
1238 type = _WAPI_PRIVATE_HANDLES(idx).type;
1240 if (handle_ops[type] != NULL && handle_ops[type]->own_handle != NULL) {
1241 return(handle_ops[type]->own_handle (handle));
/* Dispatch the per-type is_owned query; returns its result when defined. */
1247 gboolean _wapi_handle_ops_isowned (gpointer handle)
1249 guint32 idx = GPOINTER_TO_UINT(handle);
1250 WapiHandleType type;
1252 if (!_WAPI_PRIVATE_VALID_SLOT (idx)) {
1256 type = _WAPI_PRIVATE_HANDLES(idx).type;
1258 if (handle_ops[type] != NULL && handle_ops[type]->is_owned != NULL) {
1259 return(handle_ops[type]->is_owned (handle));
/* Dispatch the per-type special_wait operation; WAIT_FAILED when the
 * handle slot is invalid or no special_wait op is registered. */
1265 guint32 _wapi_handle_ops_special_wait (gpointer handle, guint32 timeout, gboolean alertable)
1267 guint32 idx = GPOINTER_TO_UINT(handle);
1268 WapiHandleType type;
1270 if (!_WAPI_PRIVATE_VALID_SLOT (idx)) {
1271 return(WAIT_FAILED);
1274 type = _WAPI_PRIVATE_HANDLES(idx).type;
1276 if (handle_ops[type] != NULL &&
1277 handle_ops[type]->special_wait != NULL) {
1278 return(handle_ops[type]->special_wait (handle, timeout, alertable));
1280 return(WAIT_FAILED);
/* Dispatch the per-type prewait hook for @handle, if one exists. */
1284 void _wapi_handle_ops_prewait (gpointer handle)
1286 guint32 idx = GPOINTER_TO_UINT (handle);
1287 WapiHandleType type;
1289 if (!_WAPI_PRIVATE_VALID_SLOT (idx)) {
1293 type = _WAPI_PRIVATE_HANDLES (idx).type;
1295 if (handle_ops[type] != NULL &&
1296 handle_ops[type]->prewait != NULL) {
1297 handle_ops[type]->prewait (handle);
1304 * @handle: The handle to release
1306 * Closes and invalidates @handle, releasing any resources it
1307 * consumes. When the last handle to a temporary or non-persistent
1308 * object is closed, that object can be deleted. Closing the same
1309 * handle twice is an error.
1311 * Return value: %TRUE on success, %FALSE otherwise.
1313 gboolean CloseHandle(gpointer handle)
1315 if (handle == NULL) {
1316 /* Problem: because we map file descriptors to the
1317 * same-numbered handle we can't tell the difference
1318 * between a bogus handle and the handle to stdin.
1319 * Assume that it's the console handle if that handle
1322 if (_WAPI_PRIVATE_HANDLES (0).type != WAPI_HANDLE_CONSOLE) {
1323 SetLastError (ERROR_INVALID_PARAMETER);
1327 if (handle == _WAPI_HANDLE_INVALID){
1328 SetLastError (ERROR_INVALID_PARAMETER);
/* Closing is just dropping one reference; destruction happens at zero. */
1332 _wapi_handle_unref (handle);
1337 /* Lots more to implement here, but this is all we need at the moment */
1338 gboolean DuplicateHandle (gpointer srcprocess, gpointer src,
1339 gpointer targetprocess, gpointer *target,
1340 guint32 access G_GNUC_UNUSED, gboolean inherit G_GNUC_UNUSED, guint32 options G_GNUC_UNUSED)
1342 if (srcprocess != _WAPI_PROCESS_CURRENT ||
1343 targetprocess != _WAPI_PROCESS_CURRENT) {
1344 /* Duplicating other process's handles is not supported */
1345 SetLastError (ERROR_INVALID_HANDLE);
1349 if (src == _WAPI_PROCESS_CURRENT) {
1350 *target = _wapi_process_duplicate ();
1351 } else if (src == _WAPI_THREAD_CURRENT) {
1352 *target = _wapi_thread_duplicate ();
1354 _wapi_handle_ref (src);
/* Counts how many of the given handles are signalled.
 *
 * Every handle is try-locked first; if any trylock fails, all locks
 * taken so far are released and the whole attempt is retried with an
 * increasing backoff.  On return the handles remain LOCKED so the
 * caller sees a consistent snapshot — release with
 * _wapi_handle_unlock_handles ().
 *
 * A handle counts as signalled if it is ownable and already owned by
 * us, or if its signalled flag (shared segment for shared types,
 * private slot otherwise) is set.
 */
gboolean _wapi_handle_count_signalled_handles (guint32 numhandles,
	guint32 count, i, iter=0;
	WapiHandleType type;

	/* Lock all the handles, with backoff */
	thr_ret = _wapi_handle_lock_shared_handles ();
	g_assert (thr_ret == 0);

	for(i=0; i<numhandles; i++) {
		gpointer handle = handles[i];
		guint32 idx = GPOINTER_TO_UINT(handle);

		DEBUG ("%s: attempting to lock %p", __func__, handle);

		type = _WAPI_PRIVATE_HANDLES(idx).type;

		thr_ret = _wapi_handle_trylock_handle (handle);
		/* trylock failed: undo everything and retry from scratch */
		DEBUG ("%s: attempt failed for %p: %s", __func__,
			handle, strerror (thr_ret));

		thr_ret = _wapi_handle_unlock_shared_handles ();
		g_assert (thr_ret == 0);

		/* Release the per-handle locks taken before the failure */
		handle = handles[i];
		idx = GPOINTER_TO_UINT(handle);

		thr_ret = _wapi_handle_unlock_handle (handle);
		g_assert (thr_ret == 0);

		/* If iter ever reaches 100 the nanosleep will
		 * return EINVAL immediately, but we have a
		 * design flaw if that happens.
		 */
		g_warning ("%s: iteration overflow!",

		DEBUG ("%s: Backing off for %d ms", __func__,
		_wapi_handle_spin (10 * iter);

	DEBUG ("%s: Locked all handles", __func__);

	/* All locks held: count the signalled handles */
	for(i=0; i<numhandles; i++) {
		gpointer handle = handles[i];
		guint32 idx = GPOINTER_TO_UINT(handle);

		type = _WAPI_PRIVATE_HANDLES(idx).type;

		DEBUG ("%s: Checking handle %p", __func__, handle);

		/* Owned-by-us, shared-signalled, or private-signalled */
		if(((_wapi_handle_test_capabilities (handle, WAPI_HANDLE_CAP_OWN)==TRUE) &&
		    (_wapi_handle_ops_isowned (handle) == TRUE)) ||
		   (_WAPI_SHARED_HANDLE(type) &&
		    WAPI_SHARED_HANDLE_DATA(handle).signalled == TRUE) ||
		   (!_WAPI_SHARED_HANDLE(type) &&
		    _WAPI_PRIVATE_HANDLES(idx).signalled == TRUE)) {

			DEBUG ("%s: Handle %p signalled", __func__,

	DEBUG ("%s: %d event handles signalled", __func__, count);

	/* waitall: every handle must be signalled; else one suffices */
	if ((waitall == TRUE && count == numhandles) ||
	    (waitall == FALSE && count > 0)) {

	DEBUG ("%s: Returning %d", __func__, ret);
1468 void _wapi_handle_unlock_handles (guint32 numhandles, gpointer *handles)
1473 thr_ret = _wapi_handle_unlock_shared_handles ();
1474 g_assert (thr_ret == 0);
1476 for(i=0; i<numhandles; i++) {
1477 gpointer handle = handles[i];
1479 DEBUG ("%s: unlocking handle %p", __func__, handle);
1481 thr_ret = _wapi_handle_unlock_handle (handle);
1482 g_assert (thr_ret == 0);
1486 static int timedwait_signal_poll_cond (pthread_cond_t *cond, mono_mutex_t *mutex, struct timespec *timeout, gboolean alertable)
1488 struct timespec fake_timeout;
1493 ret=mono_cond_timedwait (cond, mutex, timeout);
1495 ret=mono_cond_wait (cond, mutex);
1497 _wapi_calc_timeout (&fake_timeout, 100);
1499 if (timeout != NULL && ((fake_timeout.tv_sec > timeout->tv_sec) ||
1500 (fake_timeout.tv_sec == timeout->tv_sec &&
1501 fake_timeout.tv_nsec > timeout->tv_nsec))) {
1502 /* Real timeout is less than 100ms time */
1503 ret=mono_cond_timedwait (cond, mutex, timeout);
1505 ret=mono_cond_timedwait (cond, mutex, &fake_timeout);
1507 /* Mask the fake timeout, this will cause
1508 * another poll if the cond was not really signaled
1510 if (ret==ETIMEDOUT) {
/* Blocks until the global signal handle is signalled (no timeout,
 * always alertable).  @poll selects the polled wait variant needed
 * when process handles are among the waited set.
 */
int _wapi_handle_wait_signal (gboolean poll)
	return _wapi_handle_timedwait_signal_handle (_wapi_global_signal_handle, NULL, TRUE, poll);
/* Like _wapi_handle_wait_signal () but bounded by @timeout. */
int _wapi_handle_timedwait_signal (struct timespec *timeout, gboolean poll)
	return _wapi_handle_timedwait_signal_handle (_wapi_global_signal_handle, timeout, TRUE, poll);
/* Waits (no timeout, no polling) for @handle to be signalled;
 * @alertable allows the wait to be interrupted.
 */
int _wapi_handle_wait_signal_handle (gpointer handle, gboolean alertable)
	DEBUG ("%s: waiting for %p", __func__, handle);

	return _wapi_handle_timedwait_signal_handle (handle, NULL, alertable, FALSE);
/* Core wait primitive: waits for @handle to become signalled,
 * optionally bounded by @timeout, optionally @alertable
 * (interruptible), optionally polling (@poll — needed for process
 * handles whose state can change without a cond broadcast).
 *
 * Shared handles have no cross-process condition variable, so they
 * are waited on by spinning in 100ms slices and re-checking the
 * shared signalled flag; private handles wait on their per-slot
 * cond/mutex pair.
 */
int _wapi_handle_timedwait_signal_handle (gpointer handle,
		struct timespec *timeout, gboolean alertable, gboolean poll)
	DEBUG ("%s: waiting for %p (type %s)", __func__, handle,
		_wapi_handle_typename[_wapi_handle_type (handle)]);

	if (_WAPI_SHARED_HANDLE (_wapi_handle_type (handle))) {
		/* Shared handle: poll the shared signalled flag */
		if (WAPI_SHARED_HANDLE_DATA(handle).signalled == TRUE) {

		if (timeout != NULL) {
			struct timespec fake_timeout;
			_wapi_calc_timeout (&fake_timeout, 100);

			/* Would a 100ms spin overshoot the deadline? */
			if ((fake_timeout.tv_sec > timeout->tv_sec) ||
				(fake_timeout.tv_sec == timeout->tv_sec &&
				 fake_timeout.tv_nsec > timeout->tv_nsec)) {
				/* FIXME: Real timeout is less than
				 * 100ms time, but is it really worth
				 * calculating to the exact ms?
				 */
				_wapi_handle_spin (100);

				if (WAPI_SHARED_HANDLE_DATA(handle).signalled == TRUE) {

		/* Untimed shared wait: spin and re-check */
		_wapi_handle_spin (100);

		/* Private handle: wait on the per-slot cond/mutex */
		guint32 idx = GPOINTER_TO_UINT(handle);

		pthread_cond_t *cond;
		mono_mutex_t *mutex;

		/* Register as interruptible before sleeping */
		if (alertable && !wapi_thread_set_wait_handle (handle))

		cond = &_WAPI_PRIVATE_HANDLES (idx).signal_cond;
		mutex = &_WAPI_PRIVATE_HANDLES (idx).signal_mutex;

		/* This is needed when waiting for process handles */
		res = timedwait_signal_poll_cond (cond, mutex, timeout, alertable);

		res = mono_cond_timedwait (cond, mutex, timeout);

		res = mono_cond_wait (cond, mutex);

		wapi_thread_clear_wait_handle (handle);
1599 _wapi_free_share_info (_WapiFileShare *share_info)
1601 if (!_wapi_shm_enabled ()) {
1602 file_share_hash_lock ();
1603 g_hash_table_remove (file_share_hash, share_info);
1604 file_share_hash_unlock ();
1605 /* The hashtable dtor frees share_info */
1607 memset (share_info, '\0', sizeof(struct _WapiFileShare));
1612 wapi_share_info_equal (gconstpointer ka, gconstpointer kb)
1614 const _WapiFileShare *s1 = ka;
1615 const _WapiFileShare *s2 = kb;
1617 return (s1->device == s2->device && s1->inode == s2->inode) ? 1 : 0;
1621 wapi_share_info_hash (gconstpointer data)
1623 const _WapiFileShare *s = data;
/* Finds (or creates) the sharing record for the file identified by
 * (@device, @inode).  If a record exists, its current sharemode and
 * access are returned through @old_sharemode/@old_access and its
 * refcount is bumped; otherwise a fresh record is created from the
 * new_* values.  @share_info receives the record either way.
 *
 * Two backing stores: a process-local hash table when SHM is
 * disabled, or a linear scan of the shared-memory fileshare segment.
 * Both the shared-handles lock and the fileshare semaphore are held
 * across the lookup to keep it race-free.
 */
gboolean _wapi_handle_get_or_set_share (dev_t device, ino_t inode,
					guint32 new_sharemode,
					guint32 *old_sharemode,
					guint32 *old_access,
					struct _WapiFileShare **share_info)
	struct _WapiFileShare *file_share;
	/* Timestamp capped to 32 bits for the shared segment */
	guint32 now = (guint32)(time(NULL) & 0xFFFFFFFF);
	int thr_ret, i, first_unused = -1;
	gboolean exists = FALSE;

	/* Prevents entries from expiring under us as we search
	 */
	thr_ret = _wapi_handle_lock_shared_handles ();
	g_assert (thr_ret == 0);

	/* Prevent new entries racing with us */
	thr_ret = _wapi_shm_sem_lock (_WAPI_SHARED_SEM_FILESHARE);
	g_assert (thr_ret == 0);

	if (!_wapi_shm_enabled ()) {
		/*
		 * Instead of allocating a 4MB array, we use a hash table to keep track of this
		 * info. This is needed even if SHM is disabled, to track sharing inside
		 * the current process.
		 */
		if (!file_share_hash) {
			/* Lazily create the (device, inode)-keyed table */
			file_share_hash = g_hash_table_new_full (wapi_share_info_hash, wapi_share_info_equal, NULL, g_free);
			InitializeCriticalSection (&file_share_hash_mutex);

		tmp.device = device;

		file_share_hash_lock ();

		file_share = g_hash_table_lookup (file_share_hash, &tmp);
		/* Hit: report the existing modes and take a reference */
		*old_sharemode = file_share->sharemode;
		*old_access = file_share->access;
		*share_info = file_share;

		InterlockedIncrement ((gint32 *)&file_share->handle_refs);
		/* Miss: build and register a fresh record */
		file_share = g_new0 (_WapiFileShare, 1);

		file_share->device = device;
		file_share->inode = inode;
		file_share->opened_by_pid = _wapi_getpid ();
		file_share->sharemode = new_sharemode;
		file_share->access = new_access;
		file_share->handle_refs = 1;
		*share_info = file_share;

		g_hash_table_insert (file_share_hash, file_share, file_share);

		file_share_hash_unlock ();

	/* If a linear scan gets too slow we'll have to fit a hash
	 * table onto the shared mem backing store
	 */
	for (i = 0; i <= _wapi_fileshare_layout->hwm; i++) {
		file_share = &_wapi_fileshare_layout->share_info[i];

		/* Make a note of an unused slot, in case we need to
		 */
		if (first_unused == -1 && file_share->handle_refs == 0) {

		if (file_share->handle_refs == 0) {

		if (file_share->device == device &&
			file_share->inode == inode) {
			*old_sharemode = file_share->sharemode;
			*old_access = file_share->access;
			*share_info = file_share;

			/* Increment the reference count while we
			 * still have sole access to the shared area.
			 * This makes the increment atomic wrt
			 */
			InterlockedIncrement ((gint32 *)&file_share->handle_refs);

	/* Segment full and nothing recyclable: give up */
	if (i == _WAPI_FILESHARE_SIZE && first_unused == -1) {

	if (first_unused == -1) {
		/* Extend the high-water mark with a new slot */
		file_share = &_wapi_fileshare_layout->share_info[++i];
		_wapi_fileshare_layout->hwm = i;
		/* Recycle the unused slot noted during the scan */
		file_share = &_wapi_fileshare_layout->share_info[first_unused];

	file_share->device = device;
	file_share->inode = inode;
	file_share->opened_by_pid = _wapi_getpid ();
	file_share->sharemode = new_sharemode;
	file_share->access = new_access;
	file_share->handle_refs = 1;
	*share_info = file_share;

	/* Refresh the record's timestamp so the collector keeps it */
	if (*share_info != NULL) {
		InterlockedExchange ((gint32 *)&(*share_info)->timestamp, now);

	thr_ret = _wapi_shm_sem_unlock (_WAPI_SHARED_SEM_FILESHARE);

	_wapi_handle_unlock_shared_handles ();
/* If we don't have the info in /proc, check if the process that
 * opened this share info is still there (it's not a perfect method,
 */
static void _wapi_handle_check_share_by_pid (struct _WapiFileShare *share_info)
#if defined(__native_client__)
	/* NaCl has no processes to probe */
	g_assert_not_reached ();
	/* kill(pid, 0) probes for existence without sending a signal */
	if (kill (share_info->opened_by_pid, 0) == -1 &&
		/* It's gone completely (or there's a new process
		 * owned by someone else) so mark this share info as
		 */
		DEBUG ("%s: Didn't find it, destroying entry", __func__);

		_wapi_free_share_info (share_info);
/* Scan /proc/<pids>/fd/ for open file descriptors to the file in
 * question. If there are none, reset the share info.
 *
 * This implementation is Linux-specific; legacy systems will have to
 * implement their own ways of finding out if a particular file is
 * open by a process.
 */
void _wapi_handle_check_share (struct _WapiFileShare *share_info, int fd)
	gboolean found = FALSE, proc_fds = FALSE;
	pid_t self = _wapi_getpid ();

	/* Prevents entries from expiring under us if we remove this
	 */
	thr_ret = _wapi_handle_lock_shared_handles ();
	g_assert (thr_ret == 0);

	/* Prevent new entries racing with us */
	thr_ret = _wapi_shm_sem_lock (_WAPI_SHARED_SEM_FILESHARE);
	g_assert (thr_ret == 0);

	/* If there is no /proc, there's nothing more we can do here */
	if (access ("/proc", F_OK) == -1) {
		/* Fall back to probing the opener pid with kill(0) */
		_wapi_handle_check_share_by_pid (share_info);

	/* If there's another handle that thinks it owns this fd, then even
	 * if the fd has been closed behind our back consider it still owned.
	 * See bugs 75764 and 75891
	 */
	for (i = 0; i < _wapi_fd_reserve; i++) {
		if (_wapi_private_handles [SLOT_INDEX (i)]) {
			struct _WapiHandleUnshared *handle = &_WAPI_PRIVATE_HANDLES(i);

				handle->type == WAPI_HANDLE_FILE) {
				struct _WapiHandle_file *file_handle = &handle->u.file;

				if (file_handle->share_info == share_info) {
					DEBUG ("%s: handle 0x%x has this file open!",

	/* Walk every known process and inspect its /proc fd table */
	for (i = 0; i < _WAPI_HANDLE_INITIAL_COUNT; i++) {
		struct _WapiHandleShared *shared;
		struct _WapiHandle_process *process_handle;

		shared = &_wapi_shared_layout->handles[i];

		if (shared->type == WAPI_HANDLE_PROCESS) {
			struct dirent *fd_entry;
			char subdir[_POSIX_PATH_MAX];

			process_handle = &shared->u.process;
			pid = process_handle->id;

			/* Look in /proc/<pid>/fd/ but ignore
			 * /proc/<our pid>/fd/<fd>, as we have the
			 */
			g_snprintf (subdir, _POSIX_PATH_MAX, "/proc/%d/fd",

			fd_dir = opendir (subdir);
			if (fd_dir == NULL) {

			DEBUG ("%s: Looking in %s", __func__, subdir);

			while ((fd_entry = readdir (fd_dir)) != NULL) {
				char path[_POSIX_PATH_MAX];
				struct stat link_stat;

				/* Skip . and .. — and our own fd for this file */
				if (!strcmp (fd_entry->d_name, ".") ||
					!strcmp (fd_entry->d_name, "..") ||
					fd == atoi (fd_entry->d_name))) {

				g_snprintf (path, _POSIX_PATH_MAX,
					"/proc/%d/fd/%s", pid,

				/* stat follows the fd symlink to the real file */
				stat (path, &link_stat);
				if (link_stat.st_dev == share_info->device &&
					link_stat.st_ino == share_info->inode) {
					DEBUG ("%s: Found it at %s",

	if (proc_fds == FALSE) {
		/* No fd dirs could be read; fall back to pid probing */
		_wapi_handle_check_share_by_pid (share_info);
	} else if (found == FALSE) {
		/* Blank out this entry, as it is stale */
		DEBUG ("%s: Didn't find it, destroying entry", __func__);

		_wapi_free_share_info (share_info);

	thr_ret = _wapi_shm_sem_unlock (_WAPI_SHARED_SEM_FILESHARE);

	_wapi_handle_unlock_shared_handles ();
// Other implementations (non-Linux)
/* Portable fallback: no /proc fd table to scan, so only the
 * opener-pid liveness check is possible.
 */
void _wapi_handle_check_share (struct _WapiFileShare *share_info, int fd)
	/* Prevents entries from expiring under us if we remove this
	 */
	thr_ret = _wapi_handle_lock_shared_handles ();
	g_assert (thr_ret == 0);

	/* Prevent new entries racing with us */
	thr_ret = _wapi_shm_sem_lock (_WAPI_SHARED_SEM_FILESHARE);
	g_assert (thr_ret == 0);

	_wapi_handle_check_share_by_pid (share_info);

	thr_ret = _wapi_shm_sem_unlock (_WAPI_SHARED_SEM_FILESHARE);
	_wapi_handle_unlock_shared_handles ();
/* Debug helper: prints one line per in-use private handle (index,
 * type name, signalled state, refcount) plus the type-specific detail
 * printer from handle_details[].  scan_mutex is held for the walk;
 * the pthread cleanup handler unlocks it if the thread is cancelled.
 */
void _wapi_handle_dump (void)
	struct _WapiHandleUnshared *handle_data;

	pthread_cleanup_push ((void(*)(void *))mono_mutex_unlock_in_cleanup,
		(void *)&scan_mutex);
	thr_ret = mono_mutex_lock (&scan_mutex);
	g_assert (thr_ret == 0);

	/* Walk every allocated slot array, then every slot in it */
	for(i = SLOT_INDEX (0); i < _wapi_private_handle_slot_count; i++) {
		if (_wapi_private_handles [i]) {
			for (k = SLOT_OFFSET (0); k < _WAPI_HANDLE_INITIAL_COUNT; k++) {
				handle_data = &_wapi_private_handles [i][k];

				/* Skip empty slots */
				if (handle_data->type == WAPI_HANDLE_UNUSED) {

				g_print ("%3x [%7s] %s %d ",
					i * _WAPI_HANDLE_INITIAL_COUNT + k,
					_wapi_handle_typename[handle_data->type],
					handle_data->signalled?"Sg":"Un",
				handle_details[handle_data->type](&handle_data->u);

	thr_ret = mono_mutex_unlock (&scan_mutex);
	g_assert (thr_ret == 0);
	pthread_cleanup_pop (0);
1970 static void _wapi_shared_details (gpointer handle_info)
1972 struct _WapiHandle_shared_ref *shared = (struct _WapiHandle_shared_ref *)handle_info;
1974 g_print ("offset: 0x%x", shared->offset);
/* Periodic keep-alive: stamps the current time onto every shared
 * handle record and every file-share record referenced by this
 * process, so the cross-process collector does not reap entries that
 * are still in use here.  Takes the shared-handles lock, the
 * fileshare semaphore and scan_mutex (with a pthread cleanup handler
 * for cancellation safety).
 */
void _wapi_handle_update_refs (void)
	guint32 now = (guint32)(time (NULL) & 0xFFFFFFFF);

	thr_ret = _wapi_handle_lock_shared_handles ();
	g_assert (thr_ret == 0);

	/* Prevent file share entries racing with us */
	thr_ret = _wapi_shm_sem_lock (_WAPI_SHARED_SEM_FILESHARE);
	g_assert(thr_ret == 0);

	pthread_cleanup_push ((void(*)(void *))mono_mutex_unlock_in_cleanup,
		(void *)&scan_mutex);
	thr_ret = mono_mutex_lock (&scan_mutex);

	/* Walk every private handle slot */
	for(i = SLOT_INDEX (0); i < _wapi_private_handle_slot_count; i++) {
		if (_wapi_private_handles [i]) {
			for (k = SLOT_OFFSET (0); k < _WAPI_HANDLE_INITIAL_COUNT; k++) {
				struct _WapiHandleUnshared *handle = &_wapi_private_handles [i][k];

				if (_WAPI_SHARED_HANDLE(handle->type)) {
					struct _WapiHandleShared *shared_data;

					DEBUG ("%s: (%d) handle 0x%x is SHARED (%s)", __func__, _wapi_getpid (), i * _WAPI_HANDLE_INITIAL_COUNT + k, _wapi_handle_typename[handle->type]);

					/* Refresh the shared record's timestamp */
					shared_data = &_wapi_shared_layout->handles[handle->u.shared.offset];

					DEBUG ("%s: (%d) Updating timestamp of handle 0x%x", __func__, _wapi_getpid (), handle->u.shared.offset);

					InterlockedExchange ((gint32 *)&shared_data->timestamp, now);
				} else if (handle->type == WAPI_HANDLE_FILE) {
					struct _WapiHandle_file *file_handle = &handle->u.file;

					DEBUG ("%s: (%d) handle 0x%x is FILE", __func__, _wapi_getpid (), i * _WAPI_HANDLE_INITIAL_COUNT + k);

					g_assert (file_handle->share_info != NULL);

					DEBUG ("%s: (%d) Inc refs on fileshare 0x%x", __func__, _wapi_getpid (), (file_handle->share_info - &_wapi_fileshare_layout->share_info[0]) / sizeof(struct _WapiFileShare));

					/* Refresh the file-share record's timestamp */
					InterlockedExchange ((gint32 *)&file_handle->share_info->timestamp, now);

	thr_ret = mono_mutex_unlock (&scan_mutex);
	g_assert (thr_ret == 0);
	pthread_cleanup_pop (0);

	thr_ret = _wapi_shm_sem_unlock (_WAPI_SHARED_SEM_FILESHARE);

	_wapi_handle_unlock_shared_handles ();