2 * handles.c: Generic and internal operations on handles
5 * Dick Porter (dick@ximian.com)
7 * (C) 2002-2011 Novell, Inc.
8 * Copyright 2011 Xamarin Inc
17 #include <sys/types.h>
18 #ifdef HAVE_SYS_SOCKET_H
19 # include <sys/socket.h>
24 #ifdef HAVE_SYS_MMAN_H
25 # include <sys/mman.h>
32 #include <mono/io-layer/wapi.h>
33 #include <mono/io-layer/wapi-private.h>
34 #include <mono/io-layer/handles-private.h>
35 #include <mono/io-layer/misc-private.h>
36 #include <mono/io-layer/shared.h>
37 #include <mono/io-layer/collection.h>
38 #include <mono/io-layer/process-private.h>
39 #include <mono/io-layer/critical-section-private.h>
41 #include <mono/utils/mono-mutex.h>
45 #define DEBUG(...) g_message(__VA_ARGS__)
/* File-scope state for the io-layer handle machinery.
 * NOTE(review): this listing is fragmentary -- several initializer
 * entries and #ifdef branches are elided between visible lines. */
50 static void (*_wapi_handle_ops_get_close_func (WapiHandleType type))(gpointer, gpointer);
/* Capability bits registered per handle type (see
 * _wapi_handle_register_capabilities below). */
52 static WapiHandleCapability handle_caps[WAPI_HANDLE_COUNT]={0};
/* Per-type operation vtables, indexed by WapiHandleType. */
53 static struct _WapiHandleOps *handle_ops[WAPI_HANDLE_COUNT]={
61 #ifndef DISABLE_SOCKETS
67 &_wapi_namedmutex_ops,
69 &_wapi_namedevent_ops,
72 static void _wapi_shared_details (gpointer handle_info);
/* Per-type debug-dump callbacks, indexed by WapiHandleType. */
74 static void (*handle_details[WAPI_HANDLE_COUNT])(gpointer) = {
77 _wapi_console_details,
78 _wapi_shared_details, /* thread */
82 NULL, /* Nothing useful to see in a socket handle */
83 NULL, /* Nothing useful to see in a find handle */
84 _wapi_shared_details, /* process */
86 _wapi_shared_details, /* namedmutex */
87 _wapi_shared_details, /* namedsem */
88 _wapi_shared_details, /* namedevent */
/* Human-readable type names used by the DEBUG/g_message output. */
91 const char *_wapi_handle_typename[] = {
110 * We can hold _WAPI_PRIVATE_MAX_SLOTS * _WAPI_HANDLE_INITIAL_COUNT handles.
111 * If 4M handles are not enough... Oh, well... we will crash.
/* Map a flat handle index to its slot in _wapi_private_handles and to
 * its offset within that slot. */
113 #define SLOT_INDEX(x) (x / _WAPI_HANDLE_INITIAL_COUNT)
114 #define SLOT_OFFSET(x) (x % _WAPI_HANDLE_INITIAL_COUNT)
/* Two-level table of per-process handle data; slots are allocated
 * lazily (see init_handles_slot). */
116 struct _WapiHandleUnshared *_wapi_private_handles [_WAPI_PRIVATE_MAX_SLOTS];
117 static guint32 _wapi_private_handle_count = 0;
118 static guint32 _wapi_private_handle_slot_count = 0;
120 struct _WapiHandleSharedLayout *_wapi_shared_layout = NULL;
123 * If SHM is enabled, this will point to shared memory, otherwise it will be NULL.
125 struct _WapiFileShareLayout *_wapi_fileshare_layout = NULL;
128 * If SHM is disabled, this will point to a hash of _WapiFileShare structures, otherwise
129 * it will be NULL. We use this instead of _wapi_fileshare_layout to avoid allocating a
132 static GHashTable *file_share_hash;
133 static CRITICAL_SECTION file_share_hash_mutex;
135 #define file_share_hash_lock() EnterCriticalSection (&file_share_hash_mutex)
136 #define file_share_hash_unlock() LeaveCriticalSection (&file_share_hash_mutex)
/* Number of low handle indices reserved so that fd N can map to
 * handle N (set from getdtablesize() during init). */
138 guint32 _wapi_fd_reserve;
141 * This is an internal handle which is used for handling waiting for multiple handles.
142 * Threads which wait for multiple handles wait on this one handle, and when a handle
143 * is signalled, this handle is signalled too.
145 static gpointer _wapi_global_signal_handle;
147 /* Point to the mutex/cond inside _wapi_global_signal_handle */
148 mono_mutex_t *_wapi_global_signal_mutex;
149 pthread_cond_t *_wapi_global_signal_cond;
/* Set once at shutdown; most entry points assert this is still FALSE. */
152 gboolean _wapi_has_shut_down = FALSE;
154 /* Use this instead of getpid(), to cope with linuxthreads. It's a
155 * function rather than a variable lookup because we need to get at
156 * this before share_init() might have been called.
158 static pid_t _wapi_pid;
159 static mono_once_t pid_init_once = MONO_ONCE_INIT;
/* Forward declaration: full unref with the option to tolerate
 * still-busy private mutexes/conds during process teardown. */
161 static void _wapi_handle_unref_full (gpointer handle, gboolean ignore_private_busy_handles);
/* mono_once callback: caches the current process id in _wapi_pid. */
163 static void pid_init (void)
165 	_wapi_pid = getpid ();
/* Returns the cached process id, initializing it on first use via
 * mono_once so it is safe before share_init() has run. */
168 pid_t _wapi_getpid (void)
170 	mono_once (&pid_init_once, pid_init);
/* Serializes scans and (re)allocation of _wapi_private_handles. */
176 static mono_mutex_t scan_mutex;
/* atexit hook (registered via g_atexit during init): force-unrefs every
 * remaining handle so shared state is not left cluttered, then tears
 * down the shared-memory segments and the file-share hash.
 * NOTE(review): listing is fragmentary; some statements are elided. */
178 static void handle_cleanup (void)
182 	/* Every shared handle we were using ought really to be closed
183 	 * by now, but to make sure just blow them all away. The
184 	 * exiting finalizer thread in particular races us to the
185 	 * program exit and doesn't always win, so it can be left
186 	 * cluttering up the shared file. Anything else left over is
189 	for(i = SLOT_INDEX (0); _wapi_private_handles[i] != NULL; i++) {
190 		for(j = SLOT_OFFSET (0); j < _WAPI_HANDLE_INITIAL_COUNT; j++) {
191 			struct _WapiHandleUnshared *handle_data = &_wapi_private_handles[i][j];
192 			int type = handle_data->type;
/* Reconstruct the flat handle value from slot index and offset. */
193 			gpointer handle = GINT_TO_POINTER (i*_WAPI_HANDLE_INITIAL_COUNT+j);
195 			if (_WAPI_SHARED_HANDLE (type)) {
196 				if (type == WAPI_HANDLE_THREAD) {
197 					/* Special-case thread handles
198 					 * because they need extra
199 					 * cleanup. This also avoids
200 					 * a race condition between
201 					 * the application exit and
202 					 * the finalizer thread - if
203 					 * it finishes up between now
204 					 * and actual app termination
205 					 * it will find all its handle
206 					 * details have been blown
207 					 * away, so this sets those
210 					g_assert (0); /*This condition is freaking impossible*/
211 					_wapi_thread_set_termination_details (handle, 0);
/* Drop every outstanding reference; TRUE tolerates still-busy private
 * mutex/cond state during teardown (see _wapi_handle_unref_full). */
215 			for(k = handle_data->ref; k > 0; k--) {
216 				DEBUG ("%s: unreffing %s handle %p", __func__, _wapi_handle_typename[type], handle);
218 				_wapi_handle_unref_full (handle, TRUE);
223 	_wapi_shm_semaphores_remove ();
225 	_wapi_shm_detach (WAPI_SHM_DATA);
226 	_wapi_shm_detach (WAPI_SHM_FILESHARE);
228 	if (file_share_hash) {
229 		g_hash_table_destroy (file_share_hash);
230 		DeleteCriticalSection (&file_share_hash_mutex);
233 	for (i = 0; i < _WAPI_PRIVATE_MAX_SLOTS; ++i)
234 		g_free (_wapi_private_handles [i]);
240 * Initialize the io-layer.
/* NOTE(review): the function signature for this initializer is elided
 * from the listing; the body below pre-allocates the fd-reserved handle
 * slots, attaches shared memory, creates the global signal handle, and
 * registers handle_cleanup with g_atexit. */
245 	g_assert ((sizeof (handle_ops) / sizeof (handle_ops[0]))
246 		  == WAPI_HANDLE_COUNT);
248 	_wapi_fd_reserve = getdtablesize();
250 	/* This is needed by the code in _wapi_handle_new_internal */
/* Round the fd reserve up to a multiple of _WAPI_HANDLE_INITIAL_COUNT
 * so it covers whole slots. */
251 	_wapi_fd_reserve = (_wapi_fd_reserve + (_WAPI_HANDLE_INITIAL_COUNT - 1)) & ~(_WAPI_HANDLE_INITIAL_COUNT - 1);
255 	 * The entries in _wapi_private_handles reserved for fds are allocated lazily to
259 		_wapi_private_handles [idx++] = g_new0 (struct _WapiHandleUnshared,
260 							_WAPI_HANDLE_INITIAL_COUNT);
263 		_wapi_private_handle_count += _WAPI_HANDLE_INITIAL_COUNT;
264 		_wapi_private_handle_slot_count ++;
265 	} while(_wapi_fd_reserve > _wapi_private_handle_count);
267 	_wapi_shm_semaphores_init ();
269 	_wapi_shared_layout = _wapi_shm_attach (WAPI_SHM_DATA);
270 	g_assert (_wapi_shared_layout != NULL);
272 	if (_wapi_shm_enabled ()) {
273 		/* This allocates a 4mb array, so do it only if SHM is enabled */
274 		_wapi_fileshare_layout = _wapi_shm_attach (WAPI_SHM_FILESHARE);
275 		g_assert (_wapi_fileshare_layout != NULL);
278 #if !defined (DISABLE_SHARED_HANDLES)
279 	if (_wapi_shm_enabled ())
280 		_wapi_collection_init ();
283 	mono_mutex_init (&scan_mutex);
/* The global signal handle multiplexes multi-handle waits; cache
 * pointers to its cond/mutex for the wait code. */
285 	_wapi_global_signal_handle = _wapi_handle_new (WAPI_HANDLE_EVENT, NULL);
287 	_wapi_global_signal_cond = &_WAPI_PRIVATE_HANDLES (GPOINTER_TO_UINT (_wapi_global_signal_handle)).signal_cond;
288 	_wapi_global_signal_mutex = &_WAPI_PRIVATE_HANDLES (GPOINTER_TO_UINT (_wapi_global_signal_handle)).signal_mutex;
291 	/* Using g_atexit here instead of an explicit function call in
292 	 * a cleanup routine lets us cope when a third-party library
293 	 * calls exit (eg if an X client loses the connection to its
296 	g_atexit (handle_cleanup);
/* NOTE(review): shutdown routine; its signature is elided from the
 * listing. Marks the io-layer as shut down (asserting it only happens
 * once) and runs the error/thread subsystem cleanups. */
302 	g_assert (_wapi_has_shut_down == FALSE);
304 	_wapi_has_shut_down = TRUE;
306 	_wapi_error_cleanup ();
307 	_wapi_thread_cleanup ();
/* Initializes a freshly-claimed shared-memory handle record: stamps
 * the current time (for collection aging), clears the signalled flag,
 * sets one reference, and copies in any type-specific payload. */
310 static void _wapi_handle_init_shared (struct _WapiHandleShared *handle,
312 				      gpointer handle_specific)
314 	g_assert (_wapi_has_shut_down == FALSE);
/* Truncate time() to 32 bits; timestamps are compared as guint32. */
317 	handle->timestamp = (guint32)(time (NULL) & 0xFFFFFFFF);
318 	handle->signalled = FALSE;
319 	handle->handle_refs = 1;
321 	if (handle_specific != NULL) {
322 		memcpy (&handle->u, handle_specific, sizeof (handle->u));
/* Initializes a private (per-process) handle record. For non-shared
 * types it also creates the per-handle condition variable and mutex
 * used for signalling waits. */
326 static void _wapi_handle_init (struct _WapiHandleUnshared *handle,
327 			       WapiHandleType type, gpointer handle_specific)
331 	g_assert (_wapi_has_shut_down == FALSE);
334 	handle->signalled = FALSE;
337 	if (!_WAPI_SHARED_HANDLE(type)) {
338 		thr_ret = pthread_cond_init (&handle->signal_cond, NULL);
339 		g_assert (thr_ret == 0);
341 		thr_ret = mono_mutex_init (&handle->signal_mutex);
342 		g_assert (thr_ret == 0);
344 		if (handle_specific != NULL) {
345 			memcpy (&handle->u, handle_specific,
/* Claims a free slot in the shared-memory handle array for @type and
 * initializes it. Starts scanning from the last successful offset.
 * Returns the claimed offset; presumably returns 0 when the array is
 * full so the caller can collect/expand -- TODO confirm, the tail of
 * the function is elided from this listing. */
351 static guint32 _wapi_handle_new_shared (WapiHandleType type,
352 					gpointer handle_specific)
/* Remembered across calls: next-fit allocation cursor. Slot 0 is never
 * handed out (guard slot). */
355 	static guint32 last = 1;
358 	g_assert (_wapi_has_shut_down == FALSE);
360 	/* Leave the first slot empty as a guard */
362 	/* FIXME: expandable array */
363 	for(offset = last; offset <_WAPI_HANDLE_INITIAL_COUNT; offset++) {
364 		struct _WapiHandleShared *handle = &_wapi_shared_layout->handles[offset];
366 		if(handle->type == WAPI_HANDLE_UNUSED) {
367 			thr_ret = _wapi_handle_lock_shared_handles ();
368 			g_assert (thr_ret == 0);
/* CAS the type in so concurrent processes cannot claim the same slot. */
370 			if (InterlockedCompareExchange ((gint32 *)&handle->type, type, WAPI_HANDLE_UNUSED) == WAPI_HANDLE_UNUSED) {
373 				_wapi_handle_init_shared (handle, type,
376 				_wapi_handle_unlock_shared_handles ();
380 				/* Someone else beat us to it, just
385 				_wapi_handle_unlock_shared_handles ();
390 	/* Try again from the beginning */
395 	/* Will need to expand the array. The caller will sort it out */
401 * _wapi_handle_new_internal:
402 * @type: Init handle to this type
404 * Search for a free handle and initialize it. Return the handle on
405 * success and 0 on failure. This is only called from
406 * _wapi_handle_new, and scan_mutex must be held.
408 static guint32 _wapi_handle_new_internal (WapiHandleType type,
409 					  gpointer handle_specific)
/* Next-fit cursor across calls; guarded by scan_mutex (see contract
 * above). */
412 	static guint32 last = 0;
413 	gboolean retry = FALSE;
415 	g_assert (_wapi_has_shut_down == FALSE);
417 	/* A linear scan should be fast enough. Start from the last
418 	 * allocation, assuming that handles are allocated more often
419 	 * than they're freed. Leave the space reserved for file
423 	if (last < _wapi_fd_reserve) {
424 		last = _wapi_fd_reserve;
/* NOTE(review): the line computing `count` from `last` is elided from
 * this listing. */
431 	for(i = SLOT_INDEX (count); i < _wapi_private_handle_slot_count; i++) {
432 		if (_wapi_private_handles [i]) {
433 			for (k = SLOT_OFFSET (count); k < _WAPI_HANDLE_INITIAL_COUNT; k++) {
434 				struct _WapiHandleUnshared *handle = &_wapi_private_handles [i][k];
436 				if(handle->type == WAPI_HANDLE_UNUSED) {
439 					_wapi_handle_init (handle, type, handle_specific);
/* Wrap around once if the first pass started past the fd reserve. */
447 	if(retry && last > _wapi_fd_reserve) {
448 		/* Try again from the beginning */
449 		last = _wapi_fd_reserve;
453 	/* Will need to expand the array. The caller will sort it out */
/* Public allocator for non-fd handles: takes scan_mutex, asks
 * _wapi_handle_new_internal for a slot (growing the slot table on
 * demand), and for shared types also claims a shared-memory record.
 * Returns _WAPI_HANDLE_INVALID when slots are exhausted. */
459 _wapi_handle_new (WapiHandleType type, gpointer handle_specific)
461 	guint32 handle_idx = 0;
465 	g_assert (_wapi_has_shut_down == FALSE);
467 	DEBUG ("%s: Creating new handle of type %s", __func__,
468 	       _wapi_handle_typename[type]);
/* fd-mapped types must go through _wapi_handle_new_fd instead. */
470 	g_assert(!_WAPI_FD_HANDLE(type));
/* Cleanup handler so scan_mutex is released if the thread is
 * cancelled while holding it. */
472 	pthread_cleanup_push ((void(*)(void *))mono_mutex_unlock_in_cleanup,
473 			      (void *)&scan_mutex);
474 	thr_ret = mono_mutex_lock (&scan_mutex);
475 	g_assert (thr_ret == 0);
477 	while ((handle_idx = _wapi_handle_new_internal (type, handle_specific)) == 0) {
478 		/* Try and expand the array, and have another go */
479 		int idx = SLOT_INDEX (_wapi_private_handle_count);
480 		if (idx >= _WAPI_PRIVATE_MAX_SLOTS) {
484 		_wapi_private_handles [idx] = g_new0 (struct _WapiHandleUnshared,
485 						      _WAPI_HANDLE_INITIAL_COUNT);
487 		_wapi_private_handle_count += _WAPI_HANDLE_INITIAL_COUNT;
488 		_wapi_private_handle_slot_count ++;
491 	thr_ret = mono_mutex_unlock (&scan_mutex);
492 	g_assert (thr_ret == 0);
493 	pthread_cleanup_pop (0);
495 	if (handle_idx == 0) {
496 		/* We ran out of slots */
497 		handle = _WAPI_HANDLE_INVALID;
501 	/* Make sure we left the space for fd mappings */
502 	g_assert (handle_idx >= _wapi_fd_reserve);
504 	handle = GUINT_TO_POINTER (handle_idx);
506 	DEBUG ("%s: Allocated new handle %p", __func__, handle);
508 	if (_WAPI_SHARED_HANDLE(type)) {
509 		/* Add the shared section too */
512 		ref = _wapi_handle_new_shared (type, handle_specific);
/* First attempt failed: collect stale shared handles and retry once. */
514 			_wapi_handle_collect ();
515 			ref = _wapi_handle_new_shared (type, handle_specific);
517 				/* FIXME: grow the arrays */
518 				handle = _WAPI_HANDLE_INVALID;
/* Link the private record to its shared-memory offset. */
523 		_WAPI_PRIVATE_HANDLES(handle_idx).u.shared.offset = ref;
524 		DEBUG ("%s: New shared handle at offset 0x%x", __func__,
/* Opens a private handle onto an existing shared-memory record at
 * @offset. First scans for an already-open private handle to the same
 * offset (and refs it); otherwise allocates a fresh private slot,
 * points it at @offset and bumps the shared record's refcount.
 * Returns INVALID_HANDLE_VALUE if the shared record was deleted or has
 * the wrong type. */
532 gpointer _wapi_handle_new_from_offset (WapiHandleType type, guint32 offset,
535 	guint32 handle_idx = 0;
536 	gpointer handle = INVALID_HANDLE_VALUE;
538 	struct _WapiHandleShared *shared;
540 	g_assert (_wapi_has_shut_down == FALSE);
542 	DEBUG ("%s: Creating new handle of type %s to offset %d", __func__,
543 		   _wapi_handle_typename[type], offset);
545 	g_assert(!_WAPI_FD_HANDLE(type));
546 	g_assert(_WAPI_SHARED_HANDLE(type));
/* Offset 0 is the guard slot and never refers to a real handle. */
547 	g_assert(offset != 0);
549 	shared = &_wapi_shared_layout->handles[offset];
551 		guint32 now = (guint32)(time (NULL) & 0xFFFFFFFF);
552 		/* Bump up the timestamp for this offset */
553 		InterlockedExchange ((gint32 *)&shared->timestamp, now);
556 	pthread_cleanup_push ((void(*)(void *))mono_mutex_unlock_in_cleanup,
557 			      (void *)&scan_mutex);
558 	thr_ret = mono_mutex_lock (&scan_mutex);
559 	g_assert (thr_ret == 0);
/* First pass: look for an existing private handle on this offset. */
561 	for (i = SLOT_INDEX (0); i < _wapi_private_handle_slot_count; i++) {
562 		if (_wapi_private_handles [i]) {
563 			for (k = SLOT_OFFSET (0); k < _WAPI_HANDLE_INITIAL_COUNT; k++) {
564 				struct _WapiHandleUnshared *handle_data = &_wapi_private_handles [i][k];
566 				if (handle_data->type == type &&
567 				    handle_data->u.shared.offset == offset) {
568 					handle = GUINT_TO_POINTER (i * _WAPI_HANDLE_INITIAL_COUNT + k);
569 					goto first_pass_done;
576 	thr_ret = mono_mutex_unlock (&scan_mutex);
577 	g_assert (thr_ret == 0);
578 	pthread_cleanup_pop (0);
580 	if (handle != INVALID_HANDLE_VALUE) {
581 		_wapi_handle_ref (handle);
583 		DEBUG ("%s: Returning old handle %p referencing 0x%x",
584 			   __func__, handle, offset);
588 	/* Prevent entries expiring under us as we search */
589 	thr_ret = _wapi_handle_lock_shared_handles ();
590 	g_assert (thr_ret == 0);
592 	if (shared->type == WAPI_HANDLE_UNUSED) {
593 		/* Someone deleted this handle while we were working */
594 		DEBUG ("%s: Handle at 0x%x unused", __func__, offset);
598 	if (shared->type != type) {
599 		DEBUG ("%s: Wrong type at %d 0x%x! Found %s wanted %s",
600 			   __func__, offset, offset,
601 			   _wapi_handle_typename[shared->type],
602 			   _wapi_handle_typename[type]);
/* Second pass: allocate a new private slot, growing the table on
 * demand just like _wapi_handle_new. */
606 	pthread_cleanup_push ((void(*)(void *))mono_mutex_unlock_in_cleanup,
607 			      (void *)&scan_mutex);
608 	thr_ret = mono_mutex_lock (&scan_mutex);
609 	g_assert (thr_ret == 0);
611 	while ((handle_idx = _wapi_handle_new_internal (type, NULL)) == 0) {
612 		/* Try and expand the array, and have another go */
613 		int idx = SLOT_INDEX (_wapi_private_handle_count);
614 		_wapi_private_handles [idx] = g_new0 (struct _WapiHandleUnshared,
615 						      _WAPI_HANDLE_INITIAL_COUNT);
617 		_wapi_private_handle_count += _WAPI_HANDLE_INITIAL_COUNT;
618 		_wapi_private_handle_slot_count ++;
621 	thr_ret = mono_mutex_unlock (&scan_mutex);
622 	g_assert (thr_ret == 0);
623 	pthread_cleanup_pop (0);
625 	/* Make sure we left the space for fd mappings */
626 	g_assert (handle_idx >= _wapi_fd_reserve);
628 	handle = GUINT_TO_POINTER (handle_idx);
630 	_WAPI_PRIVATE_HANDLES(handle_idx).u.shared.offset = offset;
631 	InterlockedIncrement ((gint32 *)&shared->handle_refs);
633 	DEBUG ("%s: Allocated new handle %p referencing 0x%x (shared refs %d)", __func__, handle, offset, shared->handle_refs);
636 	_wapi_handle_unlock_shared_handles ();
/* Lazily allocates slot @idx of _wapi_private_handles (used for the
 * fd-reserved range). Takes scan_mutex and re-checks under the lock so
 * concurrent callers allocate the slot only once. */
642 init_handles_slot (int idx)
646 	pthread_cleanup_push ((void(*)(void *))mono_mutex_unlock_in_cleanup,
647 						  (void *)&scan_mutex);
648 	thr_ret = mono_mutex_lock (&scan_mutex);
649 	g_assert (thr_ret == 0);
651 	if (_wapi_private_handles [idx] == NULL) {
652 		_wapi_private_handles [idx] = g_new0 (struct _WapiHandleUnshared,
653 											  _WAPI_HANDLE_INITIAL_COUNT);
654 		g_assert (_wapi_private_handles [idx]);
657 	thr_ret = mono_mutex_unlock (&scan_mutex);
658 	g_assert (thr_ret == 0);
659 	pthread_cleanup_pop (0);
/* Creates the handle for file descriptor @fd: fd N maps directly to
 * handle N within the fd-reserved range, so no slot search is needed.
 * Returns the fd as a handle, or _WAPI_HANDLE_INVALID if @fd is beyond
 * the reserved range. */
662 gpointer _wapi_handle_new_fd (WapiHandleType type, int fd,
663 			      gpointer handle_specific)
665 	struct _WapiHandleUnshared *handle;
668 	g_assert (_wapi_has_shut_down == FALSE);
670 	DEBUG ("%s: Creating new handle of type %s", __func__,
671 		   _wapi_handle_typename[type]);
673 	g_assert(_WAPI_FD_HANDLE(type));
674 	g_assert(!_WAPI_SHARED_HANDLE(type));
676 	if (fd >= _wapi_fd_reserve) {
677 		DEBUG ("%s: fd %d is too big", __func__, fd);
679 		return(GUINT_TO_POINTER (_WAPI_HANDLE_INVALID));
682 	/* Initialize the array entries on demand */
683 	if (_wapi_private_handles [SLOT_INDEX (fd)] == NULL)
684 		init_handles_slot (SLOT_INDEX (fd));
686 	handle = &_WAPI_PRIVATE_HANDLES(fd);
688 	if (handle->type != WAPI_HANDLE_UNUSED) {
689 		DEBUG ("%s: fd %d is already in use!", __func__, fd);
690 		/* FIXME: clean up this handle? We can't do anything
691 		 * with the fd, cos thats the new one
695 	DEBUG ("%s: Assigning new fd handle %d", __func__, fd);
697 	/* Prevent file share entries racing with us, when the file
698 	 * handle is only half initialised
700 	thr_ret = _wapi_shm_sem_lock (_WAPI_SHARED_SEM_FILESHARE);
701 	g_assert(thr_ret == 0);
703 	_wapi_handle_init (handle, type, handle_specific);
705 	thr_ret = _wapi_shm_sem_unlock (_WAPI_SHARED_SEM_FILESHARE);
707 	return(GUINT_TO_POINTER(fd));
/* Validates @handle against @type and, when @handle_specific is
 * non-NULL, returns a pointer to the type-specific payload: the shared
 * record's union for shared types, the private record's union
 * otherwise. NOTE(review): the return statements are elided from this
 * listing; by the visible guards the FALSE paths are slot-invalid,
 * type-mismatch and deleted-shared-record -- TODO confirm. */
710 gboolean _wapi_lookup_handle (gpointer handle, WapiHandleType type,
711 			      gpointer *handle_specific)
713 	struct _WapiHandleUnshared *handle_data;
714 	guint32 handle_idx = GPOINTER_TO_UINT(handle);
716 	if (!_WAPI_PRIVATE_VALID_SLOT (handle_idx)) {
720 	/* Initialize the array entries on demand */
721 	if (_wapi_private_handles [SLOT_INDEX (handle_idx)] == NULL)
722 		init_handles_slot (SLOT_INDEX (handle_idx));
724 	handle_data = &_WAPI_PRIVATE_HANDLES(handle_idx);
726 	if (handle_data->type != type) {
730 	if (handle_specific == NULL) {
734 	if (_WAPI_SHARED_HANDLE(type)) {
735 		struct _WapiHandle_shared_ref *ref;
736 		struct _WapiHandleShared *shared_handle_data;
738 		ref = &handle_data->u.shared;
739 		shared_handle_data = &_wapi_shared_layout->handles[ref->offset];
741 		if (shared_handle_data->type != type) {
742 			/* The handle must have been deleted on us
747 		*handle_specific = &shared_handle_data->u;
749 		*handle_specific = &handle_data->u;
/* Iterates every private handle of @type under scan_mutex, invoking
 * @on_each with the handle and @user_data; the callback returning TRUE
 * stops the iteration (visible break is elided from this listing). */
756 _wapi_handle_foreach (WapiHandleType type,
757 			gboolean (*on_each)(gpointer test, gpointer user),
760 	struct _WapiHandleUnshared *handle_data = NULL;
765 	pthread_cleanup_push ((void(*)(void *))mono_mutex_unlock_in_cleanup,
766 			      (void *)&scan_mutex);
767 	thr_ret = mono_mutex_lock (&scan_mutex);
768 	g_assert (thr_ret == 0);
770 	for (i = SLOT_INDEX (0); i < _wapi_private_handle_slot_count; i++) {
771 		if (_wapi_private_handles [i]) {
772 			for (k = SLOT_OFFSET (0); k < _WAPI_HANDLE_INITIAL_COUNT; k++) {
773 				handle_data = &_wapi_private_handles [i][k];
775 				if (handle_data->type == type) {
776 					ret = GUINT_TO_POINTER (i * _WAPI_HANDLE_INITIAL_COUNT + k);
777 					if (on_each (ret, user_data) == TRUE)
784 	thr_ret = mono_mutex_unlock (&scan_mutex);
785 	g_assert (thr_ret == 0);
786 	pthread_cleanup_pop (0);
789 /* This might list some shared handles twice if they are already
790  * opened by this process, and the check function returns FALSE the
791  * first time. Shared handles that are created during the search are
792  * unreffed if the check function returns FALSE, so callers must not
793  * rely on the handle persisting (unless the check function returns
795  * The caller owns the returned handle.
797 gpointer _wapi_search_handle (WapiHandleType type,
798 			      gboolean (*check)(gpointer test, gpointer user),
800 			      gpointer *handle_specific,
801 			      gboolean search_shared)
803 	struct _WapiHandleUnshared *handle_data = NULL;
804 	struct _WapiHandleShared *shared = NULL;
807 	gboolean found = FALSE;
/* Pass 1: scan this process's private handles under scan_mutex. */
810 	pthread_cleanup_push ((void(*)(void *))mono_mutex_unlock_in_cleanup,
811 			      (void *)&scan_mutex);
812 	thr_ret = mono_mutex_lock (&scan_mutex);
813 	g_assert (thr_ret == 0);
815 	for (i = SLOT_INDEX (0); !found && i < _wapi_private_handle_slot_count; i++) {
816 		if (_wapi_private_handles [i]) {
817 			for (k = SLOT_OFFSET (0); k < _WAPI_HANDLE_INITIAL_COUNT; k++) {
818 				handle_data = &_wapi_private_handles [i][k];
820 				if (handle_data->type == type) {
821 					ret = GUINT_TO_POINTER (i * _WAPI_HANDLE_INITIAL_COUNT + k);
822 					if (check (ret, user_data) == TRUE) {
823 						_wapi_handle_ref (ret);
826 						if (_WAPI_SHARED_HANDLE (type)) {
/* NOTE(review): indexes the shared array with the slot index `i`, not
 * the handle's shared offset -- looks suspect; verify against the
 * complete source before relying on `shared` here. */
827 							shared = &_wapi_shared_layout->handles[i];
837 	thr_ret = mono_mutex_unlock (&scan_mutex);
838 	g_assert (thr_ret == 0);
839 	pthread_cleanup_pop (0);
/* Pass 2: optionally scan other processes' handles in shared memory. */
841 	if (!found && search_shared && _WAPI_SHARED_HANDLE (type)) {
842 		/* Not found yet, so search the shared memory too */
843 		DEBUG ("%s: Looking at other shared handles...", __func__);
845 		for (i = 0; i < _WAPI_HANDLE_INITIAL_COUNT; i++) {
846 			shared = &_wapi_shared_layout->handles[i];
848 			if (shared->type == type) {
849 				/* Tell new_from_offset to not
850 				 * timestamp this handle, because
851 				 * otherwise it will ping every handle
852 				 * in the list and they will never
855 				ret = _wapi_handle_new_from_offset (type, i,
857 				if (ret == INVALID_HANDLE_VALUE) {
858 					/* This handle was deleted
859 					 * while we were looking at it
864 				DEBUG ("%s: Opened tmp handle %p (type %s) from offset %d", __func__, ret, _wapi_handle_typename[type], i);
866 				/* It's possible that the shared part
867 				 * of this handle has now been blown
868 				 * away (after new_from_offset
869 				 * successfully opened it,) if its
870 				 * timestamp is too old. The check
871 				 * function needs to be aware of this,
872 				 * and cope if the handle has
875 				if (check (ret, user_data) == TRUE) {
876 					/* Timestamp this handle, but make
877 					 * sure it still exists first
879 					thr_ret = _wapi_handle_lock_shared_handles ();
880 					g_assert (thr_ret == 0);
882 					if (shared->type == type) {
883 						guint32 now = (guint32)(time (NULL) & 0xFFFFFFFF);
884 						InterlockedExchange ((gint32 *)&shared->timestamp, now);
887 						handle_data = &_WAPI_PRIVATE_HANDLES(GPOINTER_TO_UINT(ret));
889 						_wapi_handle_unlock_shared_handles ();
892 						/* It's been deleted,
896 						_wapi_handle_unlock_shared_handles ();
900 					/* This isn't the handle we're looking
901 					 * for, so drop the reference we took
902 					 * in _wapi_handle_new_from_offset ()
904 					_wapi_handle_unref (ret);
/* Hand back the type-specific payload for the match, if requested. */
914 	if(handle_specific != NULL) {
915 		if (_WAPI_SHARED_HANDLE(type)) {
916 			g_assert(shared->type == type);
918 			*handle_specific = &shared->u;
920 			*handle_specific = &handle_data->u;
928 /* Returns the offset of the metadata array, or -1 on error, or 0 for
929  * not found (0 is not a valid offset)
931 gint32 _wapi_search_handle_namespace (WapiHandleType type,
934 	struct _WapiHandleShared *shared_handle_data;
939 	g_assert(_WAPI_SHARED_HANDLE(type));
941 	DEBUG ("%s: Lookup for handle named [%s] type %s", __func__,
942 		   utf8_name, _wapi_handle_typename[type]);
944 	/* Do a handle collection before starting to look, so that any
945 	 * stale cruft gets removed
947 	_wapi_handle_collect ();
949 	thr_ret = _wapi_handle_lock_shared_handles ();
950 	g_assert (thr_ret == 0);
/* Slot 0 is the guard slot, so start the scan at 1. */
952 	for(i = 1; i < _WAPI_HANDLE_INITIAL_COUNT; i++) {
953 		WapiSharedNamespace *sharedns;
955 		shared_handle_data = &_wapi_shared_layout->handles[i];
957 		/* Check mutex, event, semaphore, timer, job and
958 		 * file-mapping object names. So far only mutex,
959 		 * semaphore and event are implemented.
961 		if (!_WAPI_SHARED_NAMESPACE (shared_handle_data->type)) {
965 		DEBUG ("%s: found a shared namespace handle at 0x%x (type %s)", __func__, i, _wapi_handle_typename[shared_handle_data->type]);
/* Namespaced handles store their name at the start of the payload. */
967 		sharedns=(WapiSharedNamespace *)&shared_handle_data->u;
969 		DEBUG ("%s: name is [%s]", __func__, sharedns->name);
971 		if (strcmp (sharedns->name, utf8_name) == 0) {
972 			if (shared_handle_data->type != type) {
973 				/* Its the wrong type, so fail now */
974 				DEBUG ("%s: handle 0x%x matches name but is wrong type: %s", __func__, i, _wapi_handle_typename[shared_handle_data->type]);
978 				DEBUG ("%s: handle 0x%x matches name and type", __func__, i);
986 	_wapi_handle_unlock_shared_handles ();
/* Atomically adds a reference to @handle; warns (rather than asserts)
 * on an unused handle. For shared types it also refreshes the shared
 * record's timestamp so the collector does not expire it. */
991 void _wapi_handle_ref (gpointer handle)
993 	guint32 idx = GPOINTER_TO_UINT(handle);
994 	struct _WapiHandleUnshared *handle_data;
996 	if (!_WAPI_PRIVATE_VALID_SLOT (idx)) {
1000 	if (_wapi_handle_type (handle) == WAPI_HANDLE_UNUSED) {
1001 		g_warning ("%s: Attempting to ref unused handle %p", __func__,
1006 	handle_data = &_WAPI_PRIVATE_HANDLES(idx);
1008 	InterlockedIncrement ((gint32 *)&handle_data->ref);
1010 	/* It's possible for processes to exit before getting around
1011 	 * to updating timestamps in the collection thread, so if a
1012 	 * shared handle is reffed do the timestamp here as well just
1015 	if (_WAPI_SHARED_HANDLE(handle_data->type)) {
1016 		struct _WapiHandleShared *shared_data = &_wapi_shared_layout->handles[handle_data->u.shared.offset];
1017 		guint32 now = (guint32)(time (NULL) & 0xFFFFFFFF);
1018 		InterlockedExchange ((gint32 *)&shared_data->timestamp, now);
1022 	g_message ("%s: %s handle %p ref now %d", __func__,
1023 		   _wapi_handle_typename[_WAPI_PRIVATE_HANDLES (idx).type],
1025 		   _WAPI_PRIVATE_HANDLES(idx).ref);
1029 /* The handle must not be locked on entry to this function */
/* Drops one reference; when the count hits zero, snapshots the handle
 * data, clears the table slot (so a racing open of the same fd cannot
 * see stale state), destroys the private mutex/cond, decrements the
 * shared record's refcount, and finally calls the type's close hook.
 * @ignore_private_busy_handles tolerates EBUSY mutex/cond destruction
 * during process teardown. */
1030 static void _wapi_handle_unref_full (gpointer handle, gboolean ignore_private_busy_handles)
1032 	guint32 idx = GPOINTER_TO_UINT(handle);
1033 	gboolean destroy = FALSE, early_exit = FALSE;
1036 	if (!_WAPI_PRIVATE_VALID_SLOT (idx)) {
1040 	if (_wapi_handle_type (handle) == WAPI_HANDLE_UNUSED) {
1041 		g_warning ("%s: Attempting to unref unused handle %p",
1046 	/* Possible race condition here if another thread refs the
1047 	 * handle between here and setting the type to UNUSED. I
1048 	 * could lock a mutex, but I'm not sure that allowing a handle
1049 	 * reference to reach 0 isn't an application bug anyway.
1051 	destroy = (InterlockedDecrement ((gint32 *)&_WAPI_PRIVATE_HANDLES(idx).ref) ==0);
1054 	g_message ("%s: %s handle %p ref now %d (destroy %s)", __func__,
1055 		   _wapi_handle_typename[_WAPI_PRIVATE_HANDLES (idx).type],
1057 		   _WAPI_PRIVATE_HANDLES(idx).ref, destroy?"TRUE":"FALSE");
1061 		/* Need to copy the handle info, reset the slot in the
1062 		 * array, and _only then_ call the close function to
1063 		 * avoid race conditions (eg file descriptors being
1064 		 * closed, and another file being opened getting the
1065 		 * same fd racing the memset())
1067 		struct _WapiHandleUnshared handle_data;
1068 		struct _WapiHandleShared shared_handle_data;
1069 		WapiHandleType type = _WAPI_PRIVATE_HANDLES(idx).type;
1070 		void (*close_func)(gpointer, gpointer) = _wapi_handle_ops_get_close_func (type);
1071 		gboolean is_shared = _WAPI_SHARED_HANDLE(type);
1074 			/* If this is a shared handle we need to take
1075 			 * the shared lock outside of the scan_mutex
1076 			 * lock to avoid deadlocks
1078 			thr_ret = _wapi_handle_lock_shared_handles ();
1079 			g_assert (thr_ret == 0);
1082 		pthread_cleanup_push ((void(*)(void *))mono_mutex_unlock_in_cleanup, (void *)&scan_mutex);
1083 		thr_ret = mono_mutex_lock (&scan_mutex);
1085 		DEBUG ("%s: Destroying handle %p", __func__, handle);
/* Snapshot before wiping so close_func still sees the payload. */
1087 		memcpy (&handle_data, &_WAPI_PRIVATE_HANDLES(idx),
1088 			sizeof (struct _WapiHandleUnshared));
1090 		memset (&_WAPI_PRIVATE_HANDLES(idx).u, '\0',
1091 			sizeof(_WAPI_PRIVATE_HANDLES(idx).u));
1093 		_WAPI_PRIVATE_HANDLES(idx).type = WAPI_HANDLE_UNUSED;
1096 		/* Destroy the mutex and cond var. We hope nobody
1097 		 * tried to grab them between the handle unlock and
1098 		 * now, but pthreads doesn't have a
1099 		 * "unlock_and_destroy" atomic function.
1101 		thr_ret = mono_mutex_destroy (&_WAPI_PRIVATE_HANDLES(idx).signal_mutex);
1102 		/*WARNING gross hack to make cleanup not crash when exiting without the whole runtime teardown.*/
1103 		if (thr_ret == EBUSY && ignore_private_busy_handles) {
1107 			g_error ("Error destroying handle %p mutex due to %d\n", handle, thr_ret);
1109 		thr_ret = pthread_cond_destroy (&_WAPI_PRIVATE_HANDLES(idx).signal_cond);
1110 		if (thr_ret == EBUSY && ignore_private_busy_handles)
1112 		else if (thr_ret != 0)
1113 			g_error ("Error destroying handle %p cond var due to %d\n", handle, thr_ret);
1116 			struct _WapiHandleShared *shared = &_wapi_shared_layout->handles[handle_data.u.shared.offset];
/* Snapshot the shared payload too, for the close hook below. */
1118 			memcpy (&shared_handle_data, shared,
1119 				sizeof (struct _WapiHandleShared));
1121 			/* It's possible that this handle is already
1122 			 * pointing at a deleted shared section
1125 			g_message ("%s: %s handle %p shared refs before dec %d", __func__, _wapi_handle_typename[type], handle, shared->handle_refs);
1128 			if (shared->handle_refs > 0) {
1129 				shared->handle_refs--;
1130 				if (shared->handle_refs == 0) {
/* Last shared ref gone: release the shared slot for reuse. */
1131 					memset (shared, '\0', sizeof (struct _WapiHandleShared));
1136 		thr_ret = mono_mutex_unlock (&scan_mutex);
1137 		g_assert (thr_ret == 0);
1138 		pthread_cleanup_pop (0);
1143 			_wapi_handle_unlock_shared_handles ();
1146 		if (close_func != NULL) {
1148 				close_func (handle, &shared_handle_data.u);
1150 				close_func (handle, &handle_data.u);
/* Public unref: same as _wapi_handle_unref_full but never tolerates
 * busy private mutexes/conds (that path is teardown-only). */
1156 void _wapi_handle_unref (gpointer handle)
1158 	_wapi_handle_unref_full (handle, FALSE);
/* Records the capability bits (wait/signal/own...) supported by handle
 * type @type; queried later by _wapi_handle_test_capabilities. */
1161 void _wapi_handle_register_capabilities (WapiHandleType type,
1162 					 WapiHandleCapability caps)
1164 	handle_caps[type] = caps;
/* TRUE if @handle's type has any of the capability bits in @caps
 * (bitwise test against the registered capability mask). */
1167 gboolean _wapi_handle_test_capabilities (gpointer handle,
1168 					 WapiHandleCapability caps)
1170 	guint32 idx = GPOINTER_TO_UINT(handle);
1171 	WapiHandleType type;
1173 	if (!_WAPI_PRIVATE_VALID_SLOT (idx)) {
1177 	type = _WAPI_PRIVATE_HANDLES(idx).type;
1179 	DEBUG ("%s: testing 0x%x against 0x%x (%d)", __func__,
1180 		   handle_caps[type], caps, handle_caps[type] & caps);
1182 	return((handle_caps[type] & caps) != 0);
/* Returns the close hook from @type's ops vtable, or falls through
 * when none is registered (NULL return elided from this listing). */
1185 static void (*_wapi_handle_ops_get_close_func (WapiHandleType type))(gpointer, gpointer)
1187 	if (handle_ops[type] != NULL &&
1188 	    handle_ops[type]->close != NULL) {
1189 		return (handle_ops[type]->close);
/* Dispatches to the type-specific close hook, if one is registered;
 * silently does nothing otherwise. */
1195 void _wapi_handle_ops_close (gpointer handle, gpointer data)
1197 	guint32 idx = GPOINTER_TO_UINT(handle);
1198 	WapiHandleType type;
1200 	if (!_WAPI_PRIVATE_VALID_SLOT (idx)) {
1204 	type = _WAPI_PRIVATE_HANDLES(idx).type;
1206 	if (handle_ops[type] != NULL &&
1207 	    handle_ops[type]->close != NULL) {
1208 		handle_ops[type]->close (handle, data);
/* Dispatches to the type-specific signal hook, if registered. */
1212 void _wapi_handle_ops_signal (gpointer handle)
1214 	guint32 idx = GPOINTER_TO_UINT(handle);
1215 	WapiHandleType type;
1217 	if (!_WAPI_PRIVATE_VALID_SLOT (idx)) {
1221 	type = _WAPI_PRIVATE_HANDLES(idx).type;
1223 	if (handle_ops[type] != NULL && handle_ops[type]->signal != NULL) {
1224 		handle_ops[type]->signal (handle);
/* Dispatches to the type-specific own_handle hook and returns its
 * result; the no-hook return path is elided from this listing. */
1228 gboolean _wapi_handle_ops_own (gpointer handle)
1230 	guint32 idx = GPOINTER_TO_UINT(handle);
1231 	WapiHandleType type;
1233 	if (!_WAPI_PRIVATE_VALID_SLOT (idx)) {
1237 	type = _WAPI_PRIVATE_HANDLES(idx).type;
1239 	if (handle_ops[type] != NULL && handle_ops[type]->own_handle != NULL) {
1240 		return(handle_ops[type]->own_handle (handle));
/* Dispatches to the type-specific is_owned hook and returns its
 * result; the no-hook return path is elided from this listing. */
1246 gboolean _wapi_handle_ops_isowned (gpointer handle)
1248 	guint32 idx = GPOINTER_TO_UINT(handle);
1249 	WapiHandleType type;
1251 	if (!_WAPI_PRIVATE_VALID_SLOT (idx)) {
1255 	type = _WAPI_PRIVATE_HANDLES(idx).type;
1257 	if (handle_ops[type] != NULL && handle_ops[type]->is_owned != NULL) {
1258 		return(handle_ops[type]->is_owned (handle));
/* Dispatches to the type-specific special_wait hook; returns
 * WAIT_FAILED for an invalid slot or when no hook is registered. */
1264 guint32 _wapi_handle_ops_special_wait (gpointer handle, guint32 timeout, gboolean alertable)
1266 	guint32 idx = GPOINTER_TO_UINT(handle);
1267 	WapiHandleType type;
1269 	if (!_WAPI_PRIVATE_VALID_SLOT (idx)) {
1270 		return(WAIT_FAILED);
1273 	type = _WAPI_PRIVATE_HANDLES(idx).type;
1275 	if (handle_ops[type] != NULL &&
1276 	    handle_ops[type]->special_wait != NULL) {
1277 		return(handle_ops[type]->special_wait (handle, timeout, alertable));
1279 	return(WAIT_FAILED);
/* Dispatches to the type-specific prewait hook, if registered. */
1283 void _wapi_handle_ops_prewait (gpointer handle)
1285 	guint32 idx = GPOINTER_TO_UINT (handle);
1286 	WapiHandleType type;
1288 	if (!_WAPI_PRIVATE_VALID_SLOT (idx)) {
1292 	type = _WAPI_PRIVATE_HANDLES (idx).type;
1294 	if (handle_ops[type] != NULL &&
1295 	    handle_ops[type]->prewait != NULL) {
1296 		handle_ops[type]->prewait (handle);
1303 * @handle: The handle to release
1305 * Closes and invalidates @handle, releasing any resources it
1306 * consumes. When the last handle to a temporary or non-persistent
1307 * object is closed, that object can be deleted. Closing the same
1308 * handle twice is an error.
1310 * Return value: %TRUE on success, %FALSE otherwise.
1312 gboolean CloseHandle(gpointer handle)
1314 	if (handle == NULL) {
1315 		/* Problem: because we map file descriptors to the
1316 		 * same-numbered handle we can't tell the difference
1317 		 * between a bogus handle and the handle to stdin.
1318 		 * Assume that it's the console handle if that handle
1321 		if (_WAPI_PRIVATE_HANDLES (0).type != WAPI_HANDLE_CONSOLE) {
1322 			SetLastError (ERROR_INVALID_PARAMETER);
1326 	if (handle == _WAPI_HANDLE_INVALID){
1327 		SetLastError (ERROR_INVALID_PARAMETER);
/* Closing is just dropping the caller's reference; destruction happens
 * in _wapi_handle_unref_full when the count reaches zero. */
1331 	_wapi_handle_unref (handle);
1336 /* Lots more to implement here, but this is all we need at the moment */
/* Win32-style DuplicateHandle, restricted to the current process on
 * both sides.  The pseudo-handles for the current process and current
 * thread are materialised into real handles; any other handle is
 * duplicated by simply taking an extra reference.  The access/inherit/
 * options arguments are accepted for API compatibility but ignored. */
1337 gboolean DuplicateHandle (gpointer srcprocess, gpointer src,
1338 gpointer targetprocess, gpointer *target,
1339 guint32 access G_GNUC_UNUSED, gboolean inherit G_GNUC_UNUSED, guint32 options G_GNUC_UNUSED)
1341 if (srcprocess != _WAPI_PROCESS_CURRENT ||
1342 targetprocess != _WAPI_PROCESS_CURRENT) {
1343 /* Duplicating other process's handles is not supported */
1344 SetLastError (ERROR_INVALID_HANDLE);
/* Turn the pseudo-handles into concrete ones... */
1348 if (src == _WAPI_PROCESS_CURRENT) {
1349 *target = _wapi_process_duplicate ();
1350 } else if (src == _WAPI_THREAD_CURRENT) {
1351 *target = _wapi_thread_duplicate ();
/* ...otherwise a duplicate is just an extra reference. */
1353 _wapi_handle_ref (src);
/* Try to lock every handle in @handles, then count how many are
 * signalled (or owned, for ownable types).  Returns TRUE when the wait
 * condition is satisfied: all signalled if @waitall, at least one
 * otherwise.  On success the handles are left LOCKED; the caller must
 * release them with _wapi_handle_unlock_handles().
 * NOTE(review): several lines (retry loop structure, unlock-rollback
 * loop bounds, out-params) are elided in this excerpt -- confirm
 * details against the full source. */
1360 gboolean _wapi_handle_count_signalled_handles (guint32 numhandles,
1366 guint32 count, i, iter=0;
1369 WapiHandleType type;
1371 /* Lock all the handles, with backoff */
1373 thr_ret = _wapi_handle_lock_shared_handles ();
1374 g_assert (thr_ret == 0);
1376 for(i=0; i<numhandles; i++) {
1377 gpointer handle = handles[i];
1378 guint32 idx = GPOINTER_TO_UINT(handle);
1380 DEBUG ("%s: attempting to lock %p", __func__, handle);
1382 type = _WAPI_PRIVATE_HANDLES(idx).type;
/* Non-blocking lock: if any handle is contended we roll back and
 * retry the whole set, to avoid deadlocking against another waiter. */
1384 thr_ret = _wapi_handle_trylock_handle (handle);
1389 DEBUG ("%s: attempt failed for %p: %s", __func__,
1390 handle, strerror (thr_ret));
1392 thr_ret = _wapi_handle_unlock_shared_handles ();
1393 g_assert (thr_ret == 0);
/* Roll back: unlock the handles we already locked. */
1396 handle = handles[i];
1397 idx = GPOINTER_TO_UINT(handle);
1399 thr_ret = _wapi_handle_unlock_handle (handle);
1400 g_assert (thr_ret == 0);
1403 /* If iter ever reaches 100 the nanosleep will
1404 * return EINVAL immediately, but we have a
1405 * design flaw if that happens.
1409 g_warning ("%s: iteration overflow!",
1414 DEBUG ("%s: Backing off for %d ms", __func__,
/* Exponential-ish backoff before retrying the lock pass. */
1416 _wapi_handle_spin (10 * iter);
1422 DEBUG ("%s: Locked all handles", __func__);
/* All handles locked: count the signalled/owned ones. */
1427 for(i=0; i<numhandles; i++) {
1428 gpointer handle = handles[i];
1429 guint32 idx = GPOINTER_TO_UINT(handle);
1431 type = _WAPI_PRIVATE_HANDLES(idx).type;
1433 DEBUG ("%s: Checking handle %p", __func__, handle);
/* A handle counts if: it is ownable and we own it, or its signalled
 * flag is set (read from shared or private state as appropriate). */
1435 if(((_wapi_handle_test_capabilities (handle, WAPI_HANDLE_CAP_OWN)==TRUE) &&
1436 (_wapi_handle_ops_isowned (handle) == TRUE)) ||
1437 (_WAPI_SHARED_HANDLE(type) &&
1438 WAPI_SHARED_HANDLE_DATA(handle).signalled == TRUE) ||
1439 (!_WAPI_SHARED_HANDLE(type) &&
1440 _WAPI_PRIVATE_HANDLES(idx).signalled == TRUE)) {
1443 DEBUG ("%s: Handle %p signalled", __func__,
1451 DEBUG ("%s: %d event handles signalled", __func__, count);
/* Wait satisfied: all signalled (waitall) or any signalled. */
1453 if ((waitall == TRUE && count == numhandles) ||
1454 (waitall == FALSE && count > 0)) {
1460 DEBUG ("%s: Returning %d", __func__, ret);
/* Release the per-handle locks taken by
 * _wapi_handle_count_signalled_handles(), plus the shared-handles
 * lock. */
1467 void _wapi_handle_unlock_handles (guint32 numhandles, gpointer *handles)
1472 thr_ret = _wapi_handle_unlock_shared_handles ();
1473 g_assert (thr_ret == 0);
1475 for(i=0; i<numhandles; i++) {
1476 gpointer handle = handles[i];
1478 DEBUG ("%s: unlocking handle %p", __func__, handle);
1480 thr_ret = _wapi_handle_unlock_handle (handle);
1481 g_assert (thr_ret == 0);
/* Condition wait that wakes up at least every 100ms so the caller can
 * poll for external state changes (needed e.g. for process handles,
 * whose signalling may come from another process).  Waits on @cond in
 * <=100ms slices until @timeout; a timeout of one 100ms slice is
 * masked (treated as "not signalled yet") so the caller polls again.
 * NOTE(review): the branch structure around the elided lines
 * (alertable handling, loop, return) is not visible in this excerpt --
 * confirm against the full source. */
1485 static int timedwait_signal_poll_cond (pthread_cond_t *cond, mono_mutex_t *mutex, struct timespec *timeout, gboolean alertable)
1487 struct timespec fake_timeout;
1492 ret=mono_cond_timedwait (cond, mutex, timeout);
1494 ret=mono_cond_wait (cond, mutex);
/* Compute an absolute deadline 100ms from now for the poll slice. */
1496 _wapi_calc_timeout (&fake_timeout, 100);
1498 if (timeout != NULL && ((fake_timeout.tv_sec > timeout->tv_sec) ||
1499 (fake_timeout.tv_sec == timeout->tv_sec &&
1500 fake_timeout.tv_nsec > timeout->tv_nsec))) {
1501 /* Real timeout is less than 100ms time */
1502 ret=mono_cond_timedwait (cond, mutex, timeout);
1504 ret=mono_cond_timedwait (cond, mutex, &fake_timeout);
1506 /* Mask the fake timeout, this will cause
1507 * another poll if the cond was not really signaled
1509 if (ret==ETIMEDOUT) {
1518 int _wapi_handle_wait_signal (gboolean poll)
1520 return _wapi_handle_timedwait_signal_handle (_wapi_global_signal_handle, NULL, TRUE, poll);
1523 int _wapi_handle_timedwait_signal (struct timespec *timeout, gboolean poll)
1525 return _wapi_handle_timedwait_signal_handle (_wapi_global_signal_handle, timeout, TRUE, poll);
1528 int _wapi_handle_wait_signal_handle (gpointer handle, gboolean alertable)
1530 DEBUG ("%s: waiting for %p", __func__, handle);
1532 return _wapi_handle_timedwait_signal_handle (handle, NULL, alertable, FALSE);
/* Core wait primitive: block until @handle is signalled or @timeout
 * expires.  Shared (cross-process) handles have no cond variable, so
 * they are polled in 100ms spins against the shared signalled flag;
 * private handles wait on the per-handle cond/mutex pair, optionally
 * via the polling cond wait (@poll, needed for process handles).
 * NOTE(review): return paths in the elided lines are not visible in
 * this excerpt -- confirm against the full source. */
1535 int _wapi_handle_timedwait_signal_handle (gpointer handle,
1536 struct timespec *timeout, gboolean alertable, gboolean poll)
1538 DEBUG ("%s: waiting for %p (type %s)", __func__, handle,
1539 _wapi_handle_typename[_wapi_handle_type (handle)]);
/* Shared handles: poll the cross-process signalled flag. */
1541 if (_WAPI_SHARED_HANDLE (_wapi_handle_type (handle))) {
1542 if (WAPI_SHARED_HANDLE_DATA(handle).signalled == TRUE) {
1545 if (timeout != NULL) {
1546 struct timespec fake_timeout;
1547 _wapi_calc_timeout (&fake_timeout, 100);
/* If the real deadline is nearer than one 100ms slice, do one
 * last short spin and report the final state. */
1549 if ((fake_timeout.tv_sec > timeout->tv_sec) ||
1550 (fake_timeout.tv_sec == timeout->tv_sec &&
1551 fake_timeout.tv_nsec > timeout->tv_nsec)) {
1552 /* FIXME: Real timeout is less than
1553 * 100ms time, but is it really worth
1554 * calculating to the exact ms?
1556 _wapi_handle_spin (100);
1558 if (WAPI_SHARED_HANDLE_DATA(handle).signalled == TRUE) {
/* No deadline (or deadline far away): spin one slice and re-poll. */
1565 _wapi_handle_spin (100);
/* Private handles: wait on the per-handle condition variable. */
1569 guint32 idx = GPOINTER_TO_UINT(handle);
1571 pthread_cond_t *cond;
1572 mono_mutex_t *mutex;
/* Register the wait handle so an APC can interrupt us. */
1574 if (alertable && !wapi_thread_set_wait_handle (handle))
1577 cond = &_WAPI_PRIVATE_HANDLES (idx).signal_cond;
1578 mutex = &_WAPI_PRIVATE_HANDLES (idx).signal_mutex;
1581 /* This is needed when waiting for process handles */
1582 res = timedwait_signal_poll_cond (cond, mutex, timeout, alertable);
1585 res = mono_cond_timedwait (cond, mutex, timeout);
1587 res = mono_cond_wait (cond, mutex);
1591 wapi_thread_clear_wait_handle (handle);
/* Release a file-share record.  When shared memory is disabled the
 * record lives in file_share_hash and removal frees it (the hash's
 * value-destroy function is g_free); otherwise it is a slot in the
 * shared-memory fileshare layout and is simply zeroed so the slot can
 * be reused. */
1598 _wapi_free_share_info (_WapiFileShare *share_info)
1600 if (!_wapi_shm_enabled ()) {
1601 file_share_hash_lock ();
1602 g_hash_table_remove (file_share_hash, share_info);
1603 file_share_hash_unlock ();
1604 /* The hashtable dtor frees share_info */
1606 memset (share_info, '\0', sizeof(struct _WapiFileShare));
/* GHashTable equality callback for file_share_hash: two share records
 * refer to the same file iff they have the same device and inode. */
1611 wapi_share_info_equal (gconstpointer ka, gconstpointer kb)
1613 const _WapiFileShare *s1 = ka;
1614 const _WapiFileShare *s2 = kb;
1616 return (s1->device == s2->device && s1->inode == s2->inode) ? 1 : 0;
/* GHashTable hash callback for file_share_hash.
 * NOTE(review): the actual hash computation is elided in this
 * excerpt; presumably it hashes the inode -- confirm against the full
 * source. */
1620 wapi_share_info_hash (gconstpointer data)
1622 const _WapiFileShare *s = data;
/* Look up (or create) the share record for the file identified by
 * (@device, @inode).  If a record already exists, report its current
 * sharemode/access through @old_sharemode/@old_access and bump its
 * refcount; otherwise create a new record initialised from
 * @new_sharemode/@new_access.  *@share_info always receives the
 * record.  Returns whether the record already existed, so the caller
 * can enforce Win32 sharing violations.
 * Two storage backends: a process-local GHashTable when shared memory
 * is disabled, else a linear scan of the shared fileshare area. */
1627 gboolean _wapi_handle_get_or_set_share (dev_t device, ino_t inode,
1628 guint32 new_sharemode,
1630 guint32 *old_sharemode,
1631 guint32 *old_access,
1632 struct _WapiFileShare **share_info)
1634 struct _WapiFileShare *file_share;
1635 guint32 now = (guint32)(time(NULL) & 0xFFFFFFFF);
1636 int thr_ret, i, first_unused = -1;
1637 gboolean exists = FALSE;
1639 /* Prevents entries from expiring under us as we search
1641 thr_ret = _wapi_handle_lock_shared_handles ();
1642 g_assert (thr_ret == 0);
1644 /* Prevent new entries racing with us */
1645 thr_ret = _wapi_shm_sem_lock (_WAPI_SHARED_SEM_FILESHARE);
1646 g_assert (thr_ret == 0);
/* --- Backend 1: process-local hash table (SHM disabled) --- */
1648 if (!_wapi_shm_enabled ()) {
1652 * Instead of allocating a 4MB array, we use a hash table to keep track of this
1653 * info. This is needed even if SHM is disabled, to track sharing inside
1654 * the current process.
/* Lazily create the hash table and its lock on first use. */
1656 if (!file_share_hash) {
1657 file_share_hash = g_hash_table_new_full (wapi_share_info_hash, wapi_share_info_equal, NULL, g_free);
1658 InitializeCriticalSection (&file_share_hash_mutex);
1661 tmp.device = device;
1664 file_share_hash_lock ();
1666 file_share = g_hash_table_lookup (file_share_hash, &tmp);
/* Existing record: report its mode and take a reference. */
1668 *old_sharemode = file_share->sharemode;
1669 *old_access = file_share->access;
1670 *share_info = file_share;
1672 InterlockedIncrement ((gint32 *)&file_share->handle_refs);
/* New record: initialise and insert (record is its own key). */
1675 file_share = g_new0 (_WapiFileShare, 1);
1677 file_share->device = device;
1678 file_share->inode = inode;
1679 file_share->opened_by_pid = _wapi_getpid ();
1680 file_share->sharemode = new_sharemode;
1681 file_share->access = new_access;
1682 file_share->handle_refs = 1;
1683 *share_info = file_share;
1685 g_hash_table_insert (file_share_hash, file_share, file_share);
1688 file_share_hash_unlock ();
/* --- Backend 2: shared-memory fileshare area --- */
1690 /* If a linear scan gets too slow we'll have to fit a hash
1691 * table onto the shared mem backing store
1694 for (i = 0; i <= _wapi_fileshare_layout->hwm; i++) {
1695 file_share = &_wapi_fileshare_layout->share_info[i];
1697 /* Make a note of an unused slot, in case we need to
1700 if (first_unused == -1 && file_share->handle_refs == 0) {
/* refcount 0 means the slot is free; skip matching on it. */
1705 if (file_share->handle_refs == 0) {
1709 if (file_share->device == device &&
1710 file_share->inode == inode) {
1711 *old_sharemode = file_share->sharemode;
1712 *old_access = file_share->access;
1713 *share_info = file_share;
1715 /* Increment the reference count while we
1716 * still have sole access to the shared area.
1717 * This makes the increment atomic wrt
1720 InterlockedIncrement ((gint32 *)&file_share->handle_refs);
/* Not found: claim a slot -- either grow past the high-water
 * mark or reuse the first free slot seen during the scan.
 * NOTE(review): the out-of-slots branch body at the capacity
 * check below is elided in this excerpt -- confirm. */
1728 if (i == _WAPI_FILESHARE_SIZE && first_unused == -1) {
1731 if (first_unused == -1) {
1732 file_share = &_wapi_fileshare_layout->share_info[++i];
1733 _wapi_fileshare_layout->hwm = i;
1735 file_share = &_wapi_fileshare_layout->share_info[first_unused];
1738 file_share->device = device;
1739 file_share->inode = inode;
1740 file_share->opened_by_pid = _wapi_getpid ();
1741 file_share->sharemode = new_sharemode;
1742 file_share->access = new_access;
1743 file_share->handle_refs = 1;
1744 *share_info = file_share;
/* Touch the record's timestamp so collection doesn't expire it. */
1748 if (*share_info != NULL) {
1749 InterlockedExchange ((gint32 *)&(*share_info)->timestamp, now);
1753 thr_ret = _wapi_shm_sem_unlock (_WAPI_SHARED_SEM_FILESHARE);
1755 _wapi_handle_unlock_shared_handles ();
1760 /* If we don't have the info in /proc, check if the process that
1761 * opened this share info is still there (it's not a perfect method,
/* Fallback liveness check: probe the opener's pid with kill(pid, 0).
 * If the process no longer exists, the share record is stale and is
 * destroyed.  Callers hold the fileshare semaphore. */
1764 static void _wapi_handle_check_share_by_pid (struct _WapiFileShare *share_info)
/* kill(pid, 0) delivers no signal; -1 means lookup failure (and the
 * elided condition presumably checks errno == ESRCH -- confirm). */
1766 if (kill (share_info->opened_by_pid, 0) == -1 &&
1769 /* It's gone completely (or there's a new process
1770 * owned by someone else) so mark this share info as
1773 DEBUG ("%s: Didn't find it, destroying entry", __func__);
1775 _wapi_free_share_info (share_info);
1780 /* Scan /proc/<pids>/fd/ for open file descriptors to the file in
1781 * question. If there are none, reset the share info.
1783 * This implementation is Linux-specific; legacy systems will have to
1784 * implement their own ways of finding out if a particular file is
1785 * open by a process.
/* Decide whether @share_info is still in use.  First check our own
 * private handle table, then walk every known process's /proc fd
 * directory looking for a descriptor on the same device/inode
 * (skipping our own @fd).  A stale record is destroyed. */
1787 void _wapi_handle_check_share (struct _WapiFileShare *share_info, int fd)
1789 gboolean found = FALSE, proc_fds = FALSE;
1790 pid_t self = _wapi_getpid ();
1794 /* Prevents entries from expiring under us if we remove this
1797 thr_ret = _wapi_handle_lock_shared_handles ();
1798 g_assert (thr_ret == 0);
1800 /* Prevent new entries racing with us */
1801 thr_ret = _wapi_shm_sem_lock (_WAPI_SHARED_SEM_FILESHARE);
1802 g_assert (thr_ret == 0);
1804 /* If there is no /proc, there's nothing more we can do here */
1805 if (access ("/proc", F_OK) == -1) {
1806 _wapi_handle_check_share_by_pid (share_info);
1810 /* If there's another handle that thinks it owns this fd, then even
1811 * if the fd has been closed behind our back consider it still owned.
1812 * See bugs 75764 and 75891
1814 for (i = 0; i < _wapi_fd_reserve; i++) {
1815 if (_wapi_private_handles [SLOT_INDEX (i)]) {
1816 struct _WapiHandleUnshared *handle = &_WAPI_PRIVATE_HANDLES(i);
1819 handle->type == WAPI_HANDLE_FILE) {
1820 struct _WapiHandle_file *file_handle = &handle->u.file;
1822 if (file_handle->share_info == share_info) {
1823 DEBUG ("%s: handle 0x%x has this file open!",
/* Walk the shared handle table for process handles and inspect each
 * process's open fds via /proc/<pid>/fd. */
1832 for (i = 0; i < _WAPI_HANDLE_INITIAL_COUNT; i++) {
1833 struct _WapiHandleShared *shared;
1834 struct _WapiHandle_process *process_handle;
1836 shared = &_wapi_shared_layout->handles[i];
1838 if (shared->type == WAPI_HANDLE_PROCESS) {
1840 struct dirent *fd_entry;
1841 char subdir[_POSIX_PATH_MAX];
1843 process_handle = &shared->u.process;
1844 pid = process_handle->id;
1846 /* Look in /proc/<pid>/fd/ but ignore
1847 * /proc/<our pid>/fd/<fd>, as we have the
1850 g_snprintf (subdir, _POSIX_PATH_MAX, "/proc/%d/fd",
/* Unreadable fd dir (process died, or permissions): skip it. */
1853 fd_dir = opendir (subdir);
1854 if (fd_dir == NULL) {
1858 DEBUG ("%s: Looking in %s", __func__, subdir);
1862 while ((fd_entry = readdir (fd_dir)) != NULL) {
1863 char path[_POSIX_PATH_MAX];
1864 struct stat link_stat;
/* Skip ".", "..", and our own fd entry. */
1866 if (!strcmp (fd_entry->d_name, ".") ||
1867 !strcmp (fd_entry->d_name, "..") ||
1869 fd == atoi (fd_entry->d_name))) {
1873 g_snprintf (path, _POSIX_PATH_MAX,
1874 "/proc/%d/fd/%s", pid,
/* stat() follows the /proc fd symlink to the real file.
 * NOTE(review): its return value is unchecked; on failure
 * link_stat is compared uninitialised -- worth fixing in the
 * full source. */
1877 stat (path, &link_stat);
1878 if (link_stat.st_dev == share_info->device &&
1879 link_stat.st_ino == share_info->inode) {
1880 DEBUG ("%s: Found it at %s",
/* No /proc fd info was readable at all: fall back to the pid probe.
 * Otherwise, destroy the record if nobody has the file open. */
1891 if (proc_fds == FALSE) {
1892 _wapi_handle_check_share_by_pid (share_info);
1893 } else if (found == FALSE) {
1894 /* Blank out this entry, as it is stale */
1895 DEBUG ("%s: Didn't find it, destroying entry", __func__);
1897 _wapi_free_share_info (share_info);
1901 thr_ret = _wapi_shm_sem_unlock (_WAPI_SHARED_SEM_FILESHARE);
1903 _wapi_handle_unlock_shared_handles ();
1907 // Other implementations (non-Linux)
/* Portable fallback for systems without a Linux-style /proc: only the
 * opener-pid liveness probe is available.  Takes the same locks as the
 * Linux implementation. */
1909 void _wapi_handle_check_share (struct _WapiFileShare *share_info, int fd)
1913 /* Prevents entries from expiring under us if we remove this
1915 thr_ret = _wapi_handle_lock_shared_handles ();
1916 g_assert (thr_ret == 0);
1918 /* Prevent new entries racing with us */
1919 thr_ret = _wapi_shm_sem_lock (_WAPI_SHARED_SEM_FILESHARE);
1920 g_assert (thr_ret == 0);
1922 _wapi_handle_check_share_by_pid (share_info);
1924 thr_ret = _wapi_shm_sem_unlock (_WAPI_SHARED_SEM_FILESHARE);
1925 _wapi_handle_unlock_shared_handles ();
/* Debug aid: print one line per live private handle (index, type name,
 * signalled state) plus the type-specific details from
 * handle_details[].  Holds scan_mutex for the duration, with a cleanup
 * handler so the mutex is released on thread cancellation. */
1929 void _wapi_handle_dump (void)
1931 struct _WapiHandleUnshared *handle_data;
1935 pthread_cleanup_push ((void(*)(void *))mono_mutex_unlock_in_cleanup,
1936 (void *)&scan_mutex);
1937 thr_ret = mono_mutex_lock (&scan_mutex);
1938 g_assert (thr_ret == 0);
/* Outer loop: allocated slot arrays; inner loop: handles per slot. */
1940 for(i = SLOT_INDEX (0); i < _wapi_private_handle_slot_count; i++) {
1941 if (_wapi_private_handles [i]) {
1942 for (k = SLOT_OFFSET (0); k < _WAPI_HANDLE_INITIAL_COUNT; k++) {
1943 handle_data = &_wapi_private_handles [i][k];
1945 if (handle_data->type == WAPI_HANDLE_UNUSED) {
1949 g_print ("%3x [%7s] %s %d ",
1950 i * _WAPI_HANDLE_INITIAL_COUNT + k,
1951 _wapi_handle_typename[handle_data->type],
1952 handle_data->signalled?"Sg":"Un",
1954 handle_details[handle_data->type](&handle_data->u);
1960 thr_ret = mono_mutex_unlock (&scan_mutex);
1961 g_assert (thr_ret == 0);
1962 pthread_cleanup_pop (0);
1965 static void _wapi_shared_details (gpointer handle_info)
1967 struct _WapiHandle_shared_ref *shared = (struct _WapiHandle_shared_ref *)handle_info;
1969 g_print ("offset: 0x%x", shared->offset);
1972 void _wapi_handle_update_refs (void)
1976 guint32 now = (guint32)(time (NULL) & 0xFFFFFFFF);
1978 thr_ret = _wapi_handle_lock_shared_handles ();
1979 g_assert (thr_ret == 0);
1981 /* Prevent file share entries racing with us */
1982 thr_ret = _wapi_shm_sem_lock (_WAPI_SHARED_SEM_FILESHARE);
1983 g_assert(thr_ret == 0);
1985 pthread_cleanup_push ((void(*)(void *))mono_mutex_unlock_in_cleanup,
1986 (void *)&scan_mutex);
1987 thr_ret = mono_mutex_lock (&scan_mutex);
1989 for(i = SLOT_INDEX (0); i < _wapi_private_handle_slot_count; i++) {
1990 if (_wapi_private_handles [i]) {
1991 for (k = SLOT_OFFSET (0); k < _WAPI_HANDLE_INITIAL_COUNT; k++) {
1992 struct _WapiHandleUnshared *handle = &_wapi_private_handles [i][k];
1994 if (_WAPI_SHARED_HANDLE(handle->type)) {
1995 struct _WapiHandleShared *shared_data;
1997 DEBUG ("%s: (%d) handle 0x%x is SHARED (%s)", __func__, _wapi_getpid (), i * _WAPI_HANDLE_INITIAL_COUNT + k, _wapi_handle_typename[handle->type]);
1999 shared_data = &_wapi_shared_layout->handles[handle->u.shared.offset];
2001 DEBUG ("%s: (%d) Updating timestamp of handle 0x%x", __func__, _wapi_getpid (), handle->u.shared.offset);
2003 InterlockedExchange ((gint32 *)&shared_data->timestamp, now);
2004 } else if (handle->type == WAPI_HANDLE_FILE) {
2005 struct _WapiHandle_file *file_handle = &handle->u.file;
2007 DEBUG ("%s: (%d) handle 0x%x is FILE", __func__, _wapi_getpid (), i * _WAPI_HANDLE_INITIAL_COUNT + k);
2009 g_assert (file_handle->share_info != NULL);
2011 DEBUG ("%s: (%d) Inc refs on fileshare 0x%x", __func__, _wapi_getpid (), (file_handle->share_info - &_wapi_fileshare_layout->share_info[0]) / sizeof(struct _WapiFileShare));
2013 InterlockedExchange ((gint32 *)&file_handle->share_info->timestamp, now);
2019 thr_ret = mono_mutex_unlock (&scan_mutex);
2020 g_assert (thr_ret == 0);
2021 pthread_cleanup_pop (0);
2023 thr_ret = _wapi_shm_sem_unlock (_WAPI_SHARED_SEM_FILESHARE);
2025 _wapi_handle_unlock_shared_handles ();