2 * handles.c: Generic and internal operations on handles
5 * Dick Porter (dick@ximian.com)
7 * (C) 2002-2006 Novell, Inc.
16 #include <sys/types.h>
17 #ifdef HAVE_SYS_SOCKET_H
18 # include <sys/socket.h>
23 #ifdef HAVE_SYS_MMAN_H
24 # include <sys/mman.h>
31 #include <mono/io-layer/wapi.h>
32 #include <mono/io-layer/wapi-private.h>
33 #include <mono/io-layer/handles-private.h>
34 #include <mono/io-layer/mono-mutex.h>
35 #include <mono/io-layer/misc-private.h>
36 #include <mono/io-layer/shared.h>
37 #include <mono/io-layer/collection.h>
38 #include <mono/io-layer/process-private.h>
39 #include <mono/io-layer/critical-section-private.h>
44 #define DEBUG(...) g_message(__VA_ARGS__)
/* Forward declaration: returns the registered close() hook for a handle
 * type, or NULL.  Defined near the bottom of this file. */
static void (*_wapi_handle_ops_get_close_func (WapiHandleType type))(gpointer, gpointer);

/* Capability bits per handle type, populated by
 * _wapi_handle_register_capabilities () */
static WapiHandleCapability handle_caps[WAPI_HANDLE_COUNT]={0};
/* Per-type operations vtable, indexed by WapiHandleType.
 * NOTE(review): several initializer entries are not visible in this chunk. */
static struct _WapiHandleOps *handle_ops[WAPI_HANDLE_COUNT]={
#ifndef DISABLE_SOCKETS
	&_wapi_namedmutex_ops,
	&_wapi_namedevent_ops,

static void _wapi_shared_details (gpointer handle_info);

/* Per-type debug dump helpers; NULL means nothing useful to print */
static void (*handle_details[WAPI_HANDLE_COUNT])(gpointer) = {
	_wapi_console_details,
	_wapi_shared_details, /* thread */
	NULL, /* Nothing useful to see in a socket handle */
	NULL, /* Nothing useful to see in a find handle */
	_wapi_shared_details, /* process */
	_wapi_shared_details, /* namedmutex */
	_wapi_shared_details, /* namedsem */
	_wapi_shared_details, /* namedevent */

/* Human-readable names for each WapiHandleType, used in debug output */
const char *_wapi_handle_typename[] = {
/*
 * We can hold _WAPI_PRIVATE_MAX_SLOTS * _WAPI_HANDLE_INITIAL_COUNT handles.
 * If 4M handles are not enough... Oh, well... we will crash.
 */
/* Map a handle index to its slot, and to its offset within that slot */
#define SLOT_INDEX(x) (x / _WAPI_HANDLE_INITIAL_COUNT)
#define SLOT_OFFSET(x) (x % _WAPI_HANDLE_INITIAL_COUNT)

/* The private (per-process) handle table; slots are allocated on demand */
struct _WapiHandleUnshared *_wapi_private_handles [_WAPI_PRIVATE_MAX_SLOTS];
static guint32 _wapi_private_handle_count = 0;
static guint32 _wapi_private_handle_slot_count = 0;

/* The cross-process handle table, attached via _wapi_shm_attach () */
struct _WapiHandleSharedLayout *_wapi_shared_layout = NULL;

/*
 * If SHM is enabled, this will point to shared memory, otherwise it will be NULL.
 */
struct _WapiFileShareLayout *_wapi_fileshare_layout = NULL;

/*
 * If SHM is disabled, this will point to a hash of _WapiFileShare structures, otherwise
 * it will be NULL. We use this instead of _wapi_fileshare_layout to avoid allocating a
 * large array up front when it is not needed.
 */
static GHashTable *file_share_hash;
static CRITICAL_SECTION file_share_hash_mutex;

#define file_share_hash_lock() EnterCriticalSection (&file_share_hash_mutex)
#define file_share_hash_unlock() LeaveCriticalSection (&file_share_hash_mutex)

/* Handle slots below this value are reserved for file descriptors */
guint32 _wapi_fd_reserve;

/*
 * This is an internal handle which is used for handling waiting for multiple handles.
 * Threads which wait for multiple handles wait on this one handle, and when a handle
 * is signalled, this handle is signalled too.
 */
static gpointer _wapi_global_signal_handle;

/* Point to the mutex/cond inside _wapi_global_signal_handle */
mono_mutex_t *_wapi_global_signal_mutex;
pthread_cond_t *_wapi_global_signal_cond;

/* Set once the io-layer has been torn down; entry points assert on it */
gboolean _wapi_has_shut_down = FALSE;

/* Use this instead of getpid(), to cope with linuxthreads. It's a
 * function rather than a variable lookup because we need to get at
 * this before share_init() might have been called.
 */
static pid_t _wapi_pid;
static mono_once_t pid_init_once = MONO_ONCE_INIT;

static void _wapi_handle_unref_full (gpointer handle, gboolean ignore_private_busy_handles);
162 static void pid_init (void)
164 _wapi_pid = getpid ();
167 pid_t _wapi_getpid (void)
169 mono_once (&pid_init_once, pid_init);
175 static mono_mutex_t scan_mutex = MONO_MUTEX_INITIALIZER;
/* atexit hook: force-unref every remaining private handle, then detach
 * the shared-memory segments and free the private handle table.
 * NOTE(review): several lines of this function (braces, loop variable
 * declarations) are not visible in this chunk. */
static void handle_cleanup (void)
	/* Every shared handle we were using ought really to be closed
	 * by now, but to make sure just blow them all away. The
	 * exiting finalizer thread in particular races us to the
	 * program exit and doesn't always win, so it can be left
	 * cluttering up the shared file. Anything else left over is
	 * a leak. */
	for(i = SLOT_INDEX (0); _wapi_private_handles[i] != NULL; i++) {
		for(j = SLOT_OFFSET (0); j < _WAPI_HANDLE_INITIAL_COUNT; j++) {
			struct _WapiHandleUnshared *handle_data = &_wapi_private_handles[i][j];
			int type = handle_data->type;
			/* Reconstruct the handle value from slot index and offset */
			gpointer handle = GINT_TO_POINTER (i*_WAPI_HANDLE_INITIAL_COUNT+j);

			if (_WAPI_SHARED_HANDLE (type)) {
				if (type == WAPI_HANDLE_THREAD) {
					/* Special-case thread handles
					 * because they need extra
					 * cleanup. This also avoids
					 * a race condition between
					 * the application exit and
					 * the finalizer thread - if
					 * it finishes up between now
					 * and actual app termination
					 * it will find all its handle
					 * details have been blown
					 * away, so this sets those
					 * details first. */
					g_assert (0); /*This condition is freaking impossible*/
					_wapi_thread_set_termination_details (handle, 0);
			/* Drop every outstanding reference, tolerating busy
			 * private mutexes/conds (see _wapi_handle_unref_full) */
			for(k = handle_data->ref; k > 0; k--) {
				DEBUG ("%s: unreffing %s handle %p", __func__, _wapi_handle_typename[type], handle);

				_wapi_handle_unref_full (handle, TRUE);

	_wapi_shm_semaphores_remove ();

	_wapi_shm_detach (WAPI_SHM_DATA);
	_wapi_shm_detach (WAPI_SHM_FILESHARE);

	if (file_share_hash) {
		g_hash_table_destroy (file_share_hash);
		DeleteCriticalSection (&file_share_hash_mutex);

	for (i = 0; i < _WAPI_PRIVATE_MAX_SLOTS; ++i)
		g_free (_wapi_private_handles [i]);
/*
 * Initialize the io-layer.
 * NOTE(review): the function signature and some statements are not
 * visible in this chunk; this is presumably wapi_init () — confirm.
 */
	/* The two arrays must be declared with matching lengths */
	g_assert ((sizeof (handle_ops) / sizeof (handle_ops[0]))
		  == WAPI_HANDLE_COUNT);

	_wapi_fd_reserve = getdtablesize();

	/* This is needed by the code in _wapi_handle_new_internal */
	/* Round up to a multiple of the per-slot handle count */
	_wapi_fd_reserve = (_wapi_fd_reserve + (_WAPI_HANDLE_INITIAL_COUNT - 1)) & ~(_WAPI_HANDLE_INITIAL_COUNT - 1);

	/*
	 * The entries in _wapi_private_handles reserved for fds are allocated lazily to
	 * save memory.
	 */
		_wapi_private_handles [idx++] = g_new0 (struct _WapiHandleUnshared,
							_WAPI_HANDLE_INITIAL_COUNT);

		_wapi_private_handle_count += _WAPI_HANDLE_INITIAL_COUNT;
		_wapi_private_handle_slot_count ++;
	} while(_wapi_fd_reserve > _wapi_private_handle_count);

	_wapi_shm_semaphores_init ();

	_wapi_shared_layout = _wapi_shm_attach (WAPI_SHM_DATA);
	g_assert (_wapi_shared_layout != NULL);

	if (_wapi_shm_enabled ()) {
		/* This allocates a 4mb array, so do it only if SHM is enabled */
		_wapi_fileshare_layout = _wapi_shm_attach (WAPI_SHM_FILESHARE);
		g_assert (_wapi_fileshare_layout != NULL);

#if !defined (DISABLE_SHARED_HANDLES)
	if (_wapi_shm_enabled ())
		_wapi_collection_init ();

	/* Create the global signal handle that multi-handle waits use */
	_wapi_global_signal_handle = _wapi_handle_new (WAPI_HANDLE_EVENT, NULL);

	_wapi_global_signal_cond = &_WAPI_PRIVATE_HANDLES (GPOINTER_TO_UINT (_wapi_global_signal_handle)).signal_cond;
	_wapi_global_signal_mutex = &_WAPI_PRIVATE_HANDLES (GPOINTER_TO_UINT (_wapi_global_signal_handle)).signal_mutex;

	/* Using g_atexit here instead of an explicit function call in
	 * a cleanup routine lets us cope when a third-party library
	 * calls exit (eg if an X client loses the connection to its
	 * server.)
	 */
	g_atexit (handle_cleanup);
	/* Shutdown path (presumably wapi_cleanup () — signature not visible
	 * in this chunk).  Marks the io-layer as shut down, then tears down
	 * the subsystems in order. */
	g_assert (_wapi_has_shut_down == FALSE);

	_wapi_has_shut_down = TRUE;

	_wapi_critical_section_cleanup ();
	_wapi_error_cleanup ();
	_wapi_thread_cleanup ();
/* Initialise a freshly claimed slot in the shared (cross-process) handle
 * table: stamp it, clear the signal state, give it one reference, and
 * copy in the type-specific payload if supplied. */
static void _wapi_handle_init_shared (struct _WapiHandleShared *handle,
				      gpointer handle_specific)
	g_assert (_wapi_has_shut_down == FALSE);

	/* Timestamp is used by the collection thread to expire stale entries */
	handle->timestamp = (guint32)(time (NULL) & 0xFFFFFFFF);
	handle->signalled = FALSE;
	handle->handle_refs = 1;

	if (handle_specific != NULL) {
		memcpy (&handle->u, handle_specific, sizeof (handle->u));
/* Initialise a private handle slot: clear the signal state, and for
 * process-private types create the per-handle condvar and mutex used
 * for signalling.  Copies in the type-specific payload if supplied. */
static void _wapi_handle_init (struct _WapiHandleUnshared *handle,
			       WapiHandleType type, gpointer handle_specific)
	g_assert (_wapi_has_shut_down == FALSE);

	handle->signalled = FALSE;

	/* Shared handles use the cross-process semaphores instead */
	if (!_WAPI_SHARED_HANDLE(type)) {
		thr_ret = pthread_cond_init (&handle->signal_cond, NULL);
		g_assert (thr_ret == 0);

		thr_ret = mono_mutex_init (&handle->signal_mutex, NULL);
		g_assert (thr_ret == 0);

	if (handle_specific != NULL) {
		memcpy (&handle->u, handle_specific,
/* Claim a free slot in the shared handle table and initialise it.
 * Returns the slot offset, or (per the comment below) falls through
 * when the table is full so the caller can collect and retry. */
static guint32 _wapi_handle_new_shared (WapiHandleType type,
					gpointer handle_specific)
	/* Remember where the previous allocation ended, to avoid rescanning */
	static guint32 last = 1;

	g_assert (_wapi_has_shut_down == FALSE);

	/* Leave the first slot empty as a guard */
	/* FIXME: expandable array */
	for(offset = last; offset <_WAPI_HANDLE_INITIAL_COUNT; offset++) {
		struct _WapiHandleShared *handle = &_wapi_shared_layout->handles[offset];

		if(handle->type == WAPI_HANDLE_UNUSED) {
			thr_ret = _wapi_handle_lock_shared_handles ();
			g_assert (thr_ret == 0);

			/* CAS on the type field claims the slot atomically */
			if (InterlockedCompareExchange ((gint32 *)&handle->type, type, WAPI_HANDLE_UNUSED) == WAPI_HANDLE_UNUSED) {
				_wapi_handle_init_shared (handle, type,

				_wapi_handle_unlock_shared_handles ();

				/* Someone else beat us to it, just
				 * continue looking */
				_wapi_handle_unlock_shared_handles ();

	/* Try again from the beginning */

	/* Will need to expand the array. The caller will sort it out */
/*
 * _wapi_handle_new_internal:
 * @type: Init handle to this type
 *
 * Search for a free handle and initialize it. Return the handle on
 * success and 0 on failure. This is only called from
 * _wapi_handle_new, and scan_mutex must be held.
 */
static guint32 _wapi_handle_new_internal (WapiHandleType type,
					  gpointer handle_specific)
	/* Remember where the previous allocation ended */
	static guint32 last = 0;
	gboolean retry = FALSE;

	g_assert (_wapi_has_shut_down == FALSE);

	/* A linear scan should be fast enough. Start from the last
	 * allocation, assuming that handles are allocated more often
	 * than they're freed. Leave the space reserved for file
	 * descriptors. */
	if (last < _wapi_fd_reserve) {
		last = _wapi_fd_reserve;

	for(i = SLOT_INDEX (count); i < _wapi_private_handle_slot_count; i++) {
		if (_wapi_private_handles [i]) {
			for (k = SLOT_OFFSET (count); k < _WAPI_HANDLE_INITIAL_COUNT; k++) {
				struct _WapiHandleUnshared *handle = &_wapi_private_handles [i][k];

				if(handle->type == WAPI_HANDLE_UNUSED) {
					_wapi_handle_init (handle, type, handle_specific);

	if(retry && last > _wapi_fd_reserve) {
		/* Try again from the beginning */
		last = _wapi_fd_reserve;

	/* Will need to expand the array. The caller will sort it out */
/* Allocate a new handle of @type, growing the private table as needed.
 * For shared types also claims a slot in the shared table (collecting
 * stale entries and retrying once before giving up).  Returns
 * _WAPI_HANDLE_INVALID on exhaustion. */
_wapi_handle_new (WapiHandleType type, gpointer handle_specific)
	guint32 handle_idx = 0;

	g_assert (_wapi_has_shut_down == FALSE);

	DEBUG ("%s: Creating new handle of type %s", __func__,
		   _wapi_handle_typename[type]);

	/* fd handles go through _wapi_handle_new_fd () instead */
	g_assert(!_WAPI_FD_HANDLE(type));

	pthread_cleanup_push ((void(*)(void *))mono_mutex_unlock_in_cleanup,
			      (void *)&scan_mutex);
	thr_ret = mono_mutex_lock (&scan_mutex);
	g_assert (thr_ret == 0);

	while ((handle_idx = _wapi_handle_new_internal (type, handle_specific)) == 0) {
		/* Try and expand the array, and have another go */
		int idx = SLOT_INDEX (_wapi_private_handle_count);
		if (idx >= _WAPI_PRIVATE_MAX_SLOTS) {

		_wapi_private_handles [idx] = g_new0 (struct _WapiHandleUnshared,
						      _WAPI_HANDLE_INITIAL_COUNT);

		_wapi_private_handle_count += _WAPI_HANDLE_INITIAL_COUNT;
		_wapi_private_handle_slot_count ++;

	thr_ret = mono_mutex_unlock (&scan_mutex);
	g_assert (thr_ret == 0);
	pthread_cleanup_pop (0);

	if (handle_idx == 0) {
		/* We ran out of slots */
		handle = _WAPI_HANDLE_INVALID;

	/* Make sure we left the space for fd mappings */
	g_assert (handle_idx >= _wapi_fd_reserve);

	handle = GUINT_TO_POINTER (handle_idx);

	DEBUG ("%s: Allocated new handle %p", __func__, handle);

	if (_WAPI_SHARED_HANDLE(type)) {
		/* Add the shared section too */
		ref = _wapi_handle_new_shared (type, handle_specific);
		/* On failure, collect stale shared entries and retry once */
		_wapi_handle_collect ();
		ref = _wapi_handle_new_shared (type, handle_specific);
		/* FIXME: grow the arrays */
		handle = _WAPI_HANDLE_INVALID;

		/* Link the private handle to its shared slot */
		_WAPI_PRIVATE_HANDLES(handle_idx).u.shared.offset = ref;
		DEBUG ("%s: New shared handle at offset 0x%x", __func__,
/* Open a private handle onto an existing shared-table slot @offset.
 * If this process already has a private handle for that offset it is
 * reffed and returned; otherwise a new private handle is allocated and
 * the shared slot's refcount is bumped.  Returns INVALID_HANDLE_VALUE
 * if the shared slot was deleted or has the wrong type. */
gpointer _wapi_handle_new_from_offset (WapiHandleType type, guint32 offset,
	guint32 handle_idx = 0;
	gpointer handle = INVALID_HANDLE_VALUE;
	struct _WapiHandleShared *shared;

	g_assert (_wapi_has_shut_down == FALSE);

	DEBUG ("%s: Creating new handle of type %s to offset %d", __func__,
		   _wapi_handle_typename[type], offset);

	g_assert(!_WAPI_FD_HANDLE(type));
	g_assert(_WAPI_SHARED_HANDLE(type));
	g_assert(offset != 0);

	shared = &_wapi_shared_layout->handles[offset];
		guint32 now = (guint32)(time (NULL) & 0xFFFFFFFF);
		/* Bump up the timestamp for this offset */
		InterlockedExchange ((gint32 *)&shared->timestamp, now);

	pthread_cleanup_push ((void(*)(void *))mono_mutex_unlock_in_cleanup,
			      (void *)&scan_mutex);
	thr_ret = mono_mutex_lock (&scan_mutex);
	g_assert (thr_ret == 0);

	/* First pass: look for an existing private handle for this offset */
	for (i = SLOT_INDEX (0); i < _wapi_private_handle_slot_count; i++) {
		if (_wapi_private_handles [i]) {
			for (k = SLOT_OFFSET (0); k < _WAPI_HANDLE_INITIAL_COUNT; k++) {
				struct _WapiHandleUnshared *handle_data = &_wapi_private_handles [i][k];

				if (handle_data->type == type &&
				    handle_data->u.shared.offset == offset) {
					handle = GUINT_TO_POINTER (i * _WAPI_HANDLE_INITIAL_COUNT + k);
					goto first_pass_done;

	thr_ret = mono_mutex_unlock (&scan_mutex);
	g_assert (thr_ret == 0);
	pthread_cleanup_pop (0);

	if (handle != INVALID_HANDLE_VALUE) {
		_wapi_handle_ref (handle);

		DEBUG ("%s: Returning old handle %p referencing 0x%x",
			   __func__, handle, offset);

	/* Prevent entries expiring under us as we search */
	thr_ret = _wapi_handle_lock_shared_handles ();
	g_assert (thr_ret == 0);

	if (shared->type == WAPI_HANDLE_UNUSED) {
		/* Someone deleted this handle while we were working */
		DEBUG ("%s: Handle at 0x%x unused", __func__, offset);

	if (shared->type != type) {
		DEBUG ("%s: Wrong type at %d 0x%x! Found %s wanted %s",
			   __func__, offset, offset,
			   _wapi_handle_typename[shared->type],
			   _wapi_handle_typename[type]);

	pthread_cleanup_push ((void(*)(void *))mono_mutex_unlock_in_cleanup,
			      (void *)&scan_mutex);
	thr_ret = mono_mutex_lock (&scan_mutex);
	g_assert (thr_ret == 0);

	while ((handle_idx = _wapi_handle_new_internal (type, NULL)) == 0) {
		/* Try and expand the array, and have another go */
		int idx = SLOT_INDEX (_wapi_private_handle_count);
		_wapi_private_handles [idx] = g_new0 (struct _WapiHandleUnshared,
						      _WAPI_HANDLE_INITIAL_COUNT);

		_wapi_private_handle_count += _WAPI_HANDLE_INITIAL_COUNT;
		_wapi_private_handle_slot_count ++;

	thr_ret = mono_mutex_unlock (&scan_mutex);
	g_assert (thr_ret == 0);
	pthread_cleanup_pop (0);

	/* Make sure we left the space for fd mappings */
	g_assert (handle_idx >= _wapi_fd_reserve);

	handle = GUINT_TO_POINTER (handle_idx);

	_WAPI_PRIVATE_HANDLES(handle_idx).u.shared.offset = offset;
	InterlockedIncrement ((gint32 *)&shared->handle_refs);

	DEBUG ("%s: Allocated new handle %p referencing 0x%x (shared refs %d)", __func__, handle, offset, shared->handle_refs);

	_wapi_handle_unlock_shared_handles ();
639 init_handles_slot (int idx)
643 pthread_cleanup_push ((void(*)(void *))mono_mutex_unlock_in_cleanup,
644 (void *)&scan_mutex);
645 thr_ret = mono_mutex_lock (&scan_mutex);
646 g_assert (thr_ret == 0);
648 if (_wapi_private_handles [idx] == NULL) {
649 _wapi_private_handles [idx] = g_new0 (struct _WapiHandleUnshared,
650 _WAPI_HANDLE_INITIAL_COUNT);
651 g_assert (_wapi_private_handles [idx]);
654 thr_ret = mono_mutex_unlock (&scan_mutex);
655 g_assert (thr_ret == 0);
656 pthread_cleanup_pop (0);
/* Create a handle mapped 1:1 onto file descriptor @fd (the handle value
 * IS the fd).  Fails if fd is outside the reserved range.  The fileshare
 * semaphore is held across initialisation so half-initialised fd handles
 * are never observed by the file-share code. */
gpointer _wapi_handle_new_fd (WapiHandleType type, int fd,
			      gpointer handle_specific)
	struct _WapiHandleUnshared *handle;

	g_assert (_wapi_has_shut_down == FALSE);

	DEBUG ("%s: Creating new handle of type %s", __func__,
		   _wapi_handle_typename[type]);

	g_assert(_WAPI_FD_HANDLE(type));
	g_assert(!_WAPI_SHARED_HANDLE(type));

	if (fd >= _wapi_fd_reserve) {
		DEBUG ("%s: fd %d is too big", __func__, fd);

		return(GUINT_TO_POINTER (_WAPI_HANDLE_INVALID));

	/* Initialize the array entries on demand */
	if (_wapi_private_handles [SLOT_INDEX (fd)] == NULL)
		init_handles_slot (SLOT_INDEX (fd));

	handle = &_WAPI_PRIVATE_HANDLES(fd);

	if (handle->type != WAPI_HANDLE_UNUSED) {
		DEBUG ("%s: fd %d is already in use!", __func__, fd);
		/* FIXME: clean up this handle? We can't do anything
		 * with the fd, cos thats the new one
		 */

	DEBUG ("%s: Assigning new fd handle %d", __func__, fd);

	/* Prevent file share entries racing with us, when the file
	 * handle is only half initialised
	 */
	thr_ret = _wapi_shm_sem_lock (_WAPI_SHARED_SEM_FILESHARE);
	g_assert(thr_ret == 0);

	_wapi_handle_init (handle, type, handle_specific);

	thr_ret = _wapi_shm_sem_unlock (_WAPI_SHARED_SEM_FILESHARE);

	return(GUINT_TO_POINTER(fd));
/* Validate @handle against @type and, if @handle_specific is non-NULL,
 * return a pointer to the type-specific payload (the shared-table copy
 * for shared types, the private copy otherwise). */
gboolean _wapi_lookup_handle (gpointer handle, WapiHandleType type,
			      gpointer *handle_specific)
	struct _WapiHandleUnshared *handle_data;
	guint32 handle_idx = GPOINTER_TO_UINT(handle);

	if (!_WAPI_PRIVATE_VALID_SLOT (handle_idx)) {

	/* Initialize the array entries on demand */
	if (_wapi_private_handles [SLOT_INDEX (handle_idx)] == NULL)
		init_handles_slot (SLOT_INDEX (handle_idx));

	handle_data = &_WAPI_PRIVATE_HANDLES(handle_idx);

	if (handle_data->type != type) {

	if (handle_specific == NULL) {

	if (_WAPI_SHARED_HANDLE(type)) {
		struct _WapiHandle_shared_ref *ref;
		struct _WapiHandleShared *shared_handle_data;

		ref = &handle_data->u.shared;
		shared_handle_data = &_wapi_shared_layout->handles[ref->offset];

		if (shared_handle_data->type != type) {
			/* The handle must have been deleted on us
			 * by another process */

		*handle_specific = &shared_handle_data->u;

		*handle_specific = &handle_data->u;
/* Iterate every private handle of @type under scan_mutex, calling
 * @on_each for each one; iteration stops when the callback returns
 * TRUE.  Note: the callback runs with scan_mutex held, so it must not
 * allocate or free handles. */
_wapi_handle_foreach (WapiHandleType type,
		      gboolean (*on_each)(gpointer test, gpointer user),
	struct _WapiHandleUnshared *handle_data = NULL;

	pthread_cleanup_push ((void(*)(void *))mono_mutex_unlock_in_cleanup,
			      (void *)&scan_mutex);
	thr_ret = mono_mutex_lock (&scan_mutex);
	g_assert (thr_ret == 0);

	for (i = SLOT_INDEX (0); i < _wapi_private_handle_slot_count; i++) {
		if (_wapi_private_handles [i]) {
			for (k = SLOT_OFFSET (0); k < _WAPI_HANDLE_INITIAL_COUNT; k++) {
				handle_data = &_wapi_private_handles [i][k];

				if (handle_data->type == type) {
					ret = GUINT_TO_POINTER (i * _WAPI_HANDLE_INITIAL_COUNT + k);
					if (on_each (ret, user_data) == TRUE)

	thr_ret = mono_mutex_unlock (&scan_mutex);
	g_assert (thr_ret == 0);
	pthread_cleanup_pop (0);
/* This might list some shared handles twice if they are already
 * opened by this process, and the check function returns FALSE the
 * first time. Shared handles that are created during the search are
 * unreffed if the check function returns FALSE, so callers must not
 * rely on the handle persisting (unless the check function returns
 * TRUE.)
 * The caller owns the returned handle.
 */
gpointer _wapi_search_handle (WapiHandleType type,
			      gboolean (*check)(gpointer test, gpointer user),
			      gpointer *handle_specific,
			      gboolean search_shared)
	struct _WapiHandleUnshared *handle_data = NULL;
	struct _WapiHandleShared *shared = NULL;
	gboolean found = FALSE;

	pthread_cleanup_push ((void(*)(void *))mono_mutex_unlock_in_cleanup,
			      (void *)&scan_mutex);
	thr_ret = mono_mutex_lock (&scan_mutex);
	g_assert (thr_ret == 0);

	/* First pass: handles this process already has open */
	for (i = SLOT_INDEX (0); !found && i < _wapi_private_handle_slot_count; i++) {
		if (_wapi_private_handles [i]) {
			for (k = SLOT_OFFSET (0); k < _WAPI_HANDLE_INITIAL_COUNT; k++) {
				handle_data = &_wapi_private_handles [i][k];

				if (handle_data->type == type) {
					ret = GUINT_TO_POINTER (i * _WAPI_HANDLE_INITIAL_COUNT + k);
					if (check (ret, user_data) == TRUE) {
						_wapi_handle_ref (ret);

						if (_WAPI_SHARED_HANDLE (type)) {
							/* NOTE(review): this indexes the shared
							 * table by the private slot index `i`, not
							 * by handle_data->u.shared.offset — verify
							 * this is intentional */
							shared = &_wapi_shared_layout->handles[i];

	thr_ret = mono_mutex_unlock (&scan_mutex);
	g_assert (thr_ret == 0);
	pthread_cleanup_pop (0);

	if (!found && search_shared && _WAPI_SHARED_HANDLE (type)) {
		/* Not found yet, so search the shared memory too */
		DEBUG ("%s: Looking at other shared handles...", __func__);

		for (i = 0; i < _WAPI_HANDLE_INITIAL_COUNT; i++) {
			shared = &_wapi_shared_layout->handles[i];

			if (shared->type == type) {
				/* Tell new_from_offset to not
				 * timestamp this handle, because
				 * otherwise it will ping every handle
				 * in the list and they will never
				 * expire
				 */
				ret = _wapi_handle_new_from_offset (type, i,
				if (ret == INVALID_HANDLE_VALUE) {
					/* This handle was deleted
					 * while we were looking at it
					 */

				DEBUG ("%s: Opened tmp handle %p (type %s) from offset %d", __func__, ret, _wapi_handle_typename[type], i);

				/* It's possible that the shared part
				 * of this handle has now been blown
				 * away (after new_from_offset
				 * successfully opened it,) if its
				 * timestamp is too old. The check
				 * function needs to be aware of this,
				 * and cope if the handle has
				 * vanished.
				 */
				if (check (ret, user_data) == TRUE) {
					/* Timestamp this handle, but make
					 * sure it still exists first
					 */
					thr_ret = _wapi_handle_lock_shared_handles ();
					g_assert (thr_ret == 0);

					if (shared->type == type) {
						guint32 now = (guint32)(time (NULL) & 0xFFFFFFFF);
						InterlockedExchange ((gint32 *)&shared->timestamp, now);

						handle_data = &_WAPI_PRIVATE_HANDLES(GPOINTER_TO_UINT(ret));

						_wapi_handle_unlock_shared_handles ();

						/* It's been deleted,
						 * so keep searching */
						_wapi_handle_unlock_shared_handles ();

					/* This isn't the handle we're looking
					 * for, so drop the reference we took
					 * in _wapi_handle_new_from_offset ()
					 */
					_wapi_handle_unref (ret);

	if(handle_specific != NULL) {
		if (_WAPI_SHARED_HANDLE(type)) {
			g_assert(shared->type == type);

			*handle_specific = &shared->u;

			*handle_specific = &handle_data->u;
/* Returns the offset of the metadata array, or -1 on error, or 0 for
 * not found (0 is not a valid offset)
 */
gint32 _wapi_search_handle_namespace (WapiHandleType type,
	struct _WapiHandleShared *shared_handle_data;

	g_assert(_WAPI_SHARED_HANDLE(type));

	DEBUG ("%s: Lookup for handle named [%s] type %s", __func__,
		   utf8_name, _wapi_handle_typename[type]);

	/* Do a handle collection before starting to look, so that any
	 * stale cruft gets removed
	 */
	_wapi_handle_collect ();

	thr_ret = _wapi_handle_lock_shared_handles ();
	g_assert (thr_ret == 0);

	/* Slot 0 is the guard entry, so start at 1 */
	for(i = 1; i < _WAPI_HANDLE_INITIAL_COUNT; i++) {
		WapiSharedNamespace *sharedns;

		shared_handle_data = &_wapi_shared_layout->handles[i];

		/* Check mutex, event, semaphore, timer, job and
		 * file-mapping object names. So far only mutex,
		 * semaphore and event are implemented.
		 */
		if (!_WAPI_SHARED_NAMESPACE (shared_handle_data->type)) {

		DEBUG ("%s: found a shared namespace handle at 0x%x (type %s)", __func__, i, _wapi_handle_typename[shared_handle_data->type]);

		/* The namespace payload begins with the object name */
		sharedns=(WapiSharedNamespace *)&shared_handle_data->u;

		DEBUG ("%s: name is [%s]", __func__, sharedns->name);

		if (strcmp (sharedns->name, utf8_name) == 0) {
			if (shared_handle_data->type != type) {
				/* Its the wrong type, so fail now */
				DEBUG ("%s: handle 0x%x matches name but is wrong type: %s", __func__, i, _wapi_handle_typename[shared_handle_data->type]);

				DEBUG ("%s: handle 0x%x matches name and type", __func__, i);

	_wapi_handle_unlock_shared_handles ();
/* Take a reference on @handle.  For shared types the shared slot's
 * timestamp is also refreshed, so collection doesn't expire a handle
 * that is actively being used. */
void _wapi_handle_ref (gpointer handle)
	guint32 idx = GPOINTER_TO_UINT(handle);
	struct _WapiHandleUnshared *handle_data;

	if (!_WAPI_PRIVATE_VALID_SLOT (idx)) {

	if (_wapi_handle_type (handle) == WAPI_HANDLE_UNUSED) {
		g_warning ("%s: Attempting to ref unused handle %p", __func__,

	handle_data = &_WAPI_PRIVATE_HANDLES(idx);

	InterlockedIncrement ((gint32 *)&handle_data->ref);

	/* It's possible for processes to exit before getting around
	 * to updating timestamps in the collection thread, so if a
	 * shared handle is reffed do the timestamp here as well just
	 * to make sure.
	 */
	if (_WAPI_SHARED_HANDLE(handle_data->type)) {
		struct _WapiHandleShared *shared_data = &_wapi_shared_layout->handles[handle_data->u.shared.offset];
		guint32 now = (guint32)(time (NULL) & 0xFFFFFFFF);
		InterlockedExchange ((gint32 *)&shared_data->timestamp, now);

	g_message ("%s: %s handle %p ref now %d", __func__,
		   _wapi_handle_typename[_WAPI_PRIVATE_HANDLES (idx).type],
		   _WAPI_PRIVATE_HANDLES(idx).ref);
/* The handle must not be locked on entry to this function */
/* Drop one reference on @handle; on reaching zero the private slot is
 * reset (under scan_mutex), the shared slot's refcount is decremented
 * (under the shared-handles lock, taken FIRST to avoid deadlock), and
 * finally the type's close() hook is invoked on a private COPY of the
 * handle data — so a racing allocation of the same slot is safe.
 * @ignore_private_busy_handles tolerates EBUSY mutex/cond destruction
 * during process exit (see handle_cleanup). */
static void _wapi_handle_unref_full (gpointer handle, gboolean ignore_private_busy_handles)
	guint32 idx = GPOINTER_TO_UINT(handle);
	gboolean destroy = FALSE, early_exit = FALSE;

	if (!_WAPI_PRIVATE_VALID_SLOT (idx)) {

	if (_wapi_handle_type (handle) == WAPI_HANDLE_UNUSED) {
		g_warning ("%s: Attempting to unref unused handle %p",

	/* Possible race condition here if another thread refs the
	 * handle between here and setting the type to UNUSED. I
	 * could lock a mutex, but I'm not sure that allowing a handle
	 * reference to reach 0 isn't an application bug anyway.
	 */
	destroy = (InterlockedDecrement ((gint32 *)&_WAPI_PRIVATE_HANDLES(idx).ref) ==0);

	g_message ("%s: %s handle %p ref now %d (destroy %s)", __func__,
		   _wapi_handle_typename[_WAPI_PRIVATE_HANDLES (idx).type],
		   _WAPI_PRIVATE_HANDLES(idx).ref, destroy?"TRUE":"FALSE");

		/* Need to copy the handle info, reset the slot in the
		 * array, and _only then_ call the close function to
		 * avoid race conditions (eg file descriptors being
		 * closed, and another file being opened getting the
		 * same fd racing the memset())
		 */
		struct _WapiHandleUnshared handle_data;
		struct _WapiHandleShared shared_handle_data;
		WapiHandleType type = _WAPI_PRIVATE_HANDLES(idx).type;
		void (*close_func)(gpointer, gpointer) = _wapi_handle_ops_get_close_func (type);
		gboolean is_shared = _WAPI_SHARED_HANDLE(type);

			/* If this is a shared handle we need to take
			 * the shared lock outside of the scan_mutex
			 * lock to avoid deadlocks
			 */
			thr_ret = _wapi_handle_lock_shared_handles ();
			g_assert (thr_ret == 0);

		pthread_cleanup_push ((void(*)(void *))mono_mutex_unlock_in_cleanup, (void *)&scan_mutex);
		thr_ret = mono_mutex_lock (&scan_mutex);

		DEBUG ("%s: Destroying handle %p", __func__, handle);

		memcpy (&handle_data, &_WAPI_PRIVATE_HANDLES(idx),
			sizeof (struct _WapiHandleUnshared));

		memset (&_WAPI_PRIVATE_HANDLES(idx).u, '\0',
			sizeof(_WAPI_PRIVATE_HANDLES(idx).u));

		_WAPI_PRIVATE_HANDLES(idx).type = WAPI_HANDLE_UNUSED;

			/* Destroy the mutex and cond var. We hope nobody
			 * tried to grab them between the handle unlock and
			 * now, but pthreads doesn't have a
			 * "unlock_and_destroy" atomic function.
			 */
			thr_ret = mono_mutex_destroy (&_WAPI_PRIVATE_HANDLES(idx).signal_mutex);
			/*WARNING gross hack to make cleanup not crash when exiting without the whole runtime teardown.*/
			if (thr_ret == EBUSY && ignore_private_busy_handles) {

				g_error ("Error destroying handle %p mutex due to %d\n", handle, thr_ret);

			thr_ret = pthread_cond_destroy (&_WAPI_PRIVATE_HANDLES(idx).signal_cond);
			if (thr_ret == EBUSY && ignore_private_busy_handles)

			else if (thr_ret != 0)
				g_error ("Error destroying handle %p cond var due to %d\n", handle, thr_ret);

			struct _WapiHandleShared *shared = &_wapi_shared_layout->handles[handle_data.u.shared.offset];

			memcpy (&shared_handle_data, shared,
				sizeof (struct _WapiHandleShared));

			/* It's possible that this handle is already
			 * pointing at a deleted shared section
			 */
			g_message ("%s: %s handle %p shared refs before dec %d", __func__, _wapi_handle_typename[type], handle, shared->handle_refs);

			if (shared->handle_refs > 0) {
				shared->handle_refs--;
				if (shared->handle_refs == 0) {
					memset (shared, '\0', sizeof (struct _WapiHandleShared));

		thr_ret = mono_mutex_unlock (&scan_mutex);
		g_assert (thr_ret == 0);
		pthread_cleanup_pop (0);

			_wapi_handle_unlock_shared_handles ();

		if (close_func != NULL) {
				/* Shared handles get the shared copy of the data */
				close_func (handle, &shared_handle_data.u);

				close_func (handle, &handle_data.u);
1153 void _wapi_handle_unref (gpointer handle)
1155 _wapi_handle_unref_full (handle, FALSE);
1158 void _wapi_handle_register_capabilities (WapiHandleType type,
1159 WapiHandleCapability caps)
1161 handle_caps[type] = caps;
1164 gboolean _wapi_handle_test_capabilities (gpointer handle,
1165 WapiHandleCapability caps)
1167 guint32 idx = GPOINTER_TO_UINT(handle);
1168 WapiHandleType type;
1170 if (!_WAPI_PRIVATE_VALID_SLOT (idx)) {
1174 type = _WAPI_PRIVATE_HANDLES(idx).type;
1176 DEBUG ("%s: testing 0x%x against 0x%x (%d)", __func__,
1177 handle_caps[type], caps, handle_caps[type] & caps);
1179 return((handle_caps[type] & caps) != 0);
1182 static void (*_wapi_handle_ops_get_close_func (WapiHandleType type))(gpointer, gpointer)
1184 if (handle_ops[type] != NULL &&
1185 handle_ops[type]->close != NULL) {
1186 return (handle_ops[type]->close);
1192 void _wapi_handle_ops_close (gpointer handle, gpointer data)
1194 guint32 idx = GPOINTER_TO_UINT(handle);
1195 WapiHandleType type;
1197 if (!_WAPI_PRIVATE_VALID_SLOT (idx)) {
1201 type = _WAPI_PRIVATE_HANDLES(idx).type;
1203 if (handle_ops[type] != NULL &&
1204 handle_ops[type]->close != NULL) {
1205 handle_ops[type]->close (handle, data);
1209 void _wapi_handle_ops_signal (gpointer handle)
1211 guint32 idx = GPOINTER_TO_UINT(handle);
1212 WapiHandleType type;
1214 if (!_WAPI_PRIVATE_VALID_SLOT (idx)) {
1218 type = _WAPI_PRIVATE_HANDLES(idx).type;
1220 if (handle_ops[type] != NULL && handle_ops[type]->signal != NULL) {
1221 handle_ops[type]->signal (handle);
1225 gboolean _wapi_handle_ops_own (gpointer handle)
1227 guint32 idx = GPOINTER_TO_UINT(handle);
1228 WapiHandleType type;
1230 if (!_WAPI_PRIVATE_VALID_SLOT (idx)) {
1234 type = _WAPI_PRIVATE_HANDLES(idx).type;
1236 if (handle_ops[type] != NULL && handle_ops[type]->own_handle != NULL) {
1237 return(handle_ops[type]->own_handle (handle));
1243 gboolean _wapi_handle_ops_isowned (gpointer handle)
1245 guint32 idx = GPOINTER_TO_UINT(handle);
1246 WapiHandleType type;
1248 if (!_WAPI_PRIVATE_VALID_SLOT (idx)) {
1252 type = _WAPI_PRIVATE_HANDLES(idx).type;
1254 if (handle_ops[type] != NULL && handle_ops[type]->is_owned != NULL) {
1255 return(handle_ops[type]->is_owned (handle));
1261 guint32 _wapi_handle_ops_special_wait (gpointer handle, guint32 timeout, gboolean alertable)
1263 guint32 idx = GPOINTER_TO_UINT(handle);
1264 WapiHandleType type;
1266 if (!_WAPI_PRIVATE_VALID_SLOT (idx)) {
1267 return(WAIT_FAILED);
1270 type = _WAPI_PRIVATE_HANDLES(idx).type;
1272 if (handle_ops[type] != NULL &&
1273 handle_ops[type]->special_wait != NULL) {
1274 return(handle_ops[type]->special_wait (handle, timeout, alertable));
1276 return(WAIT_FAILED);
1280 void _wapi_handle_ops_prewait (gpointer handle)
1282 guint32 idx = GPOINTER_TO_UINT (handle);
1283 WapiHandleType type;
1285 if (!_WAPI_PRIVATE_VALID_SLOT (idx)) {
1289 type = _WAPI_PRIVATE_HANDLES (idx).type;
1291 if (handle_ops[type] != NULL &&
1292 handle_ops[type]->prewait != NULL) {
1293 handle_ops[type]->prewait (handle);
/*
 * CloseHandle:
 * @handle: The handle to release
 *
 * Closes and invalidates @handle, releasing any resources it
 * consumes. When the last handle to a temporary or non-persistent
 * object is closed, that object can be deleted. Closing the same
 * handle twice is an error.
 *
 * Return value: %TRUE on success, %FALSE otherwise.
 */
gboolean CloseHandle(gpointer handle)
	if (handle == NULL) {
		/* Problem: because we map file descriptors to the
		 * same-numbered handle we can't tell the difference
		 * between a bogus handle and the handle to stdin.
		 * Assume that it's the console handle if that handle
		 * exists... */
		if (_WAPI_PRIVATE_HANDLES (0).type != WAPI_HANDLE_CONSOLE) {
			SetLastError (ERROR_INVALID_PARAMETER);

	if (handle == _WAPI_HANDLE_INVALID){
		SetLastError (ERROR_INVALID_PARAMETER);

	/* NOTE(review): the failure-return statements above and the final
	 * success return are not visible in this chunk */
	_wapi_handle_unref (handle);
1333 /* Lots more to implement here, but this is all we need at the moment */
/* Minimal DuplicateHandle: only duplication within the current
 * process is supported.  The pseudo-handles for the current process
 * and current thread are resolved to real duplicates; any other
 * handle is duplicated by simply taking an extra reference.
 * access/inherit/options are accepted but ignored.
 */
1334 gboolean DuplicateHandle (gpointer srcprocess, gpointer src,
1335 gpointer targetprocess, gpointer *target,
1336 guint32 access G_GNUC_UNUSED, gboolean inherit G_GNUC_UNUSED, guint32 options G_GNUC_UNUSED)
1338 if (srcprocess != _WAPI_PROCESS_CURRENT ||
1339 targetprocess != _WAPI_PROCESS_CURRENT) {
1340 /* Duplicating other process's handles is not supported */
1341 SetLastError (ERROR_INVALID_HANDLE);
1345 if (src == _WAPI_PROCESS_CURRENT) {
1346 *target = _wapi_process_duplicate ();
1347 } else if (src == _WAPI_THREAD_CURRENT) {
1348 *target = _wapi_thread_duplicate ();
/* Ordinary handle: duplication is just an extra reference.
 * (Assignment of *target for this branch is elided here.)
 */
1350 _wapi_handle_ref (src);
/* Locks every handle in @handles (with exponential backoff and full
 * retry on trylock failure, to avoid deadlocking against another
 * thread doing the same), then counts how many are signalled.
 * Success depends on @waitall: all signalled vs. at least one.
 * On success the handles are presumably left locked for the caller
 * (unlock path is not visible here — TODO confirm).
 */
1357 gboolean _wapi_handle_count_signalled_handles (guint32 numhandles,
1363 guint32 count, i, iter=0;
1366 WapiHandleType type;
1368 /* Lock all the handles, with backoff */
1370 thr_ret = _wapi_handle_lock_shared_handles ();
1371 g_assert (thr_ret == 0);
1373 for(i=0; i<numhandles; i++) {
1374 gpointer handle = handles[i];
1375 guint32 idx = GPOINTER_TO_UINT(handle);
1377 DEBUG ("%s: attempting to lock %p", __func__, handle);
1379 type = _WAPI_PRIVATE_HANDLES(idx).type;
/* trylock, not lock: on contention we release everything taken so
 * far and start over, rather than risk a lock-order deadlock.
 */
1381 thr_ret = _wapi_handle_trylock_handle (handle);
1386 DEBUG ("%s: attempt failed for %p: %s", __func__,
1387 handle, strerror (thr_ret));
1389 thr_ret = _wapi_handle_unlock_shared_handles ();
1390 g_assert (thr_ret == 0);
/* Roll back: unlock the handles locked before the failure */
1393 handle = handles[i];
1394 idx = GPOINTER_TO_UINT(handle);
1396 thr_ret = _wapi_handle_unlock_handle (handle);
1397 g_assert (thr_ret == 0);
1400 /* If iter ever reaches 100 the nanosleep will
1401 * return EINVAL immediately, but we have a
1402 * design flaw if that happens.
1406 g_warning ("%s: iteration overflow!",
1411 DEBUG ("%s: Backing off for %d ms", __func__,
/* Backoff grows linearly with the retry count */
1413 _wapi_handle_spin (10 * iter);
1419 DEBUG ("%s: Locked all handles", __func__);
/* All handles locked: now count the signalled ones */
1424 for(i=0; i<numhandles; i++) {
1425 gpointer handle = handles[i];
1426 guint32 idx = GPOINTER_TO_UINT(handle);
1428 type = _WAPI_PRIVATE_HANDLES(idx).type;
1430 DEBUG ("%s: Checking handle %p", __func__, handle);
/* A handle counts as signalled if: we already own it (ownable
 * types), or its signalled flag is set — read from shared memory
 * for shared handle types, from the private table otherwise.
 */
1432 if(((_wapi_handle_test_capabilities (handle, WAPI_HANDLE_CAP_OWN)==TRUE) &&
1433 (_wapi_handle_ops_isowned (handle) == TRUE)) ||
1434 (_WAPI_SHARED_HANDLE(type) &&
1435 WAPI_SHARED_HANDLE_DATA(handle).signalled == TRUE) ||
1436 (!_WAPI_SHARED_HANDLE(type) &&
1437 _WAPI_PRIVATE_HANDLES(idx).signalled == TRUE)) {
1440 DEBUG ("%s: Handle %p signalled", __func__,
1448 DEBUG ("%s: %d event handles signalled", __func__, count);
/* waitall: every handle must be signalled; otherwise one suffices */
1450 if ((waitall == TRUE && count == numhandles) ||
1451 (waitall == FALSE && count > 0)) {
1457 DEBUG ("%s: Returning %d", __func__, ret);
/* Releases the shared-handles lock and then unlocks each handle in
 * @handles — the counterpart to the locking performed by
 * _wapi_handle_count_signalled_handles().
 */
1464 void _wapi_handle_unlock_handles (guint32 numhandles, gpointer *handles)
1469 thr_ret = _wapi_handle_unlock_shared_handles ();
1470 g_assert (thr_ret == 0);
1472 for(i=0; i<numhandles; i++) {
1473 gpointer handle = handles[i];
1475 DEBUG ("%s: unlocking handle %p", __func__, handle);
1477 thr_ret = _wapi_handle_unlock_handle (handle);
1478 g_assert (thr_ret == 0);
/* Condition wait that polls in slices of at most 100ms instead of
 * blocking indefinitely, so the caller can notice state changes that
 * are not delivered through the condvar (used when waiting on
 * process handles).  The branch structure selecting between the
 * plain waits (1489/1491) and the polling path is elided here —
 * presumably keyed on @alertable; TODO confirm against full source.
 */
1482 static int timedwait_signal_poll_cond (pthread_cond_t *cond, mono_mutex_t *mutex, struct timespec *timeout, gboolean alertable)
1484 struct timespec fake_timeout;
1489 ret=mono_cond_timedwait (cond, mutex, timeout);
1491 ret=mono_cond_wait (cond, mutex);
/* Build a deadline 100ms from now for the poll slice */
1493 _wapi_calc_timeout (&fake_timeout, 100);
/* If the caller's real deadline lands before the 100ms slice,
 * honour the real deadline instead of the fake one.
 */
1495 if (timeout != NULL && ((fake_timeout.tv_sec > timeout->tv_sec) ||
1496 (fake_timeout.tv_sec == timeout->tv_sec &&
1497 fake_timeout.tv_nsec > timeout->tv_nsec))) {
1498 /* Real timeout is less than 100ms time */
1499 ret=mono_cond_timedwait (cond, mutex, timeout);
1501 ret=mono_cond_timedwait (cond, mutex, &fake_timeout);
1503 /* Mask the fake timeout, this will cause
1504 * another poll if the cond was not really signaled
1506 if (ret==ETIMEDOUT) {
/* Blocks (alertably, no timeout) on the global signal handle until
 * some handle becomes signalled.  @poll selects the polling wait.
 */
1515 int _wapi_handle_wait_signal (gboolean poll)
1517 return _wapi_handle_timedwait_signal_handle (_wapi_global_signal_handle, NULL, TRUE, poll);
/* Like _wapi_handle_wait_signal(), but gives up once @timeout
 * (absolute timespec) expires.
 */
1520 int _wapi_handle_timedwait_signal (struct timespec *timeout, gboolean poll)
1522 return _wapi_handle_timedwait_signal_handle (_wapi_global_signal_handle, timeout, TRUE, poll);
/* Waits without timeout for @handle to be signalled; @alertable
 * allows the wait to be interrupted.  Non-polling variant.
 */
1525 int _wapi_handle_wait_signal_handle (gpointer handle, gboolean alertable)
1527 DEBUG ("%s: waiting for %p", __func__, handle);
1529 return _wapi_handle_timedwait_signal_handle (handle, NULL, alertable, FALSE);
/* Core wait primitive.  Shared (cross-process) handle types cannot
 * use a process-local condvar, so they are polled in 100ms spins,
 * checking the signalled flag in shared memory; private handles wait
 * on the per-handle condvar/mutex pair, optionally with @timeout and
 * optionally via the polling wait (@poll, needed for process
 * handles).  @alertable registers the wait so it can be interrupted.
 */
1532 int _wapi_handle_timedwait_signal_handle (gpointer handle,
1533 struct timespec *timeout, gboolean alertable, gboolean poll)
1535 DEBUG ("%s: waiting for %p (type %s)", __func__, handle,
1536 _wapi_handle_typename[_wapi_handle_type (handle)]);
1538 if (_WAPI_SHARED_HANDLE (_wapi_handle_type (handle))) {
/* Fast path: already signalled in shared memory */
1539 if (WAPI_SHARED_HANDLE_DATA(handle).signalled == TRUE) {
1542 if (timeout != NULL) {
1543 struct timespec fake_timeout;
1544 _wapi_calc_timeout (&fake_timeout, 100);
1546 if ((fake_timeout.tv_sec > timeout->tv_sec) ||
1547 (fake_timeout.tv_sec == timeout->tv_sec &&
1548 fake_timeout.tv_nsec > timeout->tv_nsec)) {
1549 /* FIXME: Real timeout is less than
1550 * 100ms time, but is it really worth
1551 * calculating to the exact ms?
1553 _wapi_handle_spin (100);
1555 if (WAPI_SHARED_HANDLE_DATA(handle).signalled == TRUE) {
/* No timeout: spin 100ms and let the caller's loop re-check */
1562 _wapi_handle_spin (100);
1566 guint32 idx = GPOINTER_TO_UINT(handle);
1568 pthread_cond_t *cond;
1569 mono_mutex_t *mutex;
/* Register this wait so an interruption can wake us */
1571 if (alertable && !wapi_thread_set_wait_handle (handle))
1574 cond = &_WAPI_PRIVATE_HANDLES (idx).signal_cond;
1575 mutex = &_WAPI_PRIVATE_HANDLES (idx).signal_mutex;
1578 /* This is needed when waiting for process handles */
1579 res = timedwait_signal_poll_cond (cond, mutex, timeout, alertable);
1582 res = mono_cond_timedwait (cond, mutex, timeout);
1584 res = mono_cond_wait (cond, mutex);
/* Deregister the wait before returning */
1588 wapi_thread_clear_wait_handle (handle);
/* Releases a file-share entry.  Without shared memory the entry
 * lives in file_share_hash and removal frees it (hashtable value
 * destructor is g_free); with shared memory the slot is simply
 * zeroed so the scan in _wapi_handle_get_or_set_share() can reuse
 * it (handle_refs == 0 marks it unused).
 */
1595 _wapi_free_share_info (_WapiFileShare *share_info)
1597 if (!_wapi_shm_enabled ()) {
1598 file_share_hash_lock ();
1599 g_hash_table_remove (file_share_hash, share_info);
1600 file_share_hash_unlock ();
1601 /* The hashtable dtor frees share_info */
1603 memset (share_info, '\0', sizeof(struct _WapiFileShare));
/* GHashTable equality: two share entries refer to the same file iff
 * device and inode both match.
 */
1608 wapi_share_info_equal (gconstpointer ka, gconstpointer kb)
1610 const _WapiFileShare *s1 = ka;
1611 const _WapiFileShare *s2 = kb;
1613 return (s1->device == s2->device && s1->inode == s2->inode) ? 1 : 0;
/* GHashTable hash for _WapiFileShare keys; must agree with
 * wapi_share_info_equal (device+inode identity).  The hash
 * computation itself is elided here — presumably derived from
 * s->inode; TODO confirm against full source.
 */
1617 wapi_share_info_hash (gconstpointer data)
1619 const _WapiFileShare *s = data;
/* Looks up — or creates — the sharing record for the file identified
 * by (@device, @inode).  If a record already exists, its current
 * sharemode/access are returned through @old_sharemode/@old_access
 * and its refcount is bumped (so the caller can check compatibility
 * with @new_sharemode/@new_access); otherwise a fresh record is
 * created from the new values.  @share_info receives the record.
 * Two backing stores: a process-local hash table when SHM is
 * disabled, or a linear-scanned array in shared memory.  Runs under
 * both the shared-handles lock and the fileshare semaphore.
 * Return value semantics (presumably "existing entry found") are
 * elided here — TODO confirm against full source.
 */
1624 gboolean _wapi_handle_get_or_set_share (dev_t device, ino_t inode,
1625 guint32 new_sharemode,
1627 guint32 *old_sharemode,
1628 guint32 *old_access,
1629 struct _WapiFileShare **share_info)
1631 struct _WapiFileShare *file_share;
1632 guint32 now = (guint32)(time(NULL) & 0xFFFFFFFF);
1633 int thr_ret, i, first_unused = -1;
1634 gboolean exists = FALSE;
1636 /* Prevents entries from expiring under us as we search
1638 thr_ret = _wapi_handle_lock_shared_handles ();
1639 g_assert (thr_ret == 0);
1641 /* Prevent new entries racing with us */
1642 thr_ret = _wapi_shm_sem_lock (_WAPI_SHARED_SEM_FILESHARE);
1643 g_assert (thr_ret == 0);
1645 if (!_wapi_shm_enabled ()) {
1649 * Instead of allocating a 4MB array, we use a hash table to keep track of this
1650 * info. This is needed even if SHM is disabled, to track sharing inside
1651 * the current process.
1653 if (!file_share_hash) {
/* Lazy init: value destructor g_free frees entries on removal */
1654 file_share_hash = g_hash_table_new_full (wapi_share_info_hash, wapi_share_info_equal, NULL, g_free);
1655 InitializeCriticalSection (&file_share_hash_mutex);
1658 tmp.device = device;
1661 file_share_hash_lock ();
1663 file_share = g_hash_table_lookup (file_share_hash, &tmp);
1665 *old_sharemode = file_share->sharemode;
1666 *old_access = file_share->access;
1667 *share_info = file_share;
1669 InterlockedIncrement ((gint32 *)&file_share->handle_refs);
/* Not found: allocate and register a new record */
1672 file_share = g_new0 (_WapiFileShare, 1);
1674 file_share->device = device;
1675 file_share->inode = inode;
1676 file_share->opened_by_pid = _wapi_getpid ();
1677 file_share->sharemode = new_sharemode;
1678 file_share->access = new_access;
1679 file_share->handle_refs = 1;
1680 *share_info = file_share;
1682 g_hash_table_insert (file_share_hash, file_share, file_share);
1685 file_share_hash_unlock ();
1687 /* If a linear scan gets too slow we'll have to fit a hash
1688 * table onto the shared mem backing store
1691 for (i = 0; i <= _wapi_fileshare_layout->hwm; i++) {
1692 file_share = &_wapi_fileshare_layout->share_info[i];
1694 /* Make a note of an unused slot, in case we need to
1697 if (first_unused == -1 && file_share->handle_refs == 0) {
1702 if (file_share->handle_refs == 0) {
1706 if (file_share->device == device &&
1707 file_share->inode == inode) {
1708 *old_sharemode = file_share->sharemode;
1709 *old_access = file_share->access;
1710 *share_info = file_share;
1712 /* Increment the reference count while we
1713 * still have sole access to the shared area.
1714 * This makes the increment atomic wrt
1717 InterlockedIncrement ((gint32 *)&file_share->handle_refs);
/* Not found in shared memory: claim a slot for a new record */
1725 if (i == _WAPI_FILESHARE_SIZE && first_unused == -1) {
1728 if (first_unused == -1) {
/* No reusable hole: extend the array and raise the high-water mark */
1729 file_share = &_wapi_fileshare_layout->share_info[++i];
1730 _wapi_fileshare_layout->hwm = i;
1732 file_share = &_wapi_fileshare_layout->share_info[first_unused];
1735 file_share->device = device;
1736 file_share->inode = inode;
1737 file_share->opened_by_pid = _wapi_getpid ();
1738 file_share->sharemode = new_sharemode;
1739 file_share->access = new_access;
1740 file_share->handle_refs = 1;
1741 *share_info = file_share;
/* Refresh the timestamp so the collector doesn't reap this entry */
1745 if (*share_info != NULL) {
1746 InterlockedExchange ((gint32 *)&(*share_info)->timestamp, now);
1750 thr_ret = _wapi_shm_sem_unlock (_WAPI_SHARED_SEM_FILESHARE);
1752 _wapi_handle_unlock_shared_handles ();
1757 /* If we don't have the info in /proc, check if the process that
1758 * opened this share info is still there (it's not a perfect method,
/* Fallback liveness check when /proc is unavailable: probe the
 * creating process with kill(pid, 0) (signal 0 = existence check
 * only).  If that process is gone, the share entry is stale and is
 * destroyed.  The second condition on the kill failure (presumably
 * errno == ESRCH) is elided here.
 */
1761 static void _wapi_handle_check_share_by_pid (struct _WapiFileShare *share_info)
1763 if (kill (share_info->opened_by_pid, 0) == -1 &&
1766 /* It's gone completely (or there's a new process
1767 * owned by someone else) so mark this share info as
1770 DEBUG ("%s: Didn't find it, destroying entry", __func__);
1772 _wapi_free_share_info (share_info);
1777 /* Scan /proc/<pids>/fd/ for open file descriptors to the file in
1778 * question. If there are none, reset the share info.
1780 * This implementation is Linux-specific; legacy systems will have to
1781 * implement their own ways of finding out if a particular file is
1782 * open by a process.
/* Linux implementation: decide whether @share_info is still in use
 * anywhere, and destroy the entry if not.  Three checks, in order:
 * (1) no /proc → fall back to the pid probe; (2) another handle in
 * THIS process still references the same share_info; (3) scan
 * /proc/<pid>/fd of every known process handle for a descriptor
 * whose stat matches the share's device/inode (skipping our own @fd).
 * Runs under the shared-handles lock and the fileshare semaphore.
 */
1784 void _wapi_handle_check_share (struct _WapiFileShare *share_info, int fd)
1786 gboolean found = FALSE, proc_fds = FALSE;
1787 pid_t self = _wapi_getpid ();
1791 /* Prevents entries from expiring under us if we remove this
1794 thr_ret = _wapi_handle_lock_shared_handles ();
1795 g_assert (thr_ret == 0);
1797 /* Prevent new entries racing with us */
1798 thr_ret = _wapi_shm_sem_lock (_WAPI_SHARED_SEM_FILESHARE);
1799 g_assert (thr_ret == 0);
1801 /* If there is no /proc, there's nothing more we can do here */
1802 if (access ("/proc", F_OK) == -1) {
1803 _wapi_handle_check_share_by_pid (share_info);
1807 /* If there's another handle that thinks it owns this fd, then even
1808 * if the fd has been closed behind our back consider it still owned.
1809 * See bugs 75764 and 75891
1811 for (i = 0; i < _wapi_fd_reserve; i++) {
1812 if (_wapi_private_handles [SLOT_INDEX (i)]) {
1813 struct _WapiHandleUnshared *handle = &_WAPI_PRIVATE_HANDLES(i);
1816 handle->type == WAPI_HANDLE_FILE) {
1817 struct _WapiHandle_file *file_handle = &handle->u.file;
1819 if (file_handle->share_info == share_info) {
1820 DEBUG ("%s: handle 0x%x has this file open!",
/* Walk every shared process handle and inspect its open fds */
1829 for (i = 0; i < _WAPI_HANDLE_INITIAL_COUNT; i++) {
1830 struct _WapiHandleShared *shared;
1831 struct _WapiHandle_process *process_handle;
1833 shared = &_wapi_shared_layout->handles[i];
1835 if (shared->type == WAPI_HANDLE_PROCESS) {
1837 struct dirent *fd_entry;
1838 char subdir[_POSIX_PATH_MAX];
1840 process_handle = &shared->u.process;
1841 pid = process_handle->id;
1843 /* Look in /proc/<pid>/fd/ but ignore
1844 * /proc/<our pid>/fd/<fd>, as we have the
1847 g_snprintf (subdir, _POSIX_PATH_MAX, "/proc/%d/fd",
1850 fd_dir = opendir (subdir);
1851 if (fd_dir == NULL) {
1855 DEBUG ("%s: Looking in %s", __func__, subdir);
/* We saw at least one readable fd directory, so the /proc
 * scan is authoritative and the pid fallback is skipped.
 */
1859 while ((fd_entry = readdir (fd_dir)) != NULL) {
1860 char path[_POSIX_PATH_MAX];
1861 struct stat link_stat;
1863 if (!strcmp (fd_entry->d_name, ".") ||
1864 !strcmp (fd_entry->d_name, "..") ||
1866 fd == atoi (fd_entry->d_name))) {
1870 g_snprintf (path, _POSIX_PATH_MAX,
1871 "/proc/%d/fd/%s", pid,
/* stat() follows the /proc fd symlink to the real file */
1874 stat (path, &link_stat);
1875 if (link_stat.st_dev == share_info->device &&
1876 link_stat.st_ino == share_info->inode) {
1877 DEBUG ("%s: Found it at %s",
1888 if (proc_fds == FALSE) {
1889 _wapi_handle_check_share_by_pid (share_info);
1890 } else if (found == FALSE) {
1891 /* Blank out this entry, as it is stale */
1892 DEBUG ("%s: Didn't find it, destroying entry", __func__);
1894 _wapi_free_share_info (share_info);
1898 thr_ret = _wapi_shm_sem_unlock (_WAPI_SHARED_SEM_FILESHARE);
1900 _wapi_handle_unlock_shared_handles ();
1904 // Other implementations (non-Linux)
/* Non-Linux implementation: without /proc there is no way to scan
 * other processes' open files, so just run the pid-liveness check
 * under the same locks the Linux variant takes.
 */
1906 void _wapi_handle_check_share (struct _WapiFileShare *share_info, int fd)
1910 /* Prevents entries from expiring under us if we remove this
1912 thr_ret = _wapi_handle_lock_shared_handles ();
1913 g_assert (thr_ret == 0);
1915 /* Prevent new entries racing with us */
1916 thr_ret = _wapi_shm_sem_lock (_WAPI_SHARED_SEM_FILESHARE);
1917 g_assert (thr_ret == 0);
1919 _wapi_handle_check_share_by_pid (share_info);
1921 thr_ret = _wapi_shm_sem_unlock (_WAPI_SHARED_SEM_FILESHARE);
1922 _wapi_handle_unlock_shared_handles ();
/* Debug aid: prints one line per in-use private handle (index, type
 * name, signalled state) plus the type-specific detail dump, while
 * holding scan_mutex.  The cleanup handler guarantees the mutex is
 * released even if the thread is cancelled mid-scan.
 */
1926 void _wapi_handle_dump (void)
1928 struct _WapiHandleUnshared *handle_data;
1932 pthread_cleanup_push ((void(*)(void *))mono_mutex_unlock_in_cleanup,
1933 (void *)&scan_mutex);
1934 thr_ret = mono_mutex_lock (&scan_mutex);
1935 g_assert (thr_ret == 0);
/* Outer loop: allocated slot pages; inner loop: entries per page */
1937 for(i = SLOT_INDEX (0); i < _wapi_private_handle_slot_count; i++) {
1938 if (_wapi_private_handles [i]) {
1939 for (k = SLOT_OFFSET (0); k < _WAPI_HANDLE_INITIAL_COUNT; k++) {
1940 handle_data = &_wapi_private_handles [i][k];
1942 if (handle_data->type == WAPI_HANDLE_UNUSED) {
1946 g_print ("%3x [%7s] %s %d ",
1947 i * _WAPI_HANDLE_INITIAL_COUNT + k,
1948 _wapi_handle_typename[handle_data->type],
1949 handle_data->signalled?"Sg":"Un",
/* Per-type detail printer from the handle_details table */
1951 handle_details[handle_data->type](&handle_data->u);
1957 thr_ret = mono_mutex_unlock (&scan_mutex);
1958 g_assert (thr_ret == 0);
1959 pthread_cleanup_pop (0);
/* Detail printer (for _wapi_handle_dump) used by all shared handle
 * types: shows the entry's offset into the shared-memory segment.
 */
1962 static void _wapi_shared_details (gpointer handle_info)
1964 struct _WapiHandle_shared_ref *shared = (struct _WapiHandle_shared_ref *)handle_info;
1966 g_print ("offset: 0x%x", shared->offset);
/* Keep-alive sweep: walks every private handle and refreshes the
 * timestamp of the shared-memory record it references — shared
 * handle entries and file-share entries alike — so the shared-area
 * collector does not reap records that this process still uses.
 * Takes the shared-handles lock, the fileshare semaphore, and
 * scan_mutex (the latter with a cancellation cleanup handler).
 */
1969 void _wapi_handle_update_refs (void)
/* time() truncated to 32 bits to match the shared timestamp field */
1973 guint32 now = (guint32)(time (NULL) & 0xFFFFFFFF);
1975 thr_ret = _wapi_handle_lock_shared_handles ();
1976 g_assert (thr_ret == 0);
1978 /* Prevent file share entries racing with us */
1979 thr_ret = _wapi_shm_sem_lock (_WAPI_SHARED_SEM_FILESHARE);
1980 g_assert(thr_ret == 0);
1982 pthread_cleanup_push ((void(*)(void *))mono_mutex_unlock_in_cleanup,
1983 (void *)&scan_mutex);
1984 thr_ret = mono_mutex_lock (&scan_mutex);
1986 for(i = SLOT_INDEX (0); i < _wapi_private_handle_slot_count; i++) {
1987 if (_wapi_private_handles [i]) {
1988 for (k = SLOT_OFFSET (0); k < _WAPI_HANDLE_INITIAL_COUNT; k++) {
1989 struct _WapiHandleUnshared *handle = &_wapi_private_handles [i][k];
1991 if (_WAPI_SHARED_HANDLE(handle->type)) {
1992 struct _WapiHandleShared *shared_data;
1994 DEBUG ("%s: (%d) handle 0x%x is SHARED (%s)", __func__, _wapi_getpid (), i * _WAPI_HANDLE_INITIAL_COUNT + k, _wapi_handle_typename[handle->type]);
/* The private entry stores only an offset into the shared area */
1996 shared_data = &_wapi_shared_layout->handles[handle->u.shared.offset];
1998 DEBUG ("%s: (%d) Updating timestamp of handle 0x%x", __func__, _wapi_getpid (), handle->u.shared.offset);
2000 InterlockedExchange ((gint32 *)&shared_data->timestamp, now);
2001 } else if (handle->type == WAPI_HANDLE_FILE) {
2002 struct _WapiHandle_file *file_handle = &handle->u.file;
2004 DEBUG ("%s: (%d) handle 0x%x is FILE", __func__, _wapi_getpid (), i * _WAPI_HANDLE_INITIAL_COUNT + k);
2006 g_assert (file_handle->share_info != NULL);
2008 DEBUG ("%s: (%d) Inc refs on fileshare 0x%x", __func__, _wapi_getpid (), (file_handle->share_info - &_wapi_fileshare_layout->share_info[0]) / sizeof(struct _WapiFileShare));
2010 InterlockedExchange ((gint32 *)&file_handle->share_info->timestamp, now);
2016 thr_ret = mono_mutex_unlock (&scan_mutex);
2017 g_assert (thr_ret == 0);
2018 pthread_cleanup_pop (0);
2020 thr_ret = _wapi_shm_sem_unlock (_WAPI_SHARED_SEM_FILESHARE);
2022 _wapi_handle_unlock_shared_handles ();