#define UICULTURES_START_IDX NUM_CACHED_CULTURES
/* Controls access to the 'threads' hash table */
-#define mono_threads_lock() EnterCriticalSection (&threads_mutex)
-#define mono_threads_unlock() LeaveCriticalSection (&threads_mutex)
-static CRITICAL_SECTION threads_mutex;
+#define mono_threads_lock() mono_mutex_lock (&threads_mutex)
+#define mono_threads_unlock() mono_mutex_unlock (&threads_mutex)
+static mono_mutex_t threads_mutex;
/* Controls access to context static data */
-#define mono_contexts_lock() EnterCriticalSection (&contexts_mutex)
-#define mono_contexts_unlock() LeaveCriticalSection (&contexts_mutex)
-static CRITICAL_SECTION contexts_mutex;
+#define mono_contexts_lock() mono_mutex_lock (&contexts_mutex)
+#define mono_contexts_unlock() mono_mutex_unlock (&contexts_mutex)
+static mono_mutex_t contexts_mutex;
/* Controls access to the 'joinable_threads' hash table */
-#define joinable_threads_lock() EnterCriticalSection (&joinable_threads_mutex)
-#define joinable_threads_unlock() LeaveCriticalSection (&joinable_threads_mutex)
-static CRITICAL_SECTION joinable_threads_mutex;
+#define joinable_threads_lock() mono_mutex_lock (&joinable_threads_mutex)
+#define joinable_threads_unlock() mono_mutex_unlock (&joinable_threads_mutex)
+static mono_mutex_t joinable_threads_mutex;
/* Holds current status of static data heap */
static StaticDataInfo thread_static_info;
static void ref_stack_destroy (gpointer rs);
/* Spin lock for InterlockedXXX 64 bit functions */
-#define mono_interlocked_lock() EnterCriticalSection (&interlocked_mutex)
-#define mono_interlocked_unlock() LeaveCriticalSection (&interlocked_mutex)
-static CRITICAL_SECTION interlocked_mutex;
+#define mono_interlocked_lock() mono_mutex_lock (&interlocked_mutex)
+#define mono_interlocked_unlock() mono_mutex_unlock (&interlocked_mutex)
+static mono_mutex_t interlocked_mutex;
/* global count of thread interruptions requested */
static gint32 thread_interruption_requested = 0;
/*
 * Lazily allocate and install thread->synch_cs.
 * Safe to call concurrently: if several threads race here, exactly one
 * compare-and-swap wins; the losers destroy and free their candidate mutex.
 */
static void ensure_synch_cs_set (MonoInternalThread *thread)
{
	mono_mutex_t *new_cs;

	/* Fast path: already installed by us or another thread. */
	if (thread->synch_cs != NULL)
		return;

	new_cs = g_new0 (mono_mutex_t, 1);
	/* Recursive: lock_thread () may nest on the same thread. */
	mono_mutex_init_recursive (new_cs);

	if (InterlockedCompareExchangePointer ((gpointer *)&thread->synch_cs,
					       new_cs, NULL) != NULL) {
		/* Another thread won the race and installed its own mutex. */
		mono_mutex_destroy (new_cs);
		g_free (new_cs);
	}
}
ensure_synch_cs_set (thread);
g_assert (thread->synch_cs);
- EnterCriticalSection (thread->synch_cs);
+ mono_mutex_lock (thread->synch_cs);
}
/* Release the per-thread synchronization mutex acquired via lock_thread (). */
static inline void
unlock_thread (MonoInternalThread *thread)
{
	mono_mutex_unlock (thread->synch_cs);
}
/*
vt = mono_class_vtable (mono_get_root_domain (), mono_defaults.internal_thread_class);
thread = (MonoInternalThread*)mono_gc_alloc_mature (vt);
- thread->synch_cs = g_new0 (CRITICAL_SECTION, 1);
- InitializeCriticalSection (thread->synch_cs);
+ thread->synch_cs = g_new0 (mono_mutex_t, 1);
+ mono_mutex_init_recursive (thread->synch_cs);
thread->apartment_state = ThreadApartmentState_Unknown;
thread->managed_id = get_next_managed_thread_id ();
MonoNativeThreadId tid;
guint32 create_flags;
+ /*
+ * Join joinable threads to prevent running out of threads since the finalizer
+ * thread might be blocked/backlogged.
+ */
+ mono_threads_join_threads ();
+
mono_threads_lock ();
if (shutting_down) {
g_free (start_info);
mono_thread_detach_internal (thread->internal_thread);
}
+/*
+ * mono_thread_detach_if_exiting:
+ *
+ * Detach the current thread from the runtime if it is exiting, i.e. it is running pthread dtors.
+ * This should be used at the end of embedding code which calls into managed code, and which
+ * can be called from pthread dtors, like dealloc: implementations in objective-c.
+ */
+void
+mono_thread_detach_if_exiting (void)
+{
+ if (mono_thread_info_is_exiting ()) {
+ MonoInternalThread *thread;
+
+ thread = mono_thread_internal_current ();
+ if (thread) {
+ mono_thread_detach_internal (thread);
+ mono_thread_info_detach ();
+ }
+ }
+}
+
void
mono_thread_exit ()
{
CloseHandle (thread);
if (this->synch_cs) {
- CRITICAL_SECTION *synch_cs = this->synch_cs;
+ mono_mutex_t *synch_cs = this->synch_cs;
this->synch_cs = NULL;
- DeleteCriticalSection (synch_cs);
+ mono_mutex_destroy (synch_cs);
g_free (synch_cs);
}
continue;
/* Re-calculate ms according to the time passed */
- diff_ms = (mono_100ns_ticks () - start) / 10000;
+ diff_ms = (gint32)((mono_100ns_ticks () - start) / 10000);
if (diff_ms >= ms) {
ret = WAIT_TIMEOUT;
break;
/* Do this WaitSleepJoin check before creating objects */
mono_thread_current_check_pending_interrupt ();
- numhandles = mono_array_length(mono_handles);
+ /* We fail in managed if the array has more than 64 elements */
+ numhandles = (guint32)mono_array_length(mono_handles);
handles = g_new0(HANDLE, numhandles);
for(i = 0; i < numhandles; i++) {
gint32 ves_icall_System_Threading_WaitHandle_WaitAny_internal(MonoArray *mono_handles, gint32 ms, gboolean exitContext)
{
HANDLE handles [MAXIMUM_WAIT_OBJECTS];
- guint32 numhandles;
+ uintptr_t numhandles;
guint32 ret;
guint32 i;
MonoObject *waitHandle;
}
#ifdef HOST_WIN32
- QueueUserAPC ((PAPCFUNC)interruption_request_apc, thread->handle, NULL);
+ QueueUserAPC ((PAPCFUNC)interruption_request_apc, thread->handle, (ULONG_PTR)NULL);
#else
/*
* This will cause waits to be broken.
void mono_thread_init (MonoThreadStartCB start_cb,
MonoThreadAttachCB attach_cb)
{
- InitializeCriticalSection(&threads_mutex);
- InitializeCriticalSection(&interlocked_mutex);
- InitializeCriticalSection(&contexts_mutex);
- InitializeCriticalSection(&joinable_threads_mutex);
+ mono_mutex_init_recursive(&threads_mutex);
+ mono_mutex_init_recursive(&interlocked_mutex);
+ mono_mutex_init_recursive(&contexts_mutex);
+ mono_mutex_init_recursive(&joinable_threads_mutex);
background_change_event = CreateEvent (NULL, TRUE, FALSE, NULL);
g_assert(background_change_event != NULL);
* critical sections can be locked when mono_thread_cleanup is
* called.
*/
- DeleteCriticalSection (&threads_mutex);
- DeleteCriticalSection (&interlocked_mutex);
- DeleteCriticalSection (&contexts_mutex);
- DeleteCriticalSection (&delayed_free_table_mutex);
- DeleteCriticalSection (&small_id_mutex);
+ mono_mutex_destroy (&threads_mutex);
+ mono_mutex_destroy (&interlocked_mutex);
+ mono_mutex_destroy (&contexts_mutex);
+ mono_mutex_destroy (&delayed_free_table_mutex);
+ mono_mutex_destroy (&small_id_mutex);
CloseHandle (background_change_event);
#endif
static uintptr_t* static_reference_bitmaps [NUM_STATIC_DATA_IDX];
static void
-mark_tls_slots (void *addr, MonoGCMarkFunc mark_func)
+mark_tls_slots (void *addr, MonoGCMarkFunc mark_func, void *gc_data)
{
int i;
gpointer *static_data = addr;
void ** p = ptr;
while (bmap) {
if ((bmap & 1) && *p) {
- mark_func (p);
+ mark_func (p, gc_data);
}
p++;
bmap >>= 1;
return NULL;
}
+#if SIZEOF_VOID_P == 4
+#define ONE_P 1
+#else
+#define ONE_P 1ll
+#endif
+
static void
update_tls_reference_bitmap (guint32 offset, uintptr_t *bitmap, int numbits)
{
offset /= sizeof (gpointer);
/* offset is now the bitmap offset */
for (i = 0; i < numbits; ++i) {
- if (bitmap [i / sizeof (uintptr_t)] & (1L << (i & (sizeof (uintptr_t) * 8 -1))))
- rb [(offset + i) / (sizeof (uintptr_t) * 8)] |= (1L << ((offset + i) & (sizeof (uintptr_t) * 8 -1)));
+ if (bitmap [i / sizeof (uintptr_t)] & (ONE_P << (i & (sizeof (uintptr_t) * 8 -1))))
+ rb [(offset + i) / (sizeof (uintptr_t) * 8)] |= (ONE_P << ((offset + i) & (sizeof (uintptr_t) * 8 -1)));
}
}
or similar */
/* Our implementation of this function ignores the func argument */
#ifdef HOST_WIN32
- QueueUserAPC ((PAPCFUNC)dummy_apc, thread->handle, NULL);
+ QueueUserAPC ((PAPCFUNC)dummy_apc, thread->handle, (ULONG_PTR)NULL);
#else
wapi_self_interrupt ();
#endif
mono_thread_info_setup_async_call (info, self_interrupt_thread, NULL);
mono_thread_info_finish_suspend_and_resume (info);
} else {
- gpointer interrupt_handle;
/*
* This will cause waits to be broken.
* It will also prevent the thread from entering a wait, so if the thread returns
* make it return.
*/
#ifndef HOST_WIN32
+ gpointer interrupt_handle;
interrupt_handle = wapi_prepare_interrupt_thread (thread->handle);
#endif
mono_thread_info_finish_suspend_and_resume (info);
if (running_managed && !protected_wrapper) {
transition_to_suspended (thread, info);
} else {
+#ifndef HOST_WIN32
gpointer interrupt_handle;
+#endif
if (InterlockedCompareExchange (&thread->interruption_requested, 1, 0) == 0)
InterlockedIncrement (&thread_interruption_requested);