return mono_interp_get_runtime_method (mono_marshal_get_remoting_invoke_for_target (method, target));
}
-static CRITICAL_SECTION runtime_method_lookup_section;
+static mono_mutex_t runtime_method_lookup_section;
RuntimeMethod*
mono_interp_get_runtime_method (MonoMethod *method)
MonoDomain *domain = mono_domain_get ();
RuntimeMethod *rtm;
- EnterCriticalSection (&runtime_method_lookup_section);
+ mono_mutex_lock (&runtime_method_lookup_section);
if ((rtm = mono_internal_hash_table_lookup (&domain->jit_code_hash, method))) {
- LeaveCriticalSection (&runtime_method_lookup_section);
+ mono_mutex_unlock (&runtime_method_lookup_section);
return rtm;
}
rtm = mono_mempool_alloc (domain->mp, sizeof (RuntimeMethod));
rtm->hasthis = mono_method_signature (method)->hasthis;
rtm->valuetype = method->klass->valuetype;
mono_internal_hash_table_insert (&domain->jit_code_hash, method, rtm);
- LeaveCriticalSection (&runtime_method_lookup_section);
+ mono_mutex_unlock (&runtime_method_lookup_section);
return rtm;
}
return sp;
}
-static CRITICAL_SECTION create_method_pointer_mutex;
+static mono_mutex_t create_method_pointer_mutex;
static MonoGHashTable *method_pointer_hash = NULL;
gpointer addr;
MonoJitInfo *ji;
- EnterCriticalSection (&create_method_pointer_mutex);
+ mono_mutex_lock (&create_method_pointer_mutex);
if (!method_pointer_hash) {
MONO_GC_REGISTER_ROOT (method_pointer_hash);
method_pointer_hash = mono_g_hash_table_new (NULL, NULL);
}
addr = mono_g_hash_table_lookup (method_pointer_hash, method);
if (addr) {
- LeaveCriticalSection (&create_method_pointer_mutex);
+ mono_mutex_unlock (&create_method_pointer_mutex);
return addr;
}
addr = mono_arch_create_method_pointer (method);
mono_g_hash_table_insert (method_pointer_hash, method, addr);
- LeaveCriticalSection (&create_method_pointer_mutex);
+ mono_mutex_unlock (&create_method_pointer_mutex);
return addr;
}
thread_context_id = TlsAlloc ();
TlsSetValue (thread_context_id, NULL);
- InitializeCriticalSection (&runtime_method_lookup_section);
- InitializeCriticalSection (&create_method_pointer_mutex);
+ mono_mutex_init_recursive (&runtime_method_lookup_section);
+ mono_mutex_init_recursive (&create_method_pointer_mutex);
mono_runtime_install_handlers ();
mono_interp_transform_init ();
g_hash_table_destroy (td.data_hash);
}
/* NOTE(review): patch hunk — migrates the interpreter's transform lock from a
 * Windows CRITICAL_SECTION to the portable mono_mutex_t wrapper. */
-static CRITICAL_SECTION calc_section;
+static mono_mutex_t calc_section;
/* One-time init of calc_section, the lock serializing method transformation.
 * Recursive init is used to keep CRITICAL_SECTION's recursive-acquire semantics. */
void
mono_interp_transform_init (void)
{
- InitializeCriticalSection(&calc_section);
+ mono_mutex_init_recursive(&calc_section);
}
MonoException *
if (method->iflags & (METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL | METHOD_IMPL_ATTRIBUTE_RUNTIME)) {
MonoMethod *nm = NULL;
- EnterCriticalSection(&calc_section);
+ mono_mutex_lock(&calc_section);
if (runtime_method->transformed) {
- LeaveCriticalSection(&calc_section);
+ mono_mutex_unlock(&calc_section);
mono_profiler_method_end_jit (method, MONO_PROFILE_OK);
return NULL;
}
runtime_method->stack_size = sizeof (stackval); /* for tracing */
runtime_method->alloca_size = runtime_method->stack_size;
runtime_method->transformed = TRUE;
- LeaveCriticalSection(&calc_section);
+ mono_mutex_unlock(&calc_section);
mono_profiler_method_end_jit (method, MONO_PROFILE_OK);
return NULL;
}
method = nm;
header = mono_method_get_header (nm);
- LeaveCriticalSection(&calc_section);
+ mono_mutex_unlock(&calc_section);
}
g_assert ((signature->param_count + signature->hasthis) < 1000);
g_assert (header->max_stack < 10000);
}
/* the rest needs to be locked so it is only done once */
- EnterCriticalSection(&calc_section);
+ mono_mutex_lock(&calc_section);
if (runtime_method->transformed) {
- LeaveCriticalSection(&calc_section);
+ mono_mutex_unlock(&calc_section);
g_free (is_bb_start);
mono_profiler_method_end_jit (method, MONO_PROFILE_OK);
return NULL;
mono_profiler_method_end_jit (method, MONO_PROFILE_OK);
runtime_method->transformed = TRUE;
- LeaveCriticalSection(&calc_section);
+ mono_mutex_unlock(&calc_section);
return NULL;
}
* 4MB array.
*/
static GHashTable *file_share_hash;
-static CRITICAL_SECTION file_share_hash_mutex;
+static mono_mutex_t file_share_hash_mutex;
-#define file_share_hash_lock() EnterCriticalSection (&file_share_hash_mutex)
-#define file_share_hash_unlock() LeaveCriticalSection (&file_share_hash_mutex)
+#define file_share_hash_lock() mono_mutex_lock (&file_share_hash_mutex)
+#define file_share_hash_unlock() mono_mutex_unlock (&file_share_hash_mutex)
guint32 _wapi_fd_reserve;
if (file_share_hash) {
g_hash_table_destroy (file_share_hash);
- DeleteCriticalSection (&file_share_hash_mutex);
+ mono_mutex_destroy (&file_share_hash_mutex);
}
for (i = 0; i < _WAPI_PRIVATE_MAX_SLOTS; ++i)
*/
if (!file_share_hash) {
file_share_hash = g_hash_table_new_full (wapi_share_info_hash, wapi_share_info_equal, NULL, g_free);
- InitializeCriticalSection (&file_share_hash_mutex);
+ mono_mutex_init_recursive (&file_share_hash_mutex);
}
tmp.device = device;
gchar *filename;
} RuntimeConfig;
-CRITICAL_SECTION mono_delegate_section;
+mono_mutex_t mono_delegate_section;
-CRITICAL_SECTION mono_strtod_mutex;
+mono_mutex_t mono_strtod_mutex;
static gunichar2 process_guid [36];
static gboolean process_guid_set = FALSE;
domain->domain = ad;
domain->setup = setup;
- InitializeCriticalSection (&mono_delegate_section);
+ mono_mutex_init_recursive (&mono_delegate_section);
- InitializeCriticalSection (&mono_strtod_mutex);
+ mono_mutex_init_recursive (&mono_strtod_mutex);
mono_thread_attach (domain);
mono_context_init (domain);
#endif
/* This protects loaded_assemblies and image->references */
-#define mono_assemblies_lock() EnterCriticalSection (&assemblies_mutex)
-#define mono_assemblies_unlock() LeaveCriticalSection (&assemblies_mutex)
-static CRITICAL_SECTION assemblies_mutex;
+#define mono_assemblies_lock() mono_mutex_lock (&assemblies_mutex)
+#define mono_assemblies_unlock() mono_mutex_unlock (&assemblies_mutex)
+static mono_mutex_t assemblies_mutex;
/* If defined, points to the bundled assembly information */
const MonoBundledAssembly **bundles;
check_path_env ();
check_extra_gac_path_env ();
- InitializeCriticalSection (&assemblies_mutex);
+ mono_mutex_init_recursive (&assemblies_mutex);
mono_mutex_init (&assembly_binding_mutex);
}
{
GSList *l;
- DeleteCriticalSection (&assemblies_mutex);
+ mono_mutex_destroy (&assemblies_mutex);
mono_mutex_destroy (&assembly_binding_mutex);
for (l = loaded_assembly_bindings; l; l = l->next) {
static gboolean needs_to_start, started;
-#define agent_lock() EnterCriticalSection (&agent_mutex)
-#define agent_unlock() LeaveCriticalSection (&agent_mutex)
-static CRITICAL_SECTION agent_mutex;
+#define agent_lock() mono_mutex_lock (&agent_mutex)
+#define agent_unlock() mono_mutex_unlock (&agent_mutex)
+static mono_mutex_t agent_mutex;
static void transport_connect (void);
/* Initialize the attach agent: create its lock and enable the agent.
 * Recursive mutex init preserves CRITICAL_SECTION's recursive semantics. */
void
mono_attach_init (void)
{
- InitializeCriticalSection (&agent_mutex);
+ mono_mutex_init_recursive (&agent_mutex);
config.enabled = TRUE;
}
* from mono_class_from_mono_type (), mono_array_new (),
* Array:CreateInstance (), etc, so use a separate cache + a separate lock.
*/
- EnterCriticalSection (&image->szarray_cache_lock);
+ mono_mutex_lock (&image->szarray_cache_lock);
if (!image->szarray_cache)
image->szarray_cache = g_hash_table_new (mono_aligned_addr_hash, NULL);
class = g_hash_table_lookup (image->szarray_cache, eclass);
- LeaveCriticalSection (&image->szarray_cache_lock);
+ mono_mutex_unlock (&image->szarray_cache_lock);
if (class)
return class;
if (rank == 1 && !bounded) {
MonoClass *prev_class;
- EnterCriticalSection (&image->szarray_cache_lock);
+ mono_mutex_lock (&image->szarray_cache_lock);
prev_class = g_hash_table_lookup (image->szarray_cache, eclass);
if (prev_class)
/* Someone got in before us */
class = prev_class;
else
g_hash_table_insert (image->szarray_cache, eclass, class);
- LeaveCriticalSection (&image->szarray_cache_lock);
+ mono_mutex_unlock (&image->szarray_cache_lock);
} else {
list = g_slist_append (rootlist, class);
g_hash_table_insert (image->array_cache, eclass, list);
#undef OPDEF
/* This mutex protects the various cominterop related caches in MonoImage */
-#define mono_cominterop_lock() EnterCriticalSection (&cominterop_mutex)
-#define mono_cominterop_unlock() LeaveCriticalSection (&cominterop_mutex)
-static CRITICAL_SECTION cominterop_mutex;
+#define mono_cominterop_lock() mono_mutex_lock (&cominterop_mutex)
+#define mono_cominterop_unlock() mono_mutex_unlock (&cominterop_mutex)
+static mono_mutex_t cominterop_mutex;
/* STDCALL on windows, CDECL everywhere else to work with XPCOM and MainWin COM */
#ifdef HOST_WIN32
{
const char* com_provider_env;
- InitializeCriticalSection (&cominterop_mutex);
+ mono_mutex_init_recursive (&cominterop_mutex);
com_provider_env = g_getenv ("MONO_COM");
if (com_provider_env && !strcmp(com_provider_env, "MS"))
/* Tear down COM-interop support: destroy the mutex protecting the
 * cominterop caches in MonoImage (declared earlier in this hunk). */
void
mono_cominterop_cleanup (void)
{
- DeleteCriticalSection (&cominterop_mutex);
+ mono_mutex_destroy (&cominterop_mutex);
}
void
#include <mono/io-layer/io-layer.h>
#include <mono/metadata/mempool-internals.h>
-extern CRITICAL_SECTION mono_delegate_section;
-extern CRITICAL_SECTION mono_strtod_mutex;
+extern mono_mutex_t mono_delegate_section;
+extern mono_mutex_t mono_strtod_mutex;
/*
* If this is set, the memory belonging to appdomains is not freed when a domain is
* i.e. if both are taken by the same thread, the loader lock
* must taken first.
*/
- CRITICAL_SECTION lock;
+ mono_mutex_t lock;
MonoMemPool *mp;
MonoCodeManager *code_mp;
/*
GHashTable *proxy_vtable_hash;
/* Protected by 'jit_code_hash_lock' */
MonoInternalHashTable jit_code_hash;
- CRITICAL_SECTION jit_code_hash_lock;
+ mono_mutex_t jit_code_hash_lock;
int num_jit_info_tables;
MonoJitInfoTable *
volatile jit_info_table;
GHashTable *finalizable_objects_hash;
/* Protects the three hashes above */
- CRITICAL_SECTION finalizable_objects_hash_lock;
+ mono_mutex_t finalizable_objects_hash_lock;
/* Used when accessing 'domain_assemblies' */
- CRITICAL_SECTION assemblies_lock;
+ mono_mutex_t assemblies_lock;
GHashTable *method_rgctx_hash;
gboolean mono_dont_free_domains;
-#define mono_appdomains_lock() EnterCriticalSection (&appdomains_mutex)
-#define mono_appdomains_unlock() LeaveCriticalSection (&appdomains_mutex)
-static CRITICAL_SECTION appdomains_mutex;
+#define mono_appdomains_lock() mono_mutex_lock (&appdomains_mutex)
+#define mono_appdomains_unlock() mono_mutex_unlock (&appdomains_mutex)
+static mono_mutex_t appdomains_mutex;
static MonoDomain *mono_root_domain = NULL;
domain->finalizable_objects_hash = g_hash_table_new (mono_aligned_addr_hash, NULL);
domain->ftnptrs_hash = g_hash_table_new (mono_aligned_addr_hash, NULL);
- InitializeCriticalSection (&domain->lock);
- InitializeCriticalSection (&domain->assemblies_lock);
- InitializeCriticalSection (&domain->jit_code_hash_lock);
- InitializeCriticalSection (&domain->finalizable_objects_hash_lock);
+ mono_mutex_init_recursive (&domain->lock);
+ mono_mutex_init_recursive (&domain->assemblies_lock);
+ mono_mutex_init_recursive (&domain->jit_code_hash_lock);
+ mono_mutex_init_recursive (&domain->finalizable_objects_hash_lock);
domain->method_rgctx_hash = NULL;
MONO_FAST_TLS_INIT (tls_appdomain);
mono_native_tls_alloc (&appdomain_thread_id, NULL);
- InitializeCriticalSection (&appdomains_mutex);
+ mono_mutex_init_recursive (&appdomains_mutex);
mono_metadata_init ();
mono_images_init ();
mono_metadata_cleanup ();
mono_native_tls_free (appdomain_thread_id);
- DeleteCriticalSection (&appdomains_mutex);
+ mono_mutex_destroy (&appdomains_mutex);
#ifndef HOST_WIN32
wapi_cleanup ();
domain->ftnptrs_hash = NULL;
}
- DeleteCriticalSection (&domain->finalizable_objects_hash_lock);
- DeleteCriticalSection (&domain->assemblies_lock);
- DeleteCriticalSection (&domain->jit_code_hash_lock);
- DeleteCriticalSection (&domain->lock);
+ mono_mutex_destroy (&domain->finalizable_objects_hash_lock);
+ mono_mutex_destroy (&domain->assemblies_lock);
+ mono_mutex_destroy (&domain->jit_code_hash_lock);
+ mono_mutex_destroy (&domain->lock);
domain->setup = NULL;
mono_gc_deregister_root ((char*)&(domain->MONO_DOMAIN_FIRST_GC_TRACKED));
#endif
} GCStats;
-#define mono_domain_finalizers_lock(domain) EnterCriticalSection (&(domain)->finalizable_objects_hash_lock);
-#define mono_domain_finalizers_unlock(domain) LeaveCriticalSection (&(domain)->finalizable_objects_hash_lock);
+#define mono_domain_finalizers_lock(domain) mono_mutex_lock (&(domain)->finalizable_objects_hash_lock);
+#define mono_domain_finalizers_unlock(domain) mono_mutex_unlock (&(domain)->finalizable_objects_hash_lock);
/* Register a memory area as a conservatively scanned GC root */
#define MONO_GC_REGISTER_ROOT_PINNING(x) mono_gc_register_root ((char*)&(x), sizeof(x), NULL)
static gboolean finalizing_root_domain = FALSE;
-#define mono_finalizer_lock() EnterCriticalSection (&finalizer_mutex)
-#define mono_finalizer_unlock() LeaveCriticalSection (&finalizer_mutex)
-static CRITICAL_SECTION finalizer_mutex;
-static CRITICAL_SECTION reference_queue_mutex;
+#define mono_finalizer_lock() mono_mutex_lock (&finalizer_mutex)
+#define mono_finalizer_unlock() mono_mutex_unlock (&finalizer_mutex)
+static mono_mutex_t finalizer_mutex;
+static mono_mutex_t reference_queue_mutex;
static GSList *domains_to_finalize= NULL;
static MonoMList *threads_to_finalize = NULL;
return mono_domain_get ()->ephemeron_tombstone;
}
-#define mono_allocator_lock() EnterCriticalSection (&allocator_section)
-#define mono_allocator_unlock() LeaveCriticalSection (&allocator_section)
-static CRITICAL_SECTION allocator_section;
-static CRITICAL_SECTION handle_section;
+#define mono_allocator_lock() mono_mutex_lock (&allocator_section)
+#define mono_allocator_unlock() mono_mutex_unlock (&allocator_section)
+static mono_mutex_t allocator_section;
+static mono_mutex_t handle_section;
typedef enum {
HANDLE_WEAK,
{NULL, NULL, 0, HANDLE_PINNED, 0}
};
-#define lock_handles(handles) EnterCriticalSection (&handle_section)
-#define unlock_handles(handles) LeaveCriticalSection (&handle_section)
+#define lock_handles(handles) mono_mutex_lock (&handle_section)
+#define unlock_handles(handles) mono_mutex_unlock (&handle_section)
static int
find_first_unset (guint32 bitmap)
void
mono_gc_init (void)
{
- InitializeCriticalSection (&handle_section);
- InitializeCriticalSection (&allocator_section);
+ mono_mutex_init_recursive (&handle_section);
+ mono_mutex_init_recursive (&allocator_section);
- InitializeCriticalSection (&finalizer_mutex);
- InitializeCriticalSection (&reference_queue_mutex);
+ mono_mutex_init_recursive (&finalizer_mutex);
+ mono_mutex_init_recursive (&reference_queue_mutex);
MONO_GC_REGISTER_ROOT_FIXED (gc_handles [HANDLE_NORMAL].entries);
MONO_GC_REGISTER_ROOT_FIXED (gc_handles [HANDLE_PINNED].entries);
mono_reference_queue_cleanup ();
- DeleteCriticalSection (&handle_section);
- DeleteCriticalSection (&allocator_section);
- DeleteCriticalSection (&finalizer_mutex);
- DeleteCriticalSection (&reference_queue_mutex);
+ mono_mutex_destroy (&handle_section);
+ mono_mutex_destroy (&allocator_section);
+ mono_mutex_destroy (&finalizer_mutex);
+ mono_mutex_destroy (&reference_queue_mutex);
}
#else
/* GC-less build variant (#else branch): only the GC-handle lock is needed;
 * no finalizer/allocator/reference-queue mutexes exist in this configuration. */
void mono_gc_init (void)
{
- InitializeCriticalSection (&handle_section);
+ mono_mutex_init_recursive (&handle_section);
}
void mono_gc_cleanup (void)
reference_queue_proccess (queue);
restart:
- EnterCriticalSection (&reference_queue_mutex);
+ mono_mutex_lock (&reference_queue_mutex);
for (iter = &ref_queues; *iter;) {
queue = *iter;
if (!queue->should_be_deleted) {
continue;
}
if (queue->queue) {
- LeaveCriticalSection (&reference_queue_mutex);
+ mono_mutex_unlock (&reference_queue_mutex);
reference_queue_proccess (queue);
goto restart;
}
*iter = queue->next;
g_free (queue);
}
- LeaveCriticalSection (&reference_queue_mutex);
+ mono_mutex_unlock (&reference_queue_mutex);
}
static void
MonoReferenceQueue *res = g_new0 (MonoReferenceQueue, 1);
res->callback = callback;
- EnterCriticalSection (&reference_queue_mutex);
+ mono_mutex_lock (&reference_queue_mutex);
res->next = ref_queues;
ref_queues = res;
- LeaveCriticalSection (&reference_queue_mutex);
+ mono_mutex_unlock (&reference_queue_mutex);
return res;
}
if (*ptr){
/* mono_strtod () is not thread-safe */
- EnterCriticalSection (&mono_strtod_mutex);
+ mono_mutex_lock (&mono_strtod_mutex);
*result = mono_strtod (ptr, &endptr);
- LeaveCriticalSection (&mono_strtod_mutex);
+ mono_mutex_unlock (&mono_strtod_mutex);
}
if (!*ptr || (endptr && *endptr))
static gboolean debug_assembly_unload = FALSE;
-#define mono_images_lock() if (mutex_inited) EnterCriticalSection (&images_mutex)
-#define mono_images_unlock() if (mutex_inited) LeaveCriticalSection (&images_mutex)
+#define mono_images_lock() if (mutex_inited) mono_mutex_lock (&images_mutex)
+#define mono_images_unlock() if (mutex_inited) mono_mutex_unlock (&images_mutex)
static gboolean mutex_inited;
-static CRITICAL_SECTION images_mutex;
+static mono_mutex_t images_mutex;
typedef struct ImageUnloadHook ImageUnloadHook;
struct ImageUnloadHook {
void
mono_images_init (void)
{
- InitializeCriticalSection (&images_mutex);
+ mono_mutex_init_recursive (&images_mutex);
loaded_images_hash = g_hash_table_new (g_str_hash, g_str_equal);
loaded_images_refonly_hash = g_hash_table_new (g_str_hash, g_str_equal);
GHashTableIter iter;
MonoImage *image;
- DeleteCriticalSection (&images_mutex);
+ mono_mutex_destroy (&images_mutex);
g_hash_table_iter_init (&iter, loaded_images_hash);
while (g_hash_table_iter_next (&iter, NULL, (void**)&image))
void
mono_image_init (MonoImage *image)
{
- InitializeCriticalSection (&image->lock);
- InitializeCriticalSection (&image->szarray_cache_lock);
+ mono_mutex_init_recursive (&image->lock);
+ mono_mutex_init_recursive (&image->szarray_cache_lock);
image->mempool = mono_mempool_new_size (512);
mono_internal_hash_table_init (&image->class_cache,
g_direct_hash,
class_key_extract,
class_next_value);
-#ifdef HOST_WIN32
- // FIXME:
image->field_cache = mono_conc_hashtable_new (&image->lock, NULL, NULL);
-#else
- image->field_cache = mono_conc_hashtable_new (&image->lock.mutex, NULL, NULL);
-#endif
image->typespec_cache = g_hash_table_new (NULL, NULL);
image->memberref_signatures = g_hash_table_new (NULL, NULL);
if (image->modules_loaded)
g_free (image->modules_loaded);
- DeleteCriticalSection (&image->szarray_cache_lock);
- DeleteCriticalSection (&image->lock);
+ mono_mutex_destroy (&image->szarray_cache_lock);
+ mono_mutex_destroy (&image->lock);
/*g_print ("destroy image %p (dynamic: %d)\n", image, image->dynamic);*/
if (image_is_dynamic (image)) {
* See domain-internals.h for locking policy in combination with the
* domain lock.
*/
-static CRITICAL_SECTION loader_mutex;
+static mono_mutex_t loader_mutex;
static gboolean loader_lock_inited;
/* Statistics */
static gboolean inited;
if (!inited) {
- InitializeCriticalSection (&loader_mutex);
+ mono_mutex_init_recursive (&loader_mutex);
loader_lock_inited = TRUE;
mono_native_tls_alloc (&loader_error_thread_id, NULL);
mono_native_tls_free (loader_error_thread_id);
mono_native_tls_free (loader_lock_nest_id);
- DeleteCriticalSection (&loader_mutex);
+ mono_mutex_destroy (&loader_mutex);
loader_lock_inited = FALSE;
}
*
* To log more kind of locks just do the following:
* - add an entry into the RuntimeLocks enum
- * - change EnterCriticalSection(mutex) to mono_locks_acquire (mutex, LockName)
- * - change LeaveCriticalSection to mono_locks_release (mutex, LockName)
+ * - change mono_mutex_lock(mutex) to mono_locks_acquire (mutex, LockName)
+ * - change mono_mutex_unlock to mono_locks_release (mutex, LockName)
* - change the decoder to understand the new lock kind.
*
* TODO:
#endif
static FILE *trace_file;
-static CRITICAL_SECTION tracer_lock;
+static mono_mutex_t tracer_lock;
static size_t base_address;
typedef enum {
Dl_info info;
int res;
char *name;
- InitializeCriticalSection (&tracer_lock);
+ mono_mutex_init_recursive (&tracer_lock);
if (!g_getenv ("MONO_ENABLE_LOCK_TRACER"))
return;
name = g_strdup_printf ("locks.%d", getpid ());
#endif
#define mono_locks_acquire(LOCK, NAME) do { \
- EnterCriticalSection (LOCK); \
+ mono_mutex_lock (LOCK); \
mono_locks_lock_acquired (NAME, LOCK); \
} while (0)
#define mono_locks_release(LOCK, NAME) do { \
mono_locks_lock_released (NAME, LOCK); \
- LeaveCriticalSection (LOCK); \
+ mono_mutex_unlock (LOCK); \
} while (0)
#define mono_locks_mutex_acquire(LOCK, NAME) do { \
*/
#define mono_marshal_lock() mono_locks_acquire (&marshal_mutex, MarshalLock)
#define mono_marshal_unlock() mono_locks_release (&marshal_mutex, MarshalLock)
-static CRITICAL_SECTION marshal_mutex;
+static mono_mutex_t marshal_mutex;
static gboolean marshal_mutex_initialized;
static MonoNativeTlsKey last_error_tls_id;
if (!module_initialized) {
module_initialized = TRUE;
- InitializeCriticalSection (&marshal_mutex);
+ mono_mutex_init_recursive (&marshal_mutex);
marshal_mutex_initialized = TRUE;
register_icall (ves_icall_System_Threading_Thread_ResetAbort, "ves_icall_System_Threading_Thread_ResetAbort", "void", TRUE);
mono_native_tls_free (load_type_info_tls_id);
mono_native_tls_free (last_error_tls_id);
- DeleteCriticalSection (&marshal_mutex);
+ mono_mutex_destroy (&marshal_mutex);
marshal_mutex_initialized = FALSE;
}
#include "metadata/appdomain.h"
#include "metadata/metadata-internals.h"
-static CRITICAL_SECTION mempool_tracing_lock;
+static mono_mutex_t mempool_tracing_lock;
#define BACKTRACE_DEPTH 7
static void
mono_backtrace (int size)
static gboolean inited;
if (!inited) {
- InitializeCriticalSection (&mempool_tracing_lock);
+ mono_mutex_init_recursive (&mempool_tracing_lock);
inited = TRUE;
}
- EnterCriticalSection (&mempool_tracing_lock);
+ mono_mutex_lock (&mempool_tracing_lock);
g_print ("Allocating %d bytes\n", size);
symbols = backtrace (array, BACKTRACE_DEPTH);
names = backtrace_symbols (array, symbols);
g_print ("\t%s\n", names [i]);
}
free (names);
- LeaveCriticalSection (&mempool_tracing_lock);
+ mono_mutex_unlock (&mempool_tracing_lock);
}
#endif
GHashTable *szarray_cache;
/* This has a separate lock to improve scalability */
- CRITICAL_SECTION szarray_cache_lock;
+ mono_mutex_t szarray_cache_lock;
/*
* indexed by MonoMethodSignature
* No other runtime locks must be taken while holding this lock.
* It's meant to be used only to mutate and query structures part of this image.
*/
- CRITICAL_SECTION lock;
+ mono_mutex_t lock;
};
/*
GHashTable *gclass_cache, *ginst_cache, *gmethod_cache, *gsignature_cache;
- CRITICAL_SECTION lock;
+ mono_mutex_t lock;
/*
* Memory for generic instances owned by this image set should be allocated from
static MonoImageSet *mscorlib_image_set;
/* Protected by image_sets_mutex */
static GPtrArray *image_sets;
-static CRITICAL_SECTION image_sets_mutex;
+static mono_mutex_t image_sets_mutex;
static guint mono_generic_class_hash (gconstpointer data);
for (i = 0; i < NBUILTIN_TYPES (); ++i)
g_hash_table_insert (type_cache, (gpointer) &builtin_types [i], (gpointer) &builtin_types [i]);
- InitializeCriticalSection (&image_sets_mutex);
+ mono_mutex_init_recursive (&image_sets_mutex);
}
/**
type_cache = NULL;
g_ptr_array_free (image_sets, TRUE);
image_sets = NULL;
- DeleteCriticalSection (&image_sets_mutex);
+ mono_mutex_destroy (&image_sets_mutex);
}
/**
/* Acquire the global lock protecting the image_sets array. */
static inline void
image_sets_lock (void)
{
- EnterCriticalSection (&image_sets_mutex);
+ mono_mutex_lock (&image_sets_mutex);
}
/* Release the global lock protecting the image_sets array. */
static inline void
image_sets_unlock (void)
{
- LeaveCriticalSection (&image_sets_mutex);
+ mono_mutex_unlock (&image_sets_mutex);
}
/*
set = g_new0 (MonoImageSet, 1);
set->nimages = nimages;
set->images = g_new0 (MonoImage*, nimages);
- InitializeCriticalSection (&set->lock);
+ mono_mutex_init_recursive (&set->lock);
for (i = 0; i < nimages; ++i)
set->images [i] = images [i];
set->gclass_cache = g_hash_table_new_full (mono_generic_class_hash, mono_generic_class_equal, NULL, (GDestroyNotify)free_generic_class);
if (set->mempool)
mono_mempool_destroy (set->mempool);
g_free (set->images);
- DeleteCriticalSection (&set->lock);
+ mono_mutex_destroy (&set->lock);
g_free (set);
}
/* Acquire the per-image-set lock guarding its generic-instance caches. */
static void
mono_image_set_lock (MonoImageSet *set)
{
- EnterCriticalSection (&set->lock);
+ mono_mutex_lock (&set->lock);
}
/* Release the per-image-set lock guarding its generic-instance caches. */
static void
mono_image_set_unlock (MonoImageSet *set)
{
- LeaveCriticalSection (&set->lock);
+ mono_mutex_unlock (&set->lock);
}
gpointer
MonoThreadsSync monitors [MONO_ZERO_LEN_ARRAY];
};
-#define mono_monitor_allocator_lock() EnterCriticalSection (&monitor_mutex)
-#define mono_monitor_allocator_unlock() LeaveCriticalSection (&monitor_mutex)
-static CRITICAL_SECTION monitor_mutex;
+#define mono_monitor_allocator_lock() mono_mutex_lock (&monitor_mutex)
+#define mono_monitor_allocator_unlock() mono_mutex_unlock (&monitor_mutex)
+static mono_mutex_t monitor_mutex;
static MonoThreadsSync *monitor_freelist;
static MonitorArray *monitor_allocated;
static int array_size = 16;
/* One-time init of the lock protecting the monitor allocator
 * (monitor_freelist / monitor_allocated, declared above). */
void
mono_monitor_init (void)
{
- InitializeCriticalSection (&monitor_mutex);
+ mono_mutex_init_recursive (&monitor_mutex);
}
void
MonoThreadsSync *mon;
/* MonitorArray *marray, *next = NULL; */
- /*DeleteCriticalSection (&monitor_mutex);*/
+ /*mono_mutex_destroy (&monitor_mutex);*/
/* The monitors on the freelist don't have weak links - mark them */
for (mon = monitor_freelist; mon; mon = mon->data)
#include <mono/metadata/mono-endian.h>
static guint32 debugger_lock_level = 0;
-static CRITICAL_SECTION debugger_lock_mutex;
+static mono_mutex_t debugger_lock_mutex;
typedef struct
{
mono_debugger_lock (void)
{
g_assert (initialized);
- EnterCriticalSection (&debugger_lock_mutex);
+ mono_mutex_lock (&debugger_lock_mutex);
debugger_lock_level++;
}
{
g_assert (initialized);
debugger_lock_level--;
- LeaveCriticalSection (&debugger_lock_mutex);
+ mono_mutex_unlock (&debugger_lock_mutex);
}
/* Initialize debugger support: create the (recursive) debugger lock and mark
 * the module initialized so mono_debugger_lock()'s g_assert passes. */
void
mono_debugger_initialize ()
{
- InitializeCriticalSection (&debugger_lock_mutex);
+ mono_mutex_init_recursive (&debugger_lock_mutex);
initialized = 1;
}
NUM_COUNTERS
};
-static CRITICAL_SECTION perfctr_mutex;
-#define perfctr_lock() EnterCriticalSection (&perfctr_mutex)
-#define perfctr_unlock() LeaveCriticalSection (&perfctr_mutex)
+static mono_mutex_t perfctr_mutex;
+#define perfctr_lock() mono_mutex_lock (&perfctr_mutex)
+#define perfctr_unlock() mono_mutex_unlock (&perfctr_mutex)
typedef struct {
char reserved [16];
d_offset += 7;
d_offset &= ~7;
- InitializeCriticalSection (&perfctr_mutex);
+ mono_mutex_init_recursive (&perfctr_mutex);
shared_area = mono_shared_area ();
shared_area->counters_start = G_STRUCT_OFFSET (MonoSharedArea, counters);
gpointer suspend_event;
gpointer suspended_event;
gpointer resume_event;
- CRITICAL_SECTION *synch_cs;
+ mono_mutex_t *synch_cs;
MonoBoolean threadpool_thread;
MonoBoolean thread_dump_requested;
MonoBoolean thread_interrupt_requested;
mono_string_to_utf8_internal (MonoMemPool *mp, MonoImage *image, MonoString *s, gboolean ignore_error, MonoError *error);
-#define ldstr_lock() EnterCriticalSection (&ldstr_section)
-#define ldstr_unlock() LeaveCriticalSection (&ldstr_section)
-static CRITICAL_SECTION ldstr_section;
+#define ldstr_lock() mono_mutex_lock (&ldstr_section)
+#define ldstr_unlock() mono_mutex_unlock (&ldstr_section)
+static mono_mutex_t ldstr_section;
static gboolean profile_allocs = TRUE;
guint32 initializing_tid;
guint32 waiting_count;
gboolean done;
- CRITICAL_SECTION initialization_section;
+ mono_mutex_t initialization_section;
} TypeInitializationLock;
/* for locking access to type_initialization_hash and blocked_thread_hash */
-#define mono_type_initialization_lock() EnterCriticalSection (&type_initialization_section)
-#define mono_type_initialization_unlock() LeaveCriticalSection (&type_initialization_section)
-static CRITICAL_SECTION type_initialization_section;
+#define mono_type_initialization_lock() mono_mutex_lock (&type_initialization_section)
+#define mono_type_initialization_unlock() mono_mutex_unlock (&type_initialization_section)
+static mono_mutex_t type_initialization_section;
/* from vtable to lock */
static GHashTable *type_initialization_hash;
/* Set up type-initialization (.cctor) bookkeeping: the lock and hashes used to
 * coordinate class constructors across threads, plus the ldstr intern lock.
 * Recursive init preserves CRITICAL_SECTION's recursive-acquire behaviour. */
void
mono_type_initialization_init (void)
{
- InitializeCriticalSection (&type_initialization_section);
+ mono_mutex_init_recursive (&type_initialization_section);
type_initialization_hash = g_hash_table_new (NULL, NULL);
blocked_thread_hash = g_hash_table_new (NULL, NULL);
- InitializeCriticalSection (&ldstr_section);
+ mono_mutex_init_recursive (&ldstr_section);
}
void
/* This is causing race conditions with
* mono_release_type_locks
*/
- DeleteCriticalSection (&type_initialization_section);
+ mono_mutex_destroy (&type_initialization_section);
g_hash_table_destroy (type_initialization_hash);
type_initialization_hash = NULL;
#endif
- DeleteCriticalSection (&ldstr_section);
+ mono_mutex_destroy (&ldstr_section);
g_hash_table_destroy (blocked_thread_hash);
blocked_thread_hash = NULL;
}
}
lock = g_malloc (sizeof(TypeInitializationLock));
- InitializeCriticalSection (&lock->initialization_section);
+ mono_mutex_init_recursive (&lock->initialization_section);
lock->initializing_tid = tid;
lock->waiting_count = 1;
lock->done = FALSE;
/* grab the vtable lock while this thread still owns type_initialization_section */
- EnterCriticalSection (&lock->initialization_section);
+ mono_mutex_lock (&lock->initialization_section);
g_hash_table_insert (type_initialization_hash, vtable, lock);
do_initialization = 1;
} else {
if (last_domain)
mono_domain_set (last_domain, TRUE);
lock->done = TRUE;
- LeaveCriticalSection (&lock->initialization_section);
+ mono_mutex_unlock (&lock->initialization_section);
} else {
/* this just blocks until the initializing thread is done */
- EnterCriticalSection (&lock->initialization_section);
- LeaveCriticalSection (&lock->initialization_section);
+ mono_mutex_lock (&lock->initialization_section);
+ mono_mutex_unlock (&lock->initialization_section);
}
mono_type_initialization_lock ();
g_hash_table_remove (blocked_thread_hash, GUINT_TO_POINTER (tid));
--lock->waiting_count;
if (lock->waiting_count == 0) {
- DeleteCriticalSection (&lock->initialization_section);
+ mono_mutex_destroy (&lock->initialization_section);
g_hash_table_remove (type_initialization_hash, vtable);
g_free (lock);
}
* and get_type_init_exception_for_class () needs to be aware of this.
*/
vtable->init_failed = 1;
- LeaveCriticalSection (&lock->initialization_section);
+ mono_mutex_unlock (&lock->initialization_section);
--lock->waiting_count;
if (lock->waiting_count == 0) {
- DeleteCriticalSection (&lock->initialization_section);
+ mono_mutex_destroy (&lock->initialization_section);
g_free (lock);
return TRUE;
}
static ProfilerDesc *prof_list = NULL;
-#define mono_profiler_coverage_lock() EnterCriticalSection (&profiler_coverage_mutex)
-#define mono_profiler_coverage_unlock() LeaveCriticalSection (&profiler_coverage_mutex)
-static CRITICAL_SECTION profiler_coverage_mutex;
+#define mono_profiler_coverage_lock() mono_mutex_lock (&profiler_coverage_mutex)
+#define mono_profiler_coverage_unlock() mono_mutex_unlock (&profiler_coverage_mutex)
+static mono_mutex_t profiler_coverage_mutex;
/* this is directly accessible to other mono libs.
* It is the ORed value of all the profiler's events.
{
ProfilerDesc *desc = g_new0 (ProfilerDesc, 1);
if (!prof_list)
- InitializeCriticalSection (&profiler_coverage_mutex);
+ mono_mutex_init_recursive (&profiler_coverage_mutex);
desc->profiler = prof;
desc->shutdown_callback = callback;
desc->next = prof_list;
MonoObject*
mono_custom_attrs_get_attr_checked (MonoCustomAttrInfo *ainfo, MonoClass *attr_klass, MonoError *error) MONO_INTERNAL;
-#endif
\ No newline at end of file
+#endif
} \
-#endif
\ No newline at end of file
+#endif
};
typedef struct {
- CRITICAL_SECTION io_lock; /* access to sock_to_state */
+ mono_mutex_t io_lock; /* access to sock_to_state */
int inited; // 0 -> not initialized , 1->initializing, 2->initialized, 3->cleaned up
MonoGHashTable *sock_to_state;
static MonoClass *process_async_call_klass;
static GPtrArray *wsqs;
-CRITICAL_SECTION wsqs_lock;
+mono_mutex_t wsqs_lock;
static gboolean suspended;
/* Hooks */
/*
 * Tear down the socket I/O layer exactly once.
 * Only the 2 -> 3 transition (initialized -> cleaned up) does any work;
 * in every other state there is nothing to shut down, so the call is a
 * no-op.  The whole check-and-transition runs under io_lock so two
 * concurrent callers cannot both invoke the shutdown hook.
 */
static void
socket_io_cleanup (SocketIOData *data)
{
	mono_mutex_lock (&data->io_lock);
	/* 2 == fully initialized (see the inited field's state comment) */
	if (data->inited == 2) {
		/* flip the state first so re-entrant callers bail out above */
		data->inited = 3;
		data->shutdown (data->event_data);
	}
	mono_mutex_unlock (&data->io_lock);
}
static int
if (socket_io_data.inited == 0)
return;
- EnterCriticalSection (&socket_io_data.io_lock);
+ mono_mutex_lock (&socket_io_data.io_lock);
if (socket_io_data.sock_to_state == NULL) {
- LeaveCriticalSection (&socket_io_data.io_lock);
+ mono_mutex_unlock (&socket_io_data.io_lock);
return;
}
list = mono_g_hash_table_lookup (socket_io_data.sock_to_state, GINT_TO_POINTER (sock));
if (list)
mono_g_hash_table_remove (socket_io_data.sock_to_state, GINT_TO_POINTER (sock));
- LeaveCriticalSection (&socket_io_data.io_lock);
+ mono_mutex_unlock (&socket_io_data.io_lock);
while (list) {
state = (MonoSocketAsyncResult *) mono_mlist_get_data (list);
}
}
- EnterCriticalSection (&data->io_lock);
+ mono_mutex_lock (&data->io_lock);
data->sock_to_state = mono_g_hash_table_new_type (g_direct_hash, g_direct_equal, MONO_HASH_VALUE_GC);
#ifdef HAVE_EPOLL
data->event_system = EPOLL_BACKEND;
init_event_system (data);
mono_thread_create_internal (mono_get_root_domain (), data->wait, data, TRUE, SMALL_STACK);
- LeaveCriticalSection (&data->io_lock);
+ mono_mutex_unlock (&data->io_lock);
data->inited = 2;
threadpool_start_thread (&async_io_tp);
}
MONO_OBJECT_SETREF (state, ares, ares);
fd = GPOINTER_TO_INT (state->handle);
- EnterCriticalSection (&data->io_lock);
+ mono_mutex_lock (&data->io_lock);
if (data->sock_to_state == NULL) {
- LeaveCriticalSection (&data->io_lock);
+ mono_mutex_unlock (&data->io_lock);
return;
}
list = mono_g_hash_table_lookup (data->sock_to_state, GINT_TO_POINTER (fd));
g_print ("Queued: %d\n", (tp->tail - tp->head));
if (tp == &async_tp) {
int i;
- EnterCriticalSection (&wsqs_lock);
+ mono_mutex_lock (&wsqs_lock);
for (i = 0; i < wsqs->len; i++) {
g_print ("\tWSQ %d: %d\n", i, mono_wsq_count (g_ptr_array_index (wsqs, i)));
}
- LeaveCriticalSection (&wsqs_lock);
+ mono_mutex_unlock (&wsqs_lock);
} else {
g_print ("\tSockets: %d\n", mono_g_hash_table_size (socket_io_data.sock_to_state));
}
continue;
need_one = (mono_cq_count (tp->queue) > 0);
if (!need_one && !tp->is_io) {
- EnterCriticalSection (&wsqs_lock);
+ mono_mutex_lock (&wsqs_lock);
for (i = 0; wsqs != NULL && i < wsqs->len; i++) {
MonoWSQ *wsq;
wsq = g_ptr_array_index (wsqs, i);
break;
}
}
- LeaveCriticalSection (&wsqs_lock);
+ mono_mutex_unlock (&wsqs_lock);
}
if (need_one)
threadpool_start_thread (tp);
}
MONO_GC_REGISTER_ROOT_FIXED (socket_io_data.sock_to_state);
- InitializeCriticalSection (&socket_io_data.io_lock);
+ mono_mutex_init_recursive (&socket_io_data.io_lock);
if (g_getenv ("MONO_THREADS_PER_CPU") != NULL) {
threads_per_cpu = atoi (g_getenv ("MONO_THREADS_PER_CPU"));
if (threads_per_cpu < 1)
async_call_klass = mono_class_from_name (mono_defaults.corlib, "System", "MonoAsyncCall");
g_assert (async_call_klass);
- InitializeCriticalSection (&wsqs_lock);
+ mono_mutex_init_recursive (&wsqs_lock);
wsqs = g_ptr_array_sized_new (MAX (100 * cpu_count, thread_count));
#ifndef DISABLE_PERFCOUNTERS
}
if (wsqs) {
- EnterCriticalSection (&wsqs_lock);
+ mono_mutex_lock (&wsqs_lock);
mono_wsq_cleanup ();
if (wsqs)
g_ptr_array_free (wsqs, TRUE);
wsqs = NULL;
- LeaveCriticalSection (&wsqs_lock);
+ mono_mutex_unlock (&wsqs_lock);
MONO_SEM_DESTROY (&async_tp.new_job);
}
}
threadpool_clear_queue (&async_tp, domain);
threadpool_clear_queue (&async_io_tp, domain);
- EnterCriticalSection (&socket_io_data.io_lock);
+ mono_mutex_lock (&socket_io_data.io_lock);
if (socket_io_data.sock_to_state)
mono_g_hash_table_foreach_remove (socket_io_data.sock_to_state, remove_sockstate_for_domain, domain);
- LeaveCriticalSection (&socket_io_data.io_lock);
+ mono_mutex_unlock (&socket_io_data.io_lock);
/*
* There might be some threads out that could be about to execute stuff from the given domain.
int i;
MonoWSQ *wsq;
- EnterCriticalSection (&wsqs_lock);
+ mono_mutex_lock (&wsqs_lock);
wsq = mono_wsq_create ();
if (wsqs == NULL) {
- LeaveCriticalSection (&wsqs_lock);
+ mono_mutex_unlock (&wsqs_lock);
return NULL;
}
for (i = 0; i < wsqs->len; i++) {
if (g_ptr_array_index (wsqs, i) == NULL) {
wsqs->pdata [i] = wsq;
- LeaveCriticalSection (&wsqs_lock);
+ mono_mutex_unlock (&wsqs_lock);
return wsq;
}
}
g_ptr_array_add (wsqs, wsq);
- LeaveCriticalSection (&wsqs_lock);
+ mono_mutex_unlock (&wsqs_lock);
return wsq;
}
if (wsq == NULL)
return;
- EnterCriticalSection (&wsqs_lock);
+ mono_mutex_lock (&wsqs_lock);
if (wsqs == NULL) {
- LeaveCriticalSection (&wsqs_lock);
+ mono_mutex_unlock (&wsqs_lock);
return;
}
g_ptr_array_remove_fast (wsqs, wsq);
}
}
mono_wsq_destroy (wsq);
- LeaveCriticalSection (&wsqs_lock);
+ mono_mutex_unlock (&wsqs_lock);
}
static void
if (mono_runtime_is_shutting_down ())
return;
- EnterCriticalSection (&wsqs_lock);
+ mono_mutex_lock (&wsqs_lock);
for (i = 0; wsqs != NULL && i < wsqs->len; i++) {
MonoWSQ *wsq;
continue;
mono_wsq_try_steal (wsqs->pdata [i], data, ms);
if (*data != NULL) {
- LeaveCriticalSection (&wsqs_lock);
+ mono_mutex_unlock (&wsqs_lock);
return;
}
}
- LeaveCriticalSection (&wsqs_lock);
+ mono_mutex_unlock (&wsqs_lock);
ms += 10;
} while (retry && ms < 11);
}
#define UICULTURES_START_IDX NUM_CACHED_CULTURES
/* Controls access to the 'threads' hash table */
-#define mono_threads_lock() EnterCriticalSection (&threads_mutex)
-#define mono_threads_unlock() LeaveCriticalSection (&threads_mutex)
-static CRITICAL_SECTION threads_mutex;
+#define mono_threads_lock() mono_mutex_lock (&threads_mutex)
+#define mono_threads_unlock() mono_mutex_unlock (&threads_mutex)
+static mono_mutex_t threads_mutex;
/* Controls access to context static data */
-#define mono_contexts_lock() EnterCriticalSection (&contexts_mutex)
-#define mono_contexts_unlock() LeaveCriticalSection (&contexts_mutex)
-static CRITICAL_SECTION contexts_mutex;
+#define mono_contexts_lock() mono_mutex_lock (&contexts_mutex)
+#define mono_contexts_unlock() mono_mutex_unlock (&contexts_mutex)
+static mono_mutex_t contexts_mutex;
/* Controls access to the 'joinable_threads' hash table */
-#define joinable_threads_lock() EnterCriticalSection (&joinable_threads_mutex)
-#define joinable_threads_unlock() LeaveCriticalSection (&joinable_threads_mutex)
-static CRITICAL_SECTION joinable_threads_mutex;
+#define joinable_threads_lock() mono_mutex_lock (&joinable_threads_mutex)
+#define joinable_threads_unlock() mono_mutex_unlock (&joinable_threads_mutex)
+static mono_mutex_t joinable_threads_mutex;
/* Holds current status of static data heap */
static StaticDataInfo thread_static_info;
static void ref_stack_destroy (gpointer rs);
/* Spin lock for InterlockedXXX 64 bit functions */
-#define mono_interlocked_lock() EnterCriticalSection (&interlocked_mutex)
-#define mono_interlocked_unlock() LeaveCriticalSection (&interlocked_mutex)
-static CRITICAL_SECTION interlocked_mutex;
+#define mono_interlocked_lock() mono_mutex_lock (&interlocked_mutex)
+#define mono_interlocked_unlock() mono_mutex_unlock (&interlocked_mutex)
+static mono_mutex_t interlocked_mutex;
/* global count of thread interruptions requested */
static gint32 thread_interruption_requested = 0;
/*
 * Lazily install thread->synch_cs.
 * Multiple threads may race to initialize it: each racer builds its own
 * mutex and tries to publish it with a single compare-and-swap on the
 * field; exactly one CAS succeeds and the losers destroy their private
 * copy.  The mutex is created recursive to preserve the semantics of
 * the Win32 CRITICAL_SECTION this code was ported from.
 */
static void ensure_synch_cs_set (MonoInternalThread *thread)
{
	mono_mutex_t *new_cs;

	/* fast path: someone already published a mutex */
	if (thread->synch_cs != NULL)
		return;

	new_cs = g_new0 (mono_mutex_t, 1);
	mono_mutex_init_recursive (new_cs);

	/* publish: NULL -> new_cs succeeds for exactly one racer */
	if (InterlockedCompareExchangePointer ((gpointer *)&thread->synch_cs, new_cs, NULL) != NULL) {
		/* lost the race — another thread installed its mutex first */
		mono_mutex_destroy (new_cs);
		g_free (new_cs);
	}
}
ensure_synch_cs_set (thread);
g_assert (thread->synch_cs);
- EnterCriticalSection (thread->synch_cs);
+ mono_mutex_lock (thread->synch_cs);
}
/* Release the per-thread synchronization mutex taken by lock_thread (). */
static inline void
unlock_thread (MonoInternalThread *thread)
{
	mono_mutex_unlock (thread->synch_cs);
}
/*
vt = mono_class_vtable (mono_get_root_domain (), mono_defaults.internal_thread_class);
thread = (MonoInternalThread*)mono_gc_alloc_mature (vt);
- thread->synch_cs = g_new0 (CRITICAL_SECTION, 1);
- InitializeCriticalSection (thread->synch_cs);
+ thread->synch_cs = g_new0 (mono_mutex_t, 1);
+ mono_mutex_init_recursive (thread->synch_cs);
thread->apartment_state = ThreadApartmentState_Unknown;
thread->managed_id = get_next_managed_thread_id ();
CloseHandle (thread);
if (this->synch_cs) {
- CRITICAL_SECTION *synch_cs = this->synch_cs;
+ mono_mutex_t *synch_cs = this->synch_cs;
this->synch_cs = NULL;
- DeleteCriticalSection (synch_cs);
+ mono_mutex_destroy (synch_cs);
g_free (synch_cs);
}
void mono_thread_init (MonoThreadStartCB start_cb,
MonoThreadAttachCB attach_cb)
{
- InitializeCriticalSection(&threads_mutex);
- InitializeCriticalSection(&interlocked_mutex);
- InitializeCriticalSection(&contexts_mutex);
- InitializeCriticalSection(&joinable_threads_mutex);
+ mono_mutex_init_recursive(&threads_mutex);
+ mono_mutex_init_recursive(&interlocked_mutex);
+ mono_mutex_init_recursive(&contexts_mutex);
+ mono_mutex_init_recursive(&joinable_threads_mutex);
background_change_event = CreateEvent (NULL, TRUE, FALSE, NULL);
g_assert(background_change_event != NULL);
* critical sections can be locked when mono_thread_cleanup is
* called.
*/
- DeleteCriticalSection (&threads_mutex);
- DeleteCriticalSection (&interlocked_mutex);
- DeleteCriticalSection (&contexts_mutex);
- DeleteCriticalSection (&delayed_free_table_mutex);
- DeleteCriticalSection (&small_id_mutex);
+ mono_mutex_destroy (&threads_mutex);
+ mono_mutex_destroy (&interlocked_mutex);
+ mono_mutex_destroy (&contexts_mutex);
+ mono_mutex_destroy (&delayed_free_table_mutex);
+ mono_mutex_destroy (&small_id_mutex);
CloseHandle (background_change_event);
#endif
}
}
}
- LeaveCriticalSection (&socket_io_data->io_lock);
+ mono_mutex_unlock (&socket_io_data->io_lock);
}
static void
return;
}
- EnterCriticalSection (&socket_io_data->io_lock);
+ mono_mutex_lock (&socket_io_data->io_lock);
if (socket_io_data->inited == 3) {
g_free (events);
- LeaveCriticalSection (&socket_io_data->io_lock);
+ mono_mutex_unlock (&socket_io_data->io_lock);
return; /* cleanup called */
}
epoll_ctl (epollfd, EPOLL_CTL_DEL, fd, evt);
}
}
- LeaveCriticalSection (&socket_io_data->io_lock);
+ mono_mutex_unlock (&socket_io_data->io_lock);
threadpool_append_jobs (&async_io_tp, (MonoObject **) async_results, nresults);
mono_gc_bzero_aligned (async_results, sizeof (gpointer) * nresults);
}
EV_SET (&evt, fd, EVFILT_WRITE, EV_ADD | EV_ENABLE | EV_ONESHOT, 0, 0, 0);
kevent_change (data->fd, &evt, "ADD write");
}
- LeaveCriticalSection (&socket_io_data->io_lock);
+ mono_mutex_unlock (&socket_io_data->io_lock);
}
static void
return;
}
- EnterCriticalSection (&socket_io_data->io_lock);
+ mono_mutex_lock (&socket_io_data->io_lock);
if (socket_io_data->inited == 3) {
g_free (events);
- LeaveCriticalSection (&socket_io_data->io_lock);
+ mono_mutex_unlock (&socket_io_data->io_lock);
return; /* cleanup called */
}
mono_g_hash_table_remove (socket_io_data->sock_to_state, GINT_TO_POINTER (fd));
}
}
- LeaveCriticalSection (&socket_io_data->io_lock);
+ mono_mutex_unlock (&socket_io_data->io_lock);
threadpool_append_jobs (&async_io_tp, (MonoObject **) async_results, nresults);
mono_gc_bzero_aligned (async_results, sizeof (gpointer) * nresults);
}
socket_io_data = p;
data = socket_io_data->event_data;
- LeaveCriticalSection (&socket_io_data->io_lock);
+ mono_mutex_unlock (&socket_io_data->io_lock);
MONO_SEM_WAIT (&data->new_sem);
INIT_POLLFD (&data->newpfd, GPOINTER_TO_INT (fd), events);
if (nsock == 0)
continue;
- EnterCriticalSection (&socket_io_data->io_lock);
+ mono_mutex_lock (&socket_io_data->io_lock);
if (socket_io_data->inited == 3) {
g_free (pfds);
mono_ptr_array_destroy (async_results);
- LeaveCriticalSection (&socket_io_data->io_lock);
+ mono_mutex_unlock (&socket_io_data->io_lock);
return; /* cleanup called */
}
maxfd--;
}
}
- LeaveCriticalSection (&socket_io_data->io_lock);
+ mono_mutex_unlock (&socket_io_data->io_lock);
threadpool_append_jobs (&async_io_tp, (MonoObject **) async_results.data, nresults);
mono_ptr_array_clear (async_results);
}
info->jit_trampoline_hash = g_hash_table_new (mono_aligned_addr_hash, NULL);
info->delegate_trampoline_hash = g_hash_table_new (class_method_pair_hash, class_method_pair_equal);
info->llvm_vcall_trampoline_hash = g_hash_table_new (mono_aligned_addr_hash, NULL);
-#ifdef HOST_WIN32
- // FIXME:
info->runtime_invoke_hash = mono_conc_hashtable_new_full (&domain->lock, mono_aligned_addr_hash, NULL, NULL, runtime_invoke_info_free);
-#else
- info->runtime_invoke_hash = mono_conc_hashtable_new_full (&domain->lock.mutex, mono_aligned_addr_hash, NULL, NULL, runtime_invoke_info_free);
-#endif
info->seq_points = g_hash_table_new_full (mono_aligned_addr_hash, NULL, NULL, seq_point_info_free);
info->arch_seq_points = g_hash_table_new (mono_aligned_addr_hash, NULL);
info->jump_target_hash = g_hash_table_new (NULL, NULL);
mono_mutex_destroy (&jit_mutex);
- DeleteCriticalSection (&mono_delegate_section);
+ mono_mutex_destroy (&mono_delegate_section);
mono_code_manager_cleanup ();
static MonoLockFreeArrayQueue delayed_free_queue = MONO_LOCK_FREE_ARRAY_QUEUE_INIT (sizeof (DelayedFreeItem));
/* The table for small ID assignment */
-static CRITICAL_SECTION small_id_mutex;
+static mono_mutex_t small_id_mutex;
static int small_id_next;
static int highest_small_id = -1;
static MonoBitSet *small_id_table;
{
int i, id = -1;
- EnterCriticalSection (&small_id_mutex);
+ mono_mutex_lock (&small_id_mutex);
if (!small_id_table)
small_id_table = mono_bitset_new (1, 0);
mono_memory_write_barrier ();
}
- LeaveCriticalSection (&small_id_mutex);
+ mono_mutex_unlock (&small_id_mutex);
return id;
}
mono_thread_small_id_free (int id)
{
/* MonoBitSet operations are not atomic. */
- EnterCriticalSection (&small_id_mutex);
+ mono_mutex_lock (&small_id_mutex);
g_assert (id >= 0 && id < small_id_table->size);
g_assert (mono_bitset_test_fast (small_id_table, id));
mono_bitset_clear_fast (small_id_table, id);
- LeaveCriticalSection (&small_id_mutex);
+ mono_mutex_unlock (&small_id_mutex);
}
static gboolean
{
int i;
- InitializeCriticalSection(&small_id_mutex);
+ mono_mutex_init_recursive(&small_id_mutex);
mono_counters_register ("Hazardous pointers", MONO_COUNTER_JIT | MONO_COUNTER_INT, &hazardous_pointer_count);
for (i = 0; i < HAZARD_TABLE_OVERFLOW; ++i) {
#define VALLOC_FREELIST_SIZE 16
-static CRITICAL_SECTION valloc_mutex;
+static mono_mutex_t valloc_mutex;
static GHashTable *valloc_freelists;
static void*
GSList *freelist;
if (!valloc_freelists) {
- InitializeCriticalSection (&valloc_mutex);
+ mono_mutex_init_recursive (&valloc_mutex);
valloc_freelists = g_hash_table_new (NULL, NULL);
}
/*
* Keep a small freelist of memory blocks to decrease pressure on the kernel memory subsystem to avoid #3321.
*/
- EnterCriticalSection (&valloc_mutex);
+ mono_mutex_lock (&valloc_mutex);
freelist = g_hash_table_lookup (valloc_freelists, GUINT_TO_POINTER (size));
if (freelist) {
ptr = freelist->data;
if (!ptr && preferred)
ptr = mono_valloc (NULL, size, MONO_PROT_RWX | ARCH_MAP_FLAGS);
}
- LeaveCriticalSection (&valloc_mutex);
+ mono_mutex_unlock (&valloc_mutex);
return ptr;
}
{
GSList *freelist;
- EnterCriticalSection (&valloc_mutex);
+ mono_mutex_lock (&valloc_mutex);
freelist = g_hash_table_lookup (valloc_freelists, GUINT_TO_POINTER (size));
if (!freelist || g_slist_length (freelist) < VALLOC_FREELIST_SIZE) {
freelist = g_slist_prepend (freelist, ptr);
} else {
mono_vfree (ptr, size);
}
- LeaveCriticalSection (&valloc_mutex);
+ mono_mutex_unlock (&valloc_mutex);
}
static void