#include <mono/metadata/runtime.h>
#include <mono/metadata/handle.h>
#include <mono/metadata/sgen-toggleref.h>
+#include <mono/metadata/w32handle.h>
#include <mono/utils/atomic.h>
#include <mono/utils/mono-logger-internals.h>
#include <mono/utils/mono-memory-model.h>
#include <mono/utils/gc_wrapper.h>
#include <mono/utils/mono-os-mutex.h>
#include <mono/utils/mono-counters.h>
+#include <mono/utils/mono-compiler.h>
#if HAVE_BOEHM_GC
static void
boehm_thread_unregister (MonoThreadInfo *p);
static void
+boehm_thread_detach (MonoThreadInfo *p);
+static void
register_test_toggleref_callback (void);
#define BOEHM_GC_BIT_FINALIZER_AWARE 1
mono_trace (G_LOG_LEVEL_WARNING, MONO_TRACE_GC, msg, (unsigned long)arg);
}
+static void on_gc_notification (GC_EventType event);
+static void on_gc_heap_resize (size_t new_size);
+
void
mono_gc_base_init (void)
{
mono_counters_init ();
+#ifndef HOST_WIN32
+ mono_w32handle_init ();
+#endif
+
/*
* Handle the case when we are called from a thread different from the main thread,
* confusing libgc.
memset (&cb, 0, sizeof (cb));
cb.thread_register = boehm_thread_register;
cb.thread_unregister = boehm_thread_unregister;
+ cb.thread_detach = boehm_thread_detach;
cb.mono_method_is_critical = (gboolean (*)(void *))mono_runtime_is_critical_method;
mono_threads_init (&cb, sizeof (MonoThreadInfo));
mono_thread_info_attach (&dummy);
- mono_gc_enable_events ();
-
- MONO_GC_REGISTER_ROOT_FIXED (gc_handles [HANDLE_NORMAL].entries, MONO_ROOT_SOURCE_GC_HANDLE, "gc handles table");
- MONO_GC_REGISTER_ROOT_FIXED (gc_handles [HANDLE_PINNED].entries, MONO_ROOT_SOURCE_GC_HANDLE, "gc handles table");
+ GC_set_on_collection_event (on_gc_notification);
+ GC_on_heap_resize = on_gc_heap_resize;
gc_initialized = TRUE;
}
mono_threads_add_joinable_thread ((gpointer)tid);
}
+/*
+ * Thread-detach callback installed into MonoThreadInfoCallbacks
+ * (cb.thread_detach in mono_gc_base_init).  Invoked by the threading
+ * machinery when a thread detaches from the runtime.
+ */
+static void
+boehm_thread_detach (MonoThreadInfo *p)
+{
+	/* Only tear down runtime thread state if this thread actually attached
+	 * a MonoInternalThread; otherwise there is nothing to detach. */
+	if (mono_thread_internal_current_is_attached ())
+		mono_thread_detach_internal (mono_thread_internal_current ());
+}
+
gboolean
mono_object_is_alive (MonoObject* o)
{
switch (e) {
case MONO_GC_EVENT_PRE_STOP_WORLD:
MONO_GC_WORLD_STOP_BEGIN ();
- mono_thread_info_suspend_lock ();
break;
case MONO_GC_EVENT_POST_STOP_WORLD:
case MONO_GC_EVENT_POST_START_WORLD:
MONO_GC_WORLD_RESTART_END (1);
- mono_thread_info_suspend_unlock ();
break;
case MONO_GC_EVENT_START:
}
mono_profiler_gc_event (e, 0);
+
+ switch (e) {
+ case MONO_GC_EVENT_PRE_STOP_WORLD:
+ mono_thread_info_suspend_lock ();
+ mono_profiler_gc_event (MONO_GC_EVENT_PRE_STOP_WORLD_LOCKED, 0);
+ break;
+ case MONO_GC_EVENT_POST_START_WORLD:
+ mono_thread_info_suspend_unlock ();
+ mono_profiler_gc_event (MONO_GC_EVENT_POST_START_WORLD_UNLOCKED, 0);
+ break;
+ default:
+ break;
+ }
}
+
static void
on_gc_heap_resize (size_t new_size)
mono_profiler_gc_heap_resize (new_size);
}
-void
-mono_gc_enable_events (void)
-{
- GC_set_on_collection_event (on_gc_notification);
- GC_on_heap_resize = on_gc_heap_resize;
-}
-
-static gboolean alloc_events = FALSE;
-
-void
-mono_gc_enable_alloc_events (void)
-{
- alloc_events = TRUE;
-}
-
int
mono_gc_register_root (char *start, size_t size, void *descr, MonoGCRootSource source, const char *msg)
{
return TRUE;
}
+/*
+ * Register a GC root for memory written through a write barrier.
+ * The Boehm backend has no separate write-barrier root mechanism, so this
+ * simply forwards to mono_gc_register_root () with the same arguments.
+ * Returns whatever mono_gc_register_root () returns.
+ */
+int
+mono_gc_register_root_wbarrier (char *start, size_t size, MonoGCDescriptor descr, MonoGCRootSource source, const char *msg)
+{
+	return mono_gc_register_root (start, size, descr, source, msg);
+}
+
void
mono_gc_deregister_root (char* addr)
{
void*
mono_gc_alloc_fixed (size_t size, void *descr, MonoGCRootSource source, const char *msg)
{
-	/* To help track down typed allocation bugs */
-	/*
-	static int count;
-	count ++;
-	if (count == atoi (g_getenv ("COUNT2")))
-		printf ("HIT!\n");
-	if (count > atoi (g_getenv ("COUNT2")))
-		return GC_MALLOC (size);
-	*/
-
-	if (descr)
-		return GC_MALLOC_EXPLICITLY_TYPED (size, (GC_descr)descr);
-	else
-		return GC_MALLOC (size);
+	/*
+	 * Fixed allocations are now uncollectable: the block is kept alive
+	 * without being registered as a root and must be released explicitly
+	 * with mono_gc_free_fixed ().  NOTE(review): descr is ignored here, so
+	 * the memory is scanned conservatively instead of by the typed
+	 * descriptor the previous GC_MALLOC_EXPLICITLY_TYPED path used —
+	 * confirm this is intended.
+	 */
+	return GC_MALLOC_UNCOLLECTABLE (size);
}
void
mono_gc_free_fixed (void* addr)
{
+ GC_FREE (addr);
}
void *
obj->vtable = vtable;
}
- if (G_UNLIKELY (alloc_events))
+ if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_ALLOCATIONS))
mono_profiler_allocation (obj);
return obj;
obj->max_length = max_length;
- if (G_UNLIKELY (alloc_events))
+ if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_ALLOCATIONS))
mono_profiler_allocation (&obj->obj);
return obj;
if (bounds_size)
obj->bounds = (MonoArrayBounds *) ((char *) obj + size - bounds_size);
- if (G_UNLIKELY (alloc_events))
+ if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_ALLOCATIONS))
mono_profiler_allocation (&obj->obj);
return obj;
obj->length = len;
obj->chars [len] = 0;
- if (G_UNLIKELY (alloc_events))
+ if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_ALLOCATIONS))
mono_profiler_allocation (&obj->object);
return obj;
return 0;
}
+/*
+ * Returns TRUE when the GC has finalizers queued and ready to be invoked.
+ * NOTE(review): return type changed gboolean -> MonoBoolean, presumably to
+ * match the managed/icall signature exactly — verify all native callers.
+ */
-gboolean
+MonoBoolean
mono_gc_pending_finalizers (void)
{
	return GC_should_invoke_finalizers ();
}
#if defined(USE_COMPILER_TLS) && defined(__linux__) && (defined(__i386__) || defined(__x86_64__))
-extern __thread MONO_TLS_FAST void* GC_thread_tls;
+extern __thread void* GC_thread_tls;
#include "metadata-internals.h"
static int
const char *name = NULL;
WrapperInfo *info;
+ g_assert_not_reached ();
+
if (atype == ATYPE_FREEPTR) {
name = slowpath ? "SlowAllocPtrfree" : "AllocPtrfree";
} else if (atype == ATYPE_FREEPTR_FOR_BOX) {
static MonoMethod* alloc_method_cache [ATYPE_NUM];
static MonoMethod* slowpath_alloc_method_cache [ATYPE_NUM];
-static G_GNUC_UNUSED gboolean
+gboolean
mono_gc_is_critical_method (MonoMethod *method)
{
int i;
MonoMethod*
mono_gc_get_managed_allocator (MonoClass *klass, gboolean for_box, gboolean known_instance_size)
{
- int offset = -1;
int atype;
- MONO_THREAD_VAR_OFFSET (GC_thread_tls, offset);
- /*g_print ("thread tls: %d\n", offset);*/
- if (offset == -1)
- return NULL;
+	/*
+	 * The TLS implementation changed: thread-local access now goes through
+	 * native TLS getters/setters, so the offset-based fast path this
+	 * managed allocator relied on no longer exists.  Disable the managed
+	 * allocator until it is ported to the new TLS scheme (if ever needed).
+	 */
+ return NULL;
+
if (!SMALL_ENOUGH (klass->instance_size))
return NULL;
if (mono_class_has_finalizer (klass) || mono_class_is_marshalbyref (klass))
MonoMethod*
mono_gc_get_managed_allocator_by_type (int atype, ManagedAllocatorVariant variant)
{
- int offset = -1;
MonoMethod *res;
gboolean slowpath = variant != MANAGED_ALLOCATOR_REGULAR;
MonoMethod **cache = slowpath ? slowpath_alloc_method_cache : alloc_method_cache;
- MONO_THREAD_VAR_OFFSET (GC_thread_tls, offset);
- mono_tls_key_set_offset (TLS_KEY_BOEHM_GC_THREAD, offset);
+ return NULL;
res = cache [atype];
if (res)
return res;
- res = create_allocator (atype, TLS_KEY_BOEHM_GC_THREAD, slowpath);
+ res = create_allocator (atype, -1, slowpath);
mono_os_mutex_lock (&mono_gc_lock);
if (cache [atype]) {
mono_free_method (res);
#else
-static G_GNUC_UNUSED gboolean
+gboolean
mono_gc_is_critical_method (MonoMethod *method)
{
return FALSE;
return NULL;
}
-void
-mono_gc_set_current_thread_appdomain (MonoDomain *domain)
-{
-}
-
gboolean
mono_gc_precise_stack_mark_enabled (void)
{
return NULL;
}
+/*
+ * Accept a GC parameter option string.  No-op stub: the Boehm backend does
+ * not consume runtime GC parameter strings.
+ */
+void
+mono_gc_params_set (const char* options)
+{
+}
+
+/*
+ * Accept a GC debug option string.  No-op stub: the Boehm backend does not
+ * consume runtime GC debug strings.
+ */
+void
+mono_gc_debug_set (const char* options)
+{
+}
+
void
mono_gc_conservatively_scan_area (void *start, void *end)
{
}
}
+#else
+MONO_EMPTY_SOURCE_FILE (boehm_gc);
#endif /* no Boehm GC */