#include "sgen/sgen-client.h"
#include "sgen/sgen-cardtable.h"
#include "sgen/sgen-pinning.h"
+#include "sgen/sgen-thread-pool.h"
#include "metadata/marshal.h"
#include "metadata/method-builder.h"
#include "metadata/abi-details.h"
#include "utils/mono-logger-internals.h"
#include "utils/mono-threads-coop.h"
#include "sgen/sgen-thread-pool.h"
+#include "utils/mono-threads.h"
+#include "metadata/w32handle.h"
#ifdef HEAVY_STATISTICS
static guint64 stat_wbarrier_set_arrayref = 0;
sgen_wbarrier_value_copy_bitmap (_dest, _src, size, bitmap);
}
+/*
+ * mono_gc_get_suspend_signal:
+ *
+ * Returns the signal number the thread-suspend machinery uses to stop
+ * threads, as reported by mono_threads_suspend_get_suspend_signal ().
+ */
+int
+mono_gc_get_suspend_signal (void)
+{
+ return mono_threads_suspend_get_suspend_signal ();
+}
+
+/*
+ * mono_gc_get_restart_signal:
+ *
+ * Returns the signal number used to resume previously suspended threads,
+ * as reported by mono_threads_suspend_get_restart_signal ().
+ */
+int
+mono_gc_get_restart_signal (void)
+{
+ return mono_threads_suspend_get_restart_signal ();
+}
+
static MonoMethod *write_barrier_conc_method;
static MonoMethod *write_barrier_noconc_method;
+/*
+ * sgen_is_critical_method:
+ *
+ * Only the managed allocator methods are considered critical now; the
+ * write-barrier wrapper methods were dropped from the check (see the
+ * removed condition below).
+ */
gboolean
sgen_is_critical_method (MonoMethod *method)
{
- return (method == write_barrier_conc_method || method == write_barrier_noconc_method || sgen_is_managed_allocator (method));
+ return sgen_is_managed_allocator (method);
}
+/*
+ * sgen_has_critical_method:
+ *
+ * Mirrors sgen_is_critical_method (): TRUE only when a managed allocator
+ * has been created; the write-barrier method checks were removed.
+ */
gboolean
sgen_has_critical_method (void)
{
- return write_barrier_conc_method || write_barrier_noconc_method || sgen_has_managed_allocator ();
+ return sgen_has_managed_allocator ();
+}
+
+/*
+ * ip_in_critical_region:
+ *
+ * Returns TRUE if IP falls inside a JIT-compiled method that is critical,
+ * i.e. one for which mono_runtime_is_critical_method () or
+ * sgen_is_critical_method () is TRUE.  Returns FALSE when no jit info is
+ * found for IP in DOMAIN.
+ */
+static gboolean
+ip_in_critical_region (MonoDomain *domain, gpointer ip)
+{
+ MonoJitInfo *ji;
+ MonoMethod *method;
+
+ /*
+ * We pass false for 'try_aot' so this becomes async safe.
+ * It won't find aot methods whose jit info is not yet loaded,
+ * so we preload their jit info in the JIT.
+ */
+ ji = mono_jit_info_table_find_internal (domain, ip, FALSE, FALSE);
+ if (!ji)
+ return FALSE;
+
+ method = mono_jit_info_get_method (ji);
+
+ return mono_runtime_is_critical_method (method) || sgen_is_critical_method (method);
+}
+
+/*
+ * mono_gc_is_critical_method:
+ *
+ * Public wrapper around sgen_is_critical_method ().
+ */
+gboolean
+mono_gc_is_critical_method (MonoMethod *method)
+{
+ return sgen_is_critical_method (method);
}
#ifndef DISABLE_JIT
return sgen_gc_invoke_finalizers ();
}
-gboolean
+MonoBoolean
mono_gc_pending_finalizers (void)
{
return sgen_have_pending_finalizers ();
* Appdomain handling
*/
-void
-mono_gc_set_current_thread_appdomain (MonoDomain *domain)
-{
- SgenThreadInfo *info = mono_thread_info_current ();
-
- /* Could be called from sgen_thread_unregister () with a NULL info */
- if (domain) {
- g_assert (info);
- info->client_info.stopped_domain = domain;
- }
-}
-
static gboolean
need_remove_object_for_domain (GCObject *start, MonoDomain *domain)
{
* Allocation
*/
-static gboolean alloc_events = FALSE;
-
-void
-mono_gc_enable_alloc_events (void)
-{
- alloc_events = TRUE;
-}
-
void*
mono_gc_alloc_obj (MonoVTable *vtable, size_t size)
{
MonoObject *obj = sgen_alloc_obj (vtable, size);
- if (G_UNLIKELY (alloc_events)) {
+ if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_ALLOCATIONS)) {
if (obj)
mono_profiler_allocation (obj);
}
{
MonoObject *obj = sgen_alloc_obj_pinned (vtable, size);
- if (G_UNLIKELY (alloc_events)) {
+ if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_ALLOCATIONS)) {
if (obj)
mono_profiler_allocation (obj);
}
{
MonoObject *obj = sgen_alloc_obj_mature (vtable, size);
- if (G_UNLIKELY (alloc_events)) {
+ if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_ALLOCATIONS)) {
if (obj)
mono_profiler_allocation (obj);
}
mono_gc_alloc_fixed (size_t size, MonoGCDescriptor descr, MonoGCRootSource source, const char *msg)
{
/* FIXME: do a single allocation */
- void *res = calloc (1, size);
+ void *res = g_calloc (1, size);
if (!res)
return NULL;
if (!mono_gc_register_root ((char *)res, size, descr, source, msg)) {
- free (res);
+ g_free (res);
res = NULL;
}
return res;
mono_gc_free_fixed (void* addr)
{
mono_gc_deregister_root ((char *)addr);
- free (addr);
+ g_free (addr);
}
/*
#ifdef MANAGED_ALLOCATION
-#ifdef HAVE_KW_THREAD
-
-#define EMIT_TLS_ACCESS_VAR(_mb, _var) /* nothing to do */
-
-#define EMIT_TLS_ACCESS_IN_CRITICAL_REGION_ADDR(mb, _var) \
- do { \
- mono_mb_emit_byte ((mb), MONO_CUSTOM_PREFIX); \
- mono_mb_emit_byte ((mb), CEE_MONO_TLS); \
- mono_mb_emit_i4 ((mb), TLS_KEY_SGEN_IN_CRITICAL_REGION_ADDR); \
- } while (0)
-
-#define EMIT_TLS_ACCESS_NEXT_ADDR(mb, _var) do { \
- mono_mb_emit_byte ((mb), MONO_CUSTOM_PREFIX); \
- mono_mb_emit_byte ((mb), CEE_MONO_TLS); \
- mono_mb_emit_i4 ((mb), TLS_KEY_SGEN_TLAB_NEXT_ADDR); \
- } while (0)
-
-#define EMIT_TLS_ACCESS_TEMP_END(mb, _var) do { \
- mono_mb_emit_byte ((mb), MONO_CUSTOM_PREFIX); \
- mono_mb_emit_byte ((mb), CEE_MONO_TLS); \
- mono_mb_emit_i4 ((mb), TLS_KEY_SGEN_TLAB_TEMP_END); \
- } while (0)
-
-#else
-
-#if defined(TARGET_OSX) || defined(TARGET_WIN32) || defined(TARGET_ANDROID) || defined(TARGET_IOS)
+#if defined(HAVE_KW_THREAD) || defined(TARGET_OSX) || defined(TARGET_WIN32) || defined(TARGET_ANDROID) || defined(TARGET_IOS)
// Cache the SgenThreadInfo pointer in a local 'var'.
#define EMIT_TLS_ACCESS_VAR(mb, var) \
#define EMIT_TLS_ACCESS_NEXT_ADDR(mb, var) do { \
mono_mb_emit_ldloc ((mb), (var)); \
- mono_mb_emit_icon ((mb), MONO_STRUCT_OFFSET (SgenThreadInfo, tlab_next_addr)); \
+ mono_mb_emit_icon ((mb), MONO_STRUCT_OFFSET (SgenThreadInfo, tlab_next)); \
mono_mb_emit_byte ((mb), CEE_ADD); \
- mono_mb_emit_byte ((mb), CEE_LDIND_I); \
} while (0)
#define EMIT_TLS_ACCESS_TEMP_END(mb, var) do { \
#define EMIT_TLS_ACCESS_NEXT_ADDR(mb, _var) do { g_error ("sgen is not supported when using --with-tls=pthread.\n"); } while (0)
#define EMIT_TLS_ACCESS_TEMP_END(mb, _var) do { g_error ("sgen is not supported when using --with-tls=pthread.\n"); } while (0)
#define EMIT_TLS_ACCESS_IN_CRITICAL_REGION_ADDR(mb, _var) do { g_error ("sgen is not supported when using --with-tls=pthread.\n"); } while (0)
-#endif
#endif
static MonoMethod*
create_allocator (int atype, ManagedAllocatorVariant variant)
{
- int p_var, size_var, thread_var G_GNUC_UNUSED;
+ int p_var, size_var, real_size_var, thread_var G_GNUC_UNUSED;
gboolean slowpath = variant == MANAGED_ALLOCATOR_SLOW_PATH;
guint32 slowpath_branch, max_size_branch;
MonoMethodBuilder *mb;
goto done;
}
+ /*
+ * Tls access might call foreign code or code without jinfo. This can
+ * only happen if we are outside of the critical region.
+ */
EMIT_TLS_ACCESS_VAR (mb, thread_var);
size_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
mono_mb_emit_i4 (mb, MONO_MEMORY_BARRIER_NONE);
#endif
+ if (nursery_canaries_enabled ()) {
+ real_size_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
+ mono_mb_emit_ldloc (mb, size_var);
+ mono_mb_emit_stloc(mb, real_size_var);
+ }
+ else
+ real_size_var = size_var;
+
/* size += ALLOC_ALIGN - 1; */
mono_mb_emit_ldloc (mb, size_var);
mono_mb_emit_icon (mb, SGEN_ALLOC_ALIGN - 1);
mono_mb_emit_ldloc (mb, size_var);
mono_mb_emit_byte (mb, CEE_CONV_I);
mono_mb_emit_byte (mb, CEE_ADD);
+
+ if (nursery_canaries_enabled ()) {
+ mono_mb_emit_icon (mb, CANARY_SIZE);
+ mono_mb_emit_byte (mb, CEE_ADD);
+ }
mono_mb_emit_stloc (mb, new_next_var);
/* if (G_LIKELY (new_next < tlab_temp_end)) */
/* FIXME: mono_gc_alloc_obj takes a 'size_t' as an argument, not an int32 */
mono_mb_emit_ldarg (mb, 0);
- mono_mb_emit_ldloc (mb, size_var);
+ mono_mb_emit_ldloc (mb, real_size_var);
if (atype == ATYPE_NORMAL || atype == ATYPE_SMALL) {
mono_mb_emit_icall (mb, mono_gc_alloc_obj);
} else if (atype == ATYPE_VECTOR) {
mono_mb_emit_ldarg (mb, 0);
mono_mb_emit_byte (mb, CEE_STIND_I);
+ /* mark object end with nursery word */
+ if (nursery_canaries_enabled ()) {
+ mono_mb_emit_ldloc (mb, p_var);
+ mono_mb_emit_ldloc (mb, real_size_var);
+ mono_mb_emit_byte (mb, MONO_CEE_ADD);
+ mono_mb_emit_icon8 (mb, (mword) CANARY_STRING);
+ mono_mb_emit_icon (mb, CANARY_SIZE);
+ mono_mb_emit_byte (mb, MONO_CEE_PREFIX1);
+ mono_mb_emit_byte (mb, CEE_CPBLK);
+ }
+
if (atype == ATYPE_VECTOR) {
/* arr->max_length = max_length; */
mono_mb_emit_ldloc (mb, p_var);
MonoMethod *res;
MonoMethod **cache;
- if (!use_managed_allocator)
+ if (variant == MANAGED_ALLOCATOR_REGULAR && !use_managed_allocator)
return NULL;
- if (!mono_runtime_has_tls_get ())
+ if (variant == MANAGED_ALLOCATOR_REGULAR && !mono_runtime_has_tls_get ())
return NULL;
switch (variant) {
UNLOCK_GC;
done:
- if (G_UNLIKELY (alloc_events))
+ if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_ALLOCATIONS))
mono_profiler_allocation (&arr->obj);
SGEN_ASSERT (6, SGEN_ALIGN_UP (size) == SGEN_ALIGN_UP (sgen_client_par_object_get_size (vtable, (GCObject*)arr)), "Vector has incorrect size.");
UNLOCK_GC;
done:
- if (G_UNLIKELY (alloc_events))
+ if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_ALLOCATIONS))
mono_profiler_allocation (&arr->obj);
SGEN_ASSERT (6, SGEN_ALIGN_UP (size) == SGEN_ALIGN_UP (sgen_client_par_object_get_size (vtable, (GCObject*)arr)), "Array has incorrect size.");
UNLOCK_GC;
done:
- if (G_UNLIKELY (alloc_events))
+ if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_ALLOCATIONS))
mono_profiler_allocation (&str->object);
return str;
static void *moved_objects [MOVED_OBJECTS_NUM];
static int moved_objects_idx = 0;
+static SgenPointerQueue moved_objects_queue = SGEN_POINTER_QUEUE_INIT (INTERNAL_MEM_MOVED_OBJECT);
+
+/*
+ * mono_sgen_register_moved_object:
+ *
+ * Record an (obj, destination) move pair for the profiler.  On SGen worker
+ * threads the pair is pushed onto moved_objects_queue instead of being
+ * reported directly (see the comment below); on other threads it is
+ * appended to the moved_objects array, which is flushed to
+ * mono_profiler_gc_moves () in batches of MOVED_OBJECTS_NUM entries.
+ */
void
mono_sgen_register_moved_object (void *obj, void *destination)
{
- g_assert (mono_profiler_events & MONO_PROFILE_GC_MOVES);
+ /*
+ * This function can be called from SGen's worker threads. We want to try
+ * and avoid exposing those threads to the profiler API, so queue up move
+ * events and send them later when the main GC thread calls
+ * mono_sgen_gc_event_moves ().
+ *
+ * TODO: Once SGen has multiple worker threads, we need to switch to a
+ * lock-free data structure for the queue as multiple threads will be
+ * adding to it at the same time.
+ */
+ if (sgen_thread_pool_is_thread_pool_thread (mono_native_thread_id_get ())) {
+ /* Pushed in (obj, destination) order; the consumer pops them back in reverse. */
+ sgen_pointer_queue_add (&moved_objects_queue, obj);
+ sgen_pointer_queue_add (&moved_objects_queue, destination);
+ } else {
+ if (moved_objects_idx == MOVED_OBJECTS_NUM) {
+ mono_profiler_gc_moves (moved_objects, moved_objects_idx);
+ moved_objects_idx = 0;
+ }
- if (moved_objects_idx == MOVED_OBJECTS_NUM) {
- mono_profiler_gc_moves (moved_objects, moved_objects_idx);
- moved_objects_idx = 0;
+ moved_objects [moved_objects_idx++] = obj;
+ moved_objects [moved_objects_idx++] = destination;
}
- moved_objects [moved_objects_idx++] = obj;
- moved_objects [moved_objects_idx++] = destination;
}
void
mono_sgen_gc_event_moves (void)
{
+ while (!sgen_pointer_queue_is_empty (&moved_objects_queue)) {
+ void *dst = sgen_pointer_queue_pop (&moved_objects_queue);
+ void *src = sgen_pointer_queue_pop (&moved_objects_queue);
+
+ mono_sgen_register_moved_object (src, dst);
+ }
+
if (moved_objects_idx) {
mono_profiler_gc_moves (moved_objects, moved_objects_idx);
moved_objects_idx = 0;
#endif
info->client_info.skip = 0;
- info->client_info.stopped_ip = NULL;
- info->client_info.stopped_domain = NULL;
info->client_info.stack_start = NULL;
info->client_info.signal = 0;
#endif
- /* On win32, stack_start_limit should be 0, since the stack can grow dynamically */
mono_thread_info_get_stack_bounds (&staddr, &stsize);
if (staddr) {
-#ifndef HOST_WIN32
info->client_info.stack_start_limit = staddr;
-#endif
info->client_info.stack_end = staddr + stsize;
} else {
gsize stack_bottom = (gsize)stack_bottom_fallback;
UNLOCK_GC;
}
-static gboolean
-is_critical_method (MonoMethod *method)
-{
- return mono_runtime_is_critical_method (method) || sgen_is_critical_method (method);
-}
-
static gboolean
thread_in_critical_region (SgenThreadInfo *info)
{
* so we assume that if the domain is still registered, we can detach
* the thread
*/
- if (mono_domain_get ())
+ if (mono_thread_internal_current_is_attached ())
mono_thread_detach_internal (mono_thread_internal_current ());
}
return 1;
}
-void
-mono_gc_enable_events (void)
-{
-}
-
const char *
mono_gc_get_gc_name (void)
{
{
switch (type) {
case INTERNAL_MEM_EPHEMERON_LINK: return "ephemeron-link";
+ case INTERNAL_MEM_MOVED_OBJECT: return "moved-object";
default:
return NULL;
}
cb.thread_detach = sgen_thread_detach;
cb.thread_unregister = sgen_thread_unregister;
cb.thread_attach = sgen_thread_attach;
- cb.mono_method_is_critical = (gboolean (*)(void *))is_critical_method;
cb.mono_thread_in_critical_region = thread_in_critical_region;
+ cb.ip_in_critical_region = ip_in_critical_region;
mono_threads_init (&cb, sizeof (SgenThreadInfo));
}
#endif
- /*
- * This needs to happen before any internal allocations because
- * it inits the small id which is required for hazard pointer
- * operations.
- */
- sgen_os_init ();
-
mono_gc_register_thread (&dummy);
}
} else if (g_str_has_prefix (opt, "toggleref-test")) {
/* FIXME: This should probably in MONO_GC_DEBUG */
sgen_register_test_toggleref_callback ();
- } else {
+ } else if (!sgen_bridge_handle_gc_param (opt)) {
return FALSE;
}
return TRUE;
mono_counters_init ();
+#ifndef HOST_WIN32
+ mono_w32handle_init ();
+#endif
+
#ifdef HEAVY_STATISTICS
mono_counters_register ("los marked cards", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &los_marked_cards);
mono_counters_register ("los array cards scanned ", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &los_array_cards);
sgen_gc_init ();
- if (nursery_canaries_enabled ())
- sgen_set_use_managed_allocator (FALSE);
-
#if defined(HAVE_KW_THREAD)
/* This can happen with using libmonosgen.so */
- if (mono_tls_key_get_offset (TLS_KEY_SGEN_TLAB_NEXT_ADDR) == -1)
+ if (mono_tls_key_get_offset (TLS_KEY_SGEN_THREAD_INFO) == -1)
sgen_set_use_managed_allocator (FALSE);
#endif
mono_gc_base_cleanup (void)
{
sgen_thread_pool_shutdown ();
+
+ // We should have consumed any outstanding moves.
+ g_assert (sgen_pointer_queue_is_empty (&moved_objects_queue));
}
gboolean