#include "sgen/sgen-client.h"
#include "sgen/sgen-cardtable.h"
#include "sgen/sgen-pinning.h"
+#include "sgen/sgen-thread-pool.h"
#include "metadata/marshal.h"
#include "metadata/method-builder.h"
#include "metadata/abi-details.h"
#include "utils/mono-logger-internals.h"
#include "utils/mono-threads-coop.h"
#include "sgen/sgen-thread-pool.h"
+#include "utils/mono-threads.h"
#ifdef HEAVY_STATISTICS
static guint64 stat_wbarrier_set_arrayref = 0;
sgen_wbarrier_value_copy_bitmap (_dest, _src, size, bitmap);
}
+/*
+ * mono_gc_get_suspend_signal:
+ *
+ * Thin accessor returning the signal number the runtime's suspend
+ * machinery uses to stop threads for GC; simply forwards to
+ * mono_threads_suspend_get_suspend_signal ().
+ */
+int
+mono_gc_get_suspend_signal (void)
+{
+	return mono_threads_suspend_get_suspend_signal ();
+}
+
+/*
+ * mono_gc_get_restart_signal:
+ *
+ * Thin accessor returning the signal number used to resume threads
+ * suspended for GC; forwards to mono_threads_suspend_get_restart_signal ().
+ */
+int
+mono_gc_get_restart_signal (void)
+{
+	return mono_threads_suspend_get_restart_signal ();
+}
+
static MonoMethod *write_barrier_conc_method;
static MonoMethod *write_barrier_noconc_method;
+/*
+ * sgen_is_critical_method:
+ *
+ * Returns TRUE if METHOD must not be interrupted while running.  With
+ * this change the write barrier methods are no longer treated as
+ * critical; only the managed allocator methods remain so.
+ */
gboolean
sgen_is_critical_method (MonoMethod *method)
{
-	return (method == write_barrier_conc_method || method == write_barrier_noconc_method || sgen_is_managed_allocator (method));
+	return sgen_is_managed_allocator (method);
}
+/*
+ * sgen_has_critical_method:
+ *
+ * Returns TRUE if any critical method may exist.  Mirrors the change in
+ * sgen_is_critical_method (): the write barrier methods no longer count,
+ * so this reduces to whether a managed allocator has been created.
+ */
gboolean
sgen_has_critical_method (void)
{
-	return write_barrier_conc_method || write_barrier_noconc_method || sgen_has_managed_allocator ();
+	return sgen_has_managed_allocator ();
}
#ifndef DISABLE_JIT
return sgen_gc_invoke_finalizers ();
}
-gboolean
+MonoBoolean
mono_gc_pending_finalizers (void)
{
return sgen_have_pending_finalizers ();
- * @suspend is used for early termination of the enqueuing process.
*/
void
-mono_gc_finalize_domain (MonoDomain *domain, volatile gboolean *suspend)
+mono_gc_finalize_domain (MonoDomain *domain)
{
-	sgen_finalize_if (object_in_domain_predicate, domain, suspend);
+	sgen_finalize_if (object_in_domain_predicate, domain);
+}
+
+/*
+ * mono_gc_suspend_finalizers:
+ *
+ * Ask SGen to stop enqueuing finalizers.  This replaces the per-call
+ * 'suspend' flag that mono_gc_finalize_domain () used to take: callers
+ * now request early termination globally via this entry point.
+ */
+void
+mono_gc_suspend_finalizers (void)
+{
+	sgen_set_suspend_finalizers ();
}
/*
* Appdomain handling
*/
-void
-mono_gc_set_current_thread_appdomain (MonoDomain *domain)
-{
- SgenThreadInfo *info = mono_thread_info_current ();
-
- /* Could be called from sgen_thread_unregister () with a NULL info */
- if (domain) {
- g_assert (info);
- info->client_info.stopped_domain = domain;
- }
-}
-
static gboolean
need_remove_object_for_domain (GCObject *start, MonoDomain *domain)
{
* Allocation
*/
-static gboolean alloc_events = FALSE;
-
-void
-mono_gc_enable_alloc_events (void)
-{
- alloc_events = TRUE;
-}
-
void*
mono_gc_alloc_obj (MonoVTable *vtable, size_t size)
{
MonoObject *obj = sgen_alloc_obj (vtable, size);
- if (G_UNLIKELY (alloc_events)) {
+ if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_ALLOCATIONS)) {
if (obj)
mono_profiler_allocation (obj);
}
{
MonoObject *obj = sgen_alloc_obj_pinned (vtable, size);
- if (G_UNLIKELY (alloc_events)) {
+ if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_ALLOCATIONS)) {
if (obj)
mono_profiler_allocation (obj);
}
{
MonoObject *obj = sgen_alloc_obj_mature (vtable, size);
- if (G_UNLIKELY (alloc_events)) {
+ if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_ALLOCATIONS)) {
if (obj)
mono_profiler_allocation (obj);
}
mono_gc_alloc_fixed (size_t size, MonoGCDescriptor descr, MonoGCRootSource source, const char *msg)
{
/* FIXME: do a single allocation */
- void *res = calloc (1, size);
+ void *res = g_calloc (1, size);
if (!res)
return NULL;
if (!mono_gc_register_root ((char *)res, size, descr, source, msg)) {
- free (res);
+ g_free (res);
res = NULL;
}
return res;
mono_gc_free_fixed (void* addr)
{
mono_gc_deregister_root ((char *)addr);
- free (addr);
+ g_free (addr);
}
/*
#ifdef MANAGED_ALLOCATION
-#ifdef HAVE_KW_THREAD
-
-#define EMIT_TLS_ACCESS_VAR(_mb, _var) /* nothing to do */
-
-#define EMIT_TLS_ACCESS_IN_CRITICAL_REGION_ADDR(mb, _var) \
- do { \
- mono_mb_emit_byte ((mb), MONO_CUSTOM_PREFIX); \
- mono_mb_emit_byte ((mb), CEE_MONO_TLS); \
- mono_mb_emit_i4 ((mb), TLS_KEY_SGEN_IN_CRITICAL_REGION_ADDR); \
- } while (0)
-
-#define EMIT_TLS_ACCESS_NEXT_ADDR(mb, _var) do { \
- mono_mb_emit_byte ((mb), MONO_CUSTOM_PREFIX); \
- mono_mb_emit_byte ((mb), CEE_MONO_TLS); \
- mono_mb_emit_i4 ((mb), TLS_KEY_SGEN_TLAB_NEXT_ADDR); \
- } while (0)
-
-#define EMIT_TLS_ACCESS_TEMP_END(mb, _var) do { \
- mono_mb_emit_byte ((mb), MONO_CUSTOM_PREFIX); \
- mono_mb_emit_byte ((mb), CEE_MONO_TLS); \
- mono_mb_emit_i4 ((mb), TLS_KEY_SGEN_TLAB_TEMP_END); \
- } while (0)
-
-#else
-
-#if defined(TARGET_OSX) || defined(TARGET_WIN32) || defined(TARGET_ANDROID) || defined(TARGET_IOS)
+#if defined(HAVE_KW_THREAD) || defined(TARGET_OSX) || defined(TARGET_WIN32) || defined(TARGET_ANDROID) || defined(TARGET_IOS)
// Cache the SgenThreadInfo pointer in a local 'var'.
#define EMIT_TLS_ACCESS_VAR(mb, var) \
#define EMIT_TLS_ACCESS_NEXT_ADDR(mb, var) do { \
mono_mb_emit_ldloc ((mb), (var)); \
- mono_mb_emit_icon ((mb), MONO_STRUCT_OFFSET (SgenThreadInfo, tlab_next_addr)); \
+ mono_mb_emit_icon ((mb), MONO_STRUCT_OFFSET (SgenThreadInfo, tlab_next)); \
mono_mb_emit_byte ((mb), CEE_ADD); \
- mono_mb_emit_byte ((mb), CEE_LDIND_I); \
} while (0)
#define EMIT_TLS_ACCESS_TEMP_END(mb, var) do { \
#define EMIT_TLS_ACCESS_NEXT_ADDR(mb, _var) do { g_error ("sgen is not supported when using --with-tls=pthread.\n"); } while (0)
#define EMIT_TLS_ACCESS_TEMP_END(mb, _var) do { g_error ("sgen is not supported when using --with-tls=pthread.\n"); } while (0)
#define EMIT_TLS_ACCESS_IN_CRITICAL_REGION_ADDR(mb, _var) do { g_error ("sgen is not supported when using --with-tls=pthread.\n"); } while (0)
-#endif
#endif
goto done;
}
+ /*
+ * Tls access might call foreign code or code without jinfo. This can
+ * only happen if we are outside of the critical region.
+ */
EMIT_TLS_ACCESS_VAR (mb, thread_var);
size_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
MonoMethod *res;
MonoMethod **cache;
- if (!use_managed_allocator)
+ if (variant == MANAGED_ALLOCATOR_REGULAR && !use_managed_allocator)
return NULL;
- if (!mono_runtime_has_tls_get ())
+ if (variant == MANAGED_ALLOCATOR_REGULAR && !mono_runtime_has_tls_get ())
return NULL;
switch (variant) {
UNLOCK_GC;
done:
- if (G_UNLIKELY (alloc_events))
+ if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_ALLOCATIONS))
mono_profiler_allocation (&arr->obj);
SGEN_ASSERT (6, SGEN_ALIGN_UP (size) == SGEN_ALIGN_UP (sgen_client_par_object_get_size (vtable, (GCObject*)arr)), "Vector has incorrect size.");
UNLOCK_GC;
done:
- if (G_UNLIKELY (alloc_events))
+ if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_ALLOCATIONS))
mono_profiler_allocation (&arr->obj);
SGEN_ASSERT (6, SGEN_ALIGN_UP (size) == SGEN_ALIGN_UP (sgen_client_par_object_get_size (vtable, (GCObject*)arr)), "Array has incorrect size.");
UNLOCK_GC;
done:
- if (G_UNLIKELY (alloc_events))
+ if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_ALLOCATIONS))
mono_profiler_allocation (&str->object);
return str;
static void *moved_objects [MOVED_OBJECTS_NUM];
static int moved_objects_idx = 0;
+static SgenPointerQueue moved_objects_queue = SGEN_POINTER_QUEUE_INIT (INTERNAL_MEM_MOVED_OBJECT);
+
+/*
+ * mono_sgen_register_moved_object:
+ *
+ * Record that OBJ was moved to DESTINATION so the move can be reported
+ * to the profiler.  Safe to call from SGen worker threads: moves made
+ * on a thread-pool thread are buffered in moved_objects_queue and
+ * flushed later on the main GC thread by mono_sgen_gc_event_moves ().
+ * Note the previous g_assert on MONO_PROFILE_GC_MOVES is dropped; the
+ * caller is now responsible for only invoking this when moves are
+ * being collected.
+ */
void
mono_sgen_register_moved_object (void *obj, void *destination)
{
-	g_assert (mono_profiler_events & MONO_PROFILE_GC_MOVES);
+	/*
+	 * This function can be called from SGen's worker threads. We want to try
+	 * and avoid exposing those threads to the profiler API, so queue up move
+	 * events and send them later when the main GC thread calls
+	 * mono_sgen_gc_event_moves ().
+	 *
+	 * TODO: Once SGen has multiple worker threads, we need to switch to a
+	 * lock-free data structure for the queue as multiple threads will be
+	 * adding to it at the same time.
+	 */
+	if (sgen_thread_pool_is_thread_pool_thread (mono_native_thread_id_get ())) {
+		sgen_pointer_queue_add (&moved_objects_queue, obj);
+		sgen_pointer_queue_add (&moved_objects_queue, destination);
+	} else {
+		if (moved_objects_idx == MOVED_OBJECTS_NUM) {
+			mono_profiler_gc_moves (moved_objects, moved_objects_idx);
+			moved_objects_idx = 0;
+		}
-	if (moved_objects_idx == MOVED_OBJECTS_NUM) {
-		mono_profiler_gc_moves (moved_objects, moved_objects_idx);
-		moved_objects_idx = 0;
+		moved_objects [moved_objects_idx++] = obj;
+		moved_objects [moved_objects_idx++] = destination;
	}
-	moved_objects [moved_objects_idx++] = obj;
-	moved_objects [moved_objects_idx++] = destination;
}
void
mono_sgen_gc_event_moves (void)
{
+ while (!sgen_pointer_queue_is_empty (&moved_objects_queue)) {
+ void *dst = sgen_pointer_queue_pop (&moved_objects_queue);
+ void *src = sgen_pointer_queue_pop (&moved_objects_queue);
+
+ mono_sgen_register_moved_object (src, dst);
+ }
+
if (moved_objects_idx) {
mono_profiler_gc_moves (moved_objects, moved_objects_idx);
moved_objects_idx = 0;
#endif
info->client_info.skip = 0;
- info->client_info.stopped_ip = NULL;
- info->client_info.stopped_domain = NULL;
info->client_info.stack_start = NULL;
info->client_info.signal = 0;
#endif
- /* On win32, stack_start_limit should be 0, since the stack can grow dynamically */
mono_thread_info_get_stack_bounds (&staddr, &stsize);
if (staddr) {
-#ifndef HOST_WIN32
info->client_info.stack_start_limit = staddr;
-#endif
info->client_info.stack_end = staddr + stsize;
} else {
gsize stack_bottom = (gsize)stack_bottom_fallback;
* so we assume that if the domain is still registered, we can detach
* the thread
*/
- if (mono_domain_get ())
+ if (mono_thread_internal_current_is_attached ())
mono_thread_detach_internal (mono_thread_internal_current ());
}
g_assert (info->client_info.stack_end);
aligned_stack_start = (void*)(mword) ALIGN_TO ((mword)info->client_info.stack_start, SIZEOF_VOID_P);
+#ifdef HOST_WIN32
+ /* Windows uses a guard page before the committed stack memory pages to detect when the
+ stack needs to be grown. If we suspend a thread just after a function prolog has
+ decremented the stack pointer to point into the guard page but before the thread has
+ been able to read or write to that page, starting the stack scan at aligned_stack_start
+ will raise a STATUS_GUARD_PAGE_VIOLATION and the process will crash. This code uses
+ VirtualQuery() to determine whether stack_start points into the guard page and then
+ updates aligned_stack_start to point at the next non-guard page. */
+ MEMORY_BASIC_INFORMATION mem_info;
+ SIZE_T result = VirtualQuery(info->client_info.stack_start, &mem_info, sizeof(mem_info));
+ g_assert (result != 0);
+ if (mem_info.Protect & PAGE_GUARD) {
+ aligned_stack_start = ((char*) mem_info.BaseAddress) + mem_info.RegionSize;
+ }
+#endif
g_assert (info->client_info.suspend_done);
SGEN_LOG (3, "Scanning thread %p, range: %p-%p, size: %zd, pinned=%zd", info, info->client_info.stack_start, info->client_info.stack_end, (char*)info->client_info.stack_end - (char*)info->client_info.stack_start, sgen_get_pinned_count ());
return 1;
}
-void
-mono_gc_enable_events (void)
-{
-}
-
const char *
mono_gc_get_gc_name (void)
{
{
switch (type) {
case INTERNAL_MEM_EPHEMERON_LINK: return "ephemeron-link";
+ case INTERNAL_MEM_MOVED_OBJECT: return "moved-object";
default:
return NULL;
}
}
#endif
- /*
- * This needs to happen before any internal allocations because
- * it inits the small id which is required for hazard pointer
- * operations.
- */
- sgen_os_init ();
-
mono_gc_register_thread (&dummy);
}
} else if (g_str_has_prefix (opt, "toggleref-test")) {
/* FIXME: This should probably in MONO_GC_DEBUG */
sgen_register_test_toggleref_callback ();
- } else {
+ } else if (!sgen_bridge_handle_gc_param (opt)) {
return FALSE;
}
return TRUE;
mono_counters_init ();
+#ifndef HOST_WIN32
+ mono_w32handle_init ();
+#endif
+
#ifdef HEAVY_STATISTICS
mono_counters_register ("los marked cards", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &los_marked_cards);
mono_counters_register ("los array cards scanned ", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &los_array_cards);
#if defined(HAVE_KW_THREAD)
/* This can happen with using libmonosgen.so */
- if (mono_tls_key_get_offset (TLS_KEY_SGEN_TLAB_NEXT_ADDR) == -1)
+ if (mono_tls_key_get_offset (TLS_KEY_SGEN_THREAD_INFO) == -1)
sgen_set_use_managed_allocator (FALSE);
#endif
mono_gc_base_cleanup (void)
{
sgen_thread_pool_shutdown ();
+
+ // We should have consumed any outstanding moves.
+ g_assert (sgen_pointer_queue_is_empty (&moved_objects_queue));
}
gboolean