#include "sgen/sgen-client.h"
#include "sgen/sgen-cardtable.h"
#include "sgen/sgen-pinning.h"
+#include "sgen/sgen-thread-pool.h"
#include "metadata/marshal.h"
#include "metadata/method-builder.h"
#include "metadata/abi-details.h"
#include "utils/mono-logger-internals.h"
#include "utils/mono-threads-coop.h"
#include "sgen/sgen-thread-pool.h"
+#include "utils/mono-threads.h"
#ifdef HEAVY_STATISTICS
static guint64 stat_wbarrier_set_arrayref = 0;
sgen_wbarrier_value_copy_bitmap (_dest, _src, size, bitmap);
}
+/*
+ * Public GC API: returns the POSIX signal number the runtime uses to suspend
+ * threads. Thin wrapper delegating to the mono-threads suspend machinery.
+ */
+int
+mono_gc_get_suspend_signal (void)
+{
+	return mono_threads_suspend_get_suspend_signal ();
+}
+
+/*
+ * Public GC API: returns the POSIX signal number the runtime uses to resume
+ * previously suspended threads. Thin wrapper over the mono-threads layer.
+ */
+int
+mono_gc_get_restart_signal (void)
+{
+	return mono_threads_suspend_get_restart_signal ();
+}
+
static MonoMethod *write_barrier_conc_method;
static MonoMethod *write_barrier_noconc_method;
return sgen_has_managed_allocator ();
}
+/*
+ * Returns TRUE if the instruction pointer IP falls inside a GC-critical
+ * method (per mono_runtime_is_critical_method () or sgen_is_critical_method ()),
+ * i.e. a method that must not be interrupted mid-execution by a suspend.
+ * Returns FALSE when no JIT info is found for IP.
+ *
+ * NOTE(review): intended to be async-signal safe — see the comment below on
+ * why 'try_aot' is FALSE; confirm no other call here can take locks.
+ */
+static gboolean
+ip_in_critical_region (MonoDomain *domain, gpointer ip)
+{
+	MonoJitInfo *ji;
+	MonoMethod *method;
+
+	/*
+	 * We pass false for 'try_aot' so this becomes async safe.
+	 * It won't find aot methods whose jit info is not yet loaded,
+	 * so we preload their jit info in the JIT.
+	 */
+	ji = mono_jit_info_table_find_internal (domain, ip, FALSE, FALSE);
+	if (!ji)
+		return FALSE;
+
+	method = mono_jit_info_get_method (ji);
+
+	return mono_runtime_is_critical_method (method) || sgen_is_critical_method (method);
+}
+
+/*
+ * Public GC API: TRUE if METHOD is one of SGen's critical methods (e.g. a
+ * managed allocator/write barrier). Delegates to sgen_is_critical_method ().
+ */
+gboolean
+mono_gc_is_critical_method (MonoMethod *method)
+{
+	return sgen_is_critical_method (method);
+}
+
#ifndef DISABLE_JIT
static void
* Appdomain handling
*/
-void
-mono_gc_set_current_thread_appdomain (MonoDomain *domain)
-{
- SgenThreadInfo *info = mono_thread_info_current ();
-
- /* Could be called from sgen_thread_unregister () with a NULL info */
- if (domain) {
- g_assert (info);
- info->client_info.stopped_domain = domain;
- }
-}
-
static gboolean
need_remove_object_for_domain (GCObject *start, MonoDomain *domain)
{
* Allocation
*/
-static gboolean alloc_events = FALSE;
-
-void
-mono_gc_enable_alloc_events (void)
-{
- alloc_events = TRUE;
-}
-
void*
mono_gc_alloc_obj (MonoVTable *vtable, size_t size)
{
MonoObject *obj = sgen_alloc_obj (vtable, size);
- if (G_UNLIKELY (alloc_events)) {
+ if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_ALLOCATIONS)) {
if (obj)
mono_profiler_allocation (obj);
}
{
MonoObject *obj = sgen_alloc_obj_pinned (vtable, size);
- if (G_UNLIKELY (alloc_events)) {
+ if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_ALLOCATIONS)) {
if (obj)
mono_profiler_allocation (obj);
}
{
MonoObject *obj = sgen_alloc_obj_mature (vtable, size);
- if (G_UNLIKELY (alloc_events)) {
+ if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_ALLOCATIONS)) {
if (obj)
mono_profiler_allocation (obj);
}
UNLOCK_GC;
done:
- if (G_UNLIKELY (alloc_events))
+ if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_ALLOCATIONS))
mono_profiler_allocation (&arr->obj);
SGEN_ASSERT (6, SGEN_ALIGN_UP (size) == SGEN_ALIGN_UP (sgen_client_par_object_get_size (vtable, (GCObject*)arr)), "Vector has incorrect size.");
UNLOCK_GC;
done:
- if (G_UNLIKELY (alloc_events))
+ if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_ALLOCATIONS))
mono_profiler_allocation (&arr->obj);
SGEN_ASSERT (6, SGEN_ALIGN_UP (size) == SGEN_ALIGN_UP (sgen_client_par_object_get_size (vtable, (GCObject*)arr)), "Array has incorrect size.");
UNLOCK_GC;
done:
- if (G_UNLIKELY (alloc_events))
+ if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_ALLOCATIONS))
mono_profiler_allocation (&str->object);
return str;
static void *moved_objects [MOVED_OBJECTS_NUM];
static int moved_objects_idx = 0;
+static SgenPointerQueue moved_objects_queue = SGEN_POINTER_QUEUE_INIT (INTERNAL_MEM_MOVED_OBJECT);
+
void
mono_sgen_register_moved_object (void *obj, void *destination)
{
-	g_assert (mono_profiler_events & MONO_PROFILE_GC_MOVES);
+	/*
+	 * This function can be called from SGen's worker threads. We want to try
+	 * and avoid exposing those threads to the profiler API, so queue up move
+	 * events and send them later when the main GC thread calls
+	 * mono_sgen_gc_event_moves ().
+	 *
+	 * TODO: Once SGen has multiple worker threads, we need to switch to a
+	 * lock-free data structure for the queue as multiple threads will be
+	 * adding to it at the same time.
+	 */
+	if (sgen_thread_pool_is_thread_pool_thread (mono_native_thread_id_get ())) {
+		sgen_pointer_queue_add (&moved_objects_queue, obj);
+		sgen_pointer_queue_add (&moved_objects_queue, destination);
+	} else {
+		/* On the GC's main thread: buffer (obj, destination) pairs and flush
+		 * the batch to the profiler whenever the fixed-size buffer fills. */
+		if (moved_objects_idx == MOVED_OBJECTS_NUM) {
+			mono_profiler_gc_moves (moved_objects, moved_objects_idx);
+			moved_objects_idx = 0;
+		}
-	if (moved_objects_idx == MOVED_OBJECTS_NUM) {
-		mono_profiler_gc_moves (moved_objects, moved_objects_idx);
-		moved_objects_idx = 0;
+		moved_objects [moved_objects_idx++] = obj;
+		moved_objects [moved_objects_idx++] = destination;
	}
-	moved_objects [moved_objects_idx++] = obj;
-	moved_objects [moved_objects_idx++] = destination;
}
void
mono_sgen_gc_event_moves (void)
{
+ while (!sgen_pointer_queue_is_empty (&moved_objects_queue)) {
+ void *dst = sgen_pointer_queue_pop (&moved_objects_queue);
+ void *src = sgen_pointer_queue_pop (&moved_objects_queue);
+
+ mono_sgen_register_moved_object (src, dst);
+ }
+
if (moved_objects_idx) {
mono_profiler_gc_moves (moved_objects, moved_objects_idx);
moved_objects_idx = 0;
#endif
info->client_info.skip = 0;
- info->client_info.stopped_ip = NULL;
- info->client_info.stopped_domain = NULL;
info->client_info.stack_start = NULL;
info->client_info.signal = 0;
#endif
- /* On win32, stack_start_limit should be 0, since the stack can grow dynamically */
mono_thread_info_get_stack_bounds (&staddr, &stsize);
if (staddr) {
-#ifndef HOST_WIN32
info->client_info.stack_start_limit = staddr;
-#endif
info->client_info.stack_end = staddr + stsize;
} else {
gsize stack_bottom = (gsize)stack_bottom_fallback;
UNLOCK_GC;
}
-static gboolean
-is_critical_method (MonoMethod *method)
-{
- return mono_runtime_is_critical_method (method) || sgen_is_critical_method (method);
-}
-
static gboolean
thread_in_critical_region (SgenThreadInfo *info)
{
* so we assume that if the domain is still registered, we can detach
* the thread
*/
- if (mono_domain_get ())
+ if (mono_thread_internal_current_is_attached ())
mono_thread_detach_internal (mono_thread_internal_current ());
}
return 1;
}
-void
-mono_gc_enable_events (void)
-{
-}
-
const char *
mono_gc_get_gc_name (void)
{
{
switch (type) {
case INTERNAL_MEM_EPHEMERON_LINK: return "ephemeron-link";
+ case INTERNAL_MEM_MOVED_OBJECT: return "moved-object";
default:
return NULL;
}
cb.thread_detach = sgen_thread_detach;
cb.thread_unregister = sgen_thread_unregister;
cb.thread_attach = sgen_thread_attach;
- cb.mono_method_is_critical = (gboolean (*)(void *))is_critical_method;
cb.mono_thread_in_critical_region = thread_in_critical_region;
+ cb.ip_in_critical_region = ip_in_critical_region;
mono_threads_init (&cb, sizeof (SgenThreadInfo));
}
#endif
- /*
- * This needs to happen before any internal allocations because
- * it inits the small id which is required for hazard pointer
- * operations.
- */
- sgen_os_init ();
-
mono_gc_register_thread (&dummy);
}
mono_counters_init ();
+#ifndef HOST_WIN32
+ mono_w32handle_init ();
+#endif
+
#ifdef HEAVY_STATISTICS
mono_counters_register ("los marked cards", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &los_marked_cards);
mono_counters_register ("los array cards scanned ", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &los_array_cards);
mono_gc_base_cleanup (void)
{
sgen_thread_pool_shutdown ();
+
+ // We should have consumed any outstanding moves.
+ g_assert (sgen_pointer_queue_is_empty (&moved_objects_queue));
}
gboolean