return sgen_has_managed_allocator ();
}
-static gboolean
-ip_in_critical_region (MonoDomain *domain, gpointer ip)
-{
- MonoJitInfo *ji;
- MonoMethod *method;
-
- /*
- * We pass false for 'try_aot' so this becomes async safe.
- * It won't find aot methods whose jit info is not yet loaded,
- * so we preload their jit info in the JIT.
- */
- ji = mono_jit_info_table_find_internal (domain, ip, FALSE, FALSE);
- if (!ji)
- return FALSE;
-
- method = mono_jit_info_get_method (ji);
-
- return mono_runtime_is_critical_method (method) || sgen_is_critical_method (method);
-}
-
gboolean
mono_gc_is_critical_method (MonoMethod *method)
{
{
MonoObject *obj = sgen_alloc_obj (vtable, size);
- if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_ALLOCATIONS)) {
- if (obj)
- mono_profiler_allocation (obj);
- }
+ if (G_UNLIKELY (mono_profiler_allocations_enabled ()) && obj)
+ MONO_PROFILER_RAISE (gc_allocation, (obj));
return obj;
}
{
MonoObject *obj = sgen_alloc_obj_pinned (vtable, size);
- if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_ALLOCATIONS)) {
- if (obj)
- mono_profiler_allocation (obj);
- }
+ if (G_UNLIKELY (mono_profiler_allocations_enabled ()) && obj)
+ MONO_PROFILER_RAISE (gc_allocation, (obj));
return obj;
}
{
MonoObject *obj = sgen_alloc_obj_mature (vtable, size);
- if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_ALLOCATIONS)) {
- if (obj)
- mono_profiler_allocation (obj);
- }
+ if (G_UNLIKELY (mono_profiler_allocations_enabled ()) && obj)
+ MONO_PROFILER_RAISE (gc_allocation, (obj));
return obj;
}
static MonoMethod* alloc_method_cache [ATYPE_NUM];
static MonoMethod* slowpath_alloc_method_cache [ATYPE_NUM];
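+/* Caches the profiler-instrumented variants of the managed allocators. */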
+static MonoMethod* profiler_alloc_method_cache [ATYPE_NUM];
static gboolean use_managed_allocator = TRUE;
#ifdef MANAGED_ALLOCATION
{
int p_var, size_var, real_size_var, thread_var G_GNUC_UNUSED;
gboolean slowpath = variant == MANAGED_ALLOCATOR_SLOW_PATH;
+ gboolean profiler = variant == MANAGED_ALLOCATOR_PROFILER;
guint32 fastpath_branch, max_size_branch, no_oom_branch;
MonoMethodBuilder *mb;
MonoMethod *res;
mono_register_jit_icall (mono_gc_alloc_obj, "mono_gc_alloc_obj", mono_create_icall_signature ("object ptr int"), FALSE);
mono_register_jit_icall (mono_gc_alloc_vector, "mono_gc_alloc_vector", mono_create_icall_signature ("object ptr int int"), FALSE);
mono_register_jit_icall (mono_gc_alloc_string, "mono_gc_alloc_string", mono_create_icall_signature ("object ptr int int32"), FALSE);
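+ /* Called from the profiler variant of the managed allocator to raise the allocation event (see the icall emission below). */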
+ mono_register_jit_icall (mono_profiler_raise_gc_allocation, "mono_profiler_raise_gc_allocation", mono_create_icall_signature ("void object"), FALSE);
registered = TRUE;
}
if (atype == ATYPE_SMALL) {
- name = slowpath ? "SlowAllocSmall" : "AllocSmall";
+ name = slowpath ? "SlowAllocSmall" : (profiler ? "ProfilerAllocSmall" : "AllocSmall");
} else if (atype == ATYPE_NORMAL) {
- name = slowpath ? "SlowAlloc" : "Alloc";
+ name = slowpath ? "SlowAlloc" : (profiler ? "ProfilerAlloc" : "Alloc");
} else if (atype == ATYPE_VECTOR) {
- name = slowpath ? "SlowAllocVector" : "AllocVector";
+ name = slowpath ? "SlowAllocVector" : (profiler ? "ProfilerAllocVector" : "AllocVector");
} else if (atype == ATYPE_STRING) {
- name = slowpath ? "SlowAllocString" : "AllocString";
+ name = slowpath ? "SlowAllocString" : (profiler ? "ProfilerAllocString" : "AllocString");
} else {
g_assert_not_reached ();
}
mono_mb_emit_ldloc (mb, p_var);
done:
+
+ /*
+ * It's important that we do this outside of the critical region as we
+ * will be invoking arbitrary code.
+ */
+ if (profiler) {
+ /*
+ * if (G_UNLIKELY (*&mono_profiler_state.gc_allocation_count)) {
+ * mono_profiler_raise_gc_allocation (p);
+ * }
+ */
+
+ mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
+ mono_mb_emit_byte (mb, CEE_MONO_LDPTR_PROFILER_ALLOCATION_COUNT);
+ mono_mb_emit_byte (mb, CEE_LDIND_U4);
+
+ int prof_br = mono_mb_emit_short_branch (mb, CEE_BRFALSE_S);
+
+ mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
+ mono_mb_emit_byte (mb, CEE_MONO_NOT_TAKEN);
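+ /* Duplicate the object reference: the icall consumes one copy, the other is returned by the CEE_RET below. */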
+ mono_mb_emit_byte (mb, CEE_DUP);
+ mono_mb_emit_icall (mb, mono_profiler_raise_gc_allocation);
+
+ mono_mb_patch_short_branch (mb, prof_br);
+ }
+
mono_mb_emit_byte (mb, CEE_RET);
#endif
mono_gc_get_managed_allocator (MonoClass *klass, gboolean for_box, gboolean known_instance_size)
{
#ifdef MANAGED_ALLOCATION
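+ /* When allocation profiling is enabled, hand out the profiler-instrumented allocator instead of falling back to the unmanaged path as before. */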
+ ManagedAllocatorVariant variant = mono_profiler_allocations_enabled () ?
+ MANAGED_ALLOCATOR_PROFILER : MANAGED_ALLOCATOR_REGULAR;
+
if (collect_before_allocs)
return NULL;
if (klass->instance_size > tlab_size)
return NULL;
if (klass->rank)
return NULL;
- if (mono_profiler_get_events () & MONO_PROFILE_ALLOCATIONS)
- return NULL;
if (klass->byval_arg.type == MONO_TYPE_STRING)
- return mono_gc_get_managed_allocator_by_type (ATYPE_STRING, MANAGED_ALLOCATOR_REGULAR);
+ return mono_gc_get_managed_allocator_by_type (ATYPE_STRING, variant);
/* Generic classes have dynamic field and can go above MAX_SMALL_OBJ_SIZE. */
if (known_instance_size)
- return mono_gc_get_managed_allocator_by_type (ATYPE_SMALL, MANAGED_ALLOCATOR_REGULAR);
+ return mono_gc_get_managed_allocator_by_type (ATYPE_SMALL, variant);
else
- return mono_gc_get_managed_allocator_by_type (ATYPE_NORMAL, MANAGED_ALLOCATOR_REGULAR);
+ return mono_gc_get_managed_allocator_by_type (ATYPE_NORMAL, variant);
#else
return NULL;
#endif
#ifdef MANAGED_ALLOCATION
if (klass->rank != 1)
return NULL;
- if (mono_profiler_get_events () & MONO_PROFILE_ALLOCATIONS)
- return NULL;
if (has_per_allocation_action)
return NULL;
g_assert (!mono_class_has_finalizer (klass) && !mono_class_is_marshalbyref (klass));
- return mono_gc_get_managed_allocator_by_type (ATYPE_VECTOR, MANAGED_ALLOCATOR_REGULAR);
+ return mono_gc_get_managed_allocator_by_type (ATYPE_VECTOR, mono_profiler_allocations_enabled () ?
+ MANAGED_ALLOCATOR_PROFILER : MANAGED_ALLOCATOR_REGULAR);
#else
return NULL;
#endif
MonoMethod *res;
MonoMethod **cache;
- if (variant == MANAGED_ALLOCATOR_REGULAR && !use_managed_allocator)
+ if (variant != MANAGED_ALLOCATOR_SLOW_PATH && !use_managed_allocator)
return NULL;
switch (variant) {
case MANAGED_ALLOCATOR_REGULAR: cache = alloc_method_cache; break;
case MANAGED_ALLOCATOR_SLOW_PATH: cache = slowpath_alloc_method_cache; break;
+ case MANAGED_ALLOCATOR_PROFILER: cache = profiler_alloc_method_cache; break;
default: g_assert_not_reached (); break;
}
int i;
for (i = 0; i < ATYPE_NUM; ++i)
- if (method == alloc_method_cache [i] || method == slowpath_alloc_method_cache [i])
+ if (method == alloc_method_cache [i] || method == slowpath_alloc_method_cache [i] || method == profiler_alloc_method_cache [i])
return TRUE;
return FALSE;
}
int i;
for (i = 0; i < ATYPE_NUM; ++i)
- if (alloc_method_cache [i] || slowpath_alloc_method_cache [i])
+ if (alloc_method_cache [i] || slowpath_alloc_method_cache [i] || profiler_alloc_method_cache [i])
return TRUE;
return FALSE;
}
UNLOCK_GC;
done:
- if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_ALLOCATIONS))
- mono_profiler_allocation (&arr->obj);
+ if (G_UNLIKELY (mono_profiler_allocations_enabled ()))
+ MONO_PROFILER_RAISE (gc_allocation, (&arr->obj));
SGEN_ASSERT (6, SGEN_ALIGN_UP (size) == SGEN_ALIGN_UP (sgen_client_par_object_get_size (vtable, (GCObject*)arr)), "Vector has incorrect size.");
return arr;
UNLOCK_GC;
done:
- if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_ALLOCATIONS))
- mono_profiler_allocation (&arr->obj);
+ if (G_UNLIKELY (mono_profiler_allocations_enabled ()))
+ MONO_PROFILER_RAISE (gc_allocation, (&arr->obj));
SGEN_ASSERT (6, SGEN_ALIGN_UP (size) == SGEN_ALIGN_UP (sgen_client_par_object_get_size (vtable, (GCObject*)arr)), "Array has incorrect size.");
return arr;
UNLOCK_GC;
done:
- if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_ALLOCATIONS))
- mono_profiler_allocation (&str->object);
+ if (G_UNLIKELY (mono_profiler_allocations_enabled ()))
+ MONO_PROFILER_RAISE (gc_allocation, (&str->object));
return str;
}
{
if (!report->count)
return;
- mono_profiler_gc_roots (report->count, report->objects, report->root_types, report->extra_info);
+ MONO_PROFILER_RAISE (gc_roots, ((MonoObject **) report->objects, (MonoProfilerGCRootType *) report->root_types, report->extra_info, report->count));
report->count = 0;
}
void
sgen_client_nursery_objects_pinned (void **definitely_pinned, int count)
{
- if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS) {
+ if (MONO_PROFILER_ENABLED (gc_roots)) {
GCRootReport report;
int idx;
report.count = 0;
for (idx = 0; idx < count; ++idx)
- add_profile_gc_root (&report, definitely_pinned [idx], MONO_PROFILE_GC_ROOT_PINNING | MONO_PROFILE_GC_ROOT_MISC, 0);
+ add_profile_gc_root (&report, definitely_pinned [idx], MONO_PROFILER_GC_ROOT_PINNING | MONO_PROFILER_GC_ROOT_MISC, 0);
notify_gc_roots (&report);
}
}
void *obj = queue->data [i];
if (!obj)
continue;
- add_profile_gc_root (&report, obj, MONO_PROFILE_GC_ROOT_FINALIZER, 0);
+ add_profile_gc_root (&report, obj, MONO_PROFILER_GC_ROOT_FINALIZER, 0);
}
notify_gc_roots (&report);
}
single_arg_report_root (MonoObject **obj, void *gc_data)
{
if (*obj)
- add_profile_gc_root (root_report, *obj, MONO_PROFILE_GC_ROOT_OTHER, 0);
+ add_profile_gc_root (root_report, *obj, MONO_PROFILER_GC_ROOT_OTHER, 0);
}
static void
desc >>= ROOT_DESC_TYPE_SHIFT;
while (desc) {
if ((desc & 1) && *start_root) {
- add_profile_gc_root (report, *start_root, MONO_PROFILE_GC_ROOT_OTHER, 0);
+ add_profile_gc_root (report, *start_root, MONO_PROFILER_GC_ROOT_OTHER, 0);
}
desc >>= 1;
start_root++;
void **objptr = start_run;
while (bmap) {
if ((bmap & 1) && *objptr) {
- add_profile_gc_root (report, *objptr, MONO_PROFILE_GC_ROOT_OTHER, 0);
+ add_profile_gc_root (report, *objptr, MONO_PROFILER_GC_ROOT_OTHER, 0);
}
bmap >>= 1;
++objptr;
for (p = start_root; p < end_root; p++) {
if (*p)
- add_profile_gc_root (report, *p, MONO_PROFILE_GC_ROOT_OTHER, 0);
+ add_profile_gc_root (report, *p, MONO_PROFILER_GC_ROOT_OTHER, 0);
}
break;
}
void
sgen_client_collecting_minor (SgenPointerQueue *fin_ready_queue, SgenPointerQueue *critical_fin_queue)
{
- if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
+ if (MONO_PROFILER_ENABLED (gc_roots))
report_registered_roots ();
- if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
+
+ if (MONO_PROFILER_ENABLED (gc_roots))
report_finalizer_roots (fin_ready_queue, critical_fin_queue);
}
void
sgen_client_collecting_major_1 (void)
{
- profile_roots = mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS;
+ profile_roots = MONO_PROFILER_ENABLED (gc_roots);
memset (&major_root_report, 0, sizeof (GCRootReport));
}
sgen_client_pinned_los_object (GCObject *obj)
{
if (profile_roots)
- add_profile_gc_root (&major_root_report, (char*)obj, MONO_PROFILE_GC_ROOT_PINNING | MONO_PROFILE_GC_ROOT_MISC, 0);
+ add_profile_gc_root (&major_root_report, (char*)obj, MONO_PROFILER_GC_ROOT_PINNING | MONO_PROFILER_GC_ROOT_MISC, 0);
}
void
if (profile_roots)
notify_gc_roots (&major_root_report);
- if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
+ if (MONO_PROFILER_ENABLED (gc_roots))
report_registered_roots ();
}
void
sgen_client_collecting_major_3 (SgenPointerQueue *fin_ready_queue, SgenPointerQueue *critical_fin_queue)
{
- if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
+ if (MONO_PROFILER_ENABLED (gc_roots))
report_finalizer_roots (fin_ready_queue, critical_fin_queue);
}
sgen_pointer_queue_add (&moved_objects_queue, destination);
} else {
if (moved_objects_idx == MOVED_OBJECTS_NUM) {
- mono_profiler_gc_moves (moved_objects, moved_objects_idx);
+ MONO_PROFILER_RAISE (gc_moves, ((MonoObject **) moved_objects, moved_objects_idx));
moved_objects_idx = 0;
}
}
if (moved_objects_idx) {
- mono_profiler_gc_moves (moved_objects, moved_objects_idx);
+ MONO_PROFILER_RAISE (gc_moves, ((MonoObject **) moved_objects, moved_objects_idx));
moved_objects_idx = 0;
}
}
return &gc_callbacks;
}
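+
+/*
+ * Thin wrapper called directly by the thread machinery now that sgen no
+ * longer registers MonoThreadInfoCallbacks in sgen_client_init (see below).
+ */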
+gpointer
+mono_gc_thread_attach (SgenThreadInfo *info)
+{
+ return sgen_thread_attach (info);
+}
+
void
sgen_client_thread_attach (SgenThreadInfo* info)
{
info->client_info.info.handle_stack = mono_handle_stack_alloc ();
}
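+
+/* Detach counterpart of mono_gc_thread_attach; simply forwards to sgen. */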
+void
+mono_gc_thread_detach_with_lock (SgenThreadInfo *info)
+{
+ sgen_thread_detach_with_lock (info);
+}
+
void
sgen_client_thread_detach_with_lock (SgenThreadInfo *p)
{
}
}
-static gboolean
-thread_in_critical_region (SgenThreadInfo *info)
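+/* Formerly registered via MonoThreadInfoCallbacks; now exported for the thread machinery to call directly. */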
+gboolean
+mono_gc_thread_in_critical_region (SgenThreadInfo *info)
{
return info->client_info.in_critical_region;
}
-static void
-sgen_thread_detach (SgenThreadInfo *p)
-{
- /* If a delegate is passed to native code and invoked on a thread we dont
- * know about, marshal will register it with mono_threads_attach_coop, but
- * we have no way of knowing when that thread goes away. SGen has a TSD
- * so we assume that if the domain is still registered, we can detach
- * the thread
- */
- if (mono_thread_internal_current_is_attached ())
- mono_thread_detach_internal (mono_thread_internal_current ());
-}
-
/**
* mono_gc_is_gc_thread:
*/
#ifndef DISABLE_PERFCOUNTERS
mono_perfcounters->gc_num_handles++;
#endif
- mono_profiler_gc_handle (MONO_PROFILER_GC_HANDLE_CREATED, handle_type, handle, obj);
+
+ MONO_PROFILER_RAISE (gc_handle_created, (handle, handle_type, obj));
}
void
#ifndef DISABLE_PERFCOUNTERS
mono_perfcounters->gc_num_handles--;
#endif
- mono_profiler_gc_handle (MONO_PROFILER_GC_HANDLE_DESTROYED, handle_type, handle, NULL);
+
+ MONO_PROFILER_RAISE (gc_handle_deleted, (handle, handle_type));
}
void
*/
void
-sgen_client_degraded_allocation (size_t size)
+sgen_client_degraded_allocation (void)
{
- static int last_major_gc_warned = -1;
- static int num_degraded = 0;
+ static gint32 last_major_gc_warned = -1;
+ static gint32 num_degraded = 0;
- if (last_major_gc_warned < (int)gc_stats.major_gc_count) {
- ++num_degraded;
- if (num_degraded == 1 || num_degraded == 3)
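+ /* Read and update the warning counters atomically; this path may be reached by more than one thread. */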
+ gint32 major_gc_count = InterlockedRead (&gc_stats.major_gc_count);
+ if (InterlockedRead (&last_major_gc_warned) < major_gc_count) {
+ gint32 num = InterlockedIncrement (&num_degraded);
+ if (num == 1 || num == 3)
mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_GC, "Warning: Degraded allocation. Consider increasing nursery-size if the warning persists.");
- else if (num_degraded == 10)
+ else if (num == 10)
mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_GC, "Warning: Repeated degraded allocation. Consider increasing nursery-size.");
- last_major_gc_warned = gc_stats.major_gc_count;
+ InterlockedWrite (&last_major_gc_warned, major_gc_count);
}
}
void
sgen_client_init (void)
{
- MonoThreadInfoCallbacks cb;
-
- cb.thread_attach = sgen_thread_attach;
- cb.thread_detach = sgen_thread_detach;
- cb.thread_detach_with_lock = sgen_thread_detach_with_lock;
- cb.mono_thread_in_critical_region = thread_in_critical_region;
- cb.ip_in_critical_region = ip_in_critical_region;
-
- mono_threads_init (&cb, sizeof (SgenThreadInfo));
+ mono_thread_callbacks_init ();
+ mono_thread_info_init (sizeof (SgenThreadInfo));
///* Keep this the default for now */
/* Precise marking is broken on all supported targets. Disable until fixed. */
void
mono_gc_base_cleanup (void)
{
- sgen_thread_pool_shutdown (major_collector.get_sweep_pool ());
-
- sgen_workers_shutdown ();
+ sgen_thread_pool_shutdown ();
// We should have consumed any outstanding moves.
g_assert (sgen_pointer_queue_is_empty (&moved_objects_queue));