{
MonoObject *obj = sgen_alloc_obj (vtable, size);
- if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_ALLOCATIONS)) {
- if (obj)
- mono_profiler_allocation (obj);
- }
+ if (G_UNLIKELY (mono_profiler_allocations_enabled ()) && obj)
+ MONO_PROFILER_RAISE (gc_allocation, (obj));
return obj;
}
{
MonoObject *obj = sgen_alloc_obj_pinned (vtable, size);
- if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_ALLOCATIONS)) {
- if (obj)
- mono_profiler_allocation (obj);
- }
+ if (G_UNLIKELY (mono_profiler_allocations_enabled ()) && obj)
+ MONO_PROFILER_RAISE (gc_allocation, (obj));
return obj;
}
{
MonoObject *obj = sgen_alloc_obj_mature (vtable, size);
- if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_ALLOCATIONS)) {
- if (obj)
- mono_profiler_allocation (obj);
- }
+ if (G_UNLIKELY (mono_profiler_allocations_enabled ()) && obj)
+ MONO_PROFILER_RAISE (gc_allocation, (obj));
return obj;
}
static MonoMethod* alloc_method_cache [ATYPE_NUM];
static MonoMethod* slowpath_alloc_method_cache [ATYPE_NUM];
+static MonoMethod* profiler_alloc_method_cache [ATYPE_NUM];
static gboolean use_managed_allocator = TRUE;
#ifdef MANAGED_ALLOCATION
{
int p_var, size_var, real_size_var, thread_var G_GNUC_UNUSED;
gboolean slowpath = variant == MANAGED_ALLOCATOR_SLOW_PATH;
+ gboolean profiler = variant == MANAGED_ALLOCATOR_PROFILER;
guint32 fastpath_branch, max_size_branch, no_oom_branch;
MonoMethodBuilder *mb;
MonoMethod *res;
mono_register_jit_icall (mono_gc_alloc_obj, "mono_gc_alloc_obj", mono_create_icall_signature ("object ptr int"), FALSE);
mono_register_jit_icall (mono_gc_alloc_vector, "mono_gc_alloc_vector", mono_create_icall_signature ("object ptr int int"), FALSE);
mono_register_jit_icall (mono_gc_alloc_string, "mono_gc_alloc_string", mono_create_icall_signature ("object ptr int int32"), FALSE);
+ mono_register_jit_icall (mono_profiler_raise_gc_allocation, "mono_profiler_raise_gc_allocation", mono_create_icall_signature ("void object"), FALSE);
registered = TRUE;
}
if (atype == ATYPE_SMALL) {
- name = slowpath ? "SlowAllocSmall" : "AllocSmall";
+ name = slowpath ? "SlowAllocSmall" : (profiler ? "ProfilerAllocSmall" : "AllocSmall");
} else if (atype == ATYPE_NORMAL) {
- name = slowpath ? "SlowAlloc" : "Alloc";
+ name = slowpath ? "SlowAlloc" : (profiler ? "ProfilerAlloc" : "Alloc");
} else if (atype == ATYPE_VECTOR) {
- name = slowpath ? "SlowAllocVector" : "AllocVector";
+ name = slowpath ? "SlowAllocVector" : (profiler ? "ProfilerAllocVector" : "AllocVector");
} else if (atype == ATYPE_STRING) {
- name = slowpath ? "SlowAllocString" : "AllocString";
+ name = slowpath ? "SlowAllocString" : (profiler ? "ProfilerAllocString" : "AllocString");
} else {
g_assert_not_reached ();
}
mono_mb_emit_ldloc (mb, p_var);
done:
+
+ /*
+ * It's important that we do this outside of the critical region as we
+ * will be invoking arbitrary code.
+ */
+ if (profiler) {
+ /*
+ * if (G_UNLIKELY (*&mono_profiler_state.gc_allocation_count)) {
+ * mono_profiler_raise_gc_allocation (p);
+ * }
+ */
+
+ mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
+ mono_mb_emit_byte (mb, CEE_MONO_LDPTR_PROFILER_ALLOCATION_COUNT);
+ mono_mb_emit_byte (mb, CEE_LDIND_U4);
+
+ int prof_br = mono_mb_emit_short_branch (mb, CEE_BRFALSE_S);
+
+ mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
+ mono_mb_emit_byte (mb, CEE_MONO_NOT_TAKEN);
+ mono_mb_emit_byte (mb, CEE_DUP);
+ mono_mb_emit_icall (mb, mono_profiler_raise_gc_allocation);
+
+ mono_mb_patch_short_branch (mb, prof_br);
+ }
+
mono_mb_emit_byte (mb, CEE_RET);
#endif
mono_gc_get_managed_allocator (MonoClass *klass, gboolean for_box, gboolean known_instance_size)
{
#ifdef MANAGED_ALLOCATION
+ ManagedAllocatorVariant variant = mono_profiler_allocations_enabled () ?
+ MANAGED_ALLOCATOR_PROFILER : MANAGED_ALLOCATOR_REGULAR;
+
if (collect_before_allocs)
return NULL;
if (klass->instance_size > tlab_size)
return NULL;
if (klass->rank)
return NULL;
- if (mono_profiler_get_events () & MONO_PROFILE_ALLOCATIONS)
- return NULL;
if (klass->byval_arg.type == MONO_TYPE_STRING)
- return mono_gc_get_managed_allocator_by_type (ATYPE_STRING, MANAGED_ALLOCATOR_REGULAR);
+ return mono_gc_get_managed_allocator_by_type (ATYPE_STRING, variant);
/* Generic classes have dynamic field and can go above MAX_SMALL_OBJ_SIZE. */
if (known_instance_size)
- return mono_gc_get_managed_allocator_by_type (ATYPE_SMALL, MANAGED_ALLOCATOR_REGULAR);
+ return mono_gc_get_managed_allocator_by_type (ATYPE_SMALL, variant);
else
- return mono_gc_get_managed_allocator_by_type (ATYPE_NORMAL, MANAGED_ALLOCATOR_REGULAR);
+ return mono_gc_get_managed_allocator_by_type (ATYPE_NORMAL, variant);
#else
return NULL;
#endif
#ifdef MANAGED_ALLOCATION
if (klass->rank != 1)
return NULL;
- if (mono_profiler_get_events () & MONO_PROFILE_ALLOCATIONS)
- return NULL;
if (has_per_allocation_action)
return NULL;
g_assert (!mono_class_has_finalizer (klass) && !mono_class_is_marshalbyref (klass));
- return mono_gc_get_managed_allocator_by_type (ATYPE_VECTOR, MANAGED_ALLOCATOR_REGULAR);
+ return mono_gc_get_managed_allocator_by_type (ATYPE_VECTOR, mono_profiler_allocations_enabled () ?
+ MANAGED_ALLOCATOR_PROFILER : MANAGED_ALLOCATOR_REGULAR);
#else
return NULL;
#endif
MonoMethod *res;
MonoMethod **cache;
- if (variant == MANAGED_ALLOCATOR_REGULAR && !use_managed_allocator)
+ if (variant != MANAGED_ALLOCATOR_SLOW_PATH && !use_managed_allocator)
return NULL;
switch (variant) {
case MANAGED_ALLOCATOR_REGULAR: cache = alloc_method_cache; break;
case MANAGED_ALLOCATOR_SLOW_PATH: cache = slowpath_alloc_method_cache; break;
+ case MANAGED_ALLOCATOR_PROFILER: cache = profiler_alloc_method_cache; break;
default: g_assert_not_reached (); break;
}
int i;
for (i = 0; i < ATYPE_NUM; ++i)
- if (method == alloc_method_cache [i] || method == slowpath_alloc_method_cache [i])
+ if (method == alloc_method_cache [i] || method == slowpath_alloc_method_cache [i] || method == profiler_alloc_method_cache [i])
return TRUE;
return FALSE;
}
int i;
for (i = 0; i < ATYPE_NUM; ++i)
- if (alloc_method_cache [i] || slowpath_alloc_method_cache [i])
+ if (alloc_method_cache [i] || slowpath_alloc_method_cache [i] || profiler_alloc_method_cache [i])
return TRUE;
return FALSE;
}
UNLOCK_GC;
done:
- if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_ALLOCATIONS))
- mono_profiler_allocation (&arr->obj);
+ if (G_UNLIKELY (mono_profiler_allocations_enabled ()))
+ MONO_PROFILER_RAISE (gc_allocation, (&arr->obj));
SGEN_ASSERT (6, SGEN_ALIGN_UP (size) == SGEN_ALIGN_UP (sgen_client_par_object_get_size (vtable, (GCObject*)arr)), "Vector has incorrect size.");
return arr;
UNLOCK_GC;
done:
- if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_ALLOCATIONS))
- mono_profiler_allocation (&arr->obj);
+ if (G_UNLIKELY (mono_profiler_allocations_enabled ()))
+ MONO_PROFILER_RAISE (gc_allocation, (&arr->obj));
SGEN_ASSERT (6, SGEN_ALIGN_UP (size) == SGEN_ALIGN_UP (sgen_client_par_object_get_size (vtable, (GCObject*)arr)), "Array has incorrect size.");
return arr;
UNLOCK_GC;
done:
- if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_ALLOCATIONS))
- mono_profiler_allocation (&str->object);
+ if (G_UNLIKELY (mono_profiler_allocations_enabled ()))
+ MONO_PROFILER_RAISE (gc_allocation, (&str->object));
return str;
}
{
if (!report->count)
return;
- mono_profiler_gc_roots (report->count, report->objects, report->root_types, report->extra_info);
+ MONO_PROFILER_RAISE (gc_roots, ((MonoObject **) report->objects, (MonoProfilerGCRootType *) report->root_types, report->extra_info, report->count));
report->count = 0;
}
/*
 * Reports every definitely-pinned nursery object to the profiler as a
 * GC root with combined PINNING|MISC type, then flushes the batch via
 * notify_gc_roots. The '-'/'+' lines are the patch replacing the legacy
 * event-mask check with the new MONO_PROFILER_ENABLED (gc_roots) test
 * and renaming the root-type constants (MONO_PROFILE_* -> MONO_PROFILER_*).
 */
void
sgen_client_nursery_objects_pinned (void **definitely_pinned, int count)
{
- if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS) {
+ if (MONO_PROFILER_ENABLED (gc_roots)) {
GCRootReport report;
int idx;
report.count = 0;
/* Batch all pinned objects into one report before notifying. */
for (idx = 0; idx < count; ++idx)
- add_profile_gc_root (&report, definitely_pinned [idx], MONO_PROFILE_GC_ROOT_PINNING | MONO_PROFILE_GC_ROOT_MISC, 0);
+ add_profile_gc_root (&report, definitely_pinned [idx], MONO_PROFILER_GC_ROOT_PINNING | MONO_PROFILER_GC_ROOT_MISC, 0);
notify_gc_roots (&report);
}
}
void *obj = queue->data [i];
if (!obj)
continue;
- add_profile_gc_root (&report, obj, MONO_PROFILE_GC_ROOT_FINALIZER, 0);
+ add_profile_gc_root (&report, obj, MONO_PROFILER_GC_ROOT_FINALIZER, 0);
}
notify_gc_roots (&report);
}
/*
 * Root-marking callback for a single object slot: if the slot is
 * non-NULL, records it in the (file-scope, presumably — return type is
 * above this excerpt) root_report with root type OTHER. The patch only
 * renames the constant to the new MONO_PROFILER_* namespace.
 */
single_arg_report_root (MonoObject **obj, void *gc_data)
{
if (*obj)
- add_profile_gc_root (root_report, *obj, MONO_PROFILE_GC_ROOT_OTHER, 0);
+ add_profile_gc_root (root_report, *obj, MONO_PROFILER_GC_ROOT_OTHER, 0);
}
static void
desc >>= ROOT_DESC_TYPE_SHIFT;
while (desc) {
if ((desc & 1) && *start_root) {
- add_profile_gc_root (report, *start_root, MONO_PROFILE_GC_ROOT_OTHER, 0);
+ add_profile_gc_root (report, *start_root, MONO_PROFILER_GC_ROOT_OTHER, 0);
}
desc >>= 1;
start_root++;
void **objptr = start_run;
while (bmap) {
if ((bmap & 1) && *objptr) {
- add_profile_gc_root (report, *objptr, MONO_PROFILE_GC_ROOT_OTHER, 0);
+ add_profile_gc_root (report, *objptr, MONO_PROFILER_GC_ROOT_OTHER, 0);
}
bmap >>= 1;
++objptr;
for (p = start_root; p < end_root; p++) {
if (*p)
- add_profile_gc_root (report, *p, MONO_PROFILE_GC_ROOT_OTHER, 0);
+ add_profile_gc_root (report, *p, MONO_PROFILER_GC_ROOT_OTHER, 0);
}
break;
}
/*
 * Hook invoked at the start of a minor (nursery) collection: when the
 * gc_roots profiler event is enabled, reports registered roots and then
 * the roots held by the two finalizer queues. The patch swaps the legacy
 * event-mask test for MONO_PROFILER_ENABLED (gc_roots); the two checks
 * remain separate, matching the original structure.
 */
void
sgen_client_collecting_minor (SgenPointerQueue *fin_ready_queue, SgenPointerQueue *critical_fin_queue)
{
- if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
+ if (MONO_PROFILER_ENABLED (gc_roots))
report_registered_roots ();
- if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
+
+ if (MONO_PROFILER_ENABLED (gc_roots))
report_finalizer_roots (fin_ready_queue, critical_fin_queue);
}
/*
 * First phase of a major collection: latches whether root profiling is
 * active into the file-scope profile_roots flag (so later phases don't
 * re-query) and resets the accumulated major_root_report. Patch replaces
 * the event-mask query with MONO_PROFILER_ENABLED (gc_roots).
 */
void
sgen_client_collecting_major_1 (void)
{
- profile_roots = mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS;
+ profile_roots = MONO_PROFILER_ENABLED (gc_roots);
memset (&major_root_report, 0, sizeof (GCRootReport));
}
/*
 * Called when a large-object-space object is pinned during a major
 * collection: if root profiling was latched on (see
 * sgen_client_collecting_major_1), records the object as a PINNING|MISC
 * root in the major report. Patch only renames the root-type constants.
 * (Return type is above this excerpt.)
 */
sgen_client_pinned_los_object (GCObject *obj)
{
if (profile_roots)
- add_profile_gc_root (&major_root_report, (char*)obj, MONO_PROFILE_GC_ROOT_PINNING | MONO_PROFILE_GC_ROOT_MISC, 0);
+ add_profile_gc_root (&major_root_report, (char*)obj, MONO_PROFILER_GC_ROOT_PINNING | MONO_PROFILER_GC_ROOT_MISC, 0);
}
void
if (profile_roots)
notify_gc_roots (&major_root_report);
- if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
+ if (MONO_PROFILER_ENABLED (gc_roots))
report_registered_roots ();
}
/*
 * Third phase of a major collection: reports the roots held by the
 * ready/critical finalizer queues when gc_roots profiling is enabled.
 * Patch swaps the legacy event-mask check for MONO_PROFILER_ENABLED.
 */
void
sgen_client_collecting_major_3 (SgenPointerQueue *fin_ready_queue, SgenPointerQueue *critical_fin_queue)
{
- if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
+ if (MONO_PROFILER_ENABLED (gc_roots))
report_finalizer_roots (fin_ready_queue, critical_fin_queue);
}
sgen_pointer_queue_add (&moved_objects_queue, destination);
} else {
if (moved_objects_idx == MOVED_OBJECTS_NUM) {
- mono_profiler_gc_moves (moved_objects, moved_objects_idx);
+ MONO_PROFILER_RAISE (gc_moves, ((MonoObject **) moved_objects, moved_objects_idx));
moved_objects_idx = 0;
}
}
if (moved_objects_idx) {
- mono_profiler_gc_moves (moved_objects, moved_objects_idx);
+ MONO_PROFILER_RAISE (gc_moves, ((MonoObject **) moved_objects, moved_objects_idx));
moved_objects_idx = 0;
}
}
#ifndef DISABLE_PERFCOUNTERS
mono_perfcounters->gc_num_handles++;
#endif
- mono_profiler_gc_handle (MONO_PROFILER_GC_HANDLE_CREATED, handle_type, handle, obj);
+
+ MONO_PROFILER_RAISE (gc_handle_created, (handle, handle_type, obj));
}
void
#ifndef DISABLE_PERFCOUNTERS
mono_perfcounters->gc_num_handles--;
#endif
- mono_profiler_gc_handle (MONO_PROFILER_GC_HANDLE_DESTROYED, handle_type, handle, NULL);
+
+ MONO_PROFILER_RAISE (gc_handle_deleted, (handle, handle_type));
}
void
void
mono_gc_base_cleanup (void)
{
- sgen_thread_pool_shutdown (major_collector.get_sweep_pool ());
-
- sgen_workers_shutdown ();
+ sgen_thread_pool_shutdown ();
// We should have consumed any outstanding moves.
g_assert (sgen_pointer_queue_is_empty (&moved_objects_queue));