{
MonoObject *obj = sgen_alloc_obj (vtable, size);
- if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_ALLOCATIONS)) {
- if (obj)
- mono_profiler_allocation (obj);
- }
+ if (G_UNLIKELY (mono_profiler_allocations_enabled ()) && obj)
+ MONO_PROFILER_RAISE (gc_allocation, (obj));
return obj;
}
{
MonoObject *obj = sgen_alloc_obj_pinned (vtable, size);
- if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_ALLOCATIONS)) {
- if (obj)
- mono_profiler_allocation (obj);
- }
+ if (G_UNLIKELY (mono_profiler_allocations_enabled ()) && obj)
+ MONO_PROFILER_RAISE (gc_allocation, (obj));
return obj;
}
{
MonoObject *obj = sgen_alloc_obj_mature (vtable, size);
- if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_ALLOCATIONS)) {
- if (obj)
- mono_profiler_allocation (obj);
- }
+ if (G_UNLIKELY (mono_profiler_allocations_enabled ()) && obj)
+ MONO_PROFILER_RAISE (gc_allocation, (obj));
return obj;
}
static MonoMethod* alloc_method_cache [ATYPE_NUM];
static MonoMethod* slowpath_alloc_method_cache [ATYPE_NUM];
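+/* Cache of managed allocator variants that also raise the allocation profiler event. */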
+static MonoMethod* profiler_alloc_method_cache [ATYPE_NUM];
static gboolean use_managed_allocator = TRUE;
#ifdef MANAGED_ALLOCATION
{
int p_var, size_var, real_size_var, thread_var G_GNUC_UNUSED;
gboolean slowpath = variant == MANAGED_ALLOCATOR_SLOW_PATH;
+ gboolean profiler = variant == MANAGED_ALLOCATOR_PROFILER;
guint32 fastpath_branch, max_size_branch, no_oom_branch;
MonoMethodBuilder *mb;
MonoMethod *res;
mono_register_jit_icall (mono_gc_alloc_obj, "mono_gc_alloc_obj", mono_create_icall_signature ("object ptr int"), FALSE);
mono_register_jit_icall (mono_gc_alloc_vector, "mono_gc_alloc_vector", mono_create_icall_signature ("object ptr int int"), FALSE);
mono_register_jit_icall (mono_gc_alloc_string, "mono_gc_alloc_string", mono_create_icall_signature ("object ptr int int32"), FALSE);
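+ /* Lets the profiler variant of the managed allocator raise the allocation event through an icall. */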
+ mono_register_jit_icall (mono_profiler_raise_gc_allocation, "mono_profiler_raise_gc_allocation", mono_create_icall_signature ("void object"), FALSE);
registered = TRUE;
}
if (atype == ATYPE_SMALL) {
- name = slowpath ? "SlowAllocSmall" : "AllocSmall";
+ name = slowpath ? "SlowAllocSmall" : (profiler ? "ProfilerAllocSmall" : "AllocSmall");
} else if (atype == ATYPE_NORMAL) {
- name = slowpath ? "SlowAlloc" : "Alloc";
+ name = slowpath ? "SlowAlloc" : (profiler ? "ProfilerAlloc" : "Alloc");
} else if (atype == ATYPE_VECTOR) {
- name = slowpath ? "SlowAllocVector" : "AllocVector";
+ name = slowpath ? "SlowAllocVector" : (profiler ? "ProfilerAllocVector" : "AllocVector");
} else if (atype == ATYPE_STRING) {
- name = slowpath ? "SlowAllocString" : "AllocString";
+ name = slowpath ? "SlowAllocString" : (profiler ? "ProfilerAllocString" : "AllocString");
} else {
g_assert_not_reached ();
}
mono_mb_emit_ldloc (mb, p_var);
done:
+
+ /*
+ * It's important that we do this outside of the critical region as we
+ * will be invoking arbitrary code.
+ */
+ if (profiler) {
+ /*
+ * if (G_UNLIKELY (*&mono_profiler_state.gc_allocation_count)) {
+ * mono_profiler_raise_gc_allocation (p);
+ * }
+ */
+
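+ /*
+ * The counter tells us whether any profiler has asked for allocation
+ * events. CEE_MONO_NOT_TAKEN marks the raise path as cold, and CEE_DUP
+ * copies the object reference so the icall consumes the copy while the
+ * original stays on the stack to be returned.
+ */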
+ mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
+ mono_mb_emit_byte (mb, CEE_MONO_LDPTR_PROFILER_ALLOCATION_COUNT);
+ mono_mb_emit_byte (mb, CEE_LDIND_U4);
+
+ int prof_br = mono_mb_emit_short_branch (mb, CEE_BRFALSE_S);
+
+ mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
+ mono_mb_emit_byte (mb, CEE_MONO_NOT_TAKEN);
+ mono_mb_emit_byte (mb, CEE_DUP);
+ mono_mb_emit_icall (mb, mono_profiler_raise_gc_allocation);
+
+ mono_mb_patch_short_branch (mb, prof_br);
+ }
+
mono_mb_emit_byte (mb, CEE_RET);
#endif
mono_gc_get_managed_allocator (MonoClass *klass, gboolean for_box, gboolean known_instance_size)
{
#ifdef MANAGED_ALLOCATION
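+ /*
+ * When allocation profiling is enabled, hand out the profiler variant
+ * of the managed allocator, which raises the event itself; the old
+ * profiler API forced a fallback to the unmanaged path here.
+ */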
+ ManagedAllocatorVariant variant = mono_profiler_allocations_enabled () ?
+ MANAGED_ALLOCATOR_PROFILER : MANAGED_ALLOCATOR_REGULAR;
+
if (collect_before_allocs)
return NULL;
if (klass->instance_size > tlab_size)
return NULL;
if (klass->rank)
return NULL;
- if (mono_profiler_get_events () & MONO_PROFILE_ALLOCATIONS)
- return NULL;
if (klass->byval_arg.type == MONO_TYPE_STRING)
- return mono_gc_get_managed_allocator_by_type (ATYPE_STRING, MANAGED_ALLOCATOR_REGULAR);
+ return mono_gc_get_managed_allocator_by_type (ATYPE_STRING, variant);
/* Generic classes have dynamic fields and can go above MAX_SMALL_OBJ_SIZE. */
if (known_instance_size)
- return mono_gc_get_managed_allocator_by_type (ATYPE_SMALL, MANAGED_ALLOCATOR_REGULAR);
+ return mono_gc_get_managed_allocator_by_type (ATYPE_SMALL, variant);
else
- return mono_gc_get_managed_allocator_by_type (ATYPE_NORMAL, MANAGED_ALLOCATOR_REGULAR);
+ return mono_gc_get_managed_allocator_by_type (ATYPE_NORMAL, variant);
#else
return NULL;
#endif
#ifdef MANAGED_ALLOCATION
if (klass->rank != 1)
return NULL;
- if (mono_profiler_get_events () & MONO_PROFILE_ALLOCATIONS)
- return NULL;
if (has_per_allocation_action)
return NULL;
g_assert (!mono_class_has_finalizer (klass) && !mono_class_is_marshalbyref (klass));
- return mono_gc_get_managed_allocator_by_type (ATYPE_VECTOR, MANAGED_ALLOCATOR_REGULAR);
+ return mono_gc_get_managed_allocator_by_type (ATYPE_VECTOR, mono_profiler_allocations_enabled () ?
+ MANAGED_ALLOCATOR_PROFILER : MANAGED_ALLOCATOR_REGULAR);
#else
return NULL;
#endif
MonoMethod *res;
MonoMethod **cache;
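+ /* The slow-path variant stays available even when the managed allocator is disabled. */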
- if (variant == MANAGED_ALLOCATOR_REGULAR && !use_managed_allocator)
+ if (variant != MANAGED_ALLOCATOR_SLOW_PATH && !use_managed_allocator)
return NULL;
switch (variant) {
case MANAGED_ALLOCATOR_REGULAR: cache = alloc_method_cache; break;
case MANAGED_ALLOCATOR_SLOW_PATH: cache = slowpath_alloc_method_cache; break;
+ case MANAGED_ALLOCATOR_PROFILER: cache = profiler_alloc_method_cache; break;
default: g_assert_not_reached (); break;
}
int i;
for (i = 0; i < ATYPE_NUM; ++i)
- if (method == alloc_method_cache [i] || method == slowpath_alloc_method_cache [i])
+ if (method == alloc_method_cache [i] || method == slowpath_alloc_method_cache [i] || method == profiler_alloc_method_cache [i])
return TRUE;
return FALSE;
}
int i;
for (i = 0; i < ATYPE_NUM; ++i)
- if (alloc_method_cache [i] || slowpath_alloc_method_cache [i])
+ if (alloc_method_cache [i] || slowpath_alloc_method_cache [i] || profiler_alloc_method_cache [i])
return TRUE;
return FALSE;
}
UNLOCK_GC;
done:
- if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_ALLOCATIONS))
- mono_profiler_allocation (&arr->obj);
+ if (G_UNLIKELY (mono_profiler_allocations_enabled ()))
+ MONO_PROFILER_RAISE (gc_allocation, (&arr->obj));
SGEN_ASSERT (6, SGEN_ALIGN_UP (size) == SGEN_ALIGN_UP (sgen_client_par_object_get_size (vtable, (GCObject*)arr)), "Vector has incorrect size.");
return arr;
UNLOCK_GC;
done:
- if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_ALLOCATIONS))
- mono_profiler_allocation (&arr->obj);
+ if (G_UNLIKELY (mono_profiler_allocations_enabled ()))
+ MONO_PROFILER_RAISE (gc_allocation, (&arr->obj));
SGEN_ASSERT (6, SGEN_ALIGN_UP (size) == SGEN_ALIGN_UP (sgen_client_par_object_get_size (vtable, (GCObject*)arr)), "Array has incorrect size.");
return arr;
UNLOCK_GC;
done:
- if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_ALLOCATIONS))
- mono_profiler_allocation (&str->object);
+ if (G_UNLIKELY (mono_profiler_allocations_enabled ()))
+ MONO_PROFILER_RAISE (gc_allocation, (&str->object));
return str;
}
{
if (!report->count)
return;
- mono_profiler_gc_roots (report->count, report->objects, report->root_types, report->extra_info);
+ MONO_PROFILER_RAISE (gc_roots, ((MonoObject **) report->objects, (MonoProfilerGCRootType *) report->root_types, report->extra_info, report->count));
report->count = 0;
}
void
sgen_client_nursery_objects_pinned (void **definitely_pinned, int count)
{
- if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS) {
+ if (MONO_PROFILER_ENABLED (gc_roots)) {
GCRootReport report;
int idx;
report.count = 0;
for (idx = 0; idx < count; ++idx)
- add_profile_gc_root (&report, definitely_pinned [idx], MONO_PROFILE_GC_ROOT_PINNING | MONO_PROFILE_GC_ROOT_MISC, 0);
+ add_profile_gc_root (&report, definitely_pinned [idx], MONO_PROFILER_GC_ROOT_PINNING | MONO_PROFILER_GC_ROOT_MISC, 0);
notify_gc_roots (&report);
}
}
void *obj = queue->data [i];
if (!obj)
continue;
- add_profile_gc_root (&report, obj, MONO_PROFILE_GC_ROOT_FINALIZER, 0);
+ add_profile_gc_root (&report, obj, MONO_PROFILER_GC_ROOT_FINALIZER, 0);
}
notify_gc_roots (&report);
}
single_arg_report_root (MonoObject **obj, void *gc_data)
{
if (*obj)
- add_profile_gc_root (root_report, *obj, MONO_PROFILE_GC_ROOT_OTHER, 0);
+ add_profile_gc_root (root_report, *obj, MONO_PROFILER_GC_ROOT_OTHER, 0);
}
static void
desc >>= ROOT_DESC_TYPE_SHIFT;
while (desc) {
if ((desc & 1) && *start_root) {
- add_profile_gc_root (report, *start_root, MONO_PROFILE_GC_ROOT_OTHER, 0);
+ add_profile_gc_root (report, *start_root, MONO_PROFILER_GC_ROOT_OTHER, 0);
}
desc >>= 1;
start_root++;
void **objptr = start_run;
while (bmap) {
if ((bmap & 1) && *objptr) {
- add_profile_gc_root (report, *objptr, MONO_PROFILE_GC_ROOT_OTHER, 0);
+ add_profile_gc_root (report, *objptr, MONO_PROFILER_GC_ROOT_OTHER, 0);
}
bmap >>= 1;
++objptr;
for (p = start_root; p < end_root; p++) {
if (*p)
- add_profile_gc_root (report, *p, MONO_PROFILE_GC_ROOT_OTHER, 0);
+ add_profile_gc_root (report, *p, MONO_PROFILER_GC_ROOT_OTHER, 0);
}
break;
}
void
sgen_client_collecting_minor (SgenPointerQueue *fin_ready_queue, SgenPointerQueue *critical_fin_queue)
{
- if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
+ if (MONO_PROFILER_ENABLED (gc_roots))
report_registered_roots ();
- if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
+
+ if (MONO_PROFILER_ENABLED (gc_roots))
report_finalizer_roots (fin_ready_queue, critical_fin_queue);
}
void
sgen_client_collecting_major_1 (void)
{
- profile_roots = mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS;
+ profile_roots = MONO_PROFILER_ENABLED (gc_roots);
memset (&major_root_report, 0, sizeof (GCRootReport));
}
sgen_client_pinned_los_object (GCObject *obj)
{
if (profile_roots)
- add_profile_gc_root (&major_root_report, (char*)obj, MONO_PROFILE_GC_ROOT_PINNING | MONO_PROFILE_GC_ROOT_MISC, 0);
+ add_profile_gc_root (&major_root_report, (char*)obj, MONO_PROFILER_GC_ROOT_PINNING | MONO_PROFILER_GC_ROOT_MISC, 0);
}
void
if (profile_roots)
notify_gc_roots (&major_root_report);
- if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
+ if (MONO_PROFILER_ENABLED (gc_roots))
report_registered_roots ();
}
void
sgen_client_collecting_major_3 (SgenPointerQueue *fin_ready_queue, SgenPointerQueue *critical_fin_queue)
{
- if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
+ if (MONO_PROFILER_ENABLED (gc_roots))
report_finalizer_roots (fin_ready_queue, critical_fin_queue);
}
sgen_pointer_queue_add (&moved_objects_queue, destination);
} else {
if (moved_objects_idx == MOVED_OBJECTS_NUM) {
- mono_profiler_gc_moves (moved_objects, moved_objects_idx);
+ MONO_PROFILER_RAISE (gc_moves, ((MonoObject **) moved_objects, moved_objects_idx));
moved_objects_idx = 0;
}
}
if (moved_objects_idx) {
- mono_profiler_gc_moves (moved_objects, moved_objects_idx);
+ MONO_PROFILER_RAISE (gc_moves, ((MonoObject **) moved_objects, moved_objects_idx));
moved_objects_idx = 0;
}
}
{
scan_area_arg_start = start_nursery;
scan_area_arg_end = end_nursery;
+#ifdef HOST_WASM
+ // Under WASM we don't scan thread stacks, and we can't trust the values we would find there anyway.
+ return;
+#endif
FOREACH_THREAD (info) {
int skip_reason = 0;
return mono_object_domain (obj);
}
+/**
+ * mono_gchandle_new:
+ * \param obj managed object to get a handle for
+ * \param pinned whether the object should be pinned
+ * This returns a handle that wraps the object. Handles are used to keep a
+ * reference to a managed object from the unmanaged world and to prevent the
+ * object from being collected.
+ *
+ * If \p pinned is false, the address of the object cannot be obtained. If it
+ * is true, the address can be obtained, and the object is also pinned so
+ * that a moving garbage collector cannot relocate it.
+ *
+ * \returns a handle that can be used to access the object from unmanaged code.
+ */
+guint32
+mono_gchandle_new (MonoObject *obj, gboolean pinned)
+{
+ return sgen_gchandle_new (obj, pinned);
+}
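+
+/*
+ * A minimal usage sketch from native code:
+ *
+ *     guint32 handle = mono_gchandle_new (obj, TRUE); // pinned
+ *     // ... obj cannot be moved or collected while the handle is alive ...
+ *     mono_gchandle_free (handle);
+ */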
+
+/**
+ * mono_gchandle_new_weakref:
+ * \param obj managed object to get a handle for
+ * \param track_resurrection Determines how long to track the object: if TRUE, the object is tracked after finalization; if FALSE, it is only tracked up until the point of finalization.
+ *
+ * This returns a weak handle that wraps the object. Weak handles are
+ * used to keep a reference to a managed object from the unmanaged
+ * world. Unlike a handle returned by \c mono_gchandle_new, the object
+ * can be reclaimed by the garbage collector; in that case, the value
+ * of the GCHandle is set to zero.
+ *
+ * If \p track_resurrection is TRUE, the object will be tracked through
+ * finalization; if the object is resurrected during the execution of
+ * the finalizer, the returned weakref will continue to hold a
+ * reference to it. If \p track_resurrection is FALSE, the weak
+ * reference's target will become NULL as soon as the object is passed
+ * on to the finalizer.
+ *
+ * \returns a handle that can be used to access the object from
+ * unmanaged code.
+ */
+guint32
+mono_gchandle_new_weakref (GCObject *obj, gboolean track_resurrection)
+{
+ return sgen_gchandle_new_weakref (obj, track_resurrection);
+}
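+
+/*
+ * A minimal sketch of the weak-handle pattern:
+ *
+ *     guint32 weak = mono_gchandle_new_weakref (obj, FALSE);
+ *     // ... later ...
+ *     MonoObject *target = mono_gchandle_get_target (weak);
+ *     if (!target)
+ *         ; // the object was collected, so the handle now yields NULL
+ *     mono_gchandle_free (weak);
+ */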
+
/**
* mono_gchandle_is_in_domain:
* \param gchandle a GCHandle's handle.
return domain->domain_id == gchandle_domain->domain_id;
}
+/**
+ * mono_gchandle_free:
+ * \param gchandle a GCHandle's handle.
+ *
+ * Frees the \p gchandle handle. If there are no outstanding
+ * references to the wrapped object, the garbage collector can
+ * reclaim its memory.
+ */
+void
+mono_gchandle_free (guint32 gchandle)
+{
+ sgen_gchandle_free (gchandle);
+}
+
/**
* mono_gchandle_free_domain:
* \param unloading domain that is unloading
{
}
+/**
+ * mono_gchandle_get_target:
+ * \param gchandle a GCHandle's handle.
+ *
+ * The handle was previously created by calling \c mono_gchandle_new or
+ * \c mono_gchandle_new_weakref.
+ *
+ * \returns the \c MonoObject* represented by the handle, or
+ * NULL if the handle is a weakref and its object has been collected.
+ */
+MonoObject*
+mono_gchandle_get_target (guint32 gchandle)
+{
+ return sgen_gchandle_get_target (gchandle);
+}
+
static gpointer
null_link_if_in_domain (gpointer hidden, GCHandleType handle_type, int max_generation, gpointer user)
{
sgen_client_gchandle_created (int handle_type, GCObject *obj, guint32 handle)
{
#ifndef DISABLE_PERFCOUNTERS
- mono_perfcounters->gc_num_handles++;
+ InterlockedIncrement (&mono_perfcounters->gc_num_handles);
#endif
- mono_profiler_gc_handle (MONO_PROFILER_GC_HANDLE_CREATED, handle_type, handle, obj);
+
+ MONO_PROFILER_RAISE (gc_handle_created, (handle, handle_type, obj));
}
void
sgen_client_gchandle_destroyed (int handle_type, guint32 handle)
{
#ifndef DISABLE_PERFCOUNTERS
- mono_perfcounters->gc_num_handles--;
+ InterlockedDecrement (&mono_perfcounters->gc_num_handles);
#endif
- mono_profiler_gc_handle (MONO_PROFILER_GC_HANDLE_DESTROYED, handle_type, handle, NULL);
+
+ MONO_PROFILER_RAISE (gc_handle_deleted, (handle, handle_type));
}
void
*/
void
-sgen_client_degraded_allocation (size_t size)
+sgen_client_degraded_allocation (void)
{
- static int last_major_gc_warned = -1;
- static int num_degraded = 0;
+ static gint32 last_major_gc_warned = -1;
+ static gint32 num_degraded = 0;
- if (last_major_gc_warned < (int)gc_stats.major_gc_count) {
- ++num_degraded;
- if (num_degraded == 1 || num_degraded == 3)
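+ /* Degraded allocation can happen on multiple threads at once, hence the interlocked bookkeeping. */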
+ gint32 major_gc_count = InterlockedRead (&gc_stats.major_gc_count);
+ if (InterlockedRead (&last_major_gc_warned) < major_gc_count) {
+ gint32 num = InterlockedIncrement (&num_degraded);
+ if (num == 1 || num == 3)
mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_GC, "Warning: Degraded allocation. Consider increasing nursery-size if the warning persists.");
- else if (num_degraded == 10)
+ else if (num == 10)
mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_GC, "Warning: Repeated degraded allocation. Consider increasing nursery-size.");
- last_major_gc_warned = gc_stats.major_gc_count;
+ InterlockedWrite (&last_major_gc_warned, major_gc_count);
}
}