code_buffers_ctr,
exception_throws_ctr,
exception_clauses_ctr,
- monitor_contentions_ctr,
- monitor_acquisitions_ctr,
- monitor_failures_ctr,
+ monitor_events_ctr,
thread_starts_ctr,
thread_ends_ctr,
thread_names_ctr,
static gboolean do_heap_walk = FALSE;
static gboolean ignore_heap_events;
-static void
-heap_walk (MonoProfiler *profiler)
-{
- ENTER_LOG (&heap_starts_ctr, logbuffer,
- EVENT_SIZE /* event */
- );
-
- emit_event (logbuffer, TYPE_HEAP_START | TYPE_HEAP);
-
- EXIT_LOG_EXPLICIT (DO_SEND);
-
- mono_gc_walk_heap (0, gc_reference, NULL);
-
- ENTER_LOG (&heap_ends_ctr, logbuffer,
- EVENT_SIZE /* event */
- );
-
- emit_event (logbuffer, TYPE_HEAP_END | TYPE_HEAP);
-
- EXIT_LOG_EXPLICIT (DO_SEND);
-}
-
static void
gc_roots (MonoProfiler *prof, int num, void **objects, int *root_types, uintptr_t *extra_info)
{
mono_gc_collect (mono_gc_max_generation ());
}
+#define ALL_GC_EVENTS_MASK (PROFLOG_GC_MOVES_EVENTS | PROFLOG_GC_ROOT_EVENTS | PROFLOG_GC_EVENTS | PROFLOG_HEAPSHOT_FEATURE)
+
static void
gc_event (MonoProfiler *profiler, MonoGCEvent ev, int generation)
{
EXIT_LOG_EXPLICIT (NO_SEND);
}
-
switch (ev) {
case MONO_GC_EVENT_START:
if (generation == mono_gc_max_generation ())
* committed to the log file before any object move events
* that will be produced during this GC.
*/
- if (ENABLED (PROFLOG_ALLOCATION_EVENTS))
+ if (ENABLED (ALL_GC_EVENTS_MASK))
sync_point (SYNC_POINT_WORLD_STOP);
+
+ /*
+ * All heap events are surrounded by a HEAP_START and a HEAP_END event.
+ * Right now, that's the case for GC Moves, GC Roots or heapshots.
+ */
+ if (ENABLED (PROFLOG_GC_MOVES_EVENTS | PROFLOG_GC_ROOT_EVENTS) || do_heap_walk) {
+ ENTER_LOG (&heap_starts_ctr, logbuffer,
+ EVENT_SIZE /* event */
+ );
+
+ emit_event (logbuffer, TYPE_HEAP_START | TYPE_HEAP);
+
+ EXIT_LOG_EXPLICIT (DO_SEND);
+ }
+
break;
case MONO_GC_EVENT_PRE_START_WORLD:
- if (do_heap_shot && do_heap_walk) {
- heap_walk (profiler);
+ if (do_heap_shot && do_heap_walk)
+ mono_gc_walk_heap (0, gc_reference, NULL);
+ /* Matching HEAP_END to the HEAP_START from above */
+ if (ENABLED (PROFLOG_GC_MOVES_EVENTS | PROFLOG_GC_ROOT_EVENTS) || do_heap_walk) {
+ ENTER_LOG (&heap_ends_ctr, logbuffer,
+ EVENT_SIZE /* event */
+ );
+
+ emit_event (logbuffer, TYPE_HEAP_END | TYPE_HEAP);
+
+ EXIT_LOG_EXPLICIT (DO_SEND);
+ }
+
+ if (do_heap_shot && do_heap_walk) {
do_heap_walk = FALSE;
heapshot_requested = 0;
last_hs_time = current_time ();
}
- break;
- case MONO_GC_EVENT_POST_START_WORLD_UNLOCKED:
+
/*
* Similarly, we must now make sure that any object moves
* written to the GC thread's buffer are flushed. Otherwise,
* object allocation events for certain addresses could come
* after the move events that made those addresses available.
*/
- if (ENABLED (PROFLOG_GC_MOVES_EVENTS))
+ if (ENABLED (ALL_GC_EVENTS_MASK))
sync_point_mark (SYNC_POINT_WORLD_START);
-
+ break;
+ case MONO_GC_EVENT_POST_START_WORLD_UNLOCKED:
/*
* Finally, it is safe to allow other threads to write to
* their buffers again.
char *name = mono_stringify_assembly_name (mono_assembly_get_name (assembly));
int nlen = strlen (name) + 1;
+ MonoImage *image = mono_assembly_get_image (assembly);
ENTER_LOG (&assembly_loads_ctr, logbuffer,
EVENT_SIZE /* event */ +
BYTE_SIZE /* type */ +
LEB128_SIZE /* assembly */ +
+ LEB128_SIZE /* image */ +
nlen /* name */
);
emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
emit_byte (logbuffer, TYPE_ASSEMBLY);
emit_ptr (logbuffer, assembly);
+ emit_ptr (logbuffer, image);
memcpy (logbuffer->cursor, name, nlen);
logbuffer->cursor += nlen;
{
char *name = mono_stringify_assembly_name (mono_assembly_get_name (assembly));
int nlen = strlen (name) + 1;
+ MonoImage *image = mono_assembly_get_image (assembly);
ENTER_LOG (&assembly_unloads_ctr, logbuffer,
EVENT_SIZE /* event */ +
BYTE_SIZE /* type */ +
LEB128_SIZE /* assembly */ +
+ LEB128_SIZE /* image */ +
nlen /* name */
);
emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
emit_byte (logbuffer, TYPE_ASSEMBLY);
emit_ptr (logbuffer, assembly);
+ emit_ptr (logbuffer, image);
memcpy (logbuffer->cursor, name, nlen);
logbuffer->cursor += nlen;
}
static void
-monitor_event (MonoProfiler *profiler, MonoObject *object, MonoProfilerMonitorEvent event)
+monitor_event (MonoProfiler *profiler, MonoObject *object, MonoProfilerMonitorEvent ev)
{
- int do_bt = (nocalls && InterlockedRead (&runtime_inited) && !notraces && event == MONO_PROFILER_MONITOR_CONTENTION) ? TYPE_MONITOR_BT : 0;
+ int do_bt = (nocalls && InterlockedRead (&runtime_inited) && !notraces) ? TYPE_MONITOR_BT : 0;
FrameData data;
if (do_bt)
collect_bt (&data);
- gint32 *ctr;
-
- switch (event) {
- case MONO_PROFILER_MONITOR_CONTENTION:
- ctr = &monitor_contentions_ctr;
- break;
- case MONO_PROFILER_MONITOR_DONE:
- ctr = &monitor_acquisitions_ctr;
- break;
- case MONO_PROFILER_MONITOR_FAIL:
- ctr = &monitor_failures_ctr;
- break;
- default:
- g_assert_not_reached ();
- break;
- }
-
- ENTER_LOG (ctr, logbuffer,
+ ENTER_LOG (&monitor_events_ctr, logbuffer,
EVENT_SIZE /* event */ +
+ BYTE_SIZE /* ev */ +
LEB128_SIZE /* object */ +
(do_bt ? (
LEB128_SIZE /* count */ +
) : 0)
);
- emit_event (logbuffer, (event << 4) | do_bt | TYPE_MONITOR);
+ emit_event (logbuffer, do_bt | TYPE_MONITOR);
+ emit_byte (logbuffer, ev);
emit_obj (logbuffer, object);
if (do_bt)
register_counter ("Event: Code buffers", &code_buffers_ctr);
register_counter ("Event: Exception throws", &exception_throws_ctr);
register_counter ("Event: Exception clauses", &exception_clauses_ctr);
- register_counter ("Event: Monitor contentions", &monitor_contentions_ctr);
- register_counter ("Event: Monitor acquisitions", &monitor_acquisitions_ctr);
- register_counter ("Event: Monitor failures", &monitor_failures_ctr);
+ register_counter ("Event: Monitor events", &monitor_events_ctr);
register_counter ("Event: Thread starts", &thread_starts_ctr);
register_counter ("Event: Thread ends", &thread_ends_ctr);
register_counter ("Event: Thread names", &thread_names_ctr);