} NameAndMask;
static NameAndMask event_list[] = {
- { "domain", PROFLOG_DOMAIN_EVENTS },
- { "assembly", PROFLOG_ASSEMBLY_EVENTS },
- { "module", PROFLOG_MODULE_EVENTS },
- { "class", PROFLOG_CLASS_EVENTS },
- { "jit", PROFLOG_JIT_COMPILATION_EVENTS },
{ "exception", PROFLOG_EXCEPTION_EVENTS },
- { "gcalloc", PROFLOG_ALLOCATION_EVENTS },
- { "gc", PROFLOG_GC_EVENTS },
- { "thread", PROFLOG_THREAD_EVENTS },
- { "calls", PROFLOG_CALL_EVENTS },
- //{ "inscov", PROFLOG_INS_COVERAGE_EVENTS }, //this is a profiler API event, but there's no actual event for us to emit here
- //{ "sampling", PROFLOG_SAMPLING_EVENTS }, //it makes no sense to enable/disable this event by itself
{ "monitor", PROFLOG_MONITOR_EVENTS },
- { "gcmove", PROFLOG_GC_MOVES_EVENTS },
+ { "gc", PROFLOG_GC_EVENTS },
+ { "gcalloc", PROFLOG_GC_ALLOCATION_EVENTS },
+ { "gcmove", PROFLOG_GC_MOVE_EVENTS },
{ "gcroot", PROFLOG_GC_ROOT_EVENTS },
- { "context", PROFLOG_CONTEXT_EVENTS },
+ { "gchandle", PROFLOG_GC_HANDLE_EVENTS },
{ "finalization", PROFLOG_FINALIZATION_EVENTS },
{ "counter", PROFLOG_COUNTER_EVENTS },
- { "gchandle", PROFLOG_GC_HANDLE_EVENTS },
+ { "jit", PROFLOG_JIT_EVENTS },
- { "typesystem", PROFLOG_TYPELOADING_ALIAS },
- { "coverage", PROFLOG_CODECOV_ALIAS },
- //{ "sample", PROFLOG_PERF_SAMPLING_ALIAS }, //takes args, explicitly handles
- { "alloc", PROFLOG_GC_ALLOC_ALIAS },
- //{ "heapshot", PROFLOG_HEAPSHOT_ALIAS }, //takes args, explicitly handled
+ { "alloc", PROFLOG_ALLOC_ALIAS },
{ "legacy", PROFLOG_LEGACY_ALIAS },
};
config->do_report = TRUE;
} else if (match_option (arg, "debug", NULL)) {
config->do_debug = TRUE;
- } else if (match_option (arg, "sampling-real", NULL)) {
- config->sampling_mode = MONO_PROFILER_SAMPLE_MODE_REAL;
- } else if (match_option (arg, "sampling-process", NULL)) {
- config->sampling_mode = MONO_PROFILER_SAMPLE_MODE_PROCESS;
} else if (match_option (arg, "heapshot", &val)) {
- config->enable_mask |= PROFLOG_HEAPSHOT_ALIAS;
set_hsmode (config, val);
+ if (config->hs_mode != MONO_PROFILER_HEAPSHOT_NONE)
+ config->enable_mask |= PROFLOG_HEAPSHOT_ALIAS;
} else if (match_option (arg, "sample", &val)) {
set_sample_freq (config, val);
- if (config->sample_freq)
- config->enable_mask |= PROFLOG_PERF_SAMPLING_ALIAS;
+ config->sampling_mode = MONO_PROFILER_SAMPLE_MODE_PROCESS;
+ config->enable_mask |= PROFLOG_SAMPLE_EVENTS;
+ } else if (match_option (arg, "sample-real", &val)) {
+ set_sample_freq (config, val);
+ config->sampling_mode = MONO_PROFILER_SAMPLE_MODE_REAL;
+ config->enable_mask |= PROFLOG_SAMPLE_EVENTS;
+ } else if (match_option (arg, "calls", NULL)) {
+ config->enter_leave = TRUE;
+ } else if (match_option (arg, "coverage", NULL)) {
+ config->collect_coverage = TRUE;
} else if (match_option (arg, "zip", NULL)) {
config->use_zip = TRUE;
} else if (match_option (arg, "output", &val)) {
int num_frames = strtoul (val, &end, 10);
if (num_frames > MAX_FRAMES)
num_frames = MAX_FRAMES;
- config->notraces = num_frames == 0;
config->num_frames = num_frames;
} else if (match_option (arg, "maxsamples", &val)) {
char *end;
//XXX change this to header constants
config->max_allocated_sample_hits = mono_cpu_count () * 1000;
- config->sampling_mode = MONO_PROFILER_SAMPLE_MODE_PROCESS;
+ config->sampling_mode = MONO_PROFILER_SAMPLE_MODE_NONE;
config->sample_freq = 100;
config->max_call_depth = 100;
config->num_frames = MAX_FRAMES;
static void
set_hsmode (ProfilerConfig *config, const char* val)
{
- char *end;
- unsigned int count;
- if (!val)
+ if (!val) {
+ config->hs_mode = MONO_PROFILER_HEAPSHOT_MAJOR;
return;
+ }
+
if (strcmp (val, "ondemand") == 0) {
- config->hs_mode_ondemand = TRUE;
+ config->hs_mode = MONO_PROFILER_HEAPSHOT_ON_DEMAND;
return;
}
- count = strtoul (val, &end, 10);
+ char *end;
+
+ unsigned int count = strtoul (val, &end, 10);
+
if (val == end) {
usage ();
return;
}
- if (strcmp (end, "ms") == 0)
- config->hs_mode_ms = count;
- else if (strcmp (end, "gc") == 0)
- config->hs_mode_gc = count;
- else
+ if (strcmp (end, "ms") == 0) {
+ config->hs_mode = MONO_PROFILER_HEAPSHOT_X_MS;
+ config->hs_freq_ms = count;
+ } else if (strcmp (end, "gc") == 0) {
+ config->hs_mode = MONO_PROFILER_HEAPSHOT_X_GC;
+ config->hs_freq_gc = count;
+ } else
usage ();
}
for (int i = 0; i < G_N_ELEMENTS (event_list); i++)
mono_profiler_printf ("\t %s", event_list [i].event_name);
- mono_profiler_printf ("\t[no]typesystem enable/disable type system related events such as class and assembly loading");
mono_profiler_printf ("\t[no]alloc enable/disable recording allocation info");
- mono_profiler_printf ("\t[no]calls enable/disable recording enter/leave method events (very heavy)");
mono_profiler_printf ("\t[no]legacy enable/disable pre mono 5.4 default profiler events");
- mono_profiler_printf ("\tsample[=FREQ] enable/disable statistical sampling of threads (FREQ in Hz, 100 by default)");
+ mono_profiler_printf ("\tsample[-real][=FREQ] enable/disable statistical sampling of threads");
+ mono_profiler_printf ("\t FREQ in Hz, 100 by default");
+ mono_profiler_printf ("\t the -real variant uses wall clock time instead of process time");
mono_profiler_printf ("\theapshot[=MODE] record heapshot info (by default at each major collection)");
mono_profiler_printf ("\t MODE: every XXms milliseconds, every YYgc collections, ondemand");
- mono_profiler_printf ("\t[no]coverage enable/disable collection of code coverage data");
+ mono_profiler_printf ("\tcalls enable recording enter/leave method events (very heavy)");
+ mono_profiler_printf ("\tcoverage enable collection of code coverage data");
mono_profiler_printf ("\tcovfilter=ASSEMBLY add ASSEMBLY to the code coverage filters");
mono_profiler_printf ("\t prefix a + to include the assembly or a - to exclude it");
mono_profiler_printf ("\t e.g. covfilter=-mscorlib");
BinaryObject *binary_objects;
- gboolean heapshot_requested;
+ volatile gint32 heapshot_requested;
guint64 gc_count;
guint64 last_hs_time;
gboolean do_heap_walk;
- gboolean ignore_heap_events;
mono_mutex_t counters_mutex;
MonoCounterAgent *counters;
InterlockedIncrement (&thread_ends_ctr);
- if (ENABLED (PROFLOG_THREAD_EVENTS)) {
- LogBuffer *buf = ensure_logbuf_unsafe (thread,
- EVENT_SIZE /* event */ +
- BYTE_SIZE /* type */ +
- LEB128_SIZE /* tid */
- );
+ LogBuffer *buf = ensure_logbuf_unsafe (thread,
+ EVENT_SIZE /* event */ +
+ BYTE_SIZE /* type */ +
+ LEB128_SIZE /* tid */
+ );
- emit_event (buf, TYPE_END_UNLOAD | TYPE_METADATA);
- emit_byte (buf, TYPE_THREAD);
- emit_ptr (buf, (void *) thread->node.key);
- }
+ emit_event (buf, TYPE_END_UNLOAD | TYPE_METADATA);
+ emit_byte (buf, TYPE_THREAD);
+ emit_ptr (buf, (void *) thread->node.key);
}
send_buffer (thread);
static void
gc_roots (MonoProfiler *prof, MonoObject *const *objects, const MonoProfilerGCRootType *root_types, const uintptr_t *extra_info, uint64_t num)
{
- if (log_profiler.ignore_heap_events)
- return;
-
ENTER_LOG (&heap_roots_ctr, logbuffer,
EVENT_SIZE /* event */ +
LEB128_SIZE /* num */ +
static void
trigger_on_demand_heapshot (void)
{
- if (log_profiler.heapshot_requested)
+ if (InterlockedRead (&log_profiler.heapshot_requested))
mono_gc_collect (mono_gc_max_generation ());
}
-#define ALL_GC_EVENTS_MASK (PROFLOG_GC_MOVES_EVENTS | PROFLOG_GC_ROOT_EVENTS | PROFLOG_GC_EVENTS | PROFLOG_HEAPSHOT_FEATURE)
+#define ALL_GC_EVENTS_MASK (PROFLOG_GC_EVENTS | PROFLOG_GC_MOVE_EVENTS | PROFLOG_GC_ROOT_EVENTS)
static void
gc_event (MonoProfiler *profiler, MonoProfilerGCEvent ev, uint32_t generation)
{
- if (ev == MONO_GC_EVENT_START) {
- uint64_t now = current_time ();
-
- if (log_config.hs_mode_ms && (now - log_profiler.last_hs_time) / 1000 * 1000 >= log_config.hs_mode_ms)
- log_profiler.do_heap_walk = TRUE;
- else if (log_config.hs_mode_gc && !(log_profiler.gc_count % log_config.hs_mode_gc))
- log_profiler.do_heap_walk = TRUE;
- else if (log_config.hs_mode_ondemand)
- log_profiler.do_heap_walk = log_profiler.heapshot_requested;
- else if (!log_config.hs_mode_ms && !log_config.hs_mode_gc && generation == mono_gc_max_generation ())
- log_profiler.do_heap_walk = TRUE;
-
- //If using heapshot, ignore events for collections we don't care
- if (ENABLED (PROFLOG_HEAPSHOT_FEATURE)) {
- // Ignore events generated during the collection itself (IE GC ROOTS)
- log_profiler.ignore_heap_events = !log_profiler.do_heap_walk;
- }
- }
-
-
if (ENABLED (PROFLOG_GC_EVENTS)) {
ENTER_LOG (&gc_events_ctr, logbuffer,
EVENT_SIZE /* event */ +
if (generation == mono_gc_max_generation ())
log_profiler.gc_count++;
+ switch (log_config.hs_mode) {
+ case MONO_PROFILER_HEAPSHOT_NONE:
+ log_profiler.do_heap_walk = FALSE;
+ break;
+ case MONO_PROFILER_HEAPSHOT_MAJOR:
+ log_profiler.do_heap_walk = generation == mono_gc_max_generation ();
+ break;
+ case MONO_PROFILER_HEAPSHOT_ON_DEMAND:
+ log_profiler.do_heap_walk = InterlockedRead (&log_profiler.heapshot_requested);
+ break;
+ case MONO_PROFILER_HEAPSHOT_X_GC:
+ log_profiler.do_heap_walk = !(log_profiler.gc_count % log_config.hs_freq_gc);
+ break;
+ case MONO_PROFILER_HEAPSHOT_X_MS:
+ log_profiler.do_heap_walk = (current_time () - log_profiler.last_hs_time) / 1000 * 1000 >= log_config.hs_freq_ms;
+ break;
+ default:
+ g_assert_not_reached ();
+ }
+
+ if (ENABLED (PROFLOG_GC_ROOT_EVENTS) && log_profiler.do_heap_walk)
+ mono_profiler_set_gc_roots_callback (log_profiler.handle, gc_roots);
+
break;
case MONO_GC_EVENT_PRE_STOP_WORLD_LOCKED:
/*
if (ENABLED (ALL_GC_EVENTS_MASK))
sync_point (SYNC_POINT_WORLD_STOP);
- /*
- * All heap events are surrounded by a HEAP_START and a HEAP_ENV event.
- * Right now, that's the case for GC Moves, GC Roots or heapshots.
- */
- if (ENABLED (PROFLOG_GC_MOVES_EVENTS | PROFLOG_GC_ROOT_EVENTS) || log_profiler.do_heap_walk) {
+ // Surround heapshots with HEAP_START/HEAP_END events.
+ if (log_profiler.do_heap_walk) {
ENTER_LOG (&heap_starts_ctr, logbuffer,
EVENT_SIZE /* event */
);
break;
case MONO_GC_EVENT_PRE_START_WORLD:
- if (ENABLED (PROFLOG_HEAPSHOT_FEATURE) && log_profiler.do_heap_walk)
+ mono_profiler_set_gc_roots_callback (log_profiler.handle, NULL);
+
+ if (log_profiler.do_heap_walk) {
mono_gc_walk_heap (0, gc_reference, NULL);
- /* Matching HEAP_END to the HEAP_START from above */
- if (ENABLED (PROFLOG_GC_MOVES_EVENTS | PROFLOG_GC_ROOT_EVENTS) || log_profiler.do_heap_walk) {
ENTER_LOG (&heap_ends_ctr, logbuffer,
EVENT_SIZE /* event */
);
emit_event (logbuffer, TYPE_HEAP_END | TYPE_HEAP);
EXIT_LOG;
- }
- if (ENABLED (PROFLOG_HEAPSHOT_FEATURE) && log_profiler.do_heap_walk) {
log_profiler.do_heap_walk = FALSE;
- log_profiler.heapshot_requested = FALSE;
log_profiler.last_hs_time = current_time ();
+
+ InterlockedWrite (&log_profiler.heapshot_requested, 0);
}
/*
static void
gc_alloc (MonoProfiler *prof, MonoObject *obj)
{
- int do_bt = (!ENABLED (PROFLOG_CALL_EVENTS) && InterlockedRead (&log_profiler.runtime_inited) && !log_config.notraces) ? TYPE_ALLOC_BT : 0;
+ int do_bt = (!log_config.enter_leave && InterlockedRead (&log_profiler.runtime_inited) && log_config.num_frames) ? TYPE_ALLOC_BT : 0;
FrameData data;
uintptr_t len = mono_object_get_size (obj);
/* account for object alignment in the heap */
static void
gc_handle (MonoProfiler *prof, int op, MonoGCHandleType type, uint32_t handle, MonoObject *obj)
{
- int do_bt = !ENABLED (PROFLOG_CALL_EVENTS) && InterlockedRead (&log_profiler.runtime_inited) && !log_config.notraces;
+ int do_bt = !log_config.enter_leave && InterlockedRead (&log_profiler.runtime_inited) && log_config.num_frames;
FrameData data;
if (do_bt)
static void
throw_exc (MonoProfiler *prof, MonoObject *object)
{
- int do_bt = (!ENABLED (PROFLOG_CALL_EVENTS) && InterlockedRead (&log_profiler.runtime_inited) && !log_config.notraces) ? TYPE_THROW_BT : 0;
+ int do_bt = (!log_config.enter_leave && InterlockedRead (&log_profiler.runtime_inited) && log_config.num_frames) ? TYPE_THROW_BT : 0;
FrameData data;
if (do_bt)
static void
monitor_event (MonoProfiler *profiler, MonoObject *object, MonoProfilerMonitorEvent ev)
{
- int do_bt = (!ENABLED (PROFLOG_CALL_EVENTS) && InterlockedRead (&log_profiler.runtime_inited) && !log_config.notraces) ? TYPE_MONITOR_BT : 0;
+ int do_bt = (!log_config.enter_leave && InterlockedRead (&log_profiler.runtime_inited) && log_config.num_frames) ? TYPE_MONITOR_BT : 0;
FrameData data;
if (do_bt)
static void
thread_start (MonoProfiler *prof, uintptr_t tid)
{
- if (ENABLED (PROFLOG_THREAD_EVENTS)) {
- ENTER_LOG (&thread_starts_ctr, logbuffer,
- EVENT_SIZE /* event */ +
- BYTE_SIZE /* type */ +
- LEB128_SIZE /* tid */
- );
+ ENTER_LOG (&thread_starts_ctr, logbuffer,
+ EVENT_SIZE /* event */ +
+ BYTE_SIZE /* type */ +
+ LEB128_SIZE /* tid */
+ );
- emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
- emit_byte (logbuffer, TYPE_THREAD);
- emit_ptr (logbuffer, (void*) tid);
+ emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
+ emit_byte (logbuffer, TYPE_THREAD);
+ emit_ptr (logbuffer, (void*) tid);
- EXIT_LOG;
- }
+ EXIT_LOG;
}
static void
thread_end (MonoProfiler *prof, uintptr_t tid)
{
- if (ENABLED (PROFLOG_THREAD_EVENTS)) {
- ENTER_LOG (&thread_ends_ctr, logbuffer,
- EVENT_SIZE /* event */ +
- BYTE_SIZE /* type */ +
- LEB128_SIZE /* tid */
- );
+ ENTER_LOG (&thread_ends_ctr, logbuffer,
+ EVENT_SIZE /* event */ +
+ BYTE_SIZE /* type */ +
+ LEB128_SIZE /* tid */
+ );
- emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
- emit_byte (logbuffer, TYPE_THREAD);
- emit_ptr (logbuffer, (void*) tid);
+ emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
+ emit_byte (logbuffer, TYPE_THREAD);
+ emit_ptr (logbuffer, (void*) tid);
- EXIT_LOG_EXPLICIT (NO_SEND);
- }
+ EXIT_LOG_EXPLICIT (NO_SEND);
MonoProfilerThread *thread = get_thread ();
{
int len = strlen (name) + 1;
- if (ENABLED (PROFLOG_THREAD_EVENTS)) {
- ENTER_LOG (&thread_names_ctr, logbuffer,
- EVENT_SIZE /* event */ +
- BYTE_SIZE /* type */ +
- LEB128_SIZE /* tid */ +
- len /* name */
- );
+ ENTER_LOG (&thread_names_ctr, logbuffer,
+ EVENT_SIZE /* event */ +
+ BYTE_SIZE /* type */ +
+ LEB128_SIZE /* tid */ +
+ len /* name */
+ );
- emit_event (logbuffer, TYPE_METADATA);
- emit_byte (logbuffer, TYPE_THREAD);
- emit_ptr (logbuffer, (void*)tid);
- memcpy (logbuffer->cursor, name, len);
- logbuffer->cursor += len;
+ emit_event (logbuffer, TYPE_METADATA);
+ emit_byte (logbuffer, TYPE_THREAD);
+ emit_ptr (logbuffer, (void*)tid);
+ memcpy (logbuffer->cursor, name, len);
+ logbuffer->cursor += len;
- EXIT_LOG;
- }
+ EXIT_LOG;
}
static void
if (ENABLED (PROFLOG_COUNTER_EVENTS))
counters_and_perfcounters_sample ();
- if (ENABLED (PROFLOG_CODE_COV_FEATURE))
+ if (log_config.collect_coverage)
dump_coverage ();
char c = 1;
mono_conc_hashtable_destroy (prof->method_table);
mono_os_mutex_destroy (&prof->method_table_mutex);
- if (ENABLED (PROFLOG_CODE_COV_FEATURE)) {
+ if (log_config.collect_coverage) {
mono_os_mutex_lock (&log_profiler.coverage_mutex);
mono_conc_hashtable_foreach (log_profiler.coverage_assemblies, unref_coverage_assemblies, NULL);
mono_os_mutex_unlock (&log_profiler.coverage_mutex);
buf [len] = 0;
- if (!strcmp (buf, "heapshot\n") && log_config.hs_mode_ondemand) {
+ if (log_config.hs_mode == MONO_PROFILER_HEAPSHOT_ON_DEMAND && !strcmp (buf, "heapshot\n")) {
// Rely on the finalization callback triggering a GC.
- log_profiler.heapshot_requested = TRUE;
+ InterlockedWrite (&log_profiler.heapshot_requested, 1);
mono_gc_finalize_notify ();
}
}
mono_os_mutex_init (&log_profiler.method_table_mutex);
log_profiler.method_table = mono_conc_hashtable_new (NULL, NULL);
- if (ENABLED (PROFLOG_CODE_COV_FEATURE))
+ if (log_config.collect_coverage)
coverage_init ();
log_profiler.coverage_filters = filters;
MonoProfilerHandle handle = log_profiler.handle = mono_profiler_install (&log_profiler);
- //Required callbacks
+ /*
+ * Required callbacks. These are either necessary for the profiler itself
+ * to function, or provide metadata that's needed if other events (e.g.
+ * allocations, exceptions) are dynamically enabled/disabled.
+ */
+
mono_profiler_set_runtime_shutdown_end_callback (handle, log_shutdown);
mono_profiler_set_runtime_initialized_callback (handle, runtime_initialized);
mono_profiler_set_gc_event_callback (handle, gc_event);
- mono_profiler_set_gc_resize_callback (handle, gc_resize);
+
mono_profiler_set_thread_started_callback (handle, thread_start);
mono_profiler_set_thread_stopped_callback (handle, thread_end);
-
- //It's questionable whether we actually want this to be mandatory, maybe put it behind the actual event?
mono_profiler_set_thread_name_callback (handle, thread_name);
- if (log_config.effective_mask & PROFLOG_DOMAIN_EVENTS) {
- mono_profiler_set_domain_loaded_callback (handle, domain_loaded);
- mono_profiler_set_domain_unloading_callback (handle, domain_unloaded);
- mono_profiler_set_domain_name_callback (handle, domain_name);
- }
+ mono_profiler_set_domain_loaded_callback (handle, domain_loaded);
+ mono_profiler_set_domain_unloading_callback (handle, domain_unloaded);
+ mono_profiler_set_domain_name_callback (handle, domain_name);
- if (log_config.effective_mask & PROFLOG_ASSEMBLY_EVENTS) {
- mono_profiler_set_assembly_loaded_callback (handle, assembly_loaded);
- mono_profiler_set_assembly_unloading_callback (handle, assembly_unloaded);
- }
+ mono_profiler_set_context_loaded_callback (handle, context_loaded);
+ mono_profiler_set_context_unloaded_callback (handle, context_unloaded);
- if (log_config.effective_mask & PROFLOG_MODULE_EVENTS) {
- mono_profiler_set_image_loaded_callback (handle, image_loaded);
- mono_profiler_set_image_unloading_callback (handle, image_unloaded);
- }
+ mono_profiler_set_assembly_loaded_callback (handle, assembly_loaded);
+ mono_profiler_set_assembly_unloading_callback (handle, assembly_unloaded);
- if (log_config.effective_mask & PROFLOG_CLASS_EVENTS)
- mono_profiler_set_class_loaded_callback (handle, class_loaded);
+ mono_profiler_set_image_loaded_callback (handle, image_loaded);
+ mono_profiler_set_image_unloading_callback (handle, image_unloaded);
- if (log_config.effective_mask & PROFLOG_JIT_COMPILATION_EVENTS) {
- mono_profiler_set_jit_done_callback (handle, method_jitted);
- mono_profiler_set_jit_code_buffer_callback (handle, code_buffer_new);
- }
+ mono_profiler_set_class_loaded_callback (handle, class_loaded);
+
+ mono_profiler_set_jit_done_callback (handle, method_jitted);
- if (log_config.effective_mask & PROFLOG_EXCEPTION_EVENTS) {
+ if (ENABLED (PROFLOG_EXCEPTION_EVENTS)) {
mono_profiler_set_exception_throw_callback (handle, throw_exc);
mono_profiler_set_exception_clause_callback (handle, clause_exc);
}
- if (log_config.effective_mask & PROFLOG_ALLOCATION_EVENTS) {
- mono_profiler_enable_allocations ();
- mono_profiler_set_gc_allocation_callback (handle, gc_alloc);
- }
-
- //PROFLOG_GC_EVENTS is mandatory
- //PROFLOG_THREAD_EVENTS is mandatory
-
- if (log_config.effective_mask & PROFLOG_CALL_EVENTS) {
- mono_profiler_set_call_instrumentation_filter_callback (handle, method_filter);
- mono_profiler_set_method_enter_callback (handle, method_enter);
- mono_profiler_set_method_leave_callback (handle, method_leave);
- mono_profiler_set_method_exception_leave_callback (handle, method_exc_leave);
- }
-
- if (log_config.effective_mask & PROFLOG_INS_COVERAGE_EVENTS)
- mono_profiler_set_coverage_filter_callback (handle, coverage_filter);
-
- if (log_config.effective_mask & PROFLOG_SAMPLING_EVENTS) {
- mono_profiler_enable_sampling (handle);
-
- if (!mono_profiler_set_sample_mode (handle, log_config.sampling_mode, log_config.sample_freq))
- mono_profiler_printf_err ("Another profiler controls sampling parameters; the log profiler will not be able to modify them.");
-
- mono_profiler_set_sample_hit_callback (handle, mono_sample_hit);
- }
-
- if (log_config.effective_mask & PROFLOG_MONITOR_EVENTS) {
+ if (ENABLED (PROFLOG_MONITOR_EVENTS)) {
mono_profiler_set_monitor_contention_callback (handle, monitor_contention);
mono_profiler_set_monitor_acquired_callback (handle, monitor_acquired);
mono_profiler_set_monitor_failed_callback (handle, monitor_failed);
}
- if (log_config.effective_mask & PROFLOG_GC_MOVES_EVENTS)
+ if (ENABLED (PROFLOG_GC_EVENTS))
+ mono_profiler_set_gc_resize_callback (handle, gc_resize);
+
+ if (ENABLED (PROFLOG_GC_ALLOCATION_EVENTS)) {
+ mono_profiler_enable_allocations ();
+ mono_profiler_set_gc_allocation_callback (handle, gc_alloc);
+ }
+
+ if (ENABLED (PROFLOG_GC_MOVE_EVENTS))
mono_profiler_set_gc_moves_callback (handle, gc_moves);
- if (log_config.effective_mask & PROFLOG_GC_ROOT_EVENTS)
+ if (ENABLED (PROFLOG_GC_ROOT_EVENTS))
mono_profiler_set_gc_roots_callback (handle, gc_roots);
- if (log_config.effective_mask & PROFLOG_CONTEXT_EVENTS) {
- mono_profiler_set_context_loaded_callback (handle, context_loaded);
- mono_profiler_set_context_unloaded_callback (handle, context_unloaded);
+ if (ENABLED (PROFLOG_GC_HANDLE_EVENTS)) {
+ mono_profiler_set_gc_handle_created_callback (handle, gc_handle_created);
+ mono_profiler_set_gc_handle_deleted_callback (handle, gc_handle_deleted);
}
- if (log_config.effective_mask & PROFLOG_FINALIZATION_EVENTS) {
+ if (ENABLED (PROFLOG_FINALIZATION_EVENTS)) {
mono_profiler_set_gc_finalizing_callback (handle, finalize_begin);
mono_profiler_set_gc_finalized_callback (handle, finalize_end);
mono_profiler_set_gc_finalizing_object_callback (handle, finalize_object_begin);
mono_profiler_set_gc_finalized_object_callback (handle, finalize_object_end);
- } else if (ENABLED (PROFLOG_HEAPSHOT_FEATURE) && log_config.hs_mode_ondemand) {
+ } else if (log_config.hs_mode == MONO_PROFILER_HEAPSHOT_ON_DEMAND) {
//On Demand heapshot uses the finalizer thread to force a collection and thus a heapshot
mono_profiler_set_gc_finalized_callback (handle, finalize_end);
}
- //PROFLOG_COUNTER_EVENTS is a pseudo event controled by the no_counters global var
+ if (ENABLED (PROFLOG_SAMPLE_EVENTS))
+ mono_profiler_set_sample_hit_callback (handle, mono_sample_hit);
- if (log_config.effective_mask & PROFLOG_GC_HANDLE_EVENTS) {
- mono_profiler_set_gc_handle_created_callback (handle, gc_handle_created);
- mono_profiler_set_gc_handle_deleted_callback (handle, gc_handle_deleted);
+ if (ENABLED (PROFLOG_JIT_EVENTS))
+ mono_profiler_set_jit_code_buffer_callback (handle, code_buffer_new);
+
+ if (log_config.enter_leave) {
+ mono_profiler_set_call_instrumentation_filter_callback (handle, method_filter);
+ mono_profiler_set_method_enter_callback (handle, method_enter);
+ mono_profiler_set_method_leave_callback (handle, method_leave);
+ mono_profiler_set_method_exception_leave_callback (handle, method_exc_leave);
}
+
+ if (log_config.collect_coverage)
+ mono_profiler_set_coverage_filter_callback (handle, coverage_filter);
+
+ mono_profiler_enable_sampling (handle);
+
+ /*
+ * If no sample option was given by the user, this just leaves the sampling
+ * thread in idle mode. We do this even if no option was given so that we
+ * can warn if another profiler controls sampling parameters.
+ */
+ if (!mono_profiler_set_sample_mode (handle, log_config.sampling_mode, log_config.sample_freq))
+ mono_profiler_printf_err ("Another profiler controls sampling parameters; the log profiler will not be able to modify them.");
}
MONO_PROFILER_GC_HANDLE_DESTROYED = 1,
};
+typedef enum {
+ MONO_PROFILER_HEAPSHOT_NONE = 0,
+ MONO_PROFILER_HEAPSHOT_MAJOR = 1,
+ MONO_PROFILER_HEAPSHOT_ON_DEMAND = 2,
+ MONO_PROFILER_HEAPSHOT_X_GC = 3,
+ MONO_PROFILER_HEAPSHOT_X_MS = 4,
+} MonoProfilerHeapshotMode;
+
// If you alter MAX_FRAMES, you may need to alter SAMPLE_BLOCK_SIZE too.
#define MAX_FRAMES 32
//The following flags control emitting individual events
-#define PROFLOG_DOMAIN_EVENTS (1 << 0)
-#define PROFLOG_ASSEMBLY_EVENTS (1 << 1)
-#define PROFLOG_MODULE_EVENTS (1 << 2)
-#define PROFLOG_CLASS_EVENTS (1 << 3)
-#define PROFLOG_JIT_COMPILATION_EVENTS (1 << 4)
-#define PROFLOG_EXCEPTION_EVENTS (1 << 5)
-#define PROFLOG_ALLOCATION_EVENTS (1 << 6)
-#define PROFLOG_GC_EVENTS (1 << 7)
-#define PROFLOG_THREAD_EVENTS (1 << 8)
-//This generate enter/leave events
-#define PROFLOG_CALL_EVENTS (1 << 9)
-#define PROFLOG_INS_COVERAGE_EVENTS (1 << 10)
-#define PROFLOG_SAMPLING_EVENTS (1 << 11)
-#define PROFLOG_MONITOR_EVENTS (1 << 12)
-#define PROFLOG_GC_MOVES_EVENTS (1 << 13)
-
-#define PROFLOG_GC_ROOT_EVENTS (1 << 14)
-#define PROFLOG_CONTEXT_EVENTS (1 << 15)
-#define PROFLOG_FINALIZATION_EVENTS (1 << 16)
-#define PROFLOG_COUNTER_EVENTS (1 << 17)
-#define PROFLOG_GC_HANDLE_EVENTS (1 << 18)
-
-//The following flags control whole subsystems
-//Enables code coverage generation
-#define PROFLOG_CODE_COV_FEATURE (1 << 19)
-//This enables sampling to be generated
-#define PROFLOG_SAMPLING_FEATURE (1 << 20)
-//This enable heap dumping during GCs and filter GCRoots and GCHandle events outside of the dumped collections
-#define PROFLOG_HEAPSHOT_FEATURE (1 << 21)
-
-
-
-//The follow flags are the common aliases we want ppl to use
-#define PROFLOG_TYPELOADING_ALIAS (PROFLOG_DOMAIN_EVENTS | PROFLOG_ASSEMBLY_EVENTS | PROFLOG_MODULE_EVENTS | PROFLOG_CLASS_EVENTS)
-#define PROFLOG_CODECOV_ALIAS (PROFLOG_INS_COVERAGE_EVENTS | PROFLOG_CODE_COV_FEATURE)
-#define PROFLOG_PERF_SAMPLING_ALIAS (PROFLOG_TYPELOADING_ALIAS | PROFLOG_THREAD_EVENTS | PROFLOG_SAMPLING_EVENTS | PROFLOG_SAMPLING_FEATURE)
-#define PROFLOG_GC_ALLOC_ALIAS (PROFLOG_TYPELOADING_ALIAS | PROFLOG_THREAD_EVENTS | PROFLOG_GC_EVENTS | PROFLOG_ALLOCATION_EVENTS)
-#define PROFLOG_HEAPSHOT_ALIAS (PROFLOG_TYPELOADING_ALIAS | PROFLOG_THREAD_EVENTS | PROFLOG_GC_EVENTS | PROFLOG_GC_ROOT_EVENTS | PROFLOG_HEAPSHOT_FEATURE)
-#define PROFLOG_LEGACY_ALIAS (PROFLOG_TYPELOADING_ALIAS | PROFLOG_GC_EVENTS | PROFLOG_THREAD_EVENTS | PROFLOG_JIT_COMPILATION_EVENTS | PROFLOG_EXCEPTION_EVENTS | PROFLOG_MONITOR_EVENTS | PROFLOG_GC_ROOT_EVENTS | PROFLOG_CONTEXT_EVENTS | PROFLOG_FINALIZATION_EVENTS | PROFLOG_COUNTER_EVENTS)
-
+#define PROFLOG_EXCEPTION_EVENTS (1 << 0)
+#define PROFLOG_MONITOR_EVENTS (1 << 1)
+#define PROFLOG_GC_EVENTS (1 << 2)
+#define PROFLOG_GC_ALLOCATION_EVENTS (1 << 3)
+#define PROFLOG_GC_MOVE_EVENTS (1 << 4)
+#define PROFLOG_GC_ROOT_EVENTS (1 << 5)
+#define PROFLOG_GC_HANDLE_EVENTS (1 << 6)
+#define PROFLOG_FINALIZATION_EVENTS (1 << 7)
+#define PROFLOG_COUNTER_EVENTS (1 << 8)
+#define PROFLOG_SAMPLE_EVENTS (1 << 9)
+#define PROFLOG_JIT_EVENTS (1 << 10)
+
+#define PROFLOG_ALLOC_ALIAS (PROFLOG_GC_EVENTS | PROFLOG_GC_ALLOCATION_EVENTS | PROFLOG_GC_MOVE_EVENTS)
+#define PROFLOG_HEAPSHOT_ALIAS (PROFLOG_GC_EVENTS | PROFLOG_GC_ROOT_EVENTS)
+#define PROFLOG_LEGACY_ALIAS (PROFLOG_EXCEPTION_EVENTS | PROFLOG_MONITOR_EVENTS | PROFLOG_GC_EVENTS | PROFLOG_GC_MOVE_EVENTS | PROFLOG_GC_ROOT_EVENTS | PROFLOG_GC_HANDLE_EVENTS | PROFLOG_FINALIZATION_EVENTS | PROFLOG_COUNTER_EVENTS)
typedef struct {
//Events explicitly enabled
int enable_mask;
+
//Events explicitly disabled
int disable_mask;
- //Actual mask the profiler should use
+ // Actual mask the profiler should use. Can be changed at runtime.
int effective_mask;
+ // Whether to do method prologue/epilogue instrumentation. Only used at startup.
+ gboolean enter_leave;
+
+ // Whether to collect code coverage by instrumenting basic blocks.
+ gboolean collect_coverage;
+
//Emit a report at the end of execution
gboolean do_report;
//Where to compress the output file
gboolean use_zip;
- //If true, don't generate stacktraces
- gboolean notraces;
-
- //If true, heapshots are generated on demand only
- gboolean hs_mode_ondemand;
+ // Heapshot mode (every major, on demand, XXgc, XXms). Can be changed at runtime.
+ MonoProfilerHeapshotMode hs_mode;
- //HeapShort frequency in milliseconds
- unsigned int hs_mode_ms;
+ // Heapshot frequency in milliseconds (for MONO_PROFILER_HEAPSHOT_X_MS). Can be changed at runtime.
+ unsigned int hs_freq_ms;
- //HeapShort frequency in number of collections
- unsigned int hs_mode_gc;
+ // Heapshot frequency in number of collections (for MONO_PROFILER_HEAPSHOT_X_GC). Can be changed at runtime.
+ unsigned int hs_freq_gc;
- //Sample frequency in Hertz
+ // Sample frequency in Hertz. Only used at startup.
int sample_freq;
- //Maximum number of frames to collect
+ // Maximum number of frames to collect. Can be changed at runtime.
int num_frames;
- //Max depth to record enter/leave events
+ // Max depth to record enter/leave events. Can be changed at runtime.
int max_call_depth;
//Name of the generated mlpd file
//Filter files used by the code coverage mode
GPtrArray *cov_filter_files;
- //Port to listen for profiling commands
+ // Port to listen for profiling commands (e.g. "heapshot" for on-demand heapshot).
int command_port;
- //Max size of the sample hit buffer, we'll drop frames if it's reached
+ // Maximum number of SampleHit structures. We'll drop samples if this number is not sufficient.
int max_allocated_sample_hits;
+ // Sample mode. Only used at startup.
MonoProfilerSampleMode sampling_mode;
} ProfilerConfig;