#include <mono/metadata/tabledefs.h>
#include <mono/utils/atomic.h>
#include <mono/utils/mono-membar.h>
+#include <mono/utils/mono-mmap.h>
#include <mono/utils/mono-counters.h>
#include <mono/utils/mono-os-mutex.h>
+#include <mono/utils/mono-os-semaphore.h>
#include <mono/utils/mono-conc-hashtable.h>
+#include <mono/utils/mono-linked-list-set.h>
+#include <mono/utils/lock-free-alloc.h>
#include <mono/utils/lock-free-queue.h>
+#include <mono/utils/hazard-pointer.h>
+#include <mono/utils/mono-threads.h>
+#include <mono/utils/mono-threads-api.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
/* Worst-case size in bytes of a 64-bit value encoded with LEB128. */
#define LEB128_SIZE 10
-/* Size in bytes of the event ID prefix. */
-#define EVENT_SIZE 1
+/* Size of a value encoded as a single byte. */
+#define BYTE_SIZE 1
+/* Size in bytes of the event prefix (ID + time). */
+#define EVENT_SIZE (BYTE_SIZE + LEB128_SIZE)
static int nocalls = 0;
static int notraces = 0;
static int do_coverage = 0;
static gboolean debug_coverage = FALSE;
static MonoProfileSamplingMode sampling_mode = MONO_PROFILER_STAT_MODE_PROCESS;
-
-typedef struct _LogBuffer LogBuffer;
+static int max_allocated_sample_hits;
+
+static gint32 sample_hits;
+static gint32 sample_flushes;
+static gint32 sample_allocations;
+static gint32 buffer_allocations;
+static gint32 thread_starts;
+static gint32 thread_ends;
+static gint32 domain_loads;
+static gint32 domain_unloads;
+static gint32 context_loads;
+static gint32 context_unloads;
+static gint32 assembly_loads;
+static gint32 assembly_unloads;
+static gint32 image_loads;
+static gint32 image_unloads;
+static gint32 class_loads;
+static gint32 class_unloads;
+
+static MonoLinkedListSet profiler_thread_list;
/*
* file format:
* strings are represented as a 0-terminated utf8 sequence.
*
* backtrace format:
- * [flags: uleb128] must be 0
* [num: uleb128] number of frames following
* [frame: sleb128]* num MonoMethod pointers as differences from ptr_base
*
* type GC format:
* type: TYPE_GC
* exinfo: one of TYPE_GC_EVENT, TYPE_GC_RESIZE, TYPE_GC_MOVE, TYPE_GC_HANDLE_CREATED[_BT],
- * TYPE_GC_HANDLE_DESTROYED[_BT]
+ * TYPE_GC_HANDLE_DESTROYED[_BT], TYPE_GC_FINALIZE_START, TYPE_GC_FINALIZE_END,
+ * TYPE_GC_FINALIZE_OBJECT_START, TYPE_GC_FINALIZE_OBJECT_END
* [time diff: uleb128] nanoseconds since last timing
* if exinfo == TYPE_GC_RESIZE
* [heap_size: uleb128] new heap size
* if exinfo == TYPE_GC_EVENT
- * [event type: uleb128] GC event (MONO_GC_EVENT_* from profiler.h)
- * [generation: uleb128] GC generation event refers to
+ * [event type: byte] GC event (MONO_GC_EVENT_* from profiler.h)
+ * [generation: byte] GC generation event refers to
* if exinfo == TYPE_GC_MOVE
* [num_objects: uleb128] number of object moves that follow
* [objaddr: sleb128]+ num_objects object pointer differences from obj_base
* upper bits reserved as flags
* [handle: uleb128] GC handle value
* If exinfo == TYPE_GC_HANDLE_DESTROYED_BT, a backtrace follows.
+ * if exinfo == TYPE_GC_FINALIZE_OBJECT_{START,END}
+ * [object: sleb128] the object as a difference from obj_base
*
* type metadata format:
* type: TYPE_METADATA
* [pointer: sleb128] pointer of the metadata type depending on mtype
* if mtype == TYPE_CLASS
* [image: sleb128] MonoImage* as a pointer difference from ptr_base
- * [flags: uleb128] must be 0
* [name: string] full class name
* if mtype == TYPE_IMAGE
- * [flags: uleb128] must be 0
* [name: string] image file name
* if mtype == TYPE_ASSEMBLY
- * [flags: uleb128] must be 0
* [name: string] assembly name
- * if mtype == TYPE_DOMAIN
- * [flags: uleb128] must be 0
* if mtype == TYPE_DOMAIN && exinfo == 0
* [name: string] domain friendly name
* if mtype == TYPE_CONTEXT
- * [flags: uleb128] must be 0
* [domain: sleb128] domain id as pointer
- * if mtype == TYPE_THREAD && (format_version < 11 || (format_version > 10 && exinfo == 0))
- * [flags: uleb128] must be 0
+ * if mtype == TYPE_THREAD && exinfo == 0
* [name: string] thread name
*
* type method format:
* exinfo: one of: TYPE_JITHELPER
* [time diff: uleb128] nanoseconds since last timing
* if exinfo == TYPE_JITHELPER
- * [type: uleb128] MonoProfilerCodeBufferType enum value
+ * [type: byte] MonoProfilerCodeBufferType enum value
* [buffer address: sleb128] pointer to the native code as a diff from ptr_base
* [buffer size: uleb128] size of the generated code
* if type == MONO_PROFILER_CODE_BUFFER_SPECIFIC_TRAMPOLINE
* [class: sleb128] the object MonoClass* as a difference from ptr_base
* [size: uleb128] size of the object on the heap
* [num_refs: uleb128] number of object references
- * if (format version > 1) each referenced objref is preceded by a
- * uleb128 encoded offset: the first offset is from the object address
- * and each next offset is relative to the previous one
+ * each referenced objref is preceded by a uleb128 encoded offset: the
+ * first offset is from the object address and each next offset is relative
+ * to the previous one
* [objrefs: sleb128]+ object referenced as a difference from obj_base
* The same object can appear multiple times, but only the first time
* with size != 0: in the other cases this data will only be used to
* [num_roots: uleb128] number of root references
* [num_gc: uleb128] number of major gcs
* [object: sleb128] the object as a difference from obj_base
- * [root_type: uleb128] the root_type: MonoProfileGCRootType (profiler.h)
+ * [root_type: byte] the root_type: MonoProfileGCRootType (profiler.h)
* [extra_info: uleb128] the extra_info value
* object, root_type and extra_info are repeated num_roots times
*
* type: TYPE_SAMPLE
* exinfo: one of TYPE_SAMPLE_HIT, TYPE_SAMPLE_USYM, TYPE_SAMPLE_UBIN, TYPE_SAMPLE_COUNTERS_DESC, TYPE_SAMPLE_COUNTERS
* if exinfo == TYPE_SAMPLE_HIT
- * [sample_type: uleb128] type of sample (SAMPLE_*)
+ * [sample_type: byte] type of sample (SAMPLE_*)
* [timestamp: uleb128] nanoseconds since startup (note: different from other timestamps!)
- * if (format_version > 10)
- * [thread: sleb128] thread id as difference from ptr_base
+ * [thread: sleb128] thread id as difference from ptr_base
* [count: uleb128] number of following instruction addresses
* [ip: sleb128]* instruction pointer as difference from ptr_base
- * if (format_version > 5)
- * [mbt_count: uleb128] number of managed backtrace info triplets (method + IL offset + native offset)
- * [method: sleb128]* MonoMethod* as a pointer difference from the last such
- * pointer or the buffer method_base (the first such method can be also indentified by ip, but this is not neccessarily true)
- * [il_offset: sleb128]* IL offset inside method where the hit occurred
- * [native_offset: sleb128]* native offset inside method where the hit occurred
+ * [mbt_count: uleb128] number of managed backtrace frames
+ * [method: sleb128]* MonoMethod* as a pointer difference from the last such
 * pointer or the buffer method_base (the first such method can also be identified by ip, but this is not necessarily true)
* if exinfo == TYPE_SAMPLE_USYM
* [address: sleb128] symbol address as a difference from ptr_base
* [size: uleb128] symbol size (may be 0 if unknown)
* if section == MONO_COUNTER_PERFCOUNTERS:
* [section_name: string] section name of counter
* [name: string] name of counter
- * [type: uleb128] type of counter
- * [unit: uleb128] unit of counter
- * [variance: uleb128] variance of counter
+ * [type: byte] type of counter
+ * [unit: byte] unit of counter
+ * [variance: byte] variance of counter
* [index: uleb128] unique index of counter
* if exinfo == TYPE_SAMPLE_COUNTERS
* [timestamp: uleb128] sampling timestamp
* [index: uleb128] unique index of counter
* if index == 0:
* break
- * [type: uleb128] type of counter value
+ * [type: byte] type of counter value
* if type == string:
* if value == null:
* [0: uleb128] 0 -> value is null
* [partially_covered: uleb128] the number of partially covered methods
* currently partially_covered will always be 0, and fully_covered is the
* number of methods that are fully and partially covered.
- */
-
-/*
- * Format oddities that we ought to fix:
*
- * - Methods written in emit_bt () should be based on the buffer's base
- * method instead of the base pointer.
- * - The TYPE_SAMPLE_HIT event contains (currently) pointless data like
- * always-one unmanaged frame count and always-zero IL offsets.
- *
- * These are mostly small things and are not worth a format change by
- * themselves. They should be done when some other major change has to
- * be done to the format.
+ * type meta format:
+ * type: TYPE_META
+ * exinfo: one of: TYPE_SYNC_POINT
+ * [time diff: uleb128] nanoseconds since last timing
+ * if exinfo == TYPE_SYNC_POINT
+ * [type: byte] MonoProfilerSyncPointType enum value
*/
// Pending data to be written to the log, for a single thread.
// Threads periodically flush their own LogBuffers by calling safe_send
+typedef struct _LogBuffer LogBuffer;
struct _LogBuffer {
// Next (older) LogBuffer in processing queue
LogBuffer *next;
uintptr_t last_method;
uintptr_t obj_base;
uintptr_t thread_id;
- int locked;
- int call_depth;
// Bytes allocated for this LogBuffer
int size;
unsigned char buf [1];
};
+// Per-thread profiler state. Stored in TLS (see the PROF_TLS_* macros) and,
+// for most threads, also linked into `profiler_thread_list`.
+typedef struct {
+	// Linked list set node; `node.key` holds the thread id (set in init_thread ()).
+	MonoLinkedListSetNode node;
+
+	// The current log buffer for this thread.
+	LogBuffer *buffer;
+
+	// Methods referenced by events in `buffer`, see `MethodInfo`.
+	GPtrArray *methods;
+
+	// Current call depth for enter/leave events.
+	int call_depth;
+
+	// Indicates whether this thread is currently writing to its `buffer`.
+	int busy;
+} MonoProfilerThread;
+
static inline void
ign_res (int G_GNUC_UNUSED unused, ...)
{
}
-#define ENTER_LOG(lb,str) if ((lb)->locked) {ign_res (write(2, str, strlen(str))); ign_res (write(2, "\n", 1));return;} else {(lb)->locked++;}
-#define EXIT_LOG(lb) (lb)->locked--;
+/*
+ * These macros create a scope to avoid leaking the buffer returned
+ * from ensure_logbuf () as it may have been invalidated by a GC
+ * thread during STW. If you called init_thread () with add_to_lls =
+ * FALSE, then don't use these macros.
+ */
-// Shared queue of sample snapshots taken at signal time.
-// The queue is written into by signal handlers for all threads;
-// the helper thread later unqueues and writes into its own LogBuffer.
-typedef struct _StatBuffer StatBuffer;
-struct _StatBuffer {
- // Next (older) StatBuffer in processing queue
- StatBuffer *next;
+#define ENTER_LOG \
+ do { \
+ buffer_lock (); \
+ g_assert (!PROF_TLS_GET ()->busy++ && "Why are we trying to write a new event while already writing one?")
- // Bytes allocated for this StatBuffer
- uintptr_t size;
+#define EXIT_LOG \
+ PROF_TLS_GET ()->busy--; \
+ buffer_unlock (); \
+ } while (0)
- // Start of currently unused space in buffer
- uintptr_t *cursor;
+static volatile gint32 buffer_rwlock_count;
+static volatile gpointer buffer_rwlock_exclusive;
- // Pointer to start-of-structure-plus-size (for convenience)
- uintptr_t *buf_end;
-
- // Start of data in buffer.
- // Data consists of a series of sample packets consisting of:
- // 1 ptrword: Metadata
- // Low 8 bits: COUNT, the count of native stack frames in this sample (currently always 1)
- // Next 8 bits: MBT_COUNT, the count of managed stacks in this sample
- // Next 8 bits: TYPE. See "sampling sources" enum in proflog.h. Usually SAMPLE_CYCLES (1)
- // 1 ptrword: Thread ID
- // 1 ptrword: Timestamp
- // COUNT ptrwords: Native stack frames
- // Each word is an IP (first is IP where the signal did the interruption)
- // MBT_COUNT * 4 ptrwords: Managed stack frames (AsyncFrameInfo, repacked)
- // Word 1: MonoMethod ptr
- // Word 2: MonoDomain ptr
- // Word 3: Base address of method
- // Word 4: Offset within method
- uintptr_t buf [1];
-};
+// Can be used recursively.
+static void
+buffer_lock (void)
+{
+	/*
+	 * If the thread holding the exclusive lock tries to modify the
+	 * reader count, just make it a no-op. This way, we also avoid
+	 * invoking the GC safe point macros below, which could break if
+	 * done from a thread that is currently the initiator of STW.
+	 *
+	 * In other words, we rely on the fact that the GC thread takes
+	 * the exclusive lock in the gc_event () callback when the world
+	 * is about to stop.
+	 */
+	if (InterlockedReadPointer (&buffer_rwlock_exclusive) != (gpointer) thread_id ()) {
+		MONO_ENTER_GC_SAFE;
-typedef struct _BinaryObject BinaryObject;
+		// Spin until no writer holds the exclusive side...
+		while (InterlockedReadPointer (&buffer_rwlock_exclusive))
+			mono_thread_info_yield ();
+
+		// ... then announce this thread as a reader; buffer_unlock ()
+		// decrements this count again.
+		InterlockedIncrement (&buffer_rwlock_count);
+
+		MONO_EXIT_GC_SAFE;
+	}
+	// Keep buffer accesses from being reordered above the acquisition.
+	mono_memory_barrier ();
+}
+
+// Releases the shared (reader) side of the buffer lock taken by buffer_lock ().
+static void
+buffer_unlock (void)
+{
+	// Keep buffer accesses from being reordered below the release.
+	mono_memory_barrier ();
+
+	// See the comment in buffer_lock ().
+	if (InterlockedReadPointer (&buffer_rwlock_exclusive) == (gpointer) thread_id ())
+		return;
+
+	g_assert (InterlockedRead (&buffer_rwlock_count) && "Why are we trying to decrement a zero reader count?");
+
+	InterlockedDecrement (&buffer_rwlock_count);
+}
+
+// Cannot be used recursively.
+static void
+buffer_lock_excl (void)
+{
+	gpointer tid = (gpointer) thread_id ();
+
+	g_assert (InterlockedReadPointer (&buffer_rwlock_exclusive) != tid && "Why are we taking the exclusive lock twice?");
+
+	MONO_ENTER_GC_SAFE;
+
+	// First claim exclusive ownership (keeps new readers out)...
+	while (InterlockedCompareExchangePointer (&buffer_rwlock_exclusive, tid, 0))
+		mono_thread_info_yield ();
+
+	// ... then wait for all in-flight readers to drain.
+	while (InterlockedRead (&buffer_rwlock_count))
+		mono_thread_info_yield ();
+
+	MONO_EXIT_GC_SAFE;
+
+	// Keep buffer accesses from being reordered above the acquisition.
+	mono_memory_barrier ();
+}
+
+// Releases the exclusive (writer) side of the buffer lock taken by
+// buffer_lock_excl (). Must be called by the same thread that locked.
+static void
+buffer_unlock_excl (void)
+{
+	// Keep buffer accesses from being reordered below the release.
+	mono_memory_barrier ();
+
+	g_assert (InterlockedReadPointer (&buffer_rwlock_exclusive) && "Why is the exclusive lock not held?");
+	g_assert (InterlockedReadPointer (&buffer_rwlock_exclusive) == (gpointer) thread_id () && "Why does another thread hold the exclusive lock?");
+	g_assert (!InterlockedRead (&buffer_rwlock_count) && "Why are there readers when the exclusive lock is held?");
+
+	InterlockedWritePointer (&buffer_rwlock_exclusive, NULL);
+}
+
+typedef struct _BinaryObject BinaryObject;
struct _BinaryObject {
BinaryObject *next;
void *addr;
};
struct _MonoProfiler {
- StatBuffer *stat_buffers;
FILE* file;
#if defined (HAVE_SYS_ZLIB)
gzFile gzfile;
#ifndef HOST_WIN32
pthread_t helper_thread;
pthread_t writer_thread;
+ pthread_t dumper_thread;
#endif
volatile gint32 run_writer_thread;
+ MonoLockFreeAllocSizeClass writer_entry_size_class;
+ MonoLockFreeAllocator writer_entry_allocator;
MonoLockFreeQueue writer_queue;
MonoSemType writer_queue_sem;
MonoConcurrentHashTable *method_table;
mono_mutex_t method_table_mutex;
+ volatile gint32 run_dumper_thread;
+ MonoLockFreeQueue dumper_queue;
+ MonoSemType dumper_queue_sem;
+ MonoLockFreeAllocSizeClass sample_size_class;
+ MonoLockFreeAllocator sample_allocator;
+ MonoLockFreeQueue sample_reuse_queue;
BinaryObject *binary_objects;
GPtrArray *coverage_filters;
- GPtrArray *sorted_sample_events;
};
-typedef struct _WriterQueueEntry WriterQueueEntry;
-struct _WriterQueueEntry {
+typedef struct {
MonoLockFreeQueueNode node;
GPtrArray *methods;
LogBuffer *buffer;
-};
+} WriterQueueEntry;
+
+#define WRITER_ENTRY_BLOCK_SIZE (mono_pagesize ())
-typedef struct _MethodInfo MethodInfo;
-struct _MethodInfo {
+typedef struct {
MonoMethod *method;
MonoJitInfo *ji;
uint64_t time;
-};
-
-#ifdef TLS_INIT
-#undef TLS_INIT
-#endif
+} MethodInfo;
#ifdef HOST_WIN32
-#define TLS_SET(x,y) (TlsSetValue (x, y))
-#define TLS_GET(t,x) ((t *) TlsGetValue (x))
-#define TLS_INIT(x) (x = TlsAlloc ())
-static int tlsbuffer;
-static int tlsmethodlist;
+
+#define PROF_TLS_SET(VAL) (TlsSetValue (profiler_tls, (VAL)))
+#define PROF_TLS_GET() ((MonoProfilerThread *) TlsGetValue (profiler_tls))
+#define PROF_TLS_INIT() (profiler_tls = TlsAlloc ())
+#define PROF_TLS_FREE() (TlsFree (profiler_tls))
+
+static DWORD profiler_tls;
+
#elif HAVE_KW_THREAD
-#define TLS_SET(x,y) (x = y)
-#define TLS_GET(t,x) (x)
-#define TLS_INIT(x)
-static __thread LogBuffer* tlsbuffer = NULL;
-static __thread GPtrArray* tlsmethodlist = NULL;
+
+#define PROF_TLS_SET(VAL) (profiler_tls = (VAL))
+#define PROF_TLS_GET() (profiler_tls)
+#define PROF_TLS_INIT()
+#define PROF_TLS_FREE()
+
+static __thread MonoProfilerThread *profiler_tls;
+
#else
-#define TLS_SET(x,y) (pthread_setspecific (x, y))
-#define TLS_GET(t,x) ((t *) pthread_getspecific (x))
-#define TLS_INIT(x) (pthread_key_create (&x, NULL))
-static pthread_key_t tlsbuffer;
-static pthread_key_t tlsmethodlist;
-#endif
-static void safe_send (MonoProfiler *profiler, LogBuffer *logbuffer);
+#define PROF_TLS_SET(VAL) (pthread_setspecific (profiler_tls, (VAL)))
+#define PROF_TLS_GET() ((MonoProfilerThread *) pthread_getspecific (profiler_tls))
+#define PROF_TLS_INIT() (pthread_key_create (&profiler_tls, NULL))
+#define PROF_TLS_FREE() (pthread_key_delete (&profiler_tls))
+
+static pthread_key_t profiler_tls;
+
+#endif
static char*
pstrdup (const char *s)
return p;
}
-static StatBuffer*
-create_stat_buffer (void)
-{
- StatBuffer* buf = (StatBuffer *)alloc_buffer (BUFFER_SIZE);
- buf->size = BUFFER_SIZE;
- buf->buf_end = (uintptr_t*)((unsigned char*)buf + buf->size);
- buf->cursor = buf->buf;
- return buf;
-}
-
static LogBuffer*
create_buffer (void)
{
LogBuffer* buf = (LogBuffer *)alloc_buffer (BUFFER_SIZE);
+
+ InterlockedIncrement (&buffer_allocations);
+
buf->size = BUFFER_SIZE;
buf->time_base = current_time ();
buf->last_time = buf->time_base;
}
static void
-init_thread (void)
+init_buffer_state (MonoProfilerThread *thread)
{
- if (!TLS_GET (LogBuffer, tlsbuffer)) {
- LogBuffer *logbuffer = create_buffer ();
- TLS_SET (tlsbuffer, logbuffer);
- logbuffer->thread_id = thread_id ();
- }
- if (!TLS_GET (GPtrArray, tlsmethodlist)) {
- GPtrArray *methodlist = g_ptr_array_new ();
- TLS_SET (tlsmethodlist, methodlist);
+ thread->buffer = create_buffer ();
+ thread->methods = NULL;
+}
+
+// Reset all three hazard pointer slots of `hp` after an LLS operation.
+static void
+clear_hazard_pointers (MonoThreadHazardPointers *hp)
+{
+	for (int slot = 0; slot < 3; ++slot)
+		mono_hazard_pointer_clear (hp, slot);
+}
+
+// Lazily creates (or returns) the calling thread's MonoProfilerThread state.
+// When add_to_lls is TRUE, the thread is also registered in the global
+// lock-free thread list so sync_point_flush () and shutdown can find it.
+static MonoProfilerThread *
+init_thread (gboolean add_to_lls)
+{
+	MonoProfilerThread *thread = PROF_TLS_GET ();
+
+	/*
+	 * Sometimes we may try to initialize a thread twice. One example is the
+	 * main thread: We initialize it when setting up the profiler, but we will
+	 * also get a thread_start () callback for it. Another example is when
+	 * attaching new threads to the runtime: We may get a gc_alloc () callback
+	 * for that thread's thread object (where we initialize it), soon followed
+	 * by a thread_start () callback.
+	 *
+	 * These cases are harmless anyhow. Just return if we've already done the
+	 * initialization work.
+	 */
+	if (thread)
+		return thread;
+
+	// NOTE(review): malloc result is unchecked; an OOM here crashes on the
+	// writes below — confirm whether an aborting allocator (g_malloc) is preferred.
+	thread = malloc (sizeof (MonoProfilerThread));
+	thread->node.key = thread_id ();
+	thread->call_depth = 0;
+	thread->busy = 0;
+
+	// Sets thread->buffer and thread->methods.
+	init_buffer_state (thread);
+
+	/*
+	 * Some internal profiler threads don't need to be cleaned up
+	 * by the main thread on shutdown.
+	 */
+	if (add_to_lls) {
+		MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();
+		g_assert (mono_lls_insert (&profiler_thread_list, hp, &thread->node) && "Why can't we insert the thread in the LLS?");
+		clear_hazard_pointers (hp);
	}
-	//printf ("thread %p at time %llu\n", (void*)logbuffer->thread_id, logbuffer->time_base);
+	PROF_TLS_SET (thread);
+
+	return thread;
+}
+
+// Only valid if init_thread () was called with add_to_lls = FALSE.
+// Frees the per-thread state and clears the TLS slot; threads registered
+// in the LLS must go through remove_thread () instead.
+static void
+deinit_thread (MonoProfilerThread *thread)
+{
+	free (thread);
+	PROF_TLS_SET (NULL);
}
static LogBuffer *
if (old && old->cursor + bytes + 100 < old->buf_end)
return old;
- LogBuffer *new_ = (LogBuffer *)create_buffer ();
- new_->thread_id = thread_id ();
+ LogBuffer *new_ = create_buffer ();
new_->next = old;
- if (old)
- new_->call_depth = old->call_depth;
-
return new_;
}
-static LogBuffer*
-ensure_logbuf (int bytes)
+// Only valid if init_thread () was called with add_to_lls = FALSE.
+static LogBuffer *
+ensure_logbuf_unsafe (int bytes)
{
- LogBuffer *old = TLS_GET (LogBuffer, tlsbuffer);
+ MonoProfilerThread *thread = PROF_TLS_GET ();
+ LogBuffer *old = thread->buffer;
LogBuffer *new_ = ensure_logbuf_inner (old, bytes);
if (new_ == old)
return old; // Still enough space.
- TLS_SET (tlsbuffer, new_);
- init_thread ();
+ thread->buffer = new_;
return new_;
}
+/*
+ * Any calls to this function should be wrapped in the ENTER_LOG and
+ * EXIT_LOG macros to prevent the returned pointer from leaking
+ * outside of the critical region created by the calls to buffer_lock ()
+ * and buffer_unlock () that those macros insert. If the pointer leaks,
+ * it can and will lead to crashes as the GC or helper thread may
+ * invalidate the pointer at any time.
+ *
+ * Note: If you're calling from a thread that called init_thread () with
+ * add_to_lls = FALSE, you should use ensure_logbuf_unsafe () and omit
+ * the macros.
+ */
+static LogBuffer*
+ensure_logbuf (int bytes)
+{
+	// The busy flag is set by ENTER_LOG; this catches unwrapped callers.
+	g_assert (PROF_TLS_GET ()->busy && "Why are we trying to expand our buffer without the busy flag set?");
+
+	return ensure_logbuf_unsafe (bytes);
+}
+
static void
emit_byte (LogBuffer *logbuffer, int value)
{
assert (logbuffer->cursor <= logbuffer->buf_end);
}
+// Writes the standard event prefix: one event/type byte followed by the
+// LEB128 time delta (see EVENT_SIZE).
+static void
+emit_event_time (LogBuffer *logbuffer, int event, uint64_t time)
+{
+	emit_byte (logbuffer, event);
+	emit_time (logbuffer, time);
+}
+
+// Convenience wrapper: emit_event_time () stamped with the current time.
+static void
+emit_event (LogBuffer *logbuffer, int event)
+{
+	emit_event_time (logbuffer, event, current_time ());
+}
+
static void
emit_svalue (LogBuffer *logbuffer, int64_t value)
{
*/
//g_assert (ji);
- MethodInfo *info = (MethodInfo *)malloc (sizeof (MethodInfo));
+ MethodInfo *info = (MethodInfo *) malloc (sizeof (MethodInfo));
info->method = method;
info->ji = ji;
info->time = current_time ();
- g_ptr_array_add (TLS_GET (GPtrArray, tlsmethodlist), info);
+ MonoProfilerThread *thread = PROF_TLS_GET ();
+ GPtrArray *arr = thread->methods ? thread->methods : (thread->methods = g_ptr_array_new ());
+ g_ptr_array_add (arr, info);
}
}
emit_method_inner (logbuffer, method);
}
-static void
-emit_method_as_ptr (MonoProfiler *prof, LogBuffer *logbuffer, MonoMethod *method)
-{
- register_method_local (prof, method, NULL);
- emit_ptr (logbuffer, method);
-}
-
static void
emit_obj (LogBuffer *logbuffer, void *ptr)
{
}
static void
-send_buffer (MonoProfiler *prof, GPtrArray *methods, LogBuffer *buffer)
+send_buffer (MonoProfiler *prof, MonoProfilerThread *thread)
{
- WriterQueueEntry *entry = (WriterQueueEntry *)calloc (1, sizeof (WriterQueueEntry));
+ WriterQueueEntry *entry = mono_lock_free_alloc (&prof->writer_entry_allocator);
+ entry->methods = thread->methods;
+ entry->buffer = thread->buffer;
+
mono_lock_free_queue_node_init (&entry->node, FALSE);
- entry->methods = methods;
- entry->buffer = buffer;
+
mono_lock_free_queue_enqueue (&prof->writer_queue, &entry->node);
mono_os_sem_post (&prof->writer_queue_sem);
}
+/*
+ * Removes `thread` from the global thread list and hands its remaining
+ * buffer chain (and method list) off to the writer thread. Called either
+ * from the thread end callback (from_callback = TRUE) or by the main
+ * thread during shutdown for threads that never reported an end event.
+ */
+static void
+remove_thread (MonoProfiler *prof, MonoProfilerThread *thread, gboolean from_callback)
+{
+	MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();
+
+	if (mono_lls_remove (&profiler_thread_list, hp, &thread->node)) {
+		LogBuffer *buffer = thread->buffer;
+
+		/*
+		 * No need to take the buffer lock here as no other threads can
+		 * be accessing this buffer anymore.
+		 */
+
+		if (!from_callback) {
+			/*
+			 * The thread is being cleaned up by the main thread during
+			 * shutdown. This typically happens for internal runtime
+			 * threads. We need to synthesize a thread end event.
+			 */
+
+			buffer = ensure_logbuf_inner (buffer,
+				EVENT_SIZE /* event */ +
+				BYTE_SIZE /* type */ +
+				LEB128_SIZE /* tid */
+			);
+
+			emit_event (buffer, TYPE_END_UNLOAD | TYPE_METADATA);
+			emit_byte (buffer, TYPE_THREAD);
+			emit_ptr (buffer, (void *) thread->node.key);
+
+			/*
+			 * ensure_logbuf_inner () may have allocated a fresh buffer
+			 * (linking the old one behind it). Store it back so that
+			 * send_buffer () below, which reads thread->buffer, does not
+			 * send a stale chain and drop (and leak) the synthesized
+			 * thread end event.
+			 */
+			thread->buffer = buffer;
+		}
+
+		send_buffer (prof, thread);
+
+		mono_thread_hazardous_try_free (thread, free);
+	}
+
+	clear_hazard_pointers (hp);
+
+	if (from_callback)
+		PROF_TLS_SET (NULL);
+}
+
static void
dump_buffer (MonoProfiler *profiler, LogBuffer *buf)
{
free_buffer (buf, buf->size);
}
+// Like dump_buffer (), but first clears the thread id on every buffer in
+// the chain so the contained events are not attributed to any thread.
+static void
+dump_buffer_threadless (MonoProfiler *profiler, LogBuffer *buf)
+{
+	LogBuffer *cur = buf;
+
+	while (cur) {
+		cur->thread_id = 0;
+		cur = cur->next;
+	}
+
+	dump_buffer (profiler, buf);
+}
+
static void
process_requests (MonoProfiler *profiler)
{
}
static void counters_init (MonoProfiler *profiler);
-static void counters_sample (MonoProfiler *profiler, uint64_t timestamp, gboolean threadless);
+static void counters_sample (MonoProfiler *profiler, uint64_t timestamp);
-/*
- * Can be called only at safe callback locations.
- */
static void
-safe_send (MonoProfiler *profiler, LogBuffer *logbuffer)
+safe_send (MonoProfiler *profiler)
{
/* We need the runtime initialized so that we have threads and hazard
* pointers available. Otherwise, the lock free queue will not work and
if (!InterlockedRead (&runtime_inited))
return;
- int cd = logbuffer->call_depth;
+ MonoProfilerThread *thread = PROF_TLS_GET ();
- send_buffer (profiler, TLS_GET (GPtrArray, tlsmethodlist), TLS_GET (LogBuffer, tlsbuffer));
+ buffer_lock ();
- TLS_SET (tlsbuffer, NULL);
- TLS_SET (tlsmethodlist, NULL);
+ send_buffer (profiler, thread);
+ init_buffer_state (thread);
- init_thread ();
+ buffer_unlock ();
+}
- TLS_GET (LogBuffer, tlsbuffer)->call_depth = cd;
+// Flushes the calling thread's buffers via safe_send () only once the
+// chain has grown past a single buffer (i.e. `next` is non-NULL).
+static void
+send_if_needed (MonoProfiler *prof)
+{
+	if (PROF_TLS_GET ()->buffer->next)
+		safe_send (prof);
}
static void
-safe_send_threadless (MonoProfiler *prof, LogBuffer *buf)
+// Like safe_send (), but zeroes the thread id on the whole buffer chain
+// first so the flushed events are not attributed to any thread.
+safe_send_threadless (MonoProfiler *prof)
{
+	LogBuffer *buf = PROF_TLS_GET ()->buffer;
+
	for (LogBuffer *iter = buf; iter; iter = iter->next)
		iter->thread_id = 0;
-	safe_send (prof, buf);
+	safe_send (prof);
+}
+
+// Threadless counterpart of send_if_needed (): flush only once the
+// calling thread's buffer chain has more than one buffer.
+static void
+send_if_needed_threadless (MonoProfiler *prof)
+{
+	if (PROF_TLS_GET ()->buffer->next)
+		safe_send_threadless (prof);
+}
+
+// Assumes that the exclusive lock is held.
+// Hands every registered thread's buffer to the writer queue and gives
+// each thread a fresh buffer, so all pending events are committed.
+static void
+sync_point_flush (MonoProfiler *prof)
+{
+	g_assert (InterlockedReadPointer (&buffer_rwlock_exclusive) == (gpointer) thread_id () && "Why don't we hold the exclusive lock?");
+
+	MONO_LLS_FOREACH_SAFE (&profiler_thread_list, MonoProfilerThread, thread) {
+		send_buffer (prof, thread);
+		init_buffer_state (thread);
+	} MONO_LLS_FOREACH_SAFE_END
+}
+
+// Assumes that the exclusive lock is held.
+// Emits a TYPE_SYNC_POINT event into the calling thread's buffer and then
+// flushes it so the sync point reaches the log file promptly.
+static void
+sync_point_mark (MonoProfiler *prof, MonoProfilerSyncPointType type)
+{
+	g_assert (InterlockedReadPointer (&buffer_rwlock_exclusive) == (gpointer) thread_id () && "Why don't we hold the exclusive lock?");
+
+	ENTER_LOG;
+
+	// The sync point type is written with emit_byte (), so reserve
+	// BYTE_SIZE (not LEB128_SIZE) for it, consistent with the other
+	// byte-encoded event payloads in this file.
+	LogBuffer *logbuffer = ensure_logbuf (
+		EVENT_SIZE /* event */ +
+		BYTE_SIZE /* type */
+	);
+
+	emit_event (logbuffer, TYPE_META | TYPE_SYNC_POINT);
+	emit_byte (logbuffer, type);
+
+	EXIT_LOG;
+
+	switch (type) {
+	case SYNC_POINT_PERIODIC:
+		safe_send_threadless (prof);
+		break;
+	case SYNC_POINT_WORLD_STOP:
+	case SYNC_POINT_WORLD_START:
+		safe_send (prof);
+		break;
+	default:
+		g_assert_not_reached ();
+		break;
+	}
+}
+
+// Assumes that the exclusive lock is held.
+// Full sync point: commit every thread's pending events, then emit and
+// flush the sync point marker itself.
+static void
+sync_point (MonoProfiler *prof, MonoProfilerSyncPointType type)
+{
+	sync_point_flush (prof);
+	sync_point_mark (prof, type);
}
static int
gc_reference (MonoObject *obj, MonoClass *klass, uintptr_t size, uintptr_t num, MonoObject **refs, uintptr_t *offsets, void *data)
{
- int i;
- uintptr_t last_offset = 0;
- //const char *name = mono_class_get_name (klass);
+ /* account for object alignment in the heap */
+ size += 7;
+ size &= ~7;
+
+ ENTER_LOG;
+
LogBuffer *logbuffer = ensure_logbuf (
EVENT_SIZE /* event */ +
LEB128_SIZE /* obj */ +
LEB128_SIZE /* ref */
)
);
- emit_byte (logbuffer, TYPE_HEAP_OBJECT | TYPE_HEAP);
+
+ emit_event (logbuffer, TYPE_HEAP_OBJECT | TYPE_HEAP);
emit_obj (logbuffer, obj);
emit_ptr (logbuffer, klass);
- /* account for object alignment in the heap */
- size += 7;
- size &= ~7;
emit_value (logbuffer, size);
emit_value (logbuffer, num);
- for (i = 0; i < num; ++i) {
+
+ uintptr_t last_offset = 0;
+
+ for (int i = 0; i < num; ++i) {
emit_value (logbuffer, offsets [i] - last_offset);
last_offset = offsets [i];
emit_obj (logbuffer, refs [i]);
}
- //if (num)
- // printf ("obj: %p, klass: %s, refs: %d, size: %d\n", obj, name, (int)num, (int)size);
+
+ EXIT_LOG;
+
return 0;
}
static void
heap_walk (MonoProfiler *profiler)
{
- int do_walk = 0;
- uint64_t now;
- LogBuffer *logbuffer;
if (!do_heap_shot)
return;
- logbuffer = ensure_logbuf (
- EVENT_SIZE /* event */ +
- LEB128_SIZE /* time */
- );
- now = current_time ();
- if (hs_mode_ms && (now - last_hs_time)/1000000 >= hs_mode_ms)
- do_walk = 1;
+
+ gboolean do_walk = 0;
+ uint64_t now = current_time ();
+
+ if (hs_mode_ms && (now - last_hs_time) / 1000000 >= hs_mode_ms)
+ do_walk = TRUE;
else if (hs_mode_gc && (gc_count % hs_mode_gc) == 0)
- do_walk = 1;
+ do_walk = TRUE;
else if (hs_mode_ondemand)
do_walk = heapshot_requested;
else if (!hs_mode_ms && !hs_mode_gc && profiler->last_gc_gen_started == mono_gc_max_generation ())
- do_walk = 1;
+ do_walk = TRUE;
if (!do_walk)
return;
+
heapshot_requested = 0;
- emit_byte (logbuffer, TYPE_HEAP_START | TYPE_HEAP);
- emit_time (logbuffer, now);
+
+ ENTER_LOG;
+
+ LogBuffer *logbuffer = ensure_logbuf (
+ EVENT_SIZE /* event */
+ );
+
+ emit_event (logbuffer, TYPE_HEAP_START | TYPE_HEAP);
+
+ EXIT_LOG;
+
mono_gc_walk_heap (0, gc_reference, NULL);
- logbuffer = ensure_logbuf (
- EVENT_SIZE /* event */ +
- LEB128_SIZE /* time */
+
+ ENTER_LOG;
+
+ LogBuffer *logbuffer = ensure_logbuf (
+ EVENT_SIZE /* event */
);
+
now = current_time ();
- emit_byte (logbuffer, TYPE_HEAP_END | TYPE_HEAP);
- emit_time (logbuffer, now);
+
+ emit_event (logbuffer, TYPE_HEAP_END | TYPE_HEAP);
+
+ EXIT_LOG;
+
last_hs_time = now;
}
static void
-gc_event (MonoProfiler *profiler, MonoGCEvent ev, int generation) {
- uint64_t now;
+gc_event (MonoProfiler *profiler, MonoGCEvent ev, int generation)
+{
+ ENTER_LOG;
+
LogBuffer *logbuffer = ensure_logbuf (
EVENT_SIZE /* event */ +
- LEB128_SIZE /* time */ +
- LEB128_SIZE /* gc event */ +
- LEB128_SIZE /* generation */
+ BYTE_SIZE /* gc event */ +
+ BYTE_SIZE /* generation */
);
- now = current_time ();
- ENTER_LOG (logbuffer, "gcevent");
- emit_byte (logbuffer, TYPE_GC_EVENT | TYPE_GC);
- emit_time (logbuffer, now);
- emit_value (logbuffer, ev);
- emit_value (logbuffer, generation);
- /* to deal with nested gen1 after gen0 started */
- if (ev == MONO_GC_EVENT_START) {
+
+ emit_event (logbuffer, TYPE_GC_EVENT | TYPE_GC);
+ emit_byte (logbuffer, ev);
+ emit_byte (logbuffer, generation);
+
+ EXIT_LOG;
+
+ switch (ev) {
+ case MONO_GC_EVENT_START:
+ /* to deal with nested gen1 after gen0 started */
profiler->last_gc_gen_started = generation;
+
if (generation == mono_gc_max_generation ())
gc_count++;
- }
- if (ev == MONO_GC_EVENT_PRE_START_WORLD)
+ break;
+ case MONO_GC_EVENT_PRE_STOP_WORLD_LOCKED:
+ /*
+ * Ensure that no thread can be in the middle of writing to
+ * a buffer when the world stops...
+ */
+ buffer_lock_excl ();
+ break;
+ case MONO_GC_EVENT_POST_STOP_WORLD:
+ /*
+ * ... So that we now have a consistent view of all buffers.
+ * This allows us to flush them. We need to do this because
+ * they may contain object allocation events that need to be
+ * committed to the log file before any object move events
+ * that will be produced during this GC.
+ */
+ sync_point (profiler, SYNC_POINT_WORLD_STOP);
+ break;
+ case MONO_GC_EVENT_PRE_START_WORLD:
heap_walk (profiler);
- EXIT_LOG (logbuffer);
- if (ev == MONO_GC_EVENT_POST_START_WORLD)
- safe_send (profiler, logbuffer);
- //printf ("gc event %d for generation %d\n", ev, generation);
+ break;
+ case MONO_GC_EVENT_POST_START_WORLD_UNLOCKED:
+ /*
+ * Similarly, we must now make sure that any object moves
+ * written to the GC thread's buffer are flushed. Otherwise,
+ * object allocation events for certain addresses could come
+ * after the move events that made those addresses available.
+ */
+ sync_point_mark (profiler, SYNC_POINT_WORLD_START);
+
+ /*
+ * Finally, it is safe to allow other threads to write to
+ * their buffers again.
+ */
+ buffer_unlock_excl ();
+ break;
+ default:
+ break;
+ }
}
static void
-gc_resize (MonoProfiler *profiler, int64_t new_size) {
- uint64_t now;
+gc_resize (MonoProfiler *profiler, int64_t new_size)
+{
+ ENTER_LOG;
+
LogBuffer *logbuffer = ensure_logbuf (
EVENT_SIZE /* event */ +
- LEB128_SIZE /* time */ +
LEB128_SIZE /* new size */
);
- now = current_time ();
- ENTER_LOG (logbuffer, "gcresize");
- emit_byte (logbuffer, TYPE_GC_RESIZE | TYPE_GC);
- emit_time (logbuffer, now);
+
+ emit_event (logbuffer, TYPE_GC_RESIZE | TYPE_GC);
emit_value (logbuffer, new_size);
- //printf ("gc resized to %lld\n", new_size);
- EXIT_LOG (logbuffer);
+
+ EXIT_LOG;
}
+// If you alter MAX_FRAMES, you may need to alter SAMPLE_BLOCK_SIZE too.
#define MAX_FRAMES 32
+
typedef struct {
int count;
MonoMethod* methods [MAX_FRAMES];
int32_t il_offsets [MAX_FRAMES];
int32_t native_offsets [MAX_FRAMES];
} FrameData;
+
static int num_frames = MAX_FRAMES;
static mono_bool
*/
if (data->count > num_frames)
printf ("bad num frames: %d\n", data->count);
- emit_value (logbuffer, 0); /* flags */
emit_value (logbuffer, data->count);
//if (*p != data.count) {
// printf ("bad num frames enc at %d: %d -> %d\n", count, data.count, *p); printf ("frames end: %p->%p\n", p, logbuffer->cursor); exit(0);}
while (data->count) {
- emit_method_as_ptr (prof, logbuffer, data->methods [--data->count]);
+ emit_method (prof, logbuffer, data->methods [--data->count]);
}
}
static void
gc_alloc (MonoProfiler *prof, MonoObject *obj, MonoClass *klass)
{
- uint64_t now;
- uintptr_t len;
- int do_bt = (nocalls && InterlockedRead (&runtime_inited) && !notraces)? TYPE_ALLOC_BT: 0;
+ init_thread (TRUE);
+
+ int do_bt = (nocalls && InterlockedRead (&runtime_inited) && !notraces) ? TYPE_ALLOC_BT : 0;
FrameData data;
- LogBuffer *logbuffer;
- len = mono_object_get_size (obj);
+ uintptr_t len = mono_object_get_size (obj);
/* account for object alignment in the heap */
len += 7;
len &= ~7;
+
if (do_bt)
collect_bt (&data);
- logbuffer = ensure_logbuf (
+
+ ENTER_LOG;
+
+ LogBuffer *logbuffer = ensure_logbuf (
EVENT_SIZE /* event */ +
- LEB128_SIZE /* time */ +
LEB128_SIZE /* klass */ +
LEB128_SIZE /* obj */ +
LEB128_SIZE /* size */ +
(do_bt ? (
- LEB128_SIZE /* flags */ +
LEB128_SIZE /* count */ +
data.count * (
LEB128_SIZE /* method */
)
) : 0)
);
- now = current_time ();
- ENTER_LOG (logbuffer, "gcalloc");
- emit_byte (logbuffer, do_bt | TYPE_ALLOC);
- emit_time (logbuffer, now);
+
+ emit_event (logbuffer, do_bt | TYPE_ALLOC);
emit_ptr (logbuffer, klass);
emit_obj (logbuffer, obj);
emit_value (logbuffer, len);
+
if (do_bt)
emit_bt (prof, logbuffer, &data);
- EXIT_LOG (logbuffer);
- if (logbuffer->next)
- safe_send (prof, logbuffer);
+
+ EXIT_LOG;
+
+ send_if_needed (prof);
+
process_requests (prof);
- //printf ("gc alloc %s at %p\n", mono_class_get_name (klass), obj);
}
static void
gc_moves (MonoProfiler *prof, void **objects, int num)
{
- int i;
- uint64_t now;
+ ENTER_LOG;
+
LogBuffer *logbuffer = ensure_logbuf (
EVENT_SIZE /* event */ +
- LEB128_SIZE /* time */ +
LEB128_SIZE /* num */ +
num * (
LEB128_SIZE /* object */
)
);
- now = current_time ();
- ENTER_LOG (logbuffer, "gcmove");
- emit_byte (logbuffer, TYPE_GC_MOVE | TYPE_GC);
- emit_time (logbuffer, now);
+
+ emit_event (logbuffer, TYPE_GC_MOVE | TYPE_GC);
emit_value (logbuffer, num);
- for (i = 0; i < num; ++i)
+
+ for (int i = 0; i < num; ++i)
emit_obj (logbuffer, objects [i]);
- //printf ("gc moved %d objects\n", num/2);
- EXIT_LOG (logbuffer);
+
+ EXIT_LOG;
}
static void
gc_roots (MonoProfiler *prof, int num, void **objects, int *root_types, uintptr_t *extra_info)
{
- int i;
+ ENTER_LOG;
+
	LogBuffer *logbuffer = ensure_logbuf (
		EVENT_SIZE /* event */ +
		LEB128_SIZE /* num */ +
		LEB128_SIZE /* collections */ +
		num * (
			LEB128_SIZE /* object */ +
			LEB128_SIZE /* root type */ +
			LEB128_SIZE /* extra info */
		)
	);
- ENTER_LOG (logbuffer, "gcroots");
- emit_byte (logbuffer, TYPE_HEAP_ROOT | TYPE_HEAP);
+
+ emit_event (logbuffer, TYPE_HEAP_ROOT | TYPE_HEAP);
emit_value (logbuffer, num);
emit_value (logbuffer, mono_gc_collection_count (mono_gc_max_generation ()));
- for (i = 0; i < num; ++i) {
+
+ for (int i = 0; i < num; ++i) {
emit_obj (logbuffer, objects [i]);
- emit_value (logbuffer, root_types [i]);
+ emit_byte (logbuffer, root_types [i]);
emit_value (logbuffer, extra_info [i]);
}
- EXIT_LOG (logbuffer);
+
+ EXIT_LOG;
}
static void
gc_handle (MonoProfiler *prof, int op, int type, uintptr_t handle, MonoObject *obj)
{
int do_bt = nocalls && InterlockedRead (&runtime_inited) && !notraces;
- uint64_t now;
FrameData data;
if (do_bt)
collect_bt (&data);
+ ENTER_LOG;
+
LogBuffer *logbuffer = ensure_logbuf (
EVENT_SIZE /* event */ +
- LEB128_SIZE /* time */ +
LEB128_SIZE /* type */ +
LEB128_SIZE /* handle */ +
(op == MONO_PROFILER_GC_HANDLE_CREATED ? (
LEB128_SIZE /* obj */
) : 0) +
		(do_bt ? (
			LEB128_SIZE /* count */ +
			data.count * (
				LEB128_SIZE /* method */
			)
		) : 0)
	);
- now = current_time ();
- ENTER_LOG (logbuffer, "gchandle");
-
if (op == MONO_PROFILER_GC_HANDLE_CREATED)
- emit_byte (logbuffer, (do_bt ? TYPE_GC_HANDLE_CREATED_BT : TYPE_GC_HANDLE_CREATED) | TYPE_GC);
+ emit_event (logbuffer, (do_bt ? TYPE_GC_HANDLE_CREATED_BT : TYPE_GC_HANDLE_CREATED) | TYPE_GC);
else if (op == MONO_PROFILER_GC_HANDLE_DESTROYED)
- emit_byte (logbuffer, (do_bt ? TYPE_GC_HANDLE_DESTROYED_BT : TYPE_GC_HANDLE_DESTROYED) | TYPE_GC);
+ emit_event (logbuffer, (do_bt ? TYPE_GC_HANDLE_DESTROYED_BT : TYPE_GC_HANDLE_DESTROYED) | TYPE_GC);
else
g_assert_not_reached ();
- emit_time (logbuffer, now);
emit_value (logbuffer, type);
emit_value (logbuffer, handle);
if (do_bt)
emit_bt (prof, logbuffer, &data);
- EXIT_LOG (logbuffer);
+ EXIT_LOG;
+
+ process_requests (prof);
+}
+
+static void
+finalize_begin (MonoProfiler *prof)
+{
+ ENTER_LOG;
+
+ LogBuffer *buf = ensure_logbuf (
+ EVENT_SIZE /* event */
+ );
+
+ emit_event (buf, TYPE_GC_FINALIZE_START | TYPE_GC);
+
+ EXIT_LOG;
+
+ process_requests (prof);
+}
+
+static void
+finalize_end (MonoProfiler *prof)
+{
+ ENTER_LOG;
+
+ LogBuffer *buf = ensure_logbuf (
+ EVENT_SIZE /* event */
+ );
+
+ emit_event (buf, TYPE_GC_FINALIZE_END | TYPE_GC);
+
+ EXIT_LOG;
+
+ process_requests (prof);
+}
+
+static void
+finalize_object_begin (MonoProfiler *prof, MonoObject *obj)
+{
+ ENTER_LOG;
+
+ LogBuffer *buf = ensure_logbuf (
+ EVENT_SIZE /* event */ +
+ LEB128_SIZE /* obj */
+ );
+
+ emit_event (buf, TYPE_GC_FINALIZE_OBJECT_START | TYPE_GC);
+ emit_obj (buf, obj);
+
+ EXIT_LOG;
+
+ process_requests (prof);
+}
+
+static void
+finalize_object_end (MonoProfiler *prof, MonoObject *obj)
+{
+ ENTER_LOG;
+
+ LogBuffer *buf = ensure_logbuf (
+ EVENT_SIZE /* event */ +
+ LEB128_SIZE /* obj */
+ );
+
+ emit_event (buf, TYPE_GC_FINALIZE_OBJECT_END | TYPE_GC);
+ emit_obj (buf, obj);
+
+ EXIT_LOG;
+
process_requests (prof);
}
static void
image_loaded (MonoProfiler *prof, MonoImage *image, int result)
{
- uint64_t now;
- const char *name;
- int nlen;
- LogBuffer *logbuffer;
if (result != MONO_PROFILE_OK)
return;
- name = mono_image_get_filename (image);
- nlen = strlen (name) + 1;
- logbuffer = ensure_logbuf (
+
+ const char *name = mono_image_get_filename (image);
+ int nlen = strlen (name) + 1;
+
+ ENTER_LOG;
+
+ LogBuffer *logbuffer = ensure_logbuf (
EVENT_SIZE /* event */ +
- LEB128_SIZE /* time */ +
- EVENT_SIZE /* type */ +
+ BYTE_SIZE /* type */ +
LEB128_SIZE /* image */ +
- LEB128_SIZE /* flags */ +
nlen /* name */
);
- now = current_time ();
- ENTER_LOG (logbuffer, "image");
- emit_byte (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
- emit_time (logbuffer, now);
+
+ emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
emit_byte (logbuffer, TYPE_IMAGE);
emit_ptr (logbuffer, image);
- emit_value (logbuffer, 0); /* flags */
memcpy (logbuffer->cursor, name, nlen);
logbuffer->cursor += nlen;
- //printf ("loaded image %p (%s)\n", image, name);
- EXIT_LOG (logbuffer);
- if (logbuffer->next)
- safe_send (prof, logbuffer);
+
+ EXIT_LOG;
+
+ send_if_needed (prof);
+
process_requests (prof);
+
+ InterlockedIncrement (&image_loads);
}
static void
{
const char *name = mono_image_get_filename (image);
int nlen = strlen (name) + 1;
+
+ ENTER_LOG;
+
LogBuffer *logbuffer = ensure_logbuf (
EVENT_SIZE /* event */ +
- LEB128_SIZE /* time */ +
- EVENT_SIZE /* type */ +
+ BYTE_SIZE /* type */ +
LEB128_SIZE /* image */ +
- LEB128_SIZE /* flags */ +
nlen /* name */
);
- uint64_t now = current_time ();
- ENTER_LOG (logbuffer, "image-unload");
- emit_byte (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
- emit_time (logbuffer, now);
+ emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
emit_byte (logbuffer, TYPE_IMAGE);
emit_ptr (logbuffer, image);
- emit_value (logbuffer, 0); /* flags */
memcpy (logbuffer->cursor, name, nlen);
logbuffer->cursor += nlen;
- EXIT_LOG (logbuffer);
- if (logbuffer->next)
- safe_send (prof, logbuffer);
+ EXIT_LOG;
+
+ send_if_needed (prof);
process_requests (prof);
+
+ InterlockedIncrement (&image_unloads);
}
static void
char *name = mono_stringify_assembly_name (mono_assembly_get_name (assembly));
int nlen = strlen (name) + 1;
+
+ ENTER_LOG;
+
LogBuffer *logbuffer = ensure_logbuf (
EVENT_SIZE /* event */ +
- LEB128_SIZE /* time */ +
- EVENT_SIZE /* type */ +
+ BYTE_SIZE /* type */ +
LEB128_SIZE /* assembly */ +
- LEB128_SIZE /* flags */ +
nlen /* name */
);
- uint64_t now = current_time ();
- ENTER_LOG (logbuffer, "assembly-load");
- emit_byte (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
- emit_time (logbuffer, now);
+ emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
emit_byte (logbuffer, TYPE_ASSEMBLY);
emit_ptr (logbuffer, assembly);
- emit_value (logbuffer, 0); /* flags */
memcpy (logbuffer->cursor, name, nlen);
logbuffer->cursor += nlen;
- EXIT_LOG (logbuffer);
+
+ EXIT_LOG;
mono_free (name);
- if (logbuffer->next)
- safe_send (prof, logbuffer);
+ send_if_needed (prof);
process_requests (prof);
+
+ InterlockedIncrement (&assembly_loads);
}
static void
{
char *name = mono_stringify_assembly_name (mono_assembly_get_name (assembly));
int nlen = strlen (name) + 1;
+
+ ENTER_LOG;
+
LogBuffer *logbuffer = ensure_logbuf (
EVENT_SIZE /* event */ +
- LEB128_SIZE /* time */ +
- EVENT_SIZE /* type */ +
+ BYTE_SIZE /* type */ +
LEB128_SIZE /* assembly */ +
- LEB128_SIZE /* flags */ +
nlen /* name */
);
- uint64_t now = current_time ();
- ENTER_LOG (logbuffer, "assembly-unload");
- emit_byte (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
- emit_time (logbuffer, now);
+ emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
emit_byte (logbuffer, TYPE_ASSEMBLY);
emit_ptr (logbuffer, assembly);
- emit_value (logbuffer, 0); /* flags */
memcpy (logbuffer->cursor, name, nlen);
logbuffer->cursor += nlen;
- EXIT_LOG (logbuffer);
+
+ EXIT_LOG;
mono_free (name);
- if (logbuffer->next)
- safe_send (prof, logbuffer);
+ send_if_needed (prof);
process_requests (prof);
+
+ InterlockedIncrement (&assembly_unloads);
}
static void
class_loaded (MonoProfiler *prof, MonoClass *klass, int result)
{
- uint64_t now;
- char *name;
- int nlen;
- MonoImage *image;
- LogBuffer *logbuffer;
if (result != MONO_PROFILE_OK)
return;
+
+ char *name;
+
if (InterlockedRead (&runtime_inited))
name = mono_type_get_name (mono_class_get_type (klass));
else
name = type_name (klass);
- nlen = strlen (name) + 1;
- image = mono_class_get_image (klass);
- logbuffer = ensure_logbuf (
+
+ int nlen = strlen (name) + 1;
+ MonoImage *image = mono_class_get_image (klass);
+
+ ENTER_LOG;
+
+ LogBuffer *logbuffer = ensure_logbuf (
EVENT_SIZE /* event */ +
- LEB128_SIZE /* time */ +
- EVENT_SIZE /* type */ +
+ BYTE_SIZE /* type */ +
LEB128_SIZE /* klass */ +
LEB128_SIZE /* image */ +
- LEB128_SIZE /* flags */ +
nlen /* name */
);
- now = current_time ();
- ENTER_LOG (logbuffer, "class");
- emit_byte (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
- emit_time (logbuffer, now);
+
+ emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
emit_byte (logbuffer, TYPE_CLASS);
emit_ptr (logbuffer, klass);
emit_ptr (logbuffer, image);
- emit_value (logbuffer, 0); /* flags */
memcpy (logbuffer->cursor, name, nlen);
logbuffer->cursor += nlen;
- //printf ("loaded class %p (%s)\n", klass, name);
+
+ EXIT_LOG;
+
if (runtime_inited)
mono_free (name);
else
free (name);
- EXIT_LOG (logbuffer);
- if (logbuffer->next)
- safe_send (prof, logbuffer);
+
+ send_if_needed (prof);
+
process_requests (prof);
+
+ InterlockedIncrement (&class_loads);
}
static void
int nlen = strlen (name) + 1;
MonoImage *image = mono_class_get_image (klass);
+
+ ENTER_LOG;
+
LogBuffer *logbuffer = ensure_logbuf (
EVENT_SIZE /* event */ +
- LEB128_SIZE /* time */ +
- EVENT_SIZE /* type */ +
+ BYTE_SIZE /* type */ +
LEB128_SIZE /* klass */ +
LEB128_SIZE /* image */ +
- LEB128_SIZE /* flags */ +
nlen /* name */
);
- uint64_t now = current_time ();
- ENTER_LOG (logbuffer, "class-unload");
- emit_byte (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
- emit_time (logbuffer, now);
+ emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
emit_byte (logbuffer, TYPE_CLASS);
emit_ptr (logbuffer, klass);
emit_ptr (logbuffer, image);
- emit_value (logbuffer, 0); /* flags */
memcpy (logbuffer->cursor, name, nlen);
logbuffer->cursor += nlen;
- EXIT_LOG (logbuffer);
+
+ EXIT_LOG;
if (runtime_inited)
mono_free (name);
else
free (name);
- if (logbuffer->next)
- safe_send (prof, logbuffer);
+ send_if_needed (prof);
process_requests (prof);
+
+ InterlockedIncrement (&class_unloads);
}
#ifndef DISABLE_HELPER_THREAD
static void
method_enter (MonoProfiler *prof, MonoMethod *method)
{
- uint64_t now = current_time ();
-
#ifndef DISABLE_HELPER_THREAD
process_method_enter_coverage (prof, method);
#endif /* DISABLE_HELPER_THREAD */
- LogBuffer *logbuffer = ensure_logbuf (
- EVENT_SIZE /* event */ +
- LEB128_SIZE /* time */ +
- LEB128_SIZE /* method */
- );
- if (logbuffer->call_depth++ > max_call_depth)
- return;
- ENTER_LOG (logbuffer, "enter");
- emit_byte (logbuffer, TYPE_ENTER | TYPE_METHOD);
- emit_time (logbuffer, now);
- emit_method (prof, logbuffer, method);
- EXIT_LOG (logbuffer);
+ if (PROF_TLS_GET ()->call_depth++ <= max_call_depth) {
+ ENTER_LOG;
+
+ LogBuffer *logbuffer = ensure_logbuf (
+ EVENT_SIZE /* event */ +
+ LEB128_SIZE /* method */
+ );
+
+ emit_event (logbuffer, TYPE_ENTER | TYPE_METHOD);
+ emit_method (prof, logbuffer, method);
+
+ EXIT_LOG;
+ }
+
+ send_if_needed (prof);
process_requests (prof);
}
static void
method_leave (MonoProfiler *prof, MonoMethod *method)
{
- uint64_t now;
- LogBuffer *logbuffer = ensure_logbuf (
- EVENT_SIZE /* event */ +
- LEB128_SIZE /* time */ +
- LEB128_SIZE /* method */
- );
- if (--logbuffer->call_depth > max_call_depth)
- return;
- now = current_time ();
- ENTER_LOG (logbuffer, "leave");
- emit_byte (logbuffer, TYPE_LEAVE | TYPE_METHOD);
- emit_time (logbuffer, now);
- emit_method (prof, logbuffer, method);
- EXIT_LOG (logbuffer);
- if (logbuffer->next)
- safe_send (prof, logbuffer);
+ if (--PROF_TLS_GET ()->call_depth <= max_call_depth) {
+ ENTER_LOG;
+
+ LogBuffer *logbuffer = ensure_logbuf (
+ EVENT_SIZE /* event */ +
+ LEB128_SIZE /* method */
+ );
+
+ emit_event (logbuffer, TYPE_LEAVE | TYPE_METHOD);
+ emit_method (prof, logbuffer, method);
+
+ EXIT_LOG;
+ }
+
+ send_if_needed (prof);
+
process_requests (prof);
}
static void
method_exc_leave (MonoProfiler *prof, MonoMethod *method)
{
- uint64_t now;
- LogBuffer *logbuffer;
- if (nocalls)
- return;
- logbuffer = ensure_logbuf (
- EVENT_SIZE /* event */ +
- LEB128_SIZE /* time */ +
- LEB128_SIZE /* method */
- );
- if (--logbuffer->call_depth > max_call_depth)
- return;
- now = current_time ();
- ENTER_LOG (logbuffer, "eleave");
- emit_byte (logbuffer, TYPE_EXC_LEAVE | TYPE_METHOD);
- emit_time (logbuffer, now);
- emit_method (prof, logbuffer, method);
- EXIT_LOG (logbuffer);
+ if (!nocalls && --PROF_TLS_GET ()->call_depth <= max_call_depth) {
+ ENTER_LOG;
+
+ LogBuffer *logbuffer = ensure_logbuf (
+ EVENT_SIZE /* event */ +
+ LEB128_SIZE /* method */
+ );
+
+ emit_event (logbuffer, TYPE_EXC_LEAVE | TYPE_METHOD);
+ emit_method (prof, logbuffer, method);
+
+ EXIT_LOG;
+ }
+
+ send_if_needed (prof);
+
process_requests (prof);
}
static void
code_buffer_new (MonoProfiler *prof, void *buffer, int size, MonoProfilerCodeBufferType type, void *data)
{
- uint64_t now;
- int nlen;
char *name;
- LogBuffer *logbuffer;
+ int nlen;
+
if (type == MONO_PROFILER_CODE_BUFFER_SPECIFIC_TRAMPOLINE) {
- name = (char *)data;
+ name = (char *) data;
nlen = strlen (name) + 1;
} else {
name = NULL;
nlen = 0;
}
- logbuffer = ensure_logbuf (
+
+ ENTER_LOG;
+
+ LogBuffer *logbuffer = ensure_logbuf (
EVENT_SIZE /* event */ +
- LEB128_SIZE /* time */ +
- LEB128_SIZE /* type */ +
+ BYTE_SIZE /* type */ +
LEB128_SIZE /* buffer */ +
LEB128_SIZE /* size */ +
(name ? (
nlen /* name */
) : 0)
);
- now = current_time ();
- ENTER_LOG (logbuffer, "code buffer");
- emit_byte (logbuffer, TYPE_JITHELPER | TYPE_RUNTIME);
- emit_time (logbuffer, now);
- emit_value (logbuffer, type);
+
+ emit_event (logbuffer, TYPE_JITHELPER | TYPE_RUNTIME);
+ emit_byte (logbuffer, type);
emit_ptr (logbuffer, buffer);
emit_value (logbuffer, size);
+
if (name) {
memcpy (logbuffer->cursor, name, nlen);
logbuffer->cursor += nlen;
}
- EXIT_LOG (logbuffer);
+
+ EXIT_LOG;
+
process_requests (prof);
}
static void
throw_exc (MonoProfiler *prof, MonoObject *object)
{
- int do_bt = (nocalls && InterlockedRead (&runtime_inited) && !notraces)? TYPE_EXCEPTION_BT: 0;
- uint64_t now;
+ int do_bt = (nocalls && InterlockedRead (&runtime_inited) && !notraces) ? TYPE_EXCEPTION_BT : 0;
FrameData data;
- LogBuffer *logbuffer;
+
if (do_bt)
collect_bt (&data);
- logbuffer = ensure_logbuf (
+
+ ENTER_LOG;
+
+ LogBuffer *logbuffer = ensure_logbuf (
EVENT_SIZE /* event */ +
- LEB128_SIZE /* time */ +
LEB128_SIZE /* object */ +
(do_bt ? (
- LEB128_SIZE /* flags */ +
LEB128_SIZE /* count */ +
data.count * (
LEB128_SIZE /* method */
)
) : 0)
);
- now = current_time ();
- ENTER_LOG (logbuffer, "throw");
- emit_byte (logbuffer, do_bt | TYPE_EXCEPTION);
- emit_time (logbuffer, now);
+
+ emit_event (logbuffer, do_bt | TYPE_EXCEPTION);
emit_obj (logbuffer, object);
+
if (do_bt)
emit_bt (prof, logbuffer, &data);
- EXIT_LOG (logbuffer);
+
+ EXIT_LOG;
+
process_requests (prof);
}
static void
clause_exc (MonoProfiler *prof, MonoMethod *method, int clause_type, int clause_num)
{
- uint64_t now;
+ ENTER_LOG;
+
LogBuffer *logbuffer = ensure_logbuf (
EVENT_SIZE /* event */ +
- LEB128_SIZE /* time */ +
- LEB128_SIZE /* clause type */ +
+ BYTE_SIZE /* clause type */ +
LEB128_SIZE /* clause num */ +
LEB128_SIZE /* method */
);
- now = current_time ();
- ENTER_LOG (logbuffer, "clause");
- emit_byte (logbuffer, TYPE_EXCEPTION | TYPE_CLAUSE);
- emit_time (logbuffer, now);
- emit_value (logbuffer, clause_type);
+
+ emit_event (logbuffer, TYPE_EXCEPTION | TYPE_CLAUSE);
+ emit_byte (logbuffer, clause_type);
emit_value (logbuffer, clause_num);
emit_method (prof, logbuffer, method);
- EXIT_LOG (logbuffer);
+
+ EXIT_LOG;
process_requests (prof);
}
static void
monitor_event (MonoProfiler *profiler, MonoObject *object, MonoProfilerMonitorEvent event)
{
- int do_bt = (nocalls && InterlockedRead (&runtime_inited) && !notraces && event == MONO_PROFILER_MONITOR_CONTENTION)? TYPE_MONITOR_BT: 0;
- uint64_t now;
+ int do_bt = (nocalls && InterlockedRead (&runtime_inited) && !notraces && event == MONO_PROFILER_MONITOR_CONTENTION) ? TYPE_MONITOR_BT : 0;
FrameData data;
- LogBuffer *logbuffer;
+
if (do_bt)
collect_bt (&data);
- logbuffer = ensure_logbuf (
+
+ ENTER_LOG;
+
+ LogBuffer *logbuffer = ensure_logbuf (
EVENT_SIZE /* event */ +
- LEB128_SIZE /* time */ +
LEB128_SIZE /* object */ +
(do_bt ? (
- LEB128_SIZE /* flags */ +
LEB128_SIZE /* count */ +
data.count * (
LEB128_SIZE /* method */
)
) : 0)
);
- now = current_time ();
- ENTER_LOG (logbuffer, "monitor");
- emit_byte (logbuffer, (event << 4) | do_bt | TYPE_MONITOR);
- emit_time (logbuffer, now);
+
+ emit_event (logbuffer, (event << 4) | do_bt | TYPE_MONITOR);
emit_obj (logbuffer, object);
+
if (do_bt)
emit_bt (profiler, logbuffer, &data);
- EXIT_LOG (logbuffer);
+
+ EXIT_LOG;
+
process_requests (profiler);
}
static void
thread_start (MonoProfiler *prof, uintptr_t tid)
{
- //printf ("thread start %p\n", (void*)tid);
- init_thread ();
+ init_thread (TRUE);
+
+ ENTER_LOG;
LogBuffer *logbuffer = ensure_logbuf (
EVENT_SIZE /* event */ +
- LEB128_SIZE /* time */ +
- EVENT_SIZE /* type */ +
- LEB128_SIZE /* tid */ +
- LEB128_SIZE /* flags */
+ BYTE_SIZE /* type */ +
+ LEB128_SIZE /* tid */
);
- uint64_t now = current_time ();
- ENTER_LOG (logbuffer, "thread-start");
- emit_byte (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
- emit_time (logbuffer, now);
+ emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
emit_byte (logbuffer, TYPE_THREAD);
emit_ptr (logbuffer, (void*) tid);
- emit_value (logbuffer, 0); /* flags */
- EXIT_LOG (logbuffer);
- if (logbuffer->next)
- safe_send (prof, logbuffer);
+ EXIT_LOG;
+
+ send_if_needed (prof);
process_requests (prof);
+
+ InterlockedIncrement (&thread_starts);
}
static void
thread_end (MonoProfiler *prof, uintptr_t tid)
{
- if (TLS_GET (LogBuffer, tlsbuffer)) {
- LogBuffer *logbuffer = ensure_logbuf (
- EVENT_SIZE /* event */ +
- LEB128_SIZE /* time */ +
- EVENT_SIZE /* type */ +
- LEB128_SIZE /* tid */ +
- LEB128_SIZE /* flags */
- );
- uint64_t now = current_time ();
+ ENTER_LOG;
- ENTER_LOG (logbuffer, "thread-end");
- emit_byte (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
- emit_time (logbuffer, now);
- emit_byte (logbuffer, TYPE_THREAD);
- emit_ptr (logbuffer, (void*) tid);
- emit_value (logbuffer, 0); /* flags */
- EXIT_LOG (logbuffer);
+ LogBuffer *logbuffer = ensure_logbuf (
+ EVENT_SIZE /* event */ +
+ BYTE_SIZE /* type */ +
+ LEB128_SIZE /* tid */
+ );
+
+ emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
+ emit_byte (logbuffer, TYPE_THREAD);
+ emit_ptr (logbuffer, (void*) tid);
- send_buffer (prof, TLS_GET (GPtrArray, tlsmethodlist), logbuffer);
+ EXIT_LOG;
- /* Don't process requests as the thread is detached from the runtime. */
- }
+ // Don't process requests as the thread is detached from the runtime.
+
+ remove_thread (prof, PROF_TLS_GET (), TRUE);
- TLS_SET (tlsbuffer, NULL);
- TLS_SET (tlsmethodlist, NULL);
+ InterlockedIncrement (&thread_ends);
}
static void
if (result != MONO_PROFILE_OK)
return;
+ ENTER_LOG;
+
LogBuffer *logbuffer = ensure_logbuf (
EVENT_SIZE /* event */ +
- LEB128_SIZE /* time */ +
- EVENT_SIZE /* type */ +
- LEB128_SIZE /* domain id */ +
- LEB128_SIZE /* flags */
+ BYTE_SIZE /* type */ +
+ LEB128_SIZE /* domain id */
);
- uint64_t now = current_time ();
- ENTER_LOG (logbuffer, "domain-start");
- emit_byte (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
- emit_time (logbuffer, now);
+ emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
emit_byte (logbuffer, TYPE_DOMAIN);
emit_ptr (logbuffer, (void*)(uintptr_t) mono_domain_get_id (domain));
- emit_value (logbuffer, 0); /* flags */
- EXIT_LOG (logbuffer);
- if (logbuffer->next)
- safe_send (prof, logbuffer);
+ EXIT_LOG;
+
+ send_if_needed (prof);
process_requests (prof);
+
+ InterlockedIncrement (&domain_loads);
}
static void
domain_unloaded (MonoProfiler *prof, MonoDomain *domain)
{
+ ENTER_LOG;
+
LogBuffer *logbuffer = ensure_logbuf (
EVENT_SIZE /* event */ +
- LEB128_SIZE /* time */ +
- EVENT_SIZE /* type */ +
- LEB128_SIZE /* domain id */ +
- LEB128_SIZE /* flags */
+ BYTE_SIZE /* type */ +
+ LEB128_SIZE /* domain id */
);
- uint64_t now = current_time ();
- ENTER_LOG (logbuffer, "domain-end");
- emit_byte (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
- emit_time (logbuffer, now);
+ emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
emit_byte (logbuffer, TYPE_DOMAIN);
emit_ptr (logbuffer, (void*)(uintptr_t) mono_domain_get_id (domain));
- emit_value (logbuffer, 0); /* flags */
- EXIT_LOG (logbuffer);
- if (logbuffer->next)
- safe_send (prof, logbuffer);
+ EXIT_LOG;
+
+ send_if_needed (prof);
process_requests (prof);
+
+ InterlockedIncrement (&domain_unloads);
}
static void
domain_name (MonoProfiler *prof, MonoDomain *domain, const char *name)
{
int nlen = strlen (name) + 1;
+
+ ENTER_LOG;
+
LogBuffer *logbuffer = ensure_logbuf (
EVENT_SIZE /* event */ +
- LEB128_SIZE /* time */ +
- EVENT_SIZE /* type */ +
+ BYTE_SIZE /* type */ +
LEB128_SIZE /* domain id */ +
- LEB128_SIZE /* flags */ +
nlen /* name */
);
- uint64_t now = current_time ();
- ENTER_LOG (logbuffer, "domain-name");
- emit_byte (logbuffer, TYPE_METADATA);
- emit_time (logbuffer, now);
+ emit_event (logbuffer, TYPE_METADATA);
emit_byte (logbuffer, TYPE_DOMAIN);
emit_ptr (logbuffer, (void*)(uintptr_t) mono_domain_get_id (domain));
- emit_value (logbuffer, 0); /* flags */
memcpy (logbuffer->cursor, name, nlen);
logbuffer->cursor += nlen;
- EXIT_LOG (logbuffer);
- if (logbuffer->next)
- safe_send (prof, logbuffer);
+ EXIT_LOG;
+
+ send_if_needed (prof);
process_requests (prof);
}
static void
context_loaded (MonoProfiler *prof, MonoAppContext *context)
{
+ ENTER_LOG;
+
LogBuffer *logbuffer = ensure_logbuf (
EVENT_SIZE /* event */ +
- LEB128_SIZE /* time */ +
- EVENT_SIZE /* type */ +
+ BYTE_SIZE /* type */ +
LEB128_SIZE /* context id */ +
- LEB128_SIZE /* flags */ +
LEB128_SIZE /* domain id */
);
- uint64_t now = current_time ();
- ENTER_LOG (logbuffer, "context-start");
- emit_byte (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
- emit_time (logbuffer, now);
+ emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
emit_byte (logbuffer, TYPE_CONTEXT);
emit_ptr (logbuffer, (void*)(uintptr_t) mono_context_get_id (context));
- emit_value (logbuffer, 0); /* flags */
emit_ptr (logbuffer, (void*)(uintptr_t) mono_context_get_domain_id (context));
- EXIT_LOG (logbuffer);
- if (logbuffer->next)
- safe_send (prof, logbuffer);
+ EXIT_LOG;
+
+ send_if_needed (prof);
process_requests (prof);
+
+ InterlockedIncrement (&context_loads);
}
static void
context_unloaded (MonoProfiler *prof, MonoAppContext *context)
{
+ ENTER_LOG;
+
LogBuffer *logbuffer = ensure_logbuf (
EVENT_SIZE /* event */ +
- LEB128_SIZE /* time */ +
- EVENT_SIZE /* type */ +
+ BYTE_SIZE /* type */ +
LEB128_SIZE /* context id */ +
- LEB128_SIZE /* flags */ +
LEB128_SIZE /* domain id */
);
- uint64_t now = current_time ();
- ENTER_LOG (logbuffer, "context-end");
- emit_byte (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
- emit_time (logbuffer, now);
+ emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
emit_byte (logbuffer, TYPE_CONTEXT);
emit_ptr (logbuffer, (void*)(uintptr_t) mono_context_get_id (context));
- emit_value (logbuffer, 0); /* flags */
emit_ptr (logbuffer, (void*)(uintptr_t) mono_context_get_domain_id (context));
- EXIT_LOG (logbuffer);
- if (logbuffer->next)
- safe_send (prof, logbuffer);
+ EXIT_LOG;
+
+ send_if_needed (prof);
process_requests (prof);
+
+ InterlockedIncrement (&context_unloads);
}
static void
thread_name (MonoProfiler *prof, uintptr_t tid, const char *name)
{
int len = strlen (name) + 1;
- uint64_t now;
- LogBuffer *logbuffer;
- logbuffer = ensure_logbuf (
+
+ ENTER_LOG;
+
+ LogBuffer *logbuffer = ensure_logbuf (
EVENT_SIZE /* event */ +
- LEB128_SIZE /* time */ +
- EVENT_SIZE /* type */ +
+ BYTE_SIZE /* type */ +
LEB128_SIZE /* tid */ +
- LEB128_SIZE /* flags */ +
len /* name */
);
- now = current_time ();
- ENTER_LOG (logbuffer, "tname");
- emit_byte (logbuffer, TYPE_METADATA);
- emit_time (logbuffer, now);
+
+ emit_event (logbuffer, TYPE_METADATA);
emit_byte (logbuffer, TYPE_THREAD);
emit_ptr (logbuffer, (void*)tid);
- emit_value (logbuffer, 0); /* flags */
memcpy (logbuffer->cursor, name, len);
logbuffer->cursor += len;
- EXIT_LOG (logbuffer);
- if (logbuffer->next)
- safe_send (prof, logbuffer);
+ EXIT_LOG;
+
+ send_if_needed (prof);
process_requests (prof);
}
} AsyncFrameInfo;
typedef struct {
+ MonoLockFreeQueueNode node;
+ MonoProfiler *prof;
+ uint64_t time;
+ uintptr_t tid;
+ void *ip;
int count;
- AsyncFrameInfo *data;
-} AsyncFrameData;
+ AsyncFrameInfo frames [MONO_ZERO_LEN_ARRAY];
+} SampleHit;
static mono_bool
async_walk_stack (MonoMethod *method, MonoDomain *domain, void *base_address, int offset, void *data)
{
- AsyncFrameData *frame = (AsyncFrameData *)data;
- if (frame->count < num_frames) {
- frame->data [frame->count].method = method;
- frame->data [frame->count].domain = domain;
- frame->data [frame->count].base_address = base_address;
- frame->data [frame->count].offset = offset;
- // printf ("In %d at %p (dom %p) (native: %p)\n", frame->count, method, domain, base_address);
- frame->count++;
+ SampleHit *sample = (SampleHit *) data;
+
+ if (sample->count < num_frames) {
+ int i = sample->count;
+
+ sample->frames [i].method = method;
+ sample->frames [i].domain = domain;
+ sample->frames [i].base_address = base_address;
+ sample->frames [i].offset = offset;
+
+ sample->count++;
}
- return frame->count == num_frames;
+
+ return sample->count == num_frames;
}
-/*
-(type | frame count), tid, time, ip, [method, domain, base address, offset] * frames
-*/
-#define SAMPLE_EVENT_SIZE_IN_SLOTS(FRAMES) (4 + (FRAMES) * 4)
+#define SAMPLE_SLOT_SIZE(FRAMES) (sizeof (SampleHit) + sizeof (AsyncFrameInfo) * (FRAMES - MONO_ZERO_LEN_ARRAY))
+#define SAMPLE_BLOCK_SIZE (mono_pagesize ())
+
+static void
+enqueue_sample_hit (gpointer p)
+{
+ SampleHit *sample = p;
+
+ mono_lock_free_queue_node_unpoison (&sample->node);
+ mono_lock_free_queue_enqueue (&sample->prof->dumper_queue, &sample->node);
+ mono_os_sem_post (&sample->prof->dumper_queue_sem);
+
+ InterlockedIncrement (&sample_flushes);
+}
static void
mono_sample_hit (MonoProfiler *profiler, unsigned char *ip, void *context)
{
- StatBuffer *sbuf;
- AsyncFrameInfo frames [num_frames];
- AsyncFrameData bt_data = { 0, &frames [0]};
- uint64_t now;
- uintptr_t *data, *new_data, *old_data;
- uintptr_t elapsed;
- int timedout = 0;
- int i;
+ /*
+ * Please note: We rely on the runtime loading the profiler with
+ * MONO_DL_EAGER (RTLD_NOW) so that references to runtime functions within
+ * this function (and its siblings) are resolved when the profiler is
+ * loaded. Otherwise, we would potentially invoke the dynamic linker when
+ * invoking runtime functions, which is not async-signal-safe.
+ */
+
if (in_shutdown)
return;
- now = current_time ();
- mono_stack_walk_async_safe (&async_walk_stack, context, &bt_data);
+ InterlockedIncrement (&sample_hits);
+
+ /* Prefer recycling a sample event previously flushed by the dumper thread. */
+ SampleHit *sample = (SampleHit *) mono_lock_free_queue_dequeue (&profiler->sample_reuse_queue);
+
+ if (!sample) {
+ /*
+ * If we're out of reusable sample events and we're not allowed to
+ * allocate more, we have no choice but to drop the event.
+ */
+ if (InterlockedRead (&sample_allocations) >= max_allocated_sample_hits)
+ return;
+
+ sample = mono_lock_free_alloc (&profiler->sample_allocator);
+ sample->prof = profiler;
+ mono_lock_free_queue_node_init (&sample->node, TRUE);
+
+ InterlockedIncrement (&sample_allocations);
+ }
+
+ sample->count = 0;
+ mono_stack_walk_async_safe (&async_walk_stack, context, sample);
+
+ sample->time = current_time ();
+ sample->tid = thread_id ();
+ sample->ip = ip;
- elapsed = (now - profiler->startup_time) / 10000;
if (do_debug) {
int len;
char buf [256];
- snprintf (buf, sizeof (buf), "hit at %p in thread %p after %llu ms\n", ip, (void*)thread_id (), (unsigned long long int)elapsed/100);
+ snprintf (buf, sizeof (buf), "hit at %p in thread %p after %llu ms\n", ip, (void *) sample->tid, (unsigned long long int) ((sample->time - profiler->startup_time) / 10000 / 100));
len = strlen (buf);
ign_res (write (2, buf, len));
}
- sbuf = profiler->stat_buffers;
- if (!sbuf)
- return;
- /* flush the buffer at 1 second intervals */
- if (sbuf->cursor > sbuf->buf && (elapsed - sbuf->buf [2]) > 100000) {
- timedout = 1;
- }
- /* overflow: 400 slots is a big enough number to reduce the chance of losing this event if many
- * threads hit this same spot at the same time
- */
- if (timedout || (sbuf->cursor + 400 >= sbuf->buf_end)) {
- StatBuffer *oldsb, *foundsb;
- sbuf = create_stat_buffer ();
- do {
- oldsb = profiler->stat_buffers;
- sbuf->next = oldsb;
- foundsb = (StatBuffer *)InterlockedCompareExchangePointer ((void * volatile*)&profiler->stat_buffers, sbuf, oldsb);
- } while (foundsb != oldsb);
- if (do_debug)
- ign_res (write (2, "overflow\n", 9));
- /* notify the helper thread */
- if (sbuf->next->next) {
- char c = 0;
- ign_res (write (profiler->pipes [1], &c, 1));
- if (do_debug)
- ign_res (write (2, "notify\n", 7));
- }
- }
- do {
- old_data = sbuf->cursor;
- new_data = old_data + SAMPLE_EVENT_SIZE_IN_SLOTS (bt_data.count);
- if (new_data > sbuf->buf_end)
- return; /* Not enough room in buf to hold this event-- lost event */
- data = (uintptr_t *)InterlockedCompareExchangePointer ((void * volatile*)&sbuf->cursor, new_data, old_data);
- } while (data != old_data);
-
- old_data [0] = 1 | (sample_type << 16) | (bt_data.count << 8);
- old_data [1] = thread_id ();
- old_data [2] = elapsed;
- old_data [3] = (uintptr_t)ip;
- for (i = 0; i < bt_data.count; ++i) {
- old_data [4 + 4 * i + 0] = (uintptr_t)frames [i].method;
- old_data [4 + 4 * i + 1] = (uintptr_t)frames [i].domain;
- old_data [4 + 4 * i + 2] = (uintptr_t)frames [i].base_address;
- old_data [4 + 4 * i + 3] = (uintptr_t)frames [i].offset;
- }
+
+ /*
+ * Defer the actual enqueue through the hazard-pointer machinery so the
+ * sample is only made visible to the dumper once no thread can still
+ * hold a stale reference to it.
+ */
+ mono_thread_hazardous_try_free (sample, enqueue_sample_hit);
}
static uintptr_t *code_pages = 0;
+/*
+ * Emit a TYPE_SAMPLE_UBIN event describing an unmanaged binary mapped at
+ * load_addr with the given file offset and size; the file name is written
+ * inline after the fixed-size fields.
+ */
static void
dump_ubin (const char *filename, uintptr_t load_addr, uint64_t offset, uintptr_t size)
{
- uint64_t now;
- LogBuffer *logbuffer;
- int len;
- len = strlen (filename) + 1;
- now = current_time ();
- logbuffer = ensure_logbuf (
+ int len = strlen (filename) + 1;
+
+ ENTER_LOG;
+
+ LogBuffer *logbuffer = ensure_logbuf (
EVENT_SIZE /* event */ +
- LEB128_SIZE /* time */ +
LEB128_SIZE /* load address */ +
LEB128_SIZE /* offset */ +
LEB128_SIZE /* size */ +
- nlen /* file name */
+ len /* file name */
);
- emit_byte (logbuffer, TYPE_SAMPLE | TYPE_SAMPLE_UBIN);
- emit_time (logbuffer, now);
+
+ emit_event (logbuffer, TYPE_SAMPLE | TYPE_SAMPLE_UBIN);
emit_svalue (logbuffer, load_addr);
emit_uvalue (logbuffer, offset);
emit_uvalue (logbuffer, size);
memcpy (logbuffer->cursor, filename, len);
logbuffer->cursor += len;
+
+ EXIT_LOG;
}
#endif
+/*
+ * Emit a TYPE_SAMPLE_USYM event describing an unmanaged symbol: its name,
+ * start address and size.
+ */
static void
dump_usym (const char *name, uintptr_t value, uintptr_t size)
{
- LogBuffer *logbuffer;
- int len;
- len = strlen (name) + 1;
- logbuffer = ensure_logbuf (
+ int len = strlen (name) + 1;
+
+ ENTER_LOG;
+
+ LogBuffer *logbuffer = ensure_logbuf (
EVENT_SIZE /* event */ +
LEB128_SIZE /* value */ +
LEB128_SIZE /* size */ +
len /* name */
);
- emit_byte (logbuffer, TYPE_SAMPLE | TYPE_SAMPLE_USYM);
+
+ emit_event (logbuffer, TYPE_SAMPLE | TYPE_SAMPLE_USYM);
emit_ptr (logbuffer, (void*)value);
emit_value (logbuffer, size);
memcpy (logbuffer->cursor, name, len);
logbuffer->cursor += len;
+
+ EXIT_LOG;
}
/* ELF code crashes on some systems. */
}
}
-static gint
-compare_sample_events (gconstpointer a, gconstpointer b)
-{
- uintptr_t tid1 = (*(uintptr_t **) a) [1];
- uintptr_t tid2 = (*(uintptr_t **) b) [1];
-
- return tid1 > tid2 ? 1 :
- tid1 < tid2 ? -1 :
- 0;
-}
-
-static void
-dump_sample_hits (MonoProfiler *prof, StatBuffer *sbuf)
-{
- LogBuffer *logbuffer;
- if (!sbuf)
- return;
- if (sbuf->next) {
- dump_sample_hits (prof, sbuf->next);
- free_buffer (sbuf->next, sbuf->next->size);
- sbuf->next = NULL;
- }
-
- g_ptr_array_set_size (prof->sorted_sample_events, 0);
-
- for (uintptr_t *sample = sbuf->buf; sample < sbuf->cursor;) {
- int count = sample [0] & 0xff;
- int mbt_count = (sample [0] & 0xff00) >> 8;
-
- if (sample + SAMPLE_EVENT_SIZE_IN_SLOTS (mbt_count) > sbuf->cursor)
- break;
-
- g_ptr_array_add (prof->sorted_sample_events, sample);
-
- sample += count + 3 + 4 * mbt_count;
- }
-
- g_ptr_array_sort (prof->sorted_sample_events, compare_sample_events);
-
- for (guint sidx = 0; sidx < prof->sorted_sample_events->len; sidx++) {
- uintptr_t *sample = (uintptr_t *)g_ptr_array_index (prof->sorted_sample_events, sidx);
- int count = sample [0] & 0xff;
- int mbt_count = (sample [0] & 0xff00) >> 8;
- int type = sample [0] >> 16;
- uintptr_t *managed_sample_base = sample + count + 3;
- uintptr_t thread_id = sample [1];
-
- for (int i = 0; i < mbt_count; ++i) {
- MonoMethod *method = (MonoMethod*)managed_sample_base [i * 4 + 0];
- MonoDomain *domain = (MonoDomain*)managed_sample_base [i * 4 + 1];
- void *address = (void*)managed_sample_base [i * 4 + 2];
-
- if (!method) {
- g_assert (domain);
- MonoJitInfo *ji = mono_jit_info_table_find (domain, (char *)address);
-
- if (ji)
- managed_sample_base [i * 4 + 0] = (uintptr_t)mono_jit_info_get_method (ji);
- }
- }
-
- logbuffer = ensure_logbuf (
- EVENT_SIZE /* event */ +
- LEB128_SIZE /* type */ +
- LEB128_SIZE /* time */ +
- LEB128_SIZE /* tid */ +
- LEB128_SIZE /* count */ +
- count * (
- LEB128_SIZE /* ip */
- ) +
- LEB128_SIZE /* managed count */ +
- mbt_count * (
- LEB128_SIZE /* method */ +
- LEB128_SIZE /* il offset */ +
- LEB128_SIZE /* native offset */
- )
- );
- emit_byte (logbuffer, TYPE_SAMPLE | TYPE_SAMPLE_HIT);
- emit_value (logbuffer, type);
- emit_uvalue (logbuffer, prof->startup_time + (uint64_t)sample [2] * (uint64_t)10000);
- emit_ptr (logbuffer, (void *) thread_id);
- emit_value (logbuffer, count);
- for (int i = 0; i < count; ++i) {
- emit_ptr (logbuffer, (void*)sample [i + 3]);
- add_code_pointer (sample [i + 3]);
- }
-
- sample += count + 3;
- /* new in data version 6 */
- emit_uvalue (logbuffer, mbt_count);
- for (int i = 0; i < mbt_count; ++i) {
- MonoMethod *method = (MonoMethod *) sample [i * 4 + 0];
- uintptr_t native_offset = sample [i * 4 + 3];
-
- emit_method (prof, logbuffer, method);
- emit_svalue (logbuffer, 0); /* il offset will always be 0 from now on */
- emit_svalue (logbuffer, native_offset);
- }
- }
-
- dump_unmanaged_coderefs (prof);
-}
-
-#if USE_PERF_EVENTS
-
+/*
+ * NOTE(review): stub that always reports a single CPU — confirm the
+ * perf-events code is meant to operate with this fallback value here.
+ */
static int
mono_cpu_count (void)
{
return 1;
}
+#if USE_PERF_EVENTS
+
typedef struct {
int perf_fd;
unsigned int prev_pos;
static void
dump_perf_hits (MonoProfiler *prof, void *buf, int size)
{
- LogBuffer *logbuffer;
int count = 1;
int mbt_count = 0;
void *end = (char*)buf + size;
/*ip = (void*)s->ip;
printf ("sample: %d, size: %d, ip: %p (%s), timestamp: %llu, nframes: %llu\n",
s->h.type, s->h.size, ip, symbol_for (ip), s->timestamp, s->nframes);*/
- logbuffer = ensure_logbuf (
+
+ ENTER_LOG;
+
+ LogBuffer *logbuffer = ensure_logbuf (
EVENT_SIZE /* event */ +
- LEB128_SIZE /* type */ +
- LEB128_SIZE /* time */ +
+ BYTE_SIZE /* type */ +
LEB128_SIZE /* tid */ +
LEB128_SIZE /* count */ +
count * (
) +
LEB128_SIZE /* managed count */ +
mbt_count * (
- LEB128_SIZE /* method */ +
- LEB128_SIZE /* il offset */ +
- LEB128_SIZE /* native offset */
+ LEB128_SIZE /* method */
)
);
- emit_byte (logbuffer, TYPE_SAMPLE | TYPE_SAMPLE_HIT);
- emit_value (logbuffer, sample_type);
- emit_uvalue (logbuffer, s->timestamp - prof->startup_time);
+
+ emit_event (logbuffer, TYPE_SAMPLE | TYPE_SAMPLE_HIT);
+ emit_byte (logbuffer, sample_type);
/*
* No useful thread ID to write here, since throughout the
* profiler we use pthread_self () but the ID we get from
emit_ptr (logbuffer, 0);
emit_value (logbuffer, count);
emit_ptr (logbuffer, (void*)(uintptr_t)s->ip);
- add_code_pointer (s->ip);
/* no support here yet for the managed backtrace */
emit_uvalue (logbuffer, mbt_count);
+
+ EXIT_LOG;
+
+ add_code_pointer (s->ip);
buf = (char*)buf + s->h.size;
samples++;
}
}
static void
-counters_emit (MonoProfiler *profiler, gboolean threadless)
+counters_emit (MonoProfiler *profiler)
{
MonoCounterAgent *agent;
- LogBuffer *logbuffer;
int len = 0;
int size =
EVENT_SIZE /* event */ +
size +=
LEB128_SIZE /* section */ +
strlen (mono_counter_get_name (agent->counter)) + 1 /* name */ +
- LEB128_SIZE /* type */ +
- LEB128_SIZE /* unit */ +
- LEB128_SIZE /* variance */ +
+ BYTE_SIZE /* type */ +
+ BYTE_SIZE /* unit */ +
+ BYTE_SIZE /* variance */ +
LEB128_SIZE /* index */
;
return;
}
- logbuffer = ensure_logbuf (size);
+ ENTER_LOG;
- ENTER_LOG (logbuffer, "counters");
- emit_byte (logbuffer, TYPE_SAMPLE_COUNTERS_DESC | TYPE_SAMPLE);
+ LogBuffer *logbuffer = ensure_logbuf (size);
+
+ emit_event (logbuffer, TYPE_SAMPLE_COUNTERS_DESC | TYPE_SAMPLE);
emit_value (logbuffer, len);
+
for (agent = counters; agent; agent = agent->next) {
const char *name;
name = mono_counter_get_name (agent->counter);
emit_value (logbuffer, mono_counter_get_section (agent->counter));
emit_string (logbuffer, name, strlen (name) + 1);
- emit_value (logbuffer, mono_counter_get_type (agent->counter));
- emit_value (logbuffer, mono_counter_get_unit (agent->counter));
- emit_value (logbuffer, mono_counter_get_variance (agent->counter));
+ emit_byte (logbuffer, mono_counter_get_type (agent->counter));
+ emit_byte (logbuffer, mono_counter_get_unit (agent->counter));
+ emit_byte (logbuffer, mono_counter_get_variance (agent->counter));
emit_value (logbuffer, agent->index);
agent->emitted = 1;
}
- EXIT_LOG (logbuffer);
- if (threadless)
- safe_send_threadless (profiler, logbuffer);
- else
- safe_send (profiler, logbuffer);
+ EXIT_LOG;
mono_os_mutex_unlock (&counters_mutex);
}
static void
-counters_sample (MonoProfiler *profiler, uint64_t timestamp, gboolean threadless)
+counters_sample (MonoProfiler *profiler, uint64_t timestamp)
{
MonoCounterAgent *agent;
MonoCounter *counter;
- LogBuffer *logbuffer;
int type;
int buffer_size;
void *buffer;
if (!counters_initialized)
return;
- counters_emit (profiler, threadless);
+ counters_emit (profiler);
buffer_size = 8;
buffer = calloc (1, buffer_size);
mono_os_mutex_lock (&counters_mutex);
size =
- EVENT_SIZE /* event */ +
- LEB128_SIZE /* time */
+ EVENT_SIZE /* event */
;
for (agent = counters; agent; agent = agent->next) {
size +=
LEB128_SIZE /* index */ +
- LEB128_SIZE /* type */ +
+ BYTE_SIZE /* type */ +
mono_counter_get_size (agent->counter) /* value */
;
}
LEB128_SIZE /* stop marker */
;
- logbuffer = ensure_logbuf (size);
+ ENTER_LOG;
+
+ LogBuffer *logbuffer = ensure_logbuf (size);
+
+ emit_event_time (logbuffer, TYPE_SAMPLE_COUNTERS | TYPE_SAMPLE, timestamp);
- ENTER_LOG (logbuffer, "counters");
- emit_byte (logbuffer, TYPE_SAMPLE_COUNTERS | TYPE_SAMPLE);
- emit_uvalue (logbuffer, timestamp);
for (agent = counters; agent; agent = agent->next) {
size_t size;
}
emit_uvalue (logbuffer, agent->index);
- emit_uvalue (logbuffer, type);
+ emit_byte (logbuffer, type);
switch (type) {
case MONO_COUNTER_INT:
#if SIZEOF_VOID_P == 4
free (buffer);
emit_value (logbuffer, 0);
- EXIT_LOG (logbuffer);
- if (threadless)
- safe_send_threadless (profiler, logbuffer);
- else
- safe_send (profiler, logbuffer);
+ EXIT_LOG;
mono_os_mutex_unlock (&counters_mutex);
}
static PerfCounterAgent *perfcounters = NULL;
static void
-perfcounters_emit (MonoProfiler *profiler, gboolean threadless)
+perfcounters_emit (MonoProfiler *profiler)
{
PerfCounterAgent *pcagent;
- LogBuffer *logbuffer;
int len = 0;
int size =
EVENT_SIZE /* event */ +
LEB128_SIZE /* section */ +
strlen (pcagent->category_name) + 1 /* category name */ +
strlen (pcagent->name) + 1 /* name */ +
- LEB128_SIZE /* type */ +
- LEB128_SIZE /* unit */ +
- LEB128_SIZE /* variance */ +
+ BYTE_SIZE /* type */ +
+ BYTE_SIZE /* unit */ +
+ BYTE_SIZE /* variance */ +
LEB128_SIZE /* index */
;
if (!len)
return;
- logbuffer = ensure_logbuf (size);
+ ENTER_LOG;
+
+ LogBuffer *logbuffer = ensure_logbuf (size);
- ENTER_LOG (logbuffer, "perfcounters");
- emit_byte (logbuffer, TYPE_SAMPLE_COUNTERS_DESC | TYPE_SAMPLE);
+ emit_event (logbuffer, TYPE_SAMPLE_COUNTERS_DESC | TYPE_SAMPLE);
emit_value (logbuffer, len);
+
for (pcagent = perfcounters; pcagent; pcagent = pcagent->next) {
if (pcagent->emitted)
continue;
emit_value (logbuffer, MONO_COUNTER_PERFCOUNTERS);
emit_string (logbuffer, pcagent->category_name, strlen (pcagent->category_name) + 1);
emit_string (logbuffer, pcagent->name, strlen (pcagent->name) + 1);
- emit_value (logbuffer, MONO_COUNTER_LONG);
- emit_value (logbuffer, MONO_COUNTER_RAW);
- emit_value (logbuffer, MONO_COUNTER_VARIABLE);
+ emit_byte (logbuffer, MONO_COUNTER_LONG);
+ emit_byte (logbuffer, MONO_COUNTER_RAW);
+ emit_byte (logbuffer, MONO_COUNTER_VARIABLE);
emit_value (logbuffer, pcagent->index);
pcagent->emitted = 1;
}
- EXIT_LOG (logbuffer);
- if (threadless)
- safe_send_threadless (profiler, logbuffer);
- else
- safe_send (profiler, logbuffer);
+ EXIT_LOG;
}
static gboolean
}
static void
-perfcounters_sample (MonoProfiler *profiler, uint64_t timestamp, gboolean threadless)
+perfcounters_sample (MonoProfiler *profiler, uint64_t timestamp)
{
PerfCounterAgent *pcagent;
- LogBuffer *logbuffer;
int size;
if (!counters_initialized)
mono_perfcounter_foreach (perfcounters_foreach, perfcounters);
- perfcounters_emit (profiler, threadless);
+ perfcounters_emit (profiler);
size =
- EVENT_SIZE /* event */ +
- LEB128_SIZE /* time */
+ EVENT_SIZE /* event */
;
for (pcagent = perfcounters; pcagent; pcagent = pcagent->next) {
size +=
LEB128_SIZE /* index */ +
- LEB128_SIZE /* type */ +
+ BYTE_SIZE /* type */ +
LEB128_SIZE /* value */
;
}
LEB128_SIZE /* stop marker */
;
- logbuffer = ensure_logbuf (size);
+ ENTER_LOG;
+
+ LogBuffer *logbuffer = ensure_logbuf (size);
+
+ emit_event_time (logbuffer, TYPE_SAMPLE_COUNTERS | TYPE_SAMPLE, timestamp);
- ENTER_LOG (logbuffer, "perfcounters");
- emit_byte (logbuffer, TYPE_SAMPLE_COUNTERS | TYPE_SAMPLE);
- emit_uvalue (logbuffer, timestamp);
for (pcagent = perfcounters; pcagent; pcagent = pcagent->next) {
if (pcagent->deleted || !pcagent->updated)
continue;
emit_uvalue (logbuffer, pcagent->index);
- emit_uvalue (logbuffer, MONO_COUNTER_LONG);
+ emit_byte (logbuffer, MONO_COUNTER_LONG);
emit_svalue (logbuffer, pcagent->value);
pcagent->updated = 0;
}
emit_value (logbuffer, 0);
- EXIT_LOG (logbuffer);
- if (threadless)
- safe_send_threadless (profiler, logbuffer);
- else
- safe_send (profiler, logbuffer);
+ EXIT_LOG;
mono_os_mutex_unlock (&counters_mutex);
}
+/*
+ * Take one sample of both Mono counters and performance counters, stamping
+ * both with the same absolute timestamp.
+ */
static void
-counters_and_perfcounters_sample (MonoProfiler *prof, gboolean threadless)
+counters_and_perfcounters_sample (MonoProfiler *prof)
{
- static uint64_t start = -1;
- uint64_t now;
-
- if (start == -1)
- start = current_time ();
+ uint64_t now = current_time ();
- now = current_time ();
- counters_sample (prof, (now - start) / 1000/ 1000, threadless);
- perfcounters_sample (prof, (now - start) / 1000/ 1000, threadless);
+ counters_sample (prof, now);
+ perfcounters_sample (prof, now);
}
#define COVERAGE_DEBUG(x) if (debug_coverage) {x}
static GPtrArray *coverage_data = NULL;
static int previous_offset = 0;
+/* Lock-free queue node carrying a MonoMethod — presumably queued for coverage processing; see the dequeue loop below. */
-typedef struct _MethodNode MethodNode;
-struct _MethodNode {
+typedef struct {
MonoLockFreeQueueNode node;
MonoMethod *method;
-};
+} MethodNode;
+/* One coverage statement record: offset, hit counter and source location (filename/line/column). */
-typedef struct _CoverageEntry CoverageEntry;
-struct _CoverageEntry {
+typedef struct {
int offset;
int counter;
char *filename;
int line;
int column;
-};
+} CoverageEntry;
static void
free_coverage_entry (gpointer data, gpointer userdata)
MonoImage *image;
char *class_name;
const char *image_name, *method_name, *sig, *first_filename;
- LogBuffer *logbuffer;
guint i;
previous_offset = 0;
sig = sig ? sig : "";
method_name = method_name ? method_name : "";
- logbuffer = ensure_logbuf (
+ ENTER_LOG;
+
+ LogBuffer *logbuffer = ensure_logbuf (
EVENT_SIZE /* event */ +
strlen (image_name) + 1 /* image name */ +
strlen (class_name) + 1 /* class name */ +
LEB128_SIZE /* method id */ +
LEB128_SIZE /* entries */
);
- ENTER_LOG (logbuffer, "coverage-methods");
- emit_byte (logbuffer, TYPE_COVERAGE_METHOD | TYPE_COVERAGE);
+ emit_event (logbuffer, TYPE_COVERAGE_METHOD | TYPE_COVERAGE);
emit_string (logbuffer, image_name, strlen (image_name) + 1);
emit_string (logbuffer, class_name, strlen (class_name) + 1);
emit_string (logbuffer, method_name, strlen (method_name) + 1);
emit_uvalue (logbuffer, method_id);
emit_value (logbuffer, coverage_data->len);
- EXIT_LOG (logbuffer);
- safe_send (prof, logbuffer);
+ EXIT_LOG;
+
+ send_if_needed (prof);
for (i = 0; i < coverage_data->len; i++) {
CoverageEntry *entry = (CoverageEntry *)coverage_data->pdata[i];
- logbuffer = ensure_logbuf (
+ ENTER_LOG;
+
+ LogBuffer *logbuffer = ensure_logbuf (
EVENT_SIZE /* event */ +
LEB128_SIZE /* method id */ +
LEB128_SIZE /* offset */ +
LEB128_SIZE /* line */ +
LEB128_SIZE /* column */
);
- ENTER_LOG (logbuffer, "coverage-statement");
- emit_byte (logbuffer, TYPE_COVERAGE_STATEMENT | TYPE_COVERAGE);
+ emit_event (logbuffer, TYPE_COVERAGE_STATEMENT | TYPE_COVERAGE);
emit_uvalue (logbuffer, method_id);
emit_uvalue (logbuffer, entry->offset);
emit_uvalue (logbuffer, entry->counter);
emit_uvalue (logbuffer, entry->line);
emit_uvalue (logbuffer, entry->column);
- EXIT_LOG (logbuffer);
- safe_send (prof, logbuffer);
+ EXIT_LOG;
+
+ send_if_needed (prof);
}
method_id++;
while ((node = mono_lock_free_queue_dequeue (queue))) {
count++;
- mono_lock_free_queue_node_free (node);
+ mono_thread_hazardous_try_free (node, free);
}
return count;
const char *assembly_name;
int number_of_methods, partially_covered;
guint fully_covered;
- LogBuffer *logbuffer;
image = mono_class_get_image (klass);
assembly_name = mono_image_get_name (image);
/* We don't handle partial covered yet */
partially_covered = 0;
- logbuffer = ensure_logbuf (
+ ENTER_LOG;
+
+ LogBuffer *logbuffer = ensure_logbuf (
EVENT_SIZE /* event */ +
strlen (assembly_name) + 1 /* assembly name */ +
strlen (class_name) + 1 /* class name */ +
LEB128_SIZE /* partially covered */
);
- ENTER_LOG (logbuffer, "coverage-class");
- emit_byte (logbuffer, TYPE_COVERAGE_CLASS | TYPE_COVERAGE);
+ emit_event (logbuffer, TYPE_COVERAGE_CLASS | TYPE_COVERAGE);
emit_string (logbuffer, assembly_name, strlen (assembly_name) + 1);
emit_string (logbuffer, class_name, strlen (class_name) + 1);
emit_uvalue (logbuffer, number_of_methods);
emit_uvalue (logbuffer, fully_covered);
emit_uvalue (logbuffer, partially_covered);
- EXIT_LOG (logbuffer);
- safe_send (prof, logbuffer);
+ EXIT_LOG;
+
+ send_if_needed (prof);
g_free (class_name);
}
MonoAssembly *assembly = (MonoAssembly *)value;
MonoProfiler *prof = (MonoProfiler *)userdata;
MonoImage *image = mono_assembly_get_image (assembly);
- LogBuffer *logbuffer;
const char *name, *guid, *filename;
int number_of_methods = 0, partially_covered = 0;
guint fully_covered = 0;
get_coverage_for_image (image, &number_of_methods, &fully_covered, &partially_covered);
- logbuffer = ensure_logbuf (
+ ENTER_LOG;
+
+ LogBuffer *logbuffer = ensure_logbuf (
EVENT_SIZE /* event */ +
strlen (name) + 1 /* name */ +
strlen (guid) + 1 /* guid */ +
LEB128_SIZE /* partially covered */
);
- ENTER_LOG (logbuffer, "coverage-assemblies");
- emit_byte (logbuffer, TYPE_COVERAGE_ASSEMBLY | TYPE_COVERAGE);
+ emit_event (logbuffer, TYPE_COVERAGE_ASSEMBLY | TYPE_COVERAGE);
emit_string (logbuffer, name, strlen (name) + 1);
emit_string (logbuffer, guid, strlen (guid) + 1);
emit_string (logbuffer, filename, strlen (filename) + 1);
emit_uvalue (logbuffer, number_of_methods);
emit_uvalue (logbuffer, fully_covered);
emit_uvalue (logbuffer, partially_covered);
- EXIT_LOG (logbuffer);
- safe_send (prof, logbuffer);
+ EXIT_LOG;
+
+ send_if_needed (prof);
}
static void
mono_assembly_close (assembly);
}
+/* Hazard-pointer free callback: returns a SampleHit's memory to the lock-free allocator. */
+static void
+free_sample_hit (gpointer p)
+{
+ mono_lock_free_free (p, SAMPLE_BLOCK_SIZE);
+}
+
+/*
+ * Shutdown helper: drain the sample reuse queue, releasing each SampleHit
+ * back to the allocator once the hazard machinery proves it unreferenced.
+ */
+static void
+cleanup_reusable_samples (MonoProfiler *prof)
+{
+ SampleHit *sample;
+
+ while ((sample = (SampleHit *) mono_lock_free_queue_dequeue (&prof->sample_reuse_queue)))
+ mono_thread_hazardous_try_free (sample, free_sample_hit);
+}
+
static void
log_shutdown (MonoProfiler *prof)
{
in_shutdown = 1;
#ifndef DISABLE_HELPER_THREAD
- counters_and_perfcounters_sample (prof, FALSE);
+ counters_and_perfcounters_sample (prof);
dump_coverage (prof);
}
#endif
- g_ptr_array_free (prof->sorted_sample_events, TRUE);
-
- if (TLS_GET (LogBuffer, tlsbuffer))
- send_buffer (prof, TLS_GET (GPtrArray, tlsmethodlist), TLS_GET (LogBuffer, tlsbuffer));
+ /*
+ * Ensure that we empty the LLS completely, even if some nodes are
+ * not immediately removed upon calling mono_lls_remove (), by
+ * iterating until the head is NULL.
+ */
+ while (profiler_thread_list.head) {
+ MONO_LLS_FOREACH_SAFE (&profiler_thread_list, MonoProfilerThread, thread) {
+ remove_thread (prof, thread, FALSE);
+ } MONO_LLS_FOREACH_SAFE_END
+ }
- TLS_SET (tlsbuffer, NULL);
- TLS_SET (tlsmethodlist, NULL);
+ InterlockedWrite (&prof->run_dumper_thread, 0);
+ mono_os_sem_post (&prof->dumper_queue_sem);
+ pthread_join (prof->dumper_thread, &res);
+ mono_os_sem_destroy (&prof->dumper_queue_sem);
InterlockedWrite (&prof->run_writer_thread, 0);
mono_os_sem_post (&prof->writer_queue_sem);
pthread_join (prof->writer_thread, &res);
-
mono_os_sem_destroy (&prof->writer_queue_sem);
+ cleanup_reusable_samples (prof);
+
+ g_assert (!InterlockedRead (&buffer_rwlock_count) && "Why is the reader count still non-zero?");
+ g_assert (!InterlockedReadPointer (&buffer_rwlock_exclusive) && "Why does someone still hold the exclusive lock?");
+
#if defined (HAVE_SYS_ZLIB)
if (prof->gzfile)
gzclose (prof->gzfile);
mono_os_mutex_destroy (&coverage_mutex);
}
+ PROF_TLS_FREE ();
+
free (prof);
}
MonoThread *thread = NULL;
mono_threads_attach_tools_thread ();
- mono_thread_info_set_name (mono_native_thread_id_get (), "Profiler helper");
+ mono_native_thread_set_name (mono_native_thread_id_get (), "Profiler helper");
+
+ init_thread (FALSE);
//fprintf (stderr, "Server listening\n");
command_socket = -1;
}
#endif
- counters_and_perfcounters_sample (prof, TRUE);
+ counters_and_perfcounters_sample (prof);
+
+ buffer_lock_excl ();
+
+ sync_point (prof, SYNC_POINT_PERIODIC);
+
+ buffer_unlock_excl ();
tv.tv_sec = 1;
tv.tv_usec = 0;
if (FD_ISSET (prof->pipes [0], &rfds)) {
char c;
- int r = read (prof->pipes [0], &c, 1);
- if (r == 1 && c == 0) {
- StatBuffer *sbufbase = prof->stat_buffers;
- StatBuffer *sbuf;
- if (!sbufbase->next)
- continue;
- sbuf = sbufbase->next->next;
- sbufbase->next->next = NULL;
- if (do_debug)
- fprintf (stderr, "stat buffer dump\n");
- if (sbuf) {
- dump_sample_hits (prof, sbuf);
- free_buffer (sbuf, sbuf->size);
- safe_send_threadless (prof, ensure_logbuf (0));
- }
- continue;
- }
- /* time to shut down */
- dump_sample_hits (prof, prof->stat_buffers);
+ read (prof->pipes [0], &c, 1);
if (thread)
mono_thread_detach (thread);
if (do_debug)
}
}
#endif
- safe_send_threadless (prof, ensure_logbuf (0));
+ safe_send_threadless (prof);
return NULL;
}
#if USE_PERF_EVENTS
continue;
if (FD_ISSET (perf_data [i].perf_fd, &rfds)) {
read_perf_mmap (prof, i);
- safe_send_threadless (prof, ensure_logbuf (0));
+ send_if_needed_threadless (prof);
}
}
}
}
#endif
+/* Hazard-pointer free callback: returns a WriterQueueEntry's memory to the lock-free allocator. */
+static void
+free_writer_entry (gpointer p)
+{
+ mono_lock_free_free (p, WRITER_ENTRY_BLOCK_SIZE);
+}
+
static gboolean
handle_writer_queue_entry (MonoProfiler *prof)
{
WriterQueueEntry *entry;
if ((entry = (WriterQueueEntry *) mono_lock_free_queue_dequeue (&prof->writer_queue))) {
- LogBuffer *method_buffer = NULL;
- gboolean new_methods = FALSE;
+ if (!entry->methods)
+ goto no_methods;
- if (entry->methods->len)
- method_buffer = create_buffer ();
+ LogBuffer *buf = NULL;
/*
* Encode the method events in a temporary log buffer that we
* flush to disk before the main buffer, ensuring that all
* methods have metadata emitted before they're referenced.
+ *
+ * We use a 'proper' thread-local buffer for this as opposed
+ * to allocating and freeing a buffer by hand because the call
+ * to mono_method_full_name () below may trigger class load
+ * events when it retrieves the signature of the method. So a
+ * thread-local buffer needs to exist when such events occur.
*/
for (guint i = 0; i < entry->methods->len; i++) {
- MethodInfo *info = (MethodInfo *)g_ptr_array_index (entry->methods, i);
+ MethodInfo *info = (MethodInfo *) g_ptr_array_index (entry->methods, i);
if (mono_conc_hashtable_lookup (prof->method_table, info->method))
- continue;
-
- new_methods = TRUE;
+ goto free_info; // This method already has metadata emitted.
/*
* Other threads use this hash table to get a general
void *cstart = info->ji ? mono_jit_info_get_code_start (info->ji) : NULL;
int csize = info->ji ? mono_jit_info_get_code_size (info->ji) : 0;
- method_buffer = ensure_logbuf_inner (method_buffer,
+ buf = ensure_logbuf_unsafe (
EVENT_SIZE /* event */ +
- LEB128_SIZE /* time */ +
LEB128_SIZE /* method */ +
LEB128_SIZE /* start */ +
LEB128_SIZE /* size */ +
nlen /* name */
);
- emit_byte (method_buffer, TYPE_JIT | TYPE_METHOD);
- emit_time (method_buffer, info->time);
- emit_method_inner (method_buffer, info->method);
- emit_ptr (method_buffer, cstart);
- emit_value (method_buffer, csize);
+ emit_event_time (buf, TYPE_JIT | TYPE_METHOD, info->time);
+ emit_method_inner (buf, info->method);
+ emit_ptr (buf, cstart);
+ emit_value (buf, csize);
- memcpy (method_buffer->cursor, name, nlen);
- method_buffer->cursor += nlen;
+ memcpy (buf->cursor, name, nlen);
+ buf->cursor += nlen;
mono_free (name);
+
+ free_info:
free (info);
}
g_ptr_array_free (entry->methods, TRUE);
- if (new_methods) {
- for (LogBuffer *iter = method_buffer; iter; iter = iter->next)
- iter->thread_id = 0;
-
- dump_buffer (prof, method_buffer);
- } else if (method_buffer)
- free_buffer (method_buffer, method_buffer->size);
+ if (buf) {
+ dump_buffer_threadless (prof, buf);
+ init_buffer_state (PROF_TLS_GET ());
+ }
+ no_methods:
dump_buffer (prof, entry->buffer);
- free (entry);
+ mono_thread_hazardous_try_free (entry, free_writer_entry);
return TRUE;
}
writer_thread (void *arg)
{
MonoProfiler *prof = (MonoProfiler *)arg;
- WriterQueueEntry *entry;
mono_threads_attach_tools_thread ();
- mono_thread_info_set_name (mono_native_thread_id_get (), "Profiler writer");
+ mono_native_thread_set_name (mono_native_thread_id_get (), "Profiler writer");
dump_header (prof);
+ MonoProfilerThread *thread = init_thread (FALSE);
+
while (InterlockedRead (&prof->run_writer_thread)) {
mono_os_sem_wait (&prof->writer_queue_sem, MONO_SEM_FLAGS_NONE);
handle_writer_queue_entry (prof);
/* Drain any remaining entries on shutdown. */
while (handle_writer_queue_entry (prof));
+ free_buffer (thread->buffer, thread->buffer->size);
+ deinit_thread (thread);
+
mono_thread_info_detach ();
return NULL;
return !pthread_create (&prof->writer_thread, NULL, writer_thread, prof);
}
+/*
+ * Hazard-pointer free callback: instead of freeing the SampleHit, recycle
+ * it onto the profiler's reuse queue so the sampling signal handler can
+ * pick it up again without allocating.
+ */
+static void
+reuse_sample_hit (gpointer p)
+{
+ SampleHit *sample = p;
+
+ mono_lock_free_queue_node_unpoison (&sample->node);
+ mono_lock_free_queue_enqueue (&sample->prof->sample_reuse_queue, &sample->node);
+}
+
+/*
+ * Dequeue one sample event from the dumper queue, resolve managed frames
+ * captured without a method pointer via the JIT info table, emit a
+ * TYPE_SAMPLE_HIT event, and recycle the sample for the signal handler.
+ *
+ * Returns TRUE if an entry was processed and FALSE if the queue was empty,
+ * so the shutdown drain loop can keep iterating until the queue is
+ * exhausted (mirrors handle_writer_queue_entry).
+ */
+static gboolean
+handle_dumper_queue_entry (MonoProfiler *prof)
+{
+ SampleHit *sample;
+
+ if ((sample = (SampleHit *) mono_lock_free_queue_dequeue (&prof->dumper_queue))) {
+ for (int i = 0; i < sample->count; ++i) {
+ MonoMethod *method = sample->frames [i].method;
+ MonoDomain *domain = sample->frames [i].domain;
+ void *address = sample->frames [i].base_address;
+
+ if (!method) {
+ g_assert (domain && "What happened to the domain pointer?");
+ g_assert (address && "What happened to the instruction pointer?");
+
+ MonoJitInfo *ji = mono_jit_info_table_find (domain, (char *) address);
+
+ if (ji)
+ sample->frames [i].method = mono_jit_info_get_method (ji);
+ }
+ }
+
+ LogBuffer *logbuffer = ensure_logbuf_unsafe (
+ EVENT_SIZE /* event */ +
+ BYTE_SIZE /* type */ +
+ LEB128_SIZE /* tid */ +
+ LEB128_SIZE /* count */ +
+ 1 * (
+ LEB128_SIZE /* ip */
+ ) +
+ LEB128_SIZE /* managed count */ +
+ sample->count * (
+ LEB128_SIZE /* method */
+ )
+ );
+
+ emit_event_time (logbuffer, TYPE_SAMPLE | TYPE_SAMPLE_HIT, sample->time);
+ emit_byte (logbuffer, sample_type);
+ emit_ptr (logbuffer, (void *) sample->tid);
+ emit_value (logbuffer, 1);
+
+ // TODO: Actual native unwinding.
+ for (int i = 0; i < 1; ++i) {
+ emit_ptr (logbuffer, sample->ip);
+ add_code_pointer ((uintptr_t) sample->ip);
+ }
+
+ /* new in data version 6 */
+ emit_uvalue (logbuffer, sample->count);
+
+ for (int i = 0; i < sample->count; ++i)
+ emit_method (prof, logbuffer, sample->frames [i].method);
+
+ mono_thread_hazardous_try_free (sample, reuse_sample_hit);
+
+ dump_unmanaged_coderefs (prof);
+
+ send_if_needed_threadless (prof);
+
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+/*
+ * Dedicated thread that turns queued SampleHit events into log output.
+ * Sleeps on dumper_queue_sem; each post corresponds to one enqueued
+ * sample or the shutdown wake-up.
+ */
+static void *
+dumper_thread (void *arg)
+{
+ MonoProfiler *prof = (MonoProfiler *)arg;
+
+ mono_threads_attach_tools_thread ();
+ mono_native_thread_set_name (mono_native_thread_id_get (), "Profiler dumper");
+
+ MonoProfilerThread *thread = init_thread (FALSE);
+
+ while (InterlockedRead (&prof->run_dumper_thread)) {
+ mono_os_sem_wait (&prof->dumper_queue_sem, MONO_SEM_FLAGS_NONE);
+ handle_dumper_queue_entry (prof);
+ }
+
+ /* Drain any remaining entries on shutdown. */
+ while (handle_dumper_queue_entry (prof));
+
+ safe_send_threadless (prof);
+ deinit_thread (thread);
+
+ mono_thread_info_detach ();
+
+ return NULL;
+}
+
+/*
+ * Spawn the dumper thread. Returns non-zero on success (pthread_create
+ * returns 0 on success, hence the negation).
+ */
+static int
+start_dumper_thread (MonoProfiler* prof)
+{
+ InterlockedWrite (&prof->run_dumper_thread, 1);
+
+ return !pthread_create (&prof->dumper_thread, NULL, dumper_thread, prof);
+}
+
static void
runtime_initialized (MonoProfiler *profiler)
{
+ InterlockedWrite (&runtime_inited, 1);
+
#ifndef DISABLE_HELPER_THREAD
if (hs_mode_ondemand || need_helper_thread) {
if (!start_helper_thread (profiler))
#endif
start_writer_thread (profiler);
+ start_dumper_thread (profiler);
+
+ mono_counters_register ("Sample hits", MONO_COUNTER_UINT | MONO_COUNTER_PROFILER | MONO_COUNTER_MONOTONIC, &sample_hits);
+ mono_counters_register ("Sample flushes", MONO_COUNTER_UINT | MONO_COUNTER_PROFILER | MONO_COUNTER_MONOTONIC, &sample_flushes);
+ mono_counters_register ("Sample events allocated", MONO_COUNTER_UINT | MONO_COUNTER_PROFILER | MONO_COUNTER_MONOTONIC, &sample_allocations);
+ mono_counters_register ("Log buffers allocated", MONO_COUNTER_UINT | MONO_COUNTER_PROFILER | MONO_COUNTER_MONOTONIC, &buffer_allocations);
+ mono_counters_register ("Thread start events", MONO_COUNTER_UINT | MONO_COUNTER_PROFILER | MONO_COUNTER_MONOTONIC, &thread_starts);
+ mono_counters_register ("Thread stop events", MONO_COUNTER_UINT | MONO_COUNTER_PROFILER | MONO_COUNTER_MONOTONIC, &thread_ends);
+ mono_counters_register ("Domain load events", MONO_COUNTER_UINT | MONO_COUNTER_PROFILER | MONO_COUNTER_MONOTONIC, &domain_loads);
+ mono_counters_register ("Domain unload events", MONO_COUNTER_UINT | MONO_COUNTER_PROFILER | MONO_COUNTER_MONOTONIC, &domain_unloads);
+ mono_counters_register ("Context load events", MONO_COUNTER_UINT | MONO_COUNTER_PROFILER | MONO_COUNTER_MONOTONIC, &context_loads);
+ mono_counters_register ("Context unload events", MONO_COUNTER_UINT | MONO_COUNTER_PROFILER | MONO_COUNTER_MONOTONIC, &context_unloads);
+ mono_counters_register ("Assembly load events", MONO_COUNTER_UINT | MONO_COUNTER_PROFILER | MONO_COUNTER_MONOTONIC, &assembly_loads);
+ mono_counters_register ("Assembly unload events", MONO_COUNTER_UINT | MONO_COUNTER_PROFILER | MONO_COUNTER_MONOTONIC, &assembly_unloads);
+ mono_counters_register ("Image load events", MONO_COUNTER_UINT | MONO_COUNTER_PROFILER | MONO_COUNTER_MONOTONIC, &image_loads);
+ mono_counters_register ("Image unload events", MONO_COUNTER_UINT | MONO_COUNTER_PROFILER | MONO_COUNTER_MONOTONIC, &image_unloads);
+ mono_counters_register ("Class load events", MONO_COUNTER_UINT | MONO_COUNTER_PROFILER | MONO_COUNTER_MONOTONIC, &class_loads);
+ mono_counters_register ("Class unload events", MONO_COUNTER_UINT | MONO_COUNTER_PROFILER | MONO_COUNTER_MONOTONIC, &class_unloads);
- InterlockedWrite (&runtime_inited, 1);
#ifndef DISABLE_HELPER_THREAD
counters_init (profiler);
- counters_sample (profiler, 0, FALSE);
+ counters_sample (profiler, 0);
#endif
/* ensure the main thread data and startup are available soon */
- safe_send (profiler, ensure_logbuf (0));
+ safe_send (profiler);
}
static MonoProfiler*
}
#endif
if (do_mono_sample) {
- prof->stat_buffers = create_stat_buffer ();
need_helper_thread = 1;
}
if (do_counters && !need_helper_thread) {
need_helper_thread = 1;
}
- prof->sorted_sample_events = g_ptr_array_sized_new (BUFFER_SIZE / SAMPLE_EVENT_SIZE_IN_SLOTS (0));
+ /*
+ * If you hit this assert while increasing MAX_FRAMES, you need to increase
+ * SAMPLE_BLOCK_SIZE as well.
+ */
+ g_assert (SAMPLE_SLOT_SIZE (MAX_FRAMES) * 2 < LOCK_FREE_ALLOC_SB_USABLE_SIZE (SAMPLE_BLOCK_SIZE));
+
+ // FIXME: We should free this stuff too.
+ mono_lock_free_allocator_init_size_class (&prof->sample_size_class, SAMPLE_SLOT_SIZE (num_frames), SAMPLE_BLOCK_SIZE);
+ mono_lock_free_allocator_init_allocator (&prof->sample_allocator, &prof->sample_size_class);
+
+ mono_lock_free_queue_init (&prof->sample_reuse_queue);
#ifdef DISABLE_HELPER_THREAD
if (hs_mode_ondemand)
#endif
+ g_assert (sizeof (WriterQueueEntry) * 2 < LOCK_FREE_ALLOC_SB_USABLE_SIZE (WRITER_ENTRY_BLOCK_SIZE));
+
+ // FIXME: We should free this stuff too.
+ mono_lock_free_allocator_init_size_class (&prof->writer_entry_size_class, sizeof (WriterQueueEntry), WRITER_ENTRY_BLOCK_SIZE);
+ mono_lock_free_allocator_init_allocator (&prof->writer_entry_allocator, &prof->writer_entry_size_class);
+
mono_lock_free_queue_init (&prof->writer_queue);
- mono_os_sem_init (&prof->writer_queue_sem, 1);
+ mono_os_sem_init (&prof->writer_queue_sem, 0);
+
+ mono_lock_free_queue_init (&prof->dumper_queue);
+ mono_os_sem_init (&prof->dumper_queue_sem, 0);
mono_os_mutex_init (&prof->method_table_mutex);
prof->method_table = mono_conc_hashtable_new (NULL, NULL);
printf ("\theapshot[=MODE] record heap shot info (by default at each major collection)\n");
printf ("\t MODE: every XXms milliseconds, every YYgc collections, ondemand\n");
printf ("\tcounters sample counters every 1s\n");
- printf ("\tsample[=TYPE] use statistical sampling mode (by default cycles/1000)\n");
+ printf ("\tsample[=TYPE] use statistical sampling mode (by default cycles/100)\n");
printf ("\t TYPE: cycles,instr,cacherefs,cachemiss,branches,branchmiss\n");
printf ("\t TYPE can be followed by /FREQUENCY\n");
printf ("\ttime=fast use a faster (but more inaccurate) timer\n");
#endif
if (allow_empty && !val) {
sample_type = SAMPLE_CYCLES;
- sample_freq = 1000;
+ sample_freq = 100;
return;
}
if (strcmp (val, "mono") == 0) {
} else if (*maybe_freq != 0) {
usage (1);
} else {
- sample_freq = 1000;
+ sample_freq = 100;
}
free (val);
}
MONO_PROFILE_ENTER_LEAVE|MONO_PROFILE_JIT_COMPILATION|MONO_PROFILE_EXCEPTIONS|
MONO_PROFILE_MONITOR_EVENTS|MONO_PROFILE_MODULE_EVENTS|MONO_PROFILE_GC_ROOTS|
MONO_PROFILE_INS_COVERAGE|MONO_PROFILE_APPDOMAIN_EVENTS|MONO_PROFILE_CONTEXT_EVENTS|
- MONO_PROFILE_ASSEMBLY_EVENTS;
+ MONO_PROFILE_ASSEMBLY_EVENTS|MONO_PROFILE_GC_FINALIZATION;
+
+ max_allocated_sample_hits = mono_cpu_count () * 1000;
p = desc;
if (strncmp (p, "log", 3))
notraces = num_frames == 0;
continue;
}
+ if ((opt = match_option (p, "maxsamples", &val)) != p) {
+ char *end;
+ max_allocated_sample_hits = strtoul (val, &end, 10);
+ if (!max_allocated_sample_hits)
+ max_allocated_sample_hits = G_MAXINT32;
+ free (val);
+ continue;
+ }
if ((opt = match_option (p, "calldepth", &val)) != p) {
char *end;
max_call_depth = strtoul (val, &end, 10);
utils_init (fast_time);
+ PROF_TLS_INIT ();
+
prof = create_profiler (filename, filters);
- if (!prof)
+ if (!prof) {
+ PROF_TLS_FREE ();
return;
- init_thread ();
+ }
+
+ mono_lls_init (&profiler_thread_list, NULL);
+
+ init_thread (TRUE);
mono_profiler_install (prof, log_shutdown);
mono_profiler_install_gc (gc_event, gc_resize);
mono_profiler_install_allocation (gc_alloc);
mono_profiler_install_gc_moves (gc_moves);
mono_profiler_install_gc_roots (gc_handle, gc_roots);
- mono_profiler_install_appdomain (NULL, domain_loaded, NULL, domain_unloaded);
+ mono_profiler_install_gc_finalize (finalize_begin, finalize_object_begin, finalize_object_end, finalize_end);
+ mono_profiler_install_appdomain (NULL, domain_loaded, domain_unloaded, NULL);
mono_profiler_install_appdomain_name (domain_name);
mono_profiler_install_context (context_loaded, context_unloaded);
- mono_profiler_install_class (NULL, class_loaded, NULL, class_unloaded);
- mono_profiler_install_module (NULL, image_loaded, NULL, image_unloaded);
+ mono_profiler_install_class (NULL, class_loaded, class_unloaded, NULL);
+ mono_profiler_install_module (NULL, image_loaded, image_unloaded, NULL);
mono_profiler_install_assembly (NULL, assembly_loaded, assembly_unloaded, NULL);
mono_profiler_install_thread (thread_start, thread_end);
mono_profiler_install_thread_name (thread_name);
if (do_mono_sample && sample_type == SAMPLE_CYCLES && !only_counters) {
events |= MONO_PROFILE_STATISTICAL;
- mono_profiler_set_statistical_mode (sampling_mode, 1000000 / sample_freq);
+ mono_profiler_set_statistical_mode (sampling_mode, sample_freq);
mono_profiler_install_statistical (mono_sample_hit);
}
mono_profiler_set_events ((MonoProfileFlags)events);
-
- TLS_INIT (tlsbuffer);
- TLS_INIT (tlsmethodlist);
}