[profiler] Move the log format description to the header.
[mono.git] / mono / profiler / log.c
1 /*
2  * log.c: mono log profiler
3  *
4  * Authors:
5  *   Paolo Molaro (lupus@ximian.com)
6  *   Alex Rønne Petersen (alexrp@xamarin.com)
7  *
8  * Copyright 2010 Novell, Inc (http://www.novell.com)
9  * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
10  * Licensed under the MIT license. See LICENSE file in the project root for full license information.
11  */
12
13 #include <config.h>
14 #include <mono/metadata/assembly.h>
15 #include <mono/metadata/debug-helpers.h>
16 #include "../metadata/metadata-internals.h"
17 #include <mono/metadata/mono-config.h>
18 #include <mono/metadata/mono-gc.h>
19 #include <mono/metadata/mono-perfcounters.h>
20 #include <mono/utils/atomic.h>
21 #include <mono/utils/hazard-pointer.h>
22 #include <mono/utils/lock-free-alloc.h>
23 #include <mono/utils/lock-free-queue.h>
24 #include <mono/utils/mono-conc-hashtable.h>
25 #include <mono/utils/mono-counters.h>
26 #include <mono/utils/mono-logger-internals.h>
27 #include <mono/utils/mono-linked-list-set.h>
28 #include <mono/utils/mono-membar.h>
29 #include <mono/utils/mono-mmap.h>
30 #include <mono/utils/mono-os-mutex.h>
31 #include <mono/utils/mono-os-semaphore.h>
32 #include <mono/utils/mono-threads.h>
33 #include <mono/utils/mono-threads-api.h>
34 #include "log.h"
35
36 #ifdef HAVE_DLFCN_H
37 #include <dlfcn.h>
38 #endif
39 #include <fcntl.h>
40 #ifdef HAVE_LINK_H
41 #include <link.h>
42 #endif
43 #ifdef HAVE_UNISTD_H
44 #include <unistd.h>
45 #endif
46 #if defined(__APPLE__)
47 #include <mach/mach_time.h>
48 #endif
49 #include <netinet/in.h>
50 #ifdef HAVE_SYS_MMAN_H
51 #include <sys/mman.h>
52 #endif
53 #include <sys/socket.h>
54 #if defined (HAVE_SYS_ZLIB)
55 #include <zlib.h>
56 #endif
57
// Statistics for internal profiler data structures.
static gint32 sample_allocations_ctr, // presumably bumped when sample structs are allocated — allocator not in this chunk, verify
              buffer_allocations_ctr; // incremented in create_buffer () for every LogBuffer allocated
61
/*
 * Statistics for profiler events. One counter per event kind; each is
 * passed as the COUNTER argument to ENTER_LOG () and atomically
 * incremented when the corresponding event is written.
 */
static gint32 sync_points_ctr,
              heap_objects_ctr,
              heap_starts_ctr,
              heap_ends_ctr,
              heap_roots_ctr,
              gc_events_ctr,
              gc_resizes_ctr,
              gc_allocs_ctr,
              gc_moves_ctr,
              gc_handle_creations_ctr,
              gc_handle_deletions_ctr,
              finalize_begins_ctr,
              finalize_ends_ctr,
              finalize_object_begins_ctr,
              finalize_object_ends_ctr,
              image_loads_ctr,
              image_unloads_ctr,
              assembly_loads_ctr,
              assembly_unloads_ctr,
              class_loads_ctr,
              class_unloads_ctr,
              method_entries_ctr,
              method_exits_ctr,
              method_exception_exits_ctr,
              method_jits_ctr,
              code_buffers_ctr,
              exception_throws_ctr,
              exception_clauses_ctr,
              monitor_events_ctr,
              thread_starts_ctr,
              thread_ends_ctr,
              thread_names_ctr,
              domain_loads_ctr,
              domain_unloads_ctr,
              domain_names_ctr,
              context_loads_ctr,
              context_unloads_ctr,
              sample_ubins_ctr,
              sample_usyms_ctr,
              sample_hits_ctr,
              counter_descriptors_ctr,
              counter_samples_ctr,
              perfcounter_descriptors_ctr,
              perfcounter_samples_ctr,
              coverage_methods_ctr,
              coverage_statements_ctr,
              coverage_classes_ctr,
              coverage_assemblies_ctr;
111
// Pending data to be written to the log, for a single thread.
// Threads periodically flush their own LogBuffers by calling safe_send
typedef struct _LogBuffer LogBuffer;
struct _LogBuffer {
	// Next (older) LogBuffer in processing queue
	LogBuffer *next;

	uint64_t time_base; // timestamp at buffer creation; event times are encoded as deltas from last_time
	uint64_t last_time; // timestamp of the last event written (see emit_time)
	uintptr_t ptr_base; // base for pointer delta encoding; set by the first emit_ptr () into this buffer
	uintptr_t method_base; // base for method pointer delta encoding (see emit_method_inner)
	uintptr_t last_method; // last method pointer emitted; method deltas are relative to this
	uintptr_t obj_base; // base for object delta encoding; object pointers are shifted right by 3 (see emit_obj)
	uintptr_t thread_id; // id of the thread that owns this buffer

	// Bytes allocated for this LogBuffer
	int size;

	// Start of currently unused space in buffer
	unsigned char* cursor;

	// Pointer to start-of-structure-plus-size (for convenience)
	unsigned char* buf_end;

	// Start of data in buffer. Contents follow the "buffer format" described in log.h.
	unsigned char buf [1];
};
139
// Per-thread profiler state, stored in TLS (see PROF_TLS_* below) and,
// for attached threads, linked into log_profiler.profiler_thread_list.
typedef struct {
	MonoLinkedListSetNode node; // node.key holds the thread id (see init_thread)

	// Was this thread added to the LLS?
	gboolean attached;

	// The current log buffer for this thread.
	LogBuffer *buffer;

	// Methods referenced by events in `buffer`, see `MethodInfo`.
	GPtrArray *methods;

	// Current call depth for enter/leave events.
	int call_depth;

	// Indicates whether this thread is currently writing to its `buffer`.
	gboolean busy;

	// Has this thread written a thread end event to `buffer`?
	gboolean ended;

	// Stored in `buffer_lock_state` to take the exclusive lock.
	int small_id;
} MonoProfilerThread;
164
// Do not use these TLS macros directly unless you know what you're doing.
// They store/retrieve the calling thread's MonoProfilerThread pointer,
// using the best TLS mechanism available on the host.

#ifdef HOST_WIN32

// Windows: Win32 TLS slot API.
#define PROF_TLS_SET(VAL) (TlsSetValue (profiler_tls, (VAL)))
#define PROF_TLS_GET() ((MonoProfilerThread *) TlsGetValue (profiler_tls))
#define PROF_TLS_INIT() (profiler_tls = TlsAlloc ())
#define PROF_TLS_FREE() (TlsFree (profiler_tls))

static DWORD profiler_tls;

#elif HAVE_KW_THREAD

// Compiler-supported TLS (__thread): plain assignment/read, no setup needed.
#define PROF_TLS_SET(VAL) (profiler_tls = (VAL))
#define PROF_TLS_GET() (profiler_tls)
#define PROF_TLS_INIT()
#define PROF_TLS_FREE()

static __thread MonoProfilerThread *profiler_tls;

#else

// Fallback: POSIX thread-specific data keys.
#define PROF_TLS_SET(VAL) (pthread_setspecific (profiler_tls, (VAL)))
#define PROF_TLS_GET() ((MonoProfilerThread *) pthread_getspecific (profiler_tls))
#define PROF_TLS_INIT() (pthread_key_create (&profiler_tls, NULL))
#define PROF_TLS_FREE() (pthread_key_delete (profiler_tls))

static pthread_key_t profiler_tls;

#endif
195
// Returns the OS-level id of the calling thread as an integer.
static uintptr_t
thread_id (void)
{
	return (uintptr_t) mono_native_thread_id_get ();
}

// Returns the id of the current process, portably.
static uintptr_t
process_id (void)
{
#ifdef HOST_WIN32
	return (uintptr_t) GetCurrentProcessId ();
#else
	return (uintptr_t) getpid ();
#endif
}
211
// Is the given event mask enabled in the profiler configuration?
#define ENABLED(EVT) (log_config.effective_mask & (EVT))

/*
 * These macros should be used when writing an event to a log buffer. They
 * take care of a bunch of stuff that can be repetitive and error-prone, such
 * as attaching the current thread, acquiring/releasing the buffer lock,
 * incrementing the event counter, expanding the log buffer, etc. They also
 * create a scope so that it's harder to leak the LogBuffer pointer, which can
 * be problematic as the pointer is unstable when the buffer lock isn't
 * acquired.
 *
 * If the calling thread is already attached, these macros will not alter its
 * attach mode (i.e. whether it's added to the LLS). If the thread is not
 * attached, init_thread () will be called with add_to_lls = TRUE.
 */

// Note: thread__ is deliberately visible throughout the scope opened here so
// that the matching EXIT_LOG_EXPLICIT () can use it.
#define ENTER_LOG(COUNTER, BUFFER, SIZE) \
	do { \
		MonoProfilerThread *thread__ = get_thread (); \
		if (thread__->attached) \
			buffer_lock (); \
		g_assert (!thread__->busy && "Why are we trying to write a new event while already writing one?"); \
		thread__->busy = TRUE; \
		InterlockedIncrement ((COUNTER)); \
		LogBuffer *BUFFER = ensure_logbuf_unsafe (thread__, (SIZE))

#define EXIT_LOG_EXPLICIT(SEND) \
		thread__->busy = FALSE; \
		if ((SEND)) \
			send_log_unsafe (TRUE); \
		if (thread__->attached) \
			buffer_unlock (); \
	} while (0)

// Pass these to EXIT_LOG_EXPLICIT () for easier reading.
#define DO_SEND TRUE
#define NO_SEND FALSE

#define EXIT_LOG EXIT_LOG_EXPLICIT (DO_SEND)
251
// A loaded binary image (executable or shared library), kept in a singly
// linked list on the profiler (see log_profiler.binary_objects).
typedef struct _BinaryObject BinaryObject;
struct _BinaryObject {
	BinaryObject *next;
	void *addr; // load address
	char *name; // name/path of the binary
};

// Sampling state for one MonoCounter, linked on log_profiler.counters.
typedef struct MonoCounterAgent {
	MonoCounter *counter;
	// MonoCounterAgent specific data :
	void *value;       // last sampled value (value_size bytes)
	size_t value_size;
	guint32 index;     // id referring to this counter in the log — presumably assigned from counters_index, verify
	gboolean emitted;  // has this counter's descriptor been written to the log yet?
	struct MonoCounterAgent *next;
} MonoCounterAgent;

// Sampling state for one performance counter, linked on
// log_profiler.perfcounters.
typedef struct _PerfCounterAgent PerfCounterAgent;
struct _PerfCounterAgent {
	PerfCounterAgent *next;
	guint32 index;      // id referring to this counter in the log
	char *category_name;
	char *name;
	gint64 value;       // last sampled value
	gboolean emitted;   // has this counter's descriptor been written to the log yet?
	gboolean updated;   // NOTE(review): flags inferred from names; dump loop not in this chunk — verify
	gboolean deleted;
};
280
// Global state for the log profiler; a single instance exists (log_profiler).
struct _MonoProfiler {
	MonoProfilerHandle handle;

	// Output stream; gzfile is used instead of file when zlib compression
	// is active (see dump_header for the write pattern).
	FILE* file;
#if defined (HAVE_SYS_ZLIB)
	gzFile gzfile;
#endif

	char *args;          // profiler argument string; written into the log header
	uint64_t startup_time;
	int timer_overhead;  // estimated cost of one current_time () call (see init_time)

	// Platform-specific timing state used by current_time ().
#ifdef __APPLE__
	mach_timebase_info_data_t timebase_info;
#elif defined (HOST_WIN32)
	LARGE_INTEGER pcounter_freq;
#endif

	// Command server / helper plumbing. NOTE(review): the server loop is
	// not in this chunk; roles inferred from names — verify.
	int pipe_output;
	int command_port;    // written into the log header (see dump_header)
	int server_socket;
	int pipes [2];

	// All attached profiler threads, plus the reader/writer spin lock state
	// protecting their log buffers (documented above buffer_lock ()).
	MonoLinkedListSet profiler_thread_list;
	volatile gint32 buffer_lock_state;
	volatile gint32 buffer_lock_exclusive_intent;

	volatile gint32 runtime_inited;
	volatile gint32 in_shutdown;

	MonoNativeThreadId helper_thread;

	// Writer thread state: send_buffer () enqueues WriterQueueEntry nodes
	// on writer_queue and posts writer_queue_sem.
	MonoNativeThreadId writer_thread;
	volatile gint32 run_writer_thread;
	MonoLockFreeQueue writer_queue;
	MonoSemType writer_queue_sem;

	// Lock-free allocator backing WriterQueueEntry instances.
	MonoLockFreeAllocSizeClass writer_entry_size_class;
	MonoLockFreeAllocator writer_entry_allocator;

	// Methods already registered for serialization (see register_method_local).
	MonoConcurrentHashTable *method_table;
	mono_mutex_t method_table_mutex;

	// Dumper thread state — presumably drains queued sample hits; the
	// dumper loop is not in this chunk.
	MonoNativeThreadId dumper_thread;
	volatile gint32 run_dumper_thread;
	MonoLockFreeQueue dumper_queue;
	MonoSemType dumper_queue_sem;

	// Lock-free allocator and reuse queue for sample structures.
	MonoLockFreeAllocSizeClass sample_size_class;
	MonoLockFreeAllocator sample_allocator;
	MonoLockFreeQueue sample_reuse_queue;

	BinaryObject *binary_objects; // loaded binaries (see BinaryObject)

	// Heapshot scheduling state.
	volatile gint32 heapshot_requested;
	guint64 gc_count;
	guint64 last_hs_time;
	gboolean do_heap_walk;

	// Counter sampling state (see MonoCounterAgent / PerfCounterAgent).
	mono_mutex_t counters_mutex;
	MonoCounterAgent *counters;
	PerfCounterAgent *perfcounters;
	guint32 counters_index;

	// Code coverage state.
	mono_mutex_t coverage_mutex;
	GPtrArray *coverage_data;

	GPtrArray *coverage_filters;
	MonoConcurrentHashTable *coverage_filtered_classes;
	MonoConcurrentHashTable *coverage_suppressed_assemblies;

	MonoConcurrentHashTable *coverage_methods;
	MonoConcurrentHashTable *coverage_assemblies;
	MonoConcurrentHashTable *coverage_classes;

	MonoConcurrentHashTable *coverage_image_to_methods;

	guint32 coverage_previous_offset;
	guint32 coverage_method_id;
};
361
static ProfilerConfig log_config;         // parsed profiler options; consulted via ENABLED ()
static struct _MonoProfiler log_profiler; // the single global profiler instance

// One flushed LogBuffer handed to the writer thread, together with the
// methods referenced by events inside it (see send_buffer ()).
typedef struct {
	MonoLockFreeQueueNode node;
	GPtrArray *methods; // array of MethodInfo*
	LogBuffer *buffer;
} WriterQueueEntry;

#define WRITER_ENTRY_BLOCK_SIZE (mono_pagesize ())

// A JIT-compiled method pending serialization by the writer thread.
typedef struct {
	MonoMethod *method;
	MonoJitInfo *ji;
	uint64_t time; // timestamp when the method was registered (see register_method_local)
} MethodInfo;

// Timestamps are expressed in nanoseconds.
#define TICKS_PER_SEC 1000000000LL
380
/*
 * Returns a timestamp in nanoseconds (see TICKS_PER_SEC), using the best
 * clock available on the host platform.
 */
static uint64_t
current_time (void)
{
#ifdef __APPLE__
	uint64_t time = mach_absolute_time ();

	// Convert Mach time units to nanoseconds with the cached timebase.
	time *= log_profiler.timebase_info.numer;
	time /= log_profiler.timebase_info.denom;

	return time;
#elif defined (HOST_WIN32)
	LARGE_INTEGER value;

	QueryPerformanceCounter (&value);

	// Scale performance-counter ticks to nanoseconds.
	return value.QuadPart * TICKS_PER_SEC / log_profiler.pcounter_freq.QuadPart;
#elif defined (CLOCK_MONOTONIC)
	struct timespec tspec;

	clock_gettime (CLOCK_MONOTONIC, &tspec);

	return ((uint64_t) tspec.tv_sec * TICKS_PER_SEC + tspec.tv_nsec);
#else
	// Last resort: wall-clock time. Not monotonic, so timestamps may go
	// backwards if the system clock is adjusted.
	struct timeval tv;

	gettimeofday (&tv, NULL);

	return ((uint64_t) tv.tv_sec * TICKS_PER_SEC + tv.tv_usec * 1000);
#endif
}
411
/*
 * Initializes platform timing state, then estimates the per-call overhead
 * of current_time () by averaging 256 consecutive calls. The estimate is
 * stored in log_profiler.timer_overhead and later written into the log
 * header (see dump_header).
 */
static void
init_time (void)
{
#ifdef __APPLE__
	mach_timebase_info (&log_profiler.timebase_info);
#elif defined (HOST_WIN32)
	QueryPerformanceFrequency (&log_profiler.pcounter_freq);
#endif

	uint64_t time_start = current_time ();

	for (int i = 0; i < 256; ++i)
		current_time ();

	uint64_t time_end = current_time ();

	log_profiler.timer_overhead = (time_end - time_start) / 256;
}
430
/*
 * Duplicates the NUL-terminated string `s` (terminator included) into a
 * freshly g_malloc'd buffer. The caller owns the result and must release
 * it with g_free ().
 */
static char*
pstrdup (const char *s)
{
	// size_t, not int: strlen returns size_t, and narrowing through int
	// would silently truncate for pathologically long strings.
	size_t len = strlen (s) + 1;
	char *p = (char *) g_malloc (len);
	memcpy (p, s, len);
	return p;
}
439
// Default size in bytes of a LogBuffer allocation (see create_buffer).
#define BUFFER_SIZE (4096 * 16)

/* Worst-case size in bytes of a 64-bit value encoded with LEB128. */
#define LEB128_SIZE 10

/* Size of a value encoded as a single byte. */
#undef BYTE_SIZE // mach/i386/vm_param.h on OS X defines this to 8, but it isn't used for anything.
#define BYTE_SIZE 1

/* Size in bytes of the event prefix (ID + time). */
#define EVENT_SIZE (BYTE_SIZE + LEB128_SIZE)
451
// Allocates a page-aligned buffer via the OS VM allocator. The mapping is
// anonymous and private, so the returned memory is zero-filled.
static void *
alloc_buffer (int size)
{
	return mono_valloc (NULL, size, MONO_MMAP_READ | MONO_MMAP_WRITE | MONO_MMAP_ANON | MONO_MMAP_PRIVATE, MONO_MEM_ACCOUNT_PROFILER);
}

// Releases a buffer obtained from alloc_buffer (); `size` must match the
// size originally allocated.
static void
free_buffer (void *buf, int size)
{
	mono_vfree (buf, size, MONO_MEM_ACCOUNT_PROFILER);
}
463
464 static LogBuffer*
465 create_buffer (uintptr_t tid, int bytes)
466 {
467         LogBuffer* buf = (LogBuffer *) alloc_buffer (MAX (BUFFER_SIZE, bytes));
468
469         InterlockedIncrement (&buffer_allocations_ctr);
470
471         buf->size = BUFFER_SIZE;
472         buf->time_base = current_time ();
473         buf->last_time = buf->time_base;
474         buf->buf_end = (unsigned char *) buf + buf->size;
475         buf->cursor = buf->buf;
476         buf->thread_id = tid;
477
478         return buf;
479 }
480
/*
 * Must be called with the reader lock held if thread is the current thread, or
 * the exclusive lock if thread is a different thread. However, if thread is
 * the current thread, and init_thread () was called with add_to_lls = FALSE,
 * then no locking is necessary.
 */
static void
init_buffer_state (MonoProfilerThread *thread)
{
	// Fresh default-sized buffer; the method array is created lazily by
	// register_method_local ().
	thread->buffer = create_buffer (thread->node.key, 0);
	thread->methods = NULL;
}
493
494 static void
495 clear_hazard_pointers (MonoThreadHazardPointers *hp)
496 {
497         mono_hazard_pointer_clear (hp, 0);
498         mono_hazard_pointer_clear (hp, 1);
499         mono_hazard_pointer_clear (hp, 2);
500 }
501
/*
 * Returns the MonoProfilerThread for the calling thread, creating and
 * registering it on first use. When add_to_lls is TRUE, the new thread is
 * inserted into the profiler's linked list set so the main thread can
 * clean it up on shutdown.
 */
static MonoProfilerThread *
init_thread (gboolean add_to_lls)
{
	MonoProfilerThread *thread = PROF_TLS_GET ();

	/*
	 * Sometimes we may try to initialize a thread twice. One example is the
	 * main thread: We initialize it when setting up the profiler, but we will
	 * also get a thread_start () callback for it. Another example is when
	 * attaching new threads to the runtime: We may get a gc_alloc () callback
	 * for that thread's thread object (where we initialize it), soon followed
	 * by a thread_start () callback.
	 *
	 * These cases are harmless anyhow. Just return if we've already done the
	 * initialization work.
	 */
	if (thread)
		return thread;

	thread = g_malloc (sizeof (MonoProfilerThread));
	thread->node.key = thread_id (); // the LLS key doubles as the thread id used by create_buffer ()
	thread->attached = add_to_lls;
	thread->call_depth = 0;
	thread->busy = 0;
	thread->ended = FALSE;

	init_buffer_state (thread);

	thread->small_id = mono_thread_info_register_small_id ();

	/*
	 * Some internal profiler threads don't need to be cleaned up
	 * by the main thread on shutdown.
	 */
	if (add_to_lls) {
		MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();
		g_assert (mono_lls_insert (&log_profiler.profiler_thread_list, hp, &thread->node) && "Why can't we insert the thread in the LLS?");
		clear_hazard_pointers (hp);
	}

	PROF_TLS_SET (thread);

	return thread;
}
546
// Frees a thread's profiler state and clears its TLS slot.
// Only valid if init_thread () was called with add_to_lls = FALSE.
static void
deinit_thread (MonoProfilerThread *thread)
{
	g_assert (!thread->attached && "Why are we manually freeing an attached thread?");

	g_free (thread);
	PROF_TLS_SET (NULL);
}

// Returns the calling thread's profiler state, attaching the thread
// (add_to_lls = TRUE) if it hasn't been initialized yet.
static MonoProfilerThread *
get_thread (void)
{
	return init_thread (TRUE);
}
562
/*
 * Returns a buffer with space for at least `bytes` more bytes, chaining a
 * fresh LogBuffer in front of the current one if it is too full. "Unsafe"
 * because the caller must guarantee exclusive access to `thread` — e.g.
 * via ENTER_LOG (buffer lock + busy flag), or because the thread was
 * initialized with add_to_lls = FALSE. (The comment previously here,
 * "Only valid if init_thread () was called with add_to_lls = FALSE",
 * looked like a copy-paste from deinit_thread (): ENTER_LOG calls this
 * for attached threads too.)
 */
static LogBuffer *
ensure_logbuf_unsafe (MonoProfilerThread *thread, int bytes)
{
	LogBuffer *old = thread->buffer;

	if (old->cursor + bytes < old->buf_end)
		return old;

	// Not enough room: allocate a buffer sized for the request and make it
	// the thread's current buffer, keeping the old one chained for sending.
	LogBuffer *new_ = create_buffer (thread->node.key, bytes);
	new_->next = old;
	thread->buffer = new_;

	return new_;
}
578
579 /*
580  * This is a reader/writer spin lock of sorts used to protect log buffers.
581  * When a thread modifies its own log buffer, it increments the reader
582  * count. When a thread wants to access log buffers of other threads, it
583  * takes the exclusive lock.
584  *
585  * `buffer_lock_state` holds the reader count in its lower 16 bits, and
586  * the small ID of the thread currently holding the exclusive (writer)
587  * lock in its upper 16 bits. Both can be zero. It's important that the
588  * whole lock state is a single word that can be read/written atomically
589  * to avoid race conditions where there could end up being readers while
590  * the writer lock is held.
591  *
592  * The lock is writer-biased. When a thread wants to take the exclusive
593  * lock, it increments `buffer_lock_exclusive_intent` which will make new
594  * readers spin until it's back to zero, then takes the exclusive lock
595  * once the reader count has reached zero. After releasing the exclusive
596  * lock, it decrements `buffer_lock_exclusive_intent`, which, when it
597  * reaches zero again, allows readers to increment the reader count.
598  *
599  * The writer bias is necessary because we take the exclusive lock in
600  * `gc_event ()` during STW. If the writer bias was not there, and a
601  * program had a large number of threads, STW-induced pauses could be
602  * significantly longer than they have to be. Also, we emit periodic
603  * sync points from the helper thread, which requires taking the
604  * exclusive lock, and we need those to arrive with a reasonably
605  * consistent frequency so that readers don't have to queue up too many
606  * events between sync points.
607  *
608  * The lock does not support recursion.
609  */
610
/*
 * Takes the reader side of the buffer lock by incrementing the reader
 * count in buffer_lock_state, spinning while another thread holds — or
 * has signaled intent to take — the exclusive lock. No-op when the
 * calling thread itself holds the exclusive lock.
 */
static void
buffer_lock (void)
{
	/*
	 * If the thread holding the exclusive lock tries to modify the
	 * reader count, just make it a no-op. This way, we also avoid
	 * invoking the GC safe point macros below, which could break if
	 * done from a thread that is currently the initiator of STW.
	 *
	 * In other words, we rely on the fact that the GC thread takes
	 * the exclusive lock in the gc_event () callback when the world
	 * is about to stop.
	 */
	if (InterlockedRead (&log_profiler.buffer_lock_state) != get_thread ()->small_id << 16) {
		MONO_ENTER_GC_SAFE;

		gint32 old, new_;

		do {
		restart:
			// Hold off if a thread wants to take the exclusive lock.
			while (InterlockedRead (&log_profiler.buffer_lock_exclusive_intent))
				mono_thread_info_yield ();

			old = InterlockedRead (&log_profiler.buffer_lock_state);

			// Is a thread holding the exclusive lock?
			if (old >> 16) {
				mono_thread_info_yield ();
				goto restart;
			}

			new_ = old + 1;
		} while (InterlockedCompareExchange (&log_profiler.buffer_lock_state, new_, old) != old);

		MONO_EXIT_GC_SAFE;
	}

	mono_memory_barrier ();
}
651
// Releases the reader side of the buffer lock taken by buffer_lock ().
// Like buffer_lock (), this is a no-op when the calling thread holds the
// exclusive lock.
static void
buffer_unlock (void)
{
	mono_memory_barrier ();

	gint32 state = InterlockedRead (&log_profiler.buffer_lock_state);

	// See the comment in buffer_lock ().
	if (state == PROF_TLS_GET ()->small_id << 16)
		return;

	g_assert (state && "Why are we decrementing a zero reader count?");
	g_assert (!(state >> 16) && "Why is the exclusive lock held?");

	InterlockedDecrement (&log_profiler.buffer_lock_state);
}
668
/*
 * Takes the exclusive (writer) side of the buffer lock: first publishes
 * intent (stalling new readers), then waits for the reader count to drain
 * to zero before installing this thread's small id in the upper 16 bits
 * of buffer_lock_state. The lock is not recursive.
 */
static void
buffer_lock_excl (void)
{
	gint32 new_ = get_thread ()->small_id << 16;

	g_assert (InterlockedRead (&log_profiler.buffer_lock_state) != new_ && "Why are we taking the exclusive lock twice?");

	InterlockedIncrement (&log_profiler.buffer_lock_exclusive_intent);

	MONO_ENTER_GC_SAFE;

	// Succeeds only when the state is 0: no readers, no writer.
	while (InterlockedCompareExchange (&log_profiler.buffer_lock_state, new_, 0))
		mono_thread_info_yield ();

	MONO_EXIT_GC_SAFE;

	mono_memory_barrier ();
}
687
// Releases the exclusive lock taken by buffer_lock_excl () and withdraws
// the intent flag, allowing readers to proceed again.
static void
buffer_unlock_excl (void)
{
	mono_memory_barrier ();

	gint32 state = InterlockedRead (&log_profiler.buffer_lock_state);
	gint32 excl = state >> 16; // small id of the exclusive holder

	g_assert (excl && "Why is the exclusive lock not held?");
	g_assert (excl == PROF_TLS_GET ()->small_id && "Why does another thread hold the exclusive lock?");
	g_assert (!(state & 0xFFFF) && "Why are there readers when the exclusive lock is held?");

	InterlockedWrite (&log_profiler.buffer_lock_state, 0);
	InterlockedDecrement (&log_profiler.buffer_lock_exclusive_intent);
}
703
/*
 * Encodes `value` as ULEB128 starting at `buf`, storing the position one
 * past the last byte written into `*endbuf`.
 */
static void
encode_uleb128 (uint64_t value, uint8_t *buf, uint8_t **endbuf)
{
	uint8_t *out = buf;

	for (;;) {
		uint8_t chunk = value & 0x7f;

		value >>= 7;

		if (!value) {
			*out++ = chunk; // final byte: high bit clear
			break;
		}

		*out++ = chunk | 0x80; // continuation bit set
	}

	*endbuf = out;
}
721
/*
 * Encodes `value` as SLEB128 starting at `buf`, storing the position one
 * past the last byte written into `*endbuf`.
 */
static void
encode_sleb128 (intptr_t value, uint8_t *buf, uint8_t **endbuf)
{
	const unsigned int bits = sizeof (intptr_t) * 8;
	const int is_negative = value < 0;
	uint8_t *out = buf;

	for (;;) {
		uint8_t chunk = value & 0x7f;

		value >>= 7;

		/*
		 * Force sign extension in case >> on a negative signed value is a
		 * logical rather than arithmetic shift on this implementation.
		 */
		if (is_negative)
			value |= - ((intptr_t) 1 << (bits - 7));

		/*
		 * Terminate once the remaining value is nothing but the sign
		 * extension of the chunk's sign bit (0x40).
		 */
		if ((value == 0 && !(chunk & 0x40)) || (value == -1 && (chunk & 0x40))) {
			*out++ = chunk;
			break;
		}

		*out++ = chunk | 0x80;
	}

	*endbuf = out;
}
755
756 static void
757 emit_byte (LogBuffer *logbuffer, int value)
758 {
759         logbuffer->cursor [0] = value;
760         logbuffer->cursor++;
761
762         g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
763 }
764
// Appends an unsigned value in ULEB128 encoding.
static void
emit_value (LogBuffer *logbuffer, int value)
{
	encode_uleb128 (value, logbuffer->cursor, &logbuffer->cursor);

	g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
}

// Appends a timestamp as the ULEB128-encoded delta from the buffer's
// last_time, then advances last_time.
static void
emit_time (LogBuffer *logbuffer, uint64_t value)
{
	uint64_t tdiff = value - logbuffer->last_time;
	encode_uleb128 (tdiff, logbuffer->cursor, &logbuffer->cursor);
	logbuffer->last_time = value;

	g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
}
782
// Appends an event prefix: the event id byte followed by the given timestamp.
static void
emit_event_time (LogBuffer *logbuffer, int event, uint64_t time)
{
	emit_byte (logbuffer, event);
	emit_time (logbuffer, time);
}

// Appends an event prefix stamped with the current time.
static void
emit_event (LogBuffer *logbuffer, int event)
{
	emit_event_time (logbuffer, event, current_time ());
}
795
// Appends a signed 64-bit value in SLEB128 encoding.
static void
emit_svalue (LogBuffer *logbuffer, int64_t value)
{
	encode_sleb128 (value, logbuffer->cursor, &logbuffer->cursor);

	g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
}

// Appends an unsigned 64-bit value in ULEB128 encoding.
static void
emit_uvalue (LogBuffer *logbuffer, uint64_t value)
{
	encode_uleb128 (value, logbuffer->cursor, &logbuffer->cursor);

	g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
}
811
/*
 * Appends a pointer as a signed delta from the buffer's ptr_base. The
 * first pointer emitted into a buffer establishes the base (and is thus
 * encoded as delta 0).
 */
static void
emit_ptr (LogBuffer *logbuffer, const void *ptr)
{
	if (!logbuffer->ptr_base)
		logbuffer->ptr_base = (uintptr_t) ptr;

	emit_svalue (logbuffer, (intptr_t) ptr - logbuffer->ptr_base);

	g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
}
822
/*
 * Appends a method pointer as a signed delta from the previously emitted
 * method (last_method). The first method emitted into a buffer sets both
 * method_base and last_method.
 */
static void
emit_method_inner (LogBuffer *logbuffer, void *method)
{
	if (!logbuffer->method_base) {
		logbuffer->method_base = (intptr_t) method;
		logbuffer->last_method = (intptr_t) method;
	}

	encode_sleb128 ((intptr_t) ((char *) method - (char *) logbuffer->last_method), logbuffer->cursor, &logbuffer->cursor);
	logbuffer->last_method = (intptr_t) method;

	g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
}
836
/*
 * Queues `method` (with optional JIT info) for serialization if it hasn't
 * been registered in the global method table yet. The MethodInfo is
 * accumulated on the current thread's `methods` array, which travels with
 * the buffer when it is sent (see send_buffer ()).
 *
 * The reader lock must be held.
 */
static void
register_method_local (MonoMethod *method, MonoJitInfo *ji)
{
	MonoProfilerThread *thread = get_thread ();

	if (!mono_conc_hashtable_lookup (log_profiler.method_table, method)) {
		MethodInfo *info = (MethodInfo *) g_malloc (sizeof (MethodInfo));

		info->method = method;
		info->ji = ji;
		info->time = current_time ();

		// Create the per-thread method array lazily.
		GPtrArray *arr = thread->methods ? thread->methods : (thread->methods = g_ptr_array_new ());
		g_ptr_array_add (arr, info);
	}
}

// Registers the method (without JIT info) and emits its pointer delta.
static void
emit_method (LogBuffer *logbuffer, MonoMethod *method)
{
	register_method_local (method, NULL);
	emit_method_inner (logbuffer, method);
}
861
/*
 * Appends an object pointer as a signed delta from the buffer's obj_base.
 * Pointers are shifted right by 3 bits before encoding — this presumes
 * object pointers are at least 8-byte aligned (TODO confirm) — which
 * keeps the deltas small.
 */
static void
emit_obj (LogBuffer *logbuffer, void *ptr)
{
	if (!logbuffer->obj_base)
		logbuffer->obj_base = (uintptr_t) ptr >> 3;

	emit_svalue (logbuffer, ((uintptr_t) ptr >> 3) - logbuffer->obj_base);

	g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
}
872
873 static void
874 emit_string (LogBuffer *logbuffer, const char *str, size_t size)
875 {
876         size_t i = 0;
877         if (str) {
878                 for (; i < size; i++) {
879                         if (str[i] == '\0')
880                                 break;
881                         emit_byte (logbuffer, str [i]);
882                 }
883         }
884         emit_byte (logbuffer, '\0');
885 }
886
887 static void
888 emit_double (LogBuffer *logbuffer, double value)
889 {
890         int i;
891         unsigned char buffer[8];
892         memcpy (buffer, &value, 8);
893 #if G_BYTE_ORDER == G_BIG_ENDIAN
894         for (i = 7; i >= 0; i--)
895 #else
896         for (i = 0; i < 8; i++)
897 #endif
898                 emit_byte (logbuffer, buffer[i]);
899 }
900
// Stores the low 16 bits of `value` at `buf` in little-endian order and
// returns the position just past them.
static char*
write_int16 (char *buf, int32_t value)
{
	buf [0] = (char) value;
	buf [1] = (char) (value >> 8);

	return buf + 2;
}
911
// Stores `value` at `buf` as 4 little-endian bytes and returns the
// position just past them.
static char*
write_int32 (char *buf, int32_t value)
{
	for (int i = 0; i < 4; i++, value >>= 8)
		buf [i] = (char) value;

	return buf + 4;
}
922
// Stores `value` at `buf` as 8 little-endian bytes and returns the
// position just past them.
static char*
write_int64 (char *buf, int64_t value)
{
	for (int i = 0; i < 8; i++, value >>= 8)
		buf [i] = (char) value;

	return buf + 8;
}
933
// Writes a length-prefixed string (length includes the NUL terminator,
// stored as a little-endian int32) and returns the position just past it.
static char *
write_header_string (char *p, const char *str)
{
	size_t len = strlen (str) + 1;

	p = write_int32 (p, len);
	memcpy (p, str, len); // length already known; no need to rescan with strcpy

	return p + len;
}
944
/*
 * Writes the log file header: the header id, log format version, pointer
 * size, startup time, timer overhead, flags, process id, command port and
 * the argument/architecture/OS strings. All multi-byte fields are written
 * little-endian via the write_int* helpers.
 */
static void
dump_header (void)
{
	const char *args = log_profiler.args;
	const char *arch = mono_config_get_cpu ();
	const char *os = mono_config_get_os ();

	// Exact size of the fixed fields plus the three length-prefixed strings.
	char *hbuf = g_malloc (
		sizeof (gint32) /* header id */ +
		sizeof (gint8) /* major version */ +
		sizeof (gint8) /* minor version */ +
		sizeof (gint8) /* data version */ +
		sizeof (gint8) /* word size */ +
		sizeof (gint64) /* startup time */ +
		sizeof (gint32) /* timer overhead */ +
		sizeof (gint32) /* flags */ +
		sizeof (gint32) /* process id */ +
		sizeof (gint16) /* command port */ +
		sizeof (gint32) + strlen (args) + 1 /* arguments */ +
		sizeof (gint32) + strlen (arch) + 1 /* architecture */ +
		sizeof (gint32) + strlen (os) + 1 /* operating system */
	);
	char *p = hbuf;

	p = write_int32 (p, LOG_HEADER_ID);
	*p++ = LOG_VERSION_MAJOR;
	*p++ = LOG_VERSION_MINOR;
	*p++ = LOG_DATA_VERSION;
	*p++ = sizeof (void *);
	p = write_int64 (p, ((uint64_t) time (NULL)) * 1000); // wall-clock startup time in milliseconds
	p = write_int32 (p, log_profiler.timer_overhead);
	p = write_int32 (p, 0); /* flags */
	p = write_int32 (p, process_id ());
	p = write_int16 (p, log_profiler.command_port);
	p = write_header_string (p, args);
	p = write_header_string (p, arch);
	p = write_header_string (p, os);

#if defined (HAVE_SYS_ZLIB)
	if (log_profiler.gzfile) {
		gzwrite (log_profiler.gzfile, hbuf, p - hbuf);
	} else
#endif
	{
		fwrite (hbuf, p - hbuf, 1, log_profiler.file);
		fflush (log_profiler.file);
	}

	g_free (hbuf);
}
995
996 /*
997  * Must be called with the reader lock held if thread is the current thread, or
998  * the exclusive lock if thread is a different thread. However, if thread is
999  * the current thread, and init_thread () was called with add_to_lls = FALSE,
1000  * then no locking is necessary.
1001  */
static void
send_buffer (MonoProfilerThread *thread)
{
	// Package the thread's current buffer chain and method list into a
	// writer queue entry; the writer thread takes ownership of both.
	WriterQueueEntry *entry = mono_lock_free_alloc (&log_profiler.writer_entry_allocator);
	entry->methods = thread->methods;
	entry->buffer = thread->buffer;

	mono_lock_free_queue_node_init (&entry->node, FALSE);

	mono_lock_free_queue_enqueue (&log_profiler.writer_queue, &entry->node);
	// Wake the writer thread so it processes the new entry.
	mono_os_sem_post (&log_profiler.writer_queue_sem);
}
1014
/*
 * Hazard-pointer reclamation callback for a MonoProfilerThread: if the
 * thread never emitted its end event, synthesize one, then flush the
 * thread's buffers to the writer and free the structure.
 */
static void
free_thread (gpointer p)
{
	MonoProfilerThread *thread = p;

	if (!thread->ended) {
		/*
		 * The thread is being cleaned up by the main thread during
		 * shutdown. This typically happens for internal runtime
		 * threads. We need to synthesize a thread end event.
		 */

		InterlockedIncrement (&thread_ends_ctr);

		LogBuffer *buf = ensure_logbuf_unsafe (thread,
			EVENT_SIZE /* event */ +
			BYTE_SIZE /* type */ +
			LEB128_SIZE /* tid */
		);

		emit_event (buf, TYPE_END_UNLOAD | TYPE_METADATA);
		emit_byte (buf, TYPE_THREAD);
		emit_ptr (buf, (void *) thread->node.key); // node.key holds the tid
	}

	send_buffer (thread);

	g_free (thread);
}
1044
/*
 * Unlinks a thread from the profiler's lock-free thread list and
 * schedules it for hazard-safe reclamation via free_thread ().
 */
static void
remove_thread (MonoProfilerThread *thread)
{
	MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();

	// Only free if we actually removed the node; another thread may have
	// raced us to it.
	if (mono_lls_remove (&log_profiler.profiler_thread_list, hp, &thread->node))
		mono_thread_hazardous_try_free (thread, free_thread);

	clear_hazard_pointers (hp);
}
1055
/*
 * Writes a chain of log buffers to the output file and frees them. The
 * recursion on the next links writes older buffers first so events stay
 * in chronological order. Empty buffers are not written but are still
 * freed. Each non-empty buffer is preceded by a small header carrying
 * the BUF_ID marker, the data length, and the buffer's base values.
 */
static void
dump_buffer (LogBuffer *buf)
{
	char hbuf [128];
	char *p = hbuf;

	if (buf->next)
		dump_buffer (buf->next);

	// Skip writing entirely empty buffers.
	if (buf->cursor - buf->buf) {
		p = write_int32 (p, BUF_ID);
		p = write_int32 (p, buf->cursor - buf->buf);
		p = write_int64 (p, buf->time_base);
		p = write_int64 (p, buf->ptr_base);
		p = write_int64 (p, buf->obj_base);
		p = write_int64 (p, buf->thread_id);
		p = write_int64 (p, buf->method_base);

#if defined (HAVE_SYS_ZLIB)
		if (log_profiler.gzfile) {
			gzwrite (log_profiler.gzfile, hbuf, p - hbuf);
			gzwrite (log_profiler.gzfile, buf->buf, buf->cursor - buf->buf);
		} else
#endif
		{
			fwrite (hbuf, p - hbuf, 1, log_profiler.file);
			fwrite (buf->buf, buf->cursor - buf->buf, 1, log_profiler.file);
			fflush (log_profiler.file);
		}
	}

	free_buffer (buf, buf->size);
}
1089
1090 static void
1091 dump_buffer_threadless (LogBuffer *buf)
1092 {
1093         for (LogBuffer *iter = buf; iter; iter = iter->next)
1094                 iter->thread_id = 0;
1095
1096         dump_buffer (buf);
1097 }
1098
1099 // Only valid if init_thread () was called with add_to_lls = FALSE.
1100 static void
1101 send_log_unsafe (gboolean if_needed)
1102 {
1103         MonoProfilerThread *thread = PROF_TLS_GET ();
1104
1105         if (!if_needed || (if_needed && thread->buffer->next)) {
1106                 if (!thread->attached)
1107                         for (LogBuffer *iter = thread->buffer; iter; iter = iter->next)
1108                                 iter->thread_id = 0;
1109
1110                 send_buffer (thread);
1111                 init_buffer_state (thread);
1112         }
1113 }
1114
// Assumes that the exclusive lock is held. Flushes the log buffers of
// every thread in the profiler thread list out to the writer thread.
static void
sync_point_flush (void)
{
	g_assert (InterlockedRead (&log_profiler.buffer_lock_state) == PROF_TLS_GET ()->small_id << 16 && "Why don't we hold the exclusive lock?");

	MONO_LLS_FOREACH_SAFE (&log_profiler.profiler_thread_list, MonoProfilerThread, thread) {
		g_assert (thread->attached && "Why is a thread in the LLS not attached?");

		send_buffer (thread);
		init_buffer_state (thread);
	} MONO_LLS_FOREACH_SAFE_END
}
1128
// Assumes that the exclusive lock is held. Emits a sync point event of
// the given type and immediately flushes it to the writer thread.
static void
sync_point_mark (MonoProfilerSyncPointType type)
{
	g_assert (InterlockedRead (&log_profiler.buffer_lock_state) == PROF_TLS_GET ()->small_id << 16 && "Why don't we hold the exclusive lock?");

	ENTER_LOG (&sync_points_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		LEB128_SIZE /* type */
	);

	emit_event (logbuffer, TYPE_META | TYPE_SYNC_POINT);
	emit_byte (logbuffer, type);

	// Exit without the automatic send, then flush explicitly below.
	EXIT_LOG_EXPLICIT (NO_SEND);

	send_log_unsafe (FALSE);
}
1147
// Assumes that the exclusive lock is held. Flushes all thread buffers
// first so the subsequent sync point marks a consistent cut across the
// entire event stream.
static void
sync_point (MonoProfilerSyncPointType type)
{
	sync_point_flush ();
	sync_point_mark (type);
}
1155
/*
 * mono_gc_walk_heap () callback: emits a heap object event describing the
 * object, its class, its (alignment-rounded) size, and all its outgoing
 * references. Returns 0 to continue the heap walk.
 */
static int
gc_reference (MonoObject *obj, MonoClass *klass, uintptr_t size, uintptr_t num, MonoObject **refs, uintptr_t *offsets, void *data)
{
	/* account for object alignment in the heap */
	size += 7;
	size &= ~7;

	ENTER_LOG (&heap_objects_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		LEB128_SIZE /* obj */ +
		LEB128_SIZE /* klass */ +
		LEB128_SIZE /* size */ +
		LEB128_SIZE /* num */ +
		num * (
			LEB128_SIZE /* offset */ +
			LEB128_SIZE /* ref */
		)
	);

	emit_event (logbuffer, TYPE_HEAP_OBJECT | TYPE_HEAP);
	emit_obj (logbuffer, obj);
	emit_ptr (logbuffer, klass);
	emit_value (logbuffer, size);
	emit_value (logbuffer, num);

	// Reference offsets are delta-encoded against the previous offset.
	uintptr_t last_offset = 0;

	for (int i = 0; i < num; ++i) {
		emit_value (logbuffer, offsets [i] - last_offset);
		last_offset = offsets [i];
		emit_obj (logbuffer, refs [i]);
	}

	EXIT_LOG;

	return 0;
}
1193
/*
 * Emits a heap roots event listing `num` root objects together with their
 * root type and extra info word, plus the current major collection count.
 */
static void
gc_roots (MonoProfiler *prof, MonoObject *const *objects, const MonoProfilerGCRootType *root_types, const uintptr_t *extra_info, uint64_t num)
{
	ENTER_LOG (&heap_roots_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		LEB128_SIZE /* num */ +
		LEB128_SIZE /* collections */ +
		num * (
			LEB128_SIZE /* object */ +
			LEB128_SIZE /* root type */ +
			LEB128_SIZE /* extra info */
		)
	);

	emit_event (logbuffer, TYPE_HEAP_ROOT | TYPE_HEAP);
	emit_value (logbuffer, num);
	emit_value (logbuffer, mono_gc_collection_count (mono_gc_max_generation ()));

	for (int i = 0; i < num; ++i) {
		emit_obj (logbuffer, objects [i]);
		emit_byte (logbuffer, root_types [i]);
		emit_value (logbuffer, extra_info [i]);
	}

	EXIT_LOG;
}
1220
1221
// If an on-demand heapshot was requested (e.g. via the command port),
// force a major collection; the GC event handler performs the heap walk.
static void
trigger_on_demand_heapshot (void)
{
	if (InterlockedRead (&log_profiler.heapshot_requested))
		mono_gc_collect (mono_gc_max_generation ());
}
1228
1229 #define ALL_GC_EVENTS_MASK (PROFLOG_GC_EVENTS | PROFLOG_GC_MOVE_EVENTS | PROFLOG_GC_ROOT_EVENTS)
1230
/*
 * Main GC event callback. Emits a GC event record (when enabled) and
 * drives the heapshot state machine: decides at GC start whether this
 * collection should be accompanied by a heap walk, brackets the world
 * stop/start with sync points to keep buffer contents ordered, and
 * performs the heap walk while the world is stopped.
 */
static void
gc_event (MonoProfiler *profiler, MonoProfilerGCEvent ev, uint32_t generation)
{
	if (ENABLED (PROFLOG_GC_EVENTS)) {
		ENTER_LOG (&gc_events_ctr, logbuffer,
			EVENT_SIZE /* event */ +
			BYTE_SIZE /* gc event */ +
			BYTE_SIZE /* generation */
		);

		emit_event (logbuffer, TYPE_GC_EVENT | TYPE_GC);
		emit_byte (logbuffer, ev);
		emit_byte (logbuffer, generation);

		EXIT_LOG;
	}

	switch (ev) {
	case MONO_GC_EVENT_START:
		// Only major (max generation) collections are counted.
		if (generation == mono_gc_max_generation ())
			log_profiler.gc_count++;

		// Decide whether this GC should trigger a heap walk.
		switch (log_config.hs_mode) {
		case MONO_PROFILER_HEAPSHOT_NONE:
			log_profiler.do_heap_walk = FALSE;
			break;
		case MONO_PROFILER_HEAPSHOT_MAJOR:
			log_profiler.do_heap_walk = generation == mono_gc_max_generation ();
			break;
		case MONO_PROFILER_HEAPSHOT_ON_DEMAND:
			log_profiler.do_heap_walk = InterlockedRead (&log_profiler.heapshot_requested);
			break;
		case MONO_PROFILER_HEAPSHOT_X_GC:
			log_profiler.do_heap_walk = !(log_profiler.gc_count % log_config.hs_freq_gc);
			break;
		case MONO_PROFILER_HEAPSHOT_X_MS:
			// NOTE(review): `/ 1000 * 1000` truncates the elapsed time to a
			// multiple of 1000 before comparing against hs_freq_ms — confirm
			// the intended unit conversion from current_time () here.
			log_profiler.do_heap_walk = (current_time () - log_profiler.last_hs_time) / 1000 * 1000 >= log_config.hs_freq_ms;
			break;
		default:
			g_assert_not_reached ();
		}

		// Only hook the roots callback while a heap walk is pending.
		if (ENABLED (PROFLOG_GC_ROOT_EVENTS) && log_profiler.do_heap_walk)
			mono_profiler_set_gc_roots_callback (log_profiler.handle, gc_roots);

		break;
	case MONO_GC_EVENT_PRE_STOP_WORLD_LOCKED:
		/*
		 * Ensure that no thread can be in the middle of writing to
		 * a buffer when the world stops...
		 */
		buffer_lock_excl ();
		break;
	case MONO_GC_EVENT_POST_STOP_WORLD:
		/*
		 * ... So that we now have a consistent view of all buffers.
		 * This allows us to flush them. We need to do this because
		 * they may contain object allocation events that need to be
		 * committed to the log file before any object move events
		 * that will be produced during this GC.
		 */
		if (ENABLED (ALL_GC_EVENTS_MASK))
			sync_point (SYNC_POINT_WORLD_STOP);

		// Surround heapshots with HEAP_START/HEAP_END events.
		if (log_profiler.do_heap_walk) {
			ENTER_LOG (&heap_starts_ctr, logbuffer,
				EVENT_SIZE /* event */
			);

			emit_event (logbuffer, TYPE_HEAP_START | TYPE_HEAP);

			EXIT_LOG;
		}

		break;
	case MONO_GC_EVENT_PRE_START_WORLD:
		mono_profiler_set_gc_roots_callback (log_profiler.handle, NULL);

		if (log_profiler.do_heap_walk) {
			mono_gc_walk_heap (0, gc_reference, NULL);

			ENTER_LOG (&heap_ends_ctr, logbuffer,
				EVENT_SIZE /* event */
			);

			emit_event (logbuffer, TYPE_HEAP_END | TYPE_HEAP);

			EXIT_LOG;

			// Reset heapshot state for the next collection.
			log_profiler.do_heap_walk = FALSE;
			log_profiler.last_hs_time = current_time ();

			InterlockedWrite (&log_profiler.heapshot_requested, 0);
		}

		/*
		 * Similarly, we must now make sure that any object moves
		 * written to the GC thread's buffer are flushed. Otherwise,
		 * object allocation events for certain addresses could come
		 * after the move events that made those addresses available.
		 */
		if (ENABLED (ALL_GC_EVENTS_MASK))
			sync_point_mark (SYNC_POINT_WORLD_START);
		break;
	case MONO_GC_EVENT_POST_START_WORLD_UNLOCKED:
		/*
		 * Finally, it is safe to allow other threads to write to
		 * their buffers again.
		 */
		buffer_unlock_excl ();
		break;
	default:
		break;
	}
}
1347
// Emits a GC heap resize event carrying the new heap size.
static void
gc_resize (MonoProfiler *profiler, uintptr_t new_size)
{
	ENTER_LOG (&gc_resizes_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		LEB128_SIZE /* new size */
	);

	emit_event (logbuffer, TYPE_GC_RESIZE | TYPE_GC);
	emit_value (logbuffer, new_size);

	EXIT_LOG;
}
1361
// A captured managed stack trace, filled in by walk_stack () via
// collect_bt () and consumed by emit_bt ().
typedef struct {
	int count; // number of valid entries in the arrays below
	MonoMethod* methods [MAX_FRAMES];
	int32_t il_offsets [MAX_FRAMES];
	int32_t native_offsets [MAX_FRAMES];
} FrameData;
1368
1369 static mono_bool
1370 walk_stack (MonoMethod *method, int32_t native_offset, int32_t il_offset, mono_bool managed, void* data)
1371 {
1372         FrameData *frame = (FrameData *)data;
1373         if (method && frame->count < log_config.num_frames) {
1374                 frame->il_offsets [frame->count] = il_offset;
1375                 frame->native_offsets [frame->count] = native_offset;
1376                 frame->methods [frame->count++] = method;
1377         }
1378         return frame->count == log_config.num_frames;
1379 }
1380
/*
 * a note about stack walks: they can cause more profiler events to fire,
 * so we need to make sure they don't happen after we started emitting an
 * event, hence the collect_bt/emit_bt split.
 */
static void
collect_bt (FrameData *data)
{
	// Capture up to log_config.num_frames managed frames into data.
	data->count = 0;
	mono_stack_walk_no_il (walk_stack, data);
}
1392
/*
 * Emits a previously collected backtrace into the given buffer: the frame
 * count followed by the methods in reverse collection order. Note that
 * this consumes data->count, so a FrameData can only be emitted once.
 */
static void
emit_bt (LogBuffer *logbuffer, FrameData *data)
{
	emit_value (logbuffer, data->count);

	while (data->count)
		emit_method (logbuffer, data->methods [--data->count]);
}
1401
/*
 * Emits an allocation event for `obj` with its class and alignment-rounded
 * size, optionally with a backtrace (when enter/leave instrumentation is
 * off, the runtime is initialized, and frame collection is configured).
 */
static void
gc_alloc (MonoProfiler *prof, MonoObject *obj)
{
	// do_bt doubles as both a flag and the event type modifier bits.
	int do_bt = (!log_config.enter_leave && InterlockedRead (&log_profiler.runtime_inited) && log_config.num_frames) ? TYPE_ALLOC_BT : 0;
	FrameData data;
	uintptr_t len = mono_object_get_size (obj);
	/* account for object alignment in the heap */
	len += 7;
	len &= ~7;

	// Walk the stack before ENTER_LOG; see the collect_bt/emit_bt note.
	if (do_bt)
		collect_bt (&data);

	ENTER_LOG (&gc_allocs_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		LEB128_SIZE /* klass */ +
		LEB128_SIZE /* obj */ +
		LEB128_SIZE /* size */ +
		(do_bt ? (
			LEB128_SIZE /* count */ +
			data.count * (
				LEB128_SIZE /* method */
			)
		) : 0)
	);

	emit_event (logbuffer, do_bt | TYPE_ALLOC);
	emit_ptr (logbuffer, mono_object_get_class (obj));
	emit_obj (logbuffer, obj);
	emit_value (logbuffer, len);

	if (do_bt)
		emit_bt (logbuffer, &data);

	EXIT_LOG;
}
1438
/*
 * Emits a GC move event: `num` object pointers, where consecutive pairs
 * encode old/new addresses as produced by the GC.
 */
static void
gc_moves (MonoProfiler *prof, MonoObject *const *objects, uint64_t num)
{
	ENTER_LOG (&gc_moves_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		LEB128_SIZE /* num */ +
		num * (
			LEB128_SIZE /* object */
		)
	);

	emit_event (logbuffer, TYPE_GC_MOVE | TYPE_GC);
	emit_value (logbuffer, num);

	for (int i = 0; i < num; ++i)
		emit_obj (logbuffer, objects [i]);

	EXIT_LOG;
}
1458
/*
 * Emits a GC handle created/destroyed event, optionally with a backtrace.
 * The object is only included for creation events; `op` must be either
 * MONO_PROFILER_GC_HANDLE_CREATED or MONO_PROFILER_GC_HANDLE_DESTROYED.
 */
static void
gc_handle (MonoProfiler *prof, int op, MonoGCHandleType type, uint32_t handle, MonoObject *obj)
{
	int do_bt = !log_config.enter_leave && InterlockedRead (&log_profiler.runtime_inited) && log_config.num_frames;
	FrameData data;

	// Walk the stack before ENTER_LOG; see the collect_bt/emit_bt note.
	if (do_bt)
		collect_bt (&data);

	gint32 *ctr = op == MONO_PROFILER_GC_HANDLE_CREATED ? &gc_handle_creations_ctr : &gc_handle_deletions_ctr;

	ENTER_LOG (ctr, logbuffer,
		EVENT_SIZE /* event */ +
		LEB128_SIZE /* type */ +
		LEB128_SIZE /* handle */ +
		(op == MONO_PROFILER_GC_HANDLE_CREATED ? (
			LEB128_SIZE /* obj */
		) : 0) +
		(do_bt ? (
			LEB128_SIZE /* count */ +
			data.count * (
				LEB128_SIZE /* method */
			)
		) : 0)
	);

	if (op == MONO_PROFILER_GC_HANDLE_CREATED)
		emit_event (logbuffer, (do_bt ? TYPE_GC_HANDLE_CREATED_BT : TYPE_GC_HANDLE_CREATED) | TYPE_GC);
	else if (op == MONO_PROFILER_GC_HANDLE_DESTROYED)
		emit_event (logbuffer, (do_bt ? TYPE_GC_HANDLE_DESTROYED_BT : TYPE_GC_HANDLE_DESTROYED) | TYPE_GC);
	else
		g_assert_not_reached ();

	emit_value (logbuffer, type);
	emit_value (logbuffer, handle);

	if (op == MONO_PROFILER_GC_HANDLE_CREATED)
		emit_obj (logbuffer, obj);

	if (do_bt)
		emit_bt (logbuffer, &data);

	EXIT_LOG;
}
1503
// Profiler API callback: forwards GC handle creation to gc_handle ().
static void
gc_handle_created (MonoProfiler *prof, uint32_t handle, MonoGCHandleType type, MonoObject *obj)
{
	gc_handle (prof, MONO_PROFILER_GC_HANDLE_CREATED, type, handle, obj);
}
1509
// Profiler API callback: forwards GC handle deletion to gc_handle ().
// No object is available (or emitted) for destroyed handles.
static void
gc_handle_deleted (MonoProfiler *prof, uint32_t handle, MonoGCHandleType type)
{
	gc_handle (prof, MONO_PROFILER_GC_HANDLE_DESTROYED, type, handle, NULL);
}
1515
// Emits an event marking the start of a finalization pass.
static void
finalize_begin (MonoProfiler *prof)
{
	ENTER_LOG (&finalize_begins_ctr, buf,
		EVENT_SIZE /* event */
	);

	emit_event (buf, TYPE_GC_FINALIZE_START | TYPE_GC);

	EXIT_LOG;
}
1527
// Marks the end of a finalization pass. Also checks for a pending
// on-demand heapshot (done here regardless of whether finalization
// events themselves are enabled).
static void
finalize_end (MonoProfiler *prof)
{
	trigger_on_demand_heapshot ();
	if (ENABLED (PROFLOG_FINALIZATION_EVENTS)) {
		ENTER_LOG (&finalize_ends_ctr, buf,
			EVENT_SIZE /* event */
		);

		emit_event (buf, TYPE_GC_FINALIZE_END | TYPE_GC);

		EXIT_LOG;
	}
}
1542
// Emits an event marking the start of an individual object's finalizer.
static void
finalize_object_begin (MonoProfiler *prof, MonoObject *obj)
{
	ENTER_LOG (&finalize_object_begins_ctr, buf,
		EVENT_SIZE /* event */ +
		LEB128_SIZE /* obj */
	);

	emit_event (buf, TYPE_GC_FINALIZE_OBJECT_START | TYPE_GC);
	emit_obj (buf, obj);

	EXIT_LOG;
}
1556
// Emits an event marking the end of an individual object's finalizer.
static void
finalize_object_end (MonoProfiler *prof, MonoObject *obj)
{
	ENTER_LOG (&finalize_object_ends_ctr, buf,
		EVENT_SIZE /* event */ +
		LEB128_SIZE /* obj */
	);

	emit_event (buf, TYPE_GC_FINALIZE_OBJECT_END | TYPE_GC);
	emit_obj (buf, obj);

	EXIT_LOG;
}
1570
/*
 * Recursively writes the namespace-qualified name of `klass` into the
 * buffer at `p`, prefixed by its nesting chain separated by '/'. Returns
 * a pointer to the terminating NUL.
 *
 * NOTE(review): no buffer bound is passed in; the caller (type_name)
 * provides a fixed 1024-byte buffer, so deeply nested or very long names
 * could overflow — confirm name lengths are bounded upstream.
 */
static char*
push_nesting (char *p, MonoClass *klass)
{
	MonoClass *nesting;
	const char *name;
	const char *nspace;
	nesting = mono_class_get_nesting_type (klass);
	if (nesting) {
		// Emit the enclosing type first, then a '/' separator.
		p = push_nesting (p, nesting);
		*p++ = '/';
		*p = 0;
	}
	name = mono_class_get_name (klass);
	nspace = mono_class_get_namespace (klass);
	if (*nspace) {
		strcpy (p, nspace);
		p += strlen (nspace);
		*p++ = '.';
		*p = 0;
	}
	strcpy (p, name);
	p += strlen (name);
	return p;
}
1595
1596 static char*
1597 type_name (MonoClass *klass)
1598 {
1599         char buf [1024];
1600         char *p;
1601         push_nesting (buf, klass);
1602         p = (char *) g_malloc (strlen (buf) + 1);
1603         strcpy (p, buf);
1604         return p;
1605 }
1606
// Emits a metadata load event for an image, carrying its pointer and its
// NUL-terminated file name inline.
static void
image_loaded (MonoProfiler *prof, MonoImage *image)
{
	const char *name = mono_image_get_filename (image);
	int nlen = strlen (name) + 1; // include the terminating NUL

	ENTER_LOG (&image_loads_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		BYTE_SIZE /* type */ +
		LEB128_SIZE /* image */ +
		nlen /* name */
	);

	emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
	emit_byte (logbuffer, TYPE_IMAGE);
	emit_ptr (logbuffer, image);
	// The name is copied raw; ENTER_LOG reserved nlen bytes for it.
	memcpy (logbuffer->cursor, name, nlen);
	logbuffer->cursor += nlen;

	EXIT_LOG;
}
1628
// Emits a metadata unload event for an image, carrying its pointer and
// its NUL-terminated file name inline.
static void
image_unloaded (MonoProfiler *prof, MonoImage *image)
{
	const char *name = mono_image_get_filename (image);
	int nlen = strlen (name) + 1; // include the terminating NUL

	ENTER_LOG (&image_unloads_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		BYTE_SIZE /* type */ +
		LEB128_SIZE /* image */ +
		nlen /* name */
	);

	emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
	emit_byte (logbuffer, TYPE_IMAGE);
	emit_ptr (logbuffer, image);
	memcpy (logbuffer->cursor, name, nlen);
	logbuffer->cursor += nlen;

	EXIT_LOG;
}
1650
// Emits a metadata load event for an assembly, carrying the assembly and
// image pointers plus the stringified assembly name inline.
static void
assembly_loaded (MonoProfiler *prof, MonoAssembly *assembly)
{
	char *name = mono_stringify_assembly_name (mono_assembly_get_name (assembly));
	int nlen = strlen (name) + 1; // include the terminating NUL
	MonoImage *image = mono_assembly_get_image (assembly);

	ENTER_LOG (&assembly_loads_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		BYTE_SIZE /* type */ +
		LEB128_SIZE /* assembly */ +
		LEB128_SIZE /* image */ +
		nlen /* name */
	);

	emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
	emit_byte (logbuffer, TYPE_ASSEMBLY);
	emit_ptr (logbuffer, assembly);
	emit_ptr (logbuffer, image);
	memcpy (logbuffer->cursor, name, nlen);
	logbuffer->cursor += nlen;

	EXIT_LOG;

	// The name was allocated by the runtime, so free with mono_free.
	mono_free (name);
}
1677
// Emits a metadata unload event for an assembly, carrying the assembly
// and image pointers plus the stringified assembly name inline.
static void
assembly_unloaded (MonoProfiler *prof, MonoAssembly *assembly)
{
	char *name = mono_stringify_assembly_name (mono_assembly_get_name (assembly));
	int nlen = strlen (name) + 1; // include the terminating NUL
	MonoImage *image = mono_assembly_get_image (assembly);

	ENTER_LOG (&assembly_unloads_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		BYTE_SIZE /* type */ +
		LEB128_SIZE /* assembly */ +
		LEB128_SIZE /* image */ +
		nlen /* name */
	);

	emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
	emit_byte (logbuffer, TYPE_ASSEMBLY);
	emit_ptr (logbuffer, assembly);
	emit_ptr (logbuffer, image);
	memcpy (logbuffer->cursor, name, nlen);
	logbuffer->cursor += nlen;

	EXIT_LOG;

	// The name was allocated by the runtime, so free with mono_free.
	mono_free (name);
}
1704
/*
 * Emits a metadata load event for a class, carrying the class and image
 * pointers plus the full type name inline. The name comes from the
 * runtime once it is initialized, or from the local type_name () fallback
 * during early startup — note the matching allocator-specific free below.
 */
static void
class_loaded (MonoProfiler *prof, MonoClass *klass)
{
	char *name;

	if (InterlockedRead (&log_profiler.runtime_inited))
		name = mono_type_get_name (mono_class_get_type (klass));
	else
		name = type_name (klass);

	int nlen = strlen (name) + 1; // include the terminating NUL
	MonoImage *image = mono_class_get_image (klass);

	ENTER_LOG (&class_loads_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		BYTE_SIZE /* type */ +
		LEB128_SIZE /* klass */ +
		LEB128_SIZE /* image */ +
		nlen /* name */
	);

	emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
	emit_byte (logbuffer, TYPE_CLASS);
	emit_ptr (logbuffer, klass);
	emit_ptr (logbuffer, image);
	memcpy (logbuffer->cursor, name, nlen);
	logbuffer->cursor += nlen;

	EXIT_LOG;

	// Free with the allocator that produced the name (see above).
	if (InterlockedRead (&log_profiler.runtime_inited))
		mono_free (name);
	else
		g_free (name);
}
1740
// Emits a method enter event, unless the per-thread call depth already
// exceeds the configured maximum (the depth counter is still maintained
// so method_leave stays balanced).
static void
method_enter (MonoProfiler *prof, MonoMethod *method)
{
	if (get_thread ()->call_depth++ <= log_config.max_call_depth) {
		ENTER_LOG (&method_entries_ctr, logbuffer,
			EVENT_SIZE /* event */ +
			LEB128_SIZE /* method */
		);

		emit_event (logbuffer, TYPE_ENTER | TYPE_METHOD);
		emit_method (logbuffer, method);

		EXIT_LOG;
	}
}
1756
// Emits a method leave event; the pre-decrement mirrors method_enter's
// post-increment so enters and leaves log at matching depths.
static void
method_leave (MonoProfiler *prof, MonoMethod *method)
{
	if (--get_thread ()->call_depth <= log_config.max_call_depth) {
		ENTER_LOG (&method_exits_ctr, logbuffer,
			EVENT_SIZE /* event */ +
			LEB128_SIZE /* method */
		);

		emit_event (logbuffer, TYPE_LEAVE | TYPE_METHOD);
		emit_method (logbuffer, method);

		EXIT_LOG;
	}
}
1772
// Emits a method leave event for a method unwound by an exception; depth
// handling matches method_leave.
static void
method_exc_leave (MonoProfiler *prof, MonoMethod *method, MonoObject *exc)
{
	if (--get_thread ()->call_depth <= log_config.max_call_depth) {
		ENTER_LOG (&method_exception_exits_ctr, logbuffer,
			EVENT_SIZE /* event */ +
			LEB128_SIZE /* method */
		);

		emit_event (logbuffer, TYPE_EXC_LEAVE | TYPE_METHOD);
		emit_method (logbuffer, method);

		EXIT_LOG;
	}
}
1788
// Call instrumentation filter: request both prologue and epilogue
// instrumentation for every method.
static MonoProfilerCallInstrumentationFlags
method_filter (MonoProfiler *prof, MonoMethod *method)
{
	return MONO_PROFILER_CALL_INSTRUMENTATION_PROLOGUE | MONO_PROFILER_CALL_INSTRUMENTATION_EPILOGUE;
}
1794
// Called when a method finishes JIT compilation: registers the method
// while holding the shared buffer lock.
static void
method_jitted (MonoProfiler *prof, MonoMethod *method, MonoJitInfo *ji)
{
	buffer_lock ();

	register_method_local (method, ji);

	buffer_unlock ();
}
1804
/*
 * Emits a JIT helper code buffer event with the buffer's address, size,
 * and type. For specific trampolines, `data` carries a descriptive name
 * that is included inline; other buffer types carry no name.
 */
static void
code_buffer_new (MonoProfiler *prof, const mono_byte *buffer, uint64_t size, MonoProfilerCodeBufferType type, const void *data)
{
	const char *name;
	int nlen;

	if (type == MONO_PROFILER_CODE_BUFFER_SPECIFIC_TRAMPOLINE) {
		name = (const char *) data;
		nlen = strlen (name) + 1; // include the terminating NUL
	} else {
		name = NULL;
		nlen = 0;
	}

	ENTER_LOG (&code_buffers_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		BYTE_SIZE /* type */ +
		LEB128_SIZE /* buffer */ +
		LEB128_SIZE /* size */ +
		(name ? (
			nlen /* name */
		) : 0)
	);

	emit_event (logbuffer, TYPE_JITHELPER | TYPE_RUNTIME);
	emit_byte (logbuffer, type);
	emit_ptr (logbuffer, buffer);
	emit_value (logbuffer, size);

	if (name) {
		memcpy (logbuffer->cursor, name, nlen);
		logbuffer->cursor += nlen;
	}

	EXIT_LOG;
}
1841
/*
 * Emits an exception throw event for `object`, optionally with a
 * backtrace (when enter/leave instrumentation is off, the runtime is
 * initialized, and frame collection is configured).
 */
static void
throw_exc (MonoProfiler *prof, MonoObject *object)
{
	// do_bt doubles as both a flag and the event type modifier bits.
	int do_bt = (!log_config.enter_leave && InterlockedRead (&log_profiler.runtime_inited) && log_config.num_frames) ? TYPE_THROW_BT : 0;
	FrameData data;

	// Walk the stack before ENTER_LOG; see the collect_bt/emit_bt note.
	if (do_bt)
		collect_bt (&data);

	ENTER_LOG (&exception_throws_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		LEB128_SIZE /* object */ +
		(do_bt ? (
			LEB128_SIZE /* count */ +
			data.count * (
				LEB128_SIZE /* method */
			)
		) : 0)
	);

	emit_event (logbuffer, do_bt | TYPE_EXCEPTION);
	emit_obj (logbuffer, object);

	if (do_bt)
		emit_bt (logbuffer, &data);

	EXIT_LOG;
}
1870
1871 static void
1872 clause_exc (MonoProfiler *prof, MonoMethod *method, uint32_t clause_num, MonoExceptionEnum clause_type, MonoObject *exc)
1873 {
1874         ENTER_LOG (&exception_clauses_ctr, logbuffer,
1875                 EVENT_SIZE /* event */ +
1876                 BYTE_SIZE /* clause type */ +
1877                 LEB128_SIZE /* clause num */ +
1878                 LEB128_SIZE /* method */
1879         );
1880
1881         emit_event (logbuffer, TYPE_EXCEPTION | TYPE_CLAUSE);
1882         emit_byte (logbuffer, clause_type);
1883         emit_value (logbuffer, clause_num);
1884         emit_method (logbuffer, method);
1885         emit_obj (logbuffer, exc);
1886
1887         EXIT_LOG;
1888 }
1889
/*
 * Emits a TYPE_MONITOR event for a monitor operation (contention, acquire,
 * fail). A backtrace (TYPE_MONITOR_BT flag) is attached under the same
 * conditions as throw_exc (): enter/leave off, runtime initialized, and a
 * nonzero frame limit.
 */
static void
monitor_event (MonoProfiler *profiler, MonoObject *object, MonoProfilerMonitorEvent ev)
{
	int do_bt = (!log_config.enter_leave && InterlockedRead (&log_profiler.runtime_inited) && log_config.num_frames) ? TYPE_MONITOR_BT : 0;
	FrameData data;

	if (do_bt)
		collect_bt (&data);

	ENTER_LOG (&monitor_events_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		BYTE_SIZE /* ev */ +
		LEB128_SIZE /* object */ +
		(do_bt ? (
			LEB128_SIZE /* count */ +
			data.count * (
				LEB128_SIZE /* method */
			)
		) : 0)
	);

	emit_event (logbuffer, do_bt | TYPE_MONITOR);
	emit_byte (logbuffer, ev);
	emit_obj (logbuffer, object);

	if (do_bt)
		emit_bt (logbuffer, &data);

	EXIT_LOG;
}
1920
/* A thread started blocking on an object's monitor. */
static void
monitor_contention (MonoProfiler *prof, MonoObject *object)
{
	monitor_event (prof, object, MONO_PROFILER_MONITOR_CONTENTION);
}

/* A previously contended monitor was successfully acquired. */
static void
monitor_acquired (MonoProfiler *prof, MonoObject *object)
{
	monitor_event (prof, object, MONO_PROFILER_MONITOR_DONE);
}

/* A monitor acquisition attempt failed (e.g. timed out). */
static void
monitor_failed (MonoProfiler *prof, MonoObject *object)
{
	monitor_event (prof, object, MONO_PROFILER_MONITOR_FAIL);
}
1938
/*
 * Emits a thread-load metadata event (TYPE_END_LOAD | TYPE_METADATA with
 * a TYPE_THREAD payload) when a managed thread starts.
 */
static void
thread_start (MonoProfiler *prof, uintptr_t tid)
{
	ENTER_LOG (&thread_starts_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		BYTE_SIZE /* type */ +
		LEB128_SIZE /* tid */
	);

	emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
	emit_byte (logbuffer, TYPE_THREAD);
	emit_ptr (logbuffer, (void*) tid);

	EXIT_LOG;
}
1954
/*
 * Emits a thread-unload metadata event and tears down this thread's
 * profiler state: the thread is marked ended, removed from the profiler's
 * thread list, and its TLS slot is cleared. EXIT_LOG_EXPLICIT (NO_SEND)
 * is used so the buffer is not flushed here — presumably it is handed off
 * as part of remove_thread (); confirm against that helper.
 */
static void
thread_end (MonoProfiler *prof, uintptr_t tid)
{
	ENTER_LOG (&thread_ends_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		BYTE_SIZE /* type */ +
		LEB128_SIZE /* tid */
	);

	emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
	emit_byte (logbuffer, TYPE_THREAD);
	emit_ptr (logbuffer, (void*) tid);

	EXIT_LOG_EXPLICIT (NO_SEND);

	MonoProfilerThread *thread = get_thread ();

	thread->ended = TRUE;
	remove_thread (thread);

	PROF_TLS_SET (NULL);
}
1977
/*
 * Emits a metadata event carrying a thread's (new) name. The name is
 * appended raw, including the terminating NUL.
 */
static void
thread_name (MonoProfiler *prof, uintptr_t tid, const char *name)
{
	int len = strlen (name) + 1;

	ENTER_LOG (&thread_names_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		BYTE_SIZE /* type */ +
		LEB128_SIZE /* tid */ +
		len /* name */
	);

	emit_event (logbuffer, TYPE_METADATA);
	emit_byte (logbuffer, TYPE_THREAD);
	emit_ptr (logbuffer, (void*)tid);
	memcpy (logbuffer->cursor, name, len);
	logbuffer->cursor += len;

	EXIT_LOG;
}
1998
/*
 * Emits a domain-load metadata event. The domain is identified by its
 * integer id, encoded as a pointer-sized value.
 */
static void
domain_loaded (MonoProfiler *prof, MonoDomain *domain)
{
	ENTER_LOG (&domain_loads_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		BYTE_SIZE /* type */ +
		LEB128_SIZE /* domain id */
	);

	emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
	emit_byte (logbuffer, TYPE_DOMAIN);
	emit_ptr (logbuffer, (void*)(uintptr_t) mono_domain_get_id (domain));

	EXIT_LOG;
}
2014
/* Emits a domain-unload metadata event, mirroring domain_loaded (). */
static void
domain_unloaded (MonoProfiler *prof, MonoDomain *domain)
{
	ENTER_LOG (&domain_unloads_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		BYTE_SIZE /* type */ +
		LEB128_SIZE /* domain id */
	);

	emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
	emit_byte (logbuffer, TYPE_DOMAIN);
	emit_ptr (logbuffer, (void*)(uintptr_t) mono_domain_get_id (domain));

	EXIT_LOG;
}
2030
/*
 * Emits a metadata event carrying a domain's friendly name. The name is
 * appended raw, including the terminating NUL.
 */
static void
domain_name (MonoProfiler *prof, MonoDomain *domain, const char *name)
{
	int nlen = strlen (name) + 1;

	ENTER_LOG (&domain_names_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		BYTE_SIZE /* type */ +
		LEB128_SIZE /* domain id */ +
		nlen /* name */
	);

	emit_event (logbuffer, TYPE_METADATA);
	emit_byte (logbuffer, TYPE_DOMAIN);
	emit_ptr (logbuffer, (void*)(uintptr_t) mono_domain_get_id (domain));
	memcpy (logbuffer->cursor, name, nlen);
	logbuffer->cursor += nlen;

	EXIT_LOG;
}
2051
/*
 * Emits a context-load metadata event, identifying the context by its id
 * and the id of its owning domain.
 */
static void
context_loaded (MonoProfiler *prof, MonoAppContext *context)
{
	ENTER_LOG (&context_loads_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		BYTE_SIZE /* type */ +
		LEB128_SIZE /* context id */ +
		LEB128_SIZE /* domain id */
	);

	emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
	emit_byte (logbuffer, TYPE_CONTEXT);
	emit_ptr (logbuffer, (void*)(uintptr_t) mono_context_get_id (context));
	emit_ptr (logbuffer, (void*)(uintptr_t) mono_context_get_domain_id (context));

	EXIT_LOG;
}
2069
/* Emits a context-unload metadata event, mirroring context_loaded (). */
static void
context_unloaded (MonoProfiler *prof, MonoAppContext *context)
{
	ENTER_LOG (&context_unloads_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		BYTE_SIZE /* type */ +
		LEB128_SIZE /* context id */ +
		LEB128_SIZE /* domain id */
	);

	emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
	emit_byte (logbuffer, TYPE_CONTEXT);
	emit_ptr (logbuffer, (void*)(uintptr_t) mono_context_get_id (context));
	emit_ptr (logbuffer, (void*)(uintptr_t) mono_context_get_domain_id (context));

	EXIT_LOG;
}
2087
/* One managed stack frame captured during an async (signal-time) stack walk. */
typedef struct {
	MonoMethod *method;
	MonoDomain *domain;
	void *base_address;
	int offset;
} AsyncFrameInfo;

/*
 * A single sampling-profiler hit, queued from the sample callback to the
 * dumper thread. 'frames' is a variable-length trailing array
 * (MONO_ZERO_LEN_ARRAY idiom) sized via SAMPLE_SLOT_SIZE ().
 */
typedef struct {
	MonoLockFreeQueueNode node;
	uint64_t time;
	uintptr_t tid;
	const void *ip;
	int count; /* number of valid entries in frames */
	AsyncFrameInfo frames [MONO_ZERO_LEN_ARRAY];
} SampleHit;
2103
2104 static mono_bool
2105 async_walk_stack (MonoMethod *method, MonoDomain *domain, void *base_address, int offset, void *data)
2106 {
2107         SampleHit *sample = (SampleHit *) data;
2108
2109         if (sample->count < log_config.num_frames) {
2110                 int i = sample->count;
2111
2112                 sample->frames [i].method = method;
2113                 sample->frames [i].domain = domain;
2114                 sample->frames [i].base_address = base_address;
2115                 sample->frames [i].offset = offset;
2116
2117                 sample->count++;
2118         }
2119
2120         return sample->count == log_config.num_frames;
2121 }
2122
/* Byte size of a SampleHit that can hold FRAMES stack frames. */
#define SAMPLE_SLOT_SIZE(FRAMES) (sizeof (SampleHit) + sizeof (AsyncFrameInfo) * (FRAMES - MONO_ZERO_LEN_ARRAY))
/* Allocation block granularity for the lock-free sample allocator. */
#define SAMPLE_BLOCK_SIZE (mono_pagesize ())
2125
/*
 * Hazard-pointer callback: once no thread can still reference the sample,
 * un-poison its queue node, push it onto the dumper queue, and wake the
 * dumper thread.
 */
static void
enqueue_sample_hit (gpointer p)
{
	SampleHit *sample = p;

	mono_lock_free_queue_node_unpoison (&sample->node);
	mono_lock_free_queue_enqueue (&log_profiler.dumper_queue, &sample->node);
	mono_os_sem_post (&log_profiler.dumper_queue_sem);
}
2135
/*
 * Sampling callback, run on the sampled thread (signal context on POSIX).
 * Grabs a reusable SampleHit (or allocates one, bounded by
 * max_allocated_sample_hits), records time/tid/ip plus an async-safe
 * managed backtrace, and hands the sample to the dumper thread.
 */
static void
mono_sample_hit (MonoProfiler *profiler, const mono_byte *ip, const void *context)
{
	/*
	 * Please note: We rely on the runtime loading the profiler with
	 * MONO_DL_EAGER (RTLD_NOW) so that references to runtime functions within
	 * this function (and its siblings) are resolved when the profiler is
	 * loaded. Otherwise, we would potentially invoke the dynamic linker when
	 * invoking runtime functions, which is not async-signal-safe.
	 */

	if (InterlockedRead (&log_profiler.in_shutdown))
		return;

	/* Prefer recycling a sample the dumper thread has already processed. */
	SampleHit *sample = (SampleHit *) mono_lock_free_queue_dequeue (&profiler->sample_reuse_queue);

	if (!sample) {
		/*
		 * If we're out of reusable sample events and we're not allowed to
		 * allocate more, we have no choice but to drop the event.
		 */
		if (InterlockedRead (&sample_allocations_ctr) >= log_config.max_allocated_sample_hits)
			return;

		sample = mono_lock_free_alloc (&profiler->sample_allocator);
		mono_lock_free_queue_node_init (&sample->node, TRUE);

		InterlockedIncrement (&sample_allocations_ctr);
	}

	sample->count = 0;
	mono_stack_walk_async_safe (&async_walk_stack, (void *) context, sample);

	sample->time = current_time ();
	sample->tid = thread_id ();
	sample->ip = ip;

	/* Enqueue is deferred until the sample is safe from hazardous readers. */
	mono_thread_hazardous_try_free (sample, enqueue_sample_hit);
}
2175
2176 static uintptr_t *code_pages = 0;
2177 static int num_code_pages = 0;
2178 static int size_code_pages = 0;
2179 #define CPAGE_SHIFT (9)
2180 #define CPAGE_SIZE (1 << CPAGE_SHIFT)
2181 #define CPAGE_MASK (~(CPAGE_SIZE - 1))
2182 #define CPAGE_ADDR(p) ((p) & CPAGE_MASK)
2183
2184 static uintptr_t
2185 add_code_page (uintptr_t *hash, uintptr_t hsize, uintptr_t page)
2186 {
2187         uintptr_t i;
2188         uintptr_t start_pos;
2189         start_pos = (page >> CPAGE_SHIFT) % hsize;
2190         i = start_pos;
2191         do {
2192                 if (hash [i] && CPAGE_ADDR (hash [i]) == CPAGE_ADDR (page)) {
2193                         return 0;
2194                 } else if (!hash [i]) {
2195                         hash [i] = page;
2196                         return 1;
2197                 }
2198                 /* wrap around */
2199                 if (++i == hsize)
2200                         i = 0;
2201         } while (i != start_pos);
2202         g_assert_not_reached ();
2203         return 0;
2204 }
2205
2206 static void
2207 add_code_pointer (uintptr_t ip)
2208 {
2209         uintptr_t i;
2210         if (num_code_pages * 2 >= size_code_pages) {
2211                 uintptr_t *n;
2212                 uintptr_t old_size = size_code_pages;
2213                 size_code_pages *= 2;
2214                 if (size_code_pages == 0)
2215                         size_code_pages = 16;
2216                 n = (uintptr_t *) g_calloc (sizeof (uintptr_t) * size_code_pages, 1);
2217                 for (i = 0; i < old_size; ++i) {
2218                         if (code_pages [i])
2219                                 add_code_page (n, size_code_pages, code_pages [i]);
2220                 }
2221                 if (code_pages)
2222                         g_free (code_pages);
2223                 code_pages = n;
2224         }
2225         num_code_pages += add_code_page (code_pages, size_code_pages, ip & CPAGE_MASK);
2226 }
2227
/* ELF code crashes on some systems. */
//#if defined(HAVE_DL_ITERATE_PHDR) && defined(ELFMAG0)
#if 0
/*
 * Emits a TYPE_SAMPLE_UBIN event describing an unmanaged binary mapped
 * into the process, so sample addresses can be symbolified offline.
 * Currently compiled out (see the note above).
 *
 * Fix: the size request referenced an undeclared identifier 'nlen'; the
 * variable is named 'len', so this function would not compile if the
 * surrounding #if 0 were ever removed.
 */
static void
dump_ubin (const char *filename, uintptr_t load_addr, uint64_t offset, uintptr_t size)
{
	int len = strlen (filename) + 1;

	ENTER_LOG (&sample_ubins_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		LEB128_SIZE /* load address */ +
		LEB128_SIZE /* offset */ +
		LEB128_SIZE /* size */ +
		len /* file name */
	);

	emit_event (logbuffer, TYPE_SAMPLE | TYPE_SAMPLE_UBIN);
	emit_ptr (logbuffer, load_addr);
	emit_uvalue (logbuffer, offset);
	emit_uvalue (logbuffer, size);
	memcpy (logbuffer->cursor, filename, len);
	logbuffer->cursor += len;

	EXIT_LOG;
}
#endif
2254
/*
 * Emits a TYPE_SAMPLE_USYM event mapping an unmanaged symbol name to its
 * address and (possibly zero/unknown) size. The name is appended raw,
 * including the terminating NUL.
 */
static void
dump_usym (const char *name, uintptr_t value, uintptr_t size)
{
	int len = strlen (name) + 1;

	ENTER_LOG (&sample_usyms_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		LEB128_SIZE /* value */ +
		LEB128_SIZE /* size */ +
		len /* name */
	);

	emit_event (logbuffer, TYPE_SAMPLE | TYPE_SAMPLE_USYM);
	emit_ptr (logbuffer, (void*)value);
	emit_value (logbuffer, size);
	memcpy (logbuffer->cursor, name, len);
	logbuffer->cursor += len;

	EXIT_LOG;
}
2275
/* ELF code crashes on some systems. */
//#if defined(ELFMAG0)
#if 0

/* Select 32- vs. 64-bit ELF structures to match the host pointer size. */
#if SIZEOF_VOID_P == 4
#define ELF_WSIZE 32
#else
#define ELF_WSIZE 64
#endif
/* glibc-style ElfW(): ElfW(Sym) expands to Elf32_Sym or Elf64_Sym, etc. */
#ifndef ElfW
#define ElfW(type)      _ElfW (Elf, ELF_WSIZE, type)
#define _ElfW(e,w,t)    _ElfW_1 (e, w, _##t)
#define _ElfW_1(e,w,t)  e##w##t
#endif
2290
/*
 * Emits a TYPE_SAMPLE_USYM event for every named function symbol
 * (STT_FUNC with a nonzero name and size) in an ELF symbol table.
 * Part of the disabled ELF path (see the #if 0 above).
 */
static void
dump_elf_symbols (ElfW(Sym) *symbols, int num_symbols, const char *strtab, void *load_addr)
{
	int i;
	for (i = 0; i < num_symbols; ++i) {
		const char* sym;
		sym =  strtab + symbols [i].st_name;
		if (!symbols [i].st_name || !symbols [i].st_size || (symbols [i].st_info & 0xf) != STT_FUNC)
			continue;
		/* st_value is relative to the module's load address. */
		dump_usym (sym, (uintptr_t)load_addr + symbols [i].st_value, symbols [i].st_size);
	}
}
2303
/*
 * mmap()s an ELF file, locates its SHT_SYMTAB section and associated
 * string table, and dumps all function symbols via dump_elf_symbols ().
 * Returns 1 on success, 0 on any failure. Part of the disabled ELF path.
 *
 * NOTE(review): the 'prof' parameter is unused, and elf_dl_callback ()
 * below calls this with only two arguments — the signatures disagree and
 * would need reconciling before re-enabling this code.
 */
static int
read_elf_symbols (MonoProfiler *prof, const char *filename, void *load_addr)
{
	int fd, i;
	void *data;
	struct stat statb;
	uint64_t file_size;
	ElfW(Ehdr) *header;
	ElfW(Shdr) *sheader;
	ElfW(Shdr) *shstrtabh;
	ElfW(Shdr) *symtabh = NULL;
	ElfW(Shdr) *strtabh = NULL;
	ElfW(Sym) *symbols = NULL;
	const char *strtab;
	int num_symbols;

	fd = open (filename, O_RDONLY);
	if (fd < 0)
		return 0;
	if (fstat (fd, &statb) != 0) {
		close (fd);
		return 0;
	}
	file_size = statb.st_size;
	data = mmap (NULL, file_size, PROT_READ, MAP_PRIVATE, fd, 0);
	close (fd);
	if (data == MAP_FAILED)
		return 0;
	header = data;
	/* Validate the ELF magic before trusting any header fields. */
	if (header->e_ident [EI_MAG0] != ELFMAG0 ||
			header->e_ident [EI_MAG1] != ELFMAG1 ||
			header->e_ident [EI_MAG2] != ELFMAG2 ||
			header->e_ident [EI_MAG3] != ELFMAG3 ) {
		munmap (data, file_size);
		return 0;
	}
	sheader = (void*)((char*)data + header->e_shoff);
	shstrtabh = (void*)((char*)sheader + (header->e_shentsize * header->e_shstrndx));
	/* This assignment is overwritten below once strtabh is found. */
	strtab = (const char*)data + shstrtabh->sh_offset;
	/* Find the symbol table section and its linked string table. */
	for (i = 0; i < header->e_shnum; ++i) {
		if (sheader->sh_type == SHT_SYMTAB) {
			symtabh = sheader;
			strtabh = (void*)((char*)data + header->e_shoff + sheader->sh_link * header->e_shentsize);
			break;
		}
		sheader = (void*)((char*)sheader + header->e_shentsize);
	}
	if (!symtabh || !strtabh) {
		munmap (data, file_size);
		return 0;
	}
	strtab = (const char*)data + strtabh->sh_offset;
	num_symbols = symtabh->sh_size / symtabh->sh_entsize;
	symbols = (void*)((char*)data + symtabh->sh_offset);
	dump_elf_symbols (symbols, num_symbols, strtab, load_addr);
	munmap (data, file_size);
	return 1;
}
#endif
2363
/* ELF code crashes on some systems. */
//#if defined(HAVE_DL_ITERATE_PHDR) && defined(ELFMAG0)
#if 0
/*
 * dl_iterate_phdr () callback: records each loaded module once (tracked
 * via log_profiler.binary_objects), emits a TYPE_SAMPLE_UBIN event for
 * its PT_LOAD segment, and dumps its symbols — preferring the on-disk
 * symtab (read_elf_symbols ()) and falling back to the in-memory dynamic
 * symbol table located through PT_DYNAMIC. Part of the disabled ELF path.
 *
 * NOTE(review): read_elf_symbols () is called here with two arguments but
 * is declared with three (prof, filename, load_addr) — this would not
 * compile if re-enabled.
 */
static int
elf_dl_callback (struct dl_phdr_info *info, size_t size, void *data)
{
	char buf [256];
	const char *filename;
	BinaryObject *obj;
	char *a = (void*)info->dlpi_addr;
	int i, num_sym;
	ElfW(Dyn) *dyn = NULL;
	ElfW(Sym) *symtab = NULL;
	ElfW(Word) *hash_table = NULL;
	ElfW(Ehdr) *header = NULL;
	const char* strtab = NULL;
	/* Skip modules we have already recorded. */
	for (obj = log_profiler.binary_objects; obj; obj = obj->next) {
		if (obj->addr == a)
			return 0;
	}
	filename = info->dlpi_name;
	if (!filename)
		return 0;
	/* The main executable reports an empty name; resolve it via /proc. */
	if (!info->dlpi_addr && !filename [0]) {
		int l = readlink ("/proc/self/exe", buf, sizeof (buf) - 1);
		if (l > 0) {
			buf [l] = 0;
			filename = buf;
		}
	}
	obj = g_calloc (sizeof (BinaryObject), 1);
	obj->addr = (void*)info->dlpi_addr;
	obj->name = pstrdup (filename);
	obj->next = log_profiler.binary_objects;
	log_profiler.binary_objects = obj;
	a = NULL;
	for (i = 0; i < info->dlpi_phnum; ++i) {
		if (info->dlpi_phdr[i].p_type == PT_LOAD && !header) {
			header = (ElfW(Ehdr)*)(info->dlpi_addr + info->dlpi_phdr[i].p_vaddr);
			if (header->e_ident [EI_MAG0] != ELFMAG0 ||
					header->e_ident [EI_MAG1] != ELFMAG1 ||
					header->e_ident [EI_MAG2] != ELFMAG2 ||
					header->e_ident [EI_MAG3] != ELFMAG3 ) {
				header = NULL;
			}
			dump_ubin (filename, info->dlpi_addr + info->dlpi_phdr[i].p_vaddr, info->dlpi_phdr[i].p_offset, info->dlpi_phdr[i].p_memsz);
		} else if (info->dlpi_phdr[i].p_type == PT_DYNAMIC) {
			dyn = (ElfW(Dyn) *)(info->dlpi_addr + info->dlpi_phdr[i].p_vaddr);
		}
	}
	if (read_elf_symbols (filename, (void*)info->dlpi_addr))
		return 0;
	if (!info->dlpi_name || !info->dlpi_name[0])
		return 0;
	if (!dyn)
		return 0;
	/* Fall back to the in-memory dynamic symbol/hash/string tables. */
	for (i = 0; dyn [i].d_tag != DT_NULL; ++i) {
		if (dyn [i].d_tag == DT_SYMTAB) {
			symtab = (ElfW(Sym) *)(a + dyn [i].d_un.d_ptr);
		} else if (dyn [i].d_tag == DT_HASH) {
			hash_table = (ElfW(Word) *)(a + dyn [i].d_un.d_ptr);
		} else if (dyn [i].d_tag == DT_STRTAB) {
			strtab = (const char*)(a + dyn [i].d_un.d_ptr);
		}
	}
	if (!hash_table)
		return 0;
	/* DT_HASH layout: [0] = nbucket, [1] = nchain == number of symbols. */
	num_sym = hash_table [1];
	dump_elf_symbols (symtab, num_sym, strtab, (void*)info->dlpi_addr);
	return 0;
}

/* Walks all loaded modules, dumping binaries and symbols. Returns 1 when
 * this path handled symbol extraction itself. */
static int
load_binaries (void)
{
	dl_iterate_phdr (elf_dl_callback, NULL);
	return 1;
}
#else
/* Stub: no module enumeration available; callers fall back to
 * per-address symbol lookup (see dump_unmanaged_coderefs ()). */
static int
load_binaries (void)
{
	return 0;
}
#endif
2449
2450 static const char*
2451 symbol_for (uintptr_t code)
2452 {
2453 #ifdef HAVE_DLADDR
2454         void *ip = (void*)code;
2455         Dl_info di;
2456         if (dladdr (ip, &di)) {
2457                 if (di.dli_sname)
2458                         return di.dli_sname;
2459         } else {
2460         /*      char **names;
2461                 names = backtrace_symbols (&ip, 1);
2462                 if (names) {
2463                         const char* p = names [0];
2464                         g_free (names);
2465                         return p;
2466                 }
2467                 */
2468         }
2469 #endif
2470         return NULL;
2471 }
2472
/*
 * Emits symbol info (via dump_usym ()) for every recorded native code
 * page that has not been dumped yet. If load_binaries () handled symbol
 * extraction itself (the ELF path), nothing more is done here.
 */
static void
dump_unmanaged_coderefs (void)
{
	int i;
	const char* last_symbol;
	uintptr_t addr, page_end;

	if (load_binaries ())
		return;
	for (i = 0; i < size_code_pages; ++i) {
		const char* sym;
		/* The low bit of an entry marks a page already dumped. */
		if (!code_pages [i] || code_pages [i] & 1)
			continue;
		last_symbol = NULL;
		addr = CPAGE_ADDR (code_pages [i]);
		page_end = addr + CPAGE_SIZE;
		code_pages [i] |= 1;
		/* we dump the symbols for the whole page */
		for (; addr < page_end; addr += 16) {
			sym = symbol_for (addr);
			/* Only emit when the symbol changes; skip unnamed stretches. */
			if (sym && sym == last_symbol)
				continue;
			last_symbol = sym;
			if (!sym)
				continue;
			dump_usym (sym, addr, 0); /* let's not guess the size */
		}
	}
}
2502
2503 static void
2504 counters_add_agent (MonoCounter *counter)
2505 {
2506         if (InterlockedRead (&log_profiler.in_shutdown))
2507                 return;
2508
2509         MonoCounterAgent *agent, *item;
2510
2511         mono_os_mutex_lock (&log_profiler.counters_mutex);
2512
2513         for (agent = log_profiler.counters; agent; agent = agent->next) {
2514                 if (agent->counter == counter) {
2515                         agent->value_size = 0;
2516                         if (agent->value) {
2517                                 g_free (agent->value);
2518                                 agent->value = NULL;
2519                         }
2520                         goto done;
2521                 }
2522         }
2523
2524         agent = (MonoCounterAgent *) g_malloc (sizeof (MonoCounterAgent));
2525         agent->counter = counter;
2526         agent->value = NULL;
2527         agent->value_size = 0;
2528         agent->index = log_profiler.counters_index++;
2529         agent->emitted = FALSE;
2530         agent->next = NULL;
2531
2532         if (!log_profiler.counters) {
2533                 log_profiler.counters = agent;
2534         } else {
2535                 item = log_profiler.counters;
2536                 while (item->next)
2537                         item = item->next;
2538                 item->next = agent;
2539         }
2540
2541 done:
2542         mono_os_mutex_unlock (&log_profiler.counters_mutex);
2543 }
2544
/* mono_counters_foreach () callback: register every existing counter. */
static mono_bool
counters_init_foreach_callback (MonoCounter *counter, gpointer data)
{
	counters_add_agent (counter);
	return TRUE; /* keep iterating */
}
2551
/*
 * Sets up counter tracking: initializes the mutex, reserves index 0
 * (agent indices start at 1), subscribes to future counter registrations,
 * and registers all counters that already exist.
 */
static void
counters_init (void)
{
	mono_os_mutex_init (&log_profiler.counters_mutex);

	log_profiler.counters_index = 1;

	mono_counters_on_register (&counters_add_agent);
	mono_counters_foreach (counters_init_foreach_callback, NULL);
}
2562
/*
 * Emits a TYPE_SAMPLE_COUNTERS_DESC event describing every counter agent
 * not yet announced in the log. Two passes over the agent list: the first
 * sizes the event and counts pending descriptors, the second emits them
 * and marks the agents as emitted. counters_mutex is held throughout so
 * the list and the 'emitted' flags stay consistent.
 */
static void
counters_emit (void)
{
	MonoCounterAgent *agent;
	int len = 0;
	int size =
		EVENT_SIZE /* event */ +
		LEB128_SIZE /* len */
	;

	mono_os_mutex_lock (&log_profiler.counters_mutex);

	for (agent = log_profiler.counters; agent; agent = agent->next) {
		if (agent->emitted)
			continue;

		size +=
			LEB128_SIZE /* section */ +
			strlen (mono_counter_get_name (agent->counter)) + 1 /* name */ +
			BYTE_SIZE /* type */ +
			BYTE_SIZE /* unit */ +
			BYTE_SIZE /* variance */ +
			LEB128_SIZE /* index */
		;

		len++;
	}

	/* Nothing new to describe. */
	if (!len)
		goto done;

	ENTER_LOG (&counter_descriptors_ctr, logbuffer, size);

	emit_event (logbuffer, TYPE_SAMPLE_COUNTERS_DESC | TYPE_SAMPLE);
	emit_value (logbuffer, len);

	for (agent = log_profiler.counters; agent; agent = agent->next) {
		const char *name;

		if (agent->emitted)
			continue;

		name = mono_counter_get_name (agent->counter);
		emit_value (logbuffer, mono_counter_get_section (agent->counter));
		emit_string (logbuffer, name, strlen (name) + 1);
		emit_byte (logbuffer, mono_counter_get_type (agent->counter));
		emit_byte (logbuffer, mono_counter_get_unit (agent->counter));
		emit_byte (logbuffer, mono_counter_get_variance (agent->counter));
		emit_value (logbuffer, agent->index);

		agent->emitted = TRUE;
	}

	EXIT_LOG;

done:
	mono_os_mutex_unlock (&log_profiler.counters_mutex);
}
2621
2622 static void
2623 counters_sample (uint64_t timestamp)
2624 {
2625         MonoCounterAgent *agent;
2626         MonoCounter *counter;
2627         int type;
2628         int buffer_size;
2629         void *buffer;
2630         int size;
2631
2632         counters_emit ();
2633
2634         buffer_size = 8;
2635         buffer = g_calloc (1, buffer_size);
2636
2637         mono_os_mutex_lock (&log_profiler.counters_mutex);
2638
2639         size =
2640                 EVENT_SIZE /* event */
2641         ;
2642
2643         for (agent = log_profiler.counters; agent; agent = agent->next) {
2644                 size +=
2645                         LEB128_SIZE /* index */ +
2646                         BYTE_SIZE /* type */ +
2647                         mono_counter_get_size (agent->counter) /* value */
2648                 ;
2649         }
2650
2651         size +=
2652                 LEB128_SIZE /* stop marker */
2653         ;
2654
2655         ENTER_LOG (&counter_samples_ctr, logbuffer, size);
2656
2657         emit_event_time (logbuffer, TYPE_SAMPLE_COUNTERS | TYPE_SAMPLE, timestamp);
2658
2659         for (agent = log_profiler.counters; agent; agent = agent->next) {
2660                 size_t size;
2661
2662                 counter = agent->counter;
2663
2664                 size = mono_counter_get_size (counter);
2665
2666                 if (size > buffer_size) {
2667                         buffer_size = size;
2668                         buffer = g_realloc (buffer, buffer_size);
2669                 }
2670
2671                 memset (buffer, 0, buffer_size);
2672
2673                 g_assert (mono_counters_sample (counter, buffer, size));
2674
2675                 type = mono_counter_get_type (counter);
2676
2677                 if (!agent->value) {
2678                         agent->value = g_calloc (1, size);
2679                         agent->value_size = size;
2680                 } else {
2681                         if (type == MONO_COUNTER_STRING) {
2682                                 if (strcmp (agent->value, buffer) == 0)
2683                                         continue;
2684                         } else {
2685                                 if (agent->value_size == size && memcmp (agent->value, buffer, size) == 0)
2686                                         continue;
2687                         }
2688                 }
2689
2690                 emit_uvalue (logbuffer, agent->index);
2691                 emit_byte (logbuffer, type);
2692                 switch (type) {
2693                 case MONO_COUNTER_INT:
2694 #if SIZEOF_VOID_P == 4
2695                 case MONO_COUNTER_WORD:
2696 #endif
2697                         emit_svalue (logbuffer, *(int*)buffer - *(int*)agent->value);
2698                         break;
2699                 case MONO_COUNTER_UINT:
2700                         emit_uvalue (logbuffer, *(guint*)buffer - *(guint*)agent->value);
2701                         break;
2702                 case MONO_COUNTER_TIME_INTERVAL:
2703                 case MONO_COUNTER_LONG:
2704 #if SIZEOF_VOID_P == 8
2705                 case MONO_COUNTER_WORD:
2706 #endif
2707                         emit_svalue (logbuffer, *(gint64*)buffer - *(gint64*)agent->value);
2708                         break;
2709                 case MONO_COUNTER_ULONG:
2710                         emit_uvalue (logbuffer, *(guint64*)buffer - *(guint64*)agent->value);
2711                         break;
2712                 case MONO_COUNTER_DOUBLE:
2713                         emit_double (logbuffer, *(double*)buffer);
2714                         break;
2715                 case MONO_COUNTER_STRING:
2716                         if (size == 0) {
2717                                 emit_byte (logbuffer, 0);
2718                         } else {
2719                                 emit_byte (logbuffer, 1);
2720                                 emit_string (logbuffer, (char*)buffer, size);
2721                         }
2722                         break;
2723                 default:
2724                         g_assert_not_reached ();
2725                 }
2726
2727                 if (type == MONO_COUNTER_STRING && size > agent->value_size) {
2728                         agent->value = g_realloc (agent->value, size);
2729                         agent->value_size = size;
2730                 }
2731
2732                 if (size > 0)
2733                         memcpy (agent->value, buffer, size);
2734         }
2735         g_free (buffer);
2736
2737         emit_value (logbuffer, 0);
2738
2739         EXIT_LOG;
2740
2741         mono_os_mutex_unlock (&log_profiler.counters_mutex);
2742 }
2743
/*
 * Emit a TYPE_SAMPLE_COUNTERS_DESC event describing every perf counter
 * agent that has not yet been announced in the log stream.
 *
 * Two passes over log_profiler.perfcounters: the first computes the
 * exact buffer size the event needs (it must account for the same
 * fields the second pass writes), the second emits the descriptors and
 * marks each agent as emitted. Called from perfcounters_sample () with
 * log_profiler.counters_mutex held.
 */
static void
perfcounters_emit (void)
{
	PerfCounterAgent *pcagent;
	int len = 0; /* number of descriptors we will emit */
	int size =
		EVENT_SIZE /* event */ +
		LEB128_SIZE /* len */
	;

	/* Size pass: account for each unemitted agent's descriptor. */
	for (pcagent = log_profiler.perfcounters; pcagent; pcagent = pcagent->next) {
		if (pcagent->emitted)
			continue;

		size +=
			LEB128_SIZE /* section */ +
			strlen (pcagent->category_name) + 1 /* category name */ +
			strlen (pcagent->name) + 1 /* name */ +
			BYTE_SIZE /* type */ +
			BYTE_SIZE /* unit */ +
			BYTE_SIZE /* variance */ +
			LEB128_SIZE /* index */
		;

		len++;
	}

	/* Nothing new to describe since the last emission. */
	if (!len)
		return;

	ENTER_LOG (&perfcounter_descriptors_ctr, logbuffer, size);

	emit_event (logbuffer, TYPE_SAMPLE_COUNTERS_DESC | TYPE_SAMPLE);
	emit_value (logbuffer, len);

	/* Emit pass: one descriptor per unemitted agent. */
	for (pcagent = log_profiler.perfcounters; pcagent; pcagent = pcagent->next) {
		if (pcagent->emitted)
			continue;

		emit_value (logbuffer, MONO_COUNTER_PERFCOUNTERS);
		emit_string (logbuffer, pcagent->category_name, strlen (pcagent->category_name) + 1);
		emit_string (logbuffer, pcagent->name, strlen (pcagent->name) + 1);
		emit_byte (logbuffer, MONO_COUNTER_LONG);
		emit_byte (logbuffer, MONO_COUNTER_RAW);
		emit_byte (logbuffer, MONO_COUNTER_VARIABLE);
		emit_value (logbuffer, pcagent->index);

		pcagent->emitted = TRUE;
	}

	EXIT_LOG;
}
2796
2797 static gboolean
2798 perfcounters_foreach (char *category_name, char *name, unsigned char type, gint64 value, gpointer user_data)
2799 {
2800         PerfCounterAgent *pcagent;
2801
2802         for (pcagent = log_profiler.perfcounters; pcagent; pcagent = pcagent->next) {
2803                 if (strcmp (pcagent->category_name, category_name) != 0 || strcmp (pcagent->name, name) != 0)
2804                         continue;
2805                 if (pcagent->value == value)
2806                         return TRUE;
2807
2808                 pcagent->value = value;
2809                 pcagent->updated = TRUE;
2810                 pcagent->deleted = FALSE;
2811                 return TRUE;
2812         }
2813
2814         pcagent = g_new0 (PerfCounterAgent, 1);
2815         pcagent->next = log_profiler.perfcounters;
2816         pcagent->index = log_profiler.counters_index++;
2817         pcagent->category_name = g_strdup (category_name);
2818         pcagent->name = g_strdup (name);
2819         pcagent->value = value;
2820         pcagent->emitted = FALSE;
2821         pcagent->updated = TRUE;
2822         pcagent->deleted = FALSE;
2823
2824         log_profiler.perfcounters = pcagent;
2825
2826         return TRUE;
2827 }
2828
/*
 * Sample all runtime perf counters and emit a TYPE_SAMPLE_COUNTERS
 * event containing only the counters whose value changed since the
 * previous sample. Holds log_profiler.counters_mutex for the whole
 * operation.
 */
static void
perfcounters_sample (uint64_t timestamp)
{
	PerfCounterAgent *pcagent;
	int len = 0; /* number of counter samples to emit */
	int size;

	mono_os_mutex_lock (&log_profiler.counters_mutex);

	/* mark all perfcounters as deleted, foreach will unmark them as necessary */
	for (pcagent = log_profiler.perfcounters; pcagent; pcagent = pcagent->next)
		pcagent->deleted = TRUE;

	mono_perfcounter_foreach (perfcounters_foreach, NULL);

	/* Describe any counters that are new to the log stream. */
	perfcounters_emit ();

	size =
		EVENT_SIZE /* event */
	;

	/* Size pass: must account for exactly what the emit pass writes. */
	for (pcagent = log_profiler.perfcounters; pcagent; pcagent = pcagent->next) {
		if (pcagent->deleted || !pcagent->updated)
			continue;

		size +=
			LEB128_SIZE /* index */ +
			BYTE_SIZE /* type */ +
			LEB128_SIZE /* value */
		;

		len++;
	}

	/* Nothing changed; skip the event entirely. */
	if (!len)
		goto done;

	size +=
		LEB128_SIZE /* stop marker */
	;

	ENTER_LOG (&perfcounter_samples_ctr, logbuffer, size);

	emit_event_time (logbuffer, TYPE_SAMPLE_COUNTERS | TYPE_SAMPLE, timestamp);

	/* Emit pass: one (index, type, value) triple per changed counter. */
	for (pcagent = log_profiler.perfcounters; pcagent; pcagent = pcagent->next) {
		if (pcagent->deleted || !pcagent->updated)
			continue;
		emit_uvalue (logbuffer, pcagent->index);
		emit_byte (logbuffer, MONO_COUNTER_LONG);
		emit_svalue (logbuffer, pcagent->value);

		pcagent->updated = FALSE;
	}

	/* A zero index terminates the sample list. */
	emit_value (logbuffer, 0);

	EXIT_LOG;

done:
	mono_os_mutex_unlock (&log_profiler.counters_mutex);
}
2891
/*
 * Sample both counter families with a single shared timestamp so the
 * resulting events line up in the log.
 */
static void
counters_and_perfcounters_sample (void)
{
	uint64_t timestamp = current_time ();

	counters_sample (timestamp);
	perfcounters_sample (timestamp);
}
2900
/* Lock-free queue node carrying one method pending coverage reporting. */
typedef struct {
	MonoLockFreeQueueNode node; /* must be first: nodes are cast to/from this */
	MonoMethod *method;
} MethodNode;
2905
/* One covered IL statement, as recorded by obtain_coverage_for_method (). */
typedef struct {
	int offset;     /* IL offset, delta-encoded against the previous entry */
	int counter;    /* hit count for the statement */
	char *filename; /* owned; freed by free_coverage_entry () */
	int line;
	int column;
} CoverageEntry;
2913
2914 static void
2915 free_coverage_entry (gpointer data, gpointer userdata)
2916 {
2917         CoverageEntry *entry = (CoverageEntry *)data;
2918         g_free (entry->filename);
2919         g_free (entry);
2920 }
2921
2922 static void
2923 obtain_coverage_for_method (MonoProfiler *prof, const MonoProfilerCoverageData *entry)
2924 {
2925         int offset = entry->il_offset - log_profiler.coverage_previous_offset;
2926         CoverageEntry *e = g_new (CoverageEntry, 1);
2927
2928         log_profiler.coverage_previous_offset = entry->il_offset;
2929
2930         e->offset = offset;
2931         e->counter = entry->counter;
2932         e->filename = g_strdup(entry->file_name ? entry->file_name : "");
2933         e->line = entry->line;
2934         e->column = entry->column;
2935
2936         g_ptr_array_add (log_profiler.coverage_data, e);
2937 }
2938
/*
 * Rewrite a generic type name for coverage output: the contents of each
 * <...> argument list are dropped and replaced by a "`N" arity suffix
 * (commas between the angle brackets are counted), while a literal "<>"
 * becomes the XML-escaped "&lt;&gt;". Returns a newly-allocated string
 * the caller must g_free, an empty string for NULL/empty input, or NULL
 * if allocation fails.
 *
 * Fix over the previous version: guard against reading *(name - 1) when
 * the input begins with '>', which was an out-of-bounds read.
 */
static char *
parse_generic_type_names(char *name)
{
	char *new_name, *ret, *start;
	int within_generic_declaration = 0, generic_members = 1;

	if (name == NULL || *name == '\0')
		return g_strdup ("");

	/*
	 * Worst-case expansion is "<>" (2 chars) -> "&lt;&gt;" (8 chars),
	 * i.e. 4x, hence the sizing factor below.
	 */
	if (!(ret = new_name = (char *) g_calloc (strlen (name) * 4 + 1, sizeof (char))))
		return NULL;

	start = name;

	do {
		switch (*name) {
			case '<':
				within_generic_declaration = 1;
				break;

			case '>':
				within_generic_declaration = 0;

				/*
				 * Don't look back past the start of the string. Note that
				 * arities > 9 produce a non-digit character here.
				 */
				if (name == start || *(name - 1) != '<') {
					*new_name++ = '`';
					*new_name++ = '0' + generic_members;
				} else {
					memcpy (new_name, "&lt;&gt;", 8);
					new_name += 8;
				}

				generic_members = 0;
				break;

			case ',':
				generic_members++;
				break;

			default:
				if (!within_generic_declaration)
					*new_name++ = *name;

				break;
		}
	} while (*name++);

	return ret;
}
2985
/*
 * GHFunc for log_profiler.coverage_methods: collect the coverage
 * entries recorded for @value (a MonoMethod), emit one
 * TYPE_COVERAGE_METHOD event describing the method, then one
 * TYPE_COVERAGE_STATEMENT event per covered IL statement. Runs under
 * log_profiler.coverage_mutex (see dump_coverage).
 */
static void
build_method_buffer (gpointer key, gpointer value, gpointer userdata)
{
	MonoMethod *method = (MonoMethod *)value;
	MonoClass *klass;
	MonoImage *image;
	char *class_name;
	const char *image_name, *method_name, *sig, *first_filename;
	guint i;

	/* obtain_coverage_for_method delta-encodes offsets starting from 0
	 * and appends its entries to coverage_data. */
	log_profiler.coverage_previous_offset = 0;
	log_profiler.coverage_data = g_ptr_array_new ();

	mono_profiler_get_coverage_data (log_profiler.handle, method, obtain_coverage_for_method);

	klass = mono_method_get_class (method);
	image = mono_class_get_image (klass);
	image_name = mono_image_get_name (image);

	sig = mono_signature_get_desc (mono_method_signature (method), TRUE);
	class_name = parse_generic_type_names (mono_type_get_name (mono_class_get_type (klass)));
	method_name = mono_method_get_name (method);

	/* Only the first entry's filename is emitted; the per-entry
	 * filenames of the remaining statements are not written out. */
	if (log_profiler.coverage_data->len != 0) {
		CoverageEntry *entry = (CoverageEntry *)log_profiler.coverage_data->pdata[0];
		first_filename = entry->filename ? entry->filename : "";
	} else
		first_filename = "";

	/* Guard against NULL metadata strings before taking strlen. */
	image_name = image_name ? image_name : "";
	sig = sig ? sig : "";
	method_name = method_name ? method_name : "";

	/* The requested size must match what the emit calls below write. */
	ENTER_LOG (&coverage_methods_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		strlen (image_name) + 1 /* image name */ +
		strlen (class_name) + 1 /* class name */ +
		strlen (method_name) + 1 /* method name */ +
		strlen (sig) + 1 /* signature */ +
		strlen (first_filename) + 1 /* first file name */ +
		LEB128_SIZE /* token */ +
		LEB128_SIZE /* method id */ +
		LEB128_SIZE /* entries */
	);

	emit_event (logbuffer, TYPE_COVERAGE_METHOD | TYPE_COVERAGE);
	emit_string (logbuffer, image_name, strlen (image_name) + 1);
	emit_string (logbuffer, class_name, strlen (class_name) + 1);
	emit_string (logbuffer, method_name, strlen (method_name) + 1);
	emit_string (logbuffer, sig, strlen (sig) + 1);
	emit_string (logbuffer, first_filename, strlen (first_filename) + 1);

	emit_uvalue (logbuffer, mono_method_get_token (method));
	emit_uvalue (logbuffer, log_profiler.coverage_method_id);
	emit_value (logbuffer, log_profiler.coverage_data->len);

	EXIT_LOG;

	/* One statement event per recorded entry, tagged with this method's id. */
	for (i = 0; i < log_profiler.coverage_data->len; i++) {
		CoverageEntry *entry = (CoverageEntry *)log_profiler.coverage_data->pdata[i];

		ENTER_LOG (&coverage_statements_ctr, logbuffer,
			EVENT_SIZE /* event */ +
			LEB128_SIZE /* method id */ +
			LEB128_SIZE /* offset */ +
			LEB128_SIZE /* counter */ +
			LEB128_SIZE /* line */ +
			LEB128_SIZE /* column */
		);

		emit_event (logbuffer, TYPE_COVERAGE_STATEMENT | TYPE_COVERAGE);
		emit_uvalue (logbuffer, log_profiler.coverage_method_id);
		emit_uvalue (logbuffer, entry->offset);
		emit_uvalue (logbuffer, entry->counter);
		emit_uvalue (logbuffer, entry->line);
		emit_uvalue (logbuffer, entry->column);

		EXIT_LOG;
	}

	log_profiler.coverage_method_id++;

	g_free (class_name);

	g_ptr_array_foreach (log_profiler.coverage_data, free_coverage_entry, NULL);
	g_ptr_array_free (log_profiler.coverage_data, TRUE);
}
3073
3074 /* This empties the queue */
3075 static guint
3076 count_queue (MonoLockFreeQueue *queue)
3077 {
3078         MonoLockFreeQueueNode *node;
3079         guint count = 0;
3080
3081         while ((node = mono_lock_free_queue_dequeue (queue))) {
3082                 count++;
3083                 mono_thread_hazardous_try_free (node, g_free);
3084         }
3085
3086         return count;
3087 }
3088
/*
 * GHFunc for log_profiler.coverage_classes: emit one TYPE_COVERAGE_CLASS
 * event summarizing coverage for @key (a MonoClass). Counting the fully
 * covered methods drains (and frees) the class's method queue.
 */
static void
build_class_buffer (gpointer key, gpointer value, gpointer userdata)
{
	MonoClass *klass = (MonoClass *)key;
	MonoLockFreeQueue *class_methods = (MonoLockFreeQueue *)value;
	MonoImage *image;
	char *class_name;
	const char *assembly_name;
	int number_of_methods, partially_covered;
	guint fully_covered;

	image = mono_class_get_image (klass);
	assembly_name = mono_image_get_name (image);
	class_name = mono_type_get_name (mono_class_get_type (klass));

	/* Guard against a NULL assembly name before taking strlen. */
	assembly_name = assembly_name ? assembly_name : "";
	number_of_methods = mono_class_num_methods (klass);
	fully_covered = count_queue (class_methods);
	/* We don't handle partial covered yet */
	partially_covered = 0;

	/* The requested size must match what the emit calls below write. */
	ENTER_LOG (&coverage_classes_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		strlen (assembly_name) + 1 /* assembly name */ +
		strlen (class_name) + 1 /* class name */ +
		LEB128_SIZE /* no. methods */ +
		LEB128_SIZE /* fully covered */ +
		LEB128_SIZE /* partially covered */
	);

	emit_event (logbuffer, TYPE_COVERAGE_CLASS | TYPE_COVERAGE);
	emit_string (logbuffer, assembly_name, strlen (assembly_name) + 1);
	emit_string (logbuffer, class_name, strlen (class_name) + 1);
	emit_uvalue (logbuffer, number_of_methods);
	emit_uvalue (logbuffer, fully_covered);
	emit_uvalue (logbuffer, partially_covered);

	EXIT_LOG;

	g_free (class_name);
}
3130
3131 static void
3132 get_coverage_for_image (MonoImage *image, int *number_of_methods, guint *fully_covered, int *partially_covered)
3133 {
3134         MonoLockFreeQueue *image_methods = (MonoLockFreeQueue *)mono_conc_hashtable_lookup (log_profiler.coverage_image_to_methods, image);
3135
3136         *number_of_methods = mono_image_get_table_rows (image, MONO_TABLE_METHOD);
3137         if (image_methods)
3138                 *fully_covered = count_queue (image_methods);
3139         else
3140                 *fully_covered = 0;
3141
3142         // FIXME: We don't handle partially covered yet.
3143         *partially_covered = 0;
3144 }
3145
/*
 * GHFunc for log_profiler.coverage_assemblies: emit one
 * TYPE_COVERAGE_ASSEMBLY event for @value (a MonoAssembly), with the
 * per-image counts supplied by get_coverage_for_image ().
 */
static void
build_assembly_buffer (gpointer key, gpointer value, gpointer userdata)
{
	MonoAssembly *assembly = (MonoAssembly *)value;
	MonoImage *image = mono_assembly_get_image (assembly);
	const char *name, *guid, *filename;
	int number_of_methods = 0, partially_covered = 0;
	guint fully_covered = 0;

	name = mono_image_get_name (image);
	guid = mono_image_get_guid (image);
	filename = mono_image_get_filename (image);

	/* Guard against NULL image metadata before taking strlen. */
	name = name ? name : "";
	guid = guid ? guid : "";
	filename = filename ? filename : "";

	get_coverage_for_image (image, &number_of_methods, &fully_covered, &partially_covered);

	/* The requested size must match what the emit calls below write. */
	ENTER_LOG (&coverage_assemblies_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		strlen (name) + 1 /* name */ +
		strlen (guid) + 1 /* guid */ +
		strlen (filename) + 1 /* file name */ +
		LEB128_SIZE /* no. methods */ +
		LEB128_SIZE /* fully covered */ +
		LEB128_SIZE /* partially covered */
	);

	emit_event (logbuffer, TYPE_COVERAGE_ASSEMBLY | TYPE_COVERAGE);
	emit_string (logbuffer, name, strlen (name) + 1);
	emit_string (logbuffer, guid, strlen (guid) + 1);
	emit_string (logbuffer, filename, strlen (filename) + 1);
	emit_uvalue (logbuffer, number_of_methods);
	emit_uvalue (logbuffer, fully_covered);
	emit_uvalue (logbuffer, partially_covered);

	EXIT_LOG;
}
3185
/*
 * Write out all collected coverage data (assemblies, then classes, then
 * methods). Called at shutdown (see log_shutdown). Holds
 * coverage_mutex across the walks so coverage_filter () cannot mutate
 * the tables mid-dump.
 */
static void
dump_coverage (void)
{
	mono_os_mutex_lock (&log_profiler.coverage_mutex);
	mono_conc_hashtable_foreach (log_profiler.coverage_assemblies, build_assembly_buffer, NULL);
	mono_conc_hashtable_foreach (log_profiler.coverage_classes, build_class_buffer, NULL);
	mono_conc_hashtable_foreach (log_profiler.coverage_methods, build_method_buffer, NULL);
	mono_os_mutex_unlock (&log_profiler.coverage_mutex);
}
3195
3196 static MonoLockFreeQueueNode *
3197 create_method_node (MonoMethod *method)
3198 {
3199         MethodNode *node = (MethodNode *) g_malloc (sizeof (MethodNode));
3200         mono_lock_free_queue_node_init ((MonoLockFreeQueueNode *) node, FALSE);
3201         node->method = method;
3202
3203         return (MonoLockFreeQueueNode *) node;
3204 }
3205
/*
 * Decide whether @method should be tracked for coverage, registering it
 * in the coverage tables when it should. Returns TRUE to track, FALSE
 * to skip. Skips icalls/pinvokes, suppressed assemblies, and anything
 * excluded by the user's +/- filters; a class rejected by a filter is
 * cached in coverage_filtered_classes so later methods of the same
 * class are rejected cheaply.
 */
static gboolean
coverage_filter (MonoProfiler *prof, MonoMethod *method)
{
	MonoError error;
	MonoClass *klass;
	MonoImage *image;
	MonoAssembly *assembly;
	MonoMethodHeader *header;
	guint32 iflags, flags, code_size;
	char *fqn, *classname;
	gboolean has_positive, found;
	MonoLockFreeQueue *image_methods, *class_methods;
	MonoLockFreeQueueNode *node;

	/* Native methods have no IL to cover. */
	flags = mono_method_get_flags (method, &iflags);
	if ((iflags & 0x1000 /*METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL*/) ||
	    (flags & 0x2000 /*METHOD_ATTRIBUTE_PINVOKE_IMPL*/))
		return FALSE;

	// Don't need to do anything else if we're already tracking this method
	if (mono_conc_hashtable_lookup (log_profiler.coverage_methods, method))
		return TRUE;

	klass = mono_method_get_class (method);
	image = mono_class_get_image (klass);

	// Don't handle coverage for the core assemblies
	if (mono_conc_hashtable_lookup (log_profiler.coverage_suppressed_assemblies, (gpointer) mono_image_get_name (image)) != NULL)
		return FALSE;

	if (prof->coverage_filters) {
		/* Check already filtered classes first */
		if (mono_conc_hashtable_lookup (log_profiler.coverage_filtered_classes, klass))
			return FALSE;

		classname = mono_type_get_name (mono_class_get_type (klass));

		/* Filters match against "[image]Namespace.Class". */
		fqn = g_strdup_printf ("[%s]%s", mono_image_get_name (image), classname);

		// Check positive filters first
		has_positive = FALSE;
		found = FALSE;
		for (guint i = 0; i < prof->coverage_filters->len; ++i) {
			char *filter = (char *)g_ptr_array_index (prof->coverage_filters, i);

			if (filter [0] == '+') {
				filter = &filter [1];

				if (strstr (fqn, filter) != NULL)
					found = TRUE;

				has_positive = TRUE;
			}
		}

		/* Positive filters exist but none matched: exclude the class. */
		if (has_positive && !found) {
			mono_os_mutex_lock (&log_profiler.coverage_mutex);
			mono_conc_hashtable_insert (log_profiler.coverage_filtered_classes, klass, klass);
			mono_os_mutex_unlock (&log_profiler.coverage_mutex);
			g_free (fqn);
			g_free (classname);

			return FALSE;
		}

		/* Then apply the negative ('-') filters. */
		for (guint i = 0; i < prof->coverage_filters->len; ++i) {
			// FIXME: Is substring search sufficient?
			char *filter = (char *)g_ptr_array_index (prof->coverage_filters, i);
			if (filter [0] == '+')
				continue;

			// Skip '-'
			filter = &filter [1];

			if (strstr (fqn, filter) != NULL) {
				mono_os_mutex_lock (&log_profiler.coverage_mutex);
				mono_conc_hashtable_insert (log_profiler.coverage_filtered_classes, klass, klass);
				mono_os_mutex_unlock (&log_profiler.coverage_mutex);
				g_free (fqn);
				g_free (classname);

				return FALSE;
			}
		}

		g_free (fqn);
		g_free (classname);
	}

	header = mono_method_get_header_checked (method, &error);
	mono_error_cleanup (&error);

	/* NOTE(review): @header may be NULL if the lookup above failed, and
	 * @code_size is never read afterwards — presumably
	 * mono_method_header_get_code tolerates NULL; confirm. */
	mono_method_header_get_code (header, &code_size, NULL);

	assembly = mono_image_get_assembly (image);

	// Need to keep the assemblies around for as long as they are kept in the hashtable
	// Nunit, for example, has a habit of unloading them before the coverage statistics are
	// generated causing a crash. See https://bugzilla.xamarin.com/show_bug.cgi?id=39325
	mono_assembly_addref (assembly);

	mono_os_mutex_lock (&log_profiler.coverage_mutex);
	mono_conc_hashtable_insert (log_profiler.coverage_methods, method, method);
	mono_conc_hashtable_insert (log_profiler.coverage_assemblies, assembly, assembly);
	mono_os_mutex_unlock (&log_profiler.coverage_mutex);

	image_methods = (MonoLockFreeQueue *)mono_conc_hashtable_lookup (log_profiler.coverage_image_to_methods, image);

	/* Lazily create the per-image method queue. */
	if (image_methods == NULL) {
		image_methods = (MonoLockFreeQueue *) g_malloc (sizeof (MonoLockFreeQueue));
		mono_lock_free_queue_init (image_methods);
		mono_os_mutex_lock (&log_profiler.coverage_mutex);
		mono_conc_hashtable_insert (log_profiler.coverage_image_to_methods, image, image_methods);
		mono_os_mutex_unlock (&log_profiler.coverage_mutex);
	}

	node = create_method_node (method);
	mono_lock_free_queue_enqueue (image_methods, node);

	class_methods = (MonoLockFreeQueue *)mono_conc_hashtable_lookup (log_profiler.coverage_classes, klass);

	/* Lazily create the per-class method queue. */
	if (class_methods == NULL) {
		class_methods = (MonoLockFreeQueue *) g_malloc (sizeof (MonoLockFreeQueue));
		mono_lock_free_queue_init (class_methods);
		mono_os_mutex_lock (&log_profiler.coverage_mutex);
		mono_conc_hashtable_insert (log_profiler.coverage_classes, klass, class_methods);
		mono_os_mutex_unlock (&log_profiler.coverage_mutex);
	}

	node = create_method_node (method);
	mono_lock_free_queue_enqueue (class_methods, node);

	return TRUE;
}
3340
#define LINE_BUFFER_SIZE 4096
/* Max file limit of 128KB */
#define MAX_FILE_SIZE (128 * 1024)
/*
 * Read the entire contents of @stream into a newly-allocated,
 * NUL-terminated buffer. Returns NULL if the stream is not seekable or
 * the file exceeds MAX_FILE_SIZE. The caller owns the returned buffer.
 *
 * Fixes over the previous version: reads are bounded by the size
 * measured with ftell (), so a file that grows after the measurement
 * can no longer overflow the buffer; and the NUL terminator is placed
 * at the number of bytes actually read, so a short read no longer
 * leaves uninitialized bytes before the terminator.
 */
static char *
get_file_content (FILE *stream)
{
	char *buffer;
	ssize_t bytes_read;
	long filesize;
	int res, offset = 0;

	res = fseek (stream, 0, SEEK_END);
	if (res < 0)
		return NULL;

	filesize = ftell (stream);
	if (filesize < 0)
		return NULL;

	res = fseek (stream, 0, SEEK_SET);
	if (res < 0)
		return NULL;

	if (filesize > MAX_FILE_SIZE)
		return NULL;

	buffer = (char *) g_malloc ((filesize + 1) * sizeof (char));

	while (offset < filesize) {
		/* Never request more than the space remaining in @buffer. */
		size_t chunk = (size_t) (filesize - offset) < (size_t) LINE_BUFFER_SIZE
			? (size_t) (filesize - offset)
			: (size_t) LINE_BUFFER_SIZE;

		bytes_read = fread (buffer + offset, 1, chunk, stream);

		if (bytes_read <= 0)
			break;

		offset += bytes_read;
	}

	/* NUL terminate our buffer at the bytes actually read */
	buffer [offset] = '\0';
	return buffer;
}
3375
/*
 * Tokenize @contents in place on '\n': returns the current line (with
 * its newline, if any, overwritten by a NUL) and stores the start of
 * the following line in *next_start. Returns NULL and sets *next_start
 * to NULL when the input is NULL or exhausted.
 */
static char *
get_next_line (char *contents, char **next_start)
{
	char *newline;

	if (contents == NULL || *contents == '\0') {
		*next_start = NULL;
		return NULL;
	}

	newline = strchr (contents, '\n');

	if (newline != NULL) {
		/* Terminate this line and point past it for the next call. */
		*newline = '\0';
		*next_start = newline + 1;
	} else {
		/* Last line without a trailing newline. */
		*next_start = NULL;
	}

	return contents;
}
3397
3398 static void
3399 init_suppressed_assemblies (void)
3400 {
3401         char *content;
3402         char *line;
3403         FILE *sa_file;
3404
3405         log_profiler.coverage_suppressed_assemblies = mono_conc_hashtable_new (g_str_hash, g_str_equal);
3406         sa_file = fopen (SUPPRESSION_DIR "/mono-profiler-log.suppression", "r");
3407         if (sa_file == NULL)
3408                 return;
3409
3410         /* Don't need to free @content as it is referred to by the lines stored in @suppressed_assemblies */
3411         content = get_file_content (sa_file);
3412         if (content == NULL)
3413                 g_error ("mono-profiler-log.suppression is greater than 128kb - aborting.");
3414
3415         while ((line = get_next_line (content, &content))) {
3416                 line = g_strchomp (g_strchug (line));
3417                 /* No locking needed as we're doing initialization */
3418                 mono_conc_hashtable_insert (log_profiler.coverage_suppressed_assemblies, line, line);
3419         }
3420
3421         fclose (sa_file);
3422 }
3423
3424 static void
3425 parse_cov_filter_file (GPtrArray *filters, const char *file)
3426 {
3427         FILE *filter_file = fopen (file, "r");
3428
3429         if (filter_file == NULL) {
3430                 mono_profiler_printf_err ("Could not open coverage filter file '%s'.", file);
3431                 return;
3432         }
3433
3434         /* Don't need to free content as it is referred to by the lines stored in @filters */
3435         char *content = get_file_content (filter_file);
3436
3437         if (content == NULL)
3438                 mono_profiler_printf_err ("Coverage filter file '%s' is larger than 128kb - ignoring.", file);
3439
3440         char *line;
3441
3442         while ((line = get_next_line (content, &content)))
3443                 g_ptr_array_add (filters, g_strchug (g_strchomp (line)));
3444
3445         fclose (filter_file);
3446 }
3447
/*
 * Set up the mutex and hash tables used by the coverage machinery, and
 * load the list of assemblies for which coverage is suppressed.
 */
static void
coverage_init (void)
{
	mono_os_mutex_init (&log_profiler.coverage_mutex);
	log_profiler.coverage_methods = mono_conc_hashtable_new (NULL, NULL);
	log_profiler.coverage_assemblies = mono_conc_hashtable_new (NULL, NULL);
	log_profiler.coverage_classes = mono_conc_hashtable_new (NULL, NULL);
	log_profiler.coverage_filtered_classes = mono_conc_hashtable_new (NULL, NULL);
	log_profiler.coverage_image_to_methods = mono_conc_hashtable_new (NULL, NULL);
	init_suppressed_assemblies ();
}
3459
3460 static void
3461 unref_coverage_assemblies (gpointer key, gpointer value, gpointer userdata)
3462 {
3463         MonoAssembly *assembly = (MonoAssembly *)value;
3464         mono_assembly_close (assembly);
3465 }
3466
/* Release a sample hit back to its lock-free allocator block. */
static void
free_sample_hit (gpointer p)
{
	mono_lock_free_free (p, SAMPLE_BLOCK_SIZE);
}
3472
3473 static void
3474 cleanup_reusable_samples (void)
3475 {
3476         SampleHit *sample;
3477
3478         while ((sample = (SampleHit *) mono_lock_free_queue_dequeue (&log_profiler.sample_reuse_queue)))
3479                 mono_thread_hazardous_try_free (sample, free_sample_hit);
3480 }
3481
3482 static void
3483 log_shutdown (MonoProfiler *prof)
3484 {
3485         InterlockedWrite (&log_profiler.in_shutdown, 1);
3486
3487         if (ENABLED (PROFLOG_COUNTER_EVENTS))
3488                 counters_and_perfcounters_sample ();
3489
3490         if (log_config.collect_coverage)
3491                 dump_coverage ();
3492
3493         char c = 1;
3494
3495         if (write (prof->pipes [1], &c, 1) != 1) {
3496                 mono_profiler_printf_err ("Could not write to log profiler pipe: %s", strerror (errno));
3497                 exit (1);
3498         }
3499
3500         mono_native_thread_join (prof->helper_thread);
3501
3502         mono_os_mutex_destroy (&log_profiler.counters_mutex);
3503
3504         MonoCounterAgent *mc_next;
3505
3506         for (MonoCounterAgent *cur = log_profiler.counters; cur; cur = mc_next) {
3507                 mc_next = cur->next;
3508                 g_free (cur);
3509         }
3510
3511         PerfCounterAgent *pc_next;
3512
3513         for (PerfCounterAgent *cur = log_profiler.perfcounters; cur; cur = pc_next) {
3514                 pc_next = cur->next;
3515                 g_free (cur);
3516         }
3517
3518         /*
3519          * Ensure that we empty the LLS completely, even if some nodes are
3520          * not immediately removed upon calling mono_lls_remove (), by
3521          * iterating until the head is NULL.
3522          */
3523         while (log_profiler.profiler_thread_list.head) {
3524                 MONO_LLS_FOREACH_SAFE (&log_profiler.profiler_thread_list, MonoProfilerThread, thread) {
3525                         g_assert (thread->attached && "Why is a thread in the LLS not attached?");
3526
3527                         remove_thread (thread);
3528                 } MONO_LLS_FOREACH_SAFE_END
3529         }
3530
3531         /*
3532          * Ensure that all threads have been freed, so that we don't miss any
3533          * buffers when we shut down the writer thread below.
3534          */
3535         mono_thread_hazardous_try_free_all ();
3536
3537         InterlockedWrite (&prof->run_dumper_thread, 0);
3538         mono_os_sem_post (&prof->dumper_queue_sem);
3539         mono_native_thread_join (prof->dumper_thread);
3540         mono_os_sem_destroy (&prof->dumper_queue_sem);
3541
3542         InterlockedWrite (&prof->run_writer_thread, 0);
3543         mono_os_sem_post (&prof->writer_queue_sem);
3544         mono_native_thread_join (prof->writer_thread);
3545         mono_os_sem_destroy (&prof->writer_queue_sem);
3546
3547         /*
3548          * Free all writer queue entries, and ensure that all sample hits will be
3549          * added to the sample reuse queue.
3550          */
3551         mono_thread_hazardous_try_free_all ();
3552
3553         cleanup_reusable_samples ();
3554
3555         /*
3556          * Finally, make sure that all sample hits are freed. This should cover all
3557          * hazardous data from the profiler. We can now be sure that the runtime
3558          * won't later invoke free functions in the profiler library after it has
3559          * been unloaded.
3560          */
3561         mono_thread_hazardous_try_free_all ();
3562
3563         gint32 state = InterlockedRead (&log_profiler.buffer_lock_state);
3564
3565         g_assert (!(state & 0xFFFF) && "Why is the reader count still non-zero?");
3566         g_assert (!(state >> 16) && "Why is the exclusive lock still held?");
3567
3568 #if defined (HAVE_SYS_ZLIB)
3569         if (prof->gzfile)
3570                 gzclose (prof->gzfile);
3571 #endif
3572         if (prof->pipe_output)
3573                 pclose (prof->file);
3574         else
3575                 fclose (prof->file);
3576
3577         mono_conc_hashtable_destroy (prof->method_table);
3578         mono_os_mutex_destroy (&prof->method_table_mutex);
3579
3580         if (log_config.collect_coverage) {
3581                 mono_os_mutex_lock (&log_profiler.coverage_mutex);
3582                 mono_conc_hashtable_foreach (log_profiler.coverage_assemblies, unref_coverage_assemblies, NULL);
3583                 mono_os_mutex_unlock (&log_profiler.coverage_mutex);
3584
3585                 mono_conc_hashtable_destroy (log_profiler.coverage_methods);
3586                 mono_conc_hashtable_destroy (log_profiler.coverage_assemblies);
3587                 mono_conc_hashtable_destroy (log_profiler.coverage_classes);
3588                 mono_conc_hashtable_destroy (log_profiler.coverage_filtered_classes);
3589
3590                 mono_conc_hashtable_destroy (log_profiler.coverage_image_to_methods);
3591                 mono_conc_hashtable_destroy (log_profiler.coverage_suppressed_assemblies);
3592                 mono_os_mutex_destroy (&log_profiler.coverage_mutex);
3593         }
3594
3595         PROF_TLS_FREE ();
3596
3597         g_free (prof->args);
3598 }
3599
/*
 * Expand the output file name template: %t is replaced with a UTC
 * timestamp, %p with the process id and %% with a literal '%'. Returns a
 * newly allocated string that the caller owns; if the template contains no
 * specifiers, a plain copy of it is returned.
 */
static char*
new_filename (const char* filename)
{
	time_t now = time (NULL);
	char pid_str [16];
	char date_str [16];
	const char *src;
	int n_dates = 0;
	int n_pids = 0;

	/* First pass: count the specifiers we will need to expand. */
	for (src = filename; *src; src++) {
		if (*src != '%')
			continue;
		src++;
		if (*src == 't')
			n_dates++;
		else if (*src == 'p')
			n_pids++;
		else if (*src == 0)
			break;
	}

	if (!n_dates && !n_pids)
		return pstrdup (filename);

	snprintf (pid_str, sizeof (pid_str), "%d", process_id ());

	struct tm *ts = gmtime (&now);

	snprintf (date_str, sizeof (date_str), "%d%02d%02d%02d%02d%02d",
		1900 + ts->tm_year, 1 + ts->tm_mon, ts->tm_mday, ts->tm_hour, ts->tm_min, ts->tm_sec);

	int date_len = strlen (date_str);
	int pid_len = strlen (pid_str);

	/*
	 * Each specifier occupies two characters in the template, so summing the
	 * full template length plus the expansions always leaves at least one
	 * spare byte for the terminating NUL.
	 */
	char *result = (char *) g_malloc (strlen (filename) + date_len * n_dates + pid_len * n_pids);
	char *dst = result;

	/* Second pass: copy the template, substituting as we go. */
	for (src = filename; *src; src++) {
		if (*src != '%') {
			*dst++ = *src;
			continue;
		}
		src++;
		if (*src == 't') {
			strcpy (dst, date_str);
			dst += date_len;
		} else if (*src == 'p') {
			strcpy (dst, pid_str);
			dst += pid_len;
		} else if (*src == '%') {
			*dst++ = '%';
		} else if (*src == 0) {
			break;
		} else {
			/* Unknown specifier: copy it through verbatim. */
			*dst++ = '%';
			*dst++ = *src;
		}
	}

	*dst = 0;

	return result;
}
3658
/*
 * Add fd to the given fd_set and raise *max_fd if needed (select () wants
 * the highest descriptor plus one).
 *
 * Descriptors at or above FD_SETSIZE cannot legally be stored in an fd_set.
 * That should only happen for the basic FDs (server socket, pipes) at
 * startup if for some mysterious reason they're too large; the profiler
 * can't function in that case, so print an error and bail out.
 */
static void
add_to_fd_set (fd_set *set, int fd, int *max_fd)
{
	if (fd >= FD_SETSIZE) {
		mono_profiler_printf_err ("File descriptor is out of bounds for fd_set: %d", fd);
		exit (1);
	}

	FD_SET (fd, set);

	if (fd > *max_fd)
		*max_fd = fd;
}
3678
3679 static void *
3680 helper_thread (void *arg)
3681 {
3682         mono_threads_attach_tools_thread ();
3683         mono_native_thread_set_name (mono_native_thread_id_get (), "Profiler helper");
3684
3685         MonoProfilerThread *thread = init_thread (FALSE);
3686
3687         GArray *command_sockets = g_array_new (FALSE, FALSE, sizeof (int));
3688
3689         while (1) {
3690                 fd_set rfds;
3691                 int max_fd = -1;
3692
3693                 FD_ZERO (&rfds);
3694
3695                 add_to_fd_set (&rfds, log_profiler.server_socket, &max_fd);
3696                 add_to_fd_set (&rfds, log_profiler.pipes [0], &max_fd);
3697
3698                 for (gint i = 0; i < command_sockets->len; i++)
3699                         add_to_fd_set (&rfds, g_array_index (command_sockets, int, i), &max_fd);
3700
3701                 struct timeval tv = { .tv_sec = 1, .tv_usec = 0 };
3702
3703                 // Sleep for 1sec or until a file descriptor has data.
3704                 if (select (max_fd + 1, &rfds, NULL, NULL, &tv) == -1) {
3705                         if (errno == EINTR)
3706                                 continue;
3707
3708                         mono_profiler_printf_err ("Could not poll in log profiler helper thread: %s", strerror (errno));
3709                         exit (1);
3710                 }
3711
3712                 if (ENABLED (PROFLOG_COUNTER_EVENTS))
3713                         counters_and_perfcounters_sample ();
3714
3715                 buffer_lock_excl ();
3716
3717                 sync_point (SYNC_POINT_PERIODIC);
3718
3719                 buffer_unlock_excl ();
3720
3721                 // Are we shutting down?
3722                 if (FD_ISSET (log_profiler.pipes [0], &rfds)) {
3723                         char c;
3724                         read (log_profiler.pipes [0], &c, 1);
3725                         break;
3726                 }
3727
3728                 for (gint i = 0; i < command_sockets->len; i++) {
3729                         int fd = g_array_index (command_sockets, int, i);
3730
3731                         if (!FD_ISSET (fd, &rfds))
3732                                 continue;
3733
3734                         char buf [64];
3735                         int len = read (fd, buf, sizeof (buf) - 1);
3736
3737                         if (len == -1)
3738                                 continue;
3739
3740                         if (!len) {
3741                                 // The other end disconnected.
3742                                 g_array_remove_index (command_sockets, i);
3743                                 close (fd);
3744
3745                                 continue;
3746                         }
3747
3748                         buf [len] = 0;
3749
3750                         if (log_config.hs_mode == MONO_PROFILER_HEAPSHOT_ON_DEMAND && !strcmp (buf, "heapshot\n")) {
3751                                 // Rely on the finalization callback triggering a GC.
3752                                 InterlockedWrite (&log_profiler.heapshot_requested, 1);
3753                                 mono_gc_finalize_notify ();
3754                         }
3755                 }
3756
3757                 if (FD_ISSET (log_profiler.server_socket, &rfds)) {
3758                         int fd = accept (log_profiler.server_socket, NULL, NULL);
3759
3760                         if (fd != -1) {
3761                                 if (fd >= FD_SETSIZE)
3762                                         close (fd);
3763                                 else
3764                                         g_array_append_val (command_sockets, fd);
3765                         }
3766                 }
3767         }
3768
3769         for (gint i = 0; i < command_sockets->len; i++)
3770                 close (g_array_index (command_sockets, int, i));
3771
3772         g_array_free (command_sockets, TRUE);
3773
3774         send_log_unsafe (FALSE);
3775         deinit_thread (thread);
3776
3777         mono_thread_info_detach ();
3778
3779         return NULL;
3780 }
3781
3782 static void
3783 start_helper_thread (void)
3784 {
3785         if (pipe (log_profiler.pipes) == -1) {
3786                 mono_profiler_printf_err ("Could not create log profiler pipe: %s", strerror (errno));
3787                 exit (1);
3788         }
3789
3790         log_profiler.server_socket = socket (PF_INET, SOCK_STREAM, 0);
3791
3792         if (log_profiler.server_socket == -1) {
3793                 mono_profiler_printf_err ("Could not create log profiler server socket: %s", strerror (errno));
3794                 exit (1);
3795         }
3796
3797         struct sockaddr_in server_address;
3798
3799         memset (&server_address, 0, sizeof (server_address));
3800         server_address.sin_family = AF_INET;
3801         server_address.sin_addr.s_addr = INADDR_ANY;
3802         server_address.sin_port = htons (log_profiler.command_port);
3803
3804         if (bind (log_profiler.server_socket, (struct sockaddr *) &server_address, sizeof (server_address)) == -1) {
3805                 mono_profiler_printf_err ("Could not bind log profiler server socket on port %d: %s", log_profiler.command_port, strerror (errno));
3806                 close (log_profiler.server_socket);
3807                 exit (1);
3808         }
3809
3810         if (listen (log_profiler.server_socket, 1) == -1) {
3811                 mono_profiler_printf_err ("Could not listen on log profiler server socket: %s", strerror (errno));
3812                 close (log_profiler.server_socket);
3813                 exit (1);
3814         }
3815
3816         socklen_t slen = sizeof (server_address);
3817
3818         if (getsockname (log_profiler.server_socket, (struct sockaddr *) &server_address, &slen)) {
3819                 mono_profiler_printf_err ("Could not retrieve assigned port for log profiler server socket: %s", strerror (errno));
3820                 close (log_profiler.server_socket);
3821                 exit (1);
3822         }
3823
3824         log_profiler.command_port = ntohs (server_address.sin_port);
3825
3826         if (!mono_native_thread_create (&log_profiler.helper_thread, helper_thread, NULL)) {
3827                 mono_profiler_printf_err ("Could not start log profiler helper thread");
3828                 close (log_profiler.server_socket);
3829                 exit (1);
3830         }
3831 }
3832
/*
 * Hazard-pointer free function for WriterQueueEntry instances: returns the
 * entry's memory to the lock-free allocator block it was carved from.
 */
static void
free_writer_entry (gpointer p)
{
	mono_lock_free_free (p, WRITER_ENTRY_BLOCK_SIZE);
}
3838
/*
 * Dequeue one entry from the writer queue and flush its buffer to the
 * output stream. If the entry carries newly JITted methods, their metadata
 * (JIT event with name, code start and size) is emitted first so the main
 * buffer never references a method before its metadata has been written.
 * Returns TRUE if an entry was processed, FALSE if the queue was empty —
 * the shutdown path relies on this to drain the queue.
 */
static gboolean
handle_writer_queue_entry (void)
{
	WriterQueueEntry *entry;

	if ((entry = (WriterQueueEntry *) mono_lock_free_queue_dequeue (&log_profiler.writer_queue))) {
		if (!entry->methods)
			goto no_methods;

		gboolean wrote_methods = FALSE;

		/*
		 * Encode the method events in a temporary log buffer that we
		 * flush to disk before the main buffer, ensuring that all
		 * methods have metadata emitted before they're referenced.
		 *
		 * We use a 'proper' thread-local buffer for this as opposed
		 * to allocating and freeing a buffer by hand because the call
		 * to mono_method_full_name () below may trigger class load
		 * events when it retrieves the signature of the method. So a
		 * thread-local buffer needs to exist when such events occur.
		 */
		for (guint i = 0; i < entry->methods->len; i++) {
			MethodInfo *info = (MethodInfo *) g_ptr_array_index (entry->methods, i);

			if (mono_conc_hashtable_lookup (log_profiler.method_table, info->method))
				goto free_info; // This method already has metadata emitted.

			/*
			 * Other threads use this hash table to get a general
			 * idea of whether a method has already been emitted to
			 * the stream. Due to the way we add to this table, it
			 * can easily happen that multiple threads queue up the
			 * same methods, but that's OK since eventually all
			 * methods will be in this table and the thread-local
			 * method lists will just be empty for the rest of the
			 * app's lifetime.
			 */
			mono_os_mutex_lock (&log_profiler.method_table_mutex);
			mono_conc_hashtable_insert (log_profiler.method_table, info->method, info->method);
			mono_os_mutex_unlock (&log_profiler.method_table_mutex);

			char *name = mono_method_full_name (info->method, 1);
			int nlen = strlen (name) + 1; // Include the NUL terminator.
			void *cstart = info->ji ? mono_jit_info_get_code_start (info->ji) : NULL;
			int csize = info->ji ? mono_jit_info_get_code_size (info->ji) : 0;

			ENTER_LOG (&method_jits_ctr, logbuffer,
				EVENT_SIZE /* event */ +
				LEB128_SIZE /* method */ +
				LEB128_SIZE /* start */ +
				LEB128_SIZE /* size */ +
				nlen /* name */
			);

			emit_event_time (logbuffer, TYPE_JIT | TYPE_METHOD, info->time);
			emit_method_inner (logbuffer, info->method);
			emit_ptr (logbuffer, cstart);
			emit_value (logbuffer, csize);

			// Copy the method name inline, NUL terminator included.
			memcpy (logbuffer->cursor, name, nlen);
			logbuffer->cursor += nlen;

			// NO_SEND: the whole metadata buffer is flushed manually below.
			EXIT_LOG_EXPLICIT (NO_SEND);

			mono_free (name);

			wrote_methods = TRUE;

		free_info:
			g_free (info);
		}

		g_ptr_array_free (entry->methods, TRUE);

		if (wrote_methods) {
			MonoProfilerThread *thread = PROF_TLS_GET ();

			// Flush the metadata buffer ahead of the main buffer below.
			dump_buffer_threadless (thread->buffer);
			init_buffer_state (thread);
		}

	no_methods:
		dump_buffer (entry->buffer);

		mono_thread_hazardous_try_free (entry, free_writer_entry);

		return TRUE;
	}

	return FALSE;
}
3931
3932 static void *
3933 writer_thread (void *arg)
3934 {
3935         mono_threads_attach_tools_thread ();
3936         mono_native_thread_set_name (mono_native_thread_id_get (), "Profiler writer");
3937
3938         dump_header ();
3939
3940         MonoProfilerThread *thread = init_thread (FALSE);
3941
3942         while (InterlockedRead (&log_profiler.run_writer_thread)) {
3943                 mono_os_sem_wait (&log_profiler.writer_queue_sem, MONO_SEM_FLAGS_NONE);
3944                 handle_writer_queue_entry ();
3945         }
3946
3947         /* Drain any remaining entries on shutdown. */
3948         while (handle_writer_queue_entry ());
3949
3950         free_buffer (thread->buffer, thread->buffer->size);
3951         deinit_thread (thread);
3952
3953         mono_thread_info_detach ();
3954
3955         return NULL;
3956 }
3957
3958 static void
3959 start_writer_thread (void)
3960 {
3961         InterlockedWrite (&log_profiler.run_writer_thread, 1);
3962
3963         if (!mono_native_thread_create (&log_profiler.writer_thread, writer_thread, NULL)) {
3964                 mono_profiler_printf_err ("Could not start log profiler writer thread");
3965                 exit (1);
3966         }
3967 }
3968
3969 static void
3970 reuse_sample_hit (gpointer p)
3971 {
3972         SampleHit *sample = p;
3973
3974         mono_lock_free_queue_node_unpoison (&sample->node);
3975         mono_lock_free_queue_enqueue (&log_profiler.sample_reuse_queue, &sample->node);
3976 }
3977
3978 static gboolean
3979 handle_dumper_queue_entry (void)
3980 {
3981         SampleHit *sample;
3982
3983         if ((sample = (SampleHit *) mono_lock_free_queue_dequeue (&log_profiler.dumper_queue))) {
3984                 for (int i = 0; i < sample->count; ++i) {
3985                         MonoMethod *method = sample->frames [i].method;
3986                         MonoDomain *domain = sample->frames [i].domain;
3987                         void *address = sample->frames [i].base_address;
3988
3989                         if (!method) {
3990                                 g_assert (domain && "What happened to the domain pointer?");
3991                                 g_assert (address && "What happened to the instruction pointer?");
3992
3993                                 MonoJitInfo *ji = mono_jit_info_table_find (domain, (char *) address);
3994
3995                                 if (ji)
3996                                         sample->frames [i].method = mono_jit_info_get_method (ji);
3997                         }
3998                 }
3999
4000                 ENTER_LOG (&sample_hits_ctr, logbuffer,
4001                         EVENT_SIZE /* event */ +
4002                         LEB128_SIZE /* tid */ +
4003                         LEB128_SIZE /* count */ +
4004                         1 * (
4005                                 LEB128_SIZE /* ip */
4006                         ) +
4007                         LEB128_SIZE /* managed count */ +
4008                         sample->count * (
4009                                 LEB128_SIZE /* method */
4010                         )
4011                 );
4012
4013                 emit_event_time (logbuffer, TYPE_SAMPLE | TYPE_SAMPLE_HIT, sample->time);
4014                 emit_ptr (logbuffer, (void *) sample->tid);
4015                 emit_value (logbuffer, 1);
4016
4017                 // TODO: Actual native unwinding.
4018                 for (int i = 0; i < 1; ++i) {
4019                         emit_ptr (logbuffer, sample->ip);
4020                         add_code_pointer ((uintptr_t) sample->ip);
4021                 }
4022
4023                 /* new in data version 6 */
4024                 emit_uvalue (logbuffer, sample->count);
4025
4026                 for (int i = 0; i < sample->count; ++i)
4027                         emit_method (logbuffer, sample->frames [i].method);
4028
4029                 EXIT_LOG;
4030
4031                 mono_thread_hazardous_try_free (sample, reuse_sample_hit);
4032
4033                 dump_unmanaged_coderefs ();
4034         }
4035
4036         return FALSE;
4037 }
4038
4039 static void *
4040 dumper_thread (void *arg)
4041 {
4042         mono_threads_attach_tools_thread ();
4043         mono_native_thread_set_name (mono_native_thread_id_get (), "Profiler dumper");
4044
4045         MonoProfilerThread *thread = init_thread (FALSE);
4046
4047         while (InterlockedRead (&log_profiler.run_dumper_thread)) {
4048                 /*
4049                  * Flush samples every second so it doesn't seem like the profiler is
4050                  * not working if the program is mostly idle.
4051                  */
4052                 if (mono_os_sem_timedwait (&log_profiler.dumper_queue_sem, 1000, MONO_SEM_FLAGS_NONE) == MONO_SEM_TIMEDWAIT_RET_TIMEDOUT)
4053                         send_log_unsafe (FALSE);
4054
4055                 handle_dumper_queue_entry ();
4056         }
4057
4058         /* Drain any remaining entries on shutdown. */
4059         while (handle_dumper_queue_entry ());
4060
4061         send_log_unsafe (FALSE);
4062         deinit_thread (thread);
4063
4064         mono_thread_info_detach ();
4065
4066         return NULL;
4067 }
4068
4069 static void
4070 start_dumper_thread (void)
4071 {
4072         InterlockedWrite (&log_profiler.run_dumper_thread, 1);
4073
4074         if (!mono_native_thread_create (&log_profiler.dumper_thread, dumper_thread, NULL)) {
4075                 mono_profiler_printf_err ("Could not start log profiler dumper thread");
4076                 exit (1);
4077         }
4078 }
4079
/*
 * Register one of the profiler's internal statistics with the runtime's
 * counter infrastructure as an unsigned, monotonic profiler counter.
 */
static void
register_counter (const char *name, gint32 *counter)
{
	mono_counters_register (name, MONO_COUNTER_UINT | MONO_COUNTER_PROFILER | MONO_COUNTER_MONOTONIC, counter);
}
4085
4086 static void
4087 runtime_initialized (MonoProfiler *profiler)
4088 {
4089         InterlockedWrite (&log_profiler.runtime_inited, 1);
4090
4091         register_counter ("Sample events allocated", &sample_allocations_ctr);
4092         register_counter ("Log buffers allocated", &buffer_allocations_ctr);
4093
4094         register_counter ("Event: Sync points", &sync_points_ctr);
4095         register_counter ("Event: Heap objects", &heap_objects_ctr);
4096         register_counter ("Event: Heap starts", &heap_starts_ctr);
4097         register_counter ("Event: Heap ends", &heap_ends_ctr);
4098         register_counter ("Event: Heap roots", &heap_roots_ctr);
4099         register_counter ("Event: GC events", &gc_events_ctr);
4100         register_counter ("Event: GC resizes", &gc_resizes_ctr);
4101         register_counter ("Event: GC allocations", &gc_allocs_ctr);
4102         register_counter ("Event: GC moves", &gc_moves_ctr);
4103         register_counter ("Event: GC handle creations", &gc_handle_creations_ctr);
4104         register_counter ("Event: GC handle deletions", &gc_handle_deletions_ctr);
4105         register_counter ("Event: GC finalize starts", &finalize_begins_ctr);
4106         register_counter ("Event: GC finalize ends", &finalize_ends_ctr);
4107         register_counter ("Event: GC finalize object starts", &finalize_object_begins_ctr);
4108         register_counter ("Event: GC finalize object ends", &finalize_object_ends_ctr);
4109         register_counter ("Event: Image loads", &image_loads_ctr);
4110         register_counter ("Event: Image unloads", &image_unloads_ctr);
4111         register_counter ("Event: Assembly loads", &assembly_loads_ctr);
4112         register_counter ("Event: Assembly unloads", &assembly_unloads_ctr);
4113         register_counter ("Event: Class loads", &class_loads_ctr);
4114         register_counter ("Event: Class unloads", &class_unloads_ctr);
4115         register_counter ("Event: Method entries", &method_entries_ctr);
4116         register_counter ("Event: Method exits", &method_exits_ctr);
4117         register_counter ("Event: Method exception leaves", &method_exception_exits_ctr);
4118         register_counter ("Event: Method JITs", &method_jits_ctr);
4119         register_counter ("Event: Code buffers", &code_buffers_ctr);
4120         register_counter ("Event: Exception throws", &exception_throws_ctr);
4121         register_counter ("Event: Exception clauses", &exception_clauses_ctr);
4122         register_counter ("Event: Monitor events", &monitor_events_ctr);
4123         register_counter ("Event: Thread starts", &thread_starts_ctr);
4124         register_counter ("Event: Thread ends", &thread_ends_ctr);
4125         register_counter ("Event: Thread names", &thread_names_ctr);
4126         register_counter ("Event: Domain loads", &domain_loads_ctr);
4127         register_counter ("Event: Domain unloads", &domain_unloads_ctr);
4128         register_counter ("Event: Domain names", &domain_names_ctr);
4129         register_counter ("Event: Context loads", &context_loads_ctr);
4130         register_counter ("Event: Context unloads", &context_unloads_ctr);
4131         register_counter ("Event: Sample binaries", &sample_ubins_ctr);
4132         register_counter ("Event: Sample symbols", &sample_usyms_ctr);
4133         register_counter ("Event: Sample hits", &sample_hits_ctr);
4134         register_counter ("Event: Counter descriptors", &counter_descriptors_ctr);
4135         register_counter ("Event: Counter samples", &counter_samples_ctr);
4136         register_counter ("Event: Performance counter descriptors", &perfcounter_descriptors_ctr);
4137         register_counter ("Event: Performance counter samples", &perfcounter_samples_ctr);
4138         register_counter ("Event: Coverage methods", &coverage_methods_ctr);
4139         register_counter ("Event: Coverage statements", &coverage_statements_ctr);
4140         register_counter ("Event: Coverage classes", &coverage_classes_ctr);
4141         register_counter ("Event: Coverage assemblies", &coverage_assemblies_ctr);
4142
4143         counters_init ();
4144
4145         /*
4146          * We must start the helper thread before the writer thread. This is
4147          * because the helper thread sets up the command port which is written to
4148          * the log header by the writer thread.
4149          */
4150         start_helper_thread ();
4151         start_writer_thread ();
4152         start_dumper_thread ();
4153 }
4154
/*
 * Set up the global log profiler state: resolve the output file name
 * (handling the '+' pid-suffix, '|' pipe, '#' fd and %t/%p conventions),
 * open the output stream, and initialize the allocators, queues, mutexes
 * and hash tables used by the helper/writer/dumper threads.
 */
static void
create_profiler (const char *args, const char *filename, GPtrArray *filters)
{
	char *nf;

	log_profiler.args = pstrdup (args);
	log_profiler.command_port = log_config.command_port;

	//If filename begin with +, append the pid at the end
	if (filename && *filename == '+')
		filename = g_strdup_printf ("%s.%d", filename + 1, getpid ());
	// NOTE(review): the string allocated above (and `nf` below) is never
	// freed; it's a one-time startup allocation, but worth confirming this
	// is intentional.

	if (!filename) {
		if (log_config.do_report)
			filename = "|mprof-report -";
		else
			filename = "output.mlpd";
		// nf may alias a string literal here, so it must not be freed.
		nf = (char*)filename;
	} else {
		nf = new_filename (filename);
		if (log_config.do_report) {
			// Pipe the log through mprof-report, writing the raw data to nf.
			int s = strlen (nf) + 32;
			char *p = (char *) g_malloc (s);
			snprintf (p, s, "|mprof-report '--out=%s' -", nf);
			g_free (nf);
			nf = p;
		}
	}
	// A leading '|' pipes output to a command, '#' appends to an existing
	// file descriptor, anything else is a plain file path.
	if (*nf == '|') {
		log_profiler.file = popen (nf + 1, "w");
		log_profiler.pipe_output = 1;
	} else if (*nf == '#') {
		int fd = strtol (nf + 1, NULL, 10);
		log_profiler.file = fdopen (fd, "a");
	} else
		log_profiler.file = fopen (nf, "wb");

	if (!log_profiler.file) {
		mono_profiler_printf_err ("Could not create log profiler output file '%s'.", nf);
		exit (1);
	}

#if defined (HAVE_SYS_ZLIB)
	// Optionally compress the output stream transparently via zlib.
	if (log_config.use_zip)
		log_profiler.gzfile = gzdopen (fileno (log_profiler.file), "wb");
#endif

	/*
	 * If you hit this assert while increasing MAX_FRAMES, you need to increase
	 * SAMPLE_BLOCK_SIZE as well.
	 */
	g_assert (SAMPLE_SLOT_SIZE (MAX_FRAMES) * 2 < LOCK_FREE_ALLOC_SB_USABLE_SIZE (SAMPLE_BLOCK_SIZE));

	// FIXME: We should free this stuff too.
	mono_lock_free_allocator_init_size_class (&log_profiler.sample_size_class, SAMPLE_SLOT_SIZE (log_config.num_frames), SAMPLE_BLOCK_SIZE);
	mono_lock_free_allocator_init_allocator (&log_profiler.sample_allocator, &log_profiler.sample_size_class, MONO_MEM_ACCOUNT_PROFILER);

	mono_lock_free_queue_init (&log_profiler.sample_reuse_queue);

	g_assert (sizeof (WriterQueueEntry) * 2 < LOCK_FREE_ALLOC_SB_USABLE_SIZE (WRITER_ENTRY_BLOCK_SIZE));

	// FIXME: We should free this stuff too.
	mono_lock_free_allocator_init_size_class (&log_profiler.writer_entry_size_class, sizeof (WriterQueueEntry), WRITER_ENTRY_BLOCK_SIZE);
	mono_lock_free_allocator_init_allocator (&log_profiler.writer_entry_allocator, &log_profiler.writer_entry_size_class, MONO_MEM_ACCOUNT_PROFILER);

	mono_lock_free_queue_init (&log_profiler.writer_queue);
	mono_os_sem_init (&log_profiler.writer_queue_sem, 0);

	mono_lock_free_queue_init (&log_profiler.dumper_queue);
	mono_os_sem_init (&log_profiler.dumper_queue_sem, 0);

	mono_os_mutex_init (&log_profiler.method_table_mutex);
	log_profiler.method_table = mono_conc_hashtable_new (NULL, NULL);

	if (log_config.collect_coverage)
		coverage_init ();

	log_profiler.coverage_filters = filters;

	log_profiler.startup_time = current_time ();
}
4236
/*
 * Declarations to silence the compiler: these are the entry points that
 * Mono will load from the shared library and call.
 */
4241 extern void
4242 mono_profiler_init (const char *desc);
4243
4244 extern void
4245 mono_profiler_init_log (const char *desc);
4246
/*
 * This is the entry point that will be used when the profiler
 * is embedded inside the main executable; it simply forwards to the
 * shared-library entry point.
 */
void
mono_profiler_init_log (const char *desc)
{
	mono_profiler_init (desc);
}
4256
4257 void
4258 mono_profiler_init (const char *desc)
4259 {
4260         GPtrArray *filters = NULL;
4261
4262         proflog_parse_args (&log_config, desc [3] == ':' ? desc + 4 : "");
4263
4264         if (log_config.cov_filter_files) {
4265                 filters = g_ptr_array_new ();
4266                 int i;
4267                 for (i = 0; i < log_config.cov_filter_files->len; ++i) {
4268                         const char *name = log_config.cov_filter_files->pdata [i];
4269                         parse_cov_filter_file (filters, name);
4270                 }
4271         }
4272
4273         init_time ();
4274
4275         PROF_TLS_INIT ();
4276
4277         create_profiler (desc, log_config.output_filename, filters);
4278
4279         mono_lls_init (&log_profiler.profiler_thread_list, NULL);
4280
4281         MonoProfilerHandle handle = log_profiler.handle = mono_profiler_install (&log_profiler);
4282
4283         /*
4284          * Required callbacks. These are either necessary for the profiler itself
4285          * to function, or provide metadata that's needed if other events (e.g.
4286          * allocations, exceptions) are dynamically enabled/disabled.
4287          */
4288
4289         mono_profiler_set_runtime_shutdown_end_callback (handle, log_shutdown);
4290         mono_profiler_set_runtime_initialized_callback (handle, runtime_initialized);
4291
4292         mono_profiler_set_gc_event_callback (handle, gc_event);
4293
4294         mono_profiler_set_thread_started_callback (handle, thread_start);
4295         mono_profiler_set_thread_stopped_callback (handle, thread_end);
4296         mono_profiler_set_thread_name_callback (handle, thread_name);
4297
4298         mono_profiler_set_domain_loaded_callback (handle, domain_loaded);
4299         mono_profiler_set_domain_unloading_callback (handle, domain_unloaded);
4300         mono_profiler_set_domain_name_callback (handle, domain_name);
4301
4302         mono_profiler_set_context_loaded_callback (handle, context_loaded);
4303         mono_profiler_set_context_unloaded_callback (handle, context_unloaded);
4304
4305         mono_profiler_set_assembly_loaded_callback (handle, assembly_loaded);
4306         mono_profiler_set_assembly_unloading_callback (handle, assembly_unloaded);
4307
4308         mono_profiler_set_image_loaded_callback (handle, image_loaded);
4309         mono_profiler_set_image_unloading_callback (handle, image_unloaded);
4310
4311         mono_profiler_set_class_loaded_callback (handle, class_loaded);
4312
4313         mono_profiler_set_jit_done_callback (handle, method_jitted);
4314
4315         if (ENABLED (PROFLOG_EXCEPTION_EVENTS)) {
4316                 mono_profiler_set_exception_throw_callback (handle, throw_exc);
4317                 mono_profiler_set_exception_clause_callback (handle, clause_exc);
4318         }
4319
4320         if (ENABLED (PROFLOG_MONITOR_EVENTS)) {
4321                 mono_profiler_set_monitor_contention_callback (handle, monitor_contention);
4322                 mono_profiler_set_monitor_acquired_callback (handle, monitor_acquired);
4323                 mono_profiler_set_monitor_failed_callback (handle, monitor_failed);
4324         }
4325
4326         if (ENABLED (PROFLOG_GC_EVENTS))
4327                 mono_profiler_set_gc_resize_callback (handle, gc_resize);
4328
4329         if (ENABLED (PROFLOG_GC_ALLOCATION_EVENTS)) {
4330                 mono_profiler_enable_allocations ();
4331                 mono_profiler_set_gc_allocation_callback (handle, gc_alloc);
4332         }
4333
4334         if (ENABLED (PROFLOG_GC_MOVE_EVENTS))
4335                 mono_profiler_set_gc_moves_callback (handle, gc_moves);
4336
4337         if (ENABLED (PROFLOG_GC_ROOT_EVENTS))
4338                 mono_profiler_set_gc_roots_callback (handle, gc_roots);
4339
4340         if (ENABLED (PROFLOG_GC_HANDLE_EVENTS)) {
4341                 mono_profiler_set_gc_handle_created_callback (handle, gc_handle_created);
4342                 mono_profiler_set_gc_handle_deleted_callback (handle, gc_handle_deleted);
4343         }
4344
4345         if (ENABLED (PROFLOG_FINALIZATION_EVENTS)) {
4346                 mono_profiler_set_gc_finalizing_callback (handle, finalize_begin);
4347                 mono_profiler_set_gc_finalized_callback (handle, finalize_end);
4348                 mono_profiler_set_gc_finalizing_object_callback (handle, finalize_object_begin);
4349                 mono_profiler_set_gc_finalized_object_callback (handle, finalize_object_end);
4350         } else if (log_config.hs_mode == MONO_PROFILER_HEAPSHOT_ON_DEMAND) {
4351                 //On Demand heapshot uses the finalizer thread to force a collection and thus a heapshot
4352                 mono_profiler_set_gc_finalized_callback (handle, finalize_end);
4353         }
4354
4355         if (ENABLED (PROFLOG_SAMPLE_EVENTS))
4356                 mono_profiler_set_sample_hit_callback (handle, mono_sample_hit);
4357
4358         if (ENABLED (PROFLOG_JIT_EVENTS))
4359                 mono_profiler_set_jit_code_buffer_callback (handle, code_buffer_new);
4360
4361         if (log_config.enter_leave) {
4362                 mono_profiler_set_call_instrumentation_filter_callback (handle, method_filter);
4363                 mono_profiler_set_method_enter_callback (handle, method_enter);
4364                 mono_profiler_set_method_leave_callback (handle, method_leave);
4365                 mono_profiler_set_method_exception_leave_callback (handle, method_exc_leave);
4366         }
4367
4368         if (log_config.collect_coverage)
4369                 mono_profiler_set_coverage_filter_callback (handle, coverage_filter);
4370
4371         mono_profiler_enable_sampling (handle);
4372
4373         /*
4374          * If no sample option was given by the user, this just leaves the sampling
4375          * thread in idle mode. We do this even if no option was given so that we
4376          * can warn if another profiler controls sampling parameters.
4377          */
4378         if (!mono_profiler_set_sample_mode (handle, log_config.sampling_mode, log_config.sample_freq))
4379                 mono_profiler_printf_err ("Another profiler controls sampling parameters; the log profiler will not be able to modify them.");
4380 }