[profiler] Consolidate log profiler state.
[mono.git] / mono / profiler / log.c
1 /*
2  * mono-profiler-log.c: mono log profiler
3  *
4  * Authors:
5  *   Paolo Molaro (lupus@ximian.com)
6  *   Alex Rønne Petersen (alexrp@xamarin.com)
7  *
8  * Copyright 2010 Novell, Inc (http://www.novell.com)
9  * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
10  * Licensed under the MIT license. See LICENSE file in the project root for full license information.
11  */
12
13 #include <config.h>
14 #include <mono/metadata/assembly.h>
15 #include <mono/metadata/debug-helpers.h>
16 #include "../metadata/metadata-internals.h"
17 #include <mono/metadata/mono-config.h>
18 #include <mono/metadata/mono-gc.h>
19 #include <mono/metadata/mono-perfcounters.h>
20 #include <mono/utils/atomic.h>
21 #include <mono/utils/hazard-pointer.h>
22 #include <mono/utils/lock-free-alloc.h>
23 #include <mono/utils/lock-free-queue.h>
24 #include <mono/utils/mono-conc-hashtable.h>
25 #include <mono/utils/mono-counters.h>
26 #include <mono/utils/mono-logger-internals.h>
27 #include <mono/utils/mono-linked-list-set.h>
28 #include <mono/utils/mono-membar.h>
29 #include <mono/utils/mono-mmap.h>
30 #include <mono/utils/mono-os-mutex.h>
31 #include <mono/utils/mono-os-semaphore.h>
32 #include <mono/utils/mono-threads.h>
33 #include <mono/utils/mono-threads-api.h>
34 #include "log.h"
35
36 #ifdef HAVE_DLFCN_H
37 #include <dlfcn.h>
38 #endif
39 #include <fcntl.h>
40 #ifdef HAVE_LINK_H
41 #include <link.h>
42 #endif
43 #ifdef HAVE_UNISTD_H
44 #include <unistd.h>
45 #endif
46 #if defined(__APPLE__)
47 #include <mach/mach_time.h>
48 #endif
49 #include <netinet/in.h>
50 #ifdef HAVE_SYS_MMAN_H
51 #include <sys/mman.h>
52 #endif
53 #include <sys/socket.h>
54 #if defined (HAVE_SYS_ZLIB)
55 #include <zlib.h>
56 #endif
57
58 /*
59  * file format:
60  * [header] [buffer]*
61  *
62  * The file is composed by a header followed by 0 or more buffers.
63  * Each buffer contains events that happened on a thread: for a given thread
64  * buffers that appear later in the file are guaranteed to contain events
65  * that happened later in time. Buffers from separate threads could be interleaved,
66  * though.
67  * Buffers are not required to be aligned.
68  *
69  * header format:
70  * [id: 4 bytes] constant value: LOG_HEADER_ID
71  * [major: 1 byte] [minor: 1 byte] major and minor version of the log profiler
72  * [format: 1 byte] version of the data format for the rest of the file
73  * [ptrsize: 1 byte] size in bytes of a pointer in the profiled program
74  * [startup time: 8 bytes] time in milliseconds since the unix epoch when the program started
75  * [timer overhead: 4 bytes] approximate overhead in nanoseconds of the timer
76  * [flags: 4 bytes] file format flags, should be 0 for now
77  * [pid: 4 bytes] pid of the profiled process
78  * [port: 2 bytes] tcp port for server if != 0
79  * [args size: 4 bytes] size of args
80  * [args: string] arguments passed to the profiler
81  * [arch size: 4 bytes] size of arch
82  * [arch: string] architecture the profiler is running on
83  * [os size: 4 bytes] size of os
84  * [os: string] operating system the profiler is running on
85  *
86  * The multiple byte integers are in little-endian format.
87  *
88  * buffer format:
89  * [buffer header] [event]*
90  * Buffers have a fixed-size header followed by 0 or more bytes of event data.
91  * Timing information and other values in the event data are usually stored
92  * as uleb128 or sleb128 integers. To save space, as noted for each item below,
93  * some data is represented as a difference between the actual value and
94  * either the last value of the same type (like for timing information) or
95  * as the difference from a value stored in a buffer header.
96  *
97  * For timing information the data is stored as uleb128, since timing
98  * increases in a monotonic way in each thread: the value is the number of
99  * nanoseconds to add to the last seen timing data in a buffer. The first value
100  * in a buffer will be calculated from the time_base field in the buffer head.
101  *
102  * Object or heap sizes are stored as uleb128.
103  * Pointer differences are stored as sleb128, instead.
104  *
105  * If an unexpected value is found, the rest of the buffer should be ignored,
106  * as generally the later values need the former to be interpreted correctly.
107  *
108  * buffer header format:
109  * [bufid: 4 bytes] constant value: BUF_ID
110  * [len: 4 bytes] size of the data following the buffer header
111  * [time_base: 8 bytes] time base in nanoseconds since an unspecified epoch
112  * [ptr_base: 8 bytes] base value for pointers
113  * [obj_base: 8 bytes] base value for object addresses
114  * [thread id: 8 bytes] system-specific thread ID (pthread_t for example)
115  * [method_base: 8 bytes] base value for MonoMethod pointers
116  *
117  * event format:
118  * [extended info: upper 4 bits] [type: lower 4 bits]
119  * [time diff: uleb128] nanoseconds since last timing
120  * [data]*
121  * The data that follows depends on type and the extended info.
122  * Type is one of the enum values in mono-profiler-log.h: TYPE_ALLOC, TYPE_GC,
123  * TYPE_METADATA, TYPE_METHOD, TYPE_EXCEPTION, TYPE_MONITOR, TYPE_HEAP.
124  * The extended info bits are interpreted based on type, see
125  * each individual event description below.
126  * strings are represented as a 0-terminated utf8 sequence.
127  *
128  * backtrace format:
129  * [num: uleb128] number of frames following
130  * [frame: sleb128]* mum MonoMethod* as a pointer difference from the last such
131  * pointer or the buffer method_base
132  *
133  * type alloc format:
134  * type: TYPE_ALLOC
135  * exinfo: zero or TYPE_ALLOC_BT
136  * [ptr: sleb128] class as a byte difference from ptr_base
137  * [obj: sleb128] object address as a byte difference from obj_base
138  * [size: uleb128] size of the object in the heap
139  * If exinfo == TYPE_ALLOC_BT, a backtrace follows.
140  *
141  * type GC format:
142  * type: TYPE_GC
143  * exinfo: one of TYPE_GC_EVENT, TYPE_GC_RESIZE, TYPE_GC_MOVE, TYPE_GC_HANDLE_CREATED[_BT],
144  * TYPE_GC_HANDLE_DESTROYED[_BT], TYPE_GC_FINALIZE_START, TYPE_GC_FINALIZE_END,
145  * TYPE_GC_FINALIZE_OBJECT_START, TYPE_GC_FINALIZE_OBJECT_END
146  * if exinfo == TYPE_GC_RESIZE
147  *      [heap_size: uleb128] new heap size
148  * if exinfo == TYPE_GC_EVENT
149  *      [event type: byte] GC event (MONO_GC_EVENT_* from profiler.h)
150  *      [generation: byte] GC generation event refers to
151  * if exinfo == TYPE_GC_MOVE
152  *      [num_objects: uleb128] number of object moves that follow
153  *      [objaddr: sleb128]+ num_objects object pointer differences from obj_base
154  *      num is always an even number: the even items are the old
155  *      addresses, the odd numbers are the respective new object addresses
156  * if exinfo == TYPE_GC_HANDLE_CREATED[_BT]
157  *      [handle_type: uleb128] MonoGCHandleType enum value
158  *      upper bits reserved as flags
159  *      [handle: uleb128] GC handle value
160  *      [objaddr: sleb128] object pointer differences from obj_base
161  *      If exinfo == TYPE_GC_HANDLE_CREATED_BT, a backtrace follows.
162  * if exinfo == TYPE_GC_HANDLE_DESTROYED[_BT]
163  *      [handle_type: uleb128] MonoGCHandleType enum value
164  *      upper bits reserved as flags
165  *      [handle: uleb128] GC handle value
166  *      If exinfo == TYPE_GC_HANDLE_DESTROYED_BT, a backtrace follows.
167  * if exinfo == TYPE_GC_FINALIZE_OBJECT_{START,END}
168  *      [object: sleb128] the object as a difference from obj_base
169  *
170  * type metadata format:
171  * type: TYPE_METADATA
172  * exinfo: one of: TYPE_END_LOAD, TYPE_END_UNLOAD (optional for TYPE_THREAD and TYPE_DOMAIN,
173  * doesn't occur for TYPE_CLASS)
174  * [mtype: byte] metadata type, one of: TYPE_CLASS, TYPE_IMAGE, TYPE_ASSEMBLY, TYPE_DOMAIN,
175  * TYPE_THREAD, TYPE_CONTEXT
176  * [pointer: sleb128] pointer of the metadata type depending on mtype
177  * if mtype == TYPE_CLASS
178  *      [image: sleb128] MonoImage* as a pointer difference from ptr_base
179  *      [name: string] full class name
180  * if mtype == TYPE_IMAGE
181  *      [name: string] image file name
182  * if mtype == TYPE_ASSEMBLY
183  *      [image: sleb128] MonoImage* as a pointer difference from ptr_base
184  *      [name: string] assembly name
185  * if mtype == TYPE_DOMAIN && exinfo == 0
186  *      [name: string] domain friendly name
187  * if mtype == TYPE_CONTEXT
188  *      [domain: sleb128] domain id as pointer
189  * if mtype == TYPE_THREAD && exinfo == 0
190  *      [name: string] thread name
191  *
192  * type method format:
193  * type: TYPE_METHOD
194  * exinfo: one of: TYPE_LEAVE, TYPE_ENTER, TYPE_EXC_LEAVE, TYPE_JIT
195  * [method: sleb128] MonoMethod* as a pointer difference from the last such
196  * pointer or the buffer method_base
197  * if exinfo == TYPE_JIT
198  *      [code address: sleb128] pointer to the native code as a diff from ptr_base
199  *      [code size: uleb128] size of the generated code
200  *      [name: string] full method name
201  *
202  * type exception format:
203  * type: TYPE_EXCEPTION
204  * exinfo: zero, TYPE_CLAUSE, or TYPE_THROW_BT
205  * if exinfo == TYPE_CLAUSE
206  *      [clause type: byte] MonoExceptionEnum enum value
207  *      [clause index: uleb128] index of the current clause
208  *      [method: sleb128] MonoMethod* as a pointer difference from the last such
209  *      pointer or the buffer method_base
210  *      [object: sleb128] the exception object as a difference from obj_base
211  * else
212  *      [object: sleb128] the exception object as a difference from obj_base
213  *      If exinfo == TYPE_THROW_BT, a backtrace follows.
214  *
215  * type runtime format:
216  * type: TYPE_RUNTIME
217  * exinfo: one of: TYPE_JITHELPER
218  * if exinfo == TYPE_JITHELPER
219  *      [type: byte] MonoProfilerCodeBufferType enum value
220  *      [buffer address: sleb128] pointer to the native code as a diff from ptr_base
221  *      [buffer size: uleb128] size of the generated code
222  *      if type == MONO_PROFILER_CODE_BUFFER_SPECIFIC_TRAMPOLINE
223  *              [name: string] buffer description name
224  *
225  * type monitor format:
226  * type: TYPE_MONITOR
227  * exinfo: zero or TYPE_MONITOR_BT
228  * [type: byte] MonoProfilerMonitorEvent enum value
229  * [object: sleb128] the lock object as a difference from obj_base
230  * If exinfo == TYPE_MONITOR_BT, a backtrace follows.
231  *
232  * type heap format
233  * type: TYPE_HEAP
234  * exinfo: one of TYPE_HEAP_START, TYPE_HEAP_END, TYPE_HEAP_OBJECT, TYPE_HEAP_ROOT
235  * if exinfo == TYPE_HEAP_OBJECT
236  *      [object: sleb128] the object as a difference from obj_base
237  *      [class: sleb128] the object MonoClass* as a difference from ptr_base
238  *      [size: uleb128] size of the object on the heap
239  *      [num_refs: uleb128] number of object references
240  *      each referenced objref is preceded by a uleb128 encoded offset: the
241  *      first offset is from the object address and each next offset is relative
242  *      to the previous one
243  *      [objrefs: sleb128]+ object referenced as a difference from obj_base
244  *      The same object can appear multiple times, but only the first time
245  *      with size != 0: in the other cases this data will only be used to
246  *      provide additional referenced objects.
247  * if exinfo == TYPE_HEAP_ROOT
248  *      [num_roots: uleb128] number of root references
249  *      [num_gc: uleb128] number of major gcs
250  *      [object: sleb128] the object as a difference from obj_base
251  *      [root_type: byte] the root_type: MonoProfileGCRootType (profiler.h)
252  *      [extra_info: uleb128] the extra_info value
253  *      object, root_type and extra_info are repeated num_roots times
254  *
255  * type sample format
256  * type: TYPE_SAMPLE
257  * exinfo: one of TYPE_SAMPLE_HIT, TYPE_SAMPLE_USYM, TYPE_SAMPLE_UBIN, TYPE_SAMPLE_COUNTERS_DESC, TYPE_SAMPLE_COUNTERS
258  * if exinfo == TYPE_SAMPLE_HIT
259  *      [thread: sleb128] thread id as difference from ptr_base
260  *      [count: uleb128] number of following instruction addresses
261  *      [ip: sleb128]* instruction pointer as difference from ptr_base
262  *      [mbt_count: uleb128] number of managed backtrace frames
263  *      [method: sleb128]* MonoMethod* as a pointer difference from the last such
264  *      pointer or the buffer method_base (the first such method can be also indentified by ip, but this is not neccessarily true)
265  * if exinfo == TYPE_SAMPLE_USYM
266  *      [address: sleb128] symbol address as a difference from ptr_base
267  *      [size: uleb128] symbol size (may be 0 if unknown)
268  *      [name: string] symbol name
269  * if exinfo == TYPE_SAMPLE_UBIN
270  *      [address: sleb128] address where binary has been loaded as a difference from ptr_base
271  *      [offset: uleb128] file offset of mapping (the same file can be mapped multiple times)
272  *      [size: uleb128] memory size
273  *      [name: string] binary name
274  * if exinfo == TYPE_SAMPLE_COUNTERS_DESC
275  *      [len: uleb128] number of counters
276  *      for i = 0 to len
277  *              [section: uleb128] section of counter
278  *              if section == MONO_COUNTER_PERFCOUNTERS:
279  *                      [section_name: string] section name of counter
280  *              [name: string] name of counter
281  *              [type: byte] type of counter
282  *              [unit: byte] unit of counter
283  *              [variance: byte] variance of counter
284  *              [index: uleb128] unique index of counter
285  * if exinfo == TYPE_SAMPLE_COUNTERS
286  *      while true:
287  *              [index: uleb128] unique index of counter
288  *              if index == 0:
289  *                      break
290  *              [type: byte] type of counter value
291  *              if type == string:
292  *                      if value == null:
293  *                              [0: byte] 0 -> value is null
294  *                      else:
295  *                              [1: byte] 1 -> value is not null
296  *                              [value: string] counter value
297  *              else:
298  *                      [value: uleb128/sleb128/double] counter value, can be sleb128, uleb128 or double (determined by using type)
299  *
300  * type coverage format
301  * type: TYPE_COVERAGE
302  * exinfo: one of TYPE_COVERAGE_METHOD, TYPE_COVERAGE_STATEMENT, TYPE_COVERAGE_ASSEMBLY, TYPE_COVERAGE_CLASS
303  * if exinfo == TYPE_COVERAGE_METHOD
304  *  [assembly: string] name of assembly
305  *  [class: string] name of the class
306  *  [name: string] name of the method
307  *  [signature: string] the signature of the method
308  *  [filename: string] the file path of the file that contains this method
309  *  [token: uleb128] the method token
310  *  [method_id: uleb128] an ID for this data to associate with the buffers of TYPE_COVERAGE_STATEMENTS
311  *  [len: uleb128] the number of TYPE_COVERAGE_BUFFERS associated with this method
312  * if exinfo == TYPE_COVERAGE_STATEMENTS
313  *  [method_id: uleb128] an the TYPE_COVERAGE_METHOD buffer to associate this with
314  *  [offset: uleb128] the il offset relative to the previous offset
315  *  [counter: uleb128] the counter for this instruction
316  *  [line: uleb128] the line of filename containing this instruction
317  *  [column: uleb128] the column containing this instruction
318  * if exinfo == TYPE_COVERAGE_ASSEMBLY
319  *  [name: string] assembly name
320  *  [guid: string] assembly GUID
321  *  [filename: string] assembly filename
322  *  [number_of_methods: uleb128] the number of methods in this assembly
323  *  [fully_covered: uleb128] the number of fully covered methods
324  *  [partially_covered: uleb128] the number of partially covered methods
325  *    currently partially_covered will always be 0, and fully_covered is the
326  *    number of methods that are fully and partially covered.
327  * if exinfo == TYPE_COVERAGE_CLASS
328  *  [name: string] assembly name
329  *  [class: string] class name
330  *  [number_of_methods: uleb128] the number of methods in this class
331  *  [fully_covered: uleb128] the number of fully covered methods
332  *  [partially_covered: uleb128] the number of partially covered methods
333  *    currently partially_covered will always be 0, and fully_covered is the
334  *    number of methods that are fully and partially covered.
335  *
336  * type meta format:
337  * type: TYPE_META
338  * exinfo: one of: TYPE_SYNC_POINT
339  * if exinfo == TYPE_SYNC_POINT
340  *      [type: byte] MonoProfilerSyncPointType enum value
341  */
342
343 // Statistics for internal profiler data structures.
344 static gint32 sample_allocations_ctr,
345               buffer_allocations_ctr;
346
347 // Statistics for profiler events.
348 static gint32 sync_points_ctr,
349               heap_objects_ctr,
350               heap_starts_ctr,
351               heap_ends_ctr,
352               heap_roots_ctr,
353               gc_events_ctr,
354               gc_resizes_ctr,
355               gc_allocs_ctr,
356               gc_moves_ctr,
357               gc_handle_creations_ctr,
358               gc_handle_deletions_ctr,
359               finalize_begins_ctr,
360               finalize_ends_ctr,
361               finalize_object_begins_ctr,
362               finalize_object_ends_ctr,
363               image_loads_ctr,
364               image_unloads_ctr,
365               assembly_loads_ctr,
366               assembly_unloads_ctr,
367               class_loads_ctr,
368               class_unloads_ctr,
369               method_entries_ctr,
370               method_exits_ctr,
371               method_exception_exits_ctr,
372               method_jits_ctr,
373               code_buffers_ctr,
374               exception_throws_ctr,
375               exception_clauses_ctr,
376               monitor_events_ctr,
377               thread_starts_ctr,
378               thread_ends_ctr,
379               thread_names_ctr,
380               domain_loads_ctr,
381               domain_unloads_ctr,
382               domain_names_ctr,
383               context_loads_ctr,
384               context_unloads_ctr,
385               sample_ubins_ctr,
386               sample_usyms_ctr,
387               sample_hits_ctr,
388               counter_descriptors_ctr,
389               counter_samples_ctr,
390               perfcounter_descriptors_ctr,
391               perfcounter_samples_ctr,
392               coverage_methods_ctr,
393               coverage_statements_ctr,
394               coverage_classes_ctr,
395               coverage_assemblies_ctr;
396
397 // Pending data to be written to the log, for a single thread.
398 // Threads periodically flush their own LogBuffers by calling safe_send
399 typedef struct _LogBuffer LogBuffer;
400 struct _LogBuffer {
401         // Next (older) LogBuffer in processing queue
402         LogBuffer *next;
403
404         uint64_t time_base;
405         uint64_t last_time;
406         uintptr_t ptr_base;
407         uintptr_t method_base;
408         uintptr_t last_method;
409         uintptr_t obj_base;
410         uintptr_t thread_id;
411
412         // Bytes allocated for this LogBuffer
413         int size;
414
415         // Start of currently unused space in buffer
416         unsigned char* cursor;
417
418         // Pointer to start-of-structure-plus-size (for convenience)
419         unsigned char* buf_end;
420
421         // Start of data in buffer. Contents follow "buffer format" described above.
422         unsigned char buf [1];
423 };
424
425 typedef struct {
426         MonoLinkedListSetNode node;
427
428         // Was this thread added to the LLS?
429         gboolean attached;
430
431         // The current log buffer for this thread.
432         LogBuffer *buffer;
433
434         // Methods referenced by events in `buffer`, see `MethodInfo`.
435         GPtrArray *methods;
436
437         // Current call depth for enter/leave events.
438         int call_depth;
439
440         // Indicates whether this thread is currently writing to its `buffer`.
441         gboolean busy;
442
443         // Has this thread written a thread end event to `buffer`?
444         gboolean ended;
445
446         // Stored in `buffer_lock_state` to take the exclusive lock.
447         int small_id;
448 } MonoProfilerThread;
449
450 // Do not use these TLS macros directly unless you know what you're doing.
451
452 #ifdef HOST_WIN32
453
454 #define PROF_TLS_SET(VAL) (TlsSetValue (profiler_tls, (VAL)))
455 #define PROF_TLS_GET() ((MonoProfilerThread *) TlsGetValue (profiler_tls))
456 #define PROF_TLS_INIT() (profiler_tls = TlsAlloc ())
457 #define PROF_TLS_FREE() (TlsFree (profiler_tls))
458
459 static DWORD profiler_tls;
460
461 #elif HAVE_KW_THREAD
462
463 #define PROF_TLS_SET(VAL) (profiler_tls = (VAL))
464 #define PROF_TLS_GET() (profiler_tls)
465 #define PROF_TLS_INIT()
466 #define PROF_TLS_FREE()
467
468 static __thread MonoProfilerThread *profiler_tls;
469
470 #else
471
472 #define PROF_TLS_SET(VAL) (pthread_setspecific (profiler_tls, (VAL)))
473 #define PROF_TLS_GET() ((MonoProfilerThread *) pthread_getspecific (profiler_tls))
474 #define PROF_TLS_INIT() (pthread_key_create (&profiler_tls, NULL))
475 #define PROF_TLS_FREE() (pthread_key_delete (profiler_tls))
476
477 static pthread_key_t profiler_tls;
478
479 #endif
480
481 static uintptr_t
482 thread_id (void)
483 {
484         return (uintptr_t) mono_native_thread_id_get ();
485 }
486
487 static uintptr_t
488 process_id (void)
489 {
490 #ifdef HOST_WIN32
491         return (uintptr_t) GetCurrentProcessId ();
492 #else
493         return (uintptr_t) getpid ();
494 #endif
495 }
496
497 #define ENABLED(EVT) (log_config.effective_mask & (EVT))
498
499 /*
500  * These macros should be used when writing an event to a log buffer. They
501  * take care of a bunch of stuff that can be repetitive and error-prone, such
502  * as attaching the current thread, acquiring/releasing the buffer lock,
503  * incrementing the event counter, expanding the log buffer, etc. They also
504  * create a scope so that it's harder to leak the LogBuffer pointer, which can
505  * be problematic as the pointer is unstable when the buffer lock isn't
506  * acquired.
507  *
508  * If the calling thread is already attached, these macros will not alter its
509  * attach mode (i.e. whether it's added to the LLS). If the thread is not
510  * attached, init_thread () will be called with add_to_lls = TRUE.
511  */
512
513 #define ENTER_LOG(COUNTER, BUFFER, SIZE) \
514         do { \
515                 MonoProfilerThread *thread__ = get_thread (); \
516                 if (thread__->attached) \
517                         buffer_lock (); \
518                 g_assert (!thread__->busy && "Why are we trying to write a new event while already writing one?"); \
519                 thread__->busy = TRUE; \
520                 InterlockedIncrement ((COUNTER)); \
521                 LogBuffer *BUFFER = ensure_logbuf_unsafe (thread__, (SIZE))
522
523 #define EXIT_LOG_EXPLICIT(SEND) \
524                 thread__->busy = FALSE; \
525                 if ((SEND)) \
526                         send_log_unsafe (TRUE); \
527                 if (thread__->attached) \
528                         buffer_unlock (); \
529         } while (0)
530
531 // Pass these to EXIT_LOG_EXPLICIT () for easier reading.
532 #define DO_SEND TRUE
533 #define NO_SEND FALSE
534
535 #define EXIT_LOG EXIT_LOG_EXPLICIT (DO_SEND)
536
537 typedef struct _BinaryObject BinaryObject;
538 struct _BinaryObject {
539         BinaryObject *next;
540         void *addr;
541         char *name;
542 };
543
544 typedef struct MonoCounterAgent {
545         MonoCounter *counter;
546         // MonoCounterAgent specific data :
547         void *value;
548         size_t value_size;
549         short index;
550         short emitted;
551         struct MonoCounterAgent *next;
552 } MonoCounterAgent;
553
554 typedef struct _PerfCounterAgent PerfCounterAgent;
555 struct _PerfCounterAgent {
556         PerfCounterAgent *next;
557         int index;
558         char *category_name;
559         char *name;
560         int type;
561         gint64 value;
562         guint8 emitted;
563         guint8 updated;
564         guint8 deleted;
565 };
566
567 struct _MonoProfiler {
568         MonoProfilerHandle handle;
569
570         FILE* file;
571 #if defined (HAVE_SYS_ZLIB)
572         gzFile gzfile;
573 #endif
574
575         char *args;
576         uint64_t startup_time;
577         int timer_overhead;
578
579 #ifdef __APPLE__
580         mach_timebase_info_data_t timebase_info;
581 #elif defined (HOST_WIN32)
582         LARGE_INTEGER pcounter_freq;
583 #endif
584
585         int pipe_output;
586         int command_port;
587         int server_socket;
588         int pipes [2];
589
590         MonoLinkedListSet profiler_thread_list;
591         volatile gint32 buffer_lock_state;
592         volatile gint32 buffer_lock_exclusive_intent;
593
594         volatile gint32 runtime_inited;
595         volatile gint32 in_shutdown;
596
597         MonoNativeThreadId helper_thread;
598
599         MonoNativeThreadId writer_thread;
600         volatile gint32 run_writer_thread;
601         MonoLockFreeQueue writer_queue;
602         MonoSemType writer_queue_sem;
603
604         MonoLockFreeAllocSizeClass writer_entry_size_class;
605         MonoLockFreeAllocator writer_entry_allocator;
606
607         MonoConcurrentHashTable *method_table;
608         mono_mutex_t method_table_mutex;
609
610         MonoNativeThreadId dumper_thread;
611         volatile gint32 run_dumper_thread;
612         MonoLockFreeQueue dumper_queue;
613         MonoSemType dumper_queue_sem;
614
615         MonoLockFreeAllocSizeClass sample_size_class;
616         MonoLockFreeAllocator sample_allocator;
617         MonoLockFreeQueue sample_reuse_queue;
618
619         BinaryObject *binary_objects;
620
621         gboolean heapshot_requested;
622         guint64 gc_count;
623         guint64 last_hs_time;
624         gboolean do_heap_walk;
625         gboolean ignore_heap_events;
626
627         mono_mutex_t counters_mutex;
628         MonoCounterAgent *counters;
629         PerfCounterAgent *perfcounters;
630         guint32 counters_index;
631
632         mono_mutex_t coverage_mutex;
633         GPtrArray *coverage_data;
634
635         GPtrArray *coverage_filters;
636         MonoConcurrentHashTable *coverage_filtered_classes;
637         MonoConcurrentHashTable *coverage_suppressed_assemblies;
638
639         MonoConcurrentHashTable *coverage_methods;
640         MonoConcurrentHashTable *coverage_assemblies;
641         MonoConcurrentHashTable *coverage_classes;
642
643         MonoConcurrentHashTable *coverage_image_to_methods;
644
645         guint32 coverage_previous_offset;
646         guint32 coverage_method_id;
647 };
648
649 static ProfilerConfig log_config;
650 static struct _MonoProfiler log_profiler;
651
652 typedef struct {
653         MonoLockFreeQueueNode node;
654         GPtrArray *methods;
655         LogBuffer *buffer;
656 } WriterQueueEntry;
657
658 #define WRITER_ENTRY_BLOCK_SIZE (mono_pagesize ())
659
660 typedef struct {
661         MonoMethod *method;
662         MonoJitInfo *ji;
663         uint64_t time;
664 } MethodInfo;
665
666 #define TICKS_PER_SEC 1000000000LL
667
668 static uint64_t
669 current_time (void)
670 {
671 #ifdef __APPLE__
672         uint64_t time = mach_absolute_time ();
673
674         time *= log_profiler.timebase_info.numer;
675         time /= log_profiler.timebase_info.denom;
676
677         return time;
678 #elif defined (HOST_WIN32)
679         LARGE_INTEGER value;
680
681         QueryPerformanceCounter (&value);
682
683         return value.QuadPart * TICKS_PER_SEC / log_profiler.pcounter_freq.QuadPart;
684 #elif defined (CLOCK_MONOTONIC)
685         struct timespec tspec;
686
687         clock_gettime (CLOCK_MONOTONIC, &tspec);
688
689         return ((uint64_t) tspec.tv_sec * TICKS_PER_SEC + tspec.tv_nsec);
690 #else
691         struct timeval tv;
692
693         gettimeofday (&tv, NULL);
694
695         return ((uint64_t) tv.tv_sec * TICKS_PER_SEC + tv.tv_usec * 1000);
696 #endif
697 }
698
699 static void
700 init_time (void)
701 {
702 #ifdef __APPLE__
703         mach_timebase_info (&log_profiler.timebase_info);
704 #elif defined (HOST_WIN32)
705         QueryPerformanceFrequency (&log_profiler.pcounter_freq);
706 #endif
707
708         uint64_t time_start = current_time ();
709
710         for (int i = 0; i < 256; ++i)
711                 current_time ();
712
713         uint64_t time_end = current_time ();
714
715         log_profiler.timer_overhead = (time_end - time_start) / 256;
716 }
717
718 static char*
719 pstrdup (const char *s)
720 {
721         int len = strlen (s) + 1;
722         char *p = (char *) g_malloc (len);
723         memcpy (p, s, len);
724         return p;
725 }
726
727 #define BUFFER_SIZE (4096 * 16)
728
729 /* Worst-case size in bytes of a 64-bit value encoded with LEB128. */
730 #define LEB128_SIZE 10
731
732 /* Size of a value encoded as a single byte. */
733 #undef BYTE_SIZE // mach/i386/vm_param.h on OS X defines this to 8, but it isn't used for anything.
734 #define BYTE_SIZE 1
735
736 /* Size in bytes of the event prefix (ID + time). */
737 #define EVENT_SIZE (BYTE_SIZE + LEB128_SIZE)
738
739 static void *
740 alloc_buffer (int size)
741 {
742         return mono_valloc (NULL, size, MONO_MMAP_READ | MONO_MMAP_WRITE | MONO_MMAP_ANON | MONO_MMAP_PRIVATE, MONO_MEM_ACCOUNT_PROFILER);
743 }
744
745 static void
746 free_buffer (void *buf, int size)
747 {
748         mono_vfree (buf, size, MONO_MEM_ACCOUNT_PROFILER);
749 }
750
751 static LogBuffer*
752 create_buffer (uintptr_t tid, int bytes)
753 {
754         LogBuffer* buf = (LogBuffer *) alloc_buffer (MAX (BUFFER_SIZE, bytes));
755
756         InterlockedIncrement (&buffer_allocations_ctr);
757
758         buf->size = BUFFER_SIZE;
759         buf->time_base = current_time ();
760         buf->last_time = buf->time_base;
761         buf->buf_end = (unsigned char *) buf + buf->size;
762         buf->cursor = buf->buf;
763         buf->thread_id = tid;
764
765         return buf;
766 }
767
768 /*
769  * Must be called with the reader lock held if thread is the current thread, or
770  * the exclusive lock if thread is a different thread. However, if thread is
771  * the current thread, and init_thread () was called with add_to_lls = FALSE,
772  * then no locking is necessary.
773  */
774 static void
775 init_buffer_state (MonoProfilerThread *thread)
776 {
777         thread->buffer = create_buffer (thread->node.key, 0);
778         thread->methods = NULL;
779 }
780
781 static void
782 clear_hazard_pointers (MonoThreadHazardPointers *hp)
783 {
784         mono_hazard_pointer_clear (hp, 0);
785         mono_hazard_pointer_clear (hp, 1);
786         mono_hazard_pointer_clear (hp, 2);
787 }
788
789 static MonoProfilerThread *
790 init_thread (gboolean add_to_lls)
791 {
792         MonoProfilerThread *thread = PROF_TLS_GET ();
793
794         /*
795          * Sometimes we may try to initialize a thread twice. One example is the
796          * main thread: We initialize it when setting up the profiler, but we will
797          * also get a thread_start () callback for it. Another example is when
798          * attaching new threads to the runtime: We may get a gc_alloc () callback
799          * for that thread's thread object (where we initialize it), soon followed
800          * by a thread_start () callback.
801          *
802          * These cases are harmless anyhow. Just return if we've already done the
803          * initialization work.
804          */
805         if (thread)
806                 return thread;
807
808         thread = g_malloc (sizeof (MonoProfilerThread));
809         thread->node.key = thread_id ();
810         thread->attached = add_to_lls;
811         thread->call_depth = 0;
812         thread->busy = 0;
813         thread->ended = FALSE;
814
815         init_buffer_state (thread);
816
817         thread->small_id = mono_thread_info_register_small_id ();
818
819         /*
820          * Some internal profiler threads don't need to be cleaned up
821          * by the main thread on shutdown.
822          */
823         if (add_to_lls) {
824                 MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();
825                 g_assert (mono_lls_insert (&log_profiler.profiler_thread_list, hp, &thread->node) && "Why can't we insert the thread in the LLS?");
826                 clear_hazard_pointers (hp);
827         }
828
829         PROF_TLS_SET (thread);
830
831         return thread;
832 }
833
834 // Only valid if init_thread () was called with add_to_lls = FALSE.
835 static void
836 deinit_thread (MonoProfilerThread *thread)
837 {
838         g_assert (!thread->attached && "Why are we manually freeing an attached thread?");
839
840         g_free (thread);
841         PROF_TLS_SET (NULL);
842 }
843
844 static MonoProfilerThread *
845 get_thread (void)
846 {
847         return init_thread (TRUE);
848 }
849
850 // Only valid if init_thread () was called with add_to_lls = FALSE.
851 static LogBuffer *
852 ensure_logbuf_unsafe (MonoProfilerThread *thread, int bytes)
853 {
854         LogBuffer *old = thread->buffer;
855
856         if (old->cursor + bytes < old->buf_end)
857                 return old;
858
859         LogBuffer *new_ = create_buffer (thread->node.key, bytes);
860         new_->next = old;
861         thread->buffer = new_;
862
863         return new_;
864 }
865
866 /*
867  * This is a reader/writer spin lock of sorts used to protect log buffers.
868  * When a thread modifies its own log buffer, it increments the reader
869  * count. When a thread wants to access log buffers of other threads, it
870  * takes the exclusive lock.
871  *
872  * `buffer_lock_state` holds the reader count in its lower 16 bits, and
873  * the small ID of the thread currently holding the exclusive (writer)
874  * lock in its upper 16 bits. Both can be zero. It's important that the
875  * whole lock state is a single word that can be read/written atomically
876  * to avoid race conditions where there could end up being readers while
877  * the writer lock is held.
878  *
879  * The lock is writer-biased. When a thread wants to take the exclusive
880  * lock, it increments `buffer_lock_exclusive_intent` which will make new
881  * readers spin until it's back to zero, then takes the exclusive lock
882  * once the reader count has reached zero. After releasing the exclusive
883  * lock, it decrements `buffer_lock_exclusive_intent`, which, when it
884  * reaches zero again, allows readers to increment the reader count.
885  *
886  * The writer bias is necessary because we take the exclusive lock in
887  * `gc_event ()` during STW. If the writer bias was not there, and a
888  * program had a large number of threads, STW-induced pauses could be
889  * significantly longer than they have to be. Also, we emit periodic
890  * sync points from the helper thread, which requires taking the
891  * exclusive lock, and we need those to arrive with a reasonably
892  * consistent frequency so that readers don't have to queue up too many
893  * events between sync points.
894  *
895  * The lock does not support recursion.
896  */
897
898 static void
899 buffer_lock (void)
900 {
901         /*
902          * If the thread holding the exclusive lock tries to modify the
903          * reader count, just make it a no-op. This way, we also avoid
904          * invoking the GC safe point macros below, which could break if
905          * done from a thread that is currently the initiator of STW.
906          *
907          * In other words, we rely on the fact that the GC thread takes
908          * the exclusive lock in the gc_event () callback when the world
909          * is about to stop.
910          */
911         if (InterlockedRead (&log_profiler.buffer_lock_state) != get_thread ()->small_id << 16) {
912                 MONO_ENTER_GC_SAFE;
913
914                 gint32 old, new_;
915
916                 do {
917                 restart:
918                         // Hold off if a thread wants to take the exclusive lock.
919                         while (InterlockedRead (&log_profiler.buffer_lock_exclusive_intent))
920                                 mono_thread_info_yield ();
921
922                         old = InterlockedRead (&log_profiler.buffer_lock_state);
923
924                         // Is a thread holding the exclusive lock?
925                         if (old >> 16) {
926                                 mono_thread_info_yield ();
927                                 goto restart;
928                         }
929
930                         new_ = old + 1;
931                 } while (InterlockedCompareExchange (&log_profiler.buffer_lock_state, new_, old) != old);
932
933                 MONO_EXIT_GC_SAFE;
934         }
935
936         mono_memory_barrier ();
937 }
938
939 static void
940 buffer_unlock (void)
941 {
942         mono_memory_barrier ();
943
944         gint32 state = InterlockedRead (&log_profiler.buffer_lock_state);
945
946         // See the comment in buffer_lock ().
947         if (state == PROF_TLS_GET ()->small_id << 16)
948                 return;
949
950         g_assert (state && "Why are we decrementing a zero reader count?");
951         g_assert (!(state >> 16) && "Why is the exclusive lock held?");
952
953         InterlockedDecrement (&log_profiler.buffer_lock_state);
954 }
955
956 static void
957 buffer_lock_excl (void)
958 {
959         gint32 new_ = get_thread ()->small_id << 16;
960
961         g_assert (InterlockedRead (&log_profiler.buffer_lock_state) != new_ && "Why are we taking the exclusive lock twice?");
962
963         InterlockedIncrement (&log_profiler.buffer_lock_exclusive_intent);
964
965         MONO_ENTER_GC_SAFE;
966
967         while (InterlockedCompareExchange (&log_profiler.buffer_lock_state, new_, 0))
968                 mono_thread_info_yield ();
969
970         MONO_EXIT_GC_SAFE;
971
972         mono_memory_barrier ();
973 }
974
975 static void
976 buffer_unlock_excl (void)
977 {
978         mono_memory_barrier ();
979
980         gint32 state = InterlockedRead (&log_profiler.buffer_lock_state);
981         gint32 excl = state >> 16;
982
983         g_assert (excl && "Why is the exclusive lock not held?");
984         g_assert (excl == PROF_TLS_GET ()->small_id && "Why does another thread hold the exclusive lock?");
985         g_assert (!(state & 0xFFFF) && "Why are there readers when the exclusive lock is held?");
986
987         InterlockedWrite (&log_profiler.buffer_lock_state, 0);
988         InterlockedDecrement (&log_profiler.buffer_lock_exclusive_intent);
989 }
990
991 static void
992 encode_uleb128 (uint64_t value, uint8_t *buf, uint8_t **endbuf)
993 {
994         uint8_t *p = buf;
995
996         do {
997                 uint8_t b = value & 0x7f;
998                 value >>= 7;
999
1000                 if (value != 0) /* more bytes to come */
1001                         b |= 0x80;
1002
1003                 *p ++ = b;
1004         } while (value);
1005
1006         *endbuf = p;
1007 }
1008
1009 static void
1010 encode_sleb128 (intptr_t value, uint8_t *buf, uint8_t **endbuf)
1011 {
1012         int more = 1;
1013         int negative = (value < 0);
1014         unsigned int size = sizeof (intptr_t) * 8;
1015         uint8_t byte;
1016         uint8_t *p = buf;
1017
1018         while (more) {
1019                 byte = value & 0x7f;
1020                 value >>= 7;
1021
1022                 /* the following is unnecessary if the
1023                  * implementation of >>= uses an arithmetic rather
1024                  * than logical shift for a signed left operand
1025                  */
1026                 if (negative)
1027                         /* sign extend */
1028                         value |= - ((intptr_t) 1 <<(size - 7));
1029
1030                 /* sign bit of byte is second high order bit (0x40) */
1031                 if ((value == 0 && !(byte & 0x40)) ||
1032                     (value == -1 && (byte & 0x40)))
1033                         more = 0;
1034                 else
1035                         byte |= 0x80;
1036
1037                 *p ++= byte;
1038         }
1039
1040         *endbuf = p;
1041 }
1042
1043 static void
1044 emit_byte (LogBuffer *logbuffer, int value)
1045 {
1046         logbuffer->cursor [0] = value;
1047         logbuffer->cursor++;
1048
1049         g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
1050 }
1051
1052 static void
1053 emit_value (LogBuffer *logbuffer, int value)
1054 {
1055         encode_uleb128 (value, logbuffer->cursor, &logbuffer->cursor);
1056
1057         g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
1058 }
1059
1060 static void
1061 emit_time (LogBuffer *logbuffer, uint64_t value)
1062 {
1063         uint64_t tdiff = value - logbuffer->last_time;
1064         encode_uleb128 (tdiff, logbuffer->cursor, &logbuffer->cursor);
1065         logbuffer->last_time = value;
1066
1067         g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
1068 }
1069
1070 static void
1071 emit_event_time (LogBuffer *logbuffer, int event, uint64_t time)
1072 {
1073         emit_byte (logbuffer, event);
1074         emit_time (logbuffer, time);
1075 }
1076
1077 static void
1078 emit_event (LogBuffer *logbuffer, int event)
1079 {
1080         emit_event_time (logbuffer, event, current_time ());
1081 }
1082
1083 static void
1084 emit_svalue (LogBuffer *logbuffer, int64_t value)
1085 {
1086         encode_sleb128 (value, logbuffer->cursor, &logbuffer->cursor);
1087
1088         g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
1089 }
1090
1091 static void
1092 emit_uvalue (LogBuffer *logbuffer, uint64_t value)
1093 {
1094         encode_uleb128 (value, logbuffer->cursor, &logbuffer->cursor);
1095
1096         g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
1097 }
1098
1099 static void
1100 emit_ptr (LogBuffer *logbuffer, const void *ptr)
1101 {
1102         if (!logbuffer->ptr_base)
1103                 logbuffer->ptr_base = (uintptr_t) ptr;
1104
1105         emit_svalue (logbuffer, (intptr_t) ptr - logbuffer->ptr_base);
1106
1107         g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
1108 }
1109
1110 static void
1111 emit_method_inner (LogBuffer *logbuffer, void *method)
1112 {
1113         if (!logbuffer->method_base) {
1114                 logbuffer->method_base = (intptr_t) method;
1115                 logbuffer->last_method = (intptr_t) method;
1116         }
1117
1118         encode_sleb128 ((intptr_t) ((char *) method - (char *) logbuffer->last_method), logbuffer->cursor, &logbuffer->cursor);
1119         logbuffer->last_method = (intptr_t) method;
1120
1121         g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
1122 }
1123
1124 // The reader lock must be held.
1125 static void
1126 register_method_local (MonoMethod *method, MonoJitInfo *ji)
1127 {
1128         MonoProfilerThread *thread = get_thread ();
1129
1130         if (!mono_conc_hashtable_lookup (log_profiler.method_table, method)) {
1131                 MethodInfo *info = (MethodInfo *) g_malloc (sizeof (MethodInfo));
1132
1133                 info->method = method;
1134                 info->ji = ji;
1135                 info->time = current_time ();
1136
1137                 GPtrArray *arr = thread->methods ? thread->methods : (thread->methods = g_ptr_array_new ());
1138                 g_ptr_array_add (arr, info);
1139         }
1140 }
1141
1142 static void
1143 emit_method (LogBuffer *logbuffer, MonoMethod *method)
1144 {
1145         register_method_local (method, NULL);
1146         emit_method_inner (logbuffer, method);
1147 }
1148
1149 static void
1150 emit_obj (LogBuffer *logbuffer, void *ptr)
1151 {
1152         if (!logbuffer->obj_base)
1153                 logbuffer->obj_base = (uintptr_t) ptr >> 3;
1154
1155         emit_svalue (logbuffer, ((uintptr_t) ptr >> 3) - logbuffer->obj_base);
1156
1157         g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
1158 }
1159
1160 static void
1161 emit_string (LogBuffer *logbuffer, const char *str, size_t size)
1162 {
1163         size_t i = 0;
1164         if (str) {
1165                 for (; i < size; i++) {
1166                         if (str[i] == '\0')
1167                                 break;
1168                         emit_byte (logbuffer, str [i]);
1169                 }
1170         }
1171         emit_byte (logbuffer, '\0');
1172 }
1173
1174 static void
1175 emit_double (LogBuffer *logbuffer, double value)
1176 {
1177         int i;
1178         unsigned char buffer[8];
1179         memcpy (buffer, &value, 8);
1180 #if G_BYTE_ORDER == G_BIG_ENDIAN
1181         for (i = 7; i >= 0; i--)
1182 #else
1183         for (i = 0; i < 8; i++)
1184 #endif
1185                 emit_byte (logbuffer, buffer[i]);
1186 }
1187
1188 static char*
1189 write_int16 (char *buf, int32_t value)
1190 {
1191         int i;
1192         for (i = 0; i < 2; ++i) {
1193                 buf [i] = value;
1194                 value >>= 8;
1195         }
1196         return buf + 2;
1197 }
1198
1199 static char*
1200 write_int32 (char *buf, int32_t value)
1201 {
1202         int i;
1203         for (i = 0; i < 4; ++i) {
1204                 buf [i] = value;
1205                 value >>= 8;
1206         }
1207         return buf + 4;
1208 }
1209
1210 static char*
1211 write_int64 (char *buf, int64_t value)
1212 {
1213         int i;
1214         for (i = 0; i < 8; ++i) {
1215                 buf [i] = value;
1216                 value >>= 8;
1217         }
1218         return buf + 8;
1219 }
1220
1221 static char *
1222 write_header_string (char *p, const char *str)
1223 {
1224         size_t len = strlen (str) + 1;
1225
1226         p = write_int32 (p, len);
1227         strcpy (p, str);
1228
1229         return p + len;
1230 }
1231
1232 static void
1233 dump_header (void)
1234 {
1235         const char *args = log_profiler.args;
1236         const char *arch = mono_config_get_cpu ();
1237         const char *os = mono_config_get_os ();
1238
1239         char *hbuf = g_malloc (
1240                 sizeof (gint32) /* header id */ +
1241                 sizeof (gint8) /* major version */ +
1242                 sizeof (gint8) /* minor version */ +
1243                 sizeof (gint8) /* data version */ +
1244                 sizeof (gint8) /* word size */ +
1245                 sizeof (gint64) /* startup time */ +
1246                 sizeof (gint32) /* timer overhead */ +
1247                 sizeof (gint32) /* flags */ +
1248                 sizeof (gint32) /* process id */ +
1249                 sizeof (gint16) /* command port */ +
1250                 sizeof (gint32) + strlen (args) + 1 /* arguments */ +
1251                 sizeof (gint32) + strlen (arch) + 1 /* architecture */ +
1252                 sizeof (gint32) + strlen (os) + 1 /* operating system */
1253         );
1254         char *p = hbuf;
1255
1256         p = write_int32 (p, LOG_HEADER_ID);
1257         *p++ = LOG_VERSION_MAJOR;
1258         *p++ = LOG_VERSION_MINOR;
1259         *p++ = LOG_DATA_VERSION;
1260         *p++ = sizeof (void *);
1261         p = write_int64 (p, ((uint64_t) time (NULL)) * 1000);
1262         p = write_int32 (p, log_profiler.timer_overhead);
1263         p = write_int32 (p, 0); /* flags */
1264         p = write_int32 (p, process_id ());
1265         p = write_int16 (p, log_profiler.command_port);
1266         p = write_header_string (p, args);
1267         p = write_header_string (p, arch);
1268         p = write_header_string (p, os);
1269
1270 #if defined (HAVE_SYS_ZLIB)
1271         if (log_profiler.gzfile) {
1272                 gzwrite (log_profiler.gzfile, hbuf, p - hbuf);
1273         } else
1274 #endif
1275         {
1276                 fwrite (hbuf, p - hbuf, 1, log_profiler.file);
1277                 fflush (log_profiler.file);
1278         }
1279
1280         g_free (hbuf);
1281 }
1282
1283 /*
1284  * Must be called with the reader lock held if thread is the current thread, or
1285  * the exclusive lock if thread is a different thread. However, if thread is
1286  * the current thread, and init_thread () was called with add_to_lls = FALSE,
1287  * then no locking is necessary.
1288  */
1289 static void
1290 send_buffer (MonoProfilerThread *thread)
1291 {
1292         WriterQueueEntry *entry = mono_lock_free_alloc (&log_profiler.writer_entry_allocator);
1293         entry->methods = thread->methods;
1294         entry->buffer = thread->buffer;
1295
1296         mono_lock_free_queue_node_init (&entry->node, FALSE);
1297
1298         mono_lock_free_queue_enqueue (&log_profiler.writer_queue, &entry->node);
1299         mono_os_sem_post (&log_profiler.writer_queue_sem);
1300 }
1301
1302 static void
1303 free_thread (gpointer p)
1304 {
1305         MonoProfilerThread *thread = p;
1306
1307         if (!thread->ended) {
1308                 /*
1309                  * The thread is being cleaned up by the main thread during
1310                  * shutdown. This typically happens for internal runtime
1311                  * threads. We need to synthesize a thread end event.
1312                  */
1313
1314                 InterlockedIncrement (&thread_ends_ctr);
1315
1316                 if (ENABLED (PROFLOG_THREAD_EVENTS)) {
1317                         LogBuffer *buf = ensure_logbuf_unsafe (thread,
1318                                 EVENT_SIZE /* event */ +
1319                                 BYTE_SIZE /* type */ +
1320                                 LEB128_SIZE /* tid */
1321                         );
1322
1323                         emit_event (buf, TYPE_END_UNLOAD | TYPE_METADATA);
1324                         emit_byte (buf, TYPE_THREAD);
1325                         emit_ptr (buf, (void *) thread->node.key);
1326                 }
1327         }
1328
1329         send_buffer (thread);
1330
1331         g_free (thread);
1332 }
1333
1334 static void
1335 remove_thread (MonoProfilerThread *thread)
1336 {
1337         MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();
1338
1339         if (mono_lls_remove (&log_profiler.profiler_thread_list, hp, &thread->node))
1340                 mono_thread_hazardous_try_free (thread, free_thread);
1341
1342         clear_hazard_pointers (hp);
1343 }
1344
1345 static void
1346 dump_buffer (LogBuffer *buf)
1347 {
1348         char hbuf [128];
1349         char *p = hbuf;
1350
1351         if (buf->next)
1352                 dump_buffer (buf->next);
1353
1354         if (buf->cursor - buf->buf) {
1355                 p = write_int32 (p, BUF_ID);
1356                 p = write_int32 (p, buf->cursor - buf->buf);
1357                 p = write_int64 (p, buf->time_base);
1358                 p = write_int64 (p, buf->ptr_base);
1359                 p = write_int64 (p, buf->obj_base);
1360                 p = write_int64 (p, buf->thread_id);
1361                 p = write_int64 (p, buf->method_base);
1362
1363 #if defined (HAVE_SYS_ZLIB)
1364                 if (log_profiler.gzfile) {
1365                         gzwrite (log_profiler.gzfile, hbuf, p - hbuf);
1366                         gzwrite (log_profiler.gzfile, buf->buf, buf->cursor - buf->buf);
1367                 } else
1368 #endif
1369                 {
1370                         fwrite (hbuf, p - hbuf, 1, log_profiler.file);
1371                         fwrite (buf->buf, buf->cursor - buf->buf, 1, log_profiler.file);
1372                         fflush (log_profiler.file);
1373                 }
1374         }
1375
1376         free_buffer (buf, buf->size);
1377 }
1378
1379 static void
1380 dump_buffer_threadless (LogBuffer *buf)
1381 {
1382         for (LogBuffer *iter = buf; iter; iter = iter->next)
1383                 iter->thread_id = 0;
1384
1385         dump_buffer (buf);
1386 }
1387
1388 // Only valid if init_thread () was called with add_to_lls = FALSE.
1389 static void
1390 send_log_unsafe (gboolean if_needed)
1391 {
1392         MonoProfilerThread *thread = PROF_TLS_GET ();
1393
1394         if (!if_needed || (if_needed && thread->buffer->next)) {
1395                 if (!thread->attached)
1396                         for (LogBuffer *iter = thread->buffer; iter; iter = iter->next)
1397                                 iter->thread_id = 0;
1398
1399                 send_buffer (thread);
1400                 init_buffer_state (thread);
1401         }
1402 }
1403
1404 // Assumes that the exclusive lock is held.
1405 static void
1406 sync_point_flush (void)
1407 {
1408         g_assert (InterlockedRead (&log_profiler.buffer_lock_state) == PROF_TLS_GET ()->small_id << 16 && "Why don't we hold the exclusive lock?");
1409
1410         MONO_LLS_FOREACH_SAFE (&log_profiler.profiler_thread_list, MonoProfilerThread, thread) {
1411                 g_assert (thread->attached && "Why is a thread in the LLS not attached?");
1412
1413                 send_buffer (thread);
1414                 init_buffer_state (thread);
1415         } MONO_LLS_FOREACH_SAFE_END
1416 }
1417
1418 // Assumes that the exclusive lock is held.
1419 static void
1420 sync_point_mark (MonoProfilerSyncPointType type)
1421 {
1422         g_assert (InterlockedRead (&log_profiler.buffer_lock_state) == PROF_TLS_GET ()->small_id << 16 && "Why don't we hold the exclusive lock?");
1423
1424         ENTER_LOG (&sync_points_ctr, logbuffer,
1425                 EVENT_SIZE /* event */ +
1426                 LEB128_SIZE /* type */
1427         );
1428
1429         emit_event (logbuffer, TYPE_META | TYPE_SYNC_POINT);
1430         emit_byte (logbuffer, type);
1431
1432         EXIT_LOG_EXPLICIT (NO_SEND);
1433
1434         send_log_unsafe (FALSE);
1435 }
1436
1437 // Assumes that the exclusive lock is held.
1438 static void
1439 sync_point (MonoProfilerSyncPointType type)
1440 {
1441         sync_point_flush ();
1442         sync_point_mark (type);
1443 }
1444
1445 static int
1446 gc_reference (MonoObject *obj, MonoClass *klass, uintptr_t size, uintptr_t num, MonoObject **refs, uintptr_t *offsets, void *data)
1447 {
1448         /* account for object alignment in the heap */
1449         size += 7;
1450         size &= ~7;
1451
1452         ENTER_LOG (&heap_objects_ctr, logbuffer,
1453                 EVENT_SIZE /* event */ +
1454                 LEB128_SIZE /* obj */ +
1455                 LEB128_SIZE /* klass */ +
1456                 LEB128_SIZE /* size */ +
1457                 LEB128_SIZE /* num */ +
1458                 num * (
1459                         LEB128_SIZE /* offset */ +
1460                         LEB128_SIZE /* ref */
1461                 )
1462         );
1463
1464         emit_event (logbuffer, TYPE_HEAP_OBJECT | TYPE_HEAP);
1465         emit_obj (logbuffer, obj);
1466         emit_ptr (logbuffer, klass);
1467         emit_value (logbuffer, size);
1468         emit_value (logbuffer, num);
1469
1470         uintptr_t last_offset = 0;
1471
1472         for (int i = 0; i < num; ++i) {
1473                 emit_value (logbuffer, offsets [i] - last_offset);
1474                 last_offset = offsets [i];
1475                 emit_obj (logbuffer, refs [i]);
1476         }
1477
1478         EXIT_LOG_EXPLICIT (DO_SEND);
1479
1480         return 0;
1481 }
1482
1483 static void
1484 gc_roots (MonoProfiler *prof, MonoObject *const *objects, const MonoProfilerGCRootType *root_types, const uintptr_t *extra_info, uint64_t num)
1485 {
1486         if (log_profiler.ignore_heap_events)
1487                 return;
1488
1489         ENTER_LOG (&heap_roots_ctr, logbuffer,
1490                 EVENT_SIZE /* event */ +
1491                 LEB128_SIZE /* num */ +
1492                 LEB128_SIZE /* collections */ +
1493                 num * (
1494                         LEB128_SIZE /* object */ +
1495                         LEB128_SIZE /* root type */ +
1496                         LEB128_SIZE /* extra info */
1497                 )
1498         );
1499
1500         emit_event (logbuffer, TYPE_HEAP_ROOT | TYPE_HEAP);
1501         emit_value (logbuffer, num);
1502         emit_value (logbuffer, mono_gc_collection_count (mono_gc_max_generation ()));
1503
1504         for (int i = 0; i < num; ++i) {
1505                 emit_obj (logbuffer, objects [i]);
1506                 emit_byte (logbuffer, root_types [i]);
1507                 emit_value (logbuffer, extra_info [i]);
1508         }
1509
1510         EXIT_LOG_EXPLICIT (DO_SEND);
1511 }
1512
1513
1514 static void
1515 trigger_on_demand_heapshot (void)
1516 {
1517         if (log_profiler.heapshot_requested)
1518                 mono_gc_collect (mono_gc_max_generation ());
1519 }
1520
1521 #define ALL_GC_EVENTS_MASK (PROFLOG_GC_MOVES_EVENTS | PROFLOG_GC_ROOT_EVENTS | PROFLOG_GC_EVENTS | PROFLOG_HEAPSHOT_FEATURE)
1522
1523 static void
1524 gc_event (MonoProfiler *profiler, MonoProfilerGCEvent ev, uint32_t generation)
1525 {
1526         if (ev == MONO_GC_EVENT_START) {
1527                 uint64_t now = current_time ();
1528
1529                 if (log_config.hs_mode_ms && (now - log_profiler.last_hs_time) / 1000 * 1000 >= log_config.hs_mode_ms)
1530                         log_profiler.do_heap_walk = TRUE;
1531                 else if (log_config.hs_mode_gc && !(log_profiler.gc_count % log_config.hs_mode_gc))
1532                         log_profiler.do_heap_walk = TRUE;
1533                 else if (log_config.hs_mode_ondemand)
1534                         log_profiler.do_heap_walk = log_profiler.heapshot_requested;
1535                 else if (!log_config.hs_mode_ms && !log_config.hs_mode_gc && generation == mono_gc_max_generation ())
1536                         log_profiler.do_heap_walk = TRUE;
1537
1538                 //If using heapshot, ignore events for collections we don't care
1539                 if (ENABLED (PROFLOG_HEAPSHOT_FEATURE)) {
1540                         // Ignore events generated during the collection itself (IE GC ROOTS)
1541                         log_profiler.ignore_heap_events = !log_profiler.do_heap_walk;
1542                 }
1543         }
1544
1545
1546         if (ENABLED (PROFLOG_GC_EVENTS)) {
1547                 ENTER_LOG (&gc_events_ctr, logbuffer,
1548                         EVENT_SIZE /* event */ +
1549                         BYTE_SIZE /* gc event */ +
1550                         BYTE_SIZE /* generation */
1551                 );
1552
1553                 emit_event (logbuffer, TYPE_GC_EVENT | TYPE_GC);
1554                 emit_byte (logbuffer, ev);
1555                 emit_byte (logbuffer, generation);
1556
1557                 EXIT_LOG_EXPLICIT (NO_SEND);
1558         }
1559
1560         switch (ev) {
1561         case MONO_GC_EVENT_START:
1562                 if (generation == mono_gc_max_generation ())
1563                         log_profiler.gc_count++;
1564
1565                 break;
1566         case MONO_GC_EVENT_PRE_STOP_WORLD_LOCKED:
1567                 /*
1568                  * Ensure that no thread can be in the middle of writing to
1569                  * a buffer when the world stops...
1570                  */
1571                 buffer_lock_excl ();
1572                 break;
1573         case MONO_GC_EVENT_POST_STOP_WORLD:
1574                 /*
1575                  * ... So that we now have a consistent view of all buffers.
1576                  * This allows us to flush them. We need to do this because
1577                  * they may contain object allocation events that need to be
1578                  * committed to the log file before any object move events
1579                  * that will be produced during this GC.
1580                  */
1581                 if (ENABLED (ALL_GC_EVENTS_MASK))
1582                         sync_point (SYNC_POINT_WORLD_STOP);
1583
1584                 /*
1585                  * All heap events are surrounded by a HEAP_START and a HEAP_ENV event.
1586                  * Right now, that's the case for GC Moves, GC Roots or heapshots.
1587                  */
1588                 if (ENABLED (PROFLOG_GC_MOVES_EVENTS | PROFLOG_GC_ROOT_EVENTS) || log_profiler.do_heap_walk) {
1589                         ENTER_LOG (&heap_starts_ctr, logbuffer,
1590                                 EVENT_SIZE /* event */
1591                         );
1592
1593                         emit_event (logbuffer, TYPE_HEAP_START | TYPE_HEAP);
1594
1595                         EXIT_LOG_EXPLICIT (DO_SEND);
1596                 }
1597
1598                 break;
1599         case MONO_GC_EVENT_PRE_START_WORLD:
1600                 if (ENABLED (PROFLOG_HEAPSHOT_FEATURE) && log_profiler.do_heap_walk)
1601                         mono_gc_walk_heap (0, gc_reference, NULL);
1602
1603                 /* Matching HEAP_END to the HEAP_START from above */
1604                 if (ENABLED (PROFLOG_GC_MOVES_EVENTS | PROFLOG_GC_ROOT_EVENTS) || log_profiler.do_heap_walk) {
1605                         ENTER_LOG (&heap_ends_ctr, logbuffer,
1606                                 EVENT_SIZE /* event */
1607                         );
1608
1609                         emit_event (logbuffer, TYPE_HEAP_END | TYPE_HEAP);
1610
1611                         EXIT_LOG_EXPLICIT (DO_SEND);
1612                 }
1613
1614                 if (ENABLED (PROFLOG_HEAPSHOT_FEATURE) && log_profiler.do_heap_walk) {
1615                         log_profiler.do_heap_walk = FALSE;
1616                         log_profiler.heapshot_requested = FALSE;
1617                         log_profiler.last_hs_time = current_time ();
1618                 }
1619
1620                 /*
1621                  * Similarly, we must now make sure that any object moves
1622                  * written to the GC thread's buffer are flushed. Otherwise,
1623                  * object allocation events for certain addresses could come
1624                  * after the move events that made those addresses available.
1625                  */
1626                 if (ENABLED (ALL_GC_EVENTS_MASK))
1627                         sync_point_mark (SYNC_POINT_WORLD_START);
1628                 break;
1629         case MONO_GC_EVENT_POST_START_WORLD_UNLOCKED:
1630                 /*
1631                  * Finally, it is safe to allow other threads to write to
1632                  * their buffers again.
1633                  */
1634                 buffer_unlock_excl ();
1635                 break;
1636         default:
1637                 break;
1638         }
1639 }
1640
1641 static void
1642 gc_resize (MonoProfiler *profiler, uintptr_t new_size)
1643 {
1644         ENTER_LOG (&gc_resizes_ctr, logbuffer,
1645                 EVENT_SIZE /* event */ +
1646                 LEB128_SIZE /* new size */
1647         );
1648
1649         emit_event (logbuffer, TYPE_GC_RESIZE | TYPE_GC);
1650         emit_value (logbuffer, new_size);
1651
1652         EXIT_LOG_EXPLICIT (DO_SEND);
1653 }
1654
1655 typedef struct {
1656         int count;
1657         MonoMethod* methods [MAX_FRAMES];
1658         int32_t il_offsets [MAX_FRAMES];
1659         int32_t native_offsets [MAX_FRAMES];
1660 } FrameData;
1661
1662 static mono_bool
1663 walk_stack (MonoMethod *method, int32_t native_offset, int32_t il_offset, mono_bool managed, void* data)
1664 {
1665         FrameData *frame = (FrameData *)data;
1666         if (method && frame->count < log_config.num_frames) {
1667                 frame->il_offsets [frame->count] = il_offset;
1668                 frame->native_offsets [frame->count] = native_offset;
1669                 frame->methods [frame->count++] = method;
1670         }
1671         return frame->count == log_config.num_frames;
1672 }
1673
1674 /*
1675  * a note about stack walks: they can cause more profiler events to fire,
1676  * so we need to make sure they don't happen after we started emitting an
1677  * event, hence the collect_bt/emit_bt split.
1678  */
1679 static void
1680 collect_bt (FrameData *data)
1681 {
1682         data->count = 0;
1683         mono_stack_walk_no_il (walk_stack, data);
1684 }
1685
1686 static void
1687 emit_bt (LogBuffer *logbuffer, FrameData *data)
1688 {
1689         emit_value (logbuffer, data->count);
1690
1691         while (data->count)
1692                 emit_method (logbuffer, data->methods [--data->count]);
1693 }
1694
1695 static void
1696 gc_alloc (MonoProfiler *prof, MonoObject *obj)
1697 {
1698         int do_bt = (!ENABLED (PROFLOG_CALL_EVENTS) && InterlockedRead (&log_profiler.runtime_inited) && !log_config.notraces) ? TYPE_ALLOC_BT : 0;
1699         FrameData data;
1700         uintptr_t len = mono_object_get_size (obj);
1701         /* account for object alignment in the heap */
1702         len += 7;
1703         len &= ~7;
1704
1705         if (do_bt)
1706                 collect_bt (&data);
1707
1708         ENTER_LOG (&gc_allocs_ctr, logbuffer,
1709                 EVENT_SIZE /* event */ +
1710                 LEB128_SIZE /* klass */ +
1711                 LEB128_SIZE /* obj */ +
1712                 LEB128_SIZE /* size */ +
1713                 (do_bt ? (
1714                         LEB128_SIZE /* count */ +
1715                         data.count * (
1716                                 LEB128_SIZE /* method */
1717                         )
1718                 ) : 0)
1719         );
1720
1721         emit_event (logbuffer, do_bt | TYPE_ALLOC);
1722         emit_ptr (logbuffer, mono_object_get_class (obj));
1723         emit_obj (logbuffer, obj);
1724         emit_value (logbuffer, len);
1725
1726         if (do_bt)
1727                 emit_bt (logbuffer, &data);
1728
1729         EXIT_LOG;
1730 }
1731
1732 static void
1733 gc_moves (MonoProfiler *prof, MonoObject *const *objects, uint64_t num)
1734 {
1735         ENTER_LOG (&gc_moves_ctr, logbuffer,
1736                 EVENT_SIZE /* event */ +
1737                 LEB128_SIZE /* num */ +
1738                 num * (
1739                         LEB128_SIZE /* object */
1740                 )
1741         );
1742
1743         emit_event (logbuffer, TYPE_GC_MOVE | TYPE_GC);
1744         emit_value (logbuffer, num);
1745
1746         for (int i = 0; i < num; ++i)
1747                 emit_obj (logbuffer, objects [i]);
1748
1749         EXIT_LOG_EXPLICIT (DO_SEND);
1750 }
1751
1752 static void
1753 gc_handle (MonoProfiler *prof, int op, MonoGCHandleType type, uint32_t handle, MonoObject *obj)
1754 {
1755         int do_bt = !ENABLED (PROFLOG_CALL_EVENTS) && InterlockedRead (&log_profiler.runtime_inited) && !log_config.notraces;
1756         FrameData data;
1757
1758         if (do_bt)
1759                 collect_bt (&data);
1760
1761         gint32 *ctr = op == MONO_PROFILER_GC_HANDLE_CREATED ? &gc_handle_creations_ctr : &gc_handle_deletions_ctr;
1762
1763         ENTER_LOG (ctr, logbuffer,
1764                 EVENT_SIZE /* event */ +
1765                 LEB128_SIZE /* type */ +
1766                 LEB128_SIZE /* handle */ +
1767                 (op == MONO_PROFILER_GC_HANDLE_CREATED ? (
1768                         LEB128_SIZE /* obj */
1769                 ) : 0) +
1770                 (do_bt ? (
1771                         LEB128_SIZE /* count */ +
1772                         data.count * (
1773                                 LEB128_SIZE /* method */
1774                         )
1775                 ) : 0)
1776         );
1777
1778         if (op == MONO_PROFILER_GC_HANDLE_CREATED)
1779                 emit_event (logbuffer, (do_bt ? TYPE_GC_HANDLE_CREATED_BT : TYPE_GC_HANDLE_CREATED) | TYPE_GC);
1780         else if (op == MONO_PROFILER_GC_HANDLE_DESTROYED)
1781                 emit_event (logbuffer, (do_bt ? TYPE_GC_HANDLE_DESTROYED_BT : TYPE_GC_HANDLE_DESTROYED) | TYPE_GC);
1782         else
1783                 g_assert_not_reached ();
1784
1785         emit_value (logbuffer, type);
1786         emit_value (logbuffer, handle);
1787
1788         if (op == MONO_PROFILER_GC_HANDLE_CREATED)
1789                 emit_obj (logbuffer, obj);
1790
1791         if (do_bt)
1792                 emit_bt (logbuffer, &data);
1793
1794         EXIT_LOG;
1795 }
1796
1797 static void
1798 gc_handle_created (MonoProfiler *prof, uint32_t handle, MonoGCHandleType type, MonoObject *obj)
1799 {
1800         gc_handle (prof, MONO_PROFILER_GC_HANDLE_CREATED, type, handle, obj);
1801 }
1802
1803 static void
1804 gc_handle_deleted (MonoProfiler *prof, uint32_t handle, MonoGCHandleType type)
1805 {
1806         gc_handle (prof, MONO_PROFILER_GC_HANDLE_DESTROYED, type, handle, NULL);
1807 }
1808
1809 static void
1810 finalize_begin (MonoProfiler *prof)
1811 {
1812         ENTER_LOG (&finalize_begins_ctr, buf,
1813                 EVENT_SIZE /* event */
1814         );
1815
1816         emit_event (buf, TYPE_GC_FINALIZE_START | TYPE_GC);
1817
1818         EXIT_LOG;
1819 }
1820
1821 static void
1822 finalize_end (MonoProfiler *prof)
1823 {
1824         trigger_on_demand_heapshot ();
1825         if (ENABLED (PROFLOG_FINALIZATION_EVENTS)) {
1826                 ENTER_LOG (&finalize_ends_ctr, buf,
1827                         EVENT_SIZE /* event */
1828                 );
1829
1830                 emit_event (buf, TYPE_GC_FINALIZE_END | TYPE_GC);
1831
1832                 EXIT_LOG;
1833         }
1834 }
1835
1836 static void
1837 finalize_object_begin (MonoProfiler *prof, MonoObject *obj)
1838 {
1839         ENTER_LOG (&finalize_object_begins_ctr, buf,
1840                 EVENT_SIZE /* event */ +
1841                 LEB128_SIZE /* obj */
1842         );
1843
1844         emit_event (buf, TYPE_GC_FINALIZE_OBJECT_START | TYPE_GC);
1845         emit_obj (buf, obj);
1846
1847         EXIT_LOG;
1848 }
1849
1850 static void
1851 finalize_object_end (MonoProfiler *prof, MonoObject *obj)
1852 {
1853         ENTER_LOG (&finalize_object_ends_ctr, buf,
1854                 EVENT_SIZE /* event */ +
1855                 LEB128_SIZE /* obj */
1856         );
1857
1858         emit_event (buf, TYPE_GC_FINALIZE_OBJECT_END | TYPE_GC);
1859         emit_obj (buf, obj);
1860
1861         EXIT_LOG;
1862 }
1863
1864 static char*
1865 push_nesting (char *p, MonoClass *klass)
1866 {
1867         MonoClass *nesting;
1868         const char *name;
1869         const char *nspace;
1870         nesting = mono_class_get_nesting_type (klass);
1871         if (nesting) {
1872                 p = push_nesting (p, nesting);
1873                 *p++ = '/';
1874                 *p = 0;
1875         }
1876         name = mono_class_get_name (klass);
1877         nspace = mono_class_get_namespace (klass);
1878         if (*nspace) {
1879                 strcpy (p, nspace);
1880                 p += strlen (nspace);
1881                 *p++ = '.';
1882                 *p = 0;
1883         }
1884         strcpy (p, name);
1885         p += strlen (name);
1886         return p;
1887 }
1888
1889 static char*
1890 type_name (MonoClass *klass)
1891 {
1892         char buf [1024];
1893         char *p;
1894         push_nesting (buf, klass);
1895         p = (char *) g_malloc (strlen (buf) + 1);
1896         strcpy (p, buf);
1897         return p;
1898 }
1899
1900 static void
1901 image_loaded (MonoProfiler *prof, MonoImage *image)
1902 {
1903         const char *name = mono_image_get_filename (image);
1904         int nlen = strlen (name) + 1;
1905
1906         ENTER_LOG (&image_loads_ctr, logbuffer,
1907                 EVENT_SIZE /* event */ +
1908                 BYTE_SIZE /* type */ +
1909                 LEB128_SIZE /* image */ +
1910                 nlen /* name */
1911         );
1912
1913         emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
1914         emit_byte (logbuffer, TYPE_IMAGE);
1915         emit_ptr (logbuffer, image);
1916         memcpy (logbuffer->cursor, name, nlen);
1917         logbuffer->cursor += nlen;
1918
1919         EXIT_LOG;
1920 }
1921
1922 static void
1923 image_unloaded (MonoProfiler *prof, MonoImage *image)
1924 {
1925         const char *name = mono_image_get_filename (image);
1926         int nlen = strlen (name) + 1;
1927
1928         ENTER_LOG (&image_unloads_ctr, logbuffer,
1929                 EVENT_SIZE /* event */ +
1930                 BYTE_SIZE /* type */ +
1931                 LEB128_SIZE /* image */ +
1932                 nlen /* name */
1933         );
1934
1935         emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
1936         emit_byte (logbuffer, TYPE_IMAGE);
1937         emit_ptr (logbuffer, image);
1938         memcpy (logbuffer->cursor, name, nlen);
1939         logbuffer->cursor += nlen;
1940
1941         EXIT_LOG;
1942 }
1943
1944 static void
1945 assembly_loaded (MonoProfiler *prof, MonoAssembly *assembly)
1946 {
1947         char *name = mono_stringify_assembly_name (mono_assembly_get_name (assembly));
1948         int nlen = strlen (name) + 1;
1949         MonoImage *image = mono_assembly_get_image (assembly);
1950
1951         ENTER_LOG (&assembly_loads_ctr, logbuffer,
1952                 EVENT_SIZE /* event */ +
1953                 BYTE_SIZE /* type */ +
1954                 LEB128_SIZE /* assembly */ +
1955                 LEB128_SIZE /* image */ +
1956                 nlen /* name */
1957         );
1958
1959         emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
1960         emit_byte (logbuffer, TYPE_ASSEMBLY);
1961         emit_ptr (logbuffer, assembly);
1962         emit_ptr (logbuffer, image);
1963         memcpy (logbuffer->cursor, name, nlen);
1964         logbuffer->cursor += nlen;
1965
1966         EXIT_LOG;
1967
1968         mono_free (name);
1969 }
1970
1971 static void
1972 assembly_unloaded (MonoProfiler *prof, MonoAssembly *assembly)
1973 {
1974         char *name = mono_stringify_assembly_name (mono_assembly_get_name (assembly));
1975         int nlen = strlen (name) + 1;
1976         MonoImage *image = mono_assembly_get_image (assembly);
1977
1978         ENTER_LOG (&assembly_unloads_ctr, logbuffer,
1979                 EVENT_SIZE /* event */ +
1980                 BYTE_SIZE /* type */ +
1981                 LEB128_SIZE /* assembly */ +
1982                 LEB128_SIZE /* image */ +
1983                 nlen /* name */
1984         );
1985
1986         emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
1987         emit_byte (logbuffer, TYPE_ASSEMBLY);
1988         emit_ptr (logbuffer, assembly);
1989         emit_ptr (logbuffer, image);
1990         memcpy (logbuffer->cursor, name, nlen);
1991         logbuffer->cursor += nlen;
1992
1993         EXIT_LOG;
1994
1995         mono_free (name);
1996 }
1997
1998 static void
1999 class_loaded (MonoProfiler *prof, MonoClass *klass)
2000 {
2001         char *name;
2002
2003         if (InterlockedRead (&log_profiler.runtime_inited))
2004                 name = mono_type_get_name (mono_class_get_type (klass));
2005         else
2006                 name = type_name (klass);
2007
2008         int nlen = strlen (name) + 1;
2009         MonoImage *image = mono_class_get_image (klass);
2010
2011         ENTER_LOG (&class_loads_ctr, logbuffer,
2012                 EVENT_SIZE /* event */ +
2013                 BYTE_SIZE /* type */ +
2014                 LEB128_SIZE /* klass */ +
2015                 LEB128_SIZE /* image */ +
2016                 nlen /* name */
2017         );
2018
2019         emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
2020         emit_byte (logbuffer, TYPE_CLASS);
2021         emit_ptr (logbuffer, klass);
2022         emit_ptr (logbuffer, image);
2023         memcpy (logbuffer->cursor, name, nlen);
2024         logbuffer->cursor += nlen;
2025
2026         EXIT_LOG;
2027
2028         if (InterlockedRead (&log_profiler.runtime_inited))
2029                 mono_free (name);
2030         else
2031                 g_free (name);
2032 }
2033
2034 static void
2035 method_enter (MonoProfiler *prof, MonoMethod *method)
2036 {
2037         if (get_thread ()->call_depth++ <= log_config.max_call_depth) {
2038                 ENTER_LOG (&method_entries_ctr, logbuffer,
2039                         EVENT_SIZE /* event */ +
2040                         LEB128_SIZE /* method */
2041                 );
2042
2043                 emit_event (logbuffer, TYPE_ENTER | TYPE_METHOD);
2044                 emit_method (logbuffer, method);
2045
2046                 EXIT_LOG;
2047         }
2048 }
2049
2050 static void
2051 method_leave (MonoProfiler *prof, MonoMethod *method)
2052 {
2053         if (--get_thread ()->call_depth <= log_config.max_call_depth) {
2054                 ENTER_LOG (&method_exits_ctr, logbuffer,
2055                         EVENT_SIZE /* event */ +
2056                         LEB128_SIZE /* method */
2057                 );
2058
2059                 emit_event (logbuffer, TYPE_LEAVE | TYPE_METHOD);
2060                 emit_method (logbuffer, method);
2061
2062                 EXIT_LOG;
2063         }
2064 }
2065
2066 static void
2067 method_exc_leave (MonoProfiler *prof, MonoMethod *method, MonoObject *exc)
2068 {
2069         if (--get_thread ()->call_depth <= log_config.max_call_depth) {
2070                 ENTER_LOG (&method_exception_exits_ctr, logbuffer,
2071                         EVENT_SIZE /* event */ +
2072                         LEB128_SIZE /* method */
2073                 );
2074
2075                 emit_event (logbuffer, TYPE_EXC_LEAVE | TYPE_METHOD);
2076                 emit_method (logbuffer, method);
2077
2078                 EXIT_LOG;
2079         }
2080 }
2081
2082 static MonoProfilerCallInstrumentationFlags
2083 method_filter (MonoProfiler *prof, MonoMethod *method)
2084 {
2085         return MONO_PROFILER_CALL_INSTRUMENTATION_PROLOGUE | MONO_PROFILER_CALL_INSTRUMENTATION_EPILOGUE;
2086 }
2087
2088 static void
2089 method_jitted (MonoProfiler *prof, MonoMethod *method, MonoJitInfo *ji)
2090 {
2091         buffer_lock ();
2092
2093         register_method_local (method, ji);
2094
2095         buffer_unlock ();
2096 }
2097
2098 static void
2099 code_buffer_new (MonoProfiler *prof, const mono_byte *buffer, uint64_t size, MonoProfilerCodeBufferType type, const void *data)
2100 {
2101         const char *name;
2102         int nlen;
2103
2104         if (type == MONO_PROFILER_CODE_BUFFER_SPECIFIC_TRAMPOLINE) {
2105                 name = (const char *) data;
2106                 nlen = strlen (name) + 1;
2107         } else {
2108                 name = NULL;
2109                 nlen = 0;
2110         }
2111
2112         ENTER_LOG (&code_buffers_ctr, logbuffer,
2113                 EVENT_SIZE /* event */ +
2114                 BYTE_SIZE /* type */ +
2115                 LEB128_SIZE /* buffer */ +
2116                 LEB128_SIZE /* size */ +
2117                 (name ? (
2118                         nlen /* name */
2119                 ) : 0)
2120         );
2121
2122         emit_event (logbuffer, TYPE_JITHELPER | TYPE_RUNTIME);
2123         emit_byte (logbuffer, type);
2124         emit_ptr (logbuffer, buffer);
2125         emit_value (logbuffer, size);
2126
2127         if (name) {
2128                 memcpy (logbuffer->cursor, name, nlen);
2129                 logbuffer->cursor += nlen;
2130         }
2131
2132         EXIT_LOG;
2133 }
2134
2135 static void
2136 throw_exc (MonoProfiler *prof, MonoObject *object)
2137 {
2138         int do_bt = (!ENABLED (PROFLOG_CALL_EVENTS) && InterlockedRead (&log_profiler.runtime_inited) && !log_config.notraces) ? TYPE_THROW_BT : 0;
2139         FrameData data;
2140
2141         if (do_bt)
2142                 collect_bt (&data);
2143
2144         ENTER_LOG (&exception_throws_ctr, logbuffer,
2145                 EVENT_SIZE /* event */ +
2146                 LEB128_SIZE /* object */ +
2147                 (do_bt ? (
2148                         LEB128_SIZE /* count */ +
2149                         data.count * (
2150                                 LEB128_SIZE /* method */
2151                         )
2152                 ) : 0)
2153         );
2154
2155         emit_event (logbuffer, do_bt | TYPE_EXCEPTION);
2156         emit_obj (logbuffer, object);
2157
2158         if (do_bt)
2159                 emit_bt (logbuffer, &data);
2160
2161         EXIT_LOG;
2162 }
2163
2164 static void
2165 clause_exc (MonoProfiler *prof, MonoMethod *method, uint32_t clause_num, MonoExceptionEnum clause_type, MonoObject *exc)
2166 {
2167         ENTER_LOG (&exception_clauses_ctr, logbuffer,
2168                 EVENT_SIZE /* event */ +
2169                 BYTE_SIZE /* clause type */ +
2170                 LEB128_SIZE /* clause num */ +
2171                 LEB128_SIZE /* method */
2172         );
2173
2174         emit_event (logbuffer, TYPE_EXCEPTION | TYPE_CLAUSE);
2175         emit_byte (logbuffer, clause_type);
2176         emit_value (logbuffer, clause_num);
2177         emit_method (logbuffer, method);
2178         emit_obj (logbuffer, exc);
2179
2180         EXIT_LOG;
2181 }
2182
2183 static void
2184 monitor_event (MonoProfiler *profiler, MonoObject *object, MonoProfilerMonitorEvent ev)
2185 {
2186         int do_bt = (!ENABLED (PROFLOG_CALL_EVENTS) && InterlockedRead (&log_profiler.runtime_inited) && !log_config.notraces) ? TYPE_MONITOR_BT : 0;
2187         FrameData data;
2188
2189         if (do_bt)
2190                 collect_bt (&data);
2191
2192         ENTER_LOG (&monitor_events_ctr, logbuffer,
2193                 EVENT_SIZE /* event */ +
2194                 BYTE_SIZE /* ev */ +
2195                 LEB128_SIZE /* object */ +
2196                 (do_bt ? (
2197                         LEB128_SIZE /* count */ +
2198                         data.count * (
2199                                 LEB128_SIZE /* method */
2200                         )
2201                 ) : 0)
2202         );
2203
2204         emit_event (logbuffer, do_bt | TYPE_MONITOR);
2205         emit_byte (logbuffer, ev);
2206         emit_obj (logbuffer, object);
2207
2208         if (do_bt)
2209                 emit_bt (logbuffer, &data);
2210
2211         EXIT_LOG;
2212 }
2213
2214 static void
2215 monitor_contention (MonoProfiler *prof, MonoObject *object)
2216 {
2217         monitor_event (prof, object, MONO_PROFILER_MONITOR_CONTENTION);
2218 }
2219
2220 static void
2221 monitor_acquired (MonoProfiler *prof, MonoObject *object)
2222 {
2223         monitor_event (prof, object, MONO_PROFILER_MONITOR_DONE);
2224 }
2225
2226 static void
2227 monitor_failed (MonoProfiler *prof, MonoObject *object)
2228 {
2229         monitor_event (prof, object, MONO_PROFILER_MONITOR_FAIL);
2230 }
2231
2232 static void
2233 thread_start (MonoProfiler *prof, uintptr_t tid)
2234 {
2235         if (ENABLED (PROFLOG_THREAD_EVENTS)) {
2236                 ENTER_LOG (&thread_starts_ctr, logbuffer,
2237                         EVENT_SIZE /* event */ +
2238                         BYTE_SIZE /* type */ +
2239                         LEB128_SIZE /* tid */
2240                 );
2241
2242                 emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
2243                 emit_byte (logbuffer, TYPE_THREAD);
2244                 emit_ptr (logbuffer, (void*) tid);
2245
2246                 EXIT_LOG;
2247         }
2248 }
2249
2250 static void
2251 thread_end (MonoProfiler *prof, uintptr_t tid)
2252 {
2253         if (ENABLED (PROFLOG_THREAD_EVENTS)) {
2254                 ENTER_LOG (&thread_ends_ctr, logbuffer,
2255                         EVENT_SIZE /* event */ +
2256                         BYTE_SIZE /* type */ +
2257                         LEB128_SIZE /* tid */
2258                 );
2259
2260                 emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
2261                 emit_byte (logbuffer, TYPE_THREAD);
2262                 emit_ptr (logbuffer, (void*) tid);
2263
2264                 EXIT_LOG_EXPLICIT (NO_SEND);
2265         }
2266
2267         MonoProfilerThread *thread = get_thread ();
2268
2269         thread->ended = TRUE;
2270         remove_thread (thread);
2271
2272         PROF_TLS_SET (NULL);
2273 }
2274
2275 static void
2276 thread_name (MonoProfiler *prof, uintptr_t tid, const char *name)
2277 {
2278         int len = strlen (name) + 1;
2279
2280         if (ENABLED (PROFLOG_THREAD_EVENTS)) {
2281                 ENTER_LOG (&thread_names_ctr, logbuffer,
2282                         EVENT_SIZE /* event */ +
2283                         BYTE_SIZE /* type */ +
2284                         LEB128_SIZE /* tid */ +
2285                         len /* name */
2286                 );
2287
2288                 emit_event (logbuffer, TYPE_METADATA);
2289                 emit_byte (logbuffer, TYPE_THREAD);
2290                 emit_ptr (logbuffer, (void*)tid);
2291                 memcpy (logbuffer->cursor, name, len);
2292                 logbuffer->cursor += len;
2293
2294                 EXIT_LOG;
2295         }
2296 }
2297
2298 static void
2299 domain_loaded (MonoProfiler *prof, MonoDomain *domain)
2300 {
2301         ENTER_LOG (&domain_loads_ctr, logbuffer,
2302                 EVENT_SIZE /* event */ +
2303                 BYTE_SIZE /* type */ +
2304                 LEB128_SIZE /* domain id */
2305         );
2306
2307         emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
2308         emit_byte (logbuffer, TYPE_DOMAIN);
2309         emit_ptr (logbuffer, (void*)(uintptr_t) mono_domain_get_id (domain));
2310
2311         EXIT_LOG;
2312 }
2313
2314 static void
2315 domain_unloaded (MonoProfiler *prof, MonoDomain *domain)
2316 {
2317         ENTER_LOG (&domain_unloads_ctr, logbuffer,
2318                 EVENT_SIZE /* event */ +
2319                 BYTE_SIZE /* type */ +
2320                 LEB128_SIZE /* domain id */
2321         );
2322
2323         emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
2324         emit_byte (logbuffer, TYPE_DOMAIN);
2325         emit_ptr (logbuffer, (void*)(uintptr_t) mono_domain_get_id (domain));
2326
2327         EXIT_LOG;
2328 }
2329
2330 static void
2331 domain_name (MonoProfiler *prof, MonoDomain *domain, const char *name)
2332 {
2333         int nlen = strlen (name) + 1;
2334
2335         ENTER_LOG (&domain_names_ctr, logbuffer,
2336                 EVENT_SIZE /* event */ +
2337                 BYTE_SIZE /* type */ +
2338                 LEB128_SIZE /* domain id */ +
2339                 nlen /* name */
2340         );
2341
2342         emit_event (logbuffer, TYPE_METADATA);
2343         emit_byte (logbuffer, TYPE_DOMAIN);
2344         emit_ptr (logbuffer, (void*)(uintptr_t) mono_domain_get_id (domain));
2345         memcpy (logbuffer->cursor, name, nlen);
2346         logbuffer->cursor += nlen;
2347
2348         EXIT_LOG;
2349 }
2350
2351 static void
2352 context_loaded (MonoProfiler *prof, MonoAppContext *context)
2353 {
2354         ENTER_LOG (&context_loads_ctr, logbuffer,
2355                 EVENT_SIZE /* event */ +
2356                 BYTE_SIZE /* type */ +
2357                 LEB128_SIZE /* context id */ +
2358                 LEB128_SIZE /* domain id */
2359         );
2360
2361         emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
2362         emit_byte (logbuffer, TYPE_CONTEXT);
2363         emit_ptr (logbuffer, (void*)(uintptr_t) mono_context_get_id (context));
2364         emit_ptr (logbuffer, (void*)(uintptr_t) mono_context_get_domain_id (context));
2365
2366         EXIT_LOG;
2367 }
2368
2369 static void
2370 context_unloaded (MonoProfiler *prof, MonoAppContext *context)
2371 {
2372         ENTER_LOG (&context_unloads_ctr, logbuffer,
2373                 EVENT_SIZE /* event */ +
2374                 BYTE_SIZE /* type */ +
2375                 LEB128_SIZE /* context id */ +
2376                 LEB128_SIZE /* domain id */
2377         );
2378
2379         emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
2380         emit_byte (logbuffer, TYPE_CONTEXT);
2381         emit_ptr (logbuffer, (void*)(uintptr_t) mono_context_get_id (context));
2382         emit_ptr (logbuffer, (void*)(uintptr_t) mono_context_get_domain_id (context));
2383
2384         EXIT_LOG;
2385 }
2386
2387 typedef struct {
2388         MonoMethod *method;
2389         MonoDomain *domain;
2390         void *base_address;
2391         int offset;
2392 } AsyncFrameInfo;
2393
2394 typedef struct {
2395         MonoLockFreeQueueNode node;
2396         uint64_t time;
2397         uintptr_t tid;
2398         const void *ip;
2399         int count;
2400         AsyncFrameInfo frames [MONO_ZERO_LEN_ARRAY];
2401 } SampleHit;
2402
2403 static mono_bool
2404 async_walk_stack (MonoMethod *method, MonoDomain *domain, void *base_address, int offset, void *data)
2405 {
2406         SampleHit *sample = (SampleHit *) data;
2407
2408         if (sample->count < log_config.num_frames) {
2409                 int i = sample->count;
2410
2411                 sample->frames [i].method = method;
2412                 sample->frames [i].domain = domain;
2413                 sample->frames [i].base_address = base_address;
2414                 sample->frames [i].offset = offset;
2415
2416                 sample->count++;
2417         }
2418
2419         return sample->count == log_config.num_frames;
2420 }
2421
2422 #define SAMPLE_SLOT_SIZE(FRAMES) (sizeof (SampleHit) + sizeof (AsyncFrameInfo) * (FRAMES - MONO_ZERO_LEN_ARRAY))
2423 #define SAMPLE_BLOCK_SIZE (mono_pagesize ())
2424
2425 static void
2426 enqueue_sample_hit (gpointer p)
2427 {
2428         SampleHit *sample = p;
2429
2430         mono_lock_free_queue_node_unpoison (&sample->node);
2431         mono_lock_free_queue_enqueue (&log_profiler.dumper_queue, &sample->node);
2432         mono_os_sem_post (&log_profiler.dumper_queue_sem);
2433 }
2434
2435 static void
2436 mono_sample_hit (MonoProfiler *profiler, const mono_byte *ip, const void *context)
2437 {
2438         /*
2439          * Please note: We rely on the runtime loading the profiler with
2440          * MONO_DL_EAGER (RTLD_NOW) so that references to runtime functions within
2441          * this function (and its siblings) are resolved when the profiler is
2442          * loaded. Otherwise, we would potentially invoke the dynamic linker when
2443          * invoking runtime functions, which is not async-signal-safe.
2444          */
2445
2446         if (InterlockedRead (&log_profiler.in_shutdown))
2447                 return;
2448
2449         SampleHit *sample = (SampleHit *) mono_lock_free_queue_dequeue (&profiler->sample_reuse_queue);
2450
2451         if (!sample) {
2452                 /*
2453                  * If we're out of reusable sample events and we're not allowed to
2454                  * allocate more, we have no choice but to drop the event.
2455                  */
2456                 if (InterlockedRead (&sample_allocations_ctr) >= log_config.max_allocated_sample_hits)
2457                         return;
2458
2459                 sample = mono_lock_free_alloc (&profiler->sample_allocator);
2460                 mono_lock_free_queue_node_init (&sample->node, TRUE);
2461
2462                 InterlockedIncrement (&sample_allocations_ctr);
2463         }
2464
2465         sample->count = 0;
2466         mono_stack_walk_async_safe (&async_walk_stack, (void *) context, sample);
2467
2468         sample->time = current_time ();
2469         sample->tid = thread_id ();
2470         sample->ip = ip;
2471
2472         mono_thread_hazardous_try_free (sample, enqueue_sample_hit);
2473 }
2474
2475 static uintptr_t *code_pages = 0;
2476 static int num_code_pages = 0;
2477 static int size_code_pages = 0;
2478 #define CPAGE_SHIFT (9)
2479 #define CPAGE_SIZE (1 << CPAGE_SHIFT)
2480 #define CPAGE_MASK (~(CPAGE_SIZE - 1))
2481 #define CPAGE_ADDR(p) ((p) & CPAGE_MASK)
2482
2483 static uintptr_t
2484 add_code_page (uintptr_t *hash, uintptr_t hsize, uintptr_t page)
2485 {
2486         uintptr_t i;
2487         uintptr_t start_pos;
2488         start_pos = (page >> CPAGE_SHIFT) % hsize;
2489         i = start_pos;
2490         do {
2491                 if (hash [i] && CPAGE_ADDR (hash [i]) == CPAGE_ADDR (page)) {
2492                         return 0;
2493                 } else if (!hash [i]) {
2494                         hash [i] = page;
2495                         return 1;
2496                 }
2497                 /* wrap around */
2498                 if (++i == hsize)
2499                         i = 0;
2500         } while (i != start_pos);
2501         g_assert_not_reached ();
2502         return 0;
2503 }
2504
2505 static void
2506 add_code_pointer (uintptr_t ip)
2507 {
2508         uintptr_t i;
2509         if (num_code_pages * 2 >= size_code_pages) {
2510                 uintptr_t *n;
2511                 uintptr_t old_size = size_code_pages;
2512                 size_code_pages *= 2;
2513                 if (size_code_pages == 0)
2514                         size_code_pages = 16;
2515                 n = (uintptr_t *) g_calloc (sizeof (uintptr_t) * size_code_pages, 1);
2516                 for (i = 0; i < old_size; ++i) {
2517                         if (code_pages [i])
2518                                 add_code_page (n, size_code_pages, code_pages [i]);
2519                 }
2520                 if (code_pages)
2521                         g_free (code_pages);
2522                 code_pages = n;
2523         }
2524         num_code_pages += add_code_page (code_pages, size_code_pages, ip & CPAGE_MASK);
2525 }
2526
2527 /* ELF code crashes on some systems. */
2528 //#if defined(HAVE_DL_ITERATE_PHDR) && defined(ELFMAG0)
2529 #if 0
2530 static void
2531 dump_ubin (const char *filename, uintptr_t load_addr, uint64_t offset, uintptr_t size)
2532 {
2533         int len = strlen (filename) + 1;
2534
2535         ENTER_LOG (&sample_ubins_ctr, logbuffer,
2536                 EVENT_SIZE /* event */ +
2537                 LEB128_SIZE /* load address */ +
2538                 LEB128_SIZE /* offset */ +
2539                 LEB128_SIZE /* size */ +
2540                 nlen /* file name */
2541         );
2542
2543         emit_event (logbuffer, TYPE_SAMPLE | TYPE_SAMPLE_UBIN);
2544         emit_ptr (logbuffer, load_addr);
2545         emit_uvalue (logbuffer, offset);
2546         emit_uvalue (logbuffer, size);
2547         memcpy (logbuffer->cursor, filename, len);
2548         logbuffer->cursor += len;
2549
2550         EXIT_LOG_EXPLICIT (DO_SEND);
2551 }
2552 #endif
2553
2554 static void
2555 dump_usym (const char *name, uintptr_t value, uintptr_t size)
2556 {
2557         int len = strlen (name) + 1;
2558
2559         ENTER_LOG (&sample_usyms_ctr, logbuffer,
2560                 EVENT_SIZE /* event */ +
2561                 LEB128_SIZE /* value */ +
2562                 LEB128_SIZE /* size */ +
2563                 len /* name */
2564         );
2565
2566         emit_event (logbuffer, TYPE_SAMPLE | TYPE_SAMPLE_USYM);
2567         emit_ptr (logbuffer, (void*)value);
2568         emit_value (logbuffer, size);
2569         memcpy (logbuffer->cursor, name, len);
2570         logbuffer->cursor += len;
2571
2572         EXIT_LOG_EXPLICIT (DO_SEND);
2573 }
2574
2575 /* ELF code crashes on some systems. */
2576 //#if defined(ELFMAG0)
2577 #if 0
2578
2579 #if SIZEOF_VOID_P == 4
2580 #define ELF_WSIZE 32
2581 #else
2582 #define ELF_WSIZE 64
2583 #endif
2584 #ifndef ElfW
2585 #define ElfW(type)      _ElfW (Elf, ELF_WSIZE, type)
2586 #define _ElfW(e,w,t)    _ElfW_1 (e, w, _##t)
2587 #define _ElfW_1(e,w,t)  e##w##t
2588 #endif
2589
2590 static void
2591 dump_elf_symbols (ElfW(Sym) *symbols, int num_symbols, const char *strtab, void *load_addr)
2592 {
2593         int i;
2594         for (i = 0; i < num_symbols; ++i) {
2595                 const char* sym;
2596                 sym =  strtab + symbols [i].st_name;
2597                 if (!symbols [i].st_name || !symbols [i].st_size || (symbols [i].st_info & 0xf) != STT_FUNC)
2598                         continue;
2599                 dump_usym (sym, (uintptr_t)load_addr + symbols [i].st_value, symbols [i].st_size);
2600         }
2601 }
2602
2603 static int
2604 read_elf_symbols (MonoProfiler *prof, const char *filename, void *load_addr)
2605 {
2606         int fd, i;
2607         void *data;
2608         struct stat statb;
2609         uint64_t file_size;
2610         ElfW(Ehdr) *header;
2611         ElfW(Shdr) *sheader;
2612         ElfW(Shdr) *shstrtabh;
2613         ElfW(Shdr) *symtabh = NULL;
2614         ElfW(Shdr) *strtabh = NULL;
2615         ElfW(Sym) *symbols = NULL;
2616         const char *strtab;
2617         int num_symbols;
2618
2619         fd = open (filename, O_RDONLY);
2620         if (fd < 0)
2621                 return 0;
2622         if (fstat (fd, &statb) != 0) {
2623                 close (fd);
2624                 return 0;
2625         }
2626         file_size = statb.st_size;
2627         data = mmap (NULL, file_size, PROT_READ, MAP_PRIVATE, fd, 0);
2628         close (fd);
2629         if (data == MAP_FAILED)
2630                 return 0;
2631         header = data;
2632         if (header->e_ident [EI_MAG0] != ELFMAG0 ||
2633                         header->e_ident [EI_MAG1] != ELFMAG1 ||
2634                         header->e_ident [EI_MAG2] != ELFMAG2 ||
2635                         header->e_ident [EI_MAG3] != ELFMAG3 ) {
2636                 munmap (data, file_size);
2637                 return 0;
2638         }
2639         sheader = (void*)((char*)data + header->e_shoff);
2640         shstrtabh = (void*)((char*)sheader + (header->e_shentsize * header->e_shstrndx));
2641         strtab = (const char*)data + shstrtabh->sh_offset;
2642         for (i = 0; i < header->e_shnum; ++i) {
2643                 if (sheader->sh_type == SHT_SYMTAB) {
2644                         symtabh = sheader;
2645                         strtabh = (void*)((char*)data + header->e_shoff + sheader->sh_link * header->e_shentsize);
2646                         break;
2647                 }
2648                 sheader = (void*)((char*)sheader + header->e_shentsize);
2649         }
2650         if (!symtabh || !strtabh) {
2651                 munmap (data, file_size);
2652                 return 0;
2653         }
2654         strtab = (const char*)data + strtabh->sh_offset;
2655         num_symbols = symtabh->sh_size / symtabh->sh_entsize;
2656         symbols = (void*)((char*)data + symtabh->sh_offset);
2657         dump_elf_symbols (symbols, num_symbols, strtab, load_addr);
2658         munmap (data, file_size);
2659         return 1;
2660 }
2661 #endif
2662
2663 /* ELF code crashes on some systems. */
2664 //#if defined(HAVE_DL_ITERATE_PHDR) && defined(ELFMAG0)
2665 #if 0
2666 static int
2667 elf_dl_callback (struct dl_phdr_info *info, size_t size, void *data)
2668 {
2669         char buf [256];
2670         const char *filename;
2671         BinaryObject *obj;
2672         char *a = (void*)info->dlpi_addr;
2673         int i, num_sym;
2674         ElfW(Dyn) *dyn = NULL;
2675         ElfW(Sym) *symtab = NULL;
2676         ElfW(Word) *hash_table = NULL;
2677         ElfW(Ehdr) *header = NULL;
2678         const char* strtab = NULL;
2679         for (obj = log_profiler.binary_objects; obj; obj = obj->next) {
2680                 if (obj->addr == a)
2681                         return 0;
2682         }
2683         filename = info->dlpi_name;
2684         if (!filename)
2685                 return 0;
2686         if (!info->dlpi_addr && !filename [0]) {
2687                 int l = readlink ("/proc/self/exe", buf, sizeof (buf) - 1);
2688                 if (l > 0) {
2689                         buf [l] = 0;
2690                         filename = buf;
2691                 }
2692         }
2693         obj = g_calloc (sizeof (BinaryObject), 1);
2694         obj->addr = (void*)info->dlpi_addr;
2695         obj->name = pstrdup (filename);
2696         obj->next = log_profiler.binary_objects;
2697         log_profiler.binary_objects = obj;
2698         a = NULL;
2699         for (i = 0; i < info->dlpi_phnum; ++i) {
2700                 if (info->dlpi_phdr[i].p_type == PT_LOAD && !header) {
2701                         header = (ElfW(Ehdr)*)(info->dlpi_addr + info->dlpi_phdr[i].p_vaddr);
2702                         if (header->e_ident [EI_MAG0] != ELFMAG0 ||
2703                                         header->e_ident [EI_MAG1] != ELFMAG1 ||
2704                                         header->e_ident [EI_MAG2] != ELFMAG2 ||
2705                                         header->e_ident [EI_MAG3] != ELFMAG3 ) {
2706                                 header = NULL;
2707                         }
2708                         dump_ubin (filename, info->dlpi_addr + info->dlpi_phdr[i].p_vaddr, info->dlpi_phdr[i].p_offset, info->dlpi_phdr[i].p_memsz);
2709                 } else if (info->dlpi_phdr[i].p_type == PT_DYNAMIC) {
2710                         dyn = (ElfW(Dyn) *)(info->dlpi_addr + info->dlpi_phdr[i].p_vaddr);
2711                 }
2712         }
2713         if (read_elf_symbols (filename, (void*)info->dlpi_addr))
2714                 return 0;
2715         if (!info->dlpi_name || !info->dlpi_name[0])
2716                 return 0;
2717         if (!dyn)
2718                 return 0;
2719         for (i = 0; dyn [i].d_tag != DT_NULL; ++i) {
2720                 if (dyn [i].d_tag == DT_SYMTAB) {
2721                         symtab = (ElfW(Sym) *)(a + dyn [i].d_un.d_ptr);
2722                 } else if (dyn [i].d_tag == DT_HASH) {
2723                         hash_table = (ElfW(Word) *)(a + dyn [i].d_un.d_ptr);
2724                 } else if (dyn [i].d_tag == DT_STRTAB) {
2725                         strtab = (const char*)(a + dyn [i].d_un.d_ptr);
2726                 }
2727         }
2728         if (!hash_table)
2729                 return 0;
2730         num_sym = hash_table [1];
2731         dump_elf_symbols (symtab, num_sym, strtab, (void*)info->dlpi_addr);
2732         return 0;
2733 }
2734
2735 static int
2736 load_binaries (void)
2737 {
2738         dl_iterate_phdr (elf_dl_callback, NULL);
2739         return 1;
2740 }
2741 #else
2742 static int
2743 load_binaries (void)
2744 {
2745         return 0;
2746 }
2747 #endif
2748
2749 static const char*
2750 symbol_for (uintptr_t code)
2751 {
2752 #ifdef HAVE_DLADDR
2753         void *ip = (void*)code;
2754         Dl_info di;
2755         if (dladdr (ip, &di)) {
2756                 if (di.dli_sname)
2757                         return di.dli_sname;
2758         } else {
2759         /*      char **names;
2760                 names = backtrace_symbols (&ip, 1);
2761                 if (names) {
2762                         const char* p = names [0];
2763                         g_free (names);
2764                         return p;
2765                 }
2766                 */
2767         }
2768 #endif
2769         return NULL;
2770 }
2771
2772 static void
2773 dump_unmanaged_coderefs (void)
2774 {
2775         int i;
2776         const char* last_symbol;
2777         uintptr_t addr, page_end;
2778
2779         if (load_binaries ())
2780                 return;
2781         for (i = 0; i < size_code_pages; ++i) {
2782                 const char* sym;
2783                 if (!code_pages [i] || code_pages [i] & 1)
2784                         continue;
2785                 last_symbol = NULL;
2786                 addr = CPAGE_ADDR (code_pages [i]);
2787                 page_end = addr + CPAGE_SIZE;
2788                 code_pages [i] |= 1;
2789                 /* we dump the symbols for the whole page */
2790                 for (; addr < page_end; addr += 16) {
2791                         sym = symbol_for (addr);
2792                         if (sym && sym == last_symbol)
2793                                 continue;
2794                         last_symbol = sym;
2795                         if (!sym)
2796                                 continue;
2797                         dump_usym (sym, addr, 0); /* let's not guess the size */
2798                 }
2799         }
2800 }
2801
2802 static void
2803 counters_add_agent (MonoCounter *counter)
2804 {
2805         if (InterlockedRead (&log_profiler.in_shutdown))
2806                 return;
2807
2808         MonoCounterAgent *agent, *item;
2809
2810         mono_os_mutex_lock (&log_profiler.counters_mutex);
2811
2812         for (agent = log_profiler.counters; agent; agent = agent->next) {
2813                 if (agent->counter == counter) {
2814                         agent->value_size = 0;
2815                         if (agent->value) {
2816                                 g_free (agent->value);
2817                                 agent->value = NULL;
2818                         }
2819                         goto done;
2820                 }
2821         }
2822
2823         agent = (MonoCounterAgent *) g_malloc (sizeof (MonoCounterAgent));
2824         agent->counter = counter;
2825         agent->value = NULL;
2826         agent->value_size = 0;
2827         agent->index = log_profiler.counters_index++;
2828         agent->emitted = 0;
2829         agent->next = NULL;
2830
2831         if (!log_profiler.counters) {
2832                 log_profiler.counters = agent;
2833         } else {
2834                 item = log_profiler.counters;
2835                 while (item->next)
2836                         item = item->next;
2837                 item->next = agent;
2838         }
2839
2840 done:
2841         mono_os_mutex_unlock (&log_profiler.counters_mutex);
2842 }
2843
2844 static mono_bool
2845 counters_init_foreach_callback (MonoCounter *counter, gpointer data)
2846 {
2847         counters_add_agent (counter);
2848         return TRUE;
2849 }
2850
2851 static void
2852 counters_init (void)
2853 {
2854         mono_os_mutex_init (&log_profiler.counters_mutex);
2855
2856         log_profiler.counters_index = 1;
2857
2858         mono_counters_on_register (&counters_add_agent);
2859         mono_counters_foreach (counters_init_foreach_callback, NULL);
2860 }
2861
2862 static void
2863 counters_emit (void)
2864 {
2865         MonoCounterAgent *agent;
2866         int len = 0;
2867         int size =
2868                 EVENT_SIZE /* event */ +
2869                 LEB128_SIZE /* len */
2870         ;
2871
2872         mono_os_mutex_lock (&log_profiler.counters_mutex);
2873
2874         for (agent = log_profiler.counters; agent; agent = agent->next) {
2875                 if (agent->emitted)
2876                         continue;
2877
2878                 size +=
2879                         LEB128_SIZE /* section */ +
2880                         strlen (mono_counter_get_name (agent->counter)) + 1 /* name */ +
2881                         BYTE_SIZE /* type */ +
2882                         BYTE_SIZE /* unit */ +
2883                         BYTE_SIZE /* variance */ +
2884                         LEB128_SIZE /* index */
2885                 ;
2886
2887                 len++;
2888         }
2889
2890         if (!len)
2891                 goto done;
2892
2893         ENTER_LOG (&counter_descriptors_ctr, logbuffer, size);
2894
2895         emit_event (logbuffer, TYPE_SAMPLE_COUNTERS_DESC | TYPE_SAMPLE);
2896         emit_value (logbuffer, len);
2897
2898         for (agent = log_profiler.counters; agent; agent = agent->next) {
2899                 const char *name;
2900
2901                 if (agent->emitted)
2902                         continue;
2903
2904                 name = mono_counter_get_name (agent->counter);
2905                 emit_value (logbuffer, mono_counter_get_section (agent->counter));
2906                 emit_string (logbuffer, name, strlen (name) + 1);
2907                 emit_byte (logbuffer, mono_counter_get_type (agent->counter));
2908                 emit_byte (logbuffer, mono_counter_get_unit (agent->counter));
2909                 emit_byte (logbuffer, mono_counter_get_variance (agent->counter));
2910                 emit_value (logbuffer, agent->index);
2911
2912                 agent->emitted = 1;
2913         }
2914
2915         EXIT_LOG_EXPLICIT (DO_SEND);
2916
2917 done:
2918         mono_os_mutex_unlock (&log_profiler.counters_mutex);
2919 }
2920
2921 static void
2922 counters_sample (uint64_t timestamp)
2923 {
2924         MonoCounterAgent *agent;
2925         MonoCounter *counter;
2926         int type;
2927         int buffer_size;
2928         void *buffer;
2929         int size;
2930
2931         counters_emit ();
2932
2933         buffer_size = 8;
2934         buffer = g_calloc (1, buffer_size);
2935
2936         mono_os_mutex_lock (&log_profiler.counters_mutex);
2937
2938         size =
2939                 EVENT_SIZE /* event */
2940         ;
2941
2942         for (agent = log_profiler.counters; agent; agent = agent->next) {
2943                 size +=
2944                         LEB128_SIZE /* index */ +
2945                         BYTE_SIZE /* type */ +
2946                         mono_counter_get_size (agent->counter) /* value */
2947                 ;
2948         }
2949
2950         size +=
2951                 LEB128_SIZE /* stop marker */
2952         ;
2953
2954         ENTER_LOG (&counter_samples_ctr, logbuffer, size);
2955
2956         emit_event_time (logbuffer, TYPE_SAMPLE_COUNTERS | TYPE_SAMPLE, timestamp);
2957
2958         for (agent = log_profiler.counters; agent; agent = agent->next) {
2959                 size_t size;
2960
2961                 counter = agent->counter;
2962
2963                 size = mono_counter_get_size (counter);
2964
2965                 if (size > buffer_size) {
2966                         buffer_size = size;
2967                         buffer = g_realloc (buffer, buffer_size);
2968                 }
2969
2970                 memset (buffer, 0, buffer_size);
2971
2972                 g_assert (mono_counters_sample (counter, buffer, size));
2973
2974                 type = mono_counter_get_type (counter);
2975
2976                 if (!agent->value) {
2977                         agent->value = g_calloc (1, size);
2978                         agent->value_size = size;
2979                 } else {
2980                         if (type == MONO_COUNTER_STRING) {
2981                                 if (strcmp (agent->value, buffer) == 0)
2982                                         continue;
2983                         } else {
2984                                 if (agent->value_size == size && memcmp (agent->value, buffer, size) == 0)
2985                                         continue;
2986                         }
2987                 }
2988
2989                 emit_uvalue (logbuffer, agent->index);
2990                 emit_byte (logbuffer, type);
2991                 switch (type) {
2992                 case MONO_COUNTER_INT:
2993 #if SIZEOF_VOID_P == 4
2994                 case MONO_COUNTER_WORD:
2995 #endif
2996                         emit_svalue (logbuffer, *(int*)buffer - *(int*)agent->value);
2997                         break;
2998                 case MONO_COUNTER_UINT:
2999                         emit_uvalue (logbuffer, *(guint*)buffer - *(guint*)agent->value);
3000                         break;
3001                 case MONO_COUNTER_TIME_INTERVAL:
3002                 case MONO_COUNTER_LONG:
3003 #if SIZEOF_VOID_P == 8
3004                 case MONO_COUNTER_WORD:
3005 #endif
3006                         emit_svalue (logbuffer, *(gint64*)buffer - *(gint64*)agent->value);
3007                         break;
3008                 case MONO_COUNTER_ULONG:
3009                         emit_uvalue (logbuffer, *(guint64*)buffer - *(guint64*)agent->value);
3010                         break;
3011                 case MONO_COUNTER_DOUBLE:
3012                         emit_double (logbuffer, *(double*)buffer);
3013                         break;
3014                 case MONO_COUNTER_STRING:
3015                         if (size == 0) {
3016                                 emit_byte (logbuffer, 0);
3017                         } else {
3018                                 emit_byte (logbuffer, 1);
3019                                 emit_string (logbuffer, (char*)buffer, size);
3020                         }
3021                         break;
3022                 default:
3023                         g_assert_not_reached ();
3024                 }
3025
3026                 if (type == MONO_COUNTER_STRING && size > agent->value_size) {
3027                         agent->value = g_realloc (agent->value, size);
3028                         agent->value_size = size;
3029                 }
3030
3031                 if (size > 0)
3032                         memcpy (agent->value, buffer, size);
3033         }
3034         g_free (buffer);
3035
3036         emit_value (logbuffer, 0);
3037
3038         EXIT_LOG_EXPLICIT (DO_SEND);
3039
3040         mono_os_mutex_unlock (&log_profiler.counters_mutex);
3041 }
3042
3043 static void
3044 perfcounters_emit (void)
3045 {
3046         PerfCounterAgent *pcagent;
3047         int len = 0;
3048         int size =
3049                 EVENT_SIZE /* event */ +
3050                 LEB128_SIZE /* len */
3051         ;
3052
3053         for (pcagent = log_profiler.perfcounters; pcagent; pcagent = pcagent->next) {
3054                 if (pcagent->emitted)
3055                         continue;
3056
3057                 size +=
3058                         LEB128_SIZE /* section */ +
3059                         strlen (pcagent->category_name) + 1 /* category name */ +
3060                         strlen (pcagent->name) + 1 /* name */ +
3061                         BYTE_SIZE /* type */ +
3062                         BYTE_SIZE /* unit */ +
3063                         BYTE_SIZE /* variance */ +
3064                         LEB128_SIZE /* index */
3065                 ;
3066
3067                 len++;
3068         }
3069
3070         if (!len)
3071                 return;
3072
3073         ENTER_LOG (&perfcounter_descriptors_ctr, logbuffer, size);
3074
3075         emit_event (logbuffer, TYPE_SAMPLE_COUNTERS_DESC | TYPE_SAMPLE);
3076         emit_value (logbuffer, len);
3077
3078         for (pcagent = log_profiler.perfcounters; pcagent; pcagent = pcagent->next) {
3079                 if (pcagent->emitted)
3080                         continue;
3081
3082                 emit_value (logbuffer, MONO_COUNTER_PERFCOUNTERS);
3083                 emit_string (logbuffer, pcagent->category_name, strlen (pcagent->category_name) + 1);
3084                 emit_string (logbuffer, pcagent->name, strlen (pcagent->name) + 1);
3085                 emit_byte (logbuffer, MONO_COUNTER_LONG);
3086                 emit_byte (logbuffer, MONO_COUNTER_RAW);
3087                 emit_byte (logbuffer, MONO_COUNTER_VARIABLE);
3088                 emit_value (logbuffer, pcagent->index);
3089
3090                 pcagent->emitted = 1;
3091         }
3092
3093         EXIT_LOG_EXPLICIT (DO_SEND);
3094 }
3095
3096 static gboolean
3097 perfcounters_foreach (char *category_name, char *name, unsigned char type, gint64 value, gpointer user_data)
3098 {
3099         PerfCounterAgent *pcagent;
3100
3101         for (pcagent = log_profiler.perfcounters; pcagent; pcagent = pcagent->next) {
3102                 if (strcmp (pcagent->category_name, category_name) != 0 || strcmp (pcagent->name, name) != 0)
3103                         continue;
3104                 if (pcagent->value == value)
3105                         return TRUE;
3106
3107                 pcagent->value = value;
3108                 pcagent->updated = 1;
3109                 pcagent->deleted = 0;
3110                 return TRUE;
3111         }
3112
3113         pcagent = g_new0 (PerfCounterAgent, 1);
3114         pcagent->next = log_profiler.perfcounters;
3115         pcagent->index = log_profiler.counters_index++;
3116         pcagent->category_name = g_strdup (category_name);
3117         pcagent->name = g_strdup (name);
3118         pcagent->type = (int) type;
3119         pcagent->value = value;
3120         pcagent->emitted = 0;
3121         pcagent->updated = 1;
3122         pcagent->deleted = 0;
3123
3124         log_profiler.perfcounters = pcagent;
3125
3126         return TRUE;
3127 }
3128
3129 static void
3130 perfcounters_sample (uint64_t timestamp)
3131 {
3132         PerfCounterAgent *pcagent;
3133         int len = 0;
3134         int size;
3135
3136         mono_os_mutex_lock (&log_profiler.counters_mutex);
3137
3138         /* mark all perfcounters as deleted, foreach will unmark them as necessary */
3139         for (pcagent = log_profiler.perfcounters; pcagent; pcagent = pcagent->next)
3140                 pcagent->deleted = 1;
3141
3142         mono_perfcounter_foreach (perfcounters_foreach, NULL);
3143
3144         perfcounters_emit ();
3145
3146         size =
3147                 EVENT_SIZE /* event */
3148         ;
3149
3150         for (pcagent = log_profiler.perfcounters; pcagent; pcagent = pcagent->next) {
3151                 if (pcagent->deleted || !pcagent->updated)
3152                         continue;
3153
3154                 size +=
3155                         LEB128_SIZE /* index */ +
3156                         BYTE_SIZE /* type */ +
3157                         LEB128_SIZE /* value */
3158                 ;
3159
3160                 len++;
3161         }
3162
3163         if (!len)
3164                 goto done;
3165
3166         size +=
3167                 LEB128_SIZE /* stop marker */
3168         ;
3169
3170         ENTER_LOG (&perfcounter_samples_ctr, logbuffer, size);
3171
3172         emit_event_time (logbuffer, TYPE_SAMPLE_COUNTERS | TYPE_SAMPLE, timestamp);
3173
3174         for (pcagent = log_profiler.perfcounters; pcagent; pcagent = pcagent->next) {
3175                 if (pcagent->deleted || !pcagent->updated)
3176                         continue;
3177                 emit_uvalue (logbuffer, pcagent->index);
3178                 emit_byte (logbuffer, MONO_COUNTER_LONG);
3179                 emit_svalue (logbuffer, pcagent->value);
3180
3181                 pcagent->updated = 0;
3182         }
3183
3184         emit_value (logbuffer, 0);
3185
3186         EXIT_LOG_EXPLICIT (DO_SEND);
3187
3188 done:
3189         mono_os_mutex_unlock (&log_profiler.counters_mutex);
3190 }
3191
3192 static void
3193 counters_and_perfcounters_sample (void)
3194 {
3195         uint64_t now = current_time ();
3196
3197         counters_sample (now);
3198         perfcounters_sample (now);
3199 }
3200
3201 typedef struct {
3202         MonoLockFreeQueueNode node;
3203         MonoMethod *method;
3204 } MethodNode;
3205
3206 typedef struct {
3207         int offset;
3208         int counter;
3209         char *filename;
3210         int line;
3211         int column;
3212 } CoverageEntry;
3213
3214 static void
3215 free_coverage_entry (gpointer data, gpointer userdata)
3216 {
3217         CoverageEntry *entry = (CoverageEntry *)data;
3218         g_free (entry->filename);
3219         g_free (entry);
3220 }
3221
3222 static void
3223 obtain_coverage_for_method (MonoProfiler *prof, const MonoProfilerCoverageData *entry)
3224 {
3225         int offset = entry->il_offset - log_profiler.coverage_previous_offset;
3226         CoverageEntry *e = g_new (CoverageEntry, 1);
3227
3228         log_profiler.coverage_previous_offset = entry->il_offset;
3229
3230         e->offset = offset;
3231         e->counter = entry->counter;
3232         e->filename = g_strdup(entry->file_name ? entry->file_name : "");
3233         e->line = entry->line;
3234         e->column = entry->column;
3235
3236         g_ptr_array_add (log_profiler.coverage_data, e);
3237 }
3238
3239 static char *
3240 parse_generic_type_names(char *name)
3241 {
3242         char *new_name, *ret;
3243         int within_generic_declaration = 0, generic_members = 1;
3244
3245         if (name == NULL || *name == '\0')
3246                 return g_strdup ("");
3247
3248         if (!(ret = new_name = (char *) g_calloc (strlen (name) * 4 + 1, sizeof (char))))
3249                 return NULL;
3250
3251         do {
3252                 switch (*name) {
3253                         case '<':
3254                                 within_generic_declaration = 1;
3255                                 break;
3256
3257                         case '>':
3258                                 within_generic_declaration = 0;
3259
3260                                 if (*(name - 1) != '<') {
3261                                         *new_name++ = '`';
3262                                         *new_name++ = '0' + generic_members;
3263                                 } else {
3264                                         memcpy (new_name, "&lt;&gt;", 8);
3265                                         new_name += 8;
3266                                 }
3267
3268                                 generic_members = 0;
3269                                 break;
3270
3271                         case ',':
3272                                 generic_members++;
3273                                 break;
3274
3275                         default:
3276                                 if (!within_generic_declaration)
3277                                         *new_name++ = *name;
3278
3279                                 break;
3280                 }
3281         } while (*name++);
3282
3283         return ret;
3284 }
3285
3286 static void
3287 build_method_buffer (gpointer key, gpointer value, gpointer userdata)
3288 {
3289         MonoMethod *method = (MonoMethod *)value;
3290         MonoClass *klass;
3291         MonoImage *image;
3292         char *class_name;
3293         const char *image_name, *method_name, *sig, *first_filename;
3294         guint i;
3295
3296         log_profiler.coverage_previous_offset = 0;
3297         log_profiler.coverage_data = g_ptr_array_new ();
3298
3299         mono_profiler_get_coverage_data (log_profiler.handle, method, obtain_coverage_for_method);
3300
3301         klass = mono_method_get_class (method);
3302         image = mono_class_get_image (klass);
3303         image_name = mono_image_get_name (image);
3304
3305         sig = mono_signature_get_desc (mono_method_signature (method), TRUE);
3306         class_name = parse_generic_type_names (mono_type_get_name (mono_class_get_type (klass)));
3307         method_name = mono_method_get_name (method);
3308
3309         if (log_profiler.coverage_data->len != 0) {
3310                 CoverageEntry *entry = (CoverageEntry *)log_profiler.coverage_data->pdata[0];
3311                 first_filename = entry->filename ? entry->filename : "";
3312         } else
3313                 first_filename = "";
3314
3315         image_name = image_name ? image_name : "";
3316         sig = sig ? sig : "";
3317         method_name = method_name ? method_name : "";
3318
3319         ENTER_LOG (&coverage_methods_ctr, logbuffer,
3320                 EVENT_SIZE /* event */ +
3321                 strlen (image_name) + 1 /* image name */ +
3322                 strlen (class_name) + 1 /* class name */ +
3323                 strlen (method_name) + 1 /* method name */ +
3324                 strlen (sig) + 1 /* signature */ +
3325                 strlen (first_filename) + 1 /* first file name */ +
3326                 LEB128_SIZE /* token */ +
3327                 LEB128_SIZE /* method id */ +
3328                 LEB128_SIZE /* entries */
3329         );
3330
3331         emit_event (logbuffer, TYPE_COVERAGE_METHOD | TYPE_COVERAGE);
3332         emit_string (logbuffer, image_name, strlen (image_name) + 1);
3333         emit_string (logbuffer, class_name, strlen (class_name) + 1);
3334         emit_string (logbuffer, method_name, strlen (method_name) + 1);
3335         emit_string (logbuffer, sig, strlen (sig) + 1);
3336         emit_string (logbuffer, first_filename, strlen (first_filename) + 1);
3337
3338         emit_uvalue (logbuffer, mono_method_get_token (method));
3339         emit_uvalue (logbuffer, log_profiler.coverage_method_id);
3340         emit_value (logbuffer, log_profiler.coverage_data->len);
3341
3342         EXIT_LOG_EXPLICIT (DO_SEND);
3343
3344         for (i = 0; i < log_profiler.coverage_data->len; i++) {
3345                 CoverageEntry *entry = (CoverageEntry *)log_profiler.coverage_data->pdata[i];
3346
3347                 ENTER_LOG (&coverage_statements_ctr, logbuffer,
3348                         EVENT_SIZE /* event */ +
3349                         LEB128_SIZE /* method id */ +
3350                         LEB128_SIZE /* offset */ +
3351                         LEB128_SIZE /* counter */ +
3352                         LEB128_SIZE /* line */ +
3353                         LEB128_SIZE /* column */
3354                 );
3355
3356                 emit_event (logbuffer, TYPE_COVERAGE_STATEMENT | TYPE_COVERAGE);
3357                 emit_uvalue (logbuffer, log_profiler.coverage_method_id);
3358                 emit_uvalue (logbuffer, entry->offset);
3359                 emit_uvalue (logbuffer, entry->counter);
3360                 emit_uvalue (logbuffer, entry->line);
3361                 emit_uvalue (logbuffer, entry->column);
3362
3363                 EXIT_LOG_EXPLICIT (DO_SEND);
3364         }
3365
3366         log_profiler.coverage_method_id++;
3367
3368         g_free (class_name);
3369
3370         g_ptr_array_foreach (log_profiler.coverage_data, free_coverage_entry, NULL);
3371         g_ptr_array_free (log_profiler.coverage_data, TRUE);
3372 }
3373
3374 /* This empties the queue */
3375 static guint
3376 count_queue (MonoLockFreeQueue *queue)
3377 {
3378         MonoLockFreeQueueNode *node;
3379         guint count = 0;
3380
3381         while ((node = mono_lock_free_queue_dequeue (queue))) {
3382                 count++;
3383                 mono_thread_hazardous_try_free (node, g_free);
3384         }
3385
3386         return count;
3387 }
3388
3389 static void
3390 build_class_buffer (gpointer key, gpointer value, gpointer userdata)
3391 {
3392         MonoClass *klass = (MonoClass *)key;
3393         MonoLockFreeQueue *class_methods = (MonoLockFreeQueue *)value;
3394         MonoImage *image;
3395         char *class_name;
3396         const char *assembly_name;
3397         int number_of_methods, partially_covered;
3398         guint fully_covered;
3399
3400         image = mono_class_get_image (klass);
3401         assembly_name = mono_image_get_name (image);
3402         class_name = mono_type_get_name (mono_class_get_type (klass));
3403
3404         assembly_name = assembly_name ? assembly_name : "";
3405         number_of_methods = mono_class_num_methods (klass);
3406         fully_covered = count_queue (class_methods);
3407         /* We don't handle partial covered yet */
3408         partially_covered = 0;
3409
3410         ENTER_LOG (&coverage_classes_ctr, logbuffer,
3411                 EVENT_SIZE /* event */ +
3412                 strlen (assembly_name) + 1 /* assembly name */ +
3413                 strlen (class_name) + 1 /* class name */ +
3414                 LEB128_SIZE /* no. methods */ +
3415                 LEB128_SIZE /* fully covered */ +
3416                 LEB128_SIZE /* partially covered */
3417         );
3418
3419         emit_event (logbuffer, TYPE_COVERAGE_CLASS | TYPE_COVERAGE);
3420         emit_string (logbuffer, assembly_name, strlen (assembly_name) + 1);
3421         emit_string (logbuffer, class_name, strlen (class_name) + 1);
3422         emit_uvalue (logbuffer, number_of_methods);
3423         emit_uvalue (logbuffer, fully_covered);
3424         emit_uvalue (logbuffer, partially_covered);
3425
3426         EXIT_LOG_EXPLICIT (DO_SEND);
3427
3428         g_free (class_name);
3429 }
3430
3431 static void
3432 get_coverage_for_image (MonoImage *image, int *number_of_methods, guint *fully_covered, int *partially_covered)
3433 {
3434         MonoLockFreeQueue *image_methods = (MonoLockFreeQueue *)mono_conc_hashtable_lookup (log_profiler.coverage_image_to_methods, image);
3435
3436         *number_of_methods = mono_image_get_table_rows (image, MONO_TABLE_METHOD);
3437         if (image_methods)
3438                 *fully_covered = count_queue (image_methods);
3439         else
3440                 *fully_covered = 0;
3441
3442         // FIXME: We don't handle partially covered yet.
3443         *partially_covered = 0;
3444 }
3445
3446 static void
3447 build_assembly_buffer (gpointer key, gpointer value, gpointer userdata)
3448 {
3449         MonoAssembly *assembly = (MonoAssembly *)value;
3450         MonoImage *image = mono_assembly_get_image (assembly);
3451         const char *name, *guid, *filename;
3452         int number_of_methods = 0, partially_covered = 0;
3453         guint fully_covered = 0;
3454
3455         name = mono_image_get_name (image);
3456         guid = mono_image_get_guid (image);
3457         filename = mono_image_get_filename (image);
3458
3459         name = name ? name : "";
3460         guid = guid ? guid : "";
3461         filename = filename ? filename : "";
3462
3463         get_coverage_for_image (image, &number_of_methods, &fully_covered, &partially_covered);
3464
3465         ENTER_LOG (&coverage_assemblies_ctr, logbuffer,
3466                 EVENT_SIZE /* event */ +
3467                 strlen (name) + 1 /* name */ +
3468                 strlen (guid) + 1 /* guid */ +
3469                 strlen (filename) + 1 /* file name */ +
3470                 LEB128_SIZE /* no. methods */ +
3471                 LEB128_SIZE /* fully covered */ +
3472                 LEB128_SIZE /* partially covered */
3473         );
3474
3475         emit_event (logbuffer, TYPE_COVERAGE_ASSEMBLY | TYPE_COVERAGE);
3476         emit_string (logbuffer, name, strlen (name) + 1);
3477         emit_string (logbuffer, guid, strlen (guid) + 1);
3478         emit_string (logbuffer, filename, strlen (filename) + 1);
3479         emit_uvalue (logbuffer, number_of_methods);
3480         emit_uvalue (logbuffer, fully_covered);
3481         emit_uvalue (logbuffer, partially_covered);
3482
3483         EXIT_LOG_EXPLICIT (DO_SEND);
3484 }
3485
3486 static void
3487 dump_coverage (void)
3488 {
3489         mono_os_mutex_lock (&log_profiler.coverage_mutex);
3490         mono_conc_hashtable_foreach (log_profiler.coverage_assemblies, build_assembly_buffer, NULL);
3491         mono_conc_hashtable_foreach (log_profiler.coverage_classes, build_class_buffer, NULL);
3492         mono_conc_hashtable_foreach (log_profiler.coverage_methods, build_method_buffer, NULL);
3493         mono_os_mutex_unlock (&log_profiler.coverage_mutex);
3494 }
3495
3496 static MonoLockFreeQueueNode *
3497 create_method_node (MonoMethod *method)
3498 {
3499         MethodNode *node = (MethodNode *) g_malloc (sizeof (MethodNode));
3500         mono_lock_free_queue_node_init ((MonoLockFreeQueueNode *) node, FALSE);
3501         node->method = method;
3502
3503         return (MonoLockFreeQueueNode *) node;
3504 }
3505
3506 static gboolean
3507 coverage_filter (MonoProfiler *prof, MonoMethod *method)
3508 {
3509         MonoError error;
3510         MonoClass *klass;
3511         MonoImage *image;
3512         MonoAssembly *assembly;
3513         MonoMethodHeader *header;
3514         guint32 iflags, flags, code_size;
3515         char *fqn, *classname;
3516         gboolean has_positive, found;
3517         MonoLockFreeQueue *image_methods, *class_methods;
3518         MonoLockFreeQueueNode *node;
3519
3520         flags = mono_method_get_flags (method, &iflags);
3521         if ((iflags & 0x1000 /*METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL*/) ||
3522             (flags & 0x2000 /*METHOD_ATTRIBUTE_PINVOKE_IMPL*/))
3523                 return FALSE;
3524
3525         // Don't need to do anything else if we're already tracking this method
3526         if (mono_conc_hashtable_lookup (log_profiler.coverage_methods, method))
3527                 return TRUE;
3528
3529         klass = mono_method_get_class (method);
3530         image = mono_class_get_image (klass);
3531
3532         // Don't handle coverage for the core assemblies
3533         if (mono_conc_hashtable_lookup (log_profiler.coverage_suppressed_assemblies, (gpointer) mono_image_get_name (image)) != NULL)
3534                 return FALSE;
3535
3536         if (prof->coverage_filters) {
3537                 /* Check already filtered classes first */
3538                 if (mono_conc_hashtable_lookup (log_profiler.coverage_filtered_classes, klass))
3539                         return FALSE;
3540
3541                 classname = mono_type_get_name (mono_class_get_type (klass));
3542
3543                 fqn = g_strdup_printf ("[%s]%s", mono_image_get_name (image), classname);
3544
3545                 // Check positive filters first
3546                 has_positive = FALSE;
3547                 found = FALSE;
3548                 for (guint i = 0; i < prof->coverage_filters->len; ++i) {
3549                         char *filter = (char *)g_ptr_array_index (prof->coverage_filters, i);
3550
3551                         if (filter [0] == '+') {
3552                                 filter = &filter [1];
3553
3554                                 if (strstr (fqn, filter) != NULL)
3555                                         found = TRUE;
3556
3557                                 has_positive = TRUE;
3558                         }
3559                 }
3560
3561                 if (has_positive && !found) {
3562                         mono_os_mutex_lock (&log_profiler.coverage_mutex);
3563                         mono_conc_hashtable_insert (log_profiler.coverage_filtered_classes, klass, klass);
3564                         mono_os_mutex_unlock (&log_profiler.coverage_mutex);
3565                         g_free (fqn);
3566                         g_free (classname);
3567
3568                         return FALSE;
3569                 }
3570
3571                 for (guint i = 0; i < prof->coverage_filters->len; ++i) {
3572                         // FIXME: Is substring search sufficient?
3573                         char *filter = (char *)g_ptr_array_index (prof->coverage_filters, i);
3574                         if (filter [0] == '+')
3575                                 continue;
3576
3577                         // Skip '-'
3578                         filter = &filter [1];
3579
3580                         if (strstr (fqn, filter) != NULL) {
3581                                 mono_os_mutex_lock (&log_profiler.coverage_mutex);
3582                                 mono_conc_hashtable_insert (log_profiler.coverage_filtered_classes, klass, klass);
3583                                 mono_os_mutex_unlock (&log_profiler.coverage_mutex);
3584                                 g_free (fqn);
3585                                 g_free (classname);
3586
3587                                 return FALSE;
3588                         }
3589                 }
3590
3591                 g_free (fqn);
3592                 g_free (classname);
3593         }
3594
3595         header = mono_method_get_header_checked (method, &error);
3596         mono_error_cleanup (&error);
3597
3598         mono_method_header_get_code (header, &code_size, NULL);
3599
3600         assembly = mono_image_get_assembly (image);
3601
3602         // Need to keep the assemblies around for as long as they are kept in the hashtable
3603         // Nunit, for example, has a habit of unloading them before the coverage statistics are
3604         // generated causing a crash. See https://bugzilla.xamarin.com/show_bug.cgi?id=39325
3605         mono_assembly_addref (assembly);
3606
3607         mono_os_mutex_lock (&log_profiler.coverage_mutex);
3608         mono_conc_hashtable_insert (log_profiler.coverage_methods, method, method);
3609         mono_conc_hashtable_insert (log_profiler.coverage_assemblies, assembly, assembly);
3610         mono_os_mutex_unlock (&log_profiler.coverage_mutex);
3611
3612         image_methods = (MonoLockFreeQueue *)mono_conc_hashtable_lookup (log_profiler.coverage_image_to_methods, image);
3613
3614         if (image_methods == NULL) {
3615                 image_methods = (MonoLockFreeQueue *) g_malloc (sizeof (MonoLockFreeQueue));
3616                 mono_lock_free_queue_init (image_methods);
3617                 mono_os_mutex_lock (&log_profiler.coverage_mutex);
3618                 mono_conc_hashtable_insert (log_profiler.coverage_image_to_methods, image, image_methods);
3619                 mono_os_mutex_unlock (&log_profiler.coverage_mutex);
3620         }
3621
3622         node = create_method_node (method);
3623         mono_lock_free_queue_enqueue (image_methods, node);
3624
3625         class_methods = (MonoLockFreeQueue *)mono_conc_hashtable_lookup (log_profiler.coverage_classes, klass);
3626
3627         if (class_methods == NULL) {
3628                 class_methods = (MonoLockFreeQueue *) g_malloc (sizeof (MonoLockFreeQueue));
3629                 mono_lock_free_queue_init (class_methods);
3630                 mono_os_mutex_lock (&log_profiler.coverage_mutex);
3631                 mono_conc_hashtable_insert (log_profiler.coverage_classes, klass, class_methods);
3632                 mono_os_mutex_unlock (&log_profiler.coverage_mutex);
3633         }
3634
3635         node = create_method_node (method);
3636         mono_lock_free_queue_enqueue (class_methods, node);
3637
3638         return TRUE;
3639 }
3640
3641 #define LINE_BUFFER_SIZE 4096
3642 /* Max file limit of 128KB */
3643 #define MAX_FILE_SIZE 128 * 1024
3644 static char *
3645 get_file_content (FILE *stream)
3646 {
3647         char *buffer;
3648         ssize_t bytes_read;
3649         long filesize;
3650         int res, offset = 0;
3651
3652         res = fseek (stream, 0, SEEK_END);
3653         if (res < 0)
3654           return NULL;
3655
3656         filesize = ftell (stream);
3657         if (filesize < 0)
3658           return NULL;
3659
3660         res = fseek (stream, 0, SEEK_SET);
3661         if (res < 0)
3662           return NULL;
3663
3664         if (filesize > MAX_FILE_SIZE)
3665           return NULL;
3666
3667         buffer = (char *) g_malloc ((filesize + 1) * sizeof (char));
3668         while ((bytes_read = fread (buffer + offset, 1, LINE_BUFFER_SIZE, stream)) > 0)
3669                 offset += bytes_read;
3670
3671         /* NULL terminate our buffer */
3672         buffer[filesize] = '\0';
3673         return buffer;
3674 }
3675
3676 static char *
3677 get_next_line (char *contents, char **next_start)
3678 {
3679         char *p = contents;
3680
3681         if (p == NULL || *p == '\0') {
3682                 *next_start = NULL;
3683                 return NULL;
3684         }
3685
3686         while (*p != '\n' && *p != '\0')
3687                 p++;
3688
3689         if (*p == '\n') {
3690                 *p = '\0';
3691                 *next_start = p + 1;
3692         } else
3693                 *next_start = NULL;
3694
3695         return contents;
3696 }
3697
3698 static void
3699 init_suppressed_assemblies (void)
3700 {
3701         char *content;
3702         char *line;
3703         FILE *sa_file;
3704
3705         log_profiler.coverage_suppressed_assemblies = mono_conc_hashtable_new (g_str_hash, g_str_equal);
3706         sa_file = fopen (SUPPRESSION_DIR "/mono-profiler-log.suppression", "r");
3707         if (sa_file == NULL)
3708                 return;
3709
3710         /* Don't need to free @content as it is referred to by the lines stored in @suppressed_assemblies */
3711         content = get_file_content (sa_file);
3712         if (content == NULL)
3713                 g_error ("mono-profiler-log.suppression is greater than 128kb - aborting.");
3714
3715         while ((line = get_next_line (content, &content))) {
3716                 line = g_strchomp (g_strchug (line));
3717                 /* No locking needed as we're doing initialization */
3718                 mono_conc_hashtable_insert (log_profiler.coverage_suppressed_assemblies, line, line);
3719         }
3720
3721         fclose (sa_file);
3722 }
3723
3724 static void
3725 parse_cov_filter_file (GPtrArray *filters, const char *file)
3726 {
3727         FILE *filter_file = fopen (file, "r");
3728
3729         if (filter_file == NULL) {
3730                 mono_profiler_printf_err ("Could not open coverage filter file '%s'.", file);
3731                 return;
3732         }
3733
3734         /* Don't need to free content as it is referred to by the lines stored in @filters */
3735         char *content = get_file_content (filter_file);
3736
3737         if (content == NULL)
3738                 mono_profiler_printf_err ("Coverage filter file '%s' is larger than 128kb - ignoring.", file);
3739
3740         char *line;
3741
3742         while ((line = get_next_line (content, &content)))
3743                 g_ptr_array_add (filters, g_strchug (g_strchomp (line)));
3744
3745         fclose (filter_file);
3746 }
3747
3748 static void
3749 coverage_init (void)
3750 {
3751         mono_os_mutex_init (&log_profiler.coverage_mutex);
3752         log_profiler.coverage_methods = mono_conc_hashtable_new (NULL, NULL);
3753         log_profiler.coverage_assemblies = mono_conc_hashtable_new (NULL, NULL);
3754         log_profiler.coverage_classes = mono_conc_hashtable_new (NULL, NULL);
3755         log_profiler.coverage_filtered_classes = mono_conc_hashtable_new (NULL, NULL);
3756         log_profiler.coverage_image_to_methods = mono_conc_hashtable_new (NULL, NULL);
3757         init_suppressed_assemblies ();
3758 }
3759
3760 static void
3761 unref_coverage_assemblies (gpointer key, gpointer value, gpointer userdata)
3762 {
3763         MonoAssembly *assembly = (MonoAssembly *)value;
3764         mono_assembly_close (assembly);
3765 }
3766
3767 static void
3768 free_sample_hit (gpointer p)
3769 {
3770         mono_lock_free_free (p, SAMPLE_BLOCK_SIZE);
3771 }
3772
3773 static void
3774 cleanup_reusable_samples (void)
3775 {
3776         SampleHit *sample;
3777
3778         while ((sample = (SampleHit *) mono_lock_free_queue_dequeue (&log_profiler.sample_reuse_queue)))
3779                 mono_thread_hazardous_try_free (sample, free_sample_hit);
3780 }
3781
3782 static void
3783 log_shutdown (MonoProfiler *prof)
3784 {
3785         InterlockedWrite (&log_profiler.in_shutdown, 1);
3786
3787         if (ENABLED (PROFLOG_COUNTER_EVENTS))
3788                 counters_and_perfcounters_sample ();
3789
3790         if (ENABLED (PROFLOG_CODE_COV_FEATURE))
3791                 dump_coverage ();
3792
3793         char c = 1;
3794
3795         if (write (prof->pipes [1], &c, 1) != 1) {
3796                 mono_profiler_printf_err ("Could not write to log profiler pipe: %s", strerror (errno));
3797                 exit (1);
3798         }
3799
3800         mono_native_thread_join (prof->helper_thread);
3801
3802         mono_os_mutex_destroy (&log_profiler.counters_mutex);
3803
3804         MonoCounterAgent *mc_next;
3805
3806         for (MonoCounterAgent *cur = log_profiler.counters; cur; cur = mc_next) {
3807                 mc_next = cur->next;
3808                 g_free (cur);
3809         }
3810
3811         PerfCounterAgent *pc_next;
3812
3813         for (PerfCounterAgent *cur = log_profiler.perfcounters; cur; cur = pc_next) {
3814                 pc_next = cur->next;
3815                 g_free (cur);
3816         }
3817
3818         /*
3819          * Ensure that we empty the LLS completely, even if some nodes are
3820          * not immediately removed upon calling mono_lls_remove (), by
3821          * iterating until the head is NULL.
3822          */
3823         while (log_profiler.profiler_thread_list.head) {
3824                 MONO_LLS_FOREACH_SAFE (&log_profiler.profiler_thread_list, MonoProfilerThread, thread) {
3825                         g_assert (thread->attached && "Why is a thread in the LLS not attached?");
3826
3827                         remove_thread (thread);
3828                 } MONO_LLS_FOREACH_SAFE_END
3829         }
3830
3831         /*
3832          * Ensure that all threads have been freed, so that we don't miss any
3833          * buffers when we shut down the writer thread below.
3834          */
3835         mono_thread_hazardous_try_free_all ();
3836
3837         InterlockedWrite (&prof->run_dumper_thread, 0);
3838         mono_os_sem_post (&prof->dumper_queue_sem);
3839         mono_native_thread_join (prof->dumper_thread);
3840         mono_os_sem_destroy (&prof->dumper_queue_sem);
3841
3842         InterlockedWrite (&prof->run_writer_thread, 0);
3843         mono_os_sem_post (&prof->writer_queue_sem);
3844         mono_native_thread_join (prof->writer_thread);
3845         mono_os_sem_destroy (&prof->writer_queue_sem);
3846
3847         /*
3848          * Free all writer queue entries, and ensure that all sample hits will be
3849          * added to the sample reuse queue.
3850          */
3851         mono_thread_hazardous_try_free_all ();
3852
3853         cleanup_reusable_samples ();
3854
3855         /*
3856          * Finally, make sure that all sample hits are freed. This should cover all
3857          * hazardous data from the profiler. We can now be sure that the runtime
3858          * won't later invoke free functions in the profiler library after it has
3859          * been unloaded.
3860          */
3861         mono_thread_hazardous_try_free_all ();
3862
3863         gint32 state = InterlockedRead (&log_profiler.buffer_lock_state);
3864
3865         g_assert (!(state & 0xFFFF) && "Why is the reader count still non-zero?");
3866         g_assert (!(state >> 16) && "Why is the exclusive lock still held?");
3867
3868 #if defined (HAVE_SYS_ZLIB)
3869         if (prof->gzfile)
3870                 gzclose (prof->gzfile);
3871 #endif
3872         if (prof->pipe_output)
3873                 pclose (prof->file);
3874         else
3875                 fclose (prof->file);
3876
3877         mono_conc_hashtable_destroy (prof->method_table);
3878         mono_os_mutex_destroy (&prof->method_table_mutex);
3879
3880         if (ENABLED (PROFLOG_CODE_COV_FEATURE)) {
3881                 mono_os_mutex_lock (&log_profiler.coverage_mutex);
3882                 mono_conc_hashtable_foreach (log_profiler.coverage_assemblies, unref_coverage_assemblies, NULL);
3883                 mono_os_mutex_unlock (&log_profiler.coverage_mutex);
3884
3885                 mono_conc_hashtable_destroy (log_profiler.coverage_methods);
3886                 mono_conc_hashtable_destroy (log_profiler.coverage_assemblies);
3887                 mono_conc_hashtable_destroy (log_profiler.coverage_classes);
3888                 mono_conc_hashtable_destroy (log_profiler.coverage_filtered_classes);
3889
3890                 mono_conc_hashtable_destroy (log_profiler.coverage_image_to_methods);
3891                 mono_conc_hashtable_destroy (log_profiler.coverage_suppressed_assemblies);
3892                 mono_os_mutex_destroy (&log_profiler.coverage_mutex);
3893         }
3894
3895         PROF_TLS_FREE ();
3896
3897         g_free (prof->args);
3898 }
3899
3900 static char*
3901 new_filename (const char* filename)
3902 {
3903         time_t t = time (NULL);
3904         int pid = process_id ();
3905         char pid_buf [16];
3906         char time_buf [16];
3907         char *res, *d;
3908         const char *p;
3909         int count_dates = 0;
3910         int count_pids = 0;
3911         int s_date, s_pid;
3912         struct tm *ts;
3913         for (p = filename; *p; p++) {
3914                 if (*p != '%')
3915                         continue;
3916                 p++;
3917                 if (*p == 't')
3918                         count_dates++;
3919                 else if (*p == 'p')
3920                         count_pids++;
3921                 else if (*p == 0)
3922                         break;
3923         }
3924         if (!count_dates && !count_pids)
3925                 return pstrdup (filename);
3926         snprintf (pid_buf, sizeof (pid_buf), "%d", pid);
3927         ts = gmtime (&t);
3928         snprintf (time_buf, sizeof (time_buf), "%d%02d%02d%02d%02d%02d",
3929                 1900 + ts->tm_year, 1 + ts->tm_mon, ts->tm_mday, ts->tm_hour, ts->tm_min, ts->tm_sec);
3930         s_date = strlen (time_buf);
3931         s_pid = strlen (pid_buf);
3932         d = res = (char *) g_malloc (strlen (filename) + s_date * count_dates + s_pid * count_pids);
3933         for (p = filename; *p; p++) {
3934                 if (*p != '%') {
3935                         *d++ = *p;
3936                         continue;
3937                 }
3938                 p++;
3939                 if (*p == 't') {
3940                         strcpy (d, time_buf);
3941                         d += s_date;
3942                         continue;
3943                 } else if (*p == 'p') {
3944                         strcpy (d, pid_buf);
3945                         d += s_pid;
3946                         continue;
3947                 } else if (*p == '%') {
3948                         *d++ = '%';
3949                         continue;
3950                 } else if (*p == 0)
3951                         break;
3952                 *d++ = '%';
3953                 *d++ = *p;
3954         }
3955         *d = 0;
3956         return res;
3957 }
3958
3959 static void
3960 add_to_fd_set (fd_set *set, int fd, int *max_fd)
3961 {
3962         /*
3963          * This should only trigger for the basic FDs (server socket, pipes) at
3964          * startup if for some mysterious reason they're too large. In this case,
3965          * the profiler really can't function, and we're better off printing an
3966          * error and exiting.
3967          */
3968         if (fd >= FD_SETSIZE) {
3969                 mono_profiler_printf_err ("File descriptor is out of bounds for fd_set: %d", fd);
3970                 exit (1);
3971         }
3972
3973         FD_SET (fd, set);
3974
3975         if (*max_fd < fd)
3976                 *max_fd = fd;
3977 }
3978
3979 static void *
3980 helper_thread (void *arg)
3981 {
3982         mono_threads_attach_tools_thread ();
3983         mono_native_thread_set_name (mono_native_thread_id_get (), "Profiler helper");
3984
3985         MonoProfilerThread *thread = init_thread (FALSE);
3986
3987         GArray *command_sockets = g_array_new (FALSE, FALSE, sizeof (int));
3988
3989         while (1) {
3990                 fd_set rfds;
3991                 int max_fd = -1;
3992
3993                 FD_ZERO (&rfds);
3994
3995                 add_to_fd_set (&rfds, log_profiler.server_socket, &max_fd);
3996                 add_to_fd_set (&rfds, log_profiler.pipes [0], &max_fd);
3997
3998                 for (gint i = 0; i < command_sockets->len; i++)
3999                         add_to_fd_set (&rfds, g_array_index (command_sockets, int, i), &max_fd);
4000
4001                 struct timeval tv = { .tv_sec = 1, .tv_usec = 0 };
4002
4003                 // Sleep for 1sec or until a file descriptor has data.
4004                 if (select (max_fd + 1, &rfds, NULL, NULL, &tv) == -1) {
4005                         if (errno == EINTR)
4006                                 continue;
4007
4008                         mono_profiler_printf_err ("Could not poll in log profiler helper thread: %s", strerror (errno));
4009                         exit (1);
4010                 }
4011
4012                 if (ENABLED (PROFLOG_COUNTER_EVENTS))
4013                         counters_and_perfcounters_sample ();
4014
4015                 buffer_lock_excl ();
4016
4017                 sync_point (SYNC_POINT_PERIODIC);
4018
4019                 buffer_unlock_excl ();
4020
4021                 // Are we shutting down?
4022                 if (FD_ISSET (log_profiler.pipes [0], &rfds)) {
4023                         char c;
4024                         read (log_profiler.pipes [0], &c, 1);
4025                         break;
4026                 }
4027
4028                 for (gint i = 0; i < command_sockets->len; i++) {
4029                         int fd = g_array_index (command_sockets, int, i);
4030
4031                         if (!FD_ISSET (fd, &rfds))
4032                                 continue;
4033
4034                         char buf [64];
4035                         int len = read (fd, buf, sizeof (buf) - 1);
4036
4037                         if (len == -1)
4038                                 continue;
4039
4040                         if (!len) {
4041                                 // The other end disconnected.
4042                                 g_array_remove_index (command_sockets, i);
4043                                 close (fd);
4044
4045                                 continue;
4046                         }
4047
4048                         buf [len] = 0;
4049
4050                         if (!strcmp (buf, "heapshot\n") && log_config.hs_mode_ondemand) {
4051                                 // Rely on the finalization callback triggering a GC.
4052                                 log_profiler.heapshot_requested = TRUE;
4053                                 mono_gc_finalize_notify ();
4054                         }
4055                 }
4056
4057                 if (FD_ISSET (log_profiler.server_socket, &rfds)) {
4058                         int fd = accept (log_profiler.server_socket, NULL, NULL);
4059
4060                         if (fd != -1) {
4061                                 if (fd >= FD_SETSIZE)
4062                                         close (fd);
4063                                 else
4064                                         g_array_append_val (command_sockets, fd);
4065                         }
4066                 }
4067         }
4068
4069         for (gint i = 0; i < command_sockets->len; i++)
4070                 close (g_array_index (command_sockets, int, i));
4071
4072         g_array_free (command_sockets, TRUE);
4073
4074         send_log_unsafe (FALSE);
4075         deinit_thread (thread);
4076
4077         mono_thread_info_detach ();
4078
4079         return NULL;
4080 }
4081
4082 static void
4083 start_helper_thread (void)
4084 {
4085         if (pipe (log_profiler.pipes) == -1) {
4086                 mono_profiler_printf_err ("Could not create log profiler pipe: %s", strerror (errno));
4087                 exit (1);
4088         }
4089
4090         log_profiler.server_socket = socket (PF_INET, SOCK_STREAM, 0);
4091
4092         if (log_profiler.server_socket == -1) {
4093                 mono_profiler_printf_err ("Could not create log profiler server socket: %s", strerror (errno));
4094                 exit (1);
4095         }
4096
4097         struct sockaddr_in server_address;
4098
4099         memset (&server_address, 0, sizeof (server_address));
4100         server_address.sin_family = AF_INET;
4101         server_address.sin_addr.s_addr = INADDR_ANY;
4102         server_address.sin_port = htons (log_profiler.command_port);
4103
4104         if (bind (log_profiler.server_socket, (struct sockaddr *) &server_address, sizeof (server_address)) == -1) {
4105                 mono_profiler_printf_err ("Could not bind log profiler server socket on port %d: %s", log_profiler.command_port, strerror (errno));
4106                 close (log_profiler.server_socket);
4107                 exit (1);
4108         }
4109
4110         if (listen (log_profiler.server_socket, 1) == -1) {
4111                 mono_profiler_printf_err ("Could not listen on log profiler server socket: %s", strerror (errno));
4112                 close (log_profiler.server_socket);
4113                 exit (1);
4114         }
4115
4116         socklen_t slen = sizeof (server_address);
4117
4118         if (getsockname (log_profiler.server_socket, (struct sockaddr *) &server_address, &slen)) {
4119                 mono_profiler_printf_err ("Could not retrieve assigned port for log profiler server socket: %s", strerror (errno));
4120                 close (log_profiler.server_socket);
4121                 exit (1);
4122         }
4123
4124         log_profiler.command_port = ntohs (server_address.sin_port);
4125
4126         if (!mono_native_thread_create (&log_profiler.helper_thread, helper_thread, NULL)) {
4127                 mono_profiler_printf_err ("Could not start log profiler helper thread");
4128                 close (log_profiler.server_socket);
4129                 exit (1);
4130         }
4131 }
4132
4133 static void
4134 free_writer_entry (gpointer p)
4135 {
4136         mono_lock_free_free (p, WRITER_ENTRY_BLOCK_SIZE);
4137 }
4138
4139 static gboolean
4140 handle_writer_queue_entry (void)
4141 {
4142         WriterQueueEntry *entry;
4143
4144         if ((entry = (WriterQueueEntry *) mono_lock_free_queue_dequeue (&log_profiler.writer_queue))) {
4145                 if (!entry->methods)
4146                         goto no_methods;
4147
4148                 gboolean wrote_methods = FALSE;
4149
4150                 /*
4151                  * Encode the method events in a temporary log buffer that we
4152                  * flush to disk before the main buffer, ensuring that all
4153                  * methods have metadata emitted before they're referenced.
4154                  *
4155                  * We use a 'proper' thread-local buffer for this as opposed
4156                  * to allocating and freeing a buffer by hand because the call
4157                  * to mono_method_full_name () below may trigger class load
4158                  * events when it retrieves the signature of the method. So a
4159                  * thread-local buffer needs to exist when such events occur.
4160                  */
4161                 for (guint i = 0; i < entry->methods->len; i++) {
4162                         MethodInfo *info = (MethodInfo *) g_ptr_array_index (entry->methods, i);
4163
4164                         if (mono_conc_hashtable_lookup (log_profiler.method_table, info->method))
4165                                 goto free_info; // This method already has metadata emitted.
4166
4167                         /*
4168                          * Other threads use this hash table to get a general
4169                          * idea of whether a method has already been emitted to
4170                          * the stream. Due to the way we add to this table, it
4171                          * can easily happen that multiple threads queue up the
4172                          * same methods, but that's OK since eventually all
4173                          * methods will be in this table and the thread-local
4174                          * method lists will just be empty for the rest of the
4175                          * app's lifetime.
4176                          */
4177                         mono_os_mutex_lock (&log_profiler.method_table_mutex);
4178                         mono_conc_hashtable_insert (log_profiler.method_table, info->method, info->method);
4179                         mono_os_mutex_unlock (&log_profiler.method_table_mutex);
4180
4181                         char *name = mono_method_full_name (info->method, 1);
4182                         int nlen = strlen (name) + 1;
4183                         void *cstart = info->ji ? mono_jit_info_get_code_start (info->ji) : NULL;
4184                         int csize = info->ji ? mono_jit_info_get_code_size (info->ji) : 0;
4185
4186                         ENTER_LOG (&method_jits_ctr, logbuffer,
4187                                 EVENT_SIZE /* event */ +
4188                                 LEB128_SIZE /* method */ +
4189                                 LEB128_SIZE /* start */ +
4190                                 LEB128_SIZE /* size */ +
4191                                 nlen /* name */
4192                         );
4193
4194                         emit_event_time (logbuffer, TYPE_JIT | TYPE_METHOD, info->time);
4195                         emit_method_inner (logbuffer, info->method);
4196                         emit_ptr (logbuffer, cstart);
4197                         emit_value (logbuffer, csize);
4198
4199                         memcpy (logbuffer->cursor, name, nlen);
4200                         logbuffer->cursor += nlen;
4201
4202                         EXIT_LOG_EXPLICIT (NO_SEND);
4203
4204                         mono_free (name);
4205
4206                         wrote_methods = TRUE;
4207
4208                 free_info:
4209                         g_free (info);
4210                 }
4211
4212                 g_ptr_array_free (entry->methods, TRUE);
4213
4214                 if (wrote_methods) {
4215                         MonoProfilerThread *thread = PROF_TLS_GET ();
4216
4217                         dump_buffer_threadless (thread->buffer);
4218                         init_buffer_state (thread);
4219                 }
4220
4221         no_methods:
4222                 dump_buffer (entry->buffer);
4223
4224                 mono_thread_hazardous_try_free (entry, free_writer_entry);
4225
4226                 return TRUE;
4227         }
4228
4229         return FALSE;
4230 }
4231
4232 static void *
4233 writer_thread (void *arg)
4234 {
4235         mono_threads_attach_tools_thread ();
4236         mono_native_thread_set_name (mono_native_thread_id_get (), "Profiler writer");
4237
4238         dump_header ();
4239
4240         MonoProfilerThread *thread = init_thread (FALSE);
4241
4242         while (InterlockedRead (&log_profiler.run_writer_thread)) {
4243                 mono_os_sem_wait (&log_profiler.writer_queue_sem, MONO_SEM_FLAGS_NONE);
4244                 handle_writer_queue_entry ();
4245         }
4246
4247         /* Drain any remaining entries on shutdown. */
4248         while (handle_writer_queue_entry ());
4249
4250         free_buffer (thread->buffer, thread->buffer->size);
4251         deinit_thread (thread);
4252
4253         mono_thread_info_detach ();
4254
4255         return NULL;
4256 }
4257
4258 static void
4259 start_writer_thread (void)
4260 {
4261         InterlockedWrite (&log_profiler.run_writer_thread, 1);
4262
4263         if (!mono_native_thread_create (&log_profiler.writer_thread, writer_thread, NULL)) {
4264                 mono_profiler_printf_err ("Could not start log profiler writer thread");
4265                 exit (1);
4266         }
4267 }
4268
4269 static void
4270 reuse_sample_hit (gpointer p)
4271 {
4272         SampleHit *sample = p;
4273
4274         mono_lock_free_queue_node_unpoison (&sample->node);
4275         mono_lock_free_queue_enqueue (&log_profiler.sample_reuse_queue, &sample->node);
4276 }
4277
4278 static gboolean
4279 handle_dumper_queue_entry (void)
4280 {
4281         SampleHit *sample;
4282
4283         if ((sample = (SampleHit *) mono_lock_free_queue_dequeue (&log_profiler.dumper_queue))) {
4284                 for (int i = 0; i < sample->count; ++i) {
4285                         MonoMethod *method = sample->frames [i].method;
4286                         MonoDomain *domain = sample->frames [i].domain;
4287                         void *address = sample->frames [i].base_address;
4288
4289                         if (!method) {
4290                                 g_assert (domain && "What happened to the domain pointer?");
4291                                 g_assert (address && "What happened to the instruction pointer?");
4292
4293                                 MonoJitInfo *ji = mono_jit_info_table_find (domain, (char *) address);
4294
4295                                 if (ji)
4296                                         sample->frames [i].method = mono_jit_info_get_method (ji);
4297                         }
4298                 }
4299
4300                 ENTER_LOG (&sample_hits_ctr, logbuffer,
4301                         EVENT_SIZE /* event */ +
4302                         LEB128_SIZE /* tid */ +
4303                         LEB128_SIZE /* count */ +
4304                         1 * (
4305                                 LEB128_SIZE /* ip */
4306                         ) +
4307                         LEB128_SIZE /* managed count */ +
4308                         sample->count * (
4309                                 LEB128_SIZE /* method */
4310                         )
4311                 );
4312
4313                 emit_event_time (logbuffer, TYPE_SAMPLE | TYPE_SAMPLE_HIT, sample->time);
4314                 emit_ptr (logbuffer, (void *) sample->tid);
4315                 emit_value (logbuffer, 1);
4316
4317                 // TODO: Actual native unwinding.
4318                 for (int i = 0; i < 1; ++i) {
4319                         emit_ptr (logbuffer, sample->ip);
4320                         add_code_pointer ((uintptr_t) sample->ip);
4321                 }
4322
4323                 /* new in data version 6 */
4324                 emit_uvalue (logbuffer, sample->count);
4325
4326                 for (int i = 0; i < sample->count; ++i)
4327                         emit_method (logbuffer, sample->frames [i].method);
4328
4329                 EXIT_LOG_EXPLICIT (DO_SEND);
4330
4331                 mono_thread_hazardous_try_free (sample, reuse_sample_hit);
4332
4333                 dump_unmanaged_coderefs ();
4334         }
4335
4336         return FALSE;
4337 }
4338
4339 static void *
4340 dumper_thread (void *arg)
4341 {
4342         mono_threads_attach_tools_thread ();
4343         mono_native_thread_set_name (mono_native_thread_id_get (), "Profiler dumper");
4344
4345         MonoProfilerThread *thread = init_thread (FALSE);
4346
4347         while (InterlockedRead (&log_profiler.run_dumper_thread)) {
4348                 /*
4349                  * Flush samples every second so it doesn't seem like the profiler is
4350                  * not working if the program is mostly idle.
4351                  */
4352                 if (mono_os_sem_timedwait (&log_profiler.dumper_queue_sem, 1000, MONO_SEM_FLAGS_NONE) == MONO_SEM_TIMEDWAIT_RET_TIMEDOUT)
4353                         send_log_unsafe (FALSE);
4354
4355                 handle_dumper_queue_entry ();
4356         }
4357
4358         /* Drain any remaining entries on shutdown. */
4359         while (handle_dumper_queue_entry ());
4360
4361         send_log_unsafe (FALSE);
4362         deinit_thread (thread);
4363
4364         mono_thread_info_detach ();
4365
4366         return NULL;
4367 }
4368
4369 static void
4370 start_dumper_thread (void)
4371 {
4372         InterlockedWrite (&log_profiler.run_dumper_thread, 1);
4373
4374         if (!mono_native_thread_create (&log_profiler.dumper_thread, dumper_thread, NULL)) {
4375                 mono_profiler_printf_err ("Could not start log profiler dumper thread");
4376                 exit (1);
4377         }
4378 }
4379
4380 static void
4381 register_counter (const char *name, gint32 *counter)
4382 {
4383         mono_counters_register (name, MONO_COUNTER_UINT | MONO_COUNTER_PROFILER | MONO_COUNTER_MONOTONIC, counter);
4384 }
4385
4386 static void
4387 runtime_initialized (MonoProfiler *profiler)
4388 {
4389         InterlockedWrite (&log_profiler.runtime_inited, 1);
4390
4391         register_counter ("Sample events allocated", &sample_allocations_ctr);
4392         register_counter ("Log buffers allocated", &buffer_allocations_ctr);
4393
4394         register_counter ("Event: Sync points", &sync_points_ctr);
4395         register_counter ("Event: Heap objects", &heap_objects_ctr);
4396         register_counter ("Event: Heap starts", &heap_starts_ctr);
4397         register_counter ("Event: Heap ends", &heap_ends_ctr);
4398         register_counter ("Event: Heap roots", &heap_roots_ctr);
4399         register_counter ("Event: GC events", &gc_events_ctr);
4400         register_counter ("Event: GC resizes", &gc_resizes_ctr);
4401         register_counter ("Event: GC allocations", &gc_allocs_ctr);
4402         register_counter ("Event: GC moves", &gc_moves_ctr);
4403         register_counter ("Event: GC handle creations", &gc_handle_creations_ctr);
4404         register_counter ("Event: GC handle deletions", &gc_handle_deletions_ctr);
4405         register_counter ("Event: GC finalize starts", &finalize_begins_ctr);
4406         register_counter ("Event: GC finalize ends", &finalize_ends_ctr);
4407         register_counter ("Event: GC finalize object starts", &finalize_object_begins_ctr);
4408         register_counter ("Event: GC finalize object ends", &finalize_object_ends_ctr);
4409         register_counter ("Event: Image loads", &image_loads_ctr);
4410         register_counter ("Event: Image unloads", &image_unloads_ctr);
4411         register_counter ("Event: Assembly loads", &assembly_loads_ctr);
4412         register_counter ("Event: Assembly unloads", &assembly_unloads_ctr);
4413         register_counter ("Event: Class loads", &class_loads_ctr);
4414         register_counter ("Event: Class unloads", &class_unloads_ctr);
4415         register_counter ("Event: Method entries", &method_entries_ctr);
4416         register_counter ("Event: Method exits", &method_exits_ctr);
4417         register_counter ("Event: Method exception leaves", &method_exception_exits_ctr);
4418         register_counter ("Event: Method JITs", &method_jits_ctr);
4419         register_counter ("Event: Code buffers", &code_buffers_ctr);
4420         register_counter ("Event: Exception throws", &exception_throws_ctr);
4421         register_counter ("Event: Exception clauses", &exception_clauses_ctr);
4422         register_counter ("Event: Monitor events", &monitor_events_ctr);
4423         register_counter ("Event: Thread starts", &thread_starts_ctr);
4424         register_counter ("Event: Thread ends", &thread_ends_ctr);
4425         register_counter ("Event: Thread names", &thread_names_ctr);
4426         register_counter ("Event: Domain loads", &domain_loads_ctr);
4427         register_counter ("Event: Domain unloads", &domain_unloads_ctr);
4428         register_counter ("Event: Domain names", &domain_names_ctr);
4429         register_counter ("Event: Context loads", &context_loads_ctr);
4430         register_counter ("Event: Context unloads", &context_unloads_ctr);
4431         register_counter ("Event: Sample binaries", &sample_ubins_ctr);
4432         register_counter ("Event: Sample symbols", &sample_usyms_ctr);
4433         register_counter ("Event: Sample hits", &sample_hits_ctr);
4434         register_counter ("Event: Counter descriptors", &counter_descriptors_ctr);
4435         register_counter ("Event: Counter samples", &counter_samples_ctr);
4436         register_counter ("Event: Performance counter descriptors", &perfcounter_descriptors_ctr);
4437         register_counter ("Event: Performance counter samples", &perfcounter_samples_ctr);
4438         register_counter ("Event: Coverage methods", &coverage_methods_ctr);
4439         register_counter ("Event: Coverage statements", &coverage_statements_ctr);
4440         register_counter ("Event: Coverage classes", &coverage_classes_ctr);
4441         register_counter ("Event: Coverage assemblies", &coverage_assemblies_ctr);
4442
4443         counters_init ();
4444
4445         /*
4446          * We must start the helper thread before the writer thread. This is
4447          * because the helper thread sets up the command port which is written to
4448          * the log header by the writer thread.
4449          */
4450         start_helper_thread ();
4451         start_writer_thread ();
4452         start_dumper_thread ();
4453 }
4454
4455 static void
4456 create_profiler (const char *args, const char *filename, GPtrArray *filters)
4457 {
4458         char *nf;
4459
4460         log_profiler.args = pstrdup (args);
4461         log_profiler.command_port = log_config.command_port;
4462
4463         //If filename begin with +, append the pid at the end
4464         if (filename && *filename == '+')
4465                 filename = g_strdup_printf ("%s.%d", filename + 1, getpid ());
4466
4467         if (!filename) {
4468                 if (log_config.do_report)
4469                         filename = "|mprof-report -";
4470                 else
4471                         filename = "output.mlpd";
4472                 nf = (char*)filename;
4473         } else {
4474                 nf = new_filename (filename);
4475                 if (log_config.do_report) {
4476                         int s = strlen (nf) + 32;
4477                         char *p = (char *) g_malloc (s);
4478                         snprintf (p, s, "|mprof-report '--out=%s' -", nf);
4479                         g_free (nf);
4480                         nf = p;
4481                 }
4482         }
4483         if (*nf == '|') {
4484                 log_profiler.file = popen (nf + 1, "w");
4485                 log_profiler.pipe_output = 1;
4486         } else if (*nf == '#') {
4487                 int fd = strtol (nf + 1, NULL, 10);
4488                 log_profiler.file = fdopen (fd, "a");
4489         } else
4490                 log_profiler.file = fopen (nf, "wb");
4491
4492         if (!log_profiler.file) {
4493                 mono_profiler_printf_err ("Could not create log profiler output file '%s'.", nf);
4494                 exit (1);
4495         }
4496
4497 #if defined (HAVE_SYS_ZLIB)
4498         if (log_config.use_zip)
4499                 log_profiler.gzfile = gzdopen (fileno (log_profiler.file), "wb");
4500 #endif
4501
4502         /*
4503          * If you hit this assert while increasing MAX_FRAMES, you need to increase
4504          * SAMPLE_BLOCK_SIZE as well.
4505          */
4506         g_assert (SAMPLE_SLOT_SIZE (MAX_FRAMES) * 2 < LOCK_FREE_ALLOC_SB_USABLE_SIZE (SAMPLE_BLOCK_SIZE));
4507
4508         // FIXME: We should free this stuff too.
4509         mono_lock_free_allocator_init_size_class (&log_profiler.sample_size_class, SAMPLE_SLOT_SIZE (log_config.num_frames), SAMPLE_BLOCK_SIZE);
4510         mono_lock_free_allocator_init_allocator (&log_profiler.sample_allocator, &log_profiler.sample_size_class, MONO_MEM_ACCOUNT_PROFILER);
4511
4512         mono_lock_free_queue_init (&log_profiler.sample_reuse_queue);
4513
4514         g_assert (sizeof (WriterQueueEntry) * 2 < LOCK_FREE_ALLOC_SB_USABLE_SIZE (WRITER_ENTRY_BLOCK_SIZE));
4515
4516         // FIXME: We should free this stuff too.
4517         mono_lock_free_allocator_init_size_class (&log_profiler.writer_entry_size_class, sizeof (WriterQueueEntry), WRITER_ENTRY_BLOCK_SIZE);
4518         mono_lock_free_allocator_init_allocator (&log_profiler.writer_entry_allocator, &log_profiler.writer_entry_size_class, MONO_MEM_ACCOUNT_PROFILER);
4519
4520         mono_lock_free_queue_init (&log_profiler.writer_queue);
4521         mono_os_sem_init (&log_profiler.writer_queue_sem, 0);
4522
4523         mono_lock_free_queue_init (&log_profiler.dumper_queue);
4524         mono_os_sem_init (&log_profiler.dumper_queue_sem, 0);
4525
4526         mono_os_mutex_init (&log_profiler.method_table_mutex);
4527         log_profiler.method_table = mono_conc_hashtable_new (NULL, NULL);
4528
4529         if (ENABLED (PROFLOG_CODE_COV_FEATURE))
4530                 coverage_init ();
4531
4532         log_profiler.coverage_filters = filters;
4533
4534         log_profiler.startup_time = current_time ();
4535 }
4536
4537 /*
4538  * declaration to silence the compiler: this is the entry point that
4539  * mono will load from the shared library and call.
4540  */
4541 extern void
4542 mono_profiler_init (const char *desc);
4543
4544 extern void
4545 mono_profiler_init_log (const char *desc);
4546
4547 /*
4548  * this is the entry point that will be used when the profiler
4549  * is embedded inside the main executable.
4550  */
4551 void
4552 mono_profiler_init_log (const char *desc)
4553 {
4554         mono_profiler_init (desc);
4555 }
4556
4557 void
4558 mono_profiler_init (const char *desc)
4559 {
4560         GPtrArray *filters = NULL;
4561
4562         proflog_parse_args (&log_config, desc [3] == ':' ? desc + 4 : "");
4563
4564         if (log_config.cov_filter_files) {
4565                 filters = g_ptr_array_new ();
4566                 int i;
4567                 for (i = 0; i < log_config.cov_filter_files->len; ++i) {
4568                         const char *name = log_config.cov_filter_files->pdata [i];
4569                         parse_cov_filter_file (filters, name);
4570                 }
4571         }
4572
4573         init_time ();
4574
4575         PROF_TLS_INIT ();
4576
4577         create_profiler (desc, log_config.output_filename, filters);
4578
4579         mono_lls_init (&log_profiler.profiler_thread_list, NULL);
4580
4581         MonoProfilerHandle handle = log_profiler.handle = mono_profiler_install (&log_profiler);
4582
4583         //Required callbacks
4584         mono_profiler_set_runtime_shutdown_end_callback (handle, log_shutdown);
4585         mono_profiler_set_runtime_initialized_callback (handle, runtime_initialized);
4586
4587         mono_profiler_set_gc_event_callback (handle, gc_event);
4588         mono_profiler_set_gc_resize_callback (handle, gc_resize);
4589         mono_profiler_set_thread_started_callback (handle, thread_start);
4590         mono_profiler_set_thread_stopped_callback (handle, thread_end);
4591
4592         //It's questionable whether we actually want this to be mandatory, maybe put it behind the actual event?
4593         mono_profiler_set_thread_name_callback (handle, thread_name);
4594
4595         if (log_config.effective_mask & PROFLOG_DOMAIN_EVENTS) {
4596                 mono_profiler_set_domain_loaded_callback (handle, domain_loaded);
4597                 mono_profiler_set_domain_unloading_callback (handle, domain_unloaded);
4598                 mono_profiler_set_domain_name_callback (handle, domain_name);
4599         }
4600
4601         if (log_config.effective_mask & PROFLOG_ASSEMBLY_EVENTS) {
4602                 mono_profiler_set_assembly_loaded_callback (handle, assembly_loaded);
4603                 mono_profiler_set_assembly_unloading_callback (handle, assembly_unloaded);
4604         }
4605
4606         if (log_config.effective_mask & PROFLOG_MODULE_EVENTS) {
4607                 mono_profiler_set_image_loaded_callback (handle, image_loaded);
4608                 mono_profiler_set_image_unloading_callback (handle, image_unloaded);
4609         }
4610
4611         if (log_config.effective_mask & PROFLOG_CLASS_EVENTS)
4612                 mono_profiler_set_class_loaded_callback (handle, class_loaded);
4613
4614         if (log_config.effective_mask & PROFLOG_JIT_COMPILATION_EVENTS) {
4615                 mono_profiler_set_jit_done_callback (handle, method_jitted);
4616                 mono_profiler_set_jit_code_buffer_callback (handle, code_buffer_new);
4617         }
4618
4619         if (log_config.effective_mask & PROFLOG_EXCEPTION_EVENTS) {
4620                 mono_profiler_set_exception_throw_callback (handle, throw_exc);
4621                 mono_profiler_set_exception_clause_callback (handle, clause_exc);
4622         }
4623
4624         if (log_config.effective_mask & PROFLOG_ALLOCATION_EVENTS) {
4625                 mono_profiler_enable_allocations ();
4626                 mono_profiler_set_gc_allocation_callback (handle, gc_alloc);
4627         }
4628
4629         //PROFLOG_GC_EVENTS is mandatory
4630         //PROFLOG_THREAD_EVENTS is mandatory
4631
4632         if (log_config.effective_mask & PROFLOG_CALL_EVENTS) {
4633                 mono_profiler_set_call_instrumentation_filter_callback (handle, method_filter);
4634                 mono_profiler_set_method_enter_callback (handle, method_enter);
4635                 mono_profiler_set_method_leave_callback (handle, method_leave);
4636                 mono_profiler_set_method_exception_leave_callback (handle, method_exc_leave);
4637         }
4638
4639         if (log_config.effective_mask & PROFLOG_INS_COVERAGE_EVENTS)
4640                 mono_profiler_set_coverage_filter_callback (handle, coverage_filter);
4641
4642         if (log_config.effective_mask & PROFLOG_SAMPLING_EVENTS) {
4643                 mono_profiler_enable_sampling (handle);
4644
4645                 if (!mono_profiler_set_sample_mode (handle, log_config.sampling_mode, log_config.sample_freq))
4646                         mono_profiler_printf_err ("Another profiler controls sampling parameters; the log profiler will not be able to modify them.");
4647
4648                 mono_profiler_set_sample_hit_callback (handle, mono_sample_hit);
4649         }
4650
4651         if (log_config.effective_mask & PROFLOG_MONITOR_EVENTS) {
4652                 mono_profiler_set_monitor_contention_callback (handle, monitor_contention);
4653                 mono_profiler_set_monitor_acquired_callback (handle, monitor_acquired);
4654                 mono_profiler_set_monitor_failed_callback (handle, monitor_failed);
4655         }
4656
4657         if (log_config.effective_mask & PROFLOG_GC_MOVES_EVENTS)
4658                 mono_profiler_set_gc_moves_callback (handle, gc_moves);
4659
4660         if (log_config.effective_mask & PROFLOG_GC_ROOT_EVENTS)
4661                 mono_profiler_set_gc_roots_callback (handle, gc_roots);
4662
4663         if (log_config.effective_mask & PROFLOG_CONTEXT_EVENTS) {
4664                 mono_profiler_set_context_loaded_callback (handle, context_loaded);
4665                 mono_profiler_set_context_unloaded_callback (handle, context_unloaded);
4666         }
4667
4668         if (log_config.effective_mask & PROFLOG_FINALIZATION_EVENTS) {
4669                 mono_profiler_set_gc_finalizing_callback (handle, finalize_begin);
4670                 mono_profiler_set_gc_finalized_callback (handle, finalize_end);
4671                 mono_profiler_set_gc_finalizing_object_callback (handle, finalize_object_begin);
4672                 mono_profiler_set_gc_finalized_object_callback (handle, finalize_object_end);
4673         } else if (ENABLED (PROFLOG_HEAPSHOT_FEATURE) && log_config.hs_mode_ondemand) {
4674                 //On Demand heapshot uses the finalizer thread to force a collection and thus a heapshot
4675                 mono_profiler_set_gc_finalized_callback (handle, finalize_end);
4676         }
4677
4678         //PROFLOG_COUNTER_EVENTS is a pseudo event controled by the no_counters global var
4679
4680         if (log_config.effective_mask & PROFLOG_GC_HANDLE_EVENTS) {
4681                 mono_profiler_set_gc_handle_created_callback (handle, gc_handle_created);
4682                 mono_profiler_set_gc_handle_deleted_callback (handle, gc_handle_deleted);
4683         }
4684 }