1 /*
2  * mono-profiler-log.c: mono log profiler
3  *
4  * Authors:
5  *   Paolo Molaro (lupus@ximian.com)
6  *   Alex Rønne Petersen (alexrp@xamarin.com)
7  *
8  * Copyright 2010 Novell, Inc (http://www.novell.com)
9  * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
10  * Licensed under the MIT license. See LICENSE file in the project root for full license information.
11  */
12
13 #include <config.h>
14 #include <mono/metadata/assembly.h>
15 #include <mono/metadata/debug-helpers.h>
16 #include "../metadata/metadata-internals.h"
17 #include <mono/metadata/mono-config.h>
18 #include <mono/metadata/mono-gc.h>
19 #include <mono/metadata/mono-perfcounters.h>
20 #include <mono/utils/atomic.h>
21 #include <mono/utils/hazard-pointer.h>
22 #include <mono/utils/lock-free-alloc.h>
23 #include <mono/utils/lock-free-queue.h>
24 #include <mono/utils/mono-conc-hashtable.h>
25 #include <mono/utils/mono-counters.h>
26 #include <mono/utils/mono-linked-list-set.h>
27 #include <mono/utils/mono-membar.h>
28 #include <mono/utils/mono-mmap.h>
29 #include <mono/utils/mono-os-mutex.h>
30 #include <mono/utils/mono-os-semaphore.h>
31 #include <mono/utils/mono-threads.h>
32 #include <mono/utils/mono-threads-api.h>
33 #include "log.h"
34
35 #ifdef HAVE_DLFCN_H
36 #include <dlfcn.h>
37 #endif
38 #include <fcntl.h>
39 #ifdef HAVE_LINK_H
40 #include <link.h>
41 #endif
42 #ifdef HAVE_UNISTD_H
43 #include <unistd.h>
44 #endif
45 #if defined(__APPLE__)
46 #include <mach/mach_time.h>
47 #endif
48 #include <netinet/in.h>
49 #ifdef HAVE_SYS_MMAN_H
50 #include <sys/mman.h>
51 #endif
52 #include <sys/socket.h>
53 #if defined (HAVE_SYS_ZLIB)
54 #include <zlib.h>
55 #endif
56
57 #define BUFFER_SIZE (4096 * 16)
58
59 /* Worst-case size in bytes of a 64-bit value encoded with LEB128. */
60 #define LEB128_SIZE 10
61
62 /* Size of a value encoded as a single byte. */
63 #undef BYTE_SIZE // mach/i386/vm_param.h on OS X defines this to 8, but it isn't used for anything.
64 #define BYTE_SIZE 1
65
66 /* Size in bytes of the event prefix (ID + time). */
67 #define EVENT_SIZE (BYTE_SIZE + LEB128_SIZE)
68
69 static volatile gint32 runtime_inited;
70 static volatile gint32 in_shutdown;
71
72 static ProfilerConfig config;
73 static int nocalls = 0;
74 static int notraces = 0;
75 static int use_zip = 0;
76 static int do_report = 0;
77 static int do_heap_shot = 0;
78 static int max_call_depth = 0;
79 static int command_port = 0;
80 static int heapshot_requested = 0;
81 static int do_mono_sample = 0;
82 static int do_debug = 0;
83 static int do_coverage = 0;
84 static gboolean no_counters = FALSE;
85 static gboolean only_coverage = FALSE;
86 static gboolean debug_coverage = FALSE;
87 static int max_allocated_sample_hits;
88
89 #define ENABLED(EVT) (config.effective_mask & (EVT))
90
91 // Statistics for internal profiler data structures.
92 static gint32 sample_allocations_ctr,
93               buffer_allocations_ctr;
94
95 // Statistics for profiler events.
96 static gint32 sync_points_ctr,
97               heap_objects_ctr,
98               heap_starts_ctr,
99               heap_ends_ctr,
100               heap_roots_ctr,
101               gc_events_ctr,
102               gc_resizes_ctr,
103               gc_allocs_ctr,
104               gc_moves_ctr,
105               gc_handle_creations_ctr,
106               gc_handle_deletions_ctr,
107               finalize_begins_ctr,
108               finalize_ends_ctr,
109               finalize_object_begins_ctr,
110               finalize_object_ends_ctr,
111               image_loads_ctr,
112               image_unloads_ctr,
113               assembly_loads_ctr,
114               assembly_unloads_ctr,
115               class_loads_ctr,
116               class_unloads_ctr,
117               method_entries_ctr,
118               method_exits_ctr,
119               method_exception_exits_ctr,
120               method_jits_ctr,
121               code_buffers_ctr,
122               exception_throws_ctr,
123               exception_clauses_ctr,
124               monitor_events_ctr,
125               thread_starts_ctr,
126               thread_ends_ctr,
127               thread_names_ctr,
128               domain_loads_ctr,
129               domain_unloads_ctr,
130               domain_names_ctr,
131               context_loads_ctr,
132               context_unloads_ctr,
133               sample_ubins_ctr,
134               sample_usyms_ctr,
135               sample_hits_ctr,
136               counter_descriptors_ctr,
137               counter_samples_ctr,
138               perfcounter_descriptors_ctr,
139               perfcounter_samples_ctr,
140               coverage_methods_ctr,
141               coverage_statements_ctr,
142               coverage_classes_ctr,
143               coverage_assemblies_ctr;
144
145 static MonoLinkedListSet profiler_thread_list;
146
147 /*
148  * file format:
149  * [header] [buffer]*
150  *
151  * The file is composed of a header followed by 0 or more buffers.
152  * Each buffer contains events that happened on a thread: for a given thread
153  * buffers that appear later in the file are guaranteed to contain events
154  * that happened later in time. Buffers from separate threads could be interleaved,
155  * though.
156  * Buffers are not required to be aligned.
157  *
158  * header format:
159  * [id: 4 bytes] constant value: LOG_HEADER_ID
160  * [major: 1 byte] [minor: 1 byte] major and minor version of the log profiler
161  * [format: 1 byte] version of the data format for the rest of the file
162  * [ptrsize: 1 byte] size in bytes of a pointer in the profiled program
163  * [startup time: 8 bytes] time in milliseconds since the unix epoch when the program started
164  * [timer overhead: 4 bytes] approximate overhead in nanoseconds of the timer
165  * [flags: 4 bytes] file format flags, should be 0 for now
166  * [pid: 4 bytes] pid of the profiled process
167  * [port: 2 bytes] tcp port for server if != 0
168  * [args size: 4 bytes] size of args
169  * [args: string] arguments passed to the profiler
170  * [arch size: 4 bytes] size of arch
171  * [arch: string] architecture the profiler is running on
172  * [os size: 4 bytes] size of os
173  * [os: string] operating system the profiler is running on
174  *
175  * Multi-byte integers are stored in little-endian format.
176  *
177  * buffer format:
178  * [buffer header] [event]*
179  * Buffers have a fixed-size header followed by 0 or more bytes of event data.
180  * Timing information and other values in the event data are usually stored
181  * as uleb128 or sleb128 integers. To save space, as noted for each item below,
182  * some data is represented as a difference between the actual value and
183  * either the last value of the same type (as with timing information) or
184  * a value stored in the buffer header.
185  *
186  * For timing information the data is stored as uleb128, since timing
187  * increases in a monotonic way in each thread: the value is the number of
188  * nanoseconds to add to the last seen timing data in a buffer. The first value
189  * in a buffer will be calculated from the time_base field in the buffer header.
190  *
191  * Object or heap sizes are stored as uleb128.
192  * Pointer differences are stored as sleb128, instead.
193  *
194  * If an unexpected value is found, the rest of the buffer should be ignored,
195  * as later values generally depend on earlier ones to be interpreted correctly.
196  *
197  * buffer header format:
198  * [bufid: 4 bytes] constant value: BUF_ID
199  * [len: 4 bytes] size of the data following the buffer header
200  * [time_base: 8 bytes] time base in nanoseconds since an unspecified epoch
201  * [ptr_base: 8 bytes] base value for pointers
202  * [obj_base: 8 bytes] base value for object addresses
203  * [thread id: 8 bytes] system-specific thread ID (pthread_t for example)
204  * [method_base: 8 bytes] base value for MonoMethod pointers
205  *
206  * event format:
207  * [extended info: upper 4 bits] [type: lower 4 bits]
208  * [time diff: uleb128] nanoseconds since last timing
209  * [data]*
210  * The data that follows depends on type and the extended info.
211  * Type is one of the enum values in mono-profiler-log.h: TYPE_ALLOC, TYPE_GC,
212  * TYPE_METADATA, TYPE_METHOD, TYPE_EXCEPTION, TYPE_MONITOR, TYPE_HEAP.
213  * The extended info bits are interpreted based on type, see
214  * each individual event description below.
215  * Strings are represented as 0-terminated UTF-8 sequences.
216  *
217  * backtrace format:
218  * [num: uleb128] number of frames following
219  * [frame: sleb128]* num MonoMethod* as a pointer difference from the last such
220  * pointer or the buffer method_base
221  *
222  * type alloc format:
223  * type: TYPE_ALLOC
224  * exinfo: zero or TYPE_ALLOC_BT
225  * [ptr: sleb128] class as a byte difference from ptr_base
226  * [obj: sleb128] object address as a byte difference from obj_base
227  * [size: uleb128] size of the object in the heap
228  * If exinfo == TYPE_ALLOC_BT, a backtrace follows.
229  *
230  * type GC format:
231  * type: TYPE_GC
232  * exinfo: one of TYPE_GC_EVENT, TYPE_GC_RESIZE, TYPE_GC_MOVE, TYPE_GC_HANDLE_CREATED[_BT],
233  * TYPE_GC_HANDLE_DESTROYED[_BT], TYPE_GC_FINALIZE_START, TYPE_GC_FINALIZE_END,
234  * TYPE_GC_FINALIZE_OBJECT_START, TYPE_GC_FINALIZE_OBJECT_END
235  * if exinfo == TYPE_GC_RESIZE
236  *      [heap_size: uleb128] new heap size
237  * if exinfo == TYPE_GC_EVENT
238  *      [event type: byte] GC event (MONO_GC_EVENT_* from profiler.h)
239  *      [generation: byte] GC generation event refers to
240  * if exinfo == TYPE_GC_MOVE
241  *      [num_objects: uleb128] number of object moves that follow
242  *      [objaddr: sleb128]+ num_objects object pointer differences from obj_base
243  *      num_objects is always an even number: the even-indexed items are the old
244  *      addresses, the odd-indexed items are the respective new object addresses
245  * if exinfo == TYPE_GC_HANDLE_CREATED[_BT]
246  *      [handle_type: uleb128] GC handle type (System.Runtime.InteropServices.GCHandleType)
247  *      upper bits reserved as flags
248  *      [handle: uleb128] GC handle value
249  *      [objaddr: sleb128] object pointer differences from obj_base
250  *      If exinfo == TYPE_GC_HANDLE_CREATED_BT, a backtrace follows.
251  * if exinfo == TYPE_GC_HANDLE_DESTROYED[_BT]
252  *      [handle_type: uleb128] GC handle type (System.Runtime.InteropServices.GCHandleType)
253  *      upper bits reserved as flags
254  *      [handle: uleb128] GC handle value
255  *      If exinfo == TYPE_GC_HANDLE_DESTROYED_BT, a backtrace follows.
256  * if exinfo == TYPE_GC_FINALIZE_OBJECT_{START,END}
257  *      [object: sleb128] the object as a difference from obj_base
258  *
259  * type metadata format:
260  * type: TYPE_METADATA
261  * exinfo: one of: TYPE_END_LOAD, TYPE_END_UNLOAD (optional for TYPE_THREAD and TYPE_DOMAIN)
262  * [mtype: byte] metadata type, one of: TYPE_CLASS, TYPE_IMAGE, TYPE_ASSEMBLY, TYPE_DOMAIN,
263  * TYPE_THREAD, TYPE_CONTEXT
264  * [pointer: sleb128] pointer of the metadata type depending on mtype
265  * if mtype == TYPE_CLASS
266  *      [image: sleb128] MonoImage* as a pointer difference from ptr_base
267  *      [name: string] full class name
268  * if mtype == TYPE_IMAGE
269  *      [name: string] image file name
270  * if mtype == TYPE_ASSEMBLY
271  *      [image: sleb128] MonoImage* as a pointer difference from ptr_base
272  *      [name: string] assembly name
273  * if mtype == TYPE_DOMAIN && exinfo == 0
274  *      [name: string] domain friendly name
275  * if mtype == TYPE_CONTEXT
276  *      [domain: sleb128] domain id as pointer
277  * if mtype == TYPE_THREAD && exinfo == 0
278  *      [name: string] thread name
279  *
280  * type method format:
281  * type: TYPE_METHOD
282  * exinfo: one of: TYPE_LEAVE, TYPE_ENTER, TYPE_EXC_LEAVE, TYPE_JIT
283  * [method: sleb128] MonoMethod* as a pointer difference from the last such
284  * pointer or the buffer method_base
285  * if exinfo == TYPE_JIT
286  *      [code address: sleb128] pointer to the native code as a diff from ptr_base
287  *      [code size: uleb128] size of the generated code
288  *      [name: string] full method name
289  *
290  * type exception format:
291  * type: TYPE_EXCEPTION
292  * exinfo: zero, TYPE_CLAUSE, or TYPE_THROW_BT
293  * if exinfo == TYPE_CLAUSE
294  *      [clause type: byte] MonoExceptionEnum enum value
295  *      [clause index: uleb128] index of the current clause
296  *      [method: sleb128] MonoMethod* as a pointer difference from the last such
297  *      pointer or the buffer method_base
298  *      [object: sleb128] the exception object as a difference from obj_base
299  * else
300  *      [object: sleb128] the exception object as a difference from obj_base
301  *      If exinfo == TYPE_THROW_BT, a backtrace follows.
302  *
303  * type runtime format:
304  * type: TYPE_RUNTIME
305  * exinfo: one of: TYPE_JITHELPER
306  * if exinfo == TYPE_JITHELPER
307  *      [type: byte] MonoProfilerCodeBufferType enum value
308  *      [buffer address: sleb128] pointer to the native code as a diff from ptr_base
309  *      [buffer size: uleb128] size of the generated code
310  *      if type == MONO_PROFILER_CODE_BUFFER_SPECIFIC_TRAMPOLINE
311  *              [name: string] buffer description name
312  *
313  * type monitor format:
314  * type: TYPE_MONITOR
315  * exinfo: zero or TYPE_MONITOR_BT
316  * [type: byte] MONO_PROFILER_MONITOR_{CONTENTION,FAIL,DONE}
317  * [object: sleb128] the lock object as a difference from obj_base
318  * If exinfo == TYPE_MONITOR_BT, a backtrace follows.
319  *
320  * type heap format
321  * type: TYPE_HEAP
322  * exinfo: one of TYPE_HEAP_START, TYPE_HEAP_END, TYPE_HEAP_OBJECT, TYPE_HEAP_ROOT
323  * if exinfo == TYPE_HEAP_OBJECT
324  *      [object: sleb128] the object as a difference from obj_base
325  *      [class: sleb128] the object MonoClass* as a difference from ptr_base
326  *      [size: uleb128] size of the object on the heap
327  *      [num_refs: uleb128] number of object references
328  *      each referenced objref is preceded by a uleb128 encoded offset: the
329  *      first offset is from the object address and each next offset is relative
330  *      to the previous one
331  *      [objrefs: sleb128]+ object referenced as a difference from obj_base
332  *      The same object can appear multiple times, but only the first time
333  *      with size != 0: in the other cases this data will only be used to
334  *      provide additional referenced objects.
335  * if exinfo == TYPE_HEAP_ROOT
336  *      [num_roots: uleb128] number of root references
337  *      [num_gc: uleb128] number of major gcs
338  *      [object: sleb128] the object as a difference from obj_base
339  *      [root_type: byte] the root_type: MonoProfileGCRootType (profiler.h)
340  *      [extra_info: uleb128] the extra_info value
341  *      object, root_type and extra_info are repeated num_roots times
342  *
343  * type sample format
344  * type: TYPE_SAMPLE
345  * exinfo: one of TYPE_SAMPLE_HIT, TYPE_SAMPLE_USYM, TYPE_SAMPLE_UBIN, TYPE_SAMPLE_COUNTERS_DESC, TYPE_SAMPLE_COUNTERS
346  * if exinfo == TYPE_SAMPLE_HIT
347  *      [thread: sleb128] thread id as difference from ptr_base
348  *      [count: uleb128] number of following instruction addresses
349  *      [ip: sleb128]* instruction pointer as difference from ptr_base
350  *      [mbt_count: uleb128] number of managed backtrace frames
351  *      [method: sleb128]* MonoMethod* as a pointer difference from the last such
352  *      pointer or the buffer method_base (the first such method can also be identified by ip, but this is not necessarily true)
353  * if exinfo == TYPE_SAMPLE_USYM
354  *      [address: sleb128] symbol address as a difference from ptr_base
355  *      [size: uleb128] symbol size (may be 0 if unknown)
356  *      [name: string] symbol name
357  * if exinfo == TYPE_SAMPLE_UBIN
358  *      [address: sleb128] address where binary has been loaded as a difference from ptr_base
359  *      [offset: uleb128] file offset of mapping (the same file can be mapped multiple times)
360  *      [size: uleb128] memory size
361  *      [name: string] binary name
362  * if exinfo == TYPE_SAMPLE_COUNTERS_DESC
363  *      [len: uleb128] number of counters
364  *      for i = 0 to len
365  *              [section: uleb128] section of counter
366  *              if section == MONO_COUNTER_PERFCOUNTERS:
367  *                      [section_name: string] section name of counter
368  *              [name: string] name of counter
369  *              [type: byte] type of counter
370  *              [unit: byte] unit of counter
371  *              [variance: byte] variance of counter
372  *              [index: uleb128] unique index of counter
373  * if exinfo == TYPE_SAMPLE_COUNTERS
374  *      while true:
375  *              [index: uleb128] unique index of counter
376  *              if index == 0:
377  *                      break
378  *              [type: byte] type of counter value
379  *              if type == string:
380  *                      if value == null:
381  *                              [0: byte] 0 -> value is null
382  *                      else:
383  *                              [1: byte] 1 -> value is not null
384  *                              [value: string] counter value
385  *              else:
386  *                      [value: uleb128/sleb128/double] counter value, can be sleb128, uleb128 or double (determined by using type)
387  *
388  * type coverage format
389  * type: TYPE_COVERAGE
390  * exinfo: one of TYPE_COVERAGE_METHOD, TYPE_COVERAGE_STATEMENT, TYPE_COVERAGE_ASSEMBLY, TYPE_COVERAGE_CLASS
391  * if exinfo == TYPE_COVERAGE_METHOD
392  *  [assembly: string] name of assembly
393  *  [class: string] name of the class
394  *  [name: string] name of the method
395  *  [signature: string] the signature of the method
396  *  [filename: string] the file path of the file that contains this method
397  *  [token: uleb128] the method token
398  *  [method_id: uleb128] an ID for this data to associate with the buffers of TYPE_COVERAGE_STATEMENTS
399  *  [len: uleb128] the number of TYPE_COVERAGE_BUFFERS associated with this method
400  * if exinfo == TYPE_COVERAGE_STATEMENT
401  *  [method_id: uleb128] the ID of the TYPE_COVERAGE_METHOD buffer to associate this with
402  *  [offset: uleb128] the il offset relative to the previous offset
403  *  [counter: uleb128] the counter for this instruction
404  *  [line: uleb128] the line of filename containing this instruction
405  *  [column: uleb128] the column containing this instruction
406  * if exinfo == TYPE_COVERAGE_ASSEMBLY
407  *  [name: string] assembly name
408  *  [guid: string] assembly GUID
409  *  [filename: string] assembly filename
410  *  [number_of_methods: uleb128] the number of methods in this assembly
411  *  [fully_covered: uleb128] the number of fully covered methods
412  *  [partially_covered: uleb128] the number of partially covered methods
413  *    currently partially_covered will always be 0, and fully_covered is the
414  *    number of methods that are fully or partially covered.
415  * if exinfo == TYPE_COVERAGE_CLASS
416  *  [name: string] assembly name
417  *  [class: string] class name
418  *  [number_of_methods: uleb128] the number of methods in this class
419  *  [fully_covered: uleb128] the number of fully covered methods
420  *  [partially_covered: uleb128] the number of partially covered methods
421  *    currently partially_covered will always be 0, and fully_covered is the
422  *    number of methods that are fully or partially covered.
423  *
424  * type meta format:
425  * type: TYPE_META
426  * exinfo: one of: TYPE_SYNC_POINT
427  * if exinfo == TYPE_SYNC_POINT
428  *      [type: byte] MonoProfilerSyncPointType enum value
429  */
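/*
 * To make the encoding above concrete, the sketch below shows how a consumer
 * could read the fixed-size, little-endian header fields. This is purely
 * illustrative: the profiler only writes the log (see dump_header () below),
 * and the helper here is hypothetical, not part of this file.
 *
 *     static uint64_t
 *     read_le (const unsigned char *p, int bytes)
 *     {
 *             uint64_t value = 0;
 *
 *             for (int i = 0; i < bytes; ++i)
 *                     value |= (uint64_t) p [i] << (i * 8);
 *
 *             return value;
 *     }
 *
 *     // id = read_le (buf, 4) (must equal LOG_HEADER_ID)
 *     // major = buf [4], minor = buf [5], format = buf [6], ptrsize = buf [7]
 *     // startup time = read_le (buf + 8, 8), timer overhead = read_le (buf + 16, 4), ...
 */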
430
431 // Pending data to be written to the log, for a single thread.
432 // Threads periodically flush their own LogBuffers by calling send_log_unsafe ().
433 typedef struct _LogBuffer LogBuffer;
434 struct _LogBuffer {
435         // Next (older) LogBuffer in processing queue
436         LogBuffer *next;
437
438         uint64_t time_base;
439         uint64_t last_time;
440         uintptr_t ptr_base;
441         uintptr_t method_base;
442         uintptr_t last_method;
443         uintptr_t obj_base;
444         uintptr_t thread_id;
445
446         // Bytes allocated for this LogBuffer
447         int size;
448
449         // Start of currently unused space in buffer
450         unsigned char* cursor;
451
452         // Pointer to start-of-structure-plus-size (for convenience)
453         unsigned char* buf_end;
454
455         // Start of data in buffer. Contents follow "buffer format" described above.
456         unsigned char buf [1];
457 };
458
459 typedef struct {
460         MonoLinkedListSetNode node;
461
462         // Convenience pointer to the profiler structure.
463         MonoProfiler *profiler;
464
465         // Was this thread added to the LLS?
466         gboolean attached;
467
468         // The current log buffer for this thread.
469         LogBuffer *buffer;
470
471         // Methods referenced by events in `buffer`, see `MethodInfo`.
472         GPtrArray *methods;
473
474         // Current call depth for enter/leave events.
475         int call_depth;
476
477         // Indicates whether this thread is currently writing to its `buffer`.
478         gboolean busy;
479
480         // Has this thread written a thread end event to `buffer`?
481         gboolean ended;
482 } MonoProfilerThread;
483
484 static uintptr_t
485 thread_id (void)
486 {
487         return (uintptr_t) mono_native_thread_id_get ();
488 }
489
490 static uintptr_t
491 process_id (void)
492 {
493 #ifdef HOST_WIN32
494         return (uintptr_t) GetCurrentProcessId ();
495 #else
496         return (uintptr_t) getpid ();
497 #endif
498 }
499
500 #ifdef __APPLE__
501 static mach_timebase_info_data_t timebase_info;
502 #elif defined (HOST_WIN32)
503 static LARGE_INTEGER pcounter_freq;
504 #endif
505
506 #define TICKS_PER_SEC 1000000000LL
507
508 static uint64_t
509 current_time (void)
510 {
511 #ifdef __APPLE__
512         uint64_t time = mach_absolute_time ();
513
514         time *= timebase_info.numer;
515         time /= timebase_info.denom;
516
517         return time;
518 #elif defined (HOST_WIN32)
519         LARGE_INTEGER value;
520
521         QueryPerformanceCounter (&value);
522
523         return value.QuadPart * TICKS_PER_SEC / pcounter_freq.QuadPart;
524 #elif defined (CLOCK_MONOTONIC)
525         struct timespec tspec;
526
527         clock_gettime (CLOCK_MONOTONIC, &tspec);
528
529         return ((uint64_t) tspec.tv_sec * TICKS_PER_SEC + tspec.tv_nsec);
530 #else
531         struct timeval tv;
532
533         gettimeofday (&tv, NULL);
534
535         return ((uint64_t) tv.tv_sec * TICKS_PER_SEC + tv.tv_usec * 1000);
536 #endif
537 }
538
539 static int timer_overhead;
540
541 static void
542 init_time (void)
543 {
544 #ifdef __APPLE__
545         mach_timebase_info (&timebase_info);
546 #elif defined (HOST_WIN32)
547         QueryPerformanceFrequency (&pcounter_freq);
548 #endif
549
550         uint64_t time_start = current_time ();
551
552         for (int i = 0; i < 256; ++i)
553                 current_time ();
554
555         uint64_t time_end = current_time ();
556
557         timer_overhead = (time_end - time_start) / 256;
558 }
559
560 /*
561  * These macros should be used when writing an event to a log buffer. They
562  * take care of a bunch of stuff that can be repetitive and error-prone, such
563  * as attaching the current thread, acquiring/releasing the buffer lock,
564  * incrementing the event counter, expanding the log buffer, etc. They also
565  * create a scope so that it's harder to leak the LogBuffer pointer, which can
566  * be problematic as the pointer is unstable when the buffer lock isn't
567  * acquired.
568  *
569  * If the calling thread is already attached, these macros will not alter its
570  * attach mode (i.e. whether it's added to the LLS). If the thread is not
571  * attached, init_thread () will be called with add_to_lls = TRUE.
572  */
573
574 #define ENTER_LOG(COUNTER, BUFFER, SIZE) \
575         do { \
576                 MonoProfilerThread *thread__ = get_thread (); \
577                 if (thread__->attached) \
578                         buffer_lock (); \
579                 g_assert (!thread__->busy && "Why are we trying to write a new event while already writing one?"); \
580                 thread__->busy = TRUE; \
581                 InterlockedIncrement ((COUNTER)); \
582                 LogBuffer *BUFFER = ensure_logbuf_unsafe (thread__, (SIZE))
583
584 #define EXIT_LOG_EXPLICIT(SEND) \
585                 thread__->busy = FALSE; \
586                 if ((SEND)) \
587                         send_log_unsafe (TRUE); \
588                 if (thread__->attached) \
589                         buffer_unlock (); \
590         } while (0)
591
592 // Pass these to EXIT_LOG_EXPLICIT () for easier reading.
593 #define DO_SEND TRUE
594 #define NO_SEND FALSE
595
596 #define EXIT_LOG EXIT_LOG_EXPLICIT (DO_SEND)
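/*
 * As a usage sketch (the real callbacks later in this file follow the same
 * pattern; the counter, event type and payload below are placeholders):
 *
 *     ENTER_LOG (&some_ctr, logbuffer,
 *             EVENT_SIZE + // event
 *             BYTE_SIZE    // payload
 *     );
 *
 *     emit_event (logbuffer, TYPE_META | TYPE_SYNC_POINT);
 *     emit_byte (logbuffer, payload);
 *
 *     EXIT_LOG;
 */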
597
598 static volatile gint32 buffer_rwlock_count;
599 static volatile gpointer buffer_rwlock_exclusive;
600
601 // Can be used recursively.
602 static void
603 buffer_lock (void)
604 {
605         /*
606          * If the thread holding the exclusive lock tries to modify the
607          * reader count, just make it a no-op. This way, we also avoid
608          * invoking the GC safe point macros below, which could break if
609          * done from a thread that is currently the initiator of STW.
610          *
611          * In other words, we rely on the fact that the GC thread takes
612          * the exclusive lock in the gc_event () callback when the world
613          * is about to stop.
614          */
615         if (InterlockedReadPointer (&buffer_rwlock_exclusive) != (gpointer) thread_id ()) {
616                 MONO_ENTER_GC_SAFE;
617
618                 while (InterlockedReadPointer (&buffer_rwlock_exclusive))
619                         mono_thread_info_yield ();
620
621                 InterlockedIncrement (&buffer_rwlock_count);
622
623                 MONO_EXIT_GC_SAFE;
624         }
625
626         mono_memory_barrier ();
627 }
628
629 static void
630 buffer_unlock (void)
631 {
632         mono_memory_barrier ();
633
634         // See the comment in buffer_lock ().
635         if (InterlockedReadPointer (&buffer_rwlock_exclusive) == (gpointer) thread_id ())
636                 return;
637
638         g_assert (InterlockedRead (&buffer_rwlock_count) && "Why are we trying to decrement a zero reader count?");
639
640         InterlockedDecrement (&buffer_rwlock_count);
641 }
642
643 // Cannot be used recursively.
644 static void
645 buffer_lock_excl (void)
646 {
647         gpointer tid = (gpointer) thread_id ();
648
649         g_assert (InterlockedReadPointer (&buffer_rwlock_exclusive) != tid && "Why are we taking the exclusive lock twice?");
650
651         MONO_ENTER_GC_SAFE;
652
653         while (InterlockedCompareExchangePointer (&buffer_rwlock_exclusive, tid, 0))
654                 mono_thread_info_yield ();
655
656         while (InterlockedRead (&buffer_rwlock_count))
657                 mono_thread_info_yield ();
658
659         MONO_EXIT_GC_SAFE;
660
661         mono_memory_barrier ();
662 }
663
664 static void
665 buffer_unlock_excl (void)
666 {
667         mono_memory_barrier ();
668
669         g_assert (InterlockedReadPointer (&buffer_rwlock_exclusive) && "Why is the exclusive lock not held?");
670         g_assert (InterlockedReadPointer (&buffer_rwlock_exclusive) == (gpointer) thread_id () && "Why does another thread hold the exclusive lock?");
671         g_assert (!InterlockedRead (&buffer_rwlock_count) && "Why are there readers when the exclusive lock is held?");
672
673         InterlockedWritePointer (&buffer_rwlock_exclusive, NULL);
674 }
675
676 typedef struct _BinaryObject BinaryObject;
677 struct _BinaryObject {
678         BinaryObject *next;
679         void *addr;
680         char *name;
681 };
682
683 static MonoProfiler *log_profiler;
684
685 struct _MonoProfiler {
686         FILE* file;
687 #if defined (HAVE_SYS_ZLIB)
688         gzFile gzfile;
689 #endif
690         char *args;
691         uint64_t startup_time;
692         int pipe_output;
693         int command_port;
694         int server_socket;
695         int pipes [2];
696         MonoNativeThreadId helper_thread;
697         MonoNativeThreadId writer_thread;
698         MonoNativeThreadId dumper_thread;
699         volatile gint32 run_writer_thread;
700         MonoLockFreeAllocSizeClass writer_entry_size_class;
701         MonoLockFreeAllocator writer_entry_allocator;
702         MonoLockFreeQueue writer_queue;
703         MonoSemType writer_queue_sem;
704         MonoConcurrentHashTable *method_table;
705         mono_mutex_t method_table_mutex;
706         volatile gint32 run_dumper_thread;
707         MonoLockFreeQueue dumper_queue;
708         MonoSemType dumper_queue_sem;
709         MonoLockFreeAllocSizeClass sample_size_class;
710         MonoLockFreeAllocator sample_allocator;
711         MonoLockFreeQueue sample_reuse_queue;
712         BinaryObject *binary_objects;
713         GPtrArray *coverage_filters;
714 };
715
716 typedef struct {
717         MonoLockFreeQueueNode node;
718         GPtrArray *methods;
719         LogBuffer *buffer;
720 } WriterQueueEntry;
721
722 #define WRITER_ENTRY_BLOCK_SIZE (mono_pagesize ())
723
724 typedef struct {
725         MonoMethod *method;
726         MonoJitInfo *ji;
727         uint64_t time;
728 } MethodInfo;
729
730 // Do not use these TLS macros directly unless you know what you're doing.
731
732 #ifdef HOST_WIN32
733
734 #define PROF_TLS_SET(VAL) (TlsSetValue (profiler_tls, (VAL)))
735 #define PROF_TLS_GET() ((MonoProfilerThread *) TlsGetValue (profiler_tls))
736 #define PROF_TLS_INIT() (profiler_tls = TlsAlloc ())
737 #define PROF_TLS_FREE() (TlsFree (profiler_tls))
738
739 static DWORD profiler_tls;
740
741 #elif HAVE_KW_THREAD
742
743 #define PROF_TLS_SET(VAL) (profiler_tls = (VAL))
744 #define PROF_TLS_GET() (profiler_tls)
745 #define PROF_TLS_INIT()
746 #define PROF_TLS_FREE()
747
748 static __thread MonoProfilerThread *profiler_tls;
749
750 #else
751
752 #define PROF_TLS_SET(VAL) (pthread_setspecific (profiler_tls, (VAL)))
753 #define PROF_TLS_GET() ((MonoProfilerThread *) pthread_getspecific (profiler_tls))
754 #define PROF_TLS_INIT() (pthread_key_create (&profiler_tls, NULL))
755 #define PROF_TLS_FREE() (pthread_key_delete (profiler_tls))
756
757 static pthread_key_t profiler_tls;
758
759 #endif
760
761 static char*
762 pstrdup (const char *s)
763 {
764         int len = strlen (s) + 1;
765         char *p = (char *) g_malloc (len);
766         memcpy (p, s, len);
767         return p;
768 }
769
770 static void *
771 alloc_buffer (int size)
772 {
773         return mono_valloc (NULL, size, MONO_MMAP_READ | MONO_MMAP_WRITE | MONO_MMAP_ANON | MONO_MMAP_PRIVATE, MONO_MEM_ACCOUNT_PROFILER);
774 }
775
776 static void
777 free_buffer (void *buf, int size)
778 {
779         mono_vfree (buf, size, MONO_MEM_ACCOUNT_PROFILER);
780 }
781
782 static LogBuffer*
783 create_buffer (uintptr_t tid)
784 {
785         LogBuffer* buf = (LogBuffer *) alloc_buffer (BUFFER_SIZE);
786
787         InterlockedIncrement (&buffer_allocations_ctr);
788
789         buf->size = BUFFER_SIZE;
790         buf->time_base = current_time ();
791         buf->last_time = buf->time_base;
792         buf->buf_end = (unsigned char *) buf + buf->size;
793         buf->cursor = buf->buf;
794         buf->thread_id = tid;
795
796         return buf;
797 }
798
799 /*
800  * Must be called with the reader lock held if thread is the current thread, or
801  * the exclusive lock if thread is a different thread. However, if thread is
802  * the current thread, and init_thread () was called with add_to_lls = FALSE,
803  * then no locking is necessary.
804  */
805 static void
806 init_buffer_state (MonoProfilerThread *thread)
807 {
808         thread->buffer = create_buffer (thread->node.key);
809         thread->methods = NULL;
810 }
811
812 static void
813 clear_hazard_pointers (MonoThreadHazardPointers *hp)
814 {
815         mono_hazard_pointer_clear (hp, 0);
816         mono_hazard_pointer_clear (hp, 1);
817         mono_hazard_pointer_clear (hp, 2);
818 }
819
820 static MonoProfilerThread *
821 init_thread (MonoProfiler *prof, gboolean add_to_lls)
822 {
823         MonoProfilerThread *thread = PROF_TLS_GET ();
824
825         /*
826          * Sometimes we may try to initialize a thread twice. One example is the
827          * main thread: We initialize it when setting up the profiler, but we will
828          * also get a thread_start () callback for it. Another example is when
829          * attaching new threads to the runtime: We may get a gc_alloc () callback
830          * for that thread's thread object (where we initialize it), soon followed
831          * by a thread_start () callback.
832          *
833          * These cases are harmless anyhow. Just return if we've already done the
834          * initialization work.
835          */
836         if (thread)
837                 return thread;
838
839         thread = g_malloc (sizeof (MonoProfilerThread));
840         thread->node.key = thread_id ();
841         thread->profiler = prof;
842         thread->attached = add_to_lls;
843         thread->call_depth = 0;
844         thread->busy = 0;
845         thread->ended = FALSE;
846
847         init_buffer_state (thread);
848
849         /*
850          * Some internal profiler threads don't need to be cleaned up
851          * by the main thread on shutdown.
852          */
853         if (add_to_lls) {
854                 MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();
855                 g_assert (mono_lls_insert (&profiler_thread_list, hp, &thread->node) && "Why can't we insert the thread in the LLS?");
856                 clear_hazard_pointers (hp);
857         }
858
859         PROF_TLS_SET (thread);
860
861         return thread;
862 }
863
864 // Only valid if init_thread () was called with add_to_lls = FALSE.
865 static void
866 deinit_thread (MonoProfilerThread *thread)
867 {
868         g_assert (!thread->attached && "Why are we manually freeing an attached thread?");
869
870         g_free (thread);
871         PROF_TLS_SET (NULL);
872 }
873
874 static MonoProfilerThread *
875 get_thread (void)
876 {
877         return init_thread (log_profiler, TRUE);
878 }
879
880 // Only valid if init_thread () was called with add_to_lls = FALSE.
881 static LogBuffer *
882 ensure_logbuf_unsafe (MonoProfilerThread *thread, int bytes)
883 {
884         LogBuffer *old = thread->buffer;
885
886         if (old && old->cursor + bytes + 100 < old->buf_end)
887                 return old;
888
889         LogBuffer *new_ = create_buffer (thread->node.key);
890         new_->next = old;
891         thread->buffer = new_;
892
893         return new_;
894 }
895
896 static void
897 encode_uleb128 (uint64_t value, uint8_t *buf, uint8_t **endbuf)
898 {
899         uint8_t *p = buf;
900
901         do {
902                 uint8_t b = value & 0x7f;
903                 value >>= 7;
904
905                 if (value != 0) /* more bytes to come */
906                         b |= 0x80;
907
908                 *p ++ = b;
909         } while (value);
910
911         *endbuf = p;
912 }
913
914 static void
915 encode_sleb128 (intptr_t value, uint8_t *buf, uint8_t **endbuf)
916 {
917         int more = 1;
918         int negative = (value < 0);
919         unsigned int size = sizeof (intptr_t) * 8;
920         uint8_t byte;
921         uint8_t *p = buf;
922
923         while (more) {
924                 byte = value & 0x7f;
925                 value >>= 7;
926
927                 /* the following is unnecessary if the
928                  * implementation of >>= uses an arithmetic rather
929                  * than logical shift for a signed left operand
930                  */
931                 if (negative)
932                         /* sign extend */
933                         value |= - ((intptr_t) 1 <<(size - 7));
934
935                 /* sign bit of byte is second high order bit (0x40) */
936                 if ((value == 0 && !(byte & 0x40)) ||
937                     (value == -1 && (byte & 0x40)))
938                         more = 0;
939                 else
940                         byte |= 0x80;
941
942                 *p ++= byte;
943         }
944
945         *endbuf = p;
946 }
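/*
 * Only the writer side lives in this file, but for reference, a consumer
 * would decode these values roughly as follows (illustrative sketch of a
 * uleb128 reader; the sleb128 case additionally sign-extends the result):
 *
 *     static uint64_t
 *     decode_uleb128 (const uint8_t *p, const uint8_t **endp)
 *     {
 *             uint64_t value = 0;
 *             int shift = 0;
 *             uint8_t b;
 *
 *             do {
 *                     b = *p++;
 *                     value |= (uint64_t) (b & 0x7f) << shift;
 *                     shift += 7;
 *             } while (b & 0x80);
 *
 *             *endp = p;
 *
 *             return value;
 *     }
 */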
947
948 static void
949 emit_byte (LogBuffer *logbuffer, int value)
950 {
951         logbuffer->cursor [0] = value;
952         logbuffer->cursor++;
953
954         g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
955 }
956
957 static void
958 emit_value (LogBuffer *logbuffer, int value)
959 {
960         encode_uleb128 (value, logbuffer->cursor, &logbuffer->cursor);
961
962         g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
963 }
964
965 static void
966 emit_time (LogBuffer *logbuffer, uint64_t value)
967 {
968         uint64_t tdiff = value - logbuffer->last_time;
969         encode_uleb128 (tdiff, logbuffer->cursor, &logbuffer->cursor);
970         logbuffer->last_time = value;
971
972         g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
973 }
974
975 static void
976 emit_event_time (LogBuffer *logbuffer, int event, uint64_t time)
977 {
978         emit_byte (logbuffer, event);
979         emit_time (logbuffer, time);
980 }
981
982 static void
983 emit_event (LogBuffer *logbuffer, int event)
984 {
985         emit_event_time (logbuffer, event, current_time ());
986 }
987
988 static void
989 emit_svalue (LogBuffer *logbuffer, int64_t value)
990 {
991         encode_sleb128 (value, logbuffer->cursor, &logbuffer->cursor);
992
993         g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
994 }
995
996 static void
997 emit_uvalue (LogBuffer *logbuffer, uint64_t value)
998 {
999         encode_uleb128 (value, logbuffer->cursor, &logbuffer->cursor);
1000
1001         g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
1002 }
1003
1004 static void
1005 emit_ptr (LogBuffer *logbuffer, void *ptr)
1006 {
1007         if (!logbuffer->ptr_base)
1008                 logbuffer->ptr_base = (uintptr_t) ptr;
1009
1010         emit_svalue (logbuffer, (intptr_t) ptr - logbuffer->ptr_base);
1011
1012         g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
1013 }
1014
1015 static void
1016 emit_method_inner (LogBuffer *logbuffer, void *method)
1017 {
1018         if (!logbuffer->method_base) {
1019                 logbuffer->method_base = (intptr_t) method;
1020                 logbuffer->last_method = (intptr_t) method;
1021         }
1022
1023         encode_sleb128 ((intptr_t) ((char *) method - (char *) logbuffer->last_method), logbuffer->cursor, &logbuffer->cursor);
1024         logbuffer->last_method = (intptr_t) method;
1025
1026         g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
1027 }
1028
1029 static void
1030 register_method_local (MonoMethod *method, MonoJitInfo *ji)
1031 {
1032         MonoProfilerThread *thread = get_thread ();
1033
1034         if (!mono_conc_hashtable_lookup (thread->profiler->method_table, method)) {
1035                 MethodInfo *info = (MethodInfo *) g_malloc (sizeof (MethodInfo));
1036
1037                 info->method = method;
1038                 info->ji = ji;
1039                 info->time = current_time ();
1040
1041                 buffer_lock ();
1042
1043                 GPtrArray *arr = thread->methods ? thread->methods : (thread->methods = g_ptr_array_new ());
1044                 g_ptr_array_add (arr, info);
1045
1046                 buffer_unlock ();
1047         }
1048 }
1049
1050 static void
1051 emit_method (LogBuffer *logbuffer, MonoMethod *method)
1052 {
1053         register_method_local (method, NULL);
1054         emit_method_inner (logbuffer, method);
1055 }
1056
1057 static void
1058 emit_obj (LogBuffer *logbuffer, void *ptr)
1059 {
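        // Object pointers are emitted shifted right by 3 bits: objects are assumed
        // to be at least 8-byte aligned, so this keeps the sleb128 deltas small
        // (a reader would shift the decoded address back left by 3).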
1060         if (!logbuffer->obj_base)
1061                 logbuffer->obj_base = (uintptr_t) ptr >> 3;
1062
1063         emit_svalue (logbuffer, ((uintptr_t) ptr >> 3) - logbuffer->obj_base);
1064
1065         g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
1066 }
1067
1068 static void
1069 emit_string (LogBuffer *logbuffer, const char *str, size_t size)
1070 {
1071         size_t i = 0;
1072         if (str) {
1073                 for (; i < size; i++) {
1074                         if (str[i] == '\0')
1075                                 break;
1076                         emit_byte (logbuffer, str [i]);
1077                 }
1078         }
1079         emit_byte (logbuffer, '\0');
1080 }
1081
1082 static void
1083 emit_double (LogBuffer *logbuffer, double value)
1084 {
1085         int i;
1086         unsigned char buffer[8];
1087         memcpy (buffer, &value, 8);
1088 #if G_BYTE_ORDER == G_BIG_ENDIAN
1089         for (i = 7; i >= 0; i--)
1090 #else
1091         for (i = 0; i < 8; i++)
1092 #endif
1093                 emit_byte (logbuffer, buffer[i]);
1094 }
1095
1096 static char*
1097 write_int16 (char *buf, int32_t value)
1098 {
1099         int i;
1100         for (i = 0; i < 2; ++i) {
1101                 buf [i] = value;
1102                 value >>= 8;
1103         }
1104         return buf + 2;
1105 }
1106
1107 static char*
1108 write_int32 (char *buf, int32_t value)
1109 {
1110         int i;
1111         for (i = 0; i < 4; ++i) {
1112                 buf [i] = value;
1113                 value >>= 8;
1114         }
1115         return buf + 4;
1116 }
1117
1118 static char*
1119 write_int64 (char *buf, int64_t value)
1120 {
1121         int i;
1122         for (i = 0; i < 8; ++i) {
1123                 buf [i] = value;
1124                 value >>= 8;
1125         }
1126         return buf + 8;
1127 }
1128
1129 static char *
1130 write_header_string (char *p, const char *str)
1131 {
1132         size_t len = strlen (str) + 1;
1133
1134         p = write_int32 (p, len);
1135         strcpy (p, str);
1136
1137         return p + len;
1138 }
1139
1140 static void
1141 dump_header (MonoProfiler *profiler)
1142 {
1143         const char *args = profiler->args;
1144         const char *arch = mono_config_get_cpu ();
1145         const char *os = mono_config_get_os ();
1146
1147         char *hbuf = g_malloc (
1148                 sizeof (gint32) /* header id */ +
1149                 sizeof (gint8) /* major version */ +
1150                 sizeof (gint8) /* minor version */ +
1151                 sizeof (gint8) /* data version */ +
1152                 sizeof (gint8) /* word size */ +
1153                 sizeof (gint64) /* startup time */ +
1154                 sizeof (gint32) /* timer overhead */ +
1155                 sizeof (gint32) /* flags */ +
1156                 sizeof (gint32) /* process id */ +
1157                 sizeof (gint16) /* command port */ +
1158                 sizeof (gint32) + strlen (args) + 1 /* arguments */ +
1159                 sizeof (gint32) + strlen (arch) + 1 /* architecture */ +
1160                 sizeof (gint32) + strlen (os) + 1 /* operating system */
1161         );
1162         char *p = hbuf;
1163
1164         p = write_int32 (p, LOG_HEADER_ID);
1165         *p++ = LOG_VERSION_MAJOR;
1166         *p++ = LOG_VERSION_MINOR;
1167         *p++ = LOG_DATA_VERSION;
1168         *p++ = sizeof (void *);
1169         p = write_int64 (p, ((uint64_t) time (NULL)) * 1000);
1170         p = write_int32 (p, timer_overhead);
1171         p = write_int32 (p, 0); /* flags */
1172         p = write_int32 (p, process_id ());
1173         p = write_int16 (p, profiler->command_port);
1174         p = write_header_string (p, args);
1175         p = write_header_string (p, arch);
1176         p = write_header_string (p, os);
1177
1178 #if defined (HAVE_SYS_ZLIB)
1179         if (profiler->gzfile) {
1180                 gzwrite (profiler->gzfile, hbuf, p - hbuf);
1181         } else
1182 #endif
1183         {
1184                 fwrite (hbuf, p - hbuf, 1, profiler->file);
1185                 fflush (profiler->file);
1186         }
1187
1188         g_free (hbuf);
1189 }
1190
1191 /*
1192  * Must be called with the reader lock held if thread is the current thread, or
1193  * the exclusive lock if thread is a different thread. However, if thread is
1194  * the current thread, and init_thread () was called with add_to_lls = FALSE,
1195  * then no locking is necessary.
1196  */
1197 static void
1198 send_buffer (MonoProfilerThread *thread)
1199 {
1200         WriterQueueEntry *entry = mono_lock_free_alloc (&thread->profiler->writer_entry_allocator);
1201         entry->methods = thread->methods;
1202         entry->buffer = thread->buffer;
1203
1204         mono_lock_free_queue_node_init (&entry->node, FALSE);
1205
1206         mono_lock_free_queue_enqueue (&thread->profiler->writer_queue, &entry->node);
1207         mono_os_sem_post (&thread->profiler->writer_queue_sem);
1208 }
1209
1210 static void
1211 free_thread (gpointer p)
1212 {
1213         MonoProfilerThread *thread = p;
1214
1215         if (!thread->ended) {
1216                 /*
1217                  * The thread is being cleaned up by the main thread during
1218                  * shutdown. This typically happens for internal runtime
1219                  * threads. We need to synthesize a thread end event.
1220                  */
1221
1222                 InterlockedIncrement (&thread_ends_ctr);
1223
1224                 if (ENABLED (PROFLOG_THREAD_EVENTS)) {
1225                         LogBuffer *buf = ensure_logbuf_unsafe (thread,
1226                                 EVENT_SIZE /* event */ +
1227                                 BYTE_SIZE /* type */ +
1228                                 LEB128_SIZE /* tid */
1229                         );
1230
1231                         emit_event (buf, TYPE_END_UNLOAD | TYPE_METADATA);
1232                         emit_byte (buf, TYPE_THREAD);
1233                         emit_ptr (buf, (void *) thread->node.key);
1234                 }
1235         }
1236
1237         send_buffer (thread);
1238
1239         g_free (thread);
1240 }
1241
1242 static void
1243 remove_thread (MonoProfilerThread *thread)
1244 {
1245         MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();
1246
1247         if (mono_lls_remove (&profiler_thread_list, hp, &thread->node))
1248                 mono_thread_hazardous_try_free (thread, free_thread);
1249
1250         clear_hazard_pointers (hp);
1251 }
1252
1253 static void
1254 dump_buffer (MonoProfiler *profiler, LogBuffer *buf)
1255 {
1256         char hbuf [128];
1257         char *p = hbuf;
1258
1259         if (buf->next)
1260                 dump_buffer (profiler, buf->next);
1261
1262         if (buf->cursor - buf->buf) {
1263                 p = write_int32 (p, BUF_ID);
1264                 p = write_int32 (p, buf->cursor - buf->buf);
1265                 p = write_int64 (p, buf->time_base);
1266                 p = write_int64 (p, buf->ptr_base);
1267                 p = write_int64 (p, buf->obj_base);
1268                 p = write_int64 (p, buf->thread_id);
1269                 p = write_int64 (p, buf->method_base);
1270
1271 #if defined (HAVE_SYS_ZLIB)
1272                 if (profiler->gzfile) {
1273                         gzwrite (profiler->gzfile, hbuf, p - hbuf);
1274                         gzwrite (profiler->gzfile, buf->buf, buf->cursor - buf->buf);
1275                 } else
1276 #endif
1277                 {
1278                         fwrite (hbuf, p - hbuf, 1, profiler->file);
1279                         fwrite (buf->buf, buf->cursor - buf->buf, 1, profiler->file);
1280                         fflush (profiler->file);
1281                 }
1282         }
1283
1284         free_buffer (buf, buf->size);
1285 }
1286
1287 static void
1288 dump_buffer_threadless (MonoProfiler *profiler, LogBuffer *buf)
1289 {
1290         for (LogBuffer *iter = buf; iter; iter = iter->next)
1291                 iter->thread_id = 0;
1292
1293         dump_buffer (profiler, buf);
1294 }
1295
1296 // Only valid if init_thread () was called with add_to_lls = FALSE.
1297 static void
1298 send_log_unsafe (gboolean if_needed)
1299 {
1300         MonoProfilerThread *thread = PROF_TLS_GET ();
1301
1302         if (!if_needed || (if_needed && thread->buffer->next)) {
1303                 if (!thread->attached)
1304                         for (LogBuffer *iter = thread->buffer; iter; iter = iter->next)
1305                                 iter->thread_id = 0;
1306
1307                 send_buffer (thread);
1308                 init_buffer_state (thread);
1309         }
1310 }
1311
1312 // Assumes that the exclusive lock is held.
1313 static void
1314 sync_point_flush (void)
1315 {
1316         g_assert (InterlockedReadPointer (&buffer_rwlock_exclusive) == (gpointer) thread_id () && "Why don't we hold the exclusive lock?");
1317
1318         MONO_LLS_FOREACH_SAFE (&profiler_thread_list, MonoProfilerThread, thread) {
1319                 g_assert (thread->attached && "Why is a thread in the LLS not attached?");
1320
1321                 send_buffer (thread);
1322                 init_buffer_state (thread);
1323         } MONO_LLS_FOREACH_SAFE_END
1324 }
1325
1326 // Assumes that the exclusive lock is held.
1327 static void
1328 sync_point_mark (MonoProfilerSyncPointType type)
1329 {
1330         g_assert (InterlockedReadPointer (&buffer_rwlock_exclusive) == (gpointer) thread_id () && "Why don't we hold the exclusive lock?");
1331
1332         ENTER_LOG (&sync_points_ctr, logbuffer,
1333                 EVENT_SIZE /* event */ +
1334                 LEB128_SIZE /* type */
1335         );
1336
1337         emit_event (logbuffer, TYPE_META | TYPE_SYNC_POINT);
1338         emit_byte (logbuffer, type);
1339
1340         EXIT_LOG_EXPLICIT (NO_SEND);
1341
1342         send_log_unsafe (FALSE);
1343 }
1344
1345 // Assumes that the exclusive lock is held.
1346 static void
1347 sync_point (MonoProfilerSyncPointType type)
1348 {
1349         sync_point_flush ();
1350         sync_point_mark (type);
1351 }
1352
1353 static int
1354 gc_reference (MonoObject *obj, MonoClass *klass, uintptr_t size, uintptr_t num, MonoObject **refs, uintptr_t *offsets, void *data)
1355 {
1356         /* account for object alignment in the heap */
1357         size += 7;
1358         size &= ~7;
1359
1360         ENTER_LOG (&heap_objects_ctr, logbuffer,
1361                 EVENT_SIZE /* event */ +
1362                 LEB128_SIZE /* obj */ +
1363                 LEB128_SIZE /* klass */ +
1364                 LEB128_SIZE /* size */ +
1365                 LEB128_SIZE /* num */ +
1366                 num * (
1367                         LEB128_SIZE /* offset */ +
1368                         LEB128_SIZE /* ref */
1369                 )
1370         );
1371
1372         emit_event (logbuffer, TYPE_HEAP_OBJECT | TYPE_HEAP);
1373         emit_obj (logbuffer, obj);
1374         emit_ptr (logbuffer, klass);
1375         emit_value (logbuffer, size);
1376         emit_value (logbuffer, num);
1377
1378         uintptr_t last_offset = 0;
1379
1380         for (int i = 0; i < num; ++i) {
1381                 emit_value (logbuffer, offsets [i] - last_offset);
1382                 last_offset = offsets [i];
1383                 emit_obj (logbuffer, refs [i]);
1384         }
1385
1386         EXIT_LOG_EXPLICIT (DO_SEND);
1387
1388         return 0;
1389 }
1390
1391 static unsigned int hs_mode_ms = 0;
1392 static unsigned int hs_mode_gc = 0;
1393 static unsigned int hs_mode_ondemand = 0;
1394 static unsigned int gc_count = 0;
1395 static uint64_t last_hs_time = 0;
1396 static gboolean do_heap_walk = FALSE;
1397 static gboolean ignore_heap_events;
1398
1399 static void
1400 gc_roots (MonoProfiler *prof, int num, void **objects, int *root_types, uintptr_t *extra_info)
1401 {
1402         if (ignore_heap_events)
1403                 return;
1404
1405         ENTER_LOG (&heap_roots_ctr, logbuffer,
1406                 EVENT_SIZE /* event */ +
1407                 LEB128_SIZE /* num */ +
1408                 LEB128_SIZE /* collections */ +
1409                 num * (
1410                         LEB128_SIZE /* object */ +
1411                         LEB128_SIZE /* root type */ +
1412                         LEB128_SIZE /* extra info */
1413                 )
1414         );
1415
1416         emit_event (logbuffer, TYPE_HEAP_ROOT | TYPE_HEAP);
1417         emit_value (logbuffer, num);
1418         emit_value (logbuffer, mono_gc_collection_count (mono_gc_max_generation ()));
1419
1420         for (int i = 0; i < num; ++i) {
1421                 emit_obj (logbuffer, objects [i]);
1422                 emit_byte (logbuffer, root_types [i]);
1423                 emit_value (logbuffer, extra_info [i]);
1424         }
1425
1426         EXIT_LOG_EXPLICIT (DO_SEND);
1427 }
1428
1429
1430 static void
1431 trigger_on_demand_heapshot (void)
1432 {
1433         if (heapshot_requested)
1434                 mono_gc_collect (mono_gc_max_generation ());
1435 }
1436
1437 #define ALL_GC_EVENTS_MASK (PROFLOG_GC_MOVES_EVENTS | PROFLOG_GC_ROOT_EVENTS | PROFLOG_GC_EVENTS | PROFLOG_HEAPSHOT_FEATURE)
1438
1439 static void
1440 gc_event (MonoProfiler *profiler, MonoGCEvent ev, int generation)
1441 {
1442         if (ev == MONO_GC_EVENT_START) {
1443                 uint64_t now = current_time ();
1444
1445                 if (hs_mode_ms && (now - last_hs_time) / 1000 / 1000 >= hs_mode_ms)
1446                         do_heap_walk = TRUE;
1447                 else if (hs_mode_gc && !(gc_count % hs_mode_gc))
1448                         do_heap_walk = TRUE;
1449                 else if (hs_mode_ondemand)
1450                         do_heap_walk = heapshot_requested;
1451                 else if (!hs_mode_ms && !hs_mode_gc && generation == mono_gc_max_generation ())
1452                         do_heap_walk = TRUE;
1453
1454                 // If using heapshot, ignore events for collections we don't care about.
1455                 if (ENABLED (PROFLOG_HEAPSHOT_FEATURE)) {
1456                         // Ignore events generated during the collection itself (i.e. GC roots).
1457                         ignore_heap_events = !do_heap_walk;
1458                 }
1459         }
1460
1461
1462         if (ENABLED (PROFLOG_GC_EVENTS)) {
1463                 ENTER_LOG (&gc_events_ctr, logbuffer,
1464                         EVENT_SIZE /* event */ +
1465                         BYTE_SIZE /* gc event */ +
1466                         BYTE_SIZE /* generation */
1467                 );
1468
1469                 emit_event (logbuffer, TYPE_GC_EVENT | TYPE_GC);
1470                 emit_byte (logbuffer, ev);
1471                 emit_byte (logbuffer, generation);
1472
1473                 EXIT_LOG_EXPLICIT (NO_SEND);
1474         }
1475
1476         switch (ev) {
1477         case MONO_GC_EVENT_START:
1478                 if (generation == mono_gc_max_generation ())
1479                         gc_count++;
1480
1481                 break;
1482         case MONO_GC_EVENT_PRE_STOP_WORLD_LOCKED:
1483                 /*
1484                  * Ensure that no thread can be in the middle of writing to
1485                  * a buffer when the world stops...
1486                  */
1487                 buffer_lock_excl ();
1488                 break;
1489         case MONO_GC_EVENT_POST_STOP_WORLD:
1490                 /*
1491                  * ... So that we now have a consistent view of all buffers.
1492                  * This allows us to flush them. We need to do this because
1493                  * they may contain object allocation events that need to be
1494                  * committed to the log file before any object move events
1495                  * that will be produced during this GC.
1496                  */
1497                 if (ENABLED (ALL_GC_EVENTS_MASK))
1498                         sync_point (SYNC_POINT_WORLD_STOP);
1499
1500                 /*
1501                  * All heap events are surrounded by a HEAP_START and a HEAP_END event.
1502                  * Right now, that's the case for GC Moves, GC Roots or heapshots.
1503                  */
1504                 if (ENABLED (PROFLOG_GC_MOVES_EVENTS | PROFLOG_GC_ROOT_EVENTS) || do_heap_walk) {
1505                         ENTER_LOG (&heap_starts_ctr, logbuffer,
1506                                 EVENT_SIZE /* event */
1507                         );
1508
1509                         emit_event (logbuffer, TYPE_HEAP_START | TYPE_HEAP);
1510
1511                         EXIT_LOG_EXPLICIT (DO_SEND);
1512                 }
1513
1514                 break;
1515         case MONO_GC_EVENT_PRE_START_WORLD:
1516                 if (do_heap_shot && do_heap_walk)
1517                         mono_gc_walk_heap (0, gc_reference, NULL);
1518
1519                 /* Matching HEAP_END to the HEAP_START from above */
1520                 if (ENABLED (PROFLOG_GC_MOVES_EVENTS | PROFLOG_GC_ROOT_EVENTS) || do_heap_walk) {
1521                         ENTER_LOG (&heap_ends_ctr, logbuffer,
1522                                 EVENT_SIZE /* event */
1523                         );
1524
1525                         emit_event (logbuffer, TYPE_HEAP_END | TYPE_HEAP);
1526
1527                         EXIT_LOG_EXPLICIT (DO_SEND);
1528                 }
1529
1530                 if (do_heap_shot && do_heap_walk) {
1531                         do_heap_walk = FALSE;
1532                         heapshot_requested = 0;
1533                         last_hs_time = current_time ();
1534                 }
1535
1536                 /*
1537                  * Similarly, we must now make sure that any object moves
1538                  * written to the GC thread's buffer are flushed. Otherwise,
1539                  * object allocation events for certain addresses could come
1540                  * after the move events that made those addresses available.
1541                  */
1542                 if (ENABLED (ALL_GC_EVENTS_MASK))
1543                         sync_point_mark (SYNC_POINT_WORLD_START);
1544                 break;
1545         case MONO_GC_EVENT_POST_START_WORLD_UNLOCKED:
1546                 /*
1547                  * Finally, it is safe to allow other threads to write to
1548                  * their buffers again.
1549                  */
1550                 buffer_unlock_excl ();
1551                 break;
1552         default:
1553                 break;
1554         }
1555 }
1556
1557 static void
1558 gc_resize (MonoProfiler *profiler, int64_t new_size)
1559 {
1560         ENTER_LOG (&gc_resizes_ctr, logbuffer,
1561                 EVENT_SIZE /* event */ +
1562                 LEB128_SIZE /* new size */
1563         );
1564
1565         emit_event (logbuffer, TYPE_GC_RESIZE | TYPE_GC);
1566         emit_value (logbuffer, new_size);
1567
1568         EXIT_LOG_EXPLICIT (DO_SEND);
1569 }
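/*
 * The LEB128_SIZE terms in the ENTER_LOG () size computations above and below
 * are worst-case reservations; emit_value () and friends write variable-length
 * LEB128, so small values occupy fewer bytes than reserved. For illustration
 * only (the profiler has its own encoder), unsigned LEB128 looks roughly like:
 *
 *     do {
 *             uint8_t b = value & 0x7f;
 *             value >>= 7;
 *             if (value)
 *                     b |= 0x80; // continuation bit
 *             *p++ = b;
 *     } while (value);
 *
 * e.g. the value 300 encodes as 0xac 0x02.
 */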
1570
1571 typedef struct {
1572         int count;
1573         MonoMethod* methods [MAX_FRAMES];
1574         int32_t il_offsets [MAX_FRAMES];
1575         int32_t native_offsets [MAX_FRAMES];
1576 } FrameData;
1577
1578 static int num_frames = MAX_FRAMES;
1579
1580 static mono_bool
1581 walk_stack (MonoMethod *method, int32_t native_offset, int32_t il_offset, mono_bool managed, void* data)
1582 {
1583         FrameData *frame = (FrameData *)data;
1584         if (method && frame->count < num_frames) {
1585                 frame->il_offsets [frame->count] = il_offset;
1586                 frame->native_offsets [frame->count] = native_offset;
1587                 frame->methods [frame->count++] = method;
1588                 //printf ("In %d %s at %d (native: %d)\n", frame->count, mono_method_get_name (method), il_offset, native_offset);
1589         }
1590         return frame->count == num_frames;
1591 }
1592
1593 /*
1594  * A note about stack walks: they can cause more profiler events to fire,
1595  * so we need to make sure they don't happen after we have started emitting
1596  * an event; hence the collect_bt ()/emit_bt () split.
1597  */
1598 static void
1599 collect_bt (FrameData *data)
1600 {
1601         data->count = 0;
1602         mono_stack_walk_no_il (walk_stack, data);
1603 }
1604
1605 static void
1606 emit_bt (MonoProfiler *prof, LogBuffer *logbuffer, FrameData *data)
1607 {
1608         /* FIXME: this is actually tons of data and we should
1609          * just output it the first time and use an id the next
1610          */
1611         if (data->count > num_frames)
1612                 printf ("bad num frames: %d\n", data->count);
1613         emit_value (logbuffer, data->count);
1614         //if (*p != data.count) {
1615         //      printf ("bad num frames enc at %d: %d -> %d\n", count, data.count, *p); printf ("frames end: %p->%p\n", p, logbuffer->cursor); exit(0);}
1616         while (data->count) {
1617                 emit_method (logbuffer, data->methods [--data->count]);
1618         }
1619 }
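/*
 * The usual pattern for handlers that want a backtrace (see gc_alloc () below
 * and the exception/monitor/GC handle callbacks) is: call collect_bt () before
 * ENTER_LOG (), size the reservation using data.count, then call emit_bt ()
 * once the fixed fields of the event have been emitted.
 */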
1620
1621 static void
1622 gc_alloc (MonoProfiler *prof, MonoObject *obj, MonoClass *klass)
1623 {
1624         int do_bt = (nocalls && InterlockedRead (&runtime_inited) && !notraces) ? TYPE_ALLOC_BT : 0;
1625         FrameData data;
1626         uintptr_t len = mono_object_get_size (obj);
1627         /* account for object alignment in the heap */
1628         len += 7;
1629         len &= ~7;
1630
1631         if (do_bt)
1632                 collect_bt (&data);
1633
1634         ENTER_LOG (&gc_allocs_ctr, logbuffer,
1635                 EVENT_SIZE /* event */ +
1636                 LEB128_SIZE /* klass */ +
1637                 LEB128_SIZE /* obj */ +
1638                 LEB128_SIZE /* size */ +
1639                 (do_bt ? (
1640                         LEB128_SIZE /* count */ +
1641                         data.count * (
1642                                 LEB128_SIZE /* method */
1643                         )
1644                 ) : 0)
1645         );
1646
1647         emit_event (logbuffer, do_bt | TYPE_ALLOC);
1648         emit_ptr (logbuffer, klass);
1649         emit_obj (logbuffer, obj);
1650         emit_value (logbuffer, len);
1651
1652         if (do_bt)
1653                 emit_bt (prof, logbuffer, &data);
1654
1655         EXIT_LOG;
1656 }
1657
1658 static void
1659 gc_moves (MonoProfiler *prof, void **objects, int num)
1660 {
1661         ENTER_LOG (&gc_moves_ctr, logbuffer,
1662                 EVENT_SIZE /* event */ +
1663                 LEB128_SIZE /* num */ +
1664                 num * (
1665                         LEB128_SIZE /* object */
1666                 )
1667         );
1668
1669         emit_event (logbuffer, TYPE_GC_MOVE | TYPE_GC);
1670         emit_value (logbuffer, num);
1671
1672         for (int i = 0; i < num; ++i)
1673                 emit_obj (logbuffer, objects [i]);
1674
1675         EXIT_LOG_EXPLICIT (DO_SEND);
1676 }
1677
1678 static void
1679 gc_handle (MonoProfiler *prof, int op, int type, uintptr_t handle, MonoObject *obj)
1680 {
1681         int do_bt = nocalls && InterlockedRead (&runtime_inited) && !notraces;
1682         FrameData data;
1683
1684         if (do_bt)
1685                 collect_bt (&data);
1686
1687         gint32 *ctr = op == MONO_PROFILER_GC_HANDLE_CREATED ? &gc_handle_creations_ctr : &gc_handle_deletions_ctr;
1688
1689         ENTER_LOG (ctr, logbuffer,
1690                 EVENT_SIZE /* event */ +
1691                 LEB128_SIZE /* type */ +
1692                 LEB128_SIZE /* handle */ +
1693                 (op == MONO_PROFILER_GC_HANDLE_CREATED ? (
1694                         LEB128_SIZE /* obj */
1695                 ) : 0) +
1696                 (do_bt ? (
1697                         LEB128_SIZE /* count */ +
1698                         data.count * (
1699                                 LEB128_SIZE /* method */
1700                         )
1701                 ) : 0)
1702         );
1703
1704         if (op == MONO_PROFILER_GC_HANDLE_CREATED)
1705                 emit_event (logbuffer, (do_bt ? TYPE_GC_HANDLE_CREATED_BT : TYPE_GC_HANDLE_CREATED) | TYPE_GC);
1706         else if (op == MONO_PROFILER_GC_HANDLE_DESTROYED)
1707                 emit_event (logbuffer, (do_bt ? TYPE_GC_HANDLE_DESTROYED_BT : TYPE_GC_HANDLE_DESTROYED) | TYPE_GC);
1708         else
1709                 g_assert_not_reached ();
1710
1711         emit_value (logbuffer, type);
1712         emit_value (logbuffer, handle);
1713
1714         if (op == MONO_PROFILER_GC_HANDLE_CREATED)
1715                 emit_obj (logbuffer, obj);
1716
1717         if (do_bt)
1718                 emit_bt (prof, logbuffer, &data);
1719
1720         EXIT_LOG;
1721 }
1722
1723 static void
1724 finalize_begin (MonoProfiler *prof)
1725 {
1726         ENTER_LOG (&finalize_begins_ctr, buf,
1727                 EVENT_SIZE /* event */
1728         );
1729
1730         emit_event (buf, TYPE_GC_FINALIZE_START | TYPE_GC);
1731
1732         EXIT_LOG;
1733 }
1734
1735 static void
1736 finalize_end (MonoProfiler *prof)
1737 {
1738         trigger_on_demand_heapshot ();
1739         if (ENABLED (PROFLOG_FINALIZATION_EVENTS)) {
1740                 ENTER_LOG (&finalize_ends_ctr, buf,
1741                         EVENT_SIZE /* event */
1742                 );
1743
1744                 emit_event (buf, TYPE_GC_FINALIZE_END | TYPE_GC);
1745
1746                 EXIT_LOG;
1747         }
1748 }
1749
1750 static void
1751 finalize_object_begin (MonoProfiler *prof, MonoObject *obj)
1752 {
1753         ENTER_LOG (&finalize_object_begins_ctr, buf,
1754                 EVENT_SIZE /* event */ +
1755                 LEB128_SIZE /* obj */
1756         );
1757
1758         emit_event (buf, TYPE_GC_FINALIZE_OBJECT_START | TYPE_GC);
1759         emit_obj (buf, obj);
1760
1761         EXIT_LOG;
1762 }
1763
1764 static void
1765 finalize_object_end (MonoProfiler *prof, MonoObject *obj)
1766 {
1767         ENTER_LOG (&finalize_object_ends_ctr, buf,
1768                 EVENT_SIZE /* event */ +
1769                 LEB128_SIZE /* obj */
1770         );
1771
1772         emit_event (buf, TYPE_GC_FINALIZE_OBJECT_END | TYPE_GC);
1773         emit_obj (buf, obj);
1774
1775         EXIT_LOG;
1776 }
1777
1778 static char*
1779 push_nesting (char *p, MonoClass *klass)
1780 {
1781         MonoClass *nesting;
1782         const char *name;
1783         const char *nspace;
1784         nesting = mono_class_get_nesting_type (klass);
1785         if (nesting) {
1786                 p = push_nesting (p, nesting);
1787                 *p++ = '/';
1788                 *p = 0;
1789         }
1790         name = mono_class_get_name (klass);
1791         nspace = mono_class_get_namespace (klass);
1792         if (*nspace) {
1793                 strcpy (p, nspace);
1794                 p += strlen (nspace);
1795                 *p++ = '.';
1796                 *p = 0;
1797         }
1798         strcpy (p, name);
1799         p += strlen (name);
1800         return p;
1801 }
1802
1803 static char*
1804 type_name (MonoClass *klass)
1805 {
1806         char buf [1024];
1807         char *p;
1808         push_nesting (buf, klass);
1809         p = (char *) g_malloc (strlen (buf) + 1);
1810         strcpy (p, buf);
1811         return p;
1812 }
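/*
 * Example (hypothetical types): for a class Inner nested inside Foo.Outer,
 * push_nesting () typically yields "Foo.Outer/Inner" -- enclosing types first,
 * separated by '/', each prefixed by its namespace when non-empty. Note that
 * type_name () formats into a fixed 1024-byte stack buffer and is only used
 * as a fallback before the runtime is initialized (see class_loaded () and
 * class_unloaded () below).
 */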
1813
1814 static void
1815 image_loaded (MonoProfiler *prof, MonoImage *image, int result)
1816 {
1817         if (result != MONO_PROFILE_OK)
1818                 return;
1819
1820         const char *name = mono_image_get_filename (image);
1821         int nlen = strlen (name) + 1;
1822
1823         ENTER_LOG (&image_loads_ctr, logbuffer,
1824                 EVENT_SIZE /* event */ +
1825                 BYTE_SIZE /* type */ +
1826                 LEB128_SIZE /* image */ +
1827                 nlen /* name */
1828         );
1829
1830         emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
1831         emit_byte (logbuffer, TYPE_IMAGE);
1832         emit_ptr (logbuffer, image);
1833         memcpy (logbuffer->cursor, name, nlen);
1834         logbuffer->cursor += nlen;
1835
1836         EXIT_LOG;
1837 }
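/*
 * image_loaded () above and the remaining metadata callbacks below share the
 * same event shape: a TYPE_END_LOAD or TYPE_END_UNLOAD event, a byte giving
 * the metadata kind (TYPE_IMAGE, TYPE_ASSEMBLY, TYPE_CLASS, ...), the pointer
 * or id, and, where applicable, the NUL-terminated name copied inline into
 * the buffer.
 */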
1838
1839 static void
1840 image_unloaded (MonoProfiler *prof, MonoImage *image)
1841 {
1842         const char *name = mono_image_get_filename (image);
1843         int nlen = strlen (name) + 1;
1844
1845         ENTER_LOG (&image_unloads_ctr, logbuffer,
1846                 EVENT_SIZE /* event */ +
1847                 BYTE_SIZE /* type */ +
1848                 LEB128_SIZE /* image */ +
1849                 nlen /* name */
1850         );
1851
1852         emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
1853         emit_byte (logbuffer, TYPE_IMAGE);
1854         emit_ptr (logbuffer, image);
1855         memcpy (logbuffer->cursor, name, nlen);
1856         logbuffer->cursor += nlen;
1857
1858         EXIT_LOG;
1859 }
1860
1861 static void
1862 assembly_loaded (MonoProfiler *prof, MonoAssembly *assembly, int result)
1863 {
1864         if (result != MONO_PROFILE_OK)
1865                 return;
1866
1867         char *name = mono_stringify_assembly_name (mono_assembly_get_name (assembly));
1868         int nlen = strlen (name) + 1;
1869         MonoImage *image = mono_assembly_get_image (assembly);
1870
1871         ENTER_LOG (&assembly_loads_ctr, logbuffer,
1872                 EVENT_SIZE /* event */ +
1873                 BYTE_SIZE /* type */ +
1874                 LEB128_SIZE /* assembly */ +
1875                 LEB128_SIZE /* image */ +
1876                 nlen /* name */
1877         );
1878
1879         emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
1880         emit_byte (logbuffer, TYPE_ASSEMBLY);
1881         emit_ptr (logbuffer, assembly);
1882         emit_ptr (logbuffer, image);
1883         memcpy (logbuffer->cursor, name, nlen);
1884         logbuffer->cursor += nlen;
1885
1886         EXIT_LOG;
1887
1888         mono_free (name);
1889 }
1890
1891 static void
1892 assembly_unloaded (MonoProfiler *prof, MonoAssembly *assembly)
1893 {
1894         char *name = mono_stringify_assembly_name (mono_assembly_get_name (assembly));
1895         int nlen = strlen (name) + 1;
1896         MonoImage *image = mono_assembly_get_image (assembly);
1897
1898         ENTER_LOG (&assembly_unloads_ctr, logbuffer,
1899                 EVENT_SIZE /* event */ +
1900                 BYTE_SIZE /* type */ +
1901                 LEB128_SIZE /* assembly */ +
1902                 LEB128_SIZE /* image */ +
1903                 nlen /* name */
1904         );
1905
1906         emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
1907         emit_byte (logbuffer, TYPE_ASSEMBLY);
1908         emit_ptr (logbuffer, assembly);
1909         emit_ptr (logbuffer, image);
1910         memcpy (logbuffer->cursor, name, nlen);
1911         logbuffer->cursor += nlen;
1912
1913         EXIT_LOG;
1914
1915         mono_free (name);
1916 }
1917
1918 static void
1919 class_loaded (MonoProfiler *prof, MonoClass *klass, int result)
1920 {
1921         if (result != MONO_PROFILE_OK)
1922                 return;
1923
1924         char *name;
1925
1926         if (InterlockedRead (&runtime_inited))
1927                 name = mono_type_get_name (mono_class_get_type (klass));
1928         else
1929                 name = type_name (klass);
1930
1931         int nlen = strlen (name) + 1;
1932         MonoImage *image = mono_class_get_image (klass);
1933
1934         ENTER_LOG (&class_loads_ctr, logbuffer,
1935                 EVENT_SIZE /* event */ +
1936                 BYTE_SIZE /* type */ +
1937                 LEB128_SIZE /* klass */ +
1938                 LEB128_SIZE /* image */ +
1939                 nlen /* name */
1940         );
1941
1942         emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
1943         emit_byte (logbuffer, TYPE_CLASS);
1944         emit_ptr (logbuffer, klass);
1945         emit_ptr (logbuffer, image);
1946         memcpy (logbuffer->cursor, name, nlen);
1947         logbuffer->cursor += nlen;
1948
1949         EXIT_LOG;
1950
1951         if (runtime_inited)
1952                 mono_free (name);
1953         else
1954                 g_free (name);
1955 }
1956
1957 static void
1958 class_unloaded (MonoProfiler *prof, MonoClass *klass)
1959 {
1960         char *name;
1961
1962         if (InterlockedRead (&runtime_inited))
1963                 name = mono_type_get_name (mono_class_get_type (klass));
1964         else
1965                 name = type_name (klass);
1966
1967         int nlen = strlen (name) + 1;
1968         MonoImage *image = mono_class_get_image (klass);
1969
1970         ENTER_LOG (&class_unloads_ctr, logbuffer,
1971                 EVENT_SIZE /* event */ +
1972                 BYTE_SIZE /* type */ +
1973                 LEB128_SIZE /* klass */ +
1974                 LEB128_SIZE /* image */ +
1975                 nlen /* name */
1976         );
1977
1978         emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
1979         emit_byte (logbuffer, TYPE_CLASS);
1980         emit_ptr (logbuffer, klass);
1981         emit_ptr (logbuffer, image);
1982         memcpy (logbuffer->cursor, name, nlen);
1983         logbuffer->cursor += nlen;
1984
1985         EXIT_LOG;
1986
1987         if (runtime_inited)
1988                 mono_free (name);
1989         else
1990                 g_free (name);
1991 }
1992
1993 static void process_method_enter_coverage (MonoProfiler *prof, MonoMethod *method);
1994
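/*
 * method_enter () increments the per-thread call depth and the leave callbacks
 * decrement it; enter/leave events are only written while the depth stays
 * within max_call_depth, and coverage bookkeeping
 * (process_method_enter_coverage ()) runs on every entry regardless.
 */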
1995 static void
1996 method_enter (MonoProfiler *prof, MonoMethod *method)
1997 {
1998         process_method_enter_coverage (prof, method);
1999
2000         if (!only_coverage && get_thread ()->call_depth++ <= max_call_depth) {
2001                 ENTER_LOG (&method_entries_ctr, logbuffer,
2002                         EVENT_SIZE /* event */ +
2003                         LEB128_SIZE /* method */
2004                 );
2005
2006                 emit_event (logbuffer, TYPE_ENTER | TYPE_METHOD);
2007                 emit_method (logbuffer, method);
2008
2009                 EXIT_LOG;
2010         }
2011 }
2012
2013 static void
2014 method_leave (MonoProfiler *prof, MonoMethod *method)
2015 {
2016         if (!only_coverage && --get_thread ()->call_depth <= max_call_depth) {
2017                 ENTER_LOG (&method_exits_ctr, logbuffer,
2018                         EVENT_SIZE /* event */ +
2019                         LEB128_SIZE /* method */
2020                 );
2021
2022                 emit_event (logbuffer, TYPE_LEAVE | TYPE_METHOD);
2023                 emit_method (logbuffer, method);
2024
2025                 EXIT_LOG;
2026         }
2027 }
2028
2029 static void
2030 method_exc_leave (MonoProfiler *prof, MonoMethod *method)
2031 {
2032         if (!only_coverage && !nocalls && --get_thread ()->call_depth <= max_call_depth) {
2033                 ENTER_LOG (&method_exception_exits_ctr, logbuffer,
2034                         EVENT_SIZE /* event */ +
2035                         LEB128_SIZE /* method */
2036                 );
2037
2038                 emit_event (logbuffer, TYPE_EXC_LEAVE | TYPE_METHOD);
2039                 emit_method (logbuffer, method);
2040
2041                 EXIT_LOG;
2042         }
2043 }
2044
2045 static void
2046 method_jitted (MonoProfiler *prof, MonoMethod *method, MonoJitInfo *ji, int result)
2047 {
2048         if (result != MONO_PROFILE_OK)
2049                 return;
2050
2051         register_method_local (method, ji);
2052 }
2053
2054 static void
2055 code_buffer_new (MonoProfiler *prof, void *buffer, int size, MonoProfilerCodeBufferType type, void *data)
2056 {
2057         char *name;
2058         int nlen;
2059
2060         if (type == MONO_PROFILER_CODE_BUFFER_SPECIFIC_TRAMPOLINE) {
2061                 name = (char *) data;
2062                 nlen = strlen (name) + 1;
2063         } else {
2064                 name = NULL;
2065                 nlen = 0;
2066         }
2067
2068         ENTER_LOG (&code_buffers_ctr, logbuffer,
2069                 EVENT_SIZE /* event */ +
2070                 BYTE_SIZE /* type */ +
2071                 LEB128_SIZE /* buffer */ +
2072                 LEB128_SIZE /* size */ +
2073                 (name ? (
2074                         nlen /* name */
2075                 ) : 0)
2076         );
2077
2078         emit_event (logbuffer, TYPE_JITHELPER | TYPE_RUNTIME);
2079         emit_byte (logbuffer, type);
2080         emit_ptr (logbuffer, buffer);
2081         emit_value (logbuffer, size);
2082
2083         if (name) {
2084                 memcpy (logbuffer->cursor, name, nlen);
2085                 logbuffer->cursor += nlen;
2086         }
2087
2088         EXIT_LOG;
2089 }
2090
2091 static void
2092 throw_exc (MonoProfiler *prof, MonoObject *object)
2093 {
2094         int do_bt = (nocalls && InterlockedRead (&runtime_inited) && !notraces) ? TYPE_THROW_BT : 0;
2095         FrameData data;
2096
2097         if (do_bt)
2098                 collect_bt (&data);
2099
2100         ENTER_LOG (&exception_throws_ctr, logbuffer,
2101                 EVENT_SIZE /* event */ +
2102                 LEB128_SIZE /* object */ +
2103                 (do_bt ? (
2104                         LEB128_SIZE /* count */ +
2105                         data.count * (
2106                                 LEB128_SIZE /* method */
2107                         )
2108                 ) : 0)
2109         );
2110
2111         emit_event (logbuffer, do_bt | TYPE_EXCEPTION);
2112         emit_obj (logbuffer, object);
2113
2114         if (do_bt)
2115                 emit_bt (prof, logbuffer, &data);
2116
2117         EXIT_LOG;
2118 }
2119
2120 static void
2121 clause_exc (MonoProfiler *prof, MonoMethod *method, int clause_type, int clause_num, MonoObject *exc)
2122 {
2123         ENTER_LOG (&exception_clauses_ctr, logbuffer,
2124                 EVENT_SIZE /* event */ +
2125                 BYTE_SIZE /* clause type */ +
2126                 LEB128_SIZE /* clause num */ +
2127                 LEB128_SIZE /* method */
2128         );
2129
2130         emit_event (logbuffer, TYPE_EXCEPTION | TYPE_CLAUSE);
2131         emit_byte (logbuffer, clause_type);
2132         emit_value (logbuffer, clause_num);
2133         emit_method (logbuffer, method);
2134         emit_obj (logbuffer, exc);
2135
2136         EXIT_LOG;
2137 }
2138
2139 static void
2140 monitor_event (MonoProfiler *profiler, MonoObject *object, MonoProfilerMonitorEvent ev)
2141 {
2142         int do_bt = (nocalls && InterlockedRead (&runtime_inited) && !notraces) ? TYPE_MONITOR_BT : 0;
2143         FrameData data;
2144
2145         if (do_bt)
2146                 collect_bt (&data);
2147
2148         ENTER_LOG (&monitor_events_ctr, logbuffer,
2149                 EVENT_SIZE /* event */ +
2150                 BYTE_SIZE /* ev */ +
2151                 LEB128_SIZE /* object */ +
2152                 (do_bt ? (
2153                         LEB128_SIZE /* count */ +
2154                         data.count * (
2155                                 LEB128_SIZE /* method */
2156                         )
2157                 ) : 0)
2158         );
2159
2160         emit_event (logbuffer, do_bt | TYPE_MONITOR);
2161         emit_byte (logbuffer, ev);
2162         emit_obj (logbuffer, object);
2163
2164         if (do_bt)
2165                 emit_bt (profiler, logbuffer, &data);
2166
2167         EXIT_LOG;
2168 }
2169
2170 static void
2171 thread_start (MonoProfiler *prof, uintptr_t tid)
2172 {
2173         if (ENABLED (PROFLOG_THREAD_EVENTS)) {
2174                 ENTER_LOG (&thread_starts_ctr, logbuffer,
2175                         EVENT_SIZE /* event */ +
2176                         BYTE_SIZE /* type */ +
2177                         LEB128_SIZE /* tid */
2178                 );
2179
2180                 emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
2181                 emit_byte (logbuffer, TYPE_THREAD);
2182                 emit_ptr (logbuffer, (void*) tid);
2183
2184                 EXIT_LOG;
2185         }
2186 }
2187
2188 static void
2189 thread_end (MonoProfiler *prof, uintptr_t tid)
2190 {
2191         if (ENABLED (PROFLOG_THREAD_EVENTS)) {
2192                 ENTER_LOG (&thread_ends_ctr, logbuffer,
2193                         EVENT_SIZE /* event */ +
2194                         BYTE_SIZE /* type */ +
2195                         LEB128_SIZE /* tid */
2196                 );
2197
2198                 emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
2199                 emit_byte (logbuffer, TYPE_THREAD);
2200                 emit_ptr (logbuffer, (void*) tid);
2201
2202                 EXIT_LOG_EXPLICIT (NO_SEND);
2203         }
2204
2205         MonoProfilerThread *thread = get_thread ();
2206
2207         thread->ended = TRUE;
2208         remove_thread (thread);
2209
2210         PROF_TLS_SET (NULL);
2211 }
2212
2213 static void
2214 thread_name (MonoProfiler *prof, uintptr_t tid, const char *name)
2215 {
2216         int len = strlen (name) + 1;
2217
2218         if (ENABLED (PROFLOG_THREAD_EVENTS)) {
2219                 ENTER_LOG (&thread_names_ctr, logbuffer,
2220                         EVENT_SIZE /* event */ +
2221                         BYTE_SIZE /* type */ +
2222                         LEB128_SIZE /* tid */ +
2223                         len /* name */
2224                 );
2225
2226                 emit_event (logbuffer, TYPE_METADATA);
2227                 emit_byte (logbuffer, TYPE_THREAD);
2228                 emit_ptr (logbuffer, (void*)tid);
2229                 memcpy (logbuffer->cursor, name, len);
2230                 logbuffer->cursor += len;
2231
2232                 EXIT_LOG;
2233         }
2234 }
2235
2236 static void
2237 domain_loaded (MonoProfiler *prof, MonoDomain *domain, int result)
2238 {
2239         if (result != MONO_PROFILE_OK)
2240                 return;
2241
2242         ENTER_LOG (&domain_loads_ctr, logbuffer,
2243                 EVENT_SIZE /* event */ +
2244                 BYTE_SIZE /* type */ +
2245                 LEB128_SIZE /* domain id */
2246         );
2247
2248         emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
2249         emit_byte (logbuffer, TYPE_DOMAIN);
2250         emit_ptr (logbuffer, (void*)(uintptr_t) mono_domain_get_id (domain));
2251
2252         EXIT_LOG;
2253 }
2254
2255 static void
2256 domain_unloaded (MonoProfiler *prof, MonoDomain *domain)
2257 {
2258         ENTER_LOG (&domain_unloads_ctr, logbuffer,
2259                 EVENT_SIZE /* event */ +
2260                 BYTE_SIZE /* type */ +
2261                 LEB128_SIZE /* domain id */
2262         );
2263
2264         emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
2265         emit_byte (logbuffer, TYPE_DOMAIN);
2266         emit_ptr (logbuffer, (void*)(uintptr_t) mono_domain_get_id (domain));
2267
2268         EXIT_LOG;
2269 }
2270
2271 static void
2272 domain_name (MonoProfiler *prof, MonoDomain *domain, const char *name)
2273 {
2274         int nlen = strlen (name) + 1;
2275
2276         ENTER_LOG (&domain_names_ctr, logbuffer,
2277                 EVENT_SIZE /* event */ +
2278                 BYTE_SIZE /* type */ +
2279                 LEB128_SIZE /* domain id */ +
2280                 nlen /* name */
2281         );
2282
2283         emit_event (logbuffer, TYPE_METADATA);
2284         emit_byte (logbuffer, TYPE_DOMAIN);
2285         emit_ptr (logbuffer, (void*)(uintptr_t) mono_domain_get_id (domain));
2286         memcpy (logbuffer->cursor, name, nlen);
2287         logbuffer->cursor += nlen;
2288
2289         EXIT_LOG;
2290 }
2291
2292 static void
2293 context_loaded (MonoProfiler *prof, MonoAppContext *context)
2294 {
2295         ENTER_LOG (&context_loads_ctr, logbuffer,
2296                 EVENT_SIZE /* event */ +
2297                 BYTE_SIZE /* type */ +
2298                 LEB128_SIZE /* context id */ +
2299                 LEB128_SIZE /* domain id */
2300         );
2301
2302         emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
2303         emit_byte (logbuffer, TYPE_CONTEXT);
2304         emit_ptr (logbuffer, (void*)(uintptr_t) mono_context_get_id (context));
2305         emit_ptr (logbuffer, (void*)(uintptr_t) mono_context_get_domain_id (context));
2306
2307         EXIT_LOG;
2308 }
2309
2310 static void
2311 context_unloaded (MonoProfiler *prof, MonoAppContext *context)
2312 {
2313         ENTER_LOG (&context_unloads_ctr, logbuffer,
2314                 EVENT_SIZE /* event */ +
2315                 BYTE_SIZE /* type */ +
2316                 LEB128_SIZE /* context id */ +
2317                 LEB128_SIZE /* domain id */
2318         );
2319
2320         emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
2321         emit_byte (logbuffer, TYPE_CONTEXT);
2322         emit_ptr (logbuffer, (void*)(uintptr_t) mono_context_get_id (context));
2323         emit_ptr (logbuffer, (void*)(uintptr_t) mono_context_get_domain_id (context));
2324
2325         EXIT_LOG;
2326 }
2327
2328 typedef struct {
2329         MonoMethod *method;
2330         MonoDomain *domain;
2331         void *base_address;
2332         int offset;
2333 } AsyncFrameInfo;
2334
2335 typedef struct {
2336         MonoLockFreeQueueNode node;
2337         MonoProfiler *prof;
2338         uint64_t time;
2339         uintptr_t tid;
2340         void *ip;
2341         int count;
2342         AsyncFrameInfo frames [MONO_ZERO_LEN_ARRAY];
2343 } SampleHit;
2344
2345 static mono_bool
2346 async_walk_stack (MonoMethod *method, MonoDomain *domain, void *base_address, int offset, void *data)
2347 {
2348         SampleHit *sample = (SampleHit *) data;
2349
2350         if (sample->count < num_frames) {
2351                 int i = sample->count;
2352
2353                 sample->frames [i].method = method;
2354                 sample->frames [i].domain = domain;
2355                 sample->frames [i].base_address = base_address;
2356                 sample->frames [i].offset = offset;
2357
2358                 sample->count++;
2359         }
2360
2361         return sample->count == num_frames;
2362 }
2363
2364 #define SAMPLE_SLOT_SIZE(FRAMES) (sizeof (SampleHit) + sizeof (AsyncFrameInfo) * (FRAMES - MONO_ZERO_LEN_ARRAY))
2365 #define SAMPLE_BLOCK_SIZE (mono_pagesize ())
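/*
 * SAMPLE_SLOT_SIZE () accounts for the frames [] flexible array at the end of
 * SampleHit: a slot holding N frames needs sizeof (SampleHit) plus room for
 * the N - MONO_ZERO_LEN_ARRAY extra AsyncFrameInfo entries. The lock-free
 * allocator backing these slots (see mono_sample_hit () below) is assumed to
 * be initialized elsewhere with SAMPLE_SLOT_SIZE (num_frames) slots carved
 * out of page-sized SAMPLE_BLOCK_SIZE blocks.
 */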
2366
2367 static void
2368 enqueue_sample_hit (gpointer p)
2369 {
2370         SampleHit *sample = p;
2371
2372         mono_lock_free_queue_node_unpoison (&sample->node);
2373         mono_lock_free_queue_enqueue (&sample->prof->dumper_queue, &sample->node);
2374         mono_os_sem_post (&sample->prof->dumper_queue_sem);
2375 }
2376
2377 static void
2378 mono_sample_hit (MonoProfiler *profiler, unsigned char *ip, void *context)
2379 {
2380         /*
2381          * Please note: We rely on the runtime loading the profiler with
2382          * MONO_DL_EAGER (RTLD_NOW) so that references to runtime functions within
2383          * this function (and its siblings) are resolved when the profiler is
2384          * loaded. Otherwise, we would potentially invoke the dynamic linker when
2385          * invoking runtime functions, which is not async-signal-safe.
2386          */
2387
2388         if (InterlockedRead (&in_shutdown))
2389                 return;
2390
2391         SampleHit *sample = (SampleHit *) mono_lock_free_queue_dequeue (&profiler->sample_reuse_queue);
2392
2393         if (!sample) {
2394                 /*
2395                  * If we're out of reusable sample events and we're not allowed to
2396                  * allocate more, we have no choice but to drop the event.
2397                  */
2398                 if (InterlockedRead (&sample_allocations_ctr) >= max_allocated_sample_hits)
2399                         return;
2400
2401                 sample = mono_lock_free_alloc (&profiler->sample_allocator);
2402                 sample->prof = profiler;
2403                 mono_lock_free_queue_node_init (&sample->node, TRUE);
2404
2405                 InterlockedIncrement (&sample_allocations_ctr);
2406         }
2407
2408         sample->count = 0;
2409         mono_stack_walk_async_safe (&async_walk_stack, context, sample);
2410
2411         sample->time = current_time ();
2412         sample->tid = thread_id ();
2413         sample->ip = ip;
2414
2415         mono_thread_hazardous_try_free (sample, enqueue_sample_hit);
2416 }
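/*
 * Sample lifecycle: a SampleHit is taken from the reuse queue if possible,
 * otherwise freshly allocated (but only while sample_allocations_ctr stays
 * below max_allocated_sample_hits; past that, the hit is dropped). Once filled
 * in, it is handed off via mono_thread_hazardous_try_free (), which runs
 * enqueue_sample_hit () when no hazard pointer references the node anymore;
 * that pushes it onto the dumper queue and wakes the dumper thread.
 */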
2417
2418 static uintptr_t *code_pages = 0;
2419 static int num_code_pages = 0;
2420 static int size_code_pages = 0;
2421 #define CPAGE_SHIFT (9)
2422 #define CPAGE_SIZE (1 << CPAGE_SHIFT)
2423 #define CPAGE_MASK (~(CPAGE_SIZE - 1))
2424 #define CPAGE_ADDR(p) ((p) & CPAGE_MASK)
2425
2426 static uintptr_t
2427 add_code_page (uintptr_t *hash, uintptr_t hsize, uintptr_t page)
2428 {
2429         uintptr_t i;
2430         uintptr_t start_pos;
2431         start_pos = (page >> CPAGE_SHIFT) % hsize;
2432         i = start_pos;
2433         do {
2434                 if (hash [i] && CPAGE_ADDR (hash [i]) == CPAGE_ADDR (page)) {
2435                         return 0;
2436                 } else if (!hash [i]) {
2437                         hash [i] = page;
2438                         return 1;
2439                 }
2440                 /* wrap around */
2441                 if (++i == hsize)
2442                         i = 0;
2443         } while (i != start_pos);
2444         /* should not happen */
2445         printf ("failed code page store\n");
2446         return 0;
2447 }
2448
2449 static void
2450 add_code_pointer (uintptr_t ip)
2451 {
2452         uintptr_t i;
2453         if (num_code_pages * 2 >= size_code_pages) {
2454                 uintptr_t *n;
2455                 uintptr_t old_size = size_code_pages;
2456                 size_code_pages *= 2;
2457                 if (size_code_pages == 0)
2458                         size_code_pages = 16;
2459                 n = (uintptr_t *) g_calloc (sizeof (uintptr_t) * size_code_pages, 1);
2460                 for (i = 0; i < old_size; ++i) {
2461                         if (code_pages [i])
2462                                 add_code_page (n, size_code_pages, code_pages [i]);
2463                 }
2464                 if (code_pages)
2465                         g_free (code_pages);
2466                 code_pages = n;
2467         }
2468         num_code_pages += add_code_page (code_pages, size_code_pages, ip & CPAGE_MASK);
2469 }
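/*
 * Sampled code addresses are deduplicated at page granularity: with
 * CPAGE_SHIFT 9 the pages are 512 bytes, so e.g. ip 0x12345 maps to page
 * 0x12200. add_code_page () is an open-addressing hash table with linear
 * probing, and add_code_pointer () doubles the table (rehashing the old
 * entries) once it is half full.
 */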
2470
2471 /* ELF code crashes on some systems. */
2472 //#if defined(HAVE_DL_ITERATE_PHDR) && defined(ELFMAG0)
2473 #if 0
2474 static void
2475 dump_ubin (MonoProfiler *prof, const char *filename, uintptr_t load_addr, uint64_t offset, uintptr_t size)
2476 {
2477         int len = strlen (filename) + 1;
2478
2479         ENTER_LOG (&sample_ubins_ctr, logbuffer,
2480                 EVENT_SIZE /* event */ +
2481                 LEB128_SIZE /* load address */ +
2482                 LEB128_SIZE /* offset */ +
2483                 LEB128_SIZE /* size */ +
2484                 len /* file name */
2485         );
2486
2487         emit_event (logbuffer, TYPE_SAMPLE | TYPE_SAMPLE_UBIN);
2488         emit_ptr (logbuffer, (void*)load_addr);
2489         emit_uvalue (logbuffer, offset);
2490         emit_uvalue (logbuffer, size);
2491         memcpy (logbuffer->cursor, filename, len);
2492         logbuffer->cursor += len;
2493
2494         EXIT_LOG_EXPLICIT (DO_SEND);
2495 }
2496 #endif
2497
2498 static void
2499 dump_usym (MonoProfiler *prof, const char *name, uintptr_t value, uintptr_t size)
2500 {
2501         int len = strlen (name) + 1;
2502
2503         ENTER_LOG (&sample_usyms_ctr, logbuffer,
2504                 EVENT_SIZE /* event */ +
2505                 LEB128_SIZE /* value */ +
2506                 LEB128_SIZE /* size */ +
2507                 len /* name */
2508         );
2509
2510         emit_event (logbuffer, TYPE_SAMPLE | TYPE_SAMPLE_USYM);
2511         emit_ptr (logbuffer, (void*)value);
2512         emit_value (logbuffer, size);
2513         memcpy (logbuffer->cursor, name, len);
2514         logbuffer->cursor += len;
2515
2516         EXIT_LOG_EXPLICIT (DO_SEND);
2517 }
2518
2519 /* ELF code crashes on some systems. */
2520 //#if defined(ELFMAG0)
2521 #if 0
2522
2523 #if SIZEOF_VOID_P == 4
2524 #define ELF_WSIZE 32
2525 #else
2526 #define ELF_WSIZE 64
2527 #endif
2528 #ifndef ElfW
2529 #define ElfW(type)      _ElfW (Elf, ELF_WSIZE, type)
2530 #define _ElfW(e,w,t)    _ElfW_1 (e, w, _##t)
2531 #define _ElfW_1(e,w,t)  e##w##t
2532 #endif
2533
2534 static void
2535 dump_elf_symbols (MonoProfiler *prof, ElfW(Sym) *symbols, int num_symbols, const char *strtab, void *load_addr)
2536 {
2537         int i;
2538         for (i = 0; i < num_symbols; ++i) {
2539                 const char* sym;
2540                 sym =  strtab + symbols [i].st_name;
2541                 if (!symbols [i].st_name || !symbols [i].st_size || (symbols [i].st_info & 0xf) != STT_FUNC)
2542                         continue;
2543                 //printf ("symbol %s at %d\n", sym, symbols [i].st_value);
2544                 dump_usym (prof, sym, (uintptr_t)load_addr + symbols [i].st_value, symbols [i].st_size);
2545         }
2546 }
2547
2548 static int
2549 read_elf_symbols (MonoProfiler *prof, const char *filename, void *load_addr)
2550 {
2551         int fd, i;
2552         void *data;
2553         struct stat statb;
2554         uint64_t file_size;
2555         ElfW(Ehdr) *header;
2556         ElfW(Shdr) *sheader;
2557         ElfW(Shdr) *shstrtabh;
2558         ElfW(Shdr) *symtabh = NULL;
2559         ElfW(Shdr) *strtabh = NULL;
2560         ElfW(Sym) *symbols = NULL;
2561         const char *strtab;
2562         int num_symbols;
2563
2564         fd = open (filename, O_RDONLY);
2565         if (fd < 0)
2566                 return 0;
2567         if (fstat (fd, &statb) != 0) {
2568                 close (fd);
2569                 return 0;
2570         }
2571         file_size = statb.st_size;
2572         data = mmap (NULL, file_size, PROT_READ, MAP_PRIVATE, fd, 0);
2573         close (fd);
2574         if (data == MAP_FAILED)
2575                 return 0;
2576         header = data;
2577         if (header->e_ident [EI_MAG0] != ELFMAG0 ||
2578                         header->e_ident [EI_MAG1] != ELFMAG1 ||
2579                         header->e_ident [EI_MAG2] != ELFMAG2 ||
2580                         header->e_ident [EI_MAG3] != ELFMAG3 ) {
2581                 munmap (data, file_size);
2582                 return 0;
2583         }
2584         sheader = (void*)((char*)data + header->e_shoff);
2585         shstrtabh = (void*)((char*)sheader + (header->e_shentsize * header->e_shstrndx));
2586         strtab = (const char*)data + shstrtabh->sh_offset;
2587         for (i = 0; i < header->e_shnum; ++i) {
2588                 //printf ("section header: %d\n", sheader->sh_type);
2589                 if (sheader->sh_type == SHT_SYMTAB) {
2590                         symtabh = sheader;
2591                         strtabh = (void*)((char*)data + header->e_shoff + sheader->sh_link * header->e_shentsize);
2592                         /*printf ("symtab section header: %d, .strstr: %d\n", i, sheader->sh_link);*/
2593                         break;
2594                 }
2595                 sheader = (void*)((char*)sheader + header->e_shentsize);
2596         }
2597         if (!symtabh || !strtabh) {
2598                 munmap (data, file_size);
2599                 return 0;
2600         }
2601         strtab = (const char*)data + strtabh->sh_offset;
2602         num_symbols = symtabh->sh_size / symtabh->sh_entsize;
2603         symbols = (void*)((char*)data + symtabh->sh_offset);
2604         dump_elf_symbols (prof, symbols, num_symbols, strtab, load_addr);
2605         munmap (data, file_size);
2606         return 1;
2607 }
2608 #endif
2609
2610 /* ELF code crashes on some systems. */
2611 //#if defined(HAVE_DL_ITERATE_PHDR) && defined(ELFMAG0)
2612 #if 0
2613 static int
2614 elf_dl_callback (struct dl_phdr_info *info, size_t size, void *data)
2615 {
2616         MonoProfiler *prof = data;
2617         char buf [256];
2618         const char *filename;
2619         BinaryObject *obj;
2620         char *a = (void*)info->dlpi_addr;
2621         int i, num_sym;
2622         ElfW(Dyn) *dyn = NULL;
2623         ElfW(Sym) *symtab = NULL;
2624         ElfW(Word) *hash_table = NULL;
2625         ElfW(Ehdr) *header = NULL;
2626         const char* strtab = NULL;
2627         for (obj = prof->binary_objects; obj; obj = obj->next) {
2628                 if (obj->addr == a)
2629                         return 0;
2630         }
2631         filename = info->dlpi_name;
2632         if (!filename)
2633                 return 0;
2634         if (!info->dlpi_addr && !filename [0]) {
2635                 int l = readlink ("/proc/self/exe", buf, sizeof (buf) - 1);
2636                 if (l > 0) {
2637                         buf [l] = 0;
2638                         filename = buf;
2639                 }
2640         }
2641         obj = g_calloc (sizeof (BinaryObject), 1);
2642         obj->addr = (void*)info->dlpi_addr;
2643         obj->name = pstrdup (filename);
2644         obj->next = prof->binary_objects;
2645         prof->binary_objects = obj;
2646         //printf ("loaded file: %s at %p, segments: %d\n", filename, (void*)info->dlpi_addr, info->dlpi_phnum);
2647         a = NULL;
2648         for (i = 0; i < info->dlpi_phnum; ++i) {
2649                 //printf ("segment type %d file offset: %d, size: %d\n", info->dlpi_phdr[i].p_type, info->dlpi_phdr[i].p_offset, info->dlpi_phdr[i].p_memsz);
2650                 if (info->dlpi_phdr[i].p_type == PT_LOAD && !header) {
2651                         header = (ElfW(Ehdr)*)(info->dlpi_addr + info->dlpi_phdr[i].p_vaddr);
2652                         if (header->e_ident [EI_MAG0] != ELFMAG0 ||
2653                                         header->e_ident [EI_MAG1] != ELFMAG1 ||
2654                                         header->e_ident [EI_MAG2] != ELFMAG2 ||
2655                                         header->e_ident [EI_MAG3] != ELFMAG3 ) {
2656                                 header = NULL;
2657                         }
2658                         dump_ubin (prof, filename, info->dlpi_addr + info->dlpi_phdr[i].p_vaddr, info->dlpi_phdr[i].p_offset, info->dlpi_phdr[i].p_memsz);
2659                 } else if (info->dlpi_phdr[i].p_type == PT_DYNAMIC) {
2660                         dyn = (ElfW(Dyn) *)(info->dlpi_addr + info->dlpi_phdr[i].p_vaddr);
2661                 }
2662         }
2663         if (read_elf_symbols (prof, filename, (void*)info->dlpi_addr))
2664                 return 0;
2665         if (!info->dlpi_name || !info->dlpi_name[0])
2666                 return 0;
2667         if (!dyn)
2668                 return 0;
2669         for (i = 0; dyn [i].d_tag != DT_NULL; ++i) {
2670                 if (dyn [i].d_tag == DT_SYMTAB) {
2671                         if (symtab && do_debug)
2672                                 printf ("multiple symtabs: %d\n", i);
2673                         symtab = (ElfW(Sym) *)(a + dyn [i].d_un.d_ptr);
2674                 } else if (dyn [i].d_tag == DT_HASH) {
2675                         hash_table = (ElfW(Word) *)(a + dyn [i].d_un.d_ptr);
2676                 } else if (dyn [i].d_tag == DT_STRTAB) {
2677                         strtab = (const char*)(a + dyn [i].d_un.d_ptr);
2678                 }
2679         }
2680         if (!hash_table)
2681                 return 0;
2682         num_sym = hash_table [1];
2683         dump_elf_symbols (prof, symtab, num_sym, strtab, (void*)info->dlpi_addr);
2684         return 0;
2685 }
2686
2687 static int
2688 load_binaries (MonoProfiler *prof)
2689 {
2690         dl_iterate_phdr (elf_dl_callback, prof);
2691         return 1;
2692 }
2693 #else
2694 static int
2695 load_binaries (MonoProfiler *prof)
2696 {
2697         return 0;
2698 }
2699 #endif
2700
2701 static const char*
2702 symbol_for (uintptr_t code)
2703 {
2704 #ifdef HAVE_DLADDR
2705         void *ip = (void*)code;
2706         Dl_info di;
2707         if (dladdr (ip, &di)) {
2708                 if (di.dli_sname)
2709                         return di.dli_sname;
2710         } else {
2711         /*      char **names;
2712                 names = backtrace_symbols (&ip, 1);
2713                 if (names) {
2714                         const char* p = names [0];
2715                         g_free (names);
2716                         return p;
2717                 }
2718                 */
2719         }
2720 #endif
2721         return NULL;
2722 }
2723
2724 static void
2725 dump_unmanaged_coderefs (MonoProfiler *prof)
2726 {
2727         int i;
2728         const char* last_symbol;
2729         uintptr_t addr, page_end;
2730
2731         if (load_binaries (prof))
2732                 return;
2733         for (i = 0; i < size_code_pages; ++i) {
2734                 const char* sym;
2735                 if (!code_pages [i] || code_pages [i] & 1)
2736                         continue;
2737                 last_symbol = NULL;
2738                 addr = CPAGE_ADDR (code_pages [i]);
2739                 page_end = addr + CPAGE_SIZE;
2740                 code_pages [i] |= 1;
2741                 /* we dump the symbols for the whole page */
2742                 for (; addr < page_end; addr += 16) {
2743                         sym = symbol_for (addr);
2744                         if (sym && sym == last_symbol)
2745                                 continue;
2746                         last_symbol = sym;
2747                         if (!sym)
2748                                 continue;
2749                         dump_usym (prof, sym, addr, 0); /* let's not guess the size */
2750                         //printf ("found symbol at %p: %s\n", (void*)addr, sym);
2751                 }
2752         }
2753 }
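/*
 * Bit 0 of a code_pages [] entry marks a page whose symbols have already been
 * dumped, so each page is scanned only once; CPAGE_ADDR () masks the flag off
 * again. Within a page, addresses are probed every 16 bytes and resolved with
 * symbol_for () (dladdr), emitting one TYPE_SAMPLE_USYM event per new symbol.
 */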
2754
2755 typedef struct MonoCounterAgent {
2756         MonoCounter *counter;
2757         // MonoCounterAgent-specific data:
2758         void *value;
2759         size_t value_size;
2760         short index;
2761         short emitted;
2762         struct MonoCounterAgent *next;
2763 } MonoCounterAgent;
2764
2765 static MonoCounterAgent* counters;
2766 static int counters_index = 1;
2767 static mono_mutex_t counters_mutex;
2768
2769 static void
2770 counters_add_agent (MonoCounter *counter)
2771 {
2772         if (InterlockedRead (&in_shutdown))
2773                 return;
2774
2775         MonoCounterAgent *agent, *item;
2776
2777         mono_os_mutex_lock (&counters_mutex);
2778
2779         for (agent = counters; agent; agent = agent->next) {
2780                 if (agent->counter == counter) {
2781                         agent->value_size = 0;
2782                         if (agent->value) {
2783                                 g_free (agent->value);
2784                                 agent->value = NULL;
2785                         }
2786                         goto done;
2787                 }
2788         }
2789
2790         agent = (MonoCounterAgent *) g_malloc (sizeof (MonoCounterAgent));
2791         agent->counter = counter;
2792         agent->value = NULL;
2793         agent->value_size = 0;
2794         agent->index = counters_index++;
2795         agent->emitted = 0;
2796         agent->next = NULL;
2797
2798         if (!counters) {
2799                 counters = agent;
2800         } else {
2801                 item = counters;
2802                 while (item->next)
2803                         item = item->next;
2804                 item->next = agent;
2805         }
2806
2807 done:
2808         mono_os_mutex_unlock (&counters_mutex);
2809 }
2810
2811 static mono_bool
2812 counters_init_foreach_callback (MonoCounter *counter, gpointer data)
2813 {
2814         counters_add_agent (counter);
2815         return TRUE;
2816 }
2817
2818 static void
2819 counters_init (MonoProfiler *profiler)
2820 {
2821         mono_os_mutex_init (&counters_mutex);
2822
2823         mono_counters_on_register (&counters_add_agent);
2824         mono_counters_foreach (counters_init_foreach_callback, NULL);
2825 }
2826
2827 static void
2828 counters_emit (MonoProfiler *profiler)
2829 {
2830         MonoCounterAgent *agent;
2831         int len = 0;
2832         int size =
2833                 EVENT_SIZE /* event */ +
2834                 LEB128_SIZE /* len */
2835         ;
2836
2837         mono_os_mutex_lock (&counters_mutex);
2838
2839         for (agent = counters; agent; agent = agent->next) {
2840                 if (agent->emitted)
2841                         continue;
2842
2843                 size +=
2844                         LEB128_SIZE /* section */ +
2845                         strlen (mono_counter_get_name (agent->counter)) + 1 /* name */ +
2846                         BYTE_SIZE /* type */ +
2847                         BYTE_SIZE /* unit */ +
2848                         BYTE_SIZE /* variance */ +
2849                         LEB128_SIZE /* index */
2850                 ;
2851
2852                 len++;
2853         }
2854
2855         if (!len)
2856                 goto done;
2857
2858         ENTER_LOG (&counter_descriptors_ctr, logbuffer, size);
2859
2860         emit_event (logbuffer, TYPE_SAMPLE_COUNTERS_DESC | TYPE_SAMPLE);
2861         emit_value (logbuffer, len);
2862
2863         for (agent = counters; agent; agent = agent->next) {
2864                 const char *name;
2865
2866                 if (agent->emitted)
2867                         continue;
2868
2869                 name = mono_counter_get_name (agent->counter);
2870                 emit_value (logbuffer, mono_counter_get_section (agent->counter));
2871                 emit_string (logbuffer, name, strlen (name) + 1);
2872                 emit_byte (logbuffer, mono_counter_get_type (agent->counter));
2873                 emit_byte (logbuffer, mono_counter_get_unit (agent->counter));
2874                 emit_byte (logbuffer, mono_counter_get_variance (agent->counter));
2875                 emit_value (logbuffer, agent->index);
2876
2877                 agent->emitted = 1;
2878         }
2879
2880         EXIT_LOG_EXPLICIT (DO_SEND);
2881
2882 done:
2883         mono_os_mutex_unlock (&counters_mutex);
2884 }
2885
2886 static void
2887 counters_sample (MonoProfiler *profiler, uint64_t timestamp)
2888 {
2889         MonoCounterAgent *agent;
2890         MonoCounter *counter;
2891         int type;
2892         int buffer_size;
2893         void *buffer;
2894         int size;
2895
2896         counters_emit (profiler);
2897
2898         buffer_size = 8;
2899         buffer = g_calloc (1, buffer_size);
2900
2901         mono_os_mutex_lock (&counters_mutex);
2902
2903         size =
2904                 EVENT_SIZE /* event */
2905         ;
2906
2907         for (agent = counters; agent; agent = agent->next) {
2908                 size +=
2909                         LEB128_SIZE /* index */ +
2910                         BYTE_SIZE /* type */ +
2911                         mono_counter_get_size (agent->counter) /* value */
2912                 ;
2913         }
2914
2915         size +=
2916                 LEB128_SIZE /* stop marker */
2917         ;
2918
2919         ENTER_LOG (&counter_samples_ctr, logbuffer, size);
2920
2921         emit_event_time (logbuffer, TYPE_SAMPLE_COUNTERS | TYPE_SAMPLE, timestamp);
2922
2923         for (agent = counters; agent; agent = agent->next) {
2924                 size_t size;
2925
2926                 counter = agent->counter;
2927
2928                 size = mono_counter_get_size (counter);
2929
2930                 if (size > buffer_size) {
2931                         buffer_size = size;
2932                         buffer = g_realloc (buffer, buffer_size);
2933                 }
2934
2935                 memset (buffer, 0, buffer_size);
2936
2937                 g_assert (mono_counters_sample (counter, buffer, size));
2938
2939                 type = mono_counter_get_type (counter);
2940
2941                 if (!agent->value) {
2942                         agent->value = g_calloc (1, size);
2943                         agent->value_size = size;
2944                 } else {
2945                         if (type == MONO_COUNTER_STRING) {
2946                                 if (strcmp (agent->value, buffer) == 0)
2947                                         continue;
2948                         } else {
2949                                 if (agent->value_size == size && memcmp (agent->value, buffer, size) == 0)
2950                                         continue;
2951                         }
2952                 }
2953
2954                 emit_uvalue (logbuffer, agent->index);
2955                 emit_byte (logbuffer, type);
2956                 switch (type) {
2957                 case MONO_COUNTER_INT:
2958 #if SIZEOF_VOID_P == 4
2959                 case MONO_COUNTER_WORD:
2960 #endif
2961                         emit_svalue (logbuffer, *(int*)buffer - *(int*)agent->value);
2962                         break;
2963                 case MONO_COUNTER_UINT:
2964                         emit_uvalue (logbuffer, *(guint*)buffer - *(guint*)agent->value);
2965                         break;
2966                 case MONO_COUNTER_TIME_INTERVAL:
2967                 case MONO_COUNTER_LONG:
2968 #if SIZEOF_VOID_P == 8
2969                 case MONO_COUNTER_WORD:
2970 #endif
2971                         emit_svalue (logbuffer, *(gint64*)buffer - *(gint64*)agent->value);
2972                         break;
2973                 case MONO_COUNTER_ULONG:
2974                         emit_uvalue (logbuffer, *(guint64*)buffer - *(guint64*)agent->value);
2975                         break;
2976                 case MONO_COUNTER_DOUBLE:
2977                         emit_double (logbuffer, *(double*)buffer);
2978                         break;
2979                 case MONO_COUNTER_STRING:
2980                         if (size == 0) {
2981                                 emit_byte (logbuffer, 0);
2982                         } else {
2983                                 emit_byte (logbuffer, 1);
2984                                 emit_string (logbuffer, (char*)buffer, size);
2985                         }
2986                         break;
2987                 default:
2988                         g_assert_not_reached ();
2989                 }
2990
2991                 if (type == MONO_COUNTER_STRING && size > agent->value_size) {
2992                         agent->value = g_realloc (agent->value, size);
2993                         agent->value_size = size;
2994                 }
2995
2996                 if (size > 0)
2997                         memcpy (agent->value, buffer, size);
2998         }
2999         g_free (buffer);
3000
3001         emit_value (logbuffer, 0);
3002
3003         EXIT_LOG_EXPLICIT (DO_SEND);
3004
3005         mono_os_mutex_unlock (&counters_mutex);
3006 }
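/*
 * Counter samples are delta-encoded: integer counter types emit the difference
 * from the previously sampled value (e.g. a counter that moved from 40 to 47
 * is written as +7), while doubles and strings are written in full. Counters
 * whose value has not changed are skipped entirely, and a zero index
 * terminates the sample event.
 */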
3007
3008 typedef struct _PerfCounterAgent PerfCounterAgent;
3009 struct _PerfCounterAgent {
3010         PerfCounterAgent *next;
3011         int index;
3012         char *category_name;
3013         char *name;
3014         int type;
3015         gint64 value;
3016         guint8 emitted;
3017         guint8 updated;
3018         guint8 deleted;
3019 };
3020
3021 static PerfCounterAgent *perfcounters = NULL;
3022
3023 static void
3024 perfcounters_emit (MonoProfiler *profiler)
3025 {
3026         PerfCounterAgent *pcagent;
3027         int len = 0;
3028         int size =
3029                 EVENT_SIZE /* event */ +
3030                 LEB128_SIZE /* len */
3031         ;
3032
3033         for (pcagent = perfcounters; pcagent; pcagent = pcagent->next) {
3034                 if (pcagent->emitted)
3035                         continue;
3036
3037                 size +=
3038                         LEB128_SIZE /* section */ +
3039                         strlen (pcagent->category_name) + 1 /* category name */ +
3040                         strlen (pcagent->name) + 1 /* name */ +
3041                         BYTE_SIZE /* type */ +
3042                         BYTE_SIZE /* unit */ +
3043                         BYTE_SIZE /* variance */ +
3044                         LEB128_SIZE /* index */
3045                 ;
3046
3047                 len++;
3048         }
3049
3050         if (!len)
3051                 return;
3052
3053         ENTER_LOG (&perfcounter_descriptors_ctr, logbuffer, size);
3054
3055         emit_event (logbuffer, TYPE_SAMPLE_COUNTERS_DESC | TYPE_SAMPLE);
3056         emit_value (logbuffer, len);
3057
3058         for (pcagent = perfcounters; pcagent; pcagent = pcagent->next) {
3059                 if (pcagent->emitted)
3060                         continue;
3061
3062                 emit_value (logbuffer, MONO_COUNTER_PERFCOUNTERS);
3063                 emit_string (logbuffer, pcagent->category_name, strlen (pcagent->category_name) + 1);
3064                 emit_string (logbuffer, pcagent->name, strlen (pcagent->name) + 1);
3065                 emit_byte (logbuffer, MONO_COUNTER_LONG);
3066                 emit_byte (logbuffer, MONO_COUNTER_RAW);
3067                 emit_byte (logbuffer, MONO_COUNTER_VARIABLE);
3068                 emit_value (logbuffer, pcagent->index);
3069
3070                 pcagent->emitted = 1;
3071         }
3072
3073         EXIT_LOG_EXPLICIT (DO_SEND);
3074 }
3075
3076 static gboolean
3077 perfcounters_foreach (char *category_name, char *name, unsigned char type, gint64 value, gpointer user_data)
3078 {
3079         PerfCounterAgent *pcagent;
3080
3081         for (pcagent = perfcounters; pcagent; pcagent = pcagent->next) {
3082                 if (strcmp (pcagent->category_name, category_name) != 0 || strcmp (pcagent->name, name) != 0)
3083                         continue;
3084                 if (pcagent->value == value)
3085                         return TRUE;
3086
3087                 pcagent->value = value;
3088                 pcagent->updated = 1;
3089                 pcagent->deleted = 0;
3090                 return TRUE;
3091         }
3092
3093         pcagent = g_new0 (PerfCounterAgent, 1);
3094         pcagent->next = perfcounters;
3095         pcagent->index = counters_index++;
3096         pcagent->category_name = g_strdup (category_name);
3097         pcagent->name = g_strdup (name);
3098         pcagent->type = (int) type;
3099         pcagent->value = value;
3100         pcagent->emitted = 0;
3101         pcagent->updated = 1;
3102         pcagent->deleted = 0;
3103
3104         perfcounters = pcagent;
3105
3106         return TRUE;
3107 }
3108
3109 static void
3110 perfcounters_sample (MonoProfiler *profiler, uint64_t timestamp)
3111 {
3112         PerfCounterAgent *pcagent;
3113         int len = 0;
3114         int size;
3115
3116         mono_os_mutex_lock (&counters_mutex);
3117
3118         /* Mark all perfcounters as deleted; the foreach callback below will unmark the ones that still exist. */
3119         for (pcagent = perfcounters; pcagent; pcagent = pcagent->next)
3120                 pcagent->deleted = 1;
3121
3122         mono_perfcounter_foreach (perfcounters_foreach, perfcounters);
3123
3124         perfcounters_emit (profiler);
3125
3126         size =
3127                 EVENT_SIZE /* event */
3128         ;
3129
3130         for (pcagent = perfcounters; pcagent; pcagent = pcagent->next) {
3131                 if (pcagent->deleted || !pcagent->updated)
3132                         continue;
3133
3134                 size +=
3135                         LEB128_SIZE /* index */ +
3136                         BYTE_SIZE /* type */ +
3137                         LEB128_SIZE /* value */
3138                 ;
3139
3140                 len++;
3141         }
3142
3143         if (!len)
3144                 goto done;
3145
3146         size +=
3147                 LEB128_SIZE /* stop marker */
3148         ;
3149
3150         ENTER_LOG (&perfcounter_samples_ctr, logbuffer, size);
3151
3152         emit_event_time (logbuffer, TYPE_SAMPLE_COUNTERS | TYPE_SAMPLE, timestamp);
3153
3154         for (pcagent = perfcounters; pcagent; pcagent = pcagent->next) {
3155                 if (pcagent->deleted || !pcagent->updated)
3156                         continue;
3157                 emit_uvalue (logbuffer, pcagent->index);
3158                 emit_byte (logbuffer, MONO_COUNTER_LONG);
3159                 emit_svalue (logbuffer, pcagent->value);
3160
3161                 pcagent->updated = 0;
3162         }
3163
3164         emit_value (logbuffer, 0);
3165
3166         EXIT_LOG_EXPLICIT (DO_SEND);
3167
3168 done:
3169         mono_os_mutex_unlock (&counters_mutex);
3170 }
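/*
 * A rough sketch (derived from the emit_* calls above, not from a separate
 * format spec) of what the two functions above append to the log:
 *
 *   perfcounters_emit ():
 *     [event: TYPE_SAMPLE_COUNTERS_DESC | TYPE_SAMPLE] [len]
 *     then, per new counter:
 *       [section: MONO_COUNTER_PERFCOUNTERS] [category name] [name]
 *       [type] [unit] [variance] [index]
 *
 *   perfcounters_sample ():
 *     [event + time: TYPE_SAMPLE_COUNTERS | TYPE_SAMPLE]
 *     then, per updated counter: [index] [type] [value]
 *     [0]   (stop marker)
 */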
3171
3172 static void
3173 counters_and_perfcounters_sample (MonoProfiler *prof)
3174 {
3175         uint64_t now = current_time ();
3176
3177         counters_sample (prof, now);
3178         perfcounters_sample (prof, now);
3179 }
3180
3181 #define COVERAGE_DEBUG(x) if (debug_coverage) {x}
3182 static mono_mutex_t coverage_mutex;
3183 static MonoConcurrentHashTable *coverage_methods = NULL;
3184 static MonoConcurrentHashTable *coverage_assemblies = NULL;
3185 static MonoConcurrentHashTable *coverage_classes = NULL;
3186
3187 static MonoConcurrentHashTable *filtered_classes = NULL;
3188 static MonoConcurrentHashTable *entered_methods = NULL;
3189 static MonoConcurrentHashTable *image_to_methods = NULL;
3190 static MonoConcurrentHashTable *suppressed_assemblies = NULL;
3191 static gboolean coverage_initialized = FALSE;
3192
3193 static GPtrArray *coverage_data = NULL;
3194 static int previous_offset = 0;
3195
3196 typedef struct {
3197         MonoLockFreeQueueNode node;
3198         MonoMethod *method;
3199 } MethodNode;
3200
3201 typedef struct {
3202         int offset;
3203         int counter;
3204         char *filename;
3205         int line;
3206         int column;
3207 } CoverageEntry;
3208
3209 static void
3210 free_coverage_entry (gpointer data, gpointer userdata)
3211 {
3212         CoverageEntry *entry = (CoverageEntry *)data;
3213         g_free (entry->filename);
3214         g_free (entry);
3215 }
3216
3217 static void
3218 obtain_coverage_for_method (MonoProfiler *prof, const MonoProfileCoverageEntry *entry)
3219 {
3220         int offset = entry->iloffset - previous_offset;
3221         CoverageEntry *e = g_new (CoverageEntry, 1);
3222
3223         previous_offset = entry->iloffset;
3224
3225         e->offset = offset;
3226         e->counter = entry->counter;
3227         e->filename = g_strdup(entry->filename ? entry->filename : "");
3228         e->line = entry->line;
3229         e->column = entry->col;
3230
3231         g_ptr_array_add (coverage_data, e);
3232 }
3233
3234 static char *
3235 parse_generic_type_names(char *name)
3236 {
3237         char *new_name, *ret;
3238         int within_generic_declaration = 0, generic_members = 1;
3239
3240         if (name == NULL || *name == '\0')
3241                 return g_strdup ("");
3242
3243         if (!(ret = new_name = (char *) g_calloc (strlen (name) * 4 + 1, sizeof (char))))
3244                 return NULL;
3245
3246         do {
3247                 switch (*name) {
3248                         case '<':
3249                                 within_generic_declaration = 1;
3250                                 break;
3251
3252                         case '>':
3253                                 within_generic_declaration = 0;
3254
3255                                 if (*(name - 1) != '<') {
3256                                         *new_name++ = '`';
3257                                         *new_name++ = '0' + generic_members;
3258                                 } else {
3259                                         memcpy (new_name, "&lt;&gt;", 8);
3260                                         new_name += 8;
3261                                 }
3262
3263                                 generic_members = 0;
3264                                 break;
3265
3266                         case ',':
3267                                 generic_members++;
3268                                 break;
3269
3270                         default:
3271                                 if (!within_generic_declaration)
3272                                         *new_name++ = *name;
3273
3274                                 break;
3275                 }
3276         } while (*name++);
3277
3278         return ret;
3279 }
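/*
 * Illustrative examples of the rewriting above (assuming typical C# names):
 *
 *     "List<T>"                  ->  "List`1"
 *     "Dictionary<TKey,TValue>"  ->  "Dictionary`2"
 *     "<>c__DisplayClass"        ->  "&lt;&gt;c__DisplayClass"
 *
 * Everything between '<' and '>' is dropped and replaced with a backtick and
 * the number of comma-separated type arguments, while a bare "<>" (as used in
 * compiler-generated names) is HTML-escaped.
 */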
3280
3281 static int method_id;
3282 static void
3283 build_method_buffer (gpointer key, gpointer value, gpointer userdata)
3284 {
3285         MonoMethod *method = (MonoMethod *)value;
3286         MonoProfiler *prof = (MonoProfiler *)userdata;
3287         MonoClass *klass;
3288         MonoImage *image;
3289         char *class_name;
3290         const char *image_name, *method_name, *sig, *first_filename;
3291         guint i;
3292
3293         previous_offset = 0;
3294         coverage_data = g_ptr_array_new ();
3295
3296         mono_profiler_coverage_get (prof, method, obtain_coverage_for_method);
3297
3298         klass = mono_method_get_class (method);
3299         image = mono_class_get_image (klass);
3300         image_name = mono_image_get_name (image);
3301
3302         sig = mono_signature_get_desc (mono_method_signature (method), TRUE);
3303         class_name = parse_generic_type_names (mono_type_get_name (mono_class_get_type (klass)));
3304         method_name = mono_method_get_name (method);
3305
3306         if (coverage_data->len != 0) {
3307                 CoverageEntry *entry = (CoverageEntry *)coverage_data->pdata[0];
3308                 first_filename = entry->filename ? entry->filename : "";
3309         } else
3310                 first_filename = "";
3311
3312         image_name = image_name ? image_name : "";
3313         sig = sig ? sig : "";
3314         method_name = method_name ? method_name : "";
3315
3316         ENTER_LOG (&coverage_methods_ctr, logbuffer,
3317                 EVENT_SIZE /* event */ +
3318                 strlen (image_name) + 1 /* image name */ +
3319                 strlen (class_name) + 1 /* class name */ +
3320                 strlen (method_name) + 1 /* method name */ +
3321                 strlen (sig) + 1 /* signature */ +
3322                 strlen (first_filename) + 1 /* first file name */ +
3323                 LEB128_SIZE /* token */ +
3324                 LEB128_SIZE /* method id */ +
3325                 LEB128_SIZE /* entries */
3326         );
3327
3328         emit_event (logbuffer, TYPE_COVERAGE_METHOD | TYPE_COVERAGE);
3329         emit_string (logbuffer, image_name, strlen (image_name) + 1);
3330         emit_string (logbuffer, class_name, strlen (class_name) + 1);
3331         emit_string (logbuffer, method_name, strlen (method_name) + 1);
3332         emit_string (logbuffer, sig, strlen (sig) + 1);
3333         emit_string (logbuffer, first_filename, strlen (first_filename) + 1);
3334
3335         emit_uvalue (logbuffer, mono_method_get_token (method));
3336         emit_uvalue (logbuffer, method_id);
3337         emit_value (logbuffer, coverage_data->len);
3338
3339         EXIT_LOG_EXPLICIT (DO_SEND);
3340
3341         for (i = 0; i < coverage_data->len; i++) {
3342                 CoverageEntry *entry = (CoverageEntry *)coverage_data->pdata[i];
3343
3344                 ENTER_LOG (&coverage_statements_ctr, logbuffer,
3345                         EVENT_SIZE /* event */ +
3346                         LEB128_SIZE /* method id */ +
3347                         LEB128_SIZE /* offset */ +
3348                         LEB128_SIZE /* counter */ +
3349                         LEB128_SIZE /* line */ +
3350                         LEB128_SIZE /* column */
3351                 );
3352
3353                 emit_event (logbuffer, TYPE_COVERAGE_STATEMENT | TYPE_COVERAGE);
3354                 emit_uvalue (logbuffer, method_id);
3355                 emit_uvalue (logbuffer, entry->offset);
3356                 emit_uvalue (logbuffer, entry->counter);
3357                 emit_uvalue (logbuffer, entry->line);
3358                 emit_uvalue (logbuffer, entry->column);
3359
3360                 EXIT_LOG_EXPLICIT (DO_SEND);
3361         }
3362
3363         method_id++;
3364
3365         g_free (class_name);
3366
3367         g_ptr_array_foreach (coverage_data, free_coverage_entry, NULL);
3368         g_ptr_array_free (coverage_data, TRUE);
3369         coverage_data = NULL;
3370 }
3371
3372 /* This empties the queue */
3373 static guint
3374 count_queue (MonoLockFreeQueue *queue)
3375 {
3376         MonoLockFreeQueueNode *node;
3377         guint count = 0;
3378
3379         while ((node = mono_lock_free_queue_dequeue (queue))) {
3380                 count++;
3381                 mono_thread_hazardous_try_free (node, g_free);
3382         }
3383
3384         return count;
3385 }
3386
3387 static void
3388 build_class_buffer (gpointer key, gpointer value, gpointer userdata)
3389 {
3390         MonoClass *klass = (MonoClass *)key;
3391         MonoLockFreeQueue *class_methods = (MonoLockFreeQueue *)value;
3392         MonoImage *image;
3393         char *class_name;
3394         const char *assembly_name;
3395         int number_of_methods, partially_covered;
3396         guint fully_covered;
3397
3398         image = mono_class_get_image (klass);
3399         assembly_name = mono_image_get_name (image);
3400         class_name = mono_type_get_name (mono_class_get_type (klass));
3401
3402         assembly_name = assembly_name ? assembly_name : "";
3403         number_of_methods = mono_class_num_methods (klass);
3404         fully_covered = count_queue (class_methods);
3405         /* We don't handle partially covered methods yet. */
3406         partially_covered = 0;
3407
3408         ENTER_LOG (&coverage_classes_ctr, logbuffer,
3409                 EVENT_SIZE /* event */ +
3410                 strlen (assembly_name) + 1 /* assembly name */ +
3411                 strlen (class_name) + 1 /* class name */ +
3412                 LEB128_SIZE /* no. methods */ +
3413                 LEB128_SIZE /* fully covered */ +
3414                 LEB128_SIZE /* partially covered */
3415         );
3416
3417         emit_event (logbuffer, TYPE_COVERAGE_CLASS | TYPE_COVERAGE);
3418         emit_string (logbuffer, assembly_name, strlen (assembly_name) + 1);
3419         emit_string (logbuffer, class_name, strlen (class_name) + 1);
3420         emit_uvalue (logbuffer, number_of_methods);
3421         emit_uvalue (logbuffer, fully_covered);
3422         emit_uvalue (logbuffer, partially_covered);
3423
3424         EXIT_LOG_EXPLICIT (DO_SEND);
3425
3426         g_free (class_name);
3427 }
3428
3429 static void
3430 get_coverage_for_image (MonoImage *image, int *number_of_methods, guint *fully_covered, int *partially_covered)
3431 {
3432         MonoLockFreeQueue *image_methods = (MonoLockFreeQueue *)mono_conc_hashtable_lookup (image_to_methods, image);
3433
3434         *number_of_methods = mono_image_get_table_rows (image, MONO_TABLE_METHOD);
3435         if (image_methods)
3436                 *fully_covered = count_queue (image_methods);
3437         else
3438                 *fully_covered = 0;
3439
3440         // FIXME: We don't handle partially covered yet.
3441         *partially_covered = 0;
3442 }
3443
3444 static void
3445 build_assembly_buffer (gpointer key, gpointer value, gpointer userdata)
3446 {
3447         MonoAssembly *assembly = (MonoAssembly *)value;
3448         MonoImage *image = mono_assembly_get_image (assembly);
3449         const char *name, *guid, *filename;
3450         int number_of_methods = 0, partially_covered = 0;
3451         guint fully_covered = 0;
3452
3453         name = mono_image_get_name (image);
3454         guid = mono_image_get_guid (image);
3455         filename = mono_image_get_filename (image);
3456
3457         name = name ? name : "";
3458         guid = guid ? guid : "";
3459         filename = filename ? filename : "";
3460
3461         get_coverage_for_image (image, &number_of_methods, &fully_covered, &partially_covered);
3462
3463         ENTER_LOG (&coverage_assemblies_ctr, logbuffer,
3464                 EVENT_SIZE /* event */ +
3465                 strlen (name) + 1 /* name */ +
3466                 strlen (guid) + 1 /* guid */ +
3467                 strlen (filename) + 1 /* file name */ +
3468                 LEB128_SIZE /* no. methods */ +
3469                 LEB128_SIZE /* fully covered */ +
3470                 LEB128_SIZE /* partially covered */
3471         );
3472
3473         emit_event (logbuffer, TYPE_COVERAGE_ASSEMBLY | TYPE_COVERAGE);
3474         emit_string (logbuffer, name, strlen (name) + 1);
3475         emit_string (logbuffer, guid, strlen (guid) + 1);
3476         emit_string (logbuffer, filename, strlen (filename) + 1);
3477         emit_uvalue (logbuffer, number_of_methods);
3478         emit_uvalue (logbuffer, fully_covered);
3479         emit_uvalue (logbuffer, partially_covered);
3480
3481         EXIT_LOG_EXPLICIT (DO_SEND);
3482 }
3483
3484 static void
3485 dump_coverage (MonoProfiler *prof)
3486 {
3487         if (!coverage_initialized)
3488                 return;
3489
3490         COVERAGE_DEBUG(fprintf (stderr, "Coverage: Started dump\n");)
3491         method_id = 0;
3492
3493         mono_os_mutex_lock (&coverage_mutex);
3494         mono_conc_hashtable_foreach (coverage_assemblies, build_assembly_buffer, NULL);
3495         mono_conc_hashtable_foreach (coverage_classes, build_class_buffer, NULL);
3496         mono_conc_hashtable_foreach (coverage_methods, build_method_buffer, prof);
3497         mono_os_mutex_unlock (&coverage_mutex);
3498
3499         COVERAGE_DEBUG(fprintf (stderr, "Coverage: Finished dump\n");)
3500 }
3501
3502 static void
3503 process_method_enter_coverage (MonoProfiler *prof, MonoMethod *method)
3504 {
3505         MonoClass *klass;
3506         MonoImage *image;
3507
3508         if (!coverage_initialized)
3509                 return;
3510
3511         klass = mono_method_get_class (method);
3512         image = mono_class_get_image (klass);
3513
3514         if (mono_conc_hashtable_lookup (suppressed_assemblies, (gpointer) mono_image_get_name (image)))
3515                 return;
3516
3517         mono_os_mutex_lock (&coverage_mutex);
3518         mono_conc_hashtable_insert (entered_methods, method, method);
3519         mono_os_mutex_unlock (&coverage_mutex);
3520 }
3521
3522 static MonoLockFreeQueueNode *
3523 create_method_node (MonoMethod *method)
3524 {
3525         MethodNode *node = (MethodNode *) g_malloc (sizeof (MethodNode));
3526         mono_lock_free_queue_node_init ((MonoLockFreeQueueNode *) node, FALSE);
3527         node->method = method;
3528
3529         return (MonoLockFreeQueueNode *) node;
3530 }
3531
3532 static gboolean
3533 coverage_filter (MonoProfiler *prof, MonoMethod *method)
3534 {
3535         MonoError error;
3536         MonoClass *klass;
3537         MonoImage *image;
3538         MonoAssembly *assembly;
3539         MonoMethodHeader *header;
3540         guint32 iflags, flags, code_size;
3541         char *fqn, *classname;
3542         gboolean has_positive, found;
3543         MonoLockFreeQueue *image_methods, *class_methods;
3544         MonoLockFreeQueueNode *node;
3545
3546         g_assert (coverage_initialized && "Why are we being asked for coverage filter info when we're not doing coverage?");
3547
3548         COVERAGE_DEBUG(fprintf (stderr, "Coverage filter for %s\n", mono_method_get_name (method));)
3549
3550         flags = mono_method_get_flags (method, &iflags);
3551         if ((iflags & 0x1000 /*METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL*/) ||
3552             (flags & 0x2000 /*METHOD_ATTRIBUTE_PINVOKE_IMPL*/)) {
3553                 COVERAGE_DEBUG(fprintf (stderr, "   Internal call or pinvoke - ignoring\n");)
3554                 return FALSE;
3555         }
3556
3557         // Don't need to do anything else if we're already tracking this method
3558         if (mono_conc_hashtable_lookup (coverage_methods, method)) {
3559                 COVERAGE_DEBUG(fprintf (stderr, "   Already tracking\n");)
3560                 return TRUE;
3561         }
3562
3563         klass = mono_method_get_class (method);
3564         image = mono_class_get_image (klass);
3565
3566         // Don't handle coverage for the core assemblies
3567         if (mono_conc_hashtable_lookup (suppressed_assemblies, (gpointer) mono_image_get_name (image)) != NULL)
3568                 return FALSE;
3569
3570         if (prof->coverage_filters) {
3571                 /* Check already filtered classes first */
3572                 if (mono_conc_hashtable_lookup (filtered_classes, klass)) {
3573                         COVERAGE_DEBUG(fprintf (stderr, "   Already filtered\n");)
3574                         return FALSE;
3575                 }
3576
3577                 classname = mono_type_get_name (mono_class_get_type (klass));
3578
3579                 fqn = g_strdup_printf ("[%s]%s", mono_image_get_name (image), classname);
3580
3581                 COVERAGE_DEBUG(fprintf (stderr, "   Looking for %s in filter\n", fqn);)
3582                 // Check positive filters first
3583                 has_positive = FALSE;
3584                 found = FALSE;
3585                 for (guint i = 0; i < prof->coverage_filters->len; ++i) {
3586                         char *filter = (char *)g_ptr_array_index (prof->coverage_filters, i);
3587
3588                         if (filter [0] == '+') {
3589                                 filter = &filter [1];
3590
3591                                 COVERAGE_DEBUG(fprintf (stderr, "   Checking against +%s ...", filter);)
3592
3593                                 if (strstr (fqn, filter) != NULL) {
3594                                         COVERAGE_DEBUG(fprintf (stderr, "matched\n");)
3595                                         found = TRUE;
3596                                 } else
3597                                         COVERAGE_DEBUG(fprintf (stderr, "no match\n");)
3598
3599                                 has_positive = TRUE;
3600                         }
3601                 }
3602
3603                 if (has_positive && !found) {
3604                         COVERAGE_DEBUG(fprintf (stderr, "   Positive match was not found\n");)
3605
3606                         mono_os_mutex_lock (&coverage_mutex);
3607                         mono_conc_hashtable_insert (filtered_classes, klass, klass);
3608                         mono_os_mutex_unlock (&coverage_mutex);
3609                         g_free (fqn);
3610                         g_free (classname);
3611
3612                         return FALSE;
3613                 }
3614
3615                 for (guint i = 0; i < prof->coverage_filters->len; ++i) {
3616                         // FIXME: Is substring search sufficient?
3617                         char *filter = (char *)g_ptr_array_index (prof->coverage_filters, i);
3618                         if (filter [0] == '+')
3619                                 continue;
3620
3621                         // Skip '-'
3622                         filter = &filter [1];
3623                         COVERAGE_DEBUG(fprintf (stderr, "   Checking against -%s ...", filter);)
3624
3625                         if (strstr (fqn, filter) != NULL) {
3626                                 COVERAGE_DEBUG(fprintf (stderr, "matched\n");)
3627
3628                                 mono_os_mutex_lock (&coverage_mutex);
3629                                 mono_conc_hashtable_insert (filtered_classes, klass, klass);
3630                                 mono_os_mutex_unlock (&coverage_mutex);
3631                                 g_free (fqn);
3632                                 g_free (classname);
3633
3634                                 return FALSE;
3635                         } else
3636                                 COVERAGE_DEBUG(fprintf (stderr, "no match\n");)
3637
3638                 }
3639
3640                 g_free (fqn);
3641                 g_free (classname);
3642         }
3643
3644         COVERAGE_DEBUG(fprintf (stderr, "   Handling coverage for %s\n", mono_method_get_name (method));)
3645         header = mono_method_get_header_checked (method, &error);
3646         mono_error_cleanup (&error);
3647
3648         mono_method_header_get_code (header, &code_size, NULL);
3649
3650         assembly = mono_image_get_assembly (image);
3651
3652         // Need to keep the assemblies around for as long as they are kept in the hashtable.
3653         // NUnit, for example, has a habit of unloading them before the coverage statistics are
3654         // generated, causing a crash. See https://bugzilla.xamarin.com/show_bug.cgi?id=39325
3655         mono_assembly_addref (assembly);
3656
3657         mono_os_mutex_lock (&coverage_mutex);
3658         mono_conc_hashtable_insert (coverage_methods, method, method);
3659         mono_conc_hashtable_insert (coverage_assemblies, assembly, assembly);
3660         mono_os_mutex_unlock (&coverage_mutex);
3661
3662         image_methods = (MonoLockFreeQueue *)mono_conc_hashtable_lookup (image_to_methods, image);
3663
3664         if (image_methods == NULL) {
3665                 image_methods = (MonoLockFreeQueue *) g_malloc (sizeof (MonoLockFreeQueue));
3666                 mono_lock_free_queue_init (image_methods);
3667                 mono_os_mutex_lock (&coverage_mutex);
3668                 mono_conc_hashtable_insert (image_to_methods, image, image_methods);
3669                 mono_os_mutex_unlock (&coverage_mutex);
3670         }
3671
3672         node = create_method_node (method);
3673         mono_lock_free_queue_enqueue (image_methods, node);
3674
3675         class_methods = (MonoLockFreeQueue *)mono_conc_hashtable_lookup (coverage_classes, klass);
3676
3677         if (class_methods == NULL) {
3678                 class_methods = (MonoLockFreeQueue *) g_malloc (sizeof (MonoLockFreeQueue));
3679                 mono_lock_free_queue_init (class_methods);
3680                 mono_os_mutex_lock (&coverage_mutex);
3681                 mono_conc_hashtable_insert (coverage_classes, klass, class_methods);
3682                 mono_os_mutex_unlock (&coverage_mutex);
3683         }
3684
3685         node = create_method_node (method);
3686         mono_lock_free_queue_enqueue (class_methods, node);
3687
3688         return TRUE;
3689 }
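/*
 * In short, the filter above (1) rejects internal calls and P/Invoke methods,
 * (2) accepts anything it is already tracking, (3) rejects methods from
 * suppressed (core) assemblies, (4) applies the user's '+'/'-' filters as
 * substring matches against "[image]class", and only then (5) registers the
 * method with its image, class and assembly in the coverage tables and queues.
 */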
3690
3691 #define LINE_BUFFER_SIZE 4096
3692 /* Maximum size of a suppression or filter file: 128 KB. */
3693 #define MAX_FILE_SIZE (128 * 1024)
3694 static char *
3695 get_file_content (FILE *stream)
3696 {
3697         char *buffer;
3698         ssize_t bytes_read;
3699         long filesize;
3700         int res, offset = 0;
3701
3702         res = fseek (stream, 0, SEEK_END);
3703         if (res < 0)
3704                 return NULL;
3705
3706         filesize = ftell (stream);
3707         if (filesize < 0)
3708                 return NULL;
3709
3710         res = fseek (stream, 0, SEEK_SET);
3711         if (res < 0)
3712                 return NULL;
3713
3714         if (filesize > MAX_FILE_SIZE)
3715                 return NULL;
3716
3717         buffer = (char *) g_malloc ((filesize + 1) * sizeof (char));
3718         while ((bytes_read = fread (buffer + offset, 1, LINE_BUFFER_SIZE, stream)) > 0)
3719                 offset += bytes_read;
3720
3721         /* NULL terminate our buffer */
3722         buffer[filesize] = '\0';
3723         return buffer;
3724 }
3725
3726 static char *
3727 get_next_line (char *contents, char **next_start)
3728 {
3729         char *p = contents;
3730
3731         if (p == NULL || *p == '\0') {
3732                 *next_start = NULL;
3733                 return NULL;
3734         }
3735
3736         while (*p != '\n' && *p != '\0')
3737                 p++;
3738
3739         if (*p == '\n') {
3740                 *p = '\0';
3741                 *next_start = p + 1;
3742         } else
3743                 *next_start = NULL;
3744
3745         return contents;
3746 }
3747
3748 static void
3749 init_suppressed_assemblies (void)
3750 {
3751         char *content;
3752         char *line;
3753         FILE *sa_file;
3754
3755         suppressed_assemblies = mono_conc_hashtable_new (g_str_hash, g_str_equal);
3756         sa_file = fopen (SUPPRESSION_DIR "/mono-profiler-log.suppression", "r");
3757         if (sa_file == NULL)
3758                 return;
3759
3760         /* Don't need to free @content as it is referred to by the lines stored in @suppressed_assemblies */
3761         content = get_file_content (sa_file);
3762         if (content == NULL) {
3763                 g_error ("mono-profiler-log.suppression is greater than 128kb - aborting\n");
3764         }
3765
3766         while ((line = get_next_line (content, &content))) {
3767                 line = g_strchomp (g_strchug (line));
3768                 /* No locking needed as we're doing initialization */
3769                 mono_conc_hashtable_insert (suppressed_assemblies, line, line);
3770         }
3771
3772         fclose (sa_file);
3773 }
3774
3775 static void
3776 parse_cov_filter_file (GPtrArray *filters, const char *file)
3777 {
3778         FILE *filter_file;
3779         char *line, *content;
3780
3781         filter_file = fopen (file, "r");
3782         if (filter_file == NULL) {
3783                 fprintf (stderr, "Unable to open %s\n", file);
3784                 return;
3785         }
3786
3787         /* Don't need to free content as it is referred to by the lines stored in @filters */
3788         content = get_file_content (filter_file);
3789         if (content == NULL)
3790                 fprintf (stderr, "WARNING: could not read %s or it is larger than 128 KB - ignoring\n", file);
3791
3792         while ((line = get_next_line (content, &content)))
3793                 g_ptr_array_add (filters, g_strchug (g_strchomp (line)));
3794
3795         fclose (filter_file);
3796 }
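/*
 * Example filter file contents (the names are made up; only the '+'/'-'
 * prefix and the "[image]class" substring matching in coverage_filter ()
 * are meaningful):
 *
 *     +[MyApp]MyApp.Core
 *     -[MyApp]MyApp.Core.Generated
 *     -[ThirdParty]
 */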
3797
3798 static void
3799 coverage_init (MonoProfiler *prof)
3800 {
3801         g_assert (!coverage_initialized && "Why are we initializing coverage twice?");
3802
3803         COVERAGE_DEBUG(fprintf (stderr, "Coverage initialized\n");)
3804
3805         mono_os_mutex_init (&coverage_mutex);
3806         coverage_methods = mono_conc_hashtable_new (NULL, NULL);
3807         coverage_assemblies = mono_conc_hashtable_new (NULL, NULL);
3808         coverage_classes = mono_conc_hashtable_new (NULL, NULL);
3809         filtered_classes = mono_conc_hashtable_new (NULL, NULL);
3810         entered_methods = mono_conc_hashtable_new (NULL, NULL);
3811         image_to_methods = mono_conc_hashtable_new (NULL, NULL);
3812         init_suppressed_assemblies ();
3813
3814         coverage_initialized = TRUE;
3815 }
3816
3817 static void
3818 unref_coverage_assemblies (gpointer key, gpointer value, gpointer userdata)
3819 {
3820         MonoAssembly *assembly = (MonoAssembly *)value;
3821         mono_assembly_close (assembly);
3822 }
3823
3824 static void
3825 free_sample_hit (gpointer p)
3826 {
3827         mono_lock_free_free (p, SAMPLE_BLOCK_SIZE);
3828 }
3829
3830 static void
3831 cleanup_reusable_samples (MonoProfiler *prof)
3832 {
3833         SampleHit *sample;
3834
3835         while ((sample = (SampleHit *) mono_lock_free_queue_dequeue (&prof->sample_reuse_queue)))
3836                 mono_thread_hazardous_try_free (sample, free_sample_hit);
3837 }
3838
3839 static void
3840 log_shutdown (MonoProfiler *prof)
3841 {
3842         InterlockedWrite (&in_shutdown, 1);
3843
3844         if (!no_counters)
3845                 counters_and_perfcounters_sample (prof);
3846
3847         dump_coverage (prof);
3848
3849         char c = 1;
3850
3851         if (write (prof->pipes [1], &c, 1) != 1) {
3852                 fprintf (stderr, "Could not write to pipe: %s\n", strerror (errno));
3853                 exit (1);
3854         }
3855
3856         mono_native_thread_join (prof->helper_thread);
3857
3858         mono_os_mutex_destroy (&counters_mutex);
3859
3860         MonoCounterAgent *mc_next;
3861
3862         for (MonoCounterAgent *cur = counters; cur; cur = mc_next) {
3863                 mc_next = cur->next;
3864                 g_free (cur);
3865         }
3866
3867         PerfCounterAgent *pc_next;
3868
3869         for (PerfCounterAgent *cur = perfcounters; cur; cur = pc_next) {
3870                 pc_next = cur->next;
3871                 g_free (cur);
3872         }
3873
3874         /*
3875          * Ensure that we empty the LLS completely, even if some nodes are
3876          * not immediately removed upon calling mono_lls_remove (), by
3877          * iterating until the head is NULL.
3878          */
3879         while (profiler_thread_list.head) {
3880                 MONO_LLS_FOREACH_SAFE (&profiler_thread_list, MonoProfilerThread, thread) {
3881                         g_assert (thread->attached && "Why is a thread in the LLS not attached?");
3882
3883                         remove_thread (thread);
3884                 } MONO_LLS_FOREACH_SAFE_END
3885         }
3886
3887         /*
3888          * Ensure that all threads have been freed, so that we don't miss any
3889          * buffers when we shut down the writer thread below.
3890          */
3891         mono_thread_hazardous_try_free_all ();
3892
3893         InterlockedWrite (&prof->run_dumper_thread, 0);
3894         mono_os_sem_post (&prof->dumper_queue_sem);
3895         mono_native_thread_join (prof->dumper_thread);
3896         mono_os_sem_destroy (&prof->dumper_queue_sem);
3897
3898         InterlockedWrite (&prof->run_writer_thread, 0);
3899         mono_os_sem_post (&prof->writer_queue_sem);
3900         mono_native_thread_join (prof->writer_thread);
3901         mono_os_sem_destroy (&prof->writer_queue_sem);
3902
3903         /*
3904          * Free all writer queue entries, and ensure that all sample hits will be
3905          * added to the sample reuse queue.
3906          */
3907         mono_thread_hazardous_try_free_all ();
3908
3909         cleanup_reusable_samples (prof);
3910
3911         /*
3912          * Finally, make sure that all sample hits are freed. This should cover all
3913          * hazardous data from the profiler. We can now be sure that the runtime
3914          * won't later invoke free functions in the profiler library after it has
3915          * been unloaded.
3916          */
3917         mono_thread_hazardous_try_free_all ();
3918
3919         g_assert (!InterlockedRead (&buffer_rwlock_count) && "Why is the reader count still non-zero?");
3920         g_assert (!InterlockedReadPointer (&buffer_rwlock_exclusive) && "Why does someone still hold the exclusive lock?");
3921
3922 #if defined (HAVE_SYS_ZLIB)
3923         if (prof->gzfile)
3924                 gzclose (prof->gzfile);
3925 #endif
3926         if (prof->pipe_output)
3927                 pclose (prof->file);
3928         else
3929                 fclose (prof->file);
3930
3931         mono_conc_hashtable_destroy (prof->method_table);
3932         mono_os_mutex_destroy (&prof->method_table_mutex);
3933
3934         if (coverage_initialized) {
3935                 mono_os_mutex_lock (&coverage_mutex);
3936                 mono_conc_hashtable_foreach (coverage_assemblies, unref_coverage_assemblies, prof);
3937                 mono_os_mutex_unlock (&coverage_mutex);
3938
3939                 mono_conc_hashtable_destroy (coverage_methods);
3940                 mono_conc_hashtable_destroy (coverage_assemblies);
3941                 mono_conc_hashtable_destroy (coverage_classes);
3942                 mono_conc_hashtable_destroy (filtered_classes);
3943
3944                 mono_conc_hashtable_destroy (entered_methods);
3945                 mono_conc_hashtable_destroy (image_to_methods);
3946                 mono_conc_hashtable_destroy (suppressed_assemblies);
3947                 mono_os_mutex_destroy (&coverage_mutex);
3948         }
3949
3950         PROF_TLS_FREE ();
3951
3952         g_free (prof->args);
3953         g_free (prof);
3954 }
3955
3956 static char*
3957 new_filename (const char* filename)
3958 {
3959         time_t t = time (NULL);
3960         int pid = process_id ();
3961         char pid_buf [16];
3962         char time_buf [16];
3963         char *res, *d;
3964         const char *p;
3965         int count_dates = 0;
3966         int count_pids = 0;
3967         int s_date, s_pid;
3968         struct tm *ts;
3969         for (p = filename; *p; p++) {
3970                 if (*p != '%')
3971                         continue;
3972                 p++;
3973                 if (*p == 't')
3974                         count_dates++;
3975                 else if (*p == 'p')
3976                         count_pids++;
3977                 else if (*p == 0)
3978                         break;
3979         }
3980         if (!count_dates && !count_pids)
3981                 return pstrdup (filename);
3982         snprintf (pid_buf, sizeof (pid_buf), "%d", pid);
3983         ts = gmtime (&t);
3984         snprintf (time_buf, sizeof (time_buf), "%d%02d%02d%02d%02d%02d",
3985                 1900 + ts->tm_year, 1 + ts->tm_mon, ts->tm_mday, ts->tm_hour, ts->tm_min, ts->tm_sec);
3986         s_date = strlen (time_buf);
3987         s_pid = strlen (pid_buf);
3988         d = res = (char *) g_malloc (strlen (filename) + s_date * count_dates + s_pid * count_pids);
3989         for (p = filename; *p; p++) {
3990                 if (*p != '%') {
3991                         *d++ = *p;
3992                         continue;
3993                 }
3994                 p++;
3995                 if (*p == 't') {
3996                         strcpy (d, time_buf);
3997                         d += s_date;
3998                         continue;
3999                 } else if (*p == 'p') {
4000                         strcpy (d, pid_buf);
4001                         d += s_pid;
4002                         continue;
4003                 } else if (*p == '%') {
4004                         *d++ = '%';
4005                         continue;
4006                 } else if (*p == 0)
4007                         break;
4008                 *d++ = '%';
4009                 *d++ = *p;
4010         }
4011         *d = 0;
4012         return res;
4013 }
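/*
 * A worked example with made-up values: with pid 1234 and a UTC time of
 * 2017-06-01 12:30:45, the template "output-%p-%t.mlpd" above expands to
 * "output-1234-20170601123045.mlpd"; "%%" yields a literal '%', and any
 * other "%x" sequence is copied through unchanged.
 */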
4014
4015 static void
4016 add_to_fd_set (fd_set *set, int fd, int *max_fd)
4017 {
4018         /*
4019          * This should only trigger for the basic FDs (server socket, pipes) at
4020          * startup if for some mysterious reason they're too large. In this case,
4021          * the profiler really can't function, and we're better off printing an
4022          * error and exiting.
4023          */
4024         if (fd >= FD_SETSIZE) {
4025                 fprintf (stderr, "File descriptor is out of bounds for fd_set: %d\n", fd);
4026                 exit (1);
4027         }
4028
4029         FD_SET (fd, set);
4030
4031         if (*max_fd < fd)
4032                 *max_fd = fd;
4033 }
4034
4035 static void *
4036 helper_thread (void *arg)
4037 {
4038         MonoProfiler *prof = (MonoProfiler *) arg;
4039
4040         mono_threads_attach_tools_thread ();
4041         mono_native_thread_set_name (mono_native_thread_id_get (), "Profiler helper");
4042
4043         MonoProfilerThread *thread = init_thread (prof, FALSE);
4044
4045         GArray *command_sockets = g_array_new (FALSE, FALSE, sizeof (int));
4046
4047         while (1) {
4048                 fd_set rfds;
4049                 int max_fd = -1;
4050
4051                 FD_ZERO (&rfds);
4052
4053                 add_to_fd_set (&rfds, prof->server_socket, &max_fd);
4054                 add_to_fd_set (&rfds, prof->pipes [0], &max_fd);
4055
4056                 for (gint i = 0; i < command_sockets->len; i++)
4057                         add_to_fd_set (&rfds, g_array_index (command_sockets, int, i), &max_fd);
4058
4059                 struct timeval tv = { .tv_sec = 1, .tv_usec = 0 };
4060
4061                 // Sleep for 1sec or until a file descriptor has data.
4062                 if (select (max_fd + 1, &rfds, NULL, NULL, &tv) == -1) {
4063                         if (errno == EINTR)
4064                                 continue;
4065
4066                         fprintf (stderr, "Error in mono-profiler-log server: %s", strerror (errno));
4067                         exit (1);
4068                 }
4069
4070                 if (!no_counters)
4071                         counters_and_perfcounters_sample (prof);
4072
4073                 buffer_lock_excl ();
4074
4075                 sync_point (SYNC_POINT_PERIODIC);
4076
4077                 buffer_unlock_excl ();
4078
4079                 // Are we shutting down?
4080                 if (FD_ISSET (prof->pipes [0], &rfds)) {
4081                         char c;
4082                         read (prof->pipes [0], &c, 1);
4083                         break;
4084                 }
4085
4086                 for (gint i = 0; i < command_sockets->len; i++) {
4087                         int fd = g_array_index (command_sockets, int, i);
4088
4089                         if (!FD_ISSET (fd, &rfds))
4090                                 continue;
4091
4092                         char buf [64];
4093                         int len = read (fd, buf, sizeof (buf) - 1);
4094
4095                         if (len == -1)
4096                                 continue;
4097
4098                         if (!len) {
4099                                 // The other end disconnected.
4100                                 g_array_remove_index (command_sockets, i);
4101                                 close (fd);
4102
4103                                 continue;
4104                         }
4105
4106                         buf [len] = 0;
4107
4108                         if (!strcmp (buf, "heapshot\n") && hs_mode_ondemand) {
4109                                 // Rely on the finalization callback triggering a GC.
4110                                 heapshot_requested = 1;
4111                                 mono_gc_finalize_notify ();
4112                         }
4113                 }
4114
4115                 if (FD_ISSET (prof->server_socket, &rfds)) {
4116                         int fd = accept (prof->server_socket, NULL, NULL);
4117
4118                         if (fd != -1) {
4119                                 if (fd >= FD_SETSIZE)
4120                                         close (fd);
4121                                 else
4122                                         g_array_append_val (command_sockets, fd);
4123                         }
4124                 }
4125         }
4126
4127         for (gint i = 0; i < command_sockets->len; i++)
4128                 close (g_array_index (command_sockets, int, i));
4129
4130         g_array_free (command_sockets, TRUE);
4131
4132         send_log_unsafe (FALSE);
4133         deinit_thread (thread);
4134
4135         mono_thread_info_detach ();
4136
4137         return NULL;
4138 }
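/*
 * Minimal sketch of a command client for the server loop above (illustrative
 * only; PORT is a placeholder for the profiler's command port and all error
 * handling is omitted). The only command recognized above is "heapshot\n",
 * and it only has an effect when on-demand heap shot mode is enabled:
 *
 *     int fd = socket (PF_INET, SOCK_STREAM, 0);
 *     struct sockaddr_in addr = { 0 };
 *
 *     addr.sin_family = AF_INET;
 *     addr.sin_port = htons (PORT);
 *     addr.sin_addr.s_addr = htonl (INADDR_LOOPBACK);
 *
 *     connect (fd, (struct sockaddr *) &addr, sizeof (addr));
 *     write (fd, "heapshot\n", sizeof ("heapshot\n") - 1);
 *     close (fd);
 */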
4139
4140 static void
4141 start_helper_thread (MonoProfiler* prof)
4142 {
4143         if (pipe (prof->pipes) == -1) {
4144                 fprintf (stderr, "Cannot create pipe: %s\n", strerror (errno));
4145                 exit (1);
4146         }
4147
4148         prof->server_socket = socket (PF_INET, SOCK_STREAM, 0);
4149
4150         if (prof->server_socket == -1) {
4151                 fprintf (stderr, "Cannot create server socket: %s\n", strerror (errno));
4152                 exit (1);
4153         }
4154
4155         struct sockaddr_in server_address;
4156
4157         memset (&server_address, 0, sizeof (server_address));
4158         server_address.sin_family = AF_INET;
4159         server_address.sin_addr.s_addr = INADDR_ANY;
4160         server_address.sin_port = htons (prof->command_port);
4161
4162         if (bind (prof->server_socket, (struct sockaddr *) &server_address, sizeof (server_address)) == -1) {
4163                 fprintf (stderr, "Cannot bind server socket on port %d: %s\n", prof->command_port, strerror (errno));
4164                 close (prof->server_socket);
4165                 exit (1);
4166         }
4167
4168         if (listen (prof->server_socket, 1) == -1) {
4169                 fprintf (stderr, "Cannot listen on server socket: %s\n", strerror (errno));
4170                 close (prof->server_socket);
4171                 exit (1);
4172         }
4173
4174         socklen_t slen = sizeof (server_address);
4175
4176         if (getsockname (prof->server_socket, (struct sockaddr *) &server_address, &slen)) {
4177                 fprintf (stderr, "Could not get assigned port: %s\n", strerror (errno));
4178                 close (prof->server_socket);
4179                 exit (1);
4180         }
4181
4182         prof->command_port = ntohs (server_address.sin_port);
4183
4184         if (!mono_native_thread_create (&prof->helper_thread, helper_thread, prof)) {
4185                 fprintf (stderr, "Could not start helper thread\n");
4186                 close (prof->server_socket);
4187                 exit (1);
4188         }
4189 }
4190
4191 static void
4192 free_writer_entry (gpointer p)
4193 {
4194         mono_lock_free_free (p, WRITER_ENTRY_BLOCK_SIZE);
4195 }
4196
4197 static gboolean
4198 handle_writer_queue_entry (MonoProfiler *prof)
4199 {
4200         WriterQueueEntry *entry;
4201
4202         if ((entry = (WriterQueueEntry *) mono_lock_free_queue_dequeue (&prof->writer_queue))) {
4203                 if (!entry->methods)
4204                         goto no_methods;
4205
4206                 gboolean wrote_methods = FALSE;
4207
4208                 /*
4209                  * Encode the method events in a temporary log buffer that we
4210                  * flush to disk before the main buffer, ensuring that all
4211                  * methods have metadata emitted before they're referenced.
4212                  *
4213                  * We use a 'proper' thread-local buffer for this as opposed
4214                  * to allocating and freeing a buffer by hand because the call
4215                  * to mono_method_full_name () below may trigger class load
4216                  * events when it retrieves the signature of the method. So a
4217                  * thread-local buffer needs to exist when such events occur.
4218                  */
4219                 for (guint i = 0; i < entry->methods->len; i++) {
4220                         MethodInfo *info = (MethodInfo *) g_ptr_array_index (entry->methods, i);
4221
4222                         if (mono_conc_hashtable_lookup (prof->method_table, info->method))
4223                                 goto free_info; // This method already has metadata emitted.
4224
4225                         /*
4226                          * Other threads use this hash table to get a general
4227                          * idea of whether a method has already been emitted to
4228                          * the stream. Due to the way we add to this table, it
4229                          * can easily happen that multiple threads queue up the
4230                          * same methods, but that's OK since eventually all
4231                          * methods will be in this table and the thread-local
4232                          * method lists will just be empty for the rest of the
4233                          * app's lifetime.
4234                          */
4235                         mono_os_mutex_lock (&prof->method_table_mutex);
4236                         mono_conc_hashtable_insert (prof->method_table, info->method, info->method);
4237                         mono_os_mutex_unlock (&prof->method_table_mutex);
4238
4239                         char *name = mono_method_full_name (info->method, 1);
4240                         int nlen = strlen (name) + 1;
4241                         void *cstart = info->ji ? mono_jit_info_get_code_start (info->ji) : NULL;
4242                         int csize = info->ji ? mono_jit_info_get_code_size (info->ji) : 0;
4243
4244                         ENTER_LOG (&method_jits_ctr, logbuffer,
4245                                 EVENT_SIZE /* event */ +
4246                                 LEB128_SIZE /* method */ +
4247                                 LEB128_SIZE /* start */ +
4248                                 LEB128_SIZE /* size */ +
4249                                 nlen /* name */
4250                         );
4251
4252                         emit_event_time (logbuffer, TYPE_JIT | TYPE_METHOD, info->time);
4253                         emit_method_inner (logbuffer, info->method);
4254                         emit_ptr (logbuffer, cstart);
4255                         emit_value (logbuffer, csize);
4256
4257                         memcpy (logbuffer->cursor, name, nlen);
4258                         logbuffer->cursor += nlen;
4259
4260                         EXIT_LOG_EXPLICIT (NO_SEND);
4261
4262                         mono_free (name);
4263
4264                         wrote_methods = TRUE;
4265
4266                 free_info:
4267                         g_free (info);
4268                 }
4269
4270                 g_ptr_array_free (entry->methods, TRUE);
4271
4272                 if (wrote_methods) {
4273                         MonoProfilerThread *thread = PROF_TLS_GET ();
4274
4275                         dump_buffer_threadless (prof, thread->buffer);
4276                         init_buffer_state (thread);
4277                 }
4278
4279         no_methods:
4280                 dump_buffer (prof, entry->buffer);
4281
4282                 mono_thread_hazardous_try_free (entry, free_writer_entry);
4283
4284                 return TRUE;
4285         }
4286
4287         return FALSE;
4288 }
4289
4290 static void *
4291 writer_thread (void *arg)
4292 {
4293         MonoProfiler *prof = (MonoProfiler *)arg;
4294
4295         mono_threads_attach_tools_thread ();
4296         mono_native_thread_set_name (mono_native_thread_id_get (), "Profiler writer");
4297
4298         dump_header (prof);
4299
4300         MonoProfilerThread *thread = init_thread (prof, FALSE);
4301
4302         while (InterlockedRead (&prof->run_writer_thread)) {
4303                 mono_os_sem_wait (&prof->writer_queue_sem, MONO_SEM_FLAGS_NONE);
4304                 handle_writer_queue_entry (prof);
4305         }
4306
4307         /* Drain any remaining entries on shutdown. */
4308         while (handle_writer_queue_entry (prof));
4309
4310         free_buffer (thread->buffer, thread->buffer->size);
4311         deinit_thread (thread);
4312
4313         mono_thread_info_detach ();
4314
4315         return NULL;
4316 }
4317
4318 static void
4319 start_writer_thread (MonoProfiler* prof)
4320 {
4321         InterlockedWrite (&prof->run_writer_thread, 1);
4322
4323         if (!mono_native_thread_create (&prof->writer_thread, writer_thread, prof)) {
4324                 fprintf (stderr, "Could not start writer thread\n");
4325                 exit (1);
4326         }
4327 }
4328
4329 static void
4330 reuse_sample_hit (gpointer p)
4331 {
4332         SampleHit *sample = p;
4333
4334         mono_lock_free_queue_node_unpoison (&sample->node);
4335         mono_lock_free_queue_enqueue (&sample->prof->sample_reuse_queue, &sample->node);
4336 }
4337
4338 static gboolean
4339 handle_dumper_queue_entry (MonoProfiler *prof)
4340 {
4341         SampleHit *sample;
4342
4343         if ((sample = (SampleHit *) mono_lock_free_queue_dequeue (&prof->dumper_queue))) {
4344                 for (int i = 0; i < sample->count; ++i) {
4345                         MonoMethod *method = sample->frames [i].method;
4346                         MonoDomain *domain = sample->frames [i].domain;
4347                         void *address = sample->frames [i].base_address;
4348
4349                         if (!method) {
4350                                 g_assert (domain && "What happened to the domain pointer?");
4351                                 g_assert (address && "What happened to the instruction pointer?");
4352
4353                                 MonoJitInfo *ji = mono_jit_info_table_find (domain, (char *) address);
4354
4355                                 if (ji)
4356                                         sample->frames [i].method = mono_jit_info_get_method (ji);
4357                         }
4358                 }
4359
4360                 ENTER_LOG (&sample_hits_ctr, logbuffer,
4361                         EVENT_SIZE /* event */ +
4362                         BYTE_SIZE /* type */ +
4363                         LEB128_SIZE /* tid */ +
4364                         LEB128_SIZE /* count */ +
4365                         1 * (
4366                                 LEB128_SIZE /* ip */
4367                         ) +
4368                         LEB128_SIZE /* managed count */ +
4369                         sample->count * (
4370                                 LEB128_SIZE /* method */
4371                         )
4372                 );
4373
4374                 emit_event_time (logbuffer, TYPE_SAMPLE | TYPE_SAMPLE_HIT, sample->time);
4375                 emit_byte (logbuffer, SAMPLE_CYCLES);
4376                 emit_ptr (logbuffer, (void *) sample->tid);
4377                 emit_value (logbuffer, 1);
4378
4379                 // TODO: Actual native unwinding.
4380                 for (int i = 0; i < 1; ++i) {
4381                         emit_ptr (logbuffer, sample->ip);
4382                         add_code_pointer ((uintptr_t) sample->ip);
4383                 }
4384
4385                 /* new in data version 6 */
4386                 emit_uvalue (logbuffer, sample->count);
4387
4388                 for (int i = 0; i < sample->count; ++i)
4389                         emit_method (logbuffer, sample->frames [i].method);
4390
4391                 EXIT_LOG_EXPLICIT (DO_SEND);
4392
4393                 mono_thread_hazardous_try_free (sample, reuse_sample_hit);
4394
4395                 dump_unmanaged_coderefs (prof);
4396                 return TRUE; /* an entry was handled; lets the shutdown drain loop keep draining */
4397         }
4398         return FALSE;
4399 }
4400
4401 static void *
4402 dumper_thread (void *arg)
4403 {
4404         MonoProfiler *prof = (MonoProfiler *)arg;
4405
4406         mono_threads_attach_tools_thread ();
4407         mono_native_thread_set_name (mono_native_thread_id_get (), "Profiler dumper");
4408
4409         MonoProfilerThread *thread = init_thread (prof, FALSE);
4410
4411         while (InterlockedRead (&prof->run_dumper_thread)) {
4412                 /*
4413                  * Flush samples every second so it doesn't seem like the profiler is
4414                  * not working if the program is mostly idle.
4415                  */
4416                 if (mono_os_sem_timedwait (&prof->dumper_queue_sem, 1000, MONO_SEM_FLAGS_NONE) == MONO_SEM_TIMEDWAIT_RET_TIMEDOUT)
4417                         send_log_unsafe (FALSE);
4418
4419                 handle_dumper_queue_entry (prof);
4420         }
4421
4422         /* Drain any remaining entries on shutdown. */
4423         while (handle_dumper_queue_entry (prof));
4424
4425         send_log_unsafe (FALSE);
4426         deinit_thread (thread);
4427
4428         mono_thread_info_detach ();
4429
4430         return NULL;
4431 }
4432
4433 static void
4434 start_dumper_thread (MonoProfiler* prof)
4435 {
4436         InterlockedWrite (&prof->run_dumper_thread, 1);
4437
4438         if (!mono_native_thread_create (&prof->dumper_thread, dumper_thread, prof)) {
4439                 fprintf (stderr, "Could not start dumper thread\n");
4440                 exit (1);
4441         }
4442 }
4443
4444 static void
4445 register_counter (const char *name, gint32 *counter)
4446 {
4447         mono_counters_register (name, MONO_COUNTER_UINT | MONO_COUNTER_PROFILER | MONO_COUNTER_MONOTONIC, counter);
4448 }
4449
4450 static void
4451 runtime_initialized (MonoProfiler *profiler)
4452 {
4453         InterlockedWrite (&runtime_inited, 1);
4454
4455         register_counter ("Sample events allocated", &sample_allocations_ctr);
4456         register_counter ("Log buffers allocated", &buffer_allocations_ctr);
4457
4458         register_counter ("Event: Sync points", &sync_points_ctr);
4459         register_counter ("Event: Heap objects", &heap_objects_ctr);
4460         register_counter ("Event: Heap starts", &heap_starts_ctr);
4461         register_counter ("Event: Heap ends", &heap_ends_ctr);
4462         register_counter ("Event: Heap roots", &heap_roots_ctr);
4463         register_counter ("Event: GC events", &gc_events_ctr);
4464         register_counter ("Event: GC resizes", &gc_resizes_ctr);
4465         register_counter ("Event: GC allocations", &gc_allocs_ctr);
4466         register_counter ("Event: GC moves", &gc_moves_ctr);
4467         register_counter ("Event: GC handle creations", &gc_handle_creations_ctr);
4468         register_counter ("Event: GC handle deletions", &gc_handle_deletions_ctr);
4469         register_counter ("Event: GC finalize starts", &finalize_begins_ctr);
4470         register_counter ("Event: GC finalize ends", &finalize_ends_ctr);
4471         register_counter ("Event: GC finalize object starts", &finalize_object_begins_ctr);
4472         register_counter ("Event: GC finalize object ends", &finalize_object_ends_ctr);
4473         register_counter ("Event: Image loads", &image_loads_ctr);
4474         register_counter ("Event: Image unloads", &image_unloads_ctr);
4475         register_counter ("Event: Assembly loads", &assembly_loads_ctr);
4476         register_counter ("Event: Assembly unloads", &assembly_unloads_ctr);
4477         register_counter ("Event: Class loads", &class_loads_ctr);
4478         register_counter ("Event: Class unloads", &class_unloads_ctr);
4479         register_counter ("Event: Method entries", &method_entries_ctr);
4480         register_counter ("Event: Method exits", &method_exits_ctr);
4481         register_counter ("Event: Method exception leaves", &method_exception_exits_ctr);
4482         register_counter ("Event: Method JITs", &method_jits_ctr);
4483         register_counter ("Event: Code buffers", &code_buffers_ctr);
4484         register_counter ("Event: Exception throws", &exception_throws_ctr);
4485         register_counter ("Event: Exception clauses", &exception_clauses_ctr);
4486         register_counter ("Event: Monitor events", &monitor_events_ctr);
4487         register_counter ("Event: Thread starts", &thread_starts_ctr);
4488         register_counter ("Event: Thread ends", &thread_ends_ctr);
4489         register_counter ("Event: Thread names", &thread_names_ctr);
4490         register_counter ("Event: Domain loads", &domain_loads_ctr);
4491         register_counter ("Event: Domain unloads", &domain_unloads_ctr);
4492         register_counter ("Event: Domain names", &domain_names_ctr);
4493         register_counter ("Event: Context loads", &context_loads_ctr);
4494         register_counter ("Event: Context unloads", &context_unloads_ctr);
4495         register_counter ("Event: Sample binaries", &sample_ubins_ctr);
4496         register_counter ("Event: Sample symbols", &sample_usyms_ctr);
4497         register_counter ("Event: Sample hits", &sample_hits_ctr);
4498         register_counter ("Event: Counter descriptors", &counter_descriptors_ctr);
4499         register_counter ("Event: Counter samples", &counter_samples_ctr);
4500         register_counter ("Event: Performance counter descriptors", &perfcounter_descriptors_ctr);
4501         register_counter ("Event: Performance counter samples", &perfcounter_samples_ctr);
4502         register_counter ("Event: Coverage methods", &coverage_methods_ctr);
4503         register_counter ("Event: Coverage statements", &coverage_statements_ctr);
4504         register_counter ("Event: Coverage classes", &coverage_classes_ctr);
4505         register_counter ("Event: Coverage assemblies", &coverage_assemblies_ctr);
4506
4507         counters_init (profiler);
4508
4509         /*
4510          * We must start the helper thread before the writer thread. This is
4511          * because the helper thread sets up the command port which is written to
4512          * the log header by the writer thread.
4513          */
4514         start_helper_thread (profiler);
4515         start_writer_thread (profiler);
4516         start_dumper_thread (profiler);
4517 }
4518
4519 static void
4520 create_profiler (const char *args, const char *filename, GPtrArray *filters)
4521 {
4522         char *nf;
4523         int force_delete = 0;
4524
4525         log_profiler = (MonoProfiler *) g_calloc (1, sizeof (MonoProfiler));
4526         log_profiler->args = pstrdup (args);
4527         log_profiler->command_port = command_port;
4528
4529         if (filename && *filename == '-') {
4530                 force_delete = 1;
4531                 filename++;
4532                 g_warning ("WARNING: the output:-FILENAME option is deprecated, the profiler now always overwrites the output file\n");
4533         }
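        // Example (deprecated form, hypothetical name): a filename of "-trace.mlpd"
        // is turned into "trace.mlpd" with force_delete set, so any existing
        // trace.mlpd is unlink()ed below before the new output file is opened.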
4534
4535         //If the filename begins with '+', append the pid at the end
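        // Example (hypothetical name): "+trace.mlpd" becomes "trace.mlpd.<pid>",
        // e.g. "trace.mlpd.1234" when the current process id is 1234.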
4536         if (filename && *filename == '+')
4537                 filename = g_strdup_printf ("%s.%d", filename + 1, getpid ());
4538
4539
4540         if (!filename) {
4541                 if (do_report)
4542                         filename = "|mprof-report -";
4543                 else
4544                         filename = "output.mlpd";
4545                 nf = (char*)filename;
4546         } else {
4547                 nf = new_filename (filename);
4548                 if (do_report) {
4549                         int s = strlen (nf) + 32;
4550                         char *p = (char *) g_malloc (s);
4551                         snprintf (p, s, "|mprof-report '--out=%s' -", nf);
4552                         g_free (nf);
4553                         nf = p;
4554                 }
4555         }
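        /*
         * At this point nf selects the output target, as handled below:
         *   "|CMD" - pipe the log to CMD via popen (), e.g. the mprof-report
         *            pipeline built above when do_report is set;
         *   "#N"   - write to the already-open file descriptor N via fdopen ();
         *   other  - open a regular file (unlinking it first if force_delete).
         */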
4556         if (*nf == '|') {
4557                 log_profiler->file = popen (nf + 1, "w");
4558                 log_profiler->pipe_output = 1;
4559         } else if (*nf == '#') {
4560                 int fd = strtol (nf + 1, NULL, 10);
4561                 log_profiler->file = fdopen (fd, "a");
4562         } else {
4563                 if (force_delete)
4564                         unlink (nf);
4565                 log_profiler->file = fopen (nf, "wb");
4566         }
4567         if (!log_profiler->file) {
4568                 fprintf (stderr, "Cannot create profiler output: %s\n", nf);
4569                 exit (1);
4570         }
4571
4572 #if defined (HAVE_SYS_ZLIB)
4573         if (use_zip)
4574                 log_profiler->gzfile = gzdopen (fileno (log_profiler->file), "wb");
4575 #endif
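        /*
         * If use_zip was requested and zlib support is compiled in, the file
         * descriptor opened above is additionally wrapped with gzdopen () so the
         * log stream is written gzip-compressed; presumably the writer code checks
         * gzfile first and falls back to the plain FILE * otherwise.
         */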
4576
4577         /*
4578          * If you hit this assert while increasing MAX_FRAMES, you need to increase
4579          * SAMPLE_BLOCK_SIZE as well.
4580          */
4581         g_assert (SAMPLE_SLOT_SIZE (MAX_FRAMES) * 2 < LOCK_FREE_ALLOC_SB_USABLE_SIZE (SAMPLE_BLOCK_SIZE));
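        /*
         * The assert above ensures a worst-case sample slot (MAX_FRAMES frames) is
         * smaller than half the usable space of one superblock of SAMPLE_BLOCK_SIZE
         * bytes, presumably so the lock-free allocator configured below can always
         * fit more than one slot per block.
         */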
4582
4583         // FIXME: We should free this stuff too.
4584         mono_lock_free_allocator_init_size_class (&log_profiler->sample_size_class, SAMPLE_SLOT_SIZE (num_frames), SAMPLE_BLOCK_SIZE);
4585         mono_lock_free_allocator_init_allocator (&log_profiler->sample_allocator, &log_profiler->sample_size_class, MONO_MEM_ACCOUNT_PROFILER);
4586
4587         mono_lock_free_queue_init (&log_profiler->sample_reuse_queue);
4588
4589         g_assert (sizeof (WriterQueueEntry) * 2 < LOCK_FREE_ALLOC_SB_USABLE_SIZE (WRITER_ENTRY_BLOCK_SIZE));
4590
4591         // FIXME: We should free this stuff too.
4592         mono_lock_free_allocator_init_size_class (&log_profiler->writer_entry_size_class, sizeof (WriterQueueEntry), WRITER_ENTRY_BLOCK_SIZE);
4593         mono_lock_free_allocator_init_allocator (&log_profiler->writer_entry_allocator, &log_profiler->writer_entry_size_class, MONO_MEM_ACCOUNT_PROFILER);
4594
4595         mono_lock_free_queue_init (&log_profiler->writer_queue);
4596         mono_os_sem_init (&log_profiler->writer_queue_sem, 0);
4597
4598         mono_lock_free_queue_init (&log_profiler->dumper_queue);
4599         mono_os_sem_init (&log_profiler->dumper_queue_sem, 0);
4600
4601         mono_os_mutex_init (&log_profiler->method_table_mutex);
4602         log_profiler->method_table = mono_conc_hashtable_new (NULL, NULL);
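        /*
         * method_table is a concurrent hash table, presumably used to remember
         * methods referenced by emitted events so their metadata only has to be
         * written once. Lookups in mono_conc_hashtable are lock-free, while writers
         * are expected to synchronize externally, hence method_table_mutex above.
         */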
4603
4604         if (do_coverage)
4605                 coverage_init (log_profiler);
4606         log_profiler->coverage_filters = filters;
4607
4608         log_profiler->startup_time = current_time ();
4609 }
4610
4611 /*
4612  * Declarations to silence the compiler: these are the entry points that
4613  * mono will load from the shared library and call.
4614  */
4615 extern void
4616 mono_profiler_startup (const char *desc);
4617
4618 extern void
4619 mono_profiler_startup_log (const char *desc);
4620
4621 /*
4622  * this is the entry point that will be used when the profiler
4623  * is embedded inside the main executable.
4624  */
4625 void
4626 mono_profiler_startup_log (const char *desc)
4627 {
4628         mono_profiler_startup (desc);
4629 }
4630
4631 void
4632 mono_profiler_startup (const char *desc)
4633 {
4634         GPtrArray *filters = NULL;
4635
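        /*
         * desc is the raw profiler description string, e.g. (illustrative)
         * "log:heapshot,output=trace.mlpd" as produced by something like
         * mono --profile=log:heapshot,output=trace.mlpd app.exe. Everything after
         * the "log:" prefix is handed to the option parser below, and an empty
         * string is used when no options were given (plain "log").
         */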
4636         proflog_parse_args (&config, desc [3] == ':' ? desc + 4 : "");
4637
4638         //XXX maybe clean this up later to use config directly
4639         nocalls = !(config.effective_mask & PROFLOG_CALL_EVENTS);
4640         no_counters = !(config.effective_mask & PROFLOG_COUNTER_EVENTS);
4641         do_report = config.do_report;
4642         do_debug = config.do_debug;
4643         do_heap_shot = (config.effective_mask & PROFLOG_HEAPSHOT_FEATURE);
4644         hs_mode_ondemand = config.hs_mode_ondemand;
4645         hs_mode_ms = config.hs_mode_ms;
4646         hs_mode_gc = config.hs_mode_gc;
4647         do_mono_sample = (config.effective_mask & PROFLOG_SAMPLING_FEATURE);
4648         use_zip = config.use_zip;
4649         command_port = config.command_port;
4650         num_frames = config.num_frames;
4651         notraces = config.notraces;
4652         max_allocated_sample_hits = config.max_allocated_sample_hits;
4653         max_call_depth = config.max_call_depth;
4654         do_coverage = (config.effective_mask & PROFLOG_CODE_COV_FEATURE);
4655         debug_coverage = config.debug_coverage;
4656         only_coverage = config.only_coverage;
4657
4658         if (config.cov_filter_files) {
4659                 filters = g_ptr_array_new ();
4660                 int i;
4661                 for (i = 0; i < config.cov_filter_files->len; ++i) {
4662                         const char *name = config.cov_filter_files->pdata [i];
4663                         parse_cov_filter_file (filters, name);
4664                 }
4665         }
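        /*
         * Each file listed in config.cov_filter_files (populated by the option
         * parser) is read by parse_cov_filter_file; the accumulated filters are
         * handed to create_profiler below, which stores them in coverage_filters
         * for the coverage_filter callback to consult.
         */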
4666
4667         init_time ();
4668
4669         PROF_TLS_INIT ();
4670
4671         create_profiler (desc, config.output_filename, filters);
4672
4673         mono_lls_init (&profiler_thread_list, NULL);
4674
4675         //These two events are required for the profiler to work
4676         int events = MONO_PROFILE_THREADS | MONO_PROFILE_GC;
4677
4678         //Required callbacks
4679         mono_profiler_install (log_profiler, log_shutdown);
4680         mono_profiler_install_runtime_initialized (runtime_initialized);
4681
4682         mono_profiler_install_gc (gc_event, gc_resize);
4683         mono_profiler_install_thread (thread_start, thread_end);
4684
4685         //It's questionable whether we actually want this to be mandatory; maybe put it behind the actual event?
4686         mono_profiler_install_thread_name (thread_name);
4687
4688
4689         if (config.effective_mask & PROFLOG_DOMAIN_EVENTS) {
4690                 events |= MONO_PROFILE_APPDOMAIN_EVENTS;
4691                 mono_profiler_install_appdomain (NULL, domain_loaded, domain_unloaded, NULL);
4692                 mono_profiler_install_appdomain_name (domain_name);
4693         }
4694
4695         if (config.effective_mask & PROFLOG_ASSEMBLY_EVENTS) {
4696                 events |= MONO_PROFILE_ASSEMBLY_EVENTS;
4697                 mono_profiler_install_assembly (NULL, assembly_loaded, assembly_unloaded, NULL);
4698         }
4699
4700         if (config.effective_mask & PROFLOG_MODULE_EVENTS) {
4701                 events |= MONO_PROFILE_MODULE_EVENTS;
4702                 mono_profiler_install_module (NULL, image_loaded, image_unloaded, NULL);
4703         }
4704
4705         if (config.effective_mask & PROFLOG_CLASS_EVENTS) {
4706                 events |= MONO_PROFILE_CLASS_EVENTS;
4707                 mono_profiler_install_class (NULL, class_loaded, class_unloaded, NULL);
4708         }
4709
4710         if (config.effective_mask & PROFLOG_JIT_COMPILATION_EVENTS) {
4711                 events |= MONO_PROFILE_JIT_COMPILATION;
4712                 mono_profiler_install_jit_end (method_jitted);
4713                 mono_profiler_install_code_buffer_new (code_buffer_new);
4714         }
4715
4716         if (config.effective_mask & PROFLOG_EXCEPTION_EVENTS) {
4717                 events |= MONO_PROFILE_EXCEPTIONS;
4718                 mono_profiler_install_exception (throw_exc, method_exc_leave, NULL);
4719                 mono_profiler_install_exception_clause (clause_exc);
4720         }
4721
4722         if (config.effective_mask & PROFLOG_ALLOCATION_EVENTS) {
4723                 events |= MONO_PROFILE_ALLOCATIONS;
4724                 mono_profiler_install_allocation (gc_alloc);
4725         }
4726
4727         //PROFLOG_GC_EVENTS is mandatory
4728         //PROFLOG_THREAD_EVENTS is mandatory
4729
4730         if (config.effective_mask & PROFLOG_CALL_EVENTS) {
4731                 events |= MONO_PROFILE_ENTER_LEAVE;
4732                 mono_profiler_install_enter_leave (method_enter, method_leave);
4733         }
4734
4735         if (config.effective_mask & PROFLOG_INS_COVERAGE_EVENTS) {
4736                 events |= MONO_PROFILE_INS_COVERAGE;
4737                 mono_profiler_install_coverage_filter (coverage_filter);
4738         }
4739
4740         //XXX should we check for PROFLOG_SAMPLING_FEATURE instead?
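        // The block below forwards the configured sampling mode and frequency to
        // the runtime (e.g. from an option such as "sample=cycles/100"; the exact
        // syntax is the option parser's business) and installs mono_sample_hit to
        // receive each statistical sample callback.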
4741         if (config.effective_mask & PROFLOG_SAMPLING_EVENTS) {
4742                 events |= MONO_PROFILE_STATISTICAL;
4743                 mono_profiler_set_statistical_mode (config.sampling_mode, config.sample_freq);
4744                 mono_profiler_install_statistical (mono_sample_hit);
4745         }
4746
4747         if (config.effective_mask & PROFLOG_MONITOR_EVENTS) {
4748                 events |= MONO_PROFILE_MONITOR_EVENTS;
4749                 mono_profiler_install_monitor (monitor_event);
4750         }
4751
4752         if (config.effective_mask & PROFLOG_GC_MOVES_EVENTS) {
4753                 events |= MONO_PROFILE_GC_MOVES;
4754                 mono_profiler_install_gc_moves (gc_moves);
4755         }
4756
4757         // TODO: split these into two profiler events
4758         if (config.effective_mask & (PROFLOG_GC_ROOT_EVENTS | PROFLOG_GC_HANDLE_EVENTS)) {
4759                 events |= MONO_PROFILE_GC_ROOTS;
4760                 mono_profiler_install_gc_roots (
4761                         (config.effective_mask & PROFLOG_GC_HANDLE_EVENTS) ? gc_handle : NULL,
4762                         (config.effective_mask & PROFLOG_GC_ROOT_EVENTS) ? gc_roots : NULL);
4763         }
4764
4765         if (config.effective_mask & PROFLOG_CONTEXT_EVENTS) {
4766                 events |= MONO_PROFILE_CONTEXT_EVENTS;
4767                 mono_profiler_install_context (context_loaded, context_unloaded);
4768         }
4769
4770         if (config.effective_mask & PROFLOG_FINALIZATION_EVENTS) {
4771                 events |= MONO_PROFILE_GC_FINALIZATION;
4772                 mono_profiler_install_gc_finalize (finalize_begin, finalize_object_begin, finalize_object_end, finalize_end);
4773         } else if (ENABLED (PROFLOG_HEAPSHOT_FEATURE) && config.hs_mode_ondemand) {
4774                 //On-demand heapshot uses the finalizer thread to force a collection and thus a heapshot
4775                 events |= MONO_PROFILE_GC_FINALIZATION;
4776                 mono_profiler_install_gc_finalize (NULL, NULL, NULL, finalize_end);
4777         }
4778
4779         //PROFLOG_COUNTER_EVENTS is a pseudo event controlled by the no_counters global var
4780         //PROFLOG_GC_HANDLE_EVENTS is handled together with PROFLOG_GC_ROOT_EVENTS
4781
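        // All MONO_PROFILE_* flags accumulated in 'events' above are enabled with a
        // single call, now that the corresponding callbacks have been installed.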
4782         mono_profiler_set_events ((MonoProfileFlags)events);
4783 }