[profiler] Remove class unload events.
1 /*
2  * mono-profiler-log.c: mono log profiler
3  *
4  * Authors:
5  *   Paolo Molaro (lupus@ximian.com)
6  *   Alex Rønne Petersen (alexrp@xamarin.com)
7  *
8  * Copyright 2010 Novell, Inc (http://www.novell.com)
9  * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
10  * Licensed under the MIT license. See LICENSE file in the project root for full license information.
11  */
12
13 #include <config.h>
14 #include <mono/metadata/assembly.h>
15 #include <mono/metadata/debug-helpers.h>
16 #include "../metadata/metadata-internals.h"
17 #include <mono/metadata/mono-config.h>
18 #include <mono/metadata/mono-gc.h>
19 #include <mono/metadata/mono-perfcounters.h>
20 #include <mono/utils/atomic.h>
21 #include <mono/utils/hazard-pointer.h>
22 #include <mono/utils/lock-free-alloc.h>
23 #include <mono/utils/lock-free-queue.h>
24 #include <mono/utils/mono-conc-hashtable.h>
25 #include <mono/utils/mono-counters.h>
26 #include <mono/utils/mono-linked-list-set.h>
27 #include <mono/utils/mono-membar.h>
28 #include <mono/utils/mono-mmap.h>
29 #include <mono/utils/mono-os-mutex.h>
30 #include <mono/utils/mono-os-semaphore.h>
31 #include <mono/utils/mono-threads.h>
32 #include <mono/utils/mono-threads-api.h>
33 #include "log.h"
34
35 #ifdef HAVE_DLFCN_H
36 #include <dlfcn.h>
37 #endif
38 #include <fcntl.h>
39 #ifdef HAVE_LINK_H
40 #include <link.h>
41 #endif
42 #ifdef HAVE_UNISTD_H
43 #include <unistd.h>
44 #endif
45 #if defined(__APPLE__)
46 #include <mach/mach_time.h>
47 #endif
48 #include <netinet/in.h>
49 #ifdef HAVE_SYS_MMAN_H
50 #include <sys/mman.h>
51 #endif
52 #include <sys/socket.h>
53 #if defined (HAVE_SYS_ZLIB)
54 #include <zlib.h>
55 #endif
56
57 #define BUFFER_SIZE (4096 * 16)
58
59 /* Worst-case size in bytes of a 64-bit value encoded with LEB128. */
60 #define LEB128_SIZE 10
61
62 /* Size of a value encoded as a single byte. */
63 #undef BYTE_SIZE // mach/i386/vm_param.h on OS X defines this to 8, but it isn't used for anything.
64 #define BYTE_SIZE 1
65
66 /* Size in bytes of the event prefix (ID + time). */
67 #define EVENT_SIZE (BYTE_SIZE + LEB128_SIZE)
68
69 static volatile gint32 runtime_inited;
70 static volatile gint32 in_shutdown;
71
72 static ProfilerConfig config;
73 static int nocalls = 0;
74 static int notraces = 0;
75 static int use_zip = 0;
76 static int do_report = 0;
77 static int do_heap_shot = 0;
78 static int max_call_depth = 0;
79 static int command_port = 0;
80 static int heapshot_requested = 0;
81 static int do_mono_sample = 0;
82 static int do_debug = 0;
83 static int do_coverage = 0;
84 static gboolean no_counters = FALSE;
85 static gboolean only_coverage = FALSE;
86 static gboolean debug_coverage = FALSE;
87 static int max_allocated_sample_hits;
88
89 #define ENABLED(EVT) (config.effective_mask & (EVT))
90
91 // Statistics for internal profiler data structures.
92 static gint32 sample_allocations_ctr,
93               buffer_allocations_ctr;
94
95 // Statistics for profiler events.
96 static gint32 sync_points_ctr,
97               heap_objects_ctr,
98               heap_starts_ctr,
99               heap_ends_ctr,
100               heap_roots_ctr,
101               gc_events_ctr,
102               gc_resizes_ctr,
103               gc_allocs_ctr,
104               gc_moves_ctr,
105               gc_handle_creations_ctr,
106               gc_handle_deletions_ctr,
107               finalize_begins_ctr,
108               finalize_ends_ctr,
109               finalize_object_begins_ctr,
110               finalize_object_ends_ctr,
111               image_loads_ctr,
112               image_unloads_ctr,
113               assembly_loads_ctr,
114               assembly_unloads_ctr,
115               class_loads_ctr,
116               class_unloads_ctr,
117               method_entries_ctr,
118               method_exits_ctr,
119               method_exception_exits_ctr,
120               method_jits_ctr,
121               code_buffers_ctr,
122               exception_throws_ctr,
123               exception_clauses_ctr,
124               monitor_events_ctr,
125               thread_starts_ctr,
126               thread_ends_ctr,
127               thread_names_ctr,
128               domain_loads_ctr,
129               domain_unloads_ctr,
130               domain_names_ctr,
131               context_loads_ctr,
132               context_unloads_ctr,
133               sample_ubins_ctr,
134               sample_usyms_ctr,
135               sample_hits_ctr,
136               counter_descriptors_ctr,
137               counter_samples_ctr,
138               perfcounter_descriptors_ctr,
139               perfcounter_samples_ctr,
140               coverage_methods_ctr,
141               coverage_statements_ctr,
142               coverage_classes_ctr,
143               coverage_assemblies_ctr;
144
145 static MonoLinkedListSet profiler_thread_list;
146
147 /*
148  * file format:
149  * [header] [buffer]*
150  *
151  * The file is composed of a header followed by 0 or more buffers.
152  * Each buffer contains events that happened on a thread: for a given thread
153  * buffers that appear later in the file are guaranteed to contain events
154  * that happened later in time. Buffers from separate threads could be interleaved,
155  * though.
156  * Buffers are not required to be aligned.
157  *
158  * header format:
159  * [id: 4 bytes] constant value: LOG_HEADER_ID
160  * [major: 1 byte] [minor: 1 byte] major and minor version of the log profiler
161  * [format: 1 byte] version of the data format for the rest of the file
162  * [ptrsize: 1 byte] size in bytes of a pointer in the profiled program
163  * [startup time: 8 bytes] time in milliseconds since the unix epoch when the program started
164  * [timer overhead: 4 bytes] approximate overhead in nanoseconds of the timer
165  * [flags: 4 bytes] file format flags, should be 0 for now
166  * [pid: 4 bytes] pid of the profiled process
167  * [port: 2 bytes] tcp port for server if != 0
168  * [args size: 4 bytes] size of args
169  * [args: string] arguments passed to the profiler
170  * [arch size: 4 bytes] size of arch
171  * [arch: string] architecture the profiler is running on
172  * [os size: 4 bytes] size of os
173  * [os: string] operating system the profiler is running on
174  *
175  * Multi-byte integers are stored in little-endian format.
176  *
177  * buffer format:
178  * [buffer header] [event]*
179  * Buffers have a fixed-size header followed by 0 or more bytes of event data.
180  * Timing information and other values in the event data are usually stored
181  * as uleb128 or sleb128 integers. To save space, as noted for each item below,
182  * some data is represented as a difference between the actual value and
183  * either the last value of the same type (as for timing information) or
184  * a base value stored in the buffer header.
185  *
186  * For timing information the data is stored as uleb128, since time
187  * increases monotonically within each thread: the value is the number of
188  * nanoseconds to add to the last seen timing value in the buffer. The first
189  * value in a buffer is calculated from the time_base field in the buffer header.
190  *
191  * Object or heap sizes are stored as uleb128.
192  * Pointer differences are stored as sleb128, instead.
193  *
194  * If an unexpected value is found, the rest of the buffer should be ignored,
195  * as later values generally depend on earlier ones to be interpreted correctly.
196  *
197  * buffer header format:
198  * [bufid: 4 bytes] constant value: BUF_ID
199  * [len: 4 bytes] size of the data following the buffer header
200  * [time_base: 8 bytes] time base in nanoseconds since an unspecified epoch
201  * [ptr_base: 8 bytes] base value for pointers
202  * [obj_base: 8 bytes] base value for object addresses
203  * [thread id: 8 bytes] system-specific thread ID (pthread_t for example)
204  * [method_base: 8 bytes] base value for MonoMethod pointers
205  *
206  * event format:
207  * [extended info: upper 4 bits] [type: lower 4 bits]
208  * [time diff: uleb128] nanoseconds since last timing
209  * [data]*
210  * The data that follows depends on type and the extended info.
211  * Type is one of the enum values in mono-profiler-log.h: TYPE_ALLOC, TYPE_GC,
212  * TYPE_METADATA, TYPE_METHOD, TYPE_EXCEPTION, TYPE_MONITOR, TYPE_HEAP.
213  * The extended info bits are interpreted based on type, see
214  * each individual event description below.
215  * Strings are represented as 0-terminated UTF-8 sequences. (An illustrative event-decoding sketch follows this comment block.)
216  *
217  * backtrace format:
218  * [num: uleb128] number of frames following
219  * [frame: sleb128]* num MonoMethod* as a pointer difference from the last such
220  * pointer or the buffer method_base
221  *
222  * type alloc format:
223  * type: TYPE_ALLOC
224  * exinfo: zero or TYPE_ALLOC_BT
225  * [ptr: sleb128] class as a byte difference from ptr_base
226  * [obj: sleb128] object address as a byte difference from obj_base
227  * [size: uleb128] size of the object in the heap
228  * If exinfo == TYPE_ALLOC_BT, a backtrace follows.
229  *
230  * type GC format:
231  * type: TYPE_GC
232  * exinfo: one of TYPE_GC_EVENT, TYPE_GC_RESIZE, TYPE_GC_MOVE, TYPE_GC_HANDLE_CREATED[_BT],
233  * TYPE_GC_HANDLE_DESTROYED[_BT], TYPE_GC_FINALIZE_START, TYPE_GC_FINALIZE_END,
234  * TYPE_GC_FINALIZE_OBJECT_START, TYPE_GC_FINALIZE_OBJECT_END
235  * if exinfo == TYPE_GC_RESIZE
236  *      [heap_size: uleb128] new heap size
237  * if exinfo == TYPE_GC_EVENT
238  *      [event type: byte] GC event (MONO_GC_EVENT_* from profiler.h)
239  *      [generation: byte] GC generation event refers to
240  * if exinfo == TYPE_GC_MOVE
241  *      [num_objects: uleb128] number of object moves that follow
242  *      [objaddr: sleb128]+ num_objects object pointer differences from obj_base
243  *      num_objects is always an even number: the even items are the old
244  *      addresses, the odd items are the respective new object addresses
245  * if exinfo == TYPE_GC_HANDLE_CREATED[_BT]
246  *      [handle_type: uleb128] GC handle type (System.Runtime.InteropServices.GCHandleType)
247  *      upper bits reserved as flags
248  *      [handle: uleb128] GC handle value
249  *      [objaddr: sleb128] object pointer differences from obj_base
250  *      If exinfo == TYPE_GC_HANDLE_CREATED_BT, a backtrace follows.
251  * if exinfo == TYPE_GC_HANDLE_DESTROYED[_BT]
252  *      [handle_type: uleb128] GC handle type (System.Runtime.InteropServices.GCHandleType)
253  *      upper bits reserved as flags
254  *      [handle: uleb128] GC handle value
255  *      If exinfo == TYPE_GC_HANDLE_DESTROYED_BT, a backtrace follows.
256  * if exinfo == TYPE_GC_FINALIZE_OBJECT_{START,END}
257  *      [object: sleb128] the object as a difference from obj_base
258  *
259  * type metadata format:
260  * type: TYPE_METADATA
261  * exinfo: one of: TYPE_END_LOAD, TYPE_END_UNLOAD (optional for TYPE_THREAD and TYPE_DOMAIN,
262  * doesn't occur for TYPE_CLASS)
263  * [mtype: byte] metadata type, one of: TYPE_CLASS, TYPE_IMAGE, TYPE_ASSEMBLY, TYPE_DOMAIN,
264  * TYPE_THREAD, TYPE_CONTEXT
265  * [pointer: sleb128] pointer of the metadata type depending on mtype
266  * if mtype == TYPE_CLASS
267  *      [image: sleb128] MonoImage* as a pointer difference from ptr_base
268  *      [name: string] full class name
269  * if mtype == TYPE_IMAGE
270  *      [name: string] image file name
271  * if mtype == TYPE_ASSEMBLY
272  *      [image: sleb128] MonoImage* as a pointer difference from ptr_base
273  *      [name: string] assembly name
274  * if mtype == TYPE_DOMAIN && exinfo == 0
275  *      [name: string] domain friendly name
276  * if mtype == TYPE_CONTEXT
277  *      [domain: sleb128] domain id as pointer
278  * if mtype == TYPE_THREAD && exinfo == 0
279  *      [name: string] thread name
280  *
281  * type method format:
282  * type: TYPE_METHOD
283  * exinfo: one of: TYPE_LEAVE, TYPE_ENTER, TYPE_EXC_LEAVE, TYPE_JIT
284  * [method: sleb128] MonoMethod* as a pointer difference from the last such
285  * pointer or the buffer method_base
286  * if exinfo == TYPE_JIT
287  *      [code address: sleb128] pointer to the native code as a diff from ptr_base
288  *      [code size: uleb128] size of the generated code
289  *      [name: string] full method name
290  *
291  * type exception format:
292  * type: TYPE_EXCEPTION
293  * exinfo: zero, TYPE_CLAUSE, or TYPE_THROW_BT
294  * if exinfo == TYPE_CLAUSE
295  *      [clause type: byte] MonoExceptionEnum enum value
296  *      [clause index: uleb128] index of the current clause
297  *      [method: sleb128] MonoMethod* as a pointer difference from the last such
298  *      pointer or the buffer method_base
299  *      [object: sleb128] the exception object as a difference from obj_base
300  * else
301  *      [object: sleb128] the exception object as a difference from obj_base
302  *      If exinfo == TYPE_THROW_BT, a backtrace follows.
303  *
304  * type runtime format:
305  * type: TYPE_RUNTIME
306  * exinfo: one of: TYPE_JITHELPER
307  * if exinfo == TYPE_JITHELPER
308  *      [type: byte] MonoProfilerCodeBufferType enum value
309  *      [buffer address: sleb128] pointer to the native code as a diff from ptr_base
310  *      [buffer size: uleb128] size of the generated code
311  *      if type == MONO_PROFILER_CODE_BUFFER_SPECIFIC_TRAMPOLINE
312  *              [name: string] buffer description name
313  *
314  * type monitor format:
315  * type: TYPE_MONITOR
316  * exinfo: zero or TYPE_MONITOR_BT
317  * [type: byte] MONO_PROFILER_MONITOR_{CONTENTION,FAIL,DONE}
318  * [object: sleb128] the lock object as a difference from obj_base
319  * If exinfo == TYPE_MONITOR_BT, a backtrace follows.
320  *
321  * type heap format
322  * type: TYPE_HEAP
323  * exinfo: one of TYPE_HEAP_START, TYPE_HEAP_END, TYPE_HEAP_OBJECT, TYPE_HEAP_ROOT
324  * if exinfo == TYPE_HEAP_OBJECT
325  *      [object: sleb128] the object as a difference from obj_base
326  *      [class: sleb128] the object MonoClass* as a difference from ptr_base
327  *      [size: uleb128] size of the object on the heap
328  *      [num_refs: uleb128] number of object references
329  *      each referenced objref is preceded by a uleb128 encoded offset: the
330  *      first offset is from the object address and each next offset is relative
331  *      to the previous one
332  *      [objrefs: sleb128]+ object referenced as a difference from obj_base
333  *      The same object can appear multiple times, but only the first time
334  *      with size != 0: in the other cases this data will only be used to
335  *      provide additional referenced objects.
336  * if exinfo == TYPE_HEAP_ROOT
337  *      [num_roots: uleb128] number of root references
338  *      [num_gc: uleb128] number of major gcs
339  *      [object: sleb128] the object as a difference from obj_base
340  *      [root_type: byte] the root_type: MonoProfileGCRootType (profiler.h)
341  *      [extra_info: uleb128] the extra_info value
342  *      object, root_type and extra_info are repeated num_roots times
343  *
344  * type sample format
345  * type: TYPE_SAMPLE
346  * exinfo: one of TYPE_SAMPLE_HIT, TYPE_SAMPLE_USYM, TYPE_SAMPLE_UBIN, TYPE_SAMPLE_COUNTERS_DESC, TYPE_SAMPLE_COUNTERS
347  * if exinfo == TYPE_SAMPLE_HIT
348  *      [thread: sleb128] thread id as difference from ptr_base
349  *      [count: uleb128] number of following instruction addresses
350  *      [ip: sleb128]* instruction pointer as difference from ptr_base
351  *      [mbt_count: uleb128] number of managed backtrace frames
352  *      [method: sleb128]* MonoMethod* as a pointer difference from the last such
353  *      pointer or the buffer method_base (the first such method can also be identified by ip, but this is not necessarily true)
354  * if exinfo == TYPE_SAMPLE_USYM
355  *      [address: sleb128] symbol address as a difference from ptr_base
356  *      [size: uleb128] symbol size (may be 0 if unknown)
357  *      [name: string] symbol name
358  * if exinfo == TYPE_SAMPLE_UBIN
359  *      [address: sleb128] address where binary has been loaded as a difference from ptr_base
360  *      [offset: uleb128] file offset of mapping (the same file can be mapped multiple times)
361  *      [size: uleb128] memory size
362  *      [name: string] binary name
363  * if exinfo == TYPE_SAMPLE_COUNTERS_DESC
364  *      [len: uleb128] number of counters
365  *      for i = 0 to len
366  *              [section: uleb128] section of counter
367  *              if section == MONO_COUNTER_PERFCOUNTERS:
368  *                      [section_name: string] section name of counter
369  *              [name: string] name of counter
370  *              [type: byte] type of counter
371  *              [unit: byte] unit of counter
372  *              [variance: byte] variance of counter
373  *              [index: uleb128] unique index of counter
374  * if exinfo == TYPE_SAMPLE_COUNTERS
375  *      while true:
376  *              [index: uleb128] unique index of counter
377  *              if index == 0:
378  *                      break
379  *              [type: byte] type of counter value
380  *              if type == string:
381  *                      if value == null:
382  *                              [0: byte] 0 -> value is null
383  *                      else:
384  *                              [1: byte] 1 -> value is not null
385  *                              [value: string] counter value
386  *              else:
387  *                      [value: uleb128/sleb128/double] counter value, can be sleb128, uleb128 or double (determined by using type)
388  *
389  * type coverage format
390  * type: TYPE_COVERAGE
391  * exinfo: one of TYPE_COVERAGE_METHOD, TYPE_COVERAGE_STATEMENT, TYPE_COVERAGE_ASSEMBLY, TYPE_COVERAGE_CLASS
392  * if exinfo == TYPE_COVERAGE_METHOD
393  *  [assembly: string] name of assembly
394  *  [class: string] name of the class
395  *  [name: string] name of the method
396  *  [signature: string] the signature of the method
397  *  [filename: string] the file path of the file that contains this method
398  *  [token: uleb128] the method token
399  *  [method_id: uleb128] an ID for this data to associate with TYPE_COVERAGE_STATEMENT events
400  *  [len: uleb128] the number of TYPE_COVERAGE_STATEMENT events associated with this method
401  * if exinfo == TYPE_COVERAGE_STATEMENT
402  *  [method_id: uleb128] the ID of the TYPE_COVERAGE_METHOD event to associate this with
403  *  [offset: uleb128] the il offset relative to the previous offset
404  *  [counter: uleb128] the counter for this instruction
405  *  [line: uleb128] the line of filename containing this instruction
406  *  [column: uleb128] the column containing this instruction
407  * if exinfo == TYPE_COVERAGE_ASSEMBLY
408  *  [name: string] assembly name
409  *  [guid: string] assembly GUID
410  *  [filename: string] assembly filename
411  *  [number_of_methods: uleb128] the number of methods in this assembly
412  *  [fully_covered: uleb128] the number of fully covered methods
413  *  [partially_covered: uleb128] the number of partially covered methods
414  *    currently partially_covered will always be 0, and fully_covered is the
415  *    number of methods that are fully and partially covered.
416  * if exinfo == TYPE_COVERAGE_CLASS
417  *  [name: string] assembly name
418  *  [class: string] class name
419  *  [number_of_methods: uleb128] the number of methods in this class
420  *  [fully_covered: uleb128] the number of fully covered methods
421  *  [partially_covered: uleb128] the number of partially covered methods
422  *    currently partially_covered will always be 0, and fully_covered is the
423  *    number of methods that are fully and partially covered.
424  *
425  * type meta format:
426  * type: TYPE_META
427  * exinfo: one of: TYPE_SYNC_POINT
428  * if exinfo == TYPE_SYNC_POINT
429  *      [type: byte] MonoProfilerSyncPointType enum value
430  */
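
/*
 * The following is an illustrative, reader-side sketch only; it is not used
 * by the profiler itself. It shows how a log consumer might decode the event
 * prefix described above (a type/exinfo byte followed by a uleb128 time
 * diff). The names decode_uleb128, decode_event_prefix and EventPrefix are
 * hypothetical and do not exist elsewhere in the runtime.
 */

typedef struct {
        uint8_t type;        // Lower 4 bits of the first byte: event type.
        uint8_t exinfo;      // Upper 4 bits of the first byte: extended info,
                             // kept in place so it can be compared against the
                             // TYPE_*_BT-style exinfo constants.
        uint64_t time_diff;  // Nanoseconds since the previous event in the buffer.
} EventPrefix;

static inline const uint8_t *
decode_uleb128 (const uint8_t *p, uint64_t *value)
{
        uint64_t result = 0;
        int shift = 0;
        uint8_t b;

        do {
                b = *p++;
                result |= (uint64_t) (b & 0x7f) << shift;
                shift += 7;
        } while (b & 0x80);

        *value = result;

        return p;
}

static inline const uint8_t *
decode_event_prefix (const uint8_t *p, EventPrefix *ev)
{
        uint8_t first = *p++;

        ev->type = first & 0xf;
        ev->exinfo = first & 0xf0;

        return decode_uleb128 (p, &ev->time_diff);
}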
431
432 // Pending data to be written to the log, for a single thread.
433 // Threads periodically flush their own LogBuffers by calling send_log_unsafe ().
434 typedef struct _LogBuffer LogBuffer;
435 struct _LogBuffer {
436         // Next (older) LogBuffer in processing queue
437         LogBuffer *next;
438
439         uint64_t time_base;
440         uint64_t last_time;
441         uintptr_t ptr_base;
442         uintptr_t method_base;
443         uintptr_t last_method;
444         uintptr_t obj_base;
445         uintptr_t thread_id;
446
447         // Bytes allocated for this LogBuffer
448         int size;
449
450         // Start of currently unused space in buffer
451         unsigned char* cursor;
452
453         // Pointer to start-of-structure-plus-size (for convenience)
454         unsigned char* buf_end;
455
456         // Start of data in buffer. Contents follow "buffer format" described above.
457         unsigned char buf [1];
458 };
459
460 typedef struct {
461         MonoLinkedListSetNode node;
462
463         // Convenience pointer to the profiler structure.
464         MonoProfiler *profiler;
465
466         // Was this thread added to the LLS?
467         gboolean attached;
468
469         // The current log buffer for this thread.
470         LogBuffer *buffer;
471
472         // Methods referenced by events in `buffer`, see `MethodInfo`.
473         GPtrArray *methods;
474
475         // Current call depth for enter/leave events.
476         int call_depth;
477
478         // Indicates whether this thread is currently writing to its `buffer`.
479         gboolean busy;
480
481         // Has this thread written a thread end event to `buffer`?
482         gboolean ended;
483
484         // Stored in `buffer_lock_state` to take the exclusive lock.
485         int small_id;
486 } MonoProfilerThread;
487
488 // Do not use these TLS macros directly unless you know what you're doing.
489
490 #ifdef HOST_WIN32
491
492 #define PROF_TLS_SET(VAL) (TlsSetValue (profiler_tls, (VAL)))
493 #define PROF_TLS_GET() ((MonoProfilerThread *) TlsGetValue (profiler_tls))
494 #define PROF_TLS_INIT() (profiler_tls = TlsAlloc ())
495 #define PROF_TLS_FREE() (TlsFree (profiler_tls))
496
497 static DWORD profiler_tls;
498
499 #elif HAVE_KW_THREAD
500
501 #define PROF_TLS_SET(VAL) (profiler_tls = (VAL))
502 #define PROF_TLS_GET() (profiler_tls)
503 #define PROF_TLS_INIT()
504 #define PROF_TLS_FREE()
505
506 static __thread MonoProfilerThread *profiler_tls;
507
508 #else
509
510 #define PROF_TLS_SET(VAL) (pthread_setspecific (profiler_tls, (VAL)))
511 #define PROF_TLS_GET() ((MonoProfilerThread *) pthread_getspecific (profiler_tls))
512 #define PROF_TLS_INIT() (pthread_key_create (&profiler_tls, NULL))
513 #define PROF_TLS_FREE() (pthread_key_delete (profiler_tls))
514
515 static pthread_key_t profiler_tls;
516
517 #endif
518
519 static uintptr_t
520 thread_id (void)
521 {
522         return (uintptr_t) mono_native_thread_id_get ();
523 }
524
525 static uintptr_t
526 process_id (void)
527 {
528 #ifdef HOST_WIN32
529         return (uintptr_t) GetCurrentProcessId ();
530 #else
531         return (uintptr_t) getpid ();
532 #endif
533 }
534
535 #ifdef __APPLE__
536 static mach_timebase_info_data_t timebase_info;
537 #elif defined (HOST_WIN32)
538 static LARGE_INTEGER pcounter_freq;
539 #endif
540
541 #define TICKS_PER_SEC 1000000000LL
542
543 static uint64_t
544 current_time (void)
545 {
546 #ifdef __APPLE__
547         uint64_t time = mach_absolute_time ();
548
549         time *= timebase_info.numer;
550         time /= timebase_info.denom;
551
552         return time;
553 #elif defined (HOST_WIN32)
554         LARGE_INTEGER value;
555
556         QueryPerformanceCounter (&value);
557
558         return value.QuadPart * TICKS_PER_SEC / pcounter_freq.QuadPart;
559 #elif defined (CLOCK_MONOTONIC)
560         struct timespec tspec;
561
562         clock_gettime (CLOCK_MONOTONIC, &tspec);
563
564         return ((uint64_t) tspec.tv_sec * TICKS_PER_SEC + tspec.tv_nsec);
565 #else
566         struct timeval tv;
567
568         gettimeofday (&tv, NULL);
569
570         return ((uint64_t) tv.tv_sec * TICKS_PER_SEC + tv.tv_usec * 1000);
571 #endif
572 }
573
574 static int timer_overhead;
575
576 static void
577 init_time (void)
578 {
579 #ifdef __APPLE__
580         mach_timebase_info (&timebase_info);
581 #elif defined (HOST_WIN32)
582         QueryPerformanceFrequency (&pcounter_freq);
583 #endif
584
585         uint64_t time_start = current_time ();
586
587         for (int i = 0; i < 256; ++i)
588                 current_time ();
589
590         uint64_t time_end = current_time ();
591
592         timer_overhead = (time_end - time_start) / 256;
593 }
594
595 /*
596  * These macros should be used when writing an event to a log buffer. They
597  * take care of a bunch of stuff that can be repetitive and error-prone, such
598  * as attaching the current thread, acquiring/releasing the buffer lock,
599  * incrementing the event counter, expanding the log buffer, etc. They also
600  * create a scope so that it's harder to leak the LogBuffer pointer, which can
601  * be problematic as the pointer is unstable when the buffer lock isn't
602  * acquired.
603  *
604  * If the calling thread is already attached, these macros will not alter its
605  * attach mode (i.e. whether it's added to the LLS). If the thread is not
606  * attached, init_thread () will be called with add_to_lls = TRUE.
607  */
608
609 #define ENTER_LOG(COUNTER, BUFFER, SIZE) \
610         do { \
611                 MonoProfilerThread *thread__ = get_thread (); \
612                 if (thread__->attached) \
613                         buffer_lock (); \
614                 g_assert (!thread__->busy && "Why are we trying to write a new event while already writing one?"); \
615                 thread__->busy = TRUE; \
616                 InterlockedIncrement ((COUNTER)); \
617                 LogBuffer *BUFFER = ensure_logbuf_unsafe (thread__, (SIZE))
618
619 #define EXIT_LOG_EXPLICIT(SEND) \
620                 thread__->busy = FALSE; \
621                 if ((SEND)) \
622                         send_log_unsafe (TRUE); \
623                 if (thread__->attached) \
624                         buffer_unlock (); \
625         } while (0)
626
627 // Pass these to EXIT_LOG_EXPLICIT () for easier reading.
628 #define DO_SEND TRUE
629 #define NO_SEND FALSE
630
631 #define EXIT_LOG EXIT_LOG_EXPLICIT (DO_SEND)
632
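/*
 * As a purely illustrative sketch of how these macros pair up, loosely
 * mirroring sync_point_mark () further down (the real function uses
 * EXIT_LOG_EXPLICIT (NO_SEND) rather than EXIT_LOG), a typical emission
 * site looks roughly like this:
 *
 *	ENTER_LOG (&sync_points_ctr, logbuffer,
 *		EVENT_SIZE +
 *		BYTE_SIZE
 *	);
 *
 *	emit_event (logbuffer, TYPE_META | TYPE_SYNC_POINT);
 *	emit_byte (logbuffer, type);
 *
 *	EXIT_LOG;
 */
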
633 typedef struct _BinaryObject BinaryObject;
634 struct _BinaryObject {
635         BinaryObject *next;
636         void *addr;
637         char *name;
638 };
639
640 static MonoProfiler *log_profiler;
641
642 struct _MonoProfiler {
643         FILE* file;
644 #if defined (HAVE_SYS_ZLIB)
645         gzFile gzfile;
646 #endif
647         char *args;
648         uint64_t startup_time;
649         int pipe_output;
650         int command_port;
651         int server_socket;
652         int pipes [2];
653         MonoNativeThreadId helper_thread;
654         MonoNativeThreadId writer_thread;
655         MonoNativeThreadId dumper_thread;
656         volatile gint32 run_writer_thread;
657         MonoLockFreeAllocSizeClass writer_entry_size_class;
658         MonoLockFreeAllocator writer_entry_allocator;
659         MonoLockFreeQueue writer_queue;
660         MonoSemType writer_queue_sem;
661         MonoConcurrentHashTable *method_table;
662         mono_mutex_t method_table_mutex;
663         volatile gint32 run_dumper_thread;
664         MonoLockFreeQueue dumper_queue;
665         MonoSemType dumper_queue_sem;
666         MonoLockFreeAllocSizeClass sample_size_class;
667         MonoLockFreeAllocator sample_allocator;
668         MonoLockFreeQueue sample_reuse_queue;
669         BinaryObject *binary_objects;
670         GPtrArray *coverage_filters;
671 };
672
673 typedef struct {
674         MonoLockFreeQueueNode node;
675         GPtrArray *methods;
676         LogBuffer *buffer;
677 } WriterQueueEntry;
678
679 #define WRITER_ENTRY_BLOCK_SIZE (mono_pagesize ())
680
681 typedef struct {
682         MonoMethod *method;
683         MonoJitInfo *ji;
684         uint64_t time;
685 } MethodInfo;
686
687 static char*
688 pstrdup (const char *s)
689 {
690         int len = strlen (s) + 1;
691         char *p = (char *) g_malloc (len);
692         memcpy (p, s, len);
693         return p;
694 }
695
696 static void *
697 alloc_buffer (int size)
698 {
699         return mono_valloc (NULL, size, MONO_MMAP_READ | MONO_MMAP_WRITE | MONO_MMAP_ANON | MONO_MMAP_PRIVATE, MONO_MEM_ACCOUNT_PROFILER);
700 }
701
702 static void
703 free_buffer (void *buf, int size)
704 {
705         mono_vfree (buf, size, MONO_MEM_ACCOUNT_PROFILER);
706 }
707
708 static LogBuffer*
709 create_buffer (uintptr_t tid, int bytes)
710 {
711         LogBuffer* buf = (LogBuffer *) alloc_buffer (MAX (BUFFER_SIZE, bytes));
712
713         InterlockedIncrement (&buffer_allocations_ctr);
714
715         buf->size = BUFFER_SIZE;
716         buf->time_base = current_time ();
717         buf->last_time = buf->time_base;
718         buf->buf_end = (unsigned char *) buf + buf->size;
719         buf->cursor = buf->buf;
720         buf->thread_id = tid;
721
722         return buf;
723 }
724
725 /*
726  * Must be called with the reader lock held if thread is the current thread, or
727  * the exclusive lock if thread is a different thread. However, if thread is
728  * the current thread, and init_thread () was called with add_to_lls = FALSE,
729  * then no locking is necessary.
730  */
731 static void
732 init_buffer_state (MonoProfilerThread *thread)
733 {
734         thread->buffer = create_buffer (thread->node.key, 0);
735         thread->methods = NULL;
736 }
737
738 static void
739 clear_hazard_pointers (MonoThreadHazardPointers *hp)
740 {
741         mono_hazard_pointer_clear (hp, 0);
742         mono_hazard_pointer_clear (hp, 1);
743         mono_hazard_pointer_clear (hp, 2);
744 }
745
746 static MonoProfilerThread *
747 init_thread (MonoProfiler *prof, gboolean add_to_lls)
748 {
749         MonoProfilerThread *thread = PROF_TLS_GET ();
750
751         /*
752          * Sometimes we may try to initialize a thread twice. One example is the
753          * main thread: We initialize it when setting up the profiler, but we will
754          * also get a thread_start () callback for it. Another example is when
755          * attaching new threads to the runtime: We may get a gc_alloc () callback
756          * for that thread's thread object (where we initialize it), soon followed
757          * by a thread_start () callback.
758          *
759          * These cases are harmless anyhow. Just return if we've already done the
760          * initialization work.
761          */
762         if (thread)
763                 return thread;
764
765         thread = g_malloc (sizeof (MonoProfilerThread));
766         thread->node.key = thread_id ();
767         thread->profiler = prof;
768         thread->attached = add_to_lls;
769         thread->call_depth = 0;
770         thread->busy = 0;
771         thread->ended = FALSE;
772
773         init_buffer_state (thread);
774
775         thread->small_id = mono_thread_info_register_small_id ();
776
777         /*
778          * Some internal profiler threads don't need to be cleaned up
779          * by the main thread on shutdown.
780          */
781         if (add_to_lls) {
782                 MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();
783                 g_assert (mono_lls_insert (&profiler_thread_list, hp, &thread->node) && "Why can't we insert the thread in the LLS?");
784                 clear_hazard_pointers (hp);
785         }
786
787         PROF_TLS_SET (thread);
788
789         return thread;
790 }
791
792 // Only valid if init_thread () was called with add_to_lls = FALSE.
793 static void
794 deinit_thread (MonoProfilerThread *thread)
795 {
796         g_assert (!thread->attached && "Why are we manually freeing an attached thread?");
797
798         g_free (thread);
799         PROF_TLS_SET (NULL);
800 }
801
802 static MonoProfilerThread *
803 get_thread (void)
804 {
805         return init_thread (log_profiler, TRUE);
806 }
807
808 // Only valid if init_thread () was called with add_to_lls = FALSE.
809 static LogBuffer *
810 ensure_logbuf_unsafe (MonoProfilerThread *thread, int bytes)
811 {
812         LogBuffer *old = thread->buffer;
813
814         if (old->cursor + bytes < old->buf_end)
815                 return old;
816
817         LogBuffer *new_ = create_buffer (thread->node.key, bytes);
818         new_->next = old;
819         thread->buffer = new_;
820
821         return new_;
822 }
823
824 /*
825  * This is a reader/writer spin lock of sorts used to protect log buffers.
826  * When a thread modifies its own log buffer, it increments the reader
827  * count. When a thread wants to access log buffers of other threads, it
828  * takes the exclusive lock.
829  *
830  * `buffer_lock_state` holds the reader count in its lower 16 bits, and
831  * the small ID of the thread currently holding the exclusive (writer)
832  * lock in its upper 16 bits. Both can be zero. It's important that the
833  * whole lock state is a single word that can be read/written atomically
834  * to avoid race conditions where there could end up being readers while
835  * the writer lock is held.
836  *
837  * The lock is writer-biased. When a thread wants to take the exclusive
838  * lock, it increments `buffer_lock_exclusive_intent` which will make new
839  * readers spin until it's back to zero, then takes the exclusive lock
840  * once the reader count has reached zero. After releasing the exclusive
841  * lock, it decrements `buffer_lock_exclusive_intent`, which, when it
842  * reaches zero again, allows readers to increment the reader count.
843  *
844  * The writer bias is necessary because we take the exclusive lock in
845  * `gc_event ()` during STW. If the writer bias was not there, and a
846  * program had a large number of threads, STW-induced pauses could be
847  * significantly longer than they have to be. Also, we emit periodic
848  * sync points from the helper thread, which requires taking the
849  * exclusive lock, and we need those to arrive with a reasonably
850  * consistent frequency so that readers don't have to queue up too many
851  * events between sync points.
852  */
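
/*
 * For illustration: with the reader count in the low 16 bits and the
 * exclusive holder's small ID in the high 16 bits, three concurrent readers
 * yield a state of 0x00000003, while a thread with small ID 5 holding the
 * exclusive lock yields 5 << 16 = 0x00050000.
 */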
853 static volatile gint32 buffer_lock_state;
854 static volatile gint32 buffer_lock_exclusive_intent;
855
856 // Can be used recursively.
857 static void
858 buffer_lock (void)
859 {
860         /*
861          * If the thread holding the exclusive lock tries to modify the
862          * reader count, just make it a no-op. This way, we also avoid
863          * invoking the GC safe point macros below, which could break if
864          * done from a thread that is currently the initiator of STW.
865          *
866          * In other words, we rely on the fact that the GC thread takes
867          * the exclusive lock in the gc_event () callback when the world
868          * is about to stop.
869          */
870         if (InterlockedRead (&buffer_lock_state) != get_thread ()->small_id << 16) {
871                 MONO_ENTER_GC_SAFE;
872
873                 gint32 old, new_;
874
875                 do {
876                 restart:
877                         // Hold off if a thread wants to take the exclusive lock.
878                         while (InterlockedRead (&buffer_lock_exclusive_intent))
879                                 mono_thread_info_yield ();
880
881                         old = InterlockedRead (&buffer_lock_state);
882
883                         // Is a thread holding the exclusive lock?
884                         if (old >> 16) {
885                                 mono_thread_info_yield ();
886                                 goto restart;
887                         }
888
889                         new_ = old + 1;
890                 } while (InterlockedCompareExchange (&buffer_lock_state, new_, old) != old);
891
892                 MONO_EXIT_GC_SAFE;
893         }
894
895         mono_memory_barrier ();
896 }
897
898 static void
899 buffer_unlock (void)
900 {
901         mono_memory_barrier ();
902
903         gint32 state = InterlockedRead (&buffer_lock_state);
904
905         // See the comment in buffer_lock ().
906         if (state == PROF_TLS_GET ()->small_id << 16)
907                 return;
908
909         g_assert (state && "Why are we decrementing a zero reader count?");
910         g_assert (!(state >> 16) && "Why is the exclusive lock held?");
911
912         InterlockedDecrement (&buffer_lock_state);
913 }
914
915 // Cannot be used recursively.
916 static void
917 buffer_lock_excl (void)
918 {
919         gint32 new_ = get_thread ()->small_id << 16;
920
921         g_assert (InterlockedRead (&buffer_lock_state) != new_ && "Why are we taking the exclusive lock twice?");
922
923         InterlockedIncrement (&buffer_lock_exclusive_intent);
924
925         MONO_ENTER_GC_SAFE;
926
927         while (InterlockedCompareExchange (&buffer_lock_state, new_, 0))
928                 mono_thread_info_yield ();
929
930         MONO_EXIT_GC_SAFE;
931
932         mono_memory_barrier ();
933 }
934
935 static void
936 buffer_unlock_excl (void)
937 {
938         mono_memory_barrier ();
939
940         gint32 state = InterlockedRead (&buffer_lock_state);
941         gint32 excl = state >> 16;
942
943         g_assert (excl && "Why is the exclusive lock not held?");
944         g_assert (excl == PROF_TLS_GET ()->small_id && "Why does another thread hold the exclusive lock?");
945         g_assert (!(state & 0xFFFF) && "Why are there readers when the exclusive lock is held?");
946
947         InterlockedWrite (&buffer_lock_state, 0);
948         InterlockedDecrement (&buffer_lock_exclusive_intent);
949 }
950
951 static void
952 encode_uleb128 (uint64_t value, uint8_t *buf, uint8_t **endbuf)
953 {
954         uint8_t *p = buf;
955
956         do {
957                 uint8_t b = value & 0x7f;
958                 value >>= 7;
959
960                 if (value != 0) /* more bytes to come */
961                         b |= 0x80;
962
963                 *p ++ = b;
964         } while (value);
965
966         *endbuf = p;
967 }
968
969 static void
970 encode_sleb128 (intptr_t value, uint8_t *buf, uint8_t **endbuf)
971 {
972         int more = 1;
973         int negative = (value < 0);
974         unsigned int size = sizeof (intptr_t) * 8;
975         uint8_t byte;
976         uint8_t *p = buf;
977
978         while (more) {
979                 byte = value & 0x7f;
980                 value >>= 7;
981
982                 /* the following is unnecessary if the
983                  * implementation of >>= uses an arithmetic rather
984                  * than logical shift for a signed left operand
985                  */
986                 if (negative)
987                         /* sign extend */
988                         value |= - ((intptr_t) 1 <<(size - 7));
989
990                 /* sign bit of byte is second high order bit (0x40) */
991                 if ((value == 0 && !(byte & 0x40)) ||
992                     (value == -1 && (byte & 0x40)))
993                         more = 0;
994                 else
995                         byte |= 0x80;
996
997                 *p ++= byte;
998         }
999
1000         *endbuf = p;
1001 }
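
/*
 * Worked example (illustrative): the unsigned value 624485 (0x98765) encodes
 * as the uleb128 bytes 0xE5 0x8E 0x26: 7 bits per byte, least significant
 * group first, with the high bit set on all but the last byte. The signed
 * value -2 encodes as the single sleb128 byte 0x7E.
 */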
1002
1003 static void
1004 emit_byte (LogBuffer *logbuffer, int value)
1005 {
1006         logbuffer->cursor [0] = value;
1007         logbuffer->cursor++;
1008
1009         g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
1010 }
1011
1012 static void
1013 emit_value (LogBuffer *logbuffer, int value)
1014 {
1015         encode_uleb128 (value, logbuffer->cursor, &logbuffer->cursor);
1016
1017         g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
1018 }
1019
1020 static void
1021 emit_time (LogBuffer *logbuffer, uint64_t value)
1022 {
1023         uint64_t tdiff = value - logbuffer->last_time;
1024         encode_uleb128 (tdiff, logbuffer->cursor, &logbuffer->cursor);
1025         logbuffer->last_time = value;
1026
1027         g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
1028 }
1029
1030 static void
1031 emit_event_time (LogBuffer *logbuffer, int event, uint64_t time)
1032 {
1033         emit_byte (logbuffer, event);
1034         emit_time (logbuffer, time);
1035 }
1036
1037 static void
1038 emit_event (LogBuffer *logbuffer, int event)
1039 {
1040         emit_event_time (logbuffer, event, current_time ());
1041 }
1042
1043 static void
1044 emit_svalue (LogBuffer *logbuffer, int64_t value)
1045 {
1046         encode_sleb128 (value, logbuffer->cursor, &logbuffer->cursor);
1047
1048         g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
1049 }
1050
1051 static void
1052 emit_uvalue (LogBuffer *logbuffer, uint64_t value)
1053 {
1054         encode_uleb128 (value, logbuffer->cursor, &logbuffer->cursor);
1055
1056         g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
1057 }
1058
1059 static void
1060 emit_ptr (LogBuffer *logbuffer, void *ptr)
1061 {
1062         if (!logbuffer->ptr_base)
1063                 logbuffer->ptr_base = (uintptr_t) ptr;
1064
1065         emit_svalue (logbuffer, (intptr_t) ptr - logbuffer->ptr_base);
1066
1067         g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
1068 }
1069
1070 static void
1071 emit_method_inner (LogBuffer *logbuffer, void *method)
1072 {
1073         if (!logbuffer->method_base) {
1074                 logbuffer->method_base = (intptr_t) method;
1075                 logbuffer->last_method = (intptr_t) method;
1076         }
1077
1078         encode_sleb128 ((intptr_t) ((char *) method - (char *) logbuffer->last_method), logbuffer->cursor, &logbuffer->cursor);
1079         logbuffer->last_method = (intptr_t) method;
1080
1081         g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
1082 }
1083
1084 static void
1085 register_method_local (MonoMethod *method, MonoJitInfo *ji)
1086 {
1087         MonoProfilerThread *thread = get_thread ();
1088
1089         if (!mono_conc_hashtable_lookup (thread->profiler->method_table, method)) {
1090                 MethodInfo *info = (MethodInfo *) g_malloc (sizeof (MethodInfo));
1091
1092                 info->method = method;
1093                 info->ji = ji;
1094                 info->time = current_time ();
1095
1096                 buffer_lock ();
1097
1098                 GPtrArray *arr = thread->methods ? thread->methods : (thread->methods = g_ptr_array_new ());
1099                 g_ptr_array_add (arr, info);
1100
1101                 buffer_unlock ();
1102         }
1103 }
1104
1105 static void
1106 emit_method (LogBuffer *logbuffer, MonoMethod *method)
1107 {
1108         register_method_local (method, NULL);
1109         emit_method_inner (logbuffer, method);
1110 }
1111
1112 static void
1113 emit_obj (LogBuffer *logbuffer, void *ptr)
1114 {
1115         if (!logbuffer->obj_base)
1116                 logbuffer->obj_base = (uintptr_t) ptr >> 3;
1117
1118         emit_svalue (logbuffer, ((uintptr_t) ptr >> 3) - logbuffer->obj_base);
1119
1120         g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
1121 }
1122
1123 static void
1124 emit_string (LogBuffer *logbuffer, const char *str, size_t size)
1125 {
1126         size_t i = 0;
1127         if (str) {
1128                 for (; i < size; i++) {
1129                         if (str[i] == '\0')
1130                                 break;
1131                         emit_byte (logbuffer, str [i]);
1132                 }
1133         }
1134         emit_byte (logbuffer, '\0');
1135 }
1136
1137 static void
1138 emit_double (LogBuffer *logbuffer, double value)
1139 {
1140         int i;
1141         unsigned char buffer[8];
1142         memcpy (buffer, &value, 8);
1143 #if G_BYTE_ORDER == G_BIG_ENDIAN
1144         for (i = 7; i >= 0; i--)
1145 #else
1146         for (i = 0; i < 8; i++)
1147 #endif
1148                 emit_byte (logbuffer, buffer[i]);
1149 }
1150
1151 static char*
1152 write_int16 (char *buf, int32_t value)
1153 {
1154         int i;
1155         for (i = 0; i < 2; ++i) {
1156                 buf [i] = value;
1157                 value >>= 8;
1158         }
1159         return buf + 2;
1160 }
1161
1162 static char*
1163 write_int32 (char *buf, int32_t value)
1164 {
1165         int i;
1166         for (i = 0; i < 4; ++i) {
1167                 buf [i] = value;
1168                 value >>= 8;
1169         }
1170         return buf + 4;
1171 }
1172
1173 static char*
1174 write_int64 (char *buf, int64_t value)
1175 {
1176         int i;
1177         for (i = 0; i < 8; ++i) {
1178                 buf [i] = value;
1179                 value >>= 8;
1180         }
1181         return buf + 8;
1182 }
1183
1184 static char *
1185 write_header_string (char *p, const char *str)
1186 {
1187         size_t len = strlen (str) + 1;
1188
1189         p = write_int32 (p, len);
1190         strcpy (p, str);
1191
1192         return p + len;
1193 }
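
/*
 * Worked example (illustrative): write_header_string (p, "x86") first writes
 * the little-endian int32 length 4 (strlen + 1), then the bytes 'x', '8',
 * '6', '\0', and returns a pointer just past the terminating NUL.
 */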
1194
1195 static void
1196 dump_header (MonoProfiler *profiler)
1197 {
1198         const char *args = profiler->args;
1199         const char *arch = mono_config_get_cpu ();
1200         const char *os = mono_config_get_os ();
1201
1202         char *hbuf = g_malloc (
1203                 sizeof (gint32) /* header id */ +
1204                 sizeof (gint8) /* major version */ +
1205                 sizeof (gint8) /* minor version */ +
1206                 sizeof (gint8) /* data version */ +
1207                 sizeof (gint8) /* word size */ +
1208                 sizeof (gint64) /* startup time */ +
1209                 sizeof (gint32) /* timer overhead */ +
1210                 sizeof (gint32) /* flags */ +
1211                 sizeof (gint32) /* process id */ +
1212                 sizeof (gint16) /* command port */ +
1213                 sizeof (gint32) + strlen (args) + 1 /* arguments */ +
1214                 sizeof (gint32) + strlen (arch) + 1 /* architecture */ +
1215                 sizeof (gint32) + strlen (os) + 1 /* operating system */
1216         );
1217         char *p = hbuf;
1218
1219         p = write_int32 (p, LOG_HEADER_ID);
1220         *p++ = LOG_VERSION_MAJOR;
1221         *p++ = LOG_VERSION_MINOR;
1222         *p++ = LOG_DATA_VERSION;
1223         *p++ = sizeof (void *);
1224         p = write_int64 (p, ((uint64_t) time (NULL)) * 1000);
1225         p = write_int32 (p, timer_overhead);
1226         p = write_int32 (p, 0); /* flags */
1227         p = write_int32 (p, process_id ());
1228         p = write_int16 (p, profiler->command_port);
1229         p = write_header_string (p, args);
1230         p = write_header_string (p, arch);
1231         p = write_header_string (p, os);
1232
1233 #if defined (HAVE_SYS_ZLIB)
1234         if (profiler->gzfile) {
1235                 gzwrite (profiler->gzfile, hbuf, p - hbuf);
1236         } else
1237 #endif
1238         {
1239                 fwrite (hbuf, p - hbuf, 1, profiler->file);
1240                 fflush (profiler->file);
1241         }
1242
1243         g_free (hbuf);
1244 }
1245
1246 /*
1247  * Must be called with the reader lock held if thread is the current thread, or
1248  * the exclusive lock if thread is a different thread. However, if thread is
1249  * the current thread, and init_thread () was called with add_to_lls = FALSE,
1250  * then no locking is necessary.
1251  */
1252 static void
1253 send_buffer (MonoProfilerThread *thread)
1254 {
1255         WriterQueueEntry *entry = mono_lock_free_alloc (&thread->profiler->writer_entry_allocator);
1256         entry->methods = thread->methods;
1257         entry->buffer = thread->buffer;
1258
1259         mono_lock_free_queue_node_init (&entry->node, FALSE);
1260
1261         mono_lock_free_queue_enqueue (&thread->profiler->writer_queue, &entry->node);
1262         mono_os_sem_post (&thread->profiler->writer_queue_sem);
1263 }
1264
1265 static void
1266 free_thread (gpointer p)
1267 {
1268         MonoProfilerThread *thread = p;
1269
1270         if (!thread->ended) {
1271                 /*
1272                  * The thread is being cleaned up by the main thread during
1273                  * shutdown. This typically happens for internal runtime
1274                  * threads. We need to synthesize a thread end event.
1275                  */
1276
1277                 InterlockedIncrement (&thread_ends_ctr);
1278
1279                 if (ENABLED (PROFLOG_THREAD_EVENTS)) {
1280                         LogBuffer *buf = ensure_logbuf_unsafe (thread,
1281                                 EVENT_SIZE /* event */ +
1282                                 BYTE_SIZE /* type */ +
1283                                 LEB128_SIZE /* tid */
1284                         );
1285
1286                         emit_event (buf, TYPE_END_UNLOAD | TYPE_METADATA);
1287                         emit_byte (buf, TYPE_THREAD);
1288                         emit_ptr (buf, (void *) thread->node.key);
1289                 }
1290         }
1291
1292         send_buffer (thread);
1293
1294         g_free (thread);
1295 }
1296
1297 static void
1298 remove_thread (MonoProfilerThread *thread)
1299 {
1300         MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();
1301
1302         if (mono_lls_remove (&profiler_thread_list, hp, &thread->node))
1303                 mono_thread_hazardous_try_free (thread, free_thread);
1304
1305         clear_hazard_pointers (hp);
1306 }
1307
1308 static void
1309 dump_buffer (MonoProfiler *profiler, LogBuffer *buf)
1310 {
1311         char hbuf [128];
1312         char *p = hbuf;
1313
1314         if (buf->next)
1315                 dump_buffer (profiler, buf->next);
1316
1317         if (buf->cursor - buf->buf) {
1318                 p = write_int32 (p, BUF_ID);
1319                 p = write_int32 (p, buf->cursor - buf->buf);
1320                 p = write_int64 (p, buf->time_base);
1321                 p = write_int64 (p, buf->ptr_base);
1322                 p = write_int64 (p, buf->obj_base);
1323                 p = write_int64 (p, buf->thread_id);
1324                 p = write_int64 (p, buf->method_base);
1325
1326 #if defined (HAVE_SYS_ZLIB)
1327                 if (profiler->gzfile) {
1328                         gzwrite (profiler->gzfile, hbuf, p - hbuf);
1329                         gzwrite (profiler->gzfile, buf->buf, buf->cursor - buf->buf);
1330                 } else
1331 #endif
1332                 {
1333                         fwrite (hbuf, p - hbuf, 1, profiler->file);
1334                         fwrite (buf->buf, buf->cursor - buf->buf, 1, profiler->file);
1335                         fflush (profiler->file);
1336                 }
1337         }
1338
1339         free_buffer (buf, buf->size);
1340 }
1341
1342 static void
1343 dump_buffer_threadless (MonoProfiler *profiler, LogBuffer *buf)
1344 {
1345         for (LogBuffer *iter = buf; iter; iter = iter->next)
1346                 iter->thread_id = 0;
1347
1348         dump_buffer (profiler, buf);
1349 }
1350
1351 // Only valid if init_thread () was called with add_to_lls = FALSE.
1352 static void
1353 send_log_unsafe (gboolean if_needed)
1354 {
1355         MonoProfilerThread *thread = PROF_TLS_GET ();
1356
1357         if (!if_needed || (if_needed && thread->buffer->next)) {
1358                 if (!thread->attached)
1359                         for (LogBuffer *iter = thread->buffer; iter; iter = iter->next)
1360                                 iter->thread_id = 0;
1361
1362                 send_buffer (thread);
1363                 init_buffer_state (thread);
1364         }
1365 }
1366
1367 // Assumes that the exclusive lock is held.
1368 static void
1369 sync_point_flush (void)
1370 {
1371         g_assert (InterlockedRead (&buffer_lock_state) == PROF_TLS_GET ()->small_id << 16 && "Why don't we hold the exclusive lock?");
1372
1373         MONO_LLS_FOREACH_SAFE (&profiler_thread_list, MonoProfilerThread, thread) {
1374                 g_assert (thread->attached && "Why is a thread in the LLS not attached?");
1375
1376                 send_buffer (thread);
1377                 init_buffer_state (thread);
1378         } MONO_LLS_FOREACH_SAFE_END
1379 }
1380
1381 // Assumes that the exclusive lock is held.
1382 static void
1383 sync_point_mark (MonoProfilerSyncPointType type)
1384 {
1385         g_assert (InterlockedRead (&buffer_lock_state) == PROF_TLS_GET ()->small_id << 16 && "Why don't we hold the exclusive lock?");
1386
1387         ENTER_LOG (&sync_points_ctr, logbuffer,
1388                 EVENT_SIZE /* event */ +
1389                 LEB128_SIZE /* type */
1390         );
1391
1392         emit_event (logbuffer, TYPE_META | TYPE_SYNC_POINT);
1393         emit_byte (logbuffer, type);
1394
1395         EXIT_LOG_EXPLICIT (NO_SEND);
1396
1397         send_log_unsafe (FALSE);
1398 }
1399
1400 // Assumes that the exclusive lock is held.
1401 static void
1402 sync_point (MonoProfilerSyncPointType type)
1403 {
1404         sync_point_flush ();
1405         sync_point_mark (type);
1406 }
1407
1408 static int
1409 gc_reference (MonoObject *obj, MonoClass *klass, uintptr_t size, uintptr_t num, MonoObject **refs, uintptr_t *offsets, void *data)
1410 {
1411         /* account for object alignment in the heap */
1412         size += 7;
1413         size &= ~7;
1414
1415         ENTER_LOG (&heap_objects_ctr, logbuffer,
1416                 EVENT_SIZE /* event */ +
1417                 LEB128_SIZE /* obj */ +
1418                 LEB128_SIZE /* klass */ +
1419                 LEB128_SIZE /* size */ +
1420                 LEB128_SIZE /* num */ +
1421                 num * (
1422                         LEB128_SIZE /* offset */ +
1423                         LEB128_SIZE /* ref */
1424                 )
1425         );
1426
1427         emit_event (logbuffer, TYPE_HEAP_OBJECT | TYPE_HEAP);
1428         emit_obj (logbuffer, obj);
1429         emit_ptr (logbuffer, klass);
1430         emit_value (logbuffer, size);
1431         emit_value (logbuffer, num);
1432
1433         uintptr_t last_offset = 0;
1434
1435         for (int i = 0; i < num; ++i) {
1436                 emit_value (logbuffer, offsets [i] - last_offset);
1437                 last_offset = offsets [i];
1438                 emit_obj (logbuffer, refs [i]);
1439         }
1440
1441         EXIT_LOG_EXPLICIT (DO_SEND);
1442
1443         return 0;
1444 }
1445
1446 static unsigned int hs_mode_ms = 0;
1447 static unsigned int hs_mode_gc = 0;
1448 static unsigned int hs_mode_ondemand = 0;
1449 static unsigned int gc_count = 0;
1450 static uint64_t last_hs_time = 0;
1451 static gboolean do_heap_walk = FALSE;
1452 static gboolean ignore_heap_events;
1453
1454 static void
1455 gc_roots (MonoProfiler *prof, int num, void **objects, int *root_types, uintptr_t *extra_info)
1456 {
1457         if (ignore_heap_events)
1458                 return;
1459
1460         ENTER_LOG (&heap_roots_ctr, logbuffer,
1461                 EVENT_SIZE /* event */ +
1462                 LEB128_SIZE /* num */ +
1463                 LEB128_SIZE /* collections */ +
1464                 num * (
1465                         LEB128_SIZE /* object */ +
1466                         LEB128_SIZE /* root type */ +
1467                         LEB128_SIZE /* extra info */
1468                 )
1469         );
1470
1471         emit_event (logbuffer, TYPE_HEAP_ROOT | TYPE_HEAP);
1472         emit_value (logbuffer, num);
1473         emit_value (logbuffer, mono_gc_collection_count (mono_gc_max_generation ()));
1474
1475         for (int i = 0; i < num; ++i) {
1476                 emit_obj (logbuffer, objects [i]);
1477                 emit_byte (logbuffer, root_types [i]);
1478                 emit_value (logbuffer, extra_info [i]);
1479         }
1480
1481         EXIT_LOG_EXPLICIT (DO_SEND);
1482 }
1483
1484
1485 static void
1486 trigger_on_demand_heapshot (void)
1487 {
1488         if (heapshot_requested)
1489                 mono_gc_collect (mono_gc_max_generation ());
1490 }
1491
1492 #define ALL_GC_EVENTS_MASK (PROFLOG_GC_MOVES_EVENTS | PROFLOG_GC_ROOT_EVENTS | PROFLOG_GC_EVENTS | PROFLOG_HEAPSHOT_FEATURE)
1493
1494 static void
1495 gc_event (MonoProfiler *profiler, MonoGCEvent ev, int generation)
1496 {
1497         if (ev == MONO_GC_EVENT_START) {
1498                 uint64_t now = current_time ();
1499
1500                 if (hs_mode_ms && (now - last_hs_time) / 1000 / 1000 >= hs_mode_ms) /* elapsed ns -> ms */
1501                         do_heap_walk = TRUE;
1502                 else if (hs_mode_gc && !(gc_count % hs_mode_gc))
1503                         do_heap_walk = TRUE;
1504                 else if (hs_mode_ondemand)
1505                         do_heap_walk = heapshot_requested;
1506                 else if (!hs_mode_ms && !hs_mode_gc && generation == mono_gc_max_generation ())
1507                         do_heap_walk = TRUE;
1508
1509                 // When heapshotting, ignore heap events for collections we don't care about.
1510                 if (ENABLED (PROFLOG_HEAPSHOT_FEATURE)) {
1511                         // Ignore events generated during the collection itself (i.e. GC roots).
1512                         ignore_heap_events = !do_heap_walk;
1513                 }
1514         }
1515
1516
1517         if (ENABLED (PROFLOG_GC_EVENTS)) {
1518                 ENTER_LOG (&gc_events_ctr, logbuffer,
1519                         EVENT_SIZE /* event */ +
1520                         BYTE_SIZE /* gc event */ +
1521                         BYTE_SIZE /* generation */
1522                 );
1523
1524                 emit_event (logbuffer, TYPE_GC_EVENT | TYPE_GC);
1525                 emit_byte (logbuffer, ev);
1526                 emit_byte (logbuffer, generation);
1527
1528                 EXIT_LOG_EXPLICIT (NO_SEND);
1529         }
1530
1531         switch (ev) {
1532         case MONO_GC_EVENT_START:
1533                 if (generation == mono_gc_max_generation ())
1534                         gc_count++;
1535
1536                 break;
1537         case MONO_GC_EVENT_PRE_STOP_WORLD_LOCKED:
1538                 /*
1539                  * Ensure that no thread can be in the middle of writing to
1540                  * a buffer when the world stops...
1541                  */
1542                 buffer_lock_excl ();
1543                 break;
1544         case MONO_GC_EVENT_POST_STOP_WORLD:
1545                 /*
1546                  * ... So that we now have a consistent view of all buffers.
1547                  * This allows us to flush them. We need to do this because
1548                  * they may contain object allocation events that need to be
1549                  * committed to the log file before any object move events
1550                  * that will be produced during this GC.
1551                  */
1552                 if (ENABLED (ALL_GC_EVENTS_MASK))
1553                         sync_point (SYNC_POINT_WORLD_STOP);
1554
1555                 /*
1556                  * All heap events are surrounded by a HEAP_START and a HEAP_END event.
1557                  * Right now, that's the case for GC moves, GC roots, and heapshots.
1558                  */
1559                 if (ENABLED (PROFLOG_GC_MOVES_EVENTS | PROFLOG_GC_ROOT_EVENTS) || do_heap_walk) {
1560                         ENTER_LOG (&heap_starts_ctr, logbuffer,
1561                                 EVENT_SIZE /* event */
1562                         );
1563
1564                         emit_event (logbuffer, TYPE_HEAP_START | TYPE_HEAP);
1565
1566                         EXIT_LOG_EXPLICIT (DO_SEND);
1567                 }
1568
1569                 break;
1570         case MONO_GC_EVENT_PRE_START_WORLD:
1571                 if (do_heap_shot && do_heap_walk)
1572                         mono_gc_walk_heap (0, gc_reference, NULL);
1573
1574                 /* Matching HEAP_END to the HEAP_START from above */
1575                 if (ENABLED (PROFLOG_GC_MOVES_EVENTS | PROFLOG_GC_ROOT_EVENTS) || do_heap_walk) {
1576                         ENTER_LOG (&heap_ends_ctr, logbuffer,
1577                                 EVENT_SIZE /* event */
1578                         );
1579
1580                         emit_event (logbuffer, TYPE_HEAP_END | TYPE_HEAP);
1581
1582                         EXIT_LOG_EXPLICIT (DO_SEND);
1583                 }
1584
1585                 if (do_heap_shot && do_heap_walk) {
1586                         do_heap_walk = FALSE;
1587                         heapshot_requested = 0;
1588                         last_hs_time = current_time ();
1589                 }
1590
1591                 /*
1592                  * Similarly, we must now make sure that any object moves
1593                  * written to the GC thread's buffer are flushed. Otherwise,
1594                  * object allocation events for certain addresses could come
1595                  * after the move events that made those addresses available.
1596                  */
1597                 if (ENABLED (ALL_GC_EVENTS_MASK))
1598                         sync_point_mark (SYNC_POINT_WORLD_START);
1599                 break;
1600         case MONO_GC_EVENT_POST_START_WORLD_UNLOCKED:
1601                 /*
1602                  * Finally, it is safe to allow other threads to write to
1603                  * their buffers again.
1604                  */
1605                 buffer_unlock_excl ();
1606                 break;
1607         default:
1608                 break;
1609         }
1610 }
1611
1612 static void
1613 gc_resize (MonoProfiler *profiler, int64_t new_size)
1614 {
1615         ENTER_LOG (&gc_resizes_ctr, logbuffer,
1616                 EVENT_SIZE /* event */ +
1617                 LEB128_SIZE /* new size */
1618         );
1619
1620         emit_event (logbuffer, TYPE_GC_RESIZE | TYPE_GC);
1621         emit_value (logbuffer, new_size);
1622
1623         EXIT_LOG_EXPLICIT (DO_SEND);
1624 }
1625
1626 typedef struct {
1627         int count;
1628         MonoMethod* methods [MAX_FRAMES];
1629         int32_t il_offsets [MAX_FRAMES];
1630         int32_t native_offsets [MAX_FRAMES];
1631 } FrameData;
1632
1633 static int num_frames = MAX_FRAMES;
1634
1635 static mono_bool
1636 walk_stack (MonoMethod *method, int32_t native_offset, int32_t il_offset, mono_bool managed, void* data)
1637 {
1638         FrameData *frame = (FrameData *)data;
1639         if (method && frame->count < num_frames) {
1640                 frame->il_offsets [frame->count] = il_offset;
1641                 frame->native_offsets [frame->count] = native_offset;
1642                 frame->methods [frame->count++] = method;
1643                 //printf ("In %d %s at %d (native: %d)\n", frame->count, mono_method_get_name (method), il_offset, native_offset);
1644         }
1645         return frame->count == num_frames;
1646 }
1647
1648 /*
1649  * a note about stack walks: they can cause more profiler events to fire,
1650  * so we need to make sure they don't happen after we started emitting an
1651  * event, hence the collect_bt/emit_bt split.
1652  */
1653 static void
1654 collect_bt (FrameData *data)
1655 {
1656         data->count = 0;
1657         mono_stack_walk_no_il (walk_stack, data);
1658 }
1659
1660 static void
1661 emit_bt (MonoProfiler *prof, LogBuffer *logbuffer, FrameData *data)
1662 {
1663         if (data->count > num_frames)
1664                 printf ("bad num frames: %d\n", data->count);
1665
1666         emit_value (logbuffer, data->count);
1667
1668         while (data->count)
1669                 emit_method (logbuffer, data->methods [--data->count]);
1670 }
1671
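/*
 * Emits an allocation event for a newly allocated object. The size is
 * rounded up to 8-byte alignment, as in gc_reference (). A backtrace is
 * attached only when call instrumentation is disabled (nocalls), the
 * runtime is initialized, and traces have not been turned off.
 */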
1672 static void
1673 gc_alloc (MonoProfiler *prof, MonoObject *obj, MonoClass *klass)
1674 {
1675         int do_bt = (nocalls && InterlockedRead (&runtime_inited) && !notraces) ? TYPE_ALLOC_BT : 0;
1676         FrameData data;
1677         uintptr_t len = mono_object_get_size (obj);
1678         /* account for object alignment in the heap */
1679         len += 7;
1680         len &= ~7;
1681
1682         if (do_bt)
1683                 collect_bt (&data);
1684
1685         ENTER_LOG (&gc_allocs_ctr, logbuffer,
1686                 EVENT_SIZE /* event */ +
1687                 LEB128_SIZE /* klass */ +
1688                 LEB128_SIZE /* obj */ +
1689                 LEB128_SIZE /* size */ +
1690                 (do_bt ? (
1691                         LEB128_SIZE /* count */ +
1692                         data.count * (
1693                                 LEB128_SIZE /* method */
1694                         )
1695                 ) : 0)
1696         );
1697
1698         emit_event (logbuffer, do_bt | TYPE_ALLOC);
1699         emit_ptr (logbuffer, klass);
1700         emit_obj (logbuffer, obj);
1701         emit_value (logbuffer, len);
1702
1703         if (do_bt)
1704                 emit_bt (prof, logbuffer, &data);
1705
1706         EXIT_LOG;
1707 }
1708
1709 static void
1710 gc_moves (MonoProfiler *prof, void **objects, int num)
1711 {
1712         ENTER_LOG (&gc_moves_ctr, logbuffer,
1713                 EVENT_SIZE /* event */ +
1714                 LEB128_SIZE /* num */ +
1715                 num * (
1716                         LEB128_SIZE /* object */
1717                 )
1718         );
1719
1720         emit_event (logbuffer, TYPE_GC_MOVE | TYPE_GC);
1721         emit_value (logbuffer, num);
1722
1723         for (int i = 0; i < num; ++i)
1724                 emit_obj (logbuffer, objects [i]);
1725
1726         EXIT_LOG_EXPLICIT (DO_SEND);
1727 }
1728
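/*
 * Emits a GC handle creation or destruction event. Creation events also
 * carry the target object, and both variants can carry a backtrace under
 * the same conditions as gc_alloc ().
 */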
1729 static void
1730 gc_handle (MonoProfiler *prof, int op, int type, uintptr_t handle, MonoObject *obj)
1731 {
1732         int do_bt = nocalls && InterlockedRead (&runtime_inited) && !notraces;
1733         FrameData data;
1734
1735         if (do_bt)
1736                 collect_bt (&data);
1737
1738         gint32 *ctr = op == MONO_PROFILER_GC_HANDLE_CREATED ? &gc_handle_creations_ctr : &gc_handle_deletions_ctr;
1739
1740         ENTER_LOG (ctr, logbuffer,
1741                 EVENT_SIZE /* event */ +
1742                 LEB128_SIZE /* type */ +
1743                 LEB128_SIZE /* handle */ +
1744                 (op == MONO_PROFILER_GC_HANDLE_CREATED ? (
1745                         LEB128_SIZE /* obj */
1746                 ) : 0) +
1747                 (do_bt ? (
1748                         LEB128_SIZE /* count */ +
1749                         data.count * (
1750                                 LEB128_SIZE /* method */
1751                         )
1752                 ) : 0)
1753         );
1754
1755         if (op == MONO_PROFILER_GC_HANDLE_CREATED)
1756                 emit_event (logbuffer, (do_bt ? TYPE_GC_HANDLE_CREATED_BT : TYPE_GC_HANDLE_CREATED) | TYPE_GC);
1757         else if (op == MONO_PROFILER_GC_HANDLE_DESTROYED)
1758                 emit_event (logbuffer, (do_bt ? TYPE_GC_HANDLE_DESTROYED_BT : TYPE_GC_HANDLE_DESTROYED) | TYPE_GC);
1759         else
1760                 g_assert_not_reached ();
1761
1762         emit_value (logbuffer, type);
1763         emit_value (logbuffer, handle);
1764
1765         if (op == MONO_PROFILER_GC_HANDLE_CREATED)
1766                 emit_obj (logbuffer, obj);
1767
1768         if (do_bt)
1769                 emit_bt (prof, logbuffer, &data);
1770
1771         EXIT_LOG;
1772 }
1773
1774 static void
1775 finalize_begin (MonoProfiler *prof)
1776 {
1777         ENTER_LOG (&finalize_begins_ctr, buf,
1778                 EVENT_SIZE /* event */
1779         );
1780
1781         emit_event (buf, TYPE_GC_FINALIZE_START | TYPE_GC);
1782
1783         EXIT_LOG;
1784 }
1785
1786 static void
1787 finalize_end (MonoProfiler *prof)
1788 {
1789         trigger_on_demand_heapshot ();
1790         if (ENABLED (PROFLOG_FINALIZATION_EVENTS)) {
1791                 ENTER_LOG (&finalize_ends_ctr, buf,
1792                         EVENT_SIZE /* event */
1793                 );
1794
1795                 emit_event (buf, TYPE_GC_FINALIZE_END | TYPE_GC);
1796
1797                 EXIT_LOG;
1798         }
1799 }
1800
1801 static void
1802 finalize_object_begin (MonoProfiler *prof, MonoObject *obj)
1803 {
1804         ENTER_LOG (&finalize_object_begins_ctr, buf,
1805                 EVENT_SIZE /* event */ +
1806                 LEB128_SIZE /* obj */
1807         );
1808
1809         emit_event (buf, TYPE_GC_FINALIZE_OBJECT_START | TYPE_GC);
1810         emit_obj (buf, obj);
1811
1812         EXIT_LOG;
1813 }
1814
1815 static void
1816 finalize_object_end (MonoProfiler *prof, MonoObject *obj)
1817 {
1818         ENTER_LOG (&finalize_object_ends_ctr, buf,
1819                 EVENT_SIZE /* event */ +
1820                 LEB128_SIZE /* obj */
1821         );
1822
1823         emit_event (buf, TYPE_GC_FINALIZE_OBJECT_END | TYPE_GC);
1824         emit_obj (buf, obj);
1825
1826         EXIT_LOG;
1827 }
1828
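/*
 * push_nesting () and type_name () build a namespace-qualified class name
 * (with '/' separating nested types) without going through the runtime's
 * type APIs. class_loaded () falls back to this before the runtime is
 * initialized; the fixed 1024-byte buffer is presumably large enough for
 * the names seen that early during startup.
 */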
1829 static char*
1830 push_nesting (char *p, MonoClass *klass)
1831 {
1832         MonoClass *nesting;
1833         const char *name;
1834         const char *nspace;
1835         nesting = mono_class_get_nesting_type (klass);
1836         if (nesting) {
1837                 p = push_nesting (p, nesting);
1838                 *p++ = '/';
1839                 *p = 0;
1840         }
1841         name = mono_class_get_name (klass);
1842         nspace = mono_class_get_namespace (klass);
1843         if (*nspace) {
1844                 strcpy (p, nspace);
1845                 p += strlen (nspace);
1846                 *p++ = '.';
1847                 *p = 0;
1848         }
1849         strcpy (p, name);
1850         p += strlen (name);
1851         return p;
1852 }
1853
1854 static char*
1855 type_name (MonoClass *klass)
1856 {
1857         char buf [1024];
1858         char *p;
1859         push_nesting (buf, klass);
1860         p = (char *) g_malloc (strlen (buf) + 1);
1861         strcpy (p, buf);
1862         return p;
1863 }
1864
1865 static void
1866 image_loaded (MonoProfiler *prof, MonoImage *image, int result)
1867 {
1868         if (result != MONO_PROFILE_OK)
1869                 return;
1870
1871         const char *name = mono_image_get_filename (image);
1872         int nlen = strlen (name) + 1;
1873
1874         ENTER_LOG (&image_loads_ctr, logbuffer,
1875                 EVENT_SIZE /* event */ +
1876                 BYTE_SIZE /* type */ +
1877                 LEB128_SIZE /* image */ +
1878                 nlen /* name */
1879         );
1880
1881         emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
1882         emit_byte (logbuffer, TYPE_IMAGE);
1883         emit_ptr (logbuffer, image);
1884         memcpy (logbuffer->cursor, name, nlen);
1885         logbuffer->cursor += nlen;
1886
1887         EXIT_LOG;
1888 }
1889
1890 static void
1891 image_unloaded (MonoProfiler *prof, MonoImage *image)
1892 {
1893         const char *name = mono_image_get_filename (image);
1894         int nlen = strlen (name) + 1;
1895
1896         ENTER_LOG (&image_unloads_ctr, logbuffer,
1897                 EVENT_SIZE /* event */ +
1898                 BYTE_SIZE /* type */ +
1899                 LEB128_SIZE /* image */ +
1900                 nlen /* name */
1901         );
1902
1903         emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
1904         emit_byte (logbuffer, TYPE_IMAGE);
1905         emit_ptr (logbuffer, image);
1906         memcpy (logbuffer->cursor, name, nlen);
1907         logbuffer->cursor += nlen;
1908
1909         EXIT_LOG;
1910 }
1911
1912 static void
1913 assembly_loaded (MonoProfiler *prof, MonoAssembly *assembly, int result)
1914 {
1915         if (result != MONO_PROFILE_OK)
1916                 return;
1917
1918         char *name = mono_stringify_assembly_name (mono_assembly_get_name (assembly));
1919         int nlen = strlen (name) + 1;
1920         MonoImage *image = mono_assembly_get_image (assembly);
1921
1922         ENTER_LOG (&assembly_loads_ctr, logbuffer,
1923                 EVENT_SIZE /* event */ +
1924                 BYTE_SIZE /* type */ +
1925                 LEB128_SIZE /* assembly */ +
1926                 LEB128_SIZE /* image */ +
1927                 nlen /* name */
1928         );
1929
1930         emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
1931         emit_byte (logbuffer, TYPE_ASSEMBLY);
1932         emit_ptr (logbuffer, assembly);
1933         emit_ptr (logbuffer, image);
1934         memcpy (logbuffer->cursor, name, nlen);
1935         logbuffer->cursor += nlen;
1936
1937         EXIT_LOG;
1938
1939         mono_free (name);
1940 }
1941
1942 static void
1943 assembly_unloaded (MonoProfiler *prof, MonoAssembly *assembly)
1944 {
1945         char *name = mono_stringify_assembly_name (mono_assembly_get_name (assembly));
1946         int nlen = strlen (name) + 1;
1947         MonoImage *image = mono_assembly_get_image (assembly);
1948
1949         ENTER_LOG (&assembly_unloads_ctr, logbuffer,
1950                 EVENT_SIZE /* event */ +
1951                 BYTE_SIZE /* type */ +
1952                 LEB128_SIZE /* assembly */ +
1953                 LEB128_SIZE /* image */ +
1954                 nlen /* name */
1955         );
1956
1957         emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
1958         emit_byte (logbuffer, TYPE_ASSEMBLY);
1959         emit_ptr (logbuffer, assembly);
1960         emit_ptr (logbuffer, image);
1961         memcpy (logbuffer->cursor, name, nlen);
1962         logbuffer->cursor += nlen;
1963
1964         EXIT_LOG;
1965
1966         mono_free (name);
1967 }
1968
1969 static void
1970 class_loaded (MonoProfiler *prof, MonoClass *klass, int result)
1971 {
1972         if (result != MONO_PROFILE_OK)
1973                 return;
1974
1975         char *name;
1976
1977         if (InterlockedRead (&runtime_inited))
1978                 name = mono_type_get_name (mono_class_get_type (klass));
1979         else
1980                 name = type_name (klass);
1981
1982         int nlen = strlen (name) + 1;
1983         MonoImage *image = mono_class_get_image (klass);
1984
1985         ENTER_LOG (&class_loads_ctr, logbuffer,
1986                 EVENT_SIZE /* event */ +
1987                 BYTE_SIZE /* type */ +
1988                 LEB128_SIZE /* klass */ +
1989                 LEB128_SIZE /* image */ +
1990                 nlen /* name */
1991         );
1992
1993         emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
1994         emit_byte (logbuffer, TYPE_CLASS);
1995         emit_ptr (logbuffer, klass);
1996         emit_ptr (logbuffer, image);
1997         memcpy (logbuffer->cursor, name, nlen);
1998         logbuffer->cursor += nlen;
1999
2000         EXIT_LOG;
2001
2002         if (runtime_inited)
2003                 mono_free (name);
2004         else
2005                 g_free (name);
2006 }
2007
2008 static void process_method_enter_coverage (MonoProfiler *prof, MonoMethod *method);
2009
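/*
 * Method enter/leave/exception-leave callbacks. Each thread tracks its call
 * depth so that only frames up to max_call_depth are logged; the coverage
 * hook runs regardless, and only_coverage suppresses the enter/leave events
 * themselves.
 */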
2010 static void
2011 method_enter (MonoProfiler *prof, MonoMethod *method)
2012 {
2013         process_method_enter_coverage (prof, method);
2014
2015         if (!only_coverage && get_thread ()->call_depth++ <= max_call_depth) {
2016                 ENTER_LOG (&method_entries_ctr, logbuffer,
2017                         EVENT_SIZE /* event */ +
2018                         LEB128_SIZE /* method */
2019                 );
2020
2021                 emit_event (logbuffer, TYPE_ENTER | TYPE_METHOD);
2022                 emit_method (logbuffer, method);
2023
2024                 EXIT_LOG;
2025         }
2026 }
2027
2028 static void
2029 method_leave (MonoProfiler *prof, MonoMethod *method)
2030 {
2031         if (!only_coverage && --get_thread ()->call_depth <= max_call_depth) {
2032                 ENTER_LOG (&method_exits_ctr, logbuffer,
2033                         EVENT_SIZE /* event */ +
2034                         LEB128_SIZE /* method */
2035                 );
2036
2037                 emit_event (logbuffer, TYPE_LEAVE | TYPE_METHOD);
2038                 emit_method (logbuffer, method);
2039
2040                 EXIT_LOG;
2041         }
2042 }
2043
2044 static void
2045 method_exc_leave (MonoProfiler *prof, MonoMethod *method)
2046 {
2047         if (!only_coverage && !nocalls && --get_thread ()->call_depth <= max_call_depth) {
2048                 ENTER_LOG (&method_exception_exits_ctr, logbuffer,
2049                         EVENT_SIZE /* event */ +
2050                         LEB128_SIZE /* method */
2051                 );
2052
2053                 emit_event (logbuffer, TYPE_EXC_LEAVE | TYPE_METHOD);
2054                 emit_method (logbuffer, method);
2055
2056                 EXIT_LOG;
2057         }
2058 }
2059
2060 static void
2061 method_jitted (MonoProfiler *prof, MonoMethod *method, MonoJitInfo *ji, int result)
2062 {
2063         if (result != MONO_PROFILE_OK)
2064                 return;
2065
2066         register_method_local (method, ji);
2067 }
2068
2069 static void
2070 code_buffer_new (MonoProfiler *prof, void *buffer, int size, MonoProfilerCodeBufferType type, void *data)
2071 {
2072         char *name;
2073         int nlen;
2074
2075         if (type == MONO_PROFILER_CODE_BUFFER_SPECIFIC_TRAMPOLINE) {
2076                 name = (char *) data;
2077                 nlen = strlen (name) + 1;
2078         } else {
2079                 name = NULL;
2080                 nlen = 0;
2081         }
2082
2083         ENTER_LOG (&code_buffers_ctr, logbuffer,
2084                 EVENT_SIZE /* event */ +
2085                 BYTE_SIZE /* type */ +
2086                 LEB128_SIZE /* buffer */ +
2087                 LEB128_SIZE /* size */ +
2088                 (name ? (
2089                         nlen /* name */
2090                 ) : 0)
2091         );
2092
2093         emit_event (logbuffer, TYPE_JITHELPER | TYPE_RUNTIME);
2094         emit_byte (logbuffer, type);
2095         emit_ptr (logbuffer, buffer);
2096         emit_value (logbuffer, size);
2097
2098         if (name) {
2099                 memcpy (logbuffer->cursor, name, nlen);
2100                 logbuffer->cursor += nlen;
2101         }
2102
2103         EXIT_LOG;
2104 }
2105
2106 static void
2107 throw_exc (MonoProfiler *prof, MonoObject *object)
2108 {
2109         int do_bt = (nocalls && InterlockedRead (&runtime_inited) && !notraces) ? TYPE_THROW_BT : 0;
2110         FrameData data;
2111
2112         if (do_bt)
2113                 collect_bt (&data);
2114
2115         ENTER_LOG (&exception_throws_ctr, logbuffer,
2116                 EVENT_SIZE /* event */ +
2117                 LEB128_SIZE /* object */ +
2118                 (do_bt ? (
2119                         LEB128_SIZE /* count */ +
2120                         data.count * (
2121                                 LEB128_SIZE /* method */
2122                         )
2123                 ) : 0)
2124         );
2125
2126         emit_event (logbuffer, do_bt | TYPE_EXCEPTION);
2127         emit_obj (logbuffer, object);
2128
2129         if (do_bt)
2130                 emit_bt (prof, logbuffer, &data);
2131
2132         EXIT_LOG;
2133 }
2134
2135 static void
2136 clause_exc (MonoProfiler *prof, MonoMethod *method, int clause_type, int clause_num, MonoObject *exc)
2137 {
2138         ENTER_LOG (&exception_clauses_ctr, logbuffer,
2139                 EVENT_SIZE /* event */ +
2140                 BYTE_SIZE /* clause type */ +
2141                 LEB128_SIZE /* clause num */ +
2142                 LEB128_SIZE /* method */ + LEB128_SIZE /* exc */
2143         );
2144
2145         emit_event (logbuffer, TYPE_EXCEPTION | TYPE_CLAUSE);
2146         emit_byte (logbuffer, clause_type);
2147         emit_value (logbuffer, clause_num);
2148         emit_method (logbuffer, method);
2149         emit_obj (logbuffer, exc);
2150
2151         EXIT_LOG;
2152 }
2153
2154 static void
2155 monitor_event (MonoProfiler *profiler, MonoObject *object, MonoProfilerMonitorEvent ev)
2156 {
2157         int do_bt = (nocalls && InterlockedRead (&runtime_inited) && !notraces) ? TYPE_MONITOR_BT : 0;
2158         FrameData data;
2159
2160         if (do_bt)
2161                 collect_bt (&data);
2162
2163         ENTER_LOG (&monitor_events_ctr, logbuffer,
2164                 EVENT_SIZE /* event */ +
2165                 BYTE_SIZE /* ev */ +
2166                 LEB128_SIZE /* object */ +
2167                 (do_bt ? (
2168                         LEB128_SIZE /* count */ +
2169                         data.count * (
2170                                 LEB128_SIZE /* method */
2171                         )
2172                 ) : 0)
2173         );
2174
2175         emit_event (logbuffer, do_bt | TYPE_MONITOR);
2176         emit_byte (logbuffer, ev);
2177         emit_obj (logbuffer, object);
2178
2179         if (do_bt)
2180                 emit_bt (profiler, logbuffer, &data);
2181
2182         EXIT_LOG;
2183 }
2184
2185 static void
2186 thread_start (MonoProfiler *prof, uintptr_t tid)
2187 {
2188         if (ENABLED (PROFLOG_THREAD_EVENTS)) {
2189                 ENTER_LOG (&thread_starts_ctr, logbuffer,
2190                         EVENT_SIZE /* event */ +
2191                         BYTE_SIZE /* type */ +
2192                         LEB128_SIZE /* tid */
2193                 );
2194
2195                 emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
2196                 emit_byte (logbuffer, TYPE_THREAD);
2197                 emit_ptr (logbuffer, (void*) tid);
2198
2199                 EXIT_LOG;
2200         }
2201 }
2202
2203 static void
2204 thread_end (MonoProfiler *prof, uintptr_t tid)
2205 {
2206         if (ENABLED (PROFLOG_THREAD_EVENTS)) {
2207                 ENTER_LOG (&thread_ends_ctr, logbuffer,
2208                         EVENT_SIZE /* event */ +
2209                         BYTE_SIZE /* type */ +
2210                         LEB128_SIZE /* tid */
2211                 );
2212
2213                 emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
2214                 emit_byte (logbuffer, TYPE_THREAD);
2215                 emit_ptr (logbuffer, (void*) tid);
2216
2217                 EXIT_LOG_EXPLICIT (NO_SEND);
2218         }
2219
2220         MonoProfilerThread *thread = get_thread ();
2221
2222         thread->ended = TRUE;
2223         remove_thread (thread);
2224
2225         PROF_TLS_SET (NULL);
2226 }
2227
2228 static void
2229 thread_name (MonoProfiler *prof, uintptr_t tid, const char *name)
2230 {
2231         int len = strlen (name) + 1;
2232
2233         if (ENABLED (PROFLOG_THREAD_EVENTS)) {
2234                 ENTER_LOG (&thread_names_ctr, logbuffer,
2235                         EVENT_SIZE /* event */ +
2236                         BYTE_SIZE /* type */ +
2237                         LEB128_SIZE /* tid */ +
2238                         len /* name */
2239                 );
2240
2241                 emit_event (logbuffer, TYPE_METADATA);
2242                 emit_byte (logbuffer, TYPE_THREAD);
2243                 emit_ptr (logbuffer, (void*)tid);
2244                 memcpy (logbuffer->cursor, name, len);
2245                 logbuffer->cursor += len;
2246
2247                 EXIT_LOG;
2248         }
2249 }
2250
2251 static void
2252 domain_loaded (MonoProfiler *prof, MonoDomain *domain, int result)
2253 {
2254         if (result != MONO_PROFILE_OK)
2255                 return;
2256
2257         ENTER_LOG (&domain_loads_ctr, logbuffer,
2258                 EVENT_SIZE /* event */ +
2259                 BYTE_SIZE /* type */ +
2260                 LEB128_SIZE /* domain id */
2261         );
2262
2263         emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
2264         emit_byte (logbuffer, TYPE_DOMAIN);
2265         emit_ptr (logbuffer, (void*)(uintptr_t) mono_domain_get_id (domain));
2266
2267         EXIT_LOG;
2268 }
2269
2270 static void
2271 domain_unloaded (MonoProfiler *prof, MonoDomain *domain)
2272 {
2273         ENTER_LOG (&domain_unloads_ctr, logbuffer,
2274                 EVENT_SIZE /* event */ +
2275                 BYTE_SIZE /* type */ +
2276                 LEB128_SIZE /* domain id */
2277         );
2278
2279         emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
2280         emit_byte (logbuffer, TYPE_DOMAIN);
2281         emit_ptr (logbuffer, (void*)(uintptr_t) mono_domain_get_id (domain));
2282
2283         EXIT_LOG;
2284 }
2285
2286 static void
2287 domain_name (MonoProfiler *prof, MonoDomain *domain, const char *name)
2288 {
2289         int nlen = strlen (name) + 1;
2290
2291         ENTER_LOG (&domain_names_ctr, logbuffer,
2292                 EVENT_SIZE /* event */ +
2293                 BYTE_SIZE /* type */ +
2294                 LEB128_SIZE /* domain id */ +
2295                 nlen /* name */
2296         );
2297
2298         emit_event (logbuffer, TYPE_METADATA);
2299         emit_byte (logbuffer, TYPE_DOMAIN);
2300         emit_ptr (logbuffer, (void*)(uintptr_t) mono_domain_get_id (domain));
2301         memcpy (logbuffer->cursor, name, nlen);
2302         logbuffer->cursor += nlen;
2303
2304         EXIT_LOG;
2305 }
2306
2307 static void
2308 context_loaded (MonoProfiler *prof, MonoAppContext *context)
2309 {
2310         ENTER_LOG (&context_loads_ctr, logbuffer,
2311                 EVENT_SIZE /* event */ +
2312                 BYTE_SIZE /* type */ +
2313                 LEB128_SIZE /* context id */ +
2314                 LEB128_SIZE /* domain id */
2315         );
2316
2317         emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
2318         emit_byte (logbuffer, TYPE_CONTEXT);
2319         emit_ptr (logbuffer, (void*)(uintptr_t) mono_context_get_id (context));
2320         emit_ptr (logbuffer, (void*)(uintptr_t) mono_context_get_domain_id (context));
2321
2322         EXIT_LOG;
2323 }
2324
2325 static void
2326 context_unloaded (MonoProfiler *prof, MonoAppContext *context)
2327 {
2328         ENTER_LOG (&context_unloads_ctr, logbuffer,
2329                 EVENT_SIZE /* event */ +
2330                 BYTE_SIZE /* type */ +
2331                 LEB128_SIZE /* context id */ +
2332                 LEB128_SIZE /* domain id */
2333         );
2334
2335         emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
2336         emit_byte (logbuffer, TYPE_CONTEXT);
2337         emit_ptr (logbuffer, (void*)(uintptr_t) mono_context_get_id (context));
2338         emit_ptr (logbuffer, (void*)(uintptr_t) mono_context_get_domain_id (context));
2339
2340         EXIT_LOG;
2341 }
2342
2343 typedef struct {
2344         MonoMethod *method;
2345         MonoDomain *domain;
2346         void *base_address;
2347         int offset;
2348 } AsyncFrameInfo;
2349
2350 typedef struct {
2351         MonoLockFreeQueueNode node;
2352         MonoProfiler *prof;
2353         uint64_t time;
2354         uintptr_t tid;
2355         void *ip;
2356         int count;
2357         AsyncFrameInfo frames [MONO_ZERO_LEN_ARRAY];
2358 } SampleHit;
2359
2360 static mono_bool
2361 async_walk_stack (MonoMethod *method, MonoDomain *domain, void *base_address, int offset, void *data)
2362 {
2363         SampleHit *sample = (SampleHit *) data;
2364
2365         if (sample->count < num_frames) {
2366                 int i = sample->count;
2367
2368                 sample->frames [i].method = method;
2369                 sample->frames [i].domain = domain;
2370                 sample->frames [i].base_address = base_address;
2371                 sample->frames [i].offset = offset;
2372
2373                 sample->count++;
2374         }
2375
2376         return sample->count == num_frames;
2377 }
2378
2379 #define SAMPLE_SLOT_SIZE(FRAMES) (sizeof (SampleHit) + sizeof (AsyncFrameInfo) * (FRAMES - MONO_ZERO_LEN_ARRAY))
2380 #define SAMPLE_BLOCK_SIZE (mono_pagesize ())
2381
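/*
 * Sampling hot path. mono_sample_hit () runs in signal context: it takes a
 * SampleHit from the lock-free reuse queue (allocating a new one only while
 * under the max_allocated_sample_hits cap), records the async stack walk,
 * timestamp, thread id, and instruction pointer, and hands the sample to the
 * dumper thread via mono_thread_hazardous_try_free (), which invokes
 * enqueue_sample_hit () once no hazard pointers reference the sample.
 */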
2382 static void
2383 enqueue_sample_hit (gpointer p)
2384 {
2385         SampleHit *sample = p;
2386
2387         mono_lock_free_queue_node_unpoison (&sample->node);
2388         mono_lock_free_queue_enqueue (&sample->prof->dumper_queue, &sample->node);
2389         mono_os_sem_post (&sample->prof->dumper_queue_sem);
2390 }
2391
2392 static void
2393 mono_sample_hit (MonoProfiler *profiler, unsigned char *ip, void *context)
2394 {
2395         /*
2396          * Please note: We rely on the runtime loading the profiler with
2397          * MONO_DL_EAGER (RTLD_NOW) so that references to runtime functions within
2398          * this function (and its siblings) are resolved when the profiler is
2399          * loaded. Otherwise, we would potentially invoke the dynamic linker when
2400          * invoking runtime functions, which is not async-signal-safe.
2401          */
2402
2403         if (InterlockedRead (&in_shutdown))
2404                 return;
2405
2406         SampleHit *sample = (SampleHit *) mono_lock_free_queue_dequeue (&profiler->sample_reuse_queue);
2407
2408         if (!sample) {
2409                 /*
2410                  * If we're out of reusable sample events and we're not allowed to
2411                  * allocate more, we have no choice but to drop the event.
2412                  */
2413                 if (InterlockedRead (&sample_allocations_ctr) >= max_allocated_sample_hits)
2414                         return;
2415
2416                 sample = mono_lock_free_alloc (&profiler->sample_allocator);
2417                 sample->prof = profiler;
2418                 mono_lock_free_queue_node_init (&sample->node, TRUE);
2419
2420                 InterlockedIncrement (&sample_allocations_ctr);
2421         }
2422
2423         sample->count = 0;
2424         mono_stack_walk_async_safe (&async_walk_stack, context, sample);
2425
2426         sample->time = current_time ();
2427         sample->tid = thread_id ();
2428         sample->ip = ip;
2429
2430         mono_thread_hazardous_try_free (sample, enqueue_sample_hit);
2431 }
2432
2433 static uintptr_t *code_pages = 0;
2434 static int num_code_pages = 0;
2435 static int size_code_pages = 0;
2436 #define CPAGE_SHIFT (9)
2437 #define CPAGE_SIZE (1 << CPAGE_SHIFT)
2438 #define CPAGE_MASK (~(CPAGE_SIZE - 1))
2439 #define CPAGE_ADDR(p) ((p) & CPAGE_MASK)
2440
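/*
 * Code page tracking for unmanaged symbol dumping: a simple open-addressing
 * hash table of page-aligned code addresses. add_code_page () probes
 * linearly from the page's hash slot and returns 1 only when a new page was
 * inserted; add_code_pointer () grows the table (doubling, starting at 16
 * slots) once it is half full.
 */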
2441 static uintptr_t
2442 add_code_page (uintptr_t *hash, uintptr_t hsize, uintptr_t page)
2443 {
2444         uintptr_t i;
2445         uintptr_t start_pos;
2446         start_pos = (page >> CPAGE_SHIFT) % hsize;
2447         i = start_pos;
2448         do {
2449                 if (hash [i] && CPAGE_ADDR (hash [i]) == CPAGE_ADDR (page)) {
2450                         return 0;
2451                 } else if (!hash [i]) {
2452                         hash [i] = page;
2453                         return 1;
2454                 }
2455                 /* wrap around */
2456                 if (++i == hsize)
2457                         i = 0;
2458         } while (i != start_pos);
2459         /* should not happen */
2460         printf ("failed code page store\n");
2461         return 0;
2462 }
2463
2464 static void
2465 add_code_pointer (uintptr_t ip)
2466 {
2467         uintptr_t i;
2468         if (num_code_pages * 2 >= size_code_pages) {
2469                 uintptr_t *n;
2470                 uintptr_t old_size = size_code_pages;
2471                 size_code_pages *= 2;
2472                 if (size_code_pages == 0)
2473                         size_code_pages = 16;
2474                 n = (uintptr_t *) g_calloc (sizeof (uintptr_t) * size_code_pages, 1);
2475                 for (i = 0; i < old_size; ++i) {
2476                         if (code_pages [i])
2477                                 add_code_page (n, size_code_pages, code_pages [i]);
2478                 }
2479                 if (code_pages)
2480                         g_free (code_pages);
2481                 code_pages = n;
2482         }
2483         num_code_pages += add_code_page (code_pages, size_code_pages, ip & CPAGE_MASK);
2484 }
2485
2486 /* ELF code crashes on some systems. */
2487 //#if defined(HAVE_DL_ITERATE_PHDR) && defined(ELFMAG0)
2488 #if 0
2489 static void
2490 dump_ubin (MonoProfiler *prof, const char *filename, uintptr_t load_addr, uint64_t offset, uintptr_t size)
2491 {
2492         int len = strlen (filename) + 1;
2493
2494         ENTER_LOG (&sample_ubins_ctr, logbuffer,
2495                 EVENT_SIZE /* event */ +
2496                 LEB128_SIZE /* load address */ +
2497                 LEB128_SIZE /* offset */ +
2498                 LEB128_SIZE /* size */ +
2499                 len /* file name */
2500         );
2501
2502         emit_event (logbuffer, TYPE_SAMPLE | TYPE_SAMPLE_UBIN);
2503         emit_ptr (logbuffer, load_addr);
2504         emit_uvalue (logbuffer, offset);
2505         emit_uvalue (logbuffer, size);
2506         memcpy (logbuffer->cursor, filename, len);
2507         logbuffer->cursor += len;
2508
2509         EXIT_LOG_EXPLICIT (DO_SEND);
2510 }
2511 #endif
2512
2513 static void
2514 dump_usym (MonoProfiler *prof, const char *name, uintptr_t value, uintptr_t size)
2515 {
2516         int len = strlen (name) + 1;
2517
2518         ENTER_LOG (&sample_usyms_ctr, logbuffer,
2519                 EVENT_SIZE /* event */ +
2520                 LEB128_SIZE /* value */ +
2521                 LEB128_SIZE /* size */ +
2522                 len /* name */
2523         );
2524
2525         emit_event (logbuffer, TYPE_SAMPLE | TYPE_SAMPLE_USYM);
2526         emit_ptr (logbuffer, (void*)value);
2527         emit_value (logbuffer, size);
2528         memcpy (logbuffer->cursor, name, len);
2529         logbuffer->cursor += len;
2530
2531         EXIT_LOG_EXPLICIT (DO_SEND);
2532 }
2533
2534 /* ELF code crashes on some systems. */
2535 //#if defined(ELFMAG0)
2536 #if 0
2537
2538 #if SIZEOF_VOID_P == 4
2539 #define ELF_WSIZE 32
2540 #else
2541 #define ELF_WSIZE 64
2542 #endif
2543 #ifndef ElfW
2544 #define ElfW(type)      _ElfW (Elf, ELF_WSIZE, type)
2545 #define _ElfW(e,w,t)    _ElfW_1 (e, w, _##t)
2546 #define _ElfW_1(e,w,t)  e##w##t
2547 #endif
2548
2549 static void
2550 dump_elf_symbols (MonoProfiler *prof, ElfW(Sym) *symbols, int num_symbols, const char *strtab, void *load_addr)
2551 {
2552         int i;
2553         for (i = 0; i < num_symbols; ++i) {
2554                 const char* sym;
2555                 sym =  strtab + symbols [i].st_name;
2556                 if (!symbols [i].st_name || !symbols [i].st_size || (symbols [i].st_info & 0xf) != STT_FUNC)
2557                         continue;
2558                 //printf ("symbol %s at %d\n", sym, symbols [i].st_value);
2559                 dump_usym (prof, sym, (uintptr_t)load_addr + symbols [i].st_value, symbols [i].st_size);
2560         }
2561 }
2562
2563 static int
2564 read_elf_symbols (MonoProfiler *prof, const char *filename, void *load_addr)
2565 {
2566         int fd, i;
2567         void *data;
2568         struct stat statb;
2569         uint64_t file_size;
2570         ElfW(Ehdr) *header;
2571         ElfW(Shdr) *sheader;
2572         ElfW(Shdr) *shstrtabh;
2573         ElfW(Shdr) *symtabh = NULL;
2574         ElfW(Shdr) *strtabh = NULL;
2575         ElfW(Sym) *symbols = NULL;
2576         const char *strtab;
2577         int num_symbols;
2578
2579         fd = open (filename, O_RDONLY);
2580         if (fd < 0)
2581                 return 0;
2582         if (fstat (fd, &statb) != 0) {
2583                 close (fd);
2584                 return 0;
2585         }
2586         file_size = statb.st_size;
2587         data = mmap (NULL, file_size, PROT_READ, MAP_PRIVATE, fd, 0);
2588         close (fd);
2589         if (data == MAP_FAILED)
2590                 return 0;
2591         header = data;
2592         if (header->e_ident [EI_MAG0] != ELFMAG0 ||
2593                         header->e_ident [EI_MAG1] != ELFMAG1 ||
2594                         header->e_ident [EI_MAG2] != ELFMAG2 ||
2595                         header->e_ident [EI_MAG3] != ELFMAG3 ) {
2596                 munmap (data, file_size);
2597                 return 0;
2598         }
2599         sheader = (void*)((char*)data + header->e_shoff);
2600         shstrtabh = (void*)((char*)sheader + (header->e_shentsize * header->e_shstrndx));
2601         strtab = (const char*)data + shstrtabh->sh_offset;
2602         for (i = 0; i < header->e_shnum; ++i) {
2603                 //printf ("section header: %d\n", sheader->sh_type);
2604                 if (sheader->sh_type == SHT_SYMTAB) {
2605                         symtabh = sheader;
2606                         strtabh = (void*)((char*)data + header->e_shoff + sheader->sh_link * header->e_shentsize);
2607                         /*printf ("symtab section header: %d, .strstr: %d\n", i, sheader->sh_link);*/
2608                         break;
2609                 }
2610                 sheader = (void*)((char*)sheader + header->e_shentsize);
2611         }
2612         if (!symtabh || !strtabh) {
2613                 munmap (data, file_size);
2614                 return 0;
2615         }
2616         strtab = (const char*)data + strtabh->sh_offset;
2617         num_symbols = symtabh->sh_size / symtabh->sh_entsize;
2618         symbols = (void*)((char*)data + symtabh->sh_offset);
2619         dump_elf_symbols (prof, symbols, num_symbols, strtab, load_addr);
2620         munmap (data, file_size);
2621         return 1;
2622 }
2623 #endif
2624
2625 /* ELF code crashes on some systems. */
2626 //#if defined(HAVE_DL_ITERATE_PHDR) && defined(ELFMAG0)
2627 #if 0
2628 static int
2629 elf_dl_callback (struct dl_phdr_info *info, size_t size, void *data)
2630 {
2631         MonoProfiler *prof = data;
2632         char buf [256];
2633         const char *filename;
2634         BinaryObject *obj;
2635         char *a = (void*)info->dlpi_addr;
2636         int i, num_sym;
2637         ElfW(Dyn) *dyn = NULL;
2638         ElfW(Sym) *symtab = NULL;
2639         ElfW(Word) *hash_table = NULL;
2640         ElfW(Ehdr) *header = NULL;
2641         const char* strtab = NULL;
2642         for (obj = prof->binary_objects; obj; obj = obj->next) {
2643                 if (obj->addr == a)
2644                         return 0;
2645         }
2646         filename = info->dlpi_name;
2647         if (!filename)
2648                 return 0;
2649         if (!info->dlpi_addr && !filename [0]) {
2650                 int l = readlink ("/proc/self/exe", buf, sizeof (buf) - 1);
2651                 if (l > 0) {
2652                         buf [l] = 0;
2653                         filename = buf;
2654                 }
2655         }
2656         obj = g_calloc (sizeof (BinaryObject), 1);
2657         obj->addr = (void*)info->dlpi_addr;
2658         obj->name = pstrdup (filename);
2659         obj->next = prof->binary_objects;
2660         prof->binary_objects = obj;
2661         //printf ("loaded file: %s at %p, segments: %d\n", filename, (void*)info->dlpi_addr, info->dlpi_phnum);
2662         a = NULL;
2663         for (i = 0; i < info->dlpi_phnum; ++i) {
2664                 //printf ("segment type %d file offset: %d, size: %d\n", info->dlpi_phdr[i].p_type, info->dlpi_phdr[i].p_offset, info->dlpi_phdr[i].p_memsz);
2665                 if (info->dlpi_phdr[i].p_type == PT_LOAD && !header) {
2666                         header = (ElfW(Ehdr)*)(info->dlpi_addr + info->dlpi_phdr[i].p_vaddr);
2667                         if (header->e_ident [EI_MAG0] != ELFMAG0 ||
2668                                         header->e_ident [EI_MAG1] != ELFMAG1 ||
2669                                         header->e_ident [EI_MAG2] != ELFMAG2 ||
2670                                         header->e_ident [EI_MAG3] != ELFMAG3 ) {
2671                                 header = NULL;
2672                         }
2673                         dump_ubin (prof, filename, info->dlpi_addr + info->dlpi_phdr[i].p_vaddr, info->dlpi_phdr[i].p_offset, info->dlpi_phdr[i].p_memsz);
2674                 } else if (info->dlpi_phdr[i].p_type == PT_DYNAMIC) {
2675                         dyn = (ElfW(Dyn) *)(info->dlpi_addr + info->dlpi_phdr[i].p_vaddr);
2676                 }
2677         }
2678         if (read_elf_symbols (prof, filename, (void*)info->dlpi_addr))
2679                 return 0;
2680         if (!info->dlpi_name || !info->dlpi_name[0])
2681                 return 0;
2682         if (!dyn)
2683                 return 0;
2684         for (i = 0; dyn [i].d_tag != DT_NULL; ++i) {
2685                 if (dyn [i].d_tag == DT_SYMTAB) {
2686                         if (symtab && do_debug)
2687                                 printf ("multiple symtabs: %d\n", i);
2688                         symtab = (ElfW(Sym) *)(a + dyn [i].d_un.d_ptr);
2689                 } else if (dyn [i].d_tag == DT_HASH) {
2690                         hash_table = (ElfW(Word) *)(a + dyn [i].d_un.d_ptr);
2691                 } else if (dyn [i].d_tag == DT_STRTAB) {
2692                         strtab = (const char*)(a + dyn [i].d_un.d_ptr);
2693                 }
2694         }
2695         if (!hash_table)
2696                 return 0;
2697         num_sym = hash_table [1];
2698         dump_elf_symbols (prof, symtab, num_sym, strtab, (void*)info->dlpi_addr);
2699         return 0;
2700 }
2701
2702 static int
2703 load_binaries (MonoProfiler *prof)
2704 {
2705         dl_iterate_phdr (elf_dl_callback, prof);
2706         return 1;
2707 }
2708 #else
2709 static int
2710 load_binaries (MonoProfiler *prof)
2711 {
2712         return 0;
2713 }
2714 #endif
2715
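/*
 * Best-effort symbol lookup for an unmanaged code address using dladdr ()
 * when available; returns NULL when the address cannot be resolved. A
 * backtrace_symbols () fallback is left commented out.
 */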
2716 static const char*
2717 symbol_for (uintptr_t code)
2718 {
2719 #ifdef HAVE_DLADDR
2720         void *ip = (void*)code;
2721         Dl_info di;
2722         if (dladdr (ip, &di)) {
2723                 if (di.dli_sname)
2724                         return di.dli_sname;
2725         } else {
2726         /*      char **names;
2727                 names = backtrace_symbols (&ip, 1);
2728                 if (names) {
2729                         const char* p = names [0];
2730                         g_free (names);
2731                         return p;
2732                 }
2733                 */
2734         }
2735 #endif
2736         return NULL;
2737 }
2738
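/*
 * If load_binaries () manages to dump symbols from ELF images it returns
 * early; otherwise this walks all recorded code pages and emits USYM events
 * for the symbols found in them. The low bit of each stored page address
 * marks pages that have already been dumped; within a page, symbols are
 * probed every 16 bytes, repeats of the previous symbol are skipped, and
 * sizes are emitted as 0 rather than guessed.
 */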
2739 static void
2740 dump_unmanaged_coderefs (MonoProfiler *prof)
2741 {
2742         int i;
2743         const char* last_symbol;
2744         uintptr_t addr, page_end;
2745
2746         if (load_binaries (prof))
2747                 return;
2748         for (i = 0; i < size_code_pages; ++i) {
2749                 const char* sym;
2750                 if (!code_pages [i] || code_pages [i] & 1)
2751                         continue;
2752                 last_symbol = NULL;
2753                 addr = CPAGE_ADDR (code_pages [i]);
2754                 page_end = addr + CPAGE_SIZE;
2755                 code_pages [i] |= 1;
2756                 /* we dump the symbols for the whole page */
2757                 for (; addr < page_end; addr += 16) {
2758                         sym = symbol_for (addr);
2759                         if (sym && sym == last_symbol)
2760                                 continue;
2761                         last_symbol = sym;
2762                         if (!sym)
2763                                 continue;
2764                         dump_usym (prof, sym, addr, 0); /* let's not guess the size */
2765                         //printf ("found symbol at %p: %s\n", (void*)addr, sym);
2766                 }
2767         }
2768 }
2769
2770 typedef struct MonoCounterAgent {
2771         MonoCounter *counter;
2772         // MonoCounterAgent-specific data:
2773         void *value;
2774         size_t value_size;
2775         short index;
2776         short emitted;
2777         struct MonoCounterAgent *next;
2778 } MonoCounterAgent;
2779
2780 static MonoCounterAgent* counters;
2781 static int counters_index = 1;
2782 static mono_mutex_t counters_mutex;
2783
2784 static void
2785 counters_add_agent (MonoCounter *counter)
2786 {
2787         if (InterlockedRead (&in_shutdown))
2788                 return;
2789
2790         MonoCounterAgent *agent, *item;
2791
2792         mono_os_mutex_lock (&counters_mutex);
2793
2794         for (agent = counters; agent; agent = agent->next) {
2795                 if (agent->counter == counter) {
2796                         agent->value_size = 0;
2797                         if (agent->value) {
2798                                 g_free (agent->value);
2799                                 agent->value = NULL;
2800                         }
2801                         goto done;
2802                 }
2803         }
2804
2805         agent = (MonoCounterAgent *) g_malloc (sizeof (MonoCounterAgent));
2806         agent->counter = counter;
2807         agent->value = NULL;
2808         agent->value_size = 0;
2809         agent->index = counters_index++;
2810         agent->emitted = 0;
2811         agent->next = NULL;
2812
2813         if (!counters) {
2814                 counters = agent;
2815         } else {
2816                 item = counters;
2817                 while (item->next)
2818                         item = item->next;
2819                 item->next = agent;
2820         }
2821
2822 done:
2823         mono_os_mutex_unlock (&counters_mutex);
2824 }
2825
2826 static mono_bool
2827 counters_init_foreach_callback (MonoCounter *counter, gpointer data)
2828 {
2829         counters_add_agent (counter);
2830         return TRUE;
2831 }
2832
2833 static void
2834 counters_init (MonoProfiler *profiler)
2835 {
2836         mono_os_mutex_init (&counters_mutex);
2837
2838         mono_counters_on_register (&counters_add_agent);
2839         mono_counters_foreach (counters_init_foreach_callback, NULL);
2840 }
2841
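/*
 * Emits a counter descriptor event for any registered counters that have
 * not been described yet. The event is sized in a first pass over the list
 * (section, name, type, unit, variance, and index per counter) and skipped
 * entirely when there is nothing new to describe.
 */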
2842 static void
2843 counters_emit (MonoProfiler *profiler)
2844 {
2845         MonoCounterAgent *agent;
2846         int len = 0;
2847         int size =
2848                 EVENT_SIZE /* event */ +
2849                 LEB128_SIZE /* len */
2850         ;
2851
2852         mono_os_mutex_lock (&counters_mutex);
2853
2854         for (agent = counters; agent; agent = agent->next) {
2855                 if (agent->emitted)
2856                         continue;
2857
2858                 size +=
2859                         LEB128_SIZE /* section */ +
2860                         strlen (mono_counter_get_name (agent->counter)) + 1 /* name */ +
2861                         BYTE_SIZE /* type */ +
2862                         BYTE_SIZE /* unit */ +
2863                         BYTE_SIZE /* variance */ +
2864                         LEB128_SIZE /* index */
2865                 ;
2866
2867                 len++;
2868         }
2869
2870         if (!len)
2871                 goto done;
2872
2873         ENTER_LOG (&counter_descriptors_ctr, logbuffer, size);
2874
2875         emit_event (logbuffer, TYPE_SAMPLE_COUNTERS_DESC | TYPE_SAMPLE);
2876         emit_value (logbuffer, len);
2877
2878         for (agent = counters; agent; agent = agent->next) {
2879                 const char *name;
2880
2881                 if (agent->emitted)
2882                         continue;
2883
2884                 name = mono_counter_get_name (agent->counter);
2885                 emit_value (logbuffer, mono_counter_get_section (agent->counter));
2886                 emit_string (logbuffer, name, strlen (name) + 1);
2887                 emit_byte (logbuffer, mono_counter_get_type (agent->counter));
2888                 emit_byte (logbuffer, mono_counter_get_unit (agent->counter));
2889                 emit_byte (logbuffer, mono_counter_get_variance (agent->counter));
2890                 emit_value (logbuffer, agent->index);
2891
2892                 agent->emitted = 1;
2893         }
2894
2895         EXIT_LOG_EXPLICIT (DO_SEND);
2896
2897 done:
2898         mono_os_mutex_unlock (&counters_mutex);
2899 }
2900
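/*
 * Samples every registered counter and emits a TYPE_SAMPLE_COUNTERS event.
 * Integer-typed counters are emitted as deltas against the previously
 * sampled value kept in agent->value; doubles and strings are emitted as-is,
 * and unchanged values are skipped entirely. A zero index terminates the
 * event.
 */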
2901 static void
2902 counters_sample (MonoProfiler *profiler, uint64_t timestamp)
2903 {
2904         MonoCounterAgent *agent;
2905         MonoCounter *counter;
2906         int type;
2907         int buffer_size;
2908         void *buffer;
2909         int size;
2910
2911         counters_emit (profiler);
2912
2913         buffer_size = 8;
2914         buffer = g_calloc (1, buffer_size);
2915
2916         mono_os_mutex_lock (&counters_mutex);
2917
2918         size =
2919                 EVENT_SIZE /* event */
2920         ;
2921
2922         for (agent = counters; agent; agent = agent->next) {
2923                 size +=
2924                         LEB128_SIZE /* index */ +
2925                         BYTE_SIZE /* type */ +
2926                         mono_counter_get_size (agent->counter) /* value */
2927                 ;
2928         }
2929
2930         size +=
2931                 LEB128_SIZE /* stop marker */
2932         ;
2933
2934         ENTER_LOG (&counter_samples_ctr, logbuffer, size);
2935
2936         emit_event_time (logbuffer, TYPE_SAMPLE_COUNTERS | TYPE_SAMPLE, timestamp);
2937
2938         for (agent = counters; agent; agent = agent->next) {
2939                 size_t size;
2940
2941                 counter = agent->counter;
2942
2943                 size = mono_counter_get_size (counter);
2944
2945                 if (size > buffer_size) {
2946                         buffer_size = size;
2947                         buffer = g_realloc (buffer, buffer_size);
2948                 }
2949
2950                 memset (buffer, 0, buffer_size);
2951
2952                 g_assert (mono_counters_sample (counter, buffer, size));
2953
2954                 type = mono_counter_get_type (counter);
2955
2956                 if (!agent->value) {
2957                         agent->value = g_calloc (1, size);
2958                         agent->value_size = size;
2959                 } else {
2960                         if (type == MONO_COUNTER_STRING) {
2961                                 if (strcmp (agent->value, buffer) == 0)
2962                                         continue;
2963                         } else {
2964                                 if (agent->value_size == size && memcmp (agent->value, buffer, size) == 0)
2965                                         continue;
2966                         }
2967                 }
2968
2969                 emit_uvalue (logbuffer, agent->index);
2970                 emit_byte (logbuffer, type);
2971                 switch (type) {
2972                 case MONO_COUNTER_INT:
2973 #if SIZEOF_VOID_P == 4
2974                 case MONO_COUNTER_WORD:
2975 #endif
2976                         emit_svalue (logbuffer, *(int*)buffer - *(int*)agent->value);
2977                         break;
2978                 case MONO_COUNTER_UINT:
2979                         emit_uvalue (logbuffer, *(guint*)buffer - *(guint*)agent->value);
2980                         break;
2981                 case MONO_COUNTER_TIME_INTERVAL:
2982                 case MONO_COUNTER_LONG:
2983 #if SIZEOF_VOID_P == 8
2984                 case MONO_COUNTER_WORD:
2985 #endif
2986                         emit_svalue (logbuffer, *(gint64*)buffer - *(gint64*)agent->value);
2987                         break;
2988                 case MONO_COUNTER_ULONG:
2989                         emit_uvalue (logbuffer, *(guint64*)buffer - *(guint64*)agent->value);
2990                         break;
2991                 case MONO_COUNTER_DOUBLE:
2992                         emit_double (logbuffer, *(double*)buffer);
2993                         break;
2994                 case MONO_COUNTER_STRING:
2995                         if (size == 0) {
2996                                 emit_byte (logbuffer, 0);
2997                         } else {
2998                                 emit_byte (logbuffer, 1);
2999                                 emit_string (logbuffer, (char*)buffer, size);
3000                         }
3001                         break;
3002                 default:
3003                         g_assert_not_reached ();
3004                 }
3005
3006                 if (type == MONO_COUNTER_STRING && size > agent->value_size) {
3007                         agent->value = g_realloc (agent->value, size);
3008                         agent->value_size = size;
3009                 }
3010
3011                 if (size > 0)
3012                         memcpy (agent->value, buffer, size);
3013         }
3014         g_free (buffer);
3015
3016         emit_value (logbuffer, 0);
3017
3018         EXIT_LOG_EXPLICIT (DO_SEND);
3019
3020         mono_os_mutex_unlock (&counters_mutex);
3021 }
3022
3023 typedef struct _PerfCounterAgent PerfCounterAgent;
3024 struct _PerfCounterAgent {
3025         PerfCounterAgent *next;
3026         int index;
3027         char *category_name;
3028         char *name;
3029         int type;
3030         gint64 value;
3031         guint8 emitted;
3032         guint8 updated;
3033         guint8 deleted;
3034 };
3035
3036 static PerfCounterAgent *perfcounters = NULL;
3037
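/*
 * Mirrors the runtime's performance counters (as enumerated by
 * mono_perfcounter_foreach ()) into PerfCounterAgent entries and emits
 * descriptors for any that have not been announced yet, reusing the counter
 * descriptor event format with the MONO_COUNTER_PERFCOUNTERS section and a
 * LONG/RAW/VARIABLE signature.
 */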
3038 static void
3039 perfcounters_emit (MonoProfiler *profiler)
3040 {
3041         PerfCounterAgent *pcagent;
3042         int len = 0;
3043         int size =
3044                 EVENT_SIZE /* event */ +
3045                 LEB128_SIZE /* len */
3046         ;
3047
3048         for (pcagent = perfcounters; pcagent; pcagent = pcagent->next) {
3049                 if (pcagent->emitted)
3050                         continue;
3051
3052                 size +=
3053                         LEB128_SIZE /* section */ +
3054                         strlen (pcagent->category_name) + 1 /* category name */ +
3055                         strlen (pcagent->name) + 1 /* name */ +
3056                         BYTE_SIZE /* type */ +
3057                         BYTE_SIZE /* unit */ +
3058                         BYTE_SIZE /* variance */ +
3059                         LEB128_SIZE /* index */
3060                 ;
3061
3062                 len++;
3063         }
3064
3065         if (!len)
3066                 return;
3067
3068         ENTER_LOG (&perfcounter_descriptors_ctr, logbuffer, size);
3069
3070         emit_event (logbuffer, TYPE_SAMPLE_COUNTERS_DESC | TYPE_SAMPLE);
3071         emit_value (logbuffer, len);
3072
3073         for (pcagent = perfcounters; pcagent; pcagent = pcagent->next) {
3074                 if (pcagent->emitted)
3075                         continue;
3076
3077                 emit_value (logbuffer, MONO_COUNTER_PERFCOUNTERS);
3078                 emit_string (logbuffer, pcagent->category_name, strlen (pcagent->category_name) + 1);
3079                 emit_string (logbuffer, pcagent->name, strlen (pcagent->name) + 1);
3080                 emit_byte (logbuffer, MONO_COUNTER_LONG);
3081                 emit_byte (logbuffer, MONO_COUNTER_RAW);
3082                 emit_byte (logbuffer, MONO_COUNTER_VARIABLE);
3083                 emit_value (logbuffer, pcagent->index);
3084
3085                 pcagent->emitted = 1;
3086         }
3087
3088         EXIT_LOG_EXPLICIT (DO_SEND);
3089 }
3090
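/*
 * mono_perfcounter_foreach () callback: refreshes the agent matching the
 * category/name pair when its value changed, or allocates a new agent the
 * first time a counter is seen; counters whose value is unchanged are left
 * untouched.
 */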
3091 static gboolean
3092 perfcounters_foreach (char *category_name, char *name, unsigned char type, gint64 value, gpointer user_data)
3093 {
3094         PerfCounterAgent *pcagent;
3095
3096         for (pcagent = perfcounters; pcagent; pcagent = pcagent->next) {
3097                 if (strcmp (pcagent->category_name, category_name) != 0 || strcmp (pcagent->name, name) != 0)
3098                         continue;
3099                 if (pcagent->value == value)
3100                         return TRUE;
3101
3102                 pcagent->value = value;
3103                 pcagent->updated = 1;
3104                 pcagent->deleted = 0;
3105                 return TRUE;
3106         }
3107
3108         pcagent = g_new0 (PerfCounterAgent, 1);
3109         pcagent->next = perfcounters;
3110         pcagent->index = counters_index++;
3111         pcagent->category_name = g_strdup (category_name);
3112         pcagent->name = g_strdup (name);
3113         pcagent->type = (int) type;
3114         pcagent->value = value;
3115         pcagent->emitted = 0;
3116         pcagent->updated = 1;
3117         pcagent->deleted = 0;
3118
3119         perfcounters = pcagent;
3120
3121         return TRUE;
3122 }
3123
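/*
 * Samples all performance counters under the counters lock: re-enumerates
 * them, emits descriptors for newly seen counters, then writes a
 * counters-sample event containing every value that changed since the last
 * sample (each value is emitted as MONO_COUNTER_LONG).
 */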
3124 static void
3125 perfcounters_sample (MonoProfiler *profiler, uint64_t timestamp)
3126 {
3127         PerfCounterAgent *pcagent;
3128         int len = 0;
3129         int size;
3130
3131         mono_os_mutex_lock (&counters_mutex);
3132
3133         /* mark all perfcounters as deleted, foreach will unmark them as necessary */
3134         for (pcagent = perfcounters; pcagent; pcagent = pcagent->next)
3135                 pcagent->deleted = 1;
3136
3137         mono_perfcounter_foreach (perfcounters_foreach, perfcounters);
3138
3139         perfcounters_emit (profiler);
3140
3141         size =
3142                 EVENT_SIZE /* event */
3143         ;
3144
3145         for (pcagent = perfcounters; pcagent; pcagent = pcagent->next) {
3146                 if (pcagent->deleted || !pcagent->updated)
3147                         continue;
3148
3149                 size +=
3150                         LEB128_SIZE /* index */ +
3151                         BYTE_SIZE /* type */ +
3152                         LEB128_SIZE /* value */
3153                 ;
3154
3155                 len++;
3156         }
3157
3158         if (!len)
3159                 goto done;
3160
3161         size +=
3162                 LEB128_SIZE /* stop marker */
3163         ;
3164
3165         ENTER_LOG (&perfcounter_samples_ctr, logbuffer, size);
3166
3167         emit_event_time (logbuffer, TYPE_SAMPLE_COUNTERS | TYPE_SAMPLE, timestamp);
3168
3169         for (pcagent = perfcounters; pcagent; pcagent = pcagent->next) {
3170                 if (pcagent->deleted || !pcagent->updated)
3171                         continue;
3172                 emit_uvalue (logbuffer, pcagent->index);
3173                 emit_byte (logbuffer, MONO_COUNTER_LONG);
3174                 emit_svalue (logbuffer, pcagent->value);
3175
3176                 pcagent->updated = 0;
3177         }
3178
3179         emit_value (logbuffer, 0);
3180
3181         EXIT_LOG_EXPLICIT (DO_SEND);
3182
3183 done:
3184         mono_os_mutex_unlock (&counters_mutex);
3185 }
3186
3187 static void
3188 counters_and_perfcounters_sample (MonoProfiler *prof)
3189 {
3190         uint64_t now = current_time ();
3191
3192         counters_sample (prof, now);
3193         perfcounters_sample (prof, now);
3194 }
3195
3196 #define COVERAGE_DEBUG(x) if (debug_coverage) {x}
3197 static mono_mutex_t coverage_mutex;
3198 static MonoConcurrentHashTable *coverage_methods = NULL;
3199 static MonoConcurrentHashTable *coverage_assemblies = NULL;
3200 static MonoConcurrentHashTable *coverage_classes = NULL;
3201
3202 static MonoConcurrentHashTable *filtered_classes = NULL;
3203 static MonoConcurrentHashTable *entered_methods = NULL;
3204 static MonoConcurrentHashTable *image_to_methods = NULL;
3205 static MonoConcurrentHashTable *suppressed_assemblies = NULL;
3206 static gboolean coverage_initialized = FALSE;
3207
3208 static GPtrArray *coverage_data = NULL;
3209 static int previous_offset = 0;
3210
3211 typedef struct {
3212         MonoLockFreeQueueNode node;
3213         MonoMethod *method;
3214 } MethodNode;
3215
3216 typedef struct {
3217         int offset;
3218         int counter;
3219         char *filename;
3220         int line;
3221         int column;
3222 } CoverageEntry;
3223
3224 static void
3225 free_coverage_entry (gpointer data, gpointer userdata)
3226 {
3227         CoverageEntry *entry = (CoverageEntry *)data;
3228         g_free (entry->filename);
3229         g_free (entry);
3230 }
3231
3232 static void
3233 obtain_coverage_for_method (MonoProfiler *prof, const MonoProfileCoverageEntry *entry)
3234 {
3235         int offset = entry->iloffset - previous_offset;
3236         CoverageEntry *e = g_new (CoverageEntry, 1);
3237
3238         previous_offset = entry->iloffset;
3239
3240         e->offset = offset;
3241         e->counter = entry->counter;
3242         e->filename = g_strdup(entry->filename ? entry->filename : "");
3243         e->line = entry->line;
3244         e->column = entry->col;
3245
3246         g_ptr_array_add (coverage_data, e);
3247 }
3248
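/*
 * Rewrites C#-style generic type names into arity form for the coverage
 * output: "List<T>" becomes "List`1", while compiler-generated "<>" groups
 * are written out as "&lt;&gt;". Returns a newly allocated string that the
 * caller must free.
 */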
3249 static char *
3250 parse_generic_type_names(char *name)
3251 {
3252         char *new_name, *ret;
3253         int within_generic_declaration = 0, generic_members = 1;
3254
3255         if (name == NULL || *name == '\0')
3256                 return g_strdup ("");
3257
3258         if (!(ret = new_name = (char *) g_calloc (strlen (name) * 4 + 1, sizeof (char))))
3259                 return NULL;
3260
3261         do {
3262                 switch (*name) {
3263                         case '<':
3264                                 within_generic_declaration = 1;
3265                                 break;
3266
3267                         case '>':
3268                                 within_generic_declaration = 0;
3269
3270                                 if (*(name - 1) != '<') {
3271                                         *new_name++ = '`';
3272                                         *new_name++ = '0' + generic_members;
3273                                 } else {
3274                                         memcpy (new_name, "&lt;&gt;", 8);
3275                                         new_name += 8;
3276                                 }
3277
3278                                 generic_members = 0;
3279                                 break;
3280
3281                         case ',':
3282                                 generic_members++;
3283                                 break;
3284
3285                         default:
3286                                 if (!within_generic_declaration)
3287                                         *new_name++ = *name;
3288
3289                                 break;
3290                 }
3291         } while (*name++);
3292
3293         return ret;
3294 }
3295
3296 static int method_id;
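/*
 * Per-method hash table callback used by dump_coverage (): gathers the
 * coverage entries recorded for the method, emits a coverage-method event
 * describing it, then one coverage-statement event per recorded IL offset.
 */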
3297 static void
3298 build_method_buffer (gpointer key, gpointer value, gpointer userdata)
3299 {
3300         MonoMethod *method = (MonoMethod *)value;
3301         MonoProfiler *prof = (MonoProfiler *)userdata;
3302         MonoClass *klass;
3303         MonoImage *image;
3304         char *class_name;
3305         const char *image_name, *method_name, *sig, *first_filename;
3306         guint i;
3307
3308         previous_offset = 0;
3309         coverage_data = g_ptr_array_new ();
3310
3311         mono_profiler_coverage_get (prof, method, obtain_coverage_for_method);
3312
3313         klass = mono_method_get_class (method);
3314         image = mono_class_get_image (klass);
3315         image_name = mono_image_get_name (image);
3316
3317         sig = mono_signature_get_desc (mono_method_signature (method), TRUE);
3318         class_name = parse_generic_type_names (mono_type_get_name (mono_class_get_type (klass)));
3319         method_name = mono_method_get_name (method);
3320
3321         if (coverage_data->len != 0) {
3322                 CoverageEntry *entry = (CoverageEntry *)coverage_data->pdata[0];
3323                 first_filename = entry->filename ? entry->filename : "";
3324         } else
3325                 first_filename = "";
3326
3327         image_name = image_name ? image_name : "";
3328         sig = sig ? sig : "";
3329         method_name = method_name ? method_name : "";
3330
3331         ENTER_LOG (&coverage_methods_ctr, logbuffer,
3332                 EVENT_SIZE /* event */ +
3333                 strlen (image_name) + 1 /* image name */ +
3334                 strlen (class_name) + 1 /* class name */ +
3335                 strlen (method_name) + 1 /* method name */ +
3336                 strlen (sig) + 1 /* signature */ +
3337                 strlen (first_filename) + 1 /* first file name */ +
3338                 LEB128_SIZE /* token */ +
3339                 LEB128_SIZE /* method id */ +
3340                 LEB128_SIZE /* entries */
3341         );
3342
3343         emit_event (logbuffer, TYPE_COVERAGE_METHOD | TYPE_COVERAGE);
3344         emit_string (logbuffer, image_name, strlen (image_name) + 1);
3345         emit_string (logbuffer, class_name, strlen (class_name) + 1);
3346         emit_string (logbuffer, method_name, strlen (method_name) + 1);
3347         emit_string (logbuffer, sig, strlen (sig) + 1);
3348         emit_string (logbuffer, first_filename, strlen (first_filename) + 1);
3349
3350         emit_uvalue (logbuffer, mono_method_get_token (method));
3351         emit_uvalue (logbuffer, method_id);
3352         emit_value (logbuffer, coverage_data->len);
3353
3354         EXIT_LOG_EXPLICIT (DO_SEND);
3355
3356         for (i = 0; i < coverage_data->len; i++) {
3357                 CoverageEntry *entry = (CoverageEntry *)coverage_data->pdata[i];
3358
3359                 ENTER_LOG (&coverage_statements_ctr, logbuffer,
3360                         EVENT_SIZE /* event */ +
3361                         LEB128_SIZE /* method id */ +
3362                         LEB128_SIZE /* offset */ +
3363                         LEB128_SIZE /* counter */ +
3364                         LEB128_SIZE /* line */ +
3365                         LEB128_SIZE /* column */
3366                 );
3367
3368                 emit_event (logbuffer, TYPE_COVERAGE_STATEMENT | TYPE_COVERAGE);
3369                 emit_uvalue (logbuffer, method_id);
3370                 emit_uvalue (logbuffer, entry->offset);
3371                 emit_uvalue (logbuffer, entry->counter);
3372                 emit_uvalue (logbuffer, entry->line);
3373                 emit_uvalue (logbuffer, entry->column);
3374
3375                 EXIT_LOG_EXPLICIT (DO_SEND);
3376         }
3377
3378         method_id++;
3379
3380         g_free (class_name);
3381
3382         g_ptr_array_foreach (coverage_data, free_coverage_entry, NULL);
3383         g_ptr_array_free (coverage_data, TRUE);
3384         coverage_data = NULL;
3385 }
3386
3387 /* Drains the queue, hazardous-freeing each node, and returns how many nodes it contained. */
3388 static guint
3389 count_queue (MonoLockFreeQueue *queue)
3390 {
3391         MonoLockFreeQueueNode *node;
3392         guint count = 0;
3393
3394         while ((node = mono_lock_free_queue_dequeue (queue))) {
3395                 count++;
3396                 mono_thread_hazardous_try_free (node, g_free);
3397         }
3398
3399         return count;
3400 }
3401
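/*
 * Per-class hash table callback: emits a coverage-class event with the total
 * number of methods in the class and the number of tracked methods drained
 * from its queue (reported as "fully covered"; partial coverage is not
 * computed).
 */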
3402 static void
3403 build_class_buffer (gpointer key, gpointer value, gpointer userdata)
3404 {
3405         MonoClass *klass = (MonoClass *)key;
3406         MonoLockFreeQueue *class_methods = (MonoLockFreeQueue *)value;
3407         MonoImage *image;
3408         char *class_name;
3409         const char *assembly_name;
3410         int number_of_methods, partially_covered;
3411         guint fully_covered;
3412
3413         image = mono_class_get_image (klass);
3414         assembly_name = mono_image_get_name (image);
3415         class_name = mono_type_get_name (mono_class_get_type (klass));
3416
3417         assembly_name = assembly_name ? assembly_name : "";
3418         number_of_methods = mono_class_num_methods (klass);
3419         fully_covered = count_queue (class_methods);
3420                 /* We don't handle partially covered yet */
3421         partially_covered = 0;
3422
3423         ENTER_LOG (&coverage_classes_ctr, logbuffer,
3424                 EVENT_SIZE /* event */ +
3425                 strlen (assembly_name) + 1 /* assembly name */ +
3426                 strlen (class_name) + 1 /* class name */ +
3427                 LEB128_SIZE /* no. methods */ +
3428                 LEB128_SIZE /* fully covered */ +
3429                 LEB128_SIZE /* partially covered */
3430         );
3431
3432         emit_event (logbuffer, TYPE_COVERAGE_CLASS | TYPE_COVERAGE);
3433         emit_string (logbuffer, assembly_name, strlen (assembly_name) + 1);
3434         emit_string (logbuffer, class_name, strlen (class_name) + 1);
3435         emit_uvalue (logbuffer, number_of_methods);
3436         emit_uvalue (logbuffer, fully_covered);
3437         emit_uvalue (logbuffer, partially_covered);
3438
3439         EXIT_LOG_EXPLICIT (DO_SEND);
3440
3441         g_free (class_name);
3442 }
3443
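/*
 * Looks up the coverage totals for an image: the method count comes from the
 * MethodDef table, the "fully covered" count from the image's method queue.
 */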
3444 static void
3445 get_coverage_for_image (MonoImage *image, int *number_of_methods, guint *fully_covered, int *partially_covered)
3446 {
3447         MonoLockFreeQueue *image_methods = (MonoLockFreeQueue *)mono_conc_hashtable_lookup (image_to_methods, image);
3448
3449         *number_of_methods = mono_image_get_table_rows (image, MONO_TABLE_METHOD);
3450         if (image_methods)
3451                 *fully_covered = count_queue (image_methods);
3452         else
3453                 *fully_covered = 0;
3454
3455         // FIXME: We don't handle partially covered yet.
3456         *partially_covered = 0;
3457 }
3458
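/*
 * Per-assembly hash table callback: emits a coverage-assembly event carrying
 * the image name, GUID, file name and the per-image coverage totals.
 */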
3459 static void
3460 build_assembly_buffer (gpointer key, gpointer value, gpointer userdata)
3461 {
3462         MonoAssembly *assembly = (MonoAssembly *)value;
3463         MonoImage *image = mono_assembly_get_image (assembly);
3464         const char *name, *guid, *filename;
3465         int number_of_methods = 0, partially_covered = 0;
3466         guint fully_covered = 0;
3467
3468         name = mono_image_get_name (image);
3469         guid = mono_image_get_guid (image);
3470         filename = mono_image_get_filename (image);
3471
3472         name = name ? name : "";
3473         guid = guid ? guid : "";
3474         filename = filename ? filename : "";
3475
3476         get_coverage_for_image (image, &number_of_methods, &fully_covered, &partially_covered);
3477
3478         ENTER_LOG (&coverage_assemblies_ctr, logbuffer,
3479                 EVENT_SIZE /* event */ +
3480                 strlen (name) + 1 /* name */ +
3481                 strlen (guid) + 1 /* guid */ +
3482                 strlen (filename) + 1 /* file name */ +
3483                 LEB128_SIZE /* no. methods */ +
3484                 LEB128_SIZE /* fully covered */ +
3485                 LEB128_SIZE /* partially covered */
3486         );
3487
3488         emit_event (logbuffer, TYPE_COVERAGE_ASSEMBLY | TYPE_COVERAGE);
3489         emit_string (logbuffer, name, strlen (name) + 1);
3490         emit_string (logbuffer, guid, strlen (guid) + 1);
3491         emit_string (logbuffer, filename, strlen (filename) + 1);
3492         emit_uvalue (logbuffer, number_of_methods);
3493         emit_uvalue (logbuffer, fully_covered);
3494         emit_uvalue (logbuffer, partially_covered);
3495
3496         EXIT_LOG_EXPLICIT (DO_SEND);
3497 }
3498
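/*
 * Emits all collected coverage data (assemblies, classes, then methods) while
 * holding the coverage lock.
 */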
3499 static void
3500 dump_coverage (MonoProfiler *prof)
3501 {
3502         if (!coverage_initialized)
3503                 return;
3504
3505         COVERAGE_DEBUG(fprintf (stderr, "Coverage: Started dump\n");)
3506         method_id = 0;
3507
3508         mono_os_mutex_lock (&coverage_mutex);
3509         mono_conc_hashtable_foreach (coverage_assemblies, build_assembly_buffer, NULL);
3510         mono_conc_hashtable_foreach (coverage_classes, build_class_buffer, NULL);
3511         mono_conc_hashtable_foreach (coverage_methods, build_method_buffer, prof);
3512         mono_os_mutex_unlock (&coverage_mutex);
3513
3514         COVERAGE_DEBUG(fprintf (stderr, "Coverage: Finished dump\n");)
3515 }
3516
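/*
 * Method-enter hook used when coverage is enabled: records the method in
 * 'entered_methods' unless its image is on the suppression list.
 */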
3517 static void
3518 process_method_enter_coverage (MonoProfiler *prof, MonoMethod *method)
3519 {
3520         MonoClass *klass;
3521         MonoImage *image;
3522
3523         if (!coverage_initialized)
3524                 return;
3525
3526         klass = mono_method_get_class (method);
3527         image = mono_class_get_image (klass);
3528
3529         if (mono_conc_hashtable_lookup (suppressed_assemblies, (gpointer) mono_image_get_name (image)))
3530                 return;
3531
3532         mono_os_mutex_lock (&coverage_mutex);
3533         mono_conc_hashtable_insert (entered_methods, method, method);
3534         mono_os_mutex_unlock (&coverage_mutex);
3535 }
3536
3537 static MonoLockFreeQueueNode *
3538 create_method_node (MonoMethod *method)
3539 {
3540         MethodNode *node = (MethodNode *) g_malloc (sizeof (MethodNode));
3541         mono_lock_free_queue_node_init ((MonoLockFreeQueueNode *) node, FALSE);
3542         node->method = method;
3543
3544         return (MonoLockFreeQueueNode *) node;
3545 }
3546
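/*
 * Decides whether a method should be instrumented for coverage. Internal
 * calls, P/Invokes, suppressed core assemblies and classes excluded by the
 * user's +/- filters are rejected; accepted methods are registered in the
 * coverage tables and enqueued on their image's and class's method queues.
 */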
3547 static gboolean
3548 coverage_filter (MonoProfiler *prof, MonoMethod *method)
3549 {
3550         MonoError error;
3551         MonoClass *klass;
3552         MonoImage *image;
3553         MonoAssembly *assembly;
3554         MonoMethodHeader *header;
3555         guint32 iflags, flags, code_size;
3556         char *fqn, *classname;
3557         gboolean has_positive, found;
3558         MonoLockFreeQueue *image_methods, *class_methods;
3559         MonoLockFreeQueueNode *node;
3560
3561         g_assert (coverage_initialized && "Why are we being asked for coverage filter info when we're not doing coverage?");
3562
3563         COVERAGE_DEBUG(fprintf (stderr, "Coverage filter for %s\n", mono_method_get_name (method));)
3564
3565         flags = mono_method_get_flags (method, &iflags);
3566         if ((iflags & 0x1000 /*METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL*/) ||
3567             (flags & 0x2000 /*METHOD_ATTRIBUTE_PINVOKE_IMPL*/)) {
3568                 COVERAGE_DEBUG(fprintf (stderr, "   Internal call or pinvoke - ignoring\n");)
3569                 return FALSE;
3570         }
3571
3572         // Don't need to do anything else if we're already tracking this method
3573         if (mono_conc_hashtable_lookup (coverage_methods, method)) {
3574                 COVERAGE_DEBUG(fprintf (stderr, "   Already tracking\n");)
3575                 return TRUE;
3576         }
3577
3578         klass = mono_method_get_class (method);
3579         image = mono_class_get_image (klass);
3580
3581         // Don't handle coverage for the core assemblies
3582         if (mono_conc_hashtable_lookup (suppressed_assemblies, (gpointer) mono_image_get_name (image)) != NULL)
3583                 return FALSE;
3584
3585         if (prof->coverage_filters) {
3586                 /* Check already filtered classes first */
3587                 if (mono_conc_hashtable_lookup (filtered_classes, klass)) {
3588                         COVERAGE_DEBUG(fprintf (stderr, "   Already filtered\n");)
3589                         return FALSE;
3590                 }
3591
3592                 classname = mono_type_get_name (mono_class_get_type (klass));
3593
3594                 fqn = g_strdup_printf ("[%s]%s", mono_image_get_name (image), classname);
3595
3596                 COVERAGE_DEBUG(fprintf (stderr, "   Looking for %s in filter\n", fqn);)
3597                 // Check positive filters first
3598                 has_positive = FALSE;
3599                 found = FALSE;
3600                 for (guint i = 0; i < prof->coverage_filters->len; ++i) {
3601                         char *filter = (char *)g_ptr_array_index (prof->coverage_filters, i);
3602
3603                         if (filter [0] == '+') {
3604                                 filter = &filter [1];
3605
3606                                 COVERAGE_DEBUG(fprintf (stderr, "   Checking against +%s ...", filter);)
3607
3608                                 if (strstr (fqn, filter) != NULL) {
3609                                         COVERAGE_DEBUG(fprintf (stderr, "matched\n");)
3610                                         found = TRUE;
3611                                 } else
3612                                         COVERAGE_DEBUG(fprintf (stderr, "no match\n");)
3613
3614                                 has_positive = TRUE;
3615                         }
3616                 }
3617
3618                 if (has_positive && !found) {
3619                         COVERAGE_DEBUG(fprintf (stderr, "   Positive match was not found\n");)
3620
3621                         mono_os_mutex_lock (&coverage_mutex);
3622                         mono_conc_hashtable_insert (filtered_classes, klass, klass);
3623                         mono_os_mutex_unlock (&coverage_mutex);
3624                         g_free (fqn);
3625                         g_free (classname);
3626
3627                         return FALSE;
3628                 }
3629
3630                 for (guint i = 0; i < prof->coverage_filters->len; ++i) {
3631                         // FIXME: Is substring search sufficient?
3632                         char *filter = (char *)g_ptr_array_index (prof->coverage_filters, i);
3633                         if (filter [0] == '+')
3634                                 continue;
3635
3636                         // Skip '-'
3637                         filter = &filter [1];
3638                         COVERAGE_DEBUG(fprintf (stderr, "   Checking against -%s ...", filter);)
3639
3640                         if (strstr (fqn, filter) != NULL) {
3641                                 COVERAGE_DEBUG(fprintf (stderr, "matched\n");)
3642
3643                                 mono_os_mutex_lock (&coverage_mutex);
3644                                 mono_conc_hashtable_insert (filtered_classes, klass, klass);
3645                                 mono_os_mutex_unlock (&coverage_mutex);
3646                                 g_free (fqn);
3647                                 g_free (classname);
3648
3649                                 return FALSE;
3650                         } else
3651                                 COVERAGE_DEBUG(fprintf (stderr, "no match\n");)
3652
3653                 }
3654
3655                 g_free (fqn);
3656                 g_free (classname);
3657         }
3658
3659         COVERAGE_DEBUG(fprintf (stderr, "   Handling coverage for %s\n", mono_method_get_name (method));)
3660         header = mono_method_get_header_checked (method, &error);
3661         mono_error_cleanup (&error);
3662
3663         mono_method_header_get_code (header, &code_size, NULL);
3664
3665         assembly = mono_image_get_assembly (image);
3666
3667         // Need to keep the assemblies around for as long as they are kept in the hashtable.
3668         // NUnit, for example, has a habit of unloading them before the coverage statistics are
3669         // generated, causing a crash. See https://bugzilla.xamarin.com/show_bug.cgi?id=39325
3670         mono_assembly_addref (assembly);
3671
3672         mono_os_mutex_lock (&coverage_mutex);
3673         mono_conc_hashtable_insert (coverage_methods, method, method);
3674         mono_conc_hashtable_insert (coverage_assemblies, assembly, assembly);
3675         mono_os_mutex_unlock (&coverage_mutex);
3676
3677         image_methods = (MonoLockFreeQueue *)mono_conc_hashtable_lookup (image_to_methods, image);
3678
3679         if (image_methods == NULL) {
3680                 image_methods = (MonoLockFreeQueue *) g_malloc (sizeof (MonoLockFreeQueue));
3681                 mono_lock_free_queue_init (image_methods);
3682                 mono_os_mutex_lock (&coverage_mutex);
3683                 mono_conc_hashtable_insert (image_to_methods, image, image_methods);
3684                 mono_os_mutex_unlock (&coverage_mutex);
3685         }
3686
3687         node = create_method_node (method);
3688         mono_lock_free_queue_enqueue (image_methods, node);
3689
3690         class_methods = (MonoLockFreeQueue *)mono_conc_hashtable_lookup (coverage_classes, klass);
3691
3692         if (class_methods == NULL) {
3693                 class_methods = (MonoLockFreeQueue *) g_malloc (sizeof (MonoLockFreeQueue));
3694                 mono_lock_free_queue_init (class_methods);
3695                 mono_os_mutex_lock (&coverage_mutex);
3696                 mono_conc_hashtable_insert (coverage_classes, klass, class_methods);
3697                 mono_os_mutex_unlock (&coverage_mutex);
3698         }
3699
3700         node = create_method_node (method);
3701         mono_lock_free_queue_enqueue (class_methods, node);
3702
3703         return TRUE;
3704 }
3705
3706 #define LINE_BUFFER_SIZE 4096
3707 /* Maximum size of a suppression or coverage filter file: 128 KB. */
3708 #define MAX_FILE_SIZE (128 * 1024)
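/*
 * Reads an entire stream into a newly allocated, NUL-terminated buffer.
 * Returns NULL if the stream cannot be seeked or exceeds MAX_FILE_SIZE.
 */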
3709 static char *
3710 get_file_content (FILE *stream)
3711 {
3712         char *buffer;
3713         ssize_t bytes_read;
3714         long filesize;
3715         int res, offset = 0;
3716
3717         res = fseek (stream, 0, SEEK_END);
3718         if (res < 0)
3719                 return NULL;
3720
3721         filesize = ftell (stream);
3722         if (filesize < 0)
3723                 return NULL;
3724
3725         res = fseek (stream, 0, SEEK_SET);
3726         if (res < 0)
3727                 return NULL;
3728
3729         if (filesize > MAX_FILE_SIZE)
3730                 return NULL;
3731
3732         buffer = (char *) g_malloc ((filesize + 1) * sizeof (char));
3733         while ((bytes_read = fread (buffer + offset, 1, LINE_BUFFER_SIZE, stream)) > 0)
3734                 offset += bytes_read;
3735
3736         /* NULL terminate our buffer */
3737         buffer[filesize] = '\0';
3738         return buffer;
3739 }
3740
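/*
 * Returns the next line of 'contents', terminating it in place and pointing
 * *next_start at the following line (or NULL at end of input).
 */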
3741 static char *
3742 get_next_line (char *contents, char **next_start)
3743 {
3744         char *p = contents;
3745
3746         if (p == NULL || *p == '\0') {
3747                 *next_start = NULL;
3748                 return NULL;
3749         }
3750
3751         while (*p != '\n' && *p != '\0')
3752                 p++;
3753
3754         if (*p == '\n') {
3755                 *p = '\0';
3756                 *next_start = p + 1;
3757         } else
3758                 *next_start = NULL;
3759
3760         return contents;
3761 }
3762
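/*
 * Loads SUPPRESSION_DIR/mono-profiler-log.suppression (one assembly name per
 * line) into the 'suppressed_assemblies' table; coverage is never collected
 * for these assemblies.
 */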
3763 static void
3764 init_suppressed_assemblies (void)
3765 {
3766         char *content;
3767         char *line;
3768         FILE *sa_file;
3769
3770         suppressed_assemblies = mono_conc_hashtable_new (g_str_hash, g_str_equal);
3771         sa_file = fopen (SUPPRESSION_DIR "/mono-profiler-log.suppression", "r");
3772         if (sa_file == NULL)
3773                 return;
3774
3775         /* Don't need to free @content as it is referred to by the lines stored in @suppressed_assemblies */
3776         content = get_file_content (sa_file);
3777         if (content == NULL) {
3778                 g_error ("mono-profiler-log.suppression is greater than 128kb - aborting\n");
3779         }
3780
3781         while ((line = get_next_line (content, &content))) {
3782                 line = g_strchomp (g_strchug (line));
3783                 /* No locking needed as we're doing initialization */
3784                 mono_conc_hashtable_insert (suppressed_assemblies, line, line);
3785         }
3786
3787         fclose (sa_file);
3788 }
3789
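/*
 * Reads a coverage filter file, adding each trimmed line (a '+' or '-'
 * pattern) to @filters.
 */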
3790 static void
3791 parse_cov_filter_file (GPtrArray *filters, const char *file)
3792 {
3793         FILE *filter_file;
3794         char *line, *content;
3795
3796         filter_file = fopen (file, "r");
3797         if (filter_file == NULL) {
3798                 fprintf (stderr, "Unable to open %s\n", file);
3799                 return;
3800         }
3801
3802         /* Don't need to free content as it is referred to by the lines stored in @filters */
3803         content = get_file_content (filter_file);
3804         if (content == NULL)
3805                 fprintf (stderr, "WARNING: %s is greater than 128kb - ignoring\n", file);
3806
3807         while ((line = get_next_line (content, &content)))
3808                 g_ptr_array_add (filters, g_strchug (g_strchomp (line)));
3809
3810         fclose (filter_file);
3811 }
3812
3813 static void
3814 coverage_init (MonoProfiler *prof)
3815 {
3816         g_assert (!coverage_initialized && "Why are we initializing coverage twice?");
3817
3818         COVERAGE_DEBUG(fprintf (stderr, "Coverage initialized\n");)
3819
3820         mono_os_mutex_init (&coverage_mutex);
3821         coverage_methods = mono_conc_hashtable_new (NULL, NULL);
3822         coverage_assemblies = mono_conc_hashtable_new (NULL, NULL);
3823         coverage_classes = mono_conc_hashtable_new (NULL, NULL);
3824         filtered_classes = mono_conc_hashtable_new (NULL, NULL);
3825         entered_methods = mono_conc_hashtable_new (NULL, NULL);
3826         image_to_methods = mono_conc_hashtable_new (NULL, NULL);
3827         init_suppressed_assemblies ();
3828
3829         coverage_initialized = TRUE;
3830 }
3831
3832 static void
3833 unref_coverage_assemblies (gpointer key, gpointer value, gpointer userdata)
3834 {
3835         MonoAssembly *assembly = (MonoAssembly *)value;
3836         mono_assembly_close (assembly);
3837 }
3838
3839 static void
3840 free_sample_hit (gpointer p)
3841 {
3842         mono_lock_free_free (p, SAMPLE_BLOCK_SIZE);
3843 }
3844
3845 static void
3846 cleanup_reusable_samples (MonoProfiler *prof)
3847 {
3848         SampleHit *sample;
3849
3850         while ((sample = (SampleHit *) mono_lock_free_queue_dequeue (&prof->sample_reuse_queue)))
3851                 mono_thread_hazardous_try_free (sample, free_sample_hit);
3852 }
3853
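/*
 * Profiler shutdown: takes a final counter sample, dumps coverage, signals
 * and joins the helper thread, frees the counter agent lists, removes any
 * profiler thread records still in the thread list, then stops the dumper
 * and writer threads and releases all remaining profiler state.
 */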
3854 static void
3855 log_shutdown (MonoProfiler *prof)
3856 {
3857         InterlockedWrite (&in_shutdown, 1);
3858
3859         if (!no_counters)
3860                 counters_and_perfcounters_sample (prof);
3861
3862         dump_coverage (prof);
3863
3864         char c = 1;
3865
3866         if (write (prof->pipes [1], &c, 1) != 1) {
3867                 fprintf (stderr, "Could not write to pipe: %s\n", strerror (errno));
3868                 exit (1);
3869         }
3870
3871         mono_native_thread_join (prof->helper_thread);
3872
3873         mono_os_mutex_destroy (&counters_mutex);
3874
3875         MonoCounterAgent *mc_next;
3876
3877         for (MonoCounterAgent *cur = counters; cur; cur = mc_next) {
3878                 mc_next = cur->next;
3879                 g_free (cur);
3880         }
3881
3882         PerfCounterAgent *pc_next;
3883
3884         for (PerfCounterAgent *cur = perfcounters; cur; cur = pc_next) {
3885                 pc_next = cur->next;
3886                 g_free (cur);
3887         }
3888
3889         /*
3890          * Ensure that we empty the LLS completely, even if some nodes are
3891          * not immediately removed upon calling mono_lls_remove (), by
3892          * iterating until the head is NULL.
3893          */
3894         while (profiler_thread_list.head) {
3895                 MONO_LLS_FOREACH_SAFE (&profiler_thread_list, MonoProfilerThread, thread) {
3896                         g_assert (thread->attached && "Why is a thread in the LLS not attached?");
3897
3898                         remove_thread (thread);
3899                 } MONO_LLS_FOREACH_SAFE_END
3900         }
3901
3902         /*
3903          * Ensure that all threads have been freed, so that we don't miss any
3904          * buffers when we shut down the writer thread below.
3905          */
3906         mono_thread_hazardous_try_free_all ();
3907
3908         InterlockedWrite (&prof->run_dumper_thread, 0);
3909         mono_os_sem_post (&prof->dumper_queue_sem);
3910         mono_native_thread_join (prof->dumper_thread);
3911         mono_os_sem_destroy (&prof->dumper_queue_sem);
3912
3913         InterlockedWrite (&prof->run_writer_thread, 0);
3914         mono_os_sem_post (&prof->writer_queue_sem);
3915         mono_native_thread_join (prof->writer_thread);
3916         mono_os_sem_destroy (&prof->writer_queue_sem);
3917
3918         /*
3919          * Free all writer queue entries, and ensure that all sample hits will be
3920          * added to the sample reuse queue.
3921          */
3922         mono_thread_hazardous_try_free_all ();
3923
3924         cleanup_reusable_samples (prof);
3925
3926         /*
3927          * Finally, make sure that all sample hits are freed. This should cover all
3928          * hazardous data from the profiler. We can now be sure that the runtime
3929          * won't later invoke free functions in the profiler library after it has
3930          * been unloaded.
3931          */
3932         mono_thread_hazardous_try_free_all ();
3933
3934         gint32 state = InterlockedRead (&buffer_lock_state);
3935
3936         g_assert (!(state & 0xFFFF) && "Why is the reader count still non-zero?");
3937         g_assert (!(state >> 16) && "Why is the exclusive lock still held?");
3938
3939 #if defined (HAVE_SYS_ZLIB)
3940         if (prof->gzfile)
3941                 gzclose (prof->gzfile);
3942 #endif
3943         if (prof->pipe_output)
3944                 pclose (prof->file);
3945         else
3946                 fclose (prof->file);
3947
3948         mono_conc_hashtable_destroy (prof->method_table);
3949         mono_os_mutex_destroy (&prof->method_table_mutex);
3950
3951         if (coverage_initialized) {
3952                 mono_os_mutex_lock (&coverage_mutex);
3953                 mono_conc_hashtable_foreach (coverage_assemblies, unref_coverage_assemblies, prof);
3954                 mono_os_mutex_unlock (&coverage_mutex);
3955
3956                 mono_conc_hashtable_destroy (coverage_methods);
3957                 mono_conc_hashtable_destroy (coverage_assemblies);
3958                 mono_conc_hashtable_destroy (coverage_classes);
3959                 mono_conc_hashtable_destroy (filtered_classes);
3960
3961                 mono_conc_hashtable_destroy (entered_methods);
3962                 mono_conc_hashtable_destroy (image_to_methods);
3963                 mono_conc_hashtable_destroy (suppressed_assemblies);
3964                 mono_os_mutex_destroy (&coverage_mutex);
3965         }
3966
3967         PROF_TLS_FREE ();
3968
3969         g_free (prof->args);
3970         g_free (prof);
3971 }
3972
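/*
 * Expands the output file name template: "%t" is replaced by a UTC
 * timestamp, "%p" by the process id and "%%" by a literal '%'. For example,
 * "output-%p-%t.mlpd" might become "output-1234-20170607120000.mlpd".
 */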
3973 static char*
3974 new_filename (const char* filename)
3975 {
3976         time_t t = time (NULL);
3977         int pid = process_id ();
3978         char pid_buf [16];
3979         char time_buf [16];
3980         char *res, *d;
3981         const char *p;
3982         int count_dates = 0;
3983         int count_pids = 0;
3984         int s_date, s_pid;
3985         struct tm *ts;
3986         for (p = filename; *p; p++) {
3987                 if (*p != '%')
3988                         continue;
3989                 p++;
3990                 if (*p == 't')
3991                         count_dates++;
3992                 else if (*p == 'p')
3993                         count_pids++;
3994                 else if (*p == 0)
3995                         break;
3996         }
3997         if (!count_dates && !count_pids)
3998                 return pstrdup (filename);
3999         snprintf (pid_buf, sizeof (pid_buf), "%d", pid);
4000         ts = gmtime (&t);
4001         snprintf (time_buf, sizeof (time_buf), "%d%02d%02d%02d%02d%02d",
4002                 1900 + ts->tm_year, 1 + ts->tm_mon, ts->tm_mday, ts->tm_hour, ts->tm_min, ts->tm_sec);
4003         s_date = strlen (time_buf);
4004         s_pid = strlen (pid_buf);
4005         d = res = (char *) g_malloc (strlen (filename) + s_date * count_dates + s_pid * count_pids);
4006         for (p = filename; *p; p++) {
4007                 if (*p != '%') {
4008                         *d++ = *p;
4009                         continue;
4010                 }
4011                 p++;
4012                 if (*p == 't') {
4013                         strcpy (d, time_buf);
4014                         d += s_date;
4015                         continue;
4016                 } else if (*p == 'p') {
4017                         strcpy (d, pid_buf);
4018                         d += s_pid;
4019                         continue;
4020                 } else if (*p == '%') {
4021                         *d++ = '%';
4022                         continue;
4023                 } else if (*p == 0)
4024                         break;
4025                 *d++ = '%';
4026                 *d++ = *p;
4027         }
4028         *d = 0;
4029         return res;
4030 }
4031
4032 static void
4033 add_to_fd_set (fd_set *set, int fd, int *max_fd)
4034 {
4035         /*
4036          * This should only trigger for the basic FDs (server socket, pipes) at
4037          * startup if for some mysterious reason they're too large. In this case,
4038          * the profiler really can't function, and we're better off printing an
4039          * error and exiting.
4040          */
4041         if (fd >= FD_SETSIZE) {
4042                 fprintf (stderr, "File descriptor is out of bounds for fd_set: %d\n", fd);
4043                 exit (1);
4044         }
4045
4046         FD_SET (fd, set);
4047
4048         if (*max_fd < fd)
4049                 *max_fd = fd;
4050 }
4051
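/*
 * Helper thread: waits (for at most one second at a time) on the command
 * server socket, the shutdown pipe and any connected command clients. Each
 * wakeup samples the counters (unless disabled) and emits a periodic sync
 * point; a write to the shutdown pipe ends the loop. A client that sends
 * "heapshot\n" (e.g. `echo heapshot | nc 127.0.0.1 <port>`) requests an
 * on-demand heap shot when hs_mode_ondemand is enabled.
 */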
4052 static void *
4053 helper_thread (void *arg)
4054 {
4055         MonoProfiler *prof = (MonoProfiler *) arg;
4056
4057         mono_threads_attach_tools_thread ();
4058         mono_native_thread_set_name (mono_native_thread_id_get (), "Profiler helper");
4059
4060         MonoProfilerThread *thread = init_thread (prof, FALSE);
4061
4062         GArray *command_sockets = g_array_new (FALSE, FALSE, sizeof (int));
4063
4064         while (1) {
4065                 fd_set rfds;
4066                 int max_fd = -1;
4067
4068                 FD_ZERO (&rfds);
4069
4070                 add_to_fd_set (&rfds, prof->server_socket, &max_fd);
4071                 add_to_fd_set (&rfds, prof->pipes [0], &max_fd);
4072
4073                 for (gint i = 0; i < command_sockets->len; i++)
4074                         add_to_fd_set (&rfds, g_array_index (command_sockets, int, i), &max_fd);
4075
4076                 struct timeval tv = { .tv_sec = 1, .tv_usec = 0 };
4077
4078                 // Sleep for 1sec or until a file descriptor has data.
4079                 if (select (max_fd + 1, &rfds, NULL, NULL, &tv) == -1) {
4080                         if (errno == EINTR)
4081                                 continue;
4082
4083                         fprintf (stderr, "Error in mono-profiler-log server: %s\n", strerror (errno));
4084                         exit (1);
4085                 }
4086
4087                 if (!no_counters)
4088                         counters_and_perfcounters_sample (prof);
4089
4090                 buffer_lock_excl ();
4091
4092                 sync_point (SYNC_POINT_PERIODIC);
4093
4094                 buffer_unlock_excl ();
4095
4096                 // Are we shutting down?
4097                 if (FD_ISSET (prof->pipes [0], &rfds)) {
4098                         char c;
4099                         read (prof->pipes [0], &c, 1);
4100                         break;
4101                 }
4102
4103                 for (gint i = 0; i < command_sockets->len; i++) {
4104                         int fd = g_array_index (command_sockets, int, i);
4105
4106                         if (!FD_ISSET (fd, &rfds))
4107                                 continue;
4108
4109                         char buf [64];
4110                         int len = read (fd, buf, sizeof (buf) - 1);
4111
4112                         if (len == -1)
4113                                 continue;
4114
4115                         if (!len) {
4116                                 // The other end disconnected.
4117                                 g_array_remove_index (command_sockets, i);
4118                                 close (fd);
4119
4120                                 continue;
4121                         }
4122
4123                         buf [len] = 0;
4124
4125                         if (!strcmp (buf, "heapshot\n") && hs_mode_ondemand) {
4126                                 // Rely on the finalization callback triggering a GC.
4127                                 heapshot_requested = 1;
4128                                 mono_gc_finalize_notify ();
4129                         }
4130                 }
4131
4132                 if (FD_ISSET (prof->server_socket, &rfds)) {
4133                         int fd = accept (prof->server_socket, NULL, NULL);
4134
4135                         if (fd != -1) {
4136                                 if (fd >= FD_SETSIZE)
4137                                         close (fd);
4138                                 else
4139                                         g_array_append_val (command_sockets, fd);
4140                         }
4141                 }
4142         }
4143
4144         for (gint i = 0; i < command_sockets->len; i++)
4145                 close (g_array_index (command_sockets, int, i));
4146
4147         g_array_free (command_sockets, TRUE);
4148
4149         send_log_unsafe (FALSE);
4150         deinit_thread (thread);
4151
4152         mono_thread_info_detach ();
4153
4154         return NULL;
4155 }
4156
4157 static void
4158 start_helper_thread (MonoProfiler* prof)
4159 {
4160         if (pipe (prof->pipes) == -1) {
4161                 fprintf (stderr, "Cannot create pipe: %s\n", strerror (errno));
4162                 exit (1);
4163         }
4164
4165         prof->server_socket = socket (PF_INET, SOCK_STREAM, 0);
4166
4167         if (prof->server_socket == -1) {
4168                 fprintf (stderr, "Cannot create server socket: %s\n", strerror (errno));
4169                 exit (1);
4170         }
4171
4172         struct sockaddr_in server_address;
4173
4174         memset (&server_address, 0, sizeof (server_address));
4175         server_address.sin_family = AF_INET;
4176         server_address.sin_addr.s_addr = INADDR_ANY;
4177         server_address.sin_port = htons (prof->command_port);
4178
4179         if (bind (prof->server_socket, (struct sockaddr *) &server_address, sizeof (server_address)) == -1) {
4180                 fprintf (stderr, "Cannot bind server socket on port %d: %s\n", prof->command_port, strerror (errno));
4181                 close (prof->server_socket);
4182                 exit (1);
4183         }
4184
4185         if (listen (prof->server_socket, 1) == -1) {
4186                 fprintf (stderr, "Cannot listen on server socket: %s\n", strerror (errno));
4187                 close (prof->server_socket);
4188                 exit (1);
4189         }
4190
4191         socklen_t slen = sizeof (server_address);
4192
4193         if (getsockname (prof->server_socket, (struct sockaddr *) &server_address, &slen)) {
4194                 fprintf (stderr, "Could not get assigned port: %s\n", strerror (errno));
4195                 close (prof->server_socket);
4196                 exit (1);
4197         }
4198
4199         prof->command_port = ntohs (server_address.sin_port);
4200
4201         if (!mono_native_thread_create (&prof->helper_thread, helper_thread, prof)) {
4202                 fprintf (stderr, "Could not start helper thread\n");
4203                 close (prof->server_socket);
4204                 exit (1);
4205         }
4206 }
4207
4208 static void
4209 free_writer_entry (gpointer p)
4210 {
4211         mono_lock_free_free (p, WRITER_ENTRY_BLOCK_SIZE);
4212 }
4213
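/*
 * Dequeues one writer queue entry, first emitting JIT/method metadata events
 * for any methods not yet described, then flushing the entry's buffer.
 * Returns TRUE if an entry was processed.
 */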
4214 static gboolean
4215 handle_writer_queue_entry (MonoProfiler *prof)
4216 {
4217         WriterQueueEntry *entry;
4218
4219         if ((entry = (WriterQueueEntry *) mono_lock_free_queue_dequeue (&prof->writer_queue))) {
4220                 if (!entry->methods)
4221                         goto no_methods;
4222
4223                 gboolean wrote_methods = FALSE;
4224
4225                 /*
4226                  * Encode the method events in a temporary log buffer that we
4227                  * flush to disk before the main buffer, ensuring that all
4228                  * methods have metadata emitted before they're referenced.
4229                  *
4230                  * We use a 'proper' thread-local buffer for this as opposed
4231                  * to allocating and freeing a buffer by hand because the call
4232                  * to mono_method_full_name () below may trigger class load
4233                  * events when it retrieves the signature of the method. So a
4234                  * thread-local buffer needs to exist when such events occur.
4235                  */
4236                 for (guint i = 0; i < entry->methods->len; i++) {
4237                         MethodInfo *info = (MethodInfo *) g_ptr_array_index (entry->methods, i);
4238
4239                         if (mono_conc_hashtable_lookup (prof->method_table, info->method))
4240                                 goto free_info; // This method already has metadata emitted.
4241
4242                         /*
4243                          * Other threads use this hash table to get a general
4244                          * idea of whether a method has already been emitted to
4245                          * the stream. Due to the way we add to this table, it
4246                          * can easily happen that multiple threads queue up the
4247                          * same methods, but that's OK since eventually all
4248                          * methods will be in this table and the thread-local
4249                          * method lists will just be empty for the rest of the
4250                          * app's lifetime.
4251                          */
4252                         mono_os_mutex_lock (&prof->method_table_mutex);
4253                         mono_conc_hashtable_insert (prof->method_table, info->method, info->method);
4254                         mono_os_mutex_unlock (&prof->method_table_mutex);
4255
4256                         char *name = mono_method_full_name (info->method, 1);
4257                         int nlen = strlen (name) + 1;
4258                         void *cstart = info->ji ? mono_jit_info_get_code_start (info->ji) : NULL;
4259                         int csize = info->ji ? mono_jit_info_get_code_size (info->ji) : 0;
4260
4261                         ENTER_LOG (&method_jits_ctr, logbuffer,
4262                                 EVENT_SIZE /* event */ +
4263                                 LEB128_SIZE /* method */ +
4264                                 LEB128_SIZE /* start */ +
4265                                 LEB128_SIZE /* size */ +
4266                                 nlen /* name */
4267                         );
4268
4269                         emit_event_time (logbuffer, TYPE_JIT | TYPE_METHOD, info->time);
4270                         emit_method_inner (logbuffer, info->method);
4271                         emit_ptr (logbuffer, cstart);
4272                         emit_value (logbuffer, csize);
4273
4274                         memcpy (logbuffer->cursor, name, nlen);
4275                         logbuffer->cursor += nlen;
4276
4277                         EXIT_LOG_EXPLICIT (NO_SEND);
4278
4279                         mono_free (name);
4280
4281                         wrote_methods = TRUE;
4282
4283                 free_info:
4284                         g_free (info);
4285                 }
4286
4287                 g_ptr_array_free (entry->methods, TRUE);
4288
4289                 if (wrote_methods) {
4290                         MonoProfilerThread *thread = PROF_TLS_GET ();
4291
4292                         dump_buffer_threadless (prof, thread->buffer);
4293                         init_buffer_state (thread);
4294                 }
4295
4296         no_methods:
4297                 dump_buffer (prof, entry->buffer);
4298
4299                 mono_thread_hazardous_try_free (entry, free_writer_entry);
4300
4301                 return TRUE;
4302         }
4303
4304         return FALSE;
4305 }
4306
4307 static void *
4308 writer_thread (void *arg)
4309 {
4310         MonoProfiler *prof = (MonoProfiler *)arg;
4311
4312         mono_threads_attach_tools_thread ();
4313         mono_native_thread_set_name (mono_native_thread_id_get (), "Profiler writer");
4314
4315         dump_header (prof);
4316
4317         MonoProfilerThread *thread = init_thread (prof, FALSE);
4318
4319         while (InterlockedRead (&prof->run_writer_thread)) {
4320                 mono_os_sem_wait (&prof->writer_queue_sem, MONO_SEM_FLAGS_NONE);
4321                 handle_writer_queue_entry (prof);
4322         }
4323
4324         /* Drain any remaining entries on shutdown. */
4325         while (handle_writer_queue_entry (prof));
4326
4327         free_buffer (thread->buffer, thread->buffer->size);
4328         deinit_thread (thread);
4329
4330         mono_thread_info_detach ();
4331
4332         return NULL;
4333 }
4334
4335 static void
4336 start_writer_thread (MonoProfiler* prof)
4337 {
4338         InterlockedWrite (&prof->run_writer_thread, 1);
4339
4340         if (!mono_native_thread_create (&prof->writer_thread, writer_thread, prof)) {
4341                 fprintf (stderr, "Could not start writer thread\n");
4342                 exit (1);
4343         }
4344 }
4345
4346 static void
4347 reuse_sample_hit (gpointer p)
4348 {
4349         SampleHit *sample = p;
4350
4351         mono_lock_free_queue_node_unpoison (&sample->node);
4352         mono_lock_free_queue_enqueue (&sample->prof->sample_reuse_queue, &sample->node);
4353 }
4354
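/*
 * Dequeues one sample hit, resolves frames that only carry a native address
 * back to managed methods via the JIT info table, emits the sample-hit event
 * and recycles the hit for reuse. Returns TRUE if an entry was processed.
 */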
4355 static gboolean
4356 handle_dumper_queue_entry (MonoProfiler *prof)
4357 {
4358         SampleHit *sample;
4359
4360         if ((sample = (SampleHit *) mono_lock_free_queue_dequeue (&prof->dumper_queue))) {
4361                 for (int i = 0; i < sample->count; ++i) {
4362                         MonoMethod *method = sample->frames [i].method;
4363                         MonoDomain *domain = sample->frames [i].domain;
4364                         void *address = sample->frames [i].base_address;
4365
4366                         if (!method) {
4367                                 g_assert (domain && "What happened to the domain pointer?");
4368                                 g_assert (address && "What happened to the instruction pointer?");
4369
4370                                 MonoJitInfo *ji = mono_jit_info_table_find (domain, (char *) address);
4371
4372                                 if (ji)
4373                                         sample->frames [i].method = mono_jit_info_get_method (ji);
4374                         }
4375                 }
4376
4377                 ENTER_LOG (&sample_hits_ctr, logbuffer,
4378                         EVENT_SIZE /* event */ +
4379                         BYTE_SIZE /* type */ +
4380                         LEB128_SIZE /* tid */ +
4381                         LEB128_SIZE /* count */ +
4382                         1 * (
4383                                 LEB128_SIZE /* ip */
4384                         ) +
4385                         LEB128_SIZE /* managed count */ +
4386                         sample->count * (
4387                                 LEB128_SIZE /* method */
4388                         )
4389                 );
4390
4391                 emit_event_time (logbuffer, TYPE_SAMPLE | TYPE_SAMPLE_HIT, sample->time);
4392                 emit_byte (logbuffer, SAMPLE_CYCLES);
4393                 emit_ptr (logbuffer, (void *) sample->tid);
4394                 emit_value (logbuffer, 1);
4395
4396                 // TODO: Actual native unwinding.
4397                 for (int i = 0; i < 1; ++i) {
4398                         emit_ptr (logbuffer, sample->ip);
4399                         add_code_pointer ((uintptr_t) sample->ip);
4400                 }
4401
4402                 /* new in data version 6 */
4403                 emit_uvalue (logbuffer, sample->count);
4404
4405                 for (int i = 0; i < sample->count; ++i)
4406                         emit_method (logbuffer, sample->frames [i].method);
4407
4408                 EXIT_LOG_EXPLICIT (DO_SEND);
4409
4410                 mono_thread_hazardous_try_free (sample, reuse_sample_hit);
4411
4412                 dump_unmanaged_coderefs (prof);

                     return TRUE;
4413         }
4414
4415         return FALSE;
4416 }
4417
4418 static void *
4419 dumper_thread (void *arg)
4420 {
4421         MonoProfiler *prof = (MonoProfiler *)arg;
4422
4423         mono_threads_attach_tools_thread ();
4424         mono_native_thread_set_name (mono_native_thread_id_get (), "Profiler dumper");
4425
4426         MonoProfilerThread *thread = init_thread (prof, FALSE);
4427
4428         while (InterlockedRead (&prof->run_dumper_thread)) {
4429                 /*
4430                  * Flush samples at least once a second so that an otherwise idle program
4431                  * doesn't make it look like the profiler has stopped working.
4432                  */
4433                 if (mono_os_sem_timedwait (&prof->dumper_queue_sem, 1000, MONO_SEM_FLAGS_NONE) == MONO_SEM_TIMEDWAIT_RET_TIMEDOUT)
4434                         send_log_unsafe (FALSE);
4435
4436                 handle_dumper_queue_entry (prof);
4437         }
4438
4439         /* Drain any remaining entries on shutdown. */
4440         while (handle_dumper_queue_entry (prof));
4441
4442         send_log_unsafe (FALSE);
4443         deinit_thread (thread);
4444
4445         mono_thread_info_detach ();
4446
4447         return NULL;
4448 }
4449
4450 static void
4451 start_dumper_thread (MonoProfiler* prof)
4452 {
4453         InterlockedWrite (&prof->run_dumper_thread, 1);
4454
4455         if (!mono_native_thread_create (&prof->dumper_thread, dumper_thread, prof)) {
4456                 fprintf (stderr, "Could not start dumper thread\n");
4457                 exit (1);
4458         }
4459 }
4460
4461 static void
4462 register_counter (const char *name, gint32 *counter)
4463 {
4464         mono_counters_register (name, MONO_COUNTER_UINT | MONO_COUNTER_PROFILER | MONO_COUNTER_MONOTONIC, counter);
4465 }
4466
4467 static void
4468 runtime_initialized (MonoProfiler *profiler)
4469 {
4470         InterlockedWrite (&runtime_inited, 1);
4471
4472         register_counter ("Sample events allocated", &sample_allocations_ctr);
4473         register_counter ("Log buffers allocated", &buffer_allocations_ctr);
4474
4475         register_counter ("Event: Sync points", &sync_points_ctr);
4476         register_counter ("Event: Heap objects", &heap_objects_ctr);
4477         register_counter ("Event: Heap starts", &heap_starts_ctr);
4478         register_counter ("Event: Heap ends", &heap_ends_ctr);
4479         register_counter ("Event: Heap roots", &heap_roots_ctr);
4480         register_counter ("Event: GC events", &gc_events_ctr);
4481         register_counter ("Event: GC resizes", &gc_resizes_ctr);
4482         register_counter ("Event: GC allocations", &gc_allocs_ctr);
4483         register_counter ("Event: GC moves", &gc_moves_ctr);
4484         register_counter ("Event: GC handle creations", &gc_handle_creations_ctr);
4485         register_counter ("Event: GC handle deletions", &gc_handle_deletions_ctr);
4486         register_counter ("Event: GC finalize starts", &finalize_begins_ctr);
4487         register_counter ("Event: GC finalize ends", &finalize_ends_ctr);
4488         register_counter ("Event: GC finalize object starts", &finalize_object_begins_ctr);
4489         register_counter ("Event: GC finalize object ends", &finalize_object_ends_ctr);
4490         register_counter ("Event: Image loads", &image_loads_ctr);
4491         register_counter ("Event: Image unloads", &image_unloads_ctr);
4492         register_counter ("Event: Assembly loads", &assembly_loads_ctr);
4493         register_counter ("Event: Assembly unloads", &assembly_unloads_ctr);
4494         register_counter ("Event: Class loads", &class_loads_ctr);
4495         register_counter ("Event: Class unloads", &class_unloads_ctr);
4496         register_counter ("Event: Method entries", &method_entries_ctr);
4497         register_counter ("Event: Method exits", &method_exits_ctr);
4498         register_counter ("Event: Method exception leaves", &method_exception_exits_ctr);
4499         register_counter ("Event: Method JITs", &method_jits_ctr);
4500         register_counter ("Event: Code buffers", &code_buffers_ctr);
4501         register_counter ("Event: Exception throws", &exception_throws_ctr);
4502         register_counter ("Event: Exception clauses", &exception_clauses_ctr);
4503         register_counter ("Event: Monitor events", &monitor_events_ctr);
4504         register_counter ("Event: Thread starts", &thread_starts_ctr);
4505         register_counter ("Event: Thread ends", &thread_ends_ctr);
4506         register_counter ("Event: Thread names", &thread_names_ctr);
4507         register_counter ("Event: Domain loads", &domain_loads_ctr);
4508         register_counter ("Event: Domain unloads", &domain_unloads_ctr);
4509         register_counter ("Event: Domain names", &domain_names_ctr);
4510         register_counter ("Event: Context loads", &context_loads_ctr);
4511         register_counter ("Event: Context unloads", &context_unloads_ctr);
4512         register_counter ("Event: Sample binaries", &sample_ubins_ctr);
4513         register_counter ("Event: Sample symbols", &sample_usyms_ctr);
4514         register_counter ("Event: Sample hits", &sample_hits_ctr);
4515         register_counter ("Event: Counter descriptors", &counter_descriptors_ctr);
4516         register_counter ("Event: Counter samples", &counter_samples_ctr);
4517         register_counter ("Event: Performance counter descriptors", &perfcounter_descriptors_ctr);
4518         register_counter ("Event: Performance counter samples", &perfcounter_samples_ctr);
4519         register_counter ("Event: Coverage methods", &coverage_methods_ctr);
4520         register_counter ("Event: Coverage statements", &coverage_statements_ctr);
4521         register_counter ("Event: Coverage classes", &coverage_classes_ctr);
4522         register_counter ("Event: Coverage assemblies", &coverage_assemblies_ctr);
4523
4524         counters_init (profiler);
4525
4526         /*
4527          * We must start the helper thread before the writer thread: the helper
4528          * thread sets up the command port, which the writer thread writes into
4529          * the log header.
4530          */
4531         start_helper_thread (profiler);
4532         start_writer_thread (profiler);
4533         start_dumper_thread (profiler);
4534 }
4535
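/*
 * Allocates the global profiler state and opens the output destination. A
 * leading '-' on the filename (deprecated) forces deletion of an existing
 * file; a leading '+' appends the current pid, e.g. a hypothetical
 * "+app.mlpd" becomes "app.mlpd.<pid>".
 */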
4536 static void
4537 create_profiler (const char *args, const char *filename, GPtrArray *filters)
4538 {
4539         char *nf;
4540         int force_delete = 0;
4541
4542         log_profiler = (MonoProfiler *) g_calloc (1, sizeof (MonoProfiler));
4543         log_profiler->args = pstrdup (args);
4544         log_profiler->command_port = command_port;
4545
4546         if (filename && *filename == '-') {
4547                 force_delete = 1;
4548                 filename++;
4549                 g_warning ("WARNING: the output:-FILENAME option is deprecated, the profiler now always overwrites the output file\n");
4550         }
4551
4552         // If the filename begins with '+', append the pid at the end.
4553         if (filename && *filename == '+')
4554                 filename = g_strdup_printf ("%s.%d", filename + 1, getpid ());
4555
4556
4557         if (!filename) {
4558                 if (do_report)
4559                         filename = "|mprof-report -";
4560                 else
4561                         filename = "output.mlpd";
4562                 nf = (char*)filename;
4563         } else {
4564                 nf = new_filename (filename);
4565                 if (do_report) {
4566                         int s = strlen (nf) + 32;
4567                         char *p = (char *) g_malloc (s);
4568                         snprintf (p, s, "|mprof-report '--out=%s' -", nf);
4569                         g_free (nf);
4570                         nf = p;
4571                 }
4572         }
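        /*
         * Output dispatch: a name starting with '|' is treated as a command to
         * pipe the log into, '#' followed by a number writes to that already
         * open file descriptor, and anything else is opened as a regular file
         * (after unlinking it if deletion was forced above).
         */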
4573         if (*nf == '|') {
4574                 log_profiler->file = popen (nf + 1, "w");
4575                 log_profiler->pipe_output = 1;
4576         } else if (*nf == '#') {
4577                 int fd = strtol (nf + 1, NULL, 10);
4578                 log_profiler->file = fdopen (fd, "a");
4579         } else {
4580                 if (force_delete)
4581                         unlink (nf);
4582                 log_profiler->file = fopen (nf, "wb");
4583         }
4584         if (!log_profiler->file) {
4585                 fprintf (stderr, "Cannot create profiler output: %s\n", nf);
4586                 exit (1);
4587         }
4588
4589 #if defined (HAVE_SYS_ZLIB)
4590         if (use_zip)
4591                 log_profiler->gzfile = gzdopen (fileno (log_profiler->file), "wb");
4592 #endif
4593
4594         /*
4595          * If you hit this assert while increasing MAX_FRAMES, you need to increase
4596          * SAMPLE_BLOCK_SIZE as well.
4597          */
4598         g_assert (SAMPLE_SLOT_SIZE (MAX_FRAMES) * 2 < LOCK_FREE_ALLOC_SB_USABLE_SIZE (SAMPLE_BLOCK_SIZE));
4599
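        /*
         * Lock-free allocator for sample hit events: each slot is sized to hold
         * up to num_frames stack frames and is carved out of blocks of
         * SAMPLE_BLOCK_SIZE bytes.
         */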
4600         // FIXME: We should free this stuff too.
4601         mono_lock_free_allocator_init_size_class (&log_profiler->sample_size_class, SAMPLE_SLOT_SIZE (num_frames), SAMPLE_BLOCK_SIZE);
4602         mono_lock_free_allocator_init_allocator (&log_profiler->sample_allocator, &log_profiler->sample_size_class, MONO_MEM_ACCOUNT_PROFILER);
4603
4604         mono_lock_free_queue_init (&log_profiler->sample_reuse_queue);
4605
4606         g_assert (sizeof (WriterQueueEntry) * 2 < LOCK_FREE_ALLOC_SB_USABLE_SIZE (WRITER_ENTRY_BLOCK_SIZE));
4607
4608         // FIXME: We should free this stuff too.
4609         mono_lock_free_allocator_init_size_class (&log_profiler->writer_entry_size_class, sizeof (WriterQueueEntry), WRITER_ENTRY_BLOCK_SIZE);
4610         mono_lock_free_allocator_init_allocator (&log_profiler->writer_entry_allocator, &log_profiler->writer_entry_size_class, MONO_MEM_ACCOUNT_PROFILER);
4611
4612         mono_lock_free_queue_init (&log_profiler->writer_queue);
4613         mono_os_sem_init (&log_profiler->writer_queue_sem, 0);
4614
4615         mono_lock_free_queue_init (&log_profiler->dumper_queue);
4616         mono_os_sem_init (&log_profiler->dumper_queue_sem, 0);
4617
4618         mono_os_mutex_init (&log_profiler->method_table_mutex);
4619         log_profiler->method_table = mono_conc_hashtable_new (NULL, NULL);
4620
4621         if (do_coverage)
4622                 coverage_init (log_profiler);
4623         log_profiler->coverage_filters = filters;
4624
4625         log_profiler->startup_time = current_time ();
4626 }
4627
4628 /*
4629  * declaration to silence the compiler: this is the entry point that
4630  * mono will load from the shared library and call.
4631  */
4632 extern void
4633 mono_profiler_startup (const char *desc);
4634
4635 extern void
4636 mono_profiler_startup_log (const char *desc);
4637
4638 /*
4639  * this is the entry point that will be used when the profiler
4640  * is embedded inside the main executable.
4641  */
4642 void
4643 mono_profiler_startup_log (const char *desc)
4644 {
4645         mono_profiler_startup (desc);
4646 }
4647
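/*
 * The runtime hands us the raw profiler description string; everything after
 * the "log:" prefix (for instance, hypothetically, "sample,output=app.mlpd")
 * is passed on to proflog_parse_args below.
 */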
4648 void
4649 mono_profiler_startup (const char *desc)
4650 {
4651         GPtrArray *filters = NULL;
4652
4653         proflog_parse_args (&config, desc [3] == ':' ? desc + 4 : "");
4654
4655         // XXX: maybe clean this up later to use config directly
4656         nocalls = !(config.effective_mask & PROFLOG_CALL_EVENTS);
4657         no_counters = !(config.effective_mask & PROFLOG_COUNTER_EVENTS);
4658         do_report = config.do_report;
4659         do_debug = config.do_debug;
4660         do_heap_shot = (config.effective_mask & PROFLOG_HEAPSHOT_FEATURE);
4661         hs_mode_ondemand = config.hs_mode_ondemand;
4662         hs_mode_ms = config.hs_mode_ms;
4663         hs_mode_gc = config.hs_mode_gc;
4664         do_mono_sample = (config.effective_mask & PROFLOG_SAMPLING_FEATURE);
4665         use_zip = config.use_zip;
4666         command_port = config.command_port;
4667         num_frames = config.num_frames;
4668         notraces = config.notraces;
4669         max_allocated_sample_hits = config.max_allocated_sample_hits;
4670         max_call_depth = config.max_call_depth;
4671         do_coverage = (config.effective_mask & PROFLOG_CODE_COV_FEATURE);
4672         debug_coverage = config.debug_coverage;
4673         only_coverage = config.only_coverage;
4674
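        /*
         * Parse any coverage filter files named in the options; each file adds
         * filter entries that the coverage filter callback consults later.
         */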
4675         if (config.cov_filter_files) {
4676                 filters = g_ptr_array_new ();
4677                 int i;
4678                 for (i = 0; i < config.cov_filter_files->len; ++i) {
4679                         const char *name = config.cov_filter_files->pdata [i];
4680                         parse_cov_filter_file (filters, name);
4681                 }
4682         }
4683
4684         init_time ();
4685
4686         PROF_TLS_INIT ();
4687
4688         create_profiler (desc, config.output_filename, filters);
4689
4690         mono_lls_init (&profiler_thread_list, NULL);
4691
4692         // These two events are required for the profiler to work
4693         int events = MONO_PROFILE_THREADS | MONO_PROFILE_GC;
4694
4695         //Required callbacks
4696         mono_profiler_install (log_profiler, log_shutdown);
4697         mono_profiler_install_runtime_initialized (runtime_initialized);
4698
4699         mono_profiler_install_gc (gc_event, gc_resize);
4700         mono_profiler_install_thread (thread_start, thread_end);
4701
4702         // It's questionable whether we actually want this to be mandatory; maybe put it behind the actual event?
4703         mono_profiler_install_thread_name (thread_name);
4704
4705
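        /*
         * Map each enabled PROFLOG_* feature to the corresponding MONO_PROFILE_*
         * flag and install its callbacks; the accumulated mask is applied at the
         * end via mono_profiler_set_events ().
         */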
4706         if (config.effective_mask & PROFLOG_DOMAIN_EVENTS) {
4707                 events |= MONO_PROFILE_APPDOMAIN_EVENTS;
4708                 mono_profiler_install_appdomain (NULL, domain_loaded, domain_unloaded, NULL);
4709                 mono_profiler_install_appdomain_name (domain_name);
4710         }
4711
4712         if (config.effective_mask & PROFLOG_ASSEMBLY_EVENTS) {
4713                 events |= MONO_PROFILE_ASSEMBLY_EVENTS;
4714                 mono_profiler_install_assembly (NULL, assembly_loaded, assembly_unloaded, NULL);
4715         }
4716
4717         if (config.effective_mask & PROFLOG_MODULE_EVENTS) {
4718                 events |= MONO_PROFILE_MODULE_EVENTS;
4719                 mono_profiler_install_module (NULL, image_loaded, image_unloaded, NULL);
4720         }
4721
4722         if (config.effective_mask & PROFLOG_CLASS_EVENTS) {
4723                 events |= MONO_PROFILE_CLASS_EVENTS;
4724                 mono_profiler_install_class (NULL, class_loaded, NULL, NULL);
4725         }
4726
4727         if (config.effective_mask & PROFLOG_JIT_COMPILATION_EVENTS) {
4728                 events |= MONO_PROFILE_JIT_COMPILATION;
4729                 mono_profiler_install_jit_end (method_jitted);
4730                 mono_profiler_install_code_buffer_new (code_buffer_new);
4731         }
4732
4733         if (config.effective_mask & PROFLOG_EXCEPTION_EVENTS) {
4734                 events |= MONO_PROFILE_EXCEPTIONS;
4735                 mono_profiler_install_exception (throw_exc, method_exc_leave, NULL);
4736                 mono_profiler_install_exception_clause (clause_exc);
4737         }
4738
4739         if (config.effective_mask & PROFLOG_ALLOCATION_EVENTS) {
4740                 events |= MONO_PROFILE_ALLOCATIONS;
4741                 mono_profiler_install_allocation (gc_alloc);
4742         }
4743
4744         //PROFLOG_GC_EVENTS is mandatory
4745         //PROFLOG_THREAD_EVENTS is mandatory
4746
4747         if (config.effective_mask & PROFLOG_CALL_EVENTS) {
4748                 events |= MONO_PROFILE_ENTER_LEAVE;
4749                 mono_profiler_install_enter_leave (method_enter, method_leave);
4750         }
4751
4752         if (config.effective_mask & PROFLOG_INS_COVERAGE_EVENTS) {
4753                 events |= MONO_PROFILE_INS_COVERAGE;
4754                 mono_profiler_install_coverage_filter (coverage_filter);
4755         }
4756
4757         // XXX: should we check for PROFLOG_SAMPLING_FEATURE instead?
4758         if (config.effective_mask & PROFLOG_SAMPLING_EVENTS) {
4759                 events |= MONO_PROFILE_STATISTICAL;
4760                 mono_profiler_set_statistical_mode (config.sampling_mode, config.sample_freq);
4761                 mono_profiler_install_statistical (mono_sample_hit);
4762         }
4763
4764         if (config.effective_mask & PROFLOG_MONITOR_EVENTS) {
4765                 events |= MONO_PROFILE_MONITOR_EVENTS;
4766                 mono_profiler_install_monitor (monitor_event);
4767         }
4768
4769         if (config.effective_mask & PROFLOG_GC_MOVES_EVENTS) {
4770                 events |= MONO_PROFILE_GC_MOVES;
4771                 mono_profiler_install_gc_moves (gc_moves);
4772         }
4773
4774         // TODO: split these into two profiler events
4775         if (config.effective_mask & (PROFLOG_GC_ROOT_EVENTS | PROFLOG_GC_HANDLE_EVENTS)) {
4776                 events |= MONO_PROFILE_GC_ROOTS;
4777                 mono_profiler_install_gc_roots (
4778                         (config.effective_mask & PROFLOG_GC_HANDLE_EVENTS) ? gc_handle : NULL,
4779                         (config.effective_mask & PROFLOG_GC_ROOT_EVENTS) ? gc_roots : NULL);
4780         }
4781
4782         if (config.effective_mask & PROFLOG_CONTEXT_EVENTS) {
4783                 events |= MONO_PROFILE_CONTEXT_EVENTS;
4784                 mono_profiler_install_context (context_loaded, context_unloaded);
4785         }
4786
4787         if (config.effective_mask & PROFLOG_FINALIZATION_EVENTS) {
4788                 events |= MONO_PROFILE_GC_FINALIZATION;
4789                 mono_profiler_install_gc_finalize (finalize_begin, finalize_object_begin, finalize_object_end, finalize_end);
4790         } else if (ENABLED (PROFLOG_HEAPSHOT_FEATURE) && config.hs_mode_ondemand) {
4791                 // On-demand heapshot uses the finalizer thread to force a collection and thus a heapshot
4792                 events |= MONO_PROFILE_GC_FINALIZATION;
4793                 mono_profiler_install_gc_finalize (NULL, NULL, NULL, finalize_end);
4794         }
4795
4796         // PROFLOG_COUNTER_EVENTS is a pseudo event controlled by the no_counters global var
4797         //PROFLOG_GC_HANDLE_EVENTS is handled together with PROFLOG_GC_ROOT_EVENTS
4798
4799         mono_profiler_set_events ((MonoProfileFlags)events);
4800 }