1 /*
2  * mono-profiler-log.c: mono log profiler
3  *
4  * Authors:
5  *   Paolo Molaro (lupus@ximian.com)
6  *   Alex Rønne Petersen (alexrp@xamarin.com)
7  *
8  * Copyright 2010 Novell, Inc (http://www.novell.com)
9  * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
10  * Licensed under the MIT license. See LICENSE file in the project root for full license information.
11  */
12
13 #include <config.h>
14 #include <mono/metadata/assembly.h>
15 #include <mono/metadata/debug-helpers.h>
16 #include "../metadata/metadata-internals.h"
17 #include <mono/metadata/mono-config.h>
18 #include <mono/metadata/mono-gc.h>
19 #include <mono/metadata/mono-perfcounters.h>
20 #include <mono/utils/atomic.h>
21 #include <mono/utils/hazard-pointer.h>
22 #include <mono/utils/lock-free-alloc.h>
23 #include <mono/utils/lock-free-queue.h>
24 #include <mono/utils/mono-conc-hashtable.h>
25 #include <mono/utils/mono-counters.h>
26 #include <mono/utils/mono-linked-list-set.h>
27 #include <mono/utils/mono-membar.h>
28 #include <mono/utils/mono-mmap.h>
29 #include <mono/utils/mono-os-mutex.h>
30 #include <mono/utils/mono-os-semaphore.h>
31 #include <mono/utils/mono-threads.h>
32 #include <mono/utils/mono-threads-api.h>
33 #include "log.h"
34
35 #ifdef HAVE_DLFCN_H
36 #include <dlfcn.h>
37 #endif
38 #include <fcntl.h>
39 #ifdef HAVE_LINK_H
40 #include <link.h>
41 #endif
42 #ifdef HAVE_UNISTD_H
43 #include <unistd.h>
44 #endif
45 #if defined(__APPLE__)
46 #include <mach/mach_time.h>
47 #endif
48 #include <netinet/in.h>
49 #ifdef HAVE_SYS_MMAN_H
50 #include <sys/mman.h>
51 #endif
52 #include <sys/socket.h>
53 #if defined (HAVE_SYS_ZLIB)
54 #include <zlib.h>
55 #endif
56
57 #define BUFFER_SIZE (4096 * 16)
58
59 /* Worst-case size in bytes of a 64-bit value encoded with LEB128. */
60 #define LEB128_SIZE 10
61
62 /* Size of a value encoded as a single byte. */
63 #undef BYTE_SIZE // mach/i386/vm_param.h on OS X defines this to 8, but it isn't used for anything.
64 #define BYTE_SIZE 1
65
66 /* Size in bytes of the event prefix (ID + time). */
67 #define EVENT_SIZE (BYTE_SIZE + LEB128_SIZE)
68
69 static volatile gint32 runtime_inited;
70 static volatile gint32 in_shutdown;
71
72 static ProfilerConfig config;
73 static int nocalls = 0;
74 static int notraces = 0;
75 static int use_zip = 0;
76 static int do_report = 0;
77 static int do_heap_shot = 0;
78 static int max_call_depth = 0;
79 static int command_port = 0;
80 static int heapshot_requested = 0;
81 static int do_mono_sample = 0;
82 static int do_debug = 0;
83 static int do_coverage = 0;
84 static gboolean no_counters = FALSE;
85 static gboolean only_coverage = FALSE;
86 static gboolean debug_coverage = FALSE;
87 static int max_allocated_sample_hits;
88
89 #define ENABLED(EVT) (config.effective_mask & (EVT))
90
91 // Statistics for internal profiler data structures.
92 static gint32 sample_allocations_ctr,
93               buffer_allocations_ctr;
94
95 // Statistics for profiler events.
96 static gint32 sync_points_ctr,
97               heap_objects_ctr,
98               heap_starts_ctr,
99               heap_ends_ctr,
100               heap_roots_ctr,
101               gc_events_ctr,
102               gc_resizes_ctr,
103               gc_allocs_ctr,
104               gc_moves_ctr,
105               gc_handle_creations_ctr,
106               gc_handle_deletions_ctr,
107               finalize_begins_ctr,
108               finalize_ends_ctr,
109               finalize_object_begins_ctr,
110               finalize_object_ends_ctr,
111               image_loads_ctr,
112               image_unloads_ctr,
113               assembly_loads_ctr,
114               assembly_unloads_ctr,
115               class_loads_ctr,
116               class_unloads_ctr,
117               method_entries_ctr,
118               method_exits_ctr,
119               method_exception_exits_ctr,
120               method_jits_ctr,
121               code_buffers_ctr,
122               exception_throws_ctr,
123               exception_clauses_ctr,
124               monitor_events_ctr,
125               thread_starts_ctr,
126               thread_ends_ctr,
127               thread_names_ctr,
128               domain_loads_ctr,
129               domain_unloads_ctr,
130               domain_names_ctr,
131               context_loads_ctr,
132               context_unloads_ctr,
133               sample_ubins_ctr,
134               sample_usyms_ctr,
135               sample_hits_ctr,
136               counter_descriptors_ctr,
137               counter_samples_ctr,
138               perfcounter_descriptors_ctr,
139               perfcounter_samples_ctr,
140               coverage_methods_ctr,
141               coverage_statements_ctr,
142               coverage_classes_ctr,
143               coverage_assemblies_ctr;
144
145 static MonoLinkedListSet profiler_thread_list;
146
147 /*
148  * file format:
149  * [header] [buffer]*
150  *
151  * The file is composed of a header followed by 0 or more buffers.
152  * Each buffer contains events that happened on a thread: for a given thread
153  * buffers that appear later in the file are guaranteed to contain events
154  * that happened later in time. Buffers from separate threads could be interleaved,
155  * though.
156  * Buffers are not required to be aligned.
157  *
158  * header format:
159  * [id: 4 bytes] constant value: LOG_HEADER_ID
160  * [major: 1 byte] [minor: 1 byte] major and minor version of the log profiler
161  * [format: 1 byte] version of the data format for the rest of the file
162  * [ptrsize: 1 byte] size in bytes of a pointer in the profiled program
163  * [startup time: 8 bytes] time in milliseconds since the unix epoch when the program started
164  * [timer overhead: 4 bytes] approximate overhead in nanoseconds of the timer
165  * [flags: 4 bytes] file format flags, should be 0 for now
166  * [pid: 4 bytes] pid of the profiled process
167  * [port: 2 bytes] tcp port for server if != 0
168  * [args size: 4 bytes] size of args
169  * [args: string] arguments passed to the profiler
170  * [arch size: 4 bytes] size of arch
171  * [arch: string] architecture the profiler is running on
172  * [os size: 4 bytes] size of os
173  * [os: string] operating system the profiler is running on
174  *
175  * Multi-byte integers are stored in little-endian format.
176  *
177  * buffer format:
178  * [buffer header] [event]*
179  * Buffers have a fixed-size header followed by 0 or more bytes of event data.
180  * Timing information and other values in the event data are usually stored
181  * as uleb128 or sleb128 integers. To save space, as noted for each item below,
182  * some data is represented as a difference between the actual value and
183  * either the last value of the same type (as with timing information) or
184  * a base value stored in the buffer header.
185  *
186  * For timing information the data is stored as uleb128, since timing
187  * increases in a monotonic way in each thread: the value is the number of
188  * nanoseconds to add to the last seen timing data in a buffer. The first value
189  * in a buffer will be calculated from the time_base field in the buffer header.
190  *
191  * Object or heap sizes are stored as uleb128.
192  * Pointer differences are stored as sleb128, instead.
193  *
194  * If an unexpected value is found, the rest of the buffer should be ignored,
195  * as later values generally depend on earlier ones to be interpreted correctly.
196  *
197  * buffer header format:
198  * [bufid: 4 bytes] constant value: BUF_ID
199  * [len: 4 bytes] size of the data following the buffer header
200  * [time_base: 8 bytes] time base in nanoseconds since an unspecified epoch
201  * [ptr_base: 8 bytes] base value for pointers
202  * [obj_base: 8 bytes] base value for object addresses
203  * [thread id: 8 bytes] system-specific thread ID (pthread_t for example)
204  * [method_base: 8 bytes] base value for MonoMethod pointers
205  *
206  * event format:
207  * [extended info: upper 4 bits] [type: lower 4 bits]
208  * [time diff: uleb128] nanoseconds since last timing
209  * [data]*
210  * The data that follows depends on type and the extended info.
211  * Type is one of the enum values in mono-profiler-log.h: TYPE_ALLOC, TYPE_GC,
212  * TYPE_METADATA, TYPE_METHOD, TYPE_EXCEPTION, TYPE_MONITOR, TYPE_HEAP.
213  * The extended info bits are interpreted based on type, see
214  * each individual event description below.
215  * Strings are represented as 0-terminated UTF-8 sequences.
216  *
217  * backtrace format:
218  * [num: uleb128] number of frames following
219  * [frame: sleb128]* num MonoMethod* as a pointer difference from the last such
220  * pointer or the buffer method_base
221  *
222  * type alloc format:
223  * type: TYPE_ALLOC
224  * exinfo: zero or TYPE_ALLOC_BT
225  * [ptr: sleb128] class as a byte difference from ptr_base
226  * [obj: sleb128] object address as a byte difference from obj_base
227  * [size: uleb128] size of the object in the heap
228  * If exinfo == TYPE_ALLOC_BT, a backtrace follows.
229  *
230  * type GC format:
231  * type: TYPE_GC
232  * exinfo: one of TYPE_GC_EVENT, TYPE_GC_RESIZE, TYPE_GC_MOVE, TYPE_GC_HANDLE_CREATED[_BT],
233  * TYPE_GC_HANDLE_DESTROYED[_BT], TYPE_GC_FINALIZE_START, TYPE_GC_FINALIZE_END,
234  * TYPE_GC_FINALIZE_OBJECT_START, TYPE_GC_FINALIZE_OBJECT_END
235  * if exinfo == TYPE_GC_RESIZE
236  *      [heap_size: uleb128] new heap size
237  * if exinfo == TYPE_GC_EVENT
238  *      [event type: byte] GC event (MONO_GC_EVENT_* from profiler.h)
239  *      [generation: byte] GC generation event refers to
240  * if exinfo == TYPE_GC_MOVE
241  *      [num_objects: uleb128] number of object moves that follow
242  *      [objaddr: sleb128]+ num_objects object pointer differences from obj_base
243  *      num_objects is always an even number: the even-indexed items are the old
244  *      addresses, the odd-indexed items are the corresponding new object addresses
245  * if exinfo == TYPE_GC_HANDLE_CREATED[_BT]
246  *      [handle_type: uleb128] GC handle type (System.Runtime.InteropServices.GCHandleType)
247  *      upper bits reserved as flags
248  *      [handle: uleb128] GC handle value
249  *      [objaddr: sleb128] object pointer difference from obj_base
250  *      If exinfo == TYPE_GC_HANDLE_CREATED_BT, a backtrace follows.
251  * if exinfo == TYPE_GC_HANDLE_DESTROYED[_BT]
252  *      [handle_type: uleb128] GC handle type (System.Runtime.InteropServices.GCHandleType)
253  *      upper bits reserved as flags
254  *      [handle: uleb128] GC handle value
255  *      If exinfo == TYPE_GC_HANDLE_DESTROYED_BT, a backtrace follows.
256  * if exinfo == TYPE_GC_FINALIZE_OBJECT_{START,END}
257  *      [object: sleb128] the object as a difference from obj_base
258  *
259  * type metadata format:
260  * type: TYPE_METADATA
261  * exinfo: one of: TYPE_END_LOAD, TYPE_END_UNLOAD (optional for TYPE_THREAD and TYPE_DOMAIN)
262  * [mtype: byte] metadata type, one of: TYPE_CLASS, TYPE_IMAGE, TYPE_ASSEMBLY, TYPE_DOMAIN,
263  * TYPE_THREAD, TYPE_CONTEXT
264  * [pointer: sleb128] pointer of the metadata type depending on mtype
265  * if mtype == TYPE_CLASS
266  *      [image: sleb128] MonoImage* as a pointer difference from ptr_base
267  *      [name: string] full class name
268  * if mtype == TYPE_IMAGE
269  *      [name: string] image file name
270  * if mtype == TYPE_ASSEMBLY
271  *      [image: sleb128] MonoImage* as a pointer difference from ptr_base
272  *      [name: string] assembly name
273  * if mtype == TYPE_DOMAIN && exinfo == 0
274  *      [name: string] domain friendly name
275  * if mtype == TYPE_CONTEXT
276  *      [domain: sleb128] domain id as pointer
277  * if mtype == TYPE_THREAD && exinfo == 0
278  *      [name: string] thread name
279  *
280  * type method format:
281  * type: TYPE_METHOD
282  * exinfo: one of: TYPE_LEAVE, TYPE_ENTER, TYPE_EXC_LEAVE, TYPE_JIT
283  * [method: sleb128] MonoMethod* as a pointer difference from the last such
284  * pointer or the buffer method_base
285  * if exinfo == TYPE_JIT
286  *      [code address: sleb128] pointer to the native code as a diff from ptr_base
287  *      [code size: uleb128] size of the generated code
288  *      [name: string] full method name
289  *
290  * type exception format:
291  * type: TYPE_EXCEPTION
292  * exinfo: zero, TYPE_CLAUSE, or TYPE_THROW_BT
293  * if exinfo == TYPE_CLAUSE
294  *      [clause type: byte] MonoExceptionEnum enum value
295  *      [clause index: uleb128] index of the current clause
296  *      [method: sleb128] MonoMethod* as a pointer difference from the last such
297  *      pointer or the buffer method_base
298  *      [object: sleb128] the exception object as a difference from obj_base
299  * else
300  *      [object: sleb128] the exception object as a difference from obj_base
301  *      If exinfo == TYPE_THROW_BT, a backtrace follows.
302  *
303  * type runtime format:
304  * type: TYPE_RUNTIME
305  * exinfo: one of: TYPE_JITHELPER
306  * if exinfo == TYPE_JITHELPER
307  *      [type: byte] MonoProfilerCodeBufferType enum value
308  *      [buffer address: sleb128] pointer to the native code as a diff from ptr_base
309  *      [buffer size: uleb128] size of the generated code
310  *      if type == MONO_PROFILER_CODE_BUFFER_SPECIFIC_TRAMPOLINE
311  *              [name: string] buffer description name
312  *
313  * type monitor format:
314  * type: TYPE_MONITOR
315  * exinfo: zero or TYPE_MONITOR_BT
316  * [type: byte] MONO_PROFILER_MONITOR_{CONTENTION,FAIL,DONE}
317  * [object: sleb128] the lock object as a difference from obj_base
318  * If exinfo == TYPE_MONITOR_BT, a backtrace follows.
319  *
320  * type heap format
321  * type: TYPE_HEAP
322  * exinfo: one of TYPE_HEAP_START, TYPE_HEAP_END, TYPE_HEAP_OBJECT, TYPE_HEAP_ROOT
323  * if exinfo == TYPE_HEAP_OBJECT
324  *      [object: sleb128] the object as a difference from obj_base
325  *      [class: sleb128] the object MonoClass* as a difference from ptr_base
326  *      [size: uleb128] size of the object on the heap
327  *      [num_refs: uleb128] number of object references
328  *      each referenced objref is preceded by a uleb128 encoded offset: the
329  *      first offset is from the object address and each next offset is relative
330  *      to the previous one
331  *      [objrefs: sleb128]+ object referenced as a difference from obj_base
332  *      The same object can appear multiple times, but only the first time
333  *      with size != 0: in the other cases this data will only be used to
334  *      provide additional referenced objects.
335  * if exinfo == TYPE_HEAP_ROOT
336  *      [num_roots: uleb128] number of root references
337  *      [num_gc: uleb128] number of major gcs
338  *      [object: sleb128] the object as a difference from obj_base
339  *      [root_type: byte] the root_type: MonoProfileGCRootType (profiler.h)
340  *      [extra_info: uleb128] the extra_info value
341  *      object, root_type and extra_info are repeated num_roots times
342  *
343  * type sample format
344  * type: TYPE_SAMPLE
345  * exinfo: one of TYPE_SAMPLE_HIT, TYPE_SAMPLE_USYM, TYPE_SAMPLE_UBIN, TYPE_SAMPLE_COUNTERS_DESC, TYPE_SAMPLE_COUNTERS
346  * if exinfo == TYPE_SAMPLE_HIT
347  *      [thread: sleb128] thread id as difference from ptr_base
348  *      [count: uleb128] number of following instruction addresses
349  *      [ip: sleb128]* instruction pointer as difference from ptr_base
350  *      [mbt_count: uleb128] number of managed backtrace frames
351  *      [method: sleb128]* MonoMethod* as a pointer difference from the last such
352  *      pointer or the buffer method_base (the first such method can also be identified by ip, but this is not necessarily true)
353  * if exinfo == TYPE_SAMPLE_USYM
354  *      [address: sleb128] symbol address as a difference from ptr_base
355  *      [size: uleb128] symbol size (may be 0 if unknown)
356  *      [name: string] symbol name
357  * if exinfo == TYPE_SAMPLE_UBIN
358  *      [address: sleb128] address where binary has been loaded as a difference from ptr_base
359  *      [offset: uleb128] file offset of mapping (the same file can be mapped multiple times)
360  *      [size: uleb128] memory size
361  *      [name: string] binary name
362  * if exinfo == TYPE_SAMPLE_COUNTERS_DESC
363  *      [len: uleb128] number of counters
364  *      for i = 0 to len
365  *              [section: uleb128] section of counter
366  *              if section == MONO_COUNTER_PERFCOUNTERS:
367  *                      [section_name: string] section name of counter
368  *              [name: string] name of counter
369  *              [type: byte] type of counter
370  *              [unit: byte] unit of counter
371  *              [variance: byte] variance of counter
372  *              [index: uleb128] unique index of counter
373  * if exinfo == TYPE_SAMPLE_COUNTERS
374  *      while true:
375  *              [index: uleb128] unique index of counter
376  *              if index == 0:
377  *                      break
378  *              [type: byte] type of counter value
379  *              if type == string:
380  *                      if value == null:
381  *                              [0: byte] 0 -> value is null
382  *                      else:
383  *                              [1: byte] 1 -> value is not null
384  *                              [value: string] counter value
385  *              else:
386  *                      [value: uleb128/sleb128/double] counter value, can be sleb128, uleb128 or double (determined by type)
387  *
388  * type coverage format
389  * type: TYPE_COVERAGE
390  * exinfo: one of TYPE_COVERAGE_METHOD, TYPE_COVERAGE_STATEMENT, TYPE_COVERAGE_ASSEMBLY, TYPE_COVERAGE_CLASS
391  * if exinfo == TYPE_COVERAGE_METHOD
392  *  [assembly: string] name of assembly
393  *  [class: string] name of the class
394  *  [name: string] name of the method
395  *  [signature: string] the signature of the method
396  *  [filename: string] the file path of the file that contains this method
397  *  [token: uleb128] the method token
398  *  [method_id: uleb128] an ID for this data, referenced by the TYPE_COVERAGE_STATEMENT events that follow
399  *  [len: uleb128] the number of TYPE_COVERAGE_STATEMENT events associated with this method
400  * if exinfo == TYPE_COVERAGE_STATEMENT
401  *  [method_id: uleb128] the method_id of the TYPE_COVERAGE_METHOD event to associate this statement with
402  *  [offset: uleb128] the il offset relative to the previous offset
403  *  [counter: uleb128] the counter for this instruction
404  *  [line: uleb128] the line of filename containing this instruction
405  *  [column: uleb128] the column containing this instruction
406  * if exinfo == TYPE_COVERAGE_ASSEMBLY
407  *  [name: string] assembly name
408  *  [guid: string] assembly GUID
409  *  [filename: string] assembly filename
410  *  [number_of_methods: uleb128] the number of methods in this assembly
411  *  [fully_covered: uleb128] the number of fully covered methods
412  *  [partially_covered: uleb128] the number of partially covered methods
413  *    currently partially_covered will always be 0, and fully_covered is the
414  *    number of methods that are fully or partially covered.
415  * if exinfo == TYPE_COVERAGE_CLASS
416  *  [name: string] assembly name
417  *  [class: string] class name
418  *  [number_of_methods: uleb128] the number of methods in this class
419  *  [fully_covered: uleb128] the number of fully covered methods
420  *  [partially_covered: uleb128] the number of partially covered methods
421  *    currently partially_covered will always be 0, and fully_covered is the
422  *    number of methods that are fully or partially covered.
423  *
424  * type meta format:
425  * type: TYPE_META
426  * exinfo: one of: TYPE_SYNC_POINT
427  * if exinfo == TYPE_SYNC_POINT
428  *      [type: byte] MonoProfilerSyncPointType enum value
429  */
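
/*
 * Illustrative sketch only, not used by the profiler: a reader-side decoder
 * for the uleb128 values described above, mirroring encode_uleb128 ()
 * defined further down in this file. The function name is hypothetical.
 */
static uint64_t
example_decode_uleb128 (const uint8_t *p, const uint8_t **endp)
{
        uint64_t value = 0;
        int shift = 0;
        uint8_t b;

        do {
                b = *p ++;
                value |= (uint64_t) (b & 0x7f) << shift;
                shift += 7;
        } while (b & 0x80); /* high bit set means more bytes follow */

        *endp = p;

        return value;
}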
430
431 // Pending data to be written to the log, for a single thread.
432 // Threads periodically flush their own LogBuffers by calling send_log_unsafe ().
433 typedef struct _LogBuffer LogBuffer;
434 struct _LogBuffer {
435         // Next (older) LogBuffer in processing queue
436         LogBuffer *next;
437
438         uint64_t time_base;
439         uint64_t last_time;
440         uintptr_t ptr_base;
441         uintptr_t method_base;
442         uintptr_t last_method;
443         uintptr_t obj_base;
444         uintptr_t thread_id;
445
446         // Bytes allocated for this LogBuffer
447         int size;
448
449         // Start of currently unused space in buffer
450         unsigned char* cursor;
451
452         // Pointer to start-of-structure-plus-size (for convenience)
453         unsigned char* buf_end;
454
455         // Start of data in buffer. Contents follow "buffer format" described above.
456         unsigned char buf [1];
457 };
458
459 typedef struct {
460         MonoLinkedListSetNode node;
461
462         // Convenience pointer to the profiler structure.
463         MonoProfiler *profiler;
464
465         // Was this thread added to the LLS?
466         gboolean attached;
467
468         // The current log buffer for this thread.
469         LogBuffer *buffer;
470
471         // Methods referenced by events in `buffer`, see `MethodInfo`.
472         GPtrArray *methods;
473
474         // Current call depth for enter/leave events.
475         int call_depth;
476
477         // Indicates whether this thread is currently writing to its `buffer`.
478         gboolean busy;
479
480         // Has this thread written a thread end event to `buffer`?
481         gboolean ended;
482
483         // Stored in `buffer_lock_state` to take the exclusive lock.
484         int small_id;
485 } MonoProfilerThread;
486
487 // Do not use these TLS macros directly unless you know what you're doing.
488
489 #ifdef HOST_WIN32
490
491 #define PROF_TLS_SET(VAL) (TlsSetValue (profiler_tls, (VAL)))
492 #define PROF_TLS_GET() ((MonoProfilerThread *) TlsGetValue (profiler_tls))
493 #define PROF_TLS_INIT() (profiler_tls = TlsAlloc ())
494 #define PROF_TLS_FREE() (TlsFree (profiler_tls))
495
496 static DWORD profiler_tls;
497
498 #elif HAVE_KW_THREAD
499
500 #define PROF_TLS_SET(VAL) (profiler_tls = (VAL))
501 #define PROF_TLS_GET() (profiler_tls)
502 #define PROF_TLS_INIT()
503 #define PROF_TLS_FREE()
504
505 static __thread MonoProfilerThread *profiler_tls;
506
507 #else
508
509 #define PROF_TLS_SET(VAL) (pthread_setspecific (profiler_tls, (VAL)))
510 #define PROF_TLS_GET() ((MonoProfilerThread *) pthread_getspecific (profiler_tls))
511 #define PROF_TLS_INIT() (pthread_key_create (&profiler_tls, NULL))
512 #define PROF_TLS_FREE() (pthread_key_delete (profiler_tls))
513
514 static pthread_key_t profiler_tls;
515
516 #endif
517
518 static uintptr_t
519 thread_id (void)
520 {
521         return (uintptr_t) mono_native_thread_id_get ();
522 }
523
524 static uintptr_t
525 process_id (void)
526 {
527 #ifdef HOST_WIN32
528         return (uintptr_t) GetCurrentProcessId ();
529 #else
530         return (uintptr_t) getpid ();
531 #endif
532 }
533
534 #ifdef __APPLE__
535 static mach_timebase_info_data_t timebase_info;
536 #elif defined (HOST_WIN32)
537 static LARGE_INTEGER pcounter_freq;
538 #endif
539
540 #define TICKS_PER_SEC 1000000000LL
541
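/*
 * Returns the current time in nanoseconds (see TICKS_PER_SEC), using a
 * monotonic clock where the platform provides one.
 */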
542 static uint64_t
543 current_time (void)
544 {
545 #ifdef __APPLE__
546         uint64_t time = mach_absolute_time ();
547
548         time *= timebase_info.numer;
549         time /= timebase_info.denom;
550
551         return time;
552 #elif defined (HOST_WIN32)
553         LARGE_INTEGER value;
554
555         QueryPerformanceCounter (&value);
556
557         return value.QuadPart * TICKS_PER_SEC / pcounter_freq.QuadPart;
558 #elif defined (CLOCK_MONOTONIC)
559         struct timespec tspec;
560
561         clock_gettime (CLOCK_MONOTONIC, &tspec);
562
563         return ((uint64_t) tspec.tv_sec * TICKS_PER_SEC + tspec.tv_nsec);
564 #else
565         struct timeval tv;
566
567         gettimeofday (&tv, NULL);
568
569         return ((uint64_t) tv.tv_sec * TICKS_PER_SEC + tv.tv_usec * 1000);
570 #endif
571 }
572
573 static int timer_overhead;
574
575 static void
576 init_time (void)
577 {
578 #ifdef __APPLE__
579         mach_timebase_info (&timebase_info);
580 #elif defined (HOST_WIN32)
581         QueryPerformanceFrequency (&pcounter_freq);
582 #endif
583
584         uint64_t time_start = current_time ();
585
586         for (int i = 0; i < 256; ++i)
587                 current_time ();
588
589         uint64_t time_end = current_time ();
590
591         timer_overhead = (time_end - time_start) / 256;
592 }
593
594 /*
595  * These macros should be used when writing an event to a log buffer. They
596  * take care of a bunch of stuff that can be repetitive and error-prone, such
597  * as attaching the current thread, acquiring/releasing the buffer lock,
598  * incrementing the event counter, expanding the log buffer, etc. They also
599  * create a scope so that it's harder to leak the LogBuffer pointer, which can
600  * be problematic as the pointer is unstable when the buffer lock isn't
601  * acquired.
602  *
603  * If the calling thread is already attached, these macros will not alter its
604  * attach mode (i.e. whether it's added to the LLS). If the thread is not
605  * attached, init_thread () will be called with add_to_lls = TRUE.
606  */
607
608 #define ENTER_LOG(COUNTER, BUFFER, SIZE) \
609         do { \
610                 MonoProfilerThread *thread__ = get_thread (); \
611                 if (thread__->attached) \
612                         buffer_lock (); \
613                 g_assert (!thread__->busy && "Why are we trying to write a new event while already writing one?"); \
614                 thread__->busy = TRUE; \
615                 InterlockedIncrement ((COUNTER)); \
616                 LogBuffer *BUFFER = ensure_logbuf_unsafe (thread__, (SIZE))
617
618 #define EXIT_LOG_EXPLICIT(SEND) \
619                 thread__->busy = FALSE; \
620                 if ((SEND)) \
621                         send_log_unsafe (TRUE); \
622                 if (thread__->attached) \
623                         buffer_unlock (); \
624         } while (0)
625
626 // Pass these to EXIT_LOG_EXPLICIT () for easier reading.
627 #define DO_SEND TRUE
628 #define NO_SEND FALSE
629
630 #define EXIT_LOG EXIT_LOG_EXPLICIT (DO_SEND)
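
/*
 * Illustrative usage sketch only (the payload here is hypothetical); real
 * call sites such as sync_point_mark () later in this file follow the same
 * pattern:
 *
 *      ENTER_LOG (&sync_points_ctr, logbuffer,
 *              EVENT_SIZE + // event
 *              LEB128_SIZE // type
 *      );
 *
 *      emit_event (logbuffer, TYPE_META | TYPE_SYNC_POINT);
 *      emit_byte (logbuffer, SYNC_POINT_WORLD_STOP);
 *
 *      EXIT_LOG_EXPLICIT (DO_SEND);
 */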
631
632 typedef struct _BinaryObject BinaryObject;
633 struct _BinaryObject {
634         BinaryObject *next;
635         void *addr;
636         char *name;
637 };
638
639 static MonoProfiler *log_profiler;
640
641 struct _MonoProfiler {
642         FILE* file;
643 #if defined (HAVE_SYS_ZLIB)
644         gzFile gzfile;
645 #endif
646         char *args;
647         uint64_t startup_time;
648         int pipe_output;
649         int command_port;
650         int server_socket;
651         int pipes [2];
652         MonoNativeThreadId helper_thread;
653         MonoNativeThreadId writer_thread;
654         MonoNativeThreadId dumper_thread;
655         volatile gint32 run_writer_thread;
656         MonoLockFreeAllocSizeClass writer_entry_size_class;
657         MonoLockFreeAllocator writer_entry_allocator;
658         MonoLockFreeQueue writer_queue;
659         MonoSemType writer_queue_sem;
660         MonoConcurrentHashTable *method_table;
661         mono_mutex_t method_table_mutex;
662         volatile gint32 run_dumper_thread;
663         MonoLockFreeQueue dumper_queue;
664         MonoSemType dumper_queue_sem;
665         MonoLockFreeAllocSizeClass sample_size_class;
666         MonoLockFreeAllocator sample_allocator;
667         MonoLockFreeQueue sample_reuse_queue;
668         BinaryObject *binary_objects;
669         GPtrArray *coverage_filters;
670 };
671
672 typedef struct {
673         MonoLockFreeQueueNode node;
674         GPtrArray *methods;
675         LogBuffer *buffer;
676 } WriterQueueEntry;
677
678 #define WRITER_ENTRY_BLOCK_SIZE (mono_pagesize ())
679
680 typedef struct {
681         MonoMethod *method;
682         MonoJitInfo *ji;
683         uint64_t time;
684 } MethodInfo;
685
686 static char*
687 pstrdup (const char *s)
688 {
689         int len = strlen (s) + 1;
690         char *p = (char *) g_malloc (len);
691         memcpy (p, s, len);
692         return p;
693 }
694
695 static void *
696 alloc_buffer (int size)
697 {
698         return mono_valloc (NULL, size, MONO_MMAP_READ | MONO_MMAP_WRITE | MONO_MMAP_ANON | MONO_MMAP_PRIVATE, MONO_MEM_ACCOUNT_PROFILER);
699 }
700
701 static void
702 free_buffer (void *buf, int size)
703 {
704         mono_vfree (buf, size, MONO_MEM_ACCOUNT_PROFILER);
705 }
706
707 static LogBuffer*
708 create_buffer (uintptr_t tid, int bytes)
709 {
710         LogBuffer* buf = (LogBuffer *) alloc_buffer (MAX (BUFFER_SIZE, bytes));
711
712         InterlockedIncrement (&buffer_allocations_ctr);
713
714         buf->size = BUFFER_SIZE;
715         buf->time_base = current_time ();
716         buf->last_time = buf->time_base;
717         buf->buf_end = (unsigned char *) buf + buf->size;
718         buf->cursor = buf->buf;
719         buf->thread_id = tid;
720
721         return buf;
722 }
723
724 /*
725  * Must be called with the reader lock held if thread is the current thread, or
726  * the exclusive lock if thread is a different thread. However, if thread is
727  * the current thread, and init_thread () was called with add_to_lls = FALSE,
728  * then no locking is necessary.
729  */
730 static void
731 init_buffer_state (MonoProfilerThread *thread)
732 {
733         thread->buffer = create_buffer (thread->node.key, 0);
734         thread->methods = NULL;
735 }
736
737 static void
738 clear_hazard_pointers (MonoThreadHazardPointers *hp)
739 {
740         mono_hazard_pointer_clear (hp, 0);
741         mono_hazard_pointer_clear (hp, 1);
742         mono_hazard_pointer_clear (hp, 2);
743 }
744
745 static MonoProfilerThread *
746 init_thread (MonoProfiler *prof, gboolean add_to_lls)
747 {
748         MonoProfilerThread *thread = PROF_TLS_GET ();
749
750         /*
751          * Sometimes we may try to initialize a thread twice. One example is the
752          * main thread: We initialize it when setting up the profiler, but we will
753          * also get a thread_start () callback for it. Another example is when
754          * attaching new threads to the runtime: We may get a gc_alloc () callback
755          * for that thread's thread object (where we initialize it), soon followed
756          * by a thread_start () callback.
757          *
758          * These cases are harmless anyhow. Just return if we've already done the
759          * initialization work.
760          */
761         if (thread)
762                 return thread;
763
764         thread = g_malloc (sizeof (MonoProfilerThread));
765         thread->node.key = thread_id ();
766         thread->profiler = prof;
767         thread->attached = add_to_lls;
768         thread->call_depth = 0;
769         thread->busy = FALSE;
770         thread->ended = FALSE;
771
772         init_buffer_state (thread);
773
774         thread->small_id = mono_thread_info_register_small_id ();
775
776         /*
777          * Some internal profiler threads don't need to be cleaned up
778          * by the main thread on shutdown.
779          */
780         if (add_to_lls) {
781                 MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();
782                 g_assert (mono_lls_insert (&profiler_thread_list, hp, &thread->node) && "Why can't we insert the thread in the LLS?");
783                 clear_hazard_pointers (hp);
784         }
785
786         PROF_TLS_SET (thread);
787
788         return thread;
789 }
790
791 // Only valid if init_thread () was called with add_to_lls = FALSE.
792 static void
793 deinit_thread (MonoProfilerThread *thread)
794 {
795         g_assert (!thread->attached && "Why are we manually freeing an attached thread?");
796
797         g_free (thread);
798         PROF_TLS_SET (NULL);
799 }
800
801 static MonoProfilerThread *
802 get_thread (void)
803 {
804         return init_thread (log_profiler, TRUE);
805 }
806
807 // Only valid if init_thread () was called with add_to_lls = FALSE.
808 static LogBuffer *
809 ensure_logbuf_unsafe (MonoProfilerThread *thread, int bytes)
810 {
811         LogBuffer *old = thread->buffer;
812
813         if (old->cursor + bytes < old->buf_end)
814                 return old;
815
816         LogBuffer *new_ = create_buffer (thread->node.key, bytes);
817         new_->next = old;
818         thread->buffer = new_;
819
820         return new_;
821 }
822
823 /*
824  * This is a reader/writer spin lock of sorts used to protect log buffers.
825  * When a thread modifies its own log buffer, it increments the reader
826  * count. When a thread wants to access log buffers of other threads, it
827  * takes the exclusive lock.
828  *
829  * `buffer_lock_state` holds the reader count in its lower 16 bits, and
830  * the small ID of the thread currently holding the exclusive (writer)
831  * lock in its upper 16 bits. Both can be zero. It's important that the
832  * whole lock state is a single word that can be read/written atomically
833  * to avoid race conditions where there could end up being readers while
834  * the writer lock is held.
835  *
836  * The lock is writer-biased. When a thread wants to take the exclusive
837  * lock, it increments `buffer_lock_exclusive_intent` which will make new
838  * readers spin until it's back to zero, then takes the exclusive lock
839  * once the reader count has reached zero. After releasing the exclusive
840  * lock, it decrements `buffer_lock_exclusive_intent`, which, when it
841  * reaches zero again, allows readers to increment the reader count.
842  *
843  * The writer bias is necessary because we take the exclusive lock in
844  * `gc_event ()` during STW. If the writer bias was not there, and a
845  * program had a large number of threads, STW-induced pauses could be
846  * significantly longer than they have to be. Also, we emit periodic
847  * sync points from the helper thread, which requires taking the
848  * exclusive lock, and we need those to arrive with a reasonably
849  * consistent frequency so that readers don't have to queue up too many
850  * events between sync points.
851  */
852 static volatile gint32 buffer_lock_state;
853 static volatile gint32 buffer_lock_exclusive_intent;
854
855 // Can be used recursively.
856 static void
857 buffer_lock (void)
858 {
859         /*
860          * If the thread holding the exclusive lock tries to modify the
861          * reader count, just make it a no-op. This way, we also avoid
862          * invoking the GC safe point macros below, which could break if
863          * done from a thread that is currently the initiator of STW.
864          *
865          * In other words, we rely on the fact that the GC thread takes
866          * the exclusive lock in the gc_event () callback when the world
867          * is about to stop.
868          */
869         if (InterlockedRead (&buffer_lock_state) != get_thread ()->small_id << 16) {
870                 MONO_ENTER_GC_SAFE;
871
872                 gint32 old, new_;
873
874                 do {
875                 restart:
876                         // Hold off if a thread wants to take the exclusive lock.
877                         while (InterlockedRead (&buffer_lock_exclusive_intent))
878                                 mono_thread_info_yield ();
879
880                         old = InterlockedRead (&buffer_lock_state);
881
882                         // Is a thread holding the exclusive lock?
883                         if (old >> 16) {
884                                 mono_thread_info_yield ();
885                                 goto restart;
886                         }
887
888                         new_ = old + 1;
889                 } while (InterlockedCompareExchange (&buffer_lock_state, new_, old) != old);
890
891                 MONO_EXIT_GC_SAFE;
892         }
893
894         mono_memory_barrier ();
895 }
896
897 static void
898 buffer_unlock (void)
899 {
900         mono_memory_barrier ();
901
902         gint32 state = InterlockedRead (&buffer_lock_state);
903
904         // See the comment in buffer_lock ().
905         if (state == PROF_TLS_GET ()->small_id << 16)
906                 return;
907
908         g_assert (state && "Why are we decrementing a zero reader count?");
909         g_assert (!(state >> 16) && "Why is the exclusive lock held?");
910
911         InterlockedDecrement (&buffer_lock_state);
912 }
913
914 // Cannot be used recursively.
915 static void
916 buffer_lock_excl (void)
917 {
918         gint32 new_ = get_thread ()->small_id << 16;
919
920         g_assert (InterlockedRead (&buffer_lock_state) != new_ && "Why are we taking the exclusive lock twice?");
921
922         InterlockedIncrement (&buffer_lock_exclusive_intent);
923
924         MONO_ENTER_GC_SAFE;
925
926         while (InterlockedCompareExchange (&buffer_lock_state, new_, 0))
927                 mono_thread_info_yield ();
928
929         MONO_EXIT_GC_SAFE;
930
931         mono_memory_barrier ();
932 }
933
934 static void
935 buffer_unlock_excl (void)
936 {
937         mono_memory_barrier ();
938
939         gint32 state = InterlockedRead (&buffer_lock_state);
940         gint32 excl = state >> 16;
941
942         g_assert (excl && "Why is the exclusive lock not held?");
943         g_assert (excl == PROF_TLS_GET ()->small_id && "Why does another thread hold the exclusive lock?");
944         g_assert (!(state & 0xFFFF) && "Why are there readers when the exclusive lock is held?");
945
946         InterlockedWrite (&buffer_lock_state, 0);
947         InterlockedDecrement (&buffer_lock_exclusive_intent);
948 }
949
950 static void
951 encode_uleb128 (uint64_t value, uint8_t *buf, uint8_t **endbuf)
952 {
953         uint8_t *p = buf;
954
955         do {
956                 uint8_t b = value & 0x7f;
957                 value >>= 7;
958
959                 if (value != 0) /* more bytes to come */
960                         b |= 0x80;
961
962                 *p ++ = b;
963         } while (value);
964
965         *endbuf = p;
966 }
967
968 static void
969 encode_sleb128 (intptr_t value, uint8_t *buf, uint8_t **endbuf)
970 {
971         int more = 1;
972         int negative = (value < 0);
973         unsigned int size = sizeof (intptr_t) * 8;
974         uint8_t byte;
975         uint8_t *p = buf;
976
977         while (more) {
978                 byte = value & 0x7f;
979                 value >>= 7;
980
981                 /* the following is unnecessary if the
982                  * implementation of >>= uses an arithmetic rather
983                  * than logical shift for a signed left operand
984                  */
985                 if (negative)
986                         /* sign extend */
987                         value |= - ((intptr_t) 1 <<(size - 7));
988
989                 /* sign bit of byte is second high order bit (0x40) */
990                 if ((value == 0 && !(byte & 0x40)) ||
991                     (value == -1 && (byte & 0x40)))
992                         more = 0;
993                 else
994                         byte |= 0x80;
995
996                 *p ++ = byte;
997         }
998
999         *endbuf = p;
1000 }
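
/*
 * Illustrative sketch only, not used by the profiler: the reader-side
 * counterpart of encode_sleb128 () above. The function name is hypothetical.
 */
static intptr_t
example_decode_sleb128 (const uint8_t *p, const uint8_t **endp)
{
        intptr_t value = 0;
        unsigned int shift = 0;
        uint8_t b;

        do {
                b = *p ++;
                value |= (intptr_t) (b & 0x7f) << shift;
                shift += 7;
        } while (b & 0x80);

        /* Sign-extend if the sign bit (0x40) of the last byte was set. */
        if (shift < sizeof (intptr_t) * 8 && (b & 0x40))
                value |= - ((intptr_t) 1 << shift);

        *endp = p;

        return value;
}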
1001
1002 static void
1003 emit_byte (LogBuffer *logbuffer, int value)
1004 {
1005         logbuffer->cursor [0] = value;
1006         logbuffer->cursor++;
1007
1008         g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
1009 }
1010
1011 static void
1012 emit_value (LogBuffer *logbuffer, int value)
1013 {
1014         encode_uleb128 (value, logbuffer->cursor, &logbuffer->cursor);
1015
1016         g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
1017 }
1018
1019 static void
1020 emit_time (LogBuffer *logbuffer, uint64_t value)
1021 {
1022         uint64_t tdiff = value - logbuffer->last_time;
1023         encode_uleb128 (tdiff, logbuffer->cursor, &logbuffer->cursor);
1024         logbuffer->last_time = value;
1025
1026         g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
1027 }
1028
1029 static void
1030 emit_event_time (LogBuffer *logbuffer, int event, uint64_t time)
1031 {
1032         emit_byte (logbuffer, event);
1033         emit_time (logbuffer, time);
1034 }
1035
1036 static void
1037 emit_event (LogBuffer *logbuffer, int event)
1038 {
1039         emit_event_time (logbuffer, event, current_time ());
1040 }
1041
1042 static void
1043 emit_svalue (LogBuffer *logbuffer, int64_t value)
1044 {
1045         encode_sleb128 (value, logbuffer->cursor, &logbuffer->cursor);
1046
1047         g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
1048 }
1049
1050 static void
1051 emit_uvalue (LogBuffer *logbuffer, uint64_t value)
1052 {
1053         encode_uleb128 (value, logbuffer->cursor, &logbuffer->cursor);
1054
1055         g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
1056 }
1057
1058 static void
1059 emit_ptr (LogBuffer *logbuffer, void *ptr)
1060 {
1061         if (!logbuffer->ptr_base)
1062                 logbuffer->ptr_base = (uintptr_t) ptr;
1063
1064         emit_svalue (logbuffer, (intptr_t) ptr - logbuffer->ptr_base);
1065
1066         g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
1067 }
1068
1069 static void
1070 emit_method_inner (LogBuffer *logbuffer, void *method)
1071 {
1072         if (!logbuffer->method_base) {
1073                 logbuffer->method_base = (intptr_t) method;
1074                 logbuffer->last_method = (intptr_t) method;
1075         }
1076
1077         encode_sleb128 ((intptr_t) ((char *) method - (char *) logbuffer->last_method), logbuffer->cursor, &logbuffer->cursor);
1078         logbuffer->last_method = (intptr_t) method;
1079
1080         g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
1081 }
1082
1083 static void
1084 register_method_local (MonoMethod *method, MonoJitInfo *ji)
1085 {
1086         MonoProfilerThread *thread = get_thread ();
1087
1088         if (!mono_conc_hashtable_lookup (thread->profiler->method_table, method)) {
1089                 MethodInfo *info = (MethodInfo *) g_malloc (sizeof (MethodInfo));
1090
1091                 info->method = method;
1092                 info->ji = ji;
1093                 info->time = current_time ();
1094
1095                 buffer_lock ();
1096
1097                 GPtrArray *arr = thread->methods ? thread->methods : (thread->methods = g_ptr_array_new ());
1098                 g_ptr_array_add (arr, info);
1099
1100                 buffer_unlock ();
1101         }
1102 }
1103
1104 static void
1105 emit_method (LogBuffer *logbuffer, MonoMethod *method)
1106 {
1107         register_method_local (method, NULL);
1108         emit_method_inner (logbuffer, method);
1109 }
1110
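/*
 * Object addresses are emitted shifted right by 3 bits: heap objects are at
 * least 8-byte aligned (see the alignment rounding in gc_reference ()), so
 * the low bits carry no information and dropping them keeps the sleb128
 * deltas small. A reader shifts the decoded value back to reconstruct the
 * address.
 */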
1111 static void
1112 emit_obj (LogBuffer *logbuffer, void *ptr)
1113 {
1114         if (!logbuffer->obj_base)
1115                 logbuffer->obj_base = (uintptr_t) ptr >> 3;
1116
1117         emit_svalue (logbuffer, ((uintptr_t) ptr >> 3) - logbuffer->obj_base);
1118
1119         g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
1120 }
1121
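/*
 * Emits at most `size` bytes of `str` (stopping early at the first NUL, if
 * any), always followed by a terminating NUL byte.
 */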
1122 static void
1123 emit_string (LogBuffer *logbuffer, const char *str, size_t size)
1124 {
1125         size_t i = 0;
1126         if (str) {
1127                 for (; i < size; i++) {
1128                         if (str[i] == '\0')
1129                                 break;
1130                         emit_byte (logbuffer, str [i]);
1131                 }
1132         }
1133         emit_byte (logbuffer, '\0');
1134 }
1135
1136 static void
1137 emit_double (LogBuffer *logbuffer, double value)
1138 {
1139         int i;
1140         unsigned char buffer[8];
1141         memcpy (buffer, &value, 8);
1142 #if G_BYTE_ORDER == G_BIG_ENDIAN
1143         for (i = 7; i >= 0; i--)
1144 #else
1145         for (i = 0; i < 8; i++)
1146 #endif
1147                 emit_byte (logbuffer, buffer[i]);
1148 }
1149
1150 static char*
1151 write_int16 (char *buf, int32_t value)
1152 {
1153         int i;
1154         for (i = 0; i < 2; ++i) {
1155                 buf [i] = value;
1156                 value >>= 8;
1157         }
1158         return buf + 2;
1159 }
1160
1161 static char*
1162 write_int32 (char *buf, int32_t value)
1163 {
1164         int i;
1165         for (i = 0; i < 4; ++i) {
1166                 buf [i] = value;
1167                 value >>= 8;
1168         }
1169         return buf + 4;
1170 }
1171
1172 static char*
1173 write_int64 (char *buf, int64_t value)
1174 {
1175         int i;
1176         for (i = 0; i < 8; ++i) {
1177                 buf [i] = value;
1178                 value >>= 8;
1179         }
1180         return buf + 8;
1181 }
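
/*
 * Illustrative sketch only, not used by the profiler: reading back one of
 * the little-endian integers produced by the write_int* helpers above. The
 * function name is hypothetical.
 */
static int32_t
example_read_int32 (const char *buf)
{
        uint32_t value = 0;

        for (int i = 0; i < 4; ++i)
                value |= (uint32_t) (unsigned char) buf [i] << (i * 8);

        return (int32_t) value;
}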
1182
1183 static char *
1184 write_header_string (char *p, const char *str)
1185 {
1186         size_t len = strlen (str) + 1;
1187
1188         p = write_int32 (p, len);
1189         strcpy (p, str);
1190
1191         return p + len;
1192 }
1193
1194 static void
1195 dump_header (MonoProfiler *profiler)
1196 {
1197         const char *args = profiler->args;
1198         const char *arch = mono_config_get_cpu ();
1199         const char *os = mono_config_get_os ();
1200
1201         char *hbuf = g_malloc (
1202                 sizeof (gint32) /* header id */ +
1203                 sizeof (gint8) /* major version */ +
1204                 sizeof (gint8) /* minor version */ +
1205                 sizeof (gint8) /* data version */ +
1206                 sizeof (gint8) /* word size */ +
1207                 sizeof (gint64) /* startup time */ +
1208                 sizeof (gint32) /* timer overhead */ +
1209                 sizeof (gint32) /* flags */ +
1210                 sizeof (gint32) /* process id */ +
1211                 sizeof (gint16) /* command port */ +
1212                 sizeof (gint32) + strlen (args) + 1 /* arguments */ +
1213                 sizeof (gint32) + strlen (arch) + 1 /* architecture */ +
1214                 sizeof (gint32) + strlen (os) + 1 /* operating system */
1215         );
1216         char *p = hbuf;
1217
1218         p = write_int32 (p, LOG_HEADER_ID);
1219         *p++ = LOG_VERSION_MAJOR;
1220         *p++ = LOG_VERSION_MINOR;
1221         *p++ = LOG_DATA_VERSION;
1222         *p++ = sizeof (void *);
1223         p = write_int64 (p, ((uint64_t) time (NULL)) * 1000);
1224         p = write_int32 (p, timer_overhead);
1225         p = write_int32 (p, 0); /* flags */
1226         p = write_int32 (p, process_id ());
1227         p = write_int16 (p, profiler->command_port);
1228         p = write_header_string (p, args);
1229         p = write_header_string (p, arch);
1230         p = write_header_string (p, os);
1231
1232 #if defined (HAVE_SYS_ZLIB)
1233         if (profiler->gzfile) {
1234                 gzwrite (profiler->gzfile, hbuf, p - hbuf);
1235         } else
1236 #endif
1237         {
1238                 fwrite (hbuf, p - hbuf, 1, profiler->file);
1239                 fflush (profiler->file);
1240         }
1241
1242         g_free (hbuf);
1243 }
1244
1245 /*
1246  * Must be called with the reader lock held if thread is the current thread, or
1247  * the exclusive lock if thread is a different thread. However, if thread is
1248  * the current thread, and init_thread () was called with add_to_lls = FALSE,
1249  * then no locking is necessary.
1250  */
1251 static void
1252 send_buffer (MonoProfilerThread *thread)
1253 {
1254         WriterQueueEntry *entry = mono_lock_free_alloc (&thread->profiler->writer_entry_allocator);
1255         entry->methods = thread->methods;
1256         entry->buffer = thread->buffer;
1257
1258         mono_lock_free_queue_node_init (&entry->node, FALSE);
1259
1260         mono_lock_free_queue_enqueue (&thread->profiler->writer_queue, &entry->node);
1261         mono_os_sem_post (&thread->profiler->writer_queue_sem);
1262 }
1263
1264 static void
1265 free_thread (gpointer p)
1266 {
1267         MonoProfilerThread *thread = p;
1268
1269         if (!thread->ended) {
1270                 /*
1271                  * The thread is being cleaned up by the main thread during
1272                  * shutdown. This typically happens for internal runtime
1273                  * threads. We need to synthesize a thread end event.
1274                  */
1275
1276                 InterlockedIncrement (&thread_ends_ctr);
1277
1278                 if (ENABLED (PROFLOG_THREAD_EVENTS)) {
1279                         LogBuffer *buf = ensure_logbuf_unsafe (thread,
1280                                 EVENT_SIZE /* event */ +
1281                                 BYTE_SIZE /* type */ +
1282                                 LEB128_SIZE /* tid */
1283                         );
1284
1285                         emit_event (buf, TYPE_END_UNLOAD | TYPE_METADATA);
1286                         emit_byte (buf, TYPE_THREAD);
1287                         emit_ptr (buf, (void *) thread->node.key);
1288                 }
1289         }
1290
1291         send_buffer (thread);
1292
1293         g_free (thread);
1294 }
1295
1296 static void
1297 remove_thread (MonoProfilerThread *thread)
1298 {
1299         MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();
1300
1301         if (mono_lls_remove (&profiler_thread_list, hp, &thread->node))
1302                 mono_thread_hazardous_try_free (thread, free_thread);
1303
1304         clear_hazard_pointers (hp);
1305 }
1306
1307 static void
1308 dump_buffer (MonoProfiler *profiler, LogBuffer *buf)
1309 {
1310         char hbuf [128];
1311         char *p = hbuf;
1312
1313         if (buf->next)
1314                 dump_buffer (profiler, buf->next);
1315
1316         if (buf->cursor - buf->buf) {
1317                 p = write_int32 (p, BUF_ID);
1318                 p = write_int32 (p, buf->cursor - buf->buf);
1319                 p = write_int64 (p, buf->time_base);
1320                 p = write_int64 (p, buf->ptr_base);
1321                 p = write_int64 (p, buf->obj_base);
1322                 p = write_int64 (p, buf->thread_id);
1323                 p = write_int64 (p, buf->method_base);
1324
1325 #if defined (HAVE_SYS_ZLIB)
1326                 if (profiler->gzfile) {
1327                         gzwrite (profiler->gzfile, hbuf, p - hbuf);
1328                         gzwrite (profiler->gzfile, buf->buf, buf->cursor - buf->buf);
1329                 } else
1330 #endif
1331                 {
1332                         fwrite (hbuf, p - hbuf, 1, profiler->file);
1333                         fwrite (buf->buf, buf->cursor - buf->buf, 1, profiler->file);
1334                         fflush (profiler->file);
1335                 }
1336         }
1337
1338         free_buffer (buf, buf->size);
1339 }
1340
1341 static void
1342 dump_buffer_threadless (MonoProfiler *profiler, LogBuffer *buf)
1343 {
1344         for (LogBuffer *iter = buf; iter; iter = iter->next)
1345                 iter->thread_id = 0;
1346
1347         dump_buffer (profiler, buf);
1348 }
1349
1350 // Only valid if init_thread () was called with add_to_lls = FALSE.
1351 static void
1352 send_log_unsafe (gboolean if_needed)
1353 {
1354         MonoProfilerThread *thread = PROF_TLS_GET ();
1355
1356         if (!if_needed || thread->buffer->next) {
1357                 if (!thread->attached)
1358                         for (LogBuffer *iter = thread->buffer; iter; iter = iter->next)
1359                                 iter->thread_id = 0;
1360
1361                 send_buffer (thread);
1362                 init_buffer_state (thread);
1363         }
1364 }
1365
1366 // Assumes that the exclusive lock is held.
1367 static void
1368 sync_point_flush (void)
1369 {
1370         g_assert (InterlockedRead (&buffer_lock_state) == PROF_TLS_GET ()->small_id << 16 && "Why don't we hold the exclusive lock?");
1371
1372         MONO_LLS_FOREACH_SAFE (&profiler_thread_list, MonoProfilerThread, thread) {
1373                 g_assert (thread->attached && "Why is a thread in the LLS not attached?");
1374
1375                 send_buffer (thread);
1376                 init_buffer_state (thread);
1377         } MONO_LLS_FOREACH_SAFE_END
1378 }
1379
1380 // Assumes that the exclusive lock is held.
1381 static void
1382 sync_point_mark (MonoProfilerSyncPointType type)
1383 {
1384         g_assert (InterlockedRead (&buffer_lock_state) == PROF_TLS_GET ()->small_id << 16 && "Why don't we hold the exclusive lock?");
1385
1386         ENTER_LOG (&sync_points_ctr, logbuffer,
1387                 EVENT_SIZE /* event */ +
1388                 LEB128_SIZE /* type */
1389         );
1390
1391         emit_event (logbuffer, TYPE_META | TYPE_SYNC_POINT);
1392         emit_byte (logbuffer, type);
1393
1394         EXIT_LOG_EXPLICIT (NO_SEND);
1395
1396         send_log_unsafe (FALSE);
1397 }
1398
1399 // Assumes that the exclusive lock is held.
1400 static void
1401 sync_point (MonoProfilerSyncPointType type)
1402 {
1403         sync_point_flush ();
1404         sync_point_mark (type);
1405 }
1406
1407 static int
1408 gc_reference (MonoObject *obj, MonoClass *klass, uintptr_t size, uintptr_t num, MonoObject **refs, uintptr_t *offsets, void *data)
1409 {
1410         /* account for object alignment in the heap */
1411         size += 7;
1412         size &= ~7;
1413
1414         ENTER_LOG (&heap_objects_ctr, logbuffer,
1415                 EVENT_SIZE /* event */ +
1416                 LEB128_SIZE /* obj */ +
1417                 LEB128_SIZE /* klass */ +
1418                 LEB128_SIZE /* size */ +
1419                 LEB128_SIZE /* num */ +
1420                 num * (
1421                         LEB128_SIZE /* offset */ +
1422                         LEB128_SIZE /* ref */
1423                 )
1424         );
1425
1426         emit_event (logbuffer, TYPE_HEAP_OBJECT | TYPE_HEAP);
1427         emit_obj (logbuffer, obj);
1428         emit_ptr (logbuffer, klass);
1429         emit_value (logbuffer, size);
1430         emit_value (logbuffer, num);
1431
1432         uintptr_t last_offset = 0;
1433
1434         for (int i = 0; i < num; ++i) {
1435                 emit_value (logbuffer, offsets [i] - last_offset);
1436                 last_offset = offsets [i];
1437                 emit_obj (logbuffer, refs [i]);
1438         }
1439
1440         EXIT_LOG_EXPLICIT (DO_SEND);
1441
1442         return 0;
1443 }
1444
1445 static unsigned int hs_mode_ms = 0;
1446 static unsigned int hs_mode_gc = 0;
1447 static unsigned int hs_mode_ondemand = 0;
1448 static unsigned int gc_count = 0;
1449 static uint64_t last_hs_time = 0;
1450 static gboolean do_heap_walk = FALSE;
1451 static gboolean ignore_heap_events;
1452
1453 static void
1454 gc_roots (MonoProfiler *prof, int num, void **objects, int *root_types, uintptr_t *extra_info)
1455 {
1456         if (ignore_heap_events)
1457                 return;
1458
1459         ENTER_LOG (&heap_roots_ctr, logbuffer,
1460                 EVENT_SIZE /* event */ +
1461                 LEB128_SIZE /* num */ +
1462                 LEB128_SIZE /* collections */ +
1463                 num * (
1464                         LEB128_SIZE /* object */ +
1465                         LEB128_SIZE /* root type */ +
1466                         LEB128_SIZE /* extra info */
1467                 )
1468         );
1469
1470         emit_event (logbuffer, TYPE_HEAP_ROOT | TYPE_HEAP);
1471         emit_value (logbuffer, num);
1472         emit_value (logbuffer, mono_gc_collection_count (mono_gc_max_generation ()));
1473
1474         for (int i = 0; i < num; ++i) {
1475                 emit_obj (logbuffer, objects [i]);
1476                 emit_byte (logbuffer, root_types [i]);
1477                 emit_value (logbuffer, extra_info [i]);
1478         }
1479
1480         EXIT_LOG_EXPLICIT (DO_SEND);
1481 }
1482
1483
1484 static void
1485 trigger_on_demand_heapshot (void)
1486 {
1487         if (heapshot_requested)
1488                 mono_gc_collect (mono_gc_max_generation ());
1489 }
1490
1491 #define ALL_GC_EVENTS_MASK (PROFLOG_GC_MOVES_EVENTS | PROFLOG_GC_ROOT_EVENTS | PROFLOG_GC_EVENTS | PROFLOG_HEAPSHOT_FEATURE)
1492
1493 static void
1494 gc_event (MonoProfiler *profiler, MonoGCEvent ev, int generation)
1495 {
1496         if (ev == MONO_GC_EVENT_START) {
1497                 uint64_t now = current_time ();
1498
1499                 if (hs_mode_ms && (now - last_hs_time) / 1000000 >= hs_mode_ms) /* elapsed ns -> ms */
1500                         do_heap_walk = TRUE;
1501                 else if (hs_mode_gc && !(gc_count % hs_mode_gc))
1502                         do_heap_walk = TRUE;
1503                 else if (hs_mode_ondemand)
1504                         do_heap_walk = heapshot_requested;
1505                 else if (!hs_mode_ms && !hs_mode_gc && generation == mono_gc_max_generation ())
1506                         do_heap_walk = TRUE;
1507
1508                 // When using heapshot, ignore heap events for collections we don't care about.
1509                 if (ENABLED (PROFLOG_HEAPSHOT_FEATURE)) {
1510                         // Ignore events generated during the collection itself (i.e. GC roots).
1511                         ignore_heap_events = !do_heap_walk;
1512                 }
1513         }
1514
1515
1516         if (ENABLED (PROFLOG_GC_EVENTS)) {
1517                 ENTER_LOG (&gc_events_ctr, logbuffer,
1518                         EVENT_SIZE /* event */ +
1519                         BYTE_SIZE /* gc event */ +
1520                         BYTE_SIZE /* generation */
1521                 );
1522
1523                 emit_event (logbuffer, TYPE_GC_EVENT | TYPE_GC);
1524                 emit_byte (logbuffer, ev);
1525                 emit_byte (logbuffer, generation);
1526
1527                 EXIT_LOG_EXPLICIT (NO_SEND);
1528         }
1529
1530         switch (ev) {
1531         case MONO_GC_EVENT_START:
1532                 if (generation == mono_gc_max_generation ())
1533                         gc_count++;
1534
1535                 break;
1536         case MONO_GC_EVENT_PRE_STOP_WORLD_LOCKED:
1537                 /*
1538                  * Ensure that no thread can be in the middle of writing to
1539                  * a buffer when the world stops...
1540                  */
1541                 buffer_lock_excl ();
1542                 break;
1543         case MONO_GC_EVENT_POST_STOP_WORLD:
1544                 /*
1545                  * ... So that we now have a consistent view of all buffers.
1546                  * This allows us to flush them. We need to do this because
1547                  * they may contain object allocation events that need to be
1548                  * committed to the log file before any object move events
1549                  * that will be produced during this GC.
1550                  */
1551                 if (ENABLED (ALL_GC_EVENTS_MASK))
1552                         sync_point (SYNC_POINT_WORLD_STOP);
1553
1554                 /*
1555                  * All heap events are surrounded by a HEAP_START and a HEAP_END event.
1556                  * Right now, that's the case for GC Moves, GC Roots or heapshots.
1557                  */
1558                 if (ENABLED (PROFLOG_GC_MOVES_EVENTS | PROFLOG_GC_ROOT_EVENTS) || do_heap_walk) {
1559                         ENTER_LOG (&heap_starts_ctr, logbuffer,
1560                                 EVENT_SIZE /* event */
1561                         );
1562
1563                         emit_event (logbuffer, TYPE_HEAP_START | TYPE_HEAP);
1564
1565                         EXIT_LOG_EXPLICIT (DO_SEND);
1566                 }
1567
1568                 break;
1569         case MONO_GC_EVENT_PRE_START_WORLD:
1570                 if (do_heap_shot && do_heap_walk)
1571                         mono_gc_walk_heap (0, gc_reference, NULL);
1572
1573                 /* Matching HEAP_END to the HEAP_START from above */
1574                 if (ENABLED (PROFLOG_GC_MOVES_EVENTS | PROFLOG_GC_ROOT_EVENTS) || do_heap_walk) {
1575                         ENTER_LOG (&heap_ends_ctr, logbuffer,
1576                                 EVENT_SIZE /* event */
1577                         );
1578
1579                         emit_event (logbuffer, TYPE_HEAP_END | TYPE_HEAP);
1580
1581                         EXIT_LOG_EXPLICIT (DO_SEND);
1582                 }
1583
1584                 if (do_heap_shot && do_heap_walk) {
1585                         do_heap_walk = FALSE;
1586                         heapshot_requested = 0;
1587                         last_hs_time = current_time ();
1588                 }
1589
1590                 /*
1591                  * Similarly, we must now make sure that any object moves
1592                  * written to the GC thread's buffer are flushed. Otherwise,
1593                  * object allocation events for certain addresses could come
1594                  * after the move events that made those addresses available.
1595                  */
1596                 if (ENABLED (ALL_GC_EVENTS_MASK))
1597                         sync_point_mark (SYNC_POINT_WORLD_START);
1598                 break;
1599         case MONO_GC_EVENT_POST_START_WORLD_UNLOCKED:
1600                 /*
1601                  * Finally, it is safe to allow other threads to write to
1602                  * their buffers again.
1603                  */
1604                 buffer_unlock_excl ();
1605                 break;
1606         default:
1607                 break;
1608         }
1609 }
1610
1611 static void
1612 gc_resize (MonoProfiler *profiler, int64_t new_size)
1613 {
1614         ENTER_LOG (&gc_resizes_ctr, logbuffer,
1615                 EVENT_SIZE /* event */ +
1616                 LEB128_SIZE /* new size */
1617         );
1618
1619         emit_event (logbuffer, TYPE_GC_RESIZE | TYPE_GC);
1620         emit_value (logbuffer, new_size);
1621
1622         EXIT_LOG_EXPLICIT (DO_SEND);
1623 }
1624
1625 typedef struct {
1626         int count;
1627         MonoMethod* methods [MAX_FRAMES];
1628         int32_t il_offsets [MAX_FRAMES];
1629         int32_t native_offsets [MAX_FRAMES];
1630 } FrameData;
1631
1632 static int num_frames = MAX_FRAMES;
1633
1634 static mono_bool
1635 walk_stack (MonoMethod *method, int32_t native_offset, int32_t il_offset, mono_bool managed, void* data)
1636 {
1637         FrameData *frame = (FrameData *)data;
1638         if (method && frame->count < num_frames) {
1639                 frame->il_offsets [frame->count] = il_offset;
1640                 frame->native_offsets [frame->count] = native_offset;
1641                 frame->methods [frame->count++] = method;
1642                 //printf ("In %d %s at %d (native: %d)\n", frame->count, mono_method_get_name (method), il_offset, native_offset);
1643         }
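        /* Returning nonzero stops the walk once we have collected as many frames as we can store. */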
1644         return frame->count == num_frames;
1645 }
1646
1647 /*
1648  * A note about stack walks: they can cause more profiler events to fire,
1649  * so we must make sure they don't happen after we have started emitting an
1650  * event, hence the collect_bt/emit_bt split.
1651  */
1652 static void
1653 collect_bt (FrameData *data)
1654 {
1655         data->count = 0;
1656         mono_stack_walk_no_il (walk_stack, data);
1657 }
1658
1659 static void
1660 emit_bt (MonoProfiler *prof, LogBuffer *logbuffer, FrameData *data)
1661 {
1662         if (data->count > num_frames)
1663                 printf ("bad num frames: %d\n", data->count);
1664
1665         emit_value (logbuffer, data->count);
1666
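        /* Frames are emitted in reverse of the order in which they were collected. */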
1667         while (data->count)
1668                 emit_method (logbuffer, data->methods [--data->count]);
1669 }
1670
1671 static void
1672 gc_alloc (MonoProfiler *prof, MonoObject *obj, MonoClass *klass)
1673 {
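        /* Only attach a backtrace when call events are disabled (nocalls), traces are not suppressed and the runtime is fully initialized. */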
1674         int do_bt = (nocalls && InterlockedRead (&runtime_inited) && !notraces) ? TYPE_ALLOC_BT : 0;
1675         FrameData data;
1676         uintptr_t len = mono_object_get_size (obj);
1677         /* account for object alignment in the heap */
1678         len += 7;
1679         len &= ~7;
1680
1681         if (do_bt)
1682                 collect_bt (&data);
1683
1684         ENTER_LOG (&gc_allocs_ctr, logbuffer,
1685                 EVENT_SIZE /* event */ +
1686                 LEB128_SIZE /* klass */ +
1687                 LEB128_SIZE /* obj */ +
1688                 LEB128_SIZE /* size */ +
1689                 (do_bt ? (
1690                         LEB128_SIZE /* count */ +
1691                         data.count * (
1692                                 LEB128_SIZE /* method */
1693                         )
1694                 ) : 0)
1695         );
1696
1697         emit_event (logbuffer, do_bt | TYPE_ALLOC);
1698         emit_ptr (logbuffer, klass);
1699         emit_obj (logbuffer, obj);
1700         emit_value (logbuffer, len);
1701
1702         if (do_bt)
1703                 emit_bt (prof, logbuffer, &data);
1704
1705         EXIT_LOG;
1706 }
1707
1708 static void
1709 gc_moves (MonoProfiler *prof, void **objects, int num)
1710 {
1711         ENTER_LOG (&gc_moves_ctr, logbuffer,
1712                 EVENT_SIZE /* event */ +
1713                 LEB128_SIZE /* num */ +
1714                 num * (
1715                         LEB128_SIZE /* object */
1716                 )
1717         );
1718
1719         emit_event (logbuffer, TYPE_GC_MOVE | TYPE_GC);
1720         emit_value (logbuffer, num);
1721
1722         for (int i = 0; i < num; ++i)
1723                 emit_obj (logbuffer, objects [i]);
1724
1725         EXIT_LOG_EXPLICIT (DO_SEND);
1726 }
1727
1728 static void
1729 gc_handle (MonoProfiler *prof, int op, int type, uintptr_t handle, MonoObject *obj)
1730 {
1731         int do_bt = nocalls && InterlockedRead (&runtime_inited) && !notraces;
1732         FrameData data;
1733
1734         if (do_bt)
1735                 collect_bt (&data);
1736
1737         gint32 *ctr = op == MONO_PROFILER_GC_HANDLE_CREATED ? &gc_handle_creations_ctr : &gc_handle_deletions_ctr;
1738
1739         ENTER_LOG (ctr, logbuffer,
1740                 EVENT_SIZE /* event */ +
1741                 LEB128_SIZE /* type */ +
1742                 LEB128_SIZE /* handle */ +
1743                 (op == MONO_PROFILER_GC_HANDLE_CREATED ? (
1744                         LEB128_SIZE /* obj */
1745                 ) : 0) +
1746                 (do_bt ? (
1747                         LEB128_SIZE /* count */ +
1748                         data.count * (
1749                                 LEB128_SIZE /* method */
1750                         )
1751                 ) : 0)
1752         );
1753
1754         if (op == MONO_PROFILER_GC_HANDLE_CREATED)
1755                 emit_event (logbuffer, (do_bt ? TYPE_GC_HANDLE_CREATED_BT : TYPE_GC_HANDLE_CREATED) | TYPE_GC);
1756         else if (op == MONO_PROFILER_GC_HANDLE_DESTROYED)
1757                 emit_event (logbuffer, (do_bt ? TYPE_GC_HANDLE_DESTROYED_BT : TYPE_GC_HANDLE_DESTROYED) | TYPE_GC);
1758         else
1759                 g_assert_not_reached ();
1760
1761         emit_value (logbuffer, type);
1762         emit_value (logbuffer, handle);
1763
1764         if (op == MONO_PROFILER_GC_HANDLE_CREATED)
1765                 emit_obj (logbuffer, obj);
1766
1767         if (do_bt)
1768                 emit_bt (prof, logbuffer, &data);
1769
1770         EXIT_LOG;
1771 }
1772
1773 static void
1774 finalize_begin (MonoProfiler *prof)
1775 {
1776         ENTER_LOG (&finalize_begins_ctr, buf,
1777                 EVENT_SIZE /* event */
1778         );
1779
1780         emit_event (buf, TYPE_GC_FINALIZE_START | TYPE_GC);
1781
1782         EXIT_LOG;
1783 }
1784
1785 static void
1786 finalize_end (MonoProfiler *prof)
1787 {
1788         trigger_on_demand_heapshot ();
1789         if (ENABLED (PROFLOG_FINALIZATION_EVENTS)) {
1790                 ENTER_LOG (&finalize_ends_ctr, buf,
1791                         EVENT_SIZE /* event */
1792                 );
1793
1794                 emit_event (buf, TYPE_GC_FINALIZE_END | TYPE_GC);
1795
1796                 EXIT_LOG;
1797         }
1798 }
1799
1800 static void
1801 finalize_object_begin (MonoProfiler *prof, MonoObject *obj)
1802 {
1803         ENTER_LOG (&finalize_object_begins_ctr, buf,
1804                 EVENT_SIZE /* event */ +
1805                 LEB128_SIZE /* obj */
1806         );
1807
1808         emit_event (buf, TYPE_GC_FINALIZE_OBJECT_START | TYPE_GC);
1809         emit_obj (buf, obj);
1810
1811         EXIT_LOG;
1812 }
1813
1814 static void
1815 finalize_object_end (MonoProfiler *prof, MonoObject *obj)
1816 {
1817         ENTER_LOG (&finalize_object_ends_ctr, buf,
1818                 EVENT_SIZE /* event */ +
1819                 LEB128_SIZE /* obj */
1820         );
1821
1822         emit_event (buf, TYPE_GC_FINALIZE_OBJECT_END | TYPE_GC);
1823         emit_obj (buf, obj);
1824
1825         EXIT_LOG;
1826 }
1827
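/* push_nesting and type_name build a best-effort type name of the form Namespace.Outer/Nested into a fixed-size buffer, for use before the runtime is fully initialized. */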
1828 static char*
1829 push_nesting (char *p, MonoClass *klass)
1830 {
1831         MonoClass *nesting;
1832         const char *name;
1833         const char *nspace;
1834         nesting = mono_class_get_nesting_type (klass);
1835         if (nesting) {
1836                 p = push_nesting (p, nesting);
1837                 *p++ = '/';
1838                 *p = 0;
1839         }
1840         name = mono_class_get_name (klass);
1841         nspace = mono_class_get_namespace (klass);
1842         if (*nspace) {
1843                 strcpy (p, nspace);
1844                 p += strlen (nspace);
1845                 *p++ = '.';
1846                 *p = 0;
1847         }
1848         strcpy (p, name);
1849         p += strlen (name);
1850         return p;
1851 }
1852
1853 static char*
1854 type_name (MonoClass *klass)
1855 {
1856         char buf [1024];
1857         char *p;
1858         push_nesting (buf, klass);
1859         p = (char *) g_malloc (strlen (buf) + 1);
1860         strcpy (p, buf);
1861         return p;
1862 }
1863
1864 static void
1865 image_loaded (MonoProfiler *prof, MonoImage *image, int result)
1866 {
1867         if (result != MONO_PROFILE_OK)
1868                 return;
1869
1870         const char *name = mono_image_get_filename (image);
1871         int nlen = strlen (name) + 1;
1872
1873         ENTER_LOG (&image_loads_ctr, logbuffer,
1874                 EVENT_SIZE /* event */ +
1875                 BYTE_SIZE /* type */ +
1876                 LEB128_SIZE /* image */ +
1877                 nlen /* name */
1878         );
1879
1880         emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
1881         emit_byte (logbuffer, TYPE_IMAGE);
1882         emit_ptr (logbuffer, image);
1883         memcpy (logbuffer->cursor, name, nlen);
1884         logbuffer->cursor += nlen;
1885
1886         EXIT_LOG;
1887 }
1888
1889 static void
1890 image_unloaded (MonoProfiler *prof, MonoImage *image)
1891 {
1892         const char *name = mono_image_get_filename (image);
1893         int nlen = strlen (name) + 1;
1894
1895         ENTER_LOG (&image_unloads_ctr, logbuffer,
1896                 EVENT_SIZE /* event */ +
1897                 BYTE_SIZE /* type */ +
1898                 LEB128_SIZE /* image */ +
1899                 nlen /* name */
1900         );
1901
1902         emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
1903         emit_byte (logbuffer, TYPE_IMAGE);
1904         emit_ptr (logbuffer, image);
1905         memcpy (logbuffer->cursor, name, nlen);
1906         logbuffer->cursor += nlen;
1907
1908         EXIT_LOG;
1909 }
1910
1911 static void
1912 assembly_loaded (MonoProfiler *prof, MonoAssembly *assembly, int result)
1913 {
1914         if (result != MONO_PROFILE_OK)
1915                 return;
1916
1917         char *name = mono_stringify_assembly_name (mono_assembly_get_name (assembly));
1918         int nlen = strlen (name) + 1;
1919         MonoImage *image = mono_assembly_get_image (assembly);
1920
1921         ENTER_LOG (&assembly_loads_ctr, logbuffer,
1922                 EVENT_SIZE /* event */ +
1923                 BYTE_SIZE /* type */ +
1924                 LEB128_SIZE /* assembly */ +
1925                 LEB128_SIZE /* image */ +
1926                 nlen /* name */
1927         );
1928
1929         emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
1930         emit_byte (logbuffer, TYPE_ASSEMBLY);
1931         emit_ptr (logbuffer, assembly);
1932         emit_ptr (logbuffer, image);
1933         memcpy (logbuffer->cursor, name, nlen);
1934         logbuffer->cursor += nlen;
1935
1936         EXIT_LOG;
1937
1938         mono_free (name);
1939 }
1940
1941 static void
1942 assembly_unloaded (MonoProfiler *prof, MonoAssembly *assembly)
1943 {
1944         char *name = mono_stringify_assembly_name (mono_assembly_get_name (assembly));
1945         int nlen = strlen (name) + 1;
1946         MonoImage *image = mono_assembly_get_image (assembly);
1947
1948         ENTER_LOG (&assembly_unloads_ctr, logbuffer,
1949                 EVENT_SIZE /* event */ +
1950                 BYTE_SIZE /* type */ +
1951                 LEB128_SIZE /* assembly */ +
1952                 LEB128_SIZE /* image */ +
1953                 nlen /* name */
1954         );
1955
1956         emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
1957         emit_byte (logbuffer, TYPE_ASSEMBLY);
1958         emit_ptr (logbuffer, assembly);
1959         emit_ptr (logbuffer, image);
1960         memcpy (logbuffer->cursor, name, nlen);
1961         logbuffer->cursor += nlen;
1962
1963         EXIT_LOG;
1964
1965         mono_free (name);
1966 }
1967
1968 static void
1969 class_loaded (MonoProfiler *prof, MonoClass *klass, int result)
1970 {
1971         if (result != MONO_PROFILE_OK)
1972                 return;
1973
1974         char *name;
1975
1976         if (InterlockedRead (&runtime_inited))
1977                 name = mono_type_get_name (mono_class_get_type (klass));
1978         else
1979                 name = type_name (klass);
1980
1981         int nlen = strlen (name) + 1;
1982         MonoImage *image = mono_class_get_image (klass);
1983
1984         ENTER_LOG (&class_loads_ctr, logbuffer,
1985                 EVENT_SIZE /* event */ +
1986                 BYTE_SIZE /* type */ +
1987                 LEB128_SIZE /* klass */ +
1988                 LEB128_SIZE /* image */ +
1989                 nlen /* name */
1990         );
1991
1992         emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
1993         emit_byte (logbuffer, TYPE_CLASS);
1994         emit_ptr (logbuffer, klass);
1995         emit_ptr (logbuffer, image);
1996         memcpy (logbuffer->cursor, name, nlen);
1997         logbuffer->cursor += nlen;
1998
1999         EXIT_LOG;
2000
2001         if (runtime_inited)
2002                 mono_free (name);
2003         else
2004                 g_free (name);
2005 }
2006
2007 static void
2008 class_unloaded (MonoProfiler *prof, MonoClass *klass)
2009 {
2010         char *name;
2011
2012         if (InterlockedRead (&runtime_inited))
2013                 name = mono_type_get_name (mono_class_get_type (klass));
2014         else
2015                 name = type_name (klass);
2016
2017         int nlen = strlen (name) + 1;
2018         MonoImage *image = mono_class_get_image (klass);
2019
2020         ENTER_LOG (&class_unloads_ctr, logbuffer,
2021                 EVENT_SIZE /* event */ +
2022                 BYTE_SIZE /* type */ +
2023                 LEB128_SIZE /* klass */ +
2024                 LEB128_SIZE /* image */ +
2025                 nlen /* name */
2026         );
2027
2028         emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
2029         emit_byte (logbuffer, TYPE_CLASS);
2030         emit_ptr (logbuffer, klass);
2031         emit_ptr (logbuffer, image);
2032         memcpy (logbuffer->cursor, name, nlen);
2033         logbuffer->cursor += nlen;
2034
2035         EXIT_LOG;
2036
2037         if (runtime_inited)
2038                 mono_free (name);
2039         else
2040                 g_free (name);
2041 }
2042
2043 static void process_method_enter_coverage (MonoProfiler *prof, MonoMethod *method);
2044
2045 static void
2046 method_enter (MonoProfiler *prof, MonoMethod *method)
2047 {
2048         process_method_enter_coverage (prof, method);
2049
2050         if (!only_coverage && get_thread ()->call_depth++ <= max_call_depth) {
2051                 ENTER_LOG (&method_entries_ctr, logbuffer,
2052                         EVENT_SIZE /* event */ +
2053                         LEB128_SIZE /* method */
2054                 );
2055
2056                 emit_event (logbuffer, TYPE_ENTER | TYPE_METHOD);
2057                 emit_method (logbuffer, method);
2058
2059                 EXIT_LOG;
2060         }
2061 }
2062
2063 static void
2064 method_leave (MonoProfiler *prof, MonoMethod *method)
2065 {
2066         if (!only_coverage && --get_thread ()->call_depth <= max_call_depth) {
2067                 ENTER_LOG (&method_exits_ctr, logbuffer,
2068                         EVENT_SIZE /* event */ +
2069                         LEB128_SIZE /* method */
2070                 );
2071
2072                 emit_event (logbuffer, TYPE_LEAVE | TYPE_METHOD);
2073                 emit_method (logbuffer, method);
2074
2075                 EXIT_LOG;
2076         }
2077 }
2078
2079 static void
2080 method_exc_leave (MonoProfiler *prof, MonoMethod *method)
2081 {
2082         if (!only_coverage && !nocalls && --get_thread ()->call_depth <= max_call_depth) {
2083                 ENTER_LOG (&method_exception_exits_ctr, logbuffer,
2084                         EVENT_SIZE /* event */ +
2085                         LEB128_SIZE /* method */
2086                 );
2087
2088                 emit_event (logbuffer, TYPE_EXC_LEAVE | TYPE_METHOD);
2089                 emit_method (logbuffer, method);
2090
2091                 EXIT_LOG;
2092         }
2093 }
2094
2095 static void
2096 method_jitted (MonoProfiler *prof, MonoMethod *method, MonoJitInfo *ji, int result)
2097 {
2098         if (result != MONO_PROFILE_OK)
2099                 return;
2100
2101         register_method_local (method, ji);
2102 }
2103
2104 static void
2105 code_buffer_new (MonoProfiler *prof, void *buffer, int size, MonoProfilerCodeBufferType type, void *data)
2106 {
2107         char *name;
2108         int nlen;
2109
2110         if (type == MONO_PROFILER_CODE_BUFFER_SPECIFIC_TRAMPOLINE) {
2111                 name = (char *) data;
2112                 nlen = strlen (name) + 1;
2113         } else {
2114                 name = NULL;
2115                 nlen = 0;
2116         }
2117
2118         ENTER_LOG (&code_buffers_ctr, logbuffer,
2119                 EVENT_SIZE /* event */ +
2120                 BYTE_SIZE /* type */ +
2121                 LEB128_SIZE /* buffer */ +
2122                 LEB128_SIZE /* size */ +
2123                 (name ? (
2124                         nlen /* name */
2125                 ) : 0)
2126         );
2127
2128         emit_event (logbuffer, TYPE_JITHELPER | TYPE_RUNTIME);
2129         emit_byte (logbuffer, type);
2130         emit_ptr (logbuffer, buffer);
2131         emit_value (logbuffer, size);
2132
2133         if (name) {
2134                 memcpy (logbuffer->cursor, name, nlen);
2135                 logbuffer->cursor += nlen;
2136         }
2137
2138         EXIT_LOG;
2139 }
2140
2141 static void
2142 throw_exc (MonoProfiler *prof, MonoObject *object)
2143 {
2144         int do_bt = (nocalls && InterlockedRead (&runtime_inited) && !notraces) ? TYPE_THROW_BT : 0;
2145         FrameData data;
2146
2147         if (do_bt)
2148                 collect_bt (&data);
2149
2150         ENTER_LOG (&exception_throws_ctr, logbuffer,
2151                 EVENT_SIZE /* event */ +
2152                 LEB128_SIZE /* object */ +
2153                 (do_bt ? (
2154                         LEB128_SIZE /* count */ +
2155                         data.count * (
2156                                 LEB128_SIZE /* method */
2157                         )
2158                 ) : 0)
2159         );
2160
2161         emit_event (logbuffer, do_bt | TYPE_EXCEPTION);
2162         emit_obj (logbuffer, object);
2163
2164         if (do_bt)
2165                 emit_bt (prof, logbuffer, &data);
2166
2167         EXIT_LOG;
2168 }
2169
2170 static void
2171 clause_exc (MonoProfiler *prof, MonoMethod *method, int clause_type, int clause_num, MonoObject *exc)
2172 {
2173         ENTER_LOG (&exception_clauses_ctr, logbuffer,
2174                 EVENT_SIZE /* event */ +
2175                 BYTE_SIZE /* clause type */ +
2176                 LEB128_SIZE /* clause num */ +
2177                 LEB128_SIZE /* method */
2178         );
2179
2180         emit_event (logbuffer, TYPE_EXCEPTION | TYPE_CLAUSE);
2181         emit_byte (logbuffer, clause_type);
2182         emit_value (logbuffer, clause_num);
2183         emit_method (logbuffer, method);
2184         emit_obj (logbuffer, exc);
2185
2186         EXIT_LOG;
2187 }
2188
2189 static void
2190 monitor_event (MonoProfiler *profiler, MonoObject *object, MonoProfilerMonitorEvent ev)
2191 {
2192         int do_bt = (nocalls && InterlockedRead (&runtime_inited) && !notraces) ? TYPE_MONITOR_BT : 0;
2193         FrameData data;
2194
2195         if (do_bt)
2196                 collect_bt (&data);
2197
2198         ENTER_LOG (&monitor_events_ctr, logbuffer,
2199                 EVENT_SIZE /* event */ +
2200                 BYTE_SIZE /* ev */ +
2201                 LEB128_SIZE /* object */ +
2202                 (do_bt ? (
2203                         LEB128_SIZE /* count */ +
2204                         data.count * (
2205                                 LEB128_SIZE /* method */
2206                         )
2207                 ) : 0)
2208         );
2209
2210         emit_event (logbuffer, do_bt | TYPE_MONITOR);
2211         emit_byte (logbuffer, ev);
2212         emit_obj (logbuffer, object);
2213
2214         if (do_bt)
2215                 emit_bt (profiler, logbuffer, &data);
2216
2217         EXIT_LOG;
2218 }
2219
2220 static void
2221 thread_start (MonoProfiler *prof, uintptr_t tid)
2222 {
2223         if (ENABLED (PROFLOG_THREAD_EVENTS)) {
2224                 ENTER_LOG (&thread_starts_ctr, logbuffer,
2225                         EVENT_SIZE /* event */ +
2226                         BYTE_SIZE /* type */ +
2227                         LEB128_SIZE /* tid */
2228                 );
2229
2230                 emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
2231                 emit_byte (logbuffer, TYPE_THREAD);
2232                 emit_ptr (logbuffer, (void*) tid);
2233
2234                 EXIT_LOG;
2235         }
2236 }
2237
2238 static void
2239 thread_end (MonoProfiler *prof, uintptr_t tid)
2240 {
2241         if (ENABLED (PROFLOG_THREAD_EVENTS)) {
2242                 ENTER_LOG (&thread_ends_ctr, logbuffer,
2243                         EVENT_SIZE /* event */ +
2244                         BYTE_SIZE /* type */ +
2245                         LEB128_SIZE /* tid */
2246                 );
2247
2248                 emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
2249                 emit_byte (logbuffer, TYPE_THREAD);
2250                 emit_ptr (logbuffer, (void*) tid);
2251
2252                 EXIT_LOG_EXPLICIT (NO_SEND);
2253         }
2254
2255         MonoProfilerThread *thread = get_thread ();
2256
2257         thread->ended = TRUE;
2258         remove_thread (thread);
2259
2260         PROF_TLS_SET (NULL);
2261 }
2262
2263 static void
2264 thread_name (MonoProfiler *prof, uintptr_t tid, const char *name)
2265 {
2266         int len = strlen (name) + 1;
2267
2268         if (ENABLED (PROFLOG_THREAD_EVENTS)) {
2269                 ENTER_LOG (&thread_names_ctr, logbuffer,
2270                         EVENT_SIZE /* event */ +
2271                         BYTE_SIZE /* type */ +
2272                         LEB128_SIZE /* tid */ +
2273                         len /* name */
2274                 );
2275
2276                 emit_event (logbuffer, TYPE_METADATA);
2277                 emit_byte (logbuffer, TYPE_THREAD);
2278                 emit_ptr (logbuffer, (void*)tid);
2279                 memcpy (logbuffer->cursor, name, len);
2280                 logbuffer->cursor += len;
2281
2282                 EXIT_LOG;
2283         }
2284 }
2285
2286 static void
2287 domain_loaded (MonoProfiler *prof, MonoDomain *domain, int result)
2288 {
2289         if (result != MONO_PROFILE_OK)
2290                 return;
2291
2292         ENTER_LOG (&domain_loads_ctr, logbuffer,
2293                 EVENT_SIZE /* event */ +
2294                 BYTE_SIZE /* type */ +
2295                 LEB128_SIZE /* domain id */
2296         );
2297
2298         emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
2299         emit_byte (logbuffer, TYPE_DOMAIN);
2300         emit_ptr (logbuffer, (void*)(uintptr_t) mono_domain_get_id (domain));
2301
2302         EXIT_LOG;
2303 }
2304
2305 static void
2306 domain_unloaded (MonoProfiler *prof, MonoDomain *domain)
2307 {
2308         ENTER_LOG (&domain_unloads_ctr, logbuffer,
2309                 EVENT_SIZE /* event */ +
2310                 BYTE_SIZE /* type */ +
2311                 LEB128_SIZE /* domain id */
2312         );
2313
2314         emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
2315         emit_byte (logbuffer, TYPE_DOMAIN);
2316         emit_ptr (logbuffer, (void*)(uintptr_t) mono_domain_get_id (domain));
2317
2318         EXIT_LOG;
2319 }
2320
2321 static void
2322 domain_name (MonoProfiler *prof, MonoDomain *domain, const char *name)
2323 {
2324         int nlen = strlen (name) + 1;
2325
2326         ENTER_LOG (&domain_names_ctr, logbuffer,
2327                 EVENT_SIZE /* event */ +
2328                 BYTE_SIZE /* type */ +
2329                 LEB128_SIZE /* domain id */ +
2330                 nlen /* name */
2331         );
2332
2333         emit_event (logbuffer, TYPE_METADATA);
2334         emit_byte (logbuffer, TYPE_DOMAIN);
2335         emit_ptr (logbuffer, (void*)(uintptr_t) mono_domain_get_id (domain));
2336         memcpy (logbuffer->cursor, name, nlen);
2337         logbuffer->cursor += nlen;
2338
2339         EXIT_LOG;
2340 }
2341
2342 static void
2343 context_loaded (MonoProfiler *prof, MonoAppContext *context)
2344 {
2345         ENTER_LOG (&context_loads_ctr, logbuffer,
2346                 EVENT_SIZE /* event */ +
2347                 BYTE_SIZE /* type */ +
2348                 LEB128_SIZE /* context id */ +
2349                 LEB128_SIZE /* domain id */
2350         );
2351
2352         emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
2353         emit_byte (logbuffer, TYPE_CONTEXT);
2354         emit_ptr (logbuffer, (void*)(uintptr_t) mono_context_get_id (context));
2355         emit_ptr (logbuffer, (void*)(uintptr_t) mono_context_get_domain_id (context));
2356
2357         EXIT_LOG;
2358 }
2359
2360 static void
2361 context_unloaded (MonoProfiler *prof, MonoAppContext *context)
2362 {
2363         ENTER_LOG (&context_unloads_ctr, logbuffer,
2364                 EVENT_SIZE /* event */ +
2365                 BYTE_SIZE /* type */ +
2366                 LEB128_SIZE /* context id */ +
2367                 LEB128_SIZE /* domain id */
2368         );
2369
2370         emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
2371         emit_byte (logbuffer, TYPE_CONTEXT);
2372         emit_ptr (logbuffer, (void*)(uintptr_t) mono_context_get_id (context));
2373         emit_ptr (logbuffer, (void*)(uintptr_t) mono_context_get_domain_id (context));
2374
2375         EXIT_LOG;
2376 }
2377
2378 typedef struct {
2379         MonoMethod *method;
2380         MonoDomain *domain;
2381         void *base_address;
2382         int offset;
2383 } AsyncFrameInfo;
2384
2385 typedef struct {
2386         MonoLockFreeQueueNode node;
2387         MonoProfiler *prof;
2388         uint64_t time;
2389         uintptr_t tid;
2390         void *ip;
2391         int count;
2392         AsyncFrameInfo frames [MONO_ZERO_LEN_ARRAY];
2393 } SampleHit;
2394
2395 static mono_bool
2396 async_walk_stack (MonoMethod *method, MonoDomain *domain, void *base_address, int offset, void *data)
2397 {
2398         SampleHit *sample = (SampleHit *) data;
2399
2400         if (sample->count < num_frames) {
2401                 int i = sample->count;
2402
2403                 sample->frames [i].method = method;
2404                 sample->frames [i].domain = domain;
2405                 sample->frames [i].base_address = base_address;
2406                 sample->frames [i].offset = offset;
2407
2408                 sample->count++;
2409         }
2410
2411         return sample->count == num_frames;
2412 }
2413
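/* Bytes needed for one sample slot: the SampleHit header plus FRAMES async frame entries, accounting for the zero-length array placeholder already included in sizeof (SampleHit). */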
2414 #define SAMPLE_SLOT_SIZE(FRAMES) (sizeof (SampleHit) + sizeof (AsyncFrameInfo) * (FRAMES - MONO_ZERO_LEN_ARRAY))
2415 #define SAMPLE_BLOCK_SIZE (mono_pagesize ())
2416
2417 static void
2418 enqueue_sample_hit (gpointer p)
2419 {
2420         SampleHit *sample = p;
2421
2422         mono_lock_free_queue_node_unpoison (&sample->node);
2423         mono_lock_free_queue_enqueue (&sample->prof->dumper_queue, &sample->node);
2424         mono_os_sem_post (&sample->prof->dumper_queue_sem);
2425 }
2426
2427 static void
2428 mono_sample_hit (MonoProfiler *profiler, unsigned char *ip, void *context)
2429 {
2430         /*
2431          * Please note: We rely on the runtime loading the profiler with
2432          * MONO_DL_EAGER (RTLD_NOW) so that references to runtime functions within
2433          * this function (and its siblings) are resolved when the profiler is
2434          * loaded. Otherwise, we would potentially invoke the dynamic linker when
2435          * invoking runtime functions, which is not async-signal-safe.
2436          */
2437
2438         if (InterlockedRead (&in_shutdown))
2439                 return;
2440
2441         SampleHit *sample = (SampleHit *) mono_lock_free_queue_dequeue (&profiler->sample_reuse_queue);
2442
2443         if (!sample) {
2444                 /*
2445                  * If we're out of reusable sample events and we're not allowed to
2446                  * allocate more, we have no choice but to drop the event.
2447                  */
2448                 if (InterlockedRead (&sample_allocations_ctr) >= max_allocated_sample_hits)
2449                         return;
2450
2451                 sample = mono_lock_free_alloc (&profiler->sample_allocator);
2452                 sample->prof = profiler;
2453                 mono_lock_free_queue_node_init (&sample->node, TRUE);
2454
2455                 InterlockedIncrement (&sample_allocations_ctr);
2456         }
2457
2458         sample->count = 0;
2459         mono_stack_walk_async_safe (&async_walk_stack, context, sample);
2460
2461         sample->time = current_time ();
2462         sample->tid = thread_id ();
2463         sample->ip = ip;
2464
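        /* Hand the sample off through the hazard pointer machinery; enqueue_sample_hit runs once no thread can still be referencing it. */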
2465         mono_thread_hazardous_try_free (sample, enqueue_sample_hit);
2466 }
2467
2468 static uintptr_t *code_pages = 0;
2469 static int num_code_pages = 0;
2470 static int size_code_pages = 0;
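/* Native code addresses are tracked at 512-byte (1 << CPAGE_SHIFT) page granularity; the low bit of a stored entry marks a page whose symbols were already dumped. */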
2471 #define CPAGE_SHIFT (9)
2472 #define CPAGE_SIZE (1 << CPAGE_SHIFT)
2473 #define CPAGE_MASK (~(CPAGE_SIZE - 1))
2474 #define CPAGE_ADDR(p) ((p) & CPAGE_MASK)
2475
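/* Store a code page in the open-addressed hash table using linear probing. Returns 1 if a new page was inserted, 0 if it was already present (or the table is unexpectedly full). */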
2476 static uintptr_t
2477 add_code_page (uintptr_t *hash, uintptr_t hsize, uintptr_t page)
2478 {
2479         uintptr_t i;
2480         uintptr_t start_pos;
2481         start_pos = (page >> CPAGE_SHIFT) % hsize;
2482         i = start_pos;
2483         do {
2484                 if (hash [i] && CPAGE_ADDR (hash [i]) == CPAGE_ADDR (page)) {
2485                         return 0;
2486                 } else if (!hash [i]) {
2487                         hash [i] = page;
2488                         return 1;
2489                 }
2490                 /* wrap around */
2491                 if (++i == hsize)
2492                         i = 0;
2493         } while (i != start_pos);
2494         /* should not happen */
2495         printf ("failed code page store\n");
2496         return 0;
2497 }
2498
2499 static void
2500 add_code_pointer (uintptr_t ip)
2501 {
2502         uintptr_t i;
2503         if (num_code_pages * 2 >= size_code_pages) {
2504                 uintptr_t *n;
2505                 uintptr_t old_size = size_code_pages;
2506                 size_code_pages *= 2;
2507                 if (size_code_pages == 0)
2508                         size_code_pages = 16;
2509                 n = (uintptr_t *) g_calloc (sizeof (uintptr_t) * size_code_pages, 1);
2510                 for (i = 0; i < old_size; ++i) {
2511                         if (code_pages [i])
2512                                 add_code_page (n, size_code_pages, code_pages [i]);
2513                 }
2514                 if (code_pages)
2515                         g_free (code_pages);
2516                 code_pages = n;
2517         }
2518         num_code_pages += add_code_page (code_pages, size_code_pages, ip & CPAGE_MASK);
2519 }
2520
2521 /* ELF code crashes on some systems. */
2522 //#if defined(HAVE_DL_ITERATE_PHDR) && defined(ELFMAG0)
2523 #if 0
2524 static void
2525 dump_ubin (MonoProfiler *prof, const char *filename, uintptr_t load_addr, uint64_t offset, uintptr_t size)
2526 {
2527         int len = strlen (filename) + 1;
2528
2529         ENTER_LOG (&sample_ubins_ctr, logbuffer,
2530                 EVENT_SIZE /* event */ +
2531                 LEB128_SIZE /* load address */ +
2532                 LEB128_SIZE /* offset */ +
2533                 LEB128_SIZE /* size */ +
2534                 len /* file name */
2535         );
2536
2537         emit_event (logbuffer, TYPE_SAMPLE | TYPE_SAMPLE_UBIN);
2538         emit_ptr (logbuffer, load_addr);
2539         emit_uvalue (logbuffer, offset);
2540         emit_uvalue (logbuffer, size);
2541         memcpy (logbuffer->cursor, filename, len);
2542         logbuffer->cursor += len;
2543
2544         EXIT_LOG_EXPLICIT (DO_SEND);
2545 }
2546 #endif
2547
2548 static void
2549 dump_usym (MonoProfiler *prof, const char *name, uintptr_t value, uintptr_t size)
2550 {
2551         int len = strlen (name) + 1;
2552
2553         ENTER_LOG (&sample_usyms_ctr, logbuffer,
2554                 EVENT_SIZE /* event */ +
2555                 LEB128_SIZE /* value */ +
2556                 LEB128_SIZE /* size */ +
2557                 len /* name */
2558         );
2559
2560         emit_event (logbuffer, TYPE_SAMPLE | TYPE_SAMPLE_USYM);
2561         emit_ptr (logbuffer, (void*)value);
2562         emit_value (logbuffer, size);
2563         memcpy (logbuffer->cursor, name, len);
2564         logbuffer->cursor += len;
2565
2566         EXIT_LOG_EXPLICIT (DO_SEND);
2567 }
2568
2569 /* ELF code crashes on some systems. */
2570 //#if defined(ELFMAG0)
2571 #if 0
2572
2573 #if SIZEOF_VOID_P == 4
2574 #define ELF_WSIZE 32
2575 #else
2576 #define ELF_WSIZE 64
2577 #endif
2578 #ifndef ElfW
2579 #define ElfW(type)      _ElfW (Elf, ELF_WSIZE, type)
2580 #define _ElfW(e,w,t)    _ElfW_1 (e, w, _##t)
2581 #define _ElfW_1(e,w,t)  e##w##t
2582 #endif
2583
2584 static void
2585 dump_elf_symbols (MonoProfiler *prof, ElfW(Sym) *symbols, int num_symbols, const char *strtab, void *load_addr)
2586 {
2587         int i;
2588         for (i = 0; i < num_symbols; ++i) {
2589                 const char* sym;
2590                 sym =  strtab + symbols [i].st_name;
2591                 if (!symbols [i].st_name || !symbols [i].st_size || (symbols [i].st_info & 0xf) != STT_FUNC)
2592                         continue;
2593                 //printf ("symbol %s at %d\n", sym, symbols [i].st_value);
2594                 dump_usym (prof, sym, (uintptr_t)load_addr + symbols [i].st_value, symbols [i].st_size);
2595         }
2596 }
2597
2598 static int
2599 read_elf_symbols (MonoProfiler *prof, const char *filename, void *load_addr)
2600 {
2601         int fd, i;
2602         void *data;
2603         struct stat statb;
2604         uint64_t file_size;
2605         ElfW(Ehdr) *header;
2606         ElfW(Shdr) *sheader;
2607         ElfW(Shdr) *shstrtabh;
2608         ElfW(Shdr) *symtabh = NULL;
2609         ElfW(Shdr) *strtabh = NULL;
2610         ElfW(Sym) *symbols = NULL;
2611         const char *strtab;
2612         int num_symbols;
2613
2614         fd = open (filename, O_RDONLY);
2615         if (fd < 0)
2616                 return 0;
2617         if (fstat (fd, &statb) != 0) {
2618                 close (fd);
2619                 return 0;
2620         }
2621         file_size = statb.st_size;
2622         data = mmap (NULL, file_size, PROT_READ, MAP_PRIVATE, fd, 0);
2623         close (fd);
2624         if (data == MAP_FAILED)
2625                 return 0;
2626         header = data;
2627         if (header->e_ident [EI_MAG0] != ELFMAG0 ||
2628                         header->e_ident [EI_MAG1] != ELFMAG1 ||
2629                         header->e_ident [EI_MAG2] != ELFMAG2 ||
2630                         header->e_ident [EI_MAG3] != ELFMAG3 ) {
2631                 munmap (data, file_size);
2632                 return 0;
2633         }
2634         sheader = (void*)((char*)data + header->e_shoff);
2635         shstrtabh = (void*)((char*)sheader + (header->e_shentsize * header->e_shstrndx));
2636         strtab = (const char*)data + shstrtabh->sh_offset;
2637         for (i = 0; i < header->e_shnum; ++i) {
2638                 //printf ("section header: %d\n", sheader->sh_type);
2639                 if (sheader->sh_type == SHT_SYMTAB) {
2640                         symtabh = sheader;
2641                         strtabh = (void*)((char*)data + header->e_shoff + sheader->sh_link * header->e_shentsize);
2642                         /*printf ("symtab section header: %d, .strstr: %d\n", i, sheader->sh_link);*/
2643                         break;
2644                 }
2645                 sheader = (void*)((char*)sheader + header->e_shentsize);
2646         }
2647         if (!symtabh || !strtabh) {
2648                 munmap (data, file_size);
2649                 return 0;
2650         }
2651         strtab = (const char*)data + strtabh->sh_offset;
2652         num_symbols = symtabh->sh_size / symtabh->sh_entsize;
2653         symbols = (void*)((char*)data + symtabh->sh_offset);
2654         dump_elf_symbols (prof, symbols, num_symbols, strtab, load_addr);
2655         munmap (data, file_size);
2656         return 1;
2657 }
2658 #endif
2659
2660 /* ELF code crashes on some systems. */
2661 //#if defined(HAVE_DL_ITERATE_PHDR) && defined(ELFMAG0)
2662 #if 0
2663 static int
2664 elf_dl_callback (struct dl_phdr_info *info, size_t size, void *data)
2665 {
2666         MonoProfiler *prof = data;
2667         char buf [256];
2668         const char *filename;
2669         BinaryObject *obj;
2670         char *a = (void*)info->dlpi_addr;
2671         int i, num_sym;
2672         ElfW(Dyn) *dyn = NULL;
2673         ElfW(Sym) *symtab = NULL;
2674         ElfW(Word) *hash_table = NULL;
2675         ElfW(Ehdr) *header = NULL;
2676         const char* strtab = NULL;
2677         for (obj = prof->binary_objects; obj; obj = obj->next) {
2678                 if (obj->addr == a)
2679                         return 0;
2680         }
2681         filename = info->dlpi_name;
2682         if (!filename)
2683                 return 0;
2684         if (!info->dlpi_addr && !filename [0]) {
2685                 int l = readlink ("/proc/self/exe", buf, sizeof (buf) - 1);
2686                 if (l > 0) {
2687                         buf [l] = 0;
2688                         filename = buf;
2689                 }
2690         }
2691         obj = g_calloc (sizeof (BinaryObject), 1);
2692         obj->addr = (void*)info->dlpi_addr;
2693         obj->name = pstrdup (filename);
2694         obj->next = prof->binary_objects;
2695         prof->binary_objects = obj;
2696         //printf ("loaded file: %s at %p, segments: %d\n", filename, (void*)info->dlpi_addr, info->dlpi_phnum);
2697         a = NULL;
2698         for (i = 0; i < info->dlpi_phnum; ++i) {
2699                 //printf ("segment type %d file offset: %d, size: %d\n", info->dlpi_phdr[i].p_type, info->dlpi_phdr[i].p_offset, info->dlpi_phdr[i].p_memsz);
2700                 if (info->dlpi_phdr[i].p_type == PT_LOAD && !header) {
2701                         header = (ElfW(Ehdr)*)(info->dlpi_addr + info->dlpi_phdr[i].p_vaddr);
2702                         if (header->e_ident [EI_MAG0] != ELFMAG0 ||
2703                                         header->e_ident [EI_MAG1] != ELFMAG1 ||
2704                                         header->e_ident [EI_MAG2] != ELFMAG2 ||
2705                                         header->e_ident [EI_MAG3] != ELFMAG3 ) {
2706                                 header = NULL;
2707                         }
2708                         dump_ubin (prof, filename, info->dlpi_addr + info->dlpi_phdr[i].p_vaddr, info->dlpi_phdr[i].p_offset, info->dlpi_phdr[i].p_memsz);
2709                 } else if (info->dlpi_phdr[i].p_type == PT_DYNAMIC) {
2710                         dyn = (ElfW(Dyn) *)(info->dlpi_addr + info->dlpi_phdr[i].p_vaddr);
2711                 }
2712         }
2713         if (read_elf_symbols (prof, filename, (void*)info->dlpi_addr))
2714                 return 0;
2715         if (!info->dlpi_name || !info->dlpi_name[0])
2716                 return 0;
2717         if (!dyn)
2718                 return 0;
2719         for (i = 0; dyn [i].d_tag != DT_NULL; ++i) {
2720                 if (dyn [i].d_tag == DT_SYMTAB) {
2721                         if (symtab && do_debug)
2722                                 printf ("multiple symtabs: %d\n", i);
2723                         symtab = (ElfW(Sym) *)(a + dyn [i].d_un.d_ptr);
2724                 } else if (dyn [i].d_tag == DT_HASH) {
2725                         hash_table = (ElfW(Word) *)(a + dyn [i].d_un.d_ptr);
2726                 } else if (dyn [i].d_tag == DT_STRTAB) {
2727                         strtab = (const char*)(a + dyn [i].d_un.d_ptr);
2728                 }
2729         }
2730         if (!hash_table)
2731                 return 0;
2732         num_sym = hash_table [1];
2733         dump_elf_symbols (prof, symtab, num_sym, strtab, (void*)info->dlpi_addr);
2734         return 0;
2735 }
2736
2737 static int
2738 load_binaries (MonoProfiler *prof)
2739 {
2740         dl_iterate_phdr (elf_dl_callback, prof);
2741         return 1;
2742 }
2743 #else
2744 static int
2745 load_binaries (MonoProfiler *prof)
2746 {
2747         return 0;
2748 }
2749 #endif
2750
2751 static const char*
2752 symbol_for (uintptr_t code)
2753 {
2754 #ifdef HAVE_DLADDR
2755         void *ip = (void*)code;
2756         Dl_info di;
2757         if (dladdr (ip, &di)) {
2758                 if (di.dli_sname)
2759                         return di.dli_sname;
2760         } else {
2761         /*      char **names;
2762                 names = backtrace_symbols (&ip, 1);
2763                 if (names) {
2764                         const char* p = names [0];
2765                         g_free (names);
2766                         return p;
2767                 }
2768                 */
2769         }
2770 #endif
2771         return NULL;
2772 }
2773
2774 static void
2775 dump_unmanaged_coderefs (MonoProfiler *prof)
2776 {
2777         int i;
2778         const char* last_symbol;
2779         uintptr_t addr, page_end;
2780
2781         if (load_binaries (prof))
2782                 return;
2783         for (i = 0; i < size_code_pages; ++i) {
2784                 const char* sym;
2785                 if (!code_pages [i] || code_pages [i] & 1)
2786                         continue;
2787                 last_symbol = NULL;
2788                 addr = CPAGE_ADDR (code_pages [i]);
2789                 page_end = addr + CPAGE_SIZE;
2790                 code_pages [i] |= 1;
2791                 /* we dump the symbols for the whole page */
2792                 for (; addr < page_end; addr += 16) {
2793                         sym = symbol_for (addr);
2794                         if (sym && sym == last_symbol)
2795                                 continue;
2796                         last_symbol = sym;
2797                         if (!sym)
2798                                 continue;
2799                         dump_usym (prof, sym, addr, 0); /* let's not guess the size */
2800                         //printf ("found symbol at %p: %s\n", (void*)addr, sym);
2801                 }
2802         }
2803 }
2804
2805 typedef struct MonoCounterAgent {
2806         MonoCounter *counter;
2807         // MonoCounterAgent-specific data:
2808         void *value;
2809         size_t value_size;
2810         short index;
2811         short emitted;
2812         struct MonoCounterAgent *next;
2813 } MonoCounterAgent;
2814
2815 static MonoCounterAgent* counters;
2816 static int counters_index = 1;
2817 static mono_mutex_t counters_mutex;
2818
2819 static void
2820 counters_add_agent (MonoCounter *counter)
2821 {
2822         if (InterlockedRead (&in_shutdown))
2823                 return;
2824
2825         MonoCounterAgent *agent, *item;
2826
2827         mono_os_mutex_lock (&counters_mutex);
2828
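        /* If the counter is being re-registered, just drop the cached value so the next sample emits it anew. */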
2829         for (agent = counters; agent; agent = agent->next) {
2830                 if (agent->counter == counter) {
2831                         agent->value_size = 0;
2832                         if (agent->value) {
2833                                 g_free (agent->value);
2834                                 agent->value = NULL;
2835                         }
2836                         goto done;
2837                 }
2838         }
2839
2840         agent = (MonoCounterAgent *) g_malloc (sizeof (MonoCounterAgent));
2841         agent->counter = counter;
2842         agent->value = NULL;
2843         agent->value_size = 0;
2844         agent->index = counters_index++;
2845         agent->emitted = 0;
2846         agent->next = NULL;
2847
2848         if (!counters) {
2849                 counters = agent;
2850         } else {
2851                 item = counters;
2852                 while (item->next)
2853                         item = item->next;
2854                 item->next = agent;
2855         }
2856
2857 done:
2858         mono_os_mutex_unlock (&counters_mutex);
2859 }
2860
2861 static mono_bool
2862 counters_init_foreach_callback (MonoCounter *counter, gpointer data)
2863 {
2864         counters_add_agent (counter);
2865         return TRUE;
2866 }
2867
2868 static void
2869 counters_init (MonoProfiler *profiler)
2870 {
2871         mono_os_mutex_init (&counters_mutex);
2872
2873         mono_counters_on_register (&counters_add_agent);
2874         mono_counters_foreach (counters_init_foreach_callback, NULL);
2875 }
2876
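/* Emit a descriptor event for any counters registered since the last call; each agent is described at most once. */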
2877 static void
2878 counters_emit (MonoProfiler *profiler)
2879 {
2880         MonoCounterAgent *agent;
2881         int len = 0;
2882         int size =
2883                 EVENT_SIZE /* event */ +
2884                 LEB128_SIZE /* len */
2885         ;
2886
2887         mono_os_mutex_lock (&counters_mutex);
2888
2889         for (agent = counters; agent; agent = agent->next) {
2890                 if (agent->emitted)
2891                         continue;
2892
2893                 size +=
2894                         LEB128_SIZE /* section */ +
2895                         strlen (mono_counter_get_name (agent->counter)) + 1 /* name */ +
2896                         BYTE_SIZE /* type */ +
2897                         BYTE_SIZE /* unit */ +
2898                         BYTE_SIZE /* variance */ +
2899                         LEB128_SIZE /* index */
2900                 ;
2901
2902                 len++;
2903         }
2904
2905         if (!len)
2906                 goto done;
2907
2908         ENTER_LOG (&counter_descriptors_ctr, logbuffer, size);
2909
2910         emit_event (logbuffer, TYPE_SAMPLE_COUNTERS_DESC | TYPE_SAMPLE);
2911         emit_value (logbuffer, len);
2912
2913         for (agent = counters; agent; agent = agent->next) {
2914                 const char *name;
2915
2916                 if (agent->emitted)
2917                         continue;
2918
2919                 name = mono_counter_get_name (agent->counter);
2920                 emit_value (logbuffer, mono_counter_get_section (agent->counter));
2921                 emit_string (logbuffer, name, strlen (name) + 1);
2922                 emit_byte (logbuffer, mono_counter_get_type (agent->counter));
2923                 emit_byte (logbuffer, mono_counter_get_unit (agent->counter));
2924                 emit_byte (logbuffer, mono_counter_get_variance (agent->counter));
2925                 emit_value (logbuffer, agent->index);
2926
2927                 agent->emitted = 1;
2928         }
2929
2930         EXIT_LOG_EXPLICIT (DO_SEND);
2931
2932 done:
2933         mono_os_mutex_unlock (&counters_mutex);
2934 }
2935
2936 static void
2937 counters_sample (MonoProfiler *profiler, uint64_t timestamp)
2938 {
2939         MonoCounterAgent *agent;
2940         MonoCounter *counter;
2941         int type;
2942         int buffer_size;
2943         void *buffer;
2944         int size;
2945
2946         counters_emit (profiler);
2947
2948         buffer_size = 8;
2949         buffer = g_calloc (1, buffer_size);
2950
2951         mono_os_mutex_lock (&counters_mutex);
2952
2953         size =
2954                 EVENT_SIZE /* event */
2955         ;
2956
2957         for (agent = counters; agent; agent = agent->next) {
2958                 size +=
2959                         LEB128_SIZE /* index */ +
2960                         BYTE_SIZE /* type */ +
2961                         mono_counter_get_size (agent->counter) /* value */
2962                 ;
2963         }
2964
2965         size +=
2966                 LEB128_SIZE /* stop marker */
2967         ;
2968
2969         ENTER_LOG (&counter_samples_ctr, logbuffer, size);
2970
2971         emit_event_time (logbuffer, TYPE_SAMPLE_COUNTERS | TYPE_SAMPLE, timestamp);
2972
2973         for (agent = counters; agent; agent = agent->next) {
2974                 size_t size;
2975
2976                 counter = agent->counter;
2977
2978                 size = mono_counter_get_size (counter);
2979
2980                 if (size > buffer_size) {
2981                         buffer_size = size;
2982                         buffer = g_realloc (buffer, buffer_size);
2983                 }
2984
2985                 memset (buffer, 0, buffer_size);
2986
2987                 g_assert (mono_counters_sample (counter, buffer, size));
2988
2989                 type = mono_counter_get_type (counter);
2990
2991                 if (!agent->value) {
2992                         agent->value = g_calloc (1, size);
2993                         agent->value_size = size;
2994                 } else {
2995                         if (type == MONO_COUNTER_STRING) {
2996                                 if (strcmp (agent->value, buffer) == 0)
2997                                         continue;
2998                         } else {
2999                                 if (agent->value_size == size && memcmp (agent->value, buffer, size) == 0)
3000                                         continue;
3001                         }
3002                 }
3003
3004                 emit_uvalue (logbuffer, agent->index);
3005                 emit_byte (logbuffer, type);
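                /* Integer counter types are emitted as deltas from the previously sampled value; doubles and strings are emitted in full. */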
3006                 switch (type) {
3007                 case MONO_COUNTER_INT:
3008 #if SIZEOF_VOID_P == 4
3009                 case MONO_COUNTER_WORD:
3010 #endif
3011                         emit_svalue (logbuffer, *(int*)buffer - *(int*)agent->value);
3012                         break;
3013                 case MONO_COUNTER_UINT:
3014                         emit_uvalue (logbuffer, *(guint*)buffer - *(guint*)agent->value);
3015                         break;
3016                 case MONO_COUNTER_TIME_INTERVAL:
3017                 case MONO_COUNTER_LONG:
3018 #if SIZEOF_VOID_P == 8
3019                 case MONO_COUNTER_WORD:
3020 #endif
3021                         emit_svalue (logbuffer, *(gint64*)buffer - *(gint64*)agent->value);
3022                         break;
3023                 case MONO_COUNTER_ULONG:
3024                         emit_uvalue (logbuffer, *(guint64*)buffer - *(guint64*)agent->value);
3025                         break;
3026                 case MONO_COUNTER_DOUBLE:
3027                         emit_double (logbuffer, *(double*)buffer);
3028                         break;
3029                 case MONO_COUNTER_STRING:
3030                         if (size == 0) {
3031                                 emit_byte (logbuffer, 0);
3032                         } else {
3033                                 emit_byte (logbuffer, 1);
3034                                 emit_string (logbuffer, (char*)buffer, size);
3035                         }
3036                         break;
3037                 default:
3038                         g_assert_not_reached ();
3039                 }
3040
3041                 if (type == MONO_COUNTER_STRING && size > agent->value_size) {
3042                         agent->value = g_realloc (agent->value, size);
3043                         agent->value_size = size;
3044                 }
3045
3046                 if (size > 0)
3047                         memcpy (agent->value, buffer, size);
3048         }
3049         g_free (buffer);
3050
3051         emit_value (logbuffer, 0);
3052
3053         EXIT_LOG_EXPLICIT (DO_SEND);
3054
3055         mono_os_mutex_unlock (&counters_mutex);
3056 }
3057
3058 typedef struct _PerfCounterAgent PerfCounterAgent;
3059 struct _PerfCounterAgent {
3060         PerfCounterAgent *next;
3061         int index;
3062         char *category_name;
3063         char *name;
3064         int type;
3065         gint64 value;
3066         guint8 emitted;
3067         guint8 updated;
3068         guint8 deleted;
3069 };
3070
3071 static PerfCounterAgent *perfcounters = NULL;
3072
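/*
 * Emit a TYPE_SAMPLE_COUNTERS_DESC block describing any System.Diagnostics
 * performance counters that have been discovered but not yet announced in the
 * log. Each descriptor carries the MONO_COUNTER_PERFCOUNTERS section, the
 * category and counter names, and the index that perfcounters_sample () later
 * uses to refer back to this counter.
 */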
3073 static void
3074 perfcounters_emit (MonoProfiler *profiler)
3075 {
3076         PerfCounterAgent *pcagent;
3077         int len = 0;
3078         int size =
3079                 EVENT_SIZE /* event */ +
3080                 LEB128_SIZE /* len */
3081         ;
3082
3083         for (pcagent = perfcounters; pcagent; pcagent = pcagent->next) {
3084                 if (pcagent->emitted)
3085                         continue;
3086
3087                 size +=
3088                         LEB128_SIZE /* section */ +
3089                         strlen (pcagent->category_name) + 1 /* category name */ +
3090                         strlen (pcagent->name) + 1 /* name */ +
3091                         BYTE_SIZE /* type */ +
3092                         BYTE_SIZE /* unit */ +
3093                         BYTE_SIZE /* variance */ +
3094                         LEB128_SIZE /* index */
3095                 ;
3096
3097                 len++;
3098         }
3099
3100         if (!len)
3101                 return;
3102
3103         ENTER_LOG (&perfcounter_descriptors_ctr, logbuffer, size);
3104
3105         emit_event (logbuffer, TYPE_SAMPLE_COUNTERS_DESC | TYPE_SAMPLE);
3106         emit_value (logbuffer, len);
3107
3108         for (pcagent = perfcounters; pcagent; pcagent = pcagent->next) {
3109                 if (pcagent->emitted)
3110                         continue;
3111
3112                 emit_value (logbuffer, MONO_COUNTER_PERFCOUNTERS);
3113                 emit_string (logbuffer, pcagent->category_name, strlen (pcagent->category_name) + 1);
3114                 emit_string (logbuffer, pcagent->name, strlen (pcagent->name) + 1);
3115                 emit_byte (logbuffer, MONO_COUNTER_LONG);
3116                 emit_byte (logbuffer, MONO_COUNTER_RAW);
3117                 emit_byte (logbuffer, MONO_COUNTER_VARIABLE);
3118                 emit_value (logbuffer, pcagent->index);
3119
3120                 pcagent->emitted = 1;
3121         }
3122
3123         EXIT_LOG_EXPLICIT (DO_SEND);
3124 }
3125
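/*
 * Callback invoked by mono_perfcounter_foreach () for every live performance
 * counter. If we already track this category/name pair, record the new value
 * and mark the agent as updated; otherwise allocate a new agent and prepend it
 * to the global list so its descriptor is emitted on the next
 * perfcounters_emit () pass.
 */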
3126 static gboolean
3127 perfcounters_foreach (char *category_name, char *name, unsigned char type, gint64 value, gpointer user_data)
3128 {
3129         PerfCounterAgent *pcagent;
3130
3131         for (pcagent = perfcounters; pcagent; pcagent = pcagent->next) {
3132                 if (strcmp (pcagent->category_name, category_name) != 0 || strcmp (pcagent->name, name) != 0)
3133                         continue;
3134                 if (pcagent->value == value)
3135                         return TRUE;
3136
3137                 pcagent->value = value;
3138                 pcagent->updated = 1;
3139                 pcagent->deleted = 0;
3140                 return TRUE;
3141         }
3142
3143         pcagent = g_new0 (PerfCounterAgent, 1);
3144         pcagent->next = perfcounters;
3145         pcagent->index = counters_index++;
3146         pcagent->category_name = g_strdup (category_name);
3147         pcagent->name = g_strdup (name);
3148         pcagent->type = (int) type;
3149         pcagent->value = value;
3150         pcagent->emitted = 0;
3151         pcagent->updated = 1;
3152         pcagent->deleted = 0;
3153
3154         perfcounters = pcagent;
3155
3156         return TRUE;
3157 }
3158
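/*
 * Take one sample of all performance counters: mark every known agent as
 * deleted, let mono_perfcounter_foreach () resurrect the ones that still
 * exist, emit descriptors for any newcomers, and finally write a
 * TYPE_SAMPLE_COUNTERS event containing (index, type, value) triples for the
 * agents that changed, terminated by a zero index.
 */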
3159 static void
3160 perfcounters_sample (MonoProfiler *profiler, uint64_t timestamp)
3161 {
3162         PerfCounterAgent *pcagent;
3163         int len = 0;
3164         int size;
3165
3166         mono_os_mutex_lock (&counters_mutex);
3167
3168         /* mark all perfcounters as deleted, foreach will unmark them as necessary */
3169         for (pcagent = perfcounters; pcagent; pcagent = pcagent->next)
3170                 pcagent->deleted = 1;
3171
3172         mono_perfcounter_foreach (perfcounters_foreach, perfcounters);
3173
3174         perfcounters_emit (profiler);
3175
3176         size =
3177                 EVENT_SIZE /* event */
3178         ;
3179
3180         for (pcagent = perfcounters; pcagent; pcagent = pcagent->next) {
3181                 if (pcagent->deleted || !pcagent->updated)
3182                         continue;
3183
3184                 size +=
3185                         LEB128_SIZE /* index */ +
3186                         BYTE_SIZE /* type */ +
3187                         LEB128_SIZE /* value */
3188                 ;
3189
3190                 len++;
3191         }
3192
3193         if (!len)
3194                 goto done;
3195
3196         size +=
3197                 LEB128_SIZE /* stop marker */
3198         ;
3199
3200         ENTER_LOG (&perfcounter_samples_ctr, logbuffer, size);
3201
3202         emit_event_time (logbuffer, TYPE_SAMPLE_COUNTERS | TYPE_SAMPLE, timestamp);
3203
3204         for (pcagent = perfcounters; pcagent; pcagent = pcagent->next) {
3205                 if (pcagent->deleted || !pcagent->updated)
3206                         continue;
3207                 emit_uvalue (logbuffer, pcagent->index);
3208                 emit_byte (logbuffer, MONO_COUNTER_LONG);
3209                 emit_svalue (logbuffer, pcagent->value);
3210
3211                 pcagent->updated = 0;
3212         }
3213
3214         emit_value (logbuffer, 0);
3215
3216         EXIT_LOG_EXPLICIT (DO_SEND);
3217
3218 done:
3219         mono_os_mutex_unlock (&counters_mutex);
3220 }
3221
3222 static void
3223 counters_and_perfcounters_sample (MonoProfiler *prof)
3224 {
3225         uint64_t now = current_time ();
3226
3227         counters_sample (prof, now);
3228         perfcounters_sample (prof, now);
3229 }
3230
3231 #define COVERAGE_DEBUG(x) if (debug_coverage) {x}
3232 static mono_mutex_t coverage_mutex;
3233 static MonoConcurrentHashTable *coverage_methods = NULL;
3234 static MonoConcurrentHashTable *coverage_assemblies = NULL;
3235 static MonoConcurrentHashTable *coverage_classes = NULL;
3236
3237 static MonoConcurrentHashTable *filtered_classes = NULL;
3238 static MonoConcurrentHashTable *entered_methods = NULL;
3239 static MonoConcurrentHashTable *image_to_methods = NULL;
3240 static MonoConcurrentHashTable *suppressed_assemblies = NULL;
3241 static gboolean coverage_initialized = FALSE;
3242
3243 static GPtrArray *coverage_data = NULL;
3244 static int previous_offset = 0;
3245
3246 typedef struct {
3247         MonoLockFreeQueueNode node;
3248         MonoMethod *method;
3249 } MethodNode;
3250
3251 typedef struct {
3252         int offset;
3253         int counter;
3254         char *filename;
3255         int line;
3256         int column;
3257 } CoverageEntry;
3258
3259 static void
3260 free_coverage_entry (gpointer data, gpointer userdata)
3261 {
3262         CoverageEntry *entry = (CoverageEntry *)data;
3263         g_free (entry->filename);
3264         g_free (entry);
3265 }
3266
3267 static void
3268 obtain_coverage_for_method (MonoProfiler *prof, const MonoProfileCoverageEntry *entry)
3269 {
3270         int offset = entry->iloffset - previous_offset;
3271         CoverageEntry *e = g_new (CoverageEntry, 1);
3272
3273         previous_offset = entry->iloffset;
3274
3275         e->offset = offset;
3276         e->counter = entry->counter;
3277         e->filename = g_strdup(entry->filename ? entry->filename : "");
3278         e->line = entry->line;
3279         e->column = entry->col;
3280
3281         g_ptr_array_add (coverage_data, e);
3282 }
3283
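/*
 * Rewrite C#-style generic type names into their IL-style arity form so the
 * coverage output matches metadata names. Illustrative inputs:
 *
 *   "Dictionary<TKey,TValue>" -> "Dictionary`2"
 *   "<>c__DisplayClass0"      -> "&lt;&gt;c__DisplayClass0"
 *
 * The type parameters themselves are dropped and compiler-generated "<>"
 * prefixes are HTML-escaped. Returns a newly allocated string.
 */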
3284 static char *
3285 parse_generic_type_names(char *name)
3286 {
3287         char *new_name, *ret;
3288         int within_generic_declaration = 0, generic_members = 1;
3289
3290         if (name == NULL || *name == '\0')
3291                 return g_strdup ("");
3292
3293         if (!(ret = new_name = (char *) g_calloc (strlen (name) * 4 + 1, sizeof (char))))
3294                 return NULL;
3295
3296         do {
3297                 switch (*name) {
3298                         case '<':
3299                                 within_generic_declaration = 1;
3300                                 break;
3301
3302                         case '>':
3303                                 within_generic_declaration = 0;
3304
3305                                 if (*(name - 1) != '<') {
3306                                         *new_name++ = '`';
3307                                         *new_name++ = '0' + generic_members;
3308                                 } else {
3309                                         memcpy (new_name, "&lt;&gt;", 8);
3310                                         new_name += 8;
3311                                 }
3312
3313                                 generic_members = 0;
3314                                 break;
3315
3316                         case ',':
3317                                 generic_members++;
3318                                 break;
3319
3320                         default:
3321                                 if (!within_generic_declaration)
3322                                         *new_name++ = *name;
3323
3324                                 break;
3325                 }
3326         } while (*name++);
3327
3328         return ret;
3329 }
3330
3331 static int method_id;
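/*
 * Per-method coverage dump: collect the IL offset/hit-count entries recorded
 * for the method, emit a TYPE_COVERAGE_METHOD event with the image, class,
 * method name, signature and first file name, then one
 * TYPE_COVERAGE_STATEMENT event per entry. Offsets are delta-encoded via
 * previous_offset in obtain_coverage_for_method ().
 */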
3332 static void
3333 build_method_buffer (gpointer key, gpointer value, gpointer userdata)
3334 {
3335         MonoMethod *method = (MonoMethod *)value;
3336         MonoProfiler *prof = (MonoProfiler *)userdata;
3337         MonoClass *klass;
3338         MonoImage *image;
3339         char *class_name;
3340         const char *image_name, *method_name, *sig, *first_filename;
3341         guint i;
3342
3343         previous_offset = 0;
3344         coverage_data = g_ptr_array_new ();
3345
3346         mono_profiler_coverage_get (prof, method, obtain_coverage_for_method);
3347
3348         klass = mono_method_get_class (method);
3349         image = mono_class_get_image (klass);
3350         image_name = mono_image_get_name (image);
3351
3352         sig = mono_signature_get_desc (mono_method_signature (method), TRUE);
3353         class_name = parse_generic_type_names (mono_type_get_name (mono_class_get_type (klass)));
3354         method_name = mono_method_get_name (method);
3355
3356         if (coverage_data->len != 0) {
3357                 CoverageEntry *entry = (CoverageEntry *)coverage_data->pdata[0];
3358                 first_filename = entry->filename ? entry->filename : "";
3359         } else
3360                 first_filename = "";
3361
3362         image_name = image_name ? image_name : "";
3363         sig = sig ? sig : "";
3364         method_name = method_name ? method_name : "";
3365
3366         ENTER_LOG (&coverage_methods_ctr, logbuffer,
3367                 EVENT_SIZE /* event */ +
3368                 strlen (image_name) + 1 /* image name */ +
3369                 strlen (class_name) + 1 /* class name */ +
3370                 strlen (method_name) + 1 /* method name */ +
3371                 strlen (sig) + 1 /* signature */ +
3372                 strlen (first_filename) + 1 /* first file name */ +
3373                 LEB128_SIZE /* token */ +
3374                 LEB128_SIZE /* method id */ +
3375                 LEB128_SIZE /* entries */
3376         );
3377
3378         emit_event (logbuffer, TYPE_COVERAGE_METHOD | TYPE_COVERAGE);
3379         emit_string (logbuffer, image_name, strlen (image_name) + 1);
3380         emit_string (logbuffer, class_name, strlen (class_name) + 1);
3381         emit_string (logbuffer, method_name, strlen (method_name) + 1);
3382         emit_string (logbuffer, sig, strlen (sig) + 1);
3383         emit_string (logbuffer, first_filename, strlen (first_filename) + 1);
3384
3385         emit_uvalue (logbuffer, mono_method_get_token (method));
3386         emit_uvalue (logbuffer, method_id);
3387         emit_value (logbuffer, coverage_data->len);
3388
3389         EXIT_LOG_EXPLICIT (DO_SEND);
3390
3391         for (i = 0; i < coverage_data->len; i++) {
3392                 CoverageEntry *entry = (CoverageEntry *)coverage_data->pdata[i];
3393
3394                 ENTER_LOG (&coverage_statements_ctr, logbuffer,
3395                         EVENT_SIZE /* event */ +
3396                         LEB128_SIZE /* method id */ +
3397                         LEB128_SIZE /* offset */ +
3398                         LEB128_SIZE /* counter */ +
3399                         LEB128_SIZE /* line */ +
3400                         LEB128_SIZE /* column */
3401                 );
3402
3403                 emit_event (logbuffer, TYPE_COVERAGE_STATEMENT | TYPE_COVERAGE);
3404                 emit_uvalue (logbuffer, method_id);
3405                 emit_uvalue (logbuffer, entry->offset);
3406                 emit_uvalue (logbuffer, entry->counter);
3407                 emit_uvalue (logbuffer, entry->line);
3408                 emit_uvalue (logbuffer, entry->column);
3409
3410                 EXIT_LOG_EXPLICIT (DO_SEND);
3411         }
3412
3413         method_id++;
3414
3415         g_free (class_name);
3416
3417         g_ptr_array_foreach (coverage_data, free_coverage_entry, NULL);
3418         g_ptr_array_free (coverage_data, TRUE);
3419         coverage_data = NULL;
3420 }
3421
3422 /* Counts the nodes in the queue, draining (and freeing) them in the process. */
3423 static guint
3424 count_queue (MonoLockFreeQueue *queue)
3425 {
3426         MonoLockFreeQueueNode *node;
3427         guint count = 0;
3428
3429         while ((node = mono_lock_free_queue_dequeue (queue))) {
3430                 count++;
3431                 mono_thread_hazardous_try_free (node, g_free);
3432         }
3433
3434         return count;
3435 }
3436
3437 static void
3438 build_class_buffer (gpointer key, gpointer value, gpointer userdata)
3439 {
3440         MonoClass *klass = (MonoClass *)key;
3441         MonoLockFreeQueue *class_methods = (MonoLockFreeQueue *)value;
3442         MonoImage *image;
3443         char *class_name;
3444         const char *assembly_name;
3445         int number_of_methods, partially_covered;
3446         guint fully_covered;
3447
3448         image = mono_class_get_image (klass);
3449         assembly_name = mono_image_get_name (image);
3450         class_name = mono_type_get_name (mono_class_get_type (klass));
3451
3452         assembly_name = assembly_name ? assembly_name : "";
3453         number_of_methods = mono_class_num_methods (klass);
3454         fully_covered = count_queue (class_methods);
3455         /* We don't handle partially covered yet */
3456         partially_covered = 0;
3457
3458         ENTER_LOG (&coverage_classes_ctr, logbuffer,
3459                 EVENT_SIZE /* event */ +
3460                 strlen (assembly_name) + 1 /* assembly name */ +
3461                 strlen (class_name) + 1 /* class name */ +
3462                 LEB128_SIZE /* no. methods */ +
3463                 LEB128_SIZE /* fully covered */ +
3464                 LEB128_SIZE /* partially covered */
3465         );
3466
3467         emit_event (logbuffer, TYPE_COVERAGE_CLASS | TYPE_COVERAGE);
3468         emit_string (logbuffer, assembly_name, strlen (assembly_name) + 1);
3469         emit_string (logbuffer, class_name, strlen (class_name) + 1);
3470         emit_uvalue (logbuffer, number_of_methods);
3471         emit_uvalue (logbuffer, fully_covered);
3472         emit_uvalue (logbuffer, partially_covered);
3473
3474         EXIT_LOG_EXPLICIT (DO_SEND);
3475
3476         g_free (class_name);
3477 }
3478
3479 static void
3480 get_coverage_for_image (MonoImage *image, int *number_of_methods, guint *fully_covered, int *partially_covered)
3481 {
3482         MonoLockFreeQueue *image_methods = (MonoLockFreeQueue *)mono_conc_hashtable_lookup (image_to_methods, image);
3483
3484         *number_of_methods = mono_image_get_table_rows (image, MONO_TABLE_METHOD);
3485         if (image_methods)
3486                 *fully_covered = count_queue (image_methods);
3487         else
3488                 *fully_covered = 0;
3489
3490         // FIXME: We don't handle partially covered yet.
3491         *partially_covered = 0;
3492 }
3493
3494 static void
3495 build_assembly_buffer (gpointer key, gpointer value, gpointer userdata)
3496 {
3497         MonoAssembly *assembly = (MonoAssembly *)value;
3498         MonoImage *image = mono_assembly_get_image (assembly);
3499         const char *name, *guid, *filename;
3500         int number_of_methods = 0, partially_covered = 0;
3501         guint fully_covered = 0;
3502
3503         name = mono_image_get_name (image);
3504         guid = mono_image_get_guid (image);
3505         filename = mono_image_get_filename (image);
3506
3507         name = name ? name : "";
3508         guid = guid ? guid : "";
3509         filename = filename ? filename : "";
3510
3511         get_coverage_for_image (image, &number_of_methods, &fully_covered, &partially_covered);
3512
3513         ENTER_LOG (&coverage_assemblies_ctr, logbuffer,
3514                 EVENT_SIZE /* event */ +
3515                 strlen (name) + 1 /* name */ +
3516                 strlen (guid) + 1 /* guid */ +
3517                 strlen (filename) + 1 /* file name */ +
3518                 LEB128_SIZE /* no. methods */ +
3519                 LEB128_SIZE /* fully covered */ +
3520                 LEB128_SIZE /* partially covered */
3521         );
3522
3523         emit_event (logbuffer, TYPE_COVERAGE_ASSEMBLY | TYPE_COVERAGE);
3524         emit_string (logbuffer, name, strlen (name) + 1);
3525         emit_string (logbuffer, guid, strlen (guid) + 1);
3526         emit_string (logbuffer, filename, strlen (filename) + 1);
3527         emit_uvalue (logbuffer, number_of_methods);
3528         emit_uvalue (logbuffer, fully_covered);
3529         emit_uvalue (logbuffer, partially_covered);
3530
3531         EXIT_LOG_EXPLICIT (DO_SEND);
3532 }
3533
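/*
 * Walk the coverage hash tables and emit assembly, class and method coverage
 * events, in that order. Called from log_shutdown ().
 */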
3534 static void
3535 dump_coverage (MonoProfiler *prof)
3536 {
3537         if (!coverage_initialized)
3538                 return;
3539
3540         COVERAGE_DEBUG(fprintf (stderr, "Coverage: Started dump\n");)
3541         method_id = 0;
3542
3543         mono_os_mutex_lock (&coverage_mutex);
3544         mono_conc_hashtable_foreach (coverage_assemblies, build_assembly_buffer, NULL);
3545         mono_conc_hashtable_foreach (coverage_classes, build_class_buffer, NULL);
3546         mono_conc_hashtable_foreach (coverage_methods, build_method_buffer, prof);
3547         mono_os_mutex_unlock (&coverage_mutex);
3548
3549         COVERAGE_DEBUG(fprintf (stderr, "Coverage: Finished dump\n");)
3550 }
3551
3552 static void
3553 process_method_enter_coverage (MonoProfiler *prof, MonoMethod *method)
3554 {
3555         MonoClass *klass;
3556         MonoImage *image;
3557
3558         if (!coverage_initialized)
3559                 return;
3560
3561         klass = mono_method_get_class (method);
3562         image = mono_class_get_image (klass);
3563
3564         if (mono_conc_hashtable_lookup (suppressed_assemblies, (gpointer) mono_image_get_name (image)))
3565                 return;
3566
3567         mono_os_mutex_lock (&coverage_mutex);
3568         mono_conc_hashtable_insert (entered_methods, method, method);
3569         mono_os_mutex_unlock (&coverage_mutex);
3570 }
3571
3572 static MonoLockFreeQueueNode *
3573 create_method_node (MonoMethod *method)
3574 {
3575         MethodNode *node = (MethodNode *) g_malloc (sizeof (MethodNode));
3576         mono_lock_free_queue_node_init ((MonoLockFreeQueueNode *) node, FALSE);
3577         node->method = method;
3578
3579         return (MonoLockFreeQueueNode *) node;
3580 }
3581
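/*
 * Decide whether a method should be instrumented for coverage. Internal
 * calls, p/invokes, methods from suppressed (core) assemblies and classes
 * rejected by the user-supplied filters are skipped; everything else is
 * registered in the coverage hash tables and per-image/per-class queues.
 *
 * Filters are matched as substrings against "[image]Namespace.Class". For
 * example, a coverage filter file containing (illustrative names):
 *
 *   +[MyApp]
 *   -[MyApp]MyApp.Generated
 *
 * would cover classes from the MyApp image except those whose fully qualified
 * name contains "MyApp.Generated". If any '+' filter is present, at least one
 * of them must match.
 */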
3582 static gboolean
3583 coverage_filter (MonoProfiler *prof, MonoMethod *method)
3584 {
3585         MonoError error;
3586         MonoClass *klass;
3587         MonoImage *image;
3588         MonoAssembly *assembly;
3589         MonoMethodHeader *header;
3590         guint32 iflags, flags, code_size;
3591         char *fqn, *classname;
3592         gboolean has_positive, found;
3593         MonoLockFreeQueue *image_methods, *class_methods;
3594         MonoLockFreeQueueNode *node;
3595
3596         g_assert (coverage_initialized && "Why are we being asked for coverage filter info when we're not doing coverage?");
3597
3598         COVERAGE_DEBUG(fprintf (stderr, "Coverage filter for %s\n", mono_method_get_name (method));)
3599
3600         flags = mono_method_get_flags (method, &iflags);
3601         if ((iflags & 0x1000 /*METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL*/) ||
3602             (flags & 0x2000 /*METHOD_ATTRIBUTE_PINVOKE_IMPL*/)) {
3603                 COVERAGE_DEBUG(fprintf (stderr, "   Internal call or pinvoke - ignoring\n");)
3604                 return FALSE;
3605         }
3606
3607         // Don't need to do anything else if we're already tracking this method
3608         if (mono_conc_hashtable_lookup (coverage_methods, method)) {
3609                 COVERAGE_DEBUG(fprintf (stderr, "   Already tracking\n");)
3610                 return TRUE;
3611         }
3612
3613         klass = mono_method_get_class (method);
3614         image = mono_class_get_image (klass);
3615
3616         // Don't handle coverage for the core assemblies
3617         if (mono_conc_hashtable_lookup (suppressed_assemblies, (gpointer) mono_image_get_name (image)) != NULL)
3618                 return FALSE;
3619
3620         if (prof->coverage_filters) {
3621                 /* Check already filtered classes first */
3622                 if (mono_conc_hashtable_lookup (filtered_classes, klass)) {
3623                         COVERAGE_DEBUG(fprintf (stderr, "   Already filtered\n");)
3624                         return FALSE;
3625                 }
3626
3627                 classname = mono_type_get_name (mono_class_get_type (klass));
3628
3629                 fqn = g_strdup_printf ("[%s]%s", mono_image_get_name (image), classname);
3630
3631                 COVERAGE_DEBUG(fprintf (stderr, "   Looking for %s in filter\n", fqn);)
3632                 // Check positive filters first
3633                 has_positive = FALSE;
3634                 found = FALSE;
3635                 for (guint i = 0; i < prof->coverage_filters->len; ++i) {
3636                         char *filter = (char *)g_ptr_array_index (prof->coverage_filters, i);
3637
3638                         if (filter [0] == '+') {
3639                                 filter = &filter [1];
3640
3641                                 COVERAGE_DEBUG(fprintf (stderr, "   Checking against +%s ...", filter);)
3642
3643                                 if (strstr (fqn, filter) != NULL) {
3644                                         COVERAGE_DEBUG(fprintf (stderr, "matched\n");)
3645                                         found = TRUE;
3646                                 } else
3647                                         COVERAGE_DEBUG(fprintf (stderr, "no match\n");)
3648
3649                                 has_positive = TRUE;
3650                         }
3651                 }
3652
3653                 if (has_positive && !found) {
3654                         COVERAGE_DEBUG(fprintf (stderr, "   Positive match was not found\n");)
3655
3656                         mono_os_mutex_lock (&coverage_mutex);
3657                         mono_conc_hashtable_insert (filtered_classes, klass, klass);
3658                         mono_os_mutex_unlock (&coverage_mutex);
3659                         g_free (fqn);
3660                         g_free (classname);
3661
3662                         return FALSE;
3663                 }
3664
3665                 for (guint i = 0; i < prof->coverage_filters->len; ++i) {
3666                         // FIXME: Is substring search sufficient?
3667                         char *filter = (char *)g_ptr_array_index (prof->coverage_filters, i);
3668                         if (filter [0] == '+')
3669                                 continue;
3670
3671                         // Skip '-'
3672                         filter = &filter [1];
3673                         COVERAGE_DEBUG(fprintf (stderr, "   Checking against -%s ...", filter);)
3674
3675                         if (strstr (fqn, filter) != NULL) {
3676                                 COVERAGE_DEBUG(fprintf (stderr, "matched\n");)
3677
3678                                 mono_os_mutex_lock (&coverage_mutex);
3679                                 mono_conc_hashtable_insert (filtered_classes, klass, klass);
3680                                 mono_os_mutex_unlock (&coverage_mutex);
3681                                 g_free (fqn);
3682                                 g_free (classname);
3683
3684                                 return FALSE;
3685                         } else
3686                                 COVERAGE_DEBUG(fprintf (stderr, "no match\n");)
3687
3688                 }
3689
3690                 g_free (fqn);
3691                 g_free (classname);
3692         }
3693
3694         COVERAGE_DEBUG(fprintf (stderr, "   Handling coverage for %s\n", mono_method_get_name (method));)
3695         header = mono_method_get_header_checked (method, &error);
3696         mono_error_cleanup (&error);
3697
3698         mono_method_header_get_code (header, &code_size, NULL);
3699
3700         assembly = mono_image_get_assembly (image);
3701
3702         // Need to keep the assemblies around for as long as they are kept in the hashtable.
3703         // NUnit, for example, has a habit of unloading them before the coverage statistics are
3704         // generated, causing a crash. See https://bugzilla.xamarin.com/show_bug.cgi?id=39325
3705         mono_assembly_addref (assembly);
3706
3707         mono_os_mutex_lock (&coverage_mutex);
3708         mono_conc_hashtable_insert (coverage_methods, method, method);
3709         mono_conc_hashtable_insert (coverage_assemblies, assembly, assembly);
3710         mono_os_mutex_unlock (&coverage_mutex);
3711
3712         image_methods = (MonoLockFreeQueue *)mono_conc_hashtable_lookup (image_to_methods, image);
3713
3714         if (image_methods == NULL) {
3715                 image_methods = (MonoLockFreeQueue *) g_malloc (sizeof (MonoLockFreeQueue));
3716                 mono_lock_free_queue_init (image_methods);
3717                 mono_os_mutex_lock (&coverage_mutex);
3718                 mono_conc_hashtable_insert (image_to_methods, image, image_methods);
3719                 mono_os_mutex_unlock (&coverage_mutex);
3720         }
3721
3722         node = create_method_node (method);
3723         mono_lock_free_queue_enqueue (image_methods, node);
3724
3725         class_methods = (MonoLockFreeQueue *)mono_conc_hashtable_lookup (coverage_classes, klass);
3726
3727         if (class_methods == NULL) {
3728                 class_methods = (MonoLockFreeQueue *) g_malloc (sizeof (MonoLockFreeQueue));
3729                 mono_lock_free_queue_init (class_methods);
3730                 mono_os_mutex_lock (&coverage_mutex);
3731                 mono_conc_hashtable_insert (coverage_classes, klass, class_methods);
3732                 mono_os_mutex_unlock (&coverage_mutex);
3733         }
3734
3735         node = create_method_node (method);
3736         mono_lock_free_queue_enqueue (class_methods, node);
3737
3738         return TRUE;
3739 }
3740
3741 #define LINE_BUFFER_SIZE 4096
3742 /* Maximum file size we will read: 128 KiB; larger files are rejected. */
3743 #define MAX_FILE_SIZE (128 * 1024)
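/*
 * Read an entire (small) file into a newly allocated, NUL-terminated buffer.
 * Returns NULL if the file cannot be seeked or is larger than MAX_FILE_SIZE.
 * get_next_line () below tokenizes that buffer in place, replacing each '\n'
 * with '\0', so the buffer must stay alive as long as the lines are in use.
 */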
3744 static char *
3745 get_file_content (FILE *stream)
3746 {
3747         char *buffer;
3748         ssize_t bytes_read;
3749         long filesize;
3750         int res, offset = 0;
3751
3752         res = fseek (stream, 0, SEEK_END);
3753         if (res < 0)
3754                 return NULL;
3755
3756         filesize = ftell (stream);
3757         if (filesize < 0)
3758                 return NULL;
3759
3760         res = fseek (stream, 0, SEEK_SET);
3761         if (res < 0)
3762                 return NULL;
3763
3764         if (filesize > MAX_FILE_SIZE)
3765                 return NULL;
3766
3767         buffer = (char *) g_malloc ((filesize + 1) * sizeof (char));
3768         while ((bytes_read = fread (buffer + offset, 1, LINE_BUFFER_SIZE, stream)) > 0)
3769                 offset += bytes_read;
3770
3771         /* NUL-terminate the buffer */
3772         buffer[filesize] = '\0';
3773         return buffer;
3774 }
3775
3776 static char *
3777 get_next_line (char *contents, char **next_start)
3778 {
3779         char *p = contents;
3780
3781         if (p == NULL || *p == '\0') {
3782                 *next_start = NULL;
3783                 return NULL;
3784         }
3785
3786         while (*p != '\n' && *p != '\0')
3787                 p++;
3788
3789         if (*p == '\n') {
3790                 *p = '\0';
3791                 *next_start = p + 1;
3792         } else
3793                 *next_start = NULL;
3794
3795         return contents;
3796 }
3797
3798 static void
3799 init_suppressed_assemblies (void)
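/*
 * Load the list of assemblies for which coverage is suppressed from
 * SUPPRESSION_DIR "/mono-profiler-log.suppression", one assembly name per
 * line. A missing file simply means nothing is suppressed.
 */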
3800 {
3801         char *content;
3802         char *line;
3803         FILE *sa_file;
3804
3805         suppressed_assemblies = mono_conc_hashtable_new (g_str_hash, g_str_equal);
3806         sa_file = fopen (SUPPRESSION_DIR "/mono-profiler-log.suppression", "r");
3807         if (sa_file == NULL)
3808                 return;
3809
3810         /* Don't need to free @content as it is referred to by the lines stored in @suppressed_assemblies */
3811         content = get_file_content (sa_file);
3812         if (content == NULL) {
3813                 g_error ("mono-profiler-log.suppression could not be read or is larger than 128 KiB - aborting\n");
3814         }
3815
3816         while ((line = get_next_line (content, &content))) {
3817                 line = g_strchomp (g_strchug (line));
3818                 /* No locking needed as we're doing initialization */
3819                 mono_conc_hashtable_insert (suppressed_assemblies, line, line);
3820         }
3821
3822         fclose (sa_file);
3823 }
3824
3825 static void
3826 parse_cov_filter_file (GPtrArray *filters, const char *file)
3827 {
3828         FILE *filter_file;
3829         char *line, *content;
3830
3831         filter_file = fopen (file, "r");
3832         if (filter_file == NULL) {
3833                 fprintf (stderr, "Unable to open %s\n", file);
3834                 return;
3835         }
3836
3837         /* Don't need to free content as it is referred to by the lines stored in @filters */
3838         content = get_file_content (filter_file);
3839         if (content == NULL)
3840                 fprintf (stderr, "WARNING: %s could not be read or is larger than 128 KiB - ignoring\n", file);
3841
3842         while ((line = get_next_line (content, &content)))
3843                 g_ptr_array_add (filters, g_strchug (g_strchomp (line)));
3844
3845         fclose (filter_file);
3846 }
3847
3848 static void
3849 coverage_init (MonoProfiler *prof)
3850 {
3851         g_assert (!coverage_initialized && "Why are we initializing coverage twice?");
3852
3853         COVERAGE_DEBUG(fprintf (stderr, "Coverage initialized\n");)
3854
3855         mono_os_mutex_init (&coverage_mutex);
3856         coverage_methods = mono_conc_hashtable_new (NULL, NULL);
3857         coverage_assemblies = mono_conc_hashtable_new (NULL, NULL);
3858         coverage_classes = mono_conc_hashtable_new (NULL, NULL);
3859         filtered_classes = mono_conc_hashtable_new (NULL, NULL);
3860         entered_methods = mono_conc_hashtable_new (NULL, NULL);
3861         image_to_methods = mono_conc_hashtable_new (NULL, NULL);
3862         init_suppressed_assemblies ();
3863
3864         coverage_initialized = TRUE;
3865 }
3866
3867 static void
3868 unref_coverage_assemblies (gpointer key, gpointer value, gpointer userdata)
3869 {
3870         MonoAssembly *assembly = (MonoAssembly *)value;
3871         mono_assembly_close (assembly);
3872 }
3873
3874 static void
3875 free_sample_hit (gpointer p)
3876 {
3877         mono_lock_free_free (p, SAMPLE_BLOCK_SIZE);
3878 }
3879
3880 static void
3881 cleanup_reusable_samples (MonoProfiler *prof)
3882 {
3883         SampleHit *sample;
3884
3885         while ((sample = (SampleHit *) mono_lock_free_queue_dequeue (&prof->sample_reuse_queue)))
3886                 mono_thread_hazardous_try_free (sample, free_sample_hit);
3887 }
3888
3889 static void
3890 log_shutdown (MonoProfiler *prof)
3891 {
3892         InterlockedWrite (&in_shutdown, 1);
3893
3894         if (!no_counters)
3895                 counters_and_perfcounters_sample (prof);
3896
3897         dump_coverage (prof);
3898
3899         char c = 1;
3900
3901         if (write (prof->pipes [1], &c, 1) != 1) {
3902                 fprintf (stderr, "Could not write to pipe: %s\n", strerror (errno));
3903                 exit (1);
3904         }
3905
3906         mono_native_thread_join (prof->helper_thread);
3907
3908         mono_os_mutex_destroy (&counters_mutex);
3909
3910         MonoCounterAgent *mc_next;
3911
3912         for (MonoCounterAgent *cur = counters; cur; cur = mc_next) {
3913                 mc_next = cur->next;
3914                 g_free (cur);
3915         }
3916
3917         PerfCounterAgent *pc_next;
3918
3919         for (PerfCounterAgent *cur = perfcounters; cur; cur = pc_next) {
3920                 pc_next = cur->next;
3921                 g_free (cur);
3922         }
3923
3924         /*
3925          * Ensure that we empty the LLS completely, even if some nodes are
3926          * not immediately removed upon calling mono_lls_remove (), by
3927          * iterating until the head is NULL.
3928          */
3929         while (profiler_thread_list.head) {
3930                 MONO_LLS_FOREACH_SAFE (&profiler_thread_list, MonoProfilerThread, thread) {
3931                         g_assert (thread->attached && "Why is a thread in the LLS not attached?");
3932
3933                         remove_thread (thread);
3934                 } MONO_LLS_FOREACH_SAFE_END
3935         }
3936
3937         /*
3938          * Ensure that all threads have been freed, so that we don't miss any
3939          * buffers when we shut down the writer thread below.
3940          */
3941         mono_thread_hazardous_try_free_all ();
3942
3943         InterlockedWrite (&prof->run_dumper_thread, 0);
3944         mono_os_sem_post (&prof->dumper_queue_sem);
3945         mono_native_thread_join (prof->dumper_thread);
3946         mono_os_sem_destroy (&prof->dumper_queue_sem);
3947
3948         InterlockedWrite (&prof->run_writer_thread, 0);
3949         mono_os_sem_post (&prof->writer_queue_sem);
3950         mono_native_thread_join (prof->writer_thread);
3951         mono_os_sem_destroy (&prof->writer_queue_sem);
3952
3953         /*
3954          * Free all writer queue entries, and ensure that all sample hits will be
3955          * added to the sample reuse queue.
3956          */
3957         mono_thread_hazardous_try_free_all ();
3958
3959         cleanup_reusable_samples (prof);
3960
3961         /*
3962          * Finally, make sure that all sample hits are freed. This should cover all
3963          * hazardous data from the profiler. We can now be sure that the runtime
3964          * won't later invoke free functions in the profiler library after it has
3965          * been unloaded.
3966          */
3967         mono_thread_hazardous_try_free_all ();
3968
3969         gint32 state = InterlockedRead (&buffer_lock_state);
3970
3971         g_assert (!(state & 0xFFFF) && "Why is the reader count still non-zero?");
3972         g_assert (!(state >> 16) && "Why is the exclusive lock still held?");
3973
3974 #if defined (HAVE_SYS_ZLIB)
3975         if (prof->gzfile)
3976                 gzclose (prof->gzfile);
3977 #endif
3978         if (prof->pipe_output)
3979                 pclose (prof->file);
3980         else
3981                 fclose (prof->file);
3982
3983         mono_conc_hashtable_destroy (prof->method_table);
3984         mono_os_mutex_destroy (&prof->method_table_mutex);
3985
3986         if (coverage_initialized) {
3987                 mono_os_mutex_lock (&coverage_mutex);
3988                 mono_conc_hashtable_foreach (coverage_assemblies, unref_coverage_assemblies, prof);
3989                 mono_os_mutex_unlock (&coverage_mutex);
3990
3991                 mono_conc_hashtable_destroy (coverage_methods);
3992                 mono_conc_hashtable_destroy (coverage_assemblies);
3993                 mono_conc_hashtable_destroy (coverage_classes);
3994                 mono_conc_hashtable_destroy (filtered_classes);
3995
3996                 mono_conc_hashtable_destroy (entered_methods);
3997                 mono_conc_hashtable_destroy (image_to_methods);
3998                 mono_conc_hashtable_destroy (suppressed_assemblies);
3999                 mono_os_mutex_destroy (&coverage_mutex);
4000         }
4001
4002         PROF_TLS_FREE ();
4003
4004         g_free (prof->args);
4005         g_free (prof);
4006 }
4007
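/*
 * Expand the output file name template: "%t" becomes a UTC timestamp
 * (YYYYMMDDhhmmss), "%p" the current process id and "%%" a literal '%'.
 * For example (illustrative values), "output-%p-%t.mlpd" would expand to
 * something like "output-12345-20170717093045.mlpd".
 */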
4008 static char*
4009 new_filename (const char* filename)
4010 {
4011         time_t t = time (NULL);
4012         int pid = process_id ();
4013         char pid_buf [16];
4014         char time_buf [16];
4015         char *res, *d;
4016         const char *p;
4017         int count_dates = 0;
4018         int count_pids = 0;
4019         int s_date, s_pid;
4020         struct tm *ts;
4021         for (p = filename; *p; p++) {
4022                 if (*p != '%')
4023                         continue;
4024                 p++;
4025                 if (*p == 't')
4026                         count_dates++;
4027                 else if (*p == 'p')
4028                         count_pids++;
4029                 else if (*p == 0)
4030                         break;
4031         }
4032         if (!count_dates && !count_pids)
4033                 return pstrdup (filename);
4034         snprintf (pid_buf, sizeof (pid_buf), "%d", pid);
4035         ts = gmtime (&t);
4036         snprintf (time_buf, sizeof (time_buf), "%d%02d%02d%02d%02d%02d",
4037                 1900 + ts->tm_year, 1 + ts->tm_mon, ts->tm_mday, ts->tm_hour, ts->tm_min, ts->tm_sec);
4038         s_date = strlen (time_buf);
4039         s_pid = strlen (pid_buf);
4040         d = res = (char *) g_malloc (strlen (filename) + s_date * count_dates + s_pid * count_pids);
4041         for (p = filename; *p; p++) {
4042                 if (*p != '%') {
4043                         *d++ = *p;
4044                         continue;
4045                 }
4046                 p++;
4047                 if (*p == 't') {
4048                         strcpy (d, time_buf);
4049                         d += s_date;
4050                         continue;
4051                 } else if (*p == 'p') {
4052                         strcpy (d, pid_buf);
4053                         d += s_pid;
4054                         continue;
4055                 } else if (*p == '%') {
4056                         *d++ = '%';
4057                         continue;
4058                 } else if (*p == 0)
4059                         break;
4060                 *d++ = '%';
4061                 *d++ = *p;
4062         }
4063         *d = 0;
4064         return res;
4065 }
4066
4067 static void
4068 add_to_fd_set (fd_set *set, int fd, int *max_fd)
4069 {
4070         /*
4071          * This should only trigger for the basic FDs (server socket, pipes) at
4072          * startup if for some mysterious reason they're too large. In this case,
4073          * the profiler really can't function, and we're better off printing an
4074          * error and exiting.
4075          */
4076         if (fd >= FD_SETSIZE) {
4077                 fprintf (stderr, "File descriptor is out of bounds for fd_set: %d\n", fd);
4078                 exit (1);
4079         }
4080
4081         FD_SET (fd, set);
4082
4083         if (*max_fd < fd)
4084                 *max_fd = fd;
4085 }
4086
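/*
 * Helper thread: waits (with a 1 second timeout) on the shutdown pipe, the
 * command server socket and any connected command clients. On every wakeup it
 * samples the counters (unless counters are disabled) and emits a periodic
 * sync point. Clients that send the line "heapshot\n" request an on-demand
 * heap shot when hs_mode_ondemand is enabled; from the local machine that can
 * be done with, for example, something like
 *
 *   echo heapshot | nc 127.0.0.1 <command_port>
 */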
4087 static void *
4088 helper_thread (void *arg)
4089 {
4090         MonoProfiler *prof = (MonoProfiler *) arg;
4091
4092         mono_threads_attach_tools_thread ();
4093         mono_native_thread_set_name (mono_native_thread_id_get (), "Profiler helper");
4094
4095         MonoProfilerThread *thread = init_thread (prof, FALSE);
4096
4097         GArray *command_sockets = g_array_new (FALSE, FALSE, sizeof (int));
4098
4099         while (1) {
4100                 fd_set rfds;
4101                 int max_fd = -1;
4102
4103                 FD_ZERO (&rfds);
4104
4105                 add_to_fd_set (&rfds, prof->server_socket, &max_fd);
4106                 add_to_fd_set (&rfds, prof->pipes [0], &max_fd);
4107
4108                 for (gint i = 0; i < command_sockets->len; i++)
4109                         add_to_fd_set (&rfds, g_array_index (command_sockets, int, i), &max_fd);
4110
4111                 struct timeval tv = { .tv_sec = 1, .tv_usec = 0 };
4112
4113                 // Sleep for 1sec or until a file descriptor has data.
4114                 if (select (max_fd + 1, &rfds, NULL, NULL, &tv) == -1) {
4115                         if (errno == EINTR)
4116                                 continue;
4117
4118                         fprintf (stderr, "Error in mono-profiler-log server: %s\n", strerror (errno));
4119                         exit (1);
4120                 }
4121
4122                 if (!no_counters)
4123                         counters_and_perfcounters_sample (prof);
4124
4125                 buffer_lock_excl ();
4126
4127                 sync_point (SYNC_POINT_PERIODIC);
4128
4129                 buffer_unlock_excl ();
4130
4131                 // Are we shutting down?
4132                 if (FD_ISSET (prof->pipes [0], &rfds)) {
4133                         char c;
4134                         read (prof->pipes [0], &c, 1);
4135                         break;
4136                 }
4137
4138                 for (gint i = 0; i < command_sockets->len; i++) {
4139                         int fd = g_array_index (command_sockets, int, i);
4140
4141                         if (!FD_ISSET (fd, &rfds))
4142                                 continue;
4143
4144                         char buf [64];
4145                         int len = read (fd, buf, sizeof (buf) - 1);
4146
4147                         if (len == -1)
4148                                 continue;
4149
4150                         if (!len) {
4151                                 // The other end disconnected.
4152                                 g_array_remove_index (command_sockets, i);
4153                                 close (fd);
4154
4155                                 continue;
4156                         }
4157
4158                         buf [len] = 0;
4159
4160                         if (!strcmp (buf, "heapshot\n") && hs_mode_ondemand) {
4161                                 // Rely on the finalization callback triggering a GC.
4162                                 heapshot_requested = 1;
4163                                 mono_gc_finalize_notify ();
4164                         }
4165                 }
4166
4167                 if (FD_ISSET (prof->server_socket, &rfds)) {
4168                         int fd = accept (prof->server_socket, NULL, NULL);
4169
4170                         if (fd != -1) {
4171                                 if (fd >= FD_SETSIZE)
4172                                         close (fd);
4173                                 else
4174                                         g_array_append_val (command_sockets, fd);
4175                         }
4176                 }
4177         }
4178
4179         for (gint i = 0; i < command_sockets->len; i++)
4180                 close (g_array_index (command_sockets, int, i));
4181
4182         g_array_free (command_sockets, TRUE);
4183
4184         send_log_unsafe (FALSE);
4185         deinit_thread (thread);
4186
4187         mono_thread_info_detach ();
4188
4189         return NULL;
4190 }
4191
4192 static void
4193 start_helper_thread (MonoProfiler* prof)
4194 {
4195         if (pipe (prof->pipes) == -1) {
4196                 fprintf (stderr, "Cannot create pipe: %s\n", strerror (errno));
4197                 exit (1);
4198         }
4199
4200         prof->server_socket = socket (PF_INET, SOCK_STREAM, 0);
4201
4202         if (prof->server_socket == -1) {
4203                 fprintf (stderr, "Cannot create server socket: %s\n", strerror (errno));
4204                 exit (1);
4205         }
4206
4207         struct sockaddr_in server_address;
4208
4209         memset (&server_address, 0, sizeof (server_address));
4210         server_address.sin_family = AF_INET;
4211         server_address.sin_addr.s_addr = INADDR_ANY;
4212         server_address.sin_port = htons (prof->command_port);
4213
4214         if (bind (prof->server_socket, (struct sockaddr *) &server_address, sizeof (server_address)) == -1) {
4215                 fprintf (stderr, "Cannot bind server socket on port %d: %s\n", prof->command_port, strerror (errno));
4216                 close (prof->server_socket);
4217                 exit (1);
4218         }
4219
4220         if (listen (prof->server_socket, 1) == -1) {
4221                 fprintf (stderr, "Cannot listen on server socket: %s\n", strerror (errno));
4222                 close (prof->server_socket);
4223                 exit (1);
4224         }
4225
4226         socklen_t slen = sizeof (server_address);
4227
4228         if (getsockname (prof->server_socket, (struct sockaddr *) &server_address, &slen)) {
4229                 fprintf (stderr, "Could not get assigned port: %s\n", strerror (errno));
4230                 close (prof->server_socket);
4231                 exit (1);
4232         }
4233
4234         prof->command_port = ntohs (server_address.sin_port);
4235
4236         if (!mono_native_thread_create (&prof->helper_thread, helper_thread, prof)) {
4237                 fprintf (stderr, "Could not start helper thread\n");
4238                 close (prof->server_socket);
4239                 exit (1);
4240         }
4241 }
4242
4243 static void
4244 free_writer_entry (gpointer p)
4245 {
4246         mono_lock_free_free (p, WRITER_ENTRY_BLOCK_SIZE);
4247 }
4248
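/*
 * Dequeue one entry from the writer queue. Any methods attached to the entry
 * get their JIT metadata (TYPE_JIT | TYPE_METHOD) emitted first, into the
 * writer thread's own buffer, which is flushed ahead of the entry's buffer so
 * method events are never referenced before their metadata. Returns TRUE if
 * an entry was processed.
 */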
4249 static gboolean
4250 handle_writer_queue_entry (MonoProfiler *prof)
4251 {
4252         WriterQueueEntry *entry;
4253
4254         if ((entry = (WriterQueueEntry *) mono_lock_free_queue_dequeue (&prof->writer_queue))) {
4255                 if (!entry->methods)
4256                         goto no_methods;
4257
4258                 gboolean wrote_methods = FALSE;
4259
4260                 /*
4261                  * Encode the method events in a temporary log buffer that we
4262                  * flush to disk before the main buffer, ensuring that all
4263                  * methods have metadata emitted before they're referenced.
4264                  *
4265                  * We use a 'proper' thread-local buffer for this as opposed
4266                  * to allocating and freeing a buffer by hand because the call
4267                  * to mono_method_full_name () below may trigger class load
4268                  * events when it retrieves the signature of the method. So a
4269                  * thread-local buffer needs to exist when such events occur.
4270                  */
4271                 for (guint i = 0; i < entry->methods->len; i++) {
4272                         MethodInfo *info = (MethodInfo *) g_ptr_array_index (entry->methods, i);
4273
4274                         if (mono_conc_hashtable_lookup (prof->method_table, info->method))
4275                                 goto free_info; // This method already has metadata emitted.
4276
4277                         /*
4278                          * Other threads use this hash table to get a general
4279                          * idea of whether a method has already been emitted to
4280                          * the stream. Due to the way we add to this table, it
4281                          * can easily happen that multiple threads queue up the
4282                          * same methods, but that's OK since eventually all
4283                          * methods will be in this table and the thread-local
4284                          * method lists will just be empty for the rest of the
4285                          * app's lifetime.
4286                          */
4287                         mono_os_mutex_lock (&prof->method_table_mutex);
4288                         mono_conc_hashtable_insert (prof->method_table, info->method, info->method);
4289                         mono_os_mutex_unlock (&prof->method_table_mutex);
4290
4291                         char *name = mono_method_full_name (info->method, 1);
4292                         int nlen = strlen (name) + 1;
4293                         void *cstart = info->ji ? mono_jit_info_get_code_start (info->ji) : NULL;
4294                         int csize = info->ji ? mono_jit_info_get_code_size (info->ji) : 0;
4295
4296                         ENTER_LOG (&method_jits_ctr, logbuffer,
4297                                 EVENT_SIZE /* event */ +
4298                                 LEB128_SIZE /* method */ +
4299                                 LEB128_SIZE /* start */ +
4300                                 LEB128_SIZE /* size */ +
4301                                 nlen /* name */
4302                         );
4303
4304                         emit_event_time (logbuffer, TYPE_JIT | TYPE_METHOD, info->time);
4305                         emit_method_inner (logbuffer, info->method);
4306                         emit_ptr (logbuffer, cstart);
4307                         emit_value (logbuffer, csize);
4308
4309                         memcpy (logbuffer->cursor, name, nlen);
4310                         logbuffer->cursor += nlen;
4311
4312                         EXIT_LOG_EXPLICIT (NO_SEND);
4313
4314                         mono_free (name);
4315
4316                         wrote_methods = TRUE;
4317
4318                 free_info:
4319                         g_free (info);
4320                 }
4321
4322                 g_ptr_array_free (entry->methods, TRUE);
4323
4324                 if (wrote_methods) {
4325                         MonoProfilerThread *thread = PROF_TLS_GET ();
4326
4327                         dump_buffer_threadless (prof, thread->buffer);
4328                         init_buffer_state (thread);
4329                 }
4330
4331         no_methods:
4332                 dump_buffer (prof, entry->buffer);
4333
4334                 mono_thread_hazardous_try_free (entry, free_writer_entry);
4335
4336                 return TRUE;
4337         }
4338
4339         return FALSE;
4340 }
4341
4342 static void *
4343 writer_thread (void *arg)
4344 {
4345         MonoProfiler *prof = (MonoProfiler *)arg;
4346
4347         mono_threads_attach_tools_thread ();
4348         mono_native_thread_set_name (mono_native_thread_id_get (), "Profiler writer");
4349
4350         dump_header (prof);
4351
4352         MonoProfilerThread *thread = init_thread (prof, FALSE);
4353
4354         while (InterlockedRead (&prof->run_writer_thread)) {
4355                 mono_os_sem_wait (&prof->writer_queue_sem, MONO_SEM_FLAGS_NONE);
4356                 handle_writer_queue_entry (prof);
4357         }
4358
4359         /* Drain any remaining entries on shutdown. */
4360         while (handle_writer_queue_entry (prof));
4361
4362         free_buffer (thread->buffer, thread->buffer->size);
4363         deinit_thread (thread);
4364
4365         mono_thread_info_detach ();
4366
4367         return NULL;
4368 }
4369
4370 static void
4371 start_writer_thread (MonoProfiler* prof)
4372 {
4373         InterlockedWrite (&prof->run_writer_thread, 1);
4374
4375         if (!mono_native_thread_create (&prof->writer_thread, writer_thread, prof)) {
4376                 fprintf (stderr, "Could not start writer thread\n");
4377                 exit (1);
4378         }
4379 }
4380
4381 static void
4382 reuse_sample_hit (gpointer p)
4383 {
4384         SampleHit *sample = p;
4385
4386         mono_lock_free_queue_node_unpoison (&sample->node);
4387         mono_lock_free_queue_enqueue (&sample->prof->sample_reuse_queue, &sample->node);
4388 }
4389
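/*
 * Dequeue one sample hit, resolve the managed method for each captured frame
 * through the JIT info table where necessary, and emit a TYPE_SAMPLE_HIT
 * event followed by any newly seen unmanaged code references.
 */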
4390 static gboolean
4391 handle_dumper_queue_entry (MonoProfiler *prof)
4392 {
4393         SampleHit *sample;
4394
4395         if ((sample = (SampleHit *) mono_lock_free_queue_dequeue (&prof->dumper_queue))) {
4396                 for (int i = 0; i < sample->count; ++i) {
4397                         MonoMethod *method = sample->frames [i].method;
4398                         MonoDomain *domain = sample->frames [i].domain;
4399                         void *address = sample->frames [i].base_address;
4400
4401                         if (!method) {
4402                                 g_assert (domain && "What happened to the domain pointer?");
4403                                 g_assert (address && "What happened to the instruction pointer?");
4404
4405                                 MonoJitInfo *ji = mono_jit_info_table_find (domain, (char *) address);
4406
4407                                 if (ji)
4408                                         sample->frames [i].method = mono_jit_info_get_method (ji);
4409                         }
4410                 }
4411
4412                 ENTER_LOG (&sample_hits_ctr, logbuffer,
4413                         EVENT_SIZE /* event */ +
4414                         BYTE_SIZE /* type */ +
4415                         LEB128_SIZE /* tid */ +
4416                         LEB128_SIZE /* count */ +
4417                         1 * (
4418                                 LEB128_SIZE /* ip */
4419                         ) +
4420                         LEB128_SIZE /* managed count */ +
4421                         sample->count * (
4422                                 LEB128_SIZE /* method */
4423                         )
4424                 );
4425
4426                 emit_event_time (logbuffer, TYPE_SAMPLE | TYPE_SAMPLE_HIT, sample->time);
4427                 emit_byte (logbuffer, SAMPLE_CYCLES);
4428                 emit_ptr (logbuffer, (void *) sample->tid);
4429                 emit_value (logbuffer, 1);
4430
4431                 // TODO: Actual native unwinding.
4432                 for (int i = 0; i < 1; ++i) {
4433                         emit_ptr (logbuffer, sample->ip);
4434                         add_code_pointer ((uintptr_t) sample->ip);
4435                 }
4436
4437                 /* new in data version 6 */
4438                 emit_uvalue (logbuffer, sample->count);
4439
4440                 for (int i = 0; i < sample->count; ++i)
4441                         emit_method (logbuffer, sample->frames [i].method);
4442
4443                 EXIT_LOG_EXPLICIT (DO_SEND);
4444
4445                 mono_thread_hazardous_try_free (sample, reuse_sample_hit);
4446
4447                 dump_unmanaged_coderefs (prof);

                /* Report that an entry was processed so the shutdown drain loop
                 * in dumper_thread () keeps draining the queue, mirroring
                 * handle_writer_queue_entry (). */
                return TRUE;
4448         }
4449
4450         return FALSE;
4451 }
4452
4453 static void *
4454 dumper_thread (void *arg)
4455 {
4456         MonoProfiler *prof = (MonoProfiler *)arg;
4457
4458         mono_threads_attach_tools_thread ();
4459         mono_native_thread_set_name (mono_native_thread_id_get (), "Profiler dumper");
4460
4461         MonoProfilerThread *thread = init_thread (prof, FALSE);
4462
4463         while (InterlockedRead (&prof->run_dumper_thread)) {
4464                 /*
4465                  * Flush samples every second so it doesn't seem like the profiler is
4466                  * not working if the program is mostly idle.
4467                  */
4468                 if (mono_os_sem_timedwait (&prof->dumper_queue_sem, 1000, MONO_SEM_FLAGS_NONE) == MONO_SEM_TIMEDWAIT_RET_TIMEDOUT)
4469                         send_log_unsafe (FALSE);
4470
4471                 handle_dumper_queue_entry (prof);
4472         }
4473
4474         /* Drain any remaining entries on shutdown. */
4475         while (handle_dumper_queue_entry (prof));
4476
4477         send_log_unsafe (FALSE);
4478         deinit_thread (thread);
4479
4480         mono_thread_info_detach ();
4481
4482         return NULL;
4483 }
4484
4485 static void
4486 start_dumper_thread (MonoProfiler* prof)
4487 {
4488         InterlockedWrite (&prof->run_dumper_thread, 1);
4489
4490         if (!mono_native_thread_create (&prof->dumper_thread, dumper_thread, prof)) {
4491                 fprintf (stderr, "Could not start dumper thread\n");
4492                 exit (1);
4493         }
4494 }
4495
4496 static void
4497 register_counter (const char *name, gint32 *counter)
4498 {
4499         mono_counters_register (name, MONO_COUNTER_UINT | MONO_COUNTER_PROFILER | MONO_COUNTER_MONOTONIC, counter);
4500 }
4501
4502 static void
4503 runtime_initialized (MonoProfiler *profiler)
4504 {
4505         InterlockedWrite (&runtime_inited, 1);
4506
4507         register_counter ("Sample events allocated", &sample_allocations_ctr);
4508         register_counter ("Log buffers allocated", &buffer_allocations_ctr);
4509
4510         register_counter ("Event: Sync points", &sync_points_ctr);
4511         register_counter ("Event: Heap objects", &heap_objects_ctr);
4512         register_counter ("Event: Heap starts", &heap_starts_ctr);
4513         register_counter ("Event: Heap ends", &heap_ends_ctr);
4514         register_counter ("Event: Heap roots", &heap_roots_ctr);
4515         register_counter ("Event: GC events", &gc_events_ctr);
4516         register_counter ("Event: GC resizes", &gc_resizes_ctr);
4517         register_counter ("Event: GC allocations", &gc_allocs_ctr);
4518         register_counter ("Event: GC moves", &gc_moves_ctr);
4519         register_counter ("Event: GC handle creations", &gc_handle_creations_ctr);
4520         register_counter ("Event: GC handle deletions", &gc_handle_deletions_ctr);
4521         register_counter ("Event: GC finalize starts", &finalize_begins_ctr);
4522         register_counter ("Event: GC finalize ends", &finalize_ends_ctr);
4523         register_counter ("Event: GC finalize object starts", &finalize_object_begins_ctr);
4524         register_counter ("Event: GC finalize object ends", &finalize_object_ends_ctr);
4525         register_counter ("Event: Image loads", &image_loads_ctr);
4526         register_counter ("Event: Image unloads", &image_unloads_ctr);
4527         register_counter ("Event: Assembly loads", &assembly_loads_ctr);
4528         register_counter ("Event: Assembly unloads", &assembly_unloads_ctr);
4529         register_counter ("Event: Class loads", &class_loads_ctr);
4530         register_counter ("Event: Class unloads", &class_unloads_ctr);
4531         register_counter ("Event: Method entries", &method_entries_ctr);
4532         register_counter ("Event: Method exits", &method_exits_ctr);
4533         register_counter ("Event: Method exception leaves", &method_exception_exits_ctr);
4534         register_counter ("Event: Method JITs", &method_jits_ctr);
4535         register_counter ("Event: Code buffers", &code_buffers_ctr);
4536         register_counter ("Event: Exception throws", &exception_throws_ctr);
4537         register_counter ("Event: Exception clauses", &exception_clauses_ctr);
4538         register_counter ("Event: Monitor events", &monitor_events_ctr);
4539         register_counter ("Event: Thread starts", &thread_starts_ctr);
4540         register_counter ("Event: Thread ends", &thread_ends_ctr);
4541         register_counter ("Event: Thread names", &thread_names_ctr);
4542         register_counter ("Event: Domain loads", &domain_loads_ctr);
4543         register_counter ("Event: Domain unloads", &domain_unloads_ctr);
4544         register_counter ("Event: Domain names", &domain_names_ctr);
4545         register_counter ("Event: Context loads", &context_loads_ctr);
4546         register_counter ("Event: Context unloads", &context_unloads_ctr);
4547         register_counter ("Event: Sample binaries", &sample_ubins_ctr);
4548         register_counter ("Event: Sample symbols", &sample_usyms_ctr);
4549         register_counter ("Event: Sample hits", &sample_hits_ctr);
4550         register_counter ("Event: Counter descriptors", &counter_descriptors_ctr);
4551         register_counter ("Event: Counter samples", &counter_samples_ctr);
4552         register_counter ("Event: Performance counter descriptors", &perfcounter_descriptors_ctr);
4553         register_counter ("Event: Performance counter samples", &perfcounter_samples_ctr);
4554         register_counter ("Event: Coverage methods", &coverage_methods_ctr);
4555         register_counter ("Event: Coverage statements", &coverage_statements_ctr);
4556         register_counter ("Event: Coverage classes", &coverage_classes_ctr);
4557         register_counter ("Event: Coverage assemblies", &coverage_assemblies_ctr);
4558
4559         counters_init (profiler);
4560
4561         /*
4562          * We must start the helper thread before the writer thread. This is
4563          * because the helper thread sets up the command port which is written to
4564          * the log header by the writer thread.
4565          */
4566         start_helper_thread (profiler);
4567         start_writer_thread (profiler);
4568         start_dumper_thread (profiler);
4569 }
4570
4571 static void
4572 create_profiler (const char *args, const char *filename, GPtrArray *filters)
4573 {
4574         char *nf;
4575         int force_delete = 0;
4576
4577         log_profiler = (MonoProfiler *) g_calloc (1, sizeof (MonoProfiler));
4578         log_profiler->args = pstrdup (args);
4579         log_profiler->command_port = command_port;
4580
4581         if (filename && *filename == '-') {
4582                 force_delete = 1;
4583                 filename++;
4584                 g_warning ("WARNING: the output:-FILENAME option is deprecated, the profiler now always overwrites the output file\n");
4585         }
4586
4587         //If the filename begins with '+', append the pid at the end
4588         if (filename && *filename == '+')
4589                 filename = g_strdup_printf ("%s.%d", filename + 1, getpid ());
4590
4591
4592         if (!filename) {
4593                 if (do_report)
4594                         filename = "|mprof-report -";
4595                 else
4596                         filename = "output.mlpd";
4597                 nf = (char*)filename;
4598         } else {
4599                 nf = new_filename (filename);
4600                 if (do_report) {
4601                         int s = strlen (nf) + 32;
4602                         char *p = (char *) g_malloc (s);
4603                         snprintf (p, s, "|mprof-report '--out=%s' -", nf);
4604                         g_free (nf);
4605                         nf = p;
4606                 }
4607         }
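             /* The output target is chosen by the first character of the final name: '|' pipes to a
              * command, '#' appends to an existing file descriptor, anything else is opened as a regular file. */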
4608         if (*nf == '|') {
4609                 log_profiler->file = popen (nf + 1, "w");
4610                 log_profiler->pipe_output = 1;
4611         } else if (*nf == '#') {
4612                 int fd = strtol (nf + 1, NULL, 10);
4613                 log_profiler->file = fdopen (fd, "a");
4614         } else {
4615                 if (force_delete)
4616                         unlink (nf);
4617                 log_profiler->file = fopen (nf, "wb");
4618         }
4619         if (!log_profiler->file) {
4620                 fprintf (stderr, "Cannot create profiler output: %s\n", nf);
4621                 exit (1);
4622         }
4623
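     /* If requested, compress the output stream with gzip. */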
4624 #if defined (HAVE_SYS_ZLIB)
4625         if (use_zip)
4626                 log_profiler->gzfile = gzdopen (fileno (log_profiler->file), "wb");
4627 #endif
4628
4629         /*
4630          * If you hit this assert while increasing MAX_FRAMES, you need to increase
4631          * SAMPLE_BLOCK_SIZE as well.
4632          */
4633         g_assert (SAMPLE_SLOT_SIZE (MAX_FRAMES) * 2 < LOCK_FREE_ALLOC_SB_USABLE_SIZE (SAMPLE_BLOCK_SIZE));
4634
4635         // FIXME: We should free this stuff too.
4636         mono_lock_free_allocator_init_size_class (&log_profiler->sample_size_class, SAMPLE_SLOT_SIZE (num_frames), SAMPLE_BLOCK_SIZE);
4637         mono_lock_free_allocator_init_allocator (&log_profiler->sample_allocator, &log_profiler->sample_size_class, MONO_MEM_ACCOUNT_PROFILER);
4638
4639         mono_lock_free_queue_init (&log_profiler->sample_reuse_queue);
4640
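             /* Writer queue entries come from the same kind of lock-free block allocator, so an entry must fit within a block as well. */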
4641         g_assert (sizeof (WriterQueueEntry) * 2 < LOCK_FREE_ALLOC_SB_USABLE_SIZE (WRITER_ENTRY_BLOCK_SIZE));
4642
4643         // FIXME: We should free this stuff too.
4644         mono_lock_free_allocator_init_size_class (&log_profiler->writer_entry_size_class, sizeof (WriterQueueEntry), WRITER_ENTRY_BLOCK_SIZE);
4645         mono_lock_free_allocator_init_allocator (&log_profiler->writer_entry_allocator, &log_profiler->writer_entry_size_class, MONO_MEM_ACCOUNT_PROFILER);
4646
4647         mono_lock_free_queue_init (&log_profiler->writer_queue);
4648         mono_os_sem_init (&log_profiler->writer_queue_sem, 0);
4649
4650         mono_lock_free_queue_init (&log_profiler->dumper_queue);
4651         mono_os_sem_init (&log_profiler->dumper_queue_sem, 0);
4652
4653         mono_os_mutex_init (&log_profiler->method_table_mutex);
4654         log_profiler->method_table = mono_conc_hashtable_new (NULL, NULL);
4655
4656         if (do_coverage)
4657                 coverage_init (log_profiler);
4658         log_profiler->coverage_filters = filters;
4659
4660         log_profiler->startup_time = current_time ();
4661 }
4662
4663 /*
4664  * declaration to silence the compiler: this is the entry point that
4665  * mono will load from the shared library and call.
4666  */
4667 extern void
4668 mono_profiler_startup (const char *desc);
4669
4670 extern void
4671 mono_profiler_startup_log (const char *desc);
4672
4673 /*
4674  * this is the entry point that will be used when the profiler
4675  * is embedded inside the main executable.
4676  */
4677 void
4678 mono_profiler_startup_log (const char *desc)
4679 {
4680         mono_profiler_startup (desc);
4681 }
4682
4683 void
4684 mono_profiler_startup (const char *desc)
4685 {
4686         GPtrArray *filters = NULL;
4687
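             /* desc has the form "log[:options]"; pass only the option string (if any) to the parser. */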
4688         proflog_parse_args (&config, desc [3] == ':' ? desc + 4 : "");
4689
4690         //XXX maybe clean this up later to use config directly
4691         nocalls = !(config.effective_mask & PROFLOG_CALL_EVENTS);
4692         no_counters = !(config.effective_mask & PROFLOG_COUNTER_EVENTS);
4693         do_report = config.do_report;
4694         do_debug = config.do_debug;
4695         do_heap_shot = (config.effective_mask & PROFLOG_HEAPSHOT_FEATURE);
4696         hs_mode_ondemand = config.hs_mode_ondemand;
4697         hs_mode_ms = config.hs_mode_ms;
4698         hs_mode_gc = config.hs_mode_gc;
4699         do_mono_sample = (config.effective_mask & PROFLOG_SAMPLING_FEATURE);
4700         use_zip = config.use_zip;
4701         command_port = config.command_port;
4702         num_frames = config.num_frames;
4703         notraces = config.notraces;
4704         max_allocated_sample_hits = config.max_allocated_sample_hits;
4705         max_call_depth = config.max_call_depth;
4706         do_coverage = (config.effective_mask & PROFLOG_CODE_COV_FEATURE);
4707         debug_coverage = config.debug_coverage;
4708         only_coverage = config.only_coverage;
4709
4710         if (config.cov_filter_files) {
4711                 filters = g_ptr_array_new ();
4712                 int i;
4713                 for (i = 0; i < config.cov_filter_files->len; ++i) {
4714                         const char *name = config.cov_filter_files->pdata [i];
4715                         parse_cov_filter_file (filters, name);
4716                 }
4717         }
4718
4719         init_time ();
4720
4721         PROF_TLS_INIT ();
4722
4723         create_profiler (desc, config.output_filename, filters);
4724
4725         mono_lls_init (&profiler_thread_list, NULL);
4726
4727         //These two events are required for the profiler to work
4728         int events = MONO_PROFILE_THREADS | MONO_PROFILE_GC;
4729
4730         //Required callbacks
4731         mono_profiler_install (log_profiler, log_shutdown);
4732         mono_profiler_install_runtime_initialized (runtime_initialized);
4733
4734         mono_profiler_install_gc (gc_event, gc_resize);
4735         mono_profiler_install_thread (thread_start, thread_end);
4736
4737         //It's questionable whether we actually want this to be mandatory; maybe put it behind the corresponding event?
4738         mono_profiler_install_thread_name (thread_name);
4739
4740
4741         if (config.effective_mask & PROFLOG_DOMAIN_EVENTS) {
4742                 events |= MONO_PROFILE_APPDOMAIN_EVENTS;
4743                 mono_profiler_install_appdomain (NULL, domain_loaded, domain_unloaded, NULL);
4744                 mono_profiler_install_appdomain_name (domain_name);
4745         }
4746
4747         if (config.effective_mask & PROFLOG_ASSEMBLY_EVENTS) {
4748                 events |= MONO_PROFILE_ASSEMBLY_EVENTS;
4749                 mono_profiler_install_assembly (NULL, assembly_loaded, assembly_unloaded, NULL);
4750         }
4751
4752         if (config.effective_mask & PROFLOG_MODULE_EVENTS) {
4753                 events |= MONO_PROFILE_MODULE_EVENTS;
4754                 mono_profiler_install_module (NULL, image_loaded, image_unloaded, NULL);
4755         }
4756
4757         if (config.effective_mask & PROFLOG_CLASS_EVENTS) {
4758                 events |= MONO_PROFILE_CLASS_EVENTS;
4759                 mono_profiler_install_class (NULL, class_loaded, class_unloaded, NULL);
4760         }
4761
4762         if (config.effective_mask & PROFLOG_JIT_COMPILATION_EVENTS) {
4763                 events |= MONO_PROFILE_JIT_COMPILATION;
4764                 mono_profiler_install_jit_end (method_jitted);
4765                 mono_profiler_install_code_buffer_new (code_buffer_new);
4766         }
4767
4768         if (config.effective_mask & PROFLOG_EXCEPTION_EVENTS) {
4769                 events |= MONO_PROFILE_EXCEPTIONS;
4770                 mono_profiler_install_exception (throw_exc, method_exc_leave, NULL);
4771                 mono_profiler_install_exception_clause (clause_exc);
4772         }
4773
4774         if (config.effective_mask & PROFLOG_ALLOCATION_EVENTS) {
4775                 events |= MONO_PROFILE_ALLOCATIONS;
4776                 mono_profiler_install_allocation (gc_alloc);
4777         }
4778
4779         //PROFLOG_GC_EVENTS is mandatory
4780         //PROFLOG_THREAD_EVENTS is mandatory
4781
4782         if (config.effective_mask & PROFLOG_CALL_EVENTS) {
4783                 events |= MONO_PROFILE_ENTER_LEAVE;
4784                 mono_profiler_install_enter_leave (method_enter, method_leave);
4785         }
4786
4787         if (config.effective_mask & PROFLOG_INS_COVERAGE_EVENTS) {
4788                 events |= MONO_PROFILE_INS_COVERAGE;
4789                 mono_profiler_install_coverage_filter (coverage_filter);
4790         }
4791
4792         //XXX should we check for PROFLOG_SAMPLING_FEATURE instead??
4793         if (config.effective_mask & PROFLOG_SAMPLING_EVENTS) {
4794                 events |= MONO_PROFILE_STATISTICAL;
4795                 mono_profiler_set_statistical_mode (config.sampling_mode, config.sample_freq);
4796                 mono_profiler_install_statistical (mono_sample_hit);
4797         }
4798
4799         if (config.effective_mask & PROFLOG_MONITOR_EVENTS) {
4800                 events |= MONO_PROFILE_MONITOR_EVENTS;
4801                 mono_profiler_install_monitor (monitor_event);
4802         }
4803
4804         if (config.effective_mask & PROFLOG_GC_MOVES_EVENTS) {
4805                 events |= MONO_PROFILE_GC_MOVES;
4806                 mono_profiler_install_gc_moves (gc_moves);
4807         }
4808
4809         // TODO: split these into two profiler events
4810         if (config.effective_mask & (PROFLOG_GC_ROOT_EVENTS | PROFLOG_GC_HANDLE_EVENTS)) {
4811                 events |= MONO_PROFILE_GC_ROOTS;
4812                 mono_profiler_install_gc_roots (
4813                         (config.effective_mask & PROFLOG_GC_HANDLE_EVENTS) ? gc_handle : NULL,
4814                         (config.effective_mask & PROFLOG_GC_ROOT_EVENTS) ? gc_roots : NULL);
4815         }
4816
4817         if (config.effective_mask & PROFLOG_CONTEXT_EVENTS) {
4818                 events |= MONO_PROFILE_CONTEXT_EVENTS;
4819                 mono_profiler_install_context (context_loaded, context_unloaded);
4820         }
4821
4822         if (config.effective_mask & PROFLOG_FINALIZATION_EVENTS) {
4823                 events |= MONO_PROFILE_GC_FINALIZATION;
4824                 mono_profiler_install_gc_finalize (finalize_begin, finalize_object_begin, finalize_object_end, finalize_end);
4825         } else if (ENABLED (PROFLOG_HEAPSHOT_FEATURE) && config.hs_mode_ondemand) {
4826                 //On Demand heapshot uses the finalizer thread to force a collection and thus a heapshot
4827                 events |= MONO_PROFILE_GC_FINALIZATION;
4828                 mono_profiler_install_gc_finalize (NULL, NULL, NULL, finalize_end);
4829         }
4830
4831         //PROFLOG_COUNTER_EVENTS is a pseudo event controlled by the no_counters global var
4832         //PROFLOG_GC_HANDLE_EVENTS is handled together with PROFLOG_GC_ROOT_EVENTS
4833
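             /* Enable all of the event categories accumulated above in a single call. */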
4834         mono_profiler_set_events ((MonoProfileFlags)events);
4835 }