[profiler] Fix a deadlock due to recursive use of the buffer lock.
[mono.git] / mono / profiler / log.c
1 /*
2  * mono-profiler-log.c: mono log profiler
3  *
4  * Authors:
5  *   Paolo Molaro (lupus@ximian.com)
6  *   Alex Rønne Petersen (alexrp@xamarin.com)
7  *
8  * Copyright 2010 Novell, Inc (http://www.novell.com)
9  * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
10  * Licensed under the MIT license. See LICENSE file in the project root for full license information.
11  */
12
13 #include <config.h>
14 #include <mono/metadata/assembly.h>
15 #include <mono/metadata/debug-helpers.h>
16 #include "../metadata/metadata-internals.h"
17 #include <mono/metadata/mono-config.h>
18 #include <mono/metadata/mono-gc.h>
19 #include <mono/metadata/mono-perfcounters.h>
20 #include <mono/utils/atomic.h>
21 #include <mono/utils/hazard-pointer.h>
22 #include <mono/utils/lock-free-alloc.h>
23 #include <mono/utils/lock-free-queue.h>
24 #include <mono/utils/mono-conc-hashtable.h>
25 #include <mono/utils/mono-counters.h>
26 #include <mono/utils/mono-linked-list-set.h>
27 #include <mono/utils/mono-membar.h>
28 #include <mono/utils/mono-mmap.h>
29 #include <mono/utils/mono-os-mutex.h>
30 #include <mono/utils/mono-os-semaphore.h>
31 #include <mono/utils/mono-threads.h>
32 #include <mono/utils/mono-threads-api.h>
33 #include "log.h"
34
35 #ifdef HAVE_DLFCN_H
36 #include <dlfcn.h>
37 #endif
38 #include <fcntl.h>
39 #ifdef HAVE_LINK_H
40 #include <link.h>
41 #endif
42 #ifdef HAVE_UNISTD_H
43 #include <unistd.h>
44 #endif
45 #if defined(__APPLE__)
46 #include <mach/mach_time.h>
47 #endif
48 #include <netinet/in.h>
49 #ifdef HAVE_SYS_MMAN_H
50 #include <sys/mman.h>
51 #endif
52 #include <sys/socket.h>
53 #if defined (HAVE_SYS_ZLIB)
54 #include <zlib.h>
55 #endif
56
57 #define BUFFER_SIZE (4096 * 16)
58
59 /* Worst-case size in bytes of a 64-bit value encoded with LEB128: ceil (64 / 7) = 10 groups of 7 bits. */
60 #define LEB128_SIZE 10
61
62 /* Size of a value encoded as a single byte. */
63 #undef BYTE_SIZE // mach/i386/vm_param.h on OS X defines this to 8, but it isn't used for anything.
64 #define BYTE_SIZE 1
65
66 /* Size in bytes of the event prefix (ID + time). */
67 #define EVENT_SIZE (BYTE_SIZE + LEB128_SIZE)
68
69 static volatile gint32 runtime_inited;
70 static volatile gint32 in_shutdown;
71
72 static ProfilerConfig config;
73 static int nocalls = 0;
74 static int notraces = 0;
75 static int use_zip = 0;
76 static int do_report = 0;
77 static int do_heap_shot = 0;
78 static int max_call_depth = 0;
79 static int command_port = 0;
80 static int heapshot_requested = 0;
81 static int do_mono_sample = 0;
82 static int do_debug = 0;
83 static int do_coverage = 0;
84 static gboolean no_counters = FALSE;
85 static gboolean only_coverage = FALSE;
86 static gboolean debug_coverage = FALSE;
87 static int max_allocated_sample_hits;
88
89 #define ENABLED(EVT) (config.effective_mask & (EVT))
90
91 // Statistics for internal profiler data structures.
92 static gint32 sample_allocations_ctr,
93               buffer_allocations_ctr;
94
95 // Statistics for profiler events.
96 static gint32 sync_points_ctr,
97               heap_objects_ctr,
98               heap_starts_ctr,
99               heap_ends_ctr,
100               heap_roots_ctr,
101               gc_events_ctr,
102               gc_resizes_ctr,
103               gc_allocs_ctr,
104               gc_moves_ctr,
105               gc_handle_creations_ctr,
106               gc_handle_deletions_ctr,
107               finalize_begins_ctr,
108               finalize_ends_ctr,
109               finalize_object_begins_ctr,
110               finalize_object_ends_ctr,
111               image_loads_ctr,
112               image_unloads_ctr,
113               assembly_loads_ctr,
114               assembly_unloads_ctr,
115               class_loads_ctr,
116               class_unloads_ctr,
117               method_entries_ctr,
118               method_exits_ctr,
119               method_exception_exits_ctr,
120               method_jits_ctr,
121               code_buffers_ctr,
122               exception_throws_ctr,
123               exception_clauses_ctr,
124               monitor_events_ctr,
125               thread_starts_ctr,
126               thread_ends_ctr,
127               thread_names_ctr,
128               domain_loads_ctr,
129               domain_unloads_ctr,
130               domain_names_ctr,
131               context_loads_ctr,
132               context_unloads_ctr,
133               sample_ubins_ctr,
134               sample_usyms_ctr,
135               sample_hits_ctr,
136               counter_descriptors_ctr,
137               counter_samples_ctr,
138               perfcounter_descriptors_ctr,
139               perfcounter_samples_ctr,
140               coverage_methods_ctr,
141               coverage_statements_ctr,
142               coverage_classes_ctr,
143               coverage_assemblies_ctr;
144
145 static MonoLinkedListSet profiler_thread_list;
146
147 /*
148  * file format:
149  * [header] [buffer]*
150  *
151  * The file is composed of a header followed by 0 or more buffers.
152  * Each buffer contains events that happened on a thread: for a given thread
153  * buffers that appear later in the file are guaranteed to contain events
154  * that happened later in time. Buffers from separate threads could be interleaved,
155  * though.
156  * Buffers are not required to be aligned.
157  *
158  * header format:
159  * [id: 4 bytes] constant value: LOG_HEADER_ID
160  * [major: 1 byte] [minor: 1 byte] major and minor version of the log profiler
161  * [format: 1 byte] version of the data format for the rest of the file
162  * [ptrsize: 1 byte] size in bytes of a pointer in the profiled program
163  * [startup time: 8 bytes] time in milliseconds since the unix epoch when the program started
164  * [timer overhead: 4 bytes] approximate overhead in nanoseconds of the timer
165  * [flags: 4 bytes] file format flags, should be 0 for now
166  * [pid: 4 bytes] pid of the profiled process
167  * [port: 2 bytes] tcp port for server if != 0
168  * [args size: 4 bytes] size of args
169  * [args: string] arguments passed to the profiler
170  * [arch size: 4 bytes] size of arch
171  * [arch: string] architecture the profiler is running on
172  * [os size: 4 bytes] size of os
173  * [os: string] operating system the profiler is running on
174  *
175  * Multi-byte integers are stored in little-endian format.
176  *
177  * buffer format:
178  * [buffer header] [event]*
179  * Buffers have a fixed-size header followed by 0 or more bytes of event data.
180  * Timing information and other values in the event data are usually stored
181  * as uleb128 or sleb128 integers. To save space, as noted for each item below,
182  * some data is represented as a difference between the actual value and
183  * either the last value of the same type (as with timing information) or
184  * a base value stored in the buffer header.
185  *
186  * For timing information the data is stored as uleb128, since timing
187  * increases in a monotonic way in each thread: the value is the number of
188  * nanoseconds to add to the last seen timing data in a buffer. The first value
189  * in a buffer is calculated from the time_base field in the buffer header.
190  *
191  * Object or heap sizes are stored as uleb128.
192  * Pointer differences are stored as sleb128, instead.
193  *
194  * If an unexpected value is found, the rest of the buffer should be ignored,
195  * as generally the later values need the former to be interpreted correctly.
196  *
197  * buffer header format:
198  * [bufid: 4 bytes] constant value: BUF_ID
199  * [len: 4 bytes] size of the data following the buffer header
200  * [time_base: 8 bytes] time base in nanoseconds since an unspecified epoch
201  * [ptr_base: 8 bytes] base value for pointers
202  * [obj_base: 8 bytes] base value for object addresses
203  * [thread id: 8 bytes] system-specific thread ID (pthread_t for example)
204  * [method_base: 8 bytes] base value for MonoMethod pointers
205  *
206  * event format:
207  * [extended info: upper 4 bits] [type: lower 4 bits]
208  * [time diff: uleb128] nanoseconds since last timing
209  * [data]*
210  * The data that follows depends on type and the extended info.
211  * Type is one of the enum values in mono-profiler-log.h: TYPE_ALLOC, TYPE_GC,
212  * TYPE_METADATA, TYPE_METHOD, TYPE_EXCEPTION, TYPE_MONITOR, TYPE_HEAP.
213  * The extended info bits are interpreted based on type, see
214  * each individual event description below.
215  * Strings are represented as 0-terminated UTF-8 sequences.
216  *
217  * backtrace format:
218  * [num: uleb128] number of frames following
219  * [frame: sleb128]* num MonoMethod* as a pointer difference from the last such
220  * pointer or the buffer method_base
221  *
222  * type alloc format:
223  * type: TYPE_ALLOC
224  * exinfo: zero or TYPE_ALLOC_BT
225  * [ptr: sleb128] class as a byte difference from ptr_base
226  * [obj: sleb128] object address as a byte difference from obj_base
227  * [size: uleb128] size of the object in the heap
228  * If exinfo == TYPE_ALLOC_BT, a backtrace follows.
229  *
230  * type GC format:
231  * type: TYPE_GC
232  * exinfo: one of TYPE_GC_EVENT, TYPE_GC_RESIZE, TYPE_GC_MOVE, TYPE_GC_HANDLE_CREATED[_BT],
233  * TYPE_GC_HANDLE_DESTROYED[_BT], TYPE_GC_FINALIZE_START, TYPE_GC_FINALIZE_END,
234  * TYPE_GC_FINALIZE_OBJECT_START, TYPE_GC_FINALIZE_OBJECT_END
235  * if exinfo == TYPE_GC_RESIZE
236  *      [heap_size: uleb128] new heap size
237  * if exinfo == TYPE_GC_EVENT
238  *      [event type: byte] GC event (MONO_GC_EVENT_* from profiler.h)
239  *      [generation: byte] GC generation event refers to
240  * if exinfo == TYPE_GC_MOVE
241  *      [num_objects: uleb128] number of object moves that follow
242  *      [objaddr: sleb128]+ num_objects object pointer differences from obj_base
243  *      num_objects is always an even number: the even-indexed items are the old
244  *      addresses, the odd-indexed items are the respective new object addresses
245  * if exinfo == TYPE_GC_HANDLE_CREATED[_BT]
246  *      [handle_type: uleb128] GC handle type (System.Runtime.InteropServices.GCHandleType)
247  *      upper bits reserved as flags
248  *      [handle: uleb128] GC handle value
249  *      [objaddr: sleb128] object pointer differences from obj_base
250  *      If exinfo == TYPE_GC_HANDLE_CREATED_BT, a backtrace follows.
251  * if exinfo == TYPE_GC_HANDLE_DESTROYED[_BT]
252  *      [handle_type: uleb128] GC handle type (System.Runtime.InteropServices.GCHandleType)
253  *      upper bits reserved as flags
254  *      [handle: uleb128] GC handle value
255  *      If exinfo == TYPE_GC_HANDLE_DESTROYED_BT, a backtrace follows.
256  * if exinfo == TYPE_GC_FINALIZE_OBJECT_{START,END}
257  *      [object: sleb128] the object as a difference from obj_base
258  *
259  * type metadata format:
260  * type: TYPE_METADATA
261  * exinfo: one of: TYPE_END_LOAD, TYPE_END_UNLOAD (optional for TYPE_THREAD and TYPE_DOMAIN)
262  * [mtype: byte] metadata type, one of: TYPE_CLASS, TYPE_IMAGE, TYPE_ASSEMBLY, TYPE_DOMAIN,
263  * TYPE_THREAD, TYPE_CONTEXT
264  * [pointer: sleb128] pointer of the metadata type depending on mtype
265  * if mtype == TYPE_CLASS
266  *      [image: sleb128] MonoImage* as a pointer difference from ptr_base
267  *      [name: string] full class name
268  * if mtype == TYPE_IMAGE
269  *      [name: string] image file name
270  * if mtype == TYPE_ASSEMBLY
271  *      [image: sleb128] MonoImage* as a pointer difference from ptr_base
272  *      [name: string] assembly name
273  * if mtype == TYPE_DOMAIN && exinfo == 0
274  *      [name: string] domain friendly name
275  * if mtype == TYPE_CONTEXT
276  *      [domain: sleb128] domain id as pointer
277  * if mtype == TYPE_THREAD && exinfo == 0
278  *      [name: string] thread name
279  *
280  * type method format:
281  * type: TYPE_METHOD
282  * exinfo: one of: TYPE_LEAVE, TYPE_ENTER, TYPE_EXC_LEAVE, TYPE_JIT
283  * [method: sleb128] MonoMethod* as a pointer difference from the last such
284  * pointer or the buffer method_base
285  * if exinfo == TYPE_JIT
286  *      [code address: sleb128] pointer to the native code as a diff from ptr_base
287  *      [code size: uleb128] size of the generated code
288  *      [name: string] full method name
289  *
290  * type exception format:
291  * type: TYPE_EXCEPTION
292  * exinfo: zero, TYPE_CLAUSE, or TYPE_THROW_BT
293  * if exinfo == TYPE_CLAUSE
294  *      [clause type: byte] MonoExceptionEnum enum value
295  *      [clause index: uleb128] index of the current clause
296  *      [method: sleb128] MonoMethod* as a pointer difference from the last such
297  *      pointer or the buffer method_base
298  *      [object: sleb128] the exception object as a difference from obj_base
299  * else
300  *      [object: sleb128] the exception object as a difference from obj_base
301  *      If exinfo == TYPE_THROW_BT, a backtrace follows.
302  *
303  * type runtime format:
304  * type: TYPE_RUNTIME
305  * exinfo: one of: TYPE_JITHELPER
306  * if exinfo == TYPE_JITHELPER
307  *      [type: byte] MonoProfilerCodeBufferType enum value
308  *      [buffer address: sleb128] pointer to the native code as a diff from ptr_base
309  *      [buffer size: uleb128] size of the generated code
310  *      if type == MONO_PROFILER_CODE_BUFFER_SPECIFIC_TRAMPOLINE
311  *              [name: string] buffer description name
312  *
313  * type monitor format:
314  * type: TYPE_MONITOR
315  * exinfo: zero or TYPE_MONITOR_BT
316  * [type: byte] MONO_PROFILER_MONITOR_{CONTENTION,FAIL,DONE}
317  * [object: sleb128] the lock object as a difference from obj_base
318  * If exinfo == TYPE_MONITOR_BT, a backtrace follows.
319  *
320  * type heap format
321  * type: TYPE_HEAP
322  * exinfo: one of TYPE_HEAP_START, TYPE_HEAP_END, TYPE_HEAP_OBJECT, TYPE_HEAP_ROOT
323  * if exinfo == TYPE_HEAP_OBJECT
324  *      [object: sleb128] the object as a difference from obj_base
325  *      [class: sleb128] the object MonoClass* as a difference from ptr_base
326  *      [size: uleb128] size of the object on the heap
327  *      [num_refs: uleb128] number of object references
328  *      each referenced objref is preceded by a uleb128 encoded offset: the
329  *      first offset is from the object address and each next offset is relative
330  *      to the previous one
331  *      [objrefs: sleb128]+ object referenced as a difference from obj_base
332  *      The same object can appear multiple times, but only the first time
333  *      with size != 0: in the other cases this data will only be used to
334  *      provide additional referenced objects.
335  * if exinfo == TYPE_HEAP_ROOT
336  *      [num_roots: uleb128] number of root references
337  *      [num_gc: uleb128] number of major gcs
338  *      [object: sleb128] the object as a difference from obj_base
339  *      [root_type: byte] the root_type: MonoProfileGCRootType (profiler.h)
340  *      [extra_info: uleb128] the extra_info value
341  *      object, root_type and extra_info are repeated num_roots times
342  *
343  * type sample format
344  * type: TYPE_SAMPLE
345  * exinfo: one of TYPE_SAMPLE_HIT, TYPE_SAMPLE_USYM, TYPE_SAMPLE_UBIN, TYPE_SAMPLE_COUNTERS_DESC, TYPE_SAMPLE_COUNTERS
346  * if exinfo == TYPE_SAMPLE_HIT
347  *      [thread: sleb128] thread id as difference from ptr_base
348  *      [count: uleb128] number of following instruction addresses
349  *      [ip: sleb128]* instruction pointer as difference from ptr_base
350  *      [mbt_count: uleb128] number of managed backtrace frames
351  *      [method: sleb128]* MonoMethod* as a pointer difference from the last such
352  *      pointer or the buffer method_base (the first such method can also be identified by ip, but this is not necessarily true)
353  * if exinfo == TYPE_SAMPLE_USYM
354  *      [address: sleb128] symbol address as a difference from ptr_base
355  *      [size: uleb128] symbol size (may be 0 if unknown)
356  *      [name: string] symbol name
357  * if exinfo == TYPE_SAMPLE_UBIN
358  *      [address: sleb128] address where binary has been loaded as a difference from ptr_base
359  *      [offset: uleb128] file offset of mapping (the same file can be mapped multiple times)
360  *      [size: uleb128] memory size
361  *      [name: string] binary name
362  * if exinfo == TYPE_SAMPLE_COUNTERS_DESC
363  *      [len: uleb128] number of counters
364  *      for i = 0 to len
365  *              [section: uleb128] section of counter
366  *              if section == MONO_COUNTER_PERFCOUNTERS:
367  *                      [section_name: string] section name of counter
368  *              [name: string] name of counter
369  *              [type: byte] type of counter
370  *              [unit: byte] unit of counter
371  *              [variance: byte] variance of counter
372  *              [index: uleb128] unique index of counter
373  * if exinfo == TYPE_SAMPLE_COUNTERS
374  *      while true:
375  *              [index: uleb128] unique index of counter
376  *              if index == 0:
377  *                      break
378  *              [type: byte] type of counter value
379  *              if type == string:
380  *                      if value == null:
381  *                              [0: byte] 0 -> value is null
382  *                      else:
383  *                              [1: byte] 1 -> value is not null
384  *                              [value: string] counter value
385  *              else:
386  *                      [value: uleb128/sleb128/double] counter value, can be sleb128, uleb128 or double (determined by the type byte)
387  *
388  * type coverage format
389  * type: TYPE_COVERAGE
390  * exinfo: one of TYPE_COVERAGE_METHOD, TYPE_COVERAGE_STATEMENT, TYPE_COVERAGE_ASSEMBLY, TYPE_COVERAGE_CLASS
391  * if exinfo == TYPE_COVERAGE_METHOD
392  *  [assembly: string] name of assembly
393  *  [class: string] name of the class
394  *  [name: string] name of the method
395  *  [signature: string] the signature of the method
396  *  [filename: string] the file path of the file that contains this method
397  *  [token: uleb128] the method token
398  *  [method_id: uleb128] an ID for this data to associate with the TYPE_COVERAGE_STATEMENT buffers
399  *  [len: uleb128] the number of TYPE_COVERAGE_STATEMENT buffers associated with this method
400  * if exinfo == TYPE_COVERAGE_STATEMENT
401  *  [method_id: uleb128] the ID of the TYPE_COVERAGE_METHOD buffer to associate this with
402  *  [offset: uleb128] the IL offset relative to the previous offset
403  *  [counter: uleb128] the counter for this instruction
404  *  [line: uleb128] the line of filename containing this instruction
405  *  [column: uleb128] the column containing this instruction
406  * if exinfo == TYPE_COVERAGE_ASSEMBLY
407  *  [name: string] assembly name
408  *  [guid: string] assembly GUID
409  *  [filename: string] assembly filename
410  *  [number_of_methods: uleb128] the number of methods in this assembly
411  *  [fully_covered: uleb128] the number of fully covered methods
412  *  [partially_covered: uleb128] the number of partially covered methods
413  *    currently partially_covered will always be 0, and fully_covered is the
414  *    number of methods that are fully and partially covered.
415  * if exinfo == TYPE_COVERAGE_CLASS
416  *  [name: string] assembly name
417  *  [class: string] class name
418  *  [number_of_methods: uleb128] the number of methods in this class
419  *  [fully_covered: uleb128] the number of fully covered methods
420  *  [partially_covered: uleb128] the number of partially covered methods
421  *    currently partially_covered will always be 0, and fully_covered is the
422  *    number of methods that are fully and partially covered.
423  *
424  * type meta format:
425  * type: TYPE_META
426  * exinfo: one of: TYPE_SYNC_POINT
427  * if exinfo == TYPE_SYNC_POINT
428  *      [type: byte] MonoProfilerSyncPointType enum value
429  */
430
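/*
 * Illustrative sketch only, kept out of the build on purpose: roughly how a
 * consumer of the format described above would decode a uleb128 value. The
 * example_* name is made up for illustration; it is simply the inverse of
 * encode_uleb128 () defined further down in this file.
 */
#if 0
static uint64_t
example_decode_uleb128 (const uint8_t *buf, const uint8_t **endbuf)
{
        const uint8_t *p = buf;
        uint64_t result = 0;
        int shift = 0;
        uint8_t b;

        do {
                b = *p++;
                result |= (uint64_t) (b & 0x7f) << shift;
                shift += 7;
        } while (b & 0x80);

        *endbuf = p;

        return result;
}
#endif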
431 // Pending data to be written to the log, for a single thread.
432 // Threads periodically flush their own LogBuffers by calling send_log_unsafe ().
433 typedef struct _LogBuffer LogBuffer;
434 struct _LogBuffer {
435         // Next (older) LogBuffer in processing queue
436         LogBuffer *next;
437
438         uint64_t time_base;
439         uint64_t last_time;
440         uintptr_t ptr_base;
441         uintptr_t method_base;
442         uintptr_t last_method;
443         uintptr_t obj_base;
444         uintptr_t thread_id;
445
446         // Bytes allocated for this LogBuffer
447         int size;
448
449         // Start of currently unused space in buffer
450         unsigned char* cursor;
451
452         // Pointer to start-of-structure-plus-size (for convenience)
453         unsigned char* buf_end;
454
455         // Start of data in buffer. Contents follow "buffer format" described above.
456         unsigned char buf [1];
457 };
458
459 typedef struct {
460         MonoLinkedListSetNode node;
461
462         // Convenience pointer to the profiler structure.
463         MonoProfiler *profiler;
464
465         // Was this thread added to the LLS?
466         gboolean attached;
467
468         // The current log buffer for this thread.
469         LogBuffer *buffer;
470
471         // Methods referenced by events in `buffer`, see `MethodInfo`.
472         GPtrArray *methods;
473
474         // Current call depth for enter/leave events.
475         int call_depth;
476
477         // Indicates whether this thread is currently writing to its `buffer`.
478         gboolean busy;
479
480         // Has this thread written a thread end event to `buffer`?
481         gboolean ended;
482
483         // Stored in `buffer_lock_state` to take the exclusive lock.
484         int small_id;
485 } MonoProfilerThread;
486
487 // Do not use these TLS macros directly unless you know what you're doing.
488
489 #ifdef HOST_WIN32
490
491 #define PROF_TLS_SET(VAL) (TlsSetValue (profiler_tls, (VAL)))
492 #define PROF_TLS_GET() ((MonoProfilerThread *) TlsGetValue (profiler_tls))
493 #define PROF_TLS_INIT() (profiler_tls = TlsAlloc ())
494 #define PROF_TLS_FREE() (TlsFree (profiler_tls))
495
496 static DWORD profiler_tls;
497
498 #elif HAVE_KW_THREAD
499
500 #define PROF_TLS_SET(VAL) (profiler_tls = (VAL))
501 #define PROF_TLS_GET() (profiler_tls)
502 #define PROF_TLS_INIT()
503 #define PROF_TLS_FREE()
504
505 static __thread MonoProfilerThread *profiler_tls;
506
507 #else
508
509 #define PROF_TLS_SET(VAL) (pthread_setspecific (profiler_tls, (VAL)))
510 #define PROF_TLS_GET() ((MonoProfilerThread *) pthread_getspecific (profiler_tls))
511 #define PROF_TLS_INIT() (pthread_key_create (&profiler_tls, NULL))
512 #define PROF_TLS_FREE() (pthread_key_delete (profiler_tls))
513
514 static pthread_key_t profiler_tls;
515
516 #endif
517
518 static uintptr_t
519 thread_id (void)
520 {
521         return (uintptr_t) mono_native_thread_id_get ();
522 }
523
524 static uintptr_t
525 process_id (void)
526 {
527 #ifdef HOST_WIN32
528         return (uintptr_t) GetCurrentProcessId ();
529 #else
530         return (uintptr_t) getpid ();
531 #endif
532 }
533
534 #ifdef __APPLE__
535 static mach_timebase_info_data_t timebase_info;
536 #elif defined (HOST_WIN32)
537 static LARGE_INTEGER pcounter_freq;
538 #endif
539
540 #define TICKS_PER_SEC 1000000000LL
541
542 static uint64_t
543 current_time (void)
544 {
545 #ifdef __APPLE__
546         uint64_t time = mach_absolute_time ();
547
548         time *= timebase_info.numer;
549         time /= timebase_info.denom;
550
551         return time;
552 #elif defined (HOST_WIN32)
553         LARGE_INTEGER value;
554
555         QueryPerformanceCounter (&value);
556
557         return value.QuadPart * TICKS_PER_SEC / pcounter_freq.QuadPart;
558 #elif defined (CLOCK_MONOTONIC)
559         struct timespec tspec;
560
561         clock_gettime (CLOCK_MONOTONIC, &tspec);
562
563         return ((uint64_t) tspec.tv_sec * TICKS_PER_SEC + tspec.tv_nsec);
564 #else
565         struct timeval tv;
566
567         gettimeofday (&tv, NULL);
568
569         return ((uint64_t) tv.tv_sec * TICKS_PER_SEC + tv.tv_usec * 1000);
570 #endif
571 }
572
573 static int timer_overhead;
574
575 static void
576 init_time (void)
577 {
578 #ifdef __APPLE__
579         mach_timebase_info (&timebase_info);
580 #elif defined (HOST_WIN32)
581         QueryPerformanceFrequency (&pcounter_freq);
582 #endif
583
584         uint64_t time_start = current_time ();
585
586         for (int i = 0; i < 256; ++i)
587                 current_time ();
588
589         uint64_t time_end = current_time ();
590
591         timer_overhead = (time_end - time_start) / 256;
592 }
593
594 /*
595  * These macros should be used when writing an event to a log buffer. They
596  * take care of a bunch of stuff that can be repetitive and error-prone, such
597  * as attaching the current thread, acquiring/releasing the buffer lock,
598  * incrementing the event counter, expanding the log buffer, etc. They also
599  * create a scope so that it's harder to leak the LogBuffer pointer, which can
600  * be problematic as the pointer is unstable when the buffer lock isn't
601  * acquired.
602  *
603  * If the calling thread has already been initialized, these macros will not alter
604  * its attach mode (i.e. whether it's been added to the LLS). If the thread has not
605  * been initialized yet, init_thread () will be called with add_to_lls = TRUE.
606  */
607
608 #define ENTER_LOG(COUNTER, BUFFER, SIZE) \
609         do { \
610                 MonoProfilerThread *thread__ = get_thread (); \
611                 if (thread__->attached) \
612                         buffer_lock (); \
613                 g_assert (!thread__->busy && "Why are we trying to write a new event while already writing one?"); \
614                 thread__->busy = TRUE; \
615                 InterlockedIncrement ((COUNTER)); \
616                 LogBuffer *BUFFER = ensure_logbuf_unsafe (thread__, (SIZE))
617
618 #define EXIT_LOG_EXPLICIT(SEND) \
619                 thread__->busy = FALSE; \
620                 if ((SEND)) \
621                         send_log_unsafe (TRUE); \
622                 if (thread__->attached) \
623                         buffer_unlock (); \
624         } while (0)
625
626 // Pass these to EXIT_LOG_EXPLICIT () for easier reading.
627 #define DO_SEND TRUE
628 #define NO_SEND FALSE
629
630 #define EXIT_LOG EXIT_LOG_EXPLICIT (DO_SEND)
631
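/*
 * Illustrative usage sketch only, kept out of the build on purpose: real
 * usages appear in the profiler callbacks further down (e.g. gc_roots () or
 * sync_point_mark ()). The monitor event chosen here is arbitrary; the point
 * is the ENTER_LOG / EXIT_LOG pairing and the up-front worst-case size
 * estimate passed to ENTER_LOG.
 */
#if 0
static void
example_log_usage (MonoObject *obj)
{
        ENTER_LOG (&monitor_events_ctr, logbuffer,
                EVENT_SIZE /* event */ +
                BYTE_SIZE /* type */ +
                LEB128_SIZE /* object */
        );

        emit_event (logbuffer, TYPE_MONITOR);
        emit_byte (logbuffer, MONO_PROFILER_MONITOR_DONE);
        emit_obj (logbuffer, obj);

        EXIT_LOG;
}
#endif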
632 typedef struct _BinaryObject BinaryObject;
633 struct _BinaryObject {
634         BinaryObject *next;
635         void *addr;
636         char *name;
637 };
638
639 static MonoProfiler *log_profiler;
640
641 struct _MonoProfiler {
642         FILE* file;
643 #if defined (HAVE_SYS_ZLIB)
644         gzFile gzfile;
645 #endif
646         char *args;
647         uint64_t startup_time;
648         int pipe_output;
649         int command_port;
650         int server_socket;
651         int pipes [2];
652         MonoNativeThreadId helper_thread;
653         MonoNativeThreadId writer_thread;
654         MonoNativeThreadId dumper_thread;
655         volatile gint32 run_writer_thread;
656         MonoLockFreeAllocSizeClass writer_entry_size_class;
657         MonoLockFreeAllocator writer_entry_allocator;
658         MonoLockFreeQueue writer_queue;
659         MonoSemType writer_queue_sem;
660         MonoConcurrentHashTable *method_table;
661         mono_mutex_t method_table_mutex;
662         volatile gint32 run_dumper_thread;
663         MonoLockFreeQueue dumper_queue;
664         MonoSemType dumper_queue_sem;
665         MonoLockFreeAllocSizeClass sample_size_class;
666         MonoLockFreeAllocator sample_allocator;
667         MonoLockFreeQueue sample_reuse_queue;
668         BinaryObject *binary_objects;
669         GPtrArray *coverage_filters;
670 };
671
672 typedef struct {
673         MonoLockFreeQueueNode node;
674         GPtrArray *methods;
675         LogBuffer *buffer;
676 } WriterQueueEntry;
677
678 #define WRITER_ENTRY_BLOCK_SIZE (mono_pagesize ())
679
680 typedef struct {
681         MonoMethod *method;
682         MonoJitInfo *ji;
683         uint64_t time;
684 } MethodInfo;
685
686 static char*
687 pstrdup (const char *s)
688 {
689         int len = strlen (s) + 1;
690         char *p = (char *) g_malloc (len);
691         memcpy (p, s, len);
692         return p;
693 }
694
695 static void *
696 alloc_buffer (int size)
697 {
698         return mono_valloc (NULL, size, MONO_MMAP_READ | MONO_MMAP_WRITE | MONO_MMAP_ANON | MONO_MMAP_PRIVATE, MONO_MEM_ACCOUNT_PROFILER);
699 }
700
701 static void
702 free_buffer (void *buf, int size)
703 {
704         mono_vfree (buf, size, MONO_MEM_ACCOUNT_PROFILER);
705 }
706
707 static LogBuffer*
708 create_buffer (uintptr_t tid, int bytes)
709 {
710         LogBuffer* buf = (LogBuffer *) alloc_buffer (MAX (BUFFER_SIZE, bytes));
711
712         InterlockedIncrement (&buffer_allocations_ctr);
713
714         buf->size = MAX (BUFFER_SIZE, bytes); /* match the actual allocation size so free_buffer () unmaps it all */
715         buf->time_base = current_time ();
716         buf->last_time = buf->time_base;
717         buf->buf_end = (unsigned char *) buf + buf->size;
718         buf->cursor = buf->buf;
719         buf->thread_id = tid;
720
721         return buf;
722 }
723
724 /*
725  * Must be called with the reader lock held if thread is the current thread, or
726  * the exclusive lock if thread is a different thread. However, if thread is
727  * the current thread, and init_thread () was called with add_to_lls = FALSE,
728  * then no locking is necessary.
729  */
730 static void
731 init_buffer_state (MonoProfilerThread *thread)
732 {
733         thread->buffer = create_buffer (thread->node.key, 0);
734         thread->methods = NULL;
735 }
736
737 static void
738 clear_hazard_pointers (MonoThreadHazardPointers *hp)
739 {
740         mono_hazard_pointer_clear (hp, 0);
741         mono_hazard_pointer_clear (hp, 1);
742         mono_hazard_pointer_clear (hp, 2);
743 }
744
745 static MonoProfilerThread *
746 init_thread (MonoProfiler *prof, gboolean add_to_lls)
747 {
748         MonoProfilerThread *thread = PROF_TLS_GET ();
749
750         /*
751          * Sometimes we may try to initialize a thread twice. One example is the
752          * main thread: We initialize it when setting up the profiler, but we will
753          * also get a thread_start () callback for it. Another example is when
754          * attaching new threads to the runtime: We may get a gc_alloc () callback
755          * for that thread's thread object (where we initialize it), soon followed
756          * by a thread_start () callback.
757          *
758          * These cases are harmless anyhow. Just return if we've already done the
759          * initialization work.
760          */
761         if (thread)
762                 return thread;
763
764         thread = g_malloc (sizeof (MonoProfilerThread));
765         thread->node.key = thread_id ();
766         thread->profiler = prof;
767         thread->attached = add_to_lls;
768         thread->call_depth = 0;
769         thread->busy = FALSE;
770         thread->ended = FALSE;
771
772         init_buffer_state (thread);
773
774         thread->small_id = mono_thread_info_register_small_id ();
775
776         /*
777          * Some internal profiler threads don't need to be cleaned up
778          * by the main thread on shutdown.
779          */
780         if (add_to_lls) {
781                 MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();
782                 g_assert (mono_lls_insert (&profiler_thread_list, hp, &thread->node) && "Why can't we insert the thread in the LLS?");
783                 clear_hazard_pointers (hp);
784         }
785
786         PROF_TLS_SET (thread);
787
788         return thread;
789 }
790
791 // Only valid if init_thread () was called with add_to_lls = FALSE.
792 static void
793 deinit_thread (MonoProfilerThread *thread)
794 {
795         g_assert (!thread->attached && "Why are we manually freeing an attached thread?");
796
797         g_free (thread);
798         PROF_TLS_SET (NULL);
799 }
800
801 static MonoProfilerThread *
802 get_thread (void)
803 {
804         return init_thread (log_profiler, TRUE);
805 }
806
807 // Only valid if init_thread () was called with add_to_lls = FALSE.
808 static LogBuffer *
809 ensure_logbuf_unsafe (MonoProfilerThread *thread, int bytes)
810 {
811         LogBuffer *old = thread->buffer;
812
813         if (old->cursor + bytes < old->buf_end)
814                 return old;
815
816         LogBuffer *new_ = create_buffer (thread->node.key, bytes);
817         new_->next = old;
818         thread->buffer = new_;
819
820         return new_;
821 }
822
823 /*
824  * This is a reader/writer spin lock of sorts used to protect log buffers.
825  * When a thread modifies its own log buffer, it increments the reader
826  * count. When a thread wants to access log buffers of other threads, it
827  * takes the exclusive lock.
828  *
829  * `buffer_lock_state` holds the reader count in its lower 16 bits, and
830  * the small ID of the thread currently holding the exclusive (writer)
831  * lock in its upper 16 bits. Both can be zero. It's important that the
832  * whole lock state is a single word that can be read/written atomically
833  * to avoid race conditions where there could end up being readers while
834  * the writer lock is held.
835  *
836  * The lock is writer-biased. When a thread wants to take the exclusive
837  * lock, it increments `buffer_lock_exclusive_intent` which will make new
838  * readers spin until it's back to zero, then takes the exclusive lock
839  * once the reader count has reached zero. After releasing the exclusive
840  * lock, it decrements `buffer_lock_exclusive_intent`, which, when it
841  * reaches zero again, allows readers to increment the reader count.
842  *
843  * The writer bias is necessary because we take the exclusive lock in
844  * `gc_event ()` during STW. If the writer bias was not there, and a
845  * program had a large number of threads, STW-induced pauses could be
846  * significantly longer than they have to be. Also, we emit periodic
847  * sync points from the helper thread, which requires taking the
848  * exclusive lock, and we need those to arrive with a reasonably
849  * consistent frequency so that readers don't have to queue up too many
850  * events between sync points.
851  *
852  * The lock does not support recursion.
853  */
854 static volatile gint32 buffer_lock_state;
855 static volatile gint32 buffer_lock_exclusive_intent;
856
857 static void
858 buffer_lock (void)
859 {
860         /*
861          * If the thread holding the exclusive lock tries to modify the
862          * reader count, just make it a no-op. This way, we also avoid
863          * invoking the GC safe point macros below, which could break if
864          * done from a thread that is currently the initiator of STW.
865          *
866          * In other words, we rely on the fact that the GC thread takes
867          * the exclusive lock in the gc_event () callback when the world
868          * is about to stop.
869          */
870         if (InterlockedRead (&buffer_lock_state) != get_thread ()->small_id << 16) {
871                 MONO_ENTER_GC_SAFE;
872
873                 gint32 old, new_;
874
875                 do {
876                 restart:
877                         // Hold off if a thread wants to take the exclusive lock.
878                         while (InterlockedRead (&buffer_lock_exclusive_intent))
879                                 mono_thread_info_yield ();
880
881                         old = InterlockedRead (&buffer_lock_state);
882
883                         // Is a thread holding the exclusive lock?
884                         if (old >> 16) {
885                                 mono_thread_info_yield ();
886                                 goto restart;
887                         }
888
889                         new_ = old + 1;
890                 } while (InterlockedCompareExchange (&buffer_lock_state, new_, old) != old);
891
892                 MONO_EXIT_GC_SAFE;
893         }
894
895         mono_memory_barrier ();
896 }
897
898 static void
899 buffer_unlock (void)
900 {
901         mono_memory_barrier ();
902
903         gint32 state = InterlockedRead (&buffer_lock_state);
904
905         // See the comment in buffer_lock ().
906         if (state == PROF_TLS_GET ()->small_id << 16)
907                 return;
908
909         g_assert (state && "Why are we decrementing a zero reader count?");
910         g_assert (!(state >> 16) && "Why is the exclusive lock held?");
911
912         InterlockedDecrement (&buffer_lock_state);
913 }
914
915 static void
916 buffer_lock_excl (void)
917 {
918         gint32 new_ = get_thread ()->small_id << 16;
919
920         g_assert (InterlockedRead (&buffer_lock_state) != new_ && "Why are we taking the exclusive lock twice?");
921
922         InterlockedIncrement (&buffer_lock_exclusive_intent);
923
924         MONO_ENTER_GC_SAFE;
925
926         while (InterlockedCompareExchange (&buffer_lock_state, new_, 0))
927                 mono_thread_info_yield ();
928
929         MONO_EXIT_GC_SAFE;
930
931         mono_memory_barrier ();
932 }
933
934 static void
935 buffer_unlock_excl (void)
936 {
937         mono_memory_barrier ();
938
939         gint32 state = InterlockedRead (&buffer_lock_state);
940         gint32 excl = state >> 16;
941
942         g_assert (excl && "Why is the exclusive lock not held?");
943         g_assert (excl == PROF_TLS_GET ()->small_id && "Why does another thread hold the exclusive lock?");
944         g_assert (!(state & 0xFFFF) && "Why are there readers when the exclusive lock is held?");
945
946         InterlockedWrite (&buffer_lock_state, 0);
947         InterlockedDecrement (&buffer_lock_exclusive_intent);
948 }
949
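/*
 * Illustrative usage sketch only, kept out of the build on purpose: the
 * intended pairing of the lock primitives above. A thread takes the reader
 * lock around writes to its own buffer (normally via ENTER_LOG / EXIT_LOG)
 * and the exclusive lock around access to other threads' buffers (STW
 * handling, sync points). Neither form of the lock may be taken recursively.
 */
#if 0
static void
example_buffer_lock_usage (void)
{
        /* Reader side: protect writes to the calling thread's own buffer. */
        buffer_lock ();
        /* ... emit events into get_thread ()->buffer ... */
        buffer_unlock ();

        /* Writer side: protect access to every thread's buffers. */
        buffer_lock_excl ();
        /* ... e.g. flush all thread buffers for a sync point ... */
        buffer_unlock_excl ();
}
#endif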
950 static void
951 encode_uleb128 (uint64_t value, uint8_t *buf, uint8_t **endbuf)
952 {
953         uint8_t *p = buf;
954
955         do {
956                 uint8_t b = value & 0x7f;
957                 value >>= 7;
958
959                 if (value != 0) /* more bytes to come */
960                         b |= 0x80;
961
962                 *p ++ = b;
963         } while (value);
964
965         *endbuf = p;
966 }
967
968 static void
969 encode_sleb128 (intptr_t value, uint8_t *buf, uint8_t **endbuf)
970 {
971         int more = 1;
972         int negative = (value < 0);
973         unsigned int size = sizeof (intptr_t) * 8;
974         uint8_t byte;
975         uint8_t *p = buf;
976
977         while (more) {
978                 byte = value & 0x7f;
979                 value >>= 7;
980
981                 /* the following is unnecessary if the
982                  * implementation of >>= uses an arithmetic rather
983                  * than logical shift for a signed left operand
984                  */
985                 if (negative)
986                         /* sign extend */
987                         value |= - ((intptr_t) 1 <<(size - 7));
988
989                 /* sign bit of byte is second high order bit (0x40) */
990                 if ((value == 0 && !(byte & 0x40)) ||
991                     (value == -1 && (byte & 0x40)))
992                         more = 0;
993                 else
994                         byte |= 0x80;
995
996                 *p++ = byte;
997         }
998
999         *endbuf = p;
1000 }
1001
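/*
 * Illustrative sketch only, kept out of the build on purpose: the matching
 * sleb128 decoder a log consumer would use. The example_* name is made up
 * for illustration; it is simply the inverse of encode_sleb128 () above.
 */
#if 0
static intptr_t
example_decode_sleb128 (const uint8_t *buf, const uint8_t **endbuf)
{
        const uint8_t *p = buf;
        intptr_t result = 0;
        int shift = 0;
        uint8_t byte;

        do {
                byte = *p++;
                result |= (intptr_t) (byte & 0x7f) << shift;
                shift += 7;
        } while (byte & 0x80);

        /* Sign extend if the sign bit (0x40) of the last byte was set. */
        if (shift < (int) (sizeof (intptr_t) * 8) && (byte & 0x40))
                result |= - ((intptr_t) 1 << shift);

        *endbuf = p;

        return result;
}
#endif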
1002 static void
1003 emit_byte (LogBuffer *logbuffer, int value)
1004 {
1005         logbuffer->cursor [0] = value;
1006         logbuffer->cursor++;
1007
1008         g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
1009 }
1010
1011 static void
1012 emit_value (LogBuffer *logbuffer, int value)
1013 {
1014         encode_uleb128 (value, logbuffer->cursor, &logbuffer->cursor);
1015
1016         g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
1017 }
1018
1019 static void
1020 emit_time (LogBuffer *logbuffer, uint64_t value)
1021 {
1022         uint64_t tdiff = value - logbuffer->last_time;
1023         encode_uleb128 (tdiff, logbuffer->cursor, &logbuffer->cursor);
1024         logbuffer->last_time = value;
1025
1026         g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
1027 }
1028
1029 static void
1030 emit_event_time (LogBuffer *logbuffer, int event, uint64_t time)
1031 {
1032         emit_byte (logbuffer, event);
1033         emit_time (logbuffer, time);
1034 }
1035
1036 static void
1037 emit_event (LogBuffer *logbuffer, int event)
1038 {
1039         emit_event_time (logbuffer, event, current_time ());
1040 }
1041
1042 static void
1043 emit_svalue (LogBuffer *logbuffer, int64_t value)
1044 {
1045         encode_sleb128 (value, logbuffer->cursor, &logbuffer->cursor);
1046
1047         g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
1048 }
1049
1050 static void
1051 emit_uvalue (LogBuffer *logbuffer, uint64_t value)
1052 {
1053         encode_uleb128 (value, logbuffer->cursor, &logbuffer->cursor);
1054
1055         g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
1056 }
1057
1058 static void
1059 emit_ptr (LogBuffer *logbuffer, void *ptr)
1060 {
1061         if (!logbuffer->ptr_base)
1062                 logbuffer->ptr_base = (uintptr_t) ptr;
1063
1064         emit_svalue (logbuffer, (intptr_t) ptr - logbuffer->ptr_base);
1065
1066         g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
1067 }
1068
1069 static void
1070 emit_method_inner (LogBuffer *logbuffer, void *method)
1071 {
1072         if (!logbuffer->method_base) {
1073                 logbuffer->method_base = (intptr_t) method;
1074                 logbuffer->last_method = (intptr_t) method;
1075         }
1076
1077         encode_sleb128 ((intptr_t) ((char *) method - (char *) logbuffer->last_method), logbuffer->cursor, &logbuffer->cursor);
1078         logbuffer->last_method = (intptr_t) method;
1079
1080         g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
1081 }
1082
1083 // The reader lock must be held.
1084 static void
1085 register_method_local (MonoMethod *method, MonoJitInfo *ji)
1086 {
1087         MonoProfilerThread *thread = get_thread ();
1088
1089         if (!mono_conc_hashtable_lookup (thread->profiler->method_table, method)) {
1090                 MethodInfo *info = (MethodInfo *) g_malloc (sizeof (MethodInfo));
1091
1092                 info->method = method;
1093                 info->ji = ji;
1094                 info->time = current_time ();
1095
1096                 GPtrArray *arr = thread->methods ? thread->methods : (thread->methods = g_ptr_array_new ());
1097                 g_ptr_array_add (arr, info);
1098         }
1099 }
1100
1101 static void
1102 emit_method (LogBuffer *logbuffer, MonoMethod *method)
1103 {
1104         register_method_local (method, NULL);
1105         emit_method_inner (logbuffer, method);
1106 }
1107
1108 static void
1109 emit_obj (LogBuffer *logbuffer, void *ptr)
1110 {
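        /*
         * Object addresses are emitted shifted right by 3 bits: heap objects
         * are assumed to be at least 8-byte aligned, so the low bits carry no
         * information and dropping them keeps the sleb128 deltas small.
         */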
1111         if (!logbuffer->obj_base)
1112                 logbuffer->obj_base = (uintptr_t) ptr >> 3;
1113
1114         emit_svalue (logbuffer, ((uintptr_t) ptr >> 3) - logbuffer->obj_base);
1115
1116         g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
1117 }
1118
1119 static void
1120 emit_string (LogBuffer *logbuffer, const char *str, size_t size)
1121 {
1122         size_t i = 0;
1123         if (str) {
1124                 for (; i < size; i++) {
1125                         if (str[i] == '\0')
1126                                 break;
1127                         emit_byte (logbuffer, str [i]);
1128                 }
1129         }
1130         emit_byte (logbuffer, '\0');
1131 }
1132
1133 static void
1134 emit_double (LogBuffer *logbuffer, double value)
1135 {
1136         int i;
1137         unsigned char buffer[8];
1138         memcpy (buffer, &value, 8);
1139 #if G_BYTE_ORDER == G_BIG_ENDIAN
1140         for (i = 7; i >= 0; i--)
1141 #else
1142         for (i = 0; i < 8; i++)
1143 #endif
1144                 emit_byte (logbuffer, buffer[i]);
1145 }
1146
1147 static char*
1148 write_int16 (char *buf, int32_t value)
1149 {
1150         int i;
1151         for (i = 0; i < 2; ++i) {
1152                 buf [i] = value;
1153                 value >>= 8;
1154         }
1155         return buf + 2;
1156 }
1157
1158 static char*
1159 write_int32 (char *buf, int32_t value)
1160 {
1161         int i;
1162         for (i = 0; i < 4; ++i) {
1163                 buf [i] = value;
1164                 value >>= 8;
1165         }
1166         return buf + 4;
1167 }
1168
1169 static char*
1170 write_int64 (char *buf, int64_t value)
1171 {
1172         int i;
1173         for (i = 0; i < 8; ++i) {
1174                 buf [i] = value;
1175                 value >>= 8;
1176         }
1177         return buf + 8;
1178 }
1179
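/*
 * Illustrative sketch only, kept out of the build on purpose: the
 * little-endian reader that mirrors write_int32 () above, as a log consumer
 * would implement it. The example_* name is made up for illustration.
 */
#if 0
static int32_t
example_read_int32 (const unsigned char *buf)
{
        uint32_t value = 0;
        int i;

        for (i = 3; i >= 0; --i)
                value = (value << 8) | buf [i];

        return (int32_t) value;
}
#endif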
1180 static char *
1181 write_header_string (char *p, const char *str)
1182 {
1183         size_t len = strlen (str) + 1;
1184
1185         p = write_int32 (p, len);
1186         strcpy (p, str);
1187
1188         return p + len;
1189 }
1190
1191 static void
1192 dump_header (MonoProfiler *profiler)
1193 {
1194         const char *args = profiler->args;
1195         const char *arch = mono_config_get_cpu ();
1196         const char *os = mono_config_get_os ();
1197
1198         char *hbuf = g_malloc (
1199                 sizeof (gint32) /* header id */ +
1200                 sizeof (gint8) /* major version */ +
1201                 sizeof (gint8) /* minor version */ +
1202                 sizeof (gint8) /* data version */ +
1203                 sizeof (gint8) /* word size */ +
1204                 sizeof (gint64) /* startup time */ +
1205                 sizeof (gint32) /* timer overhead */ +
1206                 sizeof (gint32) /* flags */ +
1207                 sizeof (gint32) /* process id */ +
1208                 sizeof (gint16) /* command port */ +
1209                 sizeof (gint32) + strlen (args) + 1 /* arguments */ +
1210                 sizeof (gint32) + strlen (arch) + 1 /* architecture */ +
1211                 sizeof (gint32) + strlen (os) + 1 /* operating system */
1212         );
1213         char *p = hbuf;
1214
1215         p = write_int32 (p, LOG_HEADER_ID);
1216         *p++ = LOG_VERSION_MAJOR;
1217         *p++ = LOG_VERSION_MINOR;
1218         *p++ = LOG_DATA_VERSION;
1219         *p++ = sizeof (void *);
1220         p = write_int64 (p, ((uint64_t) time (NULL)) * 1000);
1221         p = write_int32 (p, timer_overhead);
1222         p = write_int32 (p, 0); /* flags */
1223         p = write_int32 (p, process_id ());
1224         p = write_int16 (p, profiler->command_port);
1225         p = write_header_string (p, args);
1226         p = write_header_string (p, arch);
1227         p = write_header_string (p, os);
1228
1229 #if defined (HAVE_SYS_ZLIB)
1230         if (profiler->gzfile) {
1231                 gzwrite (profiler->gzfile, hbuf, p - hbuf);
1232         } else
1233 #endif
1234         {
1235                 fwrite (hbuf, p - hbuf, 1, profiler->file);
1236                 fflush (profiler->file);
1237         }
1238
1239         g_free (hbuf);
1240 }
1241
1242 /*
1243  * Must be called with the reader lock held if thread is the current thread, or
1244  * the exclusive lock if thread is a different thread. However, if thread is
1245  * the current thread, and init_thread () was called with add_to_lls = FALSE,
1246  * then no locking is necessary.
1247  */
1248 static void
1249 send_buffer (MonoProfilerThread *thread)
1250 {
1251         WriterQueueEntry *entry = mono_lock_free_alloc (&thread->profiler->writer_entry_allocator);
1252         entry->methods = thread->methods;
1253         entry->buffer = thread->buffer;
1254
1255         mono_lock_free_queue_node_init (&entry->node, FALSE);
1256
1257         mono_lock_free_queue_enqueue (&thread->profiler->writer_queue, &entry->node);
1258         mono_os_sem_post (&thread->profiler->writer_queue_sem);
1259 }
1260
1261 static void
1262 free_thread (gpointer p)
1263 {
1264         MonoProfilerThread *thread = p;
1265
1266         if (!thread->ended) {
1267                 /*
1268                  * The thread is being cleaned up by the main thread during
1269                  * shutdown. This typically happens for internal runtime
1270                  * threads. We need to synthesize a thread end event.
1271                  */
1272
1273                 InterlockedIncrement (&thread_ends_ctr);
1274
1275                 if (ENABLED (PROFLOG_THREAD_EVENTS)) {
1276                         LogBuffer *buf = ensure_logbuf_unsafe (thread,
1277                                 EVENT_SIZE /* event */ +
1278                                 BYTE_SIZE /* type */ +
1279                                 LEB128_SIZE /* tid */
1280                         );
1281
1282                         emit_event (buf, TYPE_END_UNLOAD | TYPE_METADATA);
1283                         emit_byte (buf, TYPE_THREAD);
1284                         emit_ptr (buf, (void *) thread->node.key);
1285                 }
1286         }
1287
1288         send_buffer (thread);
1289
1290         g_free (thread);
1291 }
1292
1293 static void
1294 remove_thread (MonoProfilerThread *thread)
1295 {
1296         MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();
1297
1298         if (mono_lls_remove (&profiler_thread_list, hp, &thread->node))
1299                 mono_thread_hazardous_try_free (thread, free_thread);
1300
1301         clear_hazard_pointers (hp);
1302 }
1303
1304 static void
1305 dump_buffer (MonoProfiler *profiler, LogBuffer *buf)
1306 {
1307         char hbuf [128];
1308         char *p = hbuf;
1309
1310         if (buf->next)
1311                 dump_buffer (profiler, buf->next);
1312
1313         if (buf->cursor - buf->buf) {
1314                 p = write_int32 (p, BUF_ID);
1315                 p = write_int32 (p, buf->cursor - buf->buf);
1316                 p = write_int64 (p, buf->time_base);
1317                 p = write_int64 (p, buf->ptr_base);
1318                 p = write_int64 (p, buf->obj_base);
1319                 p = write_int64 (p, buf->thread_id);
1320                 p = write_int64 (p, buf->method_base);
1321
1322 #if defined (HAVE_SYS_ZLIB)
1323                 if (profiler->gzfile) {
1324                         gzwrite (profiler->gzfile, hbuf, p - hbuf);
1325                         gzwrite (profiler->gzfile, buf->buf, buf->cursor - buf->buf);
1326                 } else
1327 #endif
1328                 {
1329                         fwrite (hbuf, p - hbuf, 1, profiler->file);
1330                         fwrite (buf->buf, buf->cursor - buf->buf, 1, profiler->file);
1331                         fflush (profiler->file);
1332                 }
1333         }
1334
1335         free_buffer (buf, buf->size);
1336 }
1337
1338 static void
1339 dump_buffer_threadless (MonoProfiler *profiler, LogBuffer *buf)
1340 {
1341         for (LogBuffer *iter = buf; iter; iter = iter->next)
1342                 iter->thread_id = 0;
1343
1344         dump_buffer (profiler, buf);
1345 }
1346
1347 // Only valid if init_thread () was called with add_to_lls = FALSE.
1348 static void
1349 send_log_unsafe (gboolean if_needed)
1350 {
1351         MonoProfilerThread *thread = PROF_TLS_GET ();
1352
1353         if (!if_needed || thread->buffer->next) {
1354                 if (!thread->attached)
1355                         for (LogBuffer *iter = thread->buffer; iter; iter = iter->next)
1356                                 iter->thread_id = 0;
1357
1358                 send_buffer (thread);
1359                 init_buffer_state (thread);
1360         }
1361 }
1362
1363 // Assumes that the exclusive lock is held.
1364 static void
1365 sync_point_flush (void)
1366 {
1367         g_assert (InterlockedRead (&buffer_lock_state) == PROF_TLS_GET ()->small_id << 16 && "Why don't we hold the exclusive lock?");
1368
1369         MONO_LLS_FOREACH_SAFE (&profiler_thread_list, MonoProfilerThread, thread) {
1370                 g_assert (thread->attached && "Why is a thread in the LLS not attached?");
1371
1372                 send_buffer (thread);
1373                 init_buffer_state (thread);
1374         } MONO_LLS_FOREACH_SAFE_END
1375 }
1376
1377 // Assumes that the exclusive lock is held.
1378 static void
1379 sync_point_mark (MonoProfilerSyncPointType type)
1380 {
1381         g_assert (InterlockedRead (&buffer_lock_state) == PROF_TLS_GET ()->small_id << 16 && "Why don't we hold the exclusive lock?");
1382
1383         ENTER_LOG (&sync_points_ctr, logbuffer,
1384                 EVENT_SIZE /* event */ +
1385                 LEB128_SIZE /* type */
1386         );
1387
1388         emit_event (logbuffer, TYPE_META | TYPE_SYNC_POINT);
1389         emit_byte (logbuffer, type);
1390
1391         EXIT_LOG_EXPLICIT (NO_SEND);
1392
1393         send_log_unsafe (FALSE);
1394 }
1395
1396 // Assumes that the exclusive lock is held.
1397 static void
1398 sync_point (MonoProfilerSyncPointType type)
1399 {
1400         sync_point_flush ();
1401         sync_point_mark (type);
1402 }
1403
1404 static int
1405 gc_reference (MonoObject *obj, MonoClass *klass, uintptr_t size, uintptr_t num, MonoObject **refs, uintptr_t *offsets, void *data)
1406 {
1407         /* account for object alignment in the heap */
1408         size += 7;
1409         size &= ~7;
1410
1411         ENTER_LOG (&heap_objects_ctr, logbuffer,
1412                 EVENT_SIZE /* event */ +
1413                 LEB128_SIZE /* obj */ +
1414                 LEB128_SIZE /* klass */ +
1415                 LEB128_SIZE /* size */ +
1416                 LEB128_SIZE /* num */ +
1417                 num * (
1418                         LEB128_SIZE /* offset */ +
1419                         LEB128_SIZE /* ref */
1420                 )
1421         );
1422
1423         emit_event (logbuffer, TYPE_HEAP_OBJECT | TYPE_HEAP);
1424         emit_obj (logbuffer, obj);
1425         emit_ptr (logbuffer, klass);
1426         emit_value (logbuffer, size);
1427         emit_value (logbuffer, num);
1428
1429         uintptr_t last_offset = 0;
1430
1431         for (int i = 0; i < num; ++i) {
1432                 emit_value (logbuffer, offsets [i] - last_offset);
1433                 last_offset = offsets [i];
1434                 emit_obj (logbuffer, refs [i]);
1435         }
1436
1437         EXIT_LOG_EXPLICIT (DO_SEND);
1438
1439         return 0;
1440 }
1441
1442 static unsigned int hs_mode_ms = 0;
1443 static unsigned int hs_mode_gc = 0;
1444 static unsigned int hs_mode_ondemand = 0;
1445 static unsigned int gc_count = 0;
1446 static uint64_t last_hs_time = 0;
1447 static gboolean do_heap_walk = FALSE;
1448 static gboolean ignore_heap_events;
1449
1450 static void
1451 gc_roots (MonoProfiler *prof, int num, void **objects, int *root_types, uintptr_t *extra_info)
1452 {
1453         if (ignore_heap_events)
1454                 return;
1455
1456         ENTER_LOG (&heap_roots_ctr, logbuffer,
1457                 EVENT_SIZE /* event */ +
1458                 LEB128_SIZE /* num */ +
1459                 LEB128_SIZE /* collections */ +
1460                 num * (
1461                         LEB128_SIZE /* object */ +
1462                         LEB128_SIZE /* root type */ +
1463                         LEB128_SIZE /* extra info */
1464                 )
1465         );
1466
1467         emit_event (logbuffer, TYPE_HEAP_ROOT | TYPE_HEAP);
1468         emit_value (logbuffer, num);
1469         emit_value (logbuffer, mono_gc_collection_count (mono_gc_max_generation ()));
1470
1471         for (int i = 0; i < num; ++i) {
1472                 emit_obj (logbuffer, objects [i]);
1473                 emit_byte (logbuffer, root_types [i]);
1474                 emit_value (logbuffer, extra_info [i]);
1475         }
1476
1477         EXIT_LOG_EXPLICIT (DO_SEND);
1478 }
1479
1480
1481 static void
1482 trigger_on_demand_heapshot (void)
1483 {
1484         if (heapshot_requested)
1485                 mono_gc_collect (mono_gc_max_generation ());
1486 }
1487
1488 #define ALL_GC_EVENTS_MASK (PROFLOG_GC_MOVES_EVENTS | PROFLOG_GC_ROOT_EVENTS | PROFLOG_GC_EVENTS | PROFLOG_HEAPSHOT_FEATURE)
1489
1490 static void
1491 gc_event (MonoProfiler *profiler, MonoGCEvent ev, int generation)
1492 {
1493         if (ev == MONO_GC_EVENT_START) {
1494                 uint64_t now = current_time ();
1495
1496                 if (hs_mode_ms && (now - last_hs_time) / 1000 / 1000 >= hs_mode_ms)
1497                         do_heap_walk = TRUE;
1498                 else if (hs_mode_gc && !(gc_count % hs_mode_gc))
1499                         do_heap_walk = TRUE;
1500                 else if (hs_mode_ondemand)
1501                         do_heap_walk = heapshot_requested;
1502                 else if (!hs_mode_ms && !hs_mode_gc && generation == mono_gc_max_generation ())
1503                         do_heap_walk = TRUE;
1504
1505                 // If using heapshot, ignore events for collections we don't care about.
1506                 if (ENABLED (PROFLOG_HEAPSHOT_FEATURE)) {
1507                         // Ignore events generated during the collection itself (i.e. GC roots).
1508                         ignore_heap_events = !do_heap_walk;
1509                 }
1510         }
1511
1512
1513         if (ENABLED (PROFLOG_GC_EVENTS)) {
1514                 ENTER_LOG (&gc_events_ctr, logbuffer,
1515                         EVENT_SIZE /* event */ +
1516                         BYTE_SIZE /* gc event */ +
1517                         BYTE_SIZE /* generation */
1518                 );
1519
1520                 emit_event (logbuffer, TYPE_GC_EVENT | TYPE_GC);
1521                 emit_byte (logbuffer, ev);
1522                 emit_byte (logbuffer, generation);
1523
1524                 EXIT_LOG_EXPLICIT (NO_SEND);
1525         }
1526
1527         switch (ev) {
1528         case MONO_GC_EVENT_START:
1529                 if (generation == mono_gc_max_generation ())
1530                         gc_count++;
1531
1532                 break;
1533         case MONO_GC_EVENT_PRE_STOP_WORLD_LOCKED:
1534                 /*
1535                  * Ensure that no thread can be in the middle of writing to
1536                  * a buffer when the world stops...
1537                  */
1538                 buffer_lock_excl ();
1539                 break;
1540         case MONO_GC_EVENT_POST_STOP_WORLD:
1541                 /*
1542                  * ... So that we now have a consistent view of all buffers.
1543                  * This allows us to flush them. We need to do this because
1544                  * they may contain object allocation events that need to be
1545                  * committed to the log file before any object move events
1546                  * that will be produced during this GC.
1547                  */
1548                 if (ENABLED (ALL_GC_EVENTS_MASK))
1549                         sync_point (SYNC_POINT_WORLD_STOP);
1550
1551                 /*
1552                  * All heap events are surrounded by a HEAP_START and a HEAP_END event.
1553                  * Right now, that's the case for GC moves, GC roots, and heapshots.
1554                  */
1555                 if (ENABLED (PROFLOG_GC_MOVES_EVENTS | PROFLOG_GC_ROOT_EVENTS) || do_heap_walk) {
1556                         ENTER_LOG (&heap_starts_ctr, logbuffer,
1557                                 EVENT_SIZE /* event */
1558                         );
1559
1560                         emit_event (logbuffer, TYPE_HEAP_START | TYPE_HEAP);
1561
1562                         EXIT_LOG_EXPLICIT (DO_SEND);
1563                 }
1564
1565                 break;
1566         case MONO_GC_EVENT_PRE_START_WORLD:
1567                 if (do_heap_shot && do_heap_walk)
1568                         mono_gc_walk_heap (0, gc_reference, NULL);
1569
1570                 /* Matching HEAP_END to the HEAP_START from above */
1571                 if (ENABLED (PROFLOG_GC_MOVES_EVENTS | PROFLOG_GC_ROOT_EVENTS) || do_heap_walk) {
1572                         ENTER_LOG (&heap_ends_ctr, logbuffer,
1573                                 EVENT_SIZE /* event */
1574                         );
1575
1576                         emit_event (logbuffer, TYPE_HEAP_END | TYPE_HEAP);
1577
1578                         EXIT_LOG_EXPLICIT (DO_SEND);
1579                 }
1580
1581                 if (do_heap_shot && do_heap_walk) {
1582                         do_heap_walk = FALSE;
1583                         heapshot_requested = 0;
1584                         last_hs_time = current_time ();
1585                 }
1586
1587                 /*
1588                  * Similarly, we must now make sure that any object moves
1589                  * written to the GC thread's buffer are flushed. Otherwise,
1590                  * object allocation events for certain addresses could come
1591                  * after the move events that made those addresses available.
1592                  */
1593                 if (ENABLED (ALL_GC_EVENTS_MASK))
1594                         sync_point_mark (SYNC_POINT_WORLD_START);
1595                 break;
1596         case MONO_GC_EVENT_POST_START_WORLD_UNLOCKED:
1597                 /*
1598                  * Finally, it is safe to allow other threads to write to
1599                  * their buffers again.
1600                  */
1601                 buffer_unlock_excl ();
1602                 break;
1603         default:
1604                 break;
1605         }
1606 }
1607
1608 static void
1609 gc_resize (MonoProfiler *profiler, int64_t new_size)
1610 {
1611         ENTER_LOG (&gc_resizes_ctr, logbuffer,
1612                 EVENT_SIZE /* event */ +
1613                 LEB128_SIZE /* new size */
1614         );
1615
1616         emit_event (logbuffer, TYPE_GC_RESIZE | TYPE_GC);
1617         emit_value (logbuffer, new_size);
1618
1619         EXIT_LOG_EXPLICIT (DO_SEND);
1620 }
1621
1622 typedef struct {
1623         int count;
1624         MonoMethod* methods [MAX_FRAMES];
1625         int32_t il_offsets [MAX_FRAMES];
1626         int32_t native_offsets [MAX_FRAMES];
1627 } FrameData;
1628
1629 static int num_frames = MAX_FRAMES;
1630
1631 static mono_bool
1632 walk_stack (MonoMethod *method, int32_t native_offset, int32_t il_offset, mono_bool managed, void* data)
1633 {
1634         FrameData *frame = (FrameData *)data;
1635         if (method && frame->count < num_frames) {
1636                 frame->il_offsets [frame->count] = il_offset;
1637                 frame->native_offsets [frame->count] = native_offset;
1638                 frame->methods [frame->count++] = method;
1639                 //printf ("In %d %s at %d (native: %d)\n", frame->count, mono_method_get_name (method), il_offset, native_offset);
1640         }
1641         return frame->count == num_frames;
1642 }
1643
1644 /*
1645  * a note about stack walks: they can cause more profiler events to fire,
1646  * so we need to make sure they don't happen after we started emitting an
1647  * event, hence the collect_bt/emit_bt split.
1648  */
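/*
 * Typical pattern (see gc_alloc () below):
 *
 *     collect_bt (&data);                 // walk the stack first
 *     ENTER_LOG (&some_ctr, logbuffer, ...);
 *     ...
 *     emit_bt (prof, logbuffer, &data);   // emit the captured frames
 *     EXIT_LOG;
 */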
1649 static void
1650 collect_bt (FrameData *data)
1651 {
1652         data->count = 0;
1653         mono_stack_walk_no_il (walk_stack, data);
1654 }
1655
1656 static void
1657 emit_bt (MonoProfiler *prof, LogBuffer *logbuffer, FrameData *data)
1658 {
1659         if (data->count > num_frames)
1660                 printf ("bad num frames: %d\n", data->count);
1661
1662         emit_value (logbuffer, data->count);
1663
1664         while (data->count)
1665                 emit_method (logbuffer, data->methods [--data->count]);
1666 }
1667
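/*
 * Emits an ALLOC event with the class, object address and alignment-rounded
 * size. A managed backtrace is attached (TYPE_ALLOC_BT) only when call
 * instrumentation is disabled (nocalls) and traces are enabled, presumably
 * because the stack can otherwise be reconstructed from enter/leave events.
 */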
1668 static void
1669 gc_alloc (MonoProfiler *prof, MonoObject *obj, MonoClass *klass)
1670 {
1671         int do_bt = (nocalls && InterlockedRead (&runtime_inited) && !notraces) ? TYPE_ALLOC_BT : 0;
1672         FrameData data;
1673         uintptr_t len = mono_object_get_size (obj);
1674         /* account for object alignment in the heap */
1675         len += 7;
1676         len &= ~7;
1677
1678         if (do_bt)
1679                 collect_bt (&data);
1680
1681         ENTER_LOG (&gc_allocs_ctr, logbuffer,
1682                 EVENT_SIZE /* event */ +
1683                 LEB128_SIZE /* klass */ +
1684                 LEB128_SIZE /* obj */ +
1685                 LEB128_SIZE /* size */ +
1686                 (do_bt ? (
1687                         LEB128_SIZE /* count */ +
1688                         data.count * (
1689                                 LEB128_SIZE /* method */
1690                         )
1691                 ) : 0)
1692         );
1693
1694         emit_event (logbuffer, do_bt | TYPE_ALLOC);
1695         emit_ptr (logbuffer, klass);
1696         emit_obj (logbuffer, obj);
1697         emit_value (logbuffer, len);
1698
1699         if (do_bt)
1700                 emit_bt (prof, logbuffer, &data);
1701
1702         EXIT_LOG;
1703 }
1704
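/*
 * Emits a GC_MOVE event with the pointers reported by the runtime (the
 * runtime reports moves as old/new address pairs) and flushes the buffer
 * immediately.
 */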
1705 static void
1706 gc_moves (MonoProfiler *prof, void **objects, int num)
1707 {
1708         ENTER_LOG (&gc_moves_ctr, logbuffer,
1709                 EVENT_SIZE /* event */ +
1710                 LEB128_SIZE /* num */ +
1711                 num * (
1712                         LEB128_SIZE /* object */
1713                 )
1714         );
1715
1716         emit_event (logbuffer, TYPE_GC_MOVE | TYPE_GC);
1717         emit_value (logbuffer, num);
1718
1719         for (int i = 0; i < num; ++i)
1720                 emit_obj (logbuffer, objects [i]);
1721
1722         EXIT_LOG_EXPLICIT (DO_SEND);
1723 }
1724
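/*
 * Emits a GC handle created/destroyed event with the handle type and handle
 * value, the target object for creations, and an optional managed backtrace.
 */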
1725 static void
1726 gc_handle (MonoProfiler *prof, int op, int type, uintptr_t handle, MonoObject *obj)
1727 {
1728         int do_bt = nocalls && InterlockedRead (&runtime_inited) && !notraces;
1729         FrameData data;
1730
1731         if (do_bt)
1732                 collect_bt (&data);
1733
1734         gint32 *ctr = op == MONO_PROFILER_GC_HANDLE_CREATED ? &gc_handle_creations_ctr : &gc_handle_deletions_ctr;
1735
1736         ENTER_LOG (ctr, logbuffer,
1737                 EVENT_SIZE /* event */ +
1738                 LEB128_SIZE /* type */ +
1739                 LEB128_SIZE /* handle */ +
1740                 (op == MONO_PROFILER_GC_HANDLE_CREATED ? (
1741                         LEB128_SIZE /* obj */
1742                 ) : 0) +
1743                 (do_bt ? (
1744                         LEB128_SIZE /* count */ +
1745                         data.count * (
1746                                 LEB128_SIZE /* method */
1747                         )
1748                 ) : 0)
1749         );
1750
1751         if (op == MONO_PROFILER_GC_HANDLE_CREATED)
1752                 emit_event (logbuffer, (do_bt ? TYPE_GC_HANDLE_CREATED_BT : TYPE_GC_HANDLE_CREATED) | TYPE_GC);
1753         else if (op == MONO_PROFILER_GC_HANDLE_DESTROYED)
1754                 emit_event (logbuffer, (do_bt ? TYPE_GC_HANDLE_DESTROYED_BT : TYPE_GC_HANDLE_DESTROYED) | TYPE_GC);
1755         else
1756                 g_assert_not_reached ();
1757
1758         emit_value (logbuffer, type);
1759         emit_value (logbuffer, handle);
1760
1761         if (op == MONO_PROFILER_GC_HANDLE_CREATED)
1762                 emit_obj (logbuffer, obj);
1763
1764         if (do_bt)
1765                 emit_bt (prof, logbuffer, &data);
1766
1767         EXIT_LOG;
1768 }
1769
1770 static void
1771 finalize_begin (MonoProfiler *prof)
1772 {
1773         ENTER_LOG (&finalize_begins_ctr, buf,
1774                 EVENT_SIZE /* event */
1775         );
1776
1777         emit_event (buf, TYPE_GC_FINALIZE_START | TYPE_GC);
1778
1779         EXIT_LOG;
1780 }
1781
1782 static void
1783 finalize_end (MonoProfiler *prof)
1784 {
1785         trigger_on_demand_heapshot ();
1786         if (ENABLED (PROFLOG_FINALIZATION_EVENTS)) {
1787                 ENTER_LOG (&finalize_ends_ctr, buf,
1788                         EVENT_SIZE /* event */
1789                 );
1790
1791                 emit_event (buf, TYPE_GC_FINALIZE_END | TYPE_GC);
1792
1793                 EXIT_LOG;
1794         }
1795 }
1796
1797 static void
1798 finalize_object_begin (MonoProfiler *prof, MonoObject *obj)
1799 {
1800         ENTER_LOG (&finalize_object_begins_ctr, buf,
1801                 EVENT_SIZE /* event */ +
1802                 LEB128_SIZE /* obj */
1803         );
1804
1805         emit_event (buf, TYPE_GC_FINALIZE_OBJECT_START | TYPE_GC);
1806         emit_obj (buf, obj);
1807
1808         EXIT_LOG;
1809 }
1810
1811 static void
1812 finalize_object_end (MonoProfiler *prof, MonoObject *obj)
1813 {
1814         ENTER_LOG (&finalize_object_ends_ctr, buf,
1815                 EVENT_SIZE /* event */ +
1816                 LEB128_SIZE /* obj */
1817         );
1818
1819         emit_event (buf, TYPE_GC_FINALIZE_OBJECT_END | TYPE_GC);
1820         emit_obj (buf, obj);
1821
1822         EXIT_LOG;
1823 }
1824
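/*
 * push_nesting () and type_name () build a namespace-qualified, nesting-aware
 * class name using only metadata accessors. They serve as the fallback for
 * class_loaded ()/class_unloaded () before the runtime is fully initialized,
 * when mono_type_get_name () cannot be used yet. Note the fixed 1024-byte
 * buffer in type_name (): names are assumed to fit.
 */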
1825 static char*
1826 push_nesting (char *p, MonoClass *klass)
1827 {
1828         MonoClass *nesting;
1829         const char *name;
1830         const char *nspace;
1831         nesting = mono_class_get_nesting_type (klass);
1832         if (nesting) {
1833                 p = push_nesting (p, nesting);
1834                 *p++ = '/';
1835                 *p = 0;
1836         }
1837         name = mono_class_get_name (klass);
1838         nspace = mono_class_get_namespace (klass);
1839         if (*nspace) {
1840                 strcpy (p, nspace);
1841                 p += strlen (nspace);
1842                 *p++ = '.';
1843                 *p = 0;
1844         }
1845         strcpy (p, name);
1846         p += strlen (name);
1847         return p;
1848 }
1849
1850 static char*
1851 type_name (MonoClass *klass)
1852 {
1853         char buf [1024];
1854         char *p;
1855         push_nesting (buf, klass);
1856         p = (char *) g_malloc (strlen (buf) + 1);
1857         strcpy (p, buf);
1858         return p;
1859 }
1860
1861 static void
1862 image_loaded (MonoProfiler *prof, MonoImage *image, int result)
1863 {
1864         if (result != MONO_PROFILE_OK)
1865                 return;
1866
1867         const char *name = mono_image_get_filename (image);
1868         int nlen = strlen (name) + 1;
1869
1870         ENTER_LOG (&image_loads_ctr, logbuffer,
1871                 EVENT_SIZE /* event */ +
1872                 BYTE_SIZE /* type */ +
1873                 LEB128_SIZE /* image */ +
1874                 nlen /* name */
1875         );
1876
1877         emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
1878         emit_byte (logbuffer, TYPE_IMAGE);
1879         emit_ptr (logbuffer, image);
1880         memcpy (logbuffer->cursor, name, nlen);
1881         logbuffer->cursor += nlen;
1882
1883         EXIT_LOG;
1884 }
1885
1886 static void
1887 image_unloaded (MonoProfiler *prof, MonoImage *image)
1888 {
1889         const char *name = mono_image_get_filename (image);
1890         int nlen = strlen (name) + 1;
1891
1892         ENTER_LOG (&image_unloads_ctr, logbuffer,
1893                 EVENT_SIZE /* event */ +
1894                 BYTE_SIZE /* type */ +
1895                 LEB128_SIZE /* image */ +
1896                 nlen /* name */
1897         );
1898
1899         emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
1900         emit_byte (logbuffer, TYPE_IMAGE);
1901         emit_ptr (logbuffer, image);
1902         memcpy (logbuffer->cursor, name, nlen);
1903         logbuffer->cursor += nlen;
1904
1905         EXIT_LOG;
1906 }
1907
1908 static void
1909 assembly_loaded (MonoProfiler *prof, MonoAssembly *assembly, int result)
1910 {
1911         if (result != MONO_PROFILE_OK)
1912                 return;
1913
1914         char *name = mono_stringify_assembly_name (mono_assembly_get_name (assembly));
1915         int nlen = strlen (name) + 1;
1916         MonoImage *image = mono_assembly_get_image (assembly);
1917
1918         ENTER_LOG (&assembly_loads_ctr, logbuffer,
1919                 EVENT_SIZE /* event */ +
1920                 BYTE_SIZE /* type */ +
1921                 LEB128_SIZE /* assembly */ +
1922                 LEB128_SIZE /* image */ +
1923                 nlen /* name */
1924         );
1925
1926         emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
1927         emit_byte (logbuffer, TYPE_ASSEMBLY);
1928         emit_ptr (logbuffer, assembly);
1929         emit_ptr (logbuffer, image);
1930         memcpy (logbuffer->cursor, name, nlen);
1931         logbuffer->cursor += nlen;
1932
1933         EXIT_LOG;
1934
1935         mono_free (name);
1936 }
1937
1938 static void
1939 assembly_unloaded (MonoProfiler *prof, MonoAssembly *assembly)
1940 {
1941         char *name = mono_stringify_assembly_name (mono_assembly_get_name (assembly));
1942         int nlen = strlen (name) + 1;
1943         MonoImage *image = mono_assembly_get_image (assembly);
1944
1945         ENTER_LOG (&assembly_unloads_ctr, logbuffer,
1946                 EVENT_SIZE /* event */ +
1947                 BYTE_SIZE /* type */ +
1948                 LEB128_SIZE /* assembly */ +
1949                 LEB128_SIZE /* image */ +
1950                 nlen /* name */
1951         );
1952
1953         emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
1954         emit_byte (logbuffer, TYPE_ASSEMBLY);
1955         emit_ptr (logbuffer, assembly);
1956         emit_ptr (logbuffer, image);
1957         memcpy (logbuffer->cursor, name, nlen);
1958         logbuffer->cursor += nlen;
1959
1960         EXIT_LOG;
1961
1962         mono_free (name);
1963 }
1964
1965 static void
1966 class_loaded (MonoProfiler *prof, MonoClass *klass, int result)
1967 {
1968         if (result != MONO_PROFILE_OK)
1969                 return;
1970
1971         char *name;
1972
1973         if (InterlockedRead (&runtime_inited))
1974                 name = mono_type_get_name (mono_class_get_type (klass));
1975         else
1976                 name = type_name (klass);
1977
1978         int nlen = strlen (name) + 1;
1979         MonoImage *image = mono_class_get_image (klass);
1980
1981         ENTER_LOG (&class_loads_ctr, logbuffer,
1982                 EVENT_SIZE /* event */ +
1983                 BYTE_SIZE /* type */ +
1984                 LEB128_SIZE /* klass */ +
1985                 LEB128_SIZE /* image */ +
1986                 nlen /* name */
1987         );
1988
1989         emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
1990         emit_byte (logbuffer, TYPE_CLASS);
1991         emit_ptr (logbuffer, klass);
1992         emit_ptr (logbuffer, image);
1993         memcpy (logbuffer->cursor, name, nlen);
1994         logbuffer->cursor += nlen;
1995
1996         EXIT_LOG;
1997
1998         if (runtime_inited)
1999                 mono_free (name);
2000         else
2001                 g_free (name);
2002 }
2003
2004 static void
2005 class_unloaded (MonoProfiler *prof, MonoClass *klass)
2006 {
2007         char *name;
2008
2009         if (InterlockedRead (&runtime_inited))
2010                 name = mono_type_get_name (mono_class_get_type (klass));
2011         else
2012                 name = type_name (klass);
2013
2014         int nlen = strlen (name) + 1;
2015         MonoImage *image = mono_class_get_image (klass);
2016
2017         ENTER_LOG (&class_unloads_ctr, logbuffer,
2018                 EVENT_SIZE /* event */ +
2019                 BYTE_SIZE /* type */ +
2020                 LEB128_SIZE /* klass */ +
2021                 LEB128_SIZE /* image */ +
2022                 nlen /* name */
2023         );
2024
2025         emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
2026         emit_byte (logbuffer, TYPE_CLASS);
2027         emit_ptr (logbuffer, klass);
2028         emit_ptr (logbuffer, image);
2029         memcpy (logbuffer->cursor, name, nlen);
2030         logbuffer->cursor += nlen;
2031
2032         EXIT_LOG;
2033
2034         if (runtime_inited)
2035                 mono_free (name);
2036         else
2037                 g_free (name);
2038 }
2039
2040 static void process_method_enter_coverage (MonoProfiler *prof, MonoMethod *method);
2041
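/*
 * Method enter/leave/exception-leave events are emitted only while the
 * per-thread call depth stays within max_call_depth, and never in
 * coverage-only mode. Coverage accounting still runs on every enter.
 */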
2042 static void
2043 method_enter (MonoProfiler *prof, MonoMethod *method)
2044 {
2045         process_method_enter_coverage (prof, method);
2046
2047         if (!only_coverage && get_thread ()->call_depth++ <= max_call_depth) {
2048                 ENTER_LOG (&method_entries_ctr, logbuffer,
2049                         EVENT_SIZE /* event */ +
2050                         LEB128_SIZE /* method */
2051                 );
2052
2053                 emit_event (logbuffer, TYPE_ENTER | TYPE_METHOD);
2054                 emit_method (logbuffer, method);
2055
2056                 EXIT_LOG;
2057         }
2058 }
2059
2060 static void
2061 method_leave (MonoProfiler *prof, MonoMethod *method)
2062 {
2063         if (!only_coverage && --get_thread ()->call_depth <= max_call_depth) {
2064                 ENTER_LOG (&method_exits_ctr, logbuffer,
2065                         EVENT_SIZE /* event */ +
2066                         LEB128_SIZE /* method */
2067                 );
2068
2069                 emit_event (logbuffer, TYPE_LEAVE | TYPE_METHOD);
2070                 emit_method (logbuffer, method);
2071
2072                 EXIT_LOG;
2073         }
2074 }
2075
2076 static void
2077 method_exc_leave (MonoProfiler *prof, MonoMethod *method)
2078 {
2079         if (!only_coverage && !nocalls && --get_thread ()->call_depth <= max_call_depth) {
2080                 ENTER_LOG (&method_exception_exits_ctr, logbuffer,
2081                         EVENT_SIZE /* event */ +
2082                         LEB128_SIZE /* method */
2083                 );
2084
2085                 emit_event (logbuffer, TYPE_EXC_LEAVE | TYPE_METHOD);
2086                 emit_method (logbuffer, method);
2087
2088                 EXIT_LOG;
2089         }
2090 }
2091
2092 static void
2093 method_jitted (MonoProfiler *prof, MonoMethod *method, MonoJitInfo *ji, int result)
2094 {
2095         if (result != MONO_PROFILE_OK)
2096                 return;
2097
2098         buffer_lock ();
2099
2100         register_method_local (method, ji);
2101
2102         buffer_unlock ();
2103 }
2104
2105 static void
2106 code_buffer_new (MonoProfiler *prof, void *buffer, int size, MonoProfilerCodeBufferType type, void *data)
2107 {
2108         char *name;
2109         int nlen;
2110
2111         if (type == MONO_PROFILER_CODE_BUFFER_SPECIFIC_TRAMPOLINE) {
2112                 name = (char *) data;
2113                 nlen = strlen (name) + 1;
2114         } else {
2115                 name = NULL;
2116                 nlen = 0;
2117         }
2118
2119         ENTER_LOG (&code_buffers_ctr, logbuffer,
2120                 EVENT_SIZE /* event */ +
2121                 BYTE_SIZE /* type */ +
2122                 LEB128_SIZE /* buffer */ +
2123                 LEB128_SIZE /* size */ +
2124                 (name ? (
2125                         nlen /* name */
2126                 ) : 0)
2127         );
2128
2129         emit_event (logbuffer, TYPE_JITHELPER | TYPE_RUNTIME);
2130         emit_byte (logbuffer, type);
2131         emit_ptr (logbuffer, buffer);
2132         emit_value (logbuffer, size);
2133
2134         if (name) {
2135                 memcpy (logbuffer->cursor, name, nlen);
2136                 logbuffer->cursor += nlen;
2137         }
2138
2139         EXIT_LOG;
2140 }
2141
2142 static void
2143 throw_exc (MonoProfiler *prof, MonoObject *object)
2144 {
2145         int do_bt = (nocalls && InterlockedRead (&runtime_inited) && !notraces) ? TYPE_THROW_BT : 0;
2146         FrameData data;
2147
2148         if (do_bt)
2149                 collect_bt (&data);
2150
2151         ENTER_LOG (&exception_throws_ctr, logbuffer,
2152                 EVENT_SIZE /* event */ +
2153                 LEB128_SIZE /* object */ +
2154                 (do_bt ? (
2155                         LEB128_SIZE /* count */ +
2156                         data.count * (
2157                                 LEB128_SIZE /* method */
2158                         )
2159                 ) : 0)
2160         );
2161
2162         emit_event (logbuffer, do_bt | TYPE_EXCEPTION);
2163         emit_obj (logbuffer, object);
2164
2165         if (do_bt)
2166                 emit_bt (prof, logbuffer, &data);
2167
2168         EXIT_LOG;
2169 }
2170
2171 static void
2172 clause_exc (MonoProfiler *prof, MonoMethod *method, int clause_type, int clause_num, MonoObject *exc)
2173 {
2174         ENTER_LOG (&exception_clauses_ctr, logbuffer,
2175                 EVENT_SIZE /* event */ +
2176                 BYTE_SIZE /* clause type */ +
2177                 LEB128_SIZE /* clause num */ +
2178                 LEB128_SIZE /* method */ + LEB128_SIZE /* object */
2179         );
2180
2181         emit_event (logbuffer, TYPE_EXCEPTION | TYPE_CLAUSE);
2182         emit_byte (logbuffer, clause_type);
2183         emit_value (logbuffer, clause_num);
2184         emit_method (logbuffer, method);
2185         emit_obj (logbuffer, exc);
2186
2187         EXIT_LOG;
2188 }
2189
2190 static void
2191 monitor_event (MonoProfiler *profiler, MonoObject *object, MonoProfilerMonitorEvent ev)
2192 {
2193         int do_bt = (nocalls && InterlockedRead (&runtime_inited) && !notraces) ? TYPE_MONITOR_BT : 0;
2194         FrameData data;
2195
2196         if (do_bt)
2197                 collect_bt (&data);
2198
2199         ENTER_LOG (&monitor_events_ctr, logbuffer,
2200                 EVENT_SIZE /* event */ +
2201                 BYTE_SIZE /* ev */ +
2202                 LEB128_SIZE /* object */ +
2203                 (do_bt ? (
2204                         LEB128_SIZE /* count */ +
2205                         data.count * (
2206                                 LEB128_SIZE /* method */
2207                         )
2208                 ) : 0)
2209         );
2210
2211         emit_event (logbuffer, do_bt | TYPE_MONITOR);
2212         emit_byte (logbuffer, ev);
2213         emit_obj (logbuffer, object);
2214
2215         if (do_bt)
2216                 emit_bt (profiler, logbuffer, &data);
2217
2218         EXIT_LOG;
2219 }
2220
2221 static void
2222 thread_start (MonoProfiler *prof, uintptr_t tid)
2223 {
2224         if (ENABLED (PROFLOG_THREAD_EVENTS)) {
2225                 ENTER_LOG (&thread_starts_ctr, logbuffer,
2226                         EVENT_SIZE /* event */ +
2227                         BYTE_SIZE /* type */ +
2228                         LEB128_SIZE /* tid */
2229                 );
2230
2231                 emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
2232                 emit_byte (logbuffer, TYPE_THREAD);
2233                 emit_ptr (logbuffer, (void*) tid);
2234
2235                 EXIT_LOG;
2236         }
2237 }
2238
2239 static void
2240 thread_end (MonoProfiler *prof, uintptr_t tid)
2241 {
2242         if (ENABLED (PROFLOG_THREAD_EVENTS)) {
2243                 ENTER_LOG (&thread_ends_ctr, logbuffer,
2244                         EVENT_SIZE /* event */ +
2245                         BYTE_SIZE /* type */ +
2246                         LEB128_SIZE /* tid */
2247                 );
2248
2249                 emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
2250                 emit_byte (logbuffer, TYPE_THREAD);
2251                 emit_ptr (logbuffer, (void*) tid);
2252
2253                 EXIT_LOG_EXPLICIT (NO_SEND);
2254         }
2255
2256         MonoProfilerThread *thread = get_thread ();
2257
2258         thread->ended = TRUE;
2259         remove_thread (thread);
2260
2261         PROF_TLS_SET (NULL);
2262 }
2263
2264 static void
2265 thread_name (MonoProfiler *prof, uintptr_t tid, const char *name)
2266 {
2267         int len = strlen (name) + 1;
2268
2269         if (ENABLED (PROFLOG_THREAD_EVENTS)) {
2270                 ENTER_LOG (&thread_names_ctr, logbuffer,
2271                         EVENT_SIZE /* event */ +
2272                         BYTE_SIZE /* type */ +
2273                         LEB128_SIZE /* tid */ +
2274                         len /* name */
2275                 );
2276
2277                 emit_event (logbuffer, TYPE_METADATA);
2278                 emit_byte (logbuffer, TYPE_THREAD);
2279                 emit_ptr (logbuffer, (void*)tid);
2280                 memcpy (logbuffer->cursor, name, len);
2281                 logbuffer->cursor += len;
2282
2283                 EXIT_LOG;
2284         }
2285 }
2286
2287 static void
2288 domain_loaded (MonoProfiler *prof, MonoDomain *domain, int result)
2289 {
2290         if (result != MONO_PROFILE_OK)
2291                 return;
2292
2293         ENTER_LOG (&domain_loads_ctr, logbuffer,
2294                 EVENT_SIZE /* event */ +
2295                 BYTE_SIZE /* type */ +
2296                 LEB128_SIZE /* domain id */
2297         );
2298
2299         emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
2300         emit_byte (logbuffer, TYPE_DOMAIN);
2301         emit_ptr (logbuffer, (void*)(uintptr_t) mono_domain_get_id (domain));
2302
2303         EXIT_LOG;
2304 }
2305
2306 static void
2307 domain_unloaded (MonoProfiler *prof, MonoDomain *domain)
2308 {
2309         ENTER_LOG (&domain_unloads_ctr, logbuffer,
2310                 EVENT_SIZE /* event */ +
2311                 BYTE_SIZE /* type */ +
2312                 LEB128_SIZE /* domain id */
2313         );
2314
2315         emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
2316         emit_byte (logbuffer, TYPE_DOMAIN);
2317         emit_ptr (logbuffer, (void*)(uintptr_t) mono_domain_get_id (domain));
2318
2319         EXIT_LOG;
2320 }
2321
2322 static void
2323 domain_name (MonoProfiler *prof, MonoDomain *domain, const char *name)
2324 {
2325         int nlen = strlen (name) + 1;
2326
2327         ENTER_LOG (&domain_names_ctr, logbuffer,
2328                 EVENT_SIZE /* event */ +
2329                 BYTE_SIZE /* type */ +
2330                 LEB128_SIZE /* domain id */ +
2331                 nlen /* name */
2332         );
2333
2334         emit_event (logbuffer, TYPE_METADATA);
2335         emit_byte (logbuffer, TYPE_DOMAIN);
2336         emit_ptr (logbuffer, (void*)(uintptr_t) mono_domain_get_id (domain));
2337         memcpy (logbuffer->cursor, name, nlen);
2338         logbuffer->cursor += nlen;
2339
2340         EXIT_LOG;
2341 }
2342
2343 static void
2344 context_loaded (MonoProfiler *prof, MonoAppContext *context)
2345 {
2346         ENTER_LOG (&context_loads_ctr, logbuffer,
2347                 EVENT_SIZE /* event */ +
2348                 BYTE_SIZE /* type */ +
2349                 LEB128_SIZE /* context id */ +
2350                 LEB128_SIZE /* domain id */
2351         );
2352
2353         emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
2354         emit_byte (logbuffer, TYPE_CONTEXT);
2355         emit_ptr (logbuffer, (void*)(uintptr_t) mono_context_get_id (context));
2356         emit_ptr (logbuffer, (void*)(uintptr_t) mono_context_get_domain_id (context));
2357
2358         EXIT_LOG;
2359 }
2360
2361 static void
2362 context_unloaded (MonoProfiler *prof, MonoAppContext *context)
2363 {
2364         ENTER_LOG (&context_unloads_ctr, logbuffer,
2365                 EVENT_SIZE /* event */ +
2366                 BYTE_SIZE /* type */ +
2367                 LEB128_SIZE /* context id */ +
2368                 LEB128_SIZE /* domain id */
2369         );
2370
2371         emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
2372         emit_byte (logbuffer, TYPE_CONTEXT);
2373         emit_ptr (logbuffer, (void*)(uintptr_t) mono_context_get_id (context));
2374         emit_ptr (logbuffer, (void*)(uintptr_t) mono_context_get_domain_id (context));
2375
2376         EXIT_LOG;
2377 }
2378
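/*
 * Data captured for a single statistical sample taken from a signal handler.
 * AsyncFrameInfo describes one managed frame; SampleHit is a lock-free queue
 * node holding the timestamp, thread id, instruction pointer and up to
 * num_frames frames. SampleHit instances are recycled through the profiler's
 * sample_reuse_queue; new ones come from a lock-free allocator, capped by
 * max_allocated_sample_hits.
 */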
2379 typedef struct {
2380         MonoMethod *method;
2381         MonoDomain *domain;
2382         void *base_address;
2383         int offset;
2384 } AsyncFrameInfo;
2385
2386 typedef struct {
2387         MonoLockFreeQueueNode node;
2388         MonoProfiler *prof;
2389         uint64_t time;
2390         uintptr_t tid;
2391         void *ip;
2392         int count;
2393         AsyncFrameInfo frames [MONO_ZERO_LEN_ARRAY];
2394 } SampleHit;
2395
2396 static mono_bool
2397 async_walk_stack (MonoMethod *method, MonoDomain *domain, void *base_address, int offset, void *data)
2398 {
2399         SampleHit *sample = (SampleHit *) data;
2400
2401         if (sample->count < num_frames) {
2402                 int i = sample->count;
2403
2404                 sample->frames [i].method = method;
2405                 sample->frames [i].domain = domain;
2406                 sample->frames [i].base_address = base_address;
2407                 sample->frames [i].offset = offset;
2408
2409                 sample->count++;
2410         }
2411
2412         return sample->count == num_frames;
2413 }
2414
2415 #define SAMPLE_SLOT_SIZE(FRAMES) (sizeof (SampleHit) + sizeof (AsyncFrameInfo) * (FRAMES - MONO_ZERO_LEN_ARRAY))
2416 #define SAMPLE_BLOCK_SIZE (mono_pagesize ())
2417
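/*
 * Called via mono_thread_hazardous_try_free () once the sample is no longer
 * referenced by any hazardous pointer: it re-initializes the queue node and
 * pushes the sample onto the dumper queue, waking the dumper thread through
 * the semaphore.
 */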
2418 static void
2419 enqueue_sample_hit (gpointer p)
2420 {
2421         SampleHit *sample = p;
2422
2423         mono_lock_free_queue_node_unpoison (&sample->node);
2424         mono_lock_free_queue_enqueue (&sample->prof->dumper_queue, &sample->node);
2425         mono_os_sem_post (&sample->prof->dumper_queue_sem);
2426 }
2427
2428 static void
2429 mono_sample_hit (MonoProfiler *profiler, unsigned char *ip, void *context)
2430 {
2431         /*
2432          * Please note: We rely on the runtime loading the profiler with
2433          * MONO_DL_EAGER (RTLD_NOW) so that references to runtime functions within
2434          * this function (and its siblings) are resolved when the profiler is
2435          * loaded. Otherwise, we would potentially invoke the dynamic linker when
2436          * invoking runtime functions, which is not async-signal-safe.
2437          */
2438
2439         if (InterlockedRead (&in_shutdown))
2440                 return;
2441
2442         SampleHit *sample = (SampleHit *) mono_lock_free_queue_dequeue (&profiler->sample_reuse_queue);
2443
2444         if (!sample) {
2445                 /*
2446                  * If we're out of reusable sample events and we're not allowed to
2447                  * allocate more, we have no choice but to drop the event.
2448                  */
2449                 if (InterlockedRead (&sample_allocations_ctr) >= max_allocated_sample_hits)
2450                         return;
2451
2452                 sample = mono_lock_free_alloc (&profiler->sample_allocator);
2453                 sample->prof = profiler;
2454                 mono_lock_free_queue_node_init (&sample->node, TRUE);
2455
2456                 InterlockedIncrement (&sample_allocations_ctr);
2457         }
2458
2459         sample->count = 0;
2460         mono_stack_walk_async_safe (&async_walk_stack, context, sample);
2461
2462         sample->time = current_time ();
2463         sample->tid = thread_id ();
2464         sample->ip = ip;
2465
2466         mono_thread_hazardous_try_free (sample, enqueue_sample_hit);
2467 }
2468
2469 static uintptr_t *code_pages = 0;
2470 static int num_code_pages = 0;
2471 static int size_code_pages = 0;
2472 #define CPAGE_SHIFT (9)
2473 #define CPAGE_SIZE (1 << CPAGE_SHIFT)
2474 #define CPAGE_MASK (~(CPAGE_SIZE - 1))
2475 #define CPAGE_ADDR(p) ((p) & CPAGE_MASK)
2476
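/*
 * A small open-addressing (linear probing) hash set of code page addresses
 * observed in samples. add_code_page () returns 1 if the page was newly
 * inserted and 0 if it was already present (or the table is full);
 * add_code_pointer () grows and rehashes the table once it is half full.
 */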
2477 static uintptr_t
2478 add_code_page (uintptr_t *hash, uintptr_t hsize, uintptr_t page)
2479 {
2480         uintptr_t i;
2481         uintptr_t start_pos;
2482         start_pos = (page >> CPAGE_SHIFT) % hsize;
2483         i = start_pos;
2484         do {
2485                 if (hash [i] && CPAGE_ADDR (hash [i]) == CPAGE_ADDR (page)) {
2486                         return 0;
2487                 } else if (!hash [i]) {
2488                         hash [i] = page;
2489                         return 1;
2490                 }
2491                 /* wrap around */
2492                 if (++i == hsize)
2493                         i = 0;
2494         } while (i != start_pos);
2495         /* should not happen */
2496         printf ("failed code page store\n");
2497         return 0;
2498 }
2499
2500 static void
2501 add_code_pointer (uintptr_t ip)
2502 {
2503         uintptr_t i;
2504         if (num_code_pages * 2 >= size_code_pages) {
2505                 uintptr_t *n;
2506                 uintptr_t old_size = size_code_pages;
2507                 size_code_pages *= 2;
2508                 if (size_code_pages == 0)
2509                         size_code_pages = 16;
2510                 n = (uintptr_t *) g_calloc (sizeof (uintptr_t) * size_code_pages, 1);
2511                 for (i = 0; i < old_size; ++i) {
2512                         if (code_pages [i])
2513                                 add_code_page (n, size_code_pages, code_pages [i]);
2514                 }
2515                 if (code_pages)
2516                         g_free (code_pages);
2517                 code_pages = n;
2518         }
2519         num_code_pages += add_code_page (code_pages, size_code_pages, ip & CPAGE_MASK);
2520 }
2521
2522 /* ELF code crashes on some systems. */
2523 //#if defined(HAVE_DL_ITERATE_PHDR) && defined(ELFMAG0)
2524 #if 0
2525 static void
2526 dump_ubin (MonoProfiler *prof, const char *filename, uintptr_t load_addr, uint64_t offset, uintptr_t size)
2527 {
2528         int len = strlen (filename) + 1;
2529
2530         ENTER_LOG (&sample_ubins_ctr, logbuffer,
2531                 EVENT_SIZE /* event */ +
2532                 LEB128_SIZE /* load address */ +
2533                 LEB128_SIZE /* offset */ +
2534                 LEB128_SIZE /* size */ +
2535                 len /* file name */
2536         );
2537
2538         emit_event (logbuffer, TYPE_SAMPLE | TYPE_SAMPLE_UBIN);
2539         emit_ptr (logbuffer, (void*)load_addr);
2540         emit_uvalue (logbuffer, offset);
2541         emit_uvalue (logbuffer, size);
2542         memcpy (logbuffer->cursor, filename, len);
2543         logbuffer->cursor += len;
2544
2545         EXIT_LOG_EXPLICIT (DO_SEND);
2546 }
2547 #endif
2548
2549 static void
2550 dump_usym (MonoProfiler *prof, const char *name, uintptr_t value, uintptr_t size)
2551 {
2552         int len = strlen (name) + 1;
2553
2554         ENTER_LOG (&sample_usyms_ctr, logbuffer,
2555                 EVENT_SIZE /* event */ +
2556                 LEB128_SIZE /* value */ +
2557                 LEB128_SIZE /* size */ +
2558                 len /* name */
2559         );
2560
2561         emit_event (logbuffer, TYPE_SAMPLE | TYPE_SAMPLE_USYM);
2562         emit_ptr (logbuffer, (void*)value);
2563         emit_value (logbuffer, size);
2564         memcpy (logbuffer->cursor, name, len);
2565         logbuffer->cursor += len;
2566
2567         EXIT_LOG_EXPLICIT (DO_SEND);
2568 }
2569
2570 /* ELF code crashes on some systems. */
2571 //#if defined(ELFMAG0)
2572 #if 0
2573
2574 #if SIZEOF_VOID_P == 4
2575 #define ELF_WSIZE 32
2576 #else
2577 #define ELF_WSIZE 64
2578 #endif
2579 #ifndef ElfW
2580 #define ElfW(type)      _ElfW (Elf, ELF_WSIZE, type)
2581 #define _ElfW(e,w,t)    _ElfW_1 (e, w, _##t)
2582 #define _ElfW_1(e,w,t)  e##w##t
2583 #endif
2584
2585 static void
2586 dump_elf_symbols (MonoProfiler *prof, ElfW(Sym) *symbols, int num_symbols, const char *strtab, void *load_addr)
2587 {
2588         int i;
2589         for (i = 0; i < num_symbols; ++i) {
2590                 const char* sym;
2591                 sym =  strtab + symbols [i].st_name;
2592                 if (!symbols [i].st_name || !symbols [i].st_size || (symbols [i].st_info & 0xf) != STT_FUNC)
2593                         continue;
2594                 //printf ("symbol %s at %d\n", sym, symbols [i].st_value);
2595                 dump_usym (prof, sym, (uintptr_t)load_addr + symbols [i].st_value, symbols [i].st_size);
2596         }
2597 }
2598
2599 static int
2600 read_elf_symbols (MonoProfiler *prof, const char *filename, void *load_addr)
2601 {
2602         int fd, i;
2603         void *data;
2604         struct stat statb;
2605         uint64_t file_size;
2606         ElfW(Ehdr) *header;
2607         ElfW(Shdr) *sheader;
2608         ElfW(Shdr) *shstrtabh;
2609         ElfW(Shdr) *symtabh = NULL;
2610         ElfW(Shdr) *strtabh = NULL;
2611         ElfW(Sym) *symbols = NULL;
2612         const char *strtab;
2613         int num_symbols;
2614
2615         fd = open (filename, O_RDONLY);
2616         if (fd < 0)
2617                 return 0;
2618         if (fstat (fd, &statb) != 0) {
2619                 close (fd);
2620                 return 0;
2621         }
2622         file_size = statb.st_size;
2623         data = mmap (NULL, file_size, PROT_READ, MAP_PRIVATE, fd, 0);
2624         close (fd);
2625         if (data == MAP_FAILED)
2626                 return 0;
2627         header = data;
2628         if (header->e_ident [EI_MAG0] != ELFMAG0 ||
2629                         header->e_ident [EI_MAG1] != ELFMAG1 ||
2630                         header->e_ident [EI_MAG2] != ELFMAG2 ||
2631                         header->e_ident [EI_MAG3] != ELFMAG3 ) {
2632                 munmap (data, file_size);
2633                 return 0;
2634         }
2635         sheader = (void*)((char*)data + header->e_shoff);
2636         shstrtabh = (void*)((char*)sheader + (header->e_shentsize * header->e_shstrndx));
2637         strtab = (const char*)data + shstrtabh->sh_offset;
2638         for (i = 0; i < header->e_shnum; ++i) {
2639                 //printf ("section header: %d\n", sheader->sh_type);
2640                 if (sheader->sh_type == SHT_SYMTAB) {
2641                         symtabh = sheader;
2642                         strtabh = (void*)((char*)data + header->e_shoff + sheader->sh_link * header->e_shentsize);
2643                         /*printf ("symtab section header: %d, .strstr: %d\n", i, sheader->sh_link);*/
2644                         break;
2645                 }
2646                 sheader = (void*)((char*)sheader + header->e_shentsize);
2647         }
2648         if (!symtabh || !strtabh) {
2649                 munmap (data, file_size);
2650                 return 0;
2651         }
2652         strtab = (const char*)data + strtabh->sh_offset;
2653         num_symbols = symtabh->sh_size / symtabh->sh_entsize;
2654         symbols = (void*)((char*)data + symtabh->sh_offset);
2655         dump_elf_symbols (prof, symbols, num_symbols, strtab, load_addr);
2656         munmap (data, file_size);
2657         return 1;
2658 }
2659 #endif
2660
2661 /* ELF code crashes on some systems. */
2662 //#if defined(HAVE_DL_ITERATE_PHDR) && defined(ELFMAG0)
2663 #if 0
2664 static int
2665 elf_dl_callback (struct dl_phdr_info *info, size_t size, void *data)
2666 {
2667         MonoProfiler *prof = data;
2668         char buf [256];
2669         const char *filename;
2670         BinaryObject *obj;
2671         char *a = (void*)info->dlpi_addr;
2672         int i, num_sym;
2673         ElfW(Dyn) *dyn = NULL;
2674         ElfW(Sym) *symtab = NULL;
2675         ElfW(Word) *hash_table = NULL;
2676         ElfW(Ehdr) *header = NULL;
2677         const char* strtab = NULL;
2678         for (obj = prof->binary_objects; obj; obj = obj->next) {
2679                 if (obj->addr == a)
2680                         return 0;
2681         }
2682         filename = info->dlpi_name;
2683         if (!filename)
2684                 return 0;
2685         if (!info->dlpi_addr && !filename [0]) {
2686                 int l = readlink ("/proc/self/exe", buf, sizeof (buf) - 1);
2687                 if (l > 0) {
2688                         buf [l] = 0;
2689                         filename = buf;
2690                 }
2691         }
2692         obj = g_calloc (sizeof (BinaryObject), 1);
2693         obj->addr = (void*)info->dlpi_addr;
2694         obj->name = pstrdup (filename);
2695         obj->next = prof->binary_objects;
2696         prof->binary_objects = obj;
2697         //printf ("loaded file: %s at %p, segments: %d\n", filename, (void*)info->dlpi_addr, info->dlpi_phnum);
2698         a = NULL;
2699         for (i = 0; i < info->dlpi_phnum; ++i) {
2700                 //printf ("segment type %d file offset: %d, size: %d\n", info->dlpi_phdr[i].p_type, info->dlpi_phdr[i].p_offset, info->dlpi_phdr[i].p_memsz);
2701                 if (info->dlpi_phdr[i].p_type == PT_LOAD && !header) {
2702                         header = (ElfW(Ehdr)*)(info->dlpi_addr + info->dlpi_phdr[i].p_vaddr);
2703                         if (header->e_ident [EI_MAG0] != ELFMAG0 ||
2704                                         header->e_ident [EI_MAG1] != ELFMAG1 ||
2705                                         header->e_ident [EI_MAG2] != ELFMAG2 ||
2706                                         header->e_ident [EI_MAG3] != ELFMAG3 ) {
2707                                 header = NULL;
2708                         }
2709                         dump_ubin (prof, filename, info->dlpi_addr + info->dlpi_phdr[i].p_vaddr, info->dlpi_phdr[i].p_offset, info->dlpi_phdr[i].p_memsz);
2710                 } else if (info->dlpi_phdr[i].p_type == PT_DYNAMIC) {
2711                         dyn = (ElfW(Dyn) *)(info->dlpi_addr + info->dlpi_phdr[i].p_vaddr);
2712                 }
2713         }
2714         if (read_elf_symbols (prof, filename, (void*)info->dlpi_addr))
2715                 return 0;
2716         if (!info->dlpi_name || !info->dlpi_name[0])
2717                 return 0;
2718         if (!dyn)
2719                 return 0;
2720         for (i = 0; dyn [i].d_tag != DT_NULL; ++i) {
2721                 if (dyn [i].d_tag == DT_SYMTAB) {
2722                         if (symtab && do_debug)
2723                                 printf ("multiple symtabs: %d\n", i);
2724                         symtab = (ElfW(Sym) *)(a + dyn [i].d_un.d_ptr);
2725                 } else if (dyn [i].d_tag == DT_HASH) {
2726                         hash_table = (ElfW(Word) *)(a + dyn [i].d_un.d_ptr);
2727                 } else if (dyn [i].d_tag == DT_STRTAB) {
2728                         strtab = (const char*)(a + dyn [i].d_un.d_ptr);
2729                 }
2730         }
2731         if (!hash_table)
2732                 return 0;
2733         num_sym = hash_table [1];
2734         dump_elf_symbols (prof, symtab, num_sym, strtab, (void*)info->dlpi_addr);
2735         return 0;
2736 }
2737
2738 static int
2739 load_binaries (MonoProfiler *prof)
2740 {
2741         dl_iterate_phdr (elf_dl_callback, prof);
2742         return 1;
2743 }
2744 #else
2745 static int
2746 load_binaries (MonoProfiler *prof)
2747 {
2748         return 0;
2749 }
2750 #endif
2751
2752 static const char*
2753 symbol_for (uintptr_t code)
2754 {
2755 #ifdef HAVE_DLADDR
2756         void *ip = (void*)code;
2757         Dl_info di;
2758         if (dladdr (ip, &di)) {
2759                 if (di.dli_sname)
2760                         return di.dli_sname;
2761         } else {
2762         /*      char **names;
2763                 names = backtrace_symbols (&ip, 1);
2764                 if (names) {
2765                         const char* p = names [0];
2766                         g_free (names);
2767                         return p;
2768                 }
2769                 */
2770         }
2771 #endif
2772         return NULL;
2773 }
2774
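/*
 * Emits unmanaged symbol information for every code page recorded by
 * add_code_pointer (). The low bit of each entry marks pages that have
 * already been dumped. Symbols are resolved with dladdr () every 16 bytes
 * within a page and emitted with size 0, since the real symbol size is
 * unknown here.
 */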
2775 static void
2776 dump_unmanaged_coderefs (MonoProfiler *prof)
2777 {
2778         int i;
2779         const char* last_symbol;
2780         uintptr_t addr, page_end;
2781
2782         if (load_binaries (prof))
2783                 return;
2784         for (i = 0; i < size_code_pages; ++i) {
2785                 const char* sym;
2786                 if (!code_pages [i] || code_pages [i] & 1)
2787                         continue;
2788                 last_symbol = NULL;
2789                 addr = CPAGE_ADDR (code_pages [i]);
2790                 page_end = addr + CPAGE_SIZE;
2791                 code_pages [i] |= 1;
2792                 /* we dump the symbols for the whole page */
2793                 for (; addr < page_end; addr += 16) {
2794                         sym = symbol_for (addr);
2795                         if (sym && sym == last_symbol)
2796                                 continue;
2797                         last_symbol = sym;
2798                         if (!sym)
2799                                 continue;
2800                         dump_usym (prof, sym, addr, 0); /* let's not guess the size */
2801                         //printf ("found symbol at %p: %s\n", (void*)addr, sym);
2802                 }
2803         }
2804 }
2805
2806 typedef struct MonoCounterAgent {
2807         MonoCounter *counter;
2808         // MonoCounterAgent-specific data:
2809         void *value;
2810         size_t value_size;
2811         short index;
2812         short emitted;
2813         struct MonoCounterAgent *next;
2814 } MonoCounterAgent;
2815
2816 static MonoCounterAgent* counters;
2817 static int counters_index = 1;
2818 static mono_mutex_t counters_mutex;
2819
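/*
 * Registers a MonoCounter with the profiler's counter list, resetting the
 * cached value if the counter is registered again. Installed as the
 * mono_counters_on_register () callback and also run once for every
 * pre-existing counter from counters_init ().
 */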
2820 static void
2821 counters_add_agent (MonoCounter *counter)
2822 {
2823         if (InterlockedRead (&in_shutdown))
2824                 return;
2825
2826         MonoCounterAgent *agent, *item;
2827
2828         mono_os_mutex_lock (&counters_mutex);
2829
2830         for (agent = counters; agent; agent = agent->next) {
2831                 if (agent->counter == counter) {
2832                         agent->value_size = 0;
2833                         if (agent->value) {
2834                                 g_free (agent->value);
2835                                 agent->value = NULL;
2836                         }
2837                         goto done;
2838                 }
2839         }
2840
2841         agent = (MonoCounterAgent *) g_malloc (sizeof (MonoCounterAgent));
2842         agent->counter = counter;
2843         agent->value = NULL;
2844         agent->value_size = 0;
2845         agent->index = counters_index++;
2846         agent->emitted = 0;
2847         agent->next = NULL;
2848
2849         if (!counters) {
2850                 counters = agent;
2851         } else {
2852                 item = counters;
2853                 while (item->next)
2854                         item = item->next;
2855                 item->next = agent;
2856         }
2857
2858 done:
2859         mono_os_mutex_unlock (&counters_mutex);
2860 }
2861
2862 static mono_bool
2863 counters_init_foreach_callback (MonoCounter *counter, gpointer data)
2864 {
2865         counters_add_agent (counter);
2866         return TRUE;
2867 }
2868
2869 static void
2870 counters_init (MonoProfiler *profiler)
2871 {
2872         mono_os_mutex_init (&counters_mutex);
2873
2874         mono_counters_on_register (&counters_add_agent);
2875         mono_counters_foreach (counters_init_foreach_callback, NULL);
2876 }
2877
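/*
 * Writes a SAMPLE_COUNTERS_DESC block describing any counters that have not
 * been announced yet (section, name, type, unit, variance, index). Called
 * from counters_sample () so counters registered at runtime are picked up.
 */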
2878 static void
2879 counters_emit (MonoProfiler *profiler)
2880 {
2881         MonoCounterAgent *agent;
2882         int len = 0;
2883         int size =
2884                 EVENT_SIZE /* event */ +
2885                 LEB128_SIZE /* len */
2886         ;
2887
2888         mono_os_mutex_lock (&counters_mutex);
2889
2890         for (agent = counters; agent; agent = agent->next) {
2891                 if (agent->emitted)
2892                         continue;
2893
2894                 size +=
2895                         LEB128_SIZE /* section */ +
2896                         strlen (mono_counter_get_name (agent->counter)) + 1 /* name */ +
2897                         BYTE_SIZE /* type */ +
2898                         BYTE_SIZE /* unit */ +
2899                         BYTE_SIZE /* variance */ +
2900                         LEB128_SIZE /* index */
2901                 ;
2902
2903                 len++;
2904         }
2905
2906         if (!len)
2907                 goto done;
2908
2909         ENTER_LOG (&counter_descriptors_ctr, logbuffer, size);
2910
2911         emit_event (logbuffer, TYPE_SAMPLE_COUNTERS_DESC | TYPE_SAMPLE);
2912         emit_value (logbuffer, len);
2913
2914         for (agent = counters; agent; agent = agent->next) {
2915                 const char *name;
2916
2917                 if (agent->emitted)
2918                         continue;
2919
2920                 name = mono_counter_get_name (agent->counter);
2921                 emit_value (logbuffer, mono_counter_get_section (agent->counter));
2922                 emit_string (logbuffer, name, strlen (name) + 1);
2923                 emit_byte (logbuffer, mono_counter_get_type (agent->counter));
2924                 emit_byte (logbuffer, mono_counter_get_unit (agent->counter));
2925                 emit_byte (logbuffer, mono_counter_get_variance (agent->counter));
2926                 emit_value (logbuffer, agent->index);
2927
2928                 agent->emitted = 1;
2929         }
2930
2931         EXIT_LOG_EXPLICIT (DO_SEND);
2932
2933 done:
2934         mono_os_mutex_unlock (&counters_mutex);
2935 }
2936
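/*
 * Emits one SAMPLE_COUNTERS event: each counter is sampled and, for integer
 * types, written as a delta against the previously emitted value (signed or
 * unsigned LEB128), while doubles and strings are written in full. Counters
 * whose value has not changed are skipped, and a 0 index terminates the list.
 */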
2937 static void
2938 counters_sample (MonoProfiler *profiler, uint64_t timestamp)
2939 {
2940         MonoCounterAgent *agent;
2941         MonoCounter *counter;
2942         int type;
2943         int buffer_size;
2944         void *buffer;
2945         int size;
2946
2947         counters_emit (profiler);
2948
2949         buffer_size = 8;
2950         buffer = g_calloc (1, buffer_size);
2951
2952         mono_os_mutex_lock (&counters_mutex);
2953
2954         size =
2955                 EVENT_SIZE /* event */
2956         ;
2957
2958         for (agent = counters; agent; agent = agent->next) {
2959                 size +=
2960                         LEB128_SIZE /* index */ +
2961                         BYTE_SIZE /* type */ +
2962                         mono_counter_get_size (agent->counter) /* value */
2963                 ;
2964         }
2965
2966         size +=
2967                 LEB128_SIZE /* stop marker */
2968         ;
2969
2970         ENTER_LOG (&counter_samples_ctr, logbuffer, size);
2971
2972         emit_event_time (logbuffer, TYPE_SAMPLE_COUNTERS | TYPE_SAMPLE, timestamp);
2973
2974         for (agent = counters; agent; agent = agent->next) {
2975                 size_t size;
2976
2977                 counter = agent->counter;
2978
2979                 size = mono_counter_get_size (counter);
2980
2981                 if (size > buffer_size) {
2982                         buffer_size = size;
2983                         buffer = g_realloc (buffer, buffer_size);
2984                 }
2985
2986                 memset (buffer, 0, buffer_size);
2987
2988                 g_assert (mono_counters_sample (counter, buffer, size));
2989
2990                 type = mono_counter_get_type (counter);
2991
2992                 if (!agent->value) {
2993                         agent->value = g_calloc (1, size);
2994                         agent->value_size = size;
2995                 } else {
2996                         if (type == MONO_COUNTER_STRING) {
2997                                 if (strcmp (agent->value, buffer) == 0)
2998                                         continue;
2999                         } else {
3000                                 if (agent->value_size == size && memcmp (agent->value, buffer, size) == 0)
3001                                         continue;
3002                         }
3003                 }
3004
3005                 emit_uvalue (logbuffer, agent->index);
3006                 emit_byte (logbuffer, type);
3007                 switch (type) {
3008                 case MONO_COUNTER_INT:
3009 #if SIZEOF_VOID_P == 4
3010                 case MONO_COUNTER_WORD:
3011 #endif
3012                         emit_svalue (logbuffer, *(int*)buffer - *(int*)agent->value);
3013                         break;
3014                 case MONO_COUNTER_UINT:
3015                         emit_uvalue (logbuffer, *(guint*)buffer - *(guint*)agent->value);
3016                         break;
3017                 case MONO_COUNTER_TIME_INTERVAL:
3018                 case MONO_COUNTER_LONG:
3019 #if SIZEOF_VOID_P == 8
3020                 case MONO_COUNTER_WORD:
3021 #endif
3022                         emit_svalue (logbuffer, *(gint64*)buffer - *(gint64*)agent->value);
3023                         break;
3024                 case MONO_COUNTER_ULONG:
3025                         emit_uvalue (logbuffer, *(guint64*)buffer - *(guint64*)agent->value);
3026                         break;
3027                 case MONO_COUNTER_DOUBLE:
3028                         emit_double (logbuffer, *(double*)buffer);
3029                         break;
3030                 case MONO_COUNTER_STRING:
3031                         if (size == 0) {
3032                                 emit_byte (logbuffer, 0);
3033                         } else {
3034                                 emit_byte (logbuffer, 1);
3035                                 emit_string (logbuffer, (char*)buffer, size);
3036                         }
3037                         break;
3038                 default:
3039                         g_assert_not_reached ();
3040                 }
3041
3042                 if (type == MONO_COUNTER_STRING && size > agent->value_size) {
3043                         agent->value = g_realloc (agent->value, size);
3044                         agent->value_size = size;
3045                 }
3046
3047                 if (size > 0)
3048                         memcpy (agent->value, buffer, size);
3049         }
3050         g_free (buffer);
3051
3052         emit_value (logbuffer, 0);
3053
3054         EXIT_LOG_EXPLICIT (DO_SEND);
3055
3056         mono_os_mutex_unlock (&counters_mutex);
3057 }
3058
3059 typedef struct _PerfCounterAgent PerfCounterAgent;
3060 struct _PerfCounterAgent {
3061         PerfCounterAgent *next;
3062         int index;
3063         char *category_name;
3064         char *name;
3065         int type;
3066         gint64 value;
3067         guint8 emitted;
3068         guint8 updated;
3069         guint8 deleted;
3070 };
3071
3072 static PerfCounterAgent *perfcounters = NULL;
3073
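/*
 * Emit a TYPE_SAMPLE_COUNTERS_DESC block describing every performance counter
 * that has not been announced in the stream yet. Called from
 * perfcounters_sample () with counters_mutex held.
 */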
3074 static void
3075 perfcounters_emit (MonoProfiler *profiler)
3076 {
3077         PerfCounterAgent *pcagent;
3078         int len = 0;
3079         int size =
3080                 EVENT_SIZE /* event */ +
3081                 LEB128_SIZE /* len */
3082         ;
3083
3084         for (pcagent = perfcounters; pcagent; pcagent = pcagent->next) {
3085                 if (pcagent->emitted)
3086                         continue;
3087
3088                 size +=
3089                         LEB128_SIZE /* section */ +
3090                         strlen (pcagent->category_name) + 1 /* category name */ +
3091                         strlen (pcagent->name) + 1 /* name */ +
3092                         BYTE_SIZE /* type */ +
3093                         BYTE_SIZE /* unit */ +
3094                         BYTE_SIZE /* variance */ +
3095                         LEB128_SIZE /* index */
3096                 ;
3097
3098                 len++;
3099         }
3100
3101         if (!len)
3102                 return;
3103
3104         ENTER_LOG (&perfcounter_descriptors_ctr, logbuffer, size);
3105
3106         emit_event (logbuffer, TYPE_SAMPLE_COUNTERS_DESC | TYPE_SAMPLE);
3107         emit_value (logbuffer, len);
3108
3109         for (pcagent = perfcounters; pcagent; pcagent = pcagent->next) {
3110                 if (pcagent->emitted)
3111                         continue;
3112
3113                 emit_value (logbuffer, MONO_COUNTER_PERFCOUNTERS);
3114                 emit_string (logbuffer, pcagent->category_name, strlen (pcagent->category_name) + 1);
3115                 emit_string (logbuffer, pcagent->name, strlen (pcagent->name) + 1);
3116                 emit_byte (logbuffer, MONO_COUNTER_LONG);
3117                 emit_byte (logbuffer, MONO_COUNTER_RAW);
3118                 emit_byte (logbuffer, MONO_COUNTER_VARIABLE);
3119                 emit_value (logbuffer, pcagent->index);
3120
3121                 pcagent->emitted = 1;
3122         }
3123
3124         EXIT_LOG_EXPLICIT (DO_SEND);
3125 }
3126
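/*
 * Callback for mono_perfcounter_foreach (): registers counters we have not
 * seen before and marks known ones as updated when their value changed.
 * Always returns TRUE so enumeration continues.
 */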
3127 static gboolean
3128 perfcounters_foreach (char *category_name, char *name, unsigned char type, gint64 value, gpointer user_data)
3129 {
3130         PerfCounterAgent *pcagent;
3131
3132         for (pcagent = perfcounters; pcagent; pcagent = pcagent->next) {
3133                 if (strcmp (pcagent->category_name, category_name) != 0 || strcmp (pcagent->name, name) != 0)
3134                         continue;
3135                 if (pcagent->value == value)
3136                         return TRUE;
3137
3138                 pcagent->value = value;
3139                 pcagent->updated = 1;
3140                 pcagent->deleted = 0;
3141                 return TRUE;
3142         }
3143
3144         pcagent = g_new0 (PerfCounterAgent, 1);
3145         pcagent->next = perfcounters;
3146         pcagent->index = counters_index++;
3147         pcagent->category_name = g_strdup (category_name);
3148         pcagent->name = g_strdup (name);
3149         pcagent->type = (int) type;
3150         pcagent->value = value;
3151         pcagent->emitted = 0;
3152         pcagent->updated = 1;
3153         pcagent->deleted = 0;
3154
3155         perfcounters = pcagent;
3156
3157         return TRUE;
3158 }
3159
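/*
 * Snapshot all system performance counters and emit a TYPE_SAMPLE_COUNTERS
 * event for those whose value changed since the previous snapshot. Counters
 * that have disappeared are flagged as deleted and skipped.
 */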
3160 static void
3161 perfcounters_sample (MonoProfiler *profiler, uint64_t timestamp)
3162 {
3163         PerfCounterAgent *pcagent;
3164         int len = 0;
3165         int size;
3166
3167         mono_os_mutex_lock (&counters_mutex);
3168
3169         /* mark all perfcounters as deleted, foreach will unmark them as necessary */
3170         for (pcagent = perfcounters; pcagent; pcagent = pcagent->next)
3171                 pcagent->deleted = 1;
3172
3173         mono_perfcounter_foreach (perfcounters_foreach, perfcounters);
3174
3175         perfcounters_emit (profiler);
3176
3177         size =
3178                 EVENT_SIZE /* event */
3179         ;
3180
3181         for (pcagent = perfcounters; pcagent; pcagent = pcagent->next) {
3182                 if (pcagent->deleted || !pcagent->updated)
3183                         continue;
3184
3185                 size +=
3186                         LEB128_SIZE /* index */ +
3187                         BYTE_SIZE /* type */ +
3188                         LEB128_SIZE /* value */
3189                 ;
3190
3191                 len++;
3192         }
3193
3194         if (!len)
3195                 goto done;
3196
3197         size +=
3198                 LEB128_SIZE /* stop marker */
3199         ;
3200
3201         ENTER_LOG (&perfcounter_samples_ctr, logbuffer, size);
3202
3203         emit_event_time (logbuffer, TYPE_SAMPLE_COUNTERS | TYPE_SAMPLE, timestamp);
3204
3205         for (pcagent = perfcounters; pcagent; pcagent = pcagent->next) {
3206                 if (pcagent->deleted || !pcagent->updated)
3207                         continue;
3208                 emit_uvalue (logbuffer, pcagent->index);
3209                 emit_byte (logbuffer, MONO_COUNTER_LONG);
3210                 emit_svalue (logbuffer, pcagent->value);
3211
3212                 pcagent->updated = 0;
3213         }
3214
3215         emit_value (logbuffer, 0);
3216
3217         EXIT_LOG_EXPLICIT (DO_SEND);
3218
3219 done:
3220         mono_os_mutex_unlock (&counters_mutex);
3221 }
3222
3223 static void
3224 counters_and_perfcounters_sample (MonoProfiler *prof)
3225 {
3226         uint64_t now = current_time ();
3227
3228         counters_sample (prof, now);
3229         perfcounters_sample (prof, now);
3230 }
3231
3232 #define COVERAGE_DEBUG(x) if (debug_coverage) {x}
3233 static mono_mutex_t coverage_mutex;
3234 static MonoConcurrentHashTable *coverage_methods = NULL;
3235 static MonoConcurrentHashTable *coverage_assemblies = NULL;
3236 static MonoConcurrentHashTable *coverage_classes = NULL;
3237
3238 static MonoConcurrentHashTable *filtered_classes = NULL;
3239 static MonoConcurrentHashTable *entered_methods = NULL;
3240 static MonoConcurrentHashTable *image_to_methods = NULL;
3241 static MonoConcurrentHashTable *suppressed_assemblies = NULL;
3242 static gboolean coverage_initialized = FALSE;
3243
3244 static GPtrArray *coverage_data = NULL;
3245 static int previous_offset = 0;
3246
3247 typedef struct {
3248         MonoLockFreeQueueNode node;
3249         MonoMethod *method;
3250 } MethodNode;
3251
3252 typedef struct {
3253         int offset;
3254         int counter;
3255         char *filename;
3256         int line;
3257         int column;
3258 } CoverageEntry;
3259
3260 static void
3261 free_coverage_entry (gpointer data, gpointer userdata)
3262 {
3263         CoverageEntry *entry = (CoverageEntry *)data;
3264         g_free (entry->filename);
3265         g_free (entry);
3266 }
3267
3268 static void
3269 obtain_coverage_for_method (MonoProfiler *prof, const MonoProfileCoverageEntry *entry)
3270 {
3271         int offset = entry->iloffset - previous_offset;
3272         CoverageEntry *e = g_new (CoverageEntry, 1);
3273
3274         previous_offset = entry->iloffset;
3275
3276         e->offset = offset;
3277         e->counter = entry->counter;
3278         e->filename = g_strdup(entry->filename ? entry->filename : "");
3279         e->line = entry->line;
3280         e->column = entry->col;
3281
3282         g_ptr_array_add (coverage_data, e);
3283 }
3284
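/*
 * Rewrite a generic type name such as Foo<T1,T2> into the IL-style Foo`2,
 * dropping the type argument list. A bare <> (as produced for compiler-generated
 * types) is emitted as the already-escaped sequence "&lt;&gt;". The returned
 * string is newly allocated; the caller must free it.
 */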
3285 static char *
3286 parse_generic_type_names(char *name)
3287 {
3288         char *new_name, *ret;
3289         int within_generic_declaration = 0, generic_members = 1;
3290
3291         if (name == NULL || *name == '\0')
3292                 return g_strdup ("");
3293
3294         if (!(ret = new_name = (char *) g_calloc (strlen (name) * 4 + 1, sizeof (char))))
3295                 return NULL;
3296
3297         do {
3298                 switch (*name) {
3299                         case '<':
3300                                 within_generic_declaration = 1;
3301                                 break;
3302
3303                         case '>':
3304                                 within_generic_declaration = 0;
3305
3306                                 if (*(name - 1) != '<') {
3307                                         *new_name++ = '`';
3308                                         *new_name++ = '0' + generic_members;
3309                                 } else {
3310                                         memcpy (new_name, "&lt;&gt;", 8);
3311                                         new_name += 8;
3312                                 }
3313
3314                                 generic_members = 0;
3315                                 break;
3316
3317                         case ',':
3318                                 generic_members++;
3319                                 break;
3320
3321                         default:
3322                                 if (!within_generic_declaration)
3323                                         *new_name++ = *name;
3324
3325                                 break;
3326                 }
3327         } while (*name++);
3328
3329         return ret;
3330 }
3331
3332 static int method_id;
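/*
 * Called for each entry in coverage_methods: collects the per-IL-offset
 * coverage data for the method, then emits a TYPE_COVERAGE_METHOD event
 * followed by one TYPE_COVERAGE_STATEMENT event per recorded statement.
 */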
3333 static void
3334 build_method_buffer (gpointer key, gpointer value, gpointer userdata)
3335 {
3336         MonoMethod *method = (MonoMethod *)value;
3337         MonoProfiler *prof = (MonoProfiler *)userdata;
3338         MonoClass *klass;
3339         MonoImage *image;
3340         char *class_name;
3341         const char *image_name, *method_name, *sig, *first_filename;
3342         guint i;
3343
3344         previous_offset = 0;
3345         coverage_data = g_ptr_array_new ();
3346
3347         mono_profiler_coverage_get (prof, method, obtain_coverage_for_method);
3348
3349         klass = mono_method_get_class (method);
3350         image = mono_class_get_image (klass);
3351         image_name = mono_image_get_name (image);
3352
3353         sig = mono_signature_get_desc (mono_method_signature (method), TRUE);
3354         class_name = parse_generic_type_names (mono_type_get_name (mono_class_get_type (klass)));
3355         method_name = mono_method_get_name (method);
3356
3357         if (coverage_data->len != 0) {
3358                 CoverageEntry *entry = (CoverageEntry *)coverage_data->pdata[0];
3359                 first_filename = entry->filename ? entry->filename : "";
3360         } else
3361                 first_filename = "";
3362
3363         image_name = image_name ? image_name : "";
3364         sig = sig ? sig : "";
3365         method_name = method_name ? method_name : "";
3366
3367         ENTER_LOG (&coverage_methods_ctr, logbuffer,
3368                 EVENT_SIZE /* event */ +
3369                 strlen (image_name) + 1 /* image name */ +
3370                 strlen (class_name) + 1 /* class name */ +
3371                 strlen (method_name) + 1 /* method name */ +
3372                 strlen (sig) + 1 /* signature */ +
3373                 strlen (first_filename) + 1 /* first file name */ +
3374                 LEB128_SIZE /* token */ +
3375                 LEB128_SIZE /* method id */ +
3376                 LEB128_SIZE /* entries */
3377         );
3378
3379         emit_event (logbuffer, TYPE_COVERAGE_METHOD | TYPE_COVERAGE);
3380         emit_string (logbuffer, image_name, strlen (image_name) + 1);
3381         emit_string (logbuffer, class_name, strlen (class_name) + 1);
3382         emit_string (logbuffer, method_name, strlen (method_name) + 1);
3383         emit_string (logbuffer, sig, strlen (sig) + 1);
3384         emit_string (logbuffer, first_filename, strlen (first_filename) + 1);
3385
3386         emit_uvalue (logbuffer, mono_method_get_token (method));
3387         emit_uvalue (logbuffer, method_id);
3388         emit_value (logbuffer, coverage_data->len);
3389
3390         EXIT_LOG_EXPLICIT (DO_SEND);
3391
3392         for (i = 0; i < coverage_data->len; i++) {
3393                 CoverageEntry *entry = (CoverageEntry *)coverage_data->pdata[i];
3394
3395                 ENTER_LOG (&coverage_statements_ctr, logbuffer,
3396                         EVENT_SIZE /* event */ +
3397                         LEB128_SIZE /* method id */ +
3398                         LEB128_SIZE /* offset */ +
3399                         LEB128_SIZE /* counter */ +
3400                         LEB128_SIZE /* line */ +
3401                         LEB128_SIZE /* column */
3402                 );
3403
3404                 emit_event (logbuffer, TYPE_COVERAGE_STATEMENT | TYPE_COVERAGE);
3405                 emit_uvalue (logbuffer, method_id);
3406                 emit_uvalue (logbuffer, entry->offset);
3407                 emit_uvalue (logbuffer, entry->counter);
3408                 emit_uvalue (logbuffer, entry->line);
3409                 emit_uvalue (logbuffer, entry->column);
3410
3411                 EXIT_LOG_EXPLICIT (DO_SEND);
3412         }
3413
3414         method_id++;
3415
3416         g_free (class_name);
3417
3418         g_ptr_array_foreach (coverage_data, free_coverage_entry, NULL);
3419         g_ptr_array_free (coverage_data, TRUE);
3420         coverage_data = NULL;
3421 }
3422
3423 /* Dequeues and frees every node in the queue, returning how many nodes it contained. */
3424 static guint
3425 count_queue (MonoLockFreeQueue *queue)
3426 {
3427         MonoLockFreeQueueNode *node;
3428         guint count = 0;
3429
3430         while ((node = mono_lock_free_queue_dequeue (queue))) {
3431                 count++;
3432                 mono_thread_hazardous_try_free (node, g_free);
3433         }
3434
3435         return count;
3436 }
3437
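/*
 * Called for each entry in coverage_classes: emits a TYPE_COVERAGE_CLASS event
 * with the class's method count and the number of fully covered methods.
 * Partial coverage is not implemented and is always reported as 0.
 */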
3438 static void
3439 build_class_buffer (gpointer key, gpointer value, gpointer userdata)
3440 {
3441         MonoClass *klass = (MonoClass *)key;
3442         MonoLockFreeQueue *class_methods = (MonoLockFreeQueue *)value;
3443         MonoImage *image;
3444         char *class_name;
3445         const char *assembly_name;
3446         int number_of_methods, partially_covered;
3447         guint fully_covered;
3448
3449         image = mono_class_get_image (klass);
3450         assembly_name = mono_image_get_name (image);
3451         class_name = mono_type_get_name (mono_class_get_type (klass));
3452
3453         assembly_name = assembly_name ? assembly_name : "";
3454         number_of_methods = mono_class_num_methods (klass);
3455         fully_covered = count_queue (class_methods);
3456         /* We don't handle partial covered yet */
3457         partially_covered = 0;
3458
3459         ENTER_LOG (&coverage_classes_ctr, logbuffer,
3460                 EVENT_SIZE /* event */ +
3461                 strlen (assembly_name) + 1 /* assembly name */ +
3462                 strlen (class_name) + 1 /* class name */ +
3463                 LEB128_SIZE /* no. methods */ +
3464                 LEB128_SIZE /* fully covered */ +
3465                 LEB128_SIZE /* partially covered */
3466         );
3467
3468         emit_event (logbuffer, TYPE_COVERAGE_CLASS | TYPE_COVERAGE);
3469         emit_string (logbuffer, assembly_name, strlen (assembly_name) + 1);
3470         emit_string (logbuffer, class_name, strlen (class_name) + 1);
3471         emit_uvalue (logbuffer, number_of_methods);
3472         emit_uvalue (logbuffer, fully_covered);
3473         emit_uvalue (logbuffer, partially_covered);
3474
3475         EXIT_LOG_EXPLICIT (DO_SEND);
3476
3477         g_free (class_name);
3478 }
3479
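/*
 * Compute coverage statistics for an image: the number of rows in its METHOD
 * table and the number of its methods tracked in image_to_methods. Partial
 * coverage is not implemented and is always reported as 0.
 */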
3480 static void
3481 get_coverage_for_image (MonoImage *image, int *number_of_methods, guint *fully_covered, int *partially_covered)
3482 {
3483         MonoLockFreeQueue *image_methods = (MonoLockFreeQueue *)mono_conc_hashtable_lookup (image_to_methods, image);
3484
3485         *number_of_methods = mono_image_get_table_rows (image, MONO_TABLE_METHOD);
3486         if (image_methods)
3487                 *fully_covered = count_queue (image_methods);
3488         else
3489                 *fully_covered = 0;
3490
3491         // FIXME: We don't handle partially covered yet.
3492         *partially_covered = 0;
3493 }
3494
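/*
 * Called for each entry in coverage_assemblies: emits a TYPE_COVERAGE_ASSEMBLY
 * event with the assembly's name, GUID, file name and per-image coverage
 * statistics.
 */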
3495 static void
3496 build_assembly_buffer (gpointer key, gpointer value, gpointer userdata)
3497 {
3498         MonoAssembly *assembly = (MonoAssembly *)value;
3499         MonoImage *image = mono_assembly_get_image (assembly);
3500         const char *name, *guid, *filename;
3501         int number_of_methods = 0, partially_covered = 0;
3502         guint fully_covered = 0;
3503
3504         name = mono_image_get_name (image);
3505         guid = mono_image_get_guid (image);
3506         filename = mono_image_get_filename (image);
3507
3508         name = name ? name : "";
3509         guid = guid ? guid : "";
3510         filename = filename ? filename : "";
3511
3512         get_coverage_for_image (image, &number_of_methods, &fully_covered, &partially_covered);
3513
3514         ENTER_LOG (&coverage_assemblies_ctr, logbuffer,
3515                 EVENT_SIZE /* event */ +
3516                 strlen (name) + 1 /* name */ +
3517                 strlen (guid) + 1 /* guid */ +
3518                 strlen (filename) + 1 /* file name */ +
3519                 LEB128_SIZE /* no. methods */ +
3520                 LEB128_SIZE /* fully covered */ +
3521                 LEB128_SIZE /* partially covered */
3522         );
3523
3524         emit_event (logbuffer, TYPE_COVERAGE_ASSEMBLY | TYPE_COVERAGE);
3525         emit_string (logbuffer, name, strlen (name) + 1);
3526         emit_string (logbuffer, guid, strlen (guid) + 1);
3527         emit_string (logbuffer, filename, strlen (filename) + 1);
3528         emit_uvalue (logbuffer, number_of_methods);
3529         emit_uvalue (logbuffer, fully_covered);
3530         emit_uvalue (logbuffer, partially_covered);
3531
3532         EXIT_LOG_EXPLICIT (DO_SEND);
3533 }
3534
3535 static void
3536 dump_coverage (MonoProfiler *prof)
3537 {
3538         if (!coverage_initialized)
3539                 return;
3540
3541         COVERAGE_DEBUG(fprintf (stderr, "Coverage: Started dump\n");)
3542         method_id = 0;
3543
3544         mono_os_mutex_lock (&coverage_mutex);
3545         mono_conc_hashtable_foreach (coverage_assemblies, build_assembly_buffer, NULL);
3546         mono_conc_hashtable_foreach (coverage_classes, build_class_buffer, NULL);
3547         mono_conc_hashtable_foreach (coverage_methods, build_method_buffer, prof);
3548         mono_os_mutex_unlock (&coverage_mutex);
3549
3550         COVERAGE_DEBUG(fprintf (stderr, "Coverage: Finished dump\n");)
3551 }
3552
3553 static void
3554 process_method_enter_coverage (MonoProfiler *prof, MonoMethod *method)
3555 {
3556         MonoClass *klass;
3557         MonoImage *image;
3558
3559         if (!coverage_initialized)
3560                 return;
3561
3562         klass = mono_method_get_class (method);
3563         image = mono_class_get_image (klass);
3564
3565         if (mono_conc_hashtable_lookup (suppressed_assemblies, (gpointer) mono_image_get_name (image)))
3566                 return;
3567
3568         mono_os_mutex_lock (&coverage_mutex);
3569         mono_conc_hashtable_insert (entered_methods, method, method);
3570         mono_os_mutex_unlock (&coverage_mutex);
3571 }
3572
3573 static MonoLockFreeQueueNode *
3574 create_method_node (MonoMethod *method)
3575 {
3576         MethodNode *node = (MethodNode *) g_malloc (sizeof (MethodNode));
3577         mono_lock_free_queue_node_init ((MonoLockFreeQueueNode *) node, FALSE);
3578         node->method = method;
3579
3580         return (MonoLockFreeQueueNode *) node;
3581 }
3582
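/*
 * Decide whether a method should be tracked for coverage. Internal calls,
 * P/Invokes, methods from suppressed (core) assemblies and classes excluded
 * by the user-supplied +/- filters are rejected. Accepted methods are added
 * to coverage_methods and to the per-image and per-class queues. Returns TRUE
 * if the method should be instrumented.
 */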
3583 static gboolean
3584 coverage_filter (MonoProfiler *prof, MonoMethod *method)
3585 {
3586         MonoError error;
3587         MonoClass *klass;
3588         MonoImage *image;
3589         MonoAssembly *assembly;
3590         MonoMethodHeader *header;
3591         guint32 iflags, flags, code_size;
3592         char *fqn, *classname;
3593         gboolean has_positive, found;
3594         MonoLockFreeQueue *image_methods, *class_methods;
3595         MonoLockFreeQueueNode *node;
3596
3597         g_assert (coverage_initialized && "Why are we being asked for coverage filter info when we're not doing coverage?");
3598
3599         COVERAGE_DEBUG(fprintf (stderr, "Coverage filter for %s\n", mono_method_get_name (method));)
3600
3601         flags = mono_method_get_flags (method, &iflags);
3602         if ((iflags & 0x1000 /*METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL*/) ||
3603             (flags & 0x2000 /*METHOD_ATTRIBUTE_PINVOKE_IMPL*/)) {
3604                 COVERAGE_DEBUG(fprintf (stderr, "   Internal call or pinvoke - ignoring\n");)
3605                 return FALSE;
3606         }
3607
3608         // Don't need to do anything else if we're already tracking this method
3609         if (mono_conc_hashtable_lookup (coverage_methods, method)) {
3610                 COVERAGE_DEBUG(fprintf (stderr, "   Already tracking\n");)
3611                 return TRUE;
3612         }
3613
3614         klass = mono_method_get_class (method);
3615         image = mono_class_get_image (klass);
3616
3617         // Don't handle coverage for the core assemblies
3618         if (mono_conc_hashtable_lookup (suppressed_assemblies, (gpointer) mono_image_get_name (image)) != NULL)
3619                 return FALSE;
3620
3621         if (prof->coverage_filters) {
3622                 /* Check already filtered classes first */
3623                 if (mono_conc_hashtable_lookup (filtered_classes, klass)) {
3624                         COVERAGE_DEBUG(fprintf (stderr, "   Already filtered\n");)
3625                         return FALSE;
3626                 }
3627
3628                 classname = mono_type_get_name (mono_class_get_type (klass));
3629
3630                 fqn = g_strdup_printf ("[%s]%s", mono_image_get_name (image), classname);
3631
3632                 COVERAGE_DEBUG(fprintf (stderr, "   Looking for %s in filter\n", fqn);)
3633                 // Check positive filters first
3634                 has_positive = FALSE;
3635                 found = FALSE;
3636                 for (guint i = 0; i < prof->coverage_filters->len; ++i) {
3637                         char *filter = (char *)g_ptr_array_index (prof->coverage_filters, i);
3638
3639                         if (filter [0] == '+') {
3640                                 filter = &filter [1];
3641
3642                                 COVERAGE_DEBUG(fprintf (stderr, "   Checking against +%s ...", filter);)
3643
3644                                 if (strstr (fqn, filter) != NULL) {
3645                                         COVERAGE_DEBUG(fprintf (stderr, "matched\n");)
3646                                         found = TRUE;
3647                                 } else
3648                                         COVERAGE_DEBUG(fprintf (stderr, "no match\n");)
3649
3650                                 has_positive = TRUE;
3651                         }
3652                 }
3653
3654                 if (has_positive && !found) {
3655                         COVERAGE_DEBUG(fprintf (stderr, "   Positive match was not found\n");)
3656
3657                         mono_os_mutex_lock (&coverage_mutex);
3658                         mono_conc_hashtable_insert (filtered_classes, klass, klass);
3659                         mono_os_mutex_unlock (&coverage_mutex);
3660                         g_free (fqn);
3661                         g_free (classname);
3662
3663                         return FALSE;
3664                 }
3665
3666                 for (guint i = 0; i < prof->coverage_filters->len; ++i) {
3667                         // FIXME: Is substring search sufficient?
3668                         char *filter = (char *)g_ptr_array_index (prof->coverage_filters, i);
3669                         if (filter [0] == '+')
3670                                 continue;
3671
3672                         // Skip '-'
3673                         filter = &filter [1];
3674                         COVERAGE_DEBUG(fprintf (stderr, "   Checking against -%s ...", filter);)
3675
3676                         if (strstr (fqn, filter) != NULL) {
3677                                 COVERAGE_DEBUG(fprintf (stderr, "matched\n");)
3678
3679                                 mono_os_mutex_lock (&coverage_mutex);
3680                                 mono_conc_hashtable_insert (filtered_classes, klass, klass);
3681                                 mono_os_mutex_unlock (&coverage_mutex);
3682                                 g_free (fqn);
3683                                 g_free (classname);
3684
3685                                 return FALSE;
3686                         } else
3687                                 COVERAGE_DEBUG(fprintf (stderr, "no match\n");)
3688
3689                 }
3690
3691                 g_free (fqn);
3692                 g_free (classname);
3693         }
3694
3695         COVERAGE_DEBUG(fprintf (stderr, "   Handling coverage for %s\n", mono_method_get_name (method));)
3696         header = mono_method_get_header_checked (method, &error);
3697         mono_error_cleanup (&error);
3698
3699         mono_method_header_get_code (header, &code_size, NULL);
3700
3701         assembly = mono_image_get_assembly (image);
3702
3703         // Need to keep the assemblies around for as long as they are kept in the hashtable
3704         // Nunit, for example, has a habit of unloading them before the coverage statistics are
3705         // generated causing a crash. See https://bugzilla.xamarin.com/show_bug.cgi?id=39325
3706         mono_assembly_addref (assembly);
3707
3708         mono_os_mutex_lock (&coverage_mutex);
3709         mono_conc_hashtable_insert (coverage_methods, method, method);
3710         mono_conc_hashtable_insert (coverage_assemblies, assembly, assembly);
3711         mono_os_mutex_unlock (&coverage_mutex);
3712
3713         image_methods = (MonoLockFreeQueue *)mono_conc_hashtable_lookup (image_to_methods, image);
3714
3715         if (image_methods == NULL) {
3716                 image_methods = (MonoLockFreeQueue *) g_malloc (sizeof (MonoLockFreeQueue));
3717                 mono_lock_free_queue_init (image_methods);
3718                 mono_os_mutex_lock (&coverage_mutex);
3719                 mono_conc_hashtable_insert (image_to_methods, image, image_methods);
3720                 mono_os_mutex_unlock (&coverage_mutex);
3721         }
3722
3723         node = create_method_node (method);
3724         mono_lock_free_queue_enqueue (image_methods, node);
3725
3726         class_methods = (MonoLockFreeQueue *)mono_conc_hashtable_lookup (coverage_classes, klass);
3727
3728         if (class_methods == NULL) {
3729                 class_methods = (MonoLockFreeQueue *) g_malloc (sizeof (MonoLockFreeQueue));
3730                 mono_lock_free_queue_init (class_methods);
3731                 mono_os_mutex_lock (&coverage_mutex);
3732                 mono_conc_hashtable_insert (coverage_classes, klass, class_methods);
3733                 mono_os_mutex_unlock (&coverage_mutex);
3734         }
3735
3736         node = create_method_node (method);
3737         mono_lock_free_queue_enqueue (class_methods, node);
3738
3739         return TRUE;
3740 }
3741
3742 #define LINE_BUFFER_SIZE 4096
3743 /* Maximum supported file size of 128 KB. */
3744 #define MAX_FILE_SIZE (128 * 1024)
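/*
 * Read an entire stream into a newly allocated, NUL-terminated buffer. Returns
 * NULL if the stream cannot be seeked or the file is larger than MAX_FILE_SIZE.
 */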
3745 static char *
3746 get_file_content (FILE *stream)
3747 {
3748         char *buffer;
3749         ssize_t bytes_read;
3750         long filesize;
3751         int res, offset = 0;
3752
3753         res = fseek (stream, 0, SEEK_END);
3754         if (res < 0)
3755                 return NULL;
3756
3757         filesize = ftell (stream);
3758         if (filesize < 0)
3759                 return NULL;
3760
3761         res = fseek (stream, 0, SEEK_SET);
3762         if (res < 0)
3763                 return NULL;
3764
3765         if (filesize > MAX_FILE_SIZE)
3766                 return NULL;
3767
3768         buffer = (char *) g_malloc ((filesize + 1) * sizeof (char));
3769         while ((bytes_read = fread (buffer + offset, 1, LINE_BUFFER_SIZE, stream)) > 0)
3770                 offset += bytes_read;
3771
3772         /* NULL terminate our buffer */
3773         buffer[filesize] = '\0';
3774         return buffer;
3775 }
3776
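/*
 * Return the next line of @contents, NUL-terminating it in place, and set
 * *next_start to the remainder of the buffer (or NULL once the input is
 * exhausted).
 */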
3777 static char *
3778 get_next_line (char *contents, char **next_start)
3779 {
3780         char *p = contents;
3781
3782         if (p == NULL || *p == '\0') {
3783                 *next_start = NULL;
3784                 return NULL;
3785         }
3786
3787         while (*p != '\n' && *p != '\0')
3788                 p++;
3789
3790         if (*p == '\n') {
3791                 *p = '\0';
3792                 *next_start = p + 1;
3793         } else
3794                 *next_start = NULL;
3795
3796         return contents;
3797 }
3798
3799 static void
3800 init_suppressed_assemblies (void)
3801 {
3802         char *content;
3803         char *line;
3804         FILE *sa_file;
3805
3806         suppressed_assemblies = mono_conc_hashtable_new (g_str_hash, g_str_equal);
3807         sa_file = fopen (SUPPRESSION_DIR "/mono-profiler-log.suppression", "r");
3808         if (sa_file == NULL)
3809                 return;
3810
3811         /* Don't need to free @content as it is referred to by the lines stored in @suppressed_assemblies */
3812         content = get_file_content (sa_file);
3813         if (content == NULL) {
3814                 g_error ("Could not read mono-profiler-log.suppression, or it is larger than 128 KB - aborting\n");
3815         }
3816
3817         while ((line = get_next_line (content, &content))) {
3818                 line = g_strchomp (g_strchug (line));
3819                 /* No locking needed as we're doing initialization */
3820                 mono_conc_hashtable_insert (suppressed_assemblies, line, line);
3821         }
3822
3823         fclose (sa_file);
3824 }
3825
3826 static void
3827 parse_cov_filter_file (GPtrArray *filters, const char *file)
3828 {
3829         FILE *filter_file;
3830         char *line, *content;
3831
3832         filter_file = fopen (file, "r");
3833         if (filter_file == NULL) {
3834                 fprintf (stderr, "Unable to open %s\n", file);
3835                 return;
3836         }
3837
3838         /* Don't need to free content as it is referred to by the lines stored in @filters */
3839         content = get_file_content (filter_file);
3840         if (content == NULL)
3841                 fprintf (stderr, "WARNING: Could not read %s, or it is larger than 128 KB - ignoring\n", file);
3842
3843         while ((line = get_next_line (content, &content)))
3844                 g_ptr_array_add (filters, g_strchug (g_strchomp (line)));
3845
3846         fclose (filter_file);
3847 }
3848
3849 static void
3850 coverage_init (MonoProfiler *prof)
3851 {
3852         g_assert (!coverage_initialized && "Why are we initializing coverage twice?");
3853
3854         COVERAGE_DEBUG(fprintf (stderr, "Coverage initialized\n");)
3855
3856         mono_os_mutex_init (&coverage_mutex);
3857         coverage_methods = mono_conc_hashtable_new (NULL, NULL);
3858         coverage_assemblies = mono_conc_hashtable_new (NULL, NULL);
3859         coverage_classes = mono_conc_hashtable_new (NULL, NULL);
3860         filtered_classes = mono_conc_hashtable_new (NULL, NULL);
3861         entered_methods = mono_conc_hashtable_new (NULL, NULL);
3862         image_to_methods = mono_conc_hashtable_new (NULL, NULL);
3863         init_suppressed_assemblies ();
3864
3865         coverage_initialized = TRUE;
3866 }
3867
3868 static void
3869 unref_coverage_assemblies (gpointer key, gpointer value, gpointer userdata)
3870 {
3871         MonoAssembly *assembly = (MonoAssembly *)value;
3872         mono_assembly_close (assembly);
3873 }
3874
3875 static void
3876 free_sample_hit (gpointer p)
3877 {
3878         mono_lock_free_free (p, SAMPLE_BLOCK_SIZE);
3879 }
3880
3881 static void
3882 cleanup_reusable_samples (MonoProfiler *prof)
3883 {
3884         SampleHit *sample;
3885
3886         while ((sample = (SampleHit *) mono_lock_free_queue_dequeue (&prof->sample_reuse_queue)))
3887                 mono_thread_hazardous_try_free (sample, free_sample_hit);
3888 }
3889
3890 static void
3891 log_shutdown (MonoProfiler *prof)
3892 {
3893         InterlockedWrite (&in_shutdown, 1);
3894
3895         if (!no_counters)
3896                 counters_and_perfcounters_sample (prof);
3897
3898         dump_coverage (prof);
3899
3900         char c = 1;
3901
3902         if (write (prof->pipes [1], &c, 1) != 1) {
3903                 fprintf (stderr, "Could not write to pipe: %s\n", strerror (errno));
3904                 exit (1);
3905         }
3906
3907         mono_native_thread_join (prof->helper_thread);
3908
3909         mono_os_mutex_destroy (&counters_mutex);
3910
3911         MonoCounterAgent *mc_next;
3912
3913         for (MonoCounterAgent *cur = counters; cur; cur = mc_next) {
3914                 mc_next = cur->next;
3915                 g_free (cur);
3916         }
3917
3918         PerfCounterAgent *pc_next;
3919
3920         for (PerfCounterAgent *cur = perfcounters; cur; cur = pc_next) {
3921                 pc_next = cur->next;
3922                 g_free (cur);
3923         }
3924
3925         /*
3926          * Ensure that we empty the LLS completely, even if some nodes are
3927          * not immediately removed upon calling mono_lls_remove (), by
3928          * iterating until the head is NULL.
3929          */
3930         while (profiler_thread_list.head) {
3931                 MONO_LLS_FOREACH_SAFE (&profiler_thread_list, MonoProfilerThread, thread) {
3932                         g_assert (thread->attached && "Why is a thread in the LLS not attached?");
3933
3934                         remove_thread (thread);
3935                 } MONO_LLS_FOREACH_SAFE_END
3936         }
3937
3938         /*
3939          * Ensure that all threads have been freed, so that we don't miss any
3940          * buffers when we shut down the writer thread below.
3941          */
3942         mono_thread_hazardous_try_free_all ();
3943
3944         InterlockedWrite (&prof->run_dumper_thread, 0);
3945         mono_os_sem_post (&prof->dumper_queue_sem);
3946         mono_native_thread_join (prof->dumper_thread);
3947         mono_os_sem_destroy (&prof->dumper_queue_sem);
3948
3949         InterlockedWrite (&prof->run_writer_thread, 0);
3950         mono_os_sem_post (&prof->writer_queue_sem);
3951         mono_native_thread_join (prof->writer_thread);
3952         mono_os_sem_destroy (&prof->writer_queue_sem);
3953
3954         /*
3955          * Free all writer queue entries, and ensure that all sample hits will be
3956          * added to the sample reuse queue.
3957          */
3958         mono_thread_hazardous_try_free_all ();
3959
3960         cleanup_reusable_samples (prof);
3961
3962         /*
3963          * Finally, make sure that all sample hits are freed. This should cover all
3964          * hazardous data from the profiler. We can now be sure that the runtime
3965          * won't later invoke free functions in the profiler library after it has
3966          * been unloaded.
3967          */
3968         mono_thread_hazardous_try_free_all ();
3969
3970         gint32 state = InterlockedRead (&buffer_lock_state);
3971
3972         g_assert (!(state & 0xFFFF) && "Why is the reader count still non-zero?");
3973         g_assert (!(state >> 16) && "Why is the exclusive lock still held?");
3974
3975 #if defined (HAVE_SYS_ZLIB)
3976         if (prof->gzfile)
3977                 gzclose (prof->gzfile);
3978 #endif
3979         if (prof->pipe_output)
3980                 pclose (prof->file);
3981         else
3982                 fclose (prof->file);
3983
3984         mono_conc_hashtable_destroy (prof->method_table);
3985         mono_os_mutex_destroy (&prof->method_table_mutex);
3986
3987         if (coverage_initialized) {
3988                 mono_os_mutex_lock (&coverage_mutex);
3989                 mono_conc_hashtable_foreach (coverage_assemblies, unref_coverage_assemblies, prof);
3990                 mono_os_mutex_unlock (&coverage_mutex);
3991
3992                 mono_conc_hashtable_destroy (coverage_methods);
3993                 mono_conc_hashtable_destroy (coverage_assemblies);
3994                 mono_conc_hashtable_destroy (coverage_classes);
3995                 mono_conc_hashtable_destroy (filtered_classes);
3996
3997                 mono_conc_hashtable_destroy (entered_methods);
3998                 mono_conc_hashtable_destroy (image_to_methods);
3999                 mono_conc_hashtable_destroy (suppressed_assemblies);
4000                 mono_os_mutex_destroy (&coverage_mutex);
4001         }
4002
4003         PROF_TLS_FREE ();
4004
4005         g_free (prof->args);
4006         g_free (prof);
4007 }
4008
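/*
 * Expand the output file name template: %t is replaced with a UTC timestamp
 * (YYYYMMDDhhmmss), %p with the process ID and %% with a literal '%'. If the
 * template contains neither %t nor %p, a plain copy is returned.
 */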
4009 static char*
4010 new_filename (const char* filename)
4011 {
4012         time_t t = time (NULL);
4013         int pid = process_id ();
4014         char pid_buf [16];
4015         char time_buf [16];
4016         char *res, *d;
4017         const char *p;
4018         int count_dates = 0;
4019         int count_pids = 0;
4020         int s_date, s_pid;
4021         struct tm *ts;
4022         for (p = filename; *p; p++) {
4023                 if (*p != '%')
4024                         continue;
4025                 p++;
4026                 if (*p == 't')
4027                         count_dates++;
4028                 else if (*p == 'p')
4029                         count_pids++;
4030                 else if (*p == 0)
4031                         break;
4032         }
4033         if (!count_dates && !count_pids)
4034                 return pstrdup (filename);
4035         snprintf (pid_buf, sizeof (pid_buf), "%d", pid);
4036         ts = gmtime (&t);
4037         snprintf (time_buf, sizeof (time_buf), "%d%02d%02d%02d%02d%02d",
4038                 1900 + ts->tm_year, 1 + ts->tm_mon, ts->tm_mday, ts->tm_hour, ts->tm_min, ts->tm_sec);
4039         s_date = strlen (time_buf);
4040         s_pid = strlen (pid_buf);
4041         d = res = (char *) g_malloc (strlen (filename) + s_date * count_dates + s_pid * count_pids);
4042         for (p = filename; *p; p++) {
4043                 if (*p != '%') {
4044                         *d++ = *p;
4045                         continue;
4046                 }
4047                 p++;
4048                 if (*p == 't') {
4049                         strcpy (d, time_buf);
4050                         d += s_date;
4051                         continue;
4052                 } else if (*p == 'p') {
4053                         strcpy (d, pid_buf);
4054                         d += s_pid;
4055                         continue;
4056                 } else if (*p == '%') {
4057                         *d++ = '%';
4058                         continue;
4059                 } else if (*p == 0)
4060                         break;
4061                 *d++ = '%';
4062                 *d++ = *p;
4063         }
4064         *d = 0;
4065         return res;
4066 }
4067
4068 static void
4069 add_to_fd_set (fd_set *set, int fd, int *max_fd)
4070 {
4071         /*
4072          * This should only trigger for the basic FDs (server socket, pipes) at
4073          * startup if for some mysterious reason they're too large. In this case,
4074          * the profiler really can't function, and we're better off printing an
4075          * error and exiting.
4076          */
4077         if (fd >= FD_SETSIZE) {
4078                 fprintf (stderr, "File descriptor is out of bounds for fd_set: %d\n", fd);
4079                 exit (1);
4080         }
4081
4082         FD_SET (fd, set);
4083
4084         if (*max_fd < fd)
4085                 *max_fd = fd;
4086 }
4087
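/*
 * The helper thread owns the command server socket and the shutdown pipe. It
 * wakes up at least once per second to sample counters (unless disabled) and
 * write a sync point, accepts new command connections, and requests an
 * on-demand heap shot when a client sends "heapshot\n".
 */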
4088 static void *
4089 helper_thread (void *arg)
4090 {
4091         MonoProfiler *prof = (MonoProfiler *) arg;
4092
4093         mono_threads_attach_tools_thread ();
4094         mono_native_thread_set_name (mono_native_thread_id_get (), "Profiler helper");
4095
4096         MonoProfilerThread *thread = init_thread (prof, FALSE);
4097
4098         GArray *command_sockets = g_array_new (FALSE, FALSE, sizeof (int));
4099
4100         while (1) {
4101                 fd_set rfds;
4102                 int max_fd = -1;
4103
4104                 FD_ZERO (&rfds);
4105
4106                 add_to_fd_set (&rfds, prof->server_socket, &max_fd);
4107                 add_to_fd_set (&rfds, prof->pipes [0], &max_fd);
4108
4109                 for (gint i = 0; i < command_sockets->len; i++)
4110                         add_to_fd_set (&rfds, g_array_index (command_sockets, int, i), &max_fd);
4111
4112                 struct timeval tv = { .tv_sec = 1, .tv_usec = 0 };
4113
4114                 // Sleep for 1sec or until a file descriptor has data.
4115                 if (select (max_fd + 1, &rfds, NULL, NULL, &tv) == -1) {
4116                         if (errno == EINTR)
4117                                 continue;
4118
4119                         fprintf (stderr, "Error in mono-profiler-log server: %s\n", strerror (errno));
4120                         exit (1);
4121                 }
4122
4123                 if (!no_counters)
4124                         counters_and_perfcounters_sample (prof);
4125
4126                 buffer_lock_excl ();
4127
4128                 sync_point (SYNC_POINT_PERIODIC);
4129
4130                 buffer_unlock_excl ();
4131
4132                 // Are we shutting down?
4133                 if (FD_ISSET (prof->pipes [0], &rfds)) {
4134                         char c;
4135                         read (prof->pipes [0], &c, 1);
4136                         break;
4137                 }
4138
4139                 for (gint i = 0; i < command_sockets->len; i++) {
4140                         int fd = g_array_index (command_sockets, int, i);
4141
4142                         if (!FD_ISSET (fd, &rfds))
4143                                 continue;
4144
4145                         char buf [64];
4146                         int len = read (fd, buf, sizeof (buf) - 1);
4147
4148                         if (len == -1)
4149                                 continue;
4150
4151                         if (!len) {
4152                                 // The other end disconnected.
4153                                 g_array_remove_index (command_sockets, i);
4154                                 close (fd);
4155
4156                                 continue;
4157                         }
4158
4159                         buf [len] = 0;
4160
4161                         if (!strcmp (buf, "heapshot\n") && hs_mode_ondemand) {
4162                                 // Rely on the finalization callback triggering a GC.
4163                                 heapshot_requested = 1;
4164                                 mono_gc_finalize_notify ();
4165                         }
4166                 }
4167
4168                 if (FD_ISSET (prof->server_socket, &rfds)) {
4169                         int fd = accept (prof->server_socket, NULL, NULL);
4170
4171                         if (fd != -1) {
4172                                 if (fd >= FD_SETSIZE)
4173                                         close (fd);
4174                                 else
4175                                         g_array_append_val (command_sockets, fd);
4176                         }
4177                 }
4178         }
4179
4180         for (gint i = 0; i < command_sockets->len; i++)
4181                 close (g_array_index (command_sockets, int, i));
4182
4183         g_array_free (command_sockets, TRUE);
4184
4185         send_log_unsafe (FALSE);
4186         deinit_thread (thread);
4187
4188         mono_thread_info_detach ();
4189
4190         return NULL;
4191 }
4192
4193 static void
4194 start_helper_thread (MonoProfiler* prof)
4195 {
4196         if (pipe (prof->pipes) == -1) {
4197                 fprintf (stderr, "Cannot create pipe: %s\n", strerror (errno));
4198                 exit (1);
4199         }
4200
4201         prof->server_socket = socket (PF_INET, SOCK_STREAM, 0);
4202
4203         if (prof->server_socket == -1) {
4204                 fprintf (stderr, "Cannot create server socket: %s\n", strerror (errno));
4205                 exit (1);
4206         }
4207
4208         struct sockaddr_in server_address;
4209
4210         memset (&server_address, 0, sizeof (server_address));
4211         server_address.sin_family = AF_INET;
4212         server_address.sin_addr.s_addr = INADDR_ANY;
4213         server_address.sin_port = htons (prof->command_port);
4214
4215         if (bind (prof->server_socket, (struct sockaddr *) &server_address, sizeof (server_address)) == -1) {
4216                 fprintf (stderr, "Cannot bind server socket on port %d: %s\n", prof->command_port, strerror (errno));
4217                 close (prof->server_socket);
4218                 exit (1);
4219         }
4220
4221         if (listen (prof->server_socket, 1) == -1) {
4222                 fprintf (stderr, "Cannot listen on server socket: %s\n", strerror (errno));
4223                 close (prof->server_socket);
4224                 exit (1);
4225         }
4226
4227         socklen_t slen = sizeof (server_address);
4228
4229         if (getsockname (prof->server_socket, (struct sockaddr *) &server_address, &slen)) {
4230                 fprintf (stderr, "Could not get assigned port: %s\n", strerror (errno));
4231                 close (prof->server_socket);
4232                 exit (1);
4233         }
4234
4235         prof->command_port = ntohs (server_address.sin_port);
4236
4237         if (!mono_native_thread_create (&prof->helper_thread, helper_thread, prof)) {
4238                 fprintf (stderr, "Could not start helper thread\n");
4239                 close (prof->server_socket);
4240                 exit (1);
4241         }
4242 }
4243
4244 static void
4245 free_writer_entry (gpointer p)
4246 {
4247         mono_lock_free_free (p, WRITER_ENTRY_BLOCK_SIZE);
4248 }
4249
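/*
 * Process one entry from the writer queue: emit metadata for any methods
 * referenced by the entry's buffer first, then write the buffer itself.
 * Returns TRUE if an entry was dequeued, FALSE if the queue was empty.
 */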
4250 static gboolean
4251 handle_writer_queue_entry (MonoProfiler *prof)
4252 {
4253         WriterQueueEntry *entry;
4254
4255         if ((entry = (WriterQueueEntry *) mono_lock_free_queue_dequeue (&prof->writer_queue))) {
4256                 if (!entry->methods)
4257                         goto no_methods;
4258
4259                 gboolean wrote_methods = FALSE;
4260
4261                 /*
4262                  * Encode the method events in a temporary log buffer that we
4263                  * flush to disk before the main buffer, ensuring that all
4264                  * methods have metadata emitted before they're referenced.
4265                  *
4266                  * We use a 'proper' thread-local buffer for this as opposed
4267                  * to allocating and freeing a buffer by hand because the call
4268                  * to mono_method_full_name () below may trigger class load
4269                  * events when it retrieves the signature of the method. So a
4270                  * thread-local buffer needs to exist when such events occur.
4271                  */
4272                 for (guint i = 0; i < entry->methods->len; i++) {
4273                         MethodInfo *info = (MethodInfo *) g_ptr_array_index (entry->methods, i);
4274
4275                         if (mono_conc_hashtable_lookup (prof->method_table, info->method))
4276                                 goto free_info; // This method already has metadata emitted.
4277
4278                         /*
4279                          * Other threads use this hash table to get a general
4280                          * idea of whether a method has already been emitted to
4281                          * the stream. Due to the way we add to this table, it
4282                          * can easily happen that multiple threads queue up the
4283                          * same methods, but that's OK since eventually all
4284                          * methods will be in this table and the thread-local
4285                          * method lists will just be empty for the rest of the
4286                          * app's lifetime.
4287                          */
4288                         mono_os_mutex_lock (&prof->method_table_mutex);
4289                         mono_conc_hashtable_insert (prof->method_table, info->method, info->method);
4290                         mono_os_mutex_unlock (&prof->method_table_mutex);
4291
4292                         char *name = mono_method_full_name (info->method, 1);
4293                         int nlen = strlen (name) + 1;
4294                         void *cstart = info->ji ? mono_jit_info_get_code_start (info->ji) : NULL;
4295                         int csize = info->ji ? mono_jit_info_get_code_size (info->ji) : 0;
4296
4297                         ENTER_LOG (&method_jits_ctr, logbuffer,
4298                                 EVENT_SIZE /* event */ +
4299                                 LEB128_SIZE /* method */ +
4300                                 LEB128_SIZE /* start */ +
4301                                 LEB128_SIZE /* size */ +
4302                                 nlen /* name */
4303                         );
4304
4305                         emit_event_time (logbuffer, TYPE_JIT | TYPE_METHOD, info->time);
4306                         emit_method_inner (logbuffer, info->method);
4307                         emit_ptr (logbuffer, cstart);
4308                         emit_value (logbuffer, csize);
4309
4310                         memcpy (logbuffer->cursor, name, nlen);
4311                         logbuffer->cursor += nlen;
4312
4313                         EXIT_LOG_EXPLICIT (NO_SEND);
4314
4315                         mono_free (name);
4316
4317                         wrote_methods = TRUE;
4318
4319                 free_info:
4320                         g_free (info);
4321                 }
4322
4323                 g_ptr_array_free (entry->methods, TRUE);
4324
4325                 if (wrote_methods) {
4326                         MonoProfilerThread *thread = PROF_TLS_GET ();
4327
4328                         dump_buffer_threadless (prof, thread->buffer);
4329                         init_buffer_state (thread);
4330                 }
4331
4332         no_methods:
4333                 dump_buffer (prof, entry->buffer);
4334
4335                 mono_thread_hazardous_try_free (entry, free_writer_entry);
4336
4337                 return TRUE;
4338         }
4339
4340         return FALSE;
4341 }
4342
4343 static void *
4344 writer_thread (void *arg)
4345 {
4346         MonoProfiler *prof = (MonoProfiler *)arg;
4347
4348         mono_threads_attach_tools_thread ();
4349         mono_native_thread_set_name (mono_native_thread_id_get (), "Profiler writer");
4350
4351         dump_header (prof);
4352
4353         MonoProfilerThread *thread = init_thread (prof, FALSE);
4354
4355         while (InterlockedRead (&prof->run_writer_thread)) {
4356                 mono_os_sem_wait (&prof->writer_queue_sem, MONO_SEM_FLAGS_NONE);
4357                 handle_writer_queue_entry (prof);
4358         }
4359
4360         /* Drain any remaining entries on shutdown. */
4361         while (handle_writer_queue_entry (prof));
4362
4363         free_buffer (thread->buffer, thread->buffer->size);
4364         deinit_thread (thread);
4365
4366         mono_thread_info_detach ();
4367
4368         return NULL;
4369 }
4370
4371 static void
4372 start_writer_thread (MonoProfiler* prof)
4373 {
4374         InterlockedWrite (&prof->run_writer_thread, 1);
4375
4376         if (!mono_native_thread_create (&prof->writer_thread, writer_thread, prof)) {
4377                 fprintf (stderr, "Could not start writer thread\n");
4378                 exit (1);
4379         }
4380 }
4381
4382 static void
4383 reuse_sample_hit (gpointer p)
4384 {
4385         SampleHit *sample = p;
4386
4387         mono_lock_free_queue_node_unpoison (&sample->node);
4388         mono_lock_free_queue_enqueue (&sample->prof->sample_reuse_queue, &sample->node);
4389 }
4390
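/*
 * Process one entry from the dumper queue: resolve the managed methods for the
 * sampled stack frames and emit a TYPE_SAMPLE_HIT event, followed by metadata
 * for any unmanaged code pointers collected so far.
 */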
4391 static gboolean
4392 handle_dumper_queue_entry (MonoProfiler *prof)
4393 {
4394         SampleHit *sample;
4395
4396         if ((sample = (SampleHit *) mono_lock_free_queue_dequeue (&prof->dumper_queue))) {
4397                 for (int i = 0; i < sample->count; ++i) {
4398                         MonoMethod *method = sample->frames [i].method;
4399                         MonoDomain *domain = sample->frames [i].domain;
4400                         void *address = sample->frames [i].base_address;
4401
4402                         if (!method) {
4403                                 g_assert (domain && "What happened to the domain pointer?");
4404                                 g_assert (address && "What happened to the instruction pointer?");
4405
4406                                 MonoJitInfo *ji = mono_jit_info_table_find (domain, (char *) address);
4407
4408                                 if (ji)
4409                                         sample->frames [i].method = mono_jit_info_get_method (ji);
4410                         }
4411                 }
4412
4413                 ENTER_LOG (&sample_hits_ctr, logbuffer,
4414                         EVENT_SIZE /* event */ +
4415                         BYTE_SIZE /* type */ +
4416                         LEB128_SIZE /* tid */ +
4417                         LEB128_SIZE /* count */ +
4418                         1 * (
4419                                 LEB128_SIZE /* ip */
4420                         ) +
4421                         LEB128_SIZE /* managed count */ +
4422                         sample->count * (
4423                                 LEB128_SIZE /* method */
4424                         )
4425                 );
4426
4427                 emit_event_time (logbuffer, TYPE_SAMPLE | TYPE_SAMPLE_HIT, sample->time);
4428                 emit_byte (logbuffer, SAMPLE_CYCLES);
4429                 emit_ptr (logbuffer, (void *) sample->tid);
4430                 emit_value (logbuffer, 1);
4431
4432                 // TODO: Actual native unwinding.
4433                 for (int i = 0; i < 1; ++i) {
4434                         emit_ptr (logbuffer, sample->ip);
4435                         add_code_pointer ((uintptr_t) sample->ip);
4436                 }
4437
4438                 /* new in data version 6 */
4439                 emit_uvalue (logbuffer, sample->count);
4440
4441                 for (int i = 0; i < sample->count; ++i)
4442                         emit_method (logbuffer, sample->frames [i].method);
4443
4444                 EXIT_LOG_EXPLICIT (DO_SEND);
4445
4446                 mono_thread_hazardous_try_free (sample, reuse_sample_hit);
4447
4448                 dump_unmanaged_coderefs (prof);

                     // Report that an entry was handled so the shutdown drain loop keeps
                     // going, mirroring handle_writer_queue_entry ().
                     return TRUE;
4449         }
4450
4451         return FALSE;
4452 }
4453
4454 static void *
4455 dumper_thread (void *arg)
4456 {
4457         MonoProfiler *prof = (MonoProfiler *)arg;
4458
4459         mono_threads_attach_tools_thread ();
4460         mono_native_thread_set_name (mono_native_thread_id_get (), "Profiler dumper");
4461
4462         MonoProfilerThread *thread = init_thread (prof, FALSE);
4463
4464         while (InterlockedRead (&prof->run_dumper_thread)) {
4465                 /*
4466                  * Flush samples every second so it doesn't seem like the profiler is
4467                  * not working if the program is mostly idle.
4468                  */
4469                 if (mono_os_sem_timedwait (&prof->dumper_queue_sem, 1000, MONO_SEM_FLAGS_NONE) == MONO_SEM_TIMEDWAIT_RET_TIMEDOUT)
4470                         send_log_unsafe (FALSE);
4471
4472                 handle_dumper_queue_entry (prof);
4473         }
4474
4475         /* Drain any remaining entries on shutdown. */
4476         while (handle_dumper_queue_entry (prof));
4477
4478         send_log_unsafe (FALSE);
4479         deinit_thread (thread);
4480
4481         mono_thread_info_detach ();
4482
4483         return NULL;
4484 }
4485
4486 static void
4487 start_dumper_thread (MonoProfiler* prof)
4488 {
4489         InterlockedWrite (&prof->run_dumper_thread, 1);
4490
4491         if (!mono_native_thread_create (&prof->dumper_thread, dumper_thread, prof)) {
4492                 fprintf (stderr, "Could not start dumper thread\n");
4493                 exit (1);
4494         }
4495 }
4496
4497 static void
4498 register_counter (const char *name, gint32 *counter)
4499 {
4500         mono_counters_register (name, MONO_COUNTER_UINT | MONO_COUNTER_PROFILER | MONO_COUNTER_MONOTONIC, counter);
4501 }
4502
4503 static void
4504 runtime_initialized (MonoProfiler *profiler)
4505 {
4506         InterlockedWrite (&runtime_inited, 1);
4507
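        /*
         * One named counter is registered per internal allocation type and per
         * profiler event type; the totals are visible through the runtime's
         * mono_counters_* reporting machinery.
         */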
4508         register_counter ("Sample events allocated", &sample_allocations_ctr);
4509         register_counter ("Log buffers allocated", &buffer_allocations_ctr);
4510
4511         register_counter ("Event: Sync points", &sync_points_ctr);
4512         register_counter ("Event: Heap objects", &heap_objects_ctr);
4513         register_counter ("Event: Heap starts", &heap_starts_ctr);
4514         register_counter ("Event: Heap ends", &heap_ends_ctr);
4515         register_counter ("Event: Heap roots", &heap_roots_ctr);
4516         register_counter ("Event: GC events", &gc_events_ctr);
4517         register_counter ("Event: GC resizes", &gc_resizes_ctr);
4518         register_counter ("Event: GC allocations", &gc_allocs_ctr);
4519         register_counter ("Event: GC moves", &gc_moves_ctr);
4520         register_counter ("Event: GC handle creations", &gc_handle_creations_ctr);
4521         register_counter ("Event: GC handle deletions", &gc_handle_deletions_ctr);
4522         register_counter ("Event: GC finalize starts", &finalize_begins_ctr);
4523         register_counter ("Event: GC finalize ends", &finalize_ends_ctr);
4524         register_counter ("Event: GC finalize object starts", &finalize_object_begins_ctr);
4525         register_counter ("Event: GC finalize object ends", &finalize_object_ends_ctr);
4526         register_counter ("Event: Image loads", &image_loads_ctr);
4527         register_counter ("Event: Image unloads", &image_unloads_ctr);
4528         register_counter ("Event: Assembly loads", &assembly_loads_ctr);
4529         register_counter ("Event: Assembly unloads", &assembly_unloads_ctr);
4530         register_counter ("Event: Class loads", &class_loads_ctr);
4531         register_counter ("Event: Class unloads", &class_unloads_ctr);
4532         register_counter ("Event: Method entries", &method_entries_ctr);
4533         register_counter ("Event: Method exits", &method_exits_ctr);
4534         register_counter ("Event: Method exception leaves", &method_exception_exits_ctr);
4535         register_counter ("Event: Method JITs", &method_jits_ctr);
4536         register_counter ("Event: Code buffers", &code_buffers_ctr);
4537         register_counter ("Event: Exception throws", &exception_throws_ctr);
4538         register_counter ("Event: Exception clauses", &exception_clauses_ctr);
4539         register_counter ("Event: Monitor events", &monitor_events_ctr);
4540         register_counter ("Event: Thread starts", &thread_starts_ctr);
4541         register_counter ("Event: Thread ends", &thread_ends_ctr);
4542         register_counter ("Event: Thread names", &thread_names_ctr);
4543         register_counter ("Event: Domain loads", &domain_loads_ctr);
4544         register_counter ("Event: Domain unloads", &domain_unloads_ctr);
4545         register_counter ("Event: Domain names", &domain_names_ctr);
4546         register_counter ("Event: Context loads", &context_loads_ctr);
4547         register_counter ("Event: Context unloads", &context_unloads_ctr);
4548         register_counter ("Event: Sample binaries", &sample_ubins_ctr);
4549         register_counter ("Event: Sample symbols", &sample_usyms_ctr);
4550         register_counter ("Event: Sample hits", &sample_hits_ctr);
4551         register_counter ("Event: Counter descriptors", &counter_descriptors_ctr);
4552         register_counter ("Event: Counter samples", &counter_samples_ctr);
4553         register_counter ("Event: Performance counter descriptors", &perfcounter_descriptors_ctr);
4554         register_counter ("Event: Performance counter samples", &perfcounter_samples_ctr);
4555         register_counter ("Event: Coverage methods", &coverage_methods_ctr);
4556         register_counter ("Event: Coverage statements", &coverage_statements_ctr);
4557         register_counter ("Event: Coverage classes", &coverage_classes_ctr);
4558         register_counter ("Event: Coverage assemblies", &coverage_assemblies_ctr);
4559
4560         counters_init (profiler);
4561
4562         /*
4563          * We must start the helper thread before the writer thread. This is
4564          * because the helper thread sets up the command port which is written to
4565          * the log header by the writer thread.
4566          */
4567         start_helper_thread (profiler);
4568         start_writer_thread (profiler);
4569         start_dumper_thread (profiler);
4570 }
4571
4572 static void
4573 create_profiler (const char *args, const char *filename, GPtrArray *filters)
4574 {
4575         char *nf;
4576         int force_delete = 0;
4577
4578         log_profiler = (MonoProfiler *) g_calloc (1, sizeof (MonoProfiler));
4579         log_profiler->args = pstrdup (args);
4580         log_profiler->command_port = command_port;
4581
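        /*
         * Output destination conventions handled below:
         *   "-name"  delete an existing file first (deprecated prefix),
         *   "+name"  append the current pid to the file name,
         *   "|cmd"   pipe the log into a command,
         *   "#N"     write to the already-open file descriptor N,
         *   anything else is opened as a regular file; with no name at all the
         *   log goes to output.mlpd, or is piped to mprof-report when a report
         *   was requested.
         */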
4582         if (filename && *filename == '-') {
4583                 force_delete = 1;
4584                 filename++;
4585                 g_warning ("WARNING: the output:-FILENAME option is deprecated, the profiler now always overwrites the output file\n");
4586         }
4587
4588         //If filename begins with '+', append the pid at the end
4589         if (filename && *filename == '+')
4590                 filename = g_strdup_printf ("%s.%d", filename + 1, getpid ());
4591
4592
4593         if (!filename) {
4594                 if (do_report)
4595                         filename = "|mprof-report -";
4596                 else
4597                         filename = "output.mlpd";
4598                 nf = (char*)filename;
4599         } else {
4600                 nf = new_filename (filename);
4601                 if (do_report) {
4602                         int s = strlen (nf) + 32;
4603                         char *p = (char *) g_malloc (s);
4604                         snprintf (p, s, "|mprof-report '--out=%s' -", nf);
4605                         g_free (nf);
4606                         nf = p;
4607                 }
4608         }
4609         if (*nf == '|') {
4610                 log_profiler->file = popen (nf + 1, "w");
4611                 log_profiler->pipe_output = 1;
4612         } else if (*nf == '#') {
4613                 int fd = strtol (nf + 1, NULL, 10);
4614                 log_profiler->file = fdopen (fd, "a");
4615         } else {
4616                 if (force_delete)
4617                         unlink (nf);
4618                 log_profiler->file = fopen (nf, "wb");
4619         }
4620         if (!log_profiler->file) {
4621                 fprintf (stderr, "Cannot create profiler output: %s\n", nf);
4622                 exit (1);
4623         }
4624
4625 #if defined (HAVE_SYS_ZLIB)
4626         if (use_zip)
4627                 log_profiler->gzfile = gzdopen (fileno (log_profiler->file), "wb");
4628 #endif
4629
4630         /*
4631          * If you hit this assert while increasing MAX_FRAMES, you need to increase
4632          * SAMPLE_BLOCK_SIZE as well.
4633          */
4634         g_assert (SAMPLE_SLOT_SIZE (MAX_FRAMES) * 2 < LOCK_FREE_ALLOC_SB_USABLE_SIZE (SAMPLE_BLOCK_SIZE));
4635
4636         // FIXME: We should free this stuff too.
4637         mono_lock_free_allocator_init_size_class (&log_profiler->sample_size_class, SAMPLE_SLOT_SIZE (num_frames), SAMPLE_BLOCK_SIZE);
4638         mono_lock_free_allocator_init_allocator (&log_profiler->sample_allocator, &log_profiler->sample_size_class, MONO_MEM_ACCOUNT_PROFILER);
4639
4640         mono_lock_free_queue_init (&log_profiler->sample_reuse_queue);
4641
4642         g_assert (sizeof (WriterQueueEntry) * 2 < LOCK_FREE_ALLOC_SB_USABLE_SIZE (WRITER_ENTRY_BLOCK_SIZE));
4643
4644         // FIXME: We should free this stuff too.
4645         mono_lock_free_allocator_init_size_class (&log_profiler->writer_entry_size_class, sizeof (WriterQueueEntry), WRITER_ENTRY_BLOCK_SIZE);
4646         mono_lock_free_allocator_init_allocator (&log_profiler->writer_entry_allocator, &log_profiler->writer_entry_size_class, MONO_MEM_ACCOUNT_PROFILER);
4647
4648         mono_lock_free_queue_init (&log_profiler->writer_queue);
4649         mono_os_sem_init (&log_profiler->writer_queue_sem, 0);
4650
4651         mono_lock_free_queue_init (&log_profiler->dumper_queue);
4652         mono_os_sem_init (&log_profiler->dumper_queue_sem, 0);
4653
4654         mono_os_mutex_init (&log_profiler->method_table_mutex);
4655         log_profiler->method_table = mono_conc_hashtable_new (NULL, NULL);
4656
4657         if (do_coverage)
4658                 coverage_init (log_profiler);
4659         log_profiler->coverage_filters = filters;
4660
4661         log_profiler->startup_time = current_time ();
4662 }
4663
4664 /*
4665  * declaration to silence the compiler: this is the entry point that
4666  * mono will load from the shared library and call.
4667  */
4668 extern void
4669 mono_profiler_startup (const char *desc);
4670
4671 extern void
4672 mono_profiler_startup_log (const char *desc);
4673
4674 /*
4675  * this is the entry point that will be used when the profiler
4676  * is embedded inside the main executable.
4677  */
4678 void
4679 mono_profiler_startup_log (const char *desc)
4680 {
4681         mono_profiler_startup (desc);
4682 }
4683
4684 void
4685 mono_profiler_startup (const char *desc)
4686 {
4687         GPtrArray *filters = NULL;
4688
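        /*
         * desc is the raw profiler argument string, e.g. "log:sample,output=out.mlpd"
         * (option names shown for illustration; see proflog_parse_args for the full
         * set). The three-character module name and the ':' are skipped so only the
         * option list is parsed; with no ':' the defaults are used.
         */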
4689         proflog_parse_args (&config, desc [3] == ':' ? desc + 4 : "");
4690
4691         //XXX maybe clean this up later to use config directly
4692         nocalls = !(config.effective_mask & PROFLOG_CALL_EVENTS);
4693         no_counters = !(config.effective_mask & PROFLOG_COUNTER_EVENTS);
4694         do_report = config.do_report;
4695         do_debug = config.do_debug;
4696         do_heap_shot = (config.effective_mask & PROFLOG_HEAPSHOT_FEATURE);
4697         hs_mode_ondemand = config.hs_mode_ondemand;
4698         hs_mode_ms = config.hs_mode_ms;
4699         hs_mode_gc = config.hs_mode_gc;
4700         do_mono_sample = (config.effective_mask & PROFLOG_SAMPLING_FEATURE);
4701         use_zip = config.use_zip;
4702         command_port = config.command_port;
4703         num_frames = config.num_frames;
4704         notraces = config.notraces;
4705         max_allocated_sample_hits = config.max_allocated_sample_hits;
4706         max_call_depth = config.max_call_depth;
4707         do_coverage = (config.effective_mask & PROFLOG_CODE_COV_FEATURE);
4708         debug_coverage = config.debug_coverage;
4709         only_coverage = config.only_coverage;
4710
4711         if (config.cov_filter_files) {
4712                 filters = g_ptr_array_new ();
4713                 int i;
4714                 for (i = 0; i < config.cov_filter_files->len; ++i) {
4715                         const char *name = config.cov_filter_files->pdata [i];
4716                         parse_cov_filter_file (filters, name);
4717                 }
4718         }
4719
4720         init_time ();
4721
4722         PROF_TLS_INIT ();
4723
4724         create_profiler (desc, config.output_filename, filters);
4725
4726         mono_lls_init (&profiler_thread_list, NULL);
4727
4728         //These two events are required for the profiler to work
4729         int events = MONO_PROFILE_THREADS | MONO_PROFILE_GC;
4730
4731         //Required callbacks
4732         mono_profiler_install (log_profiler, log_shutdown);
4733         mono_profiler_install_runtime_initialized (runtime_initialized);
4734
4735         mono_profiler_install_gc (gc_event, gc_resize);
4736         mono_profiler_install_thread (thread_start, thread_end);
4737
4738         //It's questionable whether we actually want this to be mandatory; maybe it should be put behind the actual event?
4739         mono_profiler_install_thread_name (thread_name);
4740
4741
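        /*
         * Each optional event group below adds its MONO_PROFILE_* flag to the
         * event mask and installs the matching callbacks; groups absent from
         * config.effective_mask are skipped entirely.
         */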
4742         if (config.effective_mask & PROFLOG_DOMAIN_EVENTS) {
4743                 events |= MONO_PROFILE_APPDOMAIN_EVENTS;
4744                 mono_profiler_install_appdomain (NULL, domain_loaded, domain_unloaded, NULL);
4745                 mono_profiler_install_appdomain_name (domain_name);
4746         }
4747
4748         if (config.effective_mask & PROFLOG_ASSEMBLY_EVENTS) {
4749                 events |= MONO_PROFILE_ASSEMBLY_EVENTS;
4750                 mono_profiler_install_assembly (NULL, assembly_loaded, assembly_unloaded, NULL);
4751         }
4752
4753         if (config.effective_mask & PROFLOG_MODULE_EVENTS) {
4754                 events |= MONO_PROFILE_MODULE_EVENTS;
4755                 mono_profiler_install_module (NULL, image_loaded, image_unloaded, NULL);
4756         }
4757
4758         if (config.effective_mask & PROFLOG_CLASS_EVENTS) {
4759                 events |= MONO_PROFILE_CLASS_EVENTS;
4760                 mono_profiler_install_class (NULL, class_loaded, class_unloaded, NULL);
4761         }
4762
4763         if (config.effective_mask & PROFLOG_JIT_COMPILATION_EVENTS) {
4764                 events |= MONO_PROFILE_JIT_COMPILATION;
4765                 mono_profiler_install_jit_end (method_jitted);
4766                 mono_profiler_install_code_buffer_new (code_buffer_new);
4767         }
4768
4769         if (config.effective_mask & PROFLOG_EXCEPTION_EVENTS) {
4770                 events |= MONO_PROFILE_EXCEPTIONS;
4771                 mono_profiler_install_exception (throw_exc, method_exc_leave, NULL);
4772                 mono_profiler_install_exception_clause (clause_exc);
4773         }
4774
4775         if (config.effective_mask & PROFLOG_ALLOCATION_EVENTS) {
4776                 events |= MONO_PROFILE_ALLOCATIONS;
4777                 mono_profiler_install_allocation (gc_alloc);
4778         }
4779
4780         //PROFLOG_GC_EVENTS is mandatory
4781         //PROFLOG_THREAD_EVENTS is mandatory
4782
4783         if (config.effective_mask & PROFLOG_CALL_EVENTS) {
4784                 events |= MONO_PROFILE_ENTER_LEAVE;
4785                 mono_profiler_install_enter_leave (method_enter, method_leave);
4786         }
4787
4788         if (config.effective_mask & PROFLOG_INS_COVERAGE_EVENTS) {
4789                 events |= MONO_PROFILE_INS_COVERAGE;
4790                 mono_profiler_install_coverage_filter (coverage_filter);
4791         }
4792
4793         //XXX should we check for PROFLOG_SAMPLING_FEATURE instead??
4794         if (config.effective_mask & PROFLOG_SAMPLING_EVENTS) {
4795                 events |= MONO_PROFILE_STATISTICAL;
4796                 mono_profiler_set_statistical_mode (config.sampling_mode, config.sample_freq);
4797                 mono_profiler_install_statistical (mono_sample_hit);
4798         }
4799
4800         if (config.effective_mask & PROFLOG_MONITOR_EVENTS) {
4801                 events |= MONO_PROFILE_MONITOR_EVENTS;
4802                 mono_profiler_install_monitor (monitor_event);
4803         }
4804
4805         if (config.effective_mask & PROFLOG_GC_MOVES_EVENTS) {
4806                 events |= MONO_PROFILE_GC_MOVES;
4807                 mono_profiler_install_gc_moves (gc_moves);
4808         }
4809
4810         // TODO: split these into two separate profiler events
4811         if (config.effective_mask & (PROFLOG_GC_ROOT_EVENTS | PROFLOG_GC_HANDLE_EVENTS)) {
4812                 events |= MONO_PROFILE_GC_ROOTS;
4813                 mono_profiler_install_gc_roots (
4814                         (config.effective_mask & PROFLOG_GC_HANDLE_EVENTS) ? gc_handle : NULL,
4815                         (config.effective_mask & PROFLOG_GC_ROOT_EVENTS) ? gc_roots : NULL);
4816         }
4817
4818         if (config.effective_mask & PROFLOG_CONTEXT_EVENTS) {
4819                 events |= MONO_PROFILE_CONTEXT_EVENTS;
4820                 mono_profiler_install_context (context_loaded, context_unloaded);
4821         }
4822
4823         if (config.effective_mask & PROFLOG_FINALIZATION_EVENTS) {
4824                 events |= MONO_PROFILE_GC_FINALIZATION;
4825                 mono_profiler_install_gc_finalize (finalize_begin, finalize_object_begin, finalize_object_end, finalize_end);
4826         } else if (ENABLED (PROFLOG_HEAPSHOT_FEATURE) && config.hs_mode_ondemand) {
4827                 //On-demand heapshot uses the finalizer thread to force a collection and thus a heapshot
4828                 events |= MONO_PROFILE_GC_FINALIZATION;
4829                 mono_profiler_install_gc_finalize (NULL, NULL, NULL, finalize_end);
4830         }
4831
4832         //PROFLOG_COUNTER_EVENTS is a pseudo event controlled by the no_counters global var
4833         //PROFLOG_GC_HANDLE_EVENTS is handled together with PROFLOG_GC_ROOT_EVENTS
4834
4835         mono_profiler_set_events ((MonoProfileFlags)events);
4836 }
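
/*
 * Usage sketch (illustrative, not part of the profiler itself): the log
 * profiler is normally enabled with a runtime argument such as
 *
 *     mono --profile=log:sample,output=out.mlpd program.exe
 *
 * which makes the runtime load this module and call mono_profiler_startup
 * with the text after "--profile=".
 */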