2 * mono-profiler-logging.c: Logging profiler for Mono.
5 * Massimiliano Mantione (massi@ximian.com)
7 * Copyright 2008-2009 Novell, Inc (http://www.novell.com)
10 #include <mono/metadata/profiler.h>
11 #include <mono/metadata/class.h>
12 #include <mono/metadata/metadata-internals.h>
13 #include <mono/metadata/class-internals.h>
14 #include <mono/metadata/assembly.h>
15 #include <mono/metadata/loader.h>
16 #include <mono/metadata/threads.h>
17 #include <mono/metadata/debug-helpers.h>
18 #include <mono/metadata/mono-gc.h>
19 #include <mono/utils/mono-tls.h>
20 #include <mono/io-layer/atomic.h>
29 #include <sys/types.h>
30 #include <sys/socket.h>
31 #include <netinet/in.h>
33 #define HAS_OPROFILE 0
36 #include <libopagent.h>
39 // Needed for heap analysis
40 extern gboolean mono_object_is_alive (MonoObject* obj);
43 MONO_PROFILER_FILE_BLOCK_KIND_INTRO = 1,
44 MONO_PROFILER_FILE_BLOCK_KIND_END = 2,
45 MONO_PROFILER_FILE_BLOCK_KIND_MAPPING = 3,
46 MONO_PROFILER_FILE_BLOCK_KIND_LOADED = 4,
47 MONO_PROFILER_FILE_BLOCK_KIND_UNLOADED = 5,
48 MONO_PROFILER_FILE_BLOCK_KIND_EVENTS = 6,
49 MONO_PROFILER_FILE_BLOCK_KIND_STATISTICAL = 7,
50 MONO_PROFILER_FILE_BLOCK_KIND_HEAP_DATA = 8,
51 MONO_PROFILER_FILE_BLOCK_KIND_HEAP_SUMMARY = 9,
52 MONO_PROFILER_FILE_BLOCK_KIND_DIRECTIVES = 10
53 } MonoProfilerFileBlockKind;
56 MONO_PROFILER_DIRECTIVE_END = 0,
57 MONO_PROFILER_DIRECTIVE_ALLOCATIONS_CARRY_CALLER = 1,
58 MONO_PROFILER_DIRECTIVE_ALLOCATIONS_HAVE_STACK = 2,
59 MONO_PROFILER_DIRECTIVE_ALLOCATIONS_CARRY_ID = 3,
60 MONO_PROFILER_DIRECTIVE_LOADED_ELEMENTS_CARRY_ID = 4,
61 MONO_PROFILER_DIRECTIVE_CLASSES_CARRY_ASSEMBLY_ID = 5,
62 MONO_PROFILER_DIRECTIVE_METHODS_CARRY_WRAPPER_FLAG = 6,
63 MONO_PROFILER_DIRECTIVE_LAST
64 } MonoProfilerDirectives;
67 #define MONO_PROFILER_LOADED_EVENT_MODULE 1
68 #define MONO_PROFILER_LOADED_EVENT_ASSEMBLY 2
69 #define MONO_PROFILER_LOADED_EVENT_APPDOMAIN 4
70 #define MONO_PROFILER_LOADED_EVENT_SUCCESS 8
71 #define MONO_PROFILER_LOADED_EVENT_FAILURE 16
74 MONO_PROFILER_EVENT_DATA_TYPE_OTHER = 0,
75 MONO_PROFILER_EVENT_DATA_TYPE_METHOD = 1,
76 MONO_PROFILER_EVENT_DATA_TYPE_CLASS = 2
77 } MonoProfilerEventDataType;
79 typedef struct _ProfilerEventData {
84 unsigned int data_type:2;
87 unsigned int value:25;
90 #define EVENT_VALUE_BITS (25)
91 #define MAX_EVENT_VALUE ((1<<EVENT_VALUE_BITS)-1)
94 MONO_PROFILER_EVENT_METHOD_JIT = 0,
95 MONO_PROFILER_EVENT_METHOD_FREED = 1,
96 MONO_PROFILER_EVENT_METHOD_CALL = 2,
97 MONO_PROFILER_EVENT_METHOD_ALLOCATION_CALLER = 3,
98 MONO_PROFILER_EVENT_METHOD_ALLOCATION_JIT_TIME_CALLER = 4
99 } MonoProfilerMethodEvents;
101 MONO_PROFILER_EVENT_CLASS_LOAD = 0,
102 MONO_PROFILER_EVENT_CLASS_UNLOAD = 1,
103 MONO_PROFILER_EVENT_CLASS_EXCEPTION = 2,
104 MONO_PROFILER_EVENT_CLASS_MONITOR = 3,
105 MONO_PROFILER_EVENT_CLASS_ALLOCATION = 4
106 } MonoProfilerClassEvents;
108 MONO_PROFILER_EVENT_RESULT_SUCCESS = 0,
109 MONO_PROFILER_EVENT_RESULT_FAILURE = 4
110 } MonoProfilerEventResult;
111 #define MONO_PROFILER_EVENT_RESULT_MASK MONO_PROFILER_EVENT_RESULT_FAILURE
113 MONO_PROFILER_EVENT_THREAD = 1,
114 MONO_PROFILER_EVENT_GC_COLLECTION = 2,
115 MONO_PROFILER_EVENT_GC_MARK = 3,
116 MONO_PROFILER_EVENT_GC_SWEEP = 4,
117 MONO_PROFILER_EVENT_GC_RESIZE = 5,
118 MONO_PROFILER_EVENT_GC_STOP_WORLD = 6,
119 MONO_PROFILER_EVENT_GC_START_WORLD = 7,
120 MONO_PROFILER_EVENT_JIT_TIME_ALLOCATION = 8,
121 MONO_PROFILER_EVENT_STACK_SECTION = 9,
122 MONO_PROFILER_EVENT_ALLOCATION_OBJECT_ID = 10,
123 MONO_PROFILER_EVENT_OBJECT_MONITOR = 11
124 } MonoProfilerEvents;
126 MONO_PROFILER_EVENT_KIND_START = 0,
127 MONO_PROFILER_EVENT_KIND_END = 1
128 } MonoProfilerEventKind;
/* Store the current wall-clock time into (t) as microseconds since the
 * epoch.  Fix: the address-of expression "&current_time" had been mangled
 * into the HTML entity "&curren;" + "t_time" (rendered as a currency sign),
 * which does not compile; restore the correct argument to gettimeofday. */
#define MONO_PROFILER_GET_CURRENT_TIME(t) {\
struct timeval current_time;\
gettimeofday (&current_time, NULL);\
(t) = (((guint64)current_time.tv_sec) * 1000000) + current_time.tv_usec;\
136 static gboolean use_fast_timer = FALSE;
138 #if (defined(__i386__) || defined(__x86_64__)) && ! defined(HOST_WIN32)
140 #if defined(__i386__)
141 static const guchar cpuid_impl [] = {
142 0x55, /* push %ebp */
143 0x89, 0xe5, /* mov %esp,%ebp */
144 0x53, /* push %ebx */
145 0x8b, 0x45, 0x08, /* mov 0x8(%ebp),%eax */
146 0x0f, 0xa2, /* cpuid */
147 0x50, /* push %eax */
148 0x8b, 0x45, 0x10, /* mov 0x10(%ebp),%eax */
149 0x89, 0x18, /* mov %ebx,(%eax) */
150 0x8b, 0x45, 0x14, /* mov 0x14(%ebp),%eax */
151 0x89, 0x08, /* mov %ecx,(%eax) */
152 0x8b, 0x45, 0x18, /* mov 0x18(%ebp),%eax */
153 0x89, 0x10, /* mov %edx,(%eax) */
155 0x8b, 0x55, 0x0c, /* mov 0xc(%ebp),%edx */
156 0x89, 0x02, /* mov %eax,(%edx) */
162 typedef void (*CpuidFunc) (int id, int* p_eax, int* p_ebx, int* p_ecx, int* p_edx);
165 cpuid (int id, int* p_eax, int* p_ebx, int* p_ecx, int* p_edx) {
168 __asm__ __volatile__ (
171 "movl %%eax, %%edx\n"
172 "xorl $0x200000, %%eax\n"
177 "xorl %%edx, %%eax\n"
178 "andl $0x200000, %%eax\n"
200 CpuidFunc func = (CpuidFunc) cpuid_impl;
201 func (id, p_eax, p_ebx, p_ecx, p_edx);
203 * We use this approach because of issues with gcc and pic code, see:
204 * http://gcc.gnu.org/cgi-bin/gnatsweb.pl?cmd=view%20audit-trail&database=gcc&pr=7329
205 __asm__ __volatile__ ("cpuid"
206 : "=a" (*p_eax), "=b" (*p_ebx), "=c" (*p_ecx), "=d" (*p_edx)
214 static void detect_fast_timer (void) {
215 int p_eax, p_ebx, p_ecx, p_edx;
217 if (cpuid (0x1, &p_eax, &p_ebx, &p_ecx, &p_edx)) {
219 use_fast_timer = TRUE;
221 use_fast_timer = FALSE;
224 use_fast_timer = FALSE;
229 #if defined(__x86_64__)
230 static void detect_fast_timer (void) {
232 guint32 eax,ebx,ecx,edx;
233 __asm__ __volatile__ ("cpuid" : "=a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx) : "a"(op));
235 use_fast_timer = TRUE;
237 use_fast_timer = FALSE;
242 static __inline__ guint64 rdtsc(void) {
244 __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi));
245 return ((guint64) lo) | (((guint64) hi) << 32);
247 #define MONO_PROFILER_GET_CURRENT_COUNTER(c) {\
248 if (use_fast_timer) {\
251 MONO_PROFILER_GET_CURRENT_TIME ((c));\
255 static void detect_fast_timer (void) {
256 use_fast_timer = FALSE;
258 #define MONO_PROFILER_GET_CURRENT_COUNTER(c) MONO_PROFILER_GET_CURRENT_TIME ((c))
262 #define CLASS_LAYOUT_PACKED_BITMAP_SIZE 64
263 #define CLASS_LAYOUT_NOT_INITIALIZED (0xFFFF)
266 HEAP_CODE_OBJECT = 1,
267 HEAP_CODE_FREE_OBJECT_CLASS = 2,
269 } HeapProfilerJobValueCode;
270 typedef struct _MonoProfilerClassData {
279 } MonoProfilerClassData;
281 typedef struct _MonoProfilerMethodData {
284 } MonoProfilerMethodData;
286 typedef struct _ClassIdMappingElement {
290 struct _ClassIdMappingElement *next_unwritten;
291 MonoProfilerClassData data;
292 } ClassIdMappingElement;
294 typedef struct _MethodIdMappingElement {
298 struct _MethodIdMappingElement *next_unwritten;
299 MonoProfilerMethodData data;
300 } MethodIdMappingElement;
302 typedef struct _ClassIdMapping {
304 ClassIdMappingElement *unwritten;
308 typedef struct _MethodIdMapping {
310 MethodIdMappingElement *unwritten;
314 typedef struct _LoadedElement {
316 guint64 load_start_counter;
317 guint64 load_end_counter;
318 guint64 unload_start_counter;
319 guint64 unload_end_counter;
324 guint8 unload_written;
326 struct _ProfilerCodeBufferArray;
327 typedef struct _ProfilerCodeBuffer {
335 struct _ProfilerCodeBufferArray *sub_buffers;
340 } ProfilerCodeBuffer;
342 #define PROFILER_CODE_BUFFER_ARRAY_SIZE 64
343 typedef struct _ProfilerCodeBufferArray {
345 int number_of_buffers;
346 ProfilerCodeBuffer buffers [PROFILER_CODE_BUFFER_ARRAY_SIZE];
347 } ProfilerCodeBufferArray;
349 typedef struct _ProfilerCodeChunk {
353 ProfilerCodeBufferArray *buffers;
356 typedef struct _ProfilerCodeChunks {
358 int number_of_chunks;;
359 ProfilerCodeChunk *chunks;
360 } ProfilerCodeChunks;
363 #define PROFILER_HEAP_SHOT_OBJECT_BUFFER_SIZE 1024
364 #define PROFILER_HEAP_SHOT_HEAP_BUFFER_SIZE 4096
365 #define PROFILER_HEAP_SHOT_WRITE_BUFFER_SIZE 4096
367 typedef struct _ProfilerHeapShotObjectBuffer {
368 struct _ProfilerHeapShotObjectBuffer *next;
369 MonoObject **next_free_slot;
371 MonoObject **first_unprocessed_slot;
372 MonoObject *buffer [PROFILER_HEAP_SHOT_OBJECT_BUFFER_SIZE];
373 } ProfilerHeapShotObjectBuffer;
375 typedef struct _ProfilerHeapShotHeapBuffer {
376 struct _ProfilerHeapShotHeapBuffer *next;
377 struct _ProfilerHeapShotHeapBuffer *previous;
378 MonoObject **start_slot;
379 MonoObject **end_slot;
380 MonoObject *buffer [PROFILER_HEAP_SHOT_HEAP_BUFFER_SIZE];
381 } ProfilerHeapShotHeapBuffer;
383 typedef struct _ProfilerHeapShotHeapBuffers {
384 ProfilerHeapShotHeapBuffer *buffers;
385 ProfilerHeapShotHeapBuffer *last;
386 ProfilerHeapShotHeapBuffer *current;
387 MonoObject **first_free_slot;
388 } ProfilerHeapShotHeapBuffers;
391 typedef struct _ProfilerHeapShotWriteBuffer {
392 struct _ProfilerHeapShotWriteBuffer *next;
393 gpointer buffer [PROFILER_HEAP_SHOT_WRITE_BUFFER_SIZE];
394 } ProfilerHeapShotWriteBuffer;
396 typedef struct _ProfilerHeapShotClassSummary {
405 } ProfilerHeapShotClassSummary;
407 typedef struct _ProfilerHeapShotCollectionSummary {
408 ProfilerHeapShotClassSummary *per_class_data;
410 } ProfilerHeapShotCollectionSummary;
412 typedef struct _ProfilerHeapShotWriteJob {
413 struct _ProfilerHeapShotWriteJob *next;
414 struct _ProfilerHeapShotWriteJob *next_unwritten;
418 ProfilerHeapShotWriteBuffer *buffers;
419 ProfilerHeapShotWriteBuffer **last_next;
420 guint32 full_buffers;
421 gboolean heap_shot_was_requested;
422 guint64 start_counter;
427 ProfilerHeapShotCollectionSummary summary;
428 gboolean dump_heap_data;
429 } ProfilerHeapShotWriteJob;
431 typedef struct _ProfilerThreadStack {
434 guint32 last_saved_top;
435 guint32 last_written_frame;
437 guint8 *method_is_jitted;
438 guint32 *written_frames;
439 } ProfilerThreadStack;
441 typedef struct _ProfilerPerThreadData {
442 ProfilerEventData *events;
443 ProfilerEventData *next_free_event;
444 ProfilerEventData *next_unreserved_event;
445 ProfilerEventData *end_event;
446 ProfilerEventData *first_unwritten_event;
447 ProfilerEventData *first_unmapped_event;
448 guint64 start_event_counter;
449 guint64 last_event_counter;
451 ProfilerHeapShotObjectBuffer *heap_shot_object_buffers;
452 ProfilerThreadStack stack;
453 struct _ProfilerPerThreadData* next;
454 } ProfilerPerThreadData;
456 typedef struct _ProfilerStatisticalHit {
459 } ProfilerStatisticalHit;
461 typedef struct _ProfilerStatisticalData {
462 ProfilerStatisticalHit *hits;
463 unsigned int next_free_index;
464 unsigned int end_index;
465 unsigned int first_unwritten_index;
466 } ProfilerStatisticalData;
468 typedef struct _ProfilerUnmanagedSymbol {
473 } ProfilerUnmanagedSymbol;
475 struct _ProfilerExecutableFile;
476 struct _ProfilerExecutableFileSectionRegion;
478 typedef struct _ProfilerExecutableMemoryRegionData {
486 struct _ProfilerExecutableFile *file;
487 struct _ProfilerExecutableFileSectionRegion *file_region_reference;
488 guint32 symbols_count;
489 guint32 symbols_capacity;
490 ProfilerUnmanagedSymbol *symbols;
491 } ProfilerExecutableMemoryRegionData;
493 typedef struct _ProfilerExecutableMemoryRegions {
494 ProfilerExecutableMemoryRegionData **regions;
495 guint32 regions_capacity;
496 guint32 regions_count;
498 guint32 next_unmanaged_function_id;
499 } ProfilerExecutableMemoryRegions;
501 /* Start of ELF definitions */
503 typedef guint16 ElfHalf;
504 typedef guint32 ElfWord;
505 typedef gsize ElfAddr;
506 typedef gsize ElfOff;
509 unsigned char e_ident[EI_NIDENT];
515 ElfOff e_shoff; // Section header table
517 ElfHalf e_ehsize; // Header size
520 ElfHalf e_shentsize; // Section header entry size
521 ElfHalf e_shnum; // Section header entries number
522 ElfHalf e_shstrndx; // String table index
525 #if (SIZEOF_VOID_P == 4)
530 ElfAddr sh_addr; // Address in memory
531 ElfOff sh_offset; // Offset in file
535 ElfWord sh_addralign;
542 unsigned char st_info; // Use ELF32_ST_TYPE to get symbol type
543 unsigned char st_other;
544 ElfHalf st_shndx; // Or one of SHN_ABS, SHN_COMMON or SHN_UNDEF.
546 #elif (SIZEOF_VOID_P == 8)
551 ElfAddr sh_addr; // Address in memory
552 ElfOff sh_offset; // Offset in file
561 unsigned char st_info; // Use ELF_ST_TYPE to get symbol type
562 unsigned char st_other;
563 ElfHalf st_shndx; // Or one of SHN_ABS, SHN_COMMON or SHN_UNDEF.
568 #error Bad size of void pointer
572 #define ELF_ST_BIND(i) ((i)>>4)
573 #define ELF_ST_TYPE(i) ((i)&0xf)
586 ELF_FILE_TYPE_NONE = 0,
587 ELF_FILE_TYPE_REL = 1,
588 ELF_FILE_TYPE_EXEC = 2,
589 ELF_FILE_TYPE_DYN = 3,
590 ELF_FILE_TYPE_CORE = 4
607 ELF_SHT_PROGBITS = 1,
631 ELF_SHF_EXECINSTR = 4,
634 #define ELF_SHN_UNDEF 0
635 #define ELF_SHN_LORESERVE 0xff00
636 #define ELF_SHN_LOPROC 0xff00
637 #define ELF_SHN_HIPROC 0xff1f
638 #define ELF_SHN_ABS 0xfff1
639 #define ELF_SHN_COMMON 0xfff2
640 #define ELF_SHN_HIRESERVE 0xffff
641 /* End of ELF definitions */
643 typedef struct _ProfilerExecutableFileSectionRegion {
644 ProfilerExecutableMemoryRegionData *region;
645 guint8 *section_address;
646 gsize section_offset;
647 } ProfilerExecutableFileSectionRegion;
649 typedef struct _ProfilerExecutableFile {
650 guint32 reference_count;
652 /* Used for mmap and munmap */
659 guint8 *symbols_start;
660 guint32 symbols_count;
662 const char *symbols_string_table;
663 const char *main_string_table;
665 ProfilerExecutableFileSectionRegion *section_regions;
667 struct _ProfilerExecutableFile *next_new_file;
668 } ProfilerExecutableFile;
670 typedef struct _ProfilerExecutableFiles {
672 ProfilerExecutableFile *new_files;
673 } ProfilerExecutableFiles;
676 #define CLEANUP_WRITER_THREAD() do {profiler->writer_thread_terminated = TRUE;} while (0)
677 #define CHECK_WRITER_THREAD() (! profiler->writer_thread_terminated)
680 #include <sys/types.h>
681 #include <sys/time.h>
682 #include <sys/stat.h>
686 #include <semaphore.h>
688 #include <sys/mman.h>
689 #include <sys/types.h>
690 #include <sys/stat.h>
694 #define MUTEX_TYPE pthread_mutex_t
695 #define INITIALIZE_PROFILER_MUTEX() pthread_mutex_init (&(profiler->mutex), NULL)
696 #define DELETE_PROFILER_MUTEX() pthread_mutex_destroy (&(profiler->mutex))
697 #define LOCK_PROFILER() do {/*LOG_WRITER_THREAD ("LOCK_PROFILER");*/ pthread_mutex_lock (&(profiler->mutex));} while (0)
698 #define UNLOCK_PROFILER() do {/*LOG_WRITER_THREAD ("UNLOCK_PROFILER");*/ pthread_mutex_unlock (&(profiler->mutex));} while (0)
700 #define THREAD_TYPE pthread_t
701 #define CREATE_WRITER_THREAD(f) pthread_create (&(profiler->data_writer_thread), NULL, ((void*(*)(void*))f), NULL)
702 #define CREATE_USER_THREAD(f) pthread_create (&(profiler->user_thread), NULL, ((void*(*)(void*))f), NULL)
703 #define EXIT_THREAD() pthread_exit (NULL);
704 #define WAIT_WRITER_THREAD() do {\
705 if (CHECK_WRITER_THREAD ()) {\
706 pthread_join (profiler->data_writer_thread, NULL);\
709 #define CURRENT_THREAD_ID() (gsize) pthread_self ()
711 #ifndef HAVE_KW_THREAD
712 static pthread_key_t pthread_profiler_key;
713 static pthread_once_t profiler_pthread_once = PTHREAD_ONCE_INIT;
715 make_pthread_profiler_key (void) {
716 (void) pthread_key_create (&pthread_profiler_key, NULL);
718 #define LOOKUP_PROFILER_THREAD_DATA() ((ProfilerPerThreadData*) pthread_getspecific (pthread_profiler_key))
719 #define SET_PROFILER_THREAD_DATA(x) (void) pthread_setspecific (pthread_profiler_key, (x))
720 #define ALLOCATE_PROFILER_THREAD_DATA() (void) pthread_once (&profiler_pthread_once, make_pthread_profiler_key)
721 #define FREE_PROFILER_THREAD_DATA() (void) pthread_key_delete (pthread_profiler_key)
724 #define EVENT_TYPE sem_t
725 #define WRITER_EVENT_INIT() do {\
726 sem_init (&(profiler->enable_data_writer_event), 0, 0);\
727 sem_init (&(profiler->wake_data_writer_event), 0, 0);\
728 sem_init (&(profiler->done_data_writer_event), 0, 0);\
730 #define WRITER_EVENT_DESTROY() do {\
731 sem_destroy (&(profiler->enable_data_writer_event));\
732 sem_destroy (&(profiler->wake_data_writer_event));\
733 sem_destroy (&(profiler->done_data_writer_event));\
735 #define WRITER_EVENT_WAIT() (void) sem_wait (&(profiler->wake_data_writer_event))
736 #define WRITER_EVENT_RAISE() (void) sem_post (&(profiler->wake_data_writer_event))
737 #define WRITER_EVENT_ENABLE_WAIT() (void) sem_wait (&(profiler->enable_data_writer_event))
738 #define WRITER_EVENT_ENABLE_RAISE() (void) sem_post (&(profiler->enable_data_writer_event))
739 #define WRITER_EVENT_DONE_WAIT() do {\
740 if (CHECK_WRITER_THREAD ()) {\
741 (void) sem_wait (&(profiler->done_data_writer_event));\
744 #define WRITER_EVENT_DONE_RAISE() (void) sem_post (&(profiler->done_data_writer_event))
747 #define FILE_HANDLE_TYPE FILE*
748 #define OPEN_FILE() profiler->file = fopen (profiler->file_name, "wb");
749 #define WRITE_BUFFER(b,s) fwrite ((b), 1, (s), profiler->file)
750 #define FLUSH_FILE() fflush (profiler->file)
751 #define CLOSE_FILE() fclose (profiler->file)
753 #define FILE_HANDLE_TYPE int
754 #define OPEN_FILE() profiler->file = open (profiler->file_name, O_WRONLY|O_CREAT|O_TRUNC, 0664);
755 #define WRITE_BUFFER(b,s) write (profiler->file, (b), (s))
756 #define FLUSH_FILE() fsync (profiler->file)
757 #define CLOSE_FILE() close (profiler->file)
764 #define MUTEX_TYPE CRITICAL_SECTION
765 #define INITIALIZE_PROFILER_MUTEX() InitializeCriticalSection (&(profiler->mutex))
766 #define DELETE_PROFILER_MUTEX() DeleteCriticalSection (&(profiler->mutex))
767 #define LOCK_PROFILER() EnterCriticalSection (&(profiler->mutex))
768 #define UNLOCK_PROFILER() LeaveCriticalSection (&(profiler->mutex))
770 #define THREAD_TYPE HANDLE
771 #define CREATE_WRITER_THREAD(f) CreateThread (NULL, (1*1024*1024), (f), NULL, 0, NULL);
772 #define EXIT_THREAD() ExitThread (0);
773 #define WAIT_WRITER_THREAD() do {\
774 if (CHECK_WRITER_THREAD ()) {\
775 WaitForSingleObject (profiler->data_writer_thread, INFINITE);\
778 #define CURRENT_THREAD_ID() (gsize) GetCurrentThreadId ()
780 #ifndef HAVE_KW_THREAD
781 static MonoNativeTlsKey profiler_thread_id;
782 #define LOOKUP_PROFILER_THREAD_DATA() ((ProfilerPerThreadData*)mono_native_tls_get_value (profiler_thread_id))
783 #define SET_PROFILER_THREAD_DATA(x) mono_native_tls_set_value (profiler_thread_id, (x));
784 #define ALLOCATE_PROFILER_THREAD_DATA() mono_native_tls_alloc (profiler_thread_id, NULL)
785 #define FREE_PROFILER_THREAD_DATA() mono_native_tls_free (profiler_thread_id)
788 #define EVENT_TYPE HANDLE
/* Win32: create the three auto-reset events used to coordinate with the
 * data writer thread.  Fix: "(void) do { ... }" is invalid C — a do/while
 * statement is not an expression and cannot be cast — so the macro body is
 * a plain do-block. */
#define WRITER_EVENT_INIT() do {\
profiler->enable_data_writer_event = CreateEvent (NULL, FALSE, FALSE, NULL);\
profiler->wake_data_writer_event = CreateEvent (NULL, FALSE, FALSE, NULL);\
profiler->done_data_writer_event = CreateEvent (NULL, FALSE, FALSE, NULL);\
/* Win32: dispose the writer-thread events created by WRITER_EVENT_INIT.
 * Fixes folded into one macro:
 *  - the previous one-line WRITER_EVENT_DESTROY closed a field that does
 *    not exist in struct _MonoProfiler (statistical_data_writer_event);
 *  - the CloseHandle block below it was mis-named WRITER_EVENT_INIT,
 *    silently redefining the INIT macro defined just above;
 *  - "(void) do {" is invalid C (a statement cannot be cast). */
#define WRITER_EVENT_DESTROY() do {\
CloseHandle (profiler->enable_data_writer_event);\
CloseHandle (profiler->wake_data_writer_event);\
CloseHandle (profiler->done_data_writer_event);\
800 #define WRITER_EVENT_WAIT() WaitForSingleObject (profiler->wake_data_writer_event, INFINITE)
801 #define WRITER_EVENT_RAISE() SetEvent (profiler->wake_data_writer_event)
802 #define WRITER_EVENT_ENABLE_WAIT() WaitForSingleObject (profiler->enable_data_writer_event, INFINITE)
803 #define WRITER_EVENT_ENABLE_RAISE() SetEvent (profiler->enable_data_writer_event)
804 #define WRITER_EVENT_DONE_WAIT() do {\
805 if (CHECK_WRITER_THREAD ()) {\
806 WaitForSingleObject (profiler->done_data_writer_event, INFINITE);\
809 #define WRITER_EVENT_DONE_RAISE() SetEvent (profiler->done_data_writer_event)
811 #define FILE_HANDLE_TYPE FILE*
812 #define OPEN_FILE() profiler->file = fopen (profiler->file_name, "wb");
813 #define WRITE_BUFFER(b,s) fwrite ((b), 1, (s), profiler->file)
814 #define FLUSH_FILE() fflush (profiler->file)
815 #define CLOSE_FILE() fclose (profiler->file);
819 #ifdef HAVE_KW_THREAD
820 static __thread ProfilerPerThreadData * tls_profiler_per_thread_data;
821 #define LOOKUP_PROFILER_THREAD_DATA() ((ProfilerPerThreadData*) tls_profiler_per_thread_data)
822 #define SET_PROFILER_THREAD_DATA(x) tls_profiler_per_thread_data = (x)
823 #define ALLOCATE_PROFILER_THREAD_DATA() /* nop */
824 #define FREE_PROFILER_THREAD_DATA() /* nop */
827 #define GET_PROFILER_THREAD_DATA(data) do {\
828 ProfilerPerThreadData *_result = LOOKUP_PROFILER_THREAD_DATA ();\
830 _result = profiler_per_thread_data_new (profiler->per_thread_buffer_size);\
832 _result->next = profiler->per_thread_data;\
833 profiler->per_thread_data = _result;\
835 SET_PROFILER_THREAD_DATA (_result);\
840 #define PROFILER_FILE_WRITE_BUFFER_SIZE (profiler->write_buffer_size)
841 typedef struct _ProfilerFileWriteBuffer {
842 struct _ProfilerFileWriteBuffer *next;
843 guint8 buffer [MONO_ZERO_LEN_ARRAY];
844 } ProfilerFileWriteBuffer;
846 #define CHECK_PROFILER_ENABLED() do {\
847 if (! profiler->profiler_enabled)\
850 struct _MonoProfiler {
853 MonoProfileFlags flags;
854 gboolean profiler_enabled;
856 char *file_name_suffix;
857 FILE_HANDLE_TYPE file;
860 guint64 start_counter;
864 guint64 last_header_counter;
866 MethodIdMapping *methods;
867 ClassIdMapping *classes;
869 guint32 loaded_element_next_free_id;
870 GHashTable *loaded_assemblies;
871 GHashTable *loaded_modules;
872 GHashTable *loaded_appdomains;
874 guint32 per_thread_buffer_size;
875 guint32 statistical_buffer_size;
876 ProfilerPerThreadData* per_thread_data;
877 ProfilerStatisticalData *statistical_data;
878 ProfilerStatisticalData *statistical_data_ready;
879 ProfilerStatisticalData *statistical_data_second_buffer;
880 int statistical_call_chain_depth;
881 MonoProfilerCallChainStrategy statistical_call_chain_strategy;
883 ProfilerCodeChunks code_chunks;
885 THREAD_TYPE data_writer_thread;
886 THREAD_TYPE user_thread;
887 EVENT_TYPE enable_data_writer_event;
888 EVENT_TYPE wake_data_writer_event;
889 EVENT_TYPE done_data_writer_event;
890 gboolean terminate_writer_thread;
891 gboolean writer_thread_terminated;
893 ProfilerFileWriteBuffer *write_buffers;
894 ProfilerFileWriteBuffer *current_write_buffer;
895 int write_buffer_size;
896 int current_write_position;
897 int full_write_buffers;
899 ProfilerHeapShotWriteJob *heap_shot_write_jobs;
900 ProfilerHeapShotHeapBuffers heap;
904 int dump_next_heap_snapshots;
905 gboolean heap_shot_was_requested;
906 guint32 garbage_collection_counter;
908 ProfilerExecutableMemoryRegions *executable_regions;
909 ProfilerExecutableFiles executable_files;
916 gboolean unreachable_objects;
917 gboolean collection_summary;
918 gboolean report_gc_events;
920 gboolean track_stack;
921 gboolean track_calls;
922 gboolean save_allocation_caller;
923 gboolean save_allocation_stack;
924 gboolean allocations_carry_id;
927 static MonoProfiler *profiler;
/* Turn event recording on: all CHECK_PROFILER_ENABLED guards start
 * passing once this flag is set. */
enable_profiler (void) {
profiler->profiler_enabled = TRUE;
934 static void flush_everything (void);
/* Turn event recording off.  Declared alongside flush_everything () above;
 * presumably pending data is flushed after disabling — the remainder of
 * this function is not visible here (TODO confirm against full source). */
disable_profiler (void) {
profiler->profiler_enabled = FALSE;
/* Ask for a heap snapshot: set the request flag and force a full GC
 * (max generation) so the heap-walk callbacks fire and observe it. */
request_heap_snapshot (void) {
profiler->heap_shot_was_requested = TRUE;
mono_gc_collect (mono_gc_max_generation ());
948 #define DEBUG_LOAD_EVENTS 0
949 #define DEBUG_MAPPING_EVENTS 0
950 #define DEBUG_LOGGING_PROFILER 0
951 #define DEBUG_HEAP_PROFILER 0
952 #define DEBUG_CLASS_BITMAPS 0
953 #define DEBUG_STATISTICAL_PROFILER 0
954 #define DEBUG_WRITER_THREAD 0
955 #define DEBUG_USER_THREAD 0
956 #define DEBUG_FILE_WRITES 0
957 #if (DEBUG_LOGGING_PROFILER || DEBUG_STATISTICAL_PROFILER || DEBUG_HEAP_PROFILER || DEBUG_WRITER_THREAD || DEBUG_FILE_WRITES)
958 #define LOG_WRITER_THREAD(m) printf ("WRITER-THREAD-LOG %s\n", m)
960 #define LOG_WRITER_THREAD(m)
962 #if (DEBUG_LOGGING_PROFILER || DEBUG_STATISTICAL_PROFILER || DEBUG_HEAP_PROFILER || DEBUG_USER_THREAD || DEBUG_FILE_WRITES)
963 #define LOG_USER_THREAD(m) printf ("USER-THREAD-LOG %s\n", m)
965 #define LOG_USER_THREAD(m)
968 #if DEBUG_LOGGING_PROFILER
969 static int event_counter = 0;
970 #define EVENT_MARK() printf ("[EVENT:%d]", ++ event_counter)
/* Put a ProfilerThreadStack into the "empty, no storage" state: counters
 * cleared and backing arrays NULL so thread_stack_initialize () can
 * allocate them later on first real use. */
thread_stack_initialize_empty (ProfilerThreadStack *stack) {
stack->last_saved_top = 0;
stack->last_written_frame = 0;
/* NULL arrays are the sentinel the *_safely push variants test for. */
stack->method_is_jitted = NULL;
stack->written_frames = NULL;
/* Release the three parallel arrays backing a thread stack and reset the
 * bookkeeping counters.  Each array pointer is NULLed after freeing so a
 * later push-safely call sees an empty stack rather than a dangling one. */
thread_stack_free (ProfilerThreadStack *stack) {
stack->last_saved_top = 0;
stack->last_written_frame = 0;
if (stack->stack != NULL) {
g_free (stack->stack);
if (stack->method_is_jitted != NULL) {
g_free (stack->method_is_jitted);
stack->method_is_jitted = NULL;
if (stack->written_frames != NULL) {
g_free (stack->written_frames);
stack->written_frames = NULL;
/* Allocate (zeroed) storage for a thread stack with the given capacity:
 * three parallel arrays — the method frames, a per-frame "was jitted"
 * flag, and the per-frame written-id cache used by the writer thread. */
thread_stack_initialize (ProfilerThreadStack *stack, guint32 capacity) {
stack->capacity = capacity;
stack->last_saved_top = 0;
stack->last_written_frame = 0;
stack->stack = g_new0 (MonoMethod*, capacity);
stack->method_is_jitted = g_new0 (guint8, capacity);
stack->written_frames = g_new0 (guint32, capacity);
/* Push a frame, recording whether the method was seen as jitted.  On
 * overflow the stack grows by doubling: the old arrays are saved, the
 * stack is re-initialized at twice the capacity, and the old contents
 * (and the saved counters, which initialize resets) are copied back. */
thread_stack_push_jitted (ProfilerThreadStack *stack, MonoMethod* method, gboolean method_is_jitted) {
if (stack->top >= stack->capacity) {
/* Keep the old arrays and counters across re-initialization. */
MonoMethod **old_stack = stack->stack;
guint8 *old_method_is_jitted = stack->method_is_jitted;
guint32 *old_written_frames = stack->written_frames;
guint32 top = stack->top;
guint32 last_saved_top = stack->last_saved_top;
guint32 last_written_frame = stack->last_written_frame;
/* Doubling keeps amortized push cost constant. */
thread_stack_initialize (stack, stack->capacity * 2);
memcpy (stack->stack, old_stack, top * sizeof (MonoMethod*));
memcpy (stack->method_is_jitted, old_method_is_jitted, top * sizeof (guint8));
memcpy (stack->written_frames, old_written_frames, top * sizeof (guint32));
g_free (old_method_is_jitted);
g_free (old_written_frames);
/* Restore counters clobbered by thread_stack_initialize (). */
stack->last_saved_top = last_saved_top;
stack->last_written_frame = last_written_frame;
stack->stack [stack->top] = method;
stack->method_is_jitted [stack->top] = method_is_jitted;
/* Push a frame with the "not jitted" default flag. */
thread_stack_push (ProfilerThreadStack *stack, MonoMethod* method) {
thread_stack_push_jitted (stack, method, FALSE);
/* Pop and return the topmost frame.  last_saved_top is clamped so it
 * never points past the new top after popping below a saved snapshot. */
thread_stack_pop (ProfilerThreadStack *stack) {
if (stack->top > 0) {
if (stack->last_saved_top > stack->top) {
stack->last_saved_top = stack->top;
return stack->stack [stack->top];
/* Peek at the topmost method without popping (empty-stack behavior not
 * visible in this excerpt). */
thread_stack_top (ProfilerThreadStack *stack) {
if (stack->top > 0) {
return stack->stack [stack->top - 1];
/* Peek at the "was jitted" flag of the topmost frame. */
thread_stack_top_is_jitted (ProfilerThreadStack *stack) {
if (stack->top > 0) {
return stack->method_is_jitted [stack->top - 1];
/* Return the method `index` frames below the top (index 0 == top). */
thread_stack_index_from_top (ProfilerThreadStack *stack, int index) {
if (stack->top > index) {
return stack->stack [stack->top - (index + 1)];
/* Return the "was jitted" flag `index` frames below the top (0 == top). */
thread_stack_index_from_top_is_jitted (ProfilerThreadStack *stack, int index) {
if (stack->top > index) {
return stack->method_is_jitted [stack->top - (index + 1)];
/* Push only if the stack has storage; a never-initialized stack
 * (stack->stack == NULL) silently drops the frame. */
thread_stack_push_safely (ProfilerThreadStack *stack, MonoMethod* method) {
if (stack->stack != NULL) {
thread_stack_push (stack, method);
/* Jitted-flag variant of thread_stack_push_safely: no-op when the stack
 * has no backing storage. */
thread_stack_push_jitted_safely (ProfilerThreadStack *stack, MonoMethod* method, gboolean method_is_jitted) {
if (stack->stack != NULL) {
thread_stack_push_jitted (stack, method, method_is_jitted);
/* Number of frames pushed since the last save point, clamped at zero
 * (top can be below last_saved_top after pops). */
thread_stack_count_unsaved_frames (ProfilerThreadStack *stack) {
int result = stack->top - stack->last_saved_top;
return (result > 0) ? result : 0;
/* Accessor: index of the deepest frame already emitted to the output. */
thread_stack_get_last_written_frame (ProfilerThreadStack *stack) {
return stack->last_written_frame;
/* Mutator: record how deep the writer has emitted frames so far. */
thread_stack_set_last_written_frame (ProfilerThreadStack *stack, int last_written_frame) {
stack->last_written_frame = last_written_frame;
/* Read the cached "method id + jitted bit" value written for frame
 * `index` (no bounds check; callers must stay within capacity). */
static inline guint32
thread_stack_written_frame_at_index (ProfilerThreadStack *stack, int index) {
return stack->written_frames [index];
/* Cache the packed "method id + jitted bit" value for frame `index`
 * (no bounds check; callers must stay within capacity). */
thread_stack_write_frame_at_index (ProfilerThreadStack *stack, int index, guint32 method_id_and_is_jitted) {
stack->written_frames [index] = method_id_and_is_jitted;
/* Look up the id-mapping element for a class; NULL if the class has not
 * been registered via class_id_mapping_element_new yet. */
static ClassIdMappingElement*
class_id_mapping_element_get (MonoClass *klass) {
return g_hash_table_lookup (profiler->classes->table, (gconstpointer) klass);
/* Look up the id-mapping element for a method; NULL if unregistered. */
static MethodIdMappingElement*
method_id_mapping_element_get (MonoMethod *method) {
return g_hash_table_lookup (profiler->methods->table, (gconstpointer) method);
1144 #define BITS_TO_BYTES(v) do {\
/* Create, register and return a fresh id-mapping element for `klass`:
 * assign it the next sequential class id, chain it onto the unwritten
 * list (so the writer thread will emit its mapping record), and mark its
 * layout bitmap as not-yet-computed so
 * class_id_mapping_element_build_layout_bitmap runs lazily later. */
static ClassIdMappingElement*
class_id_mapping_element_new (MonoClass *klass) {
ClassIdMappingElement *result = g_new (ClassIdMappingElement, 1);
result->name = mono_type_full_name (mono_class_get_type (klass));
result->klass = klass;
/* Prepend to the unwritten list consumed by the writer thread. */
result->next_unwritten = profiler->classes->unwritten;
profiler->classes->unwritten = result;
result->id = profiler->classes->next_id;
profiler->classes->next_id ++;
/* Layout data starts in the sentinel state; the bitmap is built on
 * demand when heap-shot data needs it. */
result->data.bitmap.compact = 0;
result->data.layout.slots = CLASS_LAYOUT_NOT_INITIALIZED;
result->data.layout.references = CLASS_LAYOUT_NOT_INITIALIZED;
g_hash_table_insert (profiler->classes->table, klass, result);
#if (DEBUG_MAPPING_EVENTS)
printf ("Created new CLASS mapping element \"%s\" (%p)[%d]\n", result->name, klass, result->id);
1174 class_id_mapping_element_build_layout_bitmap (MonoClass *klass, ClassIdMappingElement *klass_id) {
1175 MonoClass *parent_class = mono_class_get_parent (klass);
1176 int number_of_reference_fields = 0;
1177 int max_offset_of_reference_fields = 0;
1178 ClassIdMappingElement *parent_id;
1180 MonoClassField *field;
1182 #if (DEBUG_CLASS_BITMAPS)
1183 printf ("class_id_mapping_element_build_layout_bitmap: building layout for class %s.%s: ", mono_class_get_namespace (klass), mono_class_get_name (klass));
1186 if (parent_class != NULL) {
1187 parent_id = class_id_mapping_element_get (parent_class);
1188 g_assert (parent_id != NULL);
1190 if (parent_id->data.layout.slots == CLASS_LAYOUT_NOT_INITIALIZED) {
1191 #if (DEBUG_CLASS_BITMAPS)
1192 printf ("[recursively building bitmap for father class]\n");
1194 class_id_mapping_element_build_layout_bitmap (parent_class, parent_id);
1201 while ((field = mono_class_get_fields (klass, &iter)) != NULL) {
1202 MonoType* field_type = mono_field_get_type (field);
1203 // For now, skip static fields
1204 if (mono_field_get_flags (field) & 0x0010 /*FIELD_ATTRIBUTE_STATIC*/)
1207 if (MONO_TYPE_IS_REFERENCE (field_type)) {
1208 int field_offset = mono_field_get_offset (field) - sizeof (MonoObject);
1209 if (field_offset > max_offset_of_reference_fields) {
1210 max_offset_of_reference_fields = field_offset;
1212 number_of_reference_fields ++;
1214 MonoClass *field_class = mono_class_from_mono_type (field_type);
1215 if (field_class && mono_class_is_valuetype (field_class)) {
1216 ClassIdMappingElement *field_id = class_id_mapping_element_get (field_class);
1217 g_assert (field_id != NULL);
1219 if (field_id->data.layout.slots == CLASS_LAYOUT_NOT_INITIALIZED) {
1220 if (field_id != klass_id) {
1221 #if (DEBUG_CLASS_BITMAPS)
1222 printf ("[recursively building bitmap for field %s]\n", mono_field_get_name (field));
1224 class_id_mapping_element_build_layout_bitmap (field_class, field_id);
1226 #if (DEBUG_CLASS_BITMAPS)
1227 printf ("[breaking recursive bitmap build for field %s]", mono_field_get_name (field));
1230 klass_id->data.bitmap.compact = 0;
1231 klass_id->data.layout.slots = 0;
1232 klass_id->data.layout.references = 0;
1236 if (field_id->data.layout.references > 0) {
1237 int field_offset = mono_field_get_offset (field) - sizeof (MonoObject);
1238 int max_offset_reference_in_field = (field_id->data.layout.slots - 1) * sizeof (gpointer);
1240 if ((field_offset + max_offset_reference_in_field) > max_offset_of_reference_fields) {
1241 max_offset_of_reference_fields = field_offset + max_offset_reference_in_field;
1244 number_of_reference_fields += field_id->data.layout.references;
1250 #if (DEBUG_CLASS_BITMAPS)
1251 printf ("[allocating bitmap for class %s.%s (references %d, max offset %d, slots %d)]", mono_class_get_namespace (klass), mono_class_get_name (klass), number_of_reference_fields, max_offset_of_reference_fields, (int)(max_offset_of_reference_fields / sizeof (gpointer)) + 1);
1253 if ((number_of_reference_fields == 0) && ((parent_id == NULL) || (parent_id->data.layout.references == 0))) {
1254 #if (DEBUG_CLASS_BITMAPS)
1255 printf ("[no references at all]");
1257 klass_id->data.bitmap.compact = 0;
1258 klass_id->data.layout.slots = 0;
1259 klass_id->data.layout.references = 0;
1261 if ((parent_id != NULL) && (parent_id->data.layout.references > 0)) {
1262 #if (DEBUG_CLASS_BITMAPS)
1263 printf ("[parent %s.%s has %d references in %d slots]", mono_class_get_namespace (parent_class), mono_class_get_name (parent_class), parent_id->data.layout.references, parent_id->data.layout.slots);
1265 klass_id->data.layout.slots = parent_id->data.layout.slots;
1266 klass_id->data.layout.references = parent_id->data.layout.references;
1268 #if (DEBUG_CLASS_BITMAPS)
1269 printf ("[no references from parent]");
1271 klass_id->data.layout.slots = 0;
1272 klass_id->data.layout.references = 0;
1275 if (number_of_reference_fields > 0) {
1276 klass_id->data.layout.slots += ((max_offset_of_reference_fields / sizeof (gpointer)) + 1);
1277 klass_id->data.layout.references += number_of_reference_fields;
1278 #if (DEBUG_CLASS_BITMAPS)
1279 printf ("[adding data, going to %d references in %d slots]", klass_id->data.layout.references, klass_id->data.layout.slots);
1283 if (klass_id->data.layout.slots <= CLASS_LAYOUT_PACKED_BITMAP_SIZE) {
1284 #if (DEBUG_CLASS_BITMAPS)
1285 printf ("[zeroing bitmap]");
1287 klass_id->data.bitmap.compact = 0;
1288 if ((parent_id != NULL) && (parent_id->data.layout.references > 0)) {
1289 #if (DEBUG_CLASS_BITMAPS)
1290 printf ("[copying compact father bitmap]");
1292 klass_id->data.bitmap.compact = parent_id->data.bitmap.compact;
1295 int size_of_bitmap = klass_id->data.layout.slots;
1296 BITS_TO_BYTES (size_of_bitmap);
1297 #if (DEBUG_CLASS_BITMAPS)
1298 printf ("[allocating %d bytes for bitmap]", size_of_bitmap);
1300 klass_id->data.bitmap.extended = g_malloc0 (size_of_bitmap);
1301 if ((parent_id != NULL) && (parent_id->data.layout.references > 0)) {
1302 int size_of_father_bitmap = parent_id->data.layout.slots;
1303 if (size_of_father_bitmap <= CLASS_LAYOUT_PACKED_BITMAP_SIZE) {
1305 #if (DEBUG_CLASS_BITMAPS)
1306 printf ("[copying %d bits from father bitmap]", size_of_father_bitmap);
1308 for (father_slot = 0; father_slot < size_of_father_bitmap; father_slot ++) {
1309 if (parent_id->data.bitmap.compact & (((guint64)1) << father_slot)) {
1310 klass_id->data.bitmap.extended [father_slot >> 3] |= (1 << (father_slot & 7));
1314 BITS_TO_BYTES (size_of_father_bitmap);
1315 #if (DEBUG_CLASS_BITMAPS)
1316 printf ("[copying %d bytes from father bitmap]", size_of_father_bitmap);
1318 memcpy (klass_id->data.bitmap.extended, parent_id->data.bitmap.extended, size_of_father_bitmap);
1324 #if (DEBUG_CLASS_BITMAPS)
1325 printf ("[starting filling iteration]\n");
1328 while ((field = mono_class_get_fields (klass, &iter)) != NULL) {
1329 MonoType* field_type = mono_field_get_type (field);
1330 // For now, skip static fields
1331 if (mono_field_get_flags (field) & 0x0010 /*FIELD_ATTRIBUTE_STATIC*/)
1334 #if (DEBUG_CLASS_BITMAPS)
1335 printf ("[Working on field %s]", mono_field_get_name (field));
1337 if (MONO_TYPE_IS_REFERENCE (field_type)) {
1338 int field_offset = mono_field_get_offset (field) - sizeof (MonoObject);
1340 g_assert ((field_offset % sizeof (gpointer)) == 0);
1341 field_slot = field_offset / sizeof (gpointer);
1342 if (klass_id->data.layout.slots <= CLASS_LAYOUT_PACKED_BITMAP_SIZE) {
1343 klass_id->data.bitmap.compact |= (((guint64)1) << field_slot);
1345 klass_id->data.bitmap.extended [field_slot >> 3] |= (1 << (field_slot & 7));
1347 #if (DEBUG_CLASS_BITMAPS)
1348 printf ("[reference at offset %d, slot %d]", field_offset, field_slot);
1351 MonoClass *field_class = mono_class_from_mono_type (field_type);
1352 if (field_class && mono_class_is_valuetype (field_class)) {
1353 ClassIdMappingElement *field_id = class_id_mapping_element_get (field_class);
1357 g_assert (field_id != NULL);
1358 field_offset = mono_field_get_offset (field) - sizeof (MonoObject);
1359 g_assert ((field_id->data.layout.references == 0) || ((field_offset % sizeof (gpointer)) == 0));
1360 field_slot = field_offset / sizeof (gpointer);
1361 #if (DEBUG_CLASS_BITMAPS)
1362 printf ("[value type at offset %d, slot %d, with %d references in %d slots]", field_offset, field_slot, field_id->data.layout.references, field_id->data.layout.slots);
1365 if (field_id->data.layout.references > 0) {
1367 if (field_id->data.layout.slots <= CLASS_LAYOUT_PACKED_BITMAP_SIZE) {
1368 for (sub_field_slot = 0; sub_field_slot < field_id->data.layout.slots; sub_field_slot ++) {
1369 if (field_id->data.bitmap.compact & (((guint64)1) << sub_field_slot)) {
1370 int actual_slot = field_slot + sub_field_slot;
1371 if (klass_id->data.layout.slots <= CLASS_LAYOUT_PACKED_BITMAP_SIZE) {
1372 klass_id->data.bitmap.compact |= (((guint64)1) << actual_slot);
1374 klass_id->data.bitmap.extended [actual_slot >> 3] |= (1 << (actual_slot & 7));
1379 for (sub_field_slot = 0; sub_field_slot < field_id->data.layout.slots; sub_field_slot ++) {
1380 if (field_id->data.bitmap.extended [sub_field_slot >> 3] & (1 << (sub_field_slot & 7))) {
1381 int actual_slot = field_slot + sub_field_slot;
1382 if (klass_id->data.layout.slots <= CLASS_LAYOUT_PACKED_BITMAP_SIZE) {
1383 klass_id->data.bitmap.compact |= (((guint64)1) << actual_slot);
1385 klass_id->data.bitmap.extended [actual_slot >> 3] |= (1 << (actual_slot & 7));
1394 #if (DEBUG_CLASS_BITMAPS)
1397 printf ("\nLayot of class \"%s.%s\": references %d, slots %d, bitmap {", mono_class_get_namespace (klass), mono_class_get_name (klass), klass_id->data.layout.references, klass_id->data.layout.slots);
1398 for (slot = 0; slot < klass_id->data.layout.slots; slot ++) {
1399 if (klass_id->data.layout.slots <= CLASS_LAYOUT_PACKED_BITMAP_SIZE) {
1400 if (klass_id->data.bitmap.compact & (((guint64)1) << slot)) {
1406 if (klass_id->data.bitmap.extended [slot >> 3] & (1 << (slot & 7))) {
/*
 * Create and register the id-mapping element for METHOD: assign the next
 * sequential method id, build a readable "name (signature)" label, push the
 * element on the profiler's list of not-yet-written methods, and index it
 * by MonoMethod* in the methods hash table.
 * NOTE(review): the numbering gaps show this listing elides lines here
 * (e.g. the g_free of `signature` and the `return result;`) — do not
 * conclude a leak from the visible text alone.
 */
1420 static MethodIdMappingElement*
1421 method_id_mapping_element_new (MonoMethod *method) {
1422 MethodIdMappingElement *result = g_new (MethodIdMappingElement, 1);
1423 char *signature = mono_signature_get_desc (mono_method_signature (method), TRUE);
/* Printable identifier used when the mapping block is dumped to the output. */
1425 result->name = g_strdup_printf ("%s (%s)", mono_method_get_name (method), signature);
1427 result->method = method;
/* Link at the head of the "unwritten" list so the writer can flush it later. */
1428 result->next_unwritten = profiler->methods->unwritten;
1429 profiler->methods->unwritten = result;
/* Ids are handed out sequentially, starting from 1 (see method_id_mapping_new). */
1430 result->id = profiler->methods->next_id;
1431 profiler->methods->next_id ++;
1432 g_hash_table_insert (profiler->methods->table, method, result);
/* JIT code range is unknown until the method is actually compiled. */
1434 result->data.code_start = NULL;
1435 result->data.code_size = 0;
1437 #if (DEBUG_MAPPING_EVENTS)
1438 printf ("Created new METHOD mapping element \"%s\" (%p)[%d]\n", result->name, method, result->id);
/* GDestroyNotify for MethodIdMappingElement values stored in the hash table.
 * (The g_free calls of the element's fields are on elided lines.) */
1445 method_id_mapping_element_destroy (gpointer element) {
1446 MethodIdMappingElement *e = (MethodIdMappingElement*) element;
/* GDestroyNotify for ClassIdMappingElement values: releases the extended
 * (heap-allocated) layout bitmap when one was allocated. */
1453 class_id_mapping_element_destroy (gpointer element) {
1454 ClassIdMappingElement *e = (ClassIdMappingElement*) element;
/* The extended bitmap exists only when the layout was computed AND it did
 * not fit in the packed (inline) representation. */
1457 if ((e->data.layout.slots != CLASS_LAYOUT_NOT_INITIALIZED) && (e->data.layout.slots > CLASS_LAYOUT_PACKED_BITMAP_SIZE))
1458 g_free (e->data.bitmap.extended);
/* Allocate the method id map: a MonoMethod* -> element hash table plus the
 * "unwritten" list head and the next free id (ids start at 1; 0 is reserved). */
1462 static MethodIdMapping*
1463 method_id_mapping_new (void) {
1464 MethodIdMapping *result = g_new (MethodIdMapping, 1);
1465 //result->table = g_hash_table_new_full (mono_aligned_addr_hash, NULL, NULL, method_id_mapping_element_destroy);
/* Keys are raw pointers (g_direct_hash, default equality); values are owned
 * by the table and freed through method_id_mapping_element_destroy. */
1466 result->table = g_hash_table_new_full (g_direct_hash, NULL, NULL, method_id_mapping_element_destroy);
1467 result->unwritten = NULL;
1468 result->next_id = 1;
/* Same construction for the class id map. */
1472 static ClassIdMapping*
1473 class_id_mapping_new (void) {
1474 ClassIdMapping *result = g_new (ClassIdMapping, 1);
1475 //result->table = g_hash_table_new_full (mono_aligned_addr_hash, NULL, NULL, class_id_mapping_element_destroy);
1476 result->table = g_hash_table_new_full (g_direct_hash, NULL, NULL, class_id_mapping_element_destroy);
1477 result->unwritten = NULL;
1478 result->next_id = 1;
/* Destroy the method map; the table's value-destroy callback frees elements. */
1483 method_id_mapping_destroy (MethodIdMapping *map) {
1484 g_hash_table_destroy (map->table);
/* Destroy the class map; the table's value-destroy callback frees elements. */
1489 class_id_mapping_destroy (ClassIdMapping *map) {
1490 g_hash_table_destroy (map->table);
1494 #if (DEBUG_LOAD_EVENTS)
1496 print_load_event (const char *event_name, GHashTable *table, gpointer item, LoadedElement *element);
/*
 * Loaded-element lifecycle tracking (modules, assemblies, appdomains).
 * Each tracked item gets a LoadedElement keyed by its pointer in one of the
 * profiler's per-kind hash tables; counters record load/unload start/end
 * times so the writer can emit LOADED/UNLOADED events with durations.
 */
/* Begin tracking ITEM: allocate a zeroed element, give it the next free id,
 * timestamp the load start and register it in TABLE. */
1499 static LoadedElement*
1500 loaded_element_load_start (GHashTable *table, gpointer item) {
1501 LoadedElement *element = g_new0 (LoadedElement, 1);
1502 element->id = profiler->loaded_element_next_free_id;
1503 profiler->loaded_element_next_free_id ++;
1504 #if (DEBUG_LOAD_EVENTS)
1505 print_load_event ("LOAD START", table, item, element);
1507 MONO_PROFILER_GET_CURRENT_COUNTER (element->load_start_counter);
1508 g_hash_table_insert (table, item, element);
/* Finish loading ITEM: timestamp the end, take ownership of NAME and mark
 * the element loaded. The element must already exist (load_start ran). */
1512 static LoadedElement*
1513 loaded_element_load_end (GHashTable *table, gpointer item, char *name) {
1514 LoadedElement *element = g_hash_table_lookup (table, item);
1515 #if (DEBUG_LOAD_EVENTS)
1516 print_load_event ("LOAD END", table, item, element);
1518 g_assert (element != NULL);
1519 MONO_PROFILER_GET_CURRENT_COUNTER (element->load_end_counter);
/* NAME is stored, not copied: freed later by loaded_element_destroy. */
1520 element->name = name;
1521 element->loaded = TRUE;
/* Record the unload-start timestamp for an already-tracked ITEM. */
1525 static LoadedElement*
1526 loaded_element_unload_start (GHashTable *table, gpointer item) {
1527 LoadedElement *element = g_hash_table_lookup (table, item);
1528 #if (DEBUG_LOAD_EVENTS)
1529 print_load_event ("UNLOAD START", table, item, element);
1531 g_assert (element != NULL);
1532 MONO_PROFILER_GET_CURRENT_COUNTER (element->unload_start_counter);
/* Record the unload-end timestamp and mark the element unloaded. */
1536 static LoadedElement*
1537 loaded_element_unload_end (GHashTable *table, gpointer item) {
1538 LoadedElement *element = g_hash_table_lookup (table, item);
1539 #if (DEBUG_LOAD_EVENTS)
1540 print_load_event ("UNLOAD END", table, item, element);
1542 g_assert (element != NULL);
1543 MONO_PROFILER_GET_CURRENT_COUNTER (element->unload_end_counter);
1544 element->unloaded = TRUE;
/* Plain lookup helper: returns the element for ITEM, or NULL. */
1548 static LoadedElement*
1549 loaded_element_find (GHashTable *table, gpointer item) {
1550 LoadedElement *element = g_hash_table_lookup (table, item);
/* Return the numeric id assigned to ITEM (fallback path on elided lines). */
1555 loaded_element_get_id (GHashTable *table, gpointer item) {
1556 LoadedElement *element = loaded_element_find (table, item);
1557 if (element != NULL) {
/* GDestroyNotify for LoadedElement values: frees the owned name string.
 * (The NULL guard is redundant — g_free(NULL) is a no-op — but harmless.) */
1565 loaded_element_destroy (gpointer element) {
1566 if (((LoadedElement*)element)->name)
1567 g_free (((LoadedElement*)element)->name);
1571 #if (DEBUG_LOAD_EVENTS)
/* Debug-only tracer: prints which kind of item (assembly/module/appdomain)
 * a load/unload event refers to, identified by which table it lives in. */
1573 print_load_event (const char *event_name, GHashTable *table, gpointer item, LoadedElement *element) {
1574 const char* item_name;
/* Identify the item kind by comparing TABLE against the profiler's three
 * per-kind tables; item_info is built on a heap string (g_strdup_printf). */
1577 if (table == profiler->loaded_assemblies) {
1578 //item_info = g_strdup_printf("ASSEMBLY %p (dynamic %d)", item, mono_image_is_dynamic (mono_assembly_get_image((MonoAssembly*)item)));
1579 item_info = g_strdup_printf("ASSEMBLY %p", item);
1580 } else if (table == profiler->loaded_modules) {
1581 //item_info = g_strdup_printf("MODULE %p (dynamic %d)", item, mono_image_is_dynamic ((MonoImage*)item));
1582 item_info = g_strdup_printf("MODULE %p", item);
1583 } else if (table == profiler->loaded_appdomains) {
1584 item_info = g_strdup_printf("APPDOMAIN %p (id %d)", item, mono_domain_get_id ((MonoDomain*)item));
/* Any other table is a programming error. */
1587 g_assert_not_reached ();
/* The element (and hence its name) may legitimately be NULL on LOAD START. */
1590 if (element != NULL) {
1591 item_name = element->name;
1593 item_name = "<NULL>";
1596 printf ("%s EVENT for %s (%s [id %d])\n", event_name, item_info, item_name, element->id);
/* Walk and free a singly-linked chain of heap-shot object buffers. */
1602 profiler_heap_shot_object_buffers_destroy (ProfilerHeapShotObjectBuffer *buffer) {
1603 while (buffer != NULL) {
1604 ProfilerHeapShotObjectBuffer *next = buffer->next;
1605 #if DEBUG_HEAP_PROFILER
1606 printf ("profiler_heap_shot_object_buffers_destroy: destroyed buffer %p (%p-%p)\n", buffer, & (buffer->buffer [0]), buffer->end);
/*
 * Allocate a fresh per-thread object buffer, make it the head of DATA's
 * buffer list, then garbage-collect any fully-processed buffer further down
 * the chain (a trailing buffer whose slots were all consumed is unlinked
 * and destroyed).
 */
1613 static ProfilerHeapShotObjectBuffer*
1614 profiler_heap_shot_object_buffer_new (ProfilerPerThreadData *data) {
1615 ProfilerHeapShotObjectBuffer *buffer;
1616 ProfilerHeapShotObjectBuffer *result = g_new (ProfilerHeapShotObjectBuffer, 1);
/* Cursors cover the inline array buffer[0 .. PROFILER_HEAP_SHOT_OBJECT_BUFFER_SIZE). */
1617 result->next_free_slot = & (result->buffer [0]);
1618 result->end = & (result->buffer [PROFILER_HEAP_SHOT_OBJECT_BUFFER_SIZE]);
1619 result->first_unprocessed_slot = & (result->buffer [0]);
1620 result->next = data->heap_shot_object_buffers;
1621 data->heap_shot_object_buffers = result;
1622 #if DEBUG_HEAP_PROFILER
1623 printf ("profiler_heap_shot_object_buffer_new: created buffer %p (%p-%p)\n", result, result->next_free_slot, result->end);
/* Prune: if the buffer after `buffer` has been fully processed, drop the
 * rest of the chain from that point (it is no longer needed). */
1625 for (buffer = result; buffer != NULL; buffer = buffer->next) {
1626 ProfilerHeapShotObjectBuffer *last = buffer->next;
1627 if ((last != NULL) && (last->first_unprocessed_slot == last->end)) {
1628 buffer->next = NULL;
1629 profiler_heap_shot_object_buffers_destroy (last);
/*
 * Create a heap-shot write job for one GC collection. The job carries:
 *  - an optional chain of write buffers (only when unreachable-object
 *    reporting or a full heap dump was requested),
 *  - an optional per-class summary array (when collection_summary is on),
 *  - bookkeeping (collection number, whether a shot was explicitly asked).
 */
1636 static ProfilerHeapShotWriteJob*
1637 profiler_heap_shot_write_job_new (gboolean heap_shot_was_requested, gboolean dump_heap_data, guint32 collection) {
1638 ProfilerHeapShotWriteJob *job = g_new (ProfilerHeapShotWriteJob, 1);
1640 job->next_unwritten = NULL;
1642 if (profiler->action_flags.unreachable_objects || dump_heap_data) {
/* First buffer is allocated eagerly; cursor/start/end track the inline array. */
1643 job->buffers = g_new (ProfilerHeapShotWriteBuffer, 1);
1644 job->buffers->next = NULL;
/* last_next points at the link field to patch when appending the next buffer. */
1645 job->last_next = & (job->buffers->next);
1646 job->start = & (job->buffers->buffer [0]);
1647 job->cursor = job->start;
1648 job->end = & (job->buffers->buffer [PROFILER_HEAP_SHOT_WRITE_BUFFER_SIZE]);
1650 job->buffers = NULL;
1651 job->last_next = NULL;
1656 job->full_buffers = 0;
/* Summary capacity is sized by the number of class ids handed out so far. */
1658 if (profiler->action_flags.collection_summary) {
1659 job->summary.capacity = profiler->classes->next_id;
1660 job->summary.per_class_data = g_new0 (ProfilerHeapShotClassSummary, job->summary.capacity);
1662 job->summary.capacity = 0;
1663 job->summary.per_class_data = NULL;
1666 job->heap_shot_was_requested = heap_shot_was_requested;
1667 job->collection = collection;
1668 job->dump_heap_data = dump_heap_data;
1669 #if DEBUG_HEAP_PROFILER
1670 printf ("profiler_heap_shot_write_job_new: created job %p with buffer %p(%p-%p) (collection %d, dump %d)\n", job, job->buffers, job->start, job->end, collection, dump_heap_data);
/* A job has something to write if it holds buffers or a non-empty summary. */
1676 profiler_heap_shot_write_job_has_data (ProfilerHeapShotWriteJob *job) {
1677 return ((job->buffers != NULL) || (job->summary.capacity > 0));
/* Grow JOB by one buffer (called when the current one is full), appending it
 * via last_next, storing VALUE as its first slot, and resetting the cursors. */
1681 profiler_heap_shot_write_job_add_buffer (ProfilerHeapShotWriteJob *job, gpointer value) {
1682 ProfilerHeapShotWriteBuffer *buffer = g_new (ProfilerHeapShotWriteBuffer, 1);
1683 buffer->next = NULL;
1684 *(job->last_next) = buffer;
1685 job->last_next = & (buffer->next);
1686 job->full_buffers ++;
1687 buffer->buffer [0] = value;
1688 job->start = & (buffer->buffer [0]);
/* Slot 0 was just consumed by VALUE, so the cursor starts at slot 1. */
1689 job->cursor = & (buffer->buffer [1]);
1690 job->end = & (buffer->buffer [PROFILER_HEAP_SHOT_WRITE_BUFFER_SIZE]);
1691 #if DEBUG_HEAP_PROFILER
1692 printf ("profiler_heap_shot_write_job_add_buffer: in job %p, added buffer %p(%p-%p) with value %p at address %p (cursor now %p)\n", job, buffer, job->start, job->end, value, &(buffer->buffer [0]), job->cursor);
1694 ProfilerHeapShotWriteBuffer *current_buffer;
1695 for (current_buffer = job->buffers; current_buffer != NULL; current_buffer = current_buffer->next) {
1696 printf ("profiler_heap_shot_write_job_add_buffer: now job %p has buffer %p\n", job, current_buffer);
/* Release all buffers and the summary array of JOB (but not JOB itself). */
1703 profiler_heap_shot_write_job_free_buffers (ProfilerHeapShotWriteJob *job) {
1704 ProfilerHeapShotWriteBuffer *buffer = job->buffers;
1706 while (buffer != NULL) {
1707 ProfilerHeapShotWriteBuffer *next = buffer->next;
1708 #if DEBUG_HEAP_PROFILER
1709 printf ("profiler_heap_shot_write_job_free_buffers: in job %p, freeing buffer %p\n", job, buffer);
1715 job->buffers = NULL;
1717 if (job->summary.per_class_data != NULL) {
1718 g_free (job->summary.per_class_data);
1719 job->summary.per_class_data = NULL;
1721 job->summary.capacity = 0;
1725 profiler_heap_shot_write_block (ProfilerHeapShotWriteJob *job);
/*
 * Writer-thread side: walk the pending job list and flush every job that
 * has data, clearing next_unwritten links as jobs are written so later
 * passes (and the free routine below) can tell written from pending jobs.
 * The loop repeats until a pass completes without finding work (`done`).
 */
1728 profiler_process_heap_shot_write_jobs (void) {
1729 gboolean done = FALSE;
1732 ProfilerHeapShotWriteJob *current_job = profiler->heap_shot_write_jobs;
1733 ProfilerHeapShotWriteJob *previous_job = NULL;
1734 ProfilerHeapShotWriteJob *next_job;
1737 while (current_job != NULL) {
1738 next_job = current_job->next_unwritten;
1740 if (next_job != NULL) {
1741 if (profiler_heap_shot_write_job_has_data (current_job)) {
/* The successor has nothing to write: detach it from the unwritten chain. */
1744 if (! profiler_heap_shot_write_job_has_data (next_job)) {
1745 current_job->next_unwritten = NULL;
1749 if (profiler_heap_shot_write_job_has_data (current_job)) {
1750 LOG_WRITER_THREAD ("profiler_process_heap_shot_write_jobs: writing...");
1751 profiler_heap_shot_write_block (current_job);
1752 LOG_WRITER_THREAD ("profiler_process_heap_shot_write_jobs: done");
/* Mark the predecessor's chain as fully written up to here. */
1753 if (previous_job != NULL) {
1754 previous_job->next_unwritten = NULL;
1759 previous_job = current_job;
1760 current_job = next_job;
/*
 * Free the jobs that have already been written: skip past the still-unwritten
 * prefix (following next_unwritten), cut the list there, then free the
 * remainder (buffers first, then the job structs).
 */
1766 profiler_free_heap_shot_write_jobs (void) {
1767 ProfilerHeapShotWriteJob *current_job = profiler->heap_shot_write_jobs;
1768 ProfilerHeapShotWriteJob *next_job;
1770 if (current_job != NULL) {
1771 while (current_job->next_unwritten != NULL) {
1772 #if DEBUG_HEAP_PROFILER
1773 printf ("profiler_free_heap_shot_write_jobs: job %p must not be freed\n", current_job);
1775 current_job = current_job->next_unwritten;
/* Detach the written tail from the kept prefix before freeing it. */
1778 next_job = current_job->next;
1779 current_job->next = NULL;
1780 current_job = next_job;
1782 while (current_job != NULL) {
1783 #if DEBUG_HEAP_PROFILER
1784 printf ("profiler_free_heap_shot_write_jobs: job %p will be freed\n", current_job);
1786 next_job = current_job->next;
1787 profiler_heap_shot_write_job_free_buffers (current_job);
1788 g_free (current_job);
1789 current_job = next_job;
/* Shutdown path: unconditionally free every job on the list. */
1795 profiler_destroy_heap_shot_write_jobs (void) {
1796 ProfilerHeapShotWriteJob *current_job = profiler->heap_shot_write_jobs;
1797 ProfilerHeapShotWriteJob *next_job;
1799 while (current_job != NULL) {
1800 next_job = current_job->next;
1801 profiler_heap_shot_write_job_free_buffers (current_job);
1802 g_free (current_job);
1803 current_job = next_job;
/* Push JOB on the global pending list; next_unwritten mirrors next so the
 * writer knows the whole chain from here down is still unwritten. */
1808 profiler_add_heap_shot_write_job (ProfilerHeapShotWriteJob *job) {
1809 job->next = profiler->heap_shot_write_jobs;
1810 job->next_unwritten = job->next;
1811 profiler->heap_shot_write_jobs = job;
1812 #if DEBUG_HEAP_PROFILER
1813 printf ("profiler_add_heap_shot_write_job: added job %p\n", job);
/*
 * STORE_ALLOCATED_OBJECT(d, o): record object O in per-thread data D's
 * current heap-shot object buffer; when the buffer is full, allocate a new
 * one (which also becomes the list head) and store O there instead.
 * The MESSAGE1/MESSAGE2 helpers expand to tracing printfs only when
 * DEBUG_HEAP_PROFILER is set, and to nothing otherwise.
 * (No comments are placed inside the macro bodies: the trailing backslashes
 * make them single preprocessor lines.)
 */
1817 #if DEBUG_HEAP_PROFILER
1818 #define STORE_ALLOCATED_OBJECT_MESSAGE1(d,o) printf ("STORE_ALLOCATED_OBJECT[TID %ld]: storing object %p at address %p\n", (d)->thread_id, (o), (d)->heap_shot_object_buffers->next_free_slot)
1819 #define STORE_ALLOCATED_OBJECT_MESSAGE2(d,o) printf ("STORE_ALLOCATED_OBJECT[TID %ld]: storing object %p at address %p in new buffer %p\n", (d)->thread_id, (o), buffer->next_free_slot, buffer)
1821 #define STORE_ALLOCATED_OBJECT_MESSAGE1(d,o)
1822 #define STORE_ALLOCATED_OBJECT_MESSAGE2(d,o)
1824 #define STORE_ALLOCATED_OBJECT(d,o) do {\
1825 if ((d)->heap_shot_object_buffers->next_free_slot < (d)->heap_shot_object_buffers->end) {\
1826 STORE_ALLOCATED_OBJECT_MESSAGE1 ((d), (o));\
1827 *((d)->heap_shot_object_buffers->next_free_slot) = (o);\
1828 (d)->heap_shot_object_buffers->next_free_slot ++;\
1830 ProfilerHeapShotObjectBuffer *buffer = profiler_heap_shot_object_buffer_new (d);\
1831 STORE_ALLOCATED_OBJECT_MESSAGE2 ((d), (o));\
1832 *((buffer)->next_free_slot) = (o);\
1833 (buffer)->next_free_slot ++;\
/*
 * Allocate the per-thread profiler state: an event buffer of BUFFER_SIZE
 * entries (with write/map/flush cursors), the thread id, an optional
 * heap-shot object buffer (only if an action that records objects is on),
 * and the shadow call stack (real or empty depending on track_stack).
 */
1837 static ProfilerPerThreadData*
1838 profiler_per_thread_data_new (guint32 buffer_size)
1840 ProfilerPerThreadData *data = g_new (ProfilerPerThreadData, 1);
1842 data->events = g_new0 (ProfilerEventData, buffer_size);
1843 data->next_free_event = data->events;
1844 data->next_unreserved_event = data->events;
/* Last slot is kept as a sentinel: end_event is events + (buffer_size - 1). */
1845 data->end_event = data->events + (buffer_size - 1);
1846 data->first_unwritten_event = data->events;
1847 data->first_unmapped_event = data->events;
1848 MONO_PROFILER_GET_CURRENT_COUNTER (data->start_event_counter);
1849 data->last_event_counter = data->start_event_counter;
1850 data->thread_id = CURRENT_THREAD_ID ();
1851 data->heap_shot_object_buffers = NULL;
/* Object buffers are needed by any mode that records per-object data. */
1852 if ((profiler->action_flags.unreachable_objects == TRUE) ||
1853 (profiler->action_flags.heap_shot == TRUE) ||
1854 (profiler->action_flags.collection_summary == TRUE)) {
1855 profiler_heap_shot_object_buffer_new (data);
1857 if (profiler->action_flags.track_stack) {
1858 thread_stack_initialize (&(data->stack), 64);
1860 thread_stack_initialize_empty (&(data->stack));
/* Tear down everything profiler_per_thread_data_new allocated. */
1866 profiler_per_thread_data_destroy (ProfilerPerThreadData *data) {
1867 g_free (data->events);
1868 profiler_heap_shot_object_buffers_destroy (data->heap_shot_object_buffers);
1869 thread_stack_free (&(data->stack));
/*
 * Allocate the statistical (sampling) hit buffer. Each sample can carry a
 * call chain, so the raw buffer holds buffer_size * (chain_depth + 1) hits,
 * while end_index counts whole samples (buffer_size).
 */
1873 static ProfilerStatisticalData*
1874 profiler_statistical_data_new (MonoProfiler *profiler) {
1875 int buffer_size = profiler->statistical_buffer_size * (profiler->statistical_call_chain_depth + 1);
1876 ProfilerStatisticalData *data = g_new (ProfilerStatisticalData, 1);
1878 data->hits = g_new0 (ProfilerStatisticalHit, buffer_size);
1879 data->next_free_index = 0;
1880 data->end_index = profiler->statistical_buffer_size;
1881 data->first_unwritten_index = 0;
/* Free the hit buffer allocated above. */
1887 profiler_statistical_data_destroy (ProfilerStatisticalData *data) {
1888 g_free (data->hits);
/*
 * ProfilerCodeBufferArray is a B-tree-like index of JIT code buffers.
 * Level 0 nodes hold actual buffers; higher levels hold child arrays, with
 * each slot caching the [start,end) range its subtree covers so lookups can
 * binary-search by address.
 */
/* Create a node. With no CHILD the node is a fresh (empty, level-0 —
 * presumably, the level assignment is on an elided line) leaf; with a CHILD
 * the node becomes its parent, one level up, covering the child's range. */
1893 profiler_code_buffer_array_new (ProfilerCodeBufferArray *child) {
1894 ProfilerCodeBufferArray *result = g_new0 (ProfilerCodeBufferArray, 1);
1895 if (child == NULL) {
1898 result->level = child->level + 1;
1899 result->number_of_buffers = 1;
1900 result->buffers [0].info.data.sub_buffers = child;
1901 result->buffers [0].start = child->buffers [0].start;
1902 result->buffers [0].end = child->buffers [child->number_of_buffers - 1].end;
/* Recursively free an array: internal nodes free their subtrees first. */
1908 profiler_code_buffer_array_destroy (ProfilerCodeBufferArray *buffers) {
1909 if (buffers->level > 0) {
1911 for (i = 0; i < buffers->number_of_buffers; i++) {
1912 ProfilerCodeBufferArray *sub_buffers = buffers->buffers [i].info.data.sub_buffers;
1913 profiler_code_buffer_array_destroy (sub_buffers);
/* Full test: descend the rightmost spine; a tree is full only when every
 * node on that spine has all PROFILER_CODE_BUFFER_ARRAY_SIZE slots used. */
1920 profiler_code_buffer_array_is_full (ProfilerCodeBufferArray *buffers) {
1921 while (buffers->level > 0) {
1922 ProfilerCodeBufferArray *next;
1923 if (buffers->number_of_buffers < PROFILER_CODE_BUFFER_ARRAY_SIZE) {
1926 next = buffers->buffers [PROFILER_CODE_BUFFER_ARRAY_SIZE - 1].info.data.sub_buffers;
1927 if (next->level < (buffers->level - 1)) {
1932 return (buffers->number_of_buffers == PROFILER_CODE_BUFFER_ARRAY_SIZE);
/*
 * Insert BUFFER ([buffer, buffer+size), tagged with TYPE/DATA) into the
 * tree, returning the (possibly new) root:
 *  - NULL tree: start a fresh one;
 *  - full tree: grow a new root one level up and hang a new subtree off it;
 *  - internal node: recurse into the rightmost child and refresh its range;
 *  - leaf with room: append the buffer and record its type-specific datum.
 */
1935 static ProfilerCodeBufferArray*
1936 profiler_code_buffer_add (ProfilerCodeBufferArray *buffers, gpointer *buffer, int size, MonoProfilerCodeBufferType type, void *data) {
1937 if (buffers == NULL) {
1938 buffers = profiler_code_buffer_array_new (NULL);
1941 if (profiler_code_buffer_array_is_full (buffers)) {
1942 ProfilerCodeBufferArray *new_slot = profiler_code_buffer_add (NULL, buffer, size, type, data);
1943 buffers = profiler_code_buffer_array_new (buffers);
1944 buffers->buffers [buffers->number_of_buffers].info.data.sub_buffers = new_slot;
1945 buffers->buffers [buffers->number_of_buffers].start = new_slot->buffers [0].start;
1946 buffers->buffers [buffers->number_of_buffers].end = new_slot->buffers [new_slot->number_of_buffers - 1].end;
1947 buffers->number_of_buffers ++;
1948 } else if (buffers->level > 0) {
1949 ProfilerCodeBufferArray *new_slot = profiler_code_buffer_add (buffers->buffers [buffers->number_of_buffers - 1].info.data.sub_buffers, buffer, size, type, data);
1950 buffers->buffers [buffers->number_of_buffers - 1].info.data.sub_buffers = new_slot;
1951 buffers->buffers [buffers->number_of_buffers - 1].start = new_slot->buffers [0].start;
1952 buffers->buffers [buffers->number_of_buffers - 1].end = new_slot->buffers [new_slot->number_of_buffers - 1].end;
1954 buffers->buffers [buffers->number_of_buffers].start = buffer;
1955 buffers->buffers [buffers->number_of_buffers].end = (((guint8*) buffer) + size);
1956 buffers->buffers [buffers->number_of_buffers].info.type = type;
/* Only UNKNOWN and METHOD carry a meaningful payload; anything else is
 * normalized to UNKNOWN with a NULL datum (default branch). */
1958 case MONO_PROFILER_CODE_BUFFER_UNKNOWN:
1959 buffers->buffers [buffers->number_of_buffers].info.data.data = NULL;
1961 case MONO_PROFILER_CODE_BUFFER_METHOD:
1962 buffers->buffers [buffers->number_of_buffers].info.data.method = data;
1965 buffers->buffers [buffers->number_of_buffers].info.type = MONO_PROFILER_CODE_BUFFER_UNKNOWN;
1966 buffers->buffers [buffers->number_of_buffers].info.data.data = NULL;
1968 buffers->number_of_buffers ++;
/*
 * Find the leaf buffer containing ADDRESS: binary-search the slot ranges at
 * each level, then descend through sub_buffers until a level-0 slot whose
 * [start,end) contains ADDRESS is found. Returns NULL when no slot matches.
 */
1973 static ProfilerCodeBuffer*
1974 profiler_code_buffer_find (ProfilerCodeBufferArray *buffers, gpointer *address) {
1975 if (buffers != NULL) {
1976 ProfilerCodeBuffer *result = NULL;
1979 int high = buffers->number_of_buffers - 1;
1981 while (high != low) {
1982 int middle = low + ((high - low) >> 1);
/* Out-of-range fast exits: ADDRESS below the first or at/after the last. */
1984 if ((guint8*) address < (guint8*) buffers->buffers [low].start) {
1987 if ((guint8*) address >= (guint8*) buffers->buffers [high].end) {
1991 if ((guint8*) address < (guint8*) buffers->buffers [middle].start) {
1996 } else if ((guint8*) address >= (guint8*) buffers->buffers [middle].end) {
2007 if (((guint8*) address >= (guint8*) buffers->buffers [low].start) && ((guint8*) address < (guint8*) buffers->buffers [low].end)) {
2008 if (buffers->level == 0) {
2009 result = & (buffers->buffers [low]);
2011 buffers = buffers->buffers [low].info.data.sub_buffers;
2016 } while (result == NULL);
/*
 * Code-chunk tracking: the runtime reports JIT code regions ("chunks"); each
 * chunk owns a code-buffer array indexing the buffers allocated inside it.
 * The chunk table is kept sorted by address so lookups can use bsearch.
 */
/* Initialize CHUNK to cover [memory, memory+size) with an empty buffer index. */
2024 profiler_code_chunk_initialize (ProfilerCodeChunk *chunk, gpointer memory, gsize size) {
2025 chunk->buffers = profiler_code_buffer_array_new (NULL);
2026 chunk->destroyed = FALSE;
2027 chunk->start = memory;
2028 chunk->end = ((guint8*)memory) + size;
/* Release CHUNK's buffer index and clear its range. */
2032 profiler_code_chunk_cleanup (ProfilerCodeChunk *chunk) {
2033 if (chunk->buffers != NULL) {
2034 profiler_code_buffer_array_destroy (chunk->buffers);
2035 chunk->buffers = NULL;
2037 chunk->start = NULL;
/* Initialize the chunk table with an initial capacity of 32 entries. */
2042 profiler_code_chunks_initialize (ProfilerCodeChunks *chunks) {
2043 chunks->capacity = 32;
2044 chunks->chunks = g_new0 (ProfilerCodeChunk, 32);
2045 chunks->number_of_chunks = 0;
/* Clean up every chunk, then free and reset the table itself. */
2049 profiler_code_chunks_cleanup (ProfilerCodeChunks *chunks) {
2051 for (i = 0; i < chunks->number_of_chunks; i++) {
2052 profiler_code_chunk_cleanup (& (chunks->chunks [i]));
2054 chunks->capacity = 0;
2055 chunks->number_of_chunks = 0;
2056 g_free (chunks->chunks);
2057 chunks->chunks = NULL;
/* qsort comparator: orders chunks by address; overlapping chunks compare
 * equal (ranges are assumed disjoint in practice). */
2061 compare_code_chunks (const void* c1, const void* c2) {
2062 ProfilerCodeChunk *chunk1 = (ProfilerCodeChunk*) c1;
2063 ProfilerCodeChunk *chunk2 = (ProfilerCodeChunk*) c2;
2064 return ((guint8*) chunk1->end < (guint8*) chunk2->start) ? -1 : (((guint8*) chunk1->start >= (guint8*) chunk2->end) ? 1 : 0);
/* bsearch comparator: the key is a raw address; "equal" means the address
 * falls inside the chunk's [start,end) range. */
2068 compare_address_and_code_chunk (const void* a, const void* c) {
2069 gpointer address = (gpointer) a;
2070 ProfilerCodeChunk *chunk = (ProfilerCodeChunk*) c;
2071 return ((guint8*) address < (guint8*) chunk->start) ? -1 : (((guint8*) address >= (guint8*) chunk->end) ? 1 : 0);
/* Keep the table address-sorted so profiler_code_chunk_find can bsearch. */
2075 profiler_code_chunks_sort (ProfilerCodeChunks *chunks) {
2076 qsort (chunks->chunks, chunks->number_of_chunks, sizeof (ProfilerCodeChunk), compare_code_chunks);
/* Binary-search the chunk whose range contains ADDRESS (NULL if none). */
2079 static ProfilerCodeChunk*
2080 profiler_code_chunk_find (ProfilerCodeChunks *chunks, gpointer address) {
2081 return bsearch (address, chunks->chunks, chunks->number_of_chunks, sizeof (ProfilerCodeChunk), compare_address_and_code_chunk);
/* Append a chunk for [memory, memory+size), doubling the table when full,
 * then re-sort so the table stays address-ordered. */
2084 static ProfilerCodeChunk*
2085 profiler_code_chunk_new (ProfilerCodeChunks *chunks, gpointer memory, gsize size) {
2086 ProfilerCodeChunk *result;
2088 if (chunks->number_of_chunks == chunks->capacity) {
2089 ProfilerCodeChunk *new_chunks = g_new0 (ProfilerCodeChunk, chunks->capacity * 2);
2090 memcpy (new_chunks, chunks->chunks, chunks->capacity * sizeof (ProfilerCodeChunk));
2091 chunks->capacity *= 2;
2092 g_free (chunks->chunks);
2093 chunks->chunks = new_chunks;
2096 result = & (chunks->chunks [chunks->number_of_chunks]);
2097 chunks->number_of_chunks ++;
2098 profiler_code_chunk_initialize (result, memory, size);
2099 profiler_code_chunks_sort (chunks);
/* Index of CHUNK inside the table (pointer arithmetic on the array). */
2104 profiler_code_chunk_to_index (ProfilerCodeChunks *chunks, ProfilerCodeChunk *chunk) {
2105 return (int) (chunk - chunks->chunks);
/* Remove CHUNK: clean it up, then shift the following entries down.
 * NOTE(review): the memmove count (number_of_chunks - index) looks one
 * element too large — the entries after `index` number (number_of_chunks -
 * index - 1), so the copy reads chunks[number_of_chunks]; benign only while
 * capacity > number_of_chunks. Verify against the elided lines (the
 * number_of_chunks decrement is not visible here). */
2109 profiler_code_chunk_remove (ProfilerCodeChunks *chunks, ProfilerCodeChunk *chunk) {
2110 int index = profiler_code_chunk_to_index (chunks, chunk);
2112 profiler_code_chunk_cleanup (chunk);
2113 if ((index >= 0) && (index < chunks->number_of_chunks)) {
2114 memmove (chunk, chunk + 1, (chunks->number_of_chunks - index) * sizeof (ProfilerCodeChunk));
2118 /* This assumes the profiler lock is held */
2119 static ProfilerCodeBuffer*
2120 profiler_code_buffer_from_address (MonoProfiler *prof, gpointer address) {
2121 ProfilerCodeChunks *chunks = & (prof->code_chunks);
2123 ProfilerCodeChunk *chunk = profiler_code_chunk_find (chunks, address);
2124 if (chunk != NULL) {
2125 return profiler_code_buffer_find (chunk->buffers, address);
/* Runtime callback: a new JIT code chunk was allocated at ADDRESS. The
 * chunks-table NULL check guards against calls after shutdown/cleanup. */
2132 profiler_code_chunk_new_callback (MonoProfiler *prof, gpointer address, int size) {
2133 ProfilerCodeChunks *chunks = & (prof->code_chunks);
2135 if (prof->code_chunks.chunks != NULL) {
2137 profiler_code_chunk_new (chunks, address, size);
/* Runtime callback: the chunk containing ADDRESS was destroyed. */
2143 profiler_code_chunk_destroy_callback (MonoProfiler *prof, gpointer address) {
2144 ProfilerCodeChunks *chunks = & (prof->code_chunks);
2145 ProfilerCodeChunk *chunk;
2147 if (prof->code_chunks.chunks != NULL) {
2149 chunk = profiler_code_chunk_find (chunks, address);
2150 if (chunk != NULL) {
2151 profiler_code_chunk_remove (chunks, chunk);
/* Runtime callback: a code buffer was carved out of an existing chunk;
 * record it (with its type tag) in that chunk's buffer index. */
2158 profiler_code_buffer_new_callback (MonoProfiler *prof, gpointer address, int size, MonoProfilerCodeBufferType type, void *data) {
2159 ProfilerCodeChunks *chunks = & (prof->code_chunks);
2160 ProfilerCodeChunk *chunk;
2162 if (prof->code_chunks.chunks != NULL) {
2164 chunk = profiler_code_chunk_find (chunks, address);
2165 if (chunk != NULL) {
2166 chunk->buffers = profiler_code_buffer_add (chunk->buffers, address, size, type, data);
/*
 * Advance to the next file-write buffer, allocating one lazily (buffers are
 * kept linked and reused across blocks; see write_current_block, which
 * rewinds to the first buffer after flushing).
 */
2173 profiler_add_write_buffer (void) {
2174 if (profiler->current_write_buffer->next == NULL) {
/* Header struct plus its trailing data area in one allocation. */
2175 profiler->current_write_buffer->next = g_malloc (sizeof (ProfilerFileWriteBuffer) + PROFILER_FILE_WRITE_BUFFER_SIZE);
2176 profiler->current_write_buffer->next->next = NULL;
2178 //printf ("Added next buffer %p, to buffer %p\n", profiler->current_write_buffer->next, profiler->current_write_buffer);
2181 profiler->current_write_buffer = profiler->current_write_buffer->next;
2182 profiler->current_write_position = 0;
2183 profiler->full_write_buffers ++;
/* Free the whole chain of file-write buffers (shutdown path). */
2187 profiler_free_write_buffers (void) {
2188 ProfilerFileWriteBuffer *current_buffer = profiler->write_buffers;
2189 while (current_buffer != NULL) {
2190 ProfilerFileWriteBuffer *next_buffer = current_buffer->next;
2192 //printf ("Freeing write buffer %p, next is %p\n", current_buffer, next_buffer);
2194 g_free (current_buffer);
2195 current_buffer = next_buffer;
/* Append one byte to the current write buffer, rolling over to the next
 * buffer when the current one is full. (Single preprocessor definition —
 * no comments inside the backslash-continued body.) */
2199 #define WRITE_BYTE(b) do {\
2200 if (profiler->current_write_position >= PROFILER_FILE_WRITE_BUFFER_SIZE) {\
2201 profiler_add_write_buffer ();\
2203 profiler->current_write_buffer->buffer [profiler->current_write_position] = (b);\
2204 profiler->current_write_position ++;\
2207 #if (DEBUG_FILE_WRITES)
2208 static int bytes_written = 0;
/*
 * Flush the buffered data as one file block. The 10-byte little-endian
 * header is: block code (2 bytes), payload size (4 bytes), counter delta
 * since the previous header (4 bytes). After the header, every full buffer
 * is written, then the partial tail, and the buffer chain is rewound for
 * reuse (buffers are recycled, not freed).
 * NOTE(review): the `header` array declaration and some branches are on
 * elided lines (numbering gaps 2217-2218, 2222-2224).
 */
2212 write_current_block (guint16 code) {
/* Total payload: all full buffers plus the partially-filled current one. */
2213 guint32 size = (profiler->full_write_buffers * PROFILER_FILE_WRITE_BUFFER_SIZE) + profiler->current_write_position;
2214 ProfilerFileWriteBuffer *current_buffer = profiler->write_buffers;
2215 guint64 current_counter;
2216 guint32 counter_delta;
2219 MONO_PROFILER_GET_CURRENT_COUNTER (current_counter);
/* First block ever has no previous header to measure a delta against. */
2220 if (profiler->last_header_counter != 0) {
2221 counter_delta = current_counter - profiler->last_header_counter;
2225 profiler->last_header_counter = current_counter;
/* Serialize the header little-endian, byte by byte (endianness-neutral). */
2227 header [0] = code & 0xff;
2228 header [1] = (code >> 8) & 0xff;
2229 header [2] = size & 0xff;
2230 header [3] = (size >> 8) & 0xff;
2231 header [4] = (size >> 16) & 0xff;
2232 header [5] = (size >> 24) & 0xff;
2233 header [6] = counter_delta & 0xff;
2234 header [7] = (counter_delta >> 8) & 0xff;
2235 header [8] = (counter_delta >> 16) & 0xff;
2236 header [9] = (counter_delta >> 24) & 0xff;
2238 #if (DEBUG_FILE_WRITES)
2239 printf ("write_current_block: writing header (code %d) at offset %d\n", code, bytes_written);
2240 bytes_written += 10;
2242 WRITE_BUFFER (& (header [0]), 10);
/* Write each completely-filled buffer in order. */
2244 while ((current_buffer != NULL) && (profiler->full_write_buffers > 0)) {
2245 #if (DEBUG_FILE_WRITES)
2246 printf ("write_current_block: writing buffer (size %d)\n", PROFILER_FILE_WRITE_BUFFER_SIZE);
2247 bytes_written += PROFILER_FILE_WRITE_BUFFER_SIZE;
2249 WRITE_BUFFER (& (current_buffer->buffer [0]), PROFILER_FILE_WRITE_BUFFER_SIZE);
2250 profiler->full_write_buffers --;
2251 current_buffer = current_buffer->next;
/* Then the partially-filled tail buffer, if any bytes are pending. */
2253 if (profiler->current_write_position > 0) {
2254 #if (DEBUG_FILE_WRITES)
2255 printf ("write_current_block: writing last buffer (size %d)\n", profiler->current_write_position);
2256 bytes_written += profiler->current_write_position;
2258 WRITE_BUFFER (& (current_buffer->buffer [0]), profiler->current_write_position);
2261 #if (DEBUG_FILE_WRITES)
2262 printf ("write_current_block: buffers flushed (file size %d)\n", bytes_written);
/* Rewind: the buffer chain stays allocated for the next block. */
2265 profiler->current_write_buffer = profiler->write_buffers;
2266 profiler->current_write_position = 0;
2267 profiler->full_write_buffers = 0;
/*
 * Variable-length integer encoding: values are written 7 bits per
 * byte, least significant first; the final byte has the top bit set.
 */
2271 #define SEVEN_BITS_MASK (0x7f)
2272 #define EIGHT_BIT_MASK (0x80)
/*
 * write_uint32: emits `value` in the 7-bit varint format above.
 * NOTE(review): the `value >>= 7` step inside the loop is elided from
 * this listing — verify against the full source.
 */
2275 write_uint32 (guint32 value) {
2276 while (value > SEVEN_BITS_MASK) {
2277 WRITE_BYTE (value & SEVEN_BITS_MASK);
2280 WRITE_BYTE (value | EIGHT_BIT_MASK);
/* write_uint64: 64-bit variant of the same varint encoding. */
2283 write_uint64 (guint64 value) {
2284 while (value > SEVEN_BITS_MASK) {
2285 WRITE_BYTE (value & SEVEN_BITS_MASK);
2288 WRITE_BYTE (value | EIGHT_BIT_MASK);
/*
 * write_string: emits the bytes of a NUL-terminated string; the
 * terminator itself is presumably written by code elided from this
 * listing — confirm against the full source.
 */
2291 write_string (const char *string) {
2292 while (*string != 0) {
2293 WRITE_BYTE (*string);
2299 static void write_clock_data (void);
/*
 * write_directives_block: emits a DIRECTIVES file block telling the
 * reader which optional per-event payloads will be present (allocation
 * caller/stack info, object ids, ...).  The directive list ends with
 * MONO_PROFILER_DIRECTIVE_END and is bracketed by clock samples.
 * NOTE(review): how the `start` flag is used is not visible in this
 * listing.
 */
2301 write_directives_block (gboolean start) {
2302 write_clock_data ();
/* One directive id per enabled optional feature. */
2305 if (profiler->action_flags.save_allocation_caller) {
2306 write_uint32 (MONO_PROFILER_DIRECTIVE_ALLOCATIONS_CARRY_CALLER);
2308 if (profiler->action_flags.save_allocation_stack || profiler->action_flags.track_calls) {
2309 write_uint32 (MONO_PROFILER_DIRECTIVE_ALLOCATIONS_HAVE_STACK);
2311 if (profiler->action_flags.allocations_carry_id) {
2312 write_uint32 (MONO_PROFILER_DIRECTIVE_ALLOCATIONS_CARRY_ID);
/* These directives are emitted unconditionally. */
2314 write_uint32 (MONO_PROFILER_DIRECTIVE_LOADED_ELEMENTS_CARRY_ID);
2315 write_uint32 (MONO_PROFILER_DIRECTIVE_CLASSES_CARRY_ASSEMBLY_ID);
2316 write_uint32 (MONO_PROFILER_DIRECTIVE_METHODS_CARRY_WRAPPER_FLAG);
2318 write_uint32 (MONO_PROFILER_DIRECTIVE_END);
2320 write_clock_data ();
2321 write_current_block (MONO_PROFILER_FILE_BLOCK_KIND_DIRECTIVES);
/*
 * Heap-shot job buffer helpers.
 * WRITE_HEAP_SHOT_JOB_VALUE appends one pointer-sized value to a
 * ProfilerHeapShotWriteJob, growing the job's buffer chain on demand;
 * the *_MESSAGE variants are debug tracing only.  Further down,
 * GUINT_TO_POINTER/GPOINTER_TO_UINT are redefined so the integer type
 * always matches the platform pointer width, and
 * WRITE_HEAP_SHOT_JOB_VALUE_WITH_CODE tags a pointer with a
 * HeapProfilerJobValueCode in its low bits.
 * Multi-line macros: keep continuation lines contiguous.
 */
2324 #if DEBUG_HEAP_PROFILER
2325 #define WRITE_HEAP_SHOT_JOB_VALUE_MESSAGE(v,c) printf ("WRITE_HEAP_SHOT_JOB_VALUE: writing value %p at cursor %p\n", (v), (c))
2327 #define WRITE_HEAP_SHOT_JOB_VALUE_MESSAGE(v,c)
2329 #define WRITE_HEAP_SHOT_JOB_VALUE(j,v) do {\
2330 if ((j)->cursor < (j)->end) {\
2331 WRITE_HEAP_SHOT_JOB_VALUE_MESSAGE ((v), ((j)->cursor));\
2332 *((j)->cursor) = (v);\
2335 profiler_heap_shot_write_job_add_buffer (j, v);\
2340 #undef GUINT_TO_POINTER
2341 #undef GPOINTER_TO_UINT
2342 #if (SIZEOF_VOID_P == 4)
2343 #define GUINT_TO_POINTER(u) ((void*)(guint32)(u))
2344 #define GPOINTER_TO_UINT(p) ((guint32)(void*)(p))
2345 #elif (SIZEOF_VOID_P == 8)
2346 #define GUINT_TO_POINTER(u) ((void*)(guint64)(u))
2347 #define GPOINTER_TO_UINT(p) ((guint64)(void*)(p))
2349 #error Bad size of void pointer
2352 #define WRITE_HEAP_SHOT_JOB_VALUE_WITH_CODE(j,v,c) WRITE_HEAP_SHOT_JOB_VALUE (j, GUINT_TO_POINTER (GPOINTER_TO_UINT (v)|(c)))
2354 #if DEBUG_HEAP_PROFILER
2355 #define UPDATE_JOB_BUFFER_CURSOR_MESSAGE() printf ("profiler_heap_shot_write_block[UPDATE_JOB_BUFFER_CURSOR]: in job %p, moving to buffer %p and cursor %p\n", job, buffer, cursor)
2357 #define UPDATE_JOB_BUFFER_CURSOR_MESSAGE()
/*
 * UPDATE_JOB_BUFFER_CURSOR: advances the read cursor over a job's
 * buffer chain, hopping to the next buffer when the current one is
 * exhausted.  Relies on `buffer`, `cursor`, `end` locals in the caller.
 */
2359 #define UPDATE_JOB_BUFFER_CURSOR() do {\
2361 if (cursor >= end) {\
2362 buffer = buffer->next;\
2363 if (buffer != NULL) {\
2364 cursor = & (buffer->buffer [0]);\
2365 if (buffer->next != NULL) {\
2366 end = & (buffer->buffer [PROFILER_HEAP_SHOT_WRITE_BUFFER_SIZE]);\
2374 UPDATE_JOB_BUFFER_CURSOR_MESSAGE ();\
/*
 * profiler_heap_shot_write_data_block: serializes one heap-shot job
 * into a HEAP_DATA file block.  Emits the job's timing and collection
 * number, then walks the job's value stream: each entry is a pointer
 * tagged (low bits) with a HeapProfilerJobValueCode.
 *   HEAP_CODE_FREE_OBJECT_CLASS -> class pointer + size of an
 *     unreachable object;
 *   HEAP_CODE_OBJECT -> live object pointer followed by its reference
 *     list (count was stored in the next slot).
 * NOTE(review): several lines (declarations, else-branches, #endif
 * lines, loop decrements) are elided from this listing.
 */
2378 profiler_heap_shot_write_data_block (ProfilerHeapShotWriteJob *job) {
2379 ProfilerHeapShotWriteBuffer *buffer;
2382 guint64 start_counter;
2384 guint64 end_counter;
/* Job-recorded timing first, then the time at which we write it out. */
2387 write_uint64 (job->start_counter);
2388 write_uint64 (job->start_time);
2389 write_uint64 (job->end_counter);
2390 write_uint64 (job->end_time);
2391 write_uint32 (job->collection);
2392 MONO_PROFILER_GET_CURRENT_COUNTER (start_counter);
2393 MONO_PROFILER_GET_CURRENT_TIME (start_time);
2394 write_uint64 (start_counter);
2395 write_uint64 (start_time);
2396 #if DEBUG_HEAP_PROFILER
2397 printf ("profiler_heap_shot_write_data_block: start writing job %p (start %p, end %p)...\n", job, & (job->buffers->buffer [0]), job->cursor);
/* Position the read cursor at the start of the job's buffer chain. */
2399 buffer = job->buffers;
2400 cursor = & (buffer->buffer [0]);
2401 if (buffer->next != NULL) {
2402 end = & (buffer->buffer [PROFILER_HEAP_SHOT_WRITE_BUFFER_SIZE]);
2406 if (cursor >= end) {
2409 #if DEBUG_HEAP_PROFILER
2410 printf ("profiler_heap_shot_write_data_block: in job %p, starting at buffer %p and cursor %p\n", job, buffer, cursor);
/* Main decode loop: one tagged value (plus payload slots) per entry. */
2412 while (cursor != NULL) {
2413 gpointer value = *cursor;
2414 HeapProfilerJobValueCode code = GPOINTER_TO_UINT (value) & HEAP_CODE_MASK;
2415 #if DEBUG_HEAP_PROFILER
2416 printf ("profiler_heap_shot_write_data_block: got value %p and code %d\n", value, code);
2419 UPDATE_JOB_BUFFER_CURSOR ();
2420 if (code == HEAP_CODE_FREE_OBJECT_CLASS) {
/* Strip the tag bits to recover the MonoClass pointer. */
2421 MonoClass *klass = GUINT_TO_POINTER (GPOINTER_TO_UINT (value) & (~ (guint64) HEAP_CODE_MASK));
2422 //MonoClass *klass = GUINT_TO_POINTER (GPOINTER_TO_UINT (value) % 4);
2423 ClassIdMappingElement *class_id;
2426 class_id = class_id_mapping_element_get (klass);
2427 if (class_id == NULL) {
2428 printf ("profiler_heap_shot_write_data_block: unknown class %p", klass);
2430 g_assert (class_id != NULL);
/* Class id shifted left so the low bits can carry the code tag. */
2431 write_uint32 ((class_id->id << 2) | HEAP_CODE_FREE_OBJECT_CLASS);
2433 size = GPOINTER_TO_UINT (*cursor);
2434 UPDATE_JOB_BUFFER_CURSOR ();
2435 write_uint32 (size);
2436 #if DEBUG_HEAP_PROFILER
2437 printf ("profiler_heap_shot_write_data_block: wrote unreachable object of class %p (id %d, size %d)\n", klass, class_id->id, size);
2439 } else if (code == HEAP_CODE_OBJECT) {
2440 MonoObject *object = GUINT_TO_POINTER (GPOINTER_TO_UINT (value) & (~ (guint64) HEAP_CODE_MASK));
2441 MonoClass *klass = mono_object_get_class (object);
2442 ClassIdMappingElement *class_id = class_id_mapping_element_get (klass);
2443 guint32 size = mono_object_get_size (object);
/* The slot after the object pointer holds its reference count. */
2444 guint32 references = GPOINTER_TO_UINT (*cursor);
2445 UPDATE_JOB_BUFFER_CURSOR ();
2447 if (class_id == NULL) {
2448 printf ("profiler_heap_shot_write_data_block: unknown class %p", klass);
2450 g_assert (class_id != NULL);
2452 write_uint64 (GPOINTER_TO_UINT (value));
2453 write_uint32 (class_id->id);
2454 write_uint32 (size);
2455 write_uint32 (references);
2456 #if DEBUG_HEAP_PROFILER
2457 printf ("profiler_heap_shot_write_data_block: writing object %p (references %d)\n", value, references);
/* Then one 64-bit entry per outgoing reference.  NOTE(review): the
 * decrement of `references` is elided from this listing. */
2460 while (references > 0) {
2461 gpointer reference = *cursor;
2462 write_uint64 (GPOINTER_TO_UINT (reference));
2463 UPDATE_JOB_BUFFER_CURSOR ();
2465 #if DEBUG_HEAP_PROFILER
2466 printf ("profiler_heap_shot_write_data_block: inside object %p, wrote reference %p)\n", value, reference);
2470 #if DEBUG_HEAP_PROFILER
2471 printf ("profiler_heap_shot_write_data_block: unknown code %d in value %p\n", code, value);
2473 g_assert_not_reached ();
/* Closing timing pair, then flush the block. */
2478 MONO_PROFILER_GET_CURRENT_COUNTER (end_counter);
2479 MONO_PROFILER_GET_CURRENT_TIME (end_time);
2480 write_uint64 (end_counter);
2481 write_uint64 (end_time);
2483 write_current_block (MONO_PROFILER_FILE_BLOCK_KIND_HEAP_DATA);
2484 #if DEBUG_HEAP_PROFILER
2485 printf ("profiler_heap_shot_write_data_block: writing job %p done.\n", job);
/*
 * profiler_heap_shot_write_summary_block: emits a HEAP_SUMMARY block
 * for one GC: per class id, the reachable and unreachable instance and
 * byte counts, skipping classes with no instances at all.  Bracketed
 * by counter/time samples.
 * NOTE(review): the per-entry write of the class `id` itself appears
 * on a line elided from this listing.
 */
2489 profiler_heap_shot_write_summary_block (ProfilerHeapShotWriteJob *job) {
2490 guint64 start_counter;
2492 guint64 end_counter;
2496 #if DEBUG_HEAP_PROFILER
2497 printf ("profiler_heap_shot_write_summary_block: start writing job %p...\n", job);
2499 MONO_PROFILER_GET_CURRENT_COUNTER (start_counter);
2500 MONO_PROFILER_GET_CURRENT_TIME (start_time);
2501 write_uint64 (start_counter);
2502 write_uint64 (start_time);
2504 write_uint32 (job->collection);
/* Only classes that had any instance this collection get an entry. */
2506 for (id = 0; id < job->summary.capacity; id ++) {
2507 if ((job->summary.per_class_data [id].reachable.instances > 0) || (job->summary.per_class_data [id].unreachable.instances > 0)) {
2509 write_uint32 (job->summary.per_class_data [id].reachable.instances);
2510 write_uint32 (job->summary.per_class_data [id].reachable.bytes);
2511 write_uint32 (job->summary.per_class_data [id].unreachable.instances);
2512 write_uint32 (job->summary.per_class_data [id].unreachable.bytes);
2517 MONO_PROFILER_GET_CURRENT_COUNTER (end_counter);
2518 MONO_PROFILER_GET_CURRENT_TIME (end_time);
2519 write_uint64 (end_counter);
2520 write_uint64 (end_time);
2522 write_current_block (MONO_PROFILER_FILE_BLOCK_KIND_HEAP_SUMMARY);
2523 #if DEBUG_HEAP_PROFILER
2524 printf ("profiler_heap_shot_write_summary_block: writing job %p done.\n", job);
/*
 * profiler_heap_shot_write_block: dispatcher for a finished heap-shot
 * job — writes the summary block and/or the full data block depending
 * on the configured action flags, then frees the job's buffers.
 */
2529 profiler_heap_shot_write_block (ProfilerHeapShotWriteJob *job) {
2530 #if DEBUG_HEAP_PROFILER
2531 printf ("profiler_heap_shot_write_block: working on job %p...\n", job);
2534 if (profiler->action_flags.collection_summary == TRUE) {
2535 profiler_heap_shot_write_summary_block (job);
2538 if ((profiler->action_flags.unreachable_objects == TRUE) || (profiler->action_flags.heap_shot == TRUE)) {
2539 profiler_heap_shot_write_data_block (job);
2542 profiler_heap_shot_write_job_free_buffers (job);
2543 #if DEBUG_HEAP_PROFILER
2544 printf ("profiler_heap_shot_write_block: work on job %p done.\n", job);
/*
 * write_element_load_block: emits a LOADED file block for a module,
 * assembly or appdomain: load start/end counters, loading thread id,
 * element id and name.  For assemblies it additionally writes the full
 * assembly identity (name, version, culture, public key token,
 * retargetable flag), with placeholder values when the identity cannot
 * be resolved.  Marks the element as written.
 * NOTE(review): the else-branch writing the placeholder version fields
 * is partly elided from this listing.
 */
2549 write_element_load_block (LoadedElement *element, guint8 kind, gsize thread_id, gpointer item) {
2551 write_uint64 (element->load_start_counter);
2552 write_uint64 (element->load_end_counter);
2553 write_uint64 (thread_id);
2554 write_uint32 (element->id);
2555 write_string (element->name);
2556 if (kind & MONO_PROFILER_LOADED_EVENT_ASSEMBLY) {
2557 MonoImage *image = mono_assembly_get_image ((MonoAssembly*) item);
2558 MonoAssemblyName aname;
2559 if (mono_assembly_fill_assembly_name (image, &aname)) {
2560 write_string (aname.name);
2561 write_uint32 (aname.major);
2562 write_uint32 (aname.minor);
2563 write_uint32 (aname.build);
2564 write_uint32 (aname.revision);
/* Empty culture means the neutral culture. */
2565 write_string (aname.culture && *aname.culture? aname.culture: "neutral");
2566 write_string (aname.public_key_token [0] ? (char *)aname.public_key_token : "null");
2567 /* Retargetable flag */
2568 write_uint32 ((aname.flags & 0x00000100) ? 1 : 0);
/* Fallback identity when the assembly name cannot be filled in. */
2570 write_string ("UNKNOWN");
2575 write_string ("neutral");
2576 write_string ("null");
2580 write_current_block (MONO_PROFILER_FILE_BLOCK_KIND_LOADED);
2581 element->load_written = TRUE;
/*
 * write_element_unload_block: emits an UNLOADED file block for an
 * element: unload start/end counters, unloading thread id, element id
 * and name.  Marks the element's unload as written.
 * NOTE(review): any use of the `kind` parameter is elided from this
 * listing.
 */
2585 write_element_unload_block (LoadedElement *element, guint8 kind, gsize thread_id) {
2587 write_uint64 (element->unload_start_counter);
2588 write_uint64 (element->unload_end_counter);
2589 write_uint64 (thread_id);
2590 write_uint32 (element->id);
2591 write_string (element->name);
2592 write_current_block (MONO_PROFILER_FILE_BLOCK_KIND_UNLOADED);
2593 element->unload_written = TRUE;
/*
 * write_clock_data: samples the current cycle counter and wall-clock
 * time and writes both, so readers can correlate counters with time.
 */
2597 write_clock_data (void) {
2601 MONO_PROFILER_GET_CURRENT_COUNTER (counter);
2602 MONO_PROFILER_GET_CURRENT_TIME (time);
2604 write_uint64 (counter);
2605 write_uint64 (time);
/*
 * write_mapping_block: emits a MAPPING file block translating the
 * numeric ids used by the event stream into names — one entry per
 * not-yet-written class (id, owning assembly id, name) and per method
 * (id, class id, wrapper flag, name).  Names are freed once written
 * and both unwritten lists are cleared.  No-op when there is nothing
 * new to write.
 */
2609 write_mapping_block (gsize thread_id) {
2610 ClassIdMappingElement *current_class;
2611 MethodIdMappingElement *current_method;
2613 if ((profiler->classes->unwritten == NULL) && (profiler->methods->unwritten == NULL))
2616 #if (DEBUG_MAPPING_EVENTS || DEBUG_FILE_WRITES)
2617 printf ("[write_mapping_block][TID %ld] START\n", thread_id);
2620 write_clock_data ();
2621 write_uint64 (thread_id);
/* Class mappings: id, assembly id, name. */
2623 for (current_class = profiler->classes->unwritten; current_class != NULL; current_class = current_class->next_unwritten) {
2624 MonoImage *image = mono_class_get_image (current_class->klass);
2625 MonoAssembly *assembly = mono_image_get_assembly (image);
2626 guint32 assembly_id = loaded_element_get_id (profiler->loaded_assemblies, assembly);
2627 write_uint32 (current_class->id);
2628 write_uint32 (assembly_id);
2629 write_string (current_class->name);
2630 #if (DEBUG_MAPPING_EVENTS)
2631 printf ("mapping CLASS (%d => %s)\n", current_class->id, current_class->name);
/* The name is only needed until it is written out once. */
2633 g_free (current_class->name);
2634 current_class->name = NULL;
2637 profiler->classes->unwritten = NULL;
/* Method mappings: id, class id, wrapper flag, name. */
2639 for (current_method = profiler->methods->unwritten; current_method != NULL; current_method = current_method->next_unwritten) {
2640 MonoMethod *method = current_method->method;
2641 MonoClass *klass = mono_method_get_class (method);
2642 ClassIdMappingElement *class_element = class_id_mapping_element_get (klass);
2643 g_assert (class_element != NULL);
2644 write_uint32 (current_method->id);
2645 write_uint32 (class_element->id);
/* NOTE(review): the wrapper-flag write inside this branch (and its
 * else counterpart) is elided from this listing. */
2646 if (method->wrapper_type != 0) {
2651 write_string (current_method->name);
2652 #if (DEBUG_MAPPING_EVENTS)
2653 printf ("mapping METHOD ([%d]%d => %s)\n", class_element?class_element->id:1, current_method->id, current_method->name);
2655 g_free (current_method->name);
2656 current_method->name = NULL;
2659 profiler->methods->unwritten = NULL;
2661 write_clock_data ();
2662 write_current_block (MONO_PROFILER_FILE_BLOCK_KIND_MAPPING);
2664 #if (DEBUG_MAPPING_EVENTS || DEBUG_FILE_WRITES)
2665 printf ("[write_mapping_block][TID %ld] END\n", thread_id);
/*
 * Packed event encoding: each serialized event starts with one byte
 * whose low MONO_PROFILER_PACKED_EVENT_CODE_BITS bits hold one of the
 * codes below and whose high bits carry either the first data bits
 * (MAKE_PACKED_CODE, which also shifts the consumed bits out of
 * `data`) or the event kind/code pair (MAKE_FULL_CODE).
 * NOTE(review): the `typedef enum {` opener and the macros' closing
 * `} while (0)` lines are elided from this listing.  Multi-line
 * macros: keep continuation lines contiguous.
 */
2670 MONO_PROFILER_PACKED_EVENT_CODE_METHOD_ENTER = 1,
2671 MONO_PROFILER_PACKED_EVENT_CODE_METHOD_EXIT_IMPLICIT = 2,
2672 MONO_PROFILER_PACKED_EVENT_CODE_METHOD_EXIT_EXPLICIT = 3,
2673 MONO_PROFILER_PACKED_EVENT_CODE_CLASS_ALLOCATION = 4,
2674 MONO_PROFILER_PACKED_EVENT_CODE_METHOD_EVENT = 5,
2675 MONO_PROFILER_PACKED_EVENT_CODE_CLASS_EVENT = 6,
2676 MONO_PROFILER_PACKED_EVENT_CODE_OTHER_EVENT = 7
2677 } MonoProfilerPackedEventCode;
2678 #define MONO_PROFILER_PACKED_EVENT_CODE_BITS 3
2679 #define MONO_PROFILER_PACKED_EVENT_DATA_BITS (8-MONO_PROFILER_PACKED_EVENT_CODE_BITS)
2680 #define MONO_PROFILER_PACKED_EVENT_DATA_MASK ((1<<MONO_PROFILER_PACKED_EVENT_DATA_BITS)-1)
2682 #define MONO_PROFILER_EVENT_MAKE_PACKED_CODE(result,data,base) do {\
2683 result = ((base)|((data & MONO_PROFILER_PACKED_EVENT_DATA_MASK) << MONO_PROFILER_PACKED_EVENT_CODE_BITS));\
2684 data >>= MONO_PROFILER_PACKED_EVENT_DATA_BITS;\
2686 #define MONO_PROFILER_EVENT_MAKE_FULL_CODE(result,code,kind,base) do {\
2687 result = ((base)|((((kind)<<4) | (code)) << MONO_PROFILER_PACKED_EVENT_CODE_BITS));\
/*
 * rewrite_last_written_stack: re-emits the thread's last written stack
 * as a fresh STACK_SECTION event, so that a new events block can be
 * decoded without the previous block (frames are written from the last
 * written index down; loop structure partly elided in this listing).
 */
2691 rewrite_last_written_stack (ProfilerThreadStack *stack) {
2693 int i = thread_stack_get_last_written_frame (stack);
2695 MONO_PROFILER_EVENT_MAKE_FULL_CODE (event_code, MONO_PROFILER_EVENT_STACK_SECTION, 0, MONO_PROFILER_PACKED_EVENT_CODE_OTHER_EVENT);
2696 WRITE_BYTE (event_code);
2702 write_uint32 (thread_stack_written_frame_at_index (stack, i));
2707 static ProfilerEventData*
/*
 * write_stack_section_event: serializes one STACK_SECTION event: the
 * index of the last frame already on file, the number of newly saved
 * frames, then one packed value per frame (method id << 1, low bit set
 * for frames captured during JIT).  Also records the frames into the
 * per-thread stack cache used by rewrite_last_written_stack.  Returns
 * a pointer past the consumed events (return statement elided here).
 */
2708 write_stack_section_event (ProfilerEventData *events, ProfilerPerThreadData *data) {
2709 int last_saved_frame = events->data.number;
2710 int saved_frames = events->value;
2714 MONO_PROFILER_EVENT_MAKE_FULL_CODE (event_code, MONO_PROFILER_EVENT_STACK_SECTION, 0, MONO_PROFILER_PACKED_EVENT_CODE_OTHER_EVENT);
2715 WRITE_BYTE (event_code);
2716 write_uint32 (last_saved_frame);
2717 write_uint32 (saved_frames);
2718 thread_stack_set_last_written_frame (&(data->stack), last_saved_frame + saved_frames);
2721 for (i = 0; i < saved_frames; i++) {
2722 guint8 code = events->code;
2724 MethodIdMappingElement *method;
2725 guint32 frame_value;
/* The frame's low bit distinguishes JIT-time callers from normal ones.
 * NOTE(review): the jit_flag assignments in these branches are elided
 * from this listing. */
2727 if (code == MONO_PROFILER_EVENT_METHOD_ALLOCATION_CALLER) {
2729 } else if (code == MONO_PROFILER_EVENT_METHOD_ALLOCATION_JIT_TIME_CALLER) {
2732 g_assert_not_reached ();
2736 method = method_id_mapping_element_get (events->data.address);
2737 g_assert (method != NULL);
2738 frame_value = (method->id << 1) | jit_flag;
2739 write_uint32 (frame_value);
/* Frames arrive innermost-first; store them at their absolute index. */
2740 thread_stack_write_frame_at_index (&(data->stack), last_saved_frame + saved_frames - (1 + i), frame_value);
2747 static ProfilerEventData*
/*
 * write_event: serializes one in-memory ProfilerEventData record into
 * the packed on-disk form: one code byte, the event data (method or
 * class id, or a raw number), the event value, and up to two extension
 * values (allocation caller id / object id, or monitor event data).
 * Oversized values are spilled into the following slot in memory
 * (MAX_EVENT_VALUE marker).  Returns a pointer past all consumed
 * event slots.
 * NOTE(review): many lines (declarations, `next` advancement, else
 * branches, #endif lines, the final return) are elided here.
 */
2748 write_event (ProfilerEventData *event, ProfilerPerThreadData *data) {
2749 ProfilerEventData *next = event + 1;
2750 gboolean write_event_value = TRUE;
2753 guint64 event_value;
2754 gboolean write_event_value_extension_1 = FALSE;
2755 guint64 event_value_extension_1 = 0;
2756 gboolean write_event_value_extension_2 = FALSE;
2757 guint64 event_value_extension_2 = 0;
/* A value of MAX_EVENT_VALUE means the real 64-bit value is stored in
 * the following slot. */
2759 event_value = event->value;
2760 if (event_value == MAX_EVENT_VALUE) {
2761 event_value = *((guint64*)next);
2765 if (event->data_type == MONO_PROFILER_EVENT_DATA_TYPE_METHOD) {
/* Method events: translate the method pointer into its mapping id. */
2766 MethodIdMappingElement *element = method_id_mapping_element_get (event->data.address);
2767 g_assert (element != NULL);
2768 event_data = element->id;
2770 if (event->code == MONO_PROFILER_EVENT_METHOD_CALL) {
2771 if (event->kind == MONO_PROFILER_EVENT_KIND_START) {
2772 MONO_PROFILER_EVENT_MAKE_PACKED_CODE (event_code, event_data, MONO_PROFILER_PACKED_EVENT_CODE_METHOD_ENTER);
2774 MONO_PROFILER_EVENT_MAKE_PACKED_CODE (event_code, event_data, MONO_PROFILER_PACKED_EVENT_CODE_METHOD_EXIT_EXPLICIT);
2777 MONO_PROFILER_EVENT_MAKE_FULL_CODE (event_code, event->code, event->kind, MONO_PROFILER_PACKED_EVENT_CODE_METHOD_EVENT);
2779 } else if (event->data_type == MONO_PROFILER_EVENT_DATA_TYPE_CLASS) {
/* Class events: translate the class pointer into its mapping id. */
2780 ClassIdMappingElement *element = class_id_mapping_element_get (event->data.address);
2781 g_assert (element != NULL);
2782 event_data = element->id;
2784 if (event->code == MONO_PROFILER_EVENT_CLASS_ALLOCATION) {
/* JIT-time allocations are emitted as a distinct "full" event. */
2785 if ((! profiler->action_flags.save_allocation_caller) || (! (next->code == MONO_PROFILER_EVENT_METHOD_ALLOCATION_JIT_TIME_CALLER))) {
2786 MONO_PROFILER_EVENT_MAKE_PACKED_CODE (event_code, event_data, MONO_PROFILER_PACKED_EVENT_CODE_CLASS_ALLOCATION);
2788 MONO_PROFILER_EVENT_MAKE_FULL_CODE (event_code, MONO_PROFILER_EVENT_JIT_TIME_ALLOCATION, event->kind, MONO_PROFILER_PACKED_EVENT_CODE_OTHER_EVENT);
/* Extension 1: the allocating caller's method id (if tracked). */
2791 if (profiler->action_flags.save_allocation_caller) {
2792 MonoMethod *caller_method = next->data.address;
2794 if ((next->code != MONO_PROFILER_EVENT_METHOD_ALLOCATION_CALLER) && (next->code != MONO_PROFILER_EVENT_METHOD_ALLOCATION_JIT_TIME_CALLER)) {
2795 g_assert_not_reached ();
2798 if (caller_method != NULL) {
2799 MethodIdMappingElement *caller = method_id_mapping_element_get (caller_method);
2800 g_assert (caller != NULL);
2801 event_value_extension_1 = caller->id;
2804 write_event_value_extension_1 = TRUE;
/* Extension 2: the allocated object's id (if tracked). */
2808 if (profiler->action_flags.allocations_carry_id) {
2809 event_value_extension_2 = GPOINTER_TO_UINT (next->data.address);
2811 if (next->code != MONO_PROFILER_EVENT_ALLOCATION_OBJECT_ID) {
2812 g_assert_not_reached ();
2815 write_event_value_extension_2 = TRUE;
2818 } else if (event->code == MONO_PROFILER_EVENT_CLASS_MONITOR) {
/* Monitor events pair with a following OBJECT_MONITOR record that
 * supplies the monitor value and object address. */
2819 g_assert (next->code == MONO_PROFILER_EVENT_OBJECT_MONITOR);
2821 MONO_PROFILER_EVENT_MAKE_FULL_CODE (event_code, event->code, event->kind, MONO_PROFILER_PACKED_EVENT_CODE_CLASS_EVENT);
2822 event_value_extension_1 = next->value;
2823 write_event_value_extension_1 = TRUE;
2824 event_value_extension_2 = GPOINTER_TO_UINT (next->data.address);
2825 write_event_value_extension_2 = TRUE;
2828 MONO_PROFILER_EVENT_MAKE_FULL_CODE (event_code, event->code, event->kind, MONO_PROFILER_PACKED_EVENT_CODE_CLASS_EVENT);
/* Everything else: stack sections are delegated, other events carry a
 * raw number as their data. */
2831 if (event->code == MONO_PROFILER_EVENT_STACK_SECTION) {
2832 return write_stack_section_event (event, data);
2834 event_data = event->data.number;
2835 MONO_PROFILER_EVENT_MAKE_FULL_CODE (event_code, event->code, event->kind, MONO_PROFILER_PACKED_EVENT_CODE_OTHER_EVENT);
2839 /* Skip writing JIT events if the user did not ask for them */
2840 if ((event->code == MONO_PROFILER_EVENT_METHOD_JIT) && ! profiler->action_flags.jit_time) {
2844 #if (DEBUG_LOGGING_PROFILER)
2846 printf ("writing EVENT[%p] data_type:%d, kind:%d, code:%d (%d:%ld:%ld)\n", event,
2847 event->data_type, event->kind, event->code,
2848 event_code, event_data, event_value);
2851 WRITE_BYTE (event_code);
2852 write_uint64 (event_data);
2853 if (write_event_value) {
2854 write_uint64 (event_value);
2855 if (write_event_value_extension_1) {
2856 write_uint64 (event_value_extension_1);
2858 if (write_event_value_extension_2) {
2859 write_uint64 (event_value_extension_2);
/*
 * write_thread_data_block: flushes one thread's pending events as an
 * EVENTS file block: clock data, thread id, starting event counter,
 * then every event between first_unwritten_event and
 * first_unmapped_event, closed by another clock sample.  When stack
 * tracking is on, the last written stack is re-emitted first so each
 * block can be decoded independently.
 */
2867 write_thread_data_block (ProfilerPerThreadData *data) {
2868 ProfilerEventData *start = data->first_unwritten_event;
2869 ProfilerEventData *end = data->first_unmapped_event;
2873 #if (DEBUG_FILE_WRITES)
2874 printf ("write_thread_data_block: preparing buffer for thread %ld\n", (guint64) data->thread_id);
2876 write_clock_data ();
2877 write_uint64 (data->thread_id);
2879 write_uint64 (data->start_event_counter);
2881 /* If we are tracking the stack, make sure that stack sections */
2882 /* can be fully reconstructed even reading only one block */
2883 if (profiler->action_flags.track_stack) {
2884 rewrite_last_written_stack (&(data->stack));
/* write_event returns the position past each consumed record. */
2887 while (start < end) {
2888 start = write_event (start, data);
2891 data->first_unwritten_event = end;
2893 write_clock_data ();
2894 write_current_block (MONO_PROFILER_FILE_BLOCK_KIND_EVENTS);
2895 #if (DEBUG_FILE_WRITES)
2896 printf ("write_thread_data_block: buffer for thread %ld written\n", (guint64) data->thread_id);
2900 static ProfilerExecutableMemoryRegionData*
2901 profiler_executable_memory_region_new (gpointer *start, gpointer *end, guint32 file_offset, char *file_name, guint32 id) {
2902 ProfilerExecutableMemoryRegionData *result = g_new (ProfilerExecutableMemoryRegionData, 1);
2903 result->start = start;
2905 result->file_offset = file_offset;
2906 result->file_name = g_strdup (file_name);
2908 result->is_new = TRUE;
2910 result->file = NULL;
2911 result->file_region_reference = NULL;
2912 result->symbols_capacity = id;
2913 result->symbols_count = id;
2914 result->symbols = NULL;
/* Forward declaration: defined after executable_file_open below. */
2920 executable_file_close (ProfilerExecutableMemoryRegionData *region);
/*
 * profiler_executable_memory_region_destroy: releases the resources
 * owned by a region descriptor — its file reference, symbol array and
 * copied file name.  NOTE(review): the final free of `data` itself is
 * on a line elided from this listing.
 */
2923 profiler_executable_memory_region_destroy (ProfilerExecutableMemoryRegionData *data) {
2924 if (data->file != NULL) {
2925 executable_file_close (data);
2928 if (data->symbols != NULL) {
2929 g_free (data->symbols);
2930 data->symbols = NULL;
2932 if (data->file_name != NULL) {
2933 g_free (data->file_name);
2934 data->file_name = NULL;
2939 static ProfilerExecutableMemoryRegions*
/*
 * profiler_executable_memory_regions_new: allocates an empty region
 * collection with an initial capacity of 32, seeding the id counters
 * from the caller-supplied values (so ids keep growing across
 * refreshes of the region list).
 */
2940 profiler_executable_memory_regions_new (int next_id, int next_unmanaged_function_id) {
2941 ProfilerExecutableMemoryRegions *result = g_new (ProfilerExecutableMemoryRegions, 1);
2942 result->regions = g_new0 (ProfilerExecutableMemoryRegionData*, 32);
2943 result->regions_capacity = 32;
2944 result->regions_count = 0;
2945 result->next_id = next_id;
2946 result->next_unmanaged_function_id = next_unmanaged_function_id;
/*
 * profiler_executable_memory_regions_destroy: destroys every region in
 * the collection and then the array itself.  NOTE(review): the free of
 * `regions` itself is on a line elided from this listing.
 */
2951 profiler_executable_memory_regions_destroy (ProfilerExecutableMemoryRegions *regions) {
2954 for (i = 0; i < regions->regions_count; i++) {
2955 profiler_executable_memory_region_destroy (regions->regions [i]);
2957 g_free (regions->regions);
2961 static ProfilerExecutableMemoryRegionData*
/*
 * find_address_region: binary search over the (sorted, non-overlapping)
 * region array for the region containing `address`.  Returns NULL when
 * the address falls outside every region (the early NULL returns are
 * on lines elided from this listing); the final range re-check guards
 * against the search terminating on a non-matching region.
 */
2962 find_address_region (ProfilerExecutableMemoryRegions *regions, gpointer address) {
2964 int high_index = regions->regions_count;
2965 int middle_index = 0;
2966 ProfilerExecutableMemoryRegionData *middle_region = regions->regions [0];
/* Fast reject: empty array or address outside the overall range. */
2968 if ((regions->regions_count == 0) || (regions->regions [low_index]->start > address) || (regions->regions [high_index - 1]->end < address)) {
2972 //printf ("find_address_region: Looking for address %p in %d regions (from %p to %p)\n", address, regions->regions_count, regions->regions [low_index]->start, regions->regions [high_index - 1]->end);
2974 while (low_index != high_index) {
2975 middle_index = low_index + ((high_index - low_index) / 2);
2976 middle_region = regions->regions [middle_index];
2978 //printf ("find_address_region: Looking for address %p, considering index %d[%p-%p] (%d-%d)\n", address, middle_index, middle_region->start, middle_region->end, low_index, high_index);
2980 if (middle_region->start > address) {
2981 if (middle_index > 0) {
2982 high_index = middle_index;
2986 } else if (middle_region->end < address) {
2987 if (middle_index < regions->regions_count - 1) {
2988 low_index = middle_index + 1;
2993 return middle_region;
/* Post-loop sanity: only return the candidate if it really contains
 * the address. */
2997 if ((middle_region == NULL) || (middle_region->start > address) || (middle_region->end < address)) {
3000 return middle_region;
/*
 * append_region: appends a new region descriptor to the collection,
 * doubling the pointer array's capacity first when it is full, and
 * assigns the collection's next sequential id to the new region.
 */
3005 append_region (ProfilerExecutableMemoryRegions *regions, gpointer *start, gpointer *end, guint32 file_offset, char *file_name) {
3006 if (regions->regions_count >= regions->regions_capacity) {
/* Manual grow: allocate double, copy the old pointers, free the old
 * array. */
3007 ProfilerExecutableMemoryRegionData **new_regions = g_new0 (ProfilerExecutableMemoryRegionData*, regions->regions_capacity * 2);
3008 memcpy (new_regions, regions->regions, regions->regions_capacity * sizeof (ProfilerExecutableMemoryRegionData*));
3009 g_free (regions->regions);
3010 regions->regions = new_regions;
3011 regions->regions_capacity = regions->regions_capacity * 2;
3013 regions->regions [regions->regions_count] = profiler_executable_memory_region_new (start, end, file_offset, file_name, regions->next_id);
3014 regions->regions_count ++;
3015 regions->next_id ++;
/*
 * regions_are_equivalent: two regions are equivalent when they cover
 * the same address range, at the same file offset, in the same file.
 * (The TRUE/FALSE returns are on lines elided from this listing.)
 */
3019 regions_are_equivalent (ProfilerExecutableMemoryRegionData *region1, ProfilerExecutableMemoryRegionData *region2) {
3020 if ((region1->start == region2->start) &&
3021 (region1->end == region2->end) &&
3022 (region1->file_offset == region2->file_offset) &&
3023 ! strcmp (region1->file_name, region2->file_name)) {
/*
 * compare_regions: qsort comparator ordering region descriptors by
 * ascending start address.
 */
3031 compare_regions (const void *a1, const void *a2) {
3032 ProfilerExecutableMemoryRegionData *r1 = * (ProfilerExecutableMemoryRegionData**) a1;
3033 ProfilerExecutableMemoryRegionData *r2 = * (ProfilerExecutableMemoryRegionData**) a2;
3034 return (r1->start < r2->start)? -1 : ((r1->start > r2->start)? 1 : 0);
/*
 * restore_old_regions: after rescanning the memory map, swap each new
 * region that is equivalent to an existing one with the old descriptor
 * (preserving its id, symbols and file references); the fresh duplicate
 * ends up in the old collection, which will be destroyed.
 */
3038 restore_old_regions (ProfilerExecutableMemoryRegions *old_regions, ProfilerExecutableMemoryRegions *new_regions) {
3042 for (new_i = 0; new_i < new_regions->regions_count; new_i++) {
3043 ProfilerExecutableMemoryRegionData *new_region = new_regions->regions [new_i];
3044 for (old_i = 0; old_i < old_regions->regions_count; old_i++) {
3045 ProfilerExecutableMemoryRegionData *old_region = old_regions->regions [old_i];
3046 if ( regions_are_equivalent (old_region, new_region)) {
/* Swap descriptors between the two collections. */
3047 new_regions->regions [new_i] = old_region;
3048 old_regions->regions [old_i] = new_region;
3050 // FIXME (sanity check)
3051 g_assert (new_region->is_new && ! old_region->is_new);
/*
 * sort_regions: sorts the region array by start address and removes
 * adjacent duplicates (equivalent regions), preferring to keep the
 * previously-known (non-new) descriptor.  Remaining entries are
 * shifted down to close the gap.  No-op for fewer than two regions.
 */
3058 sort_regions (ProfilerExecutableMemoryRegions *regions) {
3059 if (regions->regions_count > 1) {
3062 qsort (regions->regions, regions->regions_count, sizeof (ProfilerExecutableMemoryRegionData *), compare_regions);
3065 while (i < regions->regions_count) {
3066 ProfilerExecutableMemoryRegionData *current_region = regions->regions [i];
3067 ProfilerExecutableMemoryRegionData *previous_region = regions->regions [i - 1];
3069 if (regions_are_equivalent (previous_region, current_region)) {
/* Keep whichever duplicate is not new; destroy the other. */
3072 if (! current_region->is_new) {
3073 profiler_executable_memory_region_destroy (previous_region);
3074 regions->regions [i - 1] = current_region;
3076 profiler_executable_memory_region_destroy (current_region);
/* Compact the array over the removed slot. */
3079 for (j = i + 1; j < regions->regions_count; j++) {
3080 regions->regions [j - 1] = regions->regions [j];
3083 regions->regions_count --;
/*
 * fix_region_references: after regions have been swapped/sorted, make
 * each file-section back-reference point at the descriptor that now
 * owns it.
 */
3092 fix_region_references (ProfilerExecutableMemoryRegions *regions) {
3094 for (i = 0; i < regions->regions_count; i++) {
3095 ProfilerExecutableMemoryRegionData *region = regions->regions [i];
3096 if (region->file_region_reference != NULL) {
3097 region->file_region_reference->region = region;
/*
 * executable_file_add_region_reference: scans the ELF section headers
 * of `file` for loaded executable sections whose file extent lies
 * entirely inside `region`, and links the matching section-region slot
 * to the region (section index 0 is the reserved null section and is
 * skipped).
 */
3103 executable_file_add_region_reference (ProfilerExecutableFile *file, ProfilerExecutableMemoryRegionData *region) {
3104 guint8 *section_headers = file->data + file->header->e_shoff;
3107 for (section_index = 1; section_index < file->header->e_shnum; section_index ++) {
3108 ElfSection *section_header = (ElfSection*) (section_headers + (file->header->e_shentsize * section_index));
/* Candidate sections: allocated, executable, and fully covered by the
 * region's file range. */
3110 if ((section_header->sh_addr != 0) && (section_header->sh_flags & ELF_SHF_EXECINSTR) &&
3111 (region->file_offset <= section_header->sh_offset) && (region->file_offset + (((guint8*)region->end)-((guint8*)region->start)) >= (section_header->sh_offset + section_header->sh_size))) {
3112 ProfilerExecutableFileSectionRegion *section_region = & (file->section_regions [section_index]);
3113 section_region->region = region;
3114 section_region->section_address = (gpointer) section_header->sh_addr;
3115 section_region->section_offset = section_header->sh_offset;
3116 region->file_region_reference = section_region;
/*
 * check_elf_header: validates that `header` is an ELF header usable by
 * this process: correct magic, ELF class matching the host pointer
 * size, and byte order matching the host (probed via the `test` word).
 * Returns FALSE with a warning on any mismatch (the TRUE/FALSE return
 * lines are elided from this listing).
 */
3121 static gboolean check_elf_header (ElfHeader* header) {
3122 guint16 test = 0x0102;
3124 if ((header->e_ident [EI_MAG0] != 0x7f) || (header->e_ident [EI_MAG1] != 'E') ||
3125 (header->e_ident [EI_MAG2] != 'L') || (header->e_ident [EI_MAG3] != 'F')) {
/* ELF class must match the host word size. */
3129 if (sizeof (gsize) == 4) {
3130 if (header->e_ident [EI_CLASS] != ELF_CLASS_32) {
3131 g_warning ("Class is not ELF_CLASS_32 with gsize size %d", (int) sizeof (gsize));
3134 } else if (sizeof (gsize) == 8) {
3135 if (header->e_ident [EI_CLASS] != ELF_CLASS_64) {
3136 g_warning ("Class is not ELF_CLASS_64 with gsize size %d", (int) sizeof (gsize));
3140 g_warning ("Absurd gsize size %d", (int) sizeof (gsize));
/* Endianness probe: the first byte of 0x0102 reveals host byte order. */
3144 if ((*(guint8*)(&test)) == 0x01) {
3145 if (header->e_ident [EI_DATA] != ELF_DATA_MSB) {
3146 g_warning ("Data is not ELF_DATA_MSB with first test byte 0x01");
3149 } else if ((*(guint8*)(&test)) == 0x02) {
3150 if (header->e_ident [EI_DATA] != ELF_DATA_LSB) {
3151 g_warning ("Data is not ELF_DATA_LSB with first test byte 0x02");
3155 g_warning ("Absurd test byte value");
/*
 * check_elf_file: reads an ElfHeader from the start of `fd` and
 * validates it with check_elf_header.  Returns FALSE on a short read.
 * NOTE(review): the malloc'd header is presumably freed on the lines
 * elided from this listing — verify against the full source.
 */
3162 static gboolean check_elf_file (int fd) {
3163 void *header = malloc (sizeof (ElfHeader));
3164 ssize_t read_result = read (fd, header, sizeof (ElfHeader));
3167 if (read_result != sizeof (ElfHeader)) {
3170 result = check_elf_header ((ElfHeader*) header);
3177 static ProfilerExecutableFile*
/*
 * executable_file_open: returns (creating and caching on first use) the
 * ProfilerExecutableFile for a region's backing file.  On first open it
 * opens and validates the ELF file, maps it (or aliases the region's
 * own memory when sizes match), locates the symbol table (preferring
 * .symtab/.strtab, falling back to .dynsym/.dynstr), and allocates the
 * per-section region slots.  Every lookup bumps the file's reference
 * count, and the region is linked to its sections.
 * NOTE(review): several error-path lines, else-branches and closing
 * braces are elided from this listing.
 */
3178 executable_file_open (ProfilerExecutableMemoryRegionData *region) {
3179 ProfilerExecutableFiles *files = & (profiler->executable_files);
3180 ProfilerExecutableFile *file = region->file;
/* First consult the per-profiler cache keyed by file name. */
3183 file = (ProfilerExecutableFile*) g_hash_table_lookup (files->table, region->file_name);
3186 struct stat stat_buffer;
3187 int symtab_index = 0;
3188 int strtab_index = 0;
3189 int dynsym_index = 0;
3190 int dynstr_index = 0;
3192 guint8 *section_headers;
/* Cache miss: create a new entry and queue it on the new-files list. */
3196 file = g_new0 (ProfilerExecutableFile, 1);
3197 region->file = file;
3198 g_hash_table_insert (files->table, region->file_name, file);
3199 file->reference_count ++;
3200 file->next_new_file = files->new_files;
3201 files->new_files = file;
3203 file->fd = open (region->file_name, O_RDONLY);
3204 if (file->fd == -1) {
3205 //g_warning ("Cannot open file '%s': '%s'", region->file_name, strerror (errno));
3208 if (fstat (file->fd, &stat_buffer) != 0) {
3209 //g_warning ("Cannot stat file '%s': '%s'", region->file_name, strerror (errno));
3211 } else if (! check_elf_file (file->fd)) {
3214 size_t region_length = ((guint8*)region->end) - ((guint8*)region->start);
3215 file->length = stat_buffer.st_size;
/* If the mapping covers the whole file, reuse the region's memory
 * instead of mapping the file again. */
3217 if (file->length == region_length) {
3218 file->data = region->start;
3222 file->data = mmap (NULL, file->length, PROT_READ, MAP_PRIVATE, file->fd, 0);
3224 if (file->data == MAP_FAILED) {
3226 //g_warning ("Cannot map file '%s': '%s'", region->file_name, strerror (errno));
3234 /* OK, this is a usable elf file, and we mmapped it... */
3235 header = (ElfHeader*) file->data;
3236 file->header = header;
3237 section_headers = file->data + file->header->e_shoff;
/* Section-name string table, indexed via e_shstrndx. */
3238 file->main_string_table = ((const char*) file->data) + (((ElfSection*) (section_headers + (header->e_shentsize * header->e_shstrndx)))->sh_offset);
3240 for (section_index = 0; section_index < header->e_shnum; section_index ++) {
3241 ElfSection *section_header = (ElfSection*) (section_headers + (header->e_shentsize * section_index));
3243 if (section_header->sh_type == ELF_SHT_SYMTAB) {
3244 symtab_index = section_index;
3245 } else if (section_header->sh_type == ELF_SHT_DYNSYM) {
3246 dynsym_index = section_index;
3247 } else if (section_header->sh_type == ELF_SHT_STRTAB) {
3248 if (! strcmp (file->main_string_table + section_header->sh_name, ".strtab")) {
3249 strtab_index = section_index;
3250 } else if (! strcmp (file->main_string_table + section_header->sh_name, ".dynstr")) {
3251 dynstr_index = section_index;
/* Prefer the full symbol table; fall back to the dynamic one. */
3256 if ((symtab_index != 0) && (strtab_index != 0)) {
3257 section_index = symtab_index;
3258 strings_index = strtab_index;
3259 } else if ((dynsym_index != 0) && (dynstr_index != 0)) {
3260 section_index = dynsym_index;
3261 strings_index = dynstr_index;
3267 if (section_index != 0) {
3268 ElfSection *section_header = (ElfSection*) (section_headers + (header->e_shentsize * section_index));
3269 file->symbol_size = section_header->sh_entsize;
3270 file->symbols_count = (guint32) (section_header->sh_size / section_header->sh_entsize);
3271 file->symbols_start = file->data + section_header->sh_offset;
3272 file->symbols_string_table = ((const char*) file->data) + (((ElfSection*) (section_headers + (header->e_shentsize * strings_index)))->sh_offset);
3275 file->section_regions = g_new0 (ProfilerExecutableFileSectionRegion, file->header->e_shnum);
/* Cache hit path: link the region to the existing file and add a
 * reference. */
3277 region->file = file;
3278 file->reference_count ++;
3282 if (file->header != NULL) {
3283 executable_file_add_region_reference (file, region);
/*
 * executable_file_free:
 * Releases the OS resources held by @file: closes the fd and unmaps the
 * mmap()ed image, then frees the per-section region array.
 * NOTE(review): elided listing — the g_free of @file itself is not visible.
 */
3290 executable_file_free (ProfilerExecutableFile* file) {
3291 if (file->fd != -1) {
3292 if (close (file->fd) != 0) {
3293 g_warning ("Cannot close file: '%s'", strerror (errno));
3295 if (file->data != NULL) {
3296 if (munmap (file->data, file->length) != 0) {
3297 g_warning ("Cannot unmap file: '%s'", strerror (errno));
3301 if (file->section_regions != NULL) {
3302 g_free (file->section_regions);
3303 file->section_regions = NULL;
/*
 * executable_file_close:
 * Drops @region's reference to its backing file, clearing any section
 * back-pointer that still refers to @region. When the reference count
 * reaches zero the file is removed from the global table and freed.
 */
3309 executable_file_close (ProfilerExecutableMemoryRegionData *region) {
3310 region->file->reference_count --;
3312 if ((region->file_region_reference != NULL) && (region->file_region_reference->region == region)) {
3313 region->file_region_reference->region = NULL;
3314 region->file_region_reference->section_address = 0;
3315 region->file_region_reference->section_offset = 0;
3318 if (region->file->reference_count <= 0) {
3319 ProfilerExecutableFiles *files = & (profiler->executable_files);
3320 g_hash_table_remove (files->table, region->file_name);
3321 executable_file_free (region->file);
3322 region->file = NULL;
/*
 * executable_file_count_symbols:
 * First pass over the ELF symbol table: for each FUNC symbol whose section
 * maps to a known region, bump that region's symbols_count so the table
 * can be sized before the fill pass (executable_file_build_symbol_tables).
 */
3327 executable_file_count_symbols (ProfilerExecutableFile *file) {
3330 for (symbol_index = 0; symbol_index < file->symbols_count; symbol_index ++) {
3331 ElfSymbol *symbol = (ElfSymbol*) (file->symbols_start + (symbol_index * file->symbol_size));
/* Only function symbols with a valid (non-special) section index count. */
3333 if ((ELF_ST_TYPE (symbol->st_info) == ELF_STT_FUNC) &&
3334 (symbol->st_shndx > 0) &&
3335 (symbol->st_shndx < file->header->e_shnum)) {
3336 int symbol_section_index = symbol->st_shndx;
3337 ProfilerExecutableMemoryRegionData *region = file->section_regions [symbol_section_index].region;
/* region->symbols == NULL means the table has not been allocated yet,
 * i.e. we are still in the counting phase for this region. */
3338 if ((region != NULL) && (region->symbols == NULL)) {
3339 region->symbols_count ++;
/*
 * executable_memory_regions_prepare_symbol_tables:
 * Allocates each region's symbol array sized by the count from the first
 * pass, then resets symbols_count to 0 so the fill pass can reuse it as
 * the insertion cursor (capacity is preserved in symbols_capacity).
 */
3346 executable_memory_regions_prepare_symbol_tables (ProfilerExecutableMemoryRegions *regions) {
3348 for (i = 0; i < regions->regions_count; i++) {
3349 ProfilerExecutableMemoryRegionData *region = regions->regions [i];
3350 if ((region->symbols_count > 0) && (region->symbols == NULL)) {
3351 region->symbols = g_new (ProfilerUnmanagedSymbol, region->symbols_count);
3352 region->symbols_capacity = region->symbols_count;
3353 region->symbols_count = 0;
/*
 * executable_region_symbol_get_name:
 * Resolves @symbol back to its name via the file's symbol/string tables
 * (symbol->index is the ELF symbol-table index recorded at fill time).
 * Returns a pointer into the mmap()ed string table — do not free.
 */
3359 executable_region_symbol_get_name (ProfilerExecutableMemoryRegionData *region, ProfilerUnmanagedSymbol *symbol) {
3360 ElfSymbol *elf_symbol = (ElfSymbol*) (region->file->symbols_start + (symbol->index * region->file->symbol_size));
3361 return region->file->symbols_string_table + elf_symbol->st_name;
/*
 * executable_file_build_symbol_tables:
 * Second pass over the ELF symbol table (same filter as the counting pass):
 * appends each FUNC symbol to its region's pre-sized array, recording the
 * symbol-table index, size, and the symbol's offset within the region.
 */
3365 executable_file_build_symbol_tables (ProfilerExecutableFile *file) {
3368 for (symbol_index = 0; symbol_index < file->symbols_count; symbol_index ++) {
3369 ElfSymbol *symbol = (ElfSymbol*) (file->symbols_start + (symbol_index * file->symbol_size));
3371 if ((ELF_ST_TYPE (symbol->st_info) == ELF_STT_FUNC) &&
3372 (symbol->st_shndx > 0) &&
3373 (symbol->st_shndx < file->header->e_shnum)) {
3374 int symbol_section_index = symbol->st_shndx;
3375 ProfilerExecutableFileSectionRegion *section_region = & (file->section_regions [symbol_section_index]);
3376 ProfilerExecutableMemoryRegionData *region = section_region->region;
3378 if (region != NULL) {
3379 ProfilerUnmanagedSymbol *new_symbol = & (region->symbols [region->symbols_count]);
3380 region->symbols_count ++;
3383 new_symbol->index = symbol_index;
3384 new_symbol->size = symbol->st_size;
/* Translate the symbol's virtual address into an offset from the start of
 * the mapped region, compensating for the file-offset difference between
 * the region mapping and the section. */
3385 new_symbol->offset = (((guint8*) symbol->st_value) - section_region->section_address) - (region->file_offset - section_region->section_offset);
/* qsort comparator: orders region symbols by ascending offset so the
 * binary search in executable_memory_region_find_symbol works. */
3392 compare_region_symbols (const void *p1, const void *p2) {
3393 const ProfilerUnmanagedSymbol *s1 = p1;
3394 const ProfilerUnmanagedSymbol *s2 = p2;
3395 return (s1->offset < s2->offset)? -1 : ((s1->offset > s2->offset)? 1 : 0);
/*
 * executable_memory_regions_sort_symbol_tables:
 * Sorts the symbol array of every newly discovered region by offset.
 */
3399 executable_memory_regions_sort_symbol_tables (ProfilerExecutableMemoryRegions *regions) {
3401 for (i = 0; i < regions->regions_count; i++) {
3402 ProfilerExecutableMemoryRegionData *region = regions->regions [i];
3403 if ((region->is_new) && (region->symbols != NULL)) {
3404 qsort (region->symbols, region->symbols_count, sizeof (ProfilerUnmanagedSymbol), compare_region_symbols);
/*
 * build_symbol_tables:
 * Drives the whole symbol-table pipeline for newly discovered regions:
 * open backing files, count symbols, allocate tables, fill them, sort
 * them, and finally drain the "new files" list.
 */
3410 build_symbol_tables (ProfilerExecutableMemoryRegions *regions, ProfilerExecutableFiles *files) {
3412 ProfilerExecutableFile *file;
3414 for (i = 0; i < regions->regions_count; i++) {
3415 ProfilerExecutableMemoryRegionData *region = regions->regions [i];
3416 if ((region->is_new) && (region->file == NULL)) {
3417 executable_file_open (region);
3421 for (file = files->new_files; file != NULL; file = file->next_new_file) {
3422 executable_file_count_symbols (file);
3425 executable_memory_regions_prepare_symbol_tables (regions);
3427 for (file = files->new_files; file != NULL; file = file->next_new_file) {
3428 executable_file_build_symbol_tables (file);
3431 executable_memory_regions_sort_symbol_tables (regions);
/* Unlink every file from the new-files list so the next refresh only
 * processes files discovered after this point. */
3433 file = files->new_files;
3434 while (file != NULL) {
3435 ProfilerExecutableFile *next_file = file->next_new_file;
3436 file->next_new_file = NULL;
3439 files->new_files = NULL;
/*
 * executable_memory_region_find_symbol:
 * Binary search over the region's offset-sorted symbol array. Returns the
 * symbol whose [offset, offset+size) interval contains @offset, or (per
 * the elided fallthrough) presumably NULL when none matches.
 */
3442 static ProfilerUnmanagedSymbol*
3443 executable_memory_region_find_symbol (ProfilerExecutableMemoryRegionData *region, guint32 offset) {
3444 if (region->symbols_count > 0) {
3445 ProfilerUnmanagedSymbol *low = region->symbols;
3446 ProfilerUnmanagedSymbol *high = region->symbols + (region->symbols_count - 1);
3447 int step = region->symbols_count >> 1;
3448 ProfilerUnmanagedSymbol *current = region->symbols + step;
3451 step = (high - low) >> 1;
3453 if (offset < current->offset) {
3455 current = high - step;
3456 } else if (offset >= current->offset) {
/* Target is past the end of the current symbol: move the low bound up. */
3457 if (offset >= (current->offset + current->size)) {
3459 current = low + step;
/* Final containment check: symbol gaps exist, so landing near a symbol
 * is not enough — the offset must fall inside its extent. */
3466 if ((offset >= current->offset) && (offset < (current->offset + current->size))) {
/* Support code for parsing /proc/self/maps (Linux-only; see FIXME). */
3476 //FIXME: make also Win32 and BSD variants
3477 #define MAPS_BUFFER_SIZE 4096
3478 #define MAPS_FILENAME_SIZE 2048
/*
 * update_regions_buffer:
 * Refills @buffer with up to MAPS_BUFFER_SIZE bytes from @fd. A short read
 * is NUL-terminated in place; a full read is left unterminated (the elided
 * return value presumably distinguishes the two — verify in full source).
 */
3481 update_regions_buffer (int fd, char *buffer) {
3482 ssize_t result = read (fd, buffer, MAPS_BUFFER_SIZE);
3484 if (result == MAPS_BUFFER_SIZE) {
3486 } else if (result >= 0) {
3487 *(buffer + result) = 0;
/* Advances cursor (c) through buffer (b), refilling from (fd) when the
 * cursor runs off the end of the buffer or hits the NUL terminator. */
3495 #define GOTO_NEXT_CHAR(c,b,fd) do {\
3497 if (((c) - (b) >= MAPS_BUFFER_SIZE) || ((*(c) == 0) && ((c) != (b)))) {\
3498 update_regions_buffer ((fd), (b));\
/* Returns the numeric value of a hex digit (elided: '0'-'9' branch body
 * and the non-hex fallthrough are not visible in this listing). */
3503 static int hex_digit_value (char c) {
3504 if ((c >= '0') && (c <= '9')) {
3506 } else if ((c >= 'a') && (c <= 'f')) {
3507 return c - 'a' + 10;
3508 } else if ((c >= 'A') && (c <= 'F')) {
3509 return c - 'A' + 10;
/* States of the /proc/self/maps line parser, in field order:
 * "start-end perms offset dev inode   pathname". */
3531 MAP_LINE_PARSER_STATE_INVALID,
3532 MAP_LINE_PARSER_STATE_START_ADDRESS,
3533 MAP_LINE_PARSER_STATE_END_ADDRESS,
3534 MAP_LINE_PARSER_STATE_PERMISSIONS,
3535 MAP_LINE_PARSER_STATE_OFFSET,
3536 MAP_LINE_PARSER_STATE_DEVICE,
3537 MAP_LINE_PARSER_STATE_INODE,
3538 MAP_LINE_PARSER_STATE_BLANK_BEFORE_FILENAME,
3539 MAP_LINE_PARSER_STATE_FILENAME,
3540 MAP_LINE_PARSER_STATE_DONE
3541 } MapLineParserState;
/* Debug names for the states above (most entries elided in this listing). */
3543 const char *map_line_parser_state [] = {
3551 "BLANK_BEFORE_FILENAME",
/*
 * parse_map_line:
 * State machine that consumes one line of /proc/self/maps starting at
 * @current, accumulating the start/end addresses, file offset, executable
 * flag and pathname. Executable entries are appended to @regions.
 * NOTE(review): elided listing — the switch header, per-case digit tests
 * and the return value are not all visible here.
 */
3557 parse_map_line (ProfilerExecutableMemoryRegions *regions, int fd, char *buffer, char *filename, char *current) {
3558 MapLineParserState state = MAP_LINE_PARSER_STATE_START_ADDRESS;
3559 gsize start_address = 0;
3560 gsize end_address = 0;
3562 int filename_index = 0;
3563 gboolean is_executable = FALSE;
3564 gboolean done = FALSE;
3570 case MAP_LINE_PARSER_STATE_START_ADDRESS:
/* Accumulate the hex start address nibble by nibble until '-'. */
3572 start_address <<= 4;
3573 start_address |= hex_digit_value (c);
3574 } else if (c == '-') {
3575 state = MAP_LINE_PARSER_STATE_END_ADDRESS;
3577 state = MAP_LINE_PARSER_STATE_INVALID;
3580 case MAP_LINE_PARSER_STATE_END_ADDRESS:
3583 end_address |= hex_digit_value (c);
3584 } else if (isblank (c)) {
3585 state = MAP_LINE_PARSER_STATE_PERMISSIONS;
3587 state = MAP_LINE_PARSER_STATE_INVALID;
3590 case MAP_LINE_PARSER_STATE_PERMISSIONS:
/* The permissions field ("r-xp" etc.): remember the 'x' bit. */
3592 is_executable = TRUE;
3593 } else if (isblank (c)) {
3594 state = MAP_LINE_PARSER_STATE_OFFSET;
3595 } else if ((c != '-') && ! isalpha (c)) {
3596 state = MAP_LINE_PARSER_STATE_INVALID;
3599 case MAP_LINE_PARSER_STATE_OFFSET:
3602 offset |= hex_digit_value (c);
3603 } else if (isblank (c)) {
3604 state = MAP_LINE_PARSER_STATE_DEVICE;
3606 state = MAP_LINE_PARSER_STATE_INVALID;
3609 case MAP_LINE_PARSER_STATE_DEVICE:
/* Device field is "major:minor" in hex; value is skipped, not stored. */
3611 state = MAP_LINE_PARSER_STATE_INODE;
3612 } else if ((c != ':') && ! isxdigit (c)) {
3613 state = MAP_LINE_PARSER_STATE_INVALID;
3616 case MAP_LINE_PARSER_STATE_INODE:
3618 state = MAP_LINE_PARSER_STATE_BLANK_BEFORE_FILENAME;
3619 } else if (! isdigit (c)) {
3620 state = MAP_LINE_PARSER_STATE_INVALID;
3623 case MAP_LINE_PARSER_STATE_BLANK_BEFORE_FILENAME:
/* Pathnames start with '/', pseudo-entries with '[' (e.g. "[vdso]"). */
3624 if ((c == '/') || (c == '[')) {
3625 state = MAP_LINE_PARSER_STATE_FILENAME;
3626 filename [filename_index] = *current;
3628 } else if (! isblank (c)) {
3629 state = MAP_LINE_PARSER_STATE_INVALID;
3632 case MAP_LINE_PARSER_STATE_FILENAME:
3633 if (filename_index < MAPS_FILENAME_SIZE) {
3635 state = MAP_LINE_PARSER_STATE_DONE;
3637 filename [filename_index] = 0;
3639 filename [filename_index] = *current;
/* Name exceeded MAPS_FILENAME_SIZE: truncate, warn, and (elided) bail. */
3643 filename [filename_index] = 0;
3644 g_warning ("ELF filename too long: \"%s\"...\n", filename);
3647 case MAP_LINE_PARSER_STATE_DONE:
/* A fully parsed executable mapping becomes a profiler region. */
3648 if (done && is_executable) {
3649 filename [filename_index] = 0;
3650 append_region (regions, (gpointer) start_address, (gpointer) end_address, offset, filename);
3653 case MAP_LINE_PARSER_STATE_INVALID:
/* Unparseable line: skip to its end and treat it as done/ignored. */
3655 state = MAP_LINE_PARSER_STATE_DONE;
3662 } else if (c == '\n') {
3663 state = MAP_LINE_PARSER_STATE_DONE;
3666 GOTO_NEXT_CHAR(current, buffer, fd);
/*
 * scan_process_regions:
 * Reads /proc/self/maps and feeds it line by line through parse_map_line,
 * populating @regions with the executable mappings of this process.
 * NOTE(review): malloc results are used unchecked here, and the fd-open
 * failure path is elided — confirm error handling in the full source.
 */
3672 scan_process_regions (ProfilerExecutableMemoryRegions *regions) {
3678 fd = open ("/proc/self/maps", O_RDONLY);
3683 buffer = malloc (MAPS_BUFFER_SIZE);
3684 filename = malloc (MAPS_FILENAME_SIZE);
3685 update_regions_buffer (fd, buffer);
3687 while (current != NULL) {
3688 current = parse_map_line (regions, fd, buffer, filename, current);
/* Tag values emitted into the statistical sub-block of the output file.
 * Small codes are packed into the low 3 bits of a uint32 (see the
 * "<< 3" writes in write_statistical_hit). */
3700 MONO_PROFILER_STATISTICAL_CODE_END = 0,
3701 MONO_PROFILER_STATISTICAL_CODE_METHOD = 1,
3702 MONO_PROFILER_STATISTICAL_CODE_UNMANAGED_FUNCTION_ID = 2,
3703 MONO_PROFILER_STATISTICAL_CODE_UNMANAGED_FUNCTION_NEW_ID = 3,
3704 MONO_PROFILER_STATISTICAL_CODE_UNMANAGED_FUNCTION_OFFSET_IN_REGION = 4,
3705 MONO_PROFILER_STATISTICAL_CODE_CALL_CHAIN = 5,
3706 MONO_PROFILER_STATISTICAL_CODE_REGIONS = 7
3707 } MonoProfilerStatisticalCode;
/*
 * refresh_memory_regions:
 * Rescans /proc/self/maps into a fresh region set, carries over matching
 * old regions, rebuilds symbol tables, then writes the region delta
 * (removed ids followed by new region records) into the output stream.
 * Runs on the writer thread (see LOG_WRITER_THREAD traces).
 */
3710 refresh_memory_regions (void) {
3711 ProfilerExecutableMemoryRegions *old_regions = profiler->executable_regions;
3712 ProfilerExecutableMemoryRegions *new_regions = profiler_executable_memory_regions_new (old_regions->next_id, old_regions->next_unmanaged_function_id);
3715 LOG_WRITER_THREAD ("Refreshing memory regions...");
3716 scan_process_regions (new_regions);
3717 sort_regions (new_regions);
/* Regions that survived keep their old ids; only truly new ones stay
 * flagged is_new. */
3718 restore_old_regions (old_regions, new_regions);
3719 fix_region_references (new_regions);
3720 LOG_WRITER_THREAD ("Refreshed memory regions.");
3722 LOG_WRITER_THREAD ("Building symbol tables...");
3723 build_symbol_tables (new_regions, & (profiler->executable_files));
/* Debug dump of all regions and the symbols of new ones (presumably
 * compiled only under a debug guard elided from this listing). */
3725 printf ("Symbol tables done!\n");
3726 printf ("Region summary...\n");
3727 for (i = 0; i < new_regions->regions_count; i++) {
3728 ProfilerExecutableMemoryRegionData *region = new_regions->regions [i];
3729 printf ("Region %d[%d][NEW:%d] (%p-%p) at %d in file %s\n", i, region->id, region->is_new,
3730 region->start, region->end, region->file_offset, region->file_name);
3732 printf ("New symbol tables dump...\n");
3733 for (i = 0; i < new_regions->regions_count; i++) {
3734 ProfilerExecutableMemoryRegionData *region = new_regions->regions [i];
3736 if (region->is_new) {
3739 printf ("Region %d[%d][NEW:%d] (%p-%p) at %d in file %s\n", i, region->id, region->is_new,
3740 region->start, region->end, region->file_offset, region->file_name);
3741 for (symbol_index = 0; symbol_index < region->symbols_count; symbol_index ++) {
3742 ProfilerUnmanagedSymbol *symbol = & (region->symbols [symbol_index]);
3743 printf (" [%d] Symbol %s (offset %d, size %d)\n", symbol_index,
3744 executable_region_symbol_get_name (region, symbol),
3745 symbol->offset, symbol->size);
3750 LOG_WRITER_THREAD ("Built symbol tables.");
3752 // This marks the region "sub-block"
3753 write_uint32 (MONO_PROFILER_STATISTICAL_CODE_REGIONS);
3755 // First write the "removed" regions
3756 for (i = 0; i < old_regions->regions_count; i++) {
3757 ProfilerExecutableMemoryRegionData *region = old_regions->regions [i];
3758 if (! region->is_new) {
3759 #if DEBUG_STATISTICAL_PROFILER
3760 printf ("[refresh_memory_regions] Invalidated region %d\n", region->id);
3762 write_uint32 (region->id);
3767 // Then write the new ones
3768 for (i = 0; i < new_regions->regions_count; i++) {
3769 ProfilerExecutableMemoryRegionData *region = new_regions->regions [i];
3770 if (region->is_new) {
/* Clear the flag so the next refresh treats this region as known. */
3771 region->is_new = FALSE;
3773 #if DEBUG_STATISTICAL_PROFILER
3774 printf ("[refresh_memory_regions] Wrote region %d (%p-%p[%d] '%s')\n", region->id, region->start, region->end, region->file_offset, region->file_name);
3776 write_uint32 (region->id);
3777 write_uint64 (GPOINTER_TO_UINT (region->start));
3778 write_uint32 (GPOINTER_TO_UINT (region->end) - GPOINTER_TO_UINT (region->start));
3779 write_uint32 (region->file_offset);
3780 write_string (region->file_name);
3785 // Finally, free the old ones, and replace them
3786 profiler_executable_memory_regions_destroy (old_regions);
3787 profiler->executable_regions = new_regions;
/*
 * write_statistical_hit:
 * Encodes one sampled PC into the output stream. Managed code-buffer hits
 * become METHOD records; everything else is resolved against the unmanaged
 * region/symbol tables (refreshing the region map at most once per batch —
 * @regions_refreshed is threaded through and returned for that purpose).
 */
3791 write_statistical_hit (gpointer address, gboolean regions_refreshed) {
3792 ProfilerCodeBuffer *code_buffer = profiler_code_buffer_from_address (profiler, address);
3794 if ((code_buffer != NULL) && (code_buffer->info.type == MONO_PROFILER_CODE_BUFFER_METHOD)) {
3795 MonoMethod *method = code_buffer->info.data.method;
3796 MethodIdMappingElement *element = method_id_mapping_element_get (method);
3798 if (element != NULL) {
3799 #if DEBUG_STATISTICAL_PROFILER
3800 printf ("[write_statistical_hit] Wrote method %d\n", element->id);
/* Known method: id packed above the 3-bit code. */
3802 write_uint32 ((element->id << 3) | MONO_PROFILER_STATISTICAL_CODE_METHOD);
3804 #if DEBUG_STATISTICAL_PROFILER
3805 printf ("[write_statistical_hit] Wrote unknown method %p\n", method);
/* Unknown method: bare METHOD code with implicit id 0. */
3807 write_uint32 (MONO_PROFILER_STATISTICAL_CODE_METHOD);
3810 ProfilerExecutableMemoryRegionData *region = find_address_region (profiler->executable_regions, address);
/* Unmapped address: rescan /proc/self/maps once, then retry the lookup. */
3812 if (region == NULL && ! regions_refreshed) {
3813 #if DEBUG_STATISTICAL_PROFILER
3814 printf ("[write_statistical_hit] Cannot find region for address %p, refreshing...\n", address);
3816 refresh_memory_regions ();
3817 regions_refreshed = TRUE;
3818 region = find_address_region (profiler->executable_regions, address);
3821 if (region != NULL) {
3822 guint32 offset = ((guint8*)address) - ((guint8*)region->start);
3823 ProfilerUnmanagedSymbol *symbol = executable_memory_region_find_symbol (region, offset);
3825 if (symbol != NULL) {
/* id > 0 means the symbol was already announced to the reader. */
3826 if (symbol->id > 0) {
3827 #if DEBUG_STATISTICAL_PROFILER
3828 printf ("[write_statistical_hit] Wrote unmanaged symbol %d\n", symbol->id);
3830 write_uint32 ((symbol->id << 3) | MONO_PROFILER_STATISTICAL_CODE_UNMANAGED_FUNCTION_ID);
/* First hit on this symbol: allocate an id and emit the definition
 * record (region id, new symbol id, name). */
3832 ProfilerExecutableMemoryRegions *regions = profiler->executable_regions;
3833 const char *symbol_name = executable_region_symbol_get_name (region, symbol);
3834 symbol->id = regions->next_unmanaged_function_id;
3835 regions->next_unmanaged_function_id ++;
3836 #if DEBUG_STATISTICAL_PROFILER
3837 printf ("[write_statistical_hit] Wrote new unmanaged symbol in region %d[%d]\n", region->id, offset);
3839 write_uint32 ((region->id << 3) | MONO_PROFILER_STATISTICAL_CODE_UNMANAGED_FUNCTION_NEW_ID);
3840 write_uint32 (symbol->id);
3841 write_string (symbol_name);
/* Region known but no symbol covers the offset: emit a raw offset hit. */
3844 #if DEBUG_STATISTICAL_PROFILER
3845 printf ("[write_statistical_hit] Wrote unknown unmanaged hit in region %d[%d] (address %p)\n", region->id, offset, address);
3847 write_uint32 ((region->id << 3) | MONO_PROFILER_STATISTICAL_CODE_UNMANAGED_FUNCTION_OFFSET_IN_REGION);
3848 write_uint32 (offset);
/* No region at all: record the absolute address. */
3851 #if DEBUG_STATISTICAL_PROFILER
3852 printf ("[write_statistical_hit] Wrote unknown unmanaged hit %p\n", address);
3854 write_uint32 (MONO_PROFILER_STATISTICAL_CODE_UNMANAGED_FUNCTION_OFFSET_IN_REGION);
3855 write_uint64 (GPOINTER_TO_UINT (address));
3859 return regions_refreshed;
/* Forward declaration; definition appears further down in this file. */
3863 flush_all_mappings (void);
/*
 * write_statistical_data_block:
 * Drains the unwritten slice of the shared statistical hit buffer into the
 * output stream: each sample is the leaf hit followed by up to
 * call_chain_depth caller entries, laid out (call_chain_depth + 1) slots
 * per sample. The block is bracketed by clock data and terminated by
 * STATISTICAL_CODE_END.
 * NOTE(review): the caller scan indexes data->hits[base_index + 0], which
 * is the same slot as the leaf hit — elided lines may offset this by one;
 * verify against the full source.
 */
3866 write_statistical_data_block (ProfilerStatisticalData *data) {
3867 int start_index = data->first_unwritten_index;
3868 int end_index = data->next_free_index;
3869 gboolean regions_refreshed = FALSE;
3870 int call_chain_depth = profiler->statistical_call_chain_depth;
/* Clamp to the physical end of the ring buffer. */
3873 if (end_index > data->end_index)
3874 end_index = data->end_index;
3876 if (start_index == end_index)
3879 data->first_unwritten_index = end_index;
3881 write_clock_data ();
3883 #if DEBUG_STATISTICAL_PROFILER
3884 printf ("[write_statistical_data_block] Starting loop at index %d\n", start_index);
3887 for (index = start_index; index < end_index; index ++) {
3888 int base_index = index * (call_chain_depth + 1);
3889 ProfilerStatisticalHit hit = data->hits [base_index];
3892 regions_refreshed = write_statistical_hit (hit.address, regions_refreshed);
/* First pass: count how many caller slots are populated (NULL ends it). */
3895 for (callers_count = 0; callers_count < call_chain_depth; callers_count ++) {
3896 hit = data->hits [base_index + callers_count];
3897 if (hit.address == NULL) {
3902 if (callers_count > 0) {
3903 write_uint32 ((callers_count << 3) | MONO_PROFILER_STATISTICAL_CODE_CALL_CHAIN);
/* Second pass: emit each caller hit. */
3905 for (callers_count = 0; callers_count < call_chain_depth; callers_count ++) {
3906 hit = data->hits [base_index + callers_count];
3907 if (hit.address != NULL) {
3908 regions_refreshed = write_statistical_hit (hit.address, regions_refreshed);
3915 write_uint32 (MONO_PROFILER_STATISTICAL_CODE_END);
3917 #if DEBUG_STATISTICAL_PROFILER
3918 printf ("[write_statistical_data_block] Ending loop at index %d\n", end_index);
3920 write_clock_data ();
3922 write_current_block (MONO_PROFILER_FILE_BLOCK_KIND_STATISTICAL);
/*
 * write_intro_block:
 * Emits the file's INTRO block: runtime tag, profiler flags and the
 * starting counter/time pair that later timestamps are relative to.
 */
3926 write_intro_block (void) {
3928 write_string ("mono");
3929 write_uint32 (profiler->flags);
3930 write_uint64 (profiler->start_counter);
3931 write_uint64 (profiler->start_time);
3932 write_current_block (MONO_PROFILER_FILE_BLOCK_KIND_INTRO);
/*
 * write_end_block:
 * Emits the terminating END block carrying the final counter/time pair.
 */
3936 write_end_block (void) {
3938 write_uint64 (profiler->end_counter);
3939 write_uint64 (profiler->end_time);
3940 write_current_block (MONO_PROFILER_FILE_BLOCK_KIND_END);
/*
 * update_mapping:
 * Walks this thread's unmapped events and makes sure every class/method
 * they reference has an id-mapping element, so the ids can be written out
 * before the events that use them. Advances first_unmapped_event.
 */
3944 update_mapping (ProfilerPerThreadData *data) {
3945 ProfilerEventData *start = data->first_unmapped_event;
3946 ProfilerEventData *end = data->next_free_event;
3947 data->first_unmapped_event = end;
3949 #if (DEBUG_LOGGING_PROFILER)
3950 printf ("[update_mapping][TID %ld] START\n", data->thread_id);
3952 while (start < end) {
3953 #if DEBUG_LOGGING_PROFILER
3954 printf ("Examining event %p[TID %ld] looking for a new mapping...\n", start, data->thread_id);
3956 if (start->data_type == MONO_PROFILER_EVENT_DATA_TYPE_CLASS) {
3957 ClassIdMappingElement *element = class_id_mapping_element_get (start->data.address);
/* First sighting of this class: allocate an id for it. */
3958 if (element == NULL) {
3959 MonoClass *klass = start->data.address;
3960 class_id_mapping_element_new (klass);
3962 } else if (start->data_type == MONO_PROFILER_EVENT_DATA_TYPE_METHOD) {
3963 MethodIdMappingElement *element = method_id_mapping_element_get (start->data.address);
3964 if (element == NULL) {
3965 MonoMethod *method = start->data.address;
3966 if (method != NULL) {
3967 method_id_mapping_element_new (method);
/* MAX_EVENT_VALUE marks an event followed by a 64-bit extension slot
 * (see the STORE_EVENT_* macros); presumably skipped here — elided. */
3972 if (start->value == MAX_EVENT_VALUE) {
3977 #if (DEBUG_LOGGING_PROFILER)
3978 printf ("[update_mapping][TID %ld] END\n", data->thread_id);
/*
 * flush_all_mappings:
 * Updates the id mappings for every thread, then writes each thread's
 * mapping block (two passes: ids must all exist before any are written).
 */
3983 flush_all_mappings (void) {
3984 ProfilerPerThreadData *data;
3986 for (data = profiler->per_thread_data; data != NULL; data = data->next) {
3987 update_mapping (data);
3989 for (data = profiler->per_thread_data; data != NULL; data = data->next) {
3990 write_mapping_block (data->thread_id);
/*
 * flush_full_event_data_buffer:
 * Called when a thread's event buffer is (nearly) full: flushes mappings
 * and the thread's event block, then resets all buffer cursors and
 * restarts the event counter baseline.
 */
3995 flush_full_event_data_buffer (ProfilerPerThreadData *data) {
3998 // We flush all mappings because some id definitions could come
3999 // from other threads
4000 flush_all_mappings ();
/* NOTE(review): ">=" here asserts the mapping cursor caught up to the
 * free cursor after the flush — confirm intended direction in full source. */
4001 g_assert (data->first_unmapped_event >= data->next_free_event);
4003 write_thread_data_block (data);
/* Reset every cursor to the start of the (now empty) buffer. */
4005 data->next_free_event = data->events;
4006 data->next_unreserved_event = data->events;
4007 data->first_unwritten_event = data->events;
4008 data->first_unmapped_event = data->events;
4009 MONO_PROFILER_GET_CURRENT_COUNTER (data->start_event_counter);
4010 data->last_event_counter = data->start_event_counter;
4015 /* The ">=" operator is intentional, to leave one spare slot for "extended values" */
4016 #define RESERVE_EVENTS(d,e,count) do {\
4017 if ((d)->next_unreserved_event >= ((d)->end_event - (count))) {\
4018 flush_full_event_data_buffer (d);\
4020 (e) = (d)->next_unreserved_event;\
4021 (d)->next_unreserved_event += (count);\
/* Convenience wrappers over RESERVE_EVENTS for the common single-event
 * case, and the commit step that publishes reserved slots as free events. */
4023 #define GET_NEXT_FREE_EVENT(d,e) RESERVE_EVENTS ((d),(e),1)
4024 #define COMMIT_RESERVED_EVENTS(d) do {\
4025 data->next_free_event = data->next_unreserved_event;\
/*
 * flush_everything:
 * Full flush: all id mappings, every thread's event block, and the
 * statistical sample buffer. Used before unload events (see below).
 */
4029 flush_everything (void) {
4030 ProfilerPerThreadData *data;
4032 flush_all_mappings ();
4033 for (data = profiler->per_thread_data; data != NULL; data = data->next) {
4034 write_thread_data_block (data);
4036 write_statistical_data_block (profiler->statistical_data);
/* Maps a MonoProfileResult to the SUCCESS/FAILURE loaded-event flag. */
4039 #define RESULT_TO_LOAD_CODE(r) (((r)==MONO_PROFILE_OK)?MONO_PROFILER_LOADED_EVENT_SUCCESS:MONO_PROFILER_LOADED_EVENT_FAILURE)
/* --- Appdomain load/unload profiler callbacks ---
 * Pattern shared by all loader callbacks below: *_start_* records the
 * begin timestamp; *_end_* resolves a display name, closes the element
 * and writes the LOADED/UNLOADED block. Unload-start additionally flushes
 * all buffered data, since the element may become unusable afterwards. */
4041 appdomain_start_load (MonoProfiler *profiler, MonoDomain *domain) {
4043 loaded_element_load_start (profiler->loaded_appdomains, domain);
4048 appdomain_end_load (MonoProfiler *profiler, MonoDomain *domain, int result) {
4050 LoadedElement *element;
/* Appdomains are named by their numeric id. */
4052 name = g_strdup_printf ("%d", mono_domain_get_id (domain));
4054 element = loaded_element_load_end (profiler->loaded_appdomains, domain, name);
4055 write_element_load_block (element, MONO_PROFILER_LOADED_EVENT_APPDOMAIN | RESULT_TO_LOAD_CODE (result), CURRENT_THREAD_ID (), domain);
4060 appdomain_start_unload (MonoProfiler *profiler, MonoDomain *domain) {
4062 loaded_element_unload_start (profiler->loaded_appdomains, domain);
4063 flush_everything ();
4068 appdomain_end_unload (MonoProfiler *profiler, MonoDomain *domain) {
4069 LoadedElement *element;
4072 element = loaded_element_unload_end (profiler->loaded_appdomains, domain);
4073 write_element_unload_block (element, MONO_PROFILER_LOADED_EVENT_APPDOMAIN, CURRENT_THREAD_ID ());
/* --- Module (MonoImage) load/unload callbacks --- */
4078 module_start_load (MonoProfiler *profiler, MonoImage *module) {
4080 loaded_element_load_start (profiler->loaded_modules, module);
4085 module_end_load (MonoProfiler *profiler, MonoImage *module, int result) {
4087 MonoAssemblyName aname;
4088 LoadedElement *element;
/* Prefer the stringified assembly name; dynamic images get a pointer tag. */
4090 if (mono_assembly_fill_assembly_name (module, &aname)) {
4091 name = mono_stringify_assembly_name (&aname);
4093 name = g_strdup_printf ("Dynamic module \"%p\"", module);
4096 element = loaded_element_load_end (profiler->loaded_modules, module, name);
4097 write_element_load_block (element, MONO_PROFILER_LOADED_EVENT_MODULE | RESULT_TO_LOAD_CODE (result), CURRENT_THREAD_ID (), module);
4102 module_start_unload (MonoProfiler *profiler, MonoImage *module) {
4104 loaded_element_unload_start (profiler->loaded_modules, module);
4105 flush_everything ();
4110 module_end_unload (MonoProfiler *profiler, MonoImage *module) {
4111 LoadedElement *element;
4114 element = loaded_element_unload_end (profiler->loaded_modules, module);
4115 write_element_unload_block (element, MONO_PROFILER_LOADED_EVENT_MODULE, CURRENT_THREAD_ID ());
/* --- Assembly load/unload callbacks (same pattern as modules) --- */
4120 assembly_start_load (MonoProfiler *profiler, MonoAssembly *assembly) {
4122 loaded_element_load_start (profiler->loaded_assemblies, assembly);
4127 assembly_end_load (MonoProfiler *profiler, MonoAssembly *assembly, int result) {
4129 MonoAssemblyName aname;
4130 LoadedElement *element;
4132 if (mono_assembly_fill_assembly_name (mono_assembly_get_image (assembly), &aname)) {
4133 name = mono_stringify_assembly_name (&aname);
4135 name = g_strdup_printf ("Dynamic assembly \"%p\"", assembly);
4138 element = loaded_element_load_end (profiler->loaded_assemblies, assembly, name);
4139 write_element_load_block (element, MONO_PROFILER_LOADED_EVENT_ASSEMBLY | RESULT_TO_LOAD_CODE (result), CURRENT_THREAD_ID (), assembly);
4144 assembly_start_unload (MonoProfiler *profiler, MonoAssembly *assembly) {
4146 loaded_element_unload_start (profiler->loaded_assemblies, assembly);
4147 flush_everything ();
4151 assembly_end_unload (MonoProfiler *profiler, MonoAssembly *assembly) {
4152 LoadedElement *element;
4155 element = loaded_element_unload_end (profiler->loaded_assemblies, assembly);
4156 write_element_unload_block (element, MONO_PROFILER_LOADED_EVENT_ASSEMBLY, CURRENT_THREAD_ID ());
/* Debug-only helpers: enum-to-string converters and the event tracer used
 * by LOG_EVENT. Compiled only when DEBUG_LOGGING_PROFILER is set. */
4160 #if (DEBUG_LOGGING_PROFILER)
4162 class_event_code_to_string (MonoProfilerClassEvents code) {
4164 case MONO_PROFILER_EVENT_CLASS_LOAD: return "LOAD";
4165 case MONO_PROFILER_EVENT_CLASS_UNLOAD: return "UNLOAD";
4166 case MONO_PROFILER_EVENT_CLASS_ALLOCATION: return "ALLOCATION";
4167 case MONO_PROFILER_EVENT_CLASS_EXCEPTION: return "EXCEPTION";
4168 default: g_assert_not_reached (); return "";
4172 method_event_code_to_string (MonoProfilerMethodEvents code) {
4174 case MONO_PROFILER_EVENT_METHOD_CALL: return "CALL";
4175 case MONO_PROFILER_EVENT_METHOD_JIT: return "JIT";
4176 case MONO_PROFILER_EVENT_METHOD_FREED: return "FREED";
4177 case MONO_PROFILER_EVENT_METHOD_ALLOCATION_CALLER: return "ALLOCATION_CALLER";
4178 case MONO_PROFILER_EVENT_METHOD_ALLOCATION_JIT_TIME_CALLER: return "ALLOCATION_JIT_TIME_CALLER";
4179 case MONO_PROFILER_EVENT_ALLOCATION_OBJECT_ID: return "ALLOCATION_OBJECT_ID";
4180 default: g_assert_not_reached (); return "";
4184 number_event_code_to_string (MonoProfilerEvents code) {
4186 case MONO_PROFILER_EVENT_THREAD: return "THREAD";
4187 case MONO_PROFILER_EVENT_GC_COLLECTION: return "GC_COLLECTION";
4188 case MONO_PROFILER_EVENT_GC_MARK: return "GC_MARK";
4189 case MONO_PROFILER_EVENT_GC_SWEEP: return "GC_SWEEP";
4190 case MONO_PROFILER_EVENT_GC_RESIZE: return "GC_RESIZE";
4191 case MONO_PROFILER_EVENT_GC_STOP_WORLD: return "GC_STOP_WORLD";
4192 case MONO_PROFILER_EVENT_GC_START_WORLD: return "GC_START_WORLD";
4193 case MONO_PROFILER_EVENT_JIT_TIME_ALLOCATION: return "JIT_TIME_ALLOCATION";
4194 case MONO_PROFILER_EVENT_STACK_SECTION: return "STACK_SECTION";
4195 case MONO_PROFILER_EVENT_ALLOCATION_OBJECT_ID: return "ALLOCATION_OBJECT_ID";
4196 default: g_assert_not_reached (); return "";
4200 event_result_to_string (MonoProfilerEventResult code) {
4202 case MONO_PROFILER_EVENT_RESULT_SUCCESS: return "SUCCESS";
4203 case MONO_PROFILER_EVENT_RESULT_FAILURE: return "FAILURE";
4204 default: g_assert_not_reached (); return "";
4208 event_kind_to_string (MonoProfilerEventKind code) {
4210 case MONO_PROFILER_EVENT_KIND_START: return "START";
4211 case MONO_PROFILER_EVENT_KIND_END: return "END";
4212 default: g_assert_not_reached (); return "";
/*
 * print_event_data:
 * Traces one stored event to stdout, dispatching on data_type so that
 * class and method events get their target's printable name.
 */
4216 print_event_data (ProfilerPerThreadData *data, ProfilerEventData *event, guint64 value) {
4217 if (event->data_type == MONO_PROFILER_EVENT_DATA_TYPE_CLASS) {
4218 printf ("STORE EVENT [TID %ld][EVENT %ld] CLASS[%p] %s:%s:%s[%d-%d-%d] %ld (%s.%s)\n",
4220 event - data->events,
4221 event->data.address,
4222 class_event_code_to_string (event->code & ~MONO_PROFILER_EVENT_RESULT_MASK),
4223 event_result_to_string (event->code & MONO_PROFILER_EVENT_RESULT_MASK),
4224 event_kind_to_string (event->kind),
4229 mono_class_get_namespace ((MonoClass*) event->data.address),
4230 mono_class_get_name ((MonoClass*) event->data.address));
4231 } else if (event->data_type == MONO_PROFILER_EVENT_DATA_TYPE_METHOD) {
4232 printf ("STORE EVENT [TID %ld][EVENT %ld] METHOD[%p] %s:%s:%s[%d-%d-%d] %ld (%s.%s:%s (?))\n",
4234 event - data->events,
4235 event->data.address,
4236 method_event_code_to_string (event->code & ~MONO_PROFILER_EVENT_RESULT_MASK),
4237 event_result_to_string (event->code & MONO_PROFILER_EVENT_RESULT_MASK),
4238 event_kind_to_string (event->kind),
/* Method pointer may be NULL (e.g. freed methods) — guard every deref. */
4243 (event->data.address != NULL) ? mono_class_get_namespace (mono_method_get_class ((MonoMethod*) event->data.address)) : "<NULL>",
4244 (event->data.address != NULL) ? mono_class_get_name (mono_method_get_class ((MonoMethod*) event->data.address)) : "<NULL>",
4245 (event->data.address != NULL) ? mono_method_get_name ((MonoMethod*) event->data.address) : "<NULL>");
4247 printf ("STORE EVENT [TID %ld][EVENT %ld] NUMBER[%ld] %s:%s[%d-%d-%d] %ld\n",
4249 event - data->events,
4250 (guint64) event->data.number,
4251 number_event_code_to_string (event->code),
4252 event_kind_to_string (event->kind),
/* LOG_EVENT compiles to the tracer above in debug builds, nothing else. */
4259 #define LOG_EVENT(data,ev,val) print_event_data ((data),(ev),(val))
4261 #define LOG_EVENT(data,ev,val)
/*
 * Event-storing macro family. The four STORE_EVENT_* variants differ on
 * two axes: payload (ITEM = pointer in data.address, NUMBER = integer in
 * data.number) and value (COUNTER = delta from the thread's last event
 * counter, VALUE = caller-supplied). Values that do not fit in the event's
 * value field are flagged with MAX_EVENT_VALUE and spilled into the next
 * (pre-reserved) event slot as a raw guint64 "extension".
 * No comments are placed inside the bodies: they are '\'-continued and a
 * comment line would break the continuation.
 */
4264 #define RESULT_TO_EVENT_CODE(r) (((r)==MONO_PROFILE_OK)?MONO_PROFILER_EVENT_RESULT_SUCCESS:MONO_PROFILER_EVENT_RESULT_FAILURE)
4266 #define STORE_EVENT_ITEM_COUNTER(event,p,i,dt,c,k) do {\
4269 MONO_PROFILER_GET_CURRENT_COUNTER (counter);\
4270 (event)->data.address = (i);\
4271 (event)->data_type = (dt);\
4272 (event)->code = (c);\
4273 (event)->kind = (k);\
4274 delta = counter - data->last_event_counter;\
4275 if (delta < MAX_EVENT_VALUE) {\
4276 (event)->value = delta;\
4278 ProfilerEventData *extension = data->next_unreserved_event;\
4279 data->next_unreserved_event ++;\
4280 (event)->value = MAX_EVENT_VALUE;\
4281 *(guint64*)extension = delta;\
4283 data->last_event_counter = counter;\
4284 LOG_EVENT (data, (event), delta);\
4286 #define STORE_EVENT_ITEM_VALUE(event,p,i,dt,c,k,v) do {\
4287 (event)->data.address = (i);\
4288 (event)->data_type = (dt);\
4289 (event)->code = (c);\
4290 (event)->kind = (k);\
4291 if ((v) < MAX_EVENT_VALUE) {\
4292 (event)->value = (v);\
4294 ProfilerEventData *extension = data->next_unreserved_event;\
4295 data->next_unreserved_event ++;\
4296 (event)->value = MAX_EVENT_VALUE;\
4297 *(guint64*)extension = (v);\
4299 LOG_EVENT (data, (event), (v));\
4301 #define STORE_EVENT_NUMBER_COUNTER(event,p,n,dt,c,k) do {\
4304 MONO_PROFILER_GET_CURRENT_COUNTER (counter);\
4305 (event)->data.number = (n);\
4306 (event)->data_type = (dt);\
4307 (event)->code = (c);\
4308 (event)->kind = (k);\
4309 delta = counter - data->last_event_counter;\
4310 if (delta < MAX_EVENT_VALUE) {\
4311 (event)->value = delta;\
4313 ProfilerEventData *extension = data->next_unreserved_event;\
4314 data->next_unreserved_event ++;\
4315 (event)->value = MAX_EVENT_VALUE;\
4316 *(guint64*)extension = delta;\
4318 data->last_event_counter = counter;\
4319 LOG_EVENT (data, (event), delta);\
4321 #define STORE_EVENT_NUMBER_VALUE(event,p,n,dt,c,k,v) do {\
4322 (event)->data.number = (n);\
4323 (event)->data_type = (dt);\
4324 (event)->code = (c);\
4325 (event)->kind = (k);\
4326 if ((v) < MAX_EVENT_VALUE) {\
4327 (event)->value = (v);\
4329 ProfilerEventData *extension = data->next_unreserved_event;\
4330 data->next_unreserved_event ++;\
4331 (event)->value = MAX_EVENT_VALUE;\
4332 *(guint64*)extension = (v);\
4334 LOG_EVENT (data, (event), (v));\
4336 #define INCREMENT_EVENT(event) do {\
4337 if ((event)->value != MAX_EVENT_VALUE) {\
/* Profiler callback: a class load is starting.  Reserves one per-thread
 * event slot, records a CLASS_LOAD/START event stamped with the current
 * counter, and commits it. */
4345 class_start_load (MonoProfiler *profiler, MonoClass *klass) {
4346 ProfilerPerThreadData *data;
4347 ProfilerEventData *event;
4348 GET_PROFILER_THREAD_DATA (data);
4349 GET_NEXT_FREE_EVENT (data, event);
4350 STORE_EVENT_ITEM_COUNTER (event, profiler, klass, MONO_PROFILER_EVENT_DATA_TYPE_CLASS, MONO_PROFILER_EVENT_CLASS_LOAD, MONO_PROFILER_EVENT_KIND_START);
4351 COMMIT_RESERVED_EVENTS (data);
/* Profiler callback: class load finished.  The event code carries the
 * SUCCESS/FAILURE bit derived from `result`. */
4354 class_end_load (MonoProfiler *profiler, MonoClass *klass, int result) {
4355 ProfilerPerThreadData *data;
4356 ProfilerEventData *event;
4357 GET_PROFILER_THREAD_DATA (data);
4358 GET_NEXT_FREE_EVENT (data, event);
4359 STORE_EVENT_ITEM_COUNTER (event, profiler, klass, MONO_PROFILER_EVENT_DATA_TYPE_CLASS, MONO_PROFILER_EVENT_CLASS_LOAD | RESULT_TO_EVENT_CODE (result), MONO_PROFILER_EVENT_KIND_END);
4360 COMMIT_RESERVED_EVENTS (data);
/* Profiler callback: class unload starting (CLASS_UNLOAD/START event). */
4363 class_start_unload (MonoProfiler *profiler, MonoClass *klass) {
4364 ProfilerPerThreadData *data;
4365 ProfilerEventData *event;
4366 GET_PROFILER_THREAD_DATA (data);
4367 GET_NEXT_FREE_EVENT (data, event);
4368 STORE_EVENT_ITEM_COUNTER (event, profiler, klass, MONO_PROFILER_EVENT_DATA_TYPE_CLASS, MONO_PROFILER_EVENT_CLASS_UNLOAD, MONO_PROFILER_EVENT_KIND_START);
4369 COMMIT_RESERVED_EVENTS (data);
/* Profiler callback: class unload finished (CLASS_UNLOAD/END event).
 * Note: unlike class_end_load there is no result parameter for unload. */
4372 class_end_unload (MonoProfiler *profiler, MonoClass *klass) {
4373 ProfilerPerThreadData *data;
4374 ProfilerEventData *event;
4375 GET_PROFILER_THREAD_DATA (data);
4376 GET_NEXT_FREE_EVENT (data, event);
4377 STORE_EVENT_ITEM_COUNTER (event, profiler, klass, MONO_PROFILER_EVENT_DATA_TYPE_CLASS, MONO_PROFILER_EVENT_CLASS_UNLOAD, MONO_PROFILER_EVENT_KIND_END);
4378 COMMIT_RESERVED_EVENTS (data);
/* Profiler callback: JIT compilation of `method` is starting.  Pushes the
 * method on the thread's shadow stack (marked as being jitted) and records
 * a METHOD_JIT/START event. */
4382 method_start_jit (MonoProfiler *profiler, MonoMethod *method) {
4383 ProfilerPerThreadData *data;
4384 ProfilerEventData *event;
4385 GET_PROFILER_THREAD_DATA (data);
4386 GET_NEXT_FREE_EVENT (data, event);
4387 thread_stack_push_jitted_safely (&(data->stack), method, TRUE);
4388 STORE_EVENT_ITEM_COUNTER (event, profiler, method, MONO_PROFILER_EVENT_DATA_TYPE_METHOD, MONO_PROFILER_EVENT_METHOD_JIT, MONO_PROFILER_EVENT_KIND_START);
4389 COMMIT_RESERVED_EVENTS (data);
/* Profiler callback: JIT compilation finished.  Records METHOD_JIT/END
 * (with the success/failure bit from `result`) and pops the shadow stack
 * entry pushed by method_start_jit. */
4392 method_end_jit (MonoProfiler *profiler, MonoMethod *method, int result) {
4393 ProfilerPerThreadData *data;
4394 ProfilerEventData *event;
4395 GET_PROFILER_THREAD_DATA (data);
4396 GET_NEXT_FREE_EVENT (data, event);
4397 STORE_EVENT_ITEM_COUNTER (event, profiler, method, MONO_PROFILER_EVENT_DATA_TYPE_METHOD, MONO_PROFILER_EVENT_METHOD_JIT | RESULT_TO_EVENT_CODE (result), MONO_PROFILER_EVENT_KIND_END);
4398 thread_stack_pop (&(data->stack));
4399 COMMIT_RESERVED_EVENTS (data);
/* Profiler callback: a method was successfully jitted.  When oprofile
 * support is enabled, registers the generated native code region with
 * oprofile (op_write_native_code) under a "Namespace.Class:Method (sig)"
 * name.  NOTE(review): the parameter is named `prof` but the body reads
 * the file-global `profiler` — presumably intentional, confirm. */
4404 method_jit_result (MonoProfiler *prof, MonoMethod *method, MonoJitInfo* jinfo, int result) {
4405 if (profiler->action_flags.oprofile && (result == MONO_PROFILE_OK)) {
4406 MonoClass *klass = mono_method_get_class (method);
4407 char *signature = mono_signature_get_desc (mono_method_signature (method), TRUE);
4408 char *name = g_strdup_printf ("%s.%s:%s (%s)", mono_class_get_namespace (klass), mono_class_get_name (klass), mono_method_get_name (method), signature);
4409 gpointer code_start = mono_jit_info_get_code_start (jinfo);
4410 int code_size = mono_jit_info_get_code_size (jinfo);
4412 if (op_write_native_code (name, code_start, code_size)) {
4413 g_warning ("Problem calling op_write_native_code\n");
/* Profiler callback: managed method entry.  Skips all work while the
 * profiler is disabled.  Records a METHOD_CALL/START event only when call
 * tracking is on, and pushes the method on the shadow stack only when
 * stack tracking is on. */
4424 method_enter (MonoProfiler *profiler, MonoMethod *method) {
4425 ProfilerPerThreadData *data;
4427 CHECK_PROFILER_ENABLED ();
4428 GET_PROFILER_THREAD_DATA (data);
4429 if (profiler->action_flags.track_calls) {
4430 ProfilerEventData *event;
4431 GET_NEXT_FREE_EVENT (data, event);
4432 STORE_EVENT_ITEM_COUNTER (event, profiler, method, MONO_PROFILER_EVENT_DATA_TYPE_METHOD, MONO_PROFILER_EVENT_METHOD_CALL, MONO_PROFILER_EVENT_KIND_START);
4433 COMMIT_RESERVED_EVENTS (data);
4435 if (profiler->action_flags.track_stack) {
4436 thread_stack_push_safely (&(data->stack), method);
/* Profiler callback: managed method exit — mirror image of method_enter
 * (METHOD_CALL/END event, shadow-stack pop). */
4440 method_leave (MonoProfiler *profiler, MonoMethod *method) {
4441 ProfilerPerThreadData *data;
4443 CHECK_PROFILER_ENABLED ();
4444 GET_PROFILER_THREAD_DATA (data);
4445 if (profiler->action_flags.track_calls) {
4446 ProfilerEventData *event;
4447 GET_NEXT_FREE_EVENT (data, event);
4448 STORE_EVENT_ITEM_COUNTER (event, profiler, method, MONO_PROFILER_EVENT_DATA_TYPE_METHOD, MONO_PROFILER_EVENT_METHOD_CALL, MONO_PROFILER_EVENT_KIND_END);
4449 COMMIT_RESERVED_EVENTS (data);
4451 if (profiler->action_flags.track_stack) {
4452 thread_stack_pop (&(data->stack));
/* Profiler callback: a method is being freed by the runtime.  Records a
 * METHOD_FREED event (kind 0) so the log can drop stale method ids. */
4457 method_free (MonoProfiler *profiler, MonoMethod *method) {
4458 ProfilerPerThreadData *data;
4459 ProfilerEventData *event;
4460 GET_PROFILER_THREAD_DATA (data);
4461 GET_NEXT_FREE_EVENT (data, event);
4462 STORE_EVENT_ITEM_COUNTER (event, profiler, method, MONO_PROFILER_EVENT_DATA_TYPE_METHOD, MONO_PROFILER_EVENT_METHOD_FREED, 0);
4463 COMMIT_RESERVED_EVENTS (data);
/* Profiler callback: a managed thread started.  Records a THREAD/START
 * event carrying the thread id as the event number. */
4467 thread_start (MonoProfiler *profiler, uintptr_t tid) {
4468 ProfilerPerThreadData *data;
4469 ProfilerEventData *event;
4470 GET_PROFILER_THREAD_DATA (data);
4471 GET_NEXT_FREE_EVENT (data, event);
4472 STORE_EVENT_NUMBER_COUNTER (event, profiler, tid, MONO_PROFILER_EVENT_DATA_TYPE_OTHER, MONO_PROFILER_EVENT_THREAD, MONO_PROFILER_EVENT_KIND_START);
4473 COMMIT_RESERVED_EVENTS (data);
/* Profiler callback: a managed thread ended (THREAD/END event). */
4476 thread_end (MonoProfiler *profiler, uintptr_t tid) {
4477 ProfilerPerThreadData *data;
4478 ProfilerEventData *event;
4479 GET_PROFILER_THREAD_DATA (data);
4480 GET_NEXT_FREE_EVENT (data, event);
4481 STORE_EVENT_NUMBER_COUNTER (event, profiler, tid, MONO_PROFILER_EVENT_DATA_TYPE_OTHER, MONO_PROFILER_EVENT_THREAD, MONO_PROFILER_EVENT_KIND_END);
4482 COMMIT_RESERVED_EVENTS (data);
/* Write the not-yet-saved portion of the thread's shadow stack into the
 * event buffer: first a STACK_SECTION event whose value is the number of
 * unsaved frames, then one event per frame flagging whether the caller was
 * pushed as "jitted" (ALLOCATION_JIT_TIME_CALLER) or not (ALLOCATION_CALLER).
 * Finally marks the whole stack as saved (last_saved_top = top).
 * Returns the event cursor positioned after the written events (the return
 * statement is below the lines shown here). */
4486 save_stack_delta (MonoProfiler *profiler, ProfilerPerThreadData *data, ProfilerEventData *events, int unsaved_frames) {
4489 /* In this loop it is safe to simply increment "events" because MAX_EVENT_VALUE cannot be reached. */
4490 STORE_EVENT_NUMBER_VALUE (events, profiler, data->stack.last_saved_top, MONO_PROFILER_EVENT_DATA_TYPE_OTHER, MONO_PROFILER_EVENT_STACK_SECTION, 0, unsaved_frames);
4492 for (i = 0; i < unsaved_frames; i++) {
4493 if (! thread_stack_index_from_top_is_jitted (&(data->stack), i)) {
4494 STORE_EVENT_ITEM_VALUE (events, profiler, thread_stack_index_from_top (&(data->stack), i), MONO_PROFILER_EVENT_DATA_TYPE_METHOD, MONO_PROFILER_EVENT_METHOD_ALLOCATION_CALLER, 0, 0);
4496 STORE_EVENT_ITEM_VALUE (events, profiler, thread_stack_index_from_top (&(data->stack), i), MONO_PROFILER_EVENT_DATA_TYPE_METHOD, MONO_PROFILER_EVENT_METHOD_ALLOCATION_JIT_TIME_CALLER, 0, 0);
4501 data->stack.last_saved_top = data->stack.top;
/* Profiler callback: a managed object was allocated.
 * First computes how many event slots this allocation needs (base CLASS_ALLOCATION
 * event + optional caller event + optional object-id event + optional stack
 * delta), reserves them all at once, then fills them in:
 *   - optional stack delta (save_allocation_stack),
 *   - CLASS_ALLOCATION event carrying the object size,
 *   - the object pointer is remembered for later heap-shot processing when
 *     any heap-analysis mode is active,
 *   - optional allocation-caller event, skipping managed-to-native wrapper
 *     frames on the shadow stack,
 *   - optional ALLOCATION_OBJECT_ID event carrying the object pointer. */
4507 object_allocated (MonoProfiler *profiler, MonoObject *obj, MonoClass *klass) {
4508 ProfilerPerThreadData *data;
4509 ProfilerEventData *events;
4511 int event_slot_count;
4513 GET_PROFILER_THREAD_DATA (data);
4514 event_slot_count = 1;
4515 if (profiler->action_flags.save_allocation_caller) {
4516 event_slot_count ++;
4518 if (profiler->action_flags.allocations_carry_id) {
4519 event_slot_count ++;
4521 if (profiler->action_flags.save_allocation_stack) {
4522 unsaved_frames = thread_stack_count_unsaved_frames (&(data->stack));
4523 event_slot_count += (unsaved_frames + 1);
4527 RESERVE_EVENTS (data, events, event_slot_count);
4529 if (profiler->action_flags.save_allocation_stack) {
4530 events = save_stack_delta (profiler, data, events, unsaved_frames);
4533 STORE_EVENT_ITEM_VALUE (events, profiler, klass, MONO_PROFILER_EVENT_DATA_TYPE_CLASS, MONO_PROFILER_EVENT_CLASS_ALLOCATION, 0, (guint64) mono_object_get_size (obj));
4534 if (profiler->action_flags.unreachable_objects || profiler->action_flags.heap_shot || profiler->action_flags.collection_summary) {
4535 STORE_ALLOCATED_OBJECT (data, obj);
4538 if (profiler->action_flags.save_allocation_caller) {
4539 MonoMethod *caller = thread_stack_top (&(data->stack));
4540 gboolean caller_is_jitted = thread_stack_top_is_jitted (&(data->stack));
4542 /* In this loop it is safe to simply increment "events" because MAX_EVENT_VALUE cannot be reached. */
/* Walk down past managed-to-native wrappers to find the real managed caller. */
4545 while ((caller != NULL) && (caller->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE)) {
4546 caller = thread_stack_index_from_top (&(data->stack), index);
4547 caller_is_jitted = thread_stack_index_from_top_is_jitted (&(data->stack), index);
4550 if (! caller_is_jitted) {
4551 STORE_EVENT_ITEM_VALUE (events, profiler, caller, MONO_PROFILER_EVENT_DATA_TYPE_METHOD, MONO_PROFILER_EVENT_METHOD_ALLOCATION_CALLER, 0, 0);
4553 STORE_EVENT_ITEM_VALUE (events, profiler, caller, MONO_PROFILER_EVENT_DATA_TYPE_METHOD, MONO_PROFILER_EVENT_METHOD_ALLOCATION_JIT_TIME_CALLER, 0, 0);
4556 if (profiler->action_flags.allocations_carry_id) {
4558 STORE_EVENT_ITEM_VALUE (events, profiler, obj, MONO_PROFILER_EVENT_DATA_TYPE_OTHER, MONO_PROFILER_EVENT_ALLOCATION_OBJECT_ID, 0, 0);
4561 COMMIT_RESERVED_EVENTS (data);
/* Profiler callback: a monitor (lock) event on `obj`.
 * Reserves 2 slots (CLASS_MONITOR + OBJECT_MONITOR events), plus the stack
 * delta when there are unsaved shadow-stack frames (unsaved_frames + 1
 * extra slots).  The OBJECT_MONITOR event's value is the specific
 * MonoProfilerMonitorEvent code. */
4565 monitor_event (MonoProfiler *profiler, MonoObject *obj, MonoProfilerMonitorEvent event) {
4566 ProfilerPerThreadData *data;
4567 ProfilerEventData *events;
4570 int event_slot_count;
4572 CHECK_PROFILER_ENABLED ();
4574 GET_PROFILER_THREAD_DATA (data);
4575 klass = mono_object_get_class (obj);
4577 unsaved_frames = thread_stack_count_unsaved_frames (&(data->stack));
4578 if (unsaved_frames > 0) {
4579 event_slot_count = unsaved_frames + 3;
4581 event_slot_count = 2;
4584 RESERVE_EVENTS (data, events, event_slot_count);
4585 if (unsaved_frames > 0) {
4586 events = save_stack_delta (profiler, data, events, unsaved_frames);
4588 STORE_EVENT_ITEM_COUNTER (events, profiler, klass, MONO_PROFILER_EVENT_DATA_TYPE_CLASS, MONO_PROFILER_EVENT_CLASS_MONITOR, MONO_PROFILER_EVENT_KIND_START);
4589 INCREMENT_EVENT (events);
4590 STORE_EVENT_ITEM_VALUE (events, profiler, obj, MONO_PROFILER_EVENT_DATA_TYPE_OTHER, MONO_PROFILER_EVENT_OBJECT_MONITOR, 0, event);
4591 COMMIT_RESERVED_EVENTS (data);
/* Statistical (sampling) profiler callback with call chains.
 * Claims a slot range in the current ProfilerStatisticalData buffer with an
 * atomic increment of next_free_index; each claimed range holds
 * statistical_call_chain_depth + 1 hits (remaining entries are NULL-padded).
 * The thread that claims exactly end_index + 1 is responsible for swapping
 * the full buffer with the second buffer and waking the writer thread.
 * NOTE(review): the buffer-swap spin loops read profiler fields without
 * atomics/barriers — this runs in signal context in the original design;
 * confirm the memory-ordering assumptions before touching this code. */
4595 statistical_call_chain (MonoProfiler *profiler, int call_chain_depth, guchar **ips, void *context) {
4596 MonoDomain *domain = mono_domain_get ();
4597 ProfilerStatisticalData *data;
4600 CHECK_PROFILER_ENABLED ();
4602 data = profiler->statistical_data;
4603 index = InterlockedIncrement ((int*) &data->next_free_index);
4605 if (index <= data->end_index) {
4606 unsigned int base_index = (index - 1) * (profiler->statistical_call_chain_depth + 1);
4607 unsigned int call_chain_index = 0;
4609 //printf ("[statistical_call_chain] (%d)\n", call_chain_depth);
4610 while (call_chain_index < call_chain_depth) {
4611 ProfilerStatisticalHit *hit = & (data->hits [base_index + call_chain_index]);
4612 //printf ("[statistical_call_chain] [%d] = %p\n", base_index + call_chain_index, ips [call_chain_index]);
4613 hit->address = (gpointer) ips [call_chain_index];
4614 hit->domain = domain;
4615 call_chain_index ++;
/* Pad the rest of the slot range with NULL addresses. */
4617 while (call_chain_index <= profiler->statistical_call_chain_depth) {
4618 ProfilerStatisticalHit *hit = & (data->hits [base_index + call_chain_index]);
4619 //printf ("[statistical_call_chain] [%d] = NULL\n", base_index + call_chain_index);
4620 hit->address = NULL;
4622 call_chain_index ++;
4625 /* Check if we are the one that must swap the buffers */
4626 if (index == data->end_index + 1) {
4627 ProfilerStatisticalData *new_data;
4629 /* In the *impossible* case that the writer thread has not finished yet, */
4630 /* loop waiting for it and meanwhile lose all statistical events... */
4632 /* First, wait that it consumed the ready buffer */
4633 while (profiler->statistical_data_ready != NULL);
4634 /* Then, wait that it produced the free buffer */
4635 new_data = profiler->statistical_data_second_buffer;
4636 } while (new_data == NULL);
4638 profiler->statistical_data_ready = data;
4639 profiler->statistical_data = new_data;
4640 profiler->statistical_data_second_buffer = NULL;
4641 WRITER_EVENT_RAISE ();
4642 /* Otherwise exit from the handler and drop the event... */
4647 /* Loop again, hoping to acquire a free slot this time (otherwise the event will be dropped) */
4650 } while (data == NULL);
/* Statistical (sampling) profiler callback, single-ip variant of
 * statistical_call_chain: claims one hit slot atomically, records the
 * sampled instruction pointer and current domain, and performs the same
 * buffer swap / writer wake-up when the buffer fills. */
4654 statistical_hit (MonoProfiler *profiler, guchar *ip, void *context) {
4655 MonoDomain *domain = mono_domain_get ();
4656 ProfilerStatisticalData *data;
4659 CHECK_PROFILER_ENABLED ();
4661 data = profiler->statistical_data;
4662 index = InterlockedIncrement ((int*) &data->next_free_index);
4664 if (index <= data->end_index) {
4665 ProfilerStatisticalHit *hit = & (data->hits [index - 1]);
4666 hit->address = (gpointer) ip;
4667 hit->domain = domain;
4669 /* Check if we are the one that must swap the buffers */
4670 if (index == data->end_index + 1) {
4671 ProfilerStatisticalData *new_data;
4673 /* In the *impossible* case that the writer thread has not finished yet, */
4674 /* loop waiting for it and meanwhile lose all statistical events... */
4676 /* First, wait that it consumed the ready buffer */
4677 while (profiler->statistical_data_ready != NULL);
4678 /* Then, wait that it produced the free buffer */
4679 new_data = profiler->statistical_data_second_buffer;
4680 } while (new_data == NULL);
4682 profiler->statistical_data_ready = data;
4683 profiler->statistical_data = new_data;
4684 profiler->statistical_data_second_buffer = NULL;
4685 WRITER_EVENT_RAISE ();
4688 /* Loop again, hoping to acquire a free slot this time */
4691 } while (data == NULL);
/* Map a runtime MonoGCEvent to the profiler's event code: each GC phase
 * pair (start/end) folds onto one profiler event (COLLECTION, MARK, SWEEP,
 * STOP_WORLD, START_WORLD). */
4694 static MonoProfilerEvents
4695 gc_event_code_from_profiler_event (MonoGCEvent event) {
4697 case MONO_GC_EVENT_START:
4698 case MONO_GC_EVENT_END:
4699 return MONO_PROFILER_EVENT_GC_COLLECTION;
4700 case MONO_GC_EVENT_MARK_START:
4701 case MONO_GC_EVENT_MARK_END:
4702 return MONO_PROFILER_EVENT_GC_MARK;
4703 case MONO_GC_EVENT_RECLAIM_START:
4704 case MONO_GC_EVENT_RECLAIM_END:
4705 return MONO_PROFILER_EVENT_GC_SWEEP;
4706 case MONO_GC_EVENT_PRE_STOP_WORLD:
4707 case MONO_GC_EVENT_POST_STOP_WORLD:
4708 return MONO_PROFILER_EVENT_GC_STOP_WORLD;
4709 case MONO_GC_EVENT_PRE_START_WORLD:
4710 case MONO_GC_EVENT_POST_START_WORLD:
4711 return MONO_PROFILER_EVENT_GC_START_WORLD;
4713 g_assert_not_reached ();
/* Map a MonoGCEvent to event kind: *_START/PRE_* phases are KIND_START,
 * *_END/POST_* phases are KIND_END. */
4718 static MonoProfilerEventKind
4719 gc_event_kind_from_profiler_event (MonoGCEvent event) {
4721 case MONO_GC_EVENT_START:
4722 case MONO_GC_EVENT_MARK_START:
4723 case MONO_GC_EVENT_RECLAIM_START:
4724 case MONO_GC_EVENT_PRE_STOP_WORLD:
4725 case MONO_GC_EVENT_PRE_START_WORLD:
4726 return MONO_PROFILER_EVENT_KIND_START;
4727 case MONO_GC_EVENT_END:
4728 case MONO_GC_EVENT_MARK_END:
4729 case MONO_GC_EVENT_RECLAIM_END:
4730 case MONO_GC_EVENT_POST_START_WORLD:
4731 case MONO_GC_EVENT_POST_STOP_WORLD:
4732 return MONO_PROFILER_EVENT_KIND_END;
4734 g_assert_not_reached ();
/* Decide whether the current GC should produce a heap snapshot: TRUE when a
 * snapshot was explicitly requested, or based on dump_next_heap_snapshots
 * (positive = countdown of snapshots still to dump; negative apparently
 * means "always" — NOTE(review): the branch bodies are not fully visible
 * here, confirm the exact return values). */
4740 dump_current_heap_snapshot (void) {
4743 if (profiler->heap_shot_was_requested) {
4746 if (profiler->dump_next_heap_snapshots > 0) {
4747 profiler->dump_next_heap_snapshots--;
4749 } else if (profiler->dump_next_heap_snapshots < 0) {
/* Initialize a heap-shot buffer list with one freshly allocated buffer:
 * the single buffer is simultaneously first, last and current, and the
 * free-slot cursor points at its first slot. */
4760 profiler_heap_buffers_setup (ProfilerHeapShotHeapBuffers *heap) {
4761 heap->buffers = g_new (ProfilerHeapShotHeapBuffer, 1);
4762 heap->buffers->previous = NULL;
4763 heap->buffers->next = NULL;
4764 heap->buffers->start_slot = &(heap->buffers->buffer [0]);
4765 heap->buffers->end_slot = &(heap->buffers->buffer [PROFILER_HEAP_SHOT_HEAP_BUFFER_SIZE]);
4766 heap->last = heap->buffers;
4767 heap->current = heap->buffers;
4768 heap->first_free_slot = & (heap->buffers->buffer [0]);
/* Reset all list pointers to NULL (does not free the buffers; see
 * profiler_heap_buffers_free for that). */
4771 profiler_heap_buffers_clear (ProfilerHeapShotHeapBuffers *heap) {
4772 heap->buffers = NULL;
4774 heap->current = NULL;
4775 heap->first_free_slot = NULL;
/* Walk the buffer list freeing each node, then clear the list pointers. */
4778 profiler_heap_buffers_free (ProfilerHeapShotHeapBuffers *heap) {
4779 ProfilerHeapShotHeapBuffer *current = heap->buffers;
4780 while (current != NULL) {
4781 ProfilerHeapShotHeapBuffer *next = current->next;
4785 profiler_heap_buffers_clear (heap);
/* Scan the field slots of one object (or one value-type array element)
 * starting at `start`, using the class layout bitmap in `layout` to find
 * reference-typed slots: the compact guint64 bitmap when the class has at
 * most CLASS_LAYOUT_PACKED_BITMAP_SIZE slots, the extended byte-array
 * bitmap otherwise.  Every live, non-NULL referenced object is written
 * into the heap-shot job.  Returns the number of references written. */
4789 report_object_references (gpointer *start, ClassIdMappingElement *layout, ProfilerHeapShotWriteJob *job) {
4790 int reported_references = 0;
4793 for (slot = 0; slot < layout->data.layout.slots; slot ++) {
4794 gboolean slot_has_reference;
4795 if (layout->data.layout.slots <= CLASS_LAYOUT_PACKED_BITMAP_SIZE) {
4796 if (layout->data.bitmap.compact & (((guint64)1) << slot)) {
4797 slot_has_reference = TRUE;
4799 slot_has_reference = FALSE;
4802 if (layout->data.bitmap.extended [slot >> 3] & (1 << (slot & 7))) {
4803 slot_has_reference = TRUE;
4805 slot_has_reference = FALSE;
4809 if (slot_has_reference) {
4810 gpointer field = start [slot];
4812 if ((field != NULL) && mono_object_is_alive (field)) {
4813 reported_references ++;
4814 WRITE_HEAP_SHOT_JOB_VALUE (job, field);
4819 return reported_references;
/* Record a reachable object in the heap-shot write job.
 * Updates the per-class reachable instance/byte counters when a collection
 * summary is being built (job->summary.capacity > 0).  When a full heap
 * dump is requested, writes the object record followed by its outgoing
 * references: array elements for arrays (object arrays directly, value-type
 * arrays via report_object_references per element), and instance fields for
 * plain objects.  The reference count is back-patched into the slot that
 * was reserved right after the object record. */
4823 profiler_heap_report_object_reachable (ProfilerHeapShotWriteJob *job, MonoObject *obj) {
4825 MonoClass *klass = mono_object_get_class (obj);
4826 ClassIdMappingElement *class_id = class_id_mapping_element_get (klass);
4827 if (class_id == NULL) {
4828 printf ("profiler_heap_report_object_reachable: class %p (%s.%s) has no id\n", klass, mono_class_get_namespace (klass), mono_class_get_name (klass));
4830 g_assert (class_id != NULL);
4832 if (job->summary.capacity > 0) {
4833 guint32 id = class_id->id;
4834 g_assert (id < job->summary.capacity);
4836 job->summary.per_class_data [id].reachable.instances ++;
4837 job->summary.per_class_data [id].reachable.bytes += mono_object_get_size (obj);
4839 if (profiler->action_flags.heap_shot && job->dump_heap_data) {
4840 int reference_counter = 0;
4841 gpointer *reference_counter_location;
4843 WRITE_HEAP_SHOT_JOB_VALUE_WITH_CODE (job, obj, HEAP_CODE_OBJECT);
4844 #if DEBUG_HEAP_PROFILER
4845 printf ("profiler_heap_report_object_reachable: reported object %p at cursor %p\n", obj, (job->cursor - 1));
/* Reserve a slot for the reference count; it is filled in below once the
 * references have been written. */
4847 WRITE_HEAP_SHOT_JOB_VALUE (job, NULL);
4848 reference_counter_location = job->cursor - 1;
4850 if (mono_class_get_rank (klass)) {
4851 MonoArray *array = (MonoArray *) obj;
4852 MonoClass *element_class = mono_class_get_element_class (klass);
4853 ClassIdMappingElement *element_id = class_id_mapping_element_get (element_class);
4855 g_assert (element_id != NULL);
4856 if (element_id->data.layout.slots == CLASS_LAYOUT_NOT_INITIALIZED) {
4857 class_id_mapping_element_build_layout_bitmap (element_class, element_id);
4859 if (! mono_class_is_valuetype (element_class)) {
4860 int length = mono_array_length (array);
4862 for (i = 0; i < length; i++) {
4863 MonoObject *array_element = mono_array_get (array, MonoObject*, i);
4864 if ((array_element != NULL) && mono_object_is_alive (array_element)) {
4865 reference_counter ++;
4866 WRITE_HEAP_SHOT_JOB_VALUE (job, array_element);
4869 } else if (element_id->data.layout.references > 0) {
4870 int length = mono_array_length (array);
4871 int array_element_size = mono_array_element_size (klass);
4873 for (i = 0; i < length; i++) {
4874 gpointer array_element_address = mono_array_addr_with_size (array, array_element_size, i);
4875 reference_counter += report_object_references (array_element_address, element_id, job);
/* Non-array object: scan instance fields (skipping the MonoObject header). */
4879 if (class_id->data.layout.slots == CLASS_LAYOUT_NOT_INITIALIZED) {
4880 class_id_mapping_element_build_layout_bitmap (klass, class_id);
4882 if (class_id->data.layout.references > 0) {
4883 reference_counter += report_object_references ((gpointer)(((char*)obj) + sizeof (MonoObject)), class_id, job);
4887 *reference_counter_location = GINT_TO_POINTER (reference_counter);
4888 #if DEBUG_HEAP_PROFILER
4889 printf ("profiler_heap_report_object_reachable: updated reference_counter_location %p with value %d\n", reference_counter_location, reference_counter);
/* Record an unreachable (garbage) object: bumps the per-class unreachable
 * counters when a collection summary is active, and, when unreachable-object
 * dumping is on, writes a FREE_OBJECT_CLASS record (class pointer + size)
 * into the job.
 * NOTE(review): the diagnostic below prints the name of
 * profiler_heap_report_object_reachable — apparent copy-paste in the
 * message text; left unchanged here since it is a runtime string. */
4895 profiler_heap_report_object_unreachable (ProfilerHeapShotWriteJob *job, MonoObject *obj) {
4897 MonoClass *klass = mono_object_get_class (obj);
4898 guint32 size = mono_object_get_size (obj);
4900 if (job->summary.capacity > 0) {
4901 ClassIdMappingElement *class_id = class_id_mapping_element_get (klass);
4904 if (class_id == NULL) {
4905 printf ("profiler_heap_report_object_reachable: class %p (%s.%s) has no id\n", klass, mono_class_get_namespace (klass), mono_class_get_name (klass));
4907 g_assert (class_id != NULL);
4909 g_assert (id < job->summary.capacity);
4911 job->summary.per_class_data [id].unreachable.instances ++;
4912 job->summary.per_class_data [id].unreachable.bytes += size;
4914 if (profiler->action_flags.unreachable_objects && job->dump_heap_data) {
4915 #if DEBUG_HEAP_PROFILER
4916 printf ("profiler_heap_report_object_unreachable: at job %p writing klass %p\n", job, klass);
4918 WRITE_HEAP_SHOT_JOB_VALUE_WITH_CODE (job, klass, HEAP_CODE_FREE_OBJECT_CLASS);
4920 #if DEBUG_HEAP_PROFILER
4921 printf ("profiler_heap_report_object_unreachable: at job %p writing size %p\n", job, GUINT_TO_POINTER (size));
4923 WRITE_HEAP_SHOT_JOB_VALUE (job, GUINT_TO_POINTER (size));
/* Append a live object to the heap tracking buffers and report it reachable
 * in the current job.  When the current buffer is full, either advances to
 * an already-allocated next buffer or allocates and links a new one at the
 * end of the list. */
4929 profiler_heap_add_object (ProfilerHeapShotHeapBuffers *heap, ProfilerHeapShotWriteJob *job, MonoObject *obj) {
4930 if (heap->first_free_slot >= heap->current->end_slot) {
4931 if (heap->current->next != NULL) {
4932 heap->current = heap->current->next;
4934 ProfilerHeapShotHeapBuffer *buffer = g_new (ProfilerHeapShotHeapBuffer, 1);
4935 buffer->previous = heap->last;
4936 buffer->next = NULL;
4937 buffer->start_slot = &(buffer->buffer [0]);
4938 buffer->end_slot = &(buffer->buffer [PROFILER_HEAP_SHOT_HEAP_BUFFER_SIZE]);
4939 heap->current = buffer;
4940 heap->last->next = buffer;
4941 heap->last = buffer;
4943 heap->first_free_slot = &(heap->current->buffer [0]);
4946 *(heap->first_free_slot) = obj;
4947 heap->first_free_slot ++;
4948 profiler_heap_report_object_reachable (job, obj);
/* Pop objects from the end of the heap buffers (walking first_free_slot
 * backwards, crossing buffer boundaries via the previous links) until
 * reaching current_slot, reporting each popped object as reachable or
 * unreachable.  Used by profiler_heap_scan to backfill a slot whose object
 * died: the first live popped object replaces it (the return statements are
 * below the lines shown here). */
4952 profiler_heap_pop_object_from_end (ProfilerHeapShotHeapBuffers *heap, ProfilerHeapShotWriteJob *job, MonoObject** current_slot) {
4953 while (heap->first_free_slot != current_slot) {
4956 if (heap->first_free_slot > heap->current->start_slot) {
4957 heap->first_free_slot --;
4959 heap->current = heap->current->previous;
4960 g_assert (heap->current != NULL);
4961 heap->first_free_slot = heap->current->end_slot - 1;
4964 obj = *(heap->first_free_slot);
4966 if (mono_object_is_alive (obj)) {
4967 profiler_heap_report_object_reachable (job, obj);
4970 profiler_heap_report_object_unreachable (job, obj);
/* Full sweep over all tracked heap objects after a GC mark phase: live
 * objects are reported reachable; dead ones are reported unreachable and
 * their slot is compacted by pulling a live object from the end of the
 * buffers (profiler_heap_pop_object_from_end).  The cursor crosses buffer
 * boundaries via the next links until it catches up with first_free_slot. */
4977 profiler_heap_scan (ProfilerHeapShotHeapBuffers *heap, ProfilerHeapShotWriteJob *job) {
4978 ProfilerHeapShotHeapBuffer *current_buffer = heap->buffers;
4979 MonoObject** current_slot = current_buffer->start_slot;
4981 while (current_slot != heap->first_free_slot) {
4982 MonoObject *obj = *current_slot;
4983 if (mono_object_is_alive (obj)) {
4984 profiler_heap_report_object_reachable (job, obj);
4986 profiler_heap_report_object_unreachable (job, obj);
4987 *current_slot = profiler_heap_pop_object_from_end (heap, job, current_slot);
4990 if (*current_slot != NULL) {
4993 if (current_slot == current_buffer->end_slot) {
4994 current_buffer = current_buffer->next;
4995 g_assert (current_buffer != NULL);
4996 current_slot = current_buffer->start_slot;
/* A write job is needed when this GC dumps heap data, or when unreachable
 * objects / a collection summary must be reported even without a dump. */
5002 static inline gboolean
5003 heap_shot_write_job_should_be_created (gboolean dump_heap_data) {
5004 return dump_heap_data || profiler->action_flags.unreachable_objects || profiler->action_flags.collection_summary;
/* Heap-profiling work driven by GC phase transitions.
 * The static `dump_heap_data` persists between calls: it is decided at
 * POST_STOP_WORLD (via dump_current_heap_snapshot) and consumed later at
 * MARK_END of the same collection, when the world is still stopped. */
5008 process_gc_event (MonoProfiler *profiler, gboolean do_heap_profiling, MonoGCEvent ev) {
5009 static gboolean dump_heap_data;
5012 case MONO_GC_EVENT_PRE_STOP_WORLD:
5013 // Get the lock, so we are sure nobody is flushing events during the collection,
5014 // and we can update all mappings (building the class descriptors).
5015 // This is necessary also during lock profiling (even if do_heap_profiling is FALSE).
5018 case MONO_GC_EVENT_POST_STOP_WORLD:
5019 if (do_heap_profiling) {
5020 dump_heap_data = dump_current_heap_snapshot ();
5021 if (heap_shot_write_job_should_be_created (dump_heap_data)) {
5022 ProfilerPerThreadData *data;
5023 // Update all mappings, so that we have built all the class descriptors.
5024 flush_all_mappings ();
5025 // Also write all event buffers, so that allocations are recorded.
5026 for (data = profiler->per_thread_data; data != NULL; data = data->next) {
5027 write_thread_data_block (data);
5031 dump_heap_data = FALSE;
5036 case MONO_GC_EVENT_MARK_END: {
5037 if (do_heap_profiling) {
5038 ProfilerHeapShotWriteJob *job;
5039 ProfilerPerThreadData *data;
5041 if (heap_shot_write_job_should_be_created (dump_heap_data)) {
5042 job = profiler_heap_shot_write_job_new (profiler->heap_shot_was_requested, dump_heap_data, profiler->garbage_collection_counter);
5043 profiler->heap_shot_was_requested = FALSE;
5044 MONO_PROFILER_GET_CURRENT_COUNTER (job->start_counter);
5045 MONO_PROFILER_GET_CURRENT_TIME (job->start_time);
/* Sweep all previously tracked objects... */
5050 profiler_heap_scan (&(profiler->heap), job);
/* ...then process objects allocated since the last collection: live ones
 * join the tracked heap, dead ones are reported unreachable. */
5052 for (data = profiler->per_thread_data; data != NULL; data = data->next) {
5053 ProfilerHeapShotObjectBuffer *buffer;
5054 for (buffer = data->heap_shot_object_buffers; buffer != NULL; buffer = buffer->next) {
5055 MonoObject **cursor;
5056 for (cursor = buffer->first_unprocessed_slot; cursor < buffer->next_free_slot; cursor ++) {
5057 MonoObject *obj = *cursor;
5058 #if DEBUG_HEAP_PROFILER
5059 printf ("gc_event: in object buffer %p(%p-%p) cursor at %p has object %p ", buffer, &(buffer->buffer [0]), buffer->end, cursor, obj);
5061 if (mono_object_is_alive (obj)) {
5062 #if DEBUG_HEAP_PROFILER
5063 printf ("(object is alive, adding to heap)\n");
5065 profiler_heap_add_object (&(profiler->heap), job, obj);
5067 #if DEBUG_HEAP_PROFILER
5068 printf ("(object is unreachable, reporting in job)\n");
5070 profiler_heap_report_object_unreachable (job, obj);
5073 buffer->first_unprocessed_slot = cursor;
5078 MONO_PROFILER_GET_CURRENT_COUNTER (job->end_counter);
5079 MONO_PROFILER_GET_CURRENT_TIME (job->end_time);
5081 profiler_add_heap_shot_write_job (job);
5082 profiler_free_heap_shot_write_jobs ();
5083 WRITER_EVENT_RAISE ();
/* Profiler callback: top-level GC event dispatcher.
 * Bumps the collection counter at GC start, and packs the event payload as
 * (collection_counter << 8) | generation.  Ordering: for POST_STOP_WORLD
 * the heap-profiling work must run *before* the event is recorded (mappings
 * are flushed there); for every other phase it runs after. */
5094 gc_event (MonoProfiler *profiler, MonoGCEvent ev, int generation) {
5095 ProfilerPerThreadData *data;
5096 ProfilerEventData *event;
5097 gboolean do_heap_profiling = profiler->action_flags.unreachable_objects || profiler->action_flags.heap_shot || profiler->action_flags.collection_summary;
5098 guint32 event_value;
5100 if (ev == MONO_GC_EVENT_START) {
5101 profiler->garbage_collection_counter ++;
5104 event_value = (profiler->garbage_collection_counter << 8) | generation;
5106 if (ev == MONO_GC_EVENT_POST_STOP_WORLD) {
5107 process_gc_event (profiler, do_heap_profiling, ev);
5110 /* Check if the gc event should be recorded. */
5111 if (profiler->action_flags.report_gc_events || do_heap_profiling) {
5112 GET_PROFILER_THREAD_DATA (data);
5113 GET_NEXT_FREE_EVENT (data, event);
5114 STORE_EVENT_NUMBER_COUNTER (event, profiler, event_value, MONO_PROFILER_EVENT_DATA_TYPE_OTHER, gc_event_code_from_profiler_event (ev), gc_event_kind_from_profiler_event (ev));
5115 COMMIT_RESERVED_EVENTS (data);
5118 if (ev != MONO_GC_EVENT_POST_STOP_WORLD) {
5119 process_gc_event (profiler, do_heap_profiling, ev);
/* Profiler callback: the GC heap was resized.  Increments the collection
 * counter and records a GC_RESIZE event whose number is the new heap size
 * and whose value is the current collection counter. */
5124 gc_resize (MonoProfiler *profiler, gint64 new_size) {
5125 ProfilerPerThreadData *data;
5126 ProfilerEventData *event;
5127 GET_PROFILER_THREAD_DATA (data);
5128 GET_NEXT_FREE_EVENT (data, event);
5129 profiler->garbage_collection_counter ++;
5130 STORE_EVENT_NUMBER_VALUE (event, profiler, new_size, MONO_PROFILER_EVENT_DATA_TYPE_OTHER, MONO_PROFILER_EVENT_GC_RESIZE, 0, profiler->garbage_collection_counter);
5131 COMMIT_RESERVED_EVENTS (data);
/* Profiler callback: the runtime finished initializing.  Registers the
 * managed-visible internal calls that let Mono.Profiler.RuntimeControls
 * enable/disable the profiler and request heap snapshots from managed code. */
5135 runtime_initialized (MonoProfiler *profiler) {
5136 LOG_WRITER_THREAD ("runtime_initialized: initializing internal calls.\n");
5137 mono_add_internal_call ("Mono.Profiler.RuntimeControls::EnableProfiler", enable_profiler);
5138 mono_add_internal_call ("Mono.Profiler.RuntimeControls::DisableProfiler", disable_profiler);
5139 mono_add_internal_call ("Mono.Profiler.RuntimeControls::TakeHeapSnapshot", request_heap_snapshot);
5140 LOG_WRITER_THREAD ("runtime_initialized: initialized internal calls.\n");
/* TCP command interface state: maximum accepted command line length, the
 * listening socket, and the single accepted client connection. */
5144 #define MAX_COMMAND_LENGTH (1024)
5145 static int server_socket;
5146 static int command_socket;
/* Send a response string back over the command socket.
 * NOTE(review): the send() return value is ignored — a short or failed
 * write is silently dropped (best-effort by design, presumably). */
5149 write_user_response (const char *response) {
5150 LOG_USER_THREAD ("write_user_response: writing response:");
5151 LOG_USER_THREAD (response);
5152 send (command_socket, response, strlen (response), 0);
/* Parse and execute one NUL-terminated command line received over the
 * command socket: "enable", "disable", "heap-snapshot", and
 * "heap-snapshot-counter=<n|all|none>".  Replies "DONE\n" or "ERROR\n". */
5156 execute_user_command (char *command) {
5159 LOG_USER_THREAD ("execute_user_command: executing command:");
5160 LOG_USER_THREAD (command);
5162 /* Ignore leading and trailing '\r' */
5163 line_feed = strchr (command, '\r');
5164 if (line_feed == command) {
5166 line_feed = strchr (command, '\r');
5168 if ((line_feed != NULL) && (* (line_feed + 1) == 0)) {
5172 if (strcmp (command, "enable") == 0) {
5173 LOG_USER_THREAD ("execute_user_command: enabling profiler");
5175 write_user_response ("DONE\n");
5176 } else if (strcmp (command, "disable") == 0) {
5177 LOG_USER_THREAD ("execute_user_command: disabling profiler");
5178 disable_profiler ();
5179 write_user_response ("DONE\n");
5180 } else if (strcmp (command, "heap-snapshot") == 0) {
5181 LOG_USER_THREAD ("execute_user_command: taking heap snapshot");
5182 profiler->heap_shot_was_requested = TRUE;
5183 WRITER_EVENT_RAISE ();
5184 write_user_response ("DONE\n");
/* NOTE(review): BUG? — strstr() returns a pointer (NULL when not found),
 * so "== 0" is true only when the command does NOT contain
 * "heap-snapshot-counter"; this branch's match test looks inverted.
 * The likely intent is strstr(...) == command (prefix match) or
 * strncmp(command, "heap-snapshot-counter", 21) == 0.  Confirm against
 * upstream before changing. */
5185 } else if (strstr (command, "heap-snapshot-counter") == 0) {
5187 LOG_USER_THREAD ("execute_user_command: changing heap counter");
5188 equals = strstr (command, "=");
5189 if (equals != NULL) {
5191 if (strcmp (equals, "all") == 0) {
5192 LOG_USER_THREAD ("execute_user_command: heap counter is \"all\"");
5193 profiler->garbage_collection_counter = -1;
5194 } else if (strcmp (equals, "none") == 0) {
5195 LOG_USER_THREAD ("execute_user_command: heap counter is \"none\"");
5196 profiler->garbage_collection_counter = 0;
5198 profiler->garbage_collection_counter = atoi (equals);
5200 write_user_response ("DONE\n");
5202 write_user_response ("ERROR\n");
5204 profiler->heap_shot_was_requested = TRUE;
5206 LOG_USER_THREAD ("execute_user_command: command not recognized");
5207 write_user_response ("ERROR\n");
/* Read loop for the command socket: accumulates received bytes into
 * command_buffer, splits on '\n', NUL-terminates and executes each complete
 * command, and shifts any trailing partial command to the buffer start.
 * A zero-byte recv (peer closed) ends the loop normally; a negative one is
 * treated as an error.  Returns the overall result flag.
 * NOTE(review): the malloc() result is used without a NULL check. */
5212 process_user_commands (void) {
5213 char *command_buffer = malloc (MAX_COMMAND_LENGTH);
5214 int command_buffer_current_index = 0;
5215 gboolean loop = TRUE;
5216 gboolean result = TRUE;
5219 int unprocessed_characters;
5221 LOG_USER_THREAD ("process_user_commands: reading from socket...");
5222 unprocessed_characters = recv (command_socket, command_buffer + command_buffer_current_index, MAX_COMMAND_LENGTH - command_buffer_current_index, 0);
5224 if (unprocessed_characters > 0) {
5225 char *command_end = NULL;
5227 LOG_USER_THREAD ("process_user_commands: received characters.");
5230 if (command_end != NULL) {
5232 execute_user_command (command_buffer);
5233 unprocessed_characters -= (((command_end - command_buffer) - command_buffer_current_index) + 1);
5235 if (unprocessed_characters > 0) {
5236 memmove (command_buffer, command_end + 1, unprocessed_characters);
5238 command_buffer_current_index = 0;
5241 command_end = memchr (command_buffer, '\n', command_buffer_current_index + unprocessed_characters);
5242 } while (command_end != NULL);
5244 command_buffer_current_index += unprocessed_characters;
5246 } else if (unprocessed_characters == 0) {
5247 LOG_USER_THREAD ("process_user_commands: received no character.");
5251 LOG_USER_THREAD ("process_user_commands: received error.");
5257 free (command_buffer);
/* Command-interface thread entry point.
 * Creates a TCP server socket bound to INADDR_ANY on profiler->command_port
 * (rejecting ports outside 1024..65535), accepts a single client
 * connection, runs the command loop, then closes both sockets.  Any setup
 * failure logs a message and bails out. */
5262 user_thread (gpointer nothing) {
5263 struct sockaddr_in server_address;
5266 command_socket = -1;
5268 LOG_USER_THREAD ("user_thread: starting up...");
5270 server_socket = socket (AF_INET, SOCK_STREAM, 0);
5271 if (server_socket < 0) {
5272 LOG_USER_THREAD ("user_thread: error creating socket.");
5275 memset (& server_address, 0, sizeof (server_address));
5277 server_address.sin_family = AF_INET;
5278 server_address.sin_addr.s_addr = INADDR_ANY;
5279 if ((profiler->command_port < 1023) || (profiler->command_port > 65535)) {
5280 LOG_USER_THREAD ("user_thread: invalid port number.");
5283 server_address.sin_port = htons (profiler->command_port);
5285 if (bind (server_socket, (struct sockaddr *) &server_address, sizeof(server_address)) < 0) {
5286 LOG_USER_THREAD ("user_thread: error binding socket.");
5287 close (server_socket);
5291 LOG_USER_THREAD ("user_thread: listening...\n");
5292 listen (server_socket, 1);
5293 command_socket = accept (server_socket, NULL, NULL);
5294 if (command_socket < 0) {
5295 LOG_USER_THREAD ("user_thread: error accepting socket.");
5296 close (server_socket);
5300 LOG_USER_THREAD ("user_thread: processing user commands...");
5301 process_user_commands ();
5303 LOG_USER_THREAD ("user_thread: exiting cleanly.");
5304 close (server_socket);
5305 close (command_socket);
5310 /* called at the end of the program */
/*
 * profiler_shutdown:
 * Shutdown callback installed via mono_profiler_install.  Disables event
 * delivery, stops the writer thread, flushes pending data, records the
 * end time/counter, and releases every profiler-owned resource (id
 * mappings, loaded-element hash tables, per-thread data, statistical
 * buffers, executable regions, heap buffers, write buffers, mutex).
 */
5312 profiler_shutdown (MonoProfiler *prof)
5314 ProfilerPerThreadData* current_thread_data;
5315 ProfilerPerThreadData* next_thread_data;
5317 LOG_WRITER_THREAD ("profiler_shutdown: zeroing relevant flags");
/* Stop event delivery first so no new data arrives during teardown. */
5318 mono_profiler_set_events (0);
5319 /* During shutdown searching for MonoJitInfo is not possible... */
5320 if (profiler->statistical_call_chain_strategy == MONO_PROFILER_CALL_CHAIN_MANAGED) {
5321 mono_profiler_install_statistical_call_chain (NULL, 0, MONO_PROFILER_CALL_CHAIN_NONE);
5323 //profiler->flags = 0;
5324 //profiler->action_flags.unreachable_objects = FALSE;
5325 //profiler->action_flags.heap_shot = FALSE;
/* Signal the writer thread to terminate and wait until it is gone before
 * destroying anything it might still be using. */
5327 LOG_WRITER_THREAD ("profiler_shutdown: asking stats thread to exit");
5328 profiler->terminate_writer_thread = TRUE;
5329 WRITER_EVENT_RAISE ();
5330 LOG_WRITER_THREAD ("profiler_shutdown: waiting for stats thread to exit");
5331 WAIT_WRITER_THREAD ();
5332 LOG_WRITER_THREAD ("profiler_shutdown: stats thread should be dead now");
5333 WRITER_EVENT_DESTROY ();
/* Write out whatever is still buffered and record the end timestamps. */
5336 flush_everything ();
5337 MONO_PROFILER_GET_CURRENT_TIME (profiler->end_time);
5338 MONO_PROFILER_GET_CURRENT_COUNTER (profiler->end_counter);
/* Detach the code-chunk callbacks before freeing the chunk bookkeeping. */
5342 mono_profiler_install_code_chunk_new (NULL);
5343 mono_profiler_install_code_chunk_destroy (NULL);
5344 mono_profiler_install_code_buffer_new (NULL);
5345 profiler_code_chunks_cleanup (& (profiler->code_chunks));
5348 g_free (profiler->file_name);
/* NOTE(review): the NULL guard is redundant — g_free(NULL) is a no-op. */
5349 if (profiler->file_name_suffix != NULL) {
5350 g_free (profiler->file_name_suffix);
5353 method_id_mapping_destroy (profiler->methods);
5354 class_id_mapping_destroy (profiler->classes);
/* These tables were created with loaded_element_destroy as the value
 * destructor, so destroying them frees their elements too. */
5355 g_hash_table_destroy (profiler->loaded_assemblies);
5356 g_hash_table_destroy (profiler->loaded_modules);
5357 g_hash_table_destroy (profiler->loaded_appdomains);
5359 FREE_PROFILER_THREAD_DATA ();
/* Walk the per-thread list, saving "next" before destroying each node. */
5361 for (current_thread_data = profiler->per_thread_data; current_thread_data != NULL; current_thread_data = next_thread_data) {
5362 next_thread_data = current_thread_data->next;
5363 profiler_per_thread_data_destroy (current_thread_data);
5365 if (profiler->statistical_data != NULL) {
5366 profiler_statistical_data_destroy (profiler->statistical_data);
5368 if (profiler->statistical_data_ready != NULL) {
5369 profiler_statistical_data_destroy (profiler->statistical_data_ready);
5371 if (profiler->statistical_data_second_buffer != NULL) {
5372 profiler_statistical_data_destroy (profiler->statistical_data_second_buffer);
5374 if (profiler->executable_regions != NULL) {
5375 profiler_executable_memory_regions_destroy (profiler->executable_regions);
5378 profiler_heap_buffers_free (&(profiler->heap));
5380 profiler_free_write_buffers ();
5381 profiler_destroy_heap_shot_write_jobs ();
5383 DELETE_PROFILER_MUTEX ();
/* When oprofile support was requested, close the agent (HAS_OPROFILE). */
5386 if (profiler->action_flags.oprofile) {
/*
 * Argument-parsing helpers for setup_user_options: record a printf-style
 * failure message (the offending argument later fills its "%s") and jump
 * to the shared failure_handling label inside the parsing loop.
 */
5395 #define FAIL_ARGUMENT_CHECK(message) do {\
5396 failure_message = (message);\
5397 goto failure_handling;\
5399 #define FAIL_PARSING_VALUED_ARGUMENT FAIL_ARGUMENT_CHECK("cannot parse valued argument %s")
5400 #define FAIL_PARSING_FLAG_ARGUMENT FAIL_ARGUMENT_CHECK("cannot parse flag argument %s")
/* Fail with the given message when 'condition' evaluates true. */
5401 #define CHECK_CONDITION(condition,message) do {\
5402 gboolean result = (condition);\
5404 FAIL_ARGUMENT_CHECK (message);\
5407 #define FAIL_IF_HAS_MINUS CHECK_CONDITION(has_minus,"minus ('-') modifier not allowed for argument %s")
/* TRUE unless the current argument carried a '-' (negation) modifier. */
5408 #define TRUE_IF_NOT_MINUS ((!has_minus)?TRUE:FALSE)
/* Options used when the user supplies none: "s" (statistical profiling). */
5410 #define DEFAULT_ARGUMENTS "s"
/*
 * setup_user_options:
 * Parses the profiler option string ("name=value" and flag arguments
 * separated by ','), filling in the profiler->* configuration fields.
 * Defaults are installed first and each argument may override them;
 * malformed or unknown arguments produce a g_warning and are otherwise
 * ignored.  After parsing, dependent profiler event flags are forced on
 * so that every requested action receives the events it needs.
 *
 * Fixes applied in review:
 * - the two final "flags | MONO_PROFILE_..." tests used bitwise OR
 *   (always true) where a bitwise AND membership test was intended;
 * - the g_strsplit vector is now released with g_strfreev, which also
 *   frees the individual strings (g_free leaked them).
 */
5412 setup_user_options (const char *arguments) {
5413 gchar **arguments_array, **current_argument;
5414 detect_fast_timer ();
/* Install defaults before parsing; any argument below may override them. */
5416 profiler->file_name = NULL;
5417 profiler->file_name_suffix = NULL;
5418 profiler->per_thread_buffer_size = 10000;
5419 profiler->statistical_buffer_size = 10000;
5420 profiler->statistical_call_chain_depth = 0;
5421 profiler->statistical_call_chain_strategy = MONO_PROFILER_CALL_CHAIN_NATIVE;
5422 profiler->write_buffer_size = 1024;
5423 profiler->dump_next_heap_snapshots = 0;
5424 profiler->heap_shot_was_requested = FALSE;
5425 profiler->flags = MONO_PROFILE_APPDOMAIN_EVENTS|
5426 MONO_PROFILE_ASSEMBLY_EVENTS|
5427 MONO_PROFILE_MODULE_EVENTS|
5428 MONO_PROFILE_CLASS_EVENTS|
5429 MONO_PROFILE_METHOD_EVENTS|
5430 MONO_PROFILE_JIT_COMPILATION;
5431 profiler->profiler_enabled = TRUE;
/* The option string may be prefixed "module:args"; skip past the ':'. */
5433 if (arguments == NULL) {
5434 arguments = DEFAULT_ARGUMENTS;
5435 } else if (strstr (arguments, ":")) {
5436 arguments = strstr (arguments, ":") + 1;
5437 if (arguments [0] == 0) {
5438 arguments = DEFAULT_ARGUMENTS;
5442 arguments_array = g_strsplit (arguments, ",", -1);
5444 for (current_argument = arguments_array; ((current_argument != NULL) && (current_argument [0] != 0)); current_argument ++) {
5445 char *argument = *current_argument;
5446 char *equals = strstr (argument, "=");
5447 const char *failure_message = NULL;
/* A leading '+' or '-' modifier enables or negates flag arguments. */
5451 if (*argument == '+') {
5455 } else if (*argument == '-') {
/* "name=value" arguments; each strncmp chain accepts long and short names. */
5464 if (equals != NULL) {
5465 int equals_position = equals - argument;
5467 if (! (strncmp (argument, "per-thread-buffer-size", equals_position) && strncmp (argument, "tbs", equals_position))) {
5468 int value = atoi (equals + 1);
5471 profiler->per_thread_buffer_size = value;
5473 } else if (! (strncmp (argument, "statistical", equals_position) && strncmp (argument, "stat", equals_position) && strncmp (argument, "s", equals_position))) {
5474 int value = atoi (equals + 1);
/* Clamp the requested call chain depth to the supported maximum. */
5477 if (value > MONO_PROFILER_MAX_STAT_CALL_CHAIN_DEPTH) {
5478 value = MONO_PROFILER_MAX_STAT_CALL_CHAIN_DEPTH;
5480 profiler->statistical_call_chain_depth = value;
5481 profiler->flags |= MONO_PROFILE_STATISTICAL;
5483 } else if (! (strncmp (argument, "call-chain-strategy", equals_position) && strncmp (argument, "ccs", equals_position))) {
5484 char *parameter = equals + 1;
5486 if (! strcmp (parameter, "native")) {
5487 profiler->statistical_call_chain_strategy = MONO_PROFILER_CALL_CHAIN_NATIVE;
5488 } else if (! strcmp (parameter, "glibc")) {
5489 profiler->statistical_call_chain_strategy = MONO_PROFILER_CALL_CHAIN_GLIBC;
5490 } else if (! strcmp (parameter, "managed")) {
5491 profiler->statistical_call_chain_strategy = MONO_PROFILER_CALL_CHAIN_MANAGED;
5493 failure_message = "invalid call chain strategy in argument %s";
5494 goto failure_handling;
5496 } else if (! (strncmp (argument, "statistical-thread-buffer-size", equals_position) && strncmp (argument, "sbs", equals_position))) {
5497 int value = atoi (equals + 1);
5500 profiler->statistical_buffer_size = value;
5502 } else if (! (strncmp (argument, "write-buffer-size", equals_position) && strncmp (argument, "wbs", equals_position))) {
5503 int value = atoi (equals + 1);
5506 profiler->write_buffer_size = value;
5508 } else if (! (strncmp (argument, "output", equals_position) && strncmp (argument, "out", equals_position) && strncmp (argument, "o", equals_position) && strncmp (argument, "O", equals_position))) {
5510 if (strlen (equals + 1) > 0) {
5511 profiler->file_name = g_strdup (equals + 1);
5513 } else if (! (strncmp (argument, "output-suffix", equals_position) && strncmp (argument, "suffix", equals_position) && strncmp (argument, "os", equals_position) && strncmp (argument, "OS", equals_position))) {
5515 if (strlen (equals + 1) > 0) {
5516 profiler->file_name_suffix = g_strdup (equals + 1);
5518 } else if (! (strncmp (argument, "heap-shot", equals_position) && strncmp (argument, "heap", equals_position) && strncmp (argument, "h", equals_position))) {
5519 char *parameter = equals + 1;
/* "heap-shot=all" dumps on every collection (-1 sentinel); otherwise the
 * value is a count of snapshots to take. */
5520 if (! strcmp (parameter, "all")) {
5521 profiler->dump_next_heap_snapshots = -1;
5523 profiler->dump_next_heap_snapshots = atoi (parameter);
5527 profiler->action_flags.save_allocation_caller = TRUE;
5528 profiler->action_flags.save_allocation_stack = TRUE;
5529 profiler->action_flags.allocations_carry_id = TRUE_IF_NOT_MINUS;
5531 profiler->action_flags.heap_shot = TRUE_IF_NOT_MINUS;
5532 } else if (! (strncmp (argument, "gc-dumps", equals_position) && strncmp (argument, "gc-d", equals_position) && strncmp (argument, "gcd", equals_position))) {
5534 if (strlen (equals + 1) > 0) {
5535 profiler->dump_next_heap_snapshots = atoi (equals + 1);
5537 } else if (! (strncmp (argument, "command-port", equals_position) && strncmp (argument, "cp", equals_position))) {
5539 if (strlen (equals + 1) > 0) {
5540 profiler->command_port = atoi (equals + 1);
5543 FAIL_PARSING_VALUED_ARGUMENT;
/* Plain flag arguments (no '='); each accepts long and short spellings. */
5546 if (! (strcmp (argument, "jit") && strcmp (argument, "j"))) {
5547 profiler->action_flags.jit_time = TRUE_IF_NOT_MINUS;
5548 } else if (! (strcmp (argument, "allocations") && strcmp (argument, "alloc") && strcmp (argument, "a"))) {
5551 profiler->action_flags.save_allocation_caller = TRUE;
5552 profiler->action_flags.save_allocation_stack = TRUE;
5555 profiler->flags |= MONO_PROFILE_ALLOCATIONS;
5557 profiler->flags &= ~MONO_PROFILE_ALLOCATIONS;
5559 } else if (! (strcmp (argument, "monitor") && strcmp (argument, "locks") && strcmp (argument, "lock"))) {
5561 profiler->action_flags.track_stack = TRUE;
5562 profiler->flags |= MONO_PROFILE_MONITOR_EVENTS;
5563 profiler->flags |= MONO_PROFILE_GC;
5564 } else if (! (strcmp (argument, "gc") && strcmp (argument, "g"))) {
5566 profiler->action_flags.report_gc_events = TRUE;
5567 profiler->flags |= MONO_PROFILE_GC;
5568 } else if (! (strcmp (argument, "allocations-summary") && strcmp (argument, "as"))) {
5569 profiler->action_flags.collection_summary = TRUE_IF_NOT_MINUS;
5570 } else if (! (strcmp (argument, "heap-shot") && strcmp (argument, "heap") && strcmp (argument, "h"))) {
5573 profiler->action_flags.save_allocation_caller = TRUE;
5574 profiler->action_flags.save_allocation_stack = TRUE;
5575 profiler->action_flags.allocations_carry_id = TRUE_IF_NOT_MINUS;
5577 profiler->action_flags.heap_shot = TRUE_IF_NOT_MINUS;
5578 } else if (! (strcmp (argument, "unreachable") && strcmp (argument, "free") && strcmp (argument, "f"))) {
5579 profiler->action_flags.unreachable_objects = TRUE_IF_NOT_MINUS;
5580 } else if (! (strcmp (argument, "threads") && strcmp (argument, "t"))) {
5582 profiler->flags |= MONO_PROFILE_THREADS;
5584 profiler->flags &= ~MONO_PROFILE_THREADS;
5586 } else if (! (strcmp (argument, "enter-leave") && strcmp (argument, "calls") && strcmp (argument, "c"))) {
5587 profiler->action_flags.track_calls = TRUE_IF_NOT_MINUS;
5588 } else if (! (strcmp (argument, "statistical") && strcmp (argument, "stat") && strcmp (argument, "s"))) {
5590 profiler->flags |= MONO_PROFILE_STATISTICAL;
5592 profiler->flags &= ~MONO_PROFILE_STATISTICAL;
5594 } else if (! (strcmp (argument, "save-allocation-caller") && strcmp (argument, "sac"))) {
5595 profiler->action_flags.save_allocation_caller = TRUE_IF_NOT_MINUS;
5596 } else if (! (strcmp (argument, "save-allocation-stack") && strcmp (argument, "sas"))) {
5597 profiler->action_flags.save_allocation_stack = TRUE_IF_NOT_MINUS;
5598 } else if (! (strcmp (argument, "allocations-carry-id") && strcmp (argument, "aci"))) {
5599 profiler->action_flags.allocations_carry_id = TRUE_IF_NOT_MINUS;
5600 } else if (! (strcmp (argument, "start-enabled") && strcmp (argument, "se"))) {
5601 profiler->profiler_enabled = TRUE_IF_NOT_MINUS;
5602 } else if (! (strcmp (argument, "start-disabled") && strcmp (argument, "sd"))) {
5603 profiler->profiler_enabled = ! TRUE_IF_NOT_MINUS;
5604 } else if (! (strcmp (argument, "force-accurate-timer") && strcmp (argument, "fac"))) {
5605 use_fast_timer = TRUE_IF_NOT_MINUS;
5607 } else if (! (strcmp (argument, "oprofile") && strcmp (argument, "oprof"))) {
5608 profiler->flags |= MONO_PROFILE_JIT_COMPILATION;
5609 profiler->action_flags.oprofile = TRUE;
5610 if (op_open_agent ()) {
5611 FAIL_ARGUMENT_CHECK ("problem calling op_open_agent");
5614 } else if (strcmp (argument, "logging")) {
5615 FAIL_PARSING_FLAG_ARGUMENT;
/* failure_message is always one of the internal literals above, so the
 * non-literal g_warning format is safe; 'argument' fills its "%s". */
5620 if (failure_message != NULL) {
5621 g_warning (failure_message, argument);
5622 failure_message = NULL;
/* FIX: was g_free, which leaked the individual strings; g_strsplit
 * results must be released with g_strfreev. */
5626 g_strfreev (arguments_array);
5628 /* Ensure that the profiler flags needed to support required action flags are active */
5629 if (profiler->action_flags.jit_time) {
5630 profiler->flags |= MONO_PROFILE_JIT_COMPILATION;
5632 if (profiler->action_flags.save_allocation_caller || profiler->action_flags.save_allocation_stack || profiler->action_flags.allocations_carry_id) {
5633 profiler->flags |= MONO_PROFILE_ALLOCATIONS;
5635 if (profiler->action_flags.collection_summary || profiler->action_flags.heap_shot || profiler->action_flags.unreachable_objects) {
5636 profiler->flags |= MONO_PROFILE_ALLOCATIONS;
5637 profiler->action_flags.report_gc_events = TRUE;
5639 if (profiler->action_flags.track_calls) {
5640 profiler->flags |= MONO_PROFILE_ENTER_LEAVE;
5641 profiler->action_flags.jit_time = TRUE;
5643 if (profiler->action_flags.save_allocation_caller || profiler->action_flags.save_allocation_stack) {
5644 profiler->action_flags.track_stack = TRUE;
5645 profiler->flags |= MONO_PROFILE_ENTER_LEAVE;
5647 if (profiler->action_flags.track_stack) {
5648 profiler->flags |= MONO_PROFILE_ENTER_LEAVE;
5651 /* Tracking call stacks is useless if we already emit all enter-exit events... */
5652 if (profiler->action_flags.track_calls) {
5653 profiler->action_flags.track_stack = FALSE;
5654 profiler->action_flags.save_allocation_caller = FALSE;
5655 profiler->action_flags.save_allocation_stack = FALSE;
5658 /* Without JIT events the stat profiler will not find method IDs... */
/* FIX: was "flags | MONO_PROFILE_STATISTICAL" — always true; '&' tests
 * whether statistical profiling was actually requested. */
5659 if (profiler->flags & MONO_PROFILE_STATISTICAL) {
5660 profiler->flags |= MONO_PROFILE_JIT_COMPILATION;
5662 /* Profiling allocations without knowing which gc we are doing is not nice... */
/* FIX: was "flags | MONO_PROFILE_ALLOCATIONS" — always true; '&' tests
 * whether allocation profiling was actually requested. */
5663 if (profiler->flags & MONO_PROFILE_ALLOCATIONS) {
5664 profiler->flags |= MONO_PROFILE_GC;
5665 profiler->action_flags.report_gc_events = TRUE;
/* No explicit output file: derive "<basename>[-suffix].mprof" from the
 * program name, stripping directory components and the extension. */
5669 if (profiler->file_name == NULL) {
5670 char *program_name = g_get_prgname ();
5672 if (program_name != NULL) {
5673 char *name_buffer = g_strdup (program_name);
5674 char *name_start = name_buffer;
5677 /* Jump over the last '/' */
5678 cursor = strrchr (name_buffer, '/');
5679 if (cursor == NULL) {
5680 cursor = name_buffer;
5684 name_start = cursor;
5686 /* Then jump over the last '\\' */
5687 cursor = strrchr (name_start, '\\');
5688 if (cursor == NULL) {
5689 cursor = name_start;
5693 name_start = cursor;
5695 /* Finally, find the last '.' */
5696 cursor = strrchr (name_start, '.');
5697 if (cursor != NULL) {
5701 if (profiler->file_name_suffix == NULL) {
5702 profiler->file_name = g_strdup_printf ("%s.mprof", name_start);
5704 profiler->file_name = g_strdup_printf ("%s-%s.mprof", name_start, profiler->file_name_suffix);
5706 g_free (name_buffer);
5708 profiler->file_name = g_strdup_printf ("%s.mprof", "profiler-log");
/*
 * data_writer_thread:
 * Entry point of the profiler writer thread.  Waits on the writer event;
 * on each wakeup it optionally forces a full GC (when a heap shot was
 * requested), writes the id mappings, any ready statistical data and any
 * pending heap shot jobs, then flushes the write buffers.  The loop ends
 * when profiler->terminate_writer_thread is set by profiler_shutdown.
 * The 'nothing' argument is unused (thread entry-point signature).
 */
5714 data_writer_thread (gpointer nothing) {
5716 ProfilerStatisticalData *statistical_data;
5719 LOG_WRITER_THREAD ("data_writer_thread: going to sleep");
5720 WRITER_EVENT_WAIT ();
5721 LOG_WRITER_THREAD ("data_writer_thread: just woke up");
/* A heap snapshot was requested (e.g. via the command socket): force a
 * full collection so the GC callbacks produce the heap data. */
5723 if (profiler->heap_shot_was_requested) {
5724 MonoDomain * root_domain = mono_get_root_domain ();
5726 if (root_domain != NULL) {
5727 MonoThread *this_thread;
5728 LOG_WRITER_THREAD ("data_writer_thread: attaching thread");
/* This native thread must be attached to the runtime before collecting. */
5729 this_thread = mono_thread_attach (root_domain);
5730 LOG_WRITER_THREAD ("data_writer_thread: starting requested collection");
5731 mono_gc_collect (mono_gc_max_generation ());
5732 LOG_WRITER_THREAD ("data_writer_thread: requested collection done");
5733 LOG_WRITER_THREAD ("data_writer_thread: detaching thread");
5734 mono_thread_detach (this_thread);
5736 LOG_WRITER_THREAD ("data_writer_thread: collection sequence completed");
5738 LOG_WRITER_THREAD ("data_writer_thread: cannot get root domain, collection sequence skipped");
/* Snapshot the ready buffer; "done" means nothing is waiting for output. */
5743 statistical_data = profiler->statistical_data_ready;
5744 done = (statistical_data == NULL) && (profiler->heap_shot_write_jobs == NULL);
5747 LOG_WRITER_THREAD ("data_writer_thread: acquiring lock and writing data");
5750 // This makes sure that all method ids are in place
5751 LOG_WRITER_THREAD ("data_writer_thread: writing mapping...");
5752 flush_all_mappings ();
5753 LOG_WRITER_THREAD ("data_writer_thread: wrote mapping");
5755 if (statistical_data != NULL) {
5756 LOG_WRITER_THREAD ("data_writer_thread: writing statistical data...");
5757 profiler->statistical_data_ready = NULL;
5758 write_statistical_data_block (statistical_data);
/* Reset the written buffer and recycle it as the spare second buffer. */
5759 statistical_data->next_free_index = 0;
5760 statistical_data->first_unwritten_index = 0;
5761 profiler->statistical_data_second_buffer = statistical_data;
5762 LOG_WRITER_THREAD ("data_writer_thread: wrote statistical data");
5765 profiler_process_heap_shot_write_jobs ();
5768 LOG_WRITER_THREAD ("data_writer_thread: wrote data and released lock");
5770 LOG_WRITER_THREAD ("data_writer_thread: acquiring lock and flushing buffers");
5772 LOG_WRITER_THREAD ("data_writer_thread: lock acquired, flushing buffers");
5773 flush_everything ();
5775 LOG_WRITER_THREAD ("data_writer_thread: flushed buffers and released lock");
5778 if (profiler->terminate_writer_thread) {
5779 LOG_WRITER_THREAD ("data_writer_thread: exiting thread");
5780 CLEANUP_WRITER_THREAD ();
5788 mono_profiler_startup (const char *desc);
5790 /* the entry point (mono_profiler_load?) */
5792 mono_profiler_startup (const char *desc)
5794 profiler = g_new0 (MonoProfiler, 1);
5796 setup_user_options ((desc != NULL) ? desc : DEFAULT_ARGUMENTS);
5798 INITIALIZE_PROFILER_MUTEX ();
5799 MONO_PROFILER_GET_CURRENT_TIME (profiler->start_time);
5800 MONO_PROFILER_GET_CURRENT_COUNTER (profiler->start_counter);
5801 profiler->last_header_counter = 0;
5803 profiler->methods = method_id_mapping_new ();
5804 profiler->classes = class_id_mapping_new ();
5805 profiler->loaded_element_next_free_id = 1;
5806 profiler->loaded_assemblies = g_hash_table_new_full (g_direct_hash, NULL, NULL, loaded_element_destroy);
5807 profiler->loaded_modules = g_hash_table_new_full (g_direct_hash, NULL, NULL, loaded_element_destroy);
5808 profiler->loaded_appdomains = g_hash_table_new_full (g_direct_hash, NULL, NULL, loaded_element_destroy);
5810 profiler->statistical_data = profiler_statistical_data_new (profiler);
5811 profiler->statistical_data_second_buffer = profiler_statistical_data_new (profiler);
5813 profiler->write_buffers = g_malloc (sizeof (ProfilerFileWriteBuffer) + PROFILER_FILE_WRITE_BUFFER_SIZE);
5814 profiler->write_buffers->next = NULL;
5815 profiler->current_write_buffer = profiler->write_buffers;
5816 profiler->current_write_position = 0;
5817 profiler->full_write_buffers = 0;
5818 profiler_code_chunks_initialize (& (profiler->code_chunks));
5820 profiler->executable_regions = profiler_executable_memory_regions_new (1, 1);
5822 profiler->executable_files.table = g_hash_table_new (g_str_hash, g_str_equal);
5823 profiler->executable_files.new_files = NULL;
5825 profiler->heap_shot_write_jobs = NULL;
5826 if (profiler->action_flags.unreachable_objects || profiler->action_flags.heap_shot || profiler->action_flags.collection_summary) {
5827 profiler_heap_buffers_setup (&(profiler->heap));
5829 profiler_heap_buffers_clear (&(profiler->heap));
5831 profiler->garbage_collection_counter = 0;
5833 WRITER_EVENT_INIT ();
5834 LOG_WRITER_THREAD ("mono_profiler_startup: creating writer thread");
5835 CREATE_WRITER_THREAD (data_writer_thread);
5836 LOG_WRITER_THREAD ("mono_profiler_startup: created writer thread");
5837 if ((profiler->command_port >= 1024) && (profiler->command_port <= 65535)) {
5838 LOG_USER_THREAD ("mono_profiler_startup: creating user thread");
5839 CREATE_USER_THREAD (user_thread);
5840 LOG_USER_THREAD ("mono_profiler_startup: created user thread");
5842 LOG_USER_THREAD ("mono_profiler_startup: skipping user thread creation");
5845 ALLOCATE_PROFILER_THREAD_DATA ();
5849 write_intro_block ();
5850 write_directives_block (TRUE);
5852 mono_profiler_install (profiler, profiler_shutdown);
5854 mono_profiler_install_appdomain (appdomain_start_load, appdomain_end_load,
5855 appdomain_start_unload, appdomain_end_unload);
5856 mono_profiler_install_assembly (assembly_start_load, assembly_end_load,
5857 assembly_start_unload, assembly_end_unload);
5858 mono_profiler_install_module (module_start_load, module_end_load,
5859 module_start_unload, module_end_unload);
5860 mono_profiler_install_class (class_start_load, class_end_load,
5861 class_start_unload, class_end_unload);
5862 mono_profiler_install_jit_compile (method_start_jit, method_end_jit);
5863 mono_profiler_install_enter_leave (method_enter, method_leave);
5864 mono_profiler_install_method_free (method_free);
5865 mono_profiler_install_thread (thread_start, thread_end);
5866 mono_profiler_install_allocation (object_allocated);
5867 mono_profiler_install_monitor (monitor_event);
5868 mono_profiler_install_statistical (statistical_hit);
5869 mono_profiler_install_statistical_call_chain (statistical_call_chain, profiler->statistical_call_chain_depth, profiler->statistical_call_chain_strategy);
5870 mono_profiler_install_gc (gc_event, gc_resize);
5871 mono_profiler_install_runtime_initialized (runtime_initialized);
5873 mono_profiler_install_jit_end (method_jit_result);
5875 if (profiler->flags | MONO_PROFILE_STATISTICAL) {
5876 mono_profiler_install_code_chunk_new (profiler_code_chunk_new_callback);
5877 mono_profiler_install_code_chunk_destroy (profiler_code_chunk_destroy_callback);
5878 mono_profiler_install_code_buffer_new (profiler_code_buffer_new_callback);
5881 mono_profiler_set_events (profiler->flags);