2 * mono-profiler-logging.c: Logging profiler for Mono.
5 * Massimiliano Mantione (massi@ximian.com)
7 * Copyright 2008-2009 Novell, Inc (http://www.novell.com)
10 #include <mono/metadata/profiler.h>
11 #include <mono/metadata/class.h>
12 #include <mono/metadata/class-internals.h>
13 #include <mono/metadata/assembly.h>
14 #include <mono/metadata/loader.h>
15 #include <mono/metadata/threads.h>
16 #include <mono/metadata/debug-helpers.h>
17 #include <mono/metadata/mono-gc.h>
18 #include <mono/io-layer/atomic.h>
27 #define HAS_OPROFILE 0
30 #include <libopagent.h>
33 // Needed for heap analysis
34 extern gboolean mono_object_is_alive (MonoObject* obj);
37 MONO_PROFILER_FILE_BLOCK_KIND_INTRO = 1,
38 MONO_PROFILER_FILE_BLOCK_KIND_END = 2,
39 MONO_PROFILER_FILE_BLOCK_KIND_MAPPING = 3,
40 MONO_PROFILER_FILE_BLOCK_KIND_LOADED = 4,
41 MONO_PROFILER_FILE_BLOCK_KIND_UNLOADED = 5,
42 MONO_PROFILER_FILE_BLOCK_KIND_EVENTS = 6,
43 MONO_PROFILER_FILE_BLOCK_KIND_STATISTICAL = 7,
44 MONO_PROFILER_FILE_BLOCK_KIND_HEAP_DATA = 8,
45 MONO_PROFILER_FILE_BLOCK_KIND_HEAP_SUMMARY = 9,
46 MONO_PROFILER_FILE_BLOCK_KIND_DIRECTIVES = 10
47 } MonoProfilerFileBlockKind;
50 MONO_PROFILER_DIRECTIVE_END = 0,
51 MONO_PROFILER_DIRECTIVE_ALLOCATIONS_CARRY_CALLER = 1,
52 MONO_PROFILER_DIRECTIVE_ALLOCATIONS_HAVE_STACK = 2,
53 MONO_PROFILER_DIRECTIVE_ALLOCATIONS_CARRY_ID = 3,
54 MONO_PROFILER_DIRECTIVE_LOADED_ELEMENTS_CARRY_ID = 4,
55 MONO_PROFILER_DIRECTIVE_CLASSES_CARRY_ASSEMBLY_ID = 5,
56 MONO_PROFILER_DIRECTIVE_METHODS_CARRY_WRAPPER_FLAG = 6,
57 MONO_PROFILER_DIRECTIVE_LAST
58 } MonoProfilerDirectives;
61 #define MONO_PROFILER_LOADED_EVENT_MODULE 1
62 #define MONO_PROFILER_LOADED_EVENT_ASSEMBLY 2
63 #define MONO_PROFILER_LOADED_EVENT_APPDOMAIN 4
64 #define MONO_PROFILER_LOADED_EVENT_SUCCESS 8
65 #define MONO_PROFILER_LOADED_EVENT_FAILURE 16
68 MONO_PROFILER_EVENT_DATA_TYPE_OTHER = 0,
69 MONO_PROFILER_EVENT_DATA_TYPE_METHOD = 1,
70 MONO_PROFILER_EVENT_DATA_TYPE_CLASS = 2
71 } MonoProfilerEventDataType;
/* One in-memory profiler event record (most fields elided in this view).
 * data_type: one of MonoProfilerEventDataType (OTHER/METHOD/CLASS, 2 bits).
 * value: event payload packed into 25 bits; values above MAX_EVENT_VALUE
 * presumably require an overflow/extension record - TODO confirm against
 * the full struct definition. */
73 typedef struct _ProfilerEventData {
78 unsigned int data_type:2;
81 unsigned int value:25;
/* Width of the "value" bitfield above; keep the two in sync. */
84 #define EVENT_VALUE_BITS (25)
/* Largest payload that fits inline in an event: (1<<25)-1. */
85 #define MAX_EVENT_VALUE ((1<<EVENT_VALUE_BITS)-1)
88 MONO_PROFILER_EVENT_METHOD_JIT = 0,
89 MONO_PROFILER_EVENT_METHOD_FREED = 1,
90 MONO_PROFILER_EVENT_METHOD_CALL = 2,
91 MONO_PROFILER_EVENT_METHOD_ALLOCATION_CALLER = 3,
92 MONO_PROFILER_EVENT_METHOD_ALLOCATION_JIT_TIME_CALLER = 4
93 } MonoProfilerMethodEvents;
95 MONO_PROFILER_EVENT_CLASS_LOAD = 0,
96 MONO_PROFILER_EVENT_CLASS_UNLOAD = 1,
97 MONO_PROFILER_EVENT_CLASS_EXCEPTION = 2,
98 MONO_PROFILER_EVENT_CLASS_MONITOR = 3,
99 MONO_PROFILER_EVENT_CLASS_ALLOCATION = 4
100 } MonoProfilerClassEvents;
102 MONO_PROFILER_EVENT_RESULT_SUCCESS = 0,
103 MONO_PROFILER_EVENT_RESULT_FAILURE = 4
104 } MonoProfilerEventResult;
105 #define MONO_PROFILER_EVENT_RESULT_MASK MONO_PROFILER_EVENT_RESULT_FAILURE
107 MONO_PROFILER_EVENT_THREAD = 1,
108 MONO_PROFILER_EVENT_GC_COLLECTION = 2,
109 MONO_PROFILER_EVENT_GC_MARK = 3,
110 MONO_PROFILER_EVENT_GC_SWEEP = 4,
111 MONO_PROFILER_EVENT_GC_RESIZE = 5,
112 MONO_PROFILER_EVENT_GC_STOP_WORLD = 6,
113 MONO_PROFILER_EVENT_GC_START_WORLD = 7,
114 MONO_PROFILER_EVENT_JIT_TIME_ALLOCATION = 8,
115 MONO_PROFILER_EVENT_STACK_SECTION = 9,
116 MONO_PROFILER_EVENT_ALLOCATION_OBJECT_ID = 10,
117 MONO_PROFILER_EVENT_OBJECT_MONITOR = 11
118 } MonoProfilerEvents;
120 MONO_PROFILER_EVENT_KIND_START = 0,
121 MONO_PROFILER_EVENT_KIND_END = 1
122 } MonoProfilerEventKind;
/* Store the current wall-clock time into (t), in microseconds since the
 * epoch, using gettimeofday().
 * FIX: "&current_time" had been mangled into "¤t_time" (the "&curr"
 * prefix was swallowed by the "&curren;" HTML entity during a bad
 * text conversion); restored the address-of so gettimeofday() receives
 * a pointer to the local struct timeval. */
124 #define MONO_PROFILER_GET_CURRENT_TIME(t) {\
125 struct timeval current_time;\
126 gettimeofday (&current_time, NULL);\
127 (t) = (((guint64)current_time.tv_sec) * 1000000) + current_time.tv_usec;\
130 static gboolean use_fast_timer = FALSE;
132 #if (defined(__i386__) || defined(__x86_64__)) && ! defined(PLATFORM_WIN32)
134 #if defined(__i386__)
135 static const guchar cpuid_impl [] = {
136 0x55, /* push %ebp */
137 0x89, 0xe5, /* mov %esp,%ebp */
138 0x53, /* push %ebx */
139 0x8b, 0x45, 0x08, /* mov 0x8(%ebp),%eax */
140 0x0f, 0xa2, /* cpuid */
141 0x50, /* push %eax */
142 0x8b, 0x45, 0x10, /* mov 0x10(%ebp),%eax */
143 0x89, 0x18, /* mov %ebx,(%eax) */
144 0x8b, 0x45, 0x14, /* mov 0x14(%ebp),%eax */
145 0x89, 0x08, /* mov %ecx,(%eax) */
146 0x8b, 0x45, 0x18, /* mov 0x18(%ebp),%eax */
147 0x89, 0x10, /* mov %edx,(%eax) */
149 0x8b, 0x55, 0x0c, /* mov 0xc(%ebp),%edx */
150 0x89, 0x02, /* mov %eax,(%edx) */
156 typedef void (*CpuidFunc) (int id, int* p_eax, int* p_ebx, int* p_ecx, int* p_edx);
159 cpuid (int id, int* p_eax, int* p_ebx, int* p_ecx, int* p_edx) {
162 __asm__ __volatile__ (
165 "movl %%eax, %%edx\n"
166 "xorl $0x200000, %%eax\n"
171 "xorl %%edx, %%eax\n"
172 "andl $0x200000, %%eax\n"
194 CpuidFunc func = (CpuidFunc) cpuid_impl;
195 func (id, p_eax, p_ebx, p_ecx, p_edx);
197 * We use this approach because of issues with gcc and pic code, see:
198 * http://gcc.gnu.org/cgi-bin/gnatsweb.pl?cmd=view%20audit-trail&database=gcc&pr=7329
199 __asm__ __volatile__ ("cpuid"
200 : "=a" (*p_eax), "=b" (*p_ebx), "=c" (*p_ecx), "=d" (*p_edx)
208 static void detect_fast_timer (void) {
209 int p_eax, p_ebx, p_ecx, p_edx;
211 if (cpuid (0x1, &p_eax, &p_ebx, &p_ecx, &p_edx)) {
213 use_fast_timer = TRUE;
215 use_fast_timer = FALSE;
218 use_fast_timer = FALSE;
223 #if defined(__x86_64__)
224 static void detect_fast_timer (void) {
226 guint32 eax,ebx,ecx,edx;
227 __asm__ __volatile__ ("cpuid" : "=a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx) : "a"(op));
229 use_fast_timer = TRUE;
231 use_fast_timer = FALSE;
/* Read the CPU time-stamp counter (x86/x86-64 only): RDTSC leaves the low
 * 32 bits in EAX and the high 32 bits in EDX; recombine into a guint64.
 * (Declarations of the lo/hi locals are elided from this view.) */
236 static __inline__ guint64 rdtsc(void) {
238 __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi));
239 return ((guint64) lo) | (((guint64) hi) << 32);
/* Fetch a timestamp into (c): use the raw TSC when detect_fast_timer()
 * validated it, otherwise fall back to gettimeofday()-based time. */
241 #define MONO_PROFILER_GET_CURRENT_COUNTER(c) {\
242 if (use_fast_timer) {\
245 MONO_PROFILER_GET_CURRENT_TIME ((c));\
249 static void detect_fast_timer (void) {
250 use_fast_timer = FALSE;
252 #define MONO_PROFILER_GET_CURRENT_COUNTER(c) MONO_PROFILER_GET_CURRENT_TIME ((c))
256 #define CLASS_LAYOUT_PACKED_BITMAP_SIZE 64
257 #define CLASS_LAYOUT_NOT_INITIALIZED (0xFFFF)
260 HEAP_CODE_OBJECT = 1,
261 HEAP_CODE_FREE_OBJECT_CLASS = 2,
263 } HeapProfilerJobValueCode;
264 typedef struct _MonoProfilerClassData {
273 } MonoProfilerClassData;
275 typedef struct _MonoProfilerMethodData {
278 } MonoProfilerMethodData;
280 typedef struct _ClassIdMappingElement {
284 struct _ClassIdMappingElement *next_unwritten;
285 MonoProfilerClassData data;
286 } ClassIdMappingElement;
288 typedef struct _MethodIdMappingElement {
292 struct _MethodIdMappingElement *next_unwritten;
293 MonoProfilerMethodData data;
294 } MethodIdMappingElement;
296 typedef struct _ClassIdMapping {
298 ClassIdMappingElement *unwritten;
302 typedef struct _MethodIdMapping {
304 MethodIdMappingElement *unwritten;
308 typedef struct _LoadedElement {
310 guint64 load_start_counter;
311 guint64 load_end_counter;
312 guint64 unload_start_counter;
313 guint64 unload_end_counter;
318 guint8 unload_written;
321 #define PROFILER_HEAP_SHOT_OBJECT_BUFFER_SIZE 1024
322 #define PROFILER_HEAP_SHOT_HEAP_BUFFER_SIZE 4096
323 #define PROFILER_HEAP_SHOT_WRITE_BUFFER_SIZE 4096
325 typedef struct _ProfilerHeapShotObjectBuffer {
326 struct _ProfilerHeapShotObjectBuffer *next;
327 MonoObject **next_free_slot;
329 MonoObject **first_unprocessed_slot;
330 MonoObject *buffer [PROFILER_HEAP_SHOT_OBJECT_BUFFER_SIZE];
331 } ProfilerHeapShotObjectBuffer;
333 typedef struct _ProfilerHeapShotHeapBuffer {
334 struct _ProfilerHeapShotHeapBuffer *next;
335 struct _ProfilerHeapShotHeapBuffer *previous;
336 MonoObject **start_slot;
337 MonoObject **end_slot;
338 MonoObject *buffer [PROFILER_HEAP_SHOT_HEAP_BUFFER_SIZE];
339 } ProfilerHeapShotHeapBuffer;
341 typedef struct _ProfilerHeapShotHeapBuffers {
342 ProfilerHeapShotHeapBuffer *buffers;
343 ProfilerHeapShotHeapBuffer *last;
344 ProfilerHeapShotHeapBuffer *current;
345 MonoObject **first_free_slot;
346 } ProfilerHeapShotHeapBuffers;
349 typedef struct _ProfilerHeapShotWriteBuffer {
350 struct _ProfilerHeapShotWriteBuffer *next;
351 gpointer buffer [PROFILER_HEAP_SHOT_WRITE_BUFFER_SIZE];
352 } ProfilerHeapShotWriteBuffer;
354 typedef struct _ProfilerHeapShotClassSummary {
363 } ProfilerHeapShotClassSummary;
365 typedef struct _ProfilerHeapShotCollectionSummary {
366 ProfilerHeapShotClassSummary *per_class_data;
368 } ProfilerHeapShotCollectionSummary;
370 typedef struct _ProfilerHeapShotWriteJob {
371 struct _ProfilerHeapShotWriteJob *next;
372 struct _ProfilerHeapShotWriteJob *next_unwritten;
376 ProfilerHeapShotWriteBuffer *buffers;
377 ProfilerHeapShotWriteBuffer **last_next;
378 guint32 full_buffers;
379 gboolean heap_shot_was_signalled;
380 guint64 start_counter;
385 ProfilerHeapShotCollectionSummary summary;
386 gboolean dump_heap_data;
387 } ProfilerHeapShotWriteJob;
389 typedef struct _ProfilerThreadStack {
392 guint32 last_saved_top;
393 guint32 last_written_frame;
395 guint8 *method_is_jitted;
396 guint32 *written_frames;
397 } ProfilerThreadStack;
399 typedef struct _ProfilerPerThreadData {
400 ProfilerEventData *events;
401 ProfilerEventData *next_free_event;
402 ProfilerEventData *next_unreserved_event;
403 ProfilerEventData *end_event;
404 ProfilerEventData *first_unwritten_event;
405 ProfilerEventData *first_unmapped_event;
406 guint64 start_event_counter;
407 guint64 last_event_counter;
409 ProfilerHeapShotObjectBuffer *heap_shot_object_buffers;
410 ProfilerThreadStack stack;
411 struct _ProfilerPerThreadData* next;
412 } ProfilerPerThreadData;
414 typedef struct _ProfilerStatisticalHit {
417 } ProfilerStatisticalHit;
419 typedef struct _ProfilerStatisticalData {
420 ProfilerStatisticalHit *hits;
421 unsigned int next_free_index;
422 unsigned int end_index;
423 unsigned int first_unwritten_index;
424 } ProfilerStatisticalData;
426 typedef struct _ProfilerUnmanagedSymbol {
431 } ProfilerUnmanagedSymbol;
433 struct _ProfilerExecutableFile;
434 struct _ProfilerExecutableFileSectionRegion;
436 typedef struct _ProfilerExecutableMemoryRegionData {
444 struct _ProfilerExecutableFile *file;
445 struct _ProfilerExecutableFileSectionRegion *file_region_reference;
446 guint32 symbols_count;
447 guint32 symbols_capacity;
448 ProfilerUnmanagedSymbol *symbols;
449 } ProfilerExecutableMemoryRegionData;
451 typedef struct _ProfilerExecutableMemoryRegions {
452 ProfilerExecutableMemoryRegionData **regions;
453 guint32 regions_capacity;
454 guint32 regions_count;
456 guint32 next_unmanaged_function_id;
457 } ProfilerExecutableMemoryRegions;
459 /* Start of ELF definitions */
461 typedef guint16 ElfHalf;
462 typedef guint32 ElfWord;
463 typedef gsize ElfAddr;
464 typedef gsize ElfOff;
467 unsigned char e_ident[EI_NIDENT];
473 ElfOff e_shoff; // Section header table
475 ElfHalf e_ehsize; // Header size
478 ElfHalf e_shentsize; // Section header entry size
479 ElfHalf e_shnum; // Section header entries number
480 ElfHalf e_shstrndx; // String table index
483 #if (SIZEOF_VOID_P == 4)
488 ElfAddr sh_addr; // Address in memory
489 ElfOff sh_offset; // Offset in file
493 ElfWord sh_addralign;
500 unsigned char st_info; // Use ELF32_ST_TYPE to get symbol type
501 unsigned char st_other;
502 ElfHalf st_shndx; // Or one of SHN_ABS, SHN_COMMON or SHN_UNDEF.
504 #elif (SIZEOF_VOID_P == 8)
509 ElfAddr sh_addr; // Address in memory
510 ElfOff sh_offset; // Offset in file
519 unsigned char st_info; // Use ELF_ST_TYPE to get symbol type
520 unsigned char st_other;
521 ElfHalf st_shndx; // Or one of SHN_ABS, SHN_COMMON or SHN_UNDEF.
526 #error Bad size of void pointer
530 #define ELF_ST_BIND(i) ((i)>>4)
531 #define ELF_ST_TYPE(i) ((i)&0xf)
544 ELF_FILE_TYPE_NONE = 0,
545 ELF_FILE_TYPE_REL = 1,
546 ELF_FILE_TYPE_EXEC = 2,
547 ELF_FILE_TYPE_DYN = 3,
548 ELF_FILE_TYPE_CORE = 4
565 ELF_SHT_PROGBITS = 1,
589 ELF_SHF_EXECINSTR = 4,
592 #define ELF_SHN_UNDEF 0
593 #define ELF_SHN_LORESERVE 0xff00
594 #define ELF_SHN_LOPROC 0xff00
595 #define ELF_SHN_HIPROC 0xff1f
596 #define ELF_SHN_ABS 0xfff1
597 #define ELF_SHN_COMMON 0xfff2
598 #define ELF_SHN_HIRESERVE 0xffff
599 /* End of ELF definitions */
601 typedef struct _ProfilerExecutableFileSectionRegion {
602 ProfilerExecutableMemoryRegionData *region;
603 guint8 *section_address;
604 gsize section_offset;
605 } ProfilerExecutableFileSectionRegion;
607 typedef struct _ProfilerExecutableFile {
608 guint32 reference_count;
610 /* Used for mmap and munmap */
617 guint8 *symbols_start;
618 guint32 symbols_count;
620 const char *symbols_string_table;
621 const char *main_string_table;
623 ProfilerExecutableFileSectionRegion *section_regions;
625 struct _ProfilerExecutableFile *next_new_file;
626 } ProfilerExecutableFile;
628 typedef struct _ProfilerExecutableFiles {
630 ProfilerExecutableFile *new_files;
631 } ProfilerExecutableFiles;
634 #define CLEANUP_WRITER_THREAD() do {profiler->writer_thread_terminated = TRUE;} while (0)
635 #define CHECK_WRITER_THREAD() (! profiler->writer_thread_terminated)
637 #ifndef PLATFORM_WIN32
638 #include <sys/types.h>
639 #include <sys/time.h>
640 #include <sys/stat.h>
644 #include <semaphore.h>
646 #include <sys/mman.h>
647 #include <sys/types.h>
648 #include <sys/stat.h>
652 #define MUTEX_TYPE pthread_mutex_t
653 #define INITIALIZE_PROFILER_MUTEX() pthread_mutex_init (&(profiler->mutex), NULL)
654 #define DELETE_PROFILER_MUTEX() pthread_mutex_destroy (&(profiler->mutex))
655 #define LOCK_PROFILER() do {/*LOG_WRITER_THREAD ("LOCK_PROFILER");*/ pthread_mutex_lock (&(profiler->mutex));} while (0)
656 #define UNLOCK_PROFILER() do {/*LOG_WRITER_THREAD ("UNLOCK_PROFILER");*/ pthread_mutex_unlock (&(profiler->mutex));} while (0)
658 #define THREAD_TYPE pthread_t
659 #define CREATE_WRITER_THREAD(f) pthread_create (&(profiler->data_writer_thread), NULL, ((void*(*)(void*))f), NULL)
660 #define EXIT_THREAD() pthread_exit (NULL);
661 #define WAIT_WRITER_THREAD() do {\
662 if (CHECK_WRITER_THREAD ()) {\
663 pthread_join (profiler->data_writer_thread, NULL);\
666 #define CURRENT_THREAD_ID() (gsize) pthread_self ()
668 #ifndef HAVE_KW_THREAD
669 static pthread_key_t pthread_profiler_key;
670 static pthread_once_t profiler_pthread_once = PTHREAD_ONCE_INIT;
672 make_pthread_profiler_key (void) {
673 (void) pthread_key_create (&pthread_profiler_key, NULL);
675 #define LOOKUP_PROFILER_THREAD_DATA() ((ProfilerPerThreadData*) pthread_getspecific (pthread_profiler_key))
676 #define SET_PROFILER_THREAD_DATA(x) (void) pthread_setspecific (pthread_profiler_key, (x))
677 #define ALLOCATE_PROFILER_THREAD_DATA() (void) pthread_once (&profiler_pthread_once, make_pthread_profiler_key)
678 #define FREE_PROFILER_THREAD_DATA() (void) pthread_key_delete (pthread_profiler_key)
681 #define EVENT_TYPE sem_t
682 #define WRITER_EVENT_INIT() do {\
683 sem_init (&(profiler->enable_data_writer_event), 0, 0);\
684 sem_init (&(profiler->wake_data_writer_event), 0, 0);\
685 sem_init (&(profiler->done_data_writer_event), 0, 0);\
687 #define WRITER_EVENT_DESTROY() do {\
688 sem_destroy (&(profiler->enable_data_writer_event));\
689 sem_destroy (&(profiler->wake_data_writer_event));\
690 sem_destroy (&(profiler->done_data_writer_event));\
692 #define WRITER_EVENT_WAIT() (void) sem_wait (&(profiler->wake_data_writer_event))
693 #define WRITER_EVENT_RAISE() (void) sem_post (&(profiler->wake_data_writer_event))
694 #define WRITER_EVENT_ENABLE_WAIT() (void) sem_wait (&(profiler->enable_data_writer_event))
695 #define WRITER_EVENT_ENABLE_RAISE() (void) sem_post (&(profiler->enable_data_writer_event))
696 #define WRITER_EVENT_DONE_WAIT() do {\
697 if (CHECK_WRITER_THREAD ()) {\
698 (void) sem_wait (&(profiler->done_data_writer_event));\
701 #define WRITER_EVENT_DONE_RAISE() (void) sem_post (&(profiler->done_data_writer_event))
704 #define FILE_HANDLE_TYPE FILE*
705 #define OPEN_FILE() profiler->file = fopen (profiler->file_name, "wb");
706 #define WRITE_BUFFER(b,s) fwrite ((b), 1, (s), profiler->file)
707 #define FLUSH_FILE() fflush (profiler->file)
708 #define CLOSE_FILE() fclose (profiler->file);
710 #define FILE_HANDLE_TYPE int
711 #define OPEN_FILE() profiler->file = open (profiler->file_name, O_WRONLY|O_CREAT|O_TRUNC, 0664);
712 #define WRITE_BUFFER(b,s) write (profiler->file, (b), (s))
714 #define CLOSE_FILE() close (profiler->file);
721 #define MUTEX_TYPE CRITICAL_SECTION
722 #define INITIALIZE_PROFILER_MUTEX() InitializeCriticalSection (&(profiler->mutex))
723 #define DELETE_PROFILER_MUTEX() DeleteCriticalSection (&(profiler->mutex))
724 #define LOCK_PROFILER() EnterCriticalSection (&(profiler->mutex))
725 #define UNLOCK_PROFILER() LeaveCriticalSection (&(profiler->mutex))
727 #define THREAD_TYPE HANDLE
728 #define CREATE_WRITER_THREAD(f) CreateThread (NULL, (1*1024*1024), (f), NULL, 0, NULL);
729 #define EXIT_THREAD() ExitThread (0);
730 #define WAIT_WRITER_THREAD() do {\
731 if (CHECK_WRITER_THREAD ()) {\
732 WaitForSingleObject (profiler->data_writer_thread, INFINITE);\
735 #define CURRENT_THREAD_ID() (gsize) GetCurrentThreadId ()
737 #ifndef HAVE_KW_THREAD
738 static guint32 profiler_thread_id = -1;
739 #define LOOKUP_PROFILER_THREAD_DATA() ((ProfilerPerThreadData*)TlsGetValue (profiler_thread_id))
740 #define SET_PROFILER_THREAD_DATA(x) TlsSetValue (profiler_thread_id, (x));
741 #define ALLOCATE_PROFILER_THREAD_DATA() profiler_thread_id = TlsAlloc ()
742 #define FREE_PROFILER_THREAD_DATA() TlsFree (profiler_thread_id)
745 #define EVENT_TYPE HANDLE
/* Writer-thread event macros (Windows build).
 * Fixes applied:
 *  - removed the invalid "(void)" cast that preceded "do { ... }" in both
 *    macros (a statement cannot be cast, so the originals could not
 *    compile when used);
 *  - the handle-closing block below was mistakenly named
 *    WRITER_EVENT_INIT a second time; it is the destroy counterpart and
 *    is now WRITER_EVENT_DESTROY;
 *  - dropped the stale single-line WRITER_EVENT_DESTROY that referenced
 *    "profiler->statistical_data_writer_event", a field that does not
 *    exist in struct _MonoProfiler (it has enable/wake/done events only).
 * Create the three auto-reset, initially unsignalled writer-thread events: */
746 #define WRITER_EVENT_INIT() do {\
747 profiler->enable_data_writer_event = CreateEvent (NULL, FALSE, FALSE, NULL);\
748 profiler->wake_data_writer_event = CreateEvent (NULL, FALSE, FALSE, NULL);\
749 profiler->done_data_writer_event = CreateEvent (NULL, FALSE, FALSE, NULL);\
752 #define WRITER_EVENT_DESTROY() do {\
753 CloseHandle (profiler->enable_data_writer_event);\
754 CloseHandle (profiler->wake_data_writer_event);\
755 CloseHandle (profiler->done_data_writer_event);\
757 #define WRITER_EVENT_WAIT() WaitForSingleObject (profiler->wake_data_writer_event, INFINITE)
758 #define WRITER_EVENT_RAISE() SetEvent (profiler->wake_data_writer_event)
759 #define WRITER_EVENT_ENABLE_WAIT() WaitForSingleObject (profiler->enable_data_writer_event, INFINITE)
760 #define WRITER_EVENT_ENABLE_RAISE() SetEvent (profiler->enable_data_writer_event)
761 #define WRITER_EVENT_DONE_WAIT() do {\
762 if (CHECK_WRITER_THREAD ()) {\
763 WaitForSingleObject (profiler->done_data_writer_event, INFINITE);\
766 #define WRITER_EVENT_DONE_RAISE() SetEvent (profiler->done_data_writer_event)
768 #define FILE_HANDLE_TYPE FILE*
769 #define OPEN_FILE() profiler->file = fopen (profiler->file_name, "wb");
770 #define WRITE_BUFFER(b,s) fwrite ((b), 1, (s), profiler->file)
771 #define FLUSH_FILE() fflush (profiler->file)
772 #define CLOSE_FILE() fclose (profiler->file);
776 #ifdef HAVE_KW_THREAD
777 static __thread ProfilerPerThreadData * tls_profiler_per_thread_data;
778 #define LOOKUP_PROFILER_THREAD_DATA() ((ProfilerPerThreadData*) tls_profiler_per_thread_data)
779 #define SET_PROFILER_THREAD_DATA(x) tls_profiler_per_thread_data = (x)
780 #define ALLOCATE_PROFILER_THREAD_DATA() /* nop */
781 #define FREE_PROFILER_THREAD_DATA() /* nop */
784 #define GET_PROFILER_THREAD_DATA(data) do {\
785 ProfilerPerThreadData *_result = LOOKUP_PROFILER_THREAD_DATA ();\
787 _result = profiler_per_thread_data_new (profiler->per_thread_buffer_size);\
789 _result->next = profiler->per_thread_data;\
790 profiler->per_thread_data = _result;\
792 SET_PROFILER_THREAD_DATA (_result);\
797 #define PROFILER_FILE_WRITE_BUFFER_SIZE (profiler->write_buffer_size)
798 typedef struct _ProfilerFileWriteBuffer {
799 struct _ProfilerFileWriteBuffer *next;
801 } ProfilerFileWriteBuffer;
803 #define CHECK_PROFILER_ENABLED() do {\
804 if (! profiler->profiler_enabled)\
807 struct _MonoProfiler {
810 MonoProfileFlags flags;
811 gboolean profiler_enabled;
813 char *file_name_suffix;
814 FILE_HANDLE_TYPE file;
817 guint64 start_counter;
821 guint64 last_header_counter;
823 MethodIdMapping *methods;
824 ClassIdMapping *classes;
826 guint32 loaded_element_next_free_id;
827 GHashTable *loaded_assemblies;
828 GHashTable *loaded_modules;
829 GHashTable *loaded_appdomains;
831 guint32 per_thread_buffer_size;
832 guint32 statistical_buffer_size;
833 ProfilerPerThreadData* per_thread_data;
834 ProfilerStatisticalData *statistical_data;
835 ProfilerStatisticalData *statistical_data_ready;
836 ProfilerStatisticalData *statistical_data_second_buffer;
837 int statistical_call_chain_depth;
839 THREAD_TYPE data_writer_thread;
840 EVENT_TYPE enable_data_writer_event;
841 EVENT_TYPE wake_data_writer_event;
842 EVENT_TYPE done_data_writer_event;
843 gboolean terminate_writer_thread;
844 gboolean writer_thread_terminated;
845 gboolean detach_writer_thread;
846 gboolean writer_thread_enabled;
847 gboolean writer_thread_flush_everything;
849 ProfilerFileWriteBuffer *write_buffers;
850 ProfilerFileWriteBuffer *current_write_buffer;
851 int write_buffer_size;
852 int current_write_position;
853 int full_write_buffers;
855 ProfilerHeapShotWriteJob *heap_shot_write_jobs;
856 ProfilerHeapShotHeapBuffers heap;
858 char *heap_shot_command_file_name;
859 int dump_next_heap_snapshots;
860 guint64 heap_shot_command_file_access_time;
861 gboolean heap_shot_was_signalled;
862 guint32 garbage_collection_counter;
864 ProfilerExecutableMemoryRegions *executable_regions;
865 ProfilerExecutableFiles executable_files;
872 gboolean unreachable_objects;
873 gboolean collection_summary;
874 gboolean report_gc_events;
876 gboolean track_stack;
877 gboolean track_calls;
878 gboolean save_allocation_caller;
879 gboolean save_allocation_stack;
880 gboolean allocations_carry_id;
883 static MonoProfiler *profiler;
885 #ifndef PLATFORM_WIN32
888 #ifdef MONO_ARCH_USE_SIGACTION
889 #define SIG_HANDLER_SIGNATURE(ftn) ftn (int _dummy, siginfo_t *info, void *context)
890 #elif defined(__sparc__)
891 #define SIG_HANDLER_SIGNATURE(ftn) ftn (int _dummy, void *sigctx)
893 #define SIG_HANDLER_SIGNATURE(ftn) ftn (int _dummy)
/* Flag that a heap snapshot was requested and force a full GC so the
 * heap-walk callbacks fire and collect the snapshot data. */
897 request_heap_snapshot (void) {
898 profiler->heap_shot_was_signalled = TRUE;
899 mono_gc_collect (mono_gc_max_generation ());
/* Signal handler for the user-configured "take heap snapshot" signal:
 * records the request and wakes the data-writer thread.
 * NOTE(review): writes a plain gboolean and raises a semaphore from
 * signal context; sem_post is async-signal-safe, the flag write is
 * unsynchronized - presumably tolerated by design, confirm. */
903 SIG_HANDLER_SIGNATURE (gc_request_handler) {
904 profiler->heap_shot_was_signalled = TRUE;
905 WRITER_EVENT_RAISE ();
/* Install gc_request_handler on signal_number via sigaction(), using the
 * three-argument SA_SIGINFO form when the architecture supports it and
 * the plain sa_handler form otherwise. */
909 add_gc_request_handler (int signal_number)
913 #ifdef MONO_ARCH_USE_SIGACTION
914 sa.sa_sigaction = gc_request_handler;
915 sigemptyset (&sa.sa_mask);
916 sa.sa_flags = SA_SIGINFO;
918 sa.sa_handler = gc_request_handler;
919 sigemptyset (&sa.sa_mask);
/* NOTE(review): the sigaction() call lives inside g_assert, so it would
 * be compiled out entirely under G_DISABLE_ASSERT - verify. */
923 g_assert (sigaction (signal_number, &sa, NULL) != -1);
/* Turn event recording on (checked by CHECK_PROFILER_ENABLED). */
927 enable_profiler (void) {
928 profiler->profiler_enabled = TRUE;
/* Turn event recording off. */
932 disable_profiler (void) {
933 profiler->profiler_enabled = FALSE;
/* Signal handler that flips profiling on/off at runtime.
 * NOTE(review): non-atomic read-modify-write of a gboolean from signal
 * context - racy in principle; presumably acceptable for a toggle. */
939 SIG_HANDLER_SIGNATURE (toggle_handler) {
940 if (profiler->profiler_enabled) {
941 profiler->profiler_enabled = FALSE;
943 profiler->profiler_enabled = TRUE;
/* Install toggle_handler on signal_number; mirrors add_gc_request_handler
 * (SA_SIGINFO form when available, plain handler otherwise). */
948 add_toggle_handler (int signal_number)
952 #ifdef MONO_ARCH_USE_SIGACTION
953 sa.sa_sigaction = toggle_handler;
954 sigemptyset (&sa.sa_mask);
955 sa.sa_flags = SA_SIGINFO;
957 sa.sa_handler = toggle_handler;
958 sigemptyset (&sa.sa_mask);
/* NOTE(review): side effect inside g_assert - compiled out under
 * G_DISABLE_ASSERT; verify this is intended. */
962 g_assert (sigaction (signal_number, &sa, NULL) != -1);
968 #define DEBUG_LOAD_EVENTS 0
969 #define DEBUG_MAPPING_EVENTS 0
970 #define DEBUG_LOGGING_PROFILER 0
971 #define DEBUG_HEAP_PROFILER 0
972 #define DEBUG_CLASS_BITMAPS 0
973 #define DEBUG_STATISTICAL_PROFILER 0
974 #define DEBUG_WRITER_THREAD 0
975 #define DEBUG_FILE_WRITES 0
976 #if (DEBUG_LOGGING_PROFILER || DEBUG_STATISTICAL_PROFILER || DEBUG_HEAP_PROFILER || DEBUG_WRITER_THREAD || DEBUG_FILE_WRITES)
977 #define LOG_WRITER_THREAD(m) printf ("WRITER-THREAD-LOG %s\n", m)
979 #define LOG_WRITER_THREAD(m)
982 #if DEBUG_LOGGING_PROFILER
983 static int event_counter = 0;
984 #define EVENT_MARK() printf ("[EVENT:%d]", ++ event_counter)
/* Reset a ProfilerThreadStack to the "no storage allocated" state: all
 * counters zeroed, all buffers NULL (allocation happens lazily in
 * thread_stack_initialize). */
988 thread_stack_initialize_empty (ProfilerThreadStack *stack) {
991 stack->last_saved_top = 0;
992 stack->last_written_frame = 0;
994 stack->method_is_jitted = NULL;
995 stack->written_frames = NULL;
/* Release the three parallel arrays of a ProfilerThreadStack and zero its
 * counters; safe to call on an already-empty stack (NULL checks below). */
999 thread_stack_free (ProfilerThreadStack *stack) {
1000 stack->capacity = 0;
1002 stack->last_saved_top = 0;
1003 stack->last_written_frame = 0;
1004 if (stack->stack != NULL) {
1005 g_free (stack->stack);
1006 stack->stack = NULL;
1008 if (stack->method_is_jitted != NULL) {
1009 g_free (stack->method_is_jitted);
1010 stack->method_is_jitted = NULL;
1012 if (stack->written_frames != NULL) {
1013 g_free (stack->written_frames);
1014 stack->written_frames = NULL;
/* Allocate the three parallel, zero-initialized arrays (method pointers,
 * is-jitted flags, written-frame ids) with the given capacity and reset
 * the bookkeeping counters. Does NOT free previous storage - callers such
 * as thread_stack_push_jitted save and free the old arrays themselves. */
1019 thread_stack_initialize (ProfilerThreadStack *stack, guint32 capacity) {
1020 stack->capacity = capacity;
1022 stack->last_saved_top = 0;
1023 stack->last_written_frame = 0;
1024 stack->stack = g_new0 (MonoMethod*, capacity);
1025 stack->method_is_jitted = g_new0 (guint8, capacity);
1026 stack->written_frames = g_new0 (guint32, capacity);
/* Push method (with its is-jitted flag) onto the shadow stack, growing
 * the storage when full: on overflow the arrays are reallocated at twice
 * the capacity via thread_stack_initialize, the old contents are copied
 * across, the old arrays freed, and the saved counters restored. */
1030 thread_stack_push_jitted (ProfilerThreadStack *stack, MonoMethod* method, gboolean method_is_jitted) {
1031 if (stack->top >= stack->capacity) {
1032 MonoMethod **old_stack = stack->stack;
1033 guint8 *old_method_is_jitted = stack->method_is_jitted;
1034 guint32 *old_written_frames = stack->written_frames;
/* Save the counters: thread_stack_initialize resets them. */
1035 guint32 top = stack->top;
1036 guint32 last_saved_top = stack->last_saved_top;
1037 guint32 last_written_frame = stack->last_written_frame;
1038 thread_stack_initialize (stack, stack->capacity * 2);
1039 memcpy (stack->stack, old_stack, top * sizeof (MonoMethod*));
1040 memcpy (stack->method_is_jitted, old_method_is_jitted, top * sizeof (guint8));
1041 memcpy (stack->written_frames, old_written_frames, top * sizeof (guint32));
1043 g_free (old_method_is_jitted);
1044 g_free (old_written_frames);
1046 stack->last_saved_top = last_saved_top;
1047 stack->last_written_frame = last_written_frame;
/* Store the new frame in both parallel arrays at the current top. */
1049 stack->stack [stack->top] = method;
1050 stack->method_is_jitted [stack->top] = method_is_jitted;
/* Push a frame that is not marked as jitted. */
1055 thread_stack_push (ProfilerThreadStack *stack, MonoMethod* method) {
1056 thread_stack_push_jitted (stack, method, FALSE);
/* Pop and return the topmost frame (NULL-ish fallback elided from this
 * view); also clamps last_saved_top so it never exceeds top. */
1060 thread_stack_pop (ProfilerThreadStack *stack) {
1061 if (stack->top > 0) {
1063 if (stack->last_saved_top > stack->top) {
1064 stack->last_saved_top = stack->top;
1066 return stack->stack [stack->top];
/* Peek at the topmost frame without popping. */
1073 thread_stack_top (ProfilerThreadStack *stack) {
1074 if (stack->top > 0) {
1075 return stack->stack [stack->top - 1];
/* Is-jitted flag of the topmost frame. */
1082 thread_stack_top_is_jitted (ProfilerThreadStack *stack) {
1083 if (stack->top > 0) {
1084 return stack->method_is_jitted [stack->top - 1];
/* Frame at depth "index" counted from the top (0 == topmost). */
1091 thread_stack_index_from_top (ProfilerThreadStack *stack, int index) {
1092 if (stack->top > index) {
1093 return stack->stack [stack->top - (index + 1)];
/* Is-jitted flag at depth "index" from the top. */
1100 thread_stack_index_from_top_is_jitted (ProfilerThreadStack *stack, int index) {
1101 if (stack->top > index) {
1102 return stack->method_is_jitted [stack->top - (index + 1)];
/* Push only when storage has been allocated (no-op otherwise). */
1109 thread_stack_push_safely (ProfilerThreadStack *stack, MonoMethod* method) {
1110 if (stack->stack != NULL) {
1111 thread_stack_push (stack, method);
/* Jitted-aware variant of the safe push above. */
1116 thread_stack_push_jitted_safely (ProfilerThreadStack *stack, MonoMethod* method, gboolean method_is_jitted) {
1117 if (stack->stack != NULL) {
1118 thread_stack_push_jitted (stack, method, method_is_jitted);
/* Number of frames pushed since the last save point (never negative). */
1123 thread_stack_count_unsaved_frames (ProfilerThreadStack *stack) {
1124 int result = stack->top - stack->last_saved_top;
1125 return (result > 0) ? result : 0;
/* Accessors for the id of the last frame flushed to the output file. */
1129 thread_stack_get_last_written_frame (ProfilerThreadStack *stack) {
1130 return stack->last_written_frame;
1134 thread_stack_set_last_written_frame (ProfilerThreadStack *stack, int last_written_frame) {
1135 stack->last_written_frame = last_written_frame;
/* Read/write the packed "method id + is-jitted bit" recorded for the
 * frame at the given absolute stack index. */
1138 static inline guint32
1139 thread_stack_written_frame_at_index (ProfilerThreadStack *stack, int index) {
1140 return stack->written_frames [index];
1144 thread_stack_write_frame_at_index (ProfilerThreadStack *stack, int index, guint32 method_id_and_is_jitted) {
1145 stack->written_frames [index] = method_id_and_is_jitted;
/* Look up the profiler-local id-mapping record for a class; NULL when the
 * class has not been registered via class_id_mapping_element_new. */
1148 static ClassIdMappingElement*
1149 class_id_mapping_element_get (MonoClass *klass) {
1150 return g_hash_table_lookup (profiler->classes->table, (gconstpointer) klass);
/* Same lookup for methods, keyed on the MonoMethod pointer. */
1153 static MethodIdMappingElement*
1154 method_id_mapping_element_get (MonoMethod *method) {
1155 return g_hash_table_lookup (profiler->methods->table, (gconstpointer) method);
1158 #define BITS_TO_BYTES(v) do {\
/* Create and register the id-mapping record for a class: assigns the next
 * sequential class id, links the record onto the "unwritten" list (so the
 * writer thread will emit its mapping), marks the layout bitmap as not
 * yet computed, and indexes it by MonoClass* in the classes table.
 * NOTE(review): no duplicate check visible here - presumably callers
 * guarantee each class is registered once; confirm at call sites. */
1164 static ClassIdMappingElement*
1165 class_id_mapping_element_new (MonoClass *klass) {
1166 ClassIdMappingElement *result = g_new (ClassIdMappingElement, 1);
1168 result->name = mono_type_full_name (mono_class_get_type (klass));
1169 result->klass = klass;
1170 result->next_unwritten = profiler->classes->unwritten;
1171 profiler->classes->unwritten = result;
1172 result->id = profiler->classes->next_id;
1173 profiler->classes->next_id ++;
/* Layout info starts uninitialized; computed lazily by
 * class_id_mapping_element_build_layout_bitmap. */
1175 result->data.bitmap.compact = 0;
1176 result->data.layout.slots = CLASS_LAYOUT_NOT_INITIALIZED;
1177 result->data.layout.references = CLASS_LAYOUT_NOT_INITIALIZED;
1179 g_hash_table_insert (profiler->classes->table, klass, result);
1181 #if (DEBUG_MAPPING_EVENTS)
1182 printf ("Created new CLASS mapping element \"%s\" (%p)[%d]\n", result->name, klass, result->id);
1188 class_id_mapping_element_build_layout_bitmap (MonoClass *klass, ClassIdMappingElement *klass_id) {
1189 MonoClass *parent_class = mono_class_get_parent (klass);
1190 int number_of_reference_fields = 0;
1191 int max_offset_of_reference_fields = 0;
1192 ClassIdMappingElement *parent_id;
1194 MonoClassField *field;
1196 #if (DEBUG_CLASS_BITMAPS)
1197 printf ("class_id_mapping_element_build_layout_bitmap: building layout for class %s.%s: ", mono_class_get_namespace (klass), mono_class_get_name (klass));
1200 if (parent_class != NULL) {
1201 parent_id = class_id_mapping_element_get (parent_class);
1202 g_assert (parent_id != NULL);
1204 if (parent_id->data.layout.slots == CLASS_LAYOUT_NOT_INITIALIZED) {
1205 #if (DEBUG_CLASS_BITMAPS)
1206 printf ("[recursively building bitmap for father class]\n");
1208 class_id_mapping_element_build_layout_bitmap (parent_class, parent_id);
1215 while ((field = mono_class_get_fields (klass, &iter)) != NULL) {
1216 MonoType* field_type = mono_field_get_type (field);
1217 // For now, skip static fields
1218 if (mono_field_get_flags (field) & 0x0010 /*FIELD_ATTRIBUTE_STATIC*/)
1221 if (MONO_TYPE_IS_REFERENCE (field_type)) {
1222 int field_offset = mono_field_get_offset (field) - sizeof (MonoObject);
1223 if (field_offset > max_offset_of_reference_fields) {
1224 max_offset_of_reference_fields = field_offset;
1226 number_of_reference_fields ++;
1228 MonoClass *field_class = mono_class_from_mono_type (field_type);
1229 if (field_class && mono_class_is_valuetype (field_class)) {
1230 ClassIdMappingElement *field_id = class_id_mapping_element_get (field_class);
1231 g_assert (field_id != NULL);
1233 if (field_id->data.layout.slots == CLASS_LAYOUT_NOT_INITIALIZED) {
1234 if (field_id != klass_id) {
1235 #if (DEBUG_CLASS_BITMAPS)
1236 printf ("[recursively building bitmap for field %s]\n", mono_field_get_name (field));
1238 class_id_mapping_element_build_layout_bitmap (field_class, field_id);
1240 #if (DEBUG_CLASS_BITMAPS)
1241 printf ("[breaking recursive bitmap build for field %s]", mono_field_get_name (field));
1244 klass_id->data.bitmap.compact = 0;
1245 klass_id->data.layout.slots = 0;
1246 klass_id->data.layout.references = 0;
1250 if (field_id->data.layout.references > 0) {
1251 int field_offset = mono_field_get_offset (field) - sizeof (MonoObject);
1252 int max_offset_reference_in_field = (field_id->data.layout.slots - 1) * sizeof (gpointer);
1254 if ((field_offset + max_offset_reference_in_field) > max_offset_of_reference_fields) {
1255 max_offset_of_reference_fields = field_offset + max_offset_reference_in_field;
1258 number_of_reference_fields += field_id->data.layout.references;
1264 #if (DEBUG_CLASS_BITMAPS)
1265 printf ("[allocating bitmap for class %s.%s (references %d, max offset %d, slots %d)]", mono_class_get_namespace (klass), mono_class_get_name (klass), number_of_reference_fields, max_offset_of_reference_fields, (int)(max_offset_of_reference_fields / sizeof (gpointer)) + 1);
1267 if ((number_of_reference_fields == 0) && ((parent_id == NULL) || (parent_id->data.layout.references == 0))) {
1268 #if (DEBUG_CLASS_BITMAPS)
1269 printf ("[no references at all]");
1271 klass_id->data.bitmap.compact = 0;
1272 klass_id->data.layout.slots = 0;
1273 klass_id->data.layout.references = 0;
1275 if ((parent_id != NULL) && (parent_id->data.layout.references > 0)) {
1276 #if (DEBUG_CLASS_BITMAPS)
1277 printf ("[parent %s.%s has %d references in %d slots]", mono_class_get_namespace (parent_class), mono_class_get_name (parent_class), parent_id->data.layout.references, parent_id->data.layout.slots);
1279 klass_id->data.layout.slots = parent_id->data.layout.slots;
1280 klass_id->data.layout.references = parent_id->data.layout.references;
1282 #if (DEBUG_CLASS_BITMAPS)
1283 printf ("[no references from parent]");
1285 klass_id->data.layout.slots = 0;
1286 klass_id->data.layout.references = 0;
1289 if (number_of_reference_fields > 0) {
1290 klass_id->data.layout.slots += ((max_offset_of_reference_fields / sizeof (gpointer)) + 1);
1291 klass_id->data.layout.references += number_of_reference_fields;
1292 #if (DEBUG_CLASS_BITMAPS)
1293 printf ("[adding data, going to %d references in %d slots]", klass_id->data.layout.references, klass_id->data.layout.slots);
1297 if (klass_id->data.layout.slots <= CLASS_LAYOUT_PACKED_BITMAP_SIZE) {
1298 #if (DEBUG_CLASS_BITMAPS)
1299 printf ("[zeroing bitmap]");
1301 klass_id->data.bitmap.compact = 0;
1302 if ((parent_id != NULL) && (parent_id->data.layout.references > 0)) {
1303 #if (DEBUG_CLASS_BITMAPS)
1304 printf ("[copying compact father bitmap]");
1306 klass_id->data.bitmap.compact = parent_id->data.bitmap.compact;
1309 int size_of_bitmap = klass_id->data.layout.slots;
1310 BITS_TO_BYTES (size_of_bitmap);
1311 #if (DEBUG_CLASS_BITMAPS)
1312 printf ("[allocating %d bytes for bitmap]", size_of_bitmap);
1314 klass_id->data.bitmap.extended = g_malloc0 (size_of_bitmap);
1315 if ((parent_id != NULL) && (parent_id->data.layout.references > 0)) {
1316 int size_of_father_bitmap = parent_id->data.layout.slots;
1317 if (size_of_father_bitmap <= CLASS_LAYOUT_PACKED_BITMAP_SIZE) {
1319 #if (DEBUG_CLASS_BITMAPS)
1320 printf ("[copying %d bits from father bitmap]", size_of_father_bitmap);
1322 for (father_slot = 0; father_slot < size_of_father_bitmap; father_slot ++) {
1323 if (parent_id->data.bitmap.compact & (((guint64)1) << father_slot)) {
1324 klass_id->data.bitmap.extended [father_slot >> 3] |= (1 << (father_slot & 7));
1328 BITS_TO_BYTES (size_of_father_bitmap);
1329 #if (DEBUG_CLASS_BITMAPS)
1330 printf ("[copying %d bytes from father bitmap]", size_of_father_bitmap);
1332 memcpy (klass_id->data.bitmap.extended, parent_id->data.bitmap.extended, size_of_father_bitmap);
1338 #if (DEBUG_CLASS_BITMAPS)
1339 printf ("[starting filling iteration]\n");
1342 while ((field = mono_class_get_fields (klass, &iter)) != NULL) {
1343 MonoType* field_type = mono_field_get_type (field);
1344 // For now, skip static fields
1345 if (mono_field_get_flags (field) & 0x0010 /*FIELD_ATTRIBUTE_STATIC*/)
1348 #if (DEBUG_CLASS_BITMAPS)
1349 printf ("[Working on field %s]", mono_field_get_name (field));
1351 if (MONO_TYPE_IS_REFERENCE (field_type)) {
1352 int field_offset = mono_field_get_offset (field) - sizeof (MonoObject);
1354 g_assert ((field_offset % sizeof (gpointer)) == 0);
1355 field_slot = field_offset / sizeof (gpointer);
1356 if (klass_id->data.layout.slots <= CLASS_LAYOUT_PACKED_BITMAP_SIZE) {
1357 klass_id->data.bitmap.compact |= (((guint64)1) << field_slot);
1359 klass_id->data.bitmap.extended [field_slot >> 3] |= (1 << (field_slot & 7));
1361 #if (DEBUG_CLASS_BITMAPS)
1362 printf ("[reference at offset %d, slot %d]", field_offset, field_slot);
1365 MonoClass *field_class = mono_class_from_mono_type (field_type);
1366 if (field_class && mono_class_is_valuetype (field_class)) {
1367 ClassIdMappingElement *field_id = class_id_mapping_element_get (field_class);
1371 g_assert (field_id != NULL);
1372 field_offset = mono_field_get_offset (field) - sizeof (MonoObject);
1373 g_assert ((field_id->data.layout.references == 0) || ((field_offset % sizeof (gpointer)) == 0));
1374 field_slot = field_offset / sizeof (gpointer);
1375 #if (DEBUG_CLASS_BITMAPS)
1376 printf ("[value type at offset %d, slot %d, with %d references in %d slots]", field_offset, field_slot, field_id->data.layout.references, field_id->data.layout.slots);
1379 if (field_id->data.layout.references > 0) {
1381 if (field_id->data.layout.slots <= CLASS_LAYOUT_PACKED_BITMAP_SIZE) {
1382 for (sub_field_slot = 0; sub_field_slot < field_id->data.layout.slots; sub_field_slot ++) {
1383 if (field_id->data.bitmap.compact & (((guint64)1) << sub_field_slot)) {
1384 int actual_slot = field_slot + sub_field_slot;
1385 if (klass_id->data.layout.slots <= CLASS_LAYOUT_PACKED_BITMAP_SIZE) {
1386 klass_id->data.bitmap.compact |= (((guint64)1) << actual_slot);
1388 klass_id->data.bitmap.extended [actual_slot >> 3] |= (1 << (actual_slot & 7));
1393 for (sub_field_slot = 0; sub_field_slot < field_id->data.layout.slots; sub_field_slot ++) {
1394 if (field_id->data.bitmap.extended [sub_field_slot >> 3] & (1 << (sub_field_slot & 7))) {
1395 int actual_slot = field_slot + sub_field_slot;
1396 if (klass_id->data.layout.slots <= CLASS_LAYOUT_PACKED_BITMAP_SIZE) {
1397 klass_id->data.bitmap.compact |= (((guint64)1) << actual_slot);
1399 klass_id->data.bitmap.extended [actual_slot >> 3] |= (1 << (actual_slot & 7));
1408 #if (DEBUG_CLASS_BITMAPS)
1411 printf ("\nLayot of class \"%s.%s\": references %d, slots %d, bitmap {", mono_class_get_namespace (klass), mono_class_get_name (klass), klass_id->data.layout.references, klass_id->data.layout.slots);
1412 for (slot = 0; slot < klass_id->data.layout.slots; slot ++) {
1413 if (klass_id->data.layout.slots <= CLASS_LAYOUT_PACKED_BITMAP_SIZE) {
1414 if (klass_id->data.bitmap.compact & (((guint64)1) << slot)) {
1420 if (klass_id->data.bitmap.extended [slot >> 3] & (1 << (slot & 7))) {
1434 static MethodIdMappingElement*
1435 method_id_mapping_element_new (MonoMethod *method) {
1436 MethodIdMappingElement *result = g_new (MethodIdMappingElement, 1);
1437 char *signature = mono_signature_get_desc (mono_method_signature (method), TRUE);
1439 result->name = g_strdup_printf ("%s (%s)", mono_method_get_name (method), signature);
1441 result->method = method;
1442 result->next_unwritten = profiler->methods->unwritten;
1443 profiler->methods->unwritten = result;
1444 result->id = profiler->methods->next_id;
1445 profiler->methods->next_id ++;
1446 g_hash_table_insert (profiler->methods->table, method, result);
1448 result->data.code_start = NULL;
1449 result->data.code_size = 0;
1451 #if (DEBUG_MAPPING_EVENTS)
1452 printf ("Created new METHOD mapping element \"%s\" (%p)[%d]\n", result->name, method, result->id);
1459 method_id_mapping_element_destroy (gpointer element) {
1460 MethodIdMappingElement *e = (MethodIdMappingElement*) element;
1467 class_id_mapping_element_destroy (gpointer element) {
1468 ClassIdMappingElement *e = (ClassIdMappingElement*) element;
1471 if ((e->data.layout.slots != CLASS_LAYOUT_NOT_INITIALIZED) && (e->data.layout.slots > CLASS_LAYOUT_PACKED_BITMAP_SIZE))
1472 g_free (e->data.bitmap.extended);
1476 static MethodIdMapping*
1477 method_id_mapping_new (void) {
1478 MethodIdMapping *result = g_new (MethodIdMapping, 1);
1479 //result->table = g_hash_table_new_full (mono_aligned_addr_hash, NULL, NULL, method_id_mapping_element_destroy);
1480 result->table = g_hash_table_new_full (g_direct_hash, NULL, NULL, method_id_mapping_element_destroy);
1481 result->unwritten = NULL;
1482 result->next_id = 1;
1486 static ClassIdMapping*
1487 class_id_mapping_new (void) {
1488 ClassIdMapping *result = g_new (ClassIdMapping, 1);
1489 //result->table = g_hash_table_new_full (mono_aligned_addr_hash, NULL, NULL, class_id_mapping_element_destroy);
1490 result->table = g_hash_table_new_full (g_direct_hash, NULL, NULL, class_id_mapping_element_destroy);
1491 result->unwritten = NULL;
1492 result->next_id = 1;
1497 method_id_mapping_destroy (MethodIdMapping *map) {
1498 g_hash_table_destroy (map->table);
1503 class_id_mapping_destroy (ClassIdMapping *map) {
1504 g_hash_table_destroy (map->table);
1508 #if (DEBUG_LOAD_EVENTS)
1510 print_load_event (const char *event_name, GHashTable *table, gpointer item, LoadedElement *element);
1513 static LoadedElement*
1514 loaded_element_load_start (GHashTable *table, gpointer item) {
1515 LoadedElement *element = g_new0 (LoadedElement, 1);
1516 element->id = profiler->loaded_element_next_free_id;
1517 profiler->loaded_element_next_free_id ++;
1518 #if (DEBUG_LOAD_EVENTS)
1519 print_load_event ("LOAD START", table, item, element);
1521 MONO_PROFILER_GET_CURRENT_COUNTER (element->load_start_counter);
1522 g_hash_table_insert (table, item, element);
1526 static LoadedElement*
1527 loaded_element_load_end (GHashTable *table, gpointer item, char *name) {
1528 LoadedElement *element = g_hash_table_lookup (table, item);
1529 #if (DEBUG_LOAD_EVENTS)
1530 print_load_event ("LOAD END", table, item, element);
1532 g_assert (element != NULL);
1533 MONO_PROFILER_GET_CURRENT_COUNTER (element->load_end_counter);
1534 element->name = name;
1535 element->loaded = TRUE;
1539 static LoadedElement*
1540 loaded_element_unload_start (GHashTable *table, gpointer item) {
1541 LoadedElement *element = g_hash_table_lookup (table, item);
1542 #if (DEBUG_LOAD_EVENTS)
1543 print_load_event ("UNLOAD START", table, item, element);
1545 g_assert (element != NULL);
1546 MONO_PROFILER_GET_CURRENT_COUNTER (element->unload_start_counter);
1550 static LoadedElement*
1551 loaded_element_unload_end (GHashTable *table, gpointer item) {
1552 LoadedElement *element = g_hash_table_lookup (table, item);
1553 #if (DEBUG_LOAD_EVENTS)
1554 print_load_event ("UNLOAD END", table, item, element);
1556 g_assert (element != NULL);
1557 MONO_PROFILER_GET_CURRENT_COUNTER (element->unload_end_counter);
1558 element->unloaded = TRUE;
1562 static LoadedElement*
1563 loaded_element_find (GHashTable *table, gpointer item) {
1564 LoadedElement *element = g_hash_table_lookup (table, item);
1569 loaded_element_get_id (GHashTable *table, gpointer item) {
1570 LoadedElement *element = loaded_element_find (table, item);
1571 if (element != NULL) {
1579 loaded_element_destroy (gpointer element) {
1580 if (((LoadedElement*)element)->name)
1581 g_free (((LoadedElement*)element)->name);
#if (DEBUG_LOAD_EVENTS)
/*
 * Debug helper: prints one load/unload event, identifying ITEM by which of
 * the profiler's tables it belongs to (assembly, module or appdomain).
 * ELEMENT may be NULL (lookup miss before registration).
 */
static void
print_load_event (const char *event_name, GHashTable *table, gpointer item, LoadedElement *element) {
	const char* item_name;
	char* item_info;
	
	if (table == profiler->loaded_assemblies) {
		//item_info = g_strdup_printf("ASSEMBLY %p (dynamic %d)", item, mono_image_is_dynamic (mono_assembly_get_image((MonoAssembly*)item)));
		item_info = g_strdup_printf("ASSEMBLY %p", item);
	} else if (table == profiler->loaded_modules) {
		//item_info = g_strdup_printf("MODULE %p (dynamic %d)", item, mono_image_is_dynamic ((MonoImage*)item));
		item_info = g_strdup_printf("MODULE %p", item);
	} else if (table == profiler->loaded_appdomains) {
		item_info = g_strdup_printf("APPDOMAIN %p (id %d)", item, mono_domain_get_id ((MonoDomain*)item));
	} else {
		item_info = NULL;
		g_assert_not_reached ();
	}
	
	if (element != NULL) {
		item_name = element->name;
		printf ("%s EVENT for %s (%s [id %d])\n", event_name, item_info, item_name, element->id);
	} else {
		/* Do not dereference element->id when the element does not exist yet. */
		item_name = "<NULL>";
		printf ("%s EVENT for %s (%s)\n", event_name, item_info, item_name);
	}
	g_free (item_info);
}
#endif
1616 profiler_heap_shot_object_buffers_destroy (ProfilerHeapShotObjectBuffer *buffer) {
1617 while (buffer != NULL) {
1618 ProfilerHeapShotObjectBuffer *next = buffer->next;
1619 #if DEBUG_HEAP_PROFILER
1620 printf ("profiler_heap_shot_object_buffers_destroy: destroyed buffer %p (%p-%p)\n", buffer, & (buffer->buffer [0]), buffer->end);
1627 static ProfilerHeapShotObjectBuffer*
1628 profiler_heap_shot_object_buffer_new (ProfilerPerThreadData *data) {
1629 ProfilerHeapShotObjectBuffer *buffer;
1630 ProfilerHeapShotObjectBuffer *result = g_new (ProfilerHeapShotObjectBuffer, 1);
1631 result->next_free_slot = & (result->buffer [0]);
1632 result->end = & (result->buffer [PROFILER_HEAP_SHOT_OBJECT_BUFFER_SIZE]);
1633 result->first_unprocessed_slot = & (result->buffer [0]);
1634 result->next = data->heap_shot_object_buffers;
1635 data->heap_shot_object_buffers = result;
1636 #if DEBUG_HEAP_PROFILER
1637 printf ("profiler_heap_shot_object_buffer_new: created buffer %p (%p-%p)\n", result, result->next_free_slot, result->end);
1639 for (buffer = result; buffer != NULL; buffer = buffer->next) {
1640 ProfilerHeapShotObjectBuffer *last = buffer->next;
1641 if ((last != NULL) && (last->first_unprocessed_slot == last->end)) {
1642 buffer->next = NULL;
1643 profiler_heap_shot_object_buffers_destroy (last);
1650 static ProfilerHeapShotWriteJob*
1651 profiler_heap_shot_write_job_new (gboolean heap_shot_was_signalled, gboolean dump_heap_data, guint32 collection) {
1652 ProfilerHeapShotWriteJob *job = g_new (ProfilerHeapShotWriteJob, 1);
1654 job->next_unwritten = NULL;
1656 if (profiler->action_flags.unreachable_objects || dump_heap_data) {
1657 job->buffers = g_new (ProfilerHeapShotWriteBuffer, 1);
1658 job->buffers->next = NULL;
1659 job->last_next = & (job->buffers->next);
1660 job->start = & (job->buffers->buffer [0]);
1661 job->cursor = job->start;
1662 job->end = & (job->buffers->buffer [PROFILER_HEAP_SHOT_WRITE_BUFFER_SIZE]);
1664 job->buffers = NULL;
1665 job->last_next = NULL;
1670 job->full_buffers = 0;
1672 if (profiler->action_flags.collection_summary) {
1673 job->summary.capacity = profiler->classes->next_id;
1674 job->summary.per_class_data = g_new0 (ProfilerHeapShotClassSummary, job->summary.capacity);
1676 job->summary.capacity = 0;
1677 job->summary.per_class_data = NULL;
1680 job->heap_shot_was_signalled = heap_shot_was_signalled;
1681 job->collection = collection;
1682 job->dump_heap_data = dump_heap_data;
1683 #if DEBUG_HEAP_PROFILER
1684 printf ("profiler_heap_shot_write_job_new: created job %p with buffer %p(%p-%p) (collection %d, dump %d)\n", job, job->buffers, job->start, job->end, collection, dump_heap_data);
1690 profiler_heap_shot_write_job_has_data (ProfilerHeapShotWriteJob *job) {
1691 return ((job->buffers != NULL) || (job->summary.capacity > 0));
1695 profiler_heap_shot_write_job_add_buffer (ProfilerHeapShotWriteJob *job, gpointer value) {
1696 ProfilerHeapShotWriteBuffer *buffer = g_new (ProfilerHeapShotWriteBuffer, 1);
1697 buffer->next = NULL;
1698 *(job->last_next) = buffer;
1699 job->last_next = & (buffer->next);
1700 job->full_buffers ++;
1701 buffer->buffer [0] = value;
1702 job->start = & (buffer->buffer [0]);
1703 job->cursor = & (buffer->buffer [1]);
1704 job->end = & (buffer->buffer [PROFILER_HEAP_SHOT_WRITE_BUFFER_SIZE]);
1705 #if DEBUG_HEAP_PROFILER
1706 printf ("profiler_heap_shot_write_job_add_buffer: in job %p, added buffer %p(%p-%p) with value %p at address %p (cursor now %p)\n", job, buffer, job->start, job->end, value, &(buffer->buffer [0]), job->cursor);
1708 ProfilerHeapShotWriteBuffer *current_buffer;
1709 for (current_buffer = job->buffers; current_buffer != NULL; current_buffer = current_buffer->next) {
1710 printf ("profiler_heap_shot_write_job_add_buffer: now job %p has buffer %p\n", job, current_buffer);
1717 profiler_heap_shot_write_job_free_buffers (ProfilerHeapShotWriteJob *job) {
1718 ProfilerHeapShotWriteBuffer *buffer = job->buffers;
1720 while (buffer != NULL) {
1721 ProfilerHeapShotWriteBuffer *next = buffer->next;
1722 #if DEBUG_HEAP_PROFILER
1723 printf ("profiler_heap_shot_write_job_free_buffers: in job %p, freeing buffer %p\n", job, buffer);
1729 job->buffers = NULL;
1731 if (job->summary.per_class_data != NULL) {
1732 g_free (job->summary.per_class_data);
1733 job->summary.per_class_data = NULL;
1735 job->summary.capacity = 0;
1739 profiler_heap_shot_write_block (ProfilerHeapShotWriteJob *job);
1742 profiler_process_heap_shot_write_jobs (void) {
1743 gboolean done = FALSE;
1746 ProfilerHeapShotWriteJob *current_job = profiler->heap_shot_write_jobs;
1747 ProfilerHeapShotWriteJob *previous_job = NULL;
1748 ProfilerHeapShotWriteJob *next_job;
1751 while (current_job != NULL) {
1752 next_job = current_job->next_unwritten;
1754 if (next_job != NULL) {
1755 if (profiler_heap_shot_write_job_has_data (current_job)) {
1758 if (! profiler_heap_shot_write_job_has_data (next_job)) {
1759 current_job->next_unwritten = NULL;
1763 if (profiler_heap_shot_write_job_has_data (current_job)) {
1764 LOG_WRITER_THREAD ("profiler_process_heap_shot_write_jobs: writing...");
1765 profiler_heap_shot_write_block (current_job);
1766 LOG_WRITER_THREAD ("profiler_process_heap_shot_write_jobs: done");
1767 if (previous_job != NULL) {
1768 previous_job->next_unwritten = NULL;
1773 previous_job = current_job;
1774 current_job = next_job;
1780 profiler_free_heap_shot_write_jobs (void) {
1781 ProfilerHeapShotWriteJob *current_job = profiler->heap_shot_write_jobs;
1782 ProfilerHeapShotWriteJob *next_job;
1784 if (current_job != NULL) {
1785 while (current_job->next_unwritten != NULL) {
1786 #if DEBUG_HEAP_PROFILER
1787 printf ("profiler_free_heap_shot_write_jobs: job %p must not be freed\n", current_job);
1789 current_job = current_job->next_unwritten;
1792 next_job = current_job->next;
1793 current_job->next = NULL;
1794 current_job = next_job;
1796 while (current_job != NULL) {
1797 #if DEBUG_HEAP_PROFILER
1798 printf ("profiler_free_heap_shot_write_jobs: job %p will be freed\n", current_job);
1800 next_job = current_job->next;
1801 profiler_heap_shot_write_job_free_buffers (current_job);
1802 g_free (current_job);
1803 current_job = next_job;
1809 profiler_destroy_heap_shot_write_jobs (void) {
1810 ProfilerHeapShotWriteJob *current_job = profiler->heap_shot_write_jobs;
1811 ProfilerHeapShotWriteJob *next_job;
1813 while (current_job != NULL) {
1814 next_job = current_job->next;
1815 profiler_heap_shot_write_job_free_buffers (current_job);
1816 g_free (current_job);
1817 current_job = next_job;
1822 profiler_add_heap_shot_write_job (ProfilerHeapShotWriteJob *job) {
1823 job->next = profiler->heap_shot_write_jobs;
1824 job->next_unwritten = job->next;
1825 profiler->heap_shot_write_jobs = job;
1826 #if DEBUG_HEAP_PROFILER
1827 printf ("profiler_add_heap_shot_write_job: added job %p\n", job);
#if DEBUG_HEAP_PROFILER
#define STORE_ALLOCATED_OBJECT_MESSAGE1(d,o) printf ("STORE_ALLOCATED_OBJECT[TID %ld]: storing object %p at address %p\n", (d)->thread_id, (o), (d)->heap_shot_object_buffers->next_free_slot)
#define STORE_ALLOCATED_OBJECT_MESSAGE2(d,o) printf ("STORE_ALLOCATED_OBJECT[TID %ld]: storing object %p at address %p in new buffer %p\n", (d)->thread_id, (o), buffer->next_free_slot, buffer)
#else
#define STORE_ALLOCATED_OBJECT_MESSAGE1(d,o)
#define STORE_ALLOCATED_OBJECT_MESSAGE2(d,o)
#endif
/* Records object O in thread D's heap-shot buffer, growing the chain when the
 * current buffer is full. */
#define STORE_ALLOCATED_OBJECT(d,o) do {\
	if ((d)->heap_shot_object_buffers->next_free_slot < (d)->heap_shot_object_buffers->end) {\
		STORE_ALLOCATED_OBJECT_MESSAGE1 ((d), (o));\
		*((d)->heap_shot_object_buffers->next_free_slot) = (o);\
		(d)->heap_shot_object_buffers->next_free_slot ++;\
	} else {\
		ProfilerHeapShotObjectBuffer *buffer = profiler_heap_shot_object_buffer_new (d);\
		STORE_ALLOCATED_OBJECT_MESSAGE2 ((d), (o));\
		*((buffer)->next_free_slot) = (o);\
		(buffer)->next_free_slot ++;\
	}\
} while (0)
1851 static ProfilerPerThreadData*
1852 profiler_per_thread_data_new (guint32 buffer_size)
1854 ProfilerPerThreadData *data = g_new (ProfilerPerThreadData, 1);
1856 data->events = g_new0 (ProfilerEventData, buffer_size);
1857 data->next_free_event = data->events;
1858 data->next_unreserved_event = data->events;
1859 data->end_event = data->events + (buffer_size - 1);
1860 data->first_unwritten_event = data->events;
1861 data->first_unmapped_event = data->events;
1862 MONO_PROFILER_GET_CURRENT_COUNTER (data->start_event_counter);
1863 data->last_event_counter = data->start_event_counter;
1864 data->thread_id = CURRENT_THREAD_ID ();
1865 data->heap_shot_object_buffers = NULL;
1866 if ((profiler->action_flags.unreachable_objects == TRUE) ||
1867 (profiler->action_flags.heap_shot == TRUE) ||
1868 (profiler->action_flags.collection_summary == TRUE)) {
1869 profiler_heap_shot_object_buffer_new (data);
1871 if (profiler->action_flags.track_stack) {
1872 thread_stack_initialize (&(data->stack), 64);
1874 thread_stack_initialize_empty (&(data->stack));
1880 profiler_per_thread_data_destroy (ProfilerPerThreadData *data) {
1881 g_free (data->events);
1882 profiler_heap_shot_object_buffers_destroy (data->heap_shot_object_buffers);
1883 thread_stack_free (&(data->stack));
1887 static ProfilerStatisticalData*
1888 profiler_statistical_data_new (MonoProfiler *profiler) {
1889 int buffer_size = profiler->statistical_buffer_size * (profiler->statistical_call_chain_depth + 1);
1890 ProfilerStatisticalData *data = g_new (ProfilerStatisticalData, 1);
1892 data->hits = g_new0 (ProfilerStatisticalHit, buffer_size);
1893 data->next_free_index = 0;
1894 data->end_index = profiler->statistical_buffer_size;
1895 data->first_unwritten_index = 0;
1901 profiler_statistical_data_destroy (ProfilerStatisticalData *data) {
1902 g_free (data->hits);
1907 profiler_add_write_buffer (void) {
1908 if (profiler->current_write_buffer->next == NULL) {
1909 profiler->current_write_buffer->next = g_malloc (sizeof (ProfilerFileWriteBuffer) + PROFILER_FILE_WRITE_BUFFER_SIZE);
1910 profiler->current_write_buffer->next->next = NULL;
1912 //printf ("Added next buffer %p, to buffer %p\n", profiler->current_write_buffer->next, profiler->current_write_buffer);
1915 profiler->current_write_buffer = profiler->current_write_buffer->next;
1916 profiler->current_write_position = 0;
1917 profiler->full_write_buffers ++;
1921 profiler_free_write_buffers (void) {
1922 ProfilerFileWriteBuffer *current_buffer = profiler->write_buffers;
1923 while (current_buffer != NULL) {
1924 ProfilerFileWriteBuffer *next_buffer = current_buffer->next;
1926 //printf ("Freeing write buffer %p, next is %p\n", current_buffer, next_buffer);
1928 g_free (current_buffer);
1929 current_buffer = next_buffer;
/* Appends one byte to the current file write buffer, switching to a fresh
 * buffer first when the current one is full. */
#define WRITE_BYTE(b) do {\
	if (profiler->current_write_position >= PROFILER_FILE_WRITE_BUFFER_SIZE) {\
		profiler_add_write_buffer ();\
	}\
	profiler->current_write_buffer->buffer [profiler->current_write_position] = (b);\
	profiler->current_write_position ++;\
} while (0)
1943 write_current_block (guint16 code) {
1944 guint32 size = (profiler->full_write_buffers * PROFILER_FILE_WRITE_BUFFER_SIZE) + profiler->current_write_position;
1945 ProfilerFileWriteBuffer *current_buffer = profiler->write_buffers;
1946 guint64 current_counter;
1947 guint32 counter_delta;
1950 MONO_PROFILER_GET_CURRENT_COUNTER (current_counter);
1951 if (profiler->last_header_counter != 0) {
1952 counter_delta = current_counter - profiler->last_header_counter;
1956 profiler->last_header_counter = current_counter;
1958 header [0] = code & 0xff;
1959 header [1] = (code >> 8) & 0xff;
1960 header [2] = size & 0xff;
1961 header [3] = (size >> 8) & 0xff;
1962 header [4] = (size >> 16) & 0xff;
1963 header [5] = (size >> 24) & 0xff;
1964 header [6] = counter_delta & 0xff;
1965 header [7] = (counter_delta >> 8) & 0xff;
1966 header [8] = (counter_delta >> 16) & 0xff;
1967 header [9] = (counter_delta >> 24) & 0xff;
1969 #if (DEBUG_FILE_WRITES)
1970 printf ("write_current_block: writing header (code %d)\n", code);
1972 WRITE_BUFFER (& (header [0]), 10);
1974 while ((current_buffer != NULL) && (profiler->full_write_buffers > 0)) {
1975 #if (DEBUG_FILE_WRITES)
1976 printf ("write_current_block: writing buffer (size %d)\n", PROFILER_FILE_WRITE_BUFFER_SIZE);
1978 WRITE_BUFFER (& (current_buffer->buffer [0]), PROFILER_FILE_WRITE_BUFFER_SIZE);
1979 profiler->full_write_buffers --;
1980 current_buffer = current_buffer->next;
1982 if (profiler->current_write_position > 0) {
1983 #if (DEBUG_FILE_WRITES)
1984 printf ("write_current_block: writing last buffer (size %d)\n", profiler->current_write_position);
1986 WRITE_BUFFER (& (current_buffer->buffer [0]), profiler->current_write_position);
1989 #if (DEBUG_FILE_WRITES)
1990 printf ("write_current_block: buffers flushed\n");
1993 profiler->current_write_buffer = profiler->write_buffers;
1994 profiler->current_write_position = 0;
1995 profiler->full_write_buffers = 0;
/* Variable-length integer encoding: 7 payload bits per byte; the high bit is
 * set only on the final byte of a value (see write_uint32/write_uint64). */
#define SEVEN_BITS_MASK (0x7f)
#define EIGHT_BIT_MASK (0x80)
2003 write_uint32 (guint32 value) {
2004 while (value > SEVEN_BITS_MASK) {
2005 WRITE_BYTE (value & SEVEN_BITS_MASK);
2008 WRITE_BYTE (value | EIGHT_BIT_MASK);
2011 write_uint64 (guint64 value) {
2012 while (value > SEVEN_BITS_MASK) {
2013 WRITE_BYTE (value & SEVEN_BITS_MASK);
2016 WRITE_BYTE (value | EIGHT_BIT_MASK);
/* Writes STRING including its terminating NUL byte. */
static void
write_string (const char *string) {
	while (*string != 0) {
		WRITE_BYTE (*string);
		string ++;
	}
	WRITE_BYTE (0);
}
2027 static void write_clock_data (void);
2029 write_directives_block (gboolean start) {
2030 write_clock_data ();
2033 if (profiler->action_flags.save_allocation_caller) {
2034 write_uint32 (MONO_PROFILER_DIRECTIVE_ALLOCATIONS_CARRY_CALLER);
2036 if (profiler->action_flags.save_allocation_stack || profiler->action_flags.track_calls) {
2037 write_uint32 (MONO_PROFILER_DIRECTIVE_ALLOCATIONS_HAVE_STACK);
2039 if (profiler->action_flags.allocations_carry_id) {
2040 write_uint32 (MONO_PROFILER_DIRECTIVE_ALLOCATIONS_CARRY_ID);
2042 write_uint32 (MONO_PROFILER_DIRECTIVE_LOADED_ELEMENTS_CARRY_ID);
2043 write_uint32 (MONO_PROFILER_DIRECTIVE_CLASSES_CARRY_ASSEMBLY_ID);
2044 write_uint32 (MONO_PROFILER_DIRECTIVE_METHODS_CARRY_WRAPPER_FLAG);
2046 write_uint32 (MONO_PROFILER_DIRECTIVE_END);
2048 write_clock_data ();
2049 write_current_block (MONO_PROFILER_FILE_BLOCK_KIND_DIRECTIVES);
#if DEBUG_HEAP_PROFILER
#define WRITE_HEAP_SHOT_JOB_VALUE_MESSAGE(v,c) printf ("WRITE_HEAP_SHOT_JOB_VALUE: writing value %p at cursor %p\n", (v), (c))
#else
#define WRITE_HEAP_SHOT_JOB_VALUE_MESSAGE(v,c)
#endif
/* Appends V to job J's current write buffer, allocating a new buffer (which
 * also stores V) when the cursor reached the end. */
#define WRITE_HEAP_SHOT_JOB_VALUE(j,v) do {\
	if ((j)->cursor < (j)->end) {\
		WRITE_HEAP_SHOT_JOB_VALUE_MESSAGE ((v), ((j)->cursor));\
		*((j)->cursor) = (v);\
		(j)->cursor ++;\
	} else {\
		profiler_heap_shot_write_job_add_buffer (j, v);\
	}\
} while (0)
2068 #undef GUINT_TO_POINTER
2069 #undef GPOINTER_TO_UINT
2070 #if (SIZEOF_VOID_P == 4)
2071 #define GUINT_TO_POINTER(u) ((void*)(guint32)(u))
2072 #define GPOINTER_TO_UINT(p) ((guint32)(void*)(p))
2073 #elif (SIZEOF_VOID_P == 8)
2074 #define GUINT_TO_POINTER(u) ((void*)(guint64)(u))
2075 #define GPOINTER_TO_UINT(p) ((guint64)(void*)(p))
2077 #error Bad size of void pointer
2080 #define WRITE_HEAP_SHOT_JOB_VALUE_WITH_CODE(j,v,c) WRITE_HEAP_SHOT_JOB_VALUE (j, GUINT_TO_POINTER (GPOINTER_TO_UINT (v)|(c)))
#if DEBUG_HEAP_PROFILER
#define UPDATE_JOB_BUFFER_CURSOR_MESSAGE() printf ("profiler_heap_shot_write_block[UPDATE_JOB_BUFFER_CURSOR]: in job %p, moving to buffer %p and cursor %p\n", job, buffer, cursor)
#else
#define UPDATE_JOB_BUFFER_CURSOR_MESSAGE()
#endif
/* Advances the read cursor across the job's buffer chain; only the last
 * buffer is partially filled (its end is job->cursor). Sets cursor to NULL
 * when the data is exhausted. Uses the local variables buffer/cursor/end/job
 * of profiler_heap_shot_write_data_block. */
#define UPDATE_JOB_BUFFER_CURSOR() do {\
	cursor ++;\
	if (cursor >= end) {\
		buffer = buffer->next;\
		if (buffer != NULL) {\
			cursor = & (buffer->buffer [0]);\
			if (buffer->next != NULL) {\
				end = & (buffer->buffer [PROFILER_HEAP_SHOT_WRITE_BUFFER_SIZE]);\
			} else {\
				end = job->cursor;\
			}\
		} else {\
			cursor = NULL;\
		}\
	}\
	UPDATE_JOB_BUFFER_CURSOR_MESSAGE ();\
} while (0)
2106 profiler_heap_shot_write_data_block (ProfilerHeapShotWriteJob *job) {
2107 ProfilerHeapShotWriteBuffer *buffer;
2110 guint64 start_counter;
2112 guint64 end_counter;
2115 write_uint64 (job->start_counter);
2116 write_uint64 (job->start_time);
2117 write_uint64 (job->end_counter);
2118 write_uint64 (job->end_time);
2119 write_uint32 (job->collection);
2120 MONO_PROFILER_GET_CURRENT_COUNTER (start_counter);
2121 MONO_PROFILER_GET_CURRENT_TIME (start_time);
2122 write_uint64 (start_counter);
2123 write_uint64 (start_time);
2124 #if DEBUG_HEAP_PROFILER
2125 printf ("profiler_heap_shot_write_data_block: start writing job %p (start %p, end %p)...\n", job, & (job->buffers->buffer [0]), job->cursor);
2127 buffer = job->buffers;
2128 cursor = & (buffer->buffer [0]);
2129 if (buffer->next != NULL) {
2130 end = & (buffer->buffer [PROFILER_HEAP_SHOT_WRITE_BUFFER_SIZE]);
2134 if (cursor >= end) {
2137 #if DEBUG_HEAP_PROFILER
2138 printf ("profiler_heap_shot_write_data_block: in job %p, starting at buffer %p and cursor %p\n", job, buffer, cursor);
2140 while (cursor != NULL) {
2141 gpointer value = *cursor;
2142 HeapProfilerJobValueCode code = GPOINTER_TO_UINT (value) & HEAP_CODE_MASK;
2143 #if DEBUG_HEAP_PROFILER
2144 printf ("profiler_heap_shot_write_data_block: got value %p and code %d\n", value, code);
2147 UPDATE_JOB_BUFFER_CURSOR ();
2148 if (code == HEAP_CODE_FREE_OBJECT_CLASS) {
2149 MonoClass *klass = GUINT_TO_POINTER (GPOINTER_TO_UINT (value) & (~ (guint64) HEAP_CODE_MASK));
2150 //MonoClass *klass = GUINT_TO_POINTER (GPOINTER_TO_UINT (value) % 4);
2151 ClassIdMappingElement *class_id;
2154 class_id = class_id_mapping_element_get (klass);
2155 if (class_id == NULL) {
2156 printf ("profiler_heap_shot_write_data_block: unknown class %p", klass);
2158 g_assert (class_id != NULL);
2159 write_uint32 ((class_id->id << 2) | HEAP_CODE_FREE_OBJECT_CLASS);
2161 size = GPOINTER_TO_UINT (*cursor);
2162 UPDATE_JOB_BUFFER_CURSOR ();
2163 write_uint32 (size);
2164 #if DEBUG_HEAP_PROFILER
2165 printf ("profiler_heap_shot_write_data_block: wrote unreachable object of class %p (id %d, size %d)\n", klass, class_id->id, size);
2167 } else if (code == HEAP_CODE_OBJECT) {
2168 MonoObject *object = GUINT_TO_POINTER (GPOINTER_TO_UINT (value) & (~ (guint64) HEAP_CODE_MASK));
2169 MonoClass *klass = mono_object_get_class (object);
2170 ClassIdMappingElement *class_id = class_id_mapping_element_get (klass);
2171 guint32 size = mono_object_get_size (object);
2172 guint32 references = GPOINTER_TO_UINT (*cursor);
2173 UPDATE_JOB_BUFFER_CURSOR ();
2175 if (class_id == NULL) {
2176 printf ("profiler_heap_shot_write_data_block: unknown class %p", klass);
2178 g_assert (class_id != NULL);
2180 write_uint64 (GPOINTER_TO_UINT (value));
2181 write_uint32 (class_id->id);
2182 write_uint32 (size);
2183 write_uint32 (references);
2184 #if DEBUG_HEAP_PROFILER
2185 printf ("profiler_heap_shot_write_data_block: writing object %p (references %d)\n", value, references);
2188 while (references > 0) {
2189 gpointer reference = *cursor;
2190 write_uint64 (GPOINTER_TO_UINT (reference));
2191 UPDATE_JOB_BUFFER_CURSOR ();
2193 #if DEBUG_HEAP_PROFILER
2194 printf ("profiler_heap_shot_write_data_block: inside object %p, wrote reference %p)\n", value, reference);
2198 #if DEBUG_HEAP_PROFILER
2199 printf ("profiler_heap_shot_write_data_block: unknown code %d in value %p\n", code, value);
2201 g_assert_not_reached ();
2206 MONO_PROFILER_GET_CURRENT_COUNTER (end_counter);
2207 MONO_PROFILER_GET_CURRENT_TIME (end_time);
2208 write_uint64 (end_counter);
2209 write_uint64 (end_time);
2211 write_current_block (MONO_PROFILER_FILE_BLOCK_KIND_HEAP_DATA);
2212 #if DEBUG_HEAP_PROFILER
2213 printf ("profiler_heap_shot_write_data_block: writing job %p done.\n", job);
2217 profiler_heap_shot_write_summary_block (ProfilerHeapShotWriteJob *job) {
2218 guint64 start_counter;
2220 guint64 end_counter;
2224 #if DEBUG_HEAP_PROFILER
2225 printf ("profiler_heap_shot_write_summary_block: start writing job %p...\n", job);
2227 MONO_PROFILER_GET_CURRENT_COUNTER (start_counter);
2228 MONO_PROFILER_GET_CURRENT_TIME (start_time);
2229 write_uint64 (start_counter);
2230 write_uint64 (start_time);
2232 write_uint32 (job->collection);
2234 for (id = 0; id < job->summary.capacity; id ++) {
2235 if ((job->summary.per_class_data [id].reachable.instances > 0) || (job->summary.per_class_data [id].unreachable.instances > 0)) {
2237 write_uint32 (job->summary.per_class_data [id].reachable.instances);
2238 write_uint32 (job->summary.per_class_data [id].reachable.bytes);
2239 write_uint32 (job->summary.per_class_data [id].unreachable.instances);
2240 write_uint32 (job->summary.per_class_data [id].unreachable.bytes);
2245 MONO_PROFILER_GET_CURRENT_COUNTER (end_counter);
2246 MONO_PROFILER_GET_CURRENT_TIME (end_time);
2247 write_uint64 (end_counter);
2248 write_uint64 (end_time);
2250 write_current_block (MONO_PROFILER_FILE_BLOCK_KIND_HEAP_SUMMARY);
2251 #if DEBUG_HEAP_PROFILER
2252 printf ("profiler_heap_shot_write_summary_block: writing job %p done.\n", job);
2257 profiler_heap_shot_write_block (ProfilerHeapShotWriteJob *job) {
2258 #if DEBUG_HEAP_PROFILER
2259 printf ("profiler_heap_shot_write_block: working on job %p...\n", job);
2262 if (profiler->action_flags.collection_summary == TRUE) {
2263 profiler_heap_shot_write_summary_block (job);
2266 if ((profiler->action_flags.unreachable_objects == TRUE) || (profiler->action_flags.heap_shot == TRUE)) {
2267 profiler_heap_shot_write_data_block (job);
2270 profiler_heap_shot_write_job_free_buffers (job);
2271 #if DEBUG_HEAP_PROFILER
2272 printf ("profiler_heap_shot_write_block: work on job %p done.\n", job);
2277 write_element_load_block (LoadedElement *element, guint8 kind, gsize thread_id, gpointer item) {
2279 write_uint64 (element->load_start_counter);
2280 write_uint64 (element->load_end_counter);
2281 write_uint64 (thread_id);
2282 write_uint32 (element->id);
2283 write_string (element->name);
2284 if (kind & MONO_PROFILER_LOADED_EVENT_ASSEMBLY) {
2285 MonoImage *image = mono_assembly_get_image ((MonoAssembly*) item);
2286 MonoAssemblyName aname;
2287 if (mono_assembly_fill_assembly_name (image, &aname)) {
2288 write_string (aname.name);
2289 write_uint32 (aname.major);
2290 write_uint32 (aname.minor);
2291 write_uint32 (aname.build);
2292 write_uint32 (aname.revision);
2293 write_string (aname.culture && *aname.culture? aname.culture: "neutral");
2294 write_string (aname.public_key_token [0] ? (char *)aname.public_key_token : "null");
2295 /* Retargetable flag */
2296 write_uint32 ((aname.flags & 0x00000100) ? 1 : 0);
2298 write_string ("UNKNOWN");
2303 write_string ("neutral");
2304 write_string ("null");
2308 write_current_block (MONO_PROFILER_FILE_BLOCK_KIND_LOADED);
2309 element->load_written = TRUE;
2313 write_element_unload_block (LoadedElement *element, guint8 kind, gsize thread_id) {
2315 write_uint64 (element->unload_start_counter);
2316 write_uint64 (element->unload_end_counter);
2317 write_uint64 (thread_id);
2318 write_uint32 (element->id);
2319 write_string (element->name);
2320 write_current_block (MONO_PROFILER_FILE_BLOCK_KIND_UNLOADED);
2321 element->unload_written = TRUE;
2325 write_clock_data (void) {
2329 MONO_PROFILER_GET_CURRENT_COUNTER (counter);
2330 MONO_PROFILER_GET_CURRENT_TIME (time);
2332 write_uint64 (counter);
2333 write_uint64 (time);
2337 write_mapping_block (gsize thread_id) {
2338 ClassIdMappingElement *current_class;
2339 MethodIdMappingElement *current_method;
2341 if ((profiler->classes->unwritten == NULL) && (profiler->methods->unwritten == NULL))
2344 #if (DEBUG_MAPPING_EVENTS || DEBUG_FILE_WRITES)
2345 printf ("[write_mapping_block][TID %ld] START\n", thread_id);
2348 write_clock_data ();
2349 write_uint64 (thread_id);
2351 for (current_class = profiler->classes->unwritten; current_class != NULL; current_class = current_class->next_unwritten) {
2352 MonoImage *image = mono_class_get_image (current_class->klass);
2353 MonoAssembly *assembly = mono_image_get_assembly (image);
2354 guint32 assembly_id = loaded_element_get_id (profiler->loaded_assemblies, assembly);
2355 write_uint32 (current_class->id);
2356 write_uint32 (assembly_id);
2357 write_string (current_class->name);
2358 #if (DEBUG_MAPPING_EVENTS)
2359 printf ("mapping CLASS (%d => %s)\n", current_class->id, current_class->name);
2361 g_free (current_class->name);
2362 current_class->name = NULL;
2365 profiler->classes->unwritten = NULL;
2367 for (current_method = profiler->methods->unwritten; current_method != NULL; current_method = current_method->next_unwritten) {
2368 MonoMethod *method = current_method->method;
2369 MonoClass *klass = mono_method_get_class (method);
2370 ClassIdMappingElement *class_element = class_id_mapping_element_get (klass);
2371 g_assert (class_element != NULL);
2372 write_uint32 (current_method->id);
2373 write_uint32 (class_element->id);
2374 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2379 write_string (current_method->name);
2380 #if (DEBUG_MAPPING_EVENTS)
2381 printf ("mapping METHOD ([%d]%d => %s)\n", class_element?class_element->id:1, current_method->id, current_method->name);
2383 g_free (current_method->name);
2384 current_method->name = NULL;
2387 profiler->methods->unwritten = NULL;
2389 write_clock_data ();
2390 write_current_block (MONO_PROFILER_FILE_BLOCK_KIND_MAPPING);
2392 #if (DEBUG_MAPPING_EVENTS || DEBUG_FILE_WRITES)
2393 printf ("[write_mapping_block][TID %ld] END\n", thread_id);
2398 MONO_PROFILER_PACKED_EVENT_CODE_METHOD_ENTER = 1,
2399 MONO_PROFILER_PACKED_EVENT_CODE_METHOD_EXIT_IMPLICIT = 2,
2400 MONO_PROFILER_PACKED_EVENT_CODE_METHOD_EXIT_EXPLICIT = 3,
2401 MONO_PROFILER_PACKED_EVENT_CODE_CLASS_ALLOCATION = 4,
2402 MONO_PROFILER_PACKED_EVENT_CODE_METHOD_EVENT = 5,
2403 MONO_PROFILER_PACKED_EVENT_CODE_CLASS_EVENT = 6,
2404 MONO_PROFILER_PACKED_EVENT_CODE_OTHER_EVENT = 7
2405 } MonoProfilerPackedEventCode;
2406 #define MONO_PROFILER_PACKED_EVENT_CODE_BITS 3
2407 #define MONO_PROFILER_PACKED_EVENT_DATA_BITS (8-MONO_PROFILER_PACKED_EVENT_CODE_BITS)
2408 #define MONO_PROFILER_PACKED_EVENT_DATA_MASK ((1<<MONO_PROFILER_PACKED_EVENT_DATA_BITS)-1)
2410 #define MONO_PROFILER_EVENT_MAKE_PACKED_CODE(result,data,base) do {\
2411 result = ((base)|((data & MONO_PROFILER_PACKED_EVENT_DATA_MASK) << MONO_PROFILER_PACKED_EVENT_CODE_BITS));\
2412 data >>= MONO_PROFILER_PACKED_EVENT_DATA_BITS;\
2414 #define MONO_PROFILER_EVENT_MAKE_FULL_CODE(result,code,kind,base) do {\
2415 result = ((base)|((((kind)<<4) | (code)) << MONO_PROFILER_PACKED_EVENT_CODE_BITS));\
2419 rewrite_last_written_stack (ProfilerThreadStack *stack) {
2421 int i = thread_stack_get_last_written_frame (stack);
2423 MONO_PROFILER_EVENT_MAKE_FULL_CODE (event_code, MONO_PROFILER_EVENT_STACK_SECTION, 0, MONO_PROFILER_PACKED_EVENT_CODE_OTHER_EVENT);
2424 WRITE_BYTE (event_code);
2430 write_uint32 (thread_stack_written_frame_at_index (stack, i));
2435 static ProfilerEventData*
2436 write_stack_section_event (ProfilerEventData *events, ProfilerPerThreadData *data) {
2437 int last_saved_frame = events->data.number;
2438 int saved_frames = events->value;
2442 MONO_PROFILER_EVENT_MAKE_FULL_CODE (event_code, MONO_PROFILER_EVENT_STACK_SECTION, 0, MONO_PROFILER_PACKED_EVENT_CODE_OTHER_EVENT);
2443 WRITE_BYTE (event_code);
2444 write_uint32 (last_saved_frame);
2445 write_uint32 (saved_frames);
2446 thread_stack_set_last_written_frame (&(data->stack), last_saved_frame + saved_frames);
2449 for (i = 0; i < saved_frames; i++) {
2450 guint8 code = events->code;
2452 MethodIdMappingElement *method;
2453 guint32 frame_value;
2455 if (code == MONO_PROFILER_EVENT_METHOD_ALLOCATION_CALLER) {
2457 } else if (code == MONO_PROFILER_EVENT_METHOD_ALLOCATION_JIT_TIME_CALLER) {
2460 g_assert_not_reached ();
2464 method = method_id_mapping_element_get (events->data.address);
2465 g_assert (method != NULL);
2466 frame_value = (method->id << 1) | jit_flag;
2467 write_uint32 (frame_value);
2468 thread_stack_write_frame_at_index (&(data->stack), last_saved_frame + saved_frames - (1 + i), frame_value);
2475 static ProfilerEventData*
2476 write_event (ProfilerEventData *event, ProfilerPerThreadData *data) {
2477 ProfilerEventData *next = event + 1;
2478 gboolean write_event_value = TRUE;
2481 guint64 event_value;
2482 gboolean write_event_value_extension_1 = FALSE;
2483 guint64 event_value_extension_1 = 0;
2484 gboolean write_event_value_extension_2 = FALSE;
2485 guint64 event_value_extension_2 = 0;
2487 event_value = event->value;
2488 if (event_value == MAX_EVENT_VALUE) {
2489 event_value = *((guint64*)next);
2493 if (event->data_type == MONO_PROFILER_EVENT_DATA_TYPE_METHOD) {
2494 MethodIdMappingElement *element = method_id_mapping_element_get (event->data.address);
2495 g_assert (element != NULL);
2496 event_data = element->id;
2498 if (event->code == MONO_PROFILER_EVENT_METHOD_CALL) {
2499 if (event->kind == MONO_PROFILER_EVENT_KIND_START) {
2500 MONO_PROFILER_EVENT_MAKE_PACKED_CODE (event_code, event_data, MONO_PROFILER_PACKED_EVENT_CODE_METHOD_ENTER);
2502 MONO_PROFILER_EVENT_MAKE_PACKED_CODE (event_code, event_data, MONO_PROFILER_PACKED_EVENT_CODE_METHOD_EXIT_EXPLICIT);
2505 MONO_PROFILER_EVENT_MAKE_FULL_CODE (event_code, event->code, event->kind, MONO_PROFILER_PACKED_EVENT_CODE_METHOD_EVENT);
2507 } else if (event->data_type == MONO_PROFILER_EVENT_DATA_TYPE_CLASS) {
2508 ClassIdMappingElement *element = class_id_mapping_element_get (event->data.address);
2509 g_assert (element != NULL);
2510 event_data = element->id;
2512 if (event->code == MONO_PROFILER_EVENT_CLASS_ALLOCATION) {
2513 if ((! profiler->action_flags.save_allocation_caller) || (! (next->code == MONO_PROFILER_EVENT_METHOD_ALLOCATION_JIT_TIME_CALLER))) {
2514 MONO_PROFILER_EVENT_MAKE_PACKED_CODE (event_code, event_data, MONO_PROFILER_PACKED_EVENT_CODE_CLASS_ALLOCATION);
2516 MONO_PROFILER_EVENT_MAKE_FULL_CODE (event_code, MONO_PROFILER_EVENT_JIT_TIME_ALLOCATION, event->kind, MONO_PROFILER_PACKED_EVENT_CODE_OTHER_EVENT);
2519 if (profiler->action_flags.save_allocation_caller) {
2520 MonoMethod *caller_method = next->data.address;
2522 if ((next->code != MONO_PROFILER_EVENT_METHOD_ALLOCATION_CALLER) && (next->code != MONO_PROFILER_EVENT_METHOD_ALLOCATION_JIT_TIME_CALLER)) {
2523 g_assert_not_reached ();
2526 if (caller_method != NULL) {
2527 MethodIdMappingElement *caller = method_id_mapping_element_get (caller_method);
2528 g_assert (caller != NULL);
2529 event_value_extension_1 = caller->id;
2532 write_event_value_extension_1 = TRUE;
2536 if (profiler->action_flags.allocations_carry_id) {
2537 event_value_extension_2 = GPOINTER_TO_UINT (next->data.address);
2539 if (next->code != MONO_PROFILER_EVENT_ALLOCATION_OBJECT_ID) {
2540 g_assert_not_reached ();
2543 write_event_value_extension_2 = TRUE;
2546 } else if (event->code == MONO_PROFILER_EVENT_CLASS_MONITOR) {
2547 g_assert (next->code == MONO_PROFILER_EVENT_OBJECT_MONITOR);
2549 MONO_PROFILER_EVENT_MAKE_FULL_CODE (event_code, event->code, event->kind, MONO_PROFILER_PACKED_EVENT_CODE_CLASS_EVENT);
2550 event_value_extension_1 = next->value;
2551 write_event_value_extension_1 = TRUE;
2552 event_value_extension_2 = GPOINTER_TO_UINT (next->data.address);
2553 write_event_value_extension_2 = TRUE;
2556 MONO_PROFILER_EVENT_MAKE_FULL_CODE (event_code, event->code, event->kind, MONO_PROFILER_PACKED_EVENT_CODE_CLASS_EVENT);
2559 if (event->code == MONO_PROFILER_EVENT_STACK_SECTION) {
2560 return write_stack_section_event (event, data);
2562 event_data = event->data.number;
2563 MONO_PROFILER_EVENT_MAKE_FULL_CODE (event_code, event->code, event->kind, MONO_PROFILER_PACKED_EVENT_CODE_OTHER_EVENT);
2567 /* Skip writing JIT events if the user did not ask for them */
2568 if ((event->code == MONO_PROFILER_EVENT_METHOD_JIT) && ! profiler->action_flags.jit_time) {
2572 #if (DEBUG_LOGGING_PROFILER)
2574 printf ("writing EVENT[%p] data_type:%d, kind:%d, code:%d (%d:%ld:%ld)\n", event,
2575 event->data_type, event->kind, event->code,
2576 event_code, event_data, event_value);
2579 WRITE_BYTE (event_code);
2580 write_uint64 (event_data);
2581 if (write_event_value) {
2582 write_uint64 (event_value);
2583 if (write_event_value_extension_1) {
2584 write_uint64 (event_value_extension_1);
2586 if (write_event_value_extension_2) {
2587 write_uint64 (event_value_extension_2);
2595 write_thread_data_block (ProfilerPerThreadData *data) {
2596 ProfilerEventData *start = data->first_unwritten_event;
2597 ProfilerEventData *end = data->first_unmapped_event;
2601 #if (DEBUG_FILE_WRITES)
2602 printf ("write_thread_data_block: preparing buffer for thread %ld\n", (guint64) data->thread_id);
2604 write_clock_data ();
2605 write_uint64 (data->thread_id);
2607 write_uint64 (data->start_event_counter);
2609 /* If we are tracking the stack, make sure that stack sections */
2610 /* can be fully reconstructed even reading only one block */
2611 if (profiler->action_flags.track_stack) {
2612 rewrite_last_written_stack (&(data->stack));
2615 while (start < end) {
2616 start = write_event (start, data);
2619 data->first_unwritten_event = end;
2621 write_clock_data ();
2622 write_current_block (MONO_PROFILER_FILE_BLOCK_KIND_EVENTS);
2623 #if (DEBUG_FILE_WRITES)
2624 printf ("write_thread_data_block: buffer for thread %ld written\n", (guint64) data->thread_id);
2628 static ProfilerExecutableMemoryRegionData*
2629 profiler_executable_memory_region_new (gpointer *start, gpointer *end, guint32 file_offset, char *file_name, guint32 id) {
2630 ProfilerExecutableMemoryRegionData *result = g_new (ProfilerExecutableMemoryRegionData, 1);
2631 result->start = start;
2633 result->file_offset = file_offset;
2634 result->file_name = g_strdup (file_name);
2636 result->is_new = TRUE;
2638 result->file = NULL;
2639 result->file_region_reference = NULL;
2640 result->symbols_capacity = id;
2641 result->symbols_count = id;
2642 result->symbols = NULL;
2648 executable_file_close (ProfilerExecutableMemoryRegionData *region);
2651 profiler_executable_memory_region_destroy (ProfilerExecutableMemoryRegionData *data) {
2652 if (data->file != NULL) {
2653 executable_file_close (data);
2656 if (data->symbols != NULL) {
2657 g_free (data->symbols);
2658 data->symbols = NULL;
2660 if (data->file_name != NULL) {
2661 g_free (data->file_name);
2662 data->file_name = NULL;
2667 static ProfilerExecutableMemoryRegions*
2668 profiler_executable_memory_regions_new (int next_id, int next_unmanaged_function_id) {
2669 ProfilerExecutableMemoryRegions *result = g_new (ProfilerExecutableMemoryRegions, 1);
2670 result->regions = g_new0 (ProfilerExecutableMemoryRegionData*, 32);
2671 result->regions_capacity = 32;
2672 result->regions_count = 0;
2673 result->next_id = next_id;
2674 result->next_unmanaged_function_id = next_unmanaged_function_id;
2679 profiler_executable_memory_regions_destroy (ProfilerExecutableMemoryRegions *regions) {
2682 for (i = 0; i < regions->regions_count; i++) {
2683 profiler_executable_memory_region_destroy (regions->regions [i]);
2685 g_free (regions->regions);
2689 static ProfilerExecutableMemoryRegionData*
2690 find_address_region (ProfilerExecutableMemoryRegions *regions, gpointer address) {
2692 int high_index = regions->regions_count;
2693 int middle_index = 0;
2694 ProfilerExecutableMemoryRegionData *middle_region = regions->regions [0];
2696 if ((regions->regions_count == 0) || (regions->regions [low_index]->start > address) || (regions->regions [high_index - 1]->end < address)) {
2700 //printf ("find_address_region: Looking for address %p in %d regions (from %p to %p)\n", address, regions->regions_count, regions->regions [low_index]->start, regions->regions [high_index - 1]->end);
2702 while (low_index != high_index) {
2703 middle_index = low_index + ((high_index - low_index) / 2);
2704 middle_region = regions->regions [middle_index];
2706 //printf ("find_address_region: Looking for address %p, considering index %d[%p-%p] (%d-%d)\n", address, middle_index, middle_region->start, middle_region->end, low_index, high_index);
2708 if (middle_region->start > address) {
2709 if (middle_index > 0) {
2710 high_index = middle_index;
2714 } else if (middle_region->end < address) {
2715 if (middle_index < regions->regions_count - 1) {
2716 low_index = middle_index + 1;
2721 return middle_region;
2725 if ((middle_region == NULL) || (middle_region->start > address) || (middle_region->end < address)) {
2728 return middle_region;
2733 append_region (ProfilerExecutableMemoryRegions *regions, gpointer *start, gpointer *end, guint32 file_offset, char *file_name) {
2734 if (regions->regions_count >= regions->regions_capacity) {
2735 ProfilerExecutableMemoryRegionData **new_regions = g_new0 (ProfilerExecutableMemoryRegionData*, regions->regions_capacity * 2);
2736 memcpy (new_regions, regions->regions, regions->regions_capacity * sizeof (ProfilerExecutableMemoryRegionData*));
2737 g_free (regions->regions);
2738 regions->regions = new_regions;
2739 regions->regions_capacity = regions->regions_capacity * 2;
2741 regions->regions [regions->regions_count] = profiler_executable_memory_region_new (start, end, file_offset, file_name, regions->next_id);
2742 regions->regions_count ++;
2743 regions->next_id ++;
2747 regions_are_equivalent (ProfilerExecutableMemoryRegionData *region1, ProfilerExecutableMemoryRegionData *region2) {
2748 if ((region1->start == region2->start) &&
2749 (region1->end == region2->end) &&
2750 (region1->file_offset == region2->file_offset) &&
2751 ! strcmp (region1->file_name, region2->file_name)) {
2759 compare_regions (const void *a1, const void *a2) {
2760 ProfilerExecutableMemoryRegionData *r1 = * (ProfilerExecutableMemoryRegionData**) a1;
2761 ProfilerExecutableMemoryRegionData *r2 = * (ProfilerExecutableMemoryRegionData**) a2;
2762 return (r1->start < r2->start)? -1 : ((r1->start > r2->start)? 1 : 0);
2766 restore_old_regions (ProfilerExecutableMemoryRegions *old_regions, ProfilerExecutableMemoryRegions *new_regions) {
2770 for (new_i = 0; new_i < new_regions->regions_count; new_i++) {
2771 ProfilerExecutableMemoryRegionData *new_region = new_regions->regions [new_i];
2772 for (old_i = 0; old_i < old_regions->regions_count; old_i++) {
2773 ProfilerExecutableMemoryRegionData *old_region = old_regions->regions [old_i];
2774 if ( regions_are_equivalent (old_region, new_region)) {
2775 new_regions->regions [new_i] = old_region;
2776 old_regions->regions [old_i] = new_region;
2778 // FIXME (sanity check)
2779 g_assert (new_region->is_new && ! old_region->is_new);
2786 sort_regions (ProfilerExecutableMemoryRegions *regions) {
2787 if (regions->regions_count > 1) {
2790 qsort (regions->regions, regions->regions_count, sizeof (ProfilerExecutableMemoryRegionData *), compare_regions);
2793 while (i < regions->regions_count) {
2794 ProfilerExecutableMemoryRegionData *current_region = regions->regions [i];
2795 ProfilerExecutableMemoryRegionData *previous_region = regions->regions [i - 1];
2797 if (regions_are_equivalent (previous_region, current_region)) {
2800 if (! current_region->is_new) {
2801 profiler_executable_memory_region_destroy (previous_region);
2802 regions->regions [i - 1] = current_region;
2804 profiler_executable_memory_region_destroy (current_region);
2807 for (j = i + 1; j < regions->regions_count; j++) {
2808 regions->regions [j - 1] = regions->regions [j];
2811 regions->regions_count --;
2820 fix_region_references (ProfilerExecutableMemoryRegions *regions) {
2822 for (i = 0; i < regions->regions_count; i++) {
2823 ProfilerExecutableMemoryRegionData *region = regions->regions [i];
2824 if (region->file_region_reference != NULL) {
2825 region->file_region_reference->region = region;
2831 executable_file_add_region_reference (ProfilerExecutableFile *file, ProfilerExecutableMemoryRegionData *region) {
2832 guint8 *section_headers = file->data + file->header->e_shoff;
2835 for (section_index = 1; section_index < file->header->e_shnum; section_index ++) {
2836 ElfSection *section_header = (ElfSection*) (section_headers + (file->header->e_shentsize * section_index));
2838 if ((section_header->sh_addr != 0) && (section_header->sh_flags & ELF_SHF_EXECINSTR) &&
2839 (region->file_offset <= section_header->sh_offset) && (region->file_offset + (((guint8*)region->end)-((guint8*)region->start)) >= (section_header->sh_offset + section_header->sh_size))) {
2840 ProfilerExecutableFileSectionRegion *section_region = & (file->section_regions [section_index]);
2841 section_region->region = region;
2842 section_region->section_address = (gpointer) section_header->sh_addr;
2843 section_region->section_offset = section_header->sh_offset;
2844 region->file_region_reference = section_region;
2849 static gboolean check_elf_header (ElfHeader* header) {
2850 guint16 test = 0x0102;
2852 if ((header->e_ident [EI_MAG0] != 0x7f) || (header->e_ident [EI_MAG1] != 'E') ||
2853 (header->e_ident [EI_MAG2] != 'L') || (header->e_ident [EI_MAG3] != 'F')) {
2857 if (sizeof (gsize) == 4) {
2858 if (header->e_ident [EI_CLASS] != ELF_CLASS_32) {
2859 g_warning ("Class is not ELF_CLASS_32 with gsize size %d", (int) sizeof (gsize));
2862 } else if (sizeof (gsize) == 8) {
2863 if (header->e_ident [EI_CLASS] != ELF_CLASS_64) {
2864 g_warning ("Class is not ELF_CLASS_64 with gsize size %d", (int) sizeof (gsize));
2868 g_warning ("Absurd gsize size %d", (int) sizeof (gsize));
2872 if ((*(guint8*)(&test)) == 0x01) {
2873 if (header->e_ident [EI_DATA] != ELF_DATA_MSB) {
2874 g_warning ("Data is not ELF_DATA_MSB with first test byte 0x01");
2877 } else if ((*(guint8*)(&test)) == 0x02) {
2878 if (header->e_ident [EI_DATA] != ELF_DATA_LSB) {
2879 g_warning ("Data is not ELF_DATA_LSB with first test byte 0x02");
2883 g_warning ("Absurd test byte value");
2890 static gboolean check_elf_file (int fd) {
2891 void *header = malloc (sizeof (ElfHeader));
2892 ssize_t read_result = read (fd, header, sizeof (ElfHeader));
2895 if (read_result != sizeof (ElfHeader)) {
2898 result = check_elf_header ((ElfHeader*) header);
2905 static ProfilerExecutableFile*
2906 executable_file_open (ProfilerExecutableMemoryRegionData *region) {
2907 ProfilerExecutableFiles *files = & (profiler->executable_files);
2908 ProfilerExecutableFile *file = region->file;
2911 file = (ProfilerExecutableFile*) g_hash_table_lookup (files->table, region->file_name);
2914 struct stat stat_buffer;
2915 int symtab_index = 0;
2916 int strtab_index = 0;
2917 int dynsym_index = 0;
2918 int dynstr_index = 0;
2920 guint8 *section_headers;
2924 file = g_new0 (ProfilerExecutableFile, 1);
2925 region->file = file;
2926 g_hash_table_insert (files->table, region->file_name, file);
2927 file->reference_count ++;
2928 file->next_new_file = files->new_files;
2929 files->new_files = file;
2931 file->fd = open (region->file_name, O_RDONLY);
2932 if (file->fd == -1) {
2933 //g_warning ("Cannot open file '%s': '%s'", region->file_name, strerror (errno));
2936 if (fstat (file->fd, &stat_buffer) != 0) {
2937 //g_warning ("Cannot stat file '%s': '%s'", region->file_name, strerror (errno));
2939 } else if (! check_elf_file (file->fd)) {
2942 size_t region_length = ((guint8*)region->end) - ((guint8*)region->start);
2943 file->length = stat_buffer.st_size;
2945 if (file->length == region_length) {
2946 file->data = region->start;
2950 file->data = mmap (NULL, file->length, PROT_READ, MAP_PRIVATE, file->fd, 0);
2952 if (file->data == MAP_FAILED) {
2954 //g_warning ("Cannot map file '%s': '%s'", region->file_name, strerror (errno));
2962 /* OK, this is a usable elf file, and we mmapped it... */
2963 header = (ElfHeader*) file->data;
2964 file->header = header;
2965 section_headers = file->data + file->header->e_shoff;
2966 file->main_string_table = ((const char*) file->data) + (((ElfSection*) (section_headers + (header->e_shentsize * header->e_shstrndx)))->sh_offset);
2968 for (section_index = 0; section_index < header->e_shnum; section_index ++) {
2969 ElfSection *section_header = (ElfSection*) (section_headers + (header->e_shentsize * section_index));
2971 if (section_header->sh_type == ELF_SHT_SYMTAB) {
2972 symtab_index = section_index;
2973 } else if (section_header->sh_type == ELF_SHT_DYNSYM) {
2974 dynsym_index = section_index;
2975 } else if (section_header->sh_type == ELF_SHT_STRTAB) {
2976 if (! strcmp (file->main_string_table + section_header->sh_name, ".strtab")) {
2977 strtab_index = section_index;
2978 } else if (! strcmp (file->main_string_table + section_header->sh_name, ".dynstr")) {
2979 dynstr_index = section_index;
2984 if ((symtab_index != 0) && (strtab_index != 0)) {
2985 section_index = symtab_index;
2986 strings_index = strtab_index;
2987 } else if ((dynsym_index != 0) && (dynstr_index != 0)) {
2988 section_index = dynsym_index;
2989 strings_index = dynstr_index;
2995 if (section_index != 0) {
2996 ElfSection *section_header = (ElfSection*) (section_headers + (header->e_shentsize * section_index));
2997 file->symbol_size = section_header->sh_entsize;
2998 file->symbols_count = (guint32) (section_header->sh_size / section_header->sh_entsize);
2999 file->symbols_start = file->data + section_header->sh_offset;
3000 file->symbols_string_table = ((const char*) file->data) + (((ElfSection*) (section_headers + (header->e_shentsize * strings_index)))->sh_offset);
3003 file->section_regions = g_new0 (ProfilerExecutableFileSectionRegion, file->header->e_shnum);
3005 region->file = file;
3006 file->reference_count ++;
3010 if (file->header != NULL) {
3011 executable_file_add_region_reference (file, region);
3018 executable_file_free (ProfilerExecutableFile* file) {
3019 if (file->fd != -1) {
3020 if (close (file->fd) != 0) {
3021 g_warning ("Cannot close file: '%s'", strerror (errno));
3023 if (file->data != NULL) {
3024 if (munmap (file->data, file->length) != 0) {
3025 g_warning ("Cannot unmap file: '%s'", strerror (errno));
3029 if (file->section_regions != NULL) {
3030 g_free (file->section_regions);
3031 file->section_regions = NULL;
3037 executable_file_close (ProfilerExecutableMemoryRegionData *region) {
3038 region->file->reference_count --;
3040 if ((region->file_region_reference != NULL) && (region->file_region_reference->region == region)) {
3041 region->file_region_reference->region = NULL;
3042 region->file_region_reference->section_address = 0;
3043 region->file_region_reference->section_offset = 0;
3046 if (region->file->reference_count <= 0) {
3047 ProfilerExecutableFiles *files = & (profiler->executable_files);
3048 g_hash_table_remove (files->table, region->file_name);
3049 executable_file_free (region->file);
3050 region->file = NULL;
3055 executable_file_count_symbols (ProfilerExecutableFile *file) {
3058 for (symbol_index = 0; symbol_index < file->symbols_count; symbol_index ++) {
3059 ElfSymbol *symbol = (ElfSymbol*) (file->symbols_start + (symbol_index * file->symbol_size));
3061 if ((ELF_ST_TYPE (symbol->st_info) == ELF_STT_FUNC) &&
3062 (symbol->st_shndx > 0) &&
3063 (symbol->st_shndx < file->header->e_shnum)) {
3064 int symbol_section_index = symbol->st_shndx;
3065 ProfilerExecutableMemoryRegionData *region = file->section_regions [symbol_section_index].region;
3066 if ((region != NULL) && (region->symbols == NULL)) {
3067 region->symbols_count ++;
3074 executable_memory_regions_prepare_symbol_tables (ProfilerExecutableMemoryRegions *regions) {
3076 for (i = 0; i < regions->regions_count; i++) {
3077 ProfilerExecutableMemoryRegionData *region = regions->regions [i];
3078 if ((region->symbols_count > 0) && (region->symbols == NULL)) {
3079 region->symbols = g_new (ProfilerUnmanagedSymbol, region->symbols_count);
3080 region->symbols_capacity = region->symbols_count;
3081 region->symbols_count = 0;
3087 executable_region_symbol_get_name (ProfilerExecutableMemoryRegionData *region, ProfilerUnmanagedSymbol *symbol) {
3088 ElfSymbol *elf_symbol = (ElfSymbol*) (region->file->symbols_start + (symbol->index * region->file->symbol_size));
3089 return region->file->symbols_string_table + elf_symbol->st_name;
3093 executable_file_build_symbol_tables (ProfilerExecutableFile *file) {
3096 for (symbol_index = 0; symbol_index < file->symbols_count; symbol_index ++) {
3097 ElfSymbol *symbol = (ElfSymbol*) (file->symbols_start + (symbol_index * file->symbol_size));
3099 if ((ELF_ST_TYPE (symbol->st_info) == ELF_STT_FUNC) &&
3100 (symbol->st_shndx > 0) &&
3101 (symbol->st_shndx < file->header->e_shnum)) {
3102 int symbol_section_index = symbol->st_shndx;
3103 ProfilerExecutableFileSectionRegion *section_region = & (file->section_regions [symbol_section_index]);
3104 ProfilerExecutableMemoryRegionData *region = section_region->region;
3106 if (region != NULL) {
3107 ProfilerUnmanagedSymbol *new_symbol = & (region->symbols [region->symbols_count]);
3108 region->symbols_count ++;
3111 new_symbol->index = symbol_index;
3112 new_symbol->size = symbol->st_size;
3113 new_symbol->offset = (((guint8*) symbol->st_value) - section_region->section_address) - (region->file_offset - section_region->section_offset);
3120 compare_region_symbols (const void *p1, const void *p2) {
3121 const ProfilerUnmanagedSymbol *s1 = p1;
3122 const ProfilerUnmanagedSymbol *s2 = p2;
3123 return (s1->offset < s2->offset)? -1 : ((s1->offset > s2->offset)? 1 : 0);
3127 executable_memory_regions_sort_symbol_tables (ProfilerExecutableMemoryRegions *regions) {
3129 for (i = 0; i < regions->regions_count; i++) {
3130 ProfilerExecutableMemoryRegionData *region = regions->regions [i];
3131 if ((region->is_new) && (region->symbols != NULL)) {
3132 qsort (region->symbols, region->symbols_count, sizeof (ProfilerUnmanagedSymbol), compare_region_symbols);
3138 build_symbol_tables (ProfilerExecutableMemoryRegions *regions, ProfilerExecutableFiles *files) {
3140 ProfilerExecutableFile *file;
3142 for (i = 0; i < regions->regions_count; i++) {
3143 ProfilerExecutableMemoryRegionData *region = regions->regions [i];
3144 if ((region->is_new) && (region->file == NULL)) {
3145 executable_file_open (region);
3149 for (file = files->new_files; file != NULL; file = file->next_new_file) {
3150 executable_file_count_symbols (file);
3153 executable_memory_regions_prepare_symbol_tables (regions);
3155 for (file = files->new_files; file != NULL; file = file->next_new_file) {
3156 executable_file_build_symbol_tables (file);
3159 executable_memory_regions_sort_symbol_tables (regions);
3161 file = files->new_files;
3162 while (file != NULL) {
3163 ProfilerExecutableFile *next_file = file->next_new_file;
3164 file->next_new_file = NULL;
3167 files->new_files = NULL;
3170 static ProfilerUnmanagedSymbol*
3171 executable_memory_region_find_symbol (ProfilerExecutableMemoryRegionData *region, guint32 offset) {
3172 if (region->symbols_count > 0) {
3173 ProfilerUnmanagedSymbol *low = region->symbols;
3174 ProfilerUnmanagedSymbol *high = region->symbols + (region->symbols_count - 1);
3175 int step = region->symbols_count >> 1;
3176 ProfilerUnmanagedSymbol *current = region->symbols + step;
3179 step = (high - low) >> 1;
3181 if (offset < current->offset) {
3183 current = high - step;
3184 } else if (offset >= current->offset) {
3185 if (offset >= (current->offset + current->size)) {
3187 current = low + step;
3194 if ((offset >= current->offset) && (offset < (current->offset + current->size))) {
3204 //FIXME: make also Win32 and BSD variants
3205 #define MAPS_BUFFER_SIZE 4096
3206 #define MAPS_FILENAME_SIZE 2048
3209 update_regions_buffer (int fd, char *buffer) {
3210 ssize_t result = read (fd, buffer, MAPS_BUFFER_SIZE);
3212 if (result == MAPS_BUFFER_SIZE) {
3214 } else if (result >= 0) {
3215 *(buffer + result) = 0;
3223 #define GOTO_NEXT_CHAR(c,b,fd) do {\
3225 if (((c) - (b) >= MAPS_BUFFER_SIZE) || ((*(c) == 0) && ((c) != (b)))) {\
3226 update_regions_buffer ((fd), (b));\
3231 static int hex_digit_value (char c) {
3232 if ((c >= '0') && (c <= '9')) {
3234 } else if ((c >= 'a') && (c <= 'f')) {
3235 return c - 'a' + 10;
3236 } else if ((c >= 'A') && (c <= 'F')) {
3237 return c - 'A' + 10;
3259 MAP_LINE_PARSER_STATE_INVALID,
3260 MAP_LINE_PARSER_STATE_START_ADDRESS,
3261 MAP_LINE_PARSER_STATE_END_ADDRESS,
3262 MAP_LINE_PARSER_STATE_PERMISSIONS,
3263 MAP_LINE_PARSER_STATE_OFFSET,
3264 MAP_LINE_PARSER_STATE_DEVICE,
3265 MAP_LINE_PARSER_STATE_INODE,
3266 MAP_LINE_PARSER_STATE_BLANK_BEFORE_FILENAME,
3267 MAP_LINE_PARSER_STATE_FILENAME,
3268 MAP_LINE_PARSER_STATE_DONE
3269 } MapLineParserState;
3271 const char *map_line_parser_state [] = {
3279 "BLANK_BEFORE_FILENAME",
// Parse one line of /proc/self/maps with a character-driven state machine.
// Executable ("x" permission) mappings are appended to *regions; the parsed
// filename is accumulated into the caller-supplied filename buffer.
// Returns the updated cursor (presumably NULL at end of input -- the return
// statements fall outside this excerpt, TODO confirm).
// NOTE(review): the surrounding loop header, the "switch (state)" line, the
// per-case "if (isxdigit (c))" guards, break statements and closing braces
// are all missing from this line-sampled excerpt.
3285 parse_map_line (ProfilerExecutableMemoryRegions *regions, int fd, char *buffer, char *filename, char *current) {
3286 MapLineParserState state = MAP_LINE_PARSER_STATE_START_ADDRESS;
3287 gsize start_address = 0;
3288 gsize end_address = 0;
3290 int filename_index = 0;
3291 gboolean is_executable = FALSE;
3292 gboolean done = FALSE;
// Accumulate the hex start address nibble by nibble; '-' separates it from
// the end address.
3298 case MAP_LINE_PARSER_STATE_START_ADDRESS:
3300 start_address <<= 4;
3301 start_address |= hex_digit_value (c);
3302 } else if (c == '-') {
3303 state = MAP_LINE_PARSER_STATE_END_ADDRESS;
3305 state = MAP_LINE_PARSER_STATE_INVALID;
3308 case MAP_LINE_PARSER_STATE_END_ADDRESS:
3311 end_address |= hex_digit_value (c);
3312 } else if (isblank (c)) {
3313 state = MAP_LINE_PARSER_STATE_PERMISSIONS;
3315 state = MAP_LINE_PARSER_STATE_INVALID;
// Permission field: only the 'x' flag matters to the profiler (the test for
// c == 'x' falls on a missing line -- TODO confirm).
3318 case MAP_LINE_PARSER_STATE_PERMISSIONS:
3320 is_executable = TRUE;
3321 } else if (isblank (c)) {
3322 state = MAP_LINE_PARSER_STATE_OFFSET;
3323 } else if ((c != '-') && ! isalpha (c)) {
3324 state = MAP_LINE_PARSER_STATE_INVALID;
3327 case MAP_LINE_PARSER_STATE_OFFSET:
3330 offset |= hex_digit_value (c);
3331 } else if (isblank (c)) {
3332 state = MAP_LINE_PARSER_STATE_DEVICE;
3334 state = MAP_LINE_PARSER_STATE_INVALID;
// Device field is "major:minor" in hex; it is skipped, not stored.
3337 case MAP_LINE_PARSER_STATE_DEVICE:
3339 state = MAP_LINE_PARSER_STATE_INODE;
3340 } else if ((c != ':') && ! isxdigit (c)) {
3341 state = MAP_LINE_PARSER_STATE_INVALID;
3344 case MAP_LINE_PARSER_STATE_INODE:
3346 state = MAP_LINE_PARSER_STATE_BLANK_BEFORE_FILENAME;
3347 } else if (! isdigit (c)) {
3348 state = MAP_LINE_PARSER_STATE_INVALID;
// Pathname starts with '/' (file-backed) or '[' (pseudo-entries like
// [vdso]); anything else after the blanks invalidates the line.
3351 case MAP_LINE_PARSER_STATE_BLANK_BEFORE_FILENAME:
3352 if ((c == '/') || (c == '[')) {
3353 state = MAP_LINE_PARSER_STATE_FILENAME;
3354 filename [filename_index] = *current;
3356 } else if (! isblank (c)) {
3357 state = MAP_LINE_PARSER_STATE_INVALID;
3360 case MAP_LINE_PARSER_STATE_FILENAME:
3361 if (filename_index < MAPS_FILENAME_SIZE) {
3363 state = MAP_LINE_PARSER_STATE_DONE;
3365 filename [filename_index] = 0;
3367 filename [filename_index] = *current;
// NOTE(review): this overflow branch appears to run with
// filename_index == MAPS_FILENAME_SIZE, which would write the terminator
// one past the end of the buffer -- verify against the full source.
3371 filename [filename_index] = 0;
3372 g_warning ("ELF filename too long: \"%s\"...\n", filename);
3375 case MAP_LINE_PARSER_STATE_DONE:
// Only executable mappings are recorded.
3376 if (done && is_executable) {
3377 filename [filename_index] = 0;
3378 append_region (regions, (gpointer) start_address, (gpointer) end_address, offset, filename);
// Invalid lines are consumed up to the newline, then treated as done.
3381 case MAP_LINE_PARSER_STATE_INVALID:
3383 state = MAP_LINE_PARSER_STATE_DONE;
3390 } else if (c == '\n') {
3391 state = MAP_LINE_PARSER_STATE_DONE;
3394 GOTO_NEXT_CHAR(current, buffer, fd);
// Scan /proc/self/maps and append every executable mapping to *regions.
// Linux-only (see the FIXME about Win32/BSD variants earlier in the file).
// NOTE(review): the malloc results are used unchecked in the visible lines;
// the missing lines (fd error check, loop setup, free/close teardown) may or
// may not validate them -- confirm against the full source.
3400 scan_process_regions (ProfilerExecutableMemoryRegions *regions) {
3406 fd = open ("/proc/self/maps", O_RDONLY);
3411 buffer = malloc (MAPS_BUFFER_SIZE);
3412 filename = malloc (MAPS_FILENAME_SIZE);
3413 update_regions_buffer (fd, buffer);
// parse_map_line consumes one line per call and returns the new cursor;
// NULL terminates the loop.
3415 while (current != NULL) {
3416 current = parse_map_line (regions, fd, buffer, filename, current);
// Tag codes for records in a statistical-sample block; the low three bits of
// each written uint32 carry one of these codes (see write_statistical_hit,
// which shifts ids left by 3 before OR-ing the code in).
// NOTE(review): value 6 is unused here -- either a retired code or a line
// missing from this excerpt; the "typedef enum {" opener is also not visible.
3428 MONO_PROFILER_STATISTICAL_CODE_END = 0,
3429 MONO_PROFILER_STATISTICAL_CODE_METHOD = 1,
3430 MONO_PROFILER_STATISTICAL_CODE_UNMANAGED_FUNCTION_ID = 2,
3431 MONO_PROFILER_STATISTICAL_CODE_UNMANAGED_FUNCTION_NEW_ID = 3,
3432 MONO_PROFILER_STATISTICAL_CODE_UNMANAGED_FUNCTION_OFFSET_IN_REGION = 4,
3433 MONO_PROFILER_STATISTICAL_CODE_CALL_CHAIN = 5,
3434 MONO_PROFILER_STATISTICAL_CODE_REGIONS = 7
3435 } MonoProfilerStatisticalCode;
// Rebuild the executable-memory-region table from /proc/self/maps, diff it
// against the previous snapshot, and emit the delta (removed region ids,
// then new region descriptors) into the output stream, prefixed by the
// REGIONS sub-block marker. Runs on the writer thread.
// NOTE(review): line-sampled excerpt -- the declaration of i/symbol_index,
// several #endif lines and closing braces are not visible.
3438 refresh_memory_regions (void) {
3439 ProfilerExecutableMemoryRegions *old_regions = profiler->executable_regions;
3440 ProfilerExecutableMemoryRegions *new_regions = profiler_executable_memory_regions_new (old_regions->next_id, old_regions->next_unmanaged_function_id);
3443 LOG_WRITER_THREAD ("Refreshing memory regions...");
3444 scan_process_regions (new_regions);
3445 sort_regions (new_regions);
// Carry over ids/symbols of regions that survived unchanged, so that
// previously emitted region ids stay valid.
3446 restore_old_regions (old_regions, new_regions);
3447 fix_region_references (new_regions);
3448 LOG_WRITER_THREAD ("Refreshed memory regions.");
3450 LOG_WRITER_THREAD ("Building symbol tables...");
3451 build_symbol_tables (new_regions, & (profiler->executable_files));
// The printf dumps below are debug output (presumably guarded by a
// DEBUG_* conditional on the missing lines -- TODO confirm).
3453 printf ("Symbol tables done!\n");
3454 printf ("Region summary...\n");
3455 for (i = 0; i < new_regions->regions_count; i++) {
3456 ProfilerExecutableMemoryRegionData *region = new_regions->regions [i];
3457 printf ("Region %d[%d][NEW:%d] (%p-%p) at %d in file %s\n", i, region->id, region->is_new,
3458 region->start, region->end, region->file_offset, region->file_name);
3460 printf ("New symbol tables dump...\n");
3461 for (i = 0; i < new_regions->regions_count; i++) {
3462 ProfilerExecutableMemoryRegionData *region = new_regions->regions [i];
3464 if (region->is_new) {
3467 printf ("Region %d[%d][NEW:%d] (%p-%p) at %d in file %s\n", i, region->id, region->is_new,
3468 region->start, region->end, region->file_offset, region->file_name);
3469 for (symbol_index = 0; symbol_index < region->symbols_count; symbol_index ++) {
3470 ProfilerUnmanagedSymbol *symbol = & (region->symbols [symbol_index]);
3471 printf (" [%d] Symbol %s (offset %d, size %d)\n", symbol_index,
3472 executable_region_symbol_get_name (region, symbol),
3473 symbol->offset, symbol->size);
3478 LOG_WRITER_THREAD ("Built symbol tables.");
3480 // This marks the region "sub-block"
3481 write_uint32 (MONO_PROFILER_STATISTICAL_CODE_REGIONS);
3483 // First write the "removed" regions
3484 for (i = 0; i < old_regions->regions_count; i++) {
3485 ProfilerExecutableMemoryRegionData *region = old_regions->regions [i];
3486 if (! region->is_new) {
3487 #if DEBUG_STATISTICAL_PROFILER
3488 printf ("[refresh_memory_regions] Invalidated region %d\n", region->id);
3490 write_uint32 (region->id);
3495 // Then write the new ones
3496 for (i = 0; i < new_regions->regions_count; i++) {
3497 ProfilerExecutableMemoryRegionData *region = new_regions->regions [i];
3498 if (region->is_new) {
// Clear the flag so the region is not re-emitted on the next refresh.
3499 region->is_new = FALSE;
3501 #if DEBUG_STATISTICAL_PROFILER
3502 printf ("[refresh_memory_regions] Wrote region %d (%p-%p[%d] '%s')\n", region->id, region->start, region->end, region->file_offset, region->file_name);
3504 write_uint32 (region->id);
3505 write_uint64 (GPOINTER_TO_UINT (region->start));
// Region size is written as a 32-bit delta, start as a 64-bit address.
3506 write_uint32 (GPOINTER_TO_UINT (region->end) - GPOINTER_TO_UINT (region->start));
3507 write_uint32 (region->file_offset);
3508 write_string (region->file_name);
3513 // Finally, free the old ones, and replace them
3514 profiler_executable_memory_regions_destroy (old_regions);
3515 profiler->executable_regions = new_regions;
// Emit one statistical (sampling) hit. Managed hits are resolved through the
// JIT info table to a method id; unmanaged hits are resolved against the
// executable-region/symbol tables, refreshing the region snapshot at most
// once per block (regions_refreshed is threaded through and returned so the
// caller can avoid repeated refreshes).
// NOTE(review): several #endif lines and closing braces fall on lines
// missing from this excerpt.
3519 write_statistical_hit (MonoDomain *domain, gpointer address, gboolean regions_refreshed) {
3520 MonoJitInfo *ji = (domain != NULL) ? mono_jit_info_table_find (domain, (char*) address) : NULL;
3523 MonoMethod *method = mono_jit_info_get_method (ji);
3524 MethodIdMappingElement *element = method_id_mapping_element_get (method);
3526 if (element != NULL) {
3527 #if DEBUG_STATISTICAL_PROFILER
3528 printf ("[write_statistical_hit] Wrote method %d\n", element->id);
// Record format: (id << 3) | code -- the low 3 bits carry the code.
3530 write_uint32 ((element->id << 3) | MONO_PROFILER_STATISTICAL_CODE_METHOD);
3532 #if DEBUG_STATISTICAL_PROFILER
3533 printf ("[write_statistical_hit] Wrote unknown method %p\n", method);
// Method with no id mapping yet: code with id 0 means "unknown method".
3535 write_uint32 (MONO_PROFILER_STATISTICAL_CODE_METHOD);
3538 ProfilerExecutableMemoryRegionData *region = find_address_region (profiler->executable_regions, address);
// Unknown address: rescan /proc/self/maps once (dlopen may have mapped
// new code since the last refresh), then retry the lookup.
3540 if (region == NULL && ! regions_refreshed) {
3541 #if DEBUG_STATISTICAL_PROFILER
3542 printf ("[write_statistical_hit] Cannot find region for address %p, refreshing...\n", address);
3544 refresh_memory_regions ();
3545 regions_refreshed = TRUE;
3546 region = find_address_region (profiler->executable_regions, address);
3549 if (region != NULL) {
3550 guint32 offset = ((guint8*)address) - ((guint8*)region->start);
3551 ProfilerUnmanagedSymbol *symbol = executable_memory_region_find_symbol (region, offset);
3553 if (symbol != NULL) {
// id > 0 means the symbol was already assigned an id and emitted.
3554 if (symbol->id > 0) {
3555 #if DEBUG_STATISTICAL_PROFILER
3556 printf ("[write_statistical_hit] Wrote unmanaged symbol %d\n", symbol->id);
3558 write_uint32 ((symbol->id << 3) | MONO_PROFILER_STATISTICAL_CODE_UNMANAGED_FUNCTION_ID);
// First hit on this symbol: assign a fresh id and emit the
// region id, new symbol id and symbol name inline.
3560 ProfilerExecutableMemoryRegions *regions = profiler->executable_regions;
3561 const char *symbol_name = executable_region_symbol_get_name (region, symbol);
3562 symbol->id = regions->next_unmanaged_function_id;
3563 regions->next_unmanaged_function_id ++;
3564 #if DEBUG_STATISTICAL_PROFILER
3565 printf ("[write_statistical_hit] Wrote new unmanaged symbol in region %d[%d]\n", region->id, offset);
3567 write_uint32 ((region->id << 3) | MONO_PROFILER_STATISTICAL_CODE_UNMANAGED_FUNCTION_NEW_ID);
3568 write_uint32 (symbol->id);
3569 write_string (symbol_name);
// Region found but no symbol: emit region id + raw offset.
3572 #if DEBUG_STATISTICAL_PROFILER
3573 printf ("[write_statistical_hit] Wrote unknown unmanaged hit in region %d[%d] (address %p)\n", region->id, offset, address);
3575 write_uint32 ((region->id << 3) | MONO_PROFILER_STATISTICAL_CODE_UNMANAGED_FUNCTION_OFFSET_IN_REGION);
3576 write_uint32 (offset);
// No region at all: emit the bare 64-bit address (region id 0).
3579 #if DEBUG_STATISTICAL_PROFILER
3580 printf ("[write_statistical_hit] Wrote unknown unmanaged hit %p\n", address);
3582 write_uint32 (MONO_PROFILER_STATISTICAL_CODE_UNMANAGED_FUNCTION_OFFSET_IN_REGION);
3583 write_uint64 (GPOINTER_TO_UINT (address));
3587 return regions_refreshed;
3591 flush_all_mappings (void);
// Drain the unwritten portion of the statistical sample buffer into one
// STATISTICAL file block: clock data, then one hit record (plus optional
// call chain) per sample, then an END code and closing clock data.
// Each sample occupies (call_chain_depth + 1) consecutive slots in
// data->hits: the hit itself followed by its captured callers.
3594 write_statistical_data_block (ProfilerStatisticalData *data) {
3595 MonoThread *current_thread = mono_thread_current ();
3596 int start_index = data->first_unwritten_index;
3597 int end_index = data->next_free_index;
3598 gboolean regions_refreshed = FALSE;
3599 int call_chain_depth = profiler->statistical_call_chain_depth;
// Clamp to the buffer end (the writer may lag behind a wrapped producer).
3602 if (end_index > data->end_index)
3603 end_index = data->end_index;
3605 if (start_index == end_index)
3608 data->first_unwritten_index = end_index;
3610 write_clock_data ();
3612 #if DEBUG_STATISTICAL_PROFILER
3613 printf ("[write_statistical_data_block] Starting loop at index %d\n", start_index);
3616 for (index = start_index; index < end_index; index ++) {
3617 int base_index = index * (call_chain_depth + 1);
3618 ProfilerStatisticalHit hit = data->hits [base_index];
// Domain is only trusted while a current thread exists (otherwise the
// JIT info lookup inside write_statistical_hit is skipped).
3621 regions_refreshed = write_statistical_hit ((current_thread != NULL) ? hit.domain : NULL, hit.address, regions_refreshed);
// First pass: count the captured callers (NULL address ends the chain).
// NOTE(review): indexing starts at base_index + 0, which re-reads the
// hit itself rather than the first caller slot; the missing lines may
// adjust this (e.g. base_index + 1) -- verify against the full source.
3624 for (callers_count = 0; callers_count < call_chain_depth; callers_count ++) {
3625 hit = data->hits [base_index + callers_count];
3626 if (hit.address == NULL) {
3631 if (callers_count > 0) {
3632 write_uint32 ((callers_count << 3) | MONO_PROFILER_STATISTICAL_CODE_CALL_CHAIN);
// Second pass: emit each caller as its own hit record.
3634 for (callers_count = 0; callers_count < call_chain_depth; callers_count ++) {
3635 hit = data->hits [base_index + callers_count];
3636 if (hit.address != NULL) {
3637 regions_refreshed = write_statistical_hit ((current_thread != NULL) ? hit.domain : NULL, hit.address, regions_refreshed);
3644 write_uint32 (MONO_PROFILER_STATISTICAL_CODE_END);
3646 #if DEBUG_STATISTICAL_PROFILER
3647 printf ("[write_statistical_data_block] Ending loop at index %d\n", end_index);
3649 write_clock_data ();
3651 write_current_block (MONO_PROFILER_FILE_BLOCK_KIND_STATISTICAL);
// Emit the INTRO file block: magic string, profiler flags and the start
// counter/time pair that later clock deltas are relative to.
3655 write_intro_block (void) {
3657 write_string ("mono");
3658 write_uint32 (profiler->flags);
3659 write_uint64 (profiler->start_counter);
3660 write_uint64 (profiler->start_time);
3661 write_current_block (MONO_PROFILER_FILE_BLOCK_KIND_INTRO);
// Emit the END file block with the final counter/time pair, closing the
// profile file (counterpart of write_intro_block).
3665 write_end_block (void) {
3667 write_uint64 (profiler->end_counter);
3668 write_uint64 (profiler->end_time);
3669 write_current_block (MONO_PROFILER_FILE_BLOCK_KIND_END);
// Walk the events of one thread that have not been mapped yet and make sure
// every class/method they reference has an id-mapping element, creating
// missing ones. Events up to next_free_event are then considered mapped.
// NOTE(review): the loop advance (start++ plus the extra slot consumed by
// MAX_EVENT_VALUE extensions, see line 3701) falls on missing lines.
3673 update_mapping (ProfilerPerThreadData *data) {
3674 ProfilerEventData *start = data->first_unmapped_event;
3675 ProfilerEventData *end = data->next_free_event;
3676 data->first_unmapped_event = end;
3678 #if (DEBUG_LOGGING_PROFILER)
3679 printf ("[update_mapping][TID %ld] START\n", data->thread_id);
3681 while (start < end) {
3682 #if DEBUG_LOGGING_PROFILER
3683 printf ("Examining event %p[TID %ld] looking for a new mapping...\n", start, data->thread_id);
3685 if (start->data_type == MONO_PROFILER_EVENT_DATA_TYPE_CLASS) {
3686 ClassIdMappingElement *element = class_id_mapping_element_get (start->data.address);
3687 if (element == NULL) {
3688 MonoClass *klass = start->data.address;
3689 class_id_mapping_element_new (klass);
3691 } else if (start->data_type == MONO_PROFILER_EVENT_DATA_TYPE_METHOD) {
3692 MethodIdMappingElement *element = method_id_mapping_element_get (start->data.address);
3693 if (element == NULL) {
3694 MonoMethod *method = start->data.address;
// Method events can legitimately carry NULL (e.g. unknown caller).
3695 if (method != NULL) {
3696 method_id_mapping_element_new (method);
// MAX_EVENT_VALUE marks an event whose value lives in an extension slot
// immediately after it; that slot must be skipped too.
3701 if (start->value == MAX_EVENT_VALUE) {
3706 #if (DEBUG_LOGGING_PROFILER)
3707 printf ("[update_mapping][TID %ld] END\n", data->thread_id);
// Create id mappings for every per-thread event buffer, then write one
// MAPPING block per thread. Two passes: mappings must all exist before any
// block is written, because id definitions can come from other threads.
3712 flush_all_mappings (void) {
3713 ProfilerPerThreadData *data;
3715 for (data = profiler->per_thread_data; data != NULL; data = data->next) {
3716 update_mapping (data);
3718 for (data = profiler->per_thread_data; data != NULL; data = data->next) {
3719 write_mapping_block (data->thread_id);
// Called when a thread's event buffer is full: flush all mappings, write the
// thread's data block, then reset every buffer cursor to the start and
// restart the event counter baseline.
3724 flush_full_event_data_buffer (ProfilerPerThreadData *data) {
3727 // We flush all mappings because some id definitions could come
3728 // from other threads
3729 flush_all_mappings ();
// NOTE(review): ">=" here asserts first_unmapped_event is NOT behind
// next_free_event after the flush; double-check the intended direction
// of this invariant against the full source.
3730 g_assert (data->first_unmapped_event >= data->next_free_event);
3732 write_thread_data_block (data);
3734 data->next_free_event = data->events;
3735 data->next_unreserved_event = data->events;
3736 data->first_unwritten_event = data->events;
3737 data->first_unmapped_event = data->events;
3738 MONO_PROFILER_GET_CURRENT_COUNTER (data->start_event_counter);
3739 data->last_event_counter = data->start_event_counter;
3744 /* The ">=" operator is intentional, to leave one spare slot for "extended values" */
// RESERVE_EVENTS: flush the buffer if fewer than (count) slots (plus the
// spare extension slot) remain, then hand out (count) consecutive slots by
// advancing next_unreserved_event. GET_NEXT_FREE_EVENT is the common
// single-slot case; COMMIT_RESERVED_EVENTS publishes the reservation by
// moving next_free_event forward. No comments are interleaved below: the
// macro lines are \-continuations.
3745 #define RESERVE_EVENTS(d,e,count) do {\
3746 if ((d)->next_unreserved_event >= ((d)->end_event - (count))) {\
3747 flush_full_event_data_buffer (d);\
3749 (e) = (d)->next_unreserved_event;\
3750 (d)->next_unreserved_event += (count);\
3752 #define GET_NEXT_FREE_EVENT(d,e) RESERVE_EVENTS ((d),(e),1)
3753 #define COMMIT_RESERVED_EVENTS(d) do {\
3754 data->next_free_event = data->next_unreserved_event;\
// Flush all pending profiler output: id mappings, every thread's event data
// block, and the statistical sample buffer.
3758 flush_everything (void) {
3759 ProfilerPerThreadData *data;
3761 flush_all_mappings ();
3762 for (data = profiler->per_thread_data; data != NULL; data = data->next) {
3763 write_thread_data_block (data);
3765 write_statistical_data_block (profiler->statistical_data);
3768 /* This assumes the lock is held: it just offloads the work to the writer thread. */
// Signal the writer thread to perform a full flush and block until it
// acknowledges. If the writer thread is not running (CHECK_WRITER_THREAD
// false), this is a no-op apart from the log line.
3770 writer_thread_flush_everything (void) {
3771 if (CHECK_WRITER_THREAD ()) {
3772 profiler->writer_thread_flush_everything = TRUE;
3773 LOG_WRITER_THREAD ("writer_thread_flush_everything: raising event...");
3774 WRITER_EVENT_RAISE ();
3775 LOG_WRITER_THREAD ("writer_thread_flush_everything: waiting event...");
3776 WRITER_EVENT_DONE_WAIT ();
3777 LOG_WRITER_THREAD ("writer_thread_flush_everything: got event.");
3779 LOG_WRITER_THREAD ("writer_thread_flush_everything: no thread.");
// Translate a MonoProfileResult into the SUCCESS/FAILURE bit of a LOADED
// event code.
3783 #define RESULT_TO_LOAD_CODE(r) (((r)==MONO_PROFILE_OK)?MONO_PROFILER_LOADED_EVENT_SUCCESS:MONO_PROFILER_LOADED_EVENT_FAILURE)
// Appdomain load start callback: record the start timestamp for the domain.
3785 appdomain_start_load (MonoProfiler *profiler, MonoDomain *domain) {
3787 loaded_element_load_start (profiler->loaded_appdomains, domain);
// Appdomain load end callback: close the load record (named by the numeric
// domain id) and write the LOADED block.
3792 appdomain_end_load (MonoProfiler *profiler, MonoDomain *domain, int result) {
3794 LoadedElement *element;
3796 name = g_strdup_printf ("%d", mono_domain_get_id (domain));
3798 element = loaded_element_load_end (profiler->loaded_appdomains, domain, name);
3799 write_element_load_block (element, MONO_PROFILER_LOADED_EVENT_APPDOMAIN | RESULT_TO_LOAD_CODE (result), CURRENT_THREAD_ID (), domain);
// Appdomain unload start: record the timestamp and force a full flush, since
// events may still reference ids defined by this domain.
3804 appdomain_start_unload (MonoProfiler *profiler, MonoDomain *domain) {
3806 loaded_element_unload_start (profiler->loaded_appdomains, domain);
3807 writer_thread_flush_everything ();
// Appdomain unload end: close the unload record and write the UNLOADED block.
3812 appdomain_end_unload (MonoProfiler *profiler, MonoDomain *domain) {
3813 LoadedElement *element;
3816 element = loaded_element_unload_end (profiler->loaded_appdomains, domain);
3817 write_element_unload_block (element, MONO_PROFILER_LOADED_EVENT_APPDOMAIN, CURRENT_THREAD_ID ());
// Module (MonoImage) load start callback: record the start timestamp.
3822 module_start_load (MonoProfiler *profiler, MonoImage *module) {
3824 loaded_element_load_start (profiler->loaded_modules, module);
// Module load end: name the element after its assembly name when available
// (dynamic modules have none, so fall back to the pointer), then write the
// LOADED block.
3829 module_end_load (MonoProfiler *profiler, MonoImage *module, int result) {
3831 MonoAssemblyName aname;
3832 LoadedElement *element;
3834 if (mono_assembly_fill_assembly_name (module, &aname)) {
3835 name = mono_stringify_assembly_name (&aname);
3837 name = g_strdup_printf ("Dynamic module \"%p\"", module);
3840 element = loaded_element_load_end (profiler->loaded_modules, module, name);
3841 write_element_load_block (element, MONO_PROFILER_LOADED_EVENT_MODULE | RESULT_TO_LOAD_CODE (result), CURRENT_THREAD_ID (), module);
// Module unload start: record timestamp and flush, mirroring appdomains.
3846 module_start_unload (MonoProfiler *profiler, MonoImage *module) {
3848 loaded_element_unload_start (profiler->loaded_modules, module);
3849 writer_thread_flush_everything ();
// Module unload end: close the unload record and write the UNLOADED block.
3854 module_end_unload (MonoProfiler *profiler, MonoImage *module) {
3855 LoadedElement *element;
3858 element = loaded_element_unload_end (profiler->loaded_modules, module);
3859 write_element_unload_block (element, MONO_PROFILER_LOADED_EVENT_MODULE, CURRENT_THREAD_ID ());
// Assembly load start callback: record the start timestamp.
3864 assembly_start_load (MonoProfiler *profiler, MonoAssembly *assembly) {
3866 loaded_element_load_start (profiler->loaded_assemblies, assembly);
// Assembly load end: same naming strategy as modules, resolved through the
// assembly's image.
3871 assembly_end_load (MonoProfiler *profiler, MonoAssembly *assembly, int result) {
3873 MonoAssemblyName aname;
3874 LoadedElement *element;
3876 if (mono_assembly_fill_assembly_name (mono_assembly_get_image (assembly), &aname)) {
3877 name = mono_stringify_assembly_name (&aname);
3879 name = g_strdup_printf ("Dynamic assembly \"%p\"", assembly);
3882 element = loaded_element_load_end (profiler->loaded_assemblies, assembly, name);
3883 write_element_load_block (element, MONO_PROFILER_LOADED_EVENT_ASSEMBLY | RESULT_TO_LOAD_CODE (result), CURRENT_THREAD_ID (), assembly);
// Assembly unload start: record timestamp and flush, mirroring appdomains.
3888 assembly_start_unload (MonoProfiler *profiler, MonoAssembly *assembly) {
3890 loaded_element_unload_start (profiler->loaded_assemblies, assembly);
3891 writer_thread_flush_everything ();
// Assembly unload end: close the unload record and write the UNLOADED block.
3895 assembly_end_unload (MonoProfiler *profiler, MonoAssembly *assembly) {
3896 LoadedElement *element;
3899 element = loaded_element_unload_end (profiler->loaded_assemblies, assembly);
3900 write_element_unload_block (element, MONO_PROFILER_LOADED_EVENT_ASSEMBLY, CURRENT_THREAD_ID ());
// Debug-only helpers (compiled under DEBUG_LOGGING_PROFILER) that map event
// codes/results/kinds to printable names for print_event_data.
3904 #if (DEBUG_LOGGING_PROFILER)
3906 class_event_code_to_string (MonoProfilerClassEvents code) {
3908 case MONO_PROFILER_EVENT_CLASS_LOAD: return "LOAD";
3909 case MONO_PROFILER_EVENT_CLASS_UNLOAD: return "UNLOAD";
3910 case MONO_PROFILER_EVENT_CLASS_ALLOCATION: return "ALLOCATION";
3911 case MONO_PROFILER_EVENT_CLASS_EXCEPTION: return "EXCEPTION";
3912 default: g_assert_not_reached (); return "";
3916 method_event_code_to_string (MonoProfilerMethodEvents code) {
3918 case MONO_PROFILER_EVENT_METHOD_CALL: return "CALL";
3919 case MONO_PROFILER_EVENT_METHOD_JIT: return "JIT";
3920 case MONO_PROFILER_EVENT_METHOD_FREED: return "FREED";
3921 case MONO_PROFILER_EVENT_METHOD_ALLOCATION_CALLER: return "ALLOCATION_CALLER";
3922 case MONO_PROFILER_EVENT_METHOD_ALLOCATION_JIT_TIME_CALLER: return "ALLOCATION_JIT_TIME_CALLER";
3923 case MONO_PROFILER_EVENT_ALLOCATION_OBJECT_ID: return "ALLOCATION_OBJECT_ID";
3924 default: g_assert_not_reached (); return "";
3928 number_event_code_to_string (MonoProfilerEvents code) {
3930 case MONO_PROFILER_EVENT_THREAD: return "THREAD";
3931 case MONO_PROFILER_EVENT_GC_COLLECTION: return "GC_COLLECTION";
3932 case MONO_PROFILER_EVENT_GC_MARK: return "GC_MARK";
3933 case MONO_PROFILER_EVENT_GC_SWEEP: return "GC_SWEEP";
3934 case MONO_PROFILER_EVENT_GC_RESIZE: return "GC_RESIZE";
3935 case MONO_PROFILER_EVENT_GC_STOP_WORLD: return "GC_STOP_WORLD";
3936 case MONO_PROFILER_EVENT_GC_START_WORLD: return "GC_START_WORLD";
3937 case MONO_PROFILER_EVENT_JIT_TIME_ALLOCATION: return "JIT_TIME_ALLOCATION";
3938 case MONO_PROFILER_EVENT_STACK_SECTION: return "STACK_SECTION";
3939 case MONO_PROFILER_EVENT_ALLOCATION_OBJECT_ID: return "ALLOCATION_OBJECT_ID";
3940 default: g_assert_not_reached (); return "";
3944 event_result_to_string (MonoProfilerEventResult code) {
3946 case MONO_PROFILER_EVENT_RESULT_SUCCESS: return "SUCCESS";
3947 case MONO_PROFILER_EVENT_RESULT_FAILURE: return "FAILURE";
3948 default: g_assert_not_reached (); return "";
3952 event_kind_to_string (MonoProfilerEventKind code) {
3954 case MONO_PROFILER_EVENT_KIND_START: return "START";
3955 case MONO_PROFILER_EVENT_KIND_END: return "END";
3956 default: g_assert_not_reached (); return "";
// Debug-only dump of a single stored event (class, method or numeric),
// including its decoded code/result/kind and the raw value.
// NOTE(review): several printf argument lines are missing from this excerpt.
3960 print_event_data (ProfilerPerThreadData *data, ProfilerEventData *event, guint64 value) {
3961 if (event->data_type == MONO_PROFILER_EVENT_DATA_TYPE_CLASS) {
3962 printf ("STORE EVENT [TID %ld][EVENT %ld] CLASS[%p] %s:%s:%s[%d-%d-%d] %ld (%s.%s)\n",
3964 event - data->events,
3965 event->data.address,
// The result bits are packed into the code field; mask them apart.
3966 class_event_code_to_string (event->code & ~MONO_PROFILER_EVENT_RESULT_MASK),
3967 event_result_to_string (event->code & MONO_PROFILER_EVENT_RESULT_MASK),
3968 event_kind_to_string (event->kind),
3973 mono_class_get_namespace ((MonoClass*) event->data.address),
3974 mono_class_get_name ((MonoClass*) event->data.address));
3975 } else if (event->data_type == MONO_PROFILER_EVENT_DATA_TYPE_METHOD) {
3976 printf ("STORE EVENT [TID %ld][EVENT %ld] METHOD[%p] %s:%s:%s[%d-%d-%d] %ld (%s.%s:%s (?))\n",
3978 event - data->events,
3979 event->data.address,
3980 method_event_code_to_string (event->code & ~MONO_PROFILER_EVENT_RESULT_MASK),
3981 event_result_to_string (event->code & MONO_PROFILER_EVENT_RESULT_MASK),
3982 event_kind_to_string (event->kind),
// Method events may carry a NULL method (e.g. unknown caller).
3987 (event->data.address != NULL) ? mono_class_get_namespace (mono_method_get_class ((MonoMethod*) event->data.address)) : "<NULL>",
3988 (event->data.address != NULL) ? mono_class_get_name (mono_method_get_class ((MonoMethod*) event->data.address)) : "<NULL>",
3989 (event->data.address != NULL) ? mono_method_get_name ((MonoMethod*) event->data.address) : "<NULL>",
3991 printf ("STORE EVENT [TID %ld][EVENT %ld] NUMBER[%ld] %s:%s[%d-%d-%d] %ld\n",
3993 event - data->events,
3994 (guint64) event->data.number,
3995 number_event_code_to_string (event->code),
3996 event_kind_to_string (event->kind),
// LOG_EVENT expands to print_event_data under DEBUG_LOGGING_PROFILER and
// to nothing otherwise (the #if/#else lines fall outside this excerpt).
4003 #define LOG_EVENT(data,ev,val) print_event_data ((data),(ev),(val))
4005 #define LOG_EVENT(data,ev,val)
4008 #define RESULT_TO_EVENT_CODE(r) (((r)==MONO_PROFILE_OK)?MONO_PROFILER_EVENT_RESULT_SUCCESS:MONO_PROFILER_EVENT_RESULT_FAILURE)
// Event-storing macros. The four STORE_EVENT_* variants fill one event slot
// with an ITEM (pointer payload in data.address) or a NUMBER (integer
// payload in data.number), and record either a COUNTER delta (time since the
// thread's last event) or an explicit VALUE. Values that do not fit below
// MAX_EVENT_VALUE spill into the next event slot as a raw guint64
// "extension" (this is why RESERVE_EVENTS keeps one spare slot).
// INCREMENT_EVENT advances past an event, accounting for that extension.
// No comments are interleaved below: every line is a \-continuation and
// inserting a line would terminate the macro early.
4010 #define STORE_EVENT_ITEM_COUNTER(event,p,i,dt,c,k) do {\
4013 MONO_PROFILER_GET_CURRENT_COUNTER (counter);\
4014 (event)->data.address = (i);\
4015 (event)->data_type = (dt);\
4016 (event)->code = (c);\
4017 (event)->kind = (k);\
4018 delta = counter - data->last_event_counter;\
4019 if (delta < MAX_EVENT_VALUE) {\
4020 (event)->value = delta;\
4022 ProfilerEventData *extension = data->next_unreserved_event;\
4023 data->next_unreserved_event ++;\
4024 (event)->value = MAX_EVENT_VALUE;\
4025 *(guint64*)extension = delta;\
4027 data->last_event_counter = counter;\
4028 LOG_EVENT (data, (event), delta);\
4030 #define STORE_EVENT_ITEM_VALUE(event,p,i,dt,c,k,v) do {\
4031 (event)->data.address = (i);\
4032 (event)->data_type = (dt);\
4033 (event)->code = (c);\
4034 (event)->kind = (k);\
4035 if ((v) < MAX_EVENT_VALUE) {\
4036 (event)->value = (v);\
4038 ProfilerEventData *extension = data->next_unreserved_event;\
4039 data->next_unreserved_event ++;\
4040 (event)->value = MAX_EVENT_VALUE;\
4041 *(guint64*)extension = (v);\
4043 LOG_EVENT (data, (event), (v));\
4045 #define STORE_EVENT_NUMBER_COUNTER(event,p,n,dt,c,k) do {\
4048 MONO_PROFILER_GET_CURRENT_COUNTER (counter);\
4049 (event)->data.number = (n);\
4050 (event)->data_type = (dt);\
4051 (event)->code = (c);\
4052 (event)->kind = (k);\
4053 delta = counter - data->last_event_counter;\
4054 if (delta < MAX_EVENT_VALUE) {\
4055 (event)->value = delta;\
4057 ProfilerEventData *extension = data->next_unreserved_event;\
4058 data->next_unreserved_event ++;\
4059 (event)->value = MAX_EVENT_VALUE;\
4060 *(guint64*)extension = delta;\
4062 data->last_event_counter = counter;\
4063 LOG_EVENT (data, (event), delta);\
4065 #define STORE_EVENT_NUMBER_VALUE(event,p,n,dt,c,k,v) do {\
4066 (event)->data.number = (n);\
4067 (event)->data_type = (dt);\
4068 (event)->code = (c);\
4069 (event)->kind = (k);\
4070 if ((v) < MAX_EVENT_VALUE) {\
4071 (event)->value = (v);\
4073 ProfilerEventData *extension = data->next_unreserved_event;\
4074 data->next_unreserved_event ++;\
4075 (event)->value = MAX_EVENT_VALUE;\
4076 *(guint64*)extension = (v);\
4078 LOG_EVENT (data, (event), (v));\
4080 #define INCREMENT_EVENT(event) do {\
4081 if ((event)->value != MAX_EVENT_VALUE) {\
// Class load-start callback: store a CLASS_LOAD/START event with a counter
// delta for the current thread.
4089 class_start_load (MonoProfiler *profiler, MonoClass *klass) {
4090 ProfilerPerThreadData *data;
4091 ProfilerEventData *event;
4092 GET_PROFILER_THREAD_DATA (data);
4093 GET_NEXT_FREE_EVENT (data, event);
4094 STORE_EVENT_ITEM_COUNTER (event, profiler, klass, MONO_PROFILER_EVENT_DATA_TYPE_CLASS, MONO_PROFILER_EVENT_CLASS_LOAD, MONO_PROFILER_EVENT_KIND_START);
4095 COMMIT_RESERVED_EVENTS (data);
// Class load-end callback: the END event additionally carries the
// success/failure bit derived from result.
4098 class_end_load (MonoProfiler *profiler, MonoClass *klass, int result) {
4099 ProfilerPerThreadData *data;
4100 ProfilerEventData *event;
4101 GET_PROFILER_THREAD_DATA (data);
4102 GET_NEXT_FREE_EVENT (data, event);
4103 STORE_EVENT_ITEM_COUNTER (event, profiler, klass, MONO_PROFILER_EVENT_DATA_TYPE_CLASS, MONO_PROFILER_EVENT_CLASS_LOAD | RESULT_TO_EVENT_CODE (result), MONO_PROFILER_EVENT_KIND_END);
4104 COMMIT_RESERVED_EVENTS (data);
// Class unload-start callback.
4107 class_start_unload (MonoProfiler *profiler, MonoClass *klass) {
4108 ProfilerPerThreadData *data;
4109 ProfilerEventData *event;
4110 GET_PROFILER_THREAD_DATA (data);
4111 GET_NEXT_FREE_EVENT (data, event);
4112 STORE_EVENT_ITEM_COUNTER (event, profiler, klass, MONO_PROFILER_EVENT_DATA_TYPE_CLASS, MONO_PROFILER_EVENT_CLASS_UNLOAD, MONO_PROFILER_EVENT_KIND_START);
4113 COMMIT_RESERVED_EVENTS (data);
// Class unload-end callback (unload has no result parameter).
4116 class_end_unload (MonoProfiler *profiler, MonoClass *klass) {
4117 ProfilerPerThreadData *data;
4118 ProfilerEventData *event;
4119 GET_PROFILER_THREAD_DATA (data);
4120 GET_NEXT_FREE_EVENT (data, event);
4121 STORE_EVENT_ITEM_COUNTER (event, profiler, klass, MONO_PROFILER_EVENT_DATA_TYPE_CLASS, MONO_PROFILER_EVENT_CLASS_UNLOAD, MONO_PROFILER_EVENT_KIND_END);
4122 COMMIT_RESERVED_EVENTS (data);
// JIT-start callback: push the method onto the shadow stack (marked as
// jitted) and store a METHOD_JIT/START event.
4126 method_start_jit (MonoProfiler *profiler, MonoMethod *method) {
4127 ProfilerPerThreadData *data;
4128 ProfilerEventData *event;
4129 GET_PROFILER_THREAD_DATA (data);
4130 GET_NEXT_FREE_EVENT (data, event);
4131 thread_stack_push_jitted_safely (&(data->stack), method, TRUE);
4132 STORE_EVENT_ITEM_COUNTER (event, profiler, method, MONO_PROFILER_EVENT_DATA_TYPE_METHOD, MONO_PROFILER_EVENT_METHOD_JIT, MONO_PROFILER_EVENT_KIND_START);
4133 COMMIT_RESERVED_EVENTS (data);
// JIT-end callback: store the END event (with success/failure bit) and pop
// the shadow-stack entry pushed by method_start_jit.
4136 method_end_jit (MonoProfiler *profiler, MonoMethod *method, int result) {
4137 ProfilerPerThreadData *data;
4138 ProfilerEventData *event;
4139 GET_PROFILER_THREAD_DATA (data);
4140 GET_NEXT_FREE_EVENT (data, event);
4141 STORE_EVENT_ITEM_COUNTER (event, profiler, method, MONO_PROFILER_EVENT_DATA_TYPE_METHOD, MONO_PROFILER_EVENT_METHOD_JIT | RESULT_TO_EVENT_CODE (result), MONO_PROFILER_EVENT_KIND_END);
4142 thread_stack_pop (&(data->stack));
4143 COMMIT_RESERVED_EVENTS (data);
// JIT-result callback (oprofile integration, HAS_OPROFILE builds): register
// the freshly JITted method's native code range with OProfile under a
// "Namespace.Class:method (signature)" name.
// NOTE(review): the g_free calls for name/signature fall on lines missing
// from this excerpt -- confirm they are released in the full source.
4148 method_jit_result (MonoProfiler *prof, MonoMethod *method, MonoJitInfo* jinfo, int result) {
4149 if (profiler->action_flags.oprofile && (result == MONO_PROFILE_OK)) {
4150 MonoClass *klass = mono_method_get_class (method);
4151 char *signature = mono_signature_get_desc (mono_method_signature (method), TRUE);
4152 char *name = g_strdup_printf ("%s.%s:%s (%s)", mono_class_get_namespace (klass), mono_class_get_name (klass), mono_method_get_name (method), signature);
4153 gpointer code_start = mono_jit_info_get_code_start (jinfo);
4154 int code_size = mono_jit_info_get_code_size (jinfo);
4156 if (op_write_native_code (name, code_start, code_size)) {
4157 g_warning ("Problem calling op_write_native_code\n");
// Method-enter callback: optionally store a METHOD_CALL/START event
// (track_calls) and push the method on the shadow stack (track_stack).
// CHECK_PROFILER_ENABLED bails out early when profiling is suspended.
4168 method_enter (MonoProfiler *profiler, MonoMethod *method) {
4169 ProfilerPerThreadData *data;
4171 CHECK_PROFILER_ENABLED ();
4172 GET_PROFILER_THREAD_DATA (data);
4173 if (profiler->action_flags.track_calls) {
4174 ProfilerEventData *event;
4175 GET_NEXT_FREE_EVENT (data, event);
4176 STORE_EVENT_ITEM_COUNTER (event, profiler, method, MONO_PROFILER_EVENT_DATA_TYPE_METHOD, MONO_PROFILER_EVENT_METHOD_CALL, MONO_PROFILER_EVENT_KIND_START);
4177 COMMIT_RESERVED_EVENTS (data);
4179 if (profiler->action_flags.track_stack) {
4180 thread_stack_push_safely (&(data->stack), method);
// Method-leave callback: mirror image of method_enter (END event, stack pop).
4184 method_leave (MonoProfiler *profiler, MonoMethod *method) {
4185 ProfilerPerThreadData *data;
4187 CHECK_PROFILER_ENABLED ();
4188 GET_PROFILER_THREAD_DATA (data);
4189 if (profiler->action_flags.track_calls) {
4190 ProfilerEventData *event;
4191 GET_NEXT_FREE_EVENT (data, event);
4192 STORE_EVENT_ITEM_COUNTER (event, profiler, method, MONO_PROFILER_EVENT_DATA_TYPE_METHOD, MONO_PROFILER_EVENT_METHOD_CALL, MONO_PROFILER_EVENT_KIND_END);
4193 COMMIT_RESERVED_EVENTS (data);
4195 if (profiler->action_flags.track_stack) {
4196 thread_stack_pop (&(data->stack));
// Method-freed callback: store a METHOD_FREED event (kind 0 -- freeing has
// no START/END pair).
4201 method_free (MonoProfiler *profiler, MonoMethod *method) {
4202 ProfilerPerThreadData *data;
4203 ProfilerEventData *event;
4204 GET_PROFILER_THREAD_DATA (data);
4205 GET_NEXT_FREE_EVENT (data, event);
4206 STORE_EVENT_ITEM_COUNTER (event, profiler, method, MONO_PROFILER_EVENT_DATA_TYPE_METHOD, MONO_PROFILER_EVENT_METHOD_FREED, 0);
4207 COMMIT_RESERVED_EVENTS (data);
// Thread-start callback: store a THREAD/START event carrying the tid as the
// numeric payload.
4211 thread_start (MonoProfiler *profiler, gsize tid) {
4212 ProfilerPerThreadData *data;
4213 ProfilerEventData *event;
4214 GET_PROFILER_THREAD_DATA (data);
4215 GET_NEXT_FREE_EVENT (data, event);
4216 STORE_EVENT_NUMBER_COUNTER (event, profiler, tid, MONO_PROFILER_EVENT_DATA_TYPE_OTHER, MONO_PROFILER_EVENT_THREAD, MONO_PROFILER_EVENT_KIND_START);
4217 COMMIT_RESERVED_EVENTS (data);
// Thread-end callback: matching THREAD/END event.
4220 thread_end (MonoProfiler *profiler, gsize tid) {
4221 ProfilerPerThreadData *data;
4222 ProfilerEventData *event;
4223 GET_PROFILER_THREAD_DATA (data);
4224 GET_NEXT_FREE_EVENT (data, event);
4225 STORE_EVENT_NUMBER_COUNTER (event, profiler, tid, MONO_PROFILER_EVENT_DATA_TYPE_OTHER, MONO_PROFILER_EVENT_THREAD, MONO_PROFILER_EVENT_KIND_END);
4226 COMMIT_RESERVED_EVENTS (data);
// Store the portion of the shadow stack that has not been saved yet: one
// STACK_SECTION header event (payload = last saved top, value = frame
// count), followed by one ALLOCATION_CALLER or ALLOCATION_JIT_TIME_CALLER
// event per unsaved frame, depending on whether the frame was jitted.
// Returns the next free event slot (the return statement and the
// "events ++" advances fall on lines missing from this excerpt).
4229 static ProfilerEventData*
4230 save_stack_delta (MonoProfiler *profiler, ProfilerPerThreadData *data, ProfilerEventData *events, int unsaved_frames) {
4233 /* In this loop it is safe to simply increment "events" because MAX_EVENT_VALUE cannot be reached. */
4234 STORE_EVENT_NUMBER_VALUE (events, profiler, data->stack.last_saved_top, MONO_PROFILER_EVENT_DATA_TYPE_OTHER, MONO_PROFILER_EVENT_STACK_SECTION, 0, unsaved_frames);
4236 for (i = 0; i < unsaved_frames; i++) {
4237 if (! thread_stack_index_from_top_is_jitted (&(data->stack), i)) {
4238 STORE_EVENT_ITEM_VALUE (events, profiler, thread_stack_index_from_top (&(data->stack), i), MONO_PROFILER_EVENT_DATA_TYPE_METHOD, MONO_PROFILER_EVENT_METHOD_ALLOCATION_CALLER, 0, 0);
4240 STORE_EVENT_ITEM_VALUE (events, profiler, thread_stack_index_from_top (&(data->stack), i), MONO_PROFILER_EVENT_DATA_TYPE_METHOD, MONO_PROFILER_EVENT_METHOD_ALLOCATION_JIT_TIME_CALLER, 0, 0);
// Remember how far the stack has been persisted.
4245 data->stack.last_saved_top = data->stack.top;
// Allocation callback: reserve enough event slots up front (allocation event
// plus optional caller, object id and stack-delta events, depending on the
// active action flags), then fill them and commit in one batch.
4251 object_allocated (MonoProfiler *profiler, MonoObject *obj, MonoClass *klass) {
4252 ProfilerPerThreadData *data;
4253 ProfilerEventData *events;
4255 int event_slot_count;
4257 GET_PROFILER_THREAD_DATA (data);
// One slot for the CLASS_ALLOCATION event itself...
4258 event_slot_count = 1;
// ...plus one per enabled extra record.
4259 if (profiler->action_flags.save_allocation_caller) {
4260 event_slot_count ++;
4262 if (profiler->action_flags.allocations_carry_id) {
4263 event_slot_count ++;
4265 if (profiler->action_flags.save_allocation_stack) {
4266 unsaved_frames = thread_stack_count_unsaved_frames (&(data->stack));
4267 event_slot_count += (unsaved_frames + 1);
4271 RESERVE_EVENTS (data, events, event_slot_count);
4273 if (profiler->action_flags.save_allocation_stack) {
4274 events = save_stack_delta (profiler, data, events, unsaved_frames);
// The allocation event records the object size as its value.
4277 STORE_EVENT_ITEM_VALUE (events, profiler, klass, MONO_PROFILER_EVENT_DATA_TYPE_CLASS, MONO_PROFILER_EVENT_CLASS_ALLOCATION, 0, (guint64) mono_object_get_size (obj));
4278 if (profiler->action_flags.unreachable_objects || profiler->action_flags.heap_shot || profiler->action_flags.collection_summary) {
4279 STORE_ALLOCATED_OBJECT (data, obj);
4282 if (profiler->action_flags.save_allocation_caller) {
4283 MonoMethod *caller = thread_stack_top (&(data->stack));
4284 gboolean caller_is_jitted = thread_stack_top_is_jitted (&(data->stack));
4286 /* In this loop it is safe to simply increment "events" because MAX_EVENT_VALUE cannot be reached. */
// Skip managed-to-native wrapper frames to report the real caller.
4289 while ((caller != NULL) && (caller->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE)) {
4290 caller = thread_stack_index_from_top (&(data->stack), index);
4291 caller_is_jitted = thread_stack_index_from_top_is_jitted (&(data->stack), index);
4294 if (! caller_is_jitted) {
4295 STORE_EVENT_ITEM_VALUE (events, profiler, caller, MONO_PROFILER_EVENT_DATA_TYPE_METHOD, MONO_PROFILER_EVENT_METHOD_ALLOCATION_CALLER, 0, 0);
4297 STORE_EVENT_ITEM_VALUE (events, profiler, caller, MONO_PROFILER_EVENT_DATA_TYPE_METHOD, MONO_PROFILER_EVENT_METHOD_ALLOCATION_JIT_TIME_CALLER, 0, 0);
4300 if (profiler->action_flags.allocations_carry_id) {
4302 STORE_EVENT_ITEM_VALUE (events, profiler, obj, MONO_PROFILER_EVENT_DATA_TYPE_OTHER, MONO_PROFILER_EVENT_ALLOCATION_OBJECT_ID, 0, 0);
4305 COMMIT_RESERVED_EVENTS (data);
/* Profiler hook for monitor (lock) events.  Records a CLASS_MONITOR counter
 * event for the object's class followed by an OBJECT_MONITOR event carrying
 * the specific monitor event code; prepends a stack delta when the thread
 * has unsaved shadow-stack frames. */
4309 monitor_event (MonoProfiler *profiler, MonoObject *obj, MonoProfilerMonitorEvent event) {
4310 ProfilerPerThreadData *data;
4311 ProfilerEventData *events;
4314 int event_slot_count;
4316 CHECK_PROFILER_ENABLED ();
4318 GET_PROFILER_THREAD_DATA (data);
4319 klass = mono_object_get_class (obj);
/* Slot budget: stack header + frames when needed, plus the two monitor
 * events written below. */
4321 unsaved_frames = thread_stack_count_unsaved_frames (&(data->stack));
4322 if (unsaved_frames > 0) {
4323 event_slot_count = unsaved_frames + 3;
4325 event_slot_count = 2;
4328 RESERVE_EVENTS (data, events, event_slot_count);
4329 if (unsaved_frames > 0) {
4330 events = save_stack_delta (profiler, data, events, unsaved_frames);
4332 STORE_EVENT_ITEM_COUNTER (events, profiler, klass, MONO_PROFILER_EVENT_DATA_TYPE_CLASS, MONO_PROFILER_EVENT_CLASS_MONITOR, MONO_PROFILER_EVENT_KIND_START);
4333 INCREMENT_EVENT (events);
4334 STORE_EVENT_ITEM_VALUE (events, profiler, obj, MONO_PROFILER_EVENT_DATA_TYPE_OTHER, MONO_PROFILER_EVENT_OBJECT_MONITOR, 0, event);
4335 COMMIT_RESERVED_EVENTS (data);
/* Statistical (sampling) profiler hook with call-chain capture.  Claims a
 * slot range in the current statistical buffer lock-free via
 * InterlockedIncrement, copies the sampled IPs into it, NULL-pads up to the
 * configured chain depth, and — when this sample fills the buffer — swaps in
 * the second buffer and wakes the writer thread.
 * NOTE(review): this extract elides lines (e.g. the "do {" openers matching
 * the visible "} while (...)" lines and the outer retry loop) — the visible
 * text is not the complete body. */
4339 statistical_call_chain (MonoProfiler *profiler, int call_chain_depth, guchar **ips, void *context) {
4340 MonoDomain *domain = mono_domain_get ();
4341 ProfilerStatisticalData *data;
4344 CHECK_PROFILER_ENABLED ();
4346 data = profiler->statistical_data;
/* Atomically claim the next free index; index is 1-based after the increment. */
4347 index = InterlockedIncrement ((int*) &data->next_free_index);
4349 if (index <= data->end_index) {
/* Each sample occupies (chain depth + 1) hit slots. */
4350 unsigned int base_index = (index - 1) * (profiler->statistical_call_chain_depth + 1);
4351 unsigned int call_chain_index = 0;
4353 //printf ("[statistical_call_chain] (%d)\n", call_chain_depth);
4354 while (call_chain_index < call_chain_depth) {
4355 ProfilerStatisticalHit *hit = & (data->hits [base_index + call_chain_index]);
4356 //printf ("[statistical_call_chain] [%d] = %p\n", base_index + call_chain_index, ips [call_chain_index]);
4357 hit->address = (gpointer) ips [call_chain_index];
4358 hit->domain = domain;
4359 call_chain_index ++;
/* Pad the remaining slots so readers see a fixed-size record per sample. */
4361 while (call_chain_index <= profiler->statistical_call_chain_depth) {
4362 ProfilerStatisticalHit *hit = & (data->hits [base_index + call_chain_index]);
4363 //printf ("[statistical_call_chain] [%d] = NULL\n", base_index + call_chain_index);
4364 hit->address = NULL;
4366 call_chain_index ++;
4369 /* Check if we are the one that must swap the buffers */
4370 if (index == data->end_index + 1) {
4371 ProfilerStatisticalData *new_data;
4373 /* In the *impossible* case that the writer thread has not finished yet, */
4374 /* loop waiting for it and meanwhile lose all statistical events... */
4376 /* First, wait that it consumed the ready buffer */
4377 while (profiler->statistical_data_ready != NULL);
4378 /* Then, wait that it produced the free buffer */
4379 new_data = profiler->statistical_data_second_buffer;
4380 } while (new_data == NULL);
/* Publish the full buffer for the writer and install the fresh one. */
4382 profiler->statistical_data_ready = data;
4383 profiler->statistical_data = new_data;
4384 profiler->statistical_data_second_buffer = NULL;
4385 WRITER_EVENT_RAISE ();
4386 /* Otherwise exit from the handler and drop the event... */
4391 /* Loop again, hoping to acquire a free slot this time (otherwise the event will be dropped) */
4394 } while (data == NULL);
/* Statistical (sampling) profiler hook for a single IP hit (no call chain).
 * Same lock-free slot-claim and buffer-swap protocol as
 * statistical_call_chain, but each sample is one hit slot.
 * NOTE(review): this extract elides lines (the "do {" openers matching the
 * visible "} while (...)" lines) — the visible text is incomplete. */
4398 statistical_hit (MonoProfiler *profiler, guchar *ip, void *context) {
4399 MonoDomain *domain = mono_domain_get ();
4400 ProfilerStatisticalData *data;
4403 CHECK_PROFILER_ENABLED ();
4405 data = profiler->statistical_data;
/* Atomically claim the next free slot; index is 1-based after the increment. */
4406 index = InterlockedIncrement ((int*) &data->next_free_index);
4408 if (index <= data->end_index) {
4409 ProfilerStatisticalHit *hit = & (data->hits [index - 1]);
4410 hit->address = (gpointer) ip;
4411 hit->domain = domain;
4413 /* Check if we are the one that must swap the buffers */
4414 if (index == data->end_index + 1) {
4415 ProfilerStatisticalData *new_data;
4417 /* In the *impossible* case that the writer thread has not finished yet, */
4418 /* loop waiting for it and meanwhile lose all statistical events... */
4420 /* First, wait that it consumed the ready buffer */
4421 while (profiler->statistical_data_ready != NULL);
4422 /* Then, wait that it produced the free buffer */
4423 new_data = profiler->statistical_data_second_buffer;
4424 } while (new_data == NULL);
/* Publish the full buffer for the writer and install the fresh one. */
4426 profiler->statistical_data_ready = data;
4427 profiler->statistical_data = new_data;
4428 profiler->statistical_data_second_buffer = NULL;
4429 WRITER_EVENT_RAISE ();
4432 /* Loop again, hoping to acquire a free slot this time */
4435 } while (data == NULL);
/* Map a MonoGCEvent to the profiler event code written to the log file.
 * Start/end pairs map to the same code; the START/END distinction is
 * carried separately (see gc_event_kind_from_profiler_event). */
static MonoProfilerEvents
4439 gc_event_code_from_profiler_event (MonoGCEvent event) {
4441 case MONO_GC_EVENT_START:
4442 case MONO_GC_EVENT_END:
4443 return MONO_PROFILER_EVENT_GC_COLLECTION;
4444 case MONO_GC_EVENT_MARK_START:
4445 case MONO_GC_EVENT_MARK_END:
4446 return MONO_PROFILER_EVENT_GC_MARK;
4447 case MONO_GC_EVENT_RECLAIM_START:
4448 case MONO_GC_EVENT_RECLAIM_END:
4449 return MONO_PROFILER_EVENT_GC_SWEEP;
4450 case MONO_GC_EVENT_PRE_STOP_WORLD:
4451 case MONO_GC_EVENT_POST_STOP_WORLD:
4452 return MONO_PROFILER_EVENT_GC_STOP_WORLD;
4453 case MONO_GC_EVENT_PRE_START_WORLD:
4454 case MONO_GC_EVENT_POST_START_WORLD:
4455 return MONO_PROFILER_EVENT_GC_START_WORLD;
/* Unknown GC event values are a programming error. */
4457 g_assert_not_reached ();
/* Map a MonoGCEvent to the START/END kind recorded alongside the event
 * code: the *_START and PRE_* events are KIND_START, the *_END and POST_*
 * events are KIND_END. */
static MonoProfilerEventKind
4463 gc_event_kind_from_profiler_event (MonoGCEvent event) {
4465 case MONO_GC_EVENT_START:
4466 case MONO_GC_EVENT_MARK_START:
4467 case MONO_GC_EVENT_RECLAIM_START:
4468 case MONO_GC_EVENT_PRE_STOP_WORLD:
4469 case MONO_GC_EVENT_PRE_START_WORLD:
4470 return MONO_PROFILER_EVENT_KIND_START;
4471 case MONO_GC_EVENT_END:
4472 case MONO_GC_EVENT_MARK_END:
4473 case MONO_GC_EVENT_RECLAIM_END:
4474 case MONO_GC_EVENT_POST_START_WORLD:
4475 case MONO_GC_EVENT_POST_STOP_WORLD:
4476 return MONO_PROFILER_EVENT_KIND_END;
/* Unknown GC event values are a programming error. */
4478 g_assert_not_reached ();
/* Poll the user-supplied heap-shot command file: if it exists, is small
 * enough, and has been modified since the last poll, read it and interpret
 * its contents (via atoi) as the number of heap snapshots to dump next.
 * NOTE(review): the early-return bodies, fd error handling, and close()
 * appear in lines elided from this extract. */
4483 #define HEAP_SHOT_COMMAND_FILE_MAX_LENGTH 64
4485 profiler_heap_shot_process_command_file (void) {
4486 //FIXME: Port to Windows as well
4487 struct stat stat_buf;
4489 char buffer [HEAP_SHOT_COMMAND_FILE_MAX_LENGTH + 1];
4491 if (profiler->heap_shot_command_file_name == NULL)
4493 if (stat (profiler->heap_shot_command_file_name, &stat_buf) != 0)
4495 if (stat_buf.st_size > HEAP_SHOT_COMMAND_FILE_MAX_LENGTH)
/* Skip files not modified since the last poll; mtime is compared against
 * the stored access time in microseconds. */
4497 if ((stat_buf.st_mtim.tv_sec * 1000000) < profiler->heap_shot_command_file_access_time)
4500 fd = open (profiler->heap_shot_command_file_name, O_RDONLY);
4504 if (read (fd, &(buffer [0]), stat_buf.st_size) != stat_buf.st_size) {
/* NUL-terminate before parsing the snapshot count. */
4507 buffer [stat_buf.st_size] = 0;
4508 profiler->dump_next_heap_snapshots = atoi (buffer);
4509 MONO_PROFILER_GET_CURRENT_TIME (profiler->heap_shot_command_file_access_time);
/* Decide whether the heap snapshot for the current collection should be
 * dumped: yes if a snapshot was explicitly signalled, otherwise consult the
 * command file and the dump_next_heap_snapshots countdown (a negative value
 * apparently means "dump every collection").
 * NOTE(review): the return statements and result variable are in lines
 * elided from this extract. */
4516 dump_current_heap_snapshot (void) {
4519 if (profiler->heap_shot_was_signalled) {
4522 profiler_heap_shot_process_command_file ();
4523 if (profiler->dump_next_heap_snapshots > 0) {
/* Consume one pending snapshot request. */
4524 profiler->dump_next_heap_snapshots--;
4526 } else if (profiler->dump_next_heap_snapshots < 0) {
/* Initialize a heap-shot buffer list with a single freshly allocated buffer;
 * current/last point at it and the first free slot is its first element. */
4537 profiler_heap_buffers_setup (ProfilerHeapShotHeapBuffers *heap) {
4538 heap->buffers = g_new (ProfilerHeapShotHeapBuffer, 1);
4539 heap->buffers->previous = NULL;
4540 heap->buffers->next = NULL;
4541 heap->buffers->start_slot = &(heap->buffers->buffer [0]);
/* end_slot is one past the last usable slot. */
4542 heap->buffers->end_slot = &(heap->buffers->buffer [PROFILER_HEAP_SHOT_HEAP_BUFFER_SIZE]);
4543 heap->last = heap->buffers;
4544 heap->current = heap->buffers;
4545 heap->first_free_slot = & (heap->buffers->buffer [0]);
/* Reset all pointers of a heap-shot buffer list to NULL without freeing
 * anything (used after the buffers have been freed or handed off). */
4548 profiler_heap_buffers_clear (ProfilerHeapShotHeapBuffers *heap) {
4549 heap->buffers = NULL;
4551 heap->current = NULL;
4552 heap->first_free_slot = NULL;
/* Free every buffer in a heap-shot buffer list, then clear the list's
 * pointers.  NOTE(review): the g_free of "current" and the advance to
 * "next" are in lines elided from this extract. */
4555 profiler_heap_buffers_free (ProfilerHeapShotHeapBuffers *heap) {
4556 ProfilerHeapShotHeapBuffer *current = heap->buffers;
4557 while (current != NULL) {
4558 ProfilerHeapShotHeapBuffer *next = current->next;
4562 profiler_heap_buffers_clear (heap);
/* Walk an object's (or array element's) field slots using the class layout
 * bitmap and write every live referenced object to the heap-shot job.
 * Small layouts use the compact (single guint64) bitmap, larger ones the
 * extended byte array.  Returns the number of references written. */
4566 report_object_references (gpointer *start, ClassIdMappingElement *layout, ProfilerHeapShotWriteJob *job) {
4567 int reported_references = 0;
4570 for (slot = 0; slot < layout->data.layout.slots; slot ++) {
4571 gboolean slot_has_reference;
/* Choose compact vs extended bitmap based on the slot count. */
4572 if (layout->data.layout.slots <= CLASS_LAYOUT_PACKED_BITMAP_SIZE) {
4573 if (layout->data.bitmap.compact & (((guint64)1) << slot)) {
4574 slot_has_reference = TRUE;
4576 slot_has_reference = FALSE;
4579 if (layout->data.bitmap.extended [slot >> 3] & (1 << (slot & 7))) {
4580 slot_has_reference = TRUE;
4582 slot_has_reference = FALSE;
4586 if (slot_has_reference) {
4587 gpointer field = start [slot];
/* Only live, non-NULL referents are reported. */
4589 if ((field != NULL) && mono_object_is_alive (field)) {
4590 reported_references ++;
4591 WRITE_HEAP_SHOT_JOB_VALUE (job, field);
4596 return reported_references;
/* Report a reachable object to a heap-shot write job: update the per-class
 * summary counters, and (when dumping heap data) write the object record
 * followed by its outgoing references — array elements for arrays,
 * bitmap-driven field slots otherwise — back-patching the reference count
 * into the slot reserved right after the object record. */
4600 profiler_heap_report_object_reachable (ProfilerHeapShotWriteJob *job, MonoObject *obj) {
4602 MonoClass *klass = mono_object_get_class (obj);
4603 ClassIdMappingElement *class_id = class_id_mapping_element_get (klass);
4604 if (class_id == NULL) {
4605 printf ("profiler_heap_report_object_reachable: class %p (%s.%s) has no id\n", klass, mono_class_get_namespace (klass), mono_class_get_name (klass));
4607 g_assert (class_id != NULL);
/* Collection-summary mode: bump reachable instance/byte counters. */
4609 if (job->summary.capacity > 0) {
4610 guint32 id = class_id->id;
4611 g_assert (id < job->summary.capacity);
4613 job->summary.per_class_data [id].reachable.instances ++;
4614 job->summary.per_class_data [id].reachable.bytes += mono_object_get_size (obj);
4616 if (profiler->action_flags.heap_shot && job->dump_heap_data) {
4617 int reference_counter = 0;
4618 gpointer *reference_counter_location;
4620 WRITE_HEAP_SHOT_JOB_VALUE_WITH_CODE (job, obj, HEAP_CODE_OBJECT);
4621 #if DEBUG_HEAP_PROFILER
4622 printf ("profiler_heap_report_object_reachable: reported object %p at cursor %p\n", obj, (job->cursor - 1));
/* Reserve a slot for the reference count; it is back-patched below once
 * the references have been written. */
4624 WRITE_HEAP_SHOT_JOB_VALUE (job, NULL);
4625 reference_counter_location = job->cursor - 1;
/* Arrays: a non-zero rank marks an array class. */
4627 if (mono_class_get_rank (klass)) {
4628 MonoArray *array = (MonoArray *) obj;
4629 MonoClass *element_class = mono_class_get_element_class (klass);
4630 ClassIdMappingElement *element_id = class_id_mapping_element_get (element_class);
4632 g_assert (element_id != NULL);
4633 if (element_id->data.layout.slots == CLASS_LAYOUT_NOT_INITIALIZED) {
4634 class_id_mapping_element_build_layout_bitmap (element_class, element_id);
/* Reference-type elements: each live element is itself a reference. */
4636 if (! mono_class_is_valuetype (element_class)) {
4637 int length = mono_array_length (array);
4639 for (i = 0; i < length; i++) {
4640 MonoObject *array_element = mono_array_get (array, MonoObject*, i);
4641 if ((array_element != NULL) && mono_object_is_alive (array_element)) {
4642 reference_counter ++;
4643 WRITE_HEAP_SHOT_JOB_VALUE (job, array_element);
/* Value-type elements containing references: scan each element's slots. */
4646 } else if (element_id->data.layout.references > 0) {
4647 int length = mono_array_length (array);
4648 int array_element_size = mono_array_element_size (klass);
4650 for (i = 0; i < length; i++) {
4651 gpointer array_element_address = mono_array_addr_with_size (array, array_element_size, i);
4652 reference_counter += report_object_references (array_element_address, element_id, job);
/* Plain objects: build the layout bitmap lazily, then scan the fields
 * that follow the MonoObject header. */
4656 if (class_id->data.layout.slots == CLASS_LAYOUT_NOT_INITIALIZED) {
4657 class_id_mapping_element_build_layout_bitmap (klass, class_id);
4659 if (class_id->data.layout.references > 0) {
4660 reference_counter += report_object_references ((gpointer)(((char*)obj) + sizeof (MonoObject)), class_id, job);
/* Back-patch the reserved slot with the final reference count. */
4664 *reference_counter_location = GINT_TO_POINTER (reference_counter);
4665 #if DEBUG_HEAP_PROFILER
4666 printf ("profiler_heap_report_object_reachable: updated reference_counter_location %p with value %d\n", reference_counter_location, reference_counter);
/* Report an unreachable (collected) object to a heap-shot write job: update
 * the per-class unreachable summary counters and, in unreachable-objects
 * mode, write a FREE_OBJECT_CLASS record (class pointer + object size). */
4672 profiler_heap_report_object_unreachable (ProfilerHeapShotWriteJob *job, MonoObject *obj) {
4674 MonoClass *klass = mono_object_get_class (obj);
4675 guint32 size = mono_object_get_size (obj);
4677 if (job->summary.capacity > 0) {
4678 ClassIdMappingElement *class_id = class_id_mapping_element_get (klass);
4681 if (class_id == NULL) {
/* NOTE(review): this message says "reachable"; it appears copy-pasted
 * from profiler_heap_report_object_reachable — worth fixing upstream. */
4682 printf ("profiler_heap_report_object_reachable: class %p (%s.%s) has no id\n", klass, mono_class_get_namespace (klass), mono_class_get_name (klass));
4684 g_assert (class_id != NULL);
4686 g_assert (id < job->summary.capacity);
4688 job->summary.per_class_data [id].unreachable.instances ++;
4689 job->summary.per_class_data [id].unreachable.bytes += size;
4691 if (profiler->action_flags.unreachable_objects && job->dump_heap_data) {
4692 #if DEBUG_HEAP_PROFILER
4693 printf ("profiler_heap_report_object_unreachable: at job %p writing klass %p\n", job, klass);
4695 WRITE_HEAP_SHOT_JOB_VALUE_WITH_CODE (job, klass, HEAP_CODE_FREE_OBJECT_CLASS);
4697 #if DEBUG_HEAP_PROFILER
4698 printf ("profiler_heap_report_object_unreachable: at job %p writing size %p\n", job, GUINT_TO_POINTER (size));
4700 WRITE_HEAP_SHOT_JOB_VALUE (job, GUINT_TO_POINTER (size));
/* Append a live object to the profiler's heap buffer list (growing the list
 * with a new buffer when the current one is full) and report it as
 * reachable to the current write job. */
4706 profiler_heap_add_object (ProfilerHeapShotHeapBuffers *heap, ProfilerHeapShotWriteJob *job, MonoObject *obj) {
4707 if (heap->first_free_slot >= heap->current->end_slot) {
/* Reuse a following buffer if one already exists, otherwise allocate
 * and link a fresh one at the tail. */
4708 if (heap->current->next != NULL) {
4709 heap->current = heap->current->next;
4711 ProfilerHeapShotHeapBuffer *buffer = g_new (ProfilerHeapShotHeapBuffer, 1);
4712 buffer->previous = heap->last;
4713 buffer->next = NULL;
4714 buffer->start_slot = &(buffer->buffer [0]);
4715 buffer->end_slot = &(buffer->buffer [PROFILER_HEAP_SHOT_HEAP_BUFFER_SIZE]);
4716 heap->current = buffer;
4717 heap->last->next = buffer;
4718 heap->last = buffer;
4720 heap->first_free_slot = &(heap->current->buffer [0]);
4723 *(heap->first_free_slot) = obj;
4724 heap->first_free_slot ++;
4725 profiler_heap_report_object_reachable (job, obj);
/* Pop objects from the end of the heap buffer list until a live one is
 * found (reporting each popped object reachable or unreachable), stepping
 * back across buffer boundaries as needed.  Used by profiler_heap_scan to
 * backfill a slot vacated by a dead object.
 * NOTE(review): the return statements (live object, or NULL when the scan
 * meets current_slot) are in lines elided from this extract. */
4729 profiler_heap_pop_object_from_end (ProfilerHeapShotHeapBuffers *heap, ProfilerHeapShotWriteJob *job, MonoObject** current_slot) {
4730 while (heap->first_free_slot != current_slot) {
/* Step back one slot, moving to the previous buffer at a boundary. */
4733 if (heap->first_free_slot > heap->current->start_slot) {
4734 heap->first_free_slot --;
4736 heap->current = heap->current->previous;
4737 g_assert (heap->current != NULL);
4738 heap->first_free_slot = heap->current->end_slot - 1;
4741 obj = *(heap->first_free_slot);
4743 if (mono_object_is_alive (obj)) {
4744 profiler_heap_report_object_reachable (job, obj);
4747 profiler_heap_report_object_unreachable (job, obj);
/* Scan every tracked object in the heap buffer list: live objects are
 * reported reachable; dead ones are reported unreachable and their slot is
 * backfilled with a live object popped from the end of the list (compacting
 * the list in place). */
4754 profiler_heap_scan (ProfilerHeapShotHeapBuffers *heap, ProfilerHeapShotWriteJob *job) {
4755 ProfilerHeapShotHeapBuffer *current_buffer = heap->buffers;
4756 MonoObject** current_slot = current_buffer->start_slot;
4758 while (current_slot != heap->first_free_slot) {
4759 MonoObject *obj = *current_slot;
4760 if (mono_object_is_alive (obj)) {
4761 profiler_heap_report_object_reachable (job, obj);
4763 profiler_heap_report_object_unreachable (job, obj);
/* Replace the dead object with a live one taken from the end. */
4764 *current_slot = profiler_heap_pop_object_from_end (heap, job, current_slot);
/* Advance only past filled slots; cross into the next buffer at its end.
 * NOTE(review): the advance of current_slot is in lines elided here. */
4767 if (*current_slot != NULL) {
4770 if (current_slot == current_buffer->end_slot) {
4771 current_buffer = current_buffer->next;
4772 g_assert (current_buffer != NULL);
4773 current_slot = current_buffer->start_slot;
/* A heap-shot write job is needed when heap data will be dumped, or when
 * either unreachable-objects or collection-summary reporting is enabled. */
static inline gboolean
4780 heap_shot_write_job_should_be_created (gboolean dump_heap_data) {
4781 return dump_heap_data || profiler->action_flags.unreachable_objects || profiler->action_flags.collection_summary;
/* Handle the heap-profiling side of a GC event.  At POST_STOP_WORLD (world
 * stopped) it decides whether this collection's heap will be dumped,
 * flushing mappings and per-thread event buffers first; at MARK_END it
 * creates the write job, scans the tracked heap and the per-thread
 * allocation buffers, and hands the job to the writer thread.
 * "dump_heap_data" is static so the decision made at stop-world is visible
 * at mark-end of the same collection. */
4785 process_gc_event (MonoProfiler *profiler, gboolean do_heap_profiling, MonoGCEvent ev) {
4786 static gboolean dump_heap_data;
4789 case MONO_GC_EVENT_PRE_STOP_WORLD:
4790 // Get the lock, so we are sure nobody is flushing events during the collection,
4791 // and we can update all mappings (building the class descriptors).
4792 // This is necessary also during lock profiling (even if do_heap_profiling is FALSE).
4795 case MONO_GC_EVENT_POST_STOP_WORLD:
4796 if (do_heap_profiling) {
4797 dump_heap_data = dump_current_heap_snapshot ();
4798 if (heap_shot_write_job_should_be_created (dump_heap_data)) {
4799 ProfilerPerThreadData *data;
4800 // Update all mappings, so that we have built all the class descriptors.
4801 flush_all_mappings ();
4802 // Also write all event buffers, so that allocations are recorded.
4803 for (data = profiler->per_thread_data; data != NULL; data = data->next) {
4804 write_thread_data_block (data);
4808 dump_heap_data = FALSE;
4813 case MONO_GC_EVENT_MARK_END: {
4814 if (do_heap_profiling) {
4815 ProfilerHeapShotWriteJob *job;
4816 ProfilerPerThreadData *data;
4818 if (heap_shot_write_job_should_be_created (dump_heap_data)) {
4819 job = profiler_heap_shot_write_job_new (profiler->heap_shot_was_signalled, dump_heap_data, profiler->garbage_collection_counter);
4820 profiler->heap_shot_was_signalled = FALSE;
4821 MONO_PROFILER_GET_CURRENT_COUNTER (job->start_counter);
4822 MONO_PROFILER_GET_CURRENT_TIME (job->start_time);
/* Re-scan the objects already tracked across previous collections. */
4827 profiler_heap_scan (&(profiler->heap), job);
/* Process the per-thread buffers of objects allocated since the last
 * collection: live ones join the tracked heap, dead ones are reported. */
4829 for (data = profiler->per_thread_data; data != NULL; data = data->next) {
4830 ProfilerHeapShotObjectBuffer *buffer;
4831 for (buffer = data->heap_shot_object_buffers; buffer != NULL; buffer = buffer->next) {
4832 MonoObject **cursor;
4833 for (cursor = buffer->first_unprocessed_slot; cursor < buffer->next_free_slot; cursor ++) {
4834 MonoObject *obj = *cursor;
4835 #if DEBUG_HEAP_PROFILER
4836 printf ("gc_event: in object buffer %p(%p-%p) cursor at %p has object %p ", buffer, &(buffer->buffer [0]), buffer->end, cursor, obj);
4838 if (mono_object_is_alive (obj)) {
4839 #if DEBUG_HEAP_PROFILER
4840 printf ("(object is alive, adding to heap)\n");
4842 profiler_heap_add_object (&(profiler->heap), job, obj);
4844 #if DEBUG_HEAP_PROFILER
4845 printf ("(object is unreachable, reporting in job)\n");
4847 profiler_heap_report_object_unreachable (job, obj);
4850 buffer->first_unprocessed_slot = cursor;
4855 MONO_PROFILER_GET_CURRENT_COUNTER (job->end_counter);
4856 MONO_PROFILER_GET_CURRENT_TIME (job->end_time);
/* Queue the finished job and wake the writer thread to flush it. */
4858 profiler_add_heap_shot_write_job (job);
4859 profiler_free_heap_shot_write_jobs ();
4860 WRITER_EVENT_RAISE ();
/* Top-level GC event hook.  Bumps the collection counter at GC start,
 * optionally records a GC event (counter encodes collection number and
 * generation), and delegates heap-profiling work to process_gc_event —
 * before recording for POST_STOP_WORLD, after recording for all other
 * events, so buffer flushing and event ordering stay consistent. */
4871 gc_event (MonoProfiler *profiler, MonoGCEvent ev, int generation) {
4872 ProfilerPerThreadData *data;
4873 ProfilerEventData *event;
4874 gboolean do_heap_profiling = profiler->action_flags.unreachable_objects || profiler->action_flags.heap_shot || profiler->action_flags.collection_summary;
4875 guint32 event_value;
4877 if (ev == MONO_GC_EVENT_START) {
4878 profiler->garbage_collection_counter ++;
/* Pack collection number (high bits) and generation (low byte). */
4881 event_value = (profiler->garbage_collection_counter << 8) | generation;
4883 if (ev == MONO_GC_EVENT_POST_STOP_WORLD) {
4884 process_gc_event (profiler, do_heap_profiling, ev);
4887 /* Check if the gc event should be recorded. */
4888 if (profiler->action_flags.report_gc_events || do_heap_profiling) {
4889 GET_PROFILER_THREAD_DATA (data);
4890 GET_NEXT_FREE_EVENT (data, event);
4891 STORE_EVENT_NUMBER_COUNTER (event, profiler, event_value, MONO_PROFILER_EVENT_DATA_TYPE_OTHER, gc_event_code_from_profiler_event (ev), gc_event_kind_from_profiler_event (ev));
4892 COMMIT_RESERVED_EVENTS (data);
4895 if (ev != MONO_GC_EVENT_POST_STOP_WORLD) {
4896 process_gc_event (profiler, do_heap_profiling, ev);
/* GC heap-resize hook: bumps the collection counter and records a GC_RESIZE
 * event carrying the new heap size and the current collection number. */
4901 gc_resize (MonoProfiler *profiler, gint64 new_size) {
4902 ProfilerPerThreadData *data;
4903 ProfilerEventData *event;
4904 GET_PROFILER_THREAD_DATA (data);
4905 GET_NEXT_FREE_EVENT (data, event);
4906 profiler->garbage_collection_counter ++;
4907 STORE_EVENT_NUMBER_VALUE (event, profiler, new_size, MONO_PROFILER_EVENT_DATA_TYPE_OTHER, MONO_PROFILER_EVENT_GC_RESIZE, 0, profiler->garbage_collection_counter);
4908 COMMIT_RESERVED_EVENTS (data);
/* Runtime-initialized hook: enables the writer thread (raise + wait for its
 * acknowledgement) and registers the managed-code internal calls that let
 * applications control the profiler at runtime. */
4912 runtime_initialized (MonoProfiler *profiler) {
4913 LOG_WRITER_THREAD ("runtime_initialized: waking writer thread to enable it...\n");
4914 WRITER_EVENT_ENABLE_RAISE ();
4915 LOG_WRITER_THREAD ("runtime_initialized: waiting writer thread...\n");
4916 WRITER_EVENT_DONE_WAIT ();
4917 LOG_WRITER_THREAD ("runtime_initialized: writer thread enabled.\n");
4918 mono_add_internal_call ("Mono.Profiler.RuntimeControls::EnableProfiler", enable_profiler);
4919 mono_add_internal_call ("Mono.Profiler.RuntimeControls::DisableProfiler", disable_profiler);
4920 mono_add_internal_call ("Mono.Profiler.RuntimeControls::TakeHeapSnapshot", request_heap_snapshot);
4921 LOG_WRITER_THREAD ("runtime_initialized: initialized internal calls.\n");
/* called at the end of the program */
/* Shutdown sequence: unhook profiler events, terminate and join the writer
 * thread, flush all remaining data with timestamps, then tear down every
 * allocation owned by the profiler (names, mappings, hash tables,
 * per-thread data, statistical buffers, heap buffers, write buffers,
 * jobs, mutex) and, when enabled, close the oprofile agent. */
4926 profiler_shutdown (MonoProfiler *prof)
4928 ProfilerPerThreadData* current_thread_data;
4929 ProfilerPerThreadData* next_thread_data;
4931 LOG_WRITER_THREAD ("profiler_shutdown: zeroing relevant flags");
/* Stop receiving profiler callbacks before tearing anything down. */
4932 mono_profiler_set_events (0);
4933 //profiler->flags = 0;
4934 //profiler->action_flags.unreachable_objects = FALSE;
4935 //profiler->action_flags.heap_shot = FALSE;
4937 LOG_WRITER_THREAD ("profiler_shutdown: asking stats thread to exit");
4938 profiler->terminate_writer_thread = TRUE;
4939 WRITER_EVENT_RAISE ();
4940 LOG_WRITER_THREAD ("profiler_shutdown: waiting for stats thread to exit");
4941 WAIT_WRITER_THREAD ();
4942 LOG_WRITER_THREAD ("profiler_shutdown: stats thread should be dead now");
4943 WRITER_EVENT_DESTROY ();
/* Flush all buffered data and stamp the end time before freeing state. */
4946 flush_everything ();
4947 MONO_PROFILER_GET_CURRENT_TIME (profiler->end_time);
4948 MONO_PROFILER_GET_CURRENT_COUNTER (profiler->end_counter);
4954 g_free (profiler->file_name);
4955 if (profiler->file_name_suffix != NULL) {
4956 g_free (profiler->file_name_suffix);
4959 method_id_mapping_destroy (profiler->methods);
4960 class_id_mapping_destroy (profiler->classes);
4961 g_hash_table_destroy (profiler->loaded_assemblies);
4962 g_hash_table_destroy (profiler->loaded_modules);
4963 g_hash_table_destroy (profiler->loaded_appdomains);
4965 FREE_PROFILER_THREAD_DATA ();
/* Save "next" before destroying each node — the list is consumed here. */
4967 for (current_thread_data = profiler->per_thread_data; current_thread_data != NULL; current_thread_data = next_thread_data) {
4968 next_thread_data = current_thread_data->next;
4969 profiler_per_thread_data_destroy (current_thread_data);
4971 if (profiler->statistical_data != NULL) {
4972 profiler_statistical_data_destroy (profiler->statistical_data);
4974 if (profiler->statistical_data_ready != NULL) {
4975 profiler_statistical_data_destroy (profiler->statistical_data_ready);
4977 if (profiler->statistical_data_second_buffer != NULL) {
4978 profiler_statistical_data_destroy (profiler->statistical_data_second_buffer);
4980 if (profiler->executable_regions != NULL) {
4981 profiler_executable_memory_regions_destroy (profiler->executable_regions);
4984 profiler_heap_buffers_free (&(profiler->heap));
4985 if (profiler->heap_shot_command_file_name != NULL) {
4986 g_free (profiler->heap_shot_command_file_name);
4989 profiler_free_write_buffers ();
4990 profiler_destroy_heap_shot_write_jobs ();
4992 DELETE_PROFILER_MUTEX ();
/* Close the oprofile agent last, if it was opened during option parsing.
 * NOTE(review): the op_close_agent call is in lines elided from this extract. */
4995 if (profiler->action_flags.oprofile) {
5004 #ifndef PLATFORM_WIN32
/* Translate a signal name ("SIGUSR1", "SIGUSR2", "SIGPROF", case-insensitive)
 * to its signal number; anything else is parsed as a numeric string via atoi
 * (so unrecognized names yield 0).  NOTE(review): the return statements for
 * the named signals are in lines elided from this extract. */
5006 parse_signal_name (const char *signal_name) {
5007 if (! strcasecmp (signal_name, "SIGUSR1")) {
5009 } else if (! strcasecmp (signal_name, "SIGUSR2")) {
5011 } else if (! strcasecmp (signal_name, "SIGPROF")) {
5014 return atoi (signal_name);
/* Validate that a signal number is usable by the profiler: SIGUSR1, SIGUSR2,
 * or SIGPROF — the latter only when statistical profiling is NOT enabled
 * (statistical sampling claims SIGPROF for itself).  NOTE(review): the
 * TRUE/FALSE return statements are in lines elided from this extract. */
5018 check_signal_number (int signal_number) {
5019 if (((signal_number == SIGPROF) && ! (profiler->flags & MONO_PROFILE_STATISTICAL)) ||
5020 (signal_number == SIGUSR1) ||
5021 (signal_number == SIGUSR2)) {
/* Helper macros for setup_user_options argument parsing.  They rely on the
 * local variables "failure_message" and "has_minus" and on the
 * "failure_handling" label in that function: a failure records a printf-style
 * message (with %s for the offending argument) and jumps to the shared
 * error-reporting code.  Comments are placed only between complete macro
 * definitions — the bodies below use line continuations. */
5029 #define FAIL_ARGUMENT_CHECK(message) do {\
5030 failure_message = (message);\
5031 goto failure_handling;\
5033 #define FAIL_PARSING_VALUED_ARGUMENT FAIL_ARGUMENT_CHECK("cannot parse valued argument %s")
5034 #define FAIL_PARSING_FLAG_ARGUMENT FAIL_ARGUMENT_CHECK("cannot parse flag argument %s")
/* CHECK_CONDITION fails (with "message") when "condition" is true. */
5035 #define CHECK_CONDITION(condition,message) do {\
5036 gboolean result = (condition);\
5038 FAIL_ARGUMENT_CHECK (message);\
5041 #define FAIL_IF_HAS_MINUS CHECK_CONDITION(has_minus,"minus ('-') modifier not allowed for argument %s")
/* TRUE unless the argument carried a leading '-' (disable) modifier. */
5042 #define TRUE_IF_NOT_MINUS ((!has_minus)?TRUE:FALSE)
/* Options used when the caller supplies none: "s" (statistical profiling). */
5044 #define DEFAULT_ARGUMENTS "s"
5046 setup_user_options (const char *arguments) {
5047 gchar **arguments_array, **current_argument;
5048 #ifndef PLATFORM_WIN32
5049 int gc_request_signal_number = 0;
5050 int toggle_signal_number = 0;
5052 detect_fast_timer ();
5054 profiler->file_name = NULL;
5055 profiler->file_name_suffix = NULL;
5056 profiler->per_thread_buffer_size = 10000;
5057 profiler->statistical_buffer_size = 10000;
5058 profiler->statistical_call_chain_depth = 0;
5059 profiler->write_buffer_size = 1024;
5060 profiler->heap_shot_command_file_name = NULL;
5061 profiler->dump_next_heap_snapshots = 0;
5062 profiler->heap_shot_command_file_access_time = 0;
5063 profiler->heap_shot_was_signalled = FALSE;
5064 profiler->flags = MONO_PROFILE_APPDOMAIN_EVENTS|
5065 MONO_PROFILE_ASSEMBLY_EVENTS|
5066 MONO_PROFILE_MODULE_EVENTS|
5067 MONO_PROFILE_CLASS_EVENTS|
5068 MONO_PROFILE_METHOD_EVENTS|
5069 MONO_PROFILE_JIT_COMPILATION;
5070 profiler->profiler_enabled = TRUE;
5072 if (arguments == NULL) {
5073 arguments = DEFAULT_ARGUMENTS;
5074 } else if (strstr (arguments, ":")) {
5075 arguments = strstr (arguments, ":") + 1;
5076 if (arguments [0] == 0) {
5077 arguments = DEFAULT_ARGUMENTS;
5081 arguments_array = g_strsplit (arguments, ",", -1);
5083 for (current_argument = arguments_array; ((current_argument != NULL) && (current_argument [0] != 0)); current_argument ++) {
5084 char *argument = *current_argument;
5085 char *equals = strstr (argument, "=");
5086 const char *failure_message = NULL;
5090 if (*argument == '+') {
5094 } else if (*argument == '-') {
5103 if (equals != NULL) {
5104 int equals_position = equals - argument;
5106 if (! (strncmp (argument, "per-thread-buffer-size", equals_position) && strncmp (argument, "tbs", equals_position))) {
5107 int value = atoi (equals + 1);
5110 profiler->per_thread_buffer_size = value;
5112 } else if (! (strncmp (argument, "statistical", equals_position) && strncmp (argument, "stat", equals_position) && strncmp (argument, "s", equals_position))) {
5113 int value = atoi (equals + 1);
5119 profiler->statistical_call_chain_depth = value;
5120 profiler->flags |= MONO_PROFILE_STATISTICAL;
5122 } else if (! (strncmp (argument, "statistical-thread-buffer-size", equals_position) && strncmp (argument, "sbs", equals_position))) {
5123 int value = atoi (equals + 1);
5126 profiler->statistical_buffer_size = value;
5128 } else if (! (strncmp (argument, "write-buffer-size", equals_position) && strncmp (argument, "wbs", equals_position))) {
5129 int value = atoi (equals + 1);
5132 profiler->write_buffer_size = value;
5134 } else if (! (strncmp (argument, "output", equals_position) && strncmp (argument, "out", equals_position) && strncmp (argument, "o", equals_position) && strncmp (argument, "O", equals_position))) {
5136 if (strlen (equals + 1) > 0) {
5137 profiler->file_name = g_strdup (equals + 1);
5139 } else if (! (strncmp (argument, "output-suffix", equals_position) && strncmp (argument, "suffix", equals_position) && strncmp (argument, "os", equals_position) && strncmp (argument, "OS", equals_position))) {
5141 if (strlen (equals + 1) > 0) {
5142 profiler->file_name_suffix = g_strdup (equals + 1);
5144 } else if (! (strncmp (argument, "heap-shot", equals_position) && strncmp (argument, "heap", equals_position) && strncmp (argument, "h", equals_position))) {
5145 char *parameter = equals + 1;
5146 if (! strcmp (parameter, "all")) {
5147 profiler->dump_next_heap_snapshots = -1;
5149 gc_request_signal_number = parse_signal_name (parameter);
5153 profiler->action_flags.save_allocation_caller = TRUE;
5154 profiler->action_flags.save_allocation_stack = TRUE;
5155 profiler->action_flags.allocations_carry_id = TRUE_IF_NOT_MINUS;
5157 profiler->action_flags.heap_shot = TRUE_IF_NOT_MINUS;
5158 } else if (! (strncmp (argument, "gc-commands", equals_position) && strncmp (argument, "gc-c", equals_position) && strncmp (argument, "gcc", equals_position))) {
5160 if (strlen (equals + 1) > 0) {
5161 profiler->heap_shot_command_file_name = g_strdup (equals + 1);
5163 } else if (! (strncmp (argument, "gc-dumps", equals_position) && strncmp (argument, "gc-d", equals_position) && strncmp (argument, "gcd", equals_position))) {
5165 if (strlen (equals + 1) > 0) {
5166 profiler->dump_next_heap_snapshots = atoi (equals + 1);
5168 #ifndef PLATFORM_WIN32
5169 } else if (! (strncmp (argument, "toggle-signal", equals_position) && strncmp (argument, "ts", equals_position))) {
5171 if (strlen (equals + 1) > 0) {
5172 char *signal_name = equals + 1;
5173 toggle_signal_number = parse_signal_name (signal_name);
5177 FAIL_PARSING_VALUED_ARGUMENT;
5180 if (! (strcmp (argument, "jit") && strcmp (argument, "j"))) {
5181 profiler->action_flags.jit_time = TRUE_IF_NOT_MINUS;
5182 } else if (! (strcmp (argument, "allocations") && strcmp (argument, "alloc") && strcmp (argument, "a"))) {
5185 profiler->action_flags.save_allocation_caller = TRUE;
5186 profiler->action_flags.save_allocation_stack = TRUE;
5189 profiler->flags |= MONO_PROFILE_ALLOCATIONS;
5191 profiler->flags &= ~MONO_PROFILE_ALLOCATIONS;
5193 } else if (! (strcmp (argument, "monitor") && strcmp (argument, "locks") && strcmp (argument, "lock"))) {
5195 profiler->action_flags.track_stack = TRUE;
5196 profiler->flags |= MONO_PROFILE_MONITOR_EVENTS;
5197 profiler->flags |= MONO_PROFILE_GC;
/* Tail of the profiler option parser: each "else if" branch matches one
 * user flag (long and short spellings) and updates either the action
 * flags or the MONO_PROFILE_* event mask.  NOTE(review): this listing
 * elides interleaved source lines, so some else-branches and closing
 * braces are not visible here. */
/* "gc" / "g": record garbage-collection events. */
5198 } else if (! (strcmp (argument, "gc") && strcmp (argument, "g"))) {
5200 profiler->action_flags.report_gc_events = TRUE;
5201 profiler->flags |= MONO_PROFILE_GC;
/* "allocations-summary" / "as": per-collection allocation summary.
 * TRUE_IF_NOT_MINUS presumably yields FALSE when the flag was written
 * with a leading '-' (explicit disable) -- TODO confirm macro. */
5202 } else if (! (strcmp (argument, "allocations-summary") && strcmp (argument, "as"))) {
5203 profiler->action_flags.collection_summary = TRUE_IF_NOT_MINUS;
/* "heap-shot" / "heap" / "h": heap snapshots; also records the
 * allocating caller, the allocation stack and per-object ids. */
5204 } else if (! (strcmp (argument, "heap-shot") && strcmp (argument, "heap") && strcmp (argument, "h"))) {
5207 profiler->action_flags.save_allocation_caller = TRUE;
5208 profiler->action_flags.save_allocation_stack = TRUE;
5209 profiler->action_flags.allocations_carry_id = TRUE_IF_NOT_MINUS;
5211 profiler->action_flags.heap_shot = TRUE_IF_NOT_MINUS;
/* "unreachable" / "free" / "f": report objects found dead at GC time. */
5212 } else if (! (strcmp (argument, "unreachable") && strcmp (argument, "free") && strcmp (argument, "f"))) {
5213 profiler->action_flags.unreachable_objects = TRUE_IF_NOT_MINUS;
/* "threads" / "t": toggle thread events (the disabling else-branch sits
 * between the two assignments in the full source). */
5214 } else if (! (strcmp (argument, "threads") && strcmp (argument, "t"))) {
5216 profiler->flags |= MONO_PROFILE_THREADS;
5218 profiler->flags &= ~MONO_PROFILE_THREADS;
/* "enter-leave" / "calls" / "c": trace every method enter/leave. */
5220 } else if (! (strcmp (argument, "enter-leave") && strcmp (argument, "calls") && strcmp (argument, "c"))) {
5221 profiler->action_flags.track_calls = TRUE_IF_NOT_MINUS;
/* "statistical" / "stat" / "s": toggle the sampling profiler. */
5222 } else if (! (strcmp (argument, "statistical") && strcmp (argument, "stat") && strcmp (argument, "s"))) {
5224 profiler->flags |= MONO_PROFILE_STATISTICAL;
5226 profiler->flags &= ~MONO_PROFILE_STATISTICAL;
/* Fine-grained allocation-tracking switches. */
5228 } else if (! (strcmp (argument, "save-allocation-caller") && strcmp (argument, "sac"))) {
5229 profiler->action_flags.save_allocation_caller = TRUE_IF_NOT_MINUS;
5230 } else if (! (strcmp (argument, "save-allocation-stack") && strcmp (argument, "sas"))) {
5231 profiler->action_flags.save_allocation_stack = TRUE_IF_NOT_MINUS;
5232 } else if (! (strcmp (argument, "allocations-carry-id") && strcmp (argument, "aci"))) {
5233 profiler->action_flags.allocations_carry_id = TRUE_IF_NOT_MINUS;
/* "start-enabled"/"se" and "start-disabled"/"sd" choose whether the
 * profiler begins in the enabled state. */
5234 } else if (! (strcmp (argument, "start-enabled") && strcmp (argument, "se"))) {
5235 profiler->profiler_enabled = TRUE_IF_NOT_MINUS;
5236 } else if (! (strcmp (argument, "start-disabled") && strcmp (argument, "sd"))) {
5237 profiler->profiler_enabled = ! TRUE_IF_NOT_MINUS;
/* "force-accurate-timer" / "fac": force use of the fast timer. */
5238 } else if (! (strcmp (argument, "force-accurate-timer") && strcmp (argument, "fac"))) {
5239 use_fast_timer = TRUE_IF_NOT_MINUS;
/* "oprofile" / "oprof": feed JIT info to the oprofile agent; requires
 * JIT compilation events and a working op_open_agent connection. */
5241 } else if (! (strcmp (argument, "oprofile") && strcmp (argument, "oprof"))) {
5242 profiler->flags |= MONO_PROFILE_JIT_COMPILATION;
5243 profiler->action_flags.oprofile = TRUE;
5244 if (op_open_agent ()) {
5245 FAIL_ARGUMENT_CHECK ("problem calling op_open_agent");
/* "logging" is accepted as a no-op; anything else is a parse error. */
5248 } else if (strcmp (argument, "logging")) {
5249 FAIL_PARSING_FLAG_ARGUMENT;
/* Report whatever failure the checks above recorded; failure_message
 * is a printf-style format that consumes the offending argument. */
5254 if (failure_message != NULL) {
5255 g_warning (failure_message, argument);
5256 failure_message = NULL;
5260 g_free (arguments_array);
/* Install the optional out-of-band signal handlers (POSIX only): one
 * signal forces a GC + heap snapshot, the other toggles the profiler.
 * A signal number is usable only if check_signal_number accepts it and
 * the two configured signals are distinct; g_error aborts otherwise. */
5262 #ifndef PLATFORM_WIN32
5263 if (gc_request_signal_number != 0) {
5264 if (check_signal_number (gc_request_signal_number) && (gc_request_signal_number != toggle_signal_number)) {
5265 add_gc_request_handler (gc_request_signal_number);
5267 g_error ("Cannot use signal %d", gc_request_signal_number);
5270 if (toggle_signal_number != 0) {
5271 if (check_signal_number (toggle_signal_number) && (toggle_signal_number != gc_request_signal_number)) {
5272 add_toggle_handler (toggle_signal_number);
/* FIX: copy-paste bug -- the error for a rejected *toggle* signal
 * reported gc_request_signal_number instead of the toggle signal. */
5274 g_error ("Cannot use signal %d", toggle_signal_number);
5279 /* Ensure that the profiler flags needed to support required action flags are active */
/* jit_time needs JIT begin/end events to attribute compilation time. */
5280 if (profiler->action_flags.jit_time) {
5281 profiler->flags |= MONO_PROFILE_JIT_COMPILATION;
/* Any per-allocation detail requires allocation events. */
5283 if (profiler->action_flags.save_allocation_caller || profiler->action_flags.save_allocation_stack || profiler->action_flags.allocations_carry_id) {
5284 profiler->flags |= MONO_PROFILE_ALLOCATIONS;
/* Heap analysis modes need allocation events plus GC event reporting. */
5286 if (profiler->action_flags.collection_summary || profiler->action_flags.heap_shot || profiler->action_flags.unreachable_objects) {
5287 profiler->flags |= MONO_PROFILE_ALLOCATIONS;
5288 profiler->action_flags.report_gc_events = TRUE;
/* Call tracing needs enter/leave events and implies jit timing. */
5290 if (profiler->action_flags.track_calls) {
5291 profiler->flags |= MONO_PROFILE_ENTER_LEAVE;
5292 profiler->action_flags.jit_time = TRUE;
/* Recording allocation callers/stacks implies shadow-stack tracking,
 * which is itself driven by enter/leave events. */
5294 if (profiler->action_flags.save_allocation_caller || profiler->action_flags.save_allocation_stack) {
5295 profiler->action_flags.track_stack = TRUE;
5296 profiler->flags |= MONO_PROFILE_ENTER_LEAVE;
5298 if (profiler->action_flags.track_stack) {
5299 profiler->flags |= MONO_PROFILE_ENTER_LEAVE;
5302 /* Tracking call stacks is useless if we already emit all enter-exit events... */
5303 if (profiler->action_flags.track_calls) {
5304 profiler->action_flags.track_stack = FALSE;
5305 profiler->action_flags.save_allocation_caller = FALSE;
5306 profiler->action_flags.save_allocation_stack = FALSE;
5309 /* Without JIT events the stat profiler will not find method IDs... */
/* FIX: both conditions below used bitwise OR ('|'), which is non-zero
 * for ANY flags value (the masks are non-zero constants) and so the
 * branches were always taken; the intent is to test whether the bit is
 * set, i.e. bitwise AND ('&'). */
5310 if (profiler->flags & MONO_PROFILE_STATISTICAL) {
5311 profiler->flags |= MONO_PROFILE_JIT_COMPILATION;
5313 /* Profiling allocations without knowing which gc we are doing is not nice... */
5314 if (profiler->flags & MONO_PROFILE_ALLOCATIONS) {
5315 profiler->flags |= MONO_PROFILE_GC;
5316 profiler->action_flags.report_gc_events = TRUE;
/* No explicit output file: derive "<progname>[-<suffix>].mprof" from
 * the program name, stripping any directory components (both '/' and
 * '\\' separators) and the trailing extension. */
5320 if (profiler->file_name == NULL) {
5321 char *program_name = g_get_prgname ();
5323 if (program_name != NULL) {
5324 char *name_buffer = g_strdup (program_name);
5325 char *name_start = name_buffer;
5328 /* Jump over the last '/' */
5329 cursor = strrchr (name_buffer, '/');
5330 if (cursor == NULL) {
5331 cursor = name_buffer;
/* (the cursor advance that skips the separator itself is elided here) */
5335 name_start = cursor;
5337 /* Then jump over the last '\\' */
5338 cursor = strrchr (name_start, '\\');
5339 if (cursor == NULL) {
5340 cursor = name_start;
5344 name_start = cursor;
5346 /* Finally, find the last '.' */
5347 cursor = strrchr (name_start, '.');
/* A found '.' presumably gets overwritten with '\0' (line elided) so
 * the extension is dropped -- TODO confirm against full source. */
5348 if (cursor != NULL) {
/* Compose the final name, honouring an optional user suffix. */
5352 if (profiler->file_name_suffix == NULL) {
5353 profiler->file_name = g_strdup_printf ("%s.mprof", name_start);
5355 profiler->file_name = g_strdup_printf ("%s-%s.mprof", name_start, profiler->file_name_suffix);
5357 g_free (name_buffer);
/* Fallback when the program name is unavailable. */
5359 profiler->file_name = g_strdup_printf ("%s.mprof", "profiler-log");
/* Runtime "manage" callback for the writer thread: records the detach
 * request and wakes the writer loop so it can flush and detach.
 * (Return-type line and closing brace are elided in this listing.) */
5365 thread_detach_callback (MonoThread *thread) {
5366 LOG_WRITER_THREAD ("thread_detach_callback: asking writer thread to detach");
5367 profiler->detach_writer_thread = TRUE;
5368 WRITER_EVENT_RAISE ();
5369 LOG_WRITER_THREAD ("thread_detach_callback: done");
/* Body of the background writer thread.  Lifecycle: wait for the OK to
 * attach to the runtime, then loop: sleep on the writer event and, on
 * each wakeup, run a requested collection, flush mappings / statistical
 * data / heap-shot jobs, and honour flush, detach and terminate
 * requests.  NOTE(review): many interleaved lines (else keywords, lock
 * acquire/release, the loop header, closing braces) are elided here. */
5374 data_writer_thread (gpointer nothing) {
5375 static gboolean thread_attached = FALSE;
5376 static gboolean thread_detached = FALSE;
5377 static MonoThread *this_thread = NULL;
5379 /* Wait for the OK to attach to the runtime */
5380 WRITER_EVENT_ENABLE_WAIT ();
5381 if (! profiler->terminate_writer_thread) {
5382 MonoDomain * root_domain = mono_get_root_domain ();
5383 if (root_domain != NULL) {
5384 LOG_WRITER_THREAD ("data_writer_thread: attaching thread");
5385 this_thread = mono_thread_attach (root_domain);
/* Register the callback that lets runtime shutdown stop this thread. */
5386 mono_thread_set_manage_callback (this_thread, thread_detach_callback);
5387 thread_attached = TRUE;
5389 g_error ("Cannot get root domain\n");
5392 /* Execution was too short, pretend we attached and detached. */
5393 thread_attached = TRUE;
5394 thread_detached = TRUE;
5396 profiler->writer_thread_enabled = TRUE;
5397 /* Notify that we are attached to the runtime */
5398 WRITER_EVENT_DONE_RAISE ();
5401 ProfilerStatisticalData *statistical_data;
/* --- main event loop (loop header elided in this listing) --- */
5404 LOG_WRITER_THREAD ("data_writer_thread: going to sleep");
5405 WRITER_EVENT_WAIT ();
5406 LOG_WRITER_THREAD ("data_writer_thread: just woke up");
/* A heap-shot signal asks us to force a max-generation collection. */
5408 if (profiler->heap_shot_was_signalled) {
5409 LOG_WRITER_THREAD ("data_writer_thread: starting requested collection")&#59;
5410 mono_gc_collect (mono_gc_max_generation ());
5411 LOG_WRITER_THREAD ("data_writer_thread: requested collection done");
/* Idle when there is no ready statistical buffer, no heap write jobs
 * and no flush request. */
5414 statistical_data = profiler->statistical_data_ready;
5415 done = (statistical_data == NULL) && (profiler->heap_shot_write_jobs == NULL) && (profiler->writer_thread_flush_everything == FALSE);
5417 if ((!done) && thread_attached) {
5418 if (profiler->writer_thread_flush_everything) {
5419 /* Note that this assumes the lock is held by the thread that woke us up! */
5420 if (! thread_detached) {
5421 LOG_WRITER_THREAD ("data_writer_thread: flushing everything...");
5422 flush_everything ();
5423 profiler->writer_thread_flush_everything = FALSE;
5424 WRITER_EVENT_DONE_RAISE ();
5425 LOG_WRITER_THREAD ("data_writer_thread: flushed everything.");
5427 LOG_WRITER_THREAD ("data_writer_thread: flushing requested, but thread is detached...");
5428 profiler->writer_thread_flush_everything = FALSE;
5429 WRITER_EVENT_DONE_RAISE ();
5430 LOG_WRITER_THREAD ("data_writer_thread: done event raised.");
5433 LOG_WRITER_THREAD ("data_writer_thread: acquiring lock and writing data");
5436 // This makes sure that all method ids are in place
5437 LOG_WRITER_THREAD ("data_writer_thread: writing mapping...");
5438 flush_all_mappings ();
5439 LOG_WRITER_THREAD ("data_writer_thread: wrote mapping");
/* After draining the ready buffer, recycle it as the second buffer so
 * sampling can continue without a new allocation. */
5441 if ((statistical_data != NULL) && ! thread_detached) {
5442 LOG_WRITER_THREAD ("data_writer_thread: writing statistical data...");
5443 profiler->statistical_data_ready = NULL;
5444 write_statistical_data_block (statistical_data);
5445 statistical_data->next_free_index = 0;
5446 statistical_data->first_unwritten_index = 0;
5447 profiler->statistical_data_second_buffer = statistical_data;
5448 LOG_WRITER_THREAD ("data_writer_thread: wrote statistical data");
5451 profiler_process_heap_shot_write_jobs ();
5454 LOG_WRITER_THREAD ("data_writer_thread: wrote data and released lock");
/* Flush requested while not attached: just acknowledge the request. */
5457 if (profiler->writer_thread_flush_everything) {
5458 LOG_WRITER_THREAD ("data_writer_thread: flushing requested, but thread is not attached...");
5459 profiler->writer_thread_flush_everything = FALSE;
5460 WRITER_EVENT_DONE_RAISE ();
5461 LOG_WRITER_THREAD ("data_writer_thread: done event raised.");
/* Detach request: flush outstanding data, then detach from runtime. */
5465 if (profiler->detach_writer_thread) {
5466 if (this_thread != NULL) {
5467 LOG_WRITER_THREAD ("data_writer_thread: detach requested, acquiring lock and flushing data");
5469 flush_everything ();
5471 LOG_WRITER_THREAD ("data_writer_thread: flushed data and released lock");
5472 LOG_WRITER_THREAD ("data_writer_thread: detaching thread");
5473 mono_thread_detach (this_thread);
5475 profiler->detach_writer_thread = FALSE;
5476 thread_detached = TRUE;
5478 LOG_WRITER_THREAD ("data_writer_thread: warning: thread has already been detached");
/* Terminate request: clean up writer-thread state and exit the loop. */
5482 if (profiler->terminate_writer_thread) {
5483 LOG_WRITER_THREAD ("data_writer_thread: exiting thread");
5484 CLEANUP_WRITER_THREAD ();
/* Forward declaration of the profiler entry point (return-type line is
 * elided in this listing). */
5492 mono_profiler_startup (const char *desc);
5494 /* the entry point (mono_profiler_load?) */
/* Initializes the whole logging profiler: allocates the MonoProfiler
 * singleton, parses user options from 'desc', sets up id mappings,
 * buffers and the writer thread, then installs every profiler callback
 * and activates the selected event mask.  (The definition continues
 * past the end of this listing.) */
5496 mono_profiler_startup (const char *desc)
5498 profiler = g_new0 (MonoProfiler, 1);
/* Parse user options, falling back to the built-in defaults. */
5500 setup_user_options ((desc != NULL) ? desc : DEFAULT_ARGUMENTS);
5502 INITIALIZE_PROFILER_MUTEX ();
5503 MONO_PROFILER_GET_CURRENT_TIME (profiler->start_time);
5504 MONO_PROFILER_GET_CURRENT_COUNTER (profiler->start_counter);
5505 profiler->last_header_counter = 0;
/* Id mappings for methods/classes and tables of loaded elements; table
 * values are released through loaded_element_destroy. */
5507 profiler->methods = method_id_mapping_new ();
5508 profiler->classes = class_id_mapping_new ();
5509 profiler->loaded_element_next_free_id = 1;
5510 profiler->loaded_assemblies = g_hash_table_new_full (g_direct_hash, NULL, NULL, loaded_element_destroy);
5511 profiler->loaded_modules = g_hash_table_new_full (g_direct_hash, NULL, NULL, loaded_element_destroy);
5512 profiler->loaded_appdomains = g_hash_table_new_full (g_direct_hash, NULL, NULL, loaded_element_destroy);
/* Double-buffered statistical (sampling) data. */
5514 profiler->statistical_data = profiler_statistical_data_new (profiler);
5515 profiler->statistical_data_second_buffer = profiler_statistical_data_new (profiler);
/* Output write-buffer chain, starting with a single empty buffer. */
5517 profiler->write_buffers = g_malloc (sizeof (ProfilerFileWriteBuffer) + PROFILER_FILE_WRITE_BUFFER_SIZE);
5518 profiler->write_buffers->next = NULL;
5519 profiler->current_write_buffer = profiler->write_buffers;
5520 profiler->current_write_position = 0;
5521 profiler->full_write_buffers = 0;
5523 profiler->executable_regions = profiler_executable_memory_regions_new (1, 1);
5525 profiler->executable_files.table = g_hash_table_new (g_str_hash, g_str_equal);
5526 profiler->executable_files.new_files = NULL;
/* Heap buffers are only set up when some heap analysis was requested;
 * otherwise they are just cleared. */
5528 profiler->heap_shot_write_jobs = NULL;
5529 if (profiler->action_flags.unreachable_objects || profiler->action_flags.heap_shot || profiler->action_flags.collection_summary) {
5530 profiler_heap_buffers_setup (&(profiler->heap));
5532 profiler_heap_buffers_clear (&(profiler->heap));
5534 profiler->garbage_collection_counter = 0;
/* Start the background writer thread (see data_writer_thread). */
5536 WRITER_EVENT_INIT ();
5537 LOG_WRITER_THREAD ("mono_profiler_startup: creating writer thread");
5538 CREATE_WRITER_THREAD (data_writer_thread);
5539 LOG_WRITER_THREAD ("mono_profiler_startup: created writer thread");
5541 ALLOCATE_PROFILER_THREAD_DATA ();
/* Emit the file header blocks before any event data. */
5545 write_intro_block ();
5546 write_directives_block (TRUE);
/* Install the shutdown handler and every event callback, then enable
 * the event mask computed by setup_user_options. */
5548 mono_profiler_install (profiler, profiler_shutdown);
5550 mono_profiler_install_appdomain (appdomain_start_load, appdomain_end_load,
5551 appdomain_start_unload, appdomain_end_unload);
5552 mono_profiler_install_assembly (assembly_start_load, assembly_end_load,
5553 assembly_start_unload, assembly_end_unload);
5554 mono_profiler_install_module (module_start_load, module_end_load,
5555 module_start_unload, module_end_unload);
5556 mono_profiler_install_class (class_start_load, class_end_load,
5557 class_start_unload, class_end_unload);
5558 mono_profiler_install_jit_compile (method_start_jit, method_end_jit);
5559 mono_profiler_install_enter_leave (method_enter, method_leave);
5560 mono_profiler_install_method_free (method_free);
5561 mono_profiler_install_thread (thread_start, thread_end);
5562 mono_profiler_install_allocation (object_allocated);
5563 mono_profiler_install_monitor (monitor_event);
5564 mono_profiler_install_statistical (statistical_hit);
5565 mono_profiler_install_statistical_call_chain (statistical_call_chain, profiler->statistical_call_chain_depth);
5566 mono_profiler_install_gc (gc_event, gc_resize);
5567 mono_profiler_install_runtime_initialized (runtime_initialized);
5569 mono_profiler_install_jit_end (method_jit_result);
5572 mono_profiler_set_events (profiler->flags);