2 #include <mono/metadata/profiler.h>
3 #include <mono/metadata/class.h>
4 #include <mono/metadata/class-internals.h>
5 #include <mono/metadata/assembly.h>
6 #include <mono/metadata/loader.h>
7 #include <mono/metadata/threads.h>
8 #include <mono/metadata/debug-helpers.h>
9 #include <mono/metadata/mono-gc.h>
10 #include <mono/io-layer/atomic.h>
19 #define HAS_OPROFILE 0
22 #include <libopagent.h>
25 // Needed for heap analysis
26 extern gboolean mono_object_is_alive (MonoObject* obj);
/* Tags for the top-level blocks written to the profiler output file
 * (the "typedef enum {" opener for this enum is not visible in this chunk). */
29 MONO_PROFILER_FILE_BLOCK_KIND_INTRO = 1,
30 MONO_PROFILER_FILE_BLOCK_KIND_END = 2,
31 MONO_PROFILER_FILE_BLOCK_KIND_MAPPING = 3,
32 MONO_PROFILER_FILE_BLOCK_KIND_LOADED = 4,
33 MONO_PROFILER_FILE_BLOCK_KIND_UNLOADED = 5,
34 MONO_PROFILER_FILE_BLOCK_KIND_EVENTS = 6,
35 MONO_PROFILER_FILE_BLOCK_KIND_STATISTICAL = 7,
36 MONO_PROFILER_FILE_BLOCK_KIND_HEAP_DATA = 8,
37 MONO_PROFILER_FILE_BLOCK_KIND_HEAP_SUMMARY = 9,
38 MONO_PROFILER_FILE_BLOCK_KIND_DIRECTIVES = 10
39 } MonoProfilerFileBlockKind;
/* Directives emitted in a DIRECTIVES block; judging by the names they
 * describe how allocation events are encoded (caller, stack, object id) —
 * the consuming code is outside this chunk. */
42 MONO_PROFILER_DIRECTIVE_END = 0,
43 MONO_PROFILER_DIRECTIVE_ALLOCATIONS_CARRY_CALLER = 1,
44 MONO_PROFILER_DIRECTIVE_ALLOCATIONS_HAVE_STACK = 2,
45 MONO_PROFILER_DIRECTIVE_ALLOCATIONS_CARRY_ID = 3,
46 MONO_PROFILER_DIRECTIVE_LAST
47 } MonoProfilerDirectives;
/* Bit flags (powers of two) describing load/unload records: what was
 * loaded (module/assembly/appdomain) and whether it succeeded. */
50 #define MONO_PROFILER_LOADED_EVENT_MODULE 1
51 #define MONO_PROFILER_LOADED_EVENT_ASSEMBLY 2
52 #define MONO_PROFILER_LOADED_EVENT_APPDOMAIN 4
53 #define MONO_PROFILER_LOADED_EVENT_SUCCESS 8
54 #define MONO_PROFILER_LOADED_EVENT_FAILURE 16
/* Discriminator for the payload of a ProfilerEventData record. */
57 MONO_PROFILER_EVENT_DATA_TYPE_OTHER = 0,
58 MONO_PROFILER_EVENT_DATA_TYPE_METHOD = 1,
59 MONO_PROFILER_EVENT_DATA_TYPE_CLASS = 2
60 } MonoProfilerEventDataType;
/* Per-event record packed into bitfields (several fields of this struct
 * are not visible in this chunk). */
62 typedef struct _ProfilerEventData {
67 unsigned int data_type:2;
70 unsigned int value:25;
/* EVENT_VALUE_BITS must stay in sync with the width of the "value"
 * bitfield above; MAX_EVENT_VALUE is the largest value it can hold. */
73 #define EVENT_VALUE_BITS (25)
74 #define MAX_EVENT_VALUE ((1<<EVENT_VALUE_BITS)-1)
/* Method-related event codes (enum openers are not visible in this chunk). */
77 MONO_PROFILER_EVENT_METHOD_JIT = 0,
78 MONO_PROFILER_EVENT_METHOD_FREED = 1,
79 MONO_PROFILER_EVENT_METHOD_CALL = 2,
80 MONO_PROFILER_EVENT_METHOD_ALLOCATION_CALLER = 3,
81 MONO_PROFILER_EVENT_METHOD_ALLOCATION_JIT_TIME_CALLER = 4
82 } MonoProfilerMethodEvents;
/* Class-related event codes. */
84 MONO_PROFILER_EVENT_CLASS_LOAD = 0,
85 MONO_PROFILER_EVENT_CLASS_UNLOAD = 1,
86 MONO_PROFILER_EVENT_CLASS_EXCEPTION = 2,
87 MONO_PROFILER_EVENT_CLASS_ALLOCATION = 3
88 } MonoProfilerClassEvents;
/* Success/failure qualifier OR-ed onto class events; FAILURE (4) doubles
 * as the mask used to extract it. */
90 MONO_PROFILER_EVENT_RESULT_SUCCESS = 0,
91 MONO_PROFILER_EVENT_RESULT_FAILURE = 4
92 } MonoProfilerEventResult;
93 #define MONO_PROFILER_EVENT_RESULT_MASK MONO_PROFILER_EVENT_RESULT_FAILURE
/* "Other" event codes: threads, GC phases, stack sections, allocation ids. */
95 MONO_PROFILER_EVENT_THREAD = 1,
96 MONO_PROFILER_EVENT_GC_COLLECTION = 2,
97 MONO_PROFILER_EVENT_GC_MARK = 3,
98 MONO_PROFILER_EVENT_GC_SWEEP = 4,
99 MONO_PROFILER_EVENT_GC_RESIZE = 5,
100 MONO_PROFILER_EVENT_GC_STOP_WORLD = 6,
101 MONO_PROFILER_EVENT_GC_START_WORLD = 7,
102 MONO_PROFILER_EVENT_JIT_TIME_ALLOCATION = 8,
103 MONO_PROFILER_EVENT_STACK_SECTION = 9,
104 MONO_PROFILER_EVENT_ALLOCATION_OBJECT_ID = 10
105 } MonoProfilerEvents;
/* Whether an event marks the start or the end of its activity. */
107 MONO_PROFILER_EVENT_KIND_START = 0,
108 MONO_PROFILER_EVENT_KIND_END = 1
109 } MonoProfilerEventKind;
/* Store the current wall-clock time, in microseconds, into (t).
 * FIX: the original read "gettimeofday (¤t_time, NULL)" — the
 * address-of expression "&current_time" had been corrupted through an
 * HTML-entity mangling ("&curren;" -> "¤"); restored here. */
#define MONO_PROFILER_GET_CURRENT_TIME(t) {\
	struct timeval current_time;\
	gettimeofday (&current_time, NULL);\
	(t) = (((guint64)current_time.tv_sec) * 1000000) + current_time.tv_usec;\
/* TRUE when rdtsc can be used instead of gettimeofday for timestamps. */
117 static gboolean use_fast_timer = FALSE;
119 #if (defined(__i386__) || defined(__x86_64__)) && ! defined(PLATFORM_WIN32)
121 #if defined(__i386__)
/* Pre-assembled x86 machine code implementing cpuid; it is executed by
 * casting the array to a function pointer below (done to avoid gcc/PIC
 * issues with inline cpuid — see the URL further down).
 * NOTE(review): executing a data array requires the page to be
 * executable — presumably acceptable on the targeted platforms; confirm. */
122 static const guchar cpuid_impl [] = {
123 0x55, /* push %ebp */
124 0x89, 0xe5, /* mov %esp,%ebp */
125 0x53, /* push %ebx */
126 0x8b, 0x45, 0x08, /* mov 0x8(%ebp),%eax */
127 0x0f, 0xa2, /* cpuid */
128 0x50, /* push %eax */
129 0x8b, 0x45, 0x10, /* mov 0x10(%ebp),%eax */
130 0x89, 0x18, /* mov %ebx,(%eax) */
131 0x8b, 0x45, 0x14, /* mov 0x14(%ebp),%eax */
132 0x89, 0x08, /* mov %ecx,(%eax) */
133 0x8b, 0x45, 0x18, /* mov 0x18(%ebp),%eax */
134 0x89, 0x10, /* mov %edx,(%eax) */
136 0x8b, 0x55, 0x0c, /* mov 0xc(%ebp),%edx */
137 0x89, 0x02, /* mov %eax,(%edx) */
/* Signature of the machine-code blob above. */
143 typedef void (*CpuidFunc) (int id, int* p_eax, int* p_ebx, int* p_ecx, int* p_edx);
/* Run cpuid leaf `id`, returning eax/ebx/ecx/edx through the pointers
 * (return-type line and parts of the body are not visible in this chunk). */
146 cpuid (int id, int* p_eax, int* p_ebx, int* p_ecx, int* p_edx) {
/* Probe whether the cpuid instruction exists by toggling EFLAGS bit 21. */
149 __asm__ __volatile__ (
152 "movl %%eax, %%edx\n"
153 "xorl $0x200000, %%eax\n"
158 "xorl %%edx, %%eax\n"
159 "andl $0x200000, %%eax\n"
/* Call the pre-assembled blob instead of inline asm. */
181 CpuidFunc func = (CpuidFunc) cpuid_impl;
182 func (id, p_eax, p_ebx, p_ecx, p_edx);
184 * We use this approach because of issues with gcc and pic code, see:
185 * http://gcc.gnu.org/cgi-bin/gnatsweb.pl?cmd=view%20audit-trail&database=gcc&pr=7329
186 __asm__ __volatile__ ("cpuid"
187 : "=a" (*p_eax), "=b" (*p_ebx), "=c" (*p_ecx), "=d" (*p_edx)
/* i386: enable the rdtsc fast timer if cpuid leaf 1 succeeds
 * (the tested feature bit is on lines not visible in this chunk). */
195 static void detect_fast_timer (void) {
196 int p_eax, p_ebx, p_ecx, p_edx;
198 if (cpuid (0x1, &p_eax, &p_ebx, &p_ecx, &p_edx)) {
200 use_fast_timer = TRUE;
202 use_fast_timer = FALSE;
205 use_fast_timer = FALSE;
/* x86_64: cpuid always exists, so query it with inline asm directly. */
210 #if defined(__x86_64__)
211 static void detect_fast_timer (void) {
213 guint32 eax,ebx,ecx,edx;
214 __asm__ __volatile__ ("cpuid" : "=a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx) : "a"(op));
216 use_fast_timer = TRUE;
218 use_fast_timer = FALSE;
/* Read the CPU timestamp counter (edx:eax) as a 64-bit value. */
223 static __inline__ guint64 rdtsc(void) {
225 __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi));
226 return ((guint64) lo) | (((guint64) hi) << 32);
/* Prefer rdtsc when available, otherwise fall back to gettimeofday. */
228 #define MONO_PROFILER_GET_CURRENT_COUNTER(c) {\
229 if (use_fast_timer) {\
232 MONO_PROFILER_GET_CURRENT_TIME ((c));\
/* Non-x86 (or Win32) fallback: no fast timer, counter == wall clock. */
236 static void detect_fast_timer (void) {
237 use_fast_timer = FALSE;
239 #define MONO_PROFILER_GET_CURRENT_COUNTER(c) MONO_PROFILER_GET_CURRENT_TIME ((c))
/* A class reference-layout bitmap fits in one guint64 ("compact") when it
 * has at most 64 slots; otherwise an extended byte array is used. */
243 #define CLASS_LAYOUT_PACKED_BITMAP_SIZE 64
/* Sentinel for layout slots/references that have not been computed yet. */
244 #define CLASS_LAYOUT_NOT_INITIALIZED (0xFFFF)
/* Codes distinguishing records inside heap-snapshot job buffers. */
247 HEAP_CODE_OBJECT = 1,
248 HEAP_CODE_FREE_OBJECT_CLASS = 2,
250 } HeapProfilerJobValueCode;
/* Per-class profiler data (layout/bitmap union members are on lines not
 * visible in this chunk). */
251 typedef struct _MonoProfilerClassData {
260 } MonoProfilerClassData;
262 typedef struct _MonoProfilerMethodData {
265 } MonoProfilerMethodData;
/* Maps a MonoClass* to a numeric id for the output file; elements not yet
 * flushed to disk are chained through next_unwritten. */
267 typedef struct _ClassIdMappingElement {
271 struct _ClassIdMappingElement *next_unwritten;
272 MonoProfilerClassData data;
273 } ClassIdMappingElement;
/* Same idea for MonoMethod*. */
275 typedef struct _MethodIdMappingElement {
279 struct _MethodIdMappingElement *next_unwritten;
280 MonoProfilerMethodData data;
281 } MethodIdMappingElement;
/* Mapping containers (hash table and next-id fields are on lines not
 * visible in this chunk; see profiler->classes->table usage below). */
283 typedef struct _ClassIdMapping {
285 ClassIdMappingElement *unwritten;
289 typedef struct _MethodIdMapping {
291 MethodIdMappingElement *unwritten;
/* Timing bookkeeping for a loaded module/assembly/appdomain. */
295 typedef struct _LoadedElement {
297 guint64 load_start_counter;
298 guint64 load_end_counter;
299 guint64 unload_start_counter;
300 guint64 unload_end_counter;
304 guint8 unload_written;
/* Sizes (in slots) of the heap-snapshot buffers defined below. */
307 #define PROFILER_HEAP_SHOT_OBJECT_BUFFER_SIZE 1024
308 #define PROFILER_HEAP_SHOT_HEAP_BUFFER_SIZE 4096
309 #define PROFILER_HEAP_SHOT_WRITE_BUFFER_SIZE 4096
/* Singly-linked buffer of newly allocated objects recorded per thread;
 * next_free_slot/first_unprocessed_slot are cursors into buffer[]. */
311 typedef struct _ProfilerHeapShotObjectBuffer {
312 struct _ProfilerHeapShotObjectBuffer *next;
313 MonoObject **next_free_slot;
315 MonoObject **first_unprocessed_slot;
316 MonoObject *buffer [PROFILER_HEAP_SHOT_OBJECT_BUFFER_SIZE];
317 } ProfilerHeapShotObjectBuffer;
/* Doubly-linked buffer holding the set of live objects tracked between
 * collections; start_slot/end_slot delimit the used portion. */
319 typedef struct _ProfilerHeapShotHeapBuffer {
320 struct _ProfilerHeapShotHeapBuffer *next;
321 struct _ProfilerHeapShotHeapBuffer *previous;
322 MonoObject **start_slot;
323 MonoObject **end_slot;
324 MonoObject *buffer [PROFILER_HEAP_SHOT_HEAP_BUFFER_SIZE];
325 } ProfilerHeapShotHeapBuffer;
/* Head/tail/cursor over the chain of heap buffers above. */
327 typedef struct _ProfilerHeapShotHeapBuffers {
328 ProfilerHeapShotHeapBuffer *buffers;
329 ProfilerHeapShotHeapBuffer *last;
330 ProfilerHeapShotHeapBuffer *current;
331 MonoObject **first_free_slot;
332 } ProfilerHeapShotHeapBuffers;
/* Raw pointer-sized records queued for the writer thread. */
335 typedef struct _ProfilerHeapShotWriteBuffer {
336 struct _ProfilerHeapShotWriteBuffer *next;
337 gpointer buffer [PROFILER_HEAP_SHOT_WRITE_BUFFER_SIZE];
338 } ProfilerHeapShotWriteBuffer;
/* Per-class aggregate for a collection summary (counters are on lines not
 * visible in this chunk). */
340 typedef struct _ProfilerHeapShotClassSummary {
349 } ProfilerHeapShotClassSummary;
351 typedef struct _ProfilerHeapShotCollectionSummary {
352 ProfilerHeapShotClassSummary *per_class_data;
354 } ProfilerHeapShotCollectionSummary;
/* One heap-snapshot write job handed to the writer thread: a chain of
 * write buffers plus timing and summary data. */
356 typedef struct _ProfilerHeapShotWriteJob {
357 struct _ProfilerHeapShotWriteJob *next;
358 struct _ProfilerHeapShotWriteJob *next_unwritten;
362 ProfilerHeapShotWriteBuffer *buffers;
363 ProfilerHeapShotWriteBuffer **last_next;
364 guint32 full_buffers;
365 gboolean heap_shot_was_signalled;
366 guint64 start_counter;
371 ProfilerHeapShotCollectionSummary summary;
372 gboolean dump_heap_data;
373 } ProfilerHeapShotWriteJob;
/* Shadow call stack kept per thread; method_is_jitted[] parallels the
 * method array (top/capacity/stack fields are on lines not visible here). */
375 typedef struct _ProfilerThreadStack {
378 guint32 last_saved_top;
380 guint8 *method_is_jitted;
381 } ProfilerThreadStack;
/* Per-thread event buffer: [events, end_event) is storage, next_free_event
 * is the write cursor, and the first_unwritten/first_unmapped cursors track
 * what the writer thread has flushed/resolved. */
383 typedef struct _ProfilerPerThreadData {
384 ProfilerEventData *events;
385 ProfilerEventData *next_free_event;
386 ProfilerEventData *end_event;
387 ProfilerEventData *first_unwritten_event;
388 ProfilerEventData *first_unmapped_event;
389 guint64 start_event_counter;
390 guint64 last_event_counter;
392 ProfilerHeapShotObjectBuffer *heap_shot_object_buffers;
393 ProfilerThreadStack stack;
394 struct _ProfilerPerThreadData* next;
395 } ProfilerPerThreadData;
/* One statistical (sampling) hit; its fields are on lines not visible here. */
397 typedef struct _ProfilerStatisticalHit {
400 } ProfilerStatisticalHit;
/* Ring of statistical hits plus the flush cursor for the writer thread. */
402 typedef struct _ProfilerStatisticalData {
403 ProfilerStatisticalHit *hits;
406 int first_unwritten_index;
407 } ProfilerStatisticalData;
/* A symbol resolved from a native executable/library (ELF symtab). */
409 typedef struct _ProfilerUnmanagedSymbol {
414 } ProfilerUnmanagedSymbol;
416 struct _ProfilerExecutableFile;
/* One executable memory region (from the process map) with the symbols
 * resolved inside it. */
418 typedef struct _ProfilerExecutableMemoryRegionData {
426 struct _ProfilerExecutableFile *file;
427 guint32 symbols_count;
428 guint32 symbols_capacity;
429 ProfilerUnmanagedSymbol *symbols;
430 } ProfilerExecutableMemoryRegionData;
/* Growable array of the regions above. */
432 typedef struct _ProfilerExecutableMemoryRegions {
433 ProfilerExecutableMemoryRegionData **regions;
434 guint32 regions_capacity;
435 guint32 regions_count;
437 guint32 next_unmanaged_function_id;
438 } ProfilerExecutableMemoryRegions;
440 /* Start of ELF definitions */
/* Minimal in-house ELF types so symbols can be read without <elf.h>;
 * ElfAddr/ElfOff are pointer-sized, so the 32/64-bit struct variants below
 * are selected by SIZEOF_VOID_P. */
442 typedef guint16 ElfHalf;
443 typedef guint32 ElfWord;
444 typedef gsize ElfAddr;
445 typedef gsize ElfOff;
/* ELF file header fields (struct opener/name not visible in this chunk). */
448 unsigned char e_ident[EI_NIDENT];
454 ElfOff e_shoff; // Section header table
456 ElfHalf e_ehsize; // Header size
459 ElfHalf e_shentsize; // Section header entry size
460 ElfHalf e_shnum; // Section header entries number
461 ElfHalf e_shstrndx; // String table index
/* 32-bit variants of the section-header and symbol structs. */
464 #if (SIZEOF_VOID_P == 4)
469 ElfAddr sh_addr; // Address in memory
470 ElfOff sh_offset; // Offset in file
474 ElfWord sh_addralign;
481 unsigned char st_info; // Use ELF32_ST_TYPE to get symbol type
482 unsigned char st_other;
483 ElfHalf st_shndx; // Or one of SHN_ABS, SHN_COMMON or SHN_UNDEF.
/* 64-bit variants (field order differs from 32-bit per the ELF spec). */
485 #elif (SIZEOF_VOID_P == 8)
490 ElfAddr sh_addr; // Address in memory
491 ElfOff sh_offset; // Offset in file
500 unsigned char st_info; // Use ELF_ST_TYPE to get symbol type
501 unsigned char st_other;
502 ElfHalf st_shndx; // Or one of SHN_ABS, SHN_COMMON or SHN_UNDEF.
507 #error Bad size of void pointer
/* Split st_info into binding (high nibble) and type (low nibble). */
511 #define ELF_ST_BIND(i) ((i)>>4)
512 #define ELF_ST_TYPE(i) ((i)&0xf)
/* ELF object-file types (e_type values). */
525 ELF_FILE_TYPE_NONE = 0,
526 ELF_FILE_TYPE_REL = 1,
527 ELF_FILE_TYPE_EXEC = 2,
528 ELF_FILE_TYPE_DYN = 3,
529 ELF_FILE_TYPE_CORE = 4
/* Section types/flags (only the ones used are visible here). */
546 ELF_SHT_PROGBITS = 1,
570 ELF_SHF_EXECINSTR = 4,
/* Special section-header indexes. */
573 #define ELF_SHN_UNDEF 0
574 #define ELF_SHN_LORESERVE 0xff00
575 #define ELF_SHN_LOPROC 0xff00
576 #define ELF_SHN_HIPROC 0xff1f
577 #define ELF_SHN_ABS 0xfff1
578 #define ELF_SHN_COMMON 0xfff2
579 #define ELF_SHN_HIRESERVE 0xffff
580 /* End of ELF definitions */
/* Associates an ELF section (by address/offset) with the memory region it
 * was mapped into. */
582 typedef struct _ProfilerExecutableFileSectionRegion {
583 ProfilerExecutableMemoryRegionData *region;
584 guint8 *section_address;
585 gsize section_offset;
586 } ProfilerExecutableFileSectionRegion;
/* A native executable/library whose symbols are being read; reference
 * counted because several memory regions can point at the same file. */
588 typedef struct _ProfilerExecutableFile {
589 guint32 reference_count;
591 /* Used for mmap and munmap */
598 guint8 *symbols_start;
599 guint32 symbols_count;
601 const char *symbols_string_table;
602 const char *main_string_table;
604 ProfilerExecutableFileSectionRegion *section_regions;
606 struct _ProfilerExecutableFile *next_new_file;
607 } ProfilerExecutableFile;
/* Table of known files plus the list of files seen since the last flush. */
609 typedef struct _ProfilerExecutableFiles {
611 ProfilerExecutableFile *new_files;
612 } ProfilerExecutableFiles;
/* Writer-thread lifecycle flags; CHECK_WRITER_THREAD() is true while the
 * writer thread is still running. */
615 #define CLEANUP_WRITER_THREAD() do {profiler->writer_thread_terminated = TRUE;} while (0)
616 #define CHECK_WRITER_THREAD() (! profiler->writer_thread_terminated)
/* POSIX implementations of the platform abstraction macros
 * (the Win32 counterparts follow in the #else branch). */
618 #ifndef PLATFORM_WIN32
619 #include <sys/types.h>
620 #include <sys/time.h>
621 #include <sys/stat.h>
625 #include <semaphore.h>
627 #include <sys/mman.h>
628 #include <sys/types.h>
629 #include <sys/stat.h>
/* Profiler-wide mutex, built on pthreads. */
633 #define MUTEX_TYPE pthread_mutex_t
634 #define INITIALIZE_PROFILER_MUTEX() pthread_mutex_init (&(profiler->mutex), NULL)
635 #define DELETE_PROFILER_MUTEX() pthread_mutex_destroy (&(profiler->mutex))
636 #define LOCK_PROFILER() do {/*LOG_WRITER_THREAD ("LOCK_PROFILER");*/ pthread_mutex_lock (&(profiler->mutex));} while (0)
637 #define UNLOCK_PROFILER() do {/*LOG_WRITER_THREAD ("UNLOCK_PROFILER");*/ pthread_mutex_unlock (&(profiler->mutex));} while (0)
/* Data-writer thread creation/join. */
639 #define THREAD_TYPE pthread_t
640 #define CREATE_WRITER_THREAD(f) pthread_create (&(profiler->data_writer_thread), NULL, ((void*(*)(void*))f), NULL)
641 #define EXIT_THREAD() pthread_exit (NULL);
642 #define WAIT_WRITER_THREAD() do {\
643 if (CHECK_WRITER_THREAD ()) {\
644 pthread_join (profiler->data_writer_thread, NULL);\
647 #define CURRENT_THREAD_ID() (gsize) pthread_self ()
/* TLS fallback when __thread is unavailable: pthread keys, created once. */
649 #ifndef HAVE_KW_THREAD
650 static pthread_key_t pthread_profiler_key;
651 static pthread_once_t profiler_pthread_once = PTHREAD_ONCE_INIT;
653 make_pthread_profiler_key (void) {
654 (void) pthread_key_create (&pthread_profiler_key, NULL);
656 #define LOOKUP_PROFILER_THREAD_DATA() ((ProfilerPerThreadData*) pthread_getspecific (pthread_profiler_key))
657 #define SET_PROFILER_THREAD_DATA(x) (void) pthread_setspecific (pthread_profiler_key, (x))
658 #define ALLOCATE_PROFILER_THREAD_DATA() (void) pthread_once (&profiler_pthread_once, make_pthread_profiler_key)
659 #define FREE_PROFILER_THREAD_DATA() (void) pthread_key_delete (pthread_profiler_key)
/* Writer-thread signalling via three POSIX semaphores:
 * enable (start writing), wake (data ready), done (flush finished). */
662 #define EVENT_TYPE sem_t
663 #define WRITER_EVENT_INIT() do {\
664 sem_init (&(profiler->enable_data_writer_event), 0, 0);\
665 sem_init (&(profiler->wake_data_writer_event), 0, 0);\
666 sem_init (&(profiler->done_data_writer_event), 0, 0);\
668 #define WRITER_EVENT_DESTROY() do {\
669 sem_destroy (&(profiler->enable_data_writer_event));\
670 sem_destroy (&(profiler->wake_data_writer_event));\
671 sem_destroy (&(profiler->done_data_writer_event));\
673 #define WRITER_EVENT_WAIT() (void) sem_wait (&(profiler->wake_data_writer_event))
674 #define WRITER_EVENT_RAISE() (void) sem_post (&(profiler->wake_data_writer_event))
675 #define WRITER_EVENT_ENABLE_WAIT() (void) sem_wait (&(profiler->enable_data_writer_event))
676 #define WRITER_EVENT_ENABLE_RAISE() (void) sem_post (&(profiler->enable_data_writer_event))
677 #define WRITER_EVENT_DONE_WAIT() do {\
678 if (CHECK_WRITER_THREAD ()) {\
679 (void) sem_wait (&(profiler->done_data_writer_event));\
682 #define WRITER_EVENT_DONE_RAISE() (void) sem_post (&(profiler->done_data_writer_event))
/* Output file access: stdio variant, and a raw-fd variant below
 * (selected by a condition on lines not visible in this chunk). */
685 #define FILE_HANDLE_TYPE FILE*
686 #define OPEN_FILE() profiler->file = fopen (profiler->file_name, "wb");
687 #define WRITE_BUFFER(b,s) fwrite ((b), 1, (s), profiler->file)
688 #define FLUSH_FILE() fflush (profiler->file)
689 #define CLOSE_FILE() fclose (profiler->file);
691 #define FILE_HANDLE_TYPE int
692 #define OPEN_FILE() profiler->file = open (profiler->file_name, O_WRONLY|O_CREAT|O_TRUNC, 0664);
693 #define WRITE_BUFFER(b,s) write (profiler->file, (b), (s))
695 #define CLOSE_FILE() close (profiler->file);
/* Win32 implementations of the platform abstraction macros. */
702 #define MUTEX_TYPE CRITICAL_SECTION
703 #define INITIALIZE_PROFILER_MUTEX() InitializeCriticalSection (&(profiler->mutex))
704 #define DELETE_PROFILER_MUTEX() DeleteCriticalSection (&(profiler->mutex))
705 #define LOCK_PROFILER() EnterCriticalSection (&(profiler->mutex))
706 #define UNLOCK_PROFILER() LeaveCriticalSection (&(profiler->mutex))
/* Writer thread: 1 MB stack, joined via WaitForSingleObject. */
708 #define THREAD_TYPE HANDLE
709 #define CREATE_WRITER_THREAD(f) CreateThread (NULL, (1*1024*1024), (f), NULL, 0, NULL);
710 #define EXIT_THREAD() ExitThread (0);
711 #define WAIT_WRITER_THREAD() do {\
712 if (CHECK_WRITER_THREAD ()) {\
713 WaitForSingleObject (profiler->data_writer_thread, INFINITE);\
716 #define CURRENT_THREAD_ID() (gsize) GetCurrentThreadId ()
/* TLS fallback using the Win32 Tls* API when __thread is unavailable. */
718 #ifndef HAVE_KW_THREAD
719 static guint32 profiler_thread_id = -1;
720 #define LOOKUP_PROFILER_THREAD_DATA() ((ProfilerPerThreadData*)TlsGetValue (profiler_thread_id))
721 #define SET_PROFILER_THREAD_DATA(x) TlsSetValue (profiler_thread_id, (x));
722 #define ALLOCATE_PROFILER_THREAD_DATA() profiler_thread_id = TlsAlloc ()
723 #define FREE_PROFILER_THREAD_DATA() TlsFree (profiler_thread_id)
/* Writer-thread signalling via Win32 auto-reset events. */
726 #define EVENT_TYPE HANDLE
/* Create the three auto-reset events used to coordinate with the data
 * writer thread (enable / wake / done).
 * FIX: the original expansion began with "(void) do {" — a cast applied
 * to a statement, which is not valid C; the stray "(void)" is removed. */
#define WRITER_EVENT_INIT() do {\
	profiler->enable_data_writer_event = CreateEvent (NULL, FALSE, FALSE, NULL);\
	profiler->wake_data_writer_event = CreateEvent (NULL, FALSE, FALSE, NULL);\
	profiler->done_data_writer_event = CreateEvent (NULL, FALSE, FALSE, NULL);\
/* Close the writer-thread coordination events.
 * FIX (three defects in the original lines):
 *  - WRITER_EVENT_DESTROY() closed "profiler->statistical_data_writer_event",
 *    a field that does not exist in struct _MonoProfiler (it only has the
 *    enable/wake/done_data_writer_event handles declared below);
 *  - the macro that actually closed the three real handles was misnamed
 *    WRITER_EVENT_INIT — a duplicate definition shadowing the real INIT;
 *  - its expansion used the invalid "(void) do {" form.
 * Replaced with a single, correctly named destroy macro. */
#define WRITER_EVENT_DESTROY() do {\
	CloseHandle (profiler->enable_data_writer_event);\
	CloseHandle (profiler->wake_data_writer_event);\
	CloseHandle (profiler->done_data_writer_event);\
/* Wait/signal wrappers over the Win32 event handles. */
738 #define WRITER_EVENT_WAIT() WaitForSingleObject (profiler->wake_data_writer_event, INFINITE)
739 #define WRITER_EVENT_RAISE() SetEvent (profiler->wake_data_writer_event)
740 #define WRITER_EVENT_ENABLE_WAIT() WaitForSingleObject (profiler->enable_data_writer_event, INFINITE)
741 #define WRITER_EVENT_ENABLE_RAISE() SetEvent (profiler->enable_data_writer_event)
742 #define WRITER_EVENT_DONE_WAIT() do {\
743 if (CHECK_WRITER_THREAD ()) {\
744 WaitForSingleObject (profiler->done_data_writer_event, INFINITE);\
747 #define WRITER_EVENT_DONE_RAISE() SetEvent (profiler->done_data_writer_event)
/* Win32 output file access via stdio (binary mode). */
749 #define FILE_HANDLE_TYPE FILE*
750 #define OPEN_FILE() profiler->file = fopen (profiler->file_name, "wb");
751 #define WRITE_BUFFER(b,s) fwrite ((b), 1, (s), profiler->file)
752 #define FLUSH_FILE() fflush (profiler->file)
753 #define CLOSE_FILE() fclose (profiler->file);
/* Fast path: compiler-supported thread-local storage for per-thread data. */
757 #ifdef HAVE_KW_THREAD
758 static __thread ProfilerPerThreadData * tls_profiler_per_thread_data;
759 #define LOOKUP_PROFILER_THREAD_DATA() ((ProfilerPerThreadData*) tls_profiler_per_thread_data)
760 #define SET_PROFILER_THREAD_DATA(x) tls_profiler_per_thread_data = (x)
761 #define ALLOCATE_PROFILER_THREAD_DATA() /* nop */
762 #define FREE_PROFILER_THREAD_DATA() /* nop */
/* Get-or-create the calling thread's data and link it into the global
 * list (the null-check line between lookup and creation is not visible
 * in this chunk). */
765 #define GET_PROFILER_THREAD_DATA(data) do {\
766 ProfilerPerThreadData *_result = LOOKUP_PROFILER_THREAD_DATA ();\
768 _result = profiler_per_thread_data_new (profiler->per_thread_buffer_size);\
770 _result->next = profiler->per_thread_data;\
771 profiler->per_thread_data = _result;\
773 SET_PROFILER_THREAD_DATA (_result);\
/* Chained buffers used to batch writes to the output file. */
778 #define PROFILER_FILE_WRITE_BUFFER_SIZE (profiler->write_buffer_size)
779 typedef struct _ProfilerFileWriteBuffer {
780 struct _ProfilerFileWriteBuffer *next;
782 } ProfilerFileWriteBuffer;
/* Early-out used at the top of event callbacks when profiling is paused. */
784 #define CHECK_PROFILER_ENABLED() do {\
785 if (! profiler->profiler_enabled)\
/* The singleton profiler state (see the global "profiler" below). */
788 struct _MonoProfiler {
791 MonoProfileFlags flags;
792 gboolean profiler_enabled;
/* Output file name parts and handle. */
794 char *file_name_suffix;
795 FILE_HANDLE_TYPE file;
798 guint64 start_counter;
802 guint64 last_header_counter;
/* Method/class id mappings and tables of loaded images. */
804 MethodIdMapping *methods;
805 ClassIdMapping *classes;
807 GHashTable *loaded_assemblies;
808 GHashTable *loaded_modules;
809 GHashTable *loaded_appdomains;
/* Per-thread and statistical (sampling) buffers; "ready" and
 * "second_buffer" are swapped with the active one for writing. */
811 guint32 per_thread_buffer_size;
812 guint32 statistical_buffer_size;
813 ProfilerPerThreadData* per_thread_data;
814 ProfilerStatisticalData *statistical_data;
815 ProfilerStatisticalData *statistical_data_ready;
816 ProfilerStatisticalData *statistical_data_second_buffer;
817 int statistical_call_chain_depth;
/* Data-writer thread handle, its signalling events and state flags. */
819 THREAD_TYPE data_writer_thread;
820 EVENT_TYPE enable_data_writer_event;
821 EVENT_TYPE wake_data_writer_event;
822 EVENT_TYPE done_data_writer_event;
823 gboolean terminate_writer_thread;
824 gboolean writer_thread_terminated;
825 gboolean detach_writer_thread;
826 gboolean writer_thread_enabled;
827 gboolean writer_thread_flush_everything;
/* File write buffering state. */
829 ProfilerFileWriteBuffer *write_buffers;
830 ProfilerFileWriteBuffer *current_write_buffer;
831 int write_buffer_size;
832 int current_write_position;
833 int full_write_buffers;
/* Heap snapshot jobs and the live-object tracking buffers. */
835 ProfilerHeapShotWriteJob *heap_shot_write_jobs;
836 ProfilerHeapShotHeapBuffers heap;
/* Heap-shot trigger state (command file polling and signal flag). */
838 char *heap_shot_command_file_name;
839 int dump_next_heap_snapshots;
840 guint64 heap_shot_command_file_access_time;
841 gboolean heap_shot_was_signalled;
842 guint32 garbage_collection_counter;
/* Native code regions/files used to symbolicate statistical hits. */
844 ProfilerExecutableMemoryRegions *executable_regions;
845 ProfilerExecutableFiles executable_files;
/* Feature toggles parsed from the profiler options. */
852 gboolean unreachable_objects;
853 gboolean collection_summary;
855 gboolean track_stack;
856 gboolean track_calls;
857 gboolean save_allocation_caller;
858 gboolean save_allocation_stack;
859 gboolean allocations_carry_id;
/* The single global profiler instance used by every callback. */
862 static MonoProfiler *profiler;
/* Unix-only signal handlers that toggle profiling / request heap shots. */
864 #ifndef PLATFORM_WIN32
/* Handler signature varies with sigaction support and architecture. */
867 #ifdef MONO_ARCH_USE_SIGACTION
868 #define SIG_HANDLER_SIGNATURE(ftn) ftn (int _dummy, siginfo_t *info, void *context)
869 #elif defined(__sparc__)
870 #define SIG_HANDLER_SIGNATURE(ftn) ftn (int _dummy, void *sigctx)
872 #define SIG_HANDLER_SIGNATURE(ftn) ftn (int _dummy)
/* Flag a heap shot and force a full GC so it gets taken now. */
876 request_heap_snapshot (void) {
877 profiler->heap_shot_was_signalled = TRUE;
878 mono_gc_collect (mono_gc_max_generation ());
/* Signal handler variant: only sets the flag and wakes the writer thread
 * (sem_post is used on the POSIX side, which is async-signal-safe). */
882 SIG_HANDLER_SIGNATURE (gc_request_handler) {
883 profiler->heap_shot_was_signalled = TRUE;
884 WRITER_EVENT_RAISE ();
/* Install gc_request_handler for the given signal. */
888 add_gc_request_handler (int signal_number)
892 #ifdef MONO_ARCH_USE_SIGACTION
893 sa.sa_sigaction = gc_request_handler;
894 sigemptyset (&sa.sa_mask);
895 sa.sa_flags = SA_SIGINFO;
897 sa.sa_handler = gc_request_handler;
898 sigemptyset (&sa.sa_mask);
/* NOTE(review): sigaction() is called inside g_assert(); if glib is built
 * with G_DISABLE_ASSERT the handler is never installed — confirm intent. */
902 g_assert (sigaction (signal_number, &sa, NULL) != -1);
/* Runtime on/off switch for event recording. */
906 enable_profiler (void) {
907 profiler->profiler_enabled = TRUE;
911 disable_profiler (void) {
912 profiler->profiler_enabled = FALSE;
/* Signal handler that flips the enabled flag. */
918 SIG_HANDLER_SIGNATURE (toggle_handler) {
919 if (profiler->profiler_enabled) {
920 profiler->profiler_enabled = FALSE;
922 profiler->profiler_enabled = TRUE;
/* Install toggle_handler for the given signal (same g_assert caveat). */
927 add_toggle_handler (int signal_number)
931 #ifdef MONO_ARCH_USE_SIGACTION
932 sa.sa_sigaction = toggle_handler;
933 sigemptyset (&sa.sa_mask);
934 sa.sa_flags = SA_SIGINFO;
936 sa.sa_handler = toggle_handler;
937 sigemptyset (&sa.sa_mask);
941 g_assert (sigaction (signal_number, &sa, NULL) != -1);
/* Compile-time debug switches; all off (0) in normal builds. */
947 #define DEBUG_LOAD_EVENTS 0
948 #define DEBUG_MAPPING_EVENTS 0
949 #define DEBUG_LOGGING_PROFILER 0
950 #define DEBUG_HEAP_PROFILER 0
951 #define DEBUG_CLASS_BITMAPS 0
952 #define DEBUG_STATISTICAL_PROFILER 0
953 #define DEBUG_WRITER_THREAD 0
954 #define DEBUG_FILE_WRITES 0
/* LOG_WRITER_THREAD compiles to printf only when some debug switch is on. */
955 #if (DEBUG_LOGGING_PROFILER || DEBUG_STATISTICAL_PROFILER || DEBUG_HEAP_PROFILER || DEBUG_WRITER_THREAD || DEBUG_FILE_WRITES)
956 #define LOG_WRITER_THREAD(m) printf ("WRITER-THREAD-LOG %s\n", m)
958 #define LOG_WRITER_THREAD(m)
/* Global event counter printed by EVENT_MARK when verbose logging is on. */
961 #if DEBUG_LOGGING_PROFILER
962 static int event_counter = 0;
963 #define EVENT_MARK() printf ("[EVENT:%d]", ++ event_counter)
/* Reset a stack to the "no storage allocated" state. */
967 thread_stack_initialize_empty (ProfilerThreadStack *stack) {
970 stack->last_saved_top = 0;
972 stack->method_is_jitted = NULL;
/* Free both parallel arrays and reset all counters. */
976 thread_stack_free (ProfilerThreadStack *stack) {
979 stack->last_saved_top = 0;
980 if (stack->stack != NULL) {
981 g_free (stack->stack);
984 if (stack->method_is_jitted != NULL) {
985 g_free (stack->method_is_jitted);
986 stack->method_is_jitted = NULL;
/* Allocate zeroed storage for `capacity` frames. */
991 thread_stack_initialize (ProfilerThreadStack *stack, guint32 capacity) {
992 stack->capacity = capacity;
994 stack->last_saved_top = 0;
995 stack->stack = g_new0 (MonoMethod*, capacity);
996 stack->method_is_jitted = g_new0 (guint8, capacity);
/* Forget which frames were already flushed to the output file. */
1000 thread_stack_reset_saved_state (ProfilerThreadStack *stack) {
1001 stack->last_saved_top = 0;
/* Push a frame, doubling the storage when full by re-initializing and
 * copying the old contents back.
 * NOTE(review): freeing of old_stack/old_method_is_jitted is not visible
 * in this chunk — verify the grown buffers are released. */
1005 thread_stack_push_jitted (ProfilerThreadStack *stack, MonoMethod* method, gboolean method_is_jitted) {
1006 if (stack->top >= stack->capacity) {
1007 MonoMethod **old_stack = stack->stack;
1008 guint8 *old_method_is_jitted = stack->method_is_jitted;
1009 guint32 top = stack->top;
1010 thread_stack_initialize (stack, stack->capacity * 2);
1011 memcpy (stack->stack, old_stack, top * sizeof (MonoMethod*));
1012 memcpy (stack->method_is_jitted, old_method_is_jitted, top * sizeof (guint8));
1015 stack->stack [stack->top] = method;
1016 stack->method_is_jitted [stack->top] = method_is_jitted;
/* Convenience push for a non-jitted frame. */
1021 thread_stack_push (ProfilerThreadStack *stack, MonoMethod* method) {
1022 thread_stack_push_jitted (stack, method, FALSE);
/* Pop and return the top frame, pulling last_saved_top down with the top
 * (the decrement of stack->top is on a line not visible in this chunk). */
1026 thread_stack_pop (ProfilerThreadStack *stack) {
1027 if (stack->top > 0) {
1029 if (stack->last_saved_top > stack->top) {
1030 stack->last_saved_top = stack->top;
1032 return stack->stack [stack->top];
/* Peek helpers; each returns NULL/0 on an empty stack (the else branches
 * are on lines not visible in this chunk). */
1039 thread_stack_top (ProfilerThreadStack *stack) {
1040 if (stack->top > 0) {
1041 return stack->stack [stack->top - 1];
1048 thread_stack_top_is_jitted (ProfilerThreadStack *stack) {
1049 if (stack->top > 0) {
1050 return stack->method_is_jitted [stack->top - 1];
/* Indexed peek from the top (index 0 == top).
 * NOTE(review): "stack->top > index" compares guint32 with int — a
 * negative index would be promoted to a huge unsigned value; confirm
 * callers never pass negative indexes. */
1057 thread_stack_index_from_top (ProfilerThreadStack *stack, int index) {
1058 if (stack->top > index) {
1059 return stack->stack [stack->top - (index + 1)];
1066 thread_stack_index_from_top_is_jitted (ProfilerThreadStack *stack, int index) {
1067 if (stack->top > index) {
1068 return stack->method_is_jitted [stack->top - (index + 1)];
/* "Safe" variants: no-ops when the stack storage was never allocated. */
1075 thread_stack_push_safely (ProfilerThreadStack *stack, MonoMethod* method) {
1076 if (stack->stack != NULL) {
1077 thread_stack_push (stack, method);
1082 thread_stack_push_jitted_safely (ProfilerThreadStack *stack, MonoMethod* method, gboolean method_is_jitted) {
1083 if (stack->stack != NULL) {
1084 thread_stack_push_jitted (stack, method, method_is_jitted);
/* Number of frames pushed since the stack was last written out. */
1089 thread_stack_count_unsaved_frames (ProfilerThreadStack *stack) {
1090 int result = stack->top - stack->last_saved_top;
1091 return (result > 0) ? result : 0;
/* Look up the id-mapping element for a class (NULL if never seen). */
1094 static ClassIdMappingElement*
1095 class_id_mapping_element_get (MonoClass *klass) {
1096 return g_hash_table_lookup (profiler->classes->table, (gconstpointer) klass);
/* Same lookup for methods. */
1099 static MethodIdMappingElement*
1100 method_id_mapping_element_get (MonoMethod *method) {
1101 return g_hash_table_lookup (profiler->methods->table, (gconstpointer) method);
/* Convert a bit count in (v) to the number of bytes needed to hold it
 * (macro body continues on lines not visible in this chunk). */
1104 #define BITS_TO_BYTES(v) do {\
/* Create, register and return a fresh mapping element for `klass`:
 * assigns the next sequential id, links it into the unwritten list, and
 * leaves its layout bitmap marked as not-yet-computed. */
1110 static ClassIdMappingElement*
1111 class_id_mapping_element_new (MonoClass *klass) {
1112 ClassIdMappingElement *result = g_new (ClassIdMappingElement, 1);
1114 result->name = mono_type_full_name (mono_class_get_type (klass));
1115 result->klass = klass;
1116 result->next_unwritten = profiler->classes->unwritten;
1117 profiler->classes->unwritten = result;
1118 result->id = profiler->classes->next_id;
1119 profiler->classes->next_id ++;
/* Layout is computed lazily; mark it uninitialized for now. */
1121 result->data.bitmap.compact = 0;
1122 result->data.layout.slots = CLASS_LAYOUT_NOT_INITIALIZED;
1123 result->data.layout.references = CLASS_LAYOUT_NOT_INITIALIZED;
1125 g_hash_table_insert (profiler->classes->table, klass, result);
1127 #if (DEBUG_MAPPING_EVENTS)
1128 printf ("Created new CLASS mapping element \"%s\" (%p)[%d]\n", result->name, klass, result->id);
1134 class_id_mapping_element_build_layout_bitmap (MonoClass *klass, ClassIdMappingElement *klass_id) {
1135 MonoClass *parent_class = mono_class_get_parent (klass);
1136 int number_of_reference_fields = 0;
1137 int max_offset_of_reference_fields = 0;
1138 ClassIdMappingElement *parent_id;
1140 MonoClassField *field;
1142 #if (DEBUG_CLASS_BITMAPS)
1143 printf ("class_id_mapping_element_build_layout_bitmap: building layout for class %s.%s: ", mono_class_get_namespace (klass), mono_class_get_name (klass));
1146 if (parent_class != NULL) {
1147 parent_id = class_id_mapping_element_get (parent_class);
1148 g_assert (parent_id != NULL);
1150 if (parent_id->data.layout.slots == CLASS_LAYOUT_NOT_INITIALIZED) {
1151 #if (DEBUG_CLASS_BITMAPS)
1152 printf ("[recursively building bitmap for father class]\n");
1154 class_id_mapping_element_build_layout_bitmap (parent_class, parent_id);
1161 while ((field = mono_class_get_fields (klass, &iter)) != NULL) {
1162 MonoType* field_type = mono_field_get_type (field);
1163 // For now, skip static fields
1164 if (mono_field_get_flags (field) & 0x0010 /*FIELD_ATTRIBUTE_STATIC*/)
1167 if (MONO_TYPE_IS_REFERENCE (field_type)) {
1168 int field_offset = mono_field_get_offset (field) - sizeof (MonoObject);
1169 if (field_offset > max_offset_of_reference_fields) {
1170 max_offset_of_reference_fields = field_offset;
1172 number_of_reference_fields ++;
1174 MonoClass *field_class = mono_class_from_mono_type (field_type);
1175 if (field_class && mono_class_is_valuetype (field_class)) {
1176 ClassIdMappingElement *field_id = class_id_mapping_element_get (field_class);
1177 g_assert (field_id != NULL);
1179 if (field_id->data.layout.slots == CLASS_LAYOUT_NOT_INITIALIZED) {
1180 if (field_id != klass_id) {
1181 #if (DEBUG_CLASS_BITMAPS)
1182 printf ("[recursively building bitmap for field %s]\n", mono_field_get_name (field));
1184 class_id_mapping_element_build_layout_bitmap (field_class, field_id);
1186 #if (DEBUG_CLASS_BITMAPS)
1187 printf ("[breaking recursive bitmap build for field %s]", mono_field_get_name (field));
1190 klass_id->data.bitmap.compact = 0;
1191 klass_id->data.layout.slots = 0;
1192 klass_id->data.layout.references = 0;
1196 if (field_id->data.layout.references > 0) {
1197 int field_offset = mono_field_get_offset (field) - sizeof (MonoObject);
1198 int max_offset_reference_in_field = (field_id->data.layout.slots - 1) * sizeof (gpointer);
1200 if ((field_offset + max_offset_reference_in_field) > max_offset_of_reference_fields) {
1201 max_offset_of_reference_fields = field_offset + max_offset_reference_in_field;
1204 number_of_reference_fields += field_id->data.layout.references;
1210 #if (DEBUG_CLASS_BITMAPS)
1211 printf ("[allocating bitmap for class %s.%s (references %d, max offset %d, slots %d)]", mono_class_get_namespace (klass), mono_class_get_name (klass), number_of_reference_fields, max_offset_of_reference_fields, (int)(max_offset_of_reference_fields / sizeof (gpointer)) + 1);
1213 if ((number_of_reference_fields == 0) && ((parent_id == NULL) || (parent_id->data.layout.references == 0))) {
1214 #if (DEBUG_CLASS_BITMAPS)
1215 printf ("[no references at all]");
1217 klass_id->data.bitmap.compact = 0;
1218 klass_id->data.layout.slots = 0;
1219 klass_id->data.layout.references = 0;
1221 if ((parent_id != NULL) && (parent_id->data.layout.references > 0)) {
1222 #if (DEBUG_CLASS_BITMAPS)
1223 printf ("[parent %s.%s has %d references in %d slots]", mono_class_get_namespace (parent_class), mono_class_get_name (parent_class), parent_id->data.layout.references, parent_id->data.layout.slots);
1225 klass_id->data.layout.slots = parent_id->data.layout.slots;
1226 klass_id->data.layout.references = parent_id->data.layout.references;
1228 #if (DEBUG_CLASS_BITMAPS)
1229 printf ("[no references from parent]");
1231 klass_id->data.layout.slots = 0;
1232 klass_id->data.layout.references = 0;
1235 if (number_of_reference_fields > 0) {
1236 klass_id->data.layout.slots += ((max_offset_of_reference_fields / sizeof (gpointer)) + 1);
1237 klass_id->data.layout.references += number_of_reference_fields;
1238 #if (DEBUG_CLASS_BITMAPS)
1239 printf ("[adding data, going to %d references in %d slots]", klass_id->data.layout.references, klass_id->data.layout.slots);
1243 if (klass_id->data.layout.slots <= CLASS_LAYOUT_PACKED_BITMAP_SIZE) {
1244 #if (DEBUG_CLASS_BITMAPS)
1245 printf ("[zeroing bitmap]");
1247 klass_id->data.bitmap.compact = 0;
1248 if ((parent_id != NULL) && (parent_id->data.layout.references > 0)) {
1249 #if (DEBUG_CLASS_BITMAPS)
1250 printf ("[copying compact father bitmap]");
1252 klass_id->data.bitmap.compact = parent_id->data.bitmap.compact;
1255 int size_of_bitmap = klass_id->data.layout.slots;
1256 BITS_TO_BYTES (size_of_bitmap);
1257 #if (DEBUG_CLASS_BITMAPS)
1258 printf ("[allocating %d bytes for bitmap]", size_of_bitmap);
1260 klass_id->data.bitmap.extended = g_malloc0 (size_of_bitmap);
1261 if ((parent_id != NULL) && (parent_id->data.layout.references > 0)) {
1262 int size_of_father_bitmap = parent_id->data.layout.slots;
1263 if (size_of_father_bitmap <= CLASS_LAYOUT_PACKED_BITMAP_SIZE) {
1265 #if (DEBUG_CLASS_BITMAPS)
1266 printf ("[copying %d bits from father bitmap]", size_of_father_bitmap);
1268 for (father_slot = 0; father_slot < size_of_father_bitmap; father_slot ++) {
1269 if (parent_id->data.bitmap.compact & (((guint64)1) << father_slot)) {
1270 klass_id->data.bitmap.extended [father_slot >> 3] |= (1 << (father_slot & 7));
1274 BITS_TO_BYTES (size_of_father_bitmap);
1275 #if (DEBUG_CLASS_BITMAPS)
1276 printf ("[copying %d bytes from father bitmap]", size_of_father_bitmap);
1278 memcpy (klass_id->data.bitmap.extended, parent_id->data.bitmap.extended, size_of_father_bitmap);
1284 #if (DEBUG_CLASS_BITMAPS)
1285 printf ("[starting filling iteration]\n");
1288 while ((field = mono_class_get_fields (klass, &iter)) != NULL) {
1289 MonoType* field_type = mono_field_get_type (field);
1290 // For now, skip static fields
1291 if (mono_field_get_flags (field) & 0x0010 /*FIELD_ATTRIBUTE_STATIC*/)
1294 #if (DEBUG_CLASS_BITMAPS)
1295 printf ("[Working on field %s]", mono_field_get_name (field));
1297 if (MONO_TYPE_IS_REFERENCE (field_type)) {
1298 int field_offset = mono_field_get_offset (field) - sizeof (MonoObject);
1300 g_assert ((field_offset % sizeof (gpointer)) == 0);
1301 field_slot = field_offset / sizeof (gpointer);
1302 if (klass_id->data.layout.slots <= CLASS_LAYOUT_PACKED_BITMAP_SIZE) {
1303 klass_id->data.bitmap.compact |= (((guint64)1) << field_slot);
1305 klass_id->data.bitmap.extended [field_slot >> 3] |= (1 << (field_slot & 7));
1307 #if (DEBUG_CLASS_BITMAPS)
1308 printf ("[reference at offset %d, slot %d]", field_offset, field_slot);
1311 MonoClass *field_class = mono_class_from_mono_type (field_type);
1312 if (field_class && mono_class_is_valuetype (field_class)) {
1313 ClassIdMappingElement *field_id = class_id_mapping_element_get (field_class);
1317 g_assert (field_id != NULL);
1318 field_offset = mono_field_get_offset (field) - sizeof (MonoObject);
1319 g_assert ((field_id->data.layout.references == 0) || ((field_offset % sizeof (gpointer)) == 0));
1320 field_slot = field_offset / sizeof (gpointer);
1321 #if (DEBUG_CLASS_BITMAPS)
1322 printf ("[value type at offset %d, slot %d, with %d references in %d slots]", field_offset, field_slot, field_id->data.layout.references, field_id->data.layout.slots);
1325 if (field_id->data.layout.references > 0) {
1327 if (field_id->data.layout.slots <= CLASS_LAYOUT_PACKED_BITMAP_SIZE) {
1328 for (sub_field_slot = 0; sub_field_slot < field_id->data.layout.slots; sub_field_slot ++) {
1329 if (field_id->data.bitmap.compact & (((guint64)1) << sub_field_slot)) {
1330 int actual_slot = field_slot + sub_field_slot;
1331 if (klass_id->data.layout.slots <= CLASS_LAYOUT_PACKED_BITMAP_SIZE) {
1332 klass_id->data.bitmap.compact |= (((guint64)1) << actual_slot);
1334 klass_id->data.bitmap.extended [actual_slot >> 3] |= (1 << (actual_slot & 7));
1339 for (sub_field_slot = 0; sub_field_slot < field_id->data.layout.slots; sub_field_slot ++) {
1340 if (field_id->data.bitmap.extended [sub_field_slot >> 3] & (1 << (sub_field_slot & 7))) {
1341 int actual_slot = field_slot + sub_field_slot;
1342 if (klass_id->data.layout.slots <= CLASS_LAYOUT_PACKED_BITMAP_SIZE) {
1343 klass_id->data.bitmap.compact |= (((guint64)1) << actual_slot);
1345 klass_id->data.bitmap.extended [actual_slot >> 3] |= (1 << (actual_slot & 7));
1354 #if (DEBUG_CLASS_BITMAPS)
1357 printf ("\nLayot of class \"%s.%s\": references %d, slots %d, bitmap {", mono_class_get_namespace (klass), mono_class_get_name (klass), klass_id->data.layout.references, klass_id->data.layout.slots);
1358 for (slot = 0; slot < klass_id->data.layout.slots; slot ++) {
1359 if (klass_id->data.layout.slots <= CLASS_LAYOUT_PACKED_BITMAP_SIZE) {
1360 if (klass_id->data.bitmap.compact & (((guint64)1) << slot)) {
1366 if (klass_id->data.bitmap.extended [slot >> 3] & (1 << (slot & 7))) {
/* Create and register the id-mapping element for a MonoMethod: builds a
 * "name (signature)" display string, links the element at the head of the
 * methods->unwritten list (so the next mapping block emits it), assigns the
 * next sequential id and indexes it by method pointer in methods->table.
 * NOTE(review): mono_signature_get_desc returns an allocated string;
 * presumably freed on a line not visible in this listing -- confirm. */
1380 static MethodIdMappingElement*
1381 method_id_mapping_element_new (MonoMethod *method) {
1382 MethodIdMappingElement *result = g_new (MethodIdMappingElement, 1);
1383 char *signature = mono_signature_get_desc (mono_method_signature (method), TRUE);
1385 result->name = g_strdup_printf ("%s (%s)", mono_method_get_name (method), signature);
1387 result->method = method;
1388 result->next_unwritten = profiler->methods->unwritten;
1389 profiler->methods->unwritten = result;
1390 result->id = profiler->methods->next_id;
1391 profiler->methods->next_id ++;
1392 g_hash_table_insert (profiler->methods->table, method, result);
/* Code range is filled in later (at JIT-done time, per the surrounding file). */
1394 result->data.code_start = NULL;
1395 result->data.code_size = 0;
1397 #if (DEBUG_MAPPING_EVENTS)
1398 printf ("Created new METHOD mapping element \"%s\" (%p)[%d]\n", result->name, method, result->id);
/* GDestroyNotify value-destroy callbacks for the method/class id hash
 * tables created in method_id_mapping_new / class_id_mapping_new. */
1405 method_id_mapping_element_destroy (gpointer element) {
1406 MethodIdMappingElement *e = (MethodIdMappingElement*) element;
1413 class_id_mapping_element_destroy (gpointer element) {
1414 ClassIdMappingElement *e = (ClassIdMappingElement*) element;
/* The class variant also releases the heap-allocated "extended" reference
 * bitmap, which only exists when the layout was initialized and needed
 * more slots than fit in the packed (compact) bitmap. */
1417 if ((e->data.layout.slots != CLASS_LAYOUT_NOT_INITIALIZED) && (e->data.layout.slots > CLASS_LAYOUT_PACKED_BITMAP_SIZE))
1418 g_free (e->data.bitmap.extended);
/* Constructors for the method and class id maps: pointer-keyed hash tables
 * (g_direct_hash) that own their values via the *_element_destroy callbacks.
 * Ids start at 1; 0 is presumably reserved as "no id" -- confirm against the
 * file-format reader. */
1422 static MethodIdMapping*
1423 method_id_mapping_new (void) {
1424 MethodIdMapping *result = g_new (MethodIdMapping, 1);
1425 //result->table = g_hash_table_new_full (mono_aligned_addr_hash, NULL, NULL, method_id_mapping_element_destroy);
1426 result->table = g_hash_table_new_full (g_direct_hash, NULL, NULL, method_id_mapping_element_destroy);
1427 result->unwritten = NULL;
1428 result->next_id = 1;
1432 static ClassIdMapping*
1433 class_id_mapping_new (void) {
1434 ClassIdMapping *result = g_new (ClassIdMapping, 1);
1435 //result->table = g_hash_table_new_full (mono_aligned_addr_hash, NULL, NULL, class_id_mapping_element_destroy);
1436 result->table = g_hash_table_new_full (g_direct_hash, NULL, NULL, class_id_mapping_element_destroy);
1437 result->unwritten = NULL;
1438 result->next_id = 1;
/* Tear down the id maps; destroying the table runs the per-element
 * destroy callbacks registered in g_hash_table_new_full above. */
1443 method_id_mapping_destroy (MethodIdMapping *map) {
1444 g_hash_table_destroy (map->table);
1449 class_id_mapping_destroy (ClassIdMapping *map) {
1450 g_hash_table_destroy (map->table);
1454 #if (DEBUG_LOAD_EVENTS)
1456 print_load_event (const char *event_name, GHashTable *table, gpointer item, LoadedElement *element);
/* Record the start of a load event for `item` (assembly/module/appdomain):
 * allocate a zeroed LoadedElement, stamp its load_start_counter and index
 * it by item pointer in `table`. */
1459 static LoadedElement*
1460 loaded_element_load_start (GHashTable *table, gpointer item) {
1461 LoadedElement *element = g_new0 (LoadedElement, 1);
1462 #if (DEBUG_LOAD_EVENTS)
1463 print_load_event ("LOAD START", table, item, element);
1465 MONO_PROFILER_GET_CURRENT_COUNTER (element->load_start_counter);
1466 g_hash_table_insert (table, item, element);
/* Record the end of a load event: the element must already exist (asserted),
 * its load_end_counter is stamped and it takes ownership of `name`
 * (released later by loaded_element_destroy). */
1470 static LoadedElement*
1471 loaded_element_load_end (GHashTable *table, gpointer item, char *name) {
1472 LoadedElement *element = g_hash_table_lookup (table, item);
1473 #if (DEBUG_LOAD_EVENTS)
1474 print_load_event ("LOAD END", table, item, element);
1476 g_assert (element != NULL);
1477 MONO_PROFILER_GET_CURRENT_COUNTER (element->load_end_counter);
1478 element->name = name;
1479 element->loaded = TRUE;
/* Record the start of an unload event for a previously loaded item;
 * the element must already be present in `table` (asserted). */
1483 static LoadedElement*
1484 loaded_element_unload_start (GHashTable *table, gpointer item) {
1485 LoadedElement *element = g_hash_table_lookup (table, item);
1486 #if (DEBUG_LOAD_EVENTS)
1487 print_load_event ("UNLOAD START", table, item, element);
1489 g_assert (element != NULL);
1490 MONO_PROFILER_GET_CURRENT_COUNTER (element->unload_start_counter);
/* Record the end of an unload event and flag the element as unloaded so
 * the writer thread can emit its UNLOADED block. */
1494 static LoadedElement*
1495 loaded_element_unload_end (GHashTable *table, gpointer item) {
1496 LoadedElement *element = g_hash_table_lookup (table, item);
1497 #if (DEBUG_LOAD_EVENTS)
1498 print_load_event ("UNLOAD END", table, item, element);
1500 g_assert (element != NULL);
1501 MONO_PROFILER_GET_CURRENT_COUNTER (element->unload_end_counter);
1502 element->unloaded = TRUE;
/* GDestroyNotify for LoadedElement values: releases the owned name string.
 * NOTE(review): g_free(NULL) is a no-op, so the NULL guard is redundant. */
1508 loaded_element_destroy (gpointer element) {
1509 if (((LoadedElement*)element)->name)
1510 g_free (((LoadedElement*)element)->name);
1514 #if (DEBUG_LOAD_EVENTS)
/* Debug-only tracer for load/unload events: identifies which of the three
 * loaded-element tables `table` is and prints a one-line description.
 * NOTE(review): item_info comes from g_strdup_printf; presumably freed
 * after the printf on a line not visible in this listing -- confirm. */
1516 print_load_event (const char *event_name, GHashTable *table, gpointer item, LoadedElement *element) {
1517 const char* item_name;
1520 if (table == profiler->loaded_assemblies) {
1521 //item_info = g_strdup_printf("ASSEMBLY %p (dynamic %d)", item, mono_image_is_dynamic (mono_assembly_get_image((MonoAssembly*)item)));
1522 item_info = g_strdup_printf("ASSEMBLY %p", item);
1523 } else if (table == profiler->loaded_modules) {
1524 //item_info = g_strdup_printf("MODULE %p (dynamic %d)", item, mono_image_is_dynamic ((MonoImage*)item));
1525 item_info = g_strdup_printf("MODULE %p", item);
1526 } else if (table == profiler->loaded_appdomains) {
1527 item_info = g_strdup_printf("APPDOMAIN %p (id %d)", item, mono_domain_get_id ((MonoDomain*)item));
1530 g_assert_not_reached ();
/* element->name may legitimately be NULL before the load completes. */
1533 if (element != NULL) {
1534 item_name = element->name;
1536 item_name = "<NULL>";
1539 printf ("%s EVENT for %s (%s)\n", event_name, item_info, item_name);
/* Free a whole chain of per-thread heap-shot object buffers, following
 * the ->next links. */
1545 profiler_heap_shot_object_buffers_destroy (ProfilerHeapShotObjectBuffer *buffer) {
1546 while (buffer != NULL) {
1547 ProfilerHeapShotObjectBuffer *next = buffer->next;
1548 #if DEBUG_HEAP_PROFILER
1549 printf ("profiler_heap_shot_object_buffers_destroy: destroyed buffer %p (%p-%p)\n", buffer, & (buffer->buffer [0]), buffer->end);
/* Prepend a fresh heap-shot object buffer to `data`'s chain and return it.
 * After linking, the chain is scanned and trimmed: as soon as a successor
 * buffer is fully processed (first_unprocessed_slot == end), the chain is
 * cut there and everything past that point is destroyed. */
1556 static ProfilerHeapShotObjectBuffer*
1557 profiler_heap_shot_object_buffer_new (ProfilerPerThreadData *data) {
1558 ProfilerHeapShotObjectBuffer *buffer;
1559 ProfilerHeapShotObjectBuffer *result = g_new (ProfilerHeapShotObjectBuffer, 1);
1560 result->next_free_slot = & (result->buffer [0]);
1561 result->end = & (result->buffer [PROFILER_HEAP_SHOT_OBJECT_BUFFER_SIZE]);
1562 result->first_unprocessed_slot = & (result->buffer [0]);
1563 result->next = data->heap_shot_object_buffers;
1564 data->heap_shot_object_buffers = result;
1565 #if DEBUG_HEAP_PROFILER
1566 printf ("profiler_heap_shot_object_buffer_new: created buffer %p (%p-%p)\n", result, result->next_free_slot, result->end);
/* Garbage-collect fully processed buffers further down the chain. */
1568 for (buffer = result; buffer != NULL; buffer = buffer->next) {
1569 ProfilerHeapShotObjectBuffer *last = buffer->next;
1570 if ((last != NULL) && (last->first_unprocessed_slot == last->end)) {
1571 buffer->next = NULL;
1572 profiler_heap_shot_object_buffers_destroy (last);
/* Allocate a write job for one GC/heap-shot cycle.
 * - A value-buffer chain is set up only when unreachable-object reporting
 *   or a full heap dump is requested; otherwise buffers stay NULL.
 * - A zeroed per-class summary array (indexed by class id, sized by the
 *   current classes->next_id) is set up only for collection-summary mode. */
1579 static ProfilerHeapShotWriteJob*
1580 profiler_heap_shot_write_job_new (gboolean heap_shot_was_signalled, gboolean dump_heap_data, guint32 collection) {
1581 ProfilerHeapShotWriteJob *job = g_new (ProfilerHeapShotWriteJob, 1);
1583 job->next_unwritten = NULL;
1585 if (profiler->action_flags.unreachable_objects || dump_heap_data) {
1586 job->buffers = g_new (ProfilerHeapShotWriteBuffer, 1);
1587 job->buffers->next = NULL;
1588 job->last_next = & (job->buffers->next);
1589 job->start = & (job->buffers->buffer [0]);
1590 job->cursor = job->start;
1591 job->end = & (job->buffers->buffer [PROFILER_HEAP_SHOT_WRITE_BUFFER_SIZE]);
1593 job->buffers = NULL;
1594 job->last_next = NULL;
1599 job->full_buffers = 0;
1601 if (profiler->action_flags.collection_summary) {
1602 job->summary.capacity = profiler->classes->next_id;
1603 job->summary.per_class_data = g_new0 (ProfilerHeapShotClassSummary, job->summary.capacity);
1605 job->summary.capacity = 0;
1606 job->summary.per_class_data = NULL;
1609 job->heap_shot_was_signalled = heap_shot_was_signalled;
1610 job->collection = collection;
1611 job->dump_heap_data = dump_heap_data;
1612 #if DEBUG_HEAP_PROFILER
1613 printf ("profiler_heap_shot_write_job_new: created job %p with buffer %p(%p-%p) (collection %d, dump %d)\n", job, job->buffers, job->start, job->end, collection, dump_heap_data);
/* A job is worth writing if it collected value buffers or a non-empty
 * per-class summary. */
1619 profiler_heap_shot_write_job_has_data (ProfilerHeapShotWriteJob *job) {
1620 return ((job->buffers != NULL) || (job->summary.capacity > 0));
/* Grow a full write job: append a fresh buffer via the job->last_next tail
 * pointer, store `value` in its first slot, and repoint the job's
 * start/cursor/end window at the new buffer (cursor at slot 1). */
1624 profiler_heap_shot_write_job_add_buffer (ProfilerHeapShotWriteJob *job, gpointer value) {
1625 ProfilerHeapShotWriteBuffer *buffer = g_new (ProfilerHeapShotWriteBuffer, 1);
1626 buffer->next = NULL;
1627 *(job->last_next) = buffer;
1628 job->last_next = & (buffer->next);
1629 job->full_buffers ++;
1630 buffer->buffer [0] = value;
1631 job->start = & (buffer->buffer [0]);
1632 job->cursor = & (buffer->buffer [1]);
1633 job->end = & (buffer->buffer [PROFILER_HEAP_SHOT_WRITE_BUFFER_SIZE]);
1634 #if DEBUG_HEAP_PROFILER
1635 printf ("profiler_heap_shot_write_job_add_buffer: in job %p, added buffer %p(%p-%p) with value %p at address %p (cursor now %p)\n", job, buffer, job->start, job->end, value, &(buffer->buffer [0]), job->cursor);
1637 ProfilerHeapShotWriteBuffer *current_buffer;
1638 for (current_buffer = job->buffers; current_buffer != NULL; current_buffer = current_buffer->next) {
1639 printf ("profiler_heap_shot_write_job_add_buffer: now job %p has buffer %p\n", job, current_buffer);
/* Release everything a job owns besides the job struct itself: the whole
 * value-buffer chain and the per-class summary array (both reset so the
 * function is safe to call more than once). */
1646 profiler_heap_shot_write_job_free_buffers (ProfilerHeapShotWriteJob *job) {
1647 ProfilerHeapShotWriteBuffer *buffer = job->buffers;
1649 while (buffer != NULL) {
1650 ProfilerHeapShotWriteBuffer *next = buffer->next;
1651 #if DEBUG_HEAP_PROFILER
1652 printf ("profiler_heap_shot_write_job_free_buffers: in job %p, freeing buffer %p\n", job, buffer);
1658 job->buffers = NULL;
1660 if (job->summary.per_class_data != NULL) {
1661 g_free (job->summary.per_class_data);
1662 job->summary.per_class_data = NULL;
1664 job->summary.capacity = 0;
1668 profiler_heap_shot_write_block (ProfilerHeapShotWriteJob *job);
/* Writer-thread worker: walk the pending heap-shot job list, flush each job
 * that still has data via profiler_heap_shot_write_block, and unlink written
 * jobs by clearing next_unwritten pointers as it goes.
 * NOTE(review): the listing omits some lines of this loop (the outer
 * `done` loop control and part of the branch bodies) -- the visible logic
 * is annotated as-is. */
1671 profiler_process_heap_shot_write_jobs (void) {
1672 gboolean done = FALSE;
1675 ProfilerHeapShotWriteJob *current_job = profiler->heap_shot_write_jobs;
1676 ProfilerHeapShotWriteJob *previous_job = NULL;
1677 ProfilerHeapShotWriteJob *next_job;
1680 while (current_job != NULL) {
1681 next_job = current_job->next_unwritten;
1683 if (next_job != NULL) {
1684 if (profiler_heap_shot_write_job_has_data (current_job)) {
/* Successor has nothing to write: cut the unwritten chain here. */
1687 if (! profiler_heap_shot_write_job_has_data (next_job)) {
1688 current_job->next_unwritten = NULL;
1692 if (profiler_heap_shot_write_job_has_data (current_job)) {
1693 LOG_WRITER_THREAD ("profiler_process_heap_shot_write_jobs: writing...");
1694 profiler_heap_shot_write_block (current_job);
1695 LOG_WRITER_THREAD ("profiler_process_heap_shot_write_jobs: done");
1696 if (previous_job != NULL) {
1697 previous_job->next_unwritten = NULL;
1702 previous_job = current_job;
1703 current_job = next_job;
/* Free the jobs that have already been written: skip past the still-unwritten
 * prefix of the list (chained by next_unwritten), detach the written suffix,
 * then free each written job and its buffers. */
1709 profiler_free_heap_shot_write_jobs (void) {
1710 ProfilerHeapShotWriteJob *current_job = profiler->heap_shot_write_jobs;
1711 ProfilerHeapShotWriteJob *next_job;
1713 if (current_job != NULL) {
/* Anything still reachable through next_unwritten must be kept. */
1714 while (current_job->next_unwritten != NULL) {
1715 #if DEBUG_HEAP_PROFILER
1716 printf ("profiler_free_heap_shot_write_jobs: job %p must not be freed\n", current_job);
1718 current_job = current_job->next_unwritten;
1721 next_job = current_job->next;
1722 current_job->next = NULL;
1723 current_job = next_job;
/* From here on, every job has been written and can be destroyed. */
1725 while (current_job != NULL) {
1726 #if DEBUG_HEAP_PROFILER
1727 printf ("profiler_free_heap_shot_write_jobs: job %p will be freed\n", current_job);
1729 next_job = current_job->next;
1730 profiler_heap_shot_write_job_free_buffers (current_job);
1731 g_free (current_job);
1732 current_job = next_job;
/* Unconditionally destroy every job in the list (shutdown path): free each
 * job's buffers/summary, then the job itself. */
1738 profiler_destroy_heap_shot_write_jobs (void) {
1739 ProfilerHeapShotWriteJob *current_job = profiler->heap_shot_write_jobs;
1740 ProfilerHeapShotWriteJob *next_job;
1742 while (current_job != NULL) {
1743 next_job = current_job->next;
1744 profiler_heap_shot_write_job_free_buffers (current_job);
1745 g_free (current_job);
1746 current_job = next_job;
/* Push a finished job onto the head of the global list; next_unwritten
 * mirrors next so the writer thread sees it as pending. */
1751 profiler_add_heap_shot_write_job (ProfilerHeapShotWriteJob *job) {
1752 job->next = profiler->heap_shot_write_jobs;
1753 job->next_unwritten = job->next;
1754 profiler->heap_shot_write_jobs = job;
1755 #if DEBUG_HEAP_PROFILER
1756 printf ("profiler_add_heap_shot_write_job: added job %p\n", job);
/* Debug tracing hooks for STORE_ALLOCATED_OBJECT; compiled away when
 * DEBUG_HEAP_PROFILER is off. */
1760 #if DEBUG_HEAP_PROFILER
1761 #define STORE_ALLOCATED_OBJECT_MESSAGE1(d,o) printf ("STORE_ALLOCATED_OBJECT[TID %ld]: storing object %p at address %p\n", (d)->thread_id, (o), (d)->heap_shot_object_buffers->next_free_slot)
1762 #define STORE_ALLOCATED_OBJECT_MESSAGE2(d,o) printf ("STORE_ALLOCATED_OBJECT[TID %ld]: storing object %p at address %p in new buffer %p\n", (d)->thread_id, (o), buffer->next_free_slot, buffer)
1764 #define STORE_ALLOCATED_OBJECT_MESSAGE1(d,o)
1765 #define STORE_ALLOCATED_OBJECT_MESSAGE2(d,o)
/* Record allocated object (o) in thread (d)'s current heap-shot object
 * buffer, allocating and switching to a fresh buffer when the current one
 * is full. */
1767 #define STORE_ALLOCATED_OBJECT(d,o) do {\
1768 if ((d)->heap_shot_object_buffers->next_free_slot < (d)->heap_shot_object_buffers->end) {\
1769 STORE_ALLOCATED_OBJECT_MESSAGE1 ((d), (o));\
1770 *((d)->heap_shot_object_buffers->next_free_slot) = (o);\
1771 (d)->heap_shot_object_buffers->next_free_slot ++;\
1773 ProfilerHeapShotObjectBuffer *buffer = profiler_heap_shot_object_buffer_new (d);\
1774 STORE_ALLOCATED_OBJECT_MESSAGE2 ((d), (o));\
1775 *((buffer)->next_free_slot) = (o);\
1776 (buffer)->next_free_slot ++;\
/* Allocate and initialize the per-thread profiler state: a zeroed event
 * buffer of `buffer_size` entries plus its write/flush/map cursors, the
 * starting event counter, the owning thread id, and (when any heap-shot
 * style action is enabled) an initial heap-shot object buffer.
 * NOTE(review): end_event points at the last slot (buffer_size - 1), i.e.
 * one entry appears to be held in reserve -- confirm against the flush
 * logic elsewhere in the file. */
1780 static ProfilerPerThreadData*
1781 profiler_per_thread_data_new (guint32 buffer_size)
1783 ProfilerPerThreadData *data = g_new (ProfilerPerThreadData, 1);
1785 data->events = g_new0 (ProfilerEventData, buffer_size);
1786 data->next_free_event = data->events;
1787 data->end_event = data->events + (buffer_size - 1);
1788 data->first_unwritten_event = data->events;
1789 data->first_unmapped_event = data->events;
1790 MONO_PROFILER_GET_CURRENT_COUNTER (data->start_event_counter);
1791 data->last_event_counter = data->start_event_counter;
1792 data->thread_id = CURRENT_THREAD_ID ();
1793 data->heap_shot_object_buffers = NULL;
1794 if ((profiler->action_flags.unreachable_objects == TRUE) ||
1795 (profiler->action_flags.heap_shot == TRUE) ||
1796 (profiler->action_flags.collection_summary == TRUE)) {
1797 profiler_heap_shot_object_buffer_new (data);
/* The shadow stack is only really allocated when stack tracking is on. */
1799 if (profiler->action_flags.track_stack) {
1800 thread_stack_initialize (&(data->stack), 64);
1802 thread_stack_initialize_empty (&(data->stack));
/* Release all per-thread state: event buffer, heap-shot object buffer
 * chain, and the shadow stack. */
1808 profiler_per_thread_data_destroy (ProfilerPerThreadData *data) {
1809 g_free (data->events);
1810 profiler_heap_shot_object_buffers_destroy (data->heap_shot_object_buffers);
1811 thread_stack_free (&(data->stack));
/* Allocate the statistical-sampling hit buffer. Each logical sample can
 * carry a call chain, so the raw buffer holds
 * statistical_buffer_size * (statistical_call_chain_depth + 1) hits, while
 * end_index counts logical samples only. */
1815 static ProfilerStatisticalData*
1816 profiler_statistical_data_new (MonoProfiler *profiler) {
1817 int buffer_size = profiler->statistical_buffer_size * (profiler->statistical_call_chain_depth + 1);
1818 ProfilerStatisticalData *data = g_new (ProfilerStatisticalData, 1);
1820 data->hits = g_new0 (ProfilerStatisticalHit, buffer_size);
1821 data->next_free_index = 0;
1822 data->end_index = profiler->statistical_buffer_size;
1823 data->first_unwritten_index = 0;
/* Counterpart destructor: frees only the hit array here (struct freeing
 * not visible in this listing). */
1829 profiler_statistical_data_destroy (ProfilerStatisticalData *data) {
1830 g_free (data->hits);
/* Advance to the next file-write buffer, allocating one (header struct plus
 * PROFILER_FILE_WRITE_BUFFER_SIZE payload bytes) only if the chain has no
 * spare buffer to reuse; resets the write position and bumps the
 * full-buffer count. */
1835 profiler_add_write_buffer (void) {
1836 if (profiler->current_write_buffer->next == NULL) {
1837 profiler->current_write_buffer->next = g_malloc (sizeof (ProfilerFileWriteBuffer) + PROFILER_FILE_WRITE_BUFFER_SIZE);
1838 profiler->current_write_buffer->next->next = NULL;
1840 //printf ("Added next buffer %p, to buffer %p\n", profiler->current_write_buffer->next, profiler->current_write_buffer);
1843 profiler->current_write_buffer = profiler->current_write_buffer->next;
1844 profiler->current_write_position = 0;
1845 profiler->full_write_buffers ++;
/* Free the entire chain of file-write buffers (shutdown path). */
1849 profiler_free_write_buffers (void) {
1850 ProfilerFileWriteBuffer *current_buffer = profiler->write_buffers;
1851 while (current_buffer != NULL) {
1852 ProfilerFileWriteBuffer *next_buffer = current_buffer->next;
1854 //printf ("Freeing write buffer %p, next is %p\n", current_buffer, next_buffer);
1856 g_free (current_buffer);
1857 current_buffer = next_buffer;
/* Append one byte to the current write buffer, rolling over to a new
 * buffer first when the current one is full. */
1861 #define WRITE_BYTE(b) do {\
1862 if (profiler->current_write_position >= PROFILER_FILE_WRITE_BUFFER_SIZE) {\
1863 profiler_add_write_buffer ();\
1865 profiler->current_write_buffer->buffer [profiler->current_write_position] = (b);\
1866 profiler->current_write_position ++;\
/* Flush the buffered data to the output file as one block:
 * emits a 10-byte little-endian header (block code, payload size, counter
 * delta since the previous header), then every full buffer, then the
 * partial tail buffer, and finally rewinds the buffer chain for reuse.
 * NOTE(review): the branch taken when last_header_counter == 0 is on lines
 * not visible in this listing; presumably it sets counter_delta -- confirm. */
1871 write_current_block (guint16 code) {
1872 guint32 size = (profiler->full_write_buffers * PROFILER_FILE_WRITE_BUFFER_SIZE) + profiler->current_write_position;
1873 ProfilerFileWriteBuffer *current_buffer = profiler->write_buffers;
1874 guint64 current_counter;
1875 guint32 counter_delta;
1878 MONO_PROFILER_GET_CURRENT_COUNTER (current_counter);
1879 if (profiler->last_header_counter != 0) {
1880 counter_delta = current_counter - profiler->last_header_counter;
1884 profiler->last_header_counter = current_counter;
/* Header layout: [0..1] code, [2..5] size, [6..9] counter delta (LE). */
1886 header [0] = code & 0xff;
1887 header [1] = (code >> 8) & 0xff;
1888 header [2] = size & 0xff;
1889 header [3] = (size >> 8) & 0xff;
1890 header [4] = (size >> 16) & 0xff;
1891 header [5] = (size >> 24) & 0xff;
1892 header [6] = counter_delta & 0xff;
1893 header [7] = (counter_delta >> 8) & 0xff;
1894 header [8] = (counter_delta >> 16) & 0xff;
1895 header [9] = (counter_delta >> 24) & 0xff;
1897 #if (DEBUG_FILE_WRITES)
1898 printf ("write_current_block: writing header (code %d)\n", code);
1900 WRITE_BUFFER (& (header [0]), 10);
/* Full buffers first... */
1902 while ((current_buffer != NULL) && (profiler->full_write_buffers > 0)) {
1903 #if (DEBUG_FILE_WRITES)
1904 printf ("write_current_block: writing buffer (size %d)\n", PROFILER_FILE_WRITE_BUFFER_SIZE);
1906 WRITE_BUFFER (& (current_buffer->buffer [0]), PROFILER_FILE_WRITE_BUFFER_SIZE);
1907 profiler->full_write_buffers --;
1908 current_buffer = current_buffer->next;
/* ...then the partially filled tail, if any. */
1910 if (profiler->current_write_position > 0) {
1911 #if (DEBUG_FILE_WRITES)
1912 printf ("write_current_block: writing last buffer (size %d)\n", profiler->current_write_position);
1914 WRITE_BUFFER (& (current_buffer->buffer [0]), profiler->current_write_position);
1917 #if (DEBUG_FILE_WRITES)
1918 printf ("write_current_block: buffers flushed\n");
/* Keep the allocated chain but restart writing from the first buffer. */
1921 profiler->current_write_buffer = profiler->write_buffers;
1922 profiler->current_write_position = 0;
1923 profiler->full_write_buffers = 0;
/* Variable-length integer encoding: 7 payload bits per byte, low bits
 * first; the FINAL byte is the one with the high bit (0x80) set.
 * NOTE(review): this is the inverse of the LEB128 convention, where the
 * high bit marks continuation -- intentional per this file's format. */
1927 #define SEVEN_BITS_MASK (0x7f)
1928 #define EIGHT_BIT_MASK (0x80)
1931 write_uint32 (guint32 value) {
1932 while (value > SEVEN_BITS_MASK) {
1933 WRITE_BYTE (value & SEVEN_BITS_MASK);
1936 WRITE_BYTE (value | EIGHT_BIT_MASK);
/* Same encoding for 64-bit values. */
1939 write_uint64 (guint64 value) {
1940 while (value > SEVEN_BITS_MASK) {
1941 WRITE_BYTE (value & SEVEN_BITS_MASK);
1944 WRITE_BYTE (value | EIGHT_BIT_MASK);
/* Emit a string byte-by-byte (terminator handling on lines not visible
 * in this listing). */
1947 write_string (const char *string) {
1948 while (*string != 0) {
1949 WRITE_BYTE (*string);
1955 static void write_clock_data (void);
/* Emit a DIRECTIVES block describing how allocation events are encoded in
 * this file (caller info, stack info, object ids), bracketed by clock data
 * and terminated by MONO_PROFILER_DIRECTIVE_END. */
1957 write_directives_block (gboolean start) {
1958 write_clock_data ();
1961 if (profiler->action_flags.save_allocation_caller) {
1962 write_uint32 (MONO_PROFILER_DIRECTIVE_ALLOCATIONS_CARRY_CALLER);
1964 if (profiler->action_flags.save_allocation_stack || profiler->action_flags.track_calls) {
1965 write_uint32 (MONO_PROFILER_DIRECTIVE_ALLOCATIONS_HAVE_STACK);
1967 if (profiler->action_flags.allocations_carry_id) {
1968 write_uint32 (MONO_PROFILER_DIRECTIVE_ALLOCATIONS_CARRY_ID);
1971 write_uint32 (MONO_PROFILER_DIRECTIVE_END);
1973 write_clock_data ();
1974 write_current_block (MONO_PROFILER_FILE_BLOCK_KIND_DIRECTIVES);
1977 #if DEBUG_HEAP_PROFILER
1978 #define WRITE_HEAP_SHOT_JOB_VALUE_MESSAGE(v,c) printf ("WRITE_HEAP_SHOT_JOB_VALUE: writing value %p at cursor %p\n", (v), (c))
1980 #define WRITE_HEAP_SHOT_JOB_VALUE_MESSAGE(v,c)
/* Append value (v) to job (j)'s buffer chain, growing the chain via
 * profiler_heap_shot_write_job_add_buffer when the cursor hits the end. */
1982 #define WRITE_HEAP_SHOT_JOB_VALUE(j,v) do {\
1983 if ((j)->cursor < (j)->end) {\
1984 WRITE_HEAP_SHOT_JOB_VALUE_MESSAGE ((v), ((j)->cursor));\
1985 *((j)->cursor) = (v);\
1988 profiler_heap_shot_write_job_add_buffer (j, v);\
/* Redefine GLib's pointer<->uint macros so the conversion is wide enough
 * for the platform pointer size (GLib's defaults are guint-based). */
1993 #undef GUINT_TO_POINTER
1994 #undef GPOINTER_TO_UINT
1995 #if (SIZEOF_VOID_P == 4)
1996 #define GUINT_TO_POINTER(u) ((void*)(guint32)(u))
1997 #define GPOINTER_TO_UINT(p) ((guint32)(void*)(p))
1998 #elif (SIZEOF_VOID_P == 8)
1999 #define GUINT_TO_POINTER(u) ((void*)(guint64)(u))
2000 #define GPOINTER_TO_UINT(p) ((guint64)(void*)(p))
2002 #error Bad size of void pointer
/* Tag a pointer value with a HeapProfilerJobValueCode in its low bits. */
2005 #define WRITE_HEAP_SHOT_JOB_VALUE_WITH_CODE(j,v,c) WRITE_HEAP_SHOT_JOB_VALUE (j, GUINT_TO_POINTER (GPOINTER_TO_UINT (v)|(c)))
2007 #if DEBUG_HEAP_PROFILER
2008 #define UPDATE_JOB_BUFFER_CURSOR_MESSAGE() printf ("profiler_heap_shot_write_block[UPDATE_JOB_BUFFER_CURSOR]: in job %p, moving to buffer %p and cursor %p\n", job, buffer, cursor)
2010 #define UPDATE_JOB_BUFFER_CURSOR_MESSAGE()
/* Advance the reader cursor used in profiler_heap_shot_write_data_block,
 * hopping to the next chained buffer when the current one is exhausted
 * (expects `buffer`, `cursor`, `end` locals in scope). */
2012 #define UPDATE_JOB_BUFFER_CURSOR() do {\
2014 if (cursor >= end) {\
2015 buffer = buffer->next;\
2016 if (buffer != NULL) {\
2017 cursor = & (buffer->buffer [0]);\
2018 if (buffer->next != NULL) {\
2019 end = & (buffer->buffer [PROFILER_HEAP_SHOT_WRITE_BUFFER_SIZE]);\
2027 UPDATE_JOB_BUFFER_CURSOR_MESSAGE ();\
/* Serialize one heap-shot job as a HEAP_DATA block: job timestamps and
 * collection number first, then each buffered value decoded by the code
 * tag in its low bits (HEAP_CODE_MASK):
 *  - HEAP_CODE_FREE_OBJECT_CLASS: class pointer + following size word ->
 *    written as (class id << 2 | code) and size (unreachable object).
 *  - HEAP_CODE_OBJECT: object pointer + reference count word + that many
 *    reference words -> written as object address, class id, size, refs.
 * Closed with end-of-block clock data. */
2031 profiler_heap_shot_write_data_block (ProfilerHeapShotWriteJob *job) {
2032 ProfilerHeapShotWriteBuffer *buffer;
2035 guint64 start_counter;
2037 guint64 end_counter;
2040 write_uint64 (job->start_counter);
2041 write_uint64 (job->start_time);
2042 write_uint64 (job->end_counter);
2043 write_uint64 (job->end_time);
2044 write_uint32 (job->collection);
2045 MONO_PROFILER_GET_CURRENT_COUNTER (start_counter);
2046 MONO_PROFILER_GET_CURRENT_TIME (start_time);
2047 write_uint64 (start_counter);
2048 write_uint64 (start_time);
2049 #if DEBUG_HEAP_PROFILER
2050 printf ("profiler_heap_shot_write_data_block: start writing job %p (start %p, end %p)...\n", job, & (job->buffers->buffer [0]), job->cursor);
/* Prime the buffer/cursor/end walk over the job's buffer chain; the last
 * buffer is only valid up to job->cursor (else-branches on lines not
 * visible in this listing). */
2052 buffer = job->buffers;
2053 cursor = & (buffer->buffer [0]);
2054 if (buffer->next != NULL) {
2055 end = & (buffer->buffer [PROFILER_HEAP_SHOT_WRITE_BUFFER_SIZE]);
2059 if (cursor >= end) {
2062 #if DEBUG_HEAP_PROFILER
2063 printf ("profiler_heap_shot_write_data_block: in job %p, starting at buffer %p and cursor %p\n", job, buffer, cursor);
2065 while (cursor != NULL) {
2066 gpointer value = *cursor;
2067 HeapProfilerJobValueCode code = GPOINTER_TO_UINT (value) & HEAP_CODE_MASK;
2068 #if DEBUG_HEAP_PROFILER
2069 printf ("profiler_heap_shot_write_data_block: got value %p and code %d\n", value, code);
2072 UPDATE_JOB_BUFFER_CURSOR ();
2073 if (code == HEAP_CODE_FREE_OBJECT_CLASS) {
2074 MonoClass *klass = GUINT_TO_POINTER (GPOINTER_TO_UINT (value) & (~ (guint64) HEAP_CODE_MASK));
2075 //MonoClass *klass = GUINT_TO_POINTER (GPOINTER_TO_UINT (value) % 4);
2076 ClassIdMappingElement *class_id;
2079 class_id = class_id_mapping_element_get (klass);
2080 if (class_id == NULL) {
2081 printf ("profiler_heap_shot_write_data_block: unknown class %p", klass);
2083 g_assert (class_id != NULL);
2084 write_uint32 ((class_id->id << 2) | HEAP_CODE_FREE_OBJECT_CLASS);
2086 size = GPOINTER_TO_UINT (*cursor);
2087 UPDATE_JOB_BUFFER_CURSOR ();
2088 write_uint32 (size);
2089 #if DEBUG_HEAP_PROFILER
2090 printf ("profiler_heap_shot_write_data_block: wrote unreachable object of class %p (id %d, size %d)\n", klass, class_id->id, size);
2092 } else if (code == HEAP_CODE_OBJECT) {
2093 MonoObject *object = GUINT_TO_POINTER (GPOINTER_TO_UINT (value) & (~ (guint64) HEAP_CODE_MASK));
2094 MonoClass *klass = mono_object_get_class (object);
2095 ClassIdMappingElement *class_id = class_id_mapping_element_get (klass);
2096 guint32 size = mono_object_get_size (object);
2097 guint32 references = GPOINTER_TO_UINT (*cursor);
2098 UPDATE_JOB_BUFFER_CURSOR ();
2100 if (class_id == NULL) {
2101 printf ("profiler_heap_shot_write_data_block: unknown class %p", klass);
2103 g_assert (class_id != NULL);
2105 write_uint64 (GPOINTER_TO_UINT (value));
2106 write_uint32 (class_id->id);
2107 write_uint32 (size);
2108 write_uint32 (references);
2109 #if DEBUG_HEAP_PROFILER
2110 printf ("profiler_heap_shot_write_data_block: writing object %p (references %d)\n", value, references);
/* Drain the object's reference pointers from the buffer stream. */
2113 while (references > 0) {
2114 gpointer reference = *cursor;
2115 write_uint64 (GPOINTER_TO_UINT (reference));
2116 UPDATE_JOB_BUFFER_CURSOR ();
2118 #if DEBUG_HEAP_PROFILER
2119 printf ("profiler_heap_shot_write_data_block: inside object %p, wrote reference %p)\n", value, reference);
2123 #if DEBUG_HEAP_PROFILER
2124 printf ("profiler_heap_shot_write_data_block: unknown code %d in value %p\n", code, value);
2126 g_assert_not_reached ();
2131 MONO_PROFILER_GET_CURRENT_COUNTER (end_counter);
2132 MONO_PROFILER_GET_CURRENT_TIME (end_time);
2133 write_uint64 (end_counter);
2134 write_uint64 (end_time);
2136 write_current_block (MONO_PROFILER_FILE_BLOCK_KIND_HEAP_DATA);
2137 #if DEBUG_HEAP_PROFILER
2138 printf ("profiler_heap_shot_write_data_block: writing job %p done.\n", job);
/* Serialize a job's per-class summary as a HEAP_SUMMARY block: clock data,
 * collection number, then for every class id with at least one reachable
 * or unreachable instance, its reachable/unreachable instance and byte
 * counts; closed with clock data. */
2142 profiler_heap_shot_write_summary_block (ProfilerHeapShotWriteJob *job) {
2143 guint64 start_counter;
2145 guint64 end_counter;
2149 #if DEBUG_HEAP_PROFILER
2150 printf ("profiler_heap_shot_write_summary_block: start writing job %p...\n", job);
2152 MONO_PROFILER_GET_CURRENT_COUNTER (start_counter);
2153 MONO_PROFILER_GET_CURRENT_TIME (start_time);
2154 write_uint64 (start_counter);
2155 write_uint64 (start_time);
2157 write_uint32 (job->collection);
/* Skip classes with no instances at all to keep the block compact. */
2159 for (id = 0; id < job->summary.capacity; id ++) {
2160 if ((job->summary.per_class_data [id].reachable.instances > 0) || (job->summary.per_class_data [id].unreachable.instances > 0)) {
2162 write_uint32 (job->summary.per_class_data [id].reachable.instances);
2163 write_uint32 (job->summary.per_class_data [id].reachable.bytes);
2164 write_uint32 (job->summary.per_class_data [id].unreachable.instances);
2165 write_uint32 (job->summary.per_class_data [id].unreachable.bytes);
2170 MONO_PROFILER_GET_CURRENT_COUNTER (end_counter);
2171 MONO_PROFILER_GET_CURRENT_TIME (end_time);
2172 write_uint64 (end_counter);
2173 write_uint64 (end_time);
2175 write_current_block (MONO_PROFILER_FILE_BLOCK_KIND_HEAP_SUMMARY);
2176 #if DEBUG_HEAP_PROFILER
2177 printf ("profiler_heap_shot_write_summary_block: writing job %p done.\n", job);
/* Write everything one job produced (summary block and/or heap-data block
 * depending on the active action flags), then release the job's buffers. */
2182 profiler_heap_shot_write_block (ProfilerHeapShotWriteJob *job) {
2183 #if DEBUG_HEAP_PROFILER
2184 printf ("profiler_heap_shot_write_block: working on job %p...\n", job);
2187 if (profiler->action_flags.collection_summary == TRUE) {
2188 profiler_heap_shot_write_summary_block (job);
2191 if ((profiler->action_flags.unreachable_objects == TRUE) || (profiler->action_flags.heap_shot == TRUE)) {
2192 profiler_heap_shot_write_data_block (job);
2195 profiler_heap_shot_write_job_free_buffers (job);
2196 #if DEBUG_HEAP_PROFILER
2197 printf ("profiler_heap_shot_write_block: work on job %p done.\n", job);
/* Emit a LOADED block for one element (module/assembly/appdomain, per
 * `kind`): start/end counters, loading thread id and the element's name;
 * marks the element so it is not written twice. */
2202 write_element_load_block (LoadedElement *element, guint8 kind, gsize thread_id) {
2204 write_uint64 (element->load_start_counter);
2205 write_uint64 (element->load_end_counter);
2206 write_uint64 (thread_id);
2207 write_string (element->name);
2208 write_current_block (MONO_PROFILER_FILE_BLOCK_KIND_LOADED);
2209 element->load_written = TRUE;
/* Symmetric UNLOADED block using the unload counters. */
2213 write_element_unload_block (LoadedElement *element, guint8 kind, gsize thread_id) {
2215 write_uint64 (element->unload_start_counter);
2216 write_uint64 (element->unload_end_counter);
2217 write_uint64 (thread_id);
2218 write_string (element->name);
2219 write_current_block (MONO_PROFILER_FILE_BLOCK_KIND_UNLOADED);
2220 element->unload_written = TRUE;
/* Emit the current counter and wall-clock time pair; used to bracket
 * blocks so readers can correlate counters with real time. */
2224 write_clock_data (void) {
2228 MONO_PROFILER_GET_CURRENT_COUNTER (counter);
2229 MONO_PROFILER_GET_CURRENT_TIME (time);
2231 write_uint64 (counter);
2232 write_uint64 (time);
/* Emit a MAPPING block translating internal ids to names: every unwritten
 * class (id -> name) then every unwritten method (id, class id -> name).
 * Name strings are freed as they are written and both unwritten lists are
 * cleared; no-op when there is nothing pending. */
2236 write_mapping_block (gsize thread_id) {
2237 ClassIdMappingElement *current_class;
2238 MethodIdMappingElement *current_method;
2240 if ((profiler->classes->unwritten == NULL) && (profiler->methods->unwritten == NULL))
2243 #if (DEBUG_MAPPING_EVENTS || DEBUG_FILE_WRITES)
2244 printf ("[write_mapping_block][TID %ld] START\n", thread_id);
2247 write_clock_data ();
2248 write_uint64 (thread_id);
2250 for (current_class = profiler->classes->unwritten; current_class != NULL; current_class = current_class->next_unwritten) {
2251 write_uint32 (current_class->id);
2252 write_string (current_class->name);
2253 #if (DEBUG_MAPPING_EVENTS)
2254 printf ("mapping CLASS (%d => %s)\n", current_class->id, current_class->name);
/* Name has been serialized; drop the heap copy. */
2256 g_free (current_class->name);
2257 current_class->name = NULL;
2260 profiler->classes->unwritten = NULL;
2262 for (current_method = profiler->methods->unwritten; current_method != NULL; current_method = current_method->next_unwritten) {
2263 MonoMethod *method = current_method->method;
2264 MonoClass *klass = mono_method_get_class (method);
2265 ClassIdMappingElement *class_element = class_id_mapping_element_get (klass);
2266 g_assert (class_element != NULL);
2267 write_uint32 (current_method->id);
2268 write_uint32 (class_element->id);
2269 write_string (current_method->name);
2270 #if (DEBUG_MAPPING_EVENTS)
2271 printf ("mapping METHOD ([%d]%d => %s)\n", class_element?class_element->id:1, current_method->id, current_method->name);
2273 g_free (current_method->name);
2274 current_method->name = NULL;
2277 profiler->methods->unwritten = NULL;
2279 write_clock_data ();
2280 write_current_block (MONO_PROFILER_FILE_BLOCK_KIND_MAPPING);
2282 #if (DEBUG_MAPPING_EVENTS || DEBUG_FILE_WRITES)
2283 printf ("[write_mapping_block][TID %ld] END\n", thread_id);
/*
 * Packed event codes: the low MONO_PROFILER_PACKED_EVENT_CODE_BITS (3) of
 * the first byte of each on-disk event select one of these codes; the
 * remaining 5 bits carry the first chunk of the event's data payload.
 * NOTE(review): the enum opener line is omitted from this listing.
 */
2288 MONO_PROFILER_PACKED_EVENT_CODE_METHOD_ENTER = 1,
2289 MONO_PROFILER_PACKED_EVENT_CODE_METHOD_EXIT_IMPLICIT = 2,
2290 MONO_PROFILER_PACKED_EVENT_CODE_METHOD_EXIT_EXPLICIT = 3,
2291 MONO_PROFILER_PACKED_EVENT_CODE_CLASS_ALLOCATION = 4,
2292 MONO_PROFILER_PACKED_EVENT_CODE_METHOD_EVENT = 5,
2293 MONO_PROFILER_PACKED_EVENT_CODE_CLASS_EVENT = 6,
2294 MONO_PROFILER_PACKED_EVENT_CODE_OTHER_EVENT = 7
2295 } MonoProfilerPackedEventCode;
2296 #define MONO_PROFILER_PACKED_EVENT_CODE_BITS 3
2297 #define MONO_PROFILER_PACKED_EVENT_DATA_BITS (8-MONO_PROFILER_PACKED_EVENT_CODE_BITS)
2298 #define MONO_PROFILER_PACKED_EVENT_DATA_MASK ((1<<MONO_PROFILER_PACKED_EVENT_DATA_BITS)-1)
/* Packs `base` (a packed event code) with the low DATA_BITS of `data` into
 * `result`, then shifts `data` right so the remainder can be written as a
 * follow-up varint. Multi-statement macro; the do/while(0) closer is in an
 * omitted line of this listing. */
2300 #define MONO_PROFILER_EVENT_MAKE_PACKED_CODE(result,data,base) do {\
2301 result = ((base)|((data & MONO_PROFILER_PACKED_EVENT_DATA_MASK) << MONO_PROFILER_PACKED_EVENT_CODE_BITS));\
2302 data >>= MONO_PROFILER_PACKED_EVENT_DATA_BITS;\
/* Full (non-packed) form: stores kind in bits 4+ and code in bits 0-3 of
 * the payload above the 3 code bits; used when data does not fit packed. */
2304 #define MONO_PROFILER_EVENT_MAKE_FULL_CODE(result,code,kind,base) do {\
2305 result = ((base)|((((kind)<<4) | (code)) << MONO_PROFILER_PACKED_EVENT_CODE_BITS));\
2308 static ProfilerEventData*
/*
 * Writes a STACK_SECTION event: a header (last saved frame index, number of
 * frames) followed by one uint32 per frame, encoding (method_id << 1) with
 * bit 0 set for JIT-time allocation callers. Returns the pointer past the
 * consumed events (return statement is in an omitted line of this listing).
 * NOTE(review): the lines advancing `events` and setting `jit_flag` are
 * omitted here — confirm against full source.
 */
2309 write_stack_section_event (ProfilerEventData *events) {
2310 int last_saved_frame = events->data.number;
2311 int saved_frames = events->value;
2315 MONO_PROFILER_EVENT_MAKE_FULL_CODE (event_code, MONO_PROFILER_EVENT_STACK_SECTION, 0, MONO_PROFILER_PACKED_EVENT_CODE_OTHER_EVENT);
2316 WRITE_BYTE (event_code);
2317 write_uint32 (last_saved_frame);
2318 write_uint32 (saved_frames);
2321 for (i = 0; i < saved_frames; i++) {
2322 guint8 code = events->code;
2324 MethodIdMappingElement *method;
/* Only two caller kinds are valid in a stack section; anything else is a
 * corrupted event buffer. */
2326 if (code == MONO_PROFILER_EVENT_METHOD_ALLOCATION_CALLER) {
2328 } else if (code == MONO_PROFILER_EVENT_METHOD_ALLOCATION_JIT_TIME_CALLER) {
2331 g_assert_not_reached ();
2335 method = method_id_mapping_element_get (events->data.address);
2336 g_assert (method != NULL);
2337 write_uint32 ((method->id << 1) | jit_flag);
2344 static ProfilerEventData*
/*
 * Serializes one in-memory ProfilerEventData record into the on-disk packed
 * event format, and returns a pointer just past the event(s) consumed
 * (extension records such as allocation caller / object id occupy the
 * following slots). Dispatches on event->data_type:
 *   METHOD -> packed enter/exit for calls, full code otherwise;
 *   CLASS  -> packed allocation (possibly with caller and object-id
 *             extension values) or full class event;
 *   other  -> stack sections are delegated to write_stack_section_event.
 * NOTE(review): many interleaved lines (the `next` advances after reading
 * extension slots, the early-return for skipped JIT events, the final
 * `return next`) are omitted from this listing — confirm against source.
 */
2345 write_event (ProfilerEventData *event) {
2346 ProfilerEventData *next = event + 1;
2347 gboolean write_event_value = TRUE;
2350 guint64 event_value;
2351 gboolean write_event_value_extension_1 = FALSE;
2352 guint64 event_value_extension_1 = 0;
2353 gboolean write_event_value_extension_2 = FALSE;
2354 guint64 event_value_extension_2 = 0;
/* A value of MAX_EVENT_VALUE means the real 64-bit value is stored in the
 * next event slot (the bitfield is only EVENT_VALUE_BITS wide). */
2356 event_value = event->value;
2357 if (event_value == MAX_EVENT_VALUE) {
2358 event_value = *((guint64*)next);
2362 if (event->data_type == MONO_PROFILER_EVENT_DATA_TYPE_METHOD) {
2363 MethodIdMappingElement *element = method_id_mapping_element_get (event->data.address);
2364 g_assert (element != NULL);
2365 event_data = element->id;
2367 if (event->code == MONO_PROFILER_EVENT_METHOD_CALL) {
2368 if (event->kind == MONO_PROFILER_EVENT_KIND_START) {
2369 MONO_PROFILER_EVENT_MAKE_PACKED_CODE (event_code, event_data, MONO_PROFILER_PACKED_EVENT_CODE_METHOD_ENTER);
2371 MONO_PROFILER_EVENT_MAKE_PACKED_CODE (event_code, event_data, MONO_PROFILER_PACKED_EVENT_CODE_METHOD_EXIT_EXPLICIT);
2374 MONO_PROFILER_EVENT_MAKE_FULL_CODE (event_code, event->code, event->kind, MONO_PROFILER_PACKED_EVENT_CODE_METHOD_EVENT);
2376 } else if (event->data_type == MONO_PROFILER_EVENT_DATA_TYPE_CLASS) {
2377 ClassIdMappingElement *element = class_id_mapping_element_get (event->data.address);
2378 g_assert (element != NULL);
2379 event_data = element->id;
2381 if (event->code == MONO_PROFILER_EVENT_CLASS_ALLOCATION) {
/* JIT-time allocations get a distinct full code so the reader can tell
 * them apart from regular allocations. */
2382 if ((! profiler->action_flags.save_allocation_caller) || (! (next->code == MONO_PROFILER_EVENT_METHOD_ALLOCATION_JIT_TIME_CALLER))) {
2383 MONO_PROFILER_EVENT_MAKE_PACKED_CODE (event_code, event_data, MONO_PROFILER_PACKED_EVENT_CODE_CLASS_ALLOCATION);
2385 MONO_PROFILER_EVENT_MAKE_FULL_CODE (event_code, MONO_PROFILER_EVENT_JIT_TIME_ALLOCATION, event->kind, MONO_PROFILER_PACKED_EVENT_CODE_OTHER_EVENT);
/* Extension 1: the caller method id, stored in the next event slot. */
2388 if (profiler->action_flags.save_allocation_caller) {
2389 MonoMethod *caller_method = next->data.address;
2391 if ((next->code != MONO_PROFILER_EVENT_METHOD_ALLOCATION_CALLER) && (next->code != MONO_PROFILER_EVENT_METHOD_ALLOCATION_JIT_TIME_CALLER)) {
2392 g_assert_not_reached ();
2395 if (caller_method != NULL) {
2396 MethodIdMappingElement *caller = method_id_mapping_element_get (caller_method);
2397 g_assert (caller != NULL);
2398 event_value_extension_1 = caller->id;
2401 write_event_value_extension_1 = TRUE;
/* Extension 2: the allocated object's id, also from a following slot. */
2405 if (profiler->action_flags.allocations_carry_id) {
2406 event_value_extension_2 = GPOINTER_TO_UINT (next->data.address);
2408 if (next->code != MONO_PROFILER_EVENT_ALLOCATION_OBJECT_ID) {
2409 g_assert_not_reached ();
2412 write_event_value_extension_2 = TRUE;
2416 MONO_PROFILER_EVENT_MAKE_FULL_CODE (event_code, event->code, event->kind, MONO_PROFILER_PACKED_EVENT_CODE_CLASS_EVENT);
2419 if (event->code == MONO_PROFILER_EVENT_STACK_SECTION) {
2420 return write_stack_section_event (event);
2422 event_data = event->data.number;
2423 MONO_PROFILER_EVENT_MAKE_FULL_CODE (event_code, event->code, event->kind, MONO_PROFILER_PACKED_EVENT_CODE_OTHER_EVENT);
2427 /* Skip writing JIT events if the user did not ask for them */
2428 if ((event->code == MONO_PROFILER_EVENT_METHOD_JIT) && ! profiler->action_flags.jit_time) {
2432 #if (DEBUG_LOGGING_PROFILER)
2434 printf ("writing EVENT[%p] data_type:%d, kind:%d, code:%d (%d:%ld:%ld)\n", event,
2435 event->data_type, event->kind, event->code,
2436 event_code, event_data, event_value);
/* Emit: packed code byte, then data, then (optionally) value and the
 * extension values in order. */
2439 WRITE_BYTE (event_code);
2440 write_uint64 (event_data);
2441 if (write_event_value) {
2442 write_uint64 (event_value);
2443 if (write_event_value_extension_1) {
2444 write_uint64 (event_value_extension_1);
2446 if (write_event_value_extension_2) {
2447 write_uint64 (event_value_extension_2);
/*
 * Writes all events of one thread that are mapped but not yet written
 * (range [first_unwritten_event, first_unmapped_event)) as a single
 * KIND_EVENTS block: clock data, thread id and start counter, the events,
 * and a trailing clock sample. Resets the thread's saved stack state first
 * so each block is self-contained for stack reconstruction.
 * NOTE(review): the early-return for an empty range (if any) is in lines
 * omitted from this listing.
 */
2455 write_thread_data_block (ProfilerPerThreadData *data) {
2456 ProfilerEventData *start = data->first_unwritten_event;
2457 ProfilerEventData *end = data->first_unmapped_event;
2461 #if (DEBUG_FILE_WRITES)
2462 printf ("write_thread_data_block: preparing buffer for thread %ld\n", (guint64) data->thread_id);
2464 write_clock_data ();
2465 write_uint64 (data->thread_id);
2467 write_uint64 (data->start_event_counter);
2469 /* Make sure that stack sections can be fully reconstructed even reading only one block */
2470 thread_stack_reset_saved_state (&(data->stack));
/* write_event consumes one or more slots per call (extension records). */
2472 while (start < end) {
2473 start = write_event (start);
2476 data->first_unwritten_event = end;
2478 write_clock_data ();
2479 write_current_block (MONO_PROFILER_FILE_BLOCK_KIND_EVENTS);
2480 #if (DEBUG_FILE_WRITES)
2481 printf ("write_thread_data_block: buffer for thread %ld written\n", (guint64) data->thread_id);
2485 static ProfilerExecutableMemoryRegionData*
/*
 * Allocates and initializes one executable-memory-region descriptor
 * (address range, file offset, duplicated file name, fresh `is_new` flag,
 * empty symbol table). The caller owns the returned struct and must free
 * it via profiler_executable_memory_region_destroy.
 * NOTE(review): lines setting `result->end` and `result->id` are omitted
 * from this listing; also `symbols_capacity = id` / `symbols_count = id`
 * looks like extraction garbling (counts are expected to start at 0 and
 * `id` to be stored in result->id) — verify against the full source.
 */
2486 profiler_executable_memory_region_new (gpointer *start, gpointer *end, guint32 file_offset, char *file_name, guint32 id) {
2487 ProfilerExecutableMemoryRegionData *result = g_new (ProfilerExecutableMemoryRegionData, 1);
2488 result->start = start;
2490 result->file_offset = file_offset;
2491 result->file_name = g_strdup (file_name);
2493 result->is_new = TRUE;
2495 result->file = NULL;
2496 result->symbols_capacity = id;
2497 result->symbols_count = id;
2498 result->symbols = NULL;
/* Forward declaration: closing a region's backing file is defined below. */
2504 executable_file_close (ProfilerExecutableMemoryRegionData *region);
/*
 * Frees one region descriptor: its duplicated file name, its symbol array,
 * and (via executable_file_close) its reference to the backing executable
 * file. The final g_free of `data` itself is in a line omitted from this
 * listing.
 */
2507 profiler_executable_memory_region_destroy (ProfilerExecutableMemoryRegionData *data) {
2508 if (data->file_name != NULL) {
2509 g_free (data->file_name);
2511 if (data->symbols != NULL) {
2512 g_free (data->symbols);
2514 if (data->file != NULL) {
2515 executable_file_close (data);
2520 static ProfilerExecutableMemoryRegions*
/*
 * Allocates an empty region collection with an initial capacity of 32.
 * The id counters are carried over from the previous generation so that
 * region/function ids stay unique across refreshes.
 */
2521 profiler_executable_memory_regions_new (int next_id, int next_unmanaged_function_id) {
2522 ProfilerExecutableMemoryRegions *result = g_new (ProfilerExecutableMemoryRegions, 1);
2523 result->regions = g_new0 (ProfilerExecutableMemoryRegionData*, 32);
2524 result->regions_capacity = 32;
2525 result->regions_count = 0;
2526 result->next_id = next_id;
2527 result->next_unmanaged_function_id = next_unmanaged_function_id;
/*
 * Destroys every region in the collection, then the pointer array.
 * (The final g_free of `regions` itself is in a line omitted from this
 * listing.)
 */
2532 profiler_executable_memory_regions_destroy (ProfilerExecutableMemoryRegions *regions) {
2535 for (i = 0; i < regions->regions_count; i++) {
2536 profiler_executable_memory_region_destroy (regions->regions [i]);
2538 g_free (regions->regions);
2542 static ProfilerExecutableMemoryRegionData*
/*
 * Binary-searches the (sorted) region array for the region containing
 * `address`. Returns NULL when the address is outside all regions
 * (the early NULL returns are in lines omitted from this listing).
 * The final range re-check guards against falling out of the loop on a
 * non-matching middle region.
 */
2543 find_address_region (ProfilerExecutableMemoryRegions *regions, gpointer address) {
2545 int high_index = regions->regions_count;
2546 int middle_index = 0;
2547 ProfilerExecutableMemoryRegionData *middle_region = regions->regions [0];
/* Fast reject: empty table, or address below the first / above the last
 * region. NOTE(review): reading regions->regions[0] at line 2547 before
 * the regions_count==0 check looks hazardous — verify ordering in the
 * full source. */
2549 if ((regions->regions_count == 0) || (regions->regions [low_index]->start > address) || (regions->regions [high_index - 1]->end < address)) {
2553 //printf ("find_address_region: Looking for address %p in %d regions (from %p to %p)\n", address, regions->regions_count, regions->regions [low_index]->start, regions->regions [high_index - 1]->end);
2555 while (low_index != high_index) {
2556 middle_index = low_index + ((high_index - low_index) / 2);
2557 middle_region = regions->regions [middle_index];
2559 //printf ("find_address_region: Looking for address %p, considering index %d[%p-%p] (%d-%d)\n", address, middle_index, middle_region->start, middle_region->end, low_index, high_index);
2561 if (middle_region->start > address) {
2562 if (middle_index > 0) {
2563 high_index = middle_index;
2567 } else if (middle_region->end < address) {
2568 if (middle_index < regions->regions_count - 1) {
2569 low_index = middle_index + 1;
2574 return middle_region;
/* Post-loop sanity check: only return the region if it really contains
 * the address. */
2578 if ((middle_region == NULL) || (middle_region->start > address) || (middle_region->end < address)) {
2581 return middle_region;
/*
 * Appends a new region descriptor to the collection, doubling the backing
 * array when full (manual grow: allocate, memcpy, free old). Assigns the
 * collection's next_id to the new region and advances it.
 */
2586 append_region (ProfilerExecutableMemoryRegions *regions, gpointer *start, gpointer *end, guint32 file_offset, char *file_name) {
2587 if (regions->regions_count >= regions->regions_capacity) {
2588 ProfilerExecutableMemoryRegionData **new_regions = g_new0 (ProfilerExecutableMemoryRegionData*, regions->regions_capacity * 2);
2589 memcpy (new_regions, regions->regions, regions->regions_capacity * sizeof (ProfilerExecutableMemoryRegionData*));
2590 g_free (regions->regions);
2591 regions->regions = new_regions;
2592 regions->regions_capacity = regions->regions_capacity * 2;
2594 regions->regions [regions->regions_count] = profiler_executable_memory_region_new (start, end, file_offset, file_name, regions->next_id);
2595 regions->regions_count ++;
2596 regions->next_id ++;
/*
 * For every region that exists identically (same range, offset, file name)
 * in both generations, swaps the old descriptor into the new collection and
 * the new one into the old collection. This preserves already-built symbol
 * tables and ids, and arranges for the duplicate descriptors to be freed
 * when the old collection is destroyed. O(old*new) pairwise scan.
 */
2600 restore_old_regions (ProfilerExecutableMemoryRegions *old_regions, ProfilerExecutableMemoryRegions *new_regions) {
2604 for (old_i = 0; old_i < old_regions->regions_count; old_i++) {
2605 ProfilerExecutableMemoryRegionData *old_region = old_regions->regions [old_i];
2606 for (new_i = 0; new_i < new_regions->regions_count; new_i++) {
2607 ProfilerExecutableMemoryRegionData *new_region = new_regions->regions [new_i];
2608 if ((old_region->start == new_region->start) &&
2609 (old_region->end == new_region->end) &&
2610 (old_region->file_offset == new_region->file_offset) &&
2611 ! strcmp (old_region->file_name, new_region->file_name)) {
2612 new_regions->regions [new_i] = old_region;
2613 old_regions->regions [old_i] = new_region;
2615 // FIXME (sanity check)
2616 g_assert (new_region->is_new && ! old_region->is_new);
/* qsort comparator ordering region pointers by ascending start address. */
2623 compare_regions (const void *a1, const void *a2) {
2624 ProfilerExecutableMemoryRegionData *r1 = * (ProfilerExecutableMemoryRegionData**) a1;
2625 ProfilerExecutableMemoryRegionData *r2 = * (ProfilerExecutableMemoryRegionData**) a2;
2626 return (r1->start < r2->start)? -1 : ((r1->start > r2->start)? 1 : 0);
/* Sorts the region array by start address so find_address_region can
 * binary-search it. */
2630 sort_regions (ProfilerExecutableMemoryRegions *regions) {
2631 qsort (regions->regions, regions->regions_count, sizeof (ProfilerExecutableMemoryRegionData *), compare_regions);
/*
 * Walks the ELF section headers of `file` and, for every executable section
 * whose file-offset span is fully covered by `region`, records the
 * region/address/offset triple in file->section_regions so symbols can
 * later be attributed to the region. Section index 0 (SHN_UNDEF) is
 * skipped.
 */
2635 executable_file_add_region_reference (ProfilerExecutableFile *file, ProfilerExecutableMemoryRegionData *region) {
2636 guint8 *section_headers = file->data + file->header->e_shoff;
2639 for (section_index = 1; section_index < file->header->e_shnum; section_index ++) {
2640 ElfSection *section_header = (ElfSection*) (section_headers + (file->header->e_shentsize * section_index));
2642 if ((section_header->sh_addr != 0) && (section_header->sh_flags & ELF_SHF_EXECINSTR) &&
2643 (region->file_offset <= section_header->sh_offset) && (region->file_offset + (((guint8*)region->end)-((guint8*)region->start)) >= (section_header->sh_offset + section_header->sh_size))) {
2644 ProfilerExecutableFileSectionRegion *section_region = & (file->section_regions [section_index]);
2645 section_region->region = region;
2646 section_region->section_address = (gpointer) section_header->sh_addr;
2647 section_region->section_offset = section_header->sh_offset;
2652 static ProfilerExecutableFile*
/*
 * Opens (or re-uses from the per-profiler hash table) the executable file
 * backing `region`, mmaps it read-only, validates the ELF identity bytes,
 * class (32/64-bit must match gsize) and endianness (detected at runtime
 * via the `test` word), then locates a symbol table (.symtab/.strtab
 * preferred, .dynsym/.dynstr as fallback) and caches symbol metadata on the
 * ProfilerExecutableFile. Newly created files are pushed onto
 * files->new_files for deferred symbol-table building. Returns the file
 * (the return statement is in an omitted line of this listing); on any
 * validation failure it bails out through omitted cleanup/`return` lines.
 * NOTE(review): several control-flow lines (the table-insert, `goto`s or
 * early returns after each g_warning) are omitted — confirm against the
 * full source before reasoning about error paths.
 */
2653 executable_file_open (ProfilerExecutableMemoryRegionData *region) {
2654 ProfilerExecutableFiles *files = & (profiler->executable_files);
2655 ProfilerExecutableFile *file = (ProfilerExecutableFile*) g_hash_table_lookup (files->table, region->file_name);
/* 0x0102 is inspected byte-wise below to detect host endianness. */
2657 guint16 test = 0x0102;
2658 struct stat stat_buffer;
2659 int symtab_index = 0;
2660 int strtab_index = 0;
2661 int dynsym_index = 0;
2662 int dynstr_index = 0;
2664 guint8 *section_headers;
/* Cache miss: create and initialize a new file record. */
2668 file = g_new0 (ProfilerExecutableFile, 1);
2669 region->file = file;
2670 file->reference_count ++;
2672 file->fd = open (region->file_name, O_RDONLY);
2673 if (file->fd == -1) {
2674 //g_warning ("Cannot open file '%s': '%s'", region->file_name, strerror (errno));
2677 if (fstat (file->fd, &stat_buffer) != 0) {
2678 //g_warning ("Cannot stat file '%s': '%s'", region->file_name, strerror (errno));
/* If the file is mapped 1:1 into the region we can read it from there;
 * otherwise map it privately ourselves. */
2681 size_t region_length = ((guint8*)region->end) - ((guint8*)region->start);
2682 file->length = stat_buffer.st_size;
2684 if (file->length == region_length) {
2685 file->data = region->start;
2689 file->data = mmap (NULL, file->length, PROT_READ, MAP_PRIVATE, file->fd, 0);
2691 if (file->data == MAP_FAILED) {
2693 //g_warning ("Cannot map file '%s': '%s'", region->file_name, strerror (errno));
/* ELF magic: 0x7f 'E' 'L' 'F'. */
2701 header = (ElfHeader*) file->data;
2703 if ((header->e_ident [EI_MAG0] != 0x7f) || (header->e_ident [EI_MAG1] != 'E') ||
2704 (header->e_ident [EI_MAG2] != 'L') || (header->e_ident [EI_MAG3] != 'F')) {
/* ELF class must match the build's pointer size. */
2708 if (sizeof (gsize) == 4) {
2709 if (header->e_ident [EI_CLASS] != ELF_CLASS_32) {
2710 g_warning ("Class is not ELF_CLASS_32 with gsize size %d", (int) sizeof (gsize));
2713 } else if (sizeof (gsize) == 8) {
2714 if (header->e_ident [EI_CLASS] != ELF_CLASS_64) {
2715 g_warning ("Class is not ELF_CLASS_64 with gsize size %d", (int) sizeof (gsize));
2719 g_warning ("Absurd gsize size %d", (int) sizeof (gsize));
/* Endianness check: first byte of 0x0102 is 0x01 on big-endian hosts. */
2723 if ((*(guint8*)(&test)) == 0x01) {
2724 if (header->e_ident [EI_DATA] != ELF_DATA_MSB) {
2725 g_warning ("Data is not ELF_DATA_MSB with first test byte 0x01");
2728 } else if ((*(guint8*)(&test)) == 0x02) {
2729 if (header->e_ident [EI_DATA] != ELF_DATA_LSB) {
2730 g_warning ("Data is not ELF_DATA_LSB with first test byte 0x02");
2734 g_warning ("Absurd test byte value");
2738 /* OK, this is a usable elf file... */
2739 file->header = header;
2740 section_headers = file->data + header->e_shoff;
2741 file->main_string_table = ((const char*) file->data) + (((ElfSection*) (section_headers + (header->e_shentsize * header->e_shstrndx)))->sh_offset);
/* Scan sections for symbol and string tables. */
2743 for (section_index = 0; section_index < header->e_shnum; section_index ++) {
2744 ElfSection *section_header = (ElfSection*) (section_headers + (header->e_shentsize * section_index));
2746 if (section_header->sh_type == ELF_SHT_SYMTAB) {
2747 symtab_index = section_index;
2748 } else if (section_header->sh_type == ELF_SHT_DYNSYM) {
2749 dynsym_index = section_index;
2750 } else if (section_header->sh_type == ELF_SHT_STRTAB) {
2751 if (! strcmp (file->main_string_table + section_header->sh_name, ".strtab")) {
2752 strtab_index = section_index;
2753 } else if (! strcmp (file->main_string_table + section_header->sh_name, ".dynstr")) {
2754 dynstr_index = section_index;
/* Prefer the full symtab; fall back to the dynamic one. */
2759 if ((symtab_index != 0) && (strtab_index != 0)) {
2760 section_index = symtab_index;
2761 strings_index = strtab_index;
2762 } else if ((dynsym_index != 0) && (dynstr_index != 0)) {
2763 section_index = dynsym_index;
2764 strings_index = dynstr_index;
2770 if (section_index != 0) {
2771 ElfSection *section_header = (ElfSection*) (section_headers + (header->e_shentsize * section_index));
2772 file->symbol_size = section_header->sh_entsize;
2773 file->symbols_count = (guint32) (section_header->sh_size / section_header->sh_entsize);
2774 file->symbols_start = file->data + section_header->sh_offset;
2775 file->symbols_string_table = ((const char*) file->data) + (((ElfSection*) (section_headers + (header->e_shentsize * strings_index)))->sh_offset);
2778 file->section_regions = g_new0 (ProfilerExecutableFileSectionRegion, file->header->e_shnum);
/* Cache-hit path: just add another reference. */
2780 region->file = file;
2781 file->reference_count ++;
2784 if (file->header != NULL) {
2785 executable_file_add_region_reference (file, region);
/* Queue for symbol-table building (next_new_file doubles as the
 * "already queued" marker). */
2788 if (file->next_new_file == NULL) {
2789 file->next_new_file = files->new_files;
2790 files->new_files = file;
/*
 * Releases all resources of a ProfilerExecutableFile: the descriptor, the
 * mapping (only when we mapped it ourselves — NOTE(review): the visible
 * code unmaps whenever data != NULL, which would also cover the
 * region-shared case; the guard distinguishing the two may be in omitted
 * lines, verify), and the section_regions array. The final g_free of
 * `file` itself is in an omitted line.
 */
2796 executable_file_free (ProfilerExecutableFile* file) {
2797 if (file->fd != -1) {
2798 if (close (file->fd) != 0) {
2799 g_warning ("Cannot close file: '%s'", strerror (errno));
2801 if (file->data != NULL) {
2802 if (munmap (file->data, file->length) != 0) {
2803 g_warning ("Cannot unmap file: '%s'", strerror (errno));
2807 if (file->section_regions != NULL) {
2808 g_free (file->section_regions);
/*
 * Drops the region's reference on its backing file; when the count reaches
 * zero the file is removed from the global table and freed. Always clears
 * region->file (the region no longer owns a reference either way —
 * NOTE(review): line 2821 is inside the refcount==0 branch in this
 * listing; confirm scope in the full source).
 */
2814 executable_file_close (ProfilerExecutableMemoryRegionData *region) {
2815 region->file->reference_count --;
2817 if (region->file->reference_count <= 0) {
2818 ProfilerExecutableFiles *files = & (profiler->executable_files);
2819 g_hash_table_remove (files->table, region->file_name);
2820 executable_file_free (region->file);
2821 region->file = NULL;
/*
 * First pass over a file's ELF symbols: for every FUNC symbol in a valid
 * section whose section maps to a region with no symbol table yet,
 * increments that region's symbols_count. The counts are used afterwards
 * to size the per-region symbol arrays
 * (executable_memory_regions_prepare_symbol_tables).
 */
2826 executable_file_count_symbols (ProfilerExecutableFile *file) {
2829 for (symbol_index = 0; symbol_index < file->symbols_count; symbol_index ++) {
2830 ElfSymbol *symbol = (ElfSymbol*) (file->symbols_start + (symbol_index * file->symbol_size));
2832 if ((ELF_ST_TYPE (symbol->st_info) == ELF_STT_FUNC) &&
2833 (symbol->st_shndx > 0) &&
2834 (symbol->st_shndx < file->header->e_shnum)) {
2835 int symbol_section_index = symbol->st_shndx;
2836 ProfilerExecutableMemoryRegionData *region = file->section_regions [symbol_section_index].region;
2837 if ((region != NULL) && (region->symbols == NULL)) {
2838 region->symbols_count ++;
/*
 * Second phase: allocates each region's symbol array using the count from
 * the counting pass, records the capacity, and resets symbols_count to 0
 * so the build pass can use it as an insertion cursor.
 */
2845 executable_memory_regions_prepare_symbol_tables (ProfilerExecutableMemoryRegions *regions) {
2847 for (i = 0; i < regions->regions_count; i++) {
2848 ProfilerExecutableMemoryRegionData *region = regions->regions [i];
2849 if ((region->symbols_count > 0) && (region->symbols == NULL)) {
2850 region->symbols = g_new (ProfilerUnmanagedSymbol, region->symbols_count);
2851 region->symbols_capacity = region->symbols_count;
2852 region->symbols_count = 0;
/*
 * Resolves a region symbol's name: re-reads the underlying ElfSymbol via
 * the stored index and returns a pointer into the file's symbol string
 * table (not owned by the caller; valid while the file stays mapped).
 */
2858 executable_region_symbol_get_name (ProfilerExecutableMemoryRegionData *region, ProfilerUnmanagedSymbol *symbol) {
2859 ElfSymbol *elf_symbol = (ElfSymbol*) (region->file->symbols_start + (symbol->index * region->file->symbol_size));
2860 return region->file->symbols_string_table + elf_symbol->st_name;
/*
 * Third phase: fills each region's pre-sized symbol array. For every FUNC
 * symbol whose section maps to a region, records the symbol's index, size,
 * and its offset relative to the region start (translated from the
 * section's virtual address/file offset). The symbol `id` is presumably
 * initialized in an omitted line (ids are assigned lazily in
 * write_statistical_hit) — verify against full source.
 */
2864 executable_file_build_symbol_tables (ProfilerExecutableFile *file) {
2867 for (symbol_index = 0; symbol_index < file->symbols_count; symbol_index ++) {
2868 ElfSymbol *symbol = (ElfSymbol*) (file->symbols_start + (symbol_index * file->symbol_size));
2870 if ((ELF_ST_TYPE (symbol->st_info) == ELF_STT_FUNC) &&
2871 (symbol->st_shndx > 0) &&
2872 (symbol->st_shndx < file->header->e_shnum)) {
2873 int symbol_section_index = symbol->st_shndx;
2874 ProfilerExecutableFileSectionRegion *section_region = & (file->section_regions [symbol_section_index]);
2875 ProfilerExecutableMemoryRegionData *region = section_region->region;
2877 if (region != NULL) {
2878 ProfilerUnmanagedSymbol *new_symbol = & (region->symbols [region->symbols_count]);
2879 region->symbols_count ++;
2882 new_symbol->index = symbol_index;
2883 new_symbol->size = symbol->st_size;
/* st_value is a virtual address; convert to region-relative offset via
 * the section's load address and file offset. */
2884 new_symbol->offset = (((guint8*) symbol->st_value) - section_region->section_address) - (region->file_offset - section_region->section_offset);
/* qsort comparator ordering region symbols by ascending offset. */
2891 compare_region_symbols (const void *p1, const void *p2) {
2892 const ProfilerUnmanagedSymbol *s1 = p1;
2893 const ProfilerUnmanagedSymbol *s2 = p2;
2894 return (s1->offset < s2->offset)? -1 : ((s1->offset > s2->offset)? 1 : 0);
/*
 * Sorts each new region's symbol table by offset so that
 * executable_memory_region_find_symbol can binary-search it.
 * Only regions flagged is_new are touched (old ones are already sorted).
 */
2898 executable_memory_regions_sort_symbol_tables (ProfilerExecutableMemoryRegions *regions) {
2900 for (i = 0; i < regions->regions_count; i++) {
2901 ProfilerExecutableMemoryRegionData *region = regions->regions [i];
2902 if ((region->is_new) && (region->symbols != NULL)) {
2903 qsort (region->symbols, region->symbols_count, sizeof (ProfilerUnmanagedSymbol), compare_region_symbols);
/*
 * Orchestrates symbol-table construction for all new regions:
 *  1. open the backing file of every new region without one,
 *  2. count FUNC symbols per region (sizing pass),
 *  3. allocate the per-region symbol arrays,
 *  4. fill them (build pass),
 *  5. sort each new region's table by offset,
 *  6. drain the new-files queue, clearing each next_new_file link.
 */
2909 build_symbol_tables (ProfilerExecutableMemoryRegions *regions, ProfilerExecutableFiles *files) {
2911 ProfilerExecutableFile *file;
2913 for (i = 0; i < regions->regions_count; i++) {
2914 ProfilerExecutableMemoryRegionData *region = regions->regions [i];
2915 if ((region->is_new) && (region->file == NULL)) {
2916 executable_file_open (region);
2920 for (file = files->new_files; file != NULL; file = file->next_new_file) {
2921 executable_file_count_symbols (file);
2924 executable_memory_regions_prepare_symbol_tables (regions);
2926 for (file = files->new_files; file != NULL; file = file->next_new_file) {
2927 executable_file_build_symbol_tables (file);
2930 executable_memory_regions_sort_symbol_tables (regions);
/* Unlink the queue; next_new_file == NULL marks "not queued" again. */
2932 file = files->new_files;
2933 while (file != NULL) {
2934 ProfilerExecutableFile *next_file = file->next_new_file;
2935 file->next_new_file = NULL;
2938 files->new_files = NULL;
2941 static ProfilerUnmanagedSymbol*
/*
 * Binary-searches a region's offset-sorted symbol table for the symbol
 * whose [offset, offset+size) interval contains `offset`. Returns NULL
 * when no symbol matches (the NULL returns and loop-exit conditions are in
 * lines omitted from this listing).
 */
2942 executable_memory_region_find_symbol (ProfilerExecutableMemoryRegionData *region, guint32 offset) {
2943 if (region->symbols_count > 0) {
2944 ProfilerUnmanagedSymbol *low = region->symbols;
2945 ProfilerUnmanagedSymbol *high = region->symbols + (region->symbols_count - 1);
2946 int step = region->symbols_count >> 1;
2947 ProfilerUnmanagedSymbol *current = region->symbols + step;
2950 step = (high - low) >> 1;
2952 if (offset < current->offset) {
2954 current = high - step;
2955 } else if (offset >= current->offset) {
2956 if (offset >= (current->offset + current->size)) {
2958 current = low + step;
/* Final containment check: symbols may have gaps between them. */
2965 if ((offset >= current->offset) && (offset < (current->offset + current->size))) {
2975 //FIXME: make also Win32 and BSD variants
2976 #define MAPS_BUFFER_SIZE 4096
/*
 * Refills `buffer` from /proc/self/maps (fd). A full read leaves the
 * buffer unterminated (the parser relies on the size bound); a short read
 * is NUL-terminated so the parser stops at end of data. Return value /
 * error path are in lines omitted from this listing.
 */
2979 update_regions_buffer (int fd, char *buffer) {
2980 ssize_t result = read (fd, buffer, MAPS_BUFFER_SIZE);
2982 if (result == MAPS_BUFFER_SIZE) {
2984 } else if (result >= 0) {
2985 *(buffer + result) = 0;
/* Advances cursor `c` within buffer `b`, refilling from `fd` (and resetting
 * `c` to the buffer start — reset line omitted in this listing) when the
 * cursor runs off the end of the buffer or hits the NUL written by a short
 * read. do/while(0) closer is also in an omitted line. */
2993 #define GOTO_NEXT_CHAR(c,b,fd) do {\
2995 if (((c) - (b) >= MAPS_BUFFER_SIZE) || ((*(c) == 0) && ((c) != (b)))) {\
2996 update_regions_buffer ((fd), (b));\
/* Returns the numeric value (0-15) of a hex digit character; accepts both
 * cases. The '0'-'9' return and the non-hex fallback are in lines omitted
 * from this listing. */
3001 static int hex_digit_value (char c) {
3002 if ((c >= '0') && (c <= '9')) {
3004 } else if ((c >= 'a') && (c <= 'f')) {
3005 return c - 'a' + 10;
3006 } else if ((c >= 'A') && (c <= 'F')) {
3007 return c - 'A' + 10;
/*
 * States of the /proc/self/maps line parser, in field order:
 * start-end addresses, permissions, file offset, device, inode, optional
 * filename. INVALID consumes the rest of a malformed line; DONE emits the
 * region. (The enum opener line is omitted from this listing.)
 */
3029 MAP_LINE_PARSER_STATE_INVALID,
3030 MAP_LINE_PARSER_STATE_START_ADDRESS,
3031 MAP_LINE_PARSER_STATE_END_ADDRESS,
3032 MAP_LINE_PARSER_STATE_PERMISSIONS,
3033 MAP_LINE_PARSER_STATE_OFFSET,
3034 MAP_LINE_PARSER_STATE_DEVICE,
3035 MAP_LINE_PARSER_STATE_INODE,
3036 MAP_LINE_PARSER_STATE_BLANK_BEFORE_FILENAME,
3037 MAP_LINE_PARSER_STATE_FILENAME,
3038 MAP_LINE_PARSER_STATE_DONE
3039 } MapLineParserState;
/* Debug names for MapLineParserState values (most entries omitted from
 * this listing). */
3041 const char *map_line_parser_state [] = {
3049 "BLANK_BEFORE_FILENAME",
/*
 * Parses one line of /proc/self/maps with a character-driven state
 * machine, appending an executable region (start, end, offset, filename)
 * to `regions` when the line describes an executable mapping. `current`
 * is the cursor into `buffer`, refilled via GOTO_NEXT_CHAR; the function
 * returns the cursor position for the next line (return statements are in
 * lines omitted from this listing).
 * Typical maps line: "addr1-addr2 perms offset dev inode [path]".
 * NOTE(review): numerous condition lines (isxdigit checks, 'x' permission
 * test, end-of-buffer handling) are omitted here — verify against source.
 */
3055 parse_map_line (ProfilerExecutableMemoryRegions *regions, int fd, char *buffer, char *current) {
3056 MapLineParserState state = MAP_LINE_PARSER_STATE_START_ADDRESS;
3057 gsize start_address = 0;
3058 gsize end_address = 0;
3060 char *start_filename = NULL;
3061 char *end_filename = NULL;
3062 gboolean is_executable = FALSE;
3063 gboolean done = FALSE;
/* Accumulate the hex start address until the '-' separator. */
3069 case MAP_LINE_PARSER_STATE_START_ADDRESS:
3071 start_address <<= 4;
3072 start_address |= hex_digit_value (c);
3073 } else if (c == '-') {
3074 state = MAP_LINE_PARSER_STATE_END_ADDRESS;
3076 state = MAP_LINE_PARSER_STATE_INVALID;
3079 case MAP_LINE_PARSER_STATE_END_ADDRESS:
3082 end_address |= hex_digit_value (c);
3083 } else if (isblank (c)) {
3084 state = MAP_LINE_PARSER_STATE_PERMISSIONS;
3086 state = MAP_LINE_PARSER_STATE_INVALID;
/* Permissions field: the 'x' flag marks the mapping executable. */
3089 case MAP_LINE_PARSER_STATE_PERMISSIONS:
3091 is_executable = TRUE;
3092 } else if (isblank (c)) {
3093 state = MAP_LINE_PARSER_STATE_OFFSET;
3094 } else if ((c != '-') && ! isalpha (c)) {
3095 state = MAP_LINE_PARSER_STATE_INVALID;
3098 case MAP_LINE_PARSER_STATE_OFFSET:
3101 offset |= hex_digit_value (c);
3102 } else if (isblank (c)) {
3103 state = MAP_LINE_PARSER_STATE_DEVICE;
3105 state = MAP_LINE_PARSER_STATE_INVALID;
3108 case MAP_LINE_PARSER_STATE_DEVICE:
3110 state = MAP_LINE_PARSER_STATE_INODE;
3111 } else if ((c != ':') && ! isxdigit (c)) {
3112 state = MAP_LINE_PARSER_STATE_INVALID;
3115 case MAP_LINE_PARSER_STATE_INODE:
3117 state = MAP_LINE_PARSER_STATE_BLANK_BEFORE_FILENAME;
3118 } else if (! isdigit (c)) {
3119 state = MAP_LINE_PARSER_STATE_INVALID;
/* Filenames start with '/' (paths) or '[' (pseudo entries like [vdso]). */
3122 case MAP_LINE_PARSER_STATE_BLANK_BEFORE_FILENAME:
3123 if ((c == '/') || (c == '[')) {
3124 state = MAP_LINE_PARSER_STATE_FILENAME;
3125 start_filename = current;
3126 } else if (! isblank (c)) {
3127 state = MAP_LINE_PARSER_STATE_INVALID;
3130 case MAP_LINE_PARSER_STATE_FILENAME:
3132 state = MAP_LINE_PARSER_STATE_DONE;
3134 end_filename = current;
3137 case MAP_LINE_PARSER_STATE_DONE:
3138 if (done && is_executable) {
/* Only executable mappings are recorded. */
3140 append_region (regions, (gpointer) start_address, (gpointer) end_address, offset, start_filename);
3143 case MAP_LINE_PARSER_STATE_INVALID:
3145 state = MAP_LINE_PARSER_STATE_DONE;
3152 } else if (c == '\n') {
3153 state = MAP_LINE_PARSER_STATE_DONE;
3156 GOTO_NEXT_CHAR(current, buffer, fd);
/*
 * Re-scans this process's executable mappings by reading /proc/self/maps
 * and feeding each line to parse_map_line (Linux-specific; see the FIXME
 * above about Win32/BSD). The buffer free, fd close and the
 * initialization of `current` are in lines omitted from this listing.
 * NOTE(review): the open() error check is not visible here — confirm it
 * exists in the full source.
 */
3162 scan_process_regions (ProfilerExecutableMemoryRegions *regions) {
3167 fd = open ("/proc/self/maps", O_RDONLY);
3172 buffer = malloc (MAPS_BUFFER_SIZE);
3173 update_regions_buffer (fd, buffer);
3175 while (current != NULL) {
3176 current = parse_map_line (regions, fd, buffer, current);
/*
 * Low-3-bit codes tagging each uint32 in a statistical (sampling) block:
 * the payload is stored in the bits above the code (e.g. method id << 3).
 * NOTE(review): the enum opener line is omitted, and value 6 is skipped
 * in this listing — possibly an omitted CALL_CHAIN-related entry; verify
 * against the full source.
 */
3187 MONO_PROFILER_STATISTICAL_CODE_END = 0,
3188 MONO_PROFILER_STATISTICAL_CODE_METHOD = 1,
3189 MONO_PROFILER_STATISTICAL_CODE_UNMANAGED_FUNCTION_ID = 2,
3190 MONO_PROFILER_STATISTICAL_CODE_UNMANAGED_FUNCTION_NEW_ID = 3,
3191 MONO_PROFILER_STATISTICAL_CODE_UNMANAGED_FUNCTION_OFFSET_IN_REGION = 4,
3192 MONO_PROFILER_STATISTICAL_CODE_CALL_CHAIN = 5,
3193 MONO_PROFILER_STATISTICAL_CODE_REGIONS = 7
3194 } MonoProfilerStatisticalCode;
/*
 * Rebuilds the executable-region table when a sampled address cannot be
 * resolved: rescans /proc/self/maps into a fresh collection, swaps back
 * descriptors that survived unchanged (restore_old_regions), sorts,
 * builds symbol tables for new regions, then writes a REGIONS sub-block:
 * first the ids of vanished regions, then full records (id, start,
 * length, file offset, name) for new ones. Finally destroys the old
 * collection and installs the new one.
 * NOTE(review): debug-dump code between lines 3211-3235 is presumably
 * inside a DEBUG conditional whose #if/#endif lines are omitted from
 * this listing.
 */
3197 refresh_memory_regions (void) {
3198 ProfilerExecutableMemoryRegions *old_regions = profiler->executable_regions;
3199 ProfilerExecutableMemoryRegions *new_regions = profiler_executable_memory_regions_new (old_regions->next_id, old_regions->next_unmanaged_function_id);
3202 LOG_WRITER_THREAD ("Refreshing memory regions...");
3203 scan_process_regions (new_regions);
3204 restore_old_regions (old_regions, new_regions);
3205 sort_regions (new_regions);
3206 LOG_WRITER_THREAD ("Refreshed memory regions.");
3208 LOG_WRITER_THREAD ("Building symbol tables...");
3209 build_symbol_tables (new_regions, & (profiler->executable_files));
3211 printf ("Symbol tables done!\n");
3212 printf ("Region summary...\n");
3213 for (i = 0; i < new_regions->regions_count; i++) {
3214 ProfilerExecutableMemoryRegionData *region = new_regions->regions [i];
3215 printf ("Region %d[%d][NEW:%d] (%p-%p) at %d in file %s\n", i, region->id, region->is_new,
3216 region->start, region->end, region->file_offset, region->file_name);
3218 printf ("New symbol tables dump...\n");
3219 for (i = 0; i < new_regions->regions_count; i++) {
3220 ProfilerExecutableMemoryRegionData *region = new_regions->regions [i];
3222 if (region->is_new) {
3225 printf ("Region %d[%d][NEW:%d] (%p-%p) at %d in file %s\n", i, region->id, region->is_new,
3226 region->start, region->end, region->file_offset, region->file_name);
3227 for (symbol_index = 0; symbol_index < region->symbols_count; symbol_index ++) {
3228 ProfilerUnmanagedSymbol *symbol = & (region->symbols [symbol_index]);
3229 printf (" [%d] Symbol %s (offset %d, size %d)\n", symbol_index,
3230 executable_region_symbol_get_name (region, symbol),
3231 symbol->offset, symbol->size);
3236 LOG_WRITER_THREAD ("Built symbol tables.");
3238 // This marks the region "sub-block"
3239 write_uint32 (MONO_PROFILER_STATISTICAL_CODE_REGIONS);
3241 // First write the "removed" regions
/* After restore_old_regions, entries in old_regions that are still
 * flagged is_new are actually the replaced/vanished descriptors. */
3242 for (i = 0; i < old_regions->regions_count; i++) {
3243 ProfilerExecutableMemoryRegionData *region = old_regions->regions [i];
3244 if (! region->is_new) {
3245 #if DEBUG_STATISTICAL_PROFILER
3246 printf ("[refresh_memory_regions] Invalidated region %d\n", region->id);
3248 write_uint32 (region->id);
3253 // Then write the new ones
3254 for (i = 0; i < new_regions->regions_count; i++) {
3255 ProfilerExecutableMemoryRegionData *region = new_regions->regions [i];
3256 if (region->is_new) {
3257 region->is_new = FALSE;
3259 #if DEBUG_STATISTICAL_PROFILER
3260 printf ("[refresh_memory_regions] Wrote region %d (%p-%p[%d] '%s')\n", region->id, region->start, region->end, region->file_offset, region->file_name);
3262 write_uint32 (region->id);
3263 write_uint64 (GPOINTER_TO_UINT (region->start));
3264 write_uint32 (GPOINTER_TO_UINT (region->end) - GPOINTER_TO_UINT (region->start));
3265 write_uint32 (region->file_offset);
3266 write_string (region->file_name);
3271 // Finally, free the old ones, and replace them
3272 profiler_executable_memory_regions_destroy (old_regions);
3273 profiler->executable_regions = new_regions;
/*
 * Resolves one statistical (sampling) hit address and writes it:
 *  - managed JIT code -> METHOD record with the mapped method id (or id 0
 *    when the method has no mapping yet);
 *  - unmanaged code   -> lookup in the executable-region table (refreshing
 *    it at most once per block, tracked by `regions_refreshed`), then
 *    either a known UNMANAGED_FUNCTION_ID, a NEW_ID record carrying the
 *    freshly assigned id plus name, an OFFSET_IN_REGION record, or a raw
 *    address when no region matches at all.
 * Returns the (possibly updated) regions_refreshed flag so the caller can
 * avoid redundant refreshes within one block.
 * NOTE(review): several `#endif` and brace lines are omitted from this
 * listing.
 */
3277 write_statistical_hit (MonoDomain *domain, gpointer address, gboolean regions_refreshed) {
3278 MonoJitInfo *ji = (domain != NULL) ? mono_jit_info_table_find (domain, (char*) address) : NULL;
3281 MonoMethod *method = mono_jit_info_get_method (ji);
3282 MethodIdMappingElement *element = method_id_mapping_element_get (method);
3284 if (element != NULL) {
3285 #if DEBUG_STATISTICAL_PROFILER
3286 printf ("[write_statistical_hit] Wrote method %d\n", element->id);
3288 write_uint32 ((element->id << 3) | MONO_PROFILER_STATISTICAL_CODE_METHOD);
3290 #if DEBUG_STATISTICAL_PROFILER
3291 printf ("[write_statistical_hit] Wrote unknown method %p\n", method);
3293 write_uint32 (MONO_PROFILER_STATISTICAL_CODE_METHOD);
/* Unmanaged path: map the address to an executable region. */
3296 ProfilerExecutableMemoryRegionData *region = find_address_region (profiler->executable_regions, address);
3298 if (region == NULL && ! regions_refreshed) {
3299 #if DEBUG_STATISTICAL_PROFILER
3300 printf ("[write_statistical_hit] Cannot find region for address %p, refreshing...\n", address);
3302 refresh_memory_regions ();
3303 regions_refreshed = TRUE;
3304 region = find_address_region (profiler->executable_regions, address);
3307 if (region != NULL) {
3308 guint32 offset = ((guint8*)address) - ((guint8*)region->start);
3309 ProfilerUnmanagedSymbol *symbol = executable_memory_region_find_symbol (region, offset);
3311 if (symbol != NULL) {
/* id > 0 means the symbol was already announced in the output. */
3312 if (symbol->id > 0) {
3313 #if DEBUG_STATISTICAL_PROFILER
3314 printf ("[write_statistical_hit] Wrote unmanaged symbol %d\n", symbol->id);
3316 write_uint32 ((symbol->id << 3) | MONO_PROFILER_STATISTICAL_CODE_UNMANAGED_FUNCTION_ID);
/* First hit on this symbol: assign an id and emit id + name. */
3318 ProfilerExecutableMemoryRegions *regions = profiler->executable_regions;
3319 const char *symbol_name = executable_region_symbol_get_name (region, symbol);
3320 symbol->id = regions->next_unmanaged_function_id;
3321 regions->next_unmanaged_function_id ++;
3322 #if DEBUG_STATISTICAL_PROFILER
3323 printf ("[write_statistical_hit] Wrote new unmanaged symbol in region %d[%d]\n", region->id, offset);
3325 write_uint32 ((region->id << 3) | MONO_PROFILER_STATISTICAL_CODE_UNMANAGED_FUNCTION_NEW_ID);
3326 write_uint32 (symbol->id);
3327 write_string (symbol_name);
3330 #if DEBUG_STATISTICAL_PROFILER
3331 printf ("[write_statistical_hit] Wrote unknown unmanaged hit in region %d[%d] (address %p)\n", region->id, offset, address);
3333 write_uint32 ((region->id << 3) | MONO_PROFILER_STATISTICAL_CODE_UNMANAGED_FUNCTION_OFFSET_IN_REGION);
3334 write_uint32 (offset);
3337 #if DEBUG_STATISTICAL_PROFILER
3338 printf ("[write_statistical_hit] Wrote unknown unmanaged hit %p\n", address);
/* No region at all: record the raw address. */
3340 write_uint32 (MONO_PROFILER_STATISTICAL_CODE_UNMANAGED_FUNCTION_OFFSET_IN_REGION);
3341 write_uint64 (GPOINTER_TO_UINT (address));
3345 return regions_refreshed;
3349 flush_all_mappings (void);
/*
 * write_statistical_data_block:
 * Flushes the pending range of statistical hits (from first_unwritten_index
 * up to next_free_index, clamped to the buffer capacity) to the output file
 * as one STATISTICAL block. Each slot group holds one leaf hit plus up to
 * call_chain_depth caller hits; a non-empty caller chain is prefixed with a
 * CALL_CHAIN code carrying the caller count.
 * NOTE(review): lines are elided in this listing (e.g. a `base_index ++;`
 * between the leaf hit and the caller scan, loop `break`s and closing
 * braces), so the visible control flow is incomplete.
 */
3352 write_statistical_data_block (ProfilerStatisticalData *data) {
3353 MonoThread *current_thread = mono_thread_current ();
3354 int start_index = data->first_unwritten_index;
3355 int end_index = data->next_free_index;
3356 gboolean regions_refreshed = FALSE;
3357 int call_chain_depth = profiler->statistical_call_chain_depth;
/* next_free_index may exceed capacity when writers raced past the end. */
3360 if (end_index > data->end_index)
3361 end_index = data->end_index;
/* Nothing new to write. */
3363 if (start_index == end_index)
3366 data->first_unwritten_index = end_index;
3368 write_clock_data ();
3370 #if DEBUG_STATISTICAL_PROFILER
3371 printf ("[write_statistical_data_block] Starting loop at index %d\n", start_index);
3374 for (index = start_index; index < end_index; index ++) {
3375 int base_index = index * (call_chain_depth + 1);
3376 ProfilerStatisticalHit hit = data->hits [base_index];
/* Domain lookup is only valid while attached to a thread. */
3379 regions_refreshed = write_statistical_hit ((current_thread != NULL) ? hit.domain : NULL, hit.address, regions_refreshed);
/* First pass: count the callers recorded for this hit (NULL terminates). */
3382 for (callers_count = 0; callers_count < call_chain_depth; callers_count ++) {
3383 hit = data->hits [base_index + callers_count];
3384 if (hit.address == NULL) {
3389 if (callers_count > 0) {
3390 write_uint32 ((callers_count << 3) | MONO_PROFILER_STATISTICAL_CODE_CALL_CHAIN);
/* Second pass: emit each caller hit. */
3392 for (callers_count = 0; callers_count < call_chain_depth; callers_count ++) {
3393 hit = data->hits [base_index + callers_count];
3394 if (hit.address != NULL) {
3395 regions_refreshed = write_statistical_hit ((current_thread != NULL) ? hit.domain : NULL, hit.address, regions_refreshed);
/* Terminator for the whole statistical block. */
3402 write_uint32 (MONO_PROFILER_STATISTICAL_CODE_END);
3404 #if DEBUG_STATISTICAL_PROFILER
3405 printf ("[write_statistical_data_block] Ending loop at index %d\n", end_index);
3407 write_clock_data ();
3409 write_current_block (MONO_PROFILER_FILE_BLOCK_KIND_STATISTICAL);
/*
 * write_intro_block:
 * Writes the file header (INTRO block): runtime identifier, profiler flag
 * mask, and the start counter/time used as the baseline for all deltas.
 * NOTE(review): one line is elided here (likely a format-version field);
 * confirm against the full source.
 */
3413 write_intro_block (void) {
3415 write_string ("mono");
3416 write_uint32 (profiler->flags);
3417 write_uint64 (profiler->start_counter);
3418 write_uint64 (profiler->start_time);
3419 write_current_block (MONO_PROFILER_FILE_BLOCK_KIND_INTRO);
/*
 * write_end_block:
 * Writes the trailing END block carrying the final counter and wall time,
 * letting readers compute the total profiled interval.
 */
3423 write_end_block (void) {
3425 write_uint64 (profiler->end_counter);
3426 write_uint64 (profiler->end_time);
3427 write_current_block (MONO_PROFILER_FILE_BLOCK_KIND_END);
/*
 * update_mapping:
 * Walks this thread's not-yet-mapped events and makes sure every class and
 * method referenced by an event has an id mapping element, creating one on
 * first sight. Advances first_unmapped_event to next_free_event so each
 * event is examined once.
 * NOTE(review): elided lines include the loop's `start ++` advance and the
 * extended-value skip body after the MAX_EVENT_VALUE check.
 */
3431 update_mapping (ProfilerPerThreadData *data) {
3432 ProfilerEventData *start = data->first_unmapped_event;
3433 ProfilerEventData *end = data->next_free_event;
3434 data->first_unmapped_event = end;
3436 #if (DEBUG_LOGGING_PROFILER)
3437 printf ("[update_mapping][TID %ld] START\n", data->thread_id);
3439 while (start < end) {
3440 #if DEBUG_LOGGING_PROFILER
3441 printf ("Examining event %p[TID %ld] looking for a new mapping...\n", start, data->thread_id);
3443 if (start->data_type == MONO_PROFILER_EVENT_DATA_TYPE_CLASS) {
3444 ClassIdMappingElement *element = class_id_mapping_element_get (start->data.address);
/* First time this class appears in the event stream: register it. */
3445 if (element == NULL) {
3446 MonoClass *klass = start->data.address;
3447 class_id_mapping_element_new (klass);
3449 } else if (start->data_type == MONO_PROFILER_EVENT_DATA_TYPE_METHOD) {
3450 MethodIdMappingElement *element = method_id_mapping_element_get (start->data.address);
3451 if (element == NULL) {
3452 MonoMethod *method = start->data.address;
/* A NULL method can be stored for some events; skip those. */
3453 if (method != NULL) {
3454 method_id_mapping_element_new (method);
/* MAX_EVENT_VALUE marks an event followed by a 64-bit extension slot. */
3459 if (start->value == MAX_EVENT_VALUE) {
3464 #if (DEBUG_LOGGING_PROFILER)
3465 printf ("[update_mapping][TID %ld] END\n", data->thread_id);
/*
 * flush_all_mappings:
 * Two-phase flush over every per-thread buffer: first create any missing
 * class/method id mappings (update_mapping), then write one MAPPING block
 * per thread. The phases are separate because mapping blocks must contain
 * ids for events recorded by *all* threads before any data block is written.
 */
3470 flush_all_mappings (void) {
3471 ProfilerPerThreadData *data;
3473 for (data = profiler->per_thread_data; data != NULL; data = data->next) {
3474 update_mapping (data);
3476 for (data = profiler->per_thread_data; data != NULL; data = data->next) {
3477 write_mapping_block (data->thread_id);
/*
 * flush_full_event_data_buffer:
 * Called when a thread's event buffer is (nearly) full: flushes all id
 * mappings, writes this thread's data block, and resets the buffer cursors
 * and the counter baseline for subsequent delta-encoded events.
 * NOTE(review): elided lines likely include profiler lock acquire/release
 * around this sequence — confirm against the full source.
 */
3482 flush_full_event_data_buffer (ProfilerPerThreadData *data) {
3485 // We flush all mappings because some id definitions could come
3486 // from other threads
3487 flush_all_mappings ();
/* update_mapping set first_unmapped_event = next_free_event, so ">=" holds. */
3488 g_assert (data->first_unmapped_event >= data->next_free_event);
3490 write_thread_data_block (data);
/* Reset all cursors to the start of the (now empty) buffer. */
3492 data->next_free_event = data->events;
3493 data->first_unwritten_event = data->events;
3494 data->first_unmapped_event = data->events;
/* Re-baseline the counter so the next event's delta starts from "now". */
3495 MONO_PROFILER_GET_CURRENT_COUNTER (data->start_event_counter);
3496 data->last_event_counter = data->start_event_counter;
/*
 * RESERVE_EVENTS(d,e,count): carve out `count` consecutive event slots from
 * per-thread data `d`, flushing the buffer first if there is not enough
 * room, and leave `e` pointing at the first reserved slot.
 * GET_NEXT_FREE_EVENT reserves exactly one slot.
 */
3501 /* The ">=" operator is intentional, to leave one spare slot for "extended values" */
3502 #define RESERVE_EVENTS(d,e,count) {\
3503 if ((d)->next_free_event >= ((d)->end_event - (count))) {\
3504 flush_full_event_data_buffer (d);\
3506 (e) = (d)->next_free_event;\
3507 (d)->next_free_event += (count);\
3509 #define GET_NEXT_FREE_EVENT(d,e) RESERVE_EVENTS ((d),(e),1)
/*
 * flush_everything:
 * Full flush: id mappings first, then every thread's event data block,
 * then the pending statistical (sampling) data. Used at shutdown and on
 * explicit flush requests.
 */
3512 flush_everything (void) {
3513 ProfilerPerThreadData *data;
3515 flush_all_mappings ();
3516 for (data = profiler->per_thread_data; data != NULL; data = data->next) {
3517 write_thread_data_block (data);
3519 write_statistical_data_block (profiler->statistical_data);
3522 /* This assumes the lock is held: it just offloads the work to the writer thread. */
/*
 * writer_thread_flush_everything:
 * Signals the dedicated writer thread to perform a full flush and blocks
 * until it reports completion. If there is no writer thread (or we *are*
 * the writer thread — CHECK_WRITER_THREAD semantics are defined elsewhere),
 * only a log line is emitted here; the elided else-branch handles that case.
 */
3524 writer_thread_flush_everything (void) {
3525 if (CHECK_WRITER_THREAD ()) {
/* Flag consumed by the writer thread's event loop. */
3526 profiler->writer_thread_flush_everything = TRUE;
3527 LOG_WRITER_THREAD ("writer_thread_flush_everything: raising event...");
3528 WRITER_EVENT_RAISE ();
3529 LOG_WRITER_THREAD ("writer_thread_flush_everything: waiting event...");
/* Synchronous: wait for the writer thread to finish the flush. */
3530 WRITER_EVENT_DONE_WAIT ();
3531 LOG_WRITER_THREAD ("writer_thread_flush_everything: got event.");
3533 LOG_WRITER_THREAD ("writer_thread_flush_everything: no thread.");
/* Map a MonoProfileResult to the SUCCESS/FAILURE status bit stored in
 * element load/unload event blocks. */
3537 #define RESULT_TO_LOAD_CODE(r) (((r)==MONO_PROFILE_OK)?MONO_PROFILER_LOADED_EVENT_SUCCESS:MONO_PROFILER_LOADED_EVENT_FAILURE)
/* AppDomain load/unload profiler callbacks. Load start/end pairs are
 * tracked through the loaded_appdomains table; the domain's numeric id
 * (stringified) is used as its name. Unload start forces a full flush so
 * all events referencing the domain are written before it disappears. */
3539 appdomain_start_load (MonoProfiler *profiler, MonoDomain *domain) {
3541 loaded_element_load_start (profiler->loaded_appdomains, domain);
/* End-of-load: record name + duration and emit a LOADED event block. */
3546 appdomain_end_load (MonoProfiler *profiler, MonoDomain *domain, int result) {
3548 LoadedElement *element;
3550 name = g_strdup_printf ("%d", mono_domain_get_id (domain));
3552 element = loaded_element_load_end (profiler->loaded_appdomains, domain, name);
3553 write_element_load_block (element, MONO_PROFILER_LOADED_EVENT_APPDOMAIN | RESULT_TO_LOAD_CODE (result), CURRENT_THREAD_ID ());
/* Flush everything before the domain's classes/methods become invalid. */
3558 appdomain_start_unload (MonoProfiler *profiler, MonoDomain *domain) {
3560 loaded_element_unload_start (profiler->loaded_appdomains, domain);
3561 writer_thread_flush_everything ();
3566 appdomain_end_unload (MonoProfiler *profiler, MonoDomain *domain) {
3567 LoadedElement *element;
3570 element = loaded_element_unload_end (profiler->loaded_appdomains, domain);
3571 write_element_unload_block (element, MONO_PROFILER_LOADED_EVENT_APPDOMAIN, CURRENT_THREAD_ID ());
/* Module (MonoImage) load/unload profiler callbacks, mirroring the
 * appdomain handlers but keyed on the loaded_modules table. The module
 * name comes from its assembly name when available; dynamic modules fall
 * back to a pointer-based placeholder string. */
3576 module_start_load (MonoProfiler *profiler, MonoImage *module) {
3578 loaded_element_load_start (profiler->loaded_modules, module);
3583 module_end_load (MonoProfiler *profiler, MonoImage *module, int result) {
3585 MonoAssemblyName aname;
3586 LoadedElement *element;
3588 if (mono_assembly_fill_assembly_name (module, &aname)) {
3589 name = mono_stringify_assembly_name (&aname);
/* No assembly name (dynamic module): identify it by pointer. */
3591 name = g_strdup_printf ("Dynamic module \"%p\"", module);
3594 element = loaded_element_load_end (profiler->loaded_modules, module, name);
3595 write_element_load_block (element, MONO_PROFILER_LOADED_EVENT_MODULE | RESULT_TO_LOAD_CODE (result), CURRENT_THREAD_ID ());
/* Flush before the module's metadata becomes invalid. */
3600 module_start_unload (MonoProfiler *profiler, MonoImage *module) {
3602 loaded_element_unload_start (profiler->loaded_modules, module);
3603 writer_thread_flush_everything ();
3608 module_end_unload (MonoProfiler *profiler, MonoImage *module) {
3609 LoadedElement *element;
3612 element = loaded_element_unload_end (profiler->loaded_modules, module);
3613 write_element_unload_block (element, MONO_PROFILER_LOADED_EVENT_MODULE, CURRENT_THREAD_ID ());
/* Assembly load/unload profiler callbacks, analogous to the module
 * handlers but keyed on the loaded_assemblies table. */
3618 assembly_start_load (MonoProfiler *profiler, MonoAssembly *assembly) {
3620 loaded_element_load_start (profiler->loaded_assemblies, assembly);
3625 assembly_end_load (MonoProfiler *profiler, MonoAssembly *assembly, int result) {
3627 MonoAssemblyName aname;
3628 LoadedElement *element;
3630 if (mono_assembly_fill_assembly_name (mono_assembly_get_image (assembly), &aname)) {
3631 name = mono_stringify_assembly_name (&aname);
/* Dynamic assemblies carry no name: identify by pointer. */
3633 name = g_strdup_printf ("Dynamic assembly \"%p\"", assembly);
3636 element = loaded_element_load_end (profiler->loaded_assemblies, assembly, name);
3637 write_element_load_block (element, MONO_PROFILER_LOADED_EVENT_ASSEMBLY | RESULT_TO_LOAD_CODE (result), CURRENT_THREAD_ID ());
/* Flush before the assembly's classes/methods become invalid. */
3642 assembly_start_unload (MonoProfiler *profiler, MonoAssembly *assembly) {
3644 loaded_element_unload_start (profiler->loaded_assemblies, assembly);
3645 writer_thread_flush_everything ();
3649 assembly_end_unload (MonoProfiler *profiler, MonoAssembly *assembly) {
3650 LoadedElement *element;
3653 element = loaded_element_unload_end (profiler->loaded_assemblies, assembly);
3654 write_element_unload_block (element, MONO_PROFILER_LOADED_EVENT_ASSEMBLY, CURRENT_THREAD_ID ());
/* Debug-only helpers (compiled only when DEBUG_LOGGING_PROFILER is set):
 * enum-to-string converters plus print_event_data, used by the LOG_EVENT
 * macro to dump every stored event to stdout. In non-debug builds
 * LOG_EVENT expands to nothing (second definition, elided #else). */
3658 #if (DEBUG_LOGGING_PROFILER)
3660 class_event_code_to_string (MonoProfilerClassEvents code) {
3662 case MONO_PROFILER_EVENT_CLASS_LOAD: return "LOAD";
3663 case MONO_PROFILER_EVENT_CLASS_UNLOAD: return "UNLOAD";
3664 case MONO_PROFILER_EVENT_CLASS_ALLOCATION: return "ALLOCATION";
3665 case MONO_PROFILER_EVENT_CLASS_EXCEPTION: return "EXCEPTION";
3666 default: g_assert_not_reached (); return "";
/* Method-event code names. */
3670 method_event_code_to_string (MonoProfilerMethodEvents code) {
3672 case MONO_PROFILER_EVENT_METHOD_CALL: return "CALL";
3673 case MONO_PROFILER_EVENT_METHOD_JIT: return "JIT";
3674 case MONO_PROFILER_EVENT_METHOD_FREED: return "FREED";
3675 case MONO_PROFILER_EVENT_METHOD_ALLOCATION_CALLER: return "ALLOCATION_CALLER";
3676 case MONO_PROFILER_EVENT_METHOD_ALLOCATION_JIT_TIME_CALLER: return "ALLOCATION_JIT_TIME_CALLER";
3677 case MONO_PROFILER_EVENT_ALLOCATION_OBJECT_ID: return "ALLOCATION_OBJECT_ID";
3678 default: g_assert_not_reached (); return "";
/* "Number"-event (non class/method payload) code names. */
3682 number_event_code_to_string (MonoProfilerEvents code) {
3684 case MONO_PROFILER_EVENT_THREAD: return "THREAD";
3685 case MONO_PROFILER_EVENT_GC_COLLECTION: return "GC_COLLECTION";
3686 case MONO_PROFILER_EVENT_GC_MARK: return "GC_MARK";
3687 case MONO_PROFILER_EVENT_GC_SWEEP: return "GC_SWEEP";
3688 case MONO_PROFILER_EVENT_GC_RESIZE: return "GC_RESIZE";
3689 case MONO_PROFILER_EVENT_GC_STOP_WORLD: return "GC_STOP_WORLD";
3690 case MONO_PROFILER_EVENT_GC_START_WORLD: return "GC_START_WORLD";
3691 case MONO_PROFILER_EVENT_JIT_TIME_ALLOCATION: return "JIT_TIME_ALLOCATION";
3692 case MONO_PROFILER_EVENT_STACK_SECTION: return "STACK_SECTION";
3693 default: g_assert_not_reached (); return "";
3697 event_result_to_string (MonoProfilerEventResult code) {
3699 case MONO_PROFILER_EVENT_RESULT_SUCCESS: return "SUCCESS";
3700 case MONO_PROFILER_EVENT_RESULT_FAILURE: return "FAILURE";
3701 default: g_assert_not_reached (); return "";
3705 event_kind_to_string (MonoProfilerEventKind code) {
3707 case MONO_PROFILER_EVENT_KIND_START: return "START";
3708 case MONO_PROFILER_EVENT_KIND_END: return "END";
3709 default: g_assert_not_reached (); return "";
/* Pretty-print one event, dispatching on its data_type. The result bits
 * are masked out of event->code before name lookup. */
3713 print_event_data (gsize thread_id, ProfilerEventData *event, guint64 value) {
3714 if (event->data_type == MONO_PROFILER_EVENT_DATA_TYPE_CLASS) {
3715 printf ("[TID %ld] CLASS[%p] event [%p] %s:%s:%s[%d-%d-%d] %ld (%s.%s)\n",
3717 event->data.address,
3719 class_event_code_to_string (event->code & ~MONO_PROFILER_EVENT_RESULT_MASK),
3720 event_result_to_string (event->code & MONO_PROFILER_EVENT_RESULT_MASK),
3721 event_kind_to_string (event->kind),
3726 mono_class_get_namespace ((MonoClass*) event->data.address),
3727 mono_class_get_name ((MonoClass*) event->data.address));
3728 } else if (event->data_type == MONO_PROFILER_EVENT_DATA_TYPE_METHOD) {
3729 printf ("[TID %ld] METHOD[%p] event [%p] %s:%s:%s[%d-%d-%d] %ld (%s.%s:%s (?))\n",
3731 event->data.address,
3733 method_event_code_to_string (event->code & ~MONO_PROFILER_EVENT_RESULT_MASK),
3734 event_result_to_string (event->code & MONO_PROFILER_EVENT_RESULT_MASK),
3735 event_kind_to_string (event->kind),
3740 (event->data.address != NULL) ? mono_class_get_namespace (mono_method_get_class ((MonoMethod*) event->data.address)) : "<NULL>",
3741 (event->data.address != NULL) ? mono_class_get_name (mono_method_get_class ((MonoMethod*) event->data.address)) : "<NULL>",
3742 (event->data.address != NULL) ? mono_method_get_name ((MonoMethod*) event->data.address) : "<NULL>");
3744 printf ("[TID %ld] NUMBER[%ld] event [%p] %s:%s[%d-%d-%d] %ld\n",
3746 (guint64) event->data.number,
3748 number_event_code_to_string (event->code),
3749 event_kind_to_string (event->kind),
/* Debug build: log every event; release build: LOG_EVENT is a no-op. */
3756 #define LOG_EVENT(tid,ev,val) print_event_data ((tid),(ev),(val))
3758 #define LOG_EVENT(tid,ev,val)
/*
 * Event-storage macros. The four STORE_EVENT_* variants fill one reserved
 * ProfilerEventData slot:
 *  - ITEM vs NUMBER: payload is a pointer (class/method) or a plain number.
 *  - COUNTER vs VALUE: the stored value is either the delta of the
 *    per-thread performance counter since the previous event, or an
 *    explicit caller-supplied value.
 * Values that do not fit in the 25-bit `value` field are flagged with
 * MAX_EVENT_VALUE and spilled as a full guint64 into the next event slot
 * (the spare slot guaranteed by RESERVE_EVENTS).
 * No comments can be placed inside the macro bodies: every line is a
 * backslash continuation.
 */
3761 #define RESULT_TO_EVENT_CODE(r) (((r)==MONO_PROFILE_OK)?MONO_PROFILER_EVENT_RESULT_SUCCESS:MONO_PROFILER_EVENT_RESULT_FAILURE)
3763 #define STORE_EVENT_ITEM_COUNTER(event,p,i,dt,c,k) do {\
3766 MONO_PROFILER_GET_CURRENT_COUNTER (counter);\
3767 (event)->data.address = (i);\
3768 (event)->data_type = (dt);\
3769 (event)->code = (c);\
3770 (event)->kind = (k);\
3771 delta = counter - data->last_event_counter;\
3772 if (delta < MAX_EVENT_VALUE) {\
3773 (event)->value = delta;\
3775 ProfilerEventData *extension = data->next_free_event;\
3776 data->next_free_event ++;\
3777 (event)->value = MAX_EVENT_VALUE;\
3778 *(guint64*)extension = delta;\
3780 data->last_event_counter = counter;\
3781 LOG_EVENT (data->thread_id, (event), delta);\
3783 #define STORE_EVENT_ITEM_VALUE(event,p,i,dt,c,k,v) do {\
3784 (event)->data.address = (i);\
3785 (event)->data_type = (dt);\
3786 (event)->code = (c);\
3787 (event)->kind = (k);\
3788 if ((v) < MAX_EVENT_VALUE) {\
3789 (event)->value = (v);\
3791 ProfilerEventData *extension = data->next_free_event;\
3792 data->next_free_event ++;\
3793 (event)->value = MAX_EVENT_VALUE;\
3794 *(guint64*)extension = (v);\
3796 LOG_EVENT (data->thread_id, (event), (v));\
3798 #define STORE_EVENT_NUMBER_COUNTER(event,p,n,dt,c,k) do {\
3801 MONO_PROFILER_GET_CURRENT_COUNTER (counter);\
3802 (event)->data.number = (n);\
3803 (event)->data_type = (dt);\
3804 (event)->code = (c);\
3805 (event)->kind = (k);\
3806 delta = counter - data->last_event_counter;\
3807 if (delta < MAX_EVENT_VALUE) {\
3808 (event)->value = delta;\
3810 ProfilerEventData *extension = data->next_free_event;\
3811 data->next_free_event ++;\
3812 (event)->value = MAX_EVENT_VALUE;\
3813 *(guint64*)extension = delta;\
3815 data->last_event_counter = counter;\
3816 LOG_EVENT (data->thread_id, (event), delta);\
3818 #define STORE_EVENT_NUMBER_VALUE(event,p,n,dt,c,k,v) do {\
3819 (event)->data.number = (n);\
3820 (event)->data_type = (dt);\
3821 (event)->code = (c);\
3822 (event)->kind = (k);\
3823 if ((v) < MAX_EVENT_VALUE) {\
3824 (event)->value = (v);\
3826 ProfilerEventData *extension = data->next_free_event;\
3827 data->next_free_event ++;\
3828 (event)->value = MAX_EVENT_VALUE;\
3829 *(guint64*)extension = (v);\
3831 LOG_EVENT (data->thread_id, (event), (v));\
/* Class load/unload profiler callbacks: each records one CLASS event with
 * a counter delta. The end-of-load event also encodes the load result
 * (SUCCESS/FAILURE bit via RESULT_TO_EVENT_CODE). */
3835 class_start_load (MonoProfiler *profiler, MonoClass *klass) {
3836 ProfilerPerThreadData *data;
3837 ProfilerEventData *event;
3838 GET_PROFILER_THREAD_DATA (data);
3839 GET_NEXT_FREE_EVENT (data, event);
3840 STORE_EVENT_ITEM_COUNTER (event, profiler, klass, MONO_PROFILER_EVENT_DATA_TYPE_CLASS, MONO_PROFILER_EVENT_CLASS_LOAD, MONO_PROFILER_EVENT_KIND_START);
3843 class_end_load (MonoProfiler *profiler, MonoClass *klass, int result) {
3844 ProfilerPerThreadData *data;
3845 ProfilerEventData *event;
3846 GET_PROFILER_THREAD_DATA (data);
3847 GET_NEXT_FREE_EVENT (data, event);
3848 STORE_EVENT_ITEM_COUNTER (event, profiler, klass, MONO_PROFILER_EVENT_DATA_TYPE_CLASS, MONO_PROFILER_EVENT_CLASS_LOAD | RESULT_TO_EVENT_CODE (result), MONO_PROFILER_EVENT_KIND_END);
3851 class_start_unload (MonoProfiler *profiler, MonoClass *klass) {
3852 ProfilerPerThreadData *data;
3853 ProfilerEventData *event;
3854 GET_PROFILER_THREAD_DATA (data);
3855 GET_NEXT_FREE_EVENT (data, event);
3856 STORE_EVENT_ITEM_COUNTER (event, profiler, klass, MONO_PROFILER_EVENT_DATA_TYPE_CLASS, MONO_PROFILER_EVENT_CLASS_UNLOAD, MONO_PROFILER_EVENT_KIND_START);
3859 class_end_unload (MonoProfiler *profiler, MonoClass *klass) {
3860 ProfilerPerThreadData *data;
3861 ProfilerEventData *event;
3862 GET_PROFILER_THREAD_DATA (data);
3863 GET_NEXT_FREE_EVENT (data, event);
3864 STORE_EVENT_ITEM_COUNTER (event, profiler, klass, MONO_PROFILER_EVENT_DATA_TYPE_CLASS, MONO_PROFILER_EVENT_CLASS_UNLOAD, MONO_PROFILER_EVENT_KIND_END);
/* JIT compilation callbacks. start/end record METHOD_JIT events and keep
 * the shadow call stack consistent (the method is pushed as "jitted" for
 * the duration of compilation). method_jit_result additionally reports
 * successfully JITted code to oprofile when that action flag is enabled. */
3868 method_start_jit (MonoProfiler *profiler, MonoMethod *method) {
3869 ProfilerPerThreadData *data;
3870 ProfilerEventData *event;
3871 GET_PROFILER_THREAD_DATA (data);
3872 GET_NEXT_FREE_EVENT (data, event);
3873 thread_stack_push_jitted_safely (&(data->stack), method, TRUE);
3874 STORE_EVENT_ITEM_COUNTER (event, profiler, method, MONO_PROFILER_EVENT_DATA_TYPE_METHOD, MONO_PROFILER_EVENT_METHOD_JIT, MONO_PROFILER_EVENT_KIND_START);
3877 method_end_jit (MonoProfiler *profiler, MonoMethod *method, int result) {
3878 ProfilerPerThreadData *data;
3879 ProfilerEventData *event;
3880 GET_PROFILER_THREAD_DATA (data);
3881 GET_NEXT_FREE_EVENT (data, event);
3882 STORE_EVENT_ITEM_COUNTER (event, profiler, method, MONO_PROFILER_EVENT_DATA_TYPE_METHOD, MONO_PROFILER_EVENT_METHOD_JIT | RESULT_TO_EVENT_CODE (result), MONO_PROFILER_EVENT_KIND_END);
/* Balance the push done in method_start_jit. */
3883 thread_stack_pop (&(data->stack));
/* Report native code ranges to oprofile (HAS_OPROFILE builds only;
 * note this callback's first parameter is named `prof`, not `profiler`). */
3888 method_jit_result (MonoProfiler *prof, MonoMethod *method, MonoJitInfo* jinfo, int result) {
3889 if (profiler->action_flags.oprofile && (result == MONO_PROFILE_OK)) {
3890 MonoClass *klass = mono_method_get_class (method);
3891 char *signature = mono_signature_get_desc (mono_method_signature (method), TRUE);
3892 char *name = g_strdup_printf ("%s.%s:%s (%s)", mono_class_get_namespace (klass), mono_class_get_name (klass), mono_method_get_name (method), signature);
3893 gpointer code_start = mono_jit_info_get_code_start (jinfo);
3894 int code_size = mono_jit_info_get_code_size (jinfo);
3896 if (op_write_native_code (name, code_start, code_size)) {
3897 g_warning ("Problem calling op_write_native_code\n");
/* Per-call instrumentation callbacks. enter/leave are hot paths: they
 * bail out immediately when the profiler is disabled, record a CALL event
 * only when track_calls is on, and maintain the shadow stack only when
 * track_stack is on. method_free records the method's teardown. */
3908 method_enter (MonoProfiler *profiler, MonoMethod *method) {
3909 ProfilerPerThreadData *data;
3911 CHECK_PROFILER_ENABLED ();
3912 GET_PROFILER_THREAD_DATA (data);
3913 if (profiler->action_flags.track_calls) {
3914 ProfilerEventData *event;
3915 GET_NEXT_FREE_EVENT (data, event);
3916 STORE_EVENT_ITEM_COUNTER (event, profiler, method, MONO_PROFILER_EVENT_DATA_TYPE_METHOD, MONO_PROFILER_EVENT_METHOD_CALL, MONO_PROFILER_EVENT_KIND_START);
3918 if (profiler->action_flags.track_stack) {
3919 thread_stack_push_safely (&(data->stack), method);
3923 method_leave (MonoProfiler *profiler, MonoMethod *method) {
3924 ProfilerPerThreadData *data;
3926 CHECK_PROFILER_ENABLED ();
3927 GET_PROFILER_THREAD_DATA (data);
3928 if (profiler->action_flags.track_calls) {
3929 ProfilerEventData *event;
3930 GET_NEXT_FREE_EVENT (data, event);
3931 STORE_EVENT_ITEM_COUNTER (event, profiler, method, MONO_PROFILER_EVENT_DATA_TYPE_METHOD, MONO_PROFILER_EVENT_METHOD_CALL, MONO_PROFILER_EVENT_KIND_END);
3933 if (profiler->action_flags.track_stack) {
3934 thread_stack_pop (&(data->stack));
/* Method freed by the runtime: kind 0 (no START/END pairing applies). */
3939 method_free (MonoProfiler *profiler, MonoMethod *method) {
3940 ProfilerPerThreadData *data;
3941 ProfilerEventData *event;
3942 GET_PROFILER_THREAD_DATA (data);
3943 GET_NEXT_FREE_EVENT (data, event);
3944 STORE_EVENT_ITEM_COUNTER (event, profiler, method, MONO_PROFILER_EVENT_DATA_TYPE_METHOD, MONO_PROFILER_EVENT_METHOD_FREED, 0);
/* Managed thread lifetime callbacks: record THREAD start/end events
 * (payload is the thread id as a number). */
3948 thread_start (MonoProfiler *profiler, gsize tid) {
3949 ProfilerPerThreadData *data;
3950 ProfilerEventData *event;
3951 GET_PROFILER_THREAD_DATA (data);
3952 GET_NEXT_FREE_EVENT (data, event);
3953 STORE_EVENT_NUMBER_COUNTER (event, profiler, tid, MONO_PROFILER_EVENT_DATA_TYPE_OTHER, MONO_PROFILER_EVENT_THREAD, MONO_PROFILER_EVENT_KIND_START);
3956 thread_end (MonoProfiler *profiler, gsize tid) {
3957 ProfilerPerThreadData *data;
3958 ProfilerEventData *event;
3959 GET_PROFILER_THREAD_DATA (data);
3960 GET_NEXT_FREE_EVENT (data, event);
3961 STORE_EVENT_NUMBER_COUNTER (event, profiler, tid, MONO_PROFILER_EVENT_DATA_TYPE_OTHER, MONO_PROFILER_EVENT_THREAD, MONO_PROFILER_EVENT_KIND_END);
/*
 * object_allocated:
 * Allocation callback. Reserves a contiguous group of event slots sized by
 * the enabled action flags, then stores (in order): an optional stack
 * section + unsaved caller frames, the CLASS_ALLOCATION event carrying the
 * object size, optionally the allocated object for later reachability
 * analysis, optionally the (non-wrapper) caller method, and optionally the
 * object id. Slot count is computed up-front so one RESERVE_EVENTS call
 * guarantees the whole group stays in a single buffer.
 * NOTE(review): elided lines include declarations (unsaved_frames, i,
 * index), `events ++` advances between stores, and the wrapper-skip loop's
 * `index ++` — the visible sequence is partial.
 */
3965 object_allocated (MonoProfiler *profiler, MonoObject *obj, MonoClass *klass) {
3966 ProfilerPerThreadData *data;
3967 ProfilerEventData *events;
3969 int event_slot_count;
3971 GET_PROFILER_THREAD_DATA (data);
/* One slot is always needed for the CLASS_ALLOCATION event itself. */
3972 event_slot_count = 1;
3973 if (profiler->action_flags.save_allocation_caller) {
3974 event_slot_count ++;
3976 if (profiler->action_flags.allocations_carry_id) {
3977 event_slot_count ++;
3979 if (profiler->action_flags.save_allocation_stack) {
3980 unsaved_frames = thread_stack_count_unsaved_frames (&(data->stack));
/* +1 for the STACK_SECTION header event preceding the frames. */
3981 event_slot_count += (unsaved_frames + 1);
3985 RESERVE_EVENTS (data, events, event_slot_count);
3987 if (profiler->action_flags.save_allocation_stack) {
/* Header: where the previously-saved stack ends, and how many new frames follow. */
3990 STORE_EVENT_NUMBER_VALUE (events, profiler, data->stack.last_saved_top, MONO_PROFILER_EVENT_DATA_TYPE_OTHER, MONO_PROFILER_EVENT_STACK_SECTION, 0, unsaved_frames);
3992 for (i = 0; i < unsaved_frames; i++) {
3993 if (! thread_stack_index_from_top_is_jitted (&(data->stack), i)) {
3994 STORE_EVENT_ITEM_VALUE (events, profiler, thread_stack_index_from_top (&(data->stack), i), MONO_PROFILER_EVENT_DATA_TYPE_METHOD, MONO_PROFILER_EVENT_METHOD_ALLOCATION_CALLER, 0, 0);
3996 STORE_EVENT_ITEM_VALUE (events, profiler, thread_stack_index_from_top (&(data->stack), i), MONO_PROFILER_EVENT_DATA_TYPE_METHOD, MONO_PROFILER_EVENT_METHOD_ALLOCATION_JIT_TIME_CALLER, 0, 0);
4001 data->stack.last_saved_top = data->stack.top;
/* The allocation event proper: class + object size. */
4004 STORE_EVENT_ITEM_VALUE (events, profiler, klass, MONO_PROFILER_EVENT_DATA_TYPE_CLASS, MONO_PROFILER_EVENT_CLASS_ALLOCATION, 0, (guint64) mono_object_get_size (obj));
4005 if (profiler->action_flags.unreachable_objects || profiler->action_flags.heap_shot || profiler->action_flags.collection_summary) {
4006 STORE_ALLOCATED_OBJECT (data, obj);
4009 if (profiler->action_flags.save_allocation_caller) {
4010 MonoMethod *caller = thread_stack_top (&(data->stack));
4011 gboolean caller_is_jitted = thread_stack_top_is_jitted (&(data->stack));
/* Skip managed-to-native wrappers to attribute the allocation to real user code. */
4015 while ((caller != NULL) && (caller->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE)) {
4016 caller = thread_stack_index_from_top (&(data->stack), index);
4017 caller_is_jitted = thread_stack_index_from_top_is_jitted (&(data->stack), index);
4020 if (! caller_is_jitted) {
4021 STORE_EVENT_ITEM_VALUE (events, profiler, caller, MONO_PROFILER_EVENT_DATA_TYPE_METHOD, MONO_PROFILER_EVENT_METHOD_ALLOCATION_CALLER, 0, 0);
4023 STORE_EVENT_ITEM_VALUE (events, profiler, caller, MONO_PROFILER_EVENT_DATA_TYPE_METHOD, MONO_PROFILER_EVENT_METHOD_ALLOCATION_JIT_TIME_CALLER, 0, 0);
4026 if (profiler->action_flags.allocations_carry_id) {
4028 STORE_EVENT_ITEM_VALUE (events, profiler, obj, MONO_PROFILER_EVENT_DATA_TYPE_OTHER, MONO_PROFILER_EVENT_ALLOCATION_OBJECT_ID, 0, 0);
/*
 * statistical_call_chain:
 * Sampling callback (with call-chain capture). Claims a slot group in the
 * lock-free statistical buffer via InterlockedIncrement; fills it with the
 * sampled IPs, NULL-padding unused depth slots. The sampler that fills the
 * last slot is responsible for handing the full buffer to the writer
 * thread and installing the pre-allocated second buffer. Samplers that
 * overflow the buffer spin (losing their sample) until a free buffer
 * appears — the surrounding do/while retry loop is partially elided here.
 */
4033 statistical_call_chain (MonoProfiler *profiler, int call_chain_depth, guchar **ips, void *context) {
4034 MonoDomain *domain = mono_domain_get ();
4035 ProfilerStatisticalData *data;
4038 CHECK_PROFILER_ENABLED ();
4040 data = profiler->statistical_data;
/* Atomically claim slot group (index - 1); no lock on this hot path. */
4041 index = InterlockedIncrement (&data->next_free_index);
4043 if (index <= data->end_index) {
4044 int base_index = (index - 1) * (profiler->statistical_call_chain_depth + 1);
4045 int call_chain_index = 0;
4047 //printf ("[statistical_call_chain] (%d)\n", call_chain_depth);
4048 while (call_chain_index < call_chain_depth) {
4049 ProfilerStatisticalHit *hit = & (data->hits [base_index + call_chain_index]);
4050 //printf ("[statistical_call_chain] [%d] = %p\n", base_index + call_chain_index, ips [call_chain_index]);
4051 hit->address = (gpointer) ips [call_chain_index];
4052 hit->domain = domain;
4053 call_chain_index ++;
/* NULL-terminate / pad the remaining depth slots. */
4055 while (call_chain_index <= profiler->statistical_call_chain_depth) {
4056 ProfilerStatisticalHit *hit = & (data->hits [base_index + call_chain_index]);
4057 //printf ("[statistical_call_chain] [%d] = NULL\n", base_index + call_chain_index);
4058 hit->address = NULL;
4060 call_chain_index ++;
4063 /* Check if we are the one that must swap the buffers */
4064 if (index == data->end_index + 1) {
4065 ProfilerStatisticalData *new_data;
4067 /* In the *impossible* case that the writer thread has not finished yet, */
4068 /* loop waiting for it and meanwhile lose all statistical events... */
4070 /* First, wait that it consumed the ready buffer */
4071 while (profiler->statistical_data_ready != NULL);
4072 /* Then, wait that it produced the free buffer */
4073 new_data = profiler->statistical_data_second_buffer;
4074 } while (new_data == NULL);
/* Publish the full buffer and wake the writer thread. */
4076 profiler->statistical_data_ready = data;
4077 profiler->statistical_data = new_data;
4078 profiler->statistical_data_second_buffer = NULL;
4079 WRITER_EVENT_RAISE ();
4082 /* Loop again, hoping to acquire a free slot this time */
4085 } while (data == NULL);
/*
 * statistical_hit:
 * Single-IP sampling callback (no call chain): same lock-free slot-claim
 * and buffer-swap protocol as statistical_call_chain, but each hit
 * occupies exactly one slot. The do/while retry loop is partially elided
 * in this listing.
 */
4089 statistical_hit (MonoProfiler *profiler, guchar *ip, void *context) {
4090 MonoDomain *domain = mono_domain_get ();
4091 ProfilerStatisticalData *data;
4094 CHECK_PROFILER_ENABLED ();
4096 data = profiler->statistical_data;
/* Atomically claim one slot; index is 1-based after the increment. */
4097 index = InterlockedIncrement (&data->next_free_index);
4099 if (index <= data->end_index) {
4100 ProfilerStatisticalHit *hit = & (data->hits [index - 1]);
4101 hit->address = (gpointer) ip;
4102 hit->domain = domain;
4104 /* Check if we are the one that must swap the buffers */
4105 if (index == data->end_index + 1) {
4106 ProfilerStatisticalData *new_data;
4108 /* In the *impossible* case that the writer thread has not finished yet, */
4109 /* loop waiting for it and meanwhile lose all statistical events... */
4111 /* First, wait that it consumed the ready buffer */
4112 while (profiler->statistical_data_ready != NULL);
4113 /* Then, wait that it produced the free buffer */
4114 new_data = profiler->statistical_data_second_buffer;
4115 } while (new_data == NULL);
/* Publish the full buffer and wake the writer thread. */
4117 profiler->statistical_data_ready = data;
4118 profiler->statistical_data = new_data;
4119 profiler->statistical_data_second_buffer = NULL;
4120 WRITER_EVENT_RAISE ();
4123 /* Loop again, hoping to acquire a free slot this time */
4126 } while (data == NULL);
/*
 * gc_event_code_from_profiler_event:
 * Maps a runtime MonoGCEvent to the profiler's coarser event code:
 * start/end pairs collapse onto the same code (COLLECTION, MARK, SWEEP,
 * STOP_WORLD, START_WORLD). Unknown events assert.
 */
4129 static MonoProfilerEvents
4130 gc_event_code_from_profiler_event (MonoGCEvent event) {
4132 case MONO_GC_EVENT_START:
4133 case MONO_GC_EVENT_END:
4134 return MONO_PROFILER_EVENT_GC_COLLECTION;
4135 case MONO_GC_EVENT_MARK_START:
4136 case MONO_GC_EVENT_MARK_END:
4137 return MONO_PROFILER_EVENT_GC_MARK;
4138 case MONO_GC_EVENT_RECLAIM_START:
4139 case MONO_GC_EVENT_RECLAIM_END:
4140 return MONO_PROFILER_EVENT_GC_SWEEP;
4141 case MONO_GC_EVENT_PRE_STOP_WORLD:
4142 case MONO_GC_EVENT_POST_STOP_WORLD:
4143 return MONO_PROFILER_EVENT_GC_STOP_WORLD;
4144 case MONO_GC_EVENT_PRE_START_WORLD:
4145 case MONO_GC_EVENT_POST_START_WORLD:
4146 return MONO_PROFILER_EVENT_GC_START_WORLD;
4148 g_assert_not_reached ();
/*
 * gc_event_kind_from_profiler_event:
 * Maps a MonoGCEvent to START or END kind, pairing with
 * gc_event_code_from_profiler_event above (e.g. PRE_STOP_WORLD is the
 * START and POST_STOP_WORLD the END of the STOP_WORLD code).
 */
4153 static MonoProfilerEventKind
4154 gc_event_kind_from_profiler_event (MonoGCEvent event) {
4156 case MONO_GC_EVENT_START:
4157 case MONO_GC_EVENT_MARK_START:
4158 case MONO_GC_EVENT_RECLAIM_START:
4159 case MONO_GC_EVENT_PRE_STOP_WORLD:
4160 case MONO_GC_EVENT_PRE_START_WORLD:
4161 return MONO_PROFILER_EVENT_KIND_START;
4162 case MONO_GC_EVENT_END:
4163 case MONO_GC_EVENT_MARK_END:
4164 case MONO_GC_EVENT_RECLAIM_END:
4165 case MONO_GC_EVENT_POST_START_WORLD:
4166 case MONO_GC_EVENT_POST_STOP_WORLD:
4167 return MONO_PROFILER_EVENT_KIND_END;
4169 g_assert_not_reached ();
/* Maximum size (bytes) of the external heap-shot command file. */
4174 #define HEAP_SHOT_COMMAND_FILE_MAX_LENGTH 64
/*
 * profiler_heap_shot_process_command_file:
 * Polls the user-supplied command file; if it changed since the last read
 * (mtime check, seconds scaled to the profiler's microsecond clock), its
 * integer content sets how many upcoming heap snapshots to dump.
 * POSIX-only (stat/open/read); close() of fd is elided in this listing.
 */
4176 profiler_heap_shot_process_command_file (void) {
4177 //FIXME: Port to Windows as well
4178 struct stat stat_buf;
4180 char buffer [HEAP_SHOT_COMMAND_FILE_MAX_LENGTH + 1];
/* No command file configured: nothing to do. */
4182 if (profiler->heap_shot_command_file_name == NULL)
4184 if (stat (profiler->heap_shot_command_file_name, &stat_buf) != 0)
/* Oversized file: ignore (buffer is MAX_LENGTH + 1 for the terminator). */
4186 if (stat_buf.st_size > HEAP_SHOT_COMMAND_FILE_MAX_LENGTH)
/* File unchanged since the last processed timestamp: ignore. */
4188 if ((stat_buf.st_mtim.tv_sec * 1000000) < profiler->heap_shot_command_file_access_time)
4191 fd = open (profiler->heap_shot_command_file_name, O_RDONLY);
4195 if (read (fd, &(buffer [0]), stat_buf.st_size) != stat_buf.st_size) {
4198 buffer [stat_buf.st_size] = 0;
/* File content is an integer: number of snapshots to dump next. */
4199 profiler->dump_next_heap_snapshots = atoi (buffer);
4200 MONO_PROFILER_GET_CURRENT_TIME (profiler->heap_shot_command_file_access_time);
/*
 * dump_current_heap_snapshot:
 * Decides whether the heap snapshot for the current GC should be written:
 * TRUE when a snapshot was explicitly signalled, or when the command file
 * still has a positive snapshot budget (decremented here); a negative
 * budget means "always dump" (handling elided in this listing).
 */
4207 dump_current_heap_snapshot (void) {
4210 if (profiler->heap_shot_was_signalled) {
4213 profiler_heap_shot_process_command_file ();
4214 if (profiler->dump_next_heap_snapshots > 0) {
4215 profiler->dump_next_heap_snapshots--;
4217 } else if (profiler->dump_next_heap_snapshots < 0) {
/*
 * profiler_heap_buffers_setup:
 * Initializes a heap-shot buffer list with one freshly allocated buffer;
 * all cursors (last, current, first_free_slot) point into it.
 */
4228 profiler_heap_buffers_setup (ProfilerHeapShotHeapBuffers *heap) {
4229 heap->buffers = g_new (ProfilerHeapShotHeapBuffer, 1);
4230 heap->buffers->previous = NULL;
4231 heap->buffers->next = NULL;
4232 heap->buffers->start_slot = &(heap->buffers->buffer [0]);
4233 heap->buffers->end_slot = &(heap->buffers->buffer [PROFILER_HEAP_SHOT_HEAP_BUFFER_SIZE]);
4234 heap->last = heap->buffers;
4235 heap->current = heap->buffers;
4236 heap->first_free_slot = & (heap->buffers->buffer [0]);
/*
 * profiler_heap_buffers_clear:
 * Resets all buffer-list pointers to NULL without freeing anything
 * (the buffers must already be freed or owned elsewhere).
 */
4239 profiler_heap_buffers_clear (ProfilerHeapShotHeapBuffers *heap) {
4240 heap->buffers = NULL;
4242 heap->current = NULL;
4243 heap->first_free_slot = NULL;
/*
 * profiler_heap_buffers_free:
 * Frees the whole buffer chain, then clears the list pointers.
 * NOTE(review): the loop body's g_free/advance lines are elided in this
 * listing; `next` is captured before the node is released.
 */
4246 profiler_heap_buffers_free (ProfilerHeapShotHeapBuffers *heap) {
4247 ProfilerHeapShotHeapBuffer *current = heap->buffers;
4248 while (current != NULL) {
4249 ProfilerHeapShotHeapBuffer *next = current->next;
4253 profiler_heap_buffers_clear (heap);
/*
 * report_object_references:
 * Scans the reference slots of one object (starting at `start`) using the
 * class layout bitmap and writes every live referenced object into the
 * heap-shot job buffer. Small layouts use the packed 64-bit bitmap; larger
 * ones use the extended byte-array bitmap. Returns the number of
 * references actually reported, so the caller can patch the reference
 * count it wrote earlier.
 */
4257 report_object_references (gpointer *start, ClassIdMappingElement *layout, ProfilerHeapShotWriteJob *job) {
4258 int reported_references = 0;
4261 for (slot = 0; slot < layout->data.layout.slots; slot ++) {
4262 gboolean slot_has_reference;
/* Packed bitmap covers layouts up to CLASS_LAYOUT_PACKED_BITMAP_SIZE slots. */
4263 if (layout->data.layout.slots <= CLASS_LAYOUT_PACKED_BITMAP_SIZE) {
4264 if (layout->data.bitmap.compact & (((guint64)1) << slot)) {
4265 slot_has_reference = TRUE;
4267 slot_has_reference = FALSE;
/* Extended bitmap: one bit per slot, packed 8 per byte. */
4270 if (layout->data.bitmap.extended [slot >> 3] & (1 << (slot & 7))) {
4271 slot_has_reference = TRUE;
4273 slot_has_reference = FALSE;
4277 if (slot_has_reference) {
4278 gpointer field = start [slot];
/* Only record references to objects that survived the collection. */
4280 if ((field != NULL) && mono_object_is_alive (field)) {
4281 reported_references ++;
4282 WRITE_HEAP_SHOT_JOB_VALUE (job, field);
4287 return reported_references;
/*
 * profiler_heap_report_object_reachable:
 * Record a live (reachable) object @obj into the heap-shot write @job:
 *  - updates the per-class "reachable" counters when a collection
 *    summary is being gathered (job->summary.capacity > 0);
 *  - when a full heap dump is requested, writes the object, a
 *    placeholder reference counter, and then all outgoing references
 *    (array elements or fields), back-patching the counter at the end.
 */
4291 profiler_heap_report_object_reachable (ProfilerHeapShotWriteJob *job, MonoObject *obj) {
4293 MonoClass *klass = mono_object_get_class (obj);
4294 ClassIdMappingElement *class_id = class_id_mapping_element_get (klass);
4295 if (class_id == NULL) {
4296 printf ("profiler_heap_report_object_reachable: class %p (%s.%s) has no id\n", klass, mono_class_get_namespace (klass), mono_class_get_name (klass));
4298 g_assert (class_id != NULL);
/* Collection-summary accounting (instances and bytes per class id). */
4300 if (job->summary.capacity > 0) {
4301 guint32 id = class_id->id;
4302 g_assert (id < job->summary.capacity);
4304 job->summary.per_class_data [id].reachable.instances ++;
4305 job->summary.per_class_data [id].reachable.bytes += mono_object_get_size (obj);
/* Full heap-shot dump: write the object and its reference graph. */
4307 if (profiler->action_flags.heap_shot && job->dump_heap_data) {
4308 int reference_counter = 0;
4309 gpointer *reference_counter_location;
4311 WRITE_HEAP_SHOT_JOB_VALUE_WITH_CODE (job, obj, HEAP_CODE_OBJECT);
4312 #if DEBUG_HEAP_PROFILER
4313 printf ("profiler_heap_report_object_reachable: reported object %p at cursor %p\n", obj, (job->cursor - 1));
/* Write a NULL placeholder; the real reference count is patched in
 * below once all references have been walked. */
4315 WRITE_HEAP_SHOT_JOB_VALUE (job, NULL);
4316 reference_counter_location = job->cursor - 1;
/* Arrays: rank != 0.  Reference-element arrays are walked directly;
 * value-type elements go through the element class layout bitmap. */
4318 if (mono_class_get_rank (klass)) {
4319 MonoArray *array = (MonoArray *) obj;
4320 MonoClass *element_class = mono_class_get_element_class (klass);
4321 ClassIdMappingElement *element_id = class_id_mapping_element_get (element_class);
4323 g_assert (element_id != NULL);
4324 if (element_id->data.layout.slots == CLASS_LAYOUT_NOT_INITIALIZED) {
4325 class_id_mapping_element_build_layout_bitmap (element_class, element_id);
4327 if (! mono_class_is_valuetype (element_class)) {
4328 int length = mono_array_length (array);
4330 for (i = 0; i < length; i++) {
4331 MonoObject *array_element = mono_array_get (array, MonoObject*, i);
4332 if ((array_element != NULL) && mono_object_is_alive (array_element)) {
4333 reference_counter ++;
4334 WRITE_HEAP_SHOT_JOB_VALUE (job, array_element);
4337 } else if (element_id->data.layout.references > 0) {
4338 int length = mono_array_length (array);
4339 int array_element_size = mono_array_element_size (klass);
4341 for (i = 0; i < length; i++) {
4342 gpointer array_element_address = mono_array_addr_with_size (array, array_element_size, i);
4343 reference_counter += report_object_references (array_element_address, element_id, job);
/* Plain objects: walk the instance fields (skip the MonoObject header). */
4347 if (class_id->data.layout.slots == CLASS_LAYOUT_NOT_INITIALIZED) {
4348 class_id_mapping_element_build_layout_bitmap (klass, class_id);
4350 if (class_id->data.layout.references > 0) {
4351 reference_counter += report_object_references ((gpointer)(((char*)obj) + sizeof (MonoObject)), class_id, job);
/* Back-patch the placeholder with the final reference count. */
4355 *reference_counter_location = GINT_TO_POINTER (reference_counter);
4356 #if DEBUG_HEAP_PROFILER
4357 printf ("profiler_heap_report_object_reachable: updated reference_counter_location %p with value %d\n", reference_counter_location, reference_counter);
/*
 * profiler_heap_report_object_unreachable:
 * Record a garbage (unreachable) object @obj into the heap-shot write
 * @job: bumps the per-class "unreachable" summary counters when a
 * collection summary is active, and writes a FREE_OBJECT_CLASS record
 * (class pointer + object size) when unreachable-object dumping is on.
 */
4363 profiler_heap_report_object_unreachable (ProfilerHeapShotWriteJob *job, MonoObject *obj) {
4365 MonoClass *klass = mono_object_get_class (obj);
4366 guint32 size = mono_object_get_size (obj);
4368 if (job->summary.capacity > 0) {
4369 ClassIdMappingElement *class_id = class_id_mapping_element_get (klass);
4372 if (class_id == NULL) {
/* NOTE(review): copy-paste — this message names the *reachable*
 * function; it should say "..._object_unreachable". */
4373 printf ("profiler_heap_report_object_reachable: class %p (%s.%s) has no id\n", klass, mono_class_get_namespace (klass), mono_class_get_name (klass));
4375 g_assert (class_id != NULL);
4377 g_assert (id < job->summary.capacity);
4379 job->summary.per_class_data [id].unreachable.instances ++;
4380 job->summary.per_class_data [id].unreachable.bytes += size;
4382 if (profiler->action_flags.unreachable_objects && job->dump_heap_data) {
4383 #if DEBUG_HEAP_PROFILER
4384 printf ("profiler_heap_report_object_unreachable: at job %p writing klass %p\n", job, klass);
4386 WRITE_HEAP_SHOT_JOB_VALUE_WITH_CODE (job, klass, HEAP_CODE_FREE_OBJECT_CLASS);
4388 #if DEBUG_HEAP_PROFILER
4389 printf ("profiler_heap_report_object_unreachable: at job %p writing size %p\n", job, GUINT_TO_POINTER (size));
4391 WRITE_HEAP_SHOT_JOB_VALUE (job, GUINT_TO_POINTER (size));
/*
 * profiler_heap_add_object:
 * Append live object @obj to the profiler's heap tracking buffers and
 * report it as reachable into @job.  When the current buffer is full,
 * advance to the next buffer in the chain, allocating and linking a new
 * one at the tail if none exists.
 */
4397 profiler_heap_add_object (ProfilerHeapShotHeapBuffers *heap, ProfilerHeapShotWriteJob *job, MonoObject *obj) {
4398 if (heap->first_free_slot >= heap->current->end_slot) {
4399 if (heap->current->next != NULL) {
/* Reuse an already-allocated buffer further down the chain. */
4400 heap->current = heap->current->next;
/* No spare buffer: grow the chain with a freshly allocated node. */
4402 ProfilerHeapShotHeapBuffer *buffer = g_new (ProfilerHeapShotHeapBuffer, 1);
4403 buffer->previous = heap->last;
4404 buffer->next = NULL;
4405 buffer->start_slot = &(buffer->buffer [0]);
4406 buffer->end_slot = &(buffer->buffer [PROFILER_HEAP_SHOT_HEAP_BUFFER_SIZE]);
4407 heap->current = buffer;
4408 heap->last->next = buffer;
4409 heap->last = buffer;
4411 heap->first_free_slot = &(heap->current->buffer [0]);
4414 *(heap->first_free_slot) = obj;
4415 heap->first_free_slot ++;
4416 profiler_heap_report_object_reachable (job, obj);
/*
 * profiler_heap_pop_object_from_end:
 * Pop objects off the end of the heap tracking buffers until a live one
 * is found (or the end reaches @current_slot).  Each popped object is
 * reported reachable or unreachable into @job as appropriate; walking
 * backwards may step into the previous buffer in the chain.
 * NOTE(review): elided lines include the returns — presumably the live
 * object is returned so the caller can overwrite a dead slot with it.
 */
4420 profiler_heap_pop_object_from_end (ProfilerHeapShotHeapBuffers *heap, ProfilerHeapShotWriteJob *job, MonoObject** current_slot) {
4421 while (heap->first_free_slot != current_slot) {
4424 if (heap->first_free_slot > heap->current->start_slot) {
4425 heap->first_free_slot --;
/* Current buffer exhausted: step back into the previous buffer. */
4427 heap->current = heap->current->previous;
4428 g_assert (heap->current != NULL);
4429 heap->first_free_slot = heap->current->end_slot - 1;
4432 obj = *(heap->first_free_slot);
4434 if (mono_object_is_alive (obj)) {
4435 profiler_heap_report_object_reachable (job, obj);
4438 profiler_heap_report_object_unreachable (job, obj);
/*
 * profiler_heap_scan:
 * Walk every tracked object slot from the first buffer up to the first
 * free slot.  Live objects are reported reachable; dead objects are
 * reported unreachable and their slot is compacted by pulling a live
 * object from the end of the buffers (profiler_heap_pop_object_from_end).
 */
4445 profiler_heap_scan (ProfilerHeapShotHeapBuffers *heap, ProfilerHeapShotWriteJob *job) {
4446 ProfilerHeapShotHeapBuffer *current_buffer = heap->buffers;
4447 MonoObject** current_slot = current_buffer->start_slot;
4449 while (current_slot != heap->first_free_slot) {
4450 MonoObject *obj = *current_slot;
4451 if (mono_object_is_alive (obj)) {
4452 profiler_heap_report_object_reachable (job, obj);
/* Dead slot: overwrite it with a live object popped from the end. */
4454 profiler_heap_report_object_unreachable (job, obj);
4455 *current_slot = profiler_heap_pop_object_from_end (heap, job, current_slot);
4458 if (*current_slot != NULL) {
/* Advance to the next buffer once this one's end is reached. */
4461 if (current_slot == current_buffer->end_slot) {
4462 current_buffer = current_buffer->next;
4463 g_assert (current_buffer != NULL);
4464 current_slot = current_buffer->start_slot;
/* Returns TRUE when a heap-shot write job is needed for this collection:
 * either a full heap dump was requested, or unreachable objects or a
 * per-class collection summary must be recorded. */
static inline gboolean
heap_shot_write_job_should_be_created (gboolean dump_heap_data) {
4472 return dump_heap_data || profiler->action_flags.unreachable_objects || profiler->action_flags.collection_summary;
/*
 * handle_heap_profiling:
 * Per-GC-event heap profiling driver, dispatching on @ev (the enclosing
 * switch statement is on elided lines):
 *  - PRE_STOP_WORLD: take the profiler lock so no flushing races the GC;
 *  - POST_STOP_WORLD: decide whether this collection dumps heap data and
 *    flush mappings/event buffers while the world is stopped;
 *  - MARK_END: create the heap-shot write job, scan the tracked heap and
 *    the per-thread allocation buffers, then hand the job to the writer
 *    thread.
 * `dump_heap_data` is static so the POST_STOP_WORLD decision is still
 * visible when MARK_END fires later in the same collection.
 */
4476 handle_heap_profiling (MonoProfiler *profiler, MonoGCEvent ev) {
4477 static gboolean dump_heap_data;
4480 case MONO_GC_EVENT_PRE_STOP_WORLD:
4481 // Get the lock, so we are sure nobody is flushing events during the collection,
4482 // and we can update all mappings (building the class descriptors).
4485 case MONO_GC_EVENT_POST_STOP_WORLD:
4486 dump_heap_data = dump_current_heap_snapshot ();
4487 if (heap_shot_write_job_should_be_created (dump_heap_data)) {
4488 ProfilerPerThreadData *data;
4489 // Update all mappings, so that we have built all the class descriptors.
4490 flush_all_mappings ();
4491 // Also write all event buffers, so that allocations are recorded.
4492 for (data = profiler->per_thread_data; data != NULL; data = data->next) {
4493 write_thread_data_block (data);
4499 case MONO_GC_EVENT_MARK_END: {
4500 ProfilerHeapShotWriteJob *job;
4501 ProfilerPerThreadData *data;
4503 if (heap_shot_write_job_should_be_created (dump_heap_data)) {
4504 job = profiler_heap_shot_write_job_new (profiler->heap_shot_was_signalled, dump_heap_data, profiler->garbage_collection_counter);
4505 profiler->heap_shot_was_signalled = FALSE;
4506 MONO_PROFILER_GET_CURRENT_COUNTER (job->start_counter);
4507 MONO_PROFILER_GET_CURRENT_TIME (job->start_time);
/* Scan objects already tracked from previous collections. */
4512 profiler_heap_scan (&(profiler->heap), job);
/* Process objects allocated since the last collection (per-thread
 * allocation buffers): live ones join the tracked heap, dead ones
 * are reported unreachable. */
4514 for (data = profiler->per_thread_data; data != NULL; data = data->next) {
4515 ProfilerHeapShotObjectBuffer *buffer;
4516 for (buffer = data->heap_shot_object_buffers; buffer != NULL; buffer = buffer->next) {
4517 MonoObject **cursor;
4518 for (cursor = buffer->first_unprocessed_slot; cursor < buffer->next_free_slot; cursor ++) {
4519 MonoObject *obj = *cursor;
4520 #if DEBUG_HEAP_PROFILER
4521 printf ("gc_event: in object buffer %p(%p-%p) cursor at %p has object %p ", buffer, &(buffer->buffer [0]), buffer->end, cursor, obj);
4523 if (mono_object_is_alive (obj)) {
4524 #if DEBUG_HEAP_PROFILER
4525 printf ("(object is alive, adding to heap)\n");
4527 profiler_heap_add_object (&(profiler->heap), job, obj);
4529 #if DEBUG_HEAP_PROFILER
4530 printf ("(object is unreachable, reporting in job)\n");
4532 profiler_heap_report_object_unreachable (job, obj);
4535 buffer->first_unprocessed_slot = cursor;
/* Finalize the job and wake the writer thread to flush it. */
4540 MONO_PROFILER_GET_CURRENT_COUNTER (job->end_counter);
4541 MONO_PROFILER_GET_CURRENT_TIME (job->end_time);
4543 profiler_add_heap_shot_write_job (job);
4544 profiler_free_heap_shot_write_jobs ();
4545 WRITER_EVENT_RAISE ();
/*
 * gc_event:
 * Mono GC-event callback.  Records a GC event (collection counter packed
 * with the generation in the low 8 bits) into the per-thread event
 * buffer, and drives heap profiling via handle_heap_profiling().
 * Ordering matters: for POST_STOP_WORLD the heap handler runs *before*
 * the event is stored; for all other events it runs after.
 */
4555 gc_event (MonoProfiler *profiler, MonoGCEvent ev, int generation) {
4556 ProfilerPerThreadData *data;
4557 ProfilerEventData *event;
4558 gboolean do_heap_profiling = profiler->action_flags.unreachable_objects || profiler->action_flags.heap_shot || profiler->action_flags.collection_summary;
4559 guint32 event_value;
4561 GET_PROFILER_THREAD_DATA (data);
4562 GET_NEXT_FREE_EVENT (data, event);
4564 if (ev == MONO_GC_EVENT_START) {
4565 profiler->garbage_collection_counter ++;
/* Pack: collection number in the high bits, generation in the low 8. */
4568 event_value = (profiler->garbage_collection_counter << 8) | generation;
4570 if (do_heap_profiling && (ev == MONO_GC_EVENT_POST_STOP_WORLD)) {
4571 handle_heap_profiling (profiler, ev);
4573 STORE_EVENT_NUMBER_COUNTER (event, profiler, event_value, MONO_PROFILER_EVENT_DATA_TYPE_OTHER, gc_event_code_from_profiler_event (ev), gc_event_kind_from_profiler_event (ev));
4574 if (do_heap_profiling && (ev != MONO_GC_EVENT_POST_STOP_WORLD)) {
4575 handle_heap_profiling (profiler, ev);
/*
 * gc_resize:
 * Mono GC heap-resize callback: bumps the collection counter and records
 * a GC_RESIZE event carrying the new heap size in the per-thread buffer.
 */
4580 gc_resize (MonoProfiler *profiler, gint64 new_size) {
4581 ProfilerPerThreadData *data;
4582 ProfilerEventData *event;
4583 GET_PROFILER_THREAD_DATA (data);
4584 GET_NEXT_FREE_EVENT (data, event);
4585 profiler->garbage_collection_counter ++;
4586 STORE_EVENT_NUMBER_VALUE (event, profiler, new_size, MONO_PROFILER_EVENT_DATA_TYPE_OTHER, MONO_PROFILER_EVENT_GC_RESIZE, 0, profiler->garbage_collection_counter);
/*
 * runtime_initialized:
 * Called once the Mono runtime is fully up.  Wakes the writer thread so
 * it can attach to the runtime, waits for it to signal completion, then
 * registers the managed internal calls that let user code control the
 * profiler (enable/disable, heap-snapshot request).
 */
4590 runtime_initialized (MonoProfiler *profiler) {
4591 LOG_WRITER_THREAD ("runtime_initialized: waking writer thread to enable it...\n");
4592 WRITER_EVENT_ENABLE_RAISE ();
4593 LOG_WRITER_THREAD ("runtime_initialized: waiting writer thread...\n");
4594 WRITER_EVENT_DONE_WAIT ();
4595 LOG_WRITER_THREAD ("runtime_initialized: writer thread enabled.\n");
4596 mono_add_internal_call ("Mono.Profiler.RuntimeControls::EnableProfiler", enable_profiler);
4597 mono_add_internal_call ("Mono.Profiler.RuntimeControls::DisableProfiler", disable_profiler);
4598 mono_add_internal_call ("Mono.Profiler.RuntimeControls::TakeHeapSnapshot", request_heap_snapshot);
4599 LOG_WRITER_THREAD ("runtime_initialized: initialized internal calls.\n");
4602 /* called at the end of the program */
/*
 * profiler_shutdown:
 * Final teardown, installed via mono_profiler_install():
 *  1. stop event delivery, terminate and join the writer thread;
 *  2. flush all pending data and record end timestamps;
 *  3. free every owned resource (file name, id mappings, hash tables,
 *     per-thread data, statistical buffers, executable regions, heap
 *     buffers, write buffers, pending write jobs, the mutex);
 *  4. close the oprofile agent if it was opened.
 */
4604 profiler_shutdown (MonoProfiler *prof)
4606 ProfilerPerThreadData* current_thread_data;
4607 ProfilerPerThreadData* next_thread_data;
4609 LOG_WRITER_THREAD ("profiler_shutdown: zeroing relevant flags");
4610 mono_profiler_set_events (0);
4611 //profiler->flags = 0;
4612 //profiler->action_flags.unreachable_objects = FALSE;
4613 //profiler->action_flags.heap_shot = FALSE;
4615 LOG_WRITER_THREAD ("profiler_shutdown: asking stats thread to exit");
4616 profiler->terminate_writer_thread = TRUE;
4617 WRITER_EVENT_RAISE ();
4618 LOG_WRITER_THREAD ("profiler_shutdown: waiting for stats thread to exit");
4619 WAIT_WRITER_THREAD ();
4620 LOG_WRITER_THREAD ("profiler_shutdown: stats thread should be dead now");
4621 WRITER_EVENT_DESTROY ();
4624 flush_everything ();
4625 MONO_PROFILER_GET_CURRENT_TIME (profiler->end_time);
4626 MONO_PROFILER_GET_CURRENT_COUNTER (profiler->end_counter);
4632 g_free (profiler->file_name);
4633 if (profiler->file_name_suffix != NULL) {
4634 g_free (profiler->file_name_suffix);
4637 method_id_mapping_destroy (profiler->methods);
4638 class_id_mapping_destroy (profiler->classes);
4639 g_hash_table_destroy (profiler->loaded_assemblies);
4640 g_hash_table_destroy (profiler->loaded_modules);
4641 g_hash_table_destroy (profiler->loaded_appdomains);
4643 FREE_PROFILER_THREAD_DATA ();
4645 for (current_thread_data = profiler->per_thread_data; current_thread_data != NULL; current_thread_data = next_thread_data) {
4646 next_thread_data = current_thread_data->next;
4647 profiler_per_thread_data_destroy (current_thread_data);
4649 if (profiler->statistical_data != NULL) {
4650 profiler_statistical_data_destroy (profiler->statistical_data);
4652 if (profiler->statistical_data_ready != NULL) {
4653 profiler_statistical_data_destroy (profiler->statistical_data_ready);
4655 if (profiler->statistical_data_second_buffer != NULL) {
4656 profiler_statistical_data_destroy (profiler->statistical_data_second_buffer);
4658 if (profiler->executable_regions != NULL) {
4659 profiler_executable_memory_regions_destroy (profiler->executable_regions);
4662 profiler_heap_buffers_free (&(profiler->heap));
4663 if (profiler->heap_shot_command_file_name != NULL) {
4664 g_free (profiler->heap_shot_command_file_name);
4667 profiler_free_write_buffers ();
4668 profiler_destroy_heap_shot_write_jobs ();
4670 DELETE_PROFILER_MUTEX ();
/* Shut down the oprofile agent if "oprofile" was requested
 * (op_close_agent call is on an elided line). */
4673 if (profiler->action_flags.oprofile) {
#ifndef PLATFORM_WIN32
/*
 * parse_signal_name:
 * Map a signal name ("SIGUSR1", "SIGUSR2", "SIGPROF", case-insensitive)
 * to its signal number (returns on elided lines); any other string is
 * parsed as a decimal number via atoi (so unknown names yield 0).
 */
4684 parse_signal_name (const char *signal_name) {
4685 if (! strcasecmp (signal_name, "SIGUSR1")) {
4687 } else if (! strcasecmp (signal_name, "SIGUSR2")) {
4689 } else if (! strcasecmp (signal_name, "SIGPROF")) {
4692 return atoi (signal_name);
/*
 * check_signal_number:
 * Validate that @signal_number is usable by the profiler: SIGUSR1,
 * SIGUSR2, or SIGPROF when statistical sampling is not already using it.
 * (The TRUE/FALSE returns are on elided lines.)
 */
4696 check_signal_number (int signal_number) {
4697 if (((signal_number == SIGPROF) && ! (profiler->flags & MONO_PROFILE_STATISTICAL)) ||
4698 (signal_number == SIGUSR1) ||
4699 (signal_number == SIGUSR2)) {
/* Default option string used when the user supplies none ("s" enables
 * statistical sampling). */
4707 #define DEFAULT_ARGUMENTS "s"
/*
 * setup_user_options:
 * Parse the profiler's command-line option string (comma-separated
 * "name=value" pairs and bare flags), filling in profiler->flags,
 * profiler->action_flags, buffer sizes, output file name, and (on
 * non-Windows) the GC-request / toggle signal handlers.  Falls back to
 * DEFAULT_ARGUMENTS when @arguments is NULL or empty after the ':'.
 */
4709 setup_user_options (const char *arguments) {
4710 gchar **arguments_array, **current_argument;
4711 #ifndef PLATFORM_WIN32
4712 int gc_request_signal_number = 0;
4713 int toggle_signal_number = 0;
4715 detect_fast_timer ();
/* Defaults before any options are applied. */
4717 profiler->file_name = NULL;
4718 profiler->file_name_suffix = NULL;
4719 profiler->per_thread_buffer_size = 10000;
4720 profiler->statistical_buffer_size = 10000;
4721 profiler->statistical_call_chain_depth = 0;
4722 profiler->write_buffer_size = 1024;
4723 profiler->heap_shot_command_file_name = NULL;
4724 profiler->dump_next_heap_snapshots = 0;
4725 profiler->heap_shot_command_file_access_time = 0;
4726 profiler->heap_shot_was_signalled = FALSE;
4727 profiler->flags = MONO_PROFILE_APPDOMAIN_EVENTS|
4728 MONO_PROFILE_ASSEMBLY_EVENTS|
4729 MONO_PROFILE_MODULE_EVENTS|
4730 MONO_PROFILE_CLASS_EVENTS|
4731 MONO_PROFILE_METHOD_EVENTS|
4732 MONO_PROFILE_JIT_COMPILATION;
4733 profiler->profiler_enabled = TRUE;
/* The option string may be "module:options"; skip past the ':'. */
4735 if (arguments == NULL) {
4736 arguments = DEFAULT_ARGUMENTS;
4737 } else if (strstr (arguments, ":")) {
4738 arguments = strstr (arguments, ":") + 1;
4739 if (arguments [0] == 0) {
4740 arguments = DEFAULT_ARGUMENTS;
4744 arguments_array = g_strsplit (arguments, ",", -1);
4746 for (current_argument = arguments_array; ((current_argument != NULL) && (current_argument [0] != 0)); current_argument ++) {
4747 char *argument = *current_argument;
4748 char *equals = strstr (argument, "=");
/* Valued options ("name=value"): matched by prefix up to the '='
 * against the long and short spellings. */
4750 if (equals != NULL) {
4751 int equals_position = equals - argument;
4753 if (! (strncmp (argument, "per-thread-buffer-size", equals_position) && strncmp (argument, "tbs", equals_position))) {
4754 int value = atoi (equals + 1);
4756 profiler->per_thread_buffer_size = value;
4758 } else if (! (strncmp (argument, "statistical", equals_position) && strncmp (argument, "stat", equals_position) && strncmp (argument, "s", equals_position))) {
4759 int value = atoi (equals + 1);
4764 profiler->statistical_call_chain_depth = value;
4765 profiler->flags |= MONO_PROFILE_STATISTICAL|MONO_PROFILE_JIT_COMPILATION;
4767 } else if (! (strncmp (argument, "statistical-thread-buffer-size", equals_position) && strncmp (argument, "sbs", equals_position))) {
4768 int value = atoi (equals + 1);
4770 profiler->statistical_buffer_size = value;
4772 } else if (! (strncmp (argument, "write-buffer-size", equals_position) && strncmp (argument, "wbs", equals_position))) {
4773 int value = atoi (equals + 1);
4775 profiler->write_buffer_size = value;
4777 } else if (! (strncmp (argument, "output", equals_position) && strncmp (argument, "out", equals_position) && strncmp (argument, "o", equals_position) && strncmp (argument, "O", equals_position))) {
4778 if (strlen (equals + 1) > 0) {
4779 profiler->file_name = g_strdup (equals + 1);
4781 } else if (! (strncmp (argument, "output-suffix", equals_position) && strncmp (argument, "suffix", equals_position) && strncmp (argument, "os", equals_position) && strncmp (argument, "OS", equals_position))) {
4782 if (strlen (equals + 1) > 0) {
4783 profiler->file_name_suffix = g_strdup (equals + 1);
4785 } else if (! (strncmp (argument, "gc-commands", equals_position) && strncmp (argument, "gc-c", equals_position) && strncmp (argument, "gcc", equals_position))) {
4786 if (strlen (equals + 1) > 0) {
4787 profiler->heap_shot_command_file_name = g_strdup (equals + 1);
4789 } else if (! (strncmp (argument, "gc-dumps", equals_position) && strncmp (argument, "gc-d", equals_position) && strncmp (argument, "gcd", equals_position))) {
4790 if (strlen (equals + 1) > 0) {
4791 profiler->dump_next_heap_snapshots = atoi (equals + 1);
4793 #ifndef PLATFORM_WIN32
4794 } else if (! (strncmp (argument, "gc-signal", equals_position) && strncmp (argument, "gc-s", equals_position) && strncmp (argument, "gcs", equals_position))) {
4795 if (strlen (equals + 1) > 0) {
4796 char *signal_name = equals + 1;
4797 gc_request_signal_number = parse_signal_name (signal_name);
4799 } else if (! (strncmp (argument, "toggle-signal", equals_position) && strncmp (argument, "ts", equals_position))) {
4800 if (strlen (equals + 1) > 0) {
4801 char *signal_name = equals + 1;
4802 toggle_signal_number = parse_signal_name (signal_name);
4806 g_warning ("Cannot parse valued argument %s\n", argument);
/* Bare flag options: exact match on long and short spellings. */
4809 if (! (strcmp (argument, "jit") && strcmp (argument, "j"))) {
4810 profiler->flags |= MONO_PROFILE_JIT_COMPILATION;
4811 profiler->action_flags.jit_time = TRUE;
4812 } else if (! (strcmp (argument, "allocations") && strcmp (argument, "alloc") && strcmp (argument, "a"))) {
4813 profiler->flags |= MONO_PROFILE_ALLOCATIONS|MONO_PROFILE_GC;
4814 } else if (! (strcmp (argument, "gc") && strcmp (argument, "g"))) {
4815 profiler->flags |= MONO_PROFILE_GC;
4816 } else if (! (strcmp (argument, "allocations-summary") && strcmp (argument, "as"))) {
4817 profiler->flags |= MONO_PROFILE_ALLOCATIONS|MONO_PROFILE_GC;
4818 profiler->action_flags.collection_summary = TRUE;
4819 } else if (! (strcmp (argument, "heap-shot") && strcmp (argument, "heap") && strcmp (argument, "h"))) {
4820 profiler->flags |= MONO_PROFILE_ALLOCATIONS|MONO_PROFILE_GC;
4821 profiler->action_flags.heap_shot = TRUE;
4822 } else if (! (strcmp (argument, "unreachable") && strcmp (argument, "free") && strcmp (argument, "f"))) {
4823 profiler->flags |= MONO_PROFILE_ALLOCATIONS|MONO_PROFILE_GC;
4824 profiler->action_flags.unreachable_objects = TRUE;
4825 } else if (! (strcmp (argument, "threads") && strcmp (argument, "t"))) {
4826 profiler->flags |= MONO_PROFILE_THREADS;
4827 } else if (! (strcmp (argument, "enter-leave") && strcmp (argument, "calls") && strcmp (argument, "c"))) {
4828 profiler->flags |= MONO_PROFILE_ENTER_LEAVE;
4829 profiler->action_flags.jit_time = TRUE;
4830 profiler->action_flags.track_calls = TRUE;
4831 } else if (! (strcmp (argument, "statistical") && strcmp (argument, "stat") && strcmp (argument, "s"))) {
4832 profiler->flags |= MONO_PROFILE_STATISTICAL;
4833 } else if (! (strcmp (argument, "track-stack") && strcmp (argument, "ts"))) {
4834 profiler->flags |= MONO_PROFILE_ENTER_LEAVE;
4835 profiler->action_flags.track_stack = TRUE;
4836 profiler->action_flags.save_allocation_caller = TRUE;
4837 } else if (! (strcmp (argument, "save-allocation-stack") && strcmp (argument, "sas"))) {
4838 profiler->flags |= MONO_PROFILE_ENTER_LEAVE;
4839 profiler->action_flags.track_stack = TRUE;
4840 profiler->action_flags.save_allocation_stack = TRUE;
4841 } else if (! (strcmp (argument, "allocations-carry-id") && strcmp (argument, "aci"))) {
4842 profiler->action_flags.allocations_carry_id = TRUE;
4843 } else if (! (strcmp (argument, "start-enabled") && strcmp (argument, "se"))) {
4844 profiler->profiler_enabled = TRUE;
4845 } else if (! (strcmp (argument, "start-disabled") && strcmp (argument, "sd"))) {
4846 profiler->profiler_enabled = FALSE;
4847 } else if (! (strcmp (argument, "force-accurate-timer") && strcmp (argument, "fac"))) {
4848 use_fast_timer = FALSE;
4850 } else if (! (strcmp (argument, "oprofile") && strcmp (argument, "oprof"))) {
4851 profiler->flags |= MONO_PROFILE_JIT_COMPILATION;
4852 profiler->action_flags.oprofile = TRUE;
4853 if (op_open_agent ()) {
4854 g_warning ("Problem calling op_open_agent\n");
4857 } else if (strcmp (argument, "logging")) {
4858 g_warning ("Cannot parse flag argument %s\n", argument);
4863 g_free (arguments_array);
/* Install the requested signal handlers; each must be valid and the
 * two signals must differ. */
4865 #ifndef PLATFORM_WIN32
4866 if (gc_request_signal_number != 0) {
4867 if (check_signal_number (gc_request_signal_number) && (gc_request_signal_number != toggle_signal_number)) {
4868 add_gc_request_handler (gc_request_signal_number);
4870 g_error ("Cannot use signal %d", gc_request_signal_number);
4873 if (toggle_signal_number != 0) {
4874 if (check_signal_number (toggle_signal_number) && (toggle_signal_number != gc_request_signal_number)) {
4875 add_toggle_handler (toggle_signal_number);
/* NOTE(review): copy-paste — this error should report
 * toggle_signal_number, not gc_request_signal_number. */
4877 g_error ("Cannot use signal %d", gc_request_signal_number);
/* No explicit output file: derive "<basename>[-suffix].mprof" from the
 * program name, stripping directories and the extension. */
4882 if (profiler->file_name == NULL) {
4883 char *program_name = g_get_prgname ();
4885 if (program_name != NULL) {
4886 char *name_buffer = g_strdup (program_name);
4887 char *name_start = name_buffer;
4890 /* Jump over the last '/' */
4891 cursor = strrchr (name_buffer, '/');
4892 if (cursor == NULL) {
4893 cursor = name_buffer;
4897 name_start = cursor;
4899 /* Then jump over the last '\\' */
4900 cursor = strrchr (name_start, '\\');
4901 if (cursor == NULL) {
4902 cursor = name_start;
4906 name_start = cursor;
4908 /* Finally, find the last '.' */
4909 cursor = strrchr (name_start, '.');
4910 if (cursor != NULL) {
4914 if (profiler->file_name_suffix == NULL) {
4915 profiler->file_name = g_strdup_printf ("%s.mprof", name_start);
4917 profiler->file_name = g_strdup_printf ("%s-%s.mprof", name_start, profiler->file_name_suffix);
4919 g_free (name_buffer);
4921 profiler->file_name = g_strdup_printf ("%s.mprof", "profiler-log");
/*
 * thread_detach_callback:
 * Manage-callback installed on the writer thread: asks the writer
 * thread to detach itself from the runtime by setting the flag and
 * waking it up.
 */
4927 thread_detach_callback (MonoThread *thread) {
4928 LOG_WRITER_THREAD ("thread_detach_callback: asking writer thread to detach");
4929 profiler->detach_writer_thread = TRUE;
4930 WRITER_EVENT_RAISE ();
4931 LOG_WRITER_THREAD ("thread_detach_callback: done");
/*
 * data_writer_thread:
 * Body of the profiler's background writer thread.  Lifecycle:
 *  1. wait for the enable event, then attach to the runtime's root
 *     domain (or mark itself attached+detached if the program already
 *     finished);
 *  2. loop (enclosing loop construct is on elided lines): sleep on the
 *     writer event, then on wakeup run any requested GC, flush mappings,
 *     statistical data and heap-shot write jobs under the profiler lock;
 *  3. honour detach requests (flush, mono_thread_detach) and exit when
 *     terminate_writer_thread is set.
 */
4936 data_writer_thread (gpointer nothing) {
4937 static gboolean thread_attached = FALSE;
4938 static gboolean thread_detached = FALSE;
4939 static MonoThread *this_thread = NULL;
4941 /* Wait for the OK to attach to the runtime */
4942 WRITER_EVENT_ENABLE_WAIT ();
4943 if (! profiler->terminate_writer_thread) {
4944 MonoDomain * root_domain = mono_get_root_domain ();
4945 if (root_domain != NULL) {
4946 LOG_WRITER_THREAD ("data_writer_thread: attaching thread");
4947 this_thread = mono_thread_attach (root_domain);
4948 mono_thread_set_manage_callback (this_thread, thread_detach_callback);
4949 thread_attached = TRUE;
4951 g_error ("Cannot get root domain\n");
4954 /* Execution was too short, pretend we attached and detached. */
4955 thread_attached = TRUE;
4956 thread_detached = TRUE;
4958 profiler->writer_thread_enabled = TRUE;
4959 /* Notify that we are attached to the runtime */
4960 WRITER_EVENT_DONE_RAISE ();
4963 ProfilerStatisticalData *statistical_data;
4966 LOG_WRITER_THREAD ("data_writer_thread: going to sleep");
4967 WRITER_EVENT_WAIT ();
4968 LOG_WRITER_THREAD ("data_writer_thread: just woke up");
/* A heap snapshot was requested (signal/internal call): force a full
 * collection so the GC events produce the snapshot. */
4970 if (profiler->heap_shot_was_signalled) {
4971 LOG_WRITER_THREAD ("data_writer_thread: starting requested collection");
4972 mono_gc_collect (mono_gc_max_generation ());
4973 LOG_WRITER_THREAD ("data_writer_thread: requested collection done");
4976 statistical_data = profiler->statistical_data_ready;
4977 done = (statistical_data == NULL) && (profiler->heap_shot_write_jobs == NULL) && (profiler->writer_thread_flush_everything == FALSE);
4979 if ((!done) && thread_attached) {
4980 if (profiler->writer_thread_flush_everything) {
4981 /* Note that this assumes the lock is held by the thread that woke us up! */
4982 if (! thread_detached) {
4983 LOG_WRITER_THREAD ("data_writer_thread: flushing everything...");
4984 flush_everything ();
4985 profiler->writer_thread_flush_everything = FALSE;
4986 WRITER_EVENT_DONE_RAISE ();
4987 LOG_WRITER_THREAD ("data_writer_thread: flushed everything.");
4989 LOG_WRITER_THREAD ("data_writer_thread: flushing requested, but thread is detached...");
4990 profiler->writer_thread_flush_everything = FALSE;
4991 WRITER_EVENT_DONE_RAISE ();
4992 LOG_WRITER_THREAD ("data_writer_thread: done event raised.");
4995 LOG_WRITER_THREAD ("data_writer_thread: acquiring lock and writing data");
4998 // This makes sure that all method ids are in place
4999 LOG_WRITER_THREAD ("data_writer_thread: writing mapping...");
5000 flush_all_mappings ();
5001 LOG_WRITER_THREAD ("data_writer_thread: wrote mapping");
/* Swap the ready statistical buffer back in as the second buffer
 * once its contents have been written out. */
5003 if ((statistical_data != NULL) && ! thread_detached) {
5004 LOG_WRITER_THREAD ("data_writer_thread: writing statistical data...");
5005 profiler->statistical_data_ready = NULL;
5006 write_statistical_data_block (statistical_data);
5007 statistical_data->next_free_index = 0;
5008 statistical_data->first_unwritten_index = 0;
5009 profiler->statistical_data_second_buffer = statistical_data;
5010 LOG_WRITER_THREAD ("data_writer_thread: wrote statistical data");
5013 profiler_process_heap_shot_write_jobs ();
5016 LOG_WRITER_THREAD ("data_writer_thread: wrote data and released lock");
/* Not attached: still acknowledge flush requests so the requester
 * does not block forever on the done event. */
5019 if (profiler->writer_thread_flush_everything) {
5020 LOG_WRITER_THREAD ("data_writer_thread: flushing requested, but thread is not attached...");
5021 profiler->writer_thread_flush_everything = FALSE;
5022 WRITER_EVENT_DONE_RAISE ();
5023 LOG_WRITER_THREAD ("data_writer_thread: done event raised.");
5027 if (profiler->detach_writer_thread) {
5028 if (this_thread != NULL) {
5029 LOG_WRITER_THREAD ("data_writer_thread: detach requested, acquiring lock and flushing data");
5031 flush_everything ();
5033 LOG_WRITER_THREAD ("data_writer_thread: flushed data and released lock");
5034 LOG_WRITER_THREAD ("data_writer_thread: detaching thread");
5035 mono_thread_detach (this_thread);
5037 profiler->detach_writer_thread = FALSE;
5038 thread_detached = TRUE;
5040 LOG_WRITER_THREAD ("data_writer_thread: warning: thread has already been detached");
5044 if (profiler->terminate_writer_thread) {
5045 LOG_WRITER_THREAD ("data_writer_thread: exiting thread");
5046 CLEANUP_WRITER_THREAD ();
5054 mono_profiler_startup (const char *desc);
5056 /* the entry point (mono_profiler_load?) */
5058 mono_profiler_startup (const char *desc)
5060 profiler = g_new0 (MonoProfiler, 1);
5062 setup_user_options ((desc != NULL) ? desc : DEFAULT_ARGUMENTS);
5064 INITIALIZE_PROFILER_MUTEX ();
5065 MONO_PROFILER_GET_CURRENT_TIME (profiler->start_time);
5066 MONO_PROFILER_GET_CURRENT_COUNTER (profiler->start_counter);
5067 profiler->last_header_counter = 0;
5069 profiler->methods = method_id_mapping_new ();
5070 profiler->classes = class_id_mapping_new ();
5071 profiler->loaded_assemblies = g_hash_table_new_full (g_direct_hash, NULL, NULL, loaded_element_destroy);
5072 profiler->loaded_modules = g_hash_table_new_full (g_direct_hash, NULL, NULL, loaded_element_destroy);
5073 profiler->loaded_appdomains = g_hash_table_new_full (g_direct_hash, NULL, NULL, loaded_element_destroy);
5075 profiler->statistical_data = profiler_statistical_data_new (profiler);
5076 profiler->statistical_data_second_buffer = profiler_statistical_data_new (profiler);
5078 profiler->write_buffers = g_malloc (sizeof (ProfilerFileWriteBuffer) + PROFILER_FILE_WRITE_BUFFER_SIZE);
5079 profiler->write_buffers->next = NULL;
5080 profiler->current_write_buffer = profiler->write_buffers;
5081 profiler->current_write_position = 0;
5082 profiler->full_write_buffers = 0;
5084 profiler->executable_regions = profiler_executable_memory_regions_new (1, 1);
5086 profiler->executable_files.table = g_hash_table_new (g_str_hash, g_str_equal);
5087 profiler->executable_files.new_files = NULL;
5089 profiler->heap_shot_write_jobs = NULL;
5090 if (profiler->action_flags.unreachable_objects || profiler->action_flags.heap_shot || profiler->action_flags.collection_summary) {
5091 profiler_heap_buffers_setup (&(profiler->heap));
5093 profiler_heap_buffers_clear (&(profiler->heap));
5095 profiler->garbage_collection_counter = 0;
5097 WRITER_EVENT_INIT ();
5098 LOG_WRITER_THREAD ("mono_profiler_startup: creating writer thread");
5099 CREATE_WRITER_THREAD (data_writer_thread);
5100 LOG_WRITER_THREAD ("mono_profiler_startup: created writer thread");
5102 ALLOCATE_PROFILER_THREAD_DATA ();
5106 write_intro_block ();
5107 write_directives_block (TRUE);
5109 mono_profiler_install (profiler, profiler_shutdown);
5111 mono_profiler_install_appdomain (appdomain_start_load, appdomain_end_load,
5112 appdomain_start_unload, appdomain_end_unload);
5113 mono_profiler_install_assembly (assembly_start_load, assembly_end_load,
5114 assembly_start_unload, assembly_end_unload);
5115 mono_profiler_install_module (module_start_load, module_end_load,
5116 module_start_unload, module_end_unload);
5117 mono_profiler_install_class (class_start_load, class_end_load,
5118 class_start_unload, class_end_unload);
5119 mono_profiler_install_jit_compile (method_start_jit, method_end_jit);
5120 mono_profiler_install_enter_leave (method_enter, method_leave);
5121 mono_profiler_install_method_free (method_free);
5122 mono_profiler_install_thread (thread_start, thread_end);
5123 mono_profiler_install_allocation (object_allocated);
5124 mono_profiler_install_statistical (statistical_hit);
5125 mono_profiler_install_statistical_call_chain (statistical_call_chain, profiler->statistical_call_chain_depth);
5126 mono_profiler_install_gc (gc_event, gc_resize);
5127 mono_profiler_install_runtime_initialized (runtime_initialized);
5129 mono_profiler_install_jit_end (method_jit_result);
5132 mono_profiler_set_events (profiler->flags);