2 #include <mono/metadata/profiler.h>
3 #include <mono/metadata/class.h>
4 #include <mono/metadata/class-internals.h>
5 #include <mono/metadata/assembly.h>
6 #include <mono/metadata/loader.h>
7 #include <mono/metadata/threads.h>
8 #include <mono/metadata/debug-helpers.h>
9 #include <mono/metadata/mono-gc.h>
10 #include <mono/io-layer/atomic.h>
19 #define HAS_OPROFILE 0
22 #include <libopagent.h>
25 // Needed for heap analysis
26 extern gboolean mono_object_is_alive (MonoObject* obj);
29 MONO_PROFILER_FILE_BLOCK_KIND_INTRO = 1,
30 MONO_PROFILER_FILE_BLOCK_KIND_END = 2,
31 MONO_PROFILER_FILE_BLOCK_KIND_MAPPING = 3,
32 MONO_PROFILER_FILE_BLOCK_KIND_LOADED = 4,
33 MONO_PROFILER_FILE_BLOCK_KIND_UNLOADED = 5,
34 MONO_PROFILER_FILE_BLOCK_KIND_EVENTS = 6,
35 MONO_PROFILER_FILE_BLOCK_KIND_STATISTICAL = 7,
36 MONO_PROFILER_FILE_BLOCK_KIND_HEAP_DATA = 8,
37 MONO_PROFILER_FILE_BLOCK_KIND_HEAP_SUMMARY = 9,
38 MONO_PROFILER_FILE_BLOCK_KIND_DIRECTIVES = 10
39 } MonoProfilerFileBlockKind;
42 MONO_PROFILER_DIRECTIVE_END = 0,
43 MONO_PROFILER_DIRECTIVE_ALLOCATIONS_CARRY_CALLER = 1,
44 MONO_PROFILER_DIRECTIVE_LAST
45 } MonoProfilerDirectives;
/* Bit flags combined into "loaded/unloaded" file-block records: the low bits
 * select the item kind (module / assembly / appdomain) and the high bits
 * record whether the operation succeeded or failed. */
#define MONO_PROFILER_LOADED_EVENT_MODULE 1
#define MONO_PROFILER_LOADED_EVENT_ASSEMBLY 2
#define MONO_PROFILER_LOADED_EVENT_APPDOMAIN 4
#define MONO_PROFILER_LOADED_EVENT_SUCCESS 8
#define MONO_PROFILER_LOADED_EVENT_FAILURE 16
55 MONO_PROFILER_EVENT_DATA_TYPE_OTHER = 0,
56 MONO_PROFILER_EVENT_DATA_TYPE_METHOD = 1,
57 MONO_PROFILER_EVENT_DATA_TYPE_CLASS = 2
58 } MonoProfilerEventDataType;
60 typedef struct _ProfilerEventData {
65 unsigned int data_type:2;
68 unsigned int value:26;
71 #define EVENT_VALUE_BITS (26)
72 #define MAX_EVENT_VALUE ((1<<EVENT_VALUE_BITS)-1)
75 MONO_PROFILER_EVENT_METHOD_JIT = 0,
76 MONO_PROFILER_EVENT_METHOD_FREED = 1,
77 MONO_PROFILER_EVENT_METHOD_CALL = 2,
78 MONO_PROFILER_EVENT_METHOD_ALLOCATION_CALLER = 3,
79 MONO_PROFILER_EVENT_METHOD_ALLOCATION_JIT_TIME_CALLER = 4
80 } MonoProfilerMethodEvents;
82 MONO_PROFILER_EVENT_CLASS_LOAD = 0,
83 MONO_PROFILER_EVENT_CLASS_UNLOAD = 1,
84 MONO_PROFILER_EVENT_CLASS_EXCEPTION = 2,
85 MONO_PROFILER_EVENT_CLASS_ALLOCATION = 3
86 } MonoProfilerClassEvents;
88 MONO_PROFILER_EVENT_RESULT_SUCCESS = 0,
89 MONO_PROFILER_EVENT_RESULT_FAILURE = 4
90 } MonoProfilerEventResult;
91 #define MONO_PROFILER_EVENT_RESULT_MASK MONO_PROFILER_EVENT_RESULT_FAILURE
93 MONO_PROFILER_EVENT_THREAD = 1,
94 MONO_PROFILER_EVENT_GC_COLLECTION = 2,
95 MONO_PROFILER_EVENT_GC_MARK = 3,
96 MONO_PROFILER_EVENT_GC_SWEEP = 4,
97 MONO_PROFILER_EVENT_GC_RESIZE = 5,
98 MONO_PROFILER_EVENT_GC_STOP_WORLD = 6,
99 MONO_PROFILER_EVENT_GC_START_WORLD = 7,
100 MONO_PROFILER_EVENT_JIT_TIME_ALLOCATION = 8
101 } MonoProfilerEvents;
103 MONO_PROFILER_EVENT_KIND_START = 0,
104 MONO_PROFILER_EVENT_KIND_END = 1
105 } MonoProfilerEventKind;
/* Store the current wall-clock time into (t) as microseconds since the epoch.
 * Fix: the gettimeofday argument had been corrupted to "¤t_time" (the
 * HTML entity for "&curren" + "t_time"); it must be &current_time.
 * The macro's closing brace is on a continuation line outside this span. */
#define MONO_PROFILER_GET_CURRENT_TIME(t) {\
	struct timeval current_time;\
	gettimeofday (&current_time, NULL);\
	(t) = (((guint64)current_time.tv_sec) * 1000000) + current_time.tv_usec;\
113 static gboolean use_fast_timer = FALSE;
115 #if (defined(__i386__) || defined(__x86_64__)) && ! defined(PLATFORM_WIN32)
117 #if defined(__i386__)
118 static const guchar cpuid_impl [] = {
119 0x55, /* push %ebp */
120 0x89, 0xe5, /* mov %esp,%ebp */
121 0x53, /* push %ebx */
122 0x8b, 0x45, 0x08, /* mov 0x8(%ebp),%eax */
123 0x0f, 0xa2, /* cpuid */
124 0x50, /* push %eax */
125 0x8b, 0x45, 0x10, /* mov 0x10(%ebp),%eax */
126 0x89, 0x18, /* mov %ebx,(%eax) */
127 0x8b, 0x45, 0x14, /* mov 0x14(%ebp),%eax */
128 0x89, 0x08, /* mov %ecx,(%eax) */
129 0x8b, 0x45, 0x18, /* mov 0x18(%ebp),%eax */
130 0x89, 0x10, /* mov %edx,(%eax) */
132 0x8b, 0x55, 0x0c, /* mov 0xc(%ebp),%edx */
133 0x89, 0x02, /* mov %eax,(%edx) */
139 typedef void (*CpuidFunc) (int id, int* p_eax, int* p_ebx, int* p_ecx, int* p_edx);
142 cpuid (int id, int* p_eax, int* p_ebx, int* p_ecx, int* p_edx) {
145 __asm__ __volatile__ (
148 "movl %%eax, %%edx\n"
149 "xorl $0x200000, %%eax\n"
154 "xorl %%edx, %%eax\n"
155 "andl $0x200000, %%eax\n"
177 CpuidFunc func = (CpuidFunc) cpuid_impl;
178 func (id, p_eax, p_ebx, p_ecx, p_edx);
180 * We use this approach because of issues with gcc and pic code, see:
181 * http://gcc.gnu.org/cgi-bin/gnatsweb.pl?cmd=view%20audit-trail&database=gcc&pr=7329
182 __asm__ __volatile__ ("cpuid"
183 : "=a" (*p_eax), "=b" (*p_ebx), "=c" (*p_ecx), "=d" (*p_edx)
191 static void detect_fast_timer (void) {
192 int p_eax, p_ebx, p_ecx, p_edx;
194 if (cpuid (0x1, &p_eax, &p_ebx, &p_ecx, &p_edx)) {
196 use_fast_timer = TRUE;
198 use_fast_timer = FALSE;
201 use_fast_timer = FALSE;
206 #if defined(__x86_64__)
207 static void detect_fast_timer (void) {
209 guint32 eax,ebx,ecx,edx;
210 __asm__ __volatile__ ("cpuid" : "=a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx) : "a"(op));
212 use_fast_timer = TRUE;
214 use_fast_timer = FALSE;
219 static __inline__ guint64 rdtsc(void) {
221 __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi));
222 return ((guint64) lo) | (((guint64) hi) << 32);
224 #define MONO_PROFILER_GET_CURRENT_COUNTER(c) {\
225 if (use_fast_timer) {\
228 MONO_PROFILER_GET_CURRENT_TIME ((c));\
232 static void detect_fast_timer (void) {
233 use_fast_timer = FALSE;
235 #define MONO_PROFILER_GET_CURRENT_COUNTER(c) MONO_PROFILER_GET_CURRENT_TIME ((c))
239 #define CLASS_LAYOUT_PACKED_BITMAP_SIZE 64
240 #define CLASS_LAYOUT_NOT_INITIALIZED (0xFFFF)
243 HEAP_CODE_OBJECT = 1,
244 HEAP_CODE_FREE_OBJECT_CLASS = 2,
246 } HeapProfilerJobValueCode;
247 typedef struct _MonoProfilerClassData {
256 } MonoProfilerClassData;
258 typedef struct _MonoProfilerMethodData {
261 } MonoProfilerMethodData;
263 typedef struct _ClassIdMappingElement {
267 struct _ClassIdMappingElement *next_unwritten;
268 MonoProfilerClassData data;
269 } ClassIdMappingElement;
271 typedef struct _MethodIdMappingElement {
275 struct _MethodIdMappingElement *next_unwritten;
276 MonoProfilerMethodData data;
277 } MethodIdMappingElement;
279 typedef struct _ClassIdMapping {
281 ClassIdMappingElement *unwritten;
285 typedef struct _MethodIdMapping {
287 MethodIdMappingElement *unwritten;
291 typedef struct _LoadedElement {
293 guint64 load_start_counter;
294 guint64 load_end_counter;
295 guint64 unload_start_counter;
296 guint64 unload_end_counter;
300 guint8 unload_written;
303 #define PROFILER_HEAP_SHOT_OBJECT_BUFFER_SIZE 1024
304 #define PROFILER_HEAP_SHOT_HEAP_BUFFER_SIZE 4096
305 #define PROFILER_HEAP_SHOT_WRITE_BUFFER_SIZE 4096
307 typedef struct _ProfilerHeapShotObjectBuffer {
308 struct _ProfilerHeapShotObjectBuffer *next;
309 MonoObject **next_free_slot;
311 MonoObject **first_unprocessed_slot;
312 MonoObject *buffer [PROFILER_HEAP_SHOT_OBJECT_BUFFER_SIZE];
313 } ProfilerHeapShotObjectBuffer;
315 typedef struct _ProfilerHeapShotHeapBuffer {
316 struct _ProfilerHeapShotHeapBuffer *next;
317 struct _ProfilerHeapShotHeapBuffer *previous;
318 MonoObject **start_slot;
319 MonoObject **end_slot;
320 MonoObject *buffer [PROFILER_HEAP_SHOT_HEAP_BUFFER_SIZE];
321 } ProfilerHeapShotHeapBuffer;
323 typedef struct _ProfilerHeapShotHeapBuffers {
324 ProfilerHeapShotHeapBuffer *buffers;
325 ProfilerHeapShotHeapBuffer *last;
326 ProfilerHeapShotHeapBuffer *current;
327 MonoObject **first_free_slot;
328 } ProfilerHeapShotHeapBuffers;
331 typedef struct _ProfilerHeapShotWriteBuffer {
332 struct _ProfilerHeapShotWriteBuffer *next;
333 gpointer buffer [PROFILER_HEAP_SHOT_WRITE_BUFFER_SIZE];
334 } ProfilerHeapShotWriteBuffer;
336 typedef struct _ProfilerHeapShotClassSummary {
345 } ProfilerHeapShotClassSummary;
347 typedef struct _ProfilerHeapShotCollectionSummary {
348 ProfilerHeapShotClassSummary *per_class_data;
350 } ProfilerHeapShotCollectionSummary;
352 typedef struct _ProfilerHeapShotWriteJob {
353 struct _ProfilerHeapShotWriteJob *next;
354 struct _ProfilerHeapShotWriteJob *next_unwritten;
358 ProfilerHeapShotWriteBuffer *buffers;
359 ProfilerHeapShotWriteBuffer **last_next;
360 guint32 full_buffers;
361 gboolean heap_shot_was_signalled;
362 guint64 start_counter;
367 ProfilerHeapShotCollectionSummary summary;
368 gboolean dump_heap_data;
369 } ProfilerHeapShotWriteJob;
371 typedef struct _ProfilerThreadStack {
375 guint8 *method_is_jitted;
376 } ProfilerThreadStack;
378 typedef struct _ProfilerPerThreadData {
379 ProfilerEventData *events;
380 ProfilerEventData *next_free_event;
381 ProfilerEventData *end_event;
382 ProfilerEventData *first_unwritten_event;
383 ProfilerEventData *first_unmapped_event;
384 guint64 start_event_counter;
385 guint64 last_event_counter;
387 ProfilerHeapShotObjectBuffer *heap_shot_object_buffers;
388 ProfilerThreadStack stack;
389 struct _ProfilerPerThreadData* next;
390 } ProfilerPerThreadData;
392 typedef struct _ProfilerStatisticalHit {
395 } ProfilerStatisticalHit;
397 typedef struct _ProfilerStatisticalData {
398 ProfilerStatisticalHit *hits;
401 int first_unwritten_index;
402 } ProfilerStatisticalData;
404 typedef struct _ProfilerUnmanagedSymbol {
409 } ProfilerUnmanagedSymbol;
411 struct _ProfilerExecutableFile;
413 typedef struct _ProfilerExecutableMemoryRegionData {
421 struct _ProfilerExecutableFile *file;
422 guint32 symbols_count;
423 guint32 symbols_capacity;
424 ProfilerUnmanagedSymbol *symbols;
425 } ProfilerExecutableMemoryRegionData;
427 typedef struct _ProfilerExecutableMemoryRegions {
428 ProfilerExecutableMemoryRegionData **regions;
429 guint32 regions_capacity;
430 guint32 regions_count;
432 guint32 next_unmanaged_function_id;
433 } ProfilerExecutableMemoryRegions;
435 /* Start of ELF definitions */
437 typedef guint16 ElfHalf;
438 typedef guint32 ElfWord;
439 typedef gsize ElfAddr;
440 typedef gsize ElfOff;
443 unsigned char e_ident[EI_NIDENT];
449 ElfOff e_shoff; // Section header table
451 ElfHalf e_ehsize; // Header size
454 ElfHalf e_shentsize; // Section header entry size
455 ElfHalf e_shnum; // Section header entries number
456 ElfHalf e_shstrndx; // String table index
459 #if (SIZEOF_VOID_P == 4)
464 ElfAddr sh_addr; // Address in memory
465 ElfOff sh_offset; // Offset in file
469 ElfWord sh_addralign;
476 unsigned char st_info; // Use ELF32_ST_TYPE to get symbol type
477 unsigned char st_other;
478 ElfHalf st_shndx; // Or one of SHN_ABS, SHN_COMMON or SHN_UNDEF.
480 #elif (SIZEOF_VOID_P == 8)
485 ElfAddr sh_addr; // Address in memory
486 ElfOff sh_offset; // Offset in file
495 unsigned char st_info; // Use ELF_ST_TYPE to get symbol type
496 unsigned char st_other;
497 ElfHalf st_shndx; // Or one of SHN_ABS, SHN_COMMON or SHN_UNDEF.
502 #error Bad size of void pointer
/* Split an ELF symbol's st_info byte: binding lives in the high nibble,
 * type in the low nibble (same packing for ELF32 and ELF64). */
#define ELF_ST_BIND(i) ((i)>>4)
#define ELF_ST_TYPE(i) ((i)&0xf)
520 ELF_FILE_TYPE_NONE = 0,
521 ELF_FILE_TYPE_REL = 1,
522 ELF_FILE_TYPE_EXEC = 2,
523 ELF_FILE_TYPE_DYN = 3,
524 ELF_FILE_TYPE_CORE = 4
541 ELF_SHT_PROGBITS = 1,
565 ELF_SHF_EXECINSTR = 4,
/* Reserved ELF section-header index values; symbols whose st_shndx falls in
 * this range (ABS, COMMON, UNDEF, ...) are not located in a real section. */
#define ELF_SHN_UNDEF 0
#define ELF_SHN_LORESERVE 0xff00
#define ELF_SHN_LOPROC 0xff00
#define ELF_SHN_HIPROC 0xff1f
#define ELF_SHN_ABS 0xfff1
#define ELF_SHN_COMMON 0xfff2
#define ELF_SHN_HIRESERVE 0xffff
575 /* End of ELF definitions */
577 typedef struct _ProfilerExecutableFileSectionRegion {
578 ProfilerExecutableMemoryRegionData *region;
579 guint8 *section_address;
580 gsize section_offset;
581 } ProfilerExecutableFileSectionRegion;
583 typedef struct _ProfilerExecutableFile {
584 guint32 reference_count;
586 /* Used for mmap and munmap */
593 guint8 *symbols_start;
594 guint32 symbols_count;
596 const char *symbols_string_table;
597 const char *main_string_table;
599 ProfilerExecutableFileSectionRegion *section_regions;
601 struct _ProfilerExecutableFile *next_new_file;
602 } ProfilerExecutableFile;
604 typedef struct _ProfilerExecutableFiles {
606 ProfilerExecutableFile *new_files;
607 } ProfilerExecutableFiles;
/* Record that the data writer thread has exited, and test for that fact;
 * callers use CHECK_WRITER_THREAD to avoid joining/signalling a dead thread. */
#define CLEANUP_WRITER_THREAD() do {profiler->writer_thread_terminated = TRUE;} while (0)
#define CHECK_WRITER_THREAD() (! profiler->writer_thread_terminated)
613 #ifndef PLATFORM_WIN32
614 #include <sys/types.h>
615 #include <sys/time.h>
616 #include <sys/stat.h>
620 #include <semaphore.h>
622 #include <sys/mman.h>
623 #include <sys/types.h>
624 #include <sys/stat.h>
/* pthread implementation of the single global profiler lock (the Win32
 * CRITICAL_SECTION variants appear further down in the PLATFORM_WIN32 branch). */
#define MUTEX_TYPE pthread_mutex_t
#define INITIALIZE_PROFILER_MUTEX() pthread_mutex_init (&(profiler->mutex), NULL)
#define DELETE_PROFILER_MUTEX() pthread_mutex_destroy (&(profiler->mutex))
#define LOCK_PROFILER() do {/*LOG_WRITER_THREAD ("LOCK_PROFILER");*/ pthread_mutex_lock (&(profiler->mutex));} while (0)
#define UNLOCK_PROFILER() do {/*LOG_WRITER_THREAD ("UNLOCK_PROFILER");*/ pthread_mutex_unlock (&(profiler->mutex));} while (0)
634 #define THREAD_TYPE pthread_t
635 #define CREATE_WRITER_THREAD(f) pthread_create (&(profiler->data_writer_thread), NULL, ((void*(*)(void*))f), NULL)
636 #define EXIT_THREAD() pthread_exit (NULL);
637 #define WAIT_WRITER_THREAD() do {\
638 if (CHECK_WRITER_THREAD ()) {\
639 pthread_join (profiler->data_writer_thread, NULL);\
642 #define CURRENT_THREAD_ID() (gsize) pthread_self ()
644 #ifndef HAVE_KW_THREAD
645 static pthread_key_t pthread_profiler_key;
646 static pthread_once_t profiler_pthread_once = PTHREAD_ONCE_INIT;
648 make_pthread_profiler_key (void) {
649 (void) pthread_key_create (&pthread_profiler_key, NULL);
/* Per-thread profiler data kept in a pthread TLS slot; this is the fallback
 * path used when HAVE_KW_THREAD (__thread support) is not available. */
#define LOOKUP_PROFILER_THREAD_DATA() ((ProfilerPerThreadData*) pthread_getspecific (pthread_profiler_key))
#define SET_PROFILER_THREAD_DATA(x) (void) pthread_setspecific (pthread_profiler_key, (x))
#define ALLOCATE_PROFILER_THREAD_DATA() (void) pthread_once (&profiler_pthread_once, make_pthread_profiler_key)
#define FREE_PROFILER_THREAD_DATA() (void) pthread_key_delete (pthread_profiler_key)
657 #define EVENT_TYPE sem_t
658 #define WRITER_EVENT_INIT() do {\
659 sem_init (&(profiler->enable_data_writer_event), 0, 0);\
660 sem_init (&(profiler->wake_data_writer_event), 0, 0);\
661 sem_init (&(profiler->done_data_writer_event), 0, 0);\
663 #define WRITER_EVENT_DESTROY() do {\
664 sem_destroy (&(profiler->enable_data_writer_event));\
665 sem_destroy (&(profiler->wake_data_writer_event));\
666 sem_destroy (&(profiler->done_data_writer_event));\
/* POSIX-semaphore signalling between runtime threads and the data writer
 * thread: "wake" asks the writer to flush, "enable" unblocks it at startup. */
#define WRITER_EVENT_WAIT() (void) sem_wait (&(profiler->wake_data_writer_event))
#define WRITER_EVENT_RAISE() (void) sem_post (&(profiler->wake_data_writer_event))
#define WRITER_EVENT_ENABLE_WAIT() (void) sem_wait (&(profiler->enable_data_writer_event))
#define WRITER_EVENT_ENABLE_RAISE() (void) sem_post (&(profiler->enable_data_writer_event))
672 #define WRITER_EVENT_DONE_WAIT() do {\
673 if (CHECK_WRITER_THREAD ()) {\
674 (void) sem_wait (&(profiler->done_data_writer_event));\
677 #define WRITER_EVENT_DONE_RAISE() (void) sem_post (&(profiler->done_data_writer_event))
/* Buffered-stdio implementation of the profiler output file (an alternative
 * raw-fd implementation using open/write/close follows in the other branch). */
#define FILE_HANDLE_TYPE FILE*
#define OPEN_FILE() profiler->file = fopen (profiler->file_name, "wb");
#define WRITE_BUFFER(b,s) fwrite ((b), 1, (s), profiler->file)
#define FLUSH_FILE() fflush (profiler->file)
#define CLOSE_FILE() fclose (profiler->file);
686 #define FILE_HANDLE_TYPE int
687 #define OPEN_FILE() profiler->file = open (profiler->file_name, O_WRONLY|O_CREAT|O_TRUNC, 0664);
688 #define WRITE_BUFFER(b,s) write (profiler->file, (b), (s))
690 #define CLOSE_FILE() close (profiler->file);
697 #define MUTEX_TYPE CRITICAL_SECTION
698 #define INITIALIZE_PROFILER_MUTEX() InitializeCriticalSection (&(profiler->mutex))
699 #define DELETE_PROFILER_MUTEX() DeleteCriticalSection (&(profiler->mutex))
700 #define LOCK_PROFILER() EnterCriticalSection (&(profiler->mutex))
701 #define UNLOCK_PROFILER() LeaveCriticalSection (&(profiler->mutex))
703 #define THREAD_TYPE HANDLE
704 #define CREATE_WRITER_THREAD(f) CreateThread (NULL, (1*1024*1024), (f), NULL, 0, NULL);
705 #define EXIT_THREAD() ExitThread (0);
706 #define WAIT_WRITER_THREAD() do {\
707 if (CHECK_WRITER_THREAD ()) {\
708 WaitForSingleObject (profiler->data_writer_thread, INFINITE);\
711 #define CURRENT_THREAD_ID() (gsize) GetCurrentThreadId ()
713 #ifndef HAVE_KW_THREAD
714 static guint32 profiler_thread_id = -1;
715 #define LOOKUP_PROFILER_THREAD_DATA() ((ProfilerPerThreadData*)TlsGetValue (profiler_thread_id))
716 #define SET_PROFILER_THREAD_DATA(x) TlsSetValue (profiler_thread_id, (x));
717 #define ALLOCATE_PROFILER_THREAD_DATA() profiler_thread_id = TlsAlloc ()
718 #define FREE_PROFILER_THREAD_DATA() TlsFree (profiler_thread_id)
721 #define EVENT_TYPE HANDLE
/* Create the three auto-reset Win32 events used to coordinate with the data
 * writer thread (enable at startup, wake to request work, done to confirm).
 * Fix: the original expansion began with "(void) do {", which is a syntax
 * error once the macro is expanded; a plain do-block is the correct idiom.
 * The closing "} while (0)" is on a continuation line outside this span. */
#define WRITER_EVENT_INIT() do {\
	profiler->enable_data_writer_event = CreateEvent (NULL, FALSE, FALSE, NULL);\
	profiler->wake_data_writer_event = CreateEvent (NULL, FALSE, FALSE, NULL);\
	profiler->done_data_writer_event = CreateEvent (NULL, FALSE, FALSE, NULL);\
/* Dispose the writer-thread coordination events created by WRITER_EVENT_INIT.
 * Fixes two defects in the original text: (1) WRITER_EVENT_DESTROY closed a
 * nonexistent "statistical_data_writer_event" field (the MonoProfiler struct
 * only declares enable/wake/done events), and (2) the handle-closing block
 * below it was mis-named WRITER_EVENT_INIT, redefining the macro above.
 * The closing "} while (0)" is on a continuation line outside this span. */
#define WRITER_EVENT_DESTROY() do {\
	CloseHandle (profiler->enable_data_writer_event);\
	CloseHandle (profiler->wake_data_writer_event);\
	CloseHandle (profiler->done_data_writer_event);\
733 #define WRITER_EVENT_WAIT() WaitForSingleObject (profiler->wake_data_writer_event, INFINITE)
734 #define WRITER_EVENT_RAISE() SetEvent (profiler->wake_data_writer_event)
735 #define WRITER_EVENT_ENABLE_WAIT() WaitForSingleObject (profiler->enable_data_writer_event, INFINITE)
736 #define WRITER_EVENT_ENABLE_RAISE() SetEvent (profiler->enable_data_writer_event)
737 #define WRITER_EVENT_DONE_WAIT() do {\
738 if (CHECK_WRITER_THREAD ()) {\
739 WaitForSingleObject (profiler->done_data_writer_event, INFINITE);\
742 #define WRITER_EVENT_DONE_RAISE() SetEvent (profiler->done_data_writer_event)
744 #define FILE_HANDLE_TYPE FILE*
745 #define OPEN_FILE() profiler->file = fopen (profiler->file_name, "wb");
746 #define WRITE_BUFFER(b,s) fwrite ((b), 1, (s), profiler->file)
747 #define FLUSH_FILE() fflush (profiler->file)
748 #define CLOSE_FILE() fclose (profiler->file);
752 #ifdef HAVE_KW_THREAD
753 static __thread ProfilerPerThreadData * tls_profiler_per_thread_data;
754 #define LOOKUP_PROFILER_THREAD_DATA() ((ProfilerPerThreadData*) tls_profiler_per_thread_data)
755 #define SET_PROFILER_THREAD_DATA(x) tls_profiler_per_thread_data = (x)
756 #define ALLOCATE_PROFILER_THREAD_DATA() /* nop */
757 #define FREE_PROFILER_THREAD_DATA() /* nop */
760 #define GET_PROFILER_THREAD_DATA(data) do {\
761 ProfilerPerThreadData *_result = LOOKUP_PROFILER_THREAD_DATA ();\
763 _result = profiler_per_thread_data_new (profiler->per_thread_buffer_size);\
765 _result->next = profiler->per_thread_data;\
766 profiler->per_thread_data = _result;\
768 SET_PROFILER_THREAD_DATA (_result);\
773 #define PROFILER_FILE_WRITE_BUFFER_SIZE (profiler->write_buffer_size)
774 typedef struct _ProfilerFileWriteBuffer {
775 struct _ProfilerFileWriteBuffer *next;
777 } ProfilerFileWriteBuffer;
779 #define CHECK_PROFILER_ENABLED() do {\
780 if (! profiler->profiler_enabled)\
783 struct _MonoProfiler {
786 MonoProfileFlags flags;
787 gboolean profiler_enabled;
789 char *file_name_suffix;
790 FILE_HANDLE_TYPE file;
793 guint64 start_counter;
797 guint64 last_header_counter;
799 MethodIdMapping *methods;
800 ClassIdMapping *classes;
802 GHashTable *loaded_assemblies;
803 GHashTable *loaded_modules;
804 GHashTable *loaded_appdomains;
806 guint32 per_thread_buffer_size;
807 guint32 statistical_buffer_size;
808 ProfilerPerThreadData* per_thread_data;
809 ProfilerStatisticalData *statistical_data;
810 ProfilerStatisticalData *statistical_data_ready;
811 ProfilerStatisticalData *statistical_data_second_buffer;
812 int statistical_call_chain_depth;
814 THREAD_TYPE data_writer_thread;
815 EVENT_TYPE enable_data_writer_event;
816 EVENT_TYPE wake_data_writer_event;
817 EVENT_TYPE done_data_writer_event;
818 gboolean terminate_writer_thread;
819 gboolean writer_thread_terminated;
820 gboolean detach_writer_thread;
821 gboolean writer_thread_enabled;
822 gboolean writer_thread_flush_everything;
824 ProfilerFileWriteBuffer *write_buffers;
825 ProfilerFileWriteBuffer *current_write_buffer;
826 int write_buffer_size;
827 int current_write_position;
828 int full_write_buffers;
830 ProfilerHeapShotWriteJob *heap_shot_write_jobs;
831 ProfilerHeapShotHeapBuffers heap;
833 char *heap_shot_command_file_name;
834 int dump_next_heap_snapshots;
835 guint64 heap_shot_command_file_access_time;
836 gboolean heap_shot_was_signalled;
837 guint32 garbage_collection_counter;
839 ProfilerExecutableMemoryRegions *executable_regions;
840 ProfilerExecutableFiles executable_files;
847 gboolean unreachable_objects;
848 gboolean collection_summary;
850 gboolean track_stack;
851 gboolean track_calls;
854 static MonoProfiler *profiler;
856 #ifndef PLATFORM_WIN32
859 #ifdef MONO_ARCH_USE_SIGACTION
860 #define SIG_HANDLER_SIGNATURE(ftn) ftn (int _dummy, siginfo_t *info, void *context)
861 #elif defined(__sparc__)
862 #define SIG_HANDLER_SIGNATURE(ftn) ftn (int _dummy, void *sigctx)
864 #define SIG_HANDLER_SIGNATURE(ftn) ftn (int _dummy)
868 request_heap_snapshot (void) {
869 profiler->heap_shot_was_signalled = TRUE;
870 mono_gc_collect (mono_gc_max_generation ());
874 SIG_HANDLER_SIGNATURE (gc_request_handler) {
875 profiler->heap_shot_was_signalled = TRUE;
876 WRITER_EVENT_RAISE ();
880 add_gc_request_handler (int signal_number)
884 #ifdef MONO_ARCH_USE_SIGACTION
885 sa.sa_sigaction = gc_request_handler;
886 sigemptyset (&sa.sa_mask);
887 sa.sa_flags = SA_SIGINFO;
889 sa.sa_handler = gc_request_handler;
890 sigemptyset (&sa.sa_mask);
894 g_assert (sigaction (signal_number, &sa, NULL) != -1);
898 enable_profiler (void) {
899 profiler->profiler_enabled = TRUE;
903 disable_profiler (void) {
904 profiler->profiler_enabled = FALSE;
910 SIG_HANDLER_SIGNATURE (toggle_handler) {
911 if (profiler->profiler_enabled) {
912 profiler->profiler_enabled = FALSE;
914 profiler->profiler_enabled = TRUE;
919 add_toggle_handler (int signal_number)
923 #ifdef MONO_ARCH_USE_SIGACTION
924 sa.sa_sigaction = toggle_handler;
925 sigemptyset (&sa.sa_mask);
926 sa.sa_flags = SA_SIGINFO;
928 sa.sa_handler = toggle_handler;
929 sigemptyset (&sa.sa_mask);
933 g_assert (sigaction (signal_number, &sa, NULL) != -1);
/* Compile-time debug switches: flip any of these to 1 to enable verbose
 * printf tracing for the corresponding profiler subsystem. */
#define DEBUG_LOAD_EVENTS 0
#define DEBUG_MAPPING_EVENTS 0
#define DEBUG_LOGGING_PROFILER 0
#define DEBUG_HEAP_PROFILER 0
#define DEBUG_CLASS_BITMAPS 0
#define DEBUG_STATISTICAL_PROFILER 0
#define DEBUG_WRITER_THREAD 0
#define DEBUG_FILE_WRITES 0
#if (DEBUG_LOGGING_PROFILER || DEBUG_STATISTICAL_PROFILER || DEBUG_HEAP_PROFILER || DEBUG_WRITER_THREAD || DEBUG_FILE_WRITES)
#define LOG_WRITER_THREAD(m) printf ("WRITER-THREAD-LOG %s\n", m)
950 #define LOG_WRITER_THREAD(m)
953 #if DEBUG_LOGGING_PROFILER
954 static int event_counter = 0;
955 #define EVENT_MARK() printf ("[EVENT:%d]", ++ event_counter)
959 thread_stack_initialize_empty (ProfilerThreadStack *stack) {
963 stack->method_is_jitted = NULL;
967 thread_stack_free (ProfilerThreadStack *stack) {
970 if (stack->stack != NULL) {
971 g_free (stack->stack);
974 if (stack->method_is_jitted != NULL) {
975 g_free (stack->method_is_jitted);
976 stack->method_is_jitted = NULL;
981 thread_stack_initialize (ProfilerThreadStack *stack, guint32 capacity) {
982 stack->capacity = capacity;
984 stack->stack = g_new0 (MonoMethod*, capacity);
985 stack->method_is_jitted = g_new0 (guint8, capacity);
989 thread_stack_push_jitted (ProfilerThreadStack *stack, MonoMethod* method, gboolean method_is_jitted) {
990 if (stack->top >= stack->capacity) {
991 MonoMethod **old_stack = stack->stack;
992 guint8 *old_method_is_jitted = stack->method_is_jitted;
993 guint32 top = stack->top;
994 thread_stack_initialize (stack, stack->capacity * 2);
995 memcpy (stack->stack, old_stack, top * sizeof (MonoMethod*));
996 memcpy (stack->method_is_jitted, old_method_is_jitted, top * sizeof (guint8));
999 stack->stack [stack->top] = method;
1000 stack->method_is_jitted [stack->top] = method_is_jitted;
1005 thread_stack_push (ProfilerThreadStack *stack, MonoMethod* method) {
1006 thread_stack_push_jitted (stack, method, FALSE);
1010 thread_stack_pop (ProfilerThreadStack *stack) {
1011 if (stack->top > 0) {
1013 return stack->stack [stack->top];
1020 thread_stack_top (ProfilerThreadStack *stack) {
1021 if (stack->top > 0) {
1022 return stack->stack [stack->top - 1];
1029 thread_stack_top_is_jitted (ProfilerThreadStack *stack) {
1030 if (stack->top > 0) {
1031 return stack->method_is_jitted [stack->top - 1];
1038 thread_stack_index_from_top (ProfilerThreadStack *stack, int index) {
1039 if (stack->top > index) {
1040 return stack->stack [stack->top - (index + 1)];
1047 thread_stack_index_from_top_is_jitted (ProfilerThreadStack *stack, int index) {
1048 if (stack->top > index) {
1049 return stack->method_is_jitted [stack->top - (index + 1)];
1056 thread_stack_push_safely (ProfilerThreadStack *stack, MonoMethod* method) {
1057 if (stack->stack != NULL) {
1058 thread_stack_push (stack, method);
1063 thread_stack_push_jitted_safely (ProfilerThreadStack *stack, MonoMethod* method, gboolean method_is_jitted) {
1064 if (stack->stack != NULL) {
1065 thread_stack_push_jitted (stack, method, method_is_jitted);
1069 static ClassIdMappingElement*
1070 class_id_mapping_element_get (MonoClass *klass) {
1071 return g_hash_table_lookup (profiler->classes->table, (gconstpointer) klass);
1074 static MethodIdMappingElement*
1075 method_id_mapping_element_get (MonoMethod *method) {
1076 return g_hash_table_lookup (profiler->methods->table, (gconstpointer) method);
1079 #define BITS_TO_BYTES(v) do {\
1085 static ClassIdMappingElement*
1086 class_id_mapping_element_new (MonoClass *klass) {
1087 ClassIdMappingElement *result = g_new (ClassIdMappingElement, 1);
1089 result->name = mono_type_full_name (mono_class_get_type (klass));
1090 result->klass = klass;
1091 result->next_unwritten = profiler->classes->unwritten;
1092 profiler->classes->unwritten = result;
1093 result->id = profiler->classes->next_id;
1094 profiler->classes->next_id ++;
1096 result->data.bitmap.compact = 0;
1097 result->data.layout.slots = CLASS_LAYOUT_NOT_INITIALIZED;
1098 result->data.layout.references = CLASS_LAYOUT_NOT_INITIALIZED;
1100 g_hash_table_insert (profiler->classes->table, klass, result);
1102 #if (DEBUG_MAPPING_EVENTS)
1103 printf ("Created new CLASS mapping element \"%s\" (%p)[%d]\n", result->name, klass, result->id);
1109 class_id_mapping_element_build_layout_bitmap (MonoClass *klass, ClassIdMappingElement *klass_id) {
1110 MonoClass *parent_class = mono_class_get_parent (klass);
1111 int number_of_reference_fields = 0;
1112 int max_offset_of_reference_fields = 0;
1113 ClassIdMappingElement *parent_id;
1115 MonoClassField *field;
1117 #if (DEBUG_CLASS_BITMAPS)
1118 printf ("class_id_mapping_element_build_layout_bitmap: building layout for class %s.%s: ", mono_class_get_namespace (klass), mono_class_get_name (klass));
1121 if (parent_class != NULL) {
1122 parent_id = class_id_mapping_element_get (parent_class);
1123 g_assert (parent_id != NULL);
1125 if (parent_id->data.layout.slots == CLASS_LAYOUT_NOT_INITIALIZED) {
1126 #if (DEBUG_CLASS_BITMAPS)
1127 printf ("[recursively building bitmap for father class]\n");
1129 class_id_mapping_element_build_layout_bitmap (parent_class, parent_id);
1136 while ((field = mono_class_get_fields (klass, &iter)) != NULL) {
1137 MonoType* field_type = mono_field_get_type (field);
1138 // For now, skip static fields
1139 if (mono_field_get_flags (field) & 0x0010 /*FIELD_ATTRIBUTE_STATIC*/)
1142 if (MONO_TYPE_IS_REFERENCE (field_type)) {
1143 int field_offset = mono_field_get_offset (field) - sizeof (MonoObject);
1144 if (field_offset > max_offset_of_reference_fields) {
1145 max_offset_of_reference_fields = field_offset;
1147 number_of_reference_fields ++;
1149 MonoClass *field_class = mono_class_from_mono_type (field_type);
1150 if (field_class && mono_class_is_valuetype (field_class)) {
1151 ClassIdMappingElement *field_id = class_id_mapping_element_get (field_class);
1152 g_assert (field_id != NULL);
1154 if (field_id->data.layout.slots == CLASS_LAYOUT_NOT_INITIALIZED) {
1155 if (field_id != klass_id) {
1156 #if (DEBUG_CLASS_BITMAPS)
1157 printf ("[recursively building bitmap for field %s]\n", mono_field_get_name (field));
1159 class_id_mapping_element_build_layout_bitmap (field_class, field_id);
1161 #if (DEBUG_CLASS_BITMAPS)
1162 printf ("[breaking recursive bitmap build for field %s]", mono_field_get_name (field));
1165 klass_id->data.bitmap.compact = 0;
1166 klass_id->data.layout.slots = 0;
1167 klass_id->data.layout.references = 0;
1171 if (field_id->data.layout.references > 0) {
1172 int field_offset = mono_field_get_offset (field) - sizeof (MonoObject);
1173 int max_offset_reference_in_field = (field_id->data.layout.slots - 1) * sizeof (gpointer);
1175 if ((field_offset + max_offset_reference_in_field) > max_offset_of_reference_fields) {
1176 max_offset_of_reference_fields = field_offset + max_offset_reference_in_field;
1179 number_of_reference_fields += field_id->data.layout.references;
1185 #if (DEBUG_CLASS_BITMAPS)
1186 printf ("[allocating bitmap for class %s.%s (references %d, max offset %d, slots %d)]", mono_class_get_namespace (klass), mono_class_get_name (klass), number_of_reference_fields, max_offset_of_reference_fields, (int)(max_offset_of_reference_fields / sizeof (gpointer)) + 1);
1188 if ((number_of_reference_fields == 0) && ((parent_id == NULL) || (parent_id->data.layout.references == 0))) {
1189 #if (DEBUG_CLASS_BITMAPS)
1190 printf ("[no references at all]");
1192 klass_id->data.bitmap.compact = 0;
1193 klass_id->data.layout.slots = 0;
1194 klass_id->data.layout.references = 0;
1196 if ((parent_id != NULL) && (parent_id->data.layout.references > 0)) {
1197 #if (DEBUG_CLASS_BITMAPS)
1198 printf ("[parent %s.%s has %d references in %d slots]", mono_class_get_namespace (parent_class), mono_class_get_name (parent_class), parent_id->data.layout.references, parent_id->data.layout.slots);
1200 klass_id->data.layout.slots = parent_id->data.layout.slots;
1201 klass_id->data.layout.references = parent_id->data.layout.references;
1203 #if (DEBUG_CLASS_BITMAPS)
1204 printf ("[no references from parent]");
1206 klass_id->data.layout.slots = 0;
1207 klass_id->data.layout.references = 0;
1210 if (number_of_reference_fields > 0) {
1211 klass_id->data.layout.slots += ((max_offset_of_reference_fields / sizeof (gpointer)) + 1);
1212 klass_id->data.layout.references += number_of_reference_fields;
1213 #if (DEBUG_CLASS_BITMAPS)
1214 printf ("[adding data, going to %d references in %d slots]", klass_id->data.layout.references, klass_id->data.layout.slots);
1218 if (klass_id->data.layout.slots <= CLASS_LAYOUT_PACKED_BITMAP_SIZE) {
1219 #if (DEBUG_CLASS_BITMAPS)
1220 printf ("[zeroing bitmap]");
1222 klass_id->data.bitmap.compact = 0;
1223 if ((parent_id != NULL) && (parent_id->data.layout.references > 0)) {
1224 #if (DEBUG_CLASS_BITMAPS)
1225 printf ("[copying compact father bitmap]");
1227 klass_id->data.bitmap.compact = parent_id->data.bitmap.compact;
1230 int size_of_bitmap = klass_id->data.layout.slots;
1231 BITS_TO_BYTES (size_of_bitmap);
1232 #if (DEBUG_CLASS_BITMAPS)
1233 printf ("[allocating %d bytes for bitmap]", size_of_bitmap);
1235 klass_id->data.bitmap.extended = g_malloc0 (size_of_bitmap);
1236 if ((parent_id != NULL) && (parent_id->data.layout.references > 0)) {
1237 int size_of_father_bitmap = parent_id->data.layout.slots;
1238 if (size_of_father_bitmap <= CLASS_LAYOUT_PACKED_BITMAP_SIZE) {
1240 #if (DEBUG_CLASS_BITMAPS)
1241 printf ("[copying %d bits from father bitmap]", size_of_father_bitmap);
1243 for (father_slot = 0; father_slot < size_of_father_bitmap; father_slot ++) {
1244 if (parent_id->data.bitmap.compact & (((guint64)1) << father_slot)) {
1245 klass_id->data.bitmap.extended [father_slot >> 3] |= (1 << (father_slot & 7));
1249 BITS_TO_BYTES (size_of_father_bitmap);
1250 #if (DEBUG_CLASS_BITMAPS)
1251 printf ("[copying %d bytes from father bitmap]", size_of_father_bitmap);
1253 memcpy (klass_id->data.bitmap.extended, parent_id->data.bitmap.extended, size_of_father_bitmap);
1259 #if (DEBUG_CLASS_BITMAPS)
1260 printf ("[starting filling iteration]\n");
1263 while ((field = mono_class_get_fields (klass, &iter)) != NULL) {
1264 MonoType* field_type = mono_field_get_type (field);
1265 // For now, skip static fields
1266 if (mono_field_get_flags (field) & 0x0010 /*FIELD_ATTRIBUTE_STATIC*/)
1269 #if (DEBUG_CLASS_BITMAPS)
1270 printf ("[Working on field %s]", mono_field_get_name (field));
1272 if (MONO_TYPE_IS_REFERENCE (field_type)) {
1273 int field_offset = mono_field_get_offset (field) - sizeof (MonoObject);
1275 g_assert ((field_offset % sizeof (gpointer)) == 0);
1276 field_slot = field_offset / sizeof (gpointer);
1277 if (klass_id->data.layout.slots <= CLASS_LAYOUT_PACKED_BITMAP_SIZE) {
1278 klass_id->data.bitmap.compact |= (((guint64)1) << field_slot);
1280 klass_id->data.bitmap.extended [field_slot >> 3] |= (1 << (field_slot & 7));
1282 #if (DEBUG_CLASS_BITMAPS)
1283 printf ("[reference at offset %d, slot %d]", field_offset, field_slot);
1286 MonoClass *field_class = mono_class_from_mono_type (field_type);
1287 if (field_class && mono_class_is_valuetype (field_class)) {
1288 ClassIdMappingElement *field_id = class_id_mapping_element_get (field_class);
1292 g_assert (field_id != NULL);
1293 field_offset = mono_field_get_offset (field) - sizeof (MonoObject);
1294 g_assert ((field_id->data.layout.references == 0) || ((field_offset % sizeof (gpointer)) == 0));
1295 field_slot = field_offset / sizeof (gpointer);
1296 #if (DEBUG_CLASS_BITMAPS)
1297 printf ("[value type at offset %d, slot %d, with %d references in %d slots]", field_offset, field_slot, field_id->data.layout.references, field_id->data.layout.slots);
1300 if (field_id->data.layout.references > 0) {
1302 if (field_id->data.layout.slots <= CLASS_LAYOUT_PACKED_BITMAP_SIZE) {
1303 for (sub_field_slot = 0; sub_field_slot < field_id->data.layout.slots; sub_field_slot ++) {
1304 if (field_id->data.bitmap.compact & (((guint64)1) << sub_field_slot)) {
1305 int actual_slot = field_slot + sub_field_slot;
1306 if (klass_id->data.layout.slots <= CLASS_LAYOUT_PACKED_BITMAP_SIZE) {
1307 klass_id->data.bitmap.compact |= (((guint64)1) << actual_slot);
1309 klass_id->data.bitmap.extended [actual_slot >> 3] |= (1 << (actual_slot & 7));
1314 for (sub_field_slot = 0; sub_field_slot < field_id->data.layout.slots; sub_field_slot ++) {
1315 if (field_id->data.bitmap.extended [sub_field_slot >> 3] & (1 << (sub_field_slot & 7))) {
1316 int actual_slot = field_slot + sub_field_slot;
1317 if (klass_id->data.layout.slots <= CLASS_LAYOUT_PACKED_BITMAP_SIZE) {
1318 klass_id->data.bitmap.compact |= (((guint64)1) << actual_slot);
1320 klass_id->data.bitmap.extended [actual_slot >> 3] |= (1 << (actual_slot & 7));
1329 #if (DEBUG_CLASS_BITMAPS)
1332 printf ("\nLayot of class \"%s.%s\": references %d, slots %d, bitmap {", mono_class_get_namespace (klass), mono_class_get_name (klass), klass_id->data.layout.references, klass_id->data.layout.slots);
1333 for (slot = 0; slot < klass_id->data.layout.slots; slot ++) {
1334 if (klass_id->data.layout.slots <= CLASS_LAYOUT_PACKED_BITMAP_SIZE) {
1335 if (klass_id->data.bitmap.compact & (((guint64)1) << slot)) {
1341 if (klass_id->data.bitmap.extended [slot >> 3] & (1 << (slot & 7))) {
/*
 * Allocate and register a MethodIdMappingElement for METHOD.
 * The element receives the next sequential method id, is prepended to the
 * profiler's "unwritten" list (flushed later by write_mapping_block) and
 * inserted into the method hash table keyed by the MonoMethod pointer.
 */
1355 static MethodIdMappingElement*
1356 method_id_mapping_element_new (MonoMethod *method) {
1357 MethodIdMappingElement *result = g_new (MethodIdMappingElement, 1);
1358 char *signature = mono_signature_get_desc (mono_method_signature (method), TRUE);
/* Human-readable form: "method_name (signature)". */
1360 result->name = g_strdup_printf ("%s (%s)", mono_method_get_name (method), signature);
1362 result->method = method;
/* Link into the list of mapping elements not yet written to the output file. */
1363 result->next_unwritten = profiler->methods->unwritten;
1364 profiler->methods->unwritten = result;
1365 result->id = profiler->methods->next_id;
1366 profiler->methods->next_id ++;
1367 g_hash_table_insert (profiler->methods->table, method, result);
/* JIT code location is filled in later, when the method gets compiled. */
1369 result->data.code_start = NULL;
1370 result->data.code_size = 0;
1372 #if (DEBUG_MAPPING_EVENTS)
1373 printf ("Created new METHOD mapping element \"%s\" (%p)[%d]\n", result->name, method, result->id);
/* Value-destroy callback for the method id hash table: frees one element. */
1380 method_id_mapping_element_destroy (gpointer element) {
1381 MethodIdMappingElement *e = (MethodIdMappingElement*) element;
/*
 * Value-destroy callback for the class id hash table.  Also releases the
 * heap-allocated "extended" reference bitmap, which only exists when the
 * class layout was initialized and needed more slots than fit in the
 * packed (inline) bitmap.
 */
1388 class_id_mapping_element_destroy (gpointer element) {
1389 ClassIdMappingElement *e = (ClassIdMappingElement*) element;
1392 if ((e->data.layout.slots != CLASS_LAYOUT_NOT_INITIALIZED) && (e->data.layout.slots > CLASS_LAYOUT_PACKED_BITMAP_SIZE))
1393 g_free (e->data.bitmap.extended);
/*
 * Create the global method id map: a hash table keyed by MonoMethod*
 * whose values (MethodIdMappingElement) are owned by the table, plus the
 * "unwritten" list head and the id counter (ids start at 1).
 */
1397 static MethodIdMapping*
1398 method_id_mapping_new (void) {
1399 MethodIdMapping *result = g_new (MethodIdMapping, 1);
1400 //result->table = g_hash_table_new_full (mono_aligned_addr_hash, NULL, NULL, method_id_mapping_element_destroy);
1401 result->table = g_hash_table_new_full (g_direct_hash, NULL, NULL, method_id_mapping_element_destroy);
1402 result->unwritten = NULL;
1403 result->next_id = 1;
/* Same structure as the method map, but keyed by MonoClass*. */
1407 static ClassIdMapping*
1408 class_id_mapping_new (void) {
1409 ClassIdMapping *result = g_new (ClassIdMapping, 1);
1410 //result->table = g_hash_table_new_full (mono_aligned_addr_hash, NULL, NULL, class_id_mapping_element_destroy);
1411 result->table = g_hash_table_new_full (g_direct_hash, NULL, NULL, class_id_mapping_element_destroy);
1412 result->unwritten = NULL;
1413 result->next_id = 1;
/* Tear down a method id map; the hash table's value-destroy callback
 * frees each individual element. */
1418 method_id_mapping_destroy (MethodIdMapping *map) {
1419 g_hash_table_destroy (map->table);
/* Tear down a class id map; elements are freed by the table's callback. */
1424 class_id_mapping_destroy (ClassIdMapping *map) {
1425 g_hash_table_destroy (map->table);
1429 #if (DEBUG_LOAD_EVENTS)
1431 print_load_event (const char *event_name, GHashTable *table, gpointer item, LoadedElement *element);
/*
 * Record the start of a load event for ITEM (assembly, module or
 * appdomain): allocates a zeroed LoadedElement, stamps its
 * load_start_counter and registers it in TABLE keyed by ITEM.
 */
1434 static LoadedElement*
1435 loaded_element_load_start (GHashTable *table, gpointer item) {
1436 LoadedElement *element = g_new0 (LoadedElement, 1);
1437 #if (DEBUG_LOAD_EVENTS)
1438 print_load_event ("LOAD START", table, item, element);
1440 MONO_PROFILER_GET_CURRENT_COUNTER (element->load_start_counter);
1441 g_hash_table_insert (table, item, element);
/*
 * Record the end of a load event: looks up the element created by
 * loaded_element_load_start, stamps load_end_counter, takes ownership of
 * NAME and marks the element loaded.  Asserts that load start was seen.
 */
1445 static LoadedElement*
1446 loaded_element_load_end (GHashTable *table, gpointer item, char *name) {
1447 LoadedElement *element = g_hash_table_lookup (table, item);
1448 #if (DEBUG_LOAD_EVENTS)
1449 print_load_event ("LOAD END", table, item, element);
1451 g_assert (element != NULL);
1452 MONO_PROFILER_GET_CURRENT_COUNTER (element->load_end_counter);
1453 element->name = name;
1454 element->loaded = TRUE;
/* Record the start of an unload event for an already-tracked ITEM. */
1458 static LoadedElement*
1459 loaded_element_unload_start (GHashTable *table, gpointer item) {
1460 LoadedElement *element = g_hash_table_lookup (table, item);
1461 #if (DEBUG_LOAD_EVENTS)
1462 print_load_event ("UNLOAD START", table, item, element);
1464 g_assert (element != NULL);
1465 MONO_PROFILER_GET_CURRENT_COUNTER (element->unload_start_counter);
/* Record the end of an unload event and mark the element unloaded. */
1469 static LoadedElement*
1470 loaded_element_unload_end (GHashTable *table, gpointer item) {
1471 LoadedElement *element = g_hash_table_lookup (table, item);
1472 #if (DEBUG_LOAD_EVENTS)
1473 print_load_event ("UNLOAD END", table, item, element);
1475 g_assert (element != NULL);
1476 MONO_PROFILER_GET_CURRENT_COUNTER (element->unload_end_counter);
1477 element->unloaded = TRUE;
/* Destroy callback for the loaded-element tables: frees the owned name.
 * NOTE(review): the NULL guard is redundant — g_free (NULL) is a no-op. */
1483 loaded_element_destroy (gpointer element) {
1484 if (((LoadedElement*)element)->name)
1485 g_free (((LoadedElement*)element)->name);
1489 #if (DEBUG_LOAD_EVENTS)
/*
 * Debug-only helper: prints a human-readable description of a load/unload
 * event.  The kind of item (assembly, module or appdomain) is deduced by
 * comparing TABLE against the profiler's known tables; any other table is
 * a programming error (g_assert_not_reached).
 */
1491 print_load_event (const char *event_name, GHashTable *table, gpointer item, LoadedElement *element) {
1492 const char* item_name;
1495 if (table == profiler->loaded_assemblies) {
1496 //item_info = g_strdup_printf("ASSEMBLY %p (dynamic %d)", item, mono_image_is_dynamic (mono_assembly_get_image((MonoAssembly*)item)));
1497 item_info = g_strdup_printf("ASSEMBLY %p", item);
1498 } else if (table == profiler->loaded_modules) {
1499 //item_info = g_strdup_printf("MODULE %p (dynamic %d)", item, mono_image_is_dynamic ((MonoImage*)item));
1500 item_info = g_strdup_printf("MODULE %p", item);
1501 } else if (table == profiler->loaded_appdomains) {
1502 item_info = g_strdup_printf("APPDOMAIN %p (id %d)", item, mono_domain_get_id ((MonoDomain*)item));
1505 g_assert_not_reached ();
/* ELEMENT may legitimately be NULL (e.g. before load start completed). */
1508 if (element != NULL) {
1509 item_name = element->name;
1511 item_name = "<NULL>";
1514 printf ("%s EVENT for %s (%s)\n", event_name, item_info, item_name);
/* Free a linked list of heap-shot object buffers, walking the "next"
 * chain starting at BUFFER. */
1520 profiler_heap_shot_object_buffers_destroy (ProfilerHeapShotObjectBuffer *buffer) {
1521 while (buffer != NULL) {
1522 ProfilerHeapShotObjectBuffer *next = buffer->next;
1523 #if DEBUG_HEAP_PROFILER
1524 printf ("profiler_heap_shot_object_buffers_destroy: destroyed buffer %p (%p-%p)\n", buffer, & (buffer->buffer [0]), buffer->end);
/*
 * Prepend a fresh heap-shot object buffer to DATA's per-thread buffer
 * list and return it.  As a side effect, trims the tail of the list:
 * once a successor buffer has been fully processed
 * (first_unprocessed_slot == end) it — and everything after it — is
 * destroyed.
 */
1531 static ProfilerHeapShotObjectBuffer*
1532 profiler_heap_shot_object_buffer_new (ProfilerPerThreadData *data) {
1533 ProfilerHeapShotObjectBuffer *buffer;
1534 ProfilerHeapShotObjectBuffer *result = g_new (ProfilerHeapShotObjectBuffer, 1);
1535 result->next_free_slot = & (result->buffer [0]);
1536 result->end = & (result->buffer [PROFILER_HEAP_SHOT_OBJECT_BUFFER_SIZE]);
1537 result->first_unprocessed_slot = & (result->buffer [0]);
1538 result->next = data->heap_shot_object_buffers;
1539 data->heap_shot_object_buffers = result;
1540 #if DEBUG_HEAP_PROFILER
1541 printf ("profiler_heap_shot_object_buffer_new: created buffer %p (%p-%p)\n", result, result->next_free_slot, result->end);
/* Garbage-collect fully processed buffers hanging off the list tail. */
1543 for (buffer = result; buffer != NULL; buffer = buffer->next) {
1544 ProfilerHeapShotObjectBuffer *last = buffer->next;
1545 if ((last != NULL) && (last->first_unprocessed_slot == last->end)) {
1546 buffer->next = NULL;
1547 profiler_heap_shot_object_buffers_destroy (last);
/*
 * Allocate a heap-shot write job for one GC COLLECTION.
 * The write buffer chain is only allocated when object data will actually
 * be recorded (unreachable-object tracking or a full heap dump); the
 * per-class summary array is only allocated when collection summaries
 * are enabled.  DUMP_HEAP_DATA says whether a full dump was requested,
 * HEAP_SHOT_WAS_SIGNALLED whether the user explicitly asked for it.
 */
1554 static ProfilerHeapShotWriteJob*
1555 profiler_heap_shot_write_job_new (gboolean heap_shot_was_signalled, gboolean dump_heap_data, guint32 collection) {
1556 ProfilerHeapShotWriteJob *job = g_new (ProfilerHeapShotWriteJob, 1);
1558 job->next_unwritten = NULL;
1560 if (profiler->action_flags.unreachable_objects || dump_heap_data) {
/* First buffer of the chain; last_next tracks where to append more. */
1561 job->buffers = g_new (ProfilerHeapShotWriteBuffer, 1);
1562 job->buffers->next = NULL;
1563 job->last_next = & (job->buffers->next);
1564 job->start = & (job->buffers->buffer [0]);
1565 job->cursor = job->start;
1566 job->end = & (job->buffers->buffer [PROFILER_HEAP_SHOT_WRITE_BUFFER_SIZE]);
1568 job->buffers = NULL;
1569 job->last_next = NULL;
1574 job->full_buffers = 0;
1576 if (profiler->action_flags.collection_summary) {
/* One summary slot per known class id (capacity = next id to assign). */
1577 job->summary.capacity = profiler->classes->next_id;
1578 job->summary.per_class_data = g_new0 (ProfilerHeapShotClassSummary, job->summary.capacity);
1580 job->summary.capacity = 0;
1581 job->summary.per_class_data = NULL;
1584 job->heap_shot_was_signalled = heap_shot_was_signalled;
1585 job->collection = collection;
1586 job->dump_heap_data = dump_heap_data;
1587 #if DEBUG_HEAP_PROFILER
1588 printf ("profiler_heap_shot_write_job_new: created job %p with buffer %p(%p-%p) (collection %d, dump %d)\n", job, job->buffers, job->start, job->end, collection, dump_heap_data);
/* TRUE if JOB has anything to write: either buffered object data or a
 * non-empty per-class summary. */
1594 profiler_heap_shot_write_job_has_data (ProfilerHeapShotWriteJob *job) {
1595 return ((job->buffers != NULL) || (job->summary.capacity > 0));
/*
 * Grow JOB's buffer chain by one buffer and store VALUE as its first
 * slot.  Updates start/cursor/end to point into the new buffer and
 * counts the previous buffer as full.
 */
1599 profiler_heap_shot_write_job_add_buffer (ProfilerHeapShotWriteJob *job, gpointer value) {
1600 ProfilerHeapShotWriteBuffer *buffer = g_new (ProfilerHeapShotWriteBuffer, 1);
1601 buffer->next = NULL;
/* Append via the last_next back-pointer, then advance it. */
1602 *(job->last_next) = buffer;
1603 job->last_next = & (buffer->next);
1604 job->full_buffers ++;
1605 buffer->buffer [0] = value;
1606 job->start = & (buffer->buffer [0]);
/* Cursor starts at slot 1: slot 0 already holds VALUE. */
1607 job->cursor = & (buffer->buffer [1]);
1608 job->end = & (buffer->buffer [PROFILER_HEAP_SHOT_WRITE_BUFFER_SIZE]);
1609 #if DEBUG_HEAP_PROFILER
1610 printf ("profiler_heap_shot_write_job_add_buffer: in job %p, added buffer %p(%p-%p) with value %p at address %p (cursor now %p)\n", job, buffer, job->start, job->end, value, &(buffer->buffer [0]), job->cursor);
1612 ProfilerHeapShotWriteBuffer *current_buffer;
1613 for (current_buffer = job->buffers; current_buffer != NULL; current_buffer = current_buffer->next) {
1614 printf ("profiler_heap_shot_write_job_add_buffer: now job %p has buffer %p\n", job, current_buffer);
/* Release all storage owned by JOB: the whole write-buffer chain and the
 * per-class summary array (both reset so the job can be freed safely). */
1621 profiler_heap_shot_write_job_free_buffers (ProfilerHeapShotWriteJob *job) {
1622 ProfilerHeapShotWriteBuffer *buffer = job->buffers;
1624 while (buffer != NULL) {
1625 ProfilerHeapShotWriteBuffer *next = buffer->next;
1626 #if DEBUG_HEAP_PROFILER
1627 printf ("profiler_heap_shot_write_job_free_buffers: in job %p, freeing buffer %p\n", job, buffer);
1633 job->buffers = NULL;
1635 if (job->summary.per_class_data != NULL) {
1636 g_free (job->summary.per_class_data);
1637 job->summary.per_class_data = NULL;
1639 job->summary.capacity = 0;
1643 profiler_heap_shot_write_block (ProfilerHeapShotWriteJob *job);
/*
 * Writer-thread loop: walk the pending heap-shot job list and flush every
 * job that has data, clearing next_unwritten links as jobs are written so
 * already-flushed jobs are not revisited.
 * NOTE(review): the "done" flag presumably controls an outer retry loop;
 * its surrounding lines are not visible in this excerpt — confirm against
 * the full source.
 */
1646 profiler_process_heap_shot_write_jobs (void) {
1647 gboolean done = FALSE;
1650 ProfilerHeapShotWriteJob *current_job = profiler->heap_shot_write_jobs;
1651 ProfilerHeapShotWriteJob *previous_job = NULL;
1652 ProfilerHeapShotWriteJob *next_job;
1655 while (current_job != NULL) {
1656 next_job = current_job->next_unwritten;
1658 if (next_job != NULL) {
1659 if (profiler_heap_shot_write_job_has_data (current_job)) {
/* Successor already empty: detach it from the unwritten chain. */
1662 if (! profiler_heap_shot_write_job_has_data (next_job)) {
1663 current_job->next_unwritten = NULL;
1667 if (profiler_heap_shot_write_job_has_data (current_job)) {
1668 LOG_WRITER_THREAD ("profiler_process_heap_shot_write_jobs: writing...");
1669 profiler_heap_shot_write_block (current_job);
1670 LOG_WRITER_THREAD ("profiler_process_heap_shot_write_jobs: done");
/* Mark the predecessor's chain as fully written up to here. */
1671 if (previous_job != NULL) {
1672 previous_job->next_unwritten = NULL;
1677 previous_job = current_job;
1678 current_job = next_job;
/*
 * Free every heap-shot job that has already been written.  Jobs still on
 * the next_unwritten chain must be kept; the list is cut right after the
 * last unwritten job and everything beyond that point is freed.
 */
1684 profiler_free_heap_shot_write_jobs (void) {
1685 ProfilerHeapShotWriteJob *current_job = profiler->heap_shot_write_jobs;
1686 ProfilerHeapShotWriteJob *next_job;
1688 if (current_job != NULL) {
/* Skip over jobs that are still pending (must not be freed). */
1689 while (current_job->next_unwritten != NULL) {
1690 #if DEBUG_HEAP_PROFILER
1691 printf ("profiler_free_heap_shot_write_jobs: job %p must not be freed\n", current_job);
1693 current_job = current_job->next_unwritten;
/* Detach the written tail from the list before freeing it. */
1696 next_job = current_job->next;
1697 current_job->next = NULL;
1698 current_job = next_job;
1700 while (current_job != NULL) {
1701 #if DEBUG_HEAP_PROFILER
1702 printf ("profiler_free_heap_shot_write_jobs: job %p will be freed\n", current_job);
1704 next_job = current_job->next;
1705 profiler_heap_shot_write_job_free_buffers (current_job);
1706 g_free (current_job);
1707 current_job = next_job;
/* Unconditionally free the entire heap-shot job list (shutdown path). */
1713 profiler_destroy_heap_shot_write_jobs (void) {
1714 ProfilerHeapShotWriteJob *current_job = profiler->heap_shot_write_jobs;
1715 ProfilerHeapShotWriteJob *next_job;
1717 while (current_job != NULL) {
1718 next_job = current_job->next;
1719 profiler_heap_shot_write_job_free_buffers (current_job);
1720 g_free (current_job);
1721 current_job = next_job;
/* Push JOB onto the head of the global heap-shot job list; the new job
 * is also the head of the unwritten chain. */
1726 profiler_add_heap_shot_write_job (ProfilerHeapShotWriteJob *job) {
1727 job->next = profiler->heap_shot_write_jobs;
1728 job->next_unwritten = job->next;
1729 profiler->heap_shot_write_jobs = job;
1730 #if DEBUG_HEAP_PROFILER
1731 printf ("profiler_add_heap_shot_write_job: added job %p\n", job);
/* Debug tracing hooks for STORE_ALLOCATED_OBJECT; compiled away unless
 * DEBUG_HEAP_PROFILER is set. */
1735 #if DEBUG_HEAP_PROFILER
1736 #define STORE_ALLOCATED_OBJECT_MESSAGE1(d,o) printf ("STORE_ALLOCATED_OBJECT[TID %ld]: storing object %p at address %p\n", (d)->thread_id, (o), (d)->heap_shot_object_buffers->next_free_slot)
1737 #define STORE_ALLOCATED_OBJECT_MESSAGE2(d,o) printf ("STORE_ALLOCATED_OBJECT[TID %ld]: storing object %p at address %p in new buffer %p\n", (d)->thread_id, (o), buffer->next_free_slot, buffer)
1739 #define STORE_ALLOCATED_OBJECT_MESSAGE1(d,o)
1740 #define STORE_ALLOCATED_OBJECT_MESSAGE2(d,o)
/* Append object O to thread D's current heap-shot object buffer,
 * allocating a new buffer first when the current one is full. */
1742 #define STORE_ALLOCATED_OBJECT(d,o) do {\
1743 if ((d)->heap_shot_object_buffers->next_free_slot < (d)->heap_shot_object_buffers->end) {\
1744 STORE_ALLOCATED_OBJECT_MESSAGE1 ((d), (o));\
1745 *((d)->heap_shot_object_buffers->next_free_slot) = (o);\
1746 (d)->heap_shot_object_buffers->next_free_slot ++;\
1748 ProfilerHeapShotObjectBuffer *buffer = profiler_heap_shot_object_buffer_new (d);\
1749 STORE_ALLOCATED_OBJECT_MESSAGE2 ((d), (o));\
1750 *((buffer)->next_free_slot) = (o);\
1751 (buffer)->next_free_slot ++;\
/*
 * Allocate and initialize the per-thread profiler state: an event buffer
 * of BUFFER_SIZE entries (end_event points at the last usable slot),
 * timing counters, the thread id, and — when any heap-tracking action is
 * enabled — an initial heap-shot object buffer.  The shadow call stack
 * is only given real storage when stack tracking is on.
 */
1755 static ProfilerPerThreadData*
1756 profiler_per_thread_data_new (guint32 buffer_size)
1758 ProfilerPerThreadData *data = g_new (ProfilerPerThreadData, 1);
1760 data->events = g_new0 (ProfilerEventData, buffer_size);
1761 data->next_free_event = data->events;
/* Last slot is reserved: end_event is events + (buffer_size - 1). */
1762 data->end_event = data->events + (buffer_size - 1);
1763 data->first_unwritten_event = data->events;
1764 data->first_unmapped_event = data->events;
1765 MONO_PROFILER_GET_CURRENT_COUNTER (data->start_event_counter);
1766 data->last_event_counter = data->start_event_counter;
1767 data->thread_id = CURRENT_THREAD_ID ();
1768 data->heap_shot_object_buffers = NULL;
1769 if ((profiler->action_flags.unreachable_objects == TRUE) ||
1770 (profiler->action_flags.heap_shot == TRUE) ||
1771 (profiler->action_flags.collection_summary == TRUE)) {
1772 profiler_heap_shot_object_buffer_new (data);
1774 if (profiler->action_flags.track_stack) {
1775 thread_stack_initialize (&(data->stack), 64);
1777 thread_stack_initialize_empty (&(data->stack));
/* Release all per-thread profiler state: event buffer, heap-shot object
 * buffers and the shadow stack. */
1783 profiler_per_thread_data_destroy (ProfilerPerThreadData *data) {
1784 g_free (data->events);
1785 profiler_heap_shot_object_buffers_destroy (data->heap_shot_object_buffers);
1786 thread_stack_free (&(data->stack));
/*
 * Allocate the statistical (sampling) hit buffer.  Each logical sample
 * can carry a call chain, so the raw array holds
 * statistical_buffer_size * (call_chain_depth + 1) hits, while end_index
 * counts logical samples only.
 */
1790 static ProfilerStatisticalData*
1791 profiler_statistical_data_new (MonoProfiler *profiler) {
1792 int buffer_size = profiler->statistical_buffer_size * (profiler->statistical_call_chain_depth + 1);
1793 ProfilerStatisticalData *data = g_new (ProfilerStatisticalData, 1);
1795 data->hits = g_new0 (ProfilerStatisticalHit, buffer_size);
1796 data->next_free_index = 0;
1797 data->end_index = profiler->statistical_buffer_size;
1798 data->first_unwritten_index = 0;
/* Free a statistical hit buffer. */
1804 profiler_statistical_data_destroy (ProfilerStatisticalData *data) {
1805 g_free (data->hits);
/*
 * Advance the file-write cursor to the next buffer in the chain,
 * allocating a new one only if none exists yet — buffers are kept and
 * reused across flushes.  The just-filled buffer is counted in
 * full_write_buffers.
 */
1810 profiler_add_write_buffer (void) {
1811 if (profiler->current_write_buffer->next == NULL) {
/* Buffer struct plus its inline payload area in one allocation. */
1812 profiler->current_write_buffer->next = g_malloc (sizeof (ProfilerFileWriteBuffer) + PROFILER_FILE_WRITE_BUFFER_SIZE);
1813 profiler->current_write_buffer->next->next = NULL;
1815 //printf ("Added next buffer %p, to buffer %p\n", profiler->current_write_buffer->next, profiler->current_write_buffer);
1818 profiler->current_write_buffer = profiler->current_write_buffer->next;
1819 profiler->current_write_position = 0;
1820 profiler->full_write_buffers ++;
/* Free the whole chain of file-write buffers (shutdown path). */
1824 profiler_free_write_buffers (void) {
1825 ProfilerFileWriteBuffer *current_buffer = profiler->write_buffers;
1826 while (current_buffer != NULL) {
1827 ProfilerFileWriteBuffer *next_buffer = current_buffer->next;
1829 //printf ("Freeing write buffer %p, next is %p\n", current_buffer, next_buffer);
1831 g_free (current_buffer);
1832 current_buffer = next_buffer;
/* Append one byte to the current write buffer, rolling over to the next
 * buffer when the current one is full. */
1836 #define WRITE_BYTE(b) do {\
1837 if (profiler->current_write_position >= PROFILER_FILE_WRITE_BUFFER_SIZE) {\
1838 profiler_add_write_buffer ();\
1840 profiler->current_write_buffer->buffer [profiler->current_write_position] = (b);\
1841 profiler->current_write_position ++;\
/*
 * Flush everything accumulated in the write buffers to the output file
 * as one block.  The block starts with a 10-byte little-endian header:
 * 16-bit block CODE, 32-bit payload size, and a 32-bit counter delta
 * relative to the previous block header.  After writing, the buffer
 * chain is rewound for reuse (buffers are not freed).
 */
1846 write_current_block (guint16 code) {
1847 guint32 size = (profiler->full_write_buffers * PROFILER_FILE_WRITE_BUFFER_SIZE) + profiler->current_write_position;
1848 ProfilerFileWriteBuffer *current_buffer = profiler->write_buffers;
1849 guint64 current_counter;
1850 guint32 counter_delta;
1853 MONO_PROFILER_GET_CURRENT_COUNTER (current_counter);
/* First block ever written has no previous header to delta against. */
1854 if (profiler->last_header_counter != 0) {
1855 counter_delta = current_counter - profiler->last_header_counter;
1859 profiler->last_header_counter = current_counter;
1861 header [0] = code & 0xff;
1862 header [1] = (code >> 8) & 0xff;
1863 header [2] = size & 0xff;
1864 header [3] = (size >> 8) & 0xff;
1865 header [4] = (size >> 16) & 0xff;
1866 header [5] = (size >> 24) & 0xff;
1867 header [6] = counter_delta & 0xff;
1868 header [7] = (counter_delta >> 8) & 0xff;
1869 header [8] = (counter_delta >> 16) & 0xff;
1870 header [9] = (counter_delta >> 24) & 0xff;
1872 #if (DEBUG_FILE_WRITES)
1873 printf ("write_current_block: writing header (code %d)\n", code);
1875 WRITE_BUFFER (& (header [0]), 10);
/* Full buffers first, then the partial tail buffer. */
1877 while ((current_buffer != NULL) && (profiler->full_write_buffers > 0)) {
1878 #if (DEBUG_FILE_WRITES)
1879 printf ("write_current_block: writing buffer (size %d)\n", PROFILER_FILE_WRITE_BUFFER_SIZE);
1881 WRITE_BUFFER (& (current_buffer->buffer [0]), PROFILER_FILE_WRITE_BUFFER_SIZE);
1882 profiler->full_write_buffers --;
1883 current_buffer = current_buffer->next;
1885 if (profiler->current_write_position > 0) {
1886 #if (DEBUG_FILE_WRITES)
1887 printf ("write_current_block: writing last buffer (size %d)\n", profiler->current_write_position);
1889 WRITE_BUFFER (& (current_buffer->buffer [0]), profiler->current_write_position);
1892 #if (DEBUG_FILE_WRITES)
1893 printf ("write_current_block: buffers flushed\n");
/* Rewind for the next block: keep the buffers, reset the cursor. */
1896 profiler->current_write_buffer = profiler->write_buffers;
1897 profiler->current_write_position = 0;
1898 profiler->full_write_buffers = 0;
/* Variable-length integer encoding: 7 payload bits per byte, written
 * low-order first; the final byte is flagged by setting bit 7. */
1902 #define SEVEN_BITS_MASK (0x7f)
1903 #define EIGHT_BIT_MASK (0x80)
/* Emit VALUE with the 7-bit varint encoding described above. */
1906 write_uint32 (guint32 value) {
1907 while (value > SEVEN_BITS_MASK) {
1908 WRITE_BYTE (value & SEVEN_BITS_MASK);
1911 WRITE_BYTE (value | EIGHT_BIT_MASK);
/* 64-bit variant of write_uint32, same encoding. */
1914 write_uint64 (guint64 value) {
1915 while (value > SEVEN_BITS_MASK) {
1916 WRITE_BYTE (value & SEVEN_BITS_MASK);
1919 WRITE_BYTE (value | EIGHT_BIT_MASK);
/* Emit a C string byte-by-byte (terminator handling is below the
 * visible loop in the full source). */
1922 write_string (const char *string) {
1923 while (*string != 0) {
1924 WRITE_BYTE (*string);
1930 static void write_clock_data (void);
/*
 * Emit a DIRECTIVES block describing file-format options the reader must
 * honor.  Currently only one directive exists: when stack tracking is
 * enabled, allocation events carry the caller method.  The directive
 * list is terminated by MONO_PROFILER_DIRECTIVE_END and the block is
 * bracketed by clock data.
 */
1932 write_directives_block (gboolean start) {
1933 write_clock_data ();
1936 if (profiler->action_flags.track_stack) {
1937 write_uint32 (MONO_PROFILER_DIRECTIVE_ALLOCATIONS_CARRY_CALLER);
1940 write_uint32 (MONO_PROFILER_DIRECTIVE_END);
1942 write_clock_data ();
1943 write_current_block (MONO_PROFILER_FILE_BLOCK_KIND_DIRECTIVES);
/* Debug tracing for WRITE_HEAP_SHOT_JOB_VALUE; no-op in normal builds. */
1946 #if DEBUG_HEAP_PROFILER
1947 #define WRITE_HEAP_SHOT_JOB_VALUE_MESSAGE(v,c) printf ("WRITE_HEAP_SHOT_JOB_VALUE: writing value %p at cursor %p\n", (v), (c))
1949 #define WRITE_HEAP_SHOT_JOB_VALUE_MESSAGE(v,c)
/* Store V at job J's cursor, growing the buffer chain when full. */
1951 #define WRITE_HEAP_SHOT_JOB_VALUE(j,v) do {\
1952 if ((j)->cursor < (j)->end) {\
1953 WRITE_HEAP_SHOT_JOB_VALUE_MESSAGE ((v), ((j)->cursor));\
1954 *((j)->cursor) = (v);\
1957 profiler_heap_shot_write_job_add_buffer (j, v);\
/* Redefine GLib's pointer<->integer conversions to go through an
 * integer exactly as wide as a pointer on this platform — presumably to
 * avoid truncating 64-bit pointers through the stock guint-based macros
 * (NOTE(review): confirm against the GLib version targeted). */
1962 #undef GUINT_TO_POINTER
1963 #undef GPOINTER_TO_UINT
1964 #if (SIZEOF_VOID_P == 4)
1965 #define GUINT_TO_POINTER(u) ((void*)(guint32)(u))
1966 #define GPOINTER_TO_UINT(p) ((guint32)(void*)(p))
1967 #elif (SIZEOF_VOID_P == 8)
1968 #define GUINT_TO_POINTER(u) ((void*)(guint64)(u))
1969 #define GPOINTER_TO_UINT(p) ((guint64)(void*)(p))
1971 #error Bad size of void pointer
/* Store V with tag bits C packed into its low bits. */
1974 #define WRITE_HEAP_SHOT_JOB_VALUE_WITH_CODE(j,v,c) WRITE_HEAP_SHOT_JOB_VALUE (j, GUINT_TO_POINTER (GPOINTER_TO_UINT (v)|(c)))
/* Debug tracing for UPDATE_JOB_BUFFER_CURSOR; no-op in normal builds. */
1976 #if DEBUG_HEAP_PROFILER
1977 #define UPDATE_JOB_BUFFER_CURSOR_MESSAGE() printf ("profiler_heap_shot_write_block[UPDATE_JOB_BUFFER_CURSOR]: in job %p, moving to buffer %p and cursor %p\n", job, buffer, cursor)
1979 #define UPDATE_JOB_BUFFER_CURSOR_MESSAGE()
/* Advance the read cursor while iterating a job's buffer chain; when the
 * current buffer is exhausted, step to the next one.  Only non-final
 * buffers are read to their full capacity (the last buffer's valid
 * extent is bounded by the job's write cursor). */
1981 #define UPDATE_JOB_BUFFER_CURSOR() do {\
1983 if (cursor >= end) {\
1984 buffer = buffer->next;\
1985 if (buffer != NULL) {\
1986 cursor = & (buffer->buffer [0]);\
1987 if (buffer->next != NULL) {\
1988 end = & (buffer->buffer [PROFILER_HEAP_SHOT_WRITE_BUFFER_SIZE]);\
1996 UPDATE_JOB_BUFFER_CURSOR_MESSAGE ();\
/*
 * Serialize one heap-shot job's object data as a HEAP_DATA file block.
 * The in-memory buffer chain holds tagged pointer-sized values: the low
 * bits of each entry carry a HeapProfilerJobValueCode that selects the
 * record layout.  Two record kinds are handled here:
 *   HEAP_CODE_FREE_OBJECT_CLASS - an unreachable object: class id + size;
 *   HEAP_CODE_OBJECT            - a live object: address, class id, size,
 *                                 reference count, then that many
 *                                 referenced-object addresses.
 * The block payload is bracketed by the job's own timestamps and by
 * start/end timestamps taken while writing.
 */
2000 profiler_heap_shot_write_data_block (ProfilerHeapShotWriteJob *job) {
2001 ProfilerHeapShotWriteBuffer *buffer;
2004 guint64 start_counter;
2006 guint64 end_counter;
2009 write_uint64 (job->start_counter);
2010 write_uint64 (job->start_time);
2011 write_uint64 (job->end_counter);
2012 write_uint64 (job->end_time);
2013 write_uint32 (job->collection);
2014 MONO_PROFILER_GET_CURRENT_COUNTER (start_counter);
2015 MONO_PROFILER_GET_CURRENT_TIME (start_time);
2016 write_uint64 (start_counter);
2017 write_uint64 (start_time);
2018 #if DEBUG_HEAP_PROFILER
2019 printf ("profiler_heap_shot_write_data_block: start writing job %p (start %p, end %p)...\n", job, & (job->buffers->buffer [0]), job->cursor);
/* Set up the read cursor on the first buffer; only non-final buffers are
 * read to full capacity (cf. UPDATE_JOB_BUFFER_CURSOR). */
2021 buffer = job->buffers;
2022 cursor = & (buffer->buffer [0]);
2023 if (buffer->next != NULL) {
2024 end = & (buffer->buffer [PROFILER_HEAP_SHOT_WRITE_BUFFER_SIZE]);
2028 if (cursor >= end) {
2031 #if DEBUG_HEAP_PROFILER
2032 printf ("profiler_heap_shot_write_data_block: in job %p, starting at buffer %p and cursor %p\n", job, buffer, cursor);
2034 while (cursor != NULL) {
2035 gpointer value = *cursor;
/* Low bits of the stored value select the record kind. */
2036 HeapProfilerJobValueCode code = GPOINTER_TO_UINT (value) & HEAP_CODE_MASK;
2037 #if DEBUG_HEAP_PROFILER
2038 printf ("profiler_heap_shot_write_data_block: got value %p and code %d\n", value, code);
2041 UPDATE_JOB_BUFFER_CURSOR ();
2042 if (code == HEAP_CODE_FREE_OBJECT_CLASS) {
/* Strip the tag bits to recover the MonoClass pointer. */
2043 MonoClass *klass = GUINT_TO_POINTER (GPOINTER_TO_UINT (value) & (~ (guint64) HEAP_CODE_MASK));
2044 //MonoClass *klass = GUINT_TO_POINTER (GPOINTER_TO_UINT (value) % 4);
2045 ClassIdMappingElement *class_id;
2048 class_id = class_id_mapping_element_get (klass);
2049 if (class_id == NULL) {
2050 printf ("profiler_heap_shot_write_data_block: unknown class %p", klass);
2052 g_assert (class_id != NULL);
/* Class id shifted left so the low 2 bits again carry the code. */
2053 write_uint32 ((class_id->id << 2) | HEAP_CODE_FREE_OBJECT_CLASS);
2055 size = GPOINTER_TO_UINT (*cursor);
2056 UPDATE_JOB_BUFFER_CURSOR ();
2057 write_uint32 (size);
2058 #if DEBUG_HEAP_PROFILER
2059 printf ("profiler_heap_shot_write_data_block: wrote unreachable object of class %p (id %d, size %d)\n", klass, class_id->id, size);
2061 } else if (code == HEAP_CODE_OBJECT) {
2062 MonoObject *object = GUINT_TO_POINTER (GPOINTER_TO_UINT (value) & (~ (guint64) HEAP_CODE_MASK));
2063 MonoClass *klass = mono_object_get_class (object);
2064 ClassIdMappingElement *class_id = class_id_mapping_element_get (klass);
2065 guint32 size = mono_object_get_size (object);
/* Next slot holds the count of reference slots that follow. */
2066 guint32 references = GPOINTER_TO_UINT (*cursor);
2067 UPDATE_JOB_BUFFER_CURSOR ();
2069 if (class_id == NULL) {
2070 printf ("profiler_heap_shot_write_data_block: unknown class %p", klass);
2072 g_assert (class_id != NULL);
2074 write_uint64 (GPOINTER_TO_UINT (value));
2075 write_uint32 (class_id->id);
2076 write_uint32 (size);
2077 write_uint32 (references);
2078 #if DEBUG_HEAP_PROFILER
2079 printf ("profiler_heap_shot_write_data_block: writing object %p (references %d)\n", value, references);
2082 while (references > 0) {
2083 gpointer reference = *cursor;
2084 write_uint64 (GPOINTER_TO_UINT (reference));
2085 UPDATE_JOB_BUFFER_CURSOR ();
2087 #if DEBUG_HEAP_PROFILER
2088 printf ("profiler_heap_shot_write_data_block: inside object %p, wrote reference %p)\n", value, reference);
/* Any other tag is a corrupted buffer: hard failure. */
2092 #if DEBUG_HEAP_PROFILER
2093 printf ("profiler_heap_shot_write_data_block: unknown code %d in value %p\n", code, value);
2095 g_assert_not_reached ();
2100 MONO_PROFILER_GET_CURRENT_COUNTER (end_counter);
2101 MONO_PROFILER_GET_CURRENT_TIME (end_time);
2102 write_uint64 (end_counter);
2103 write_uint64 (end_time);
2105 write_current_block (MONO_PROFILER_FILE_BLOCK_KIND_HEAP_DATA);
2106 #if DEBUG_HEAP_PROFILER
2107 printf ("profiler_heap_shot_write_data_block: writing job %p done.\n", job);
/*
 * Serialize a job's per-class collection summary as a HEAP_SUMMARY file
 * block: for every class id that had at least one reachable or
 * unreachable instance, write its reachable and unreachable instance and
 * byte counts.  The payload is bracketed by start/end timestamps.
 */
2111 profiler_heap_shot_write_summary_block (ProfilerHeapShotWriteJob *job) {
2112 guint64 start_counter;
2114 guint64 end_counter;
2118 #if DEBUG_HEAP_PROFILER
2119 printf ("profiler_heap_shot_write_summary_block: start writing job %p...\n", job);
2121 MONO_PROFILER_GET_CURRENT_COUNTER (start_counter);
2122 MONO_PROFILER_GET_CURRENT_TIME (start_time);
2123 write_uint64 (start_counter);
2124 write_uint64 (start_time);
2126 write_uint32 (job->collection);
/* Classes with no instances in either category are skipped entirely. */
2128 for (id = 0; id < job->summary.capacity; id ++) {
2129 if ((job->summary.per_class_data [id].reachable.instances > 0) || (job->summary.per_class_data [id].unreachable.instances > 0)) {
2131 write_uint32 (job->summary.per_class_data [id].reachable.instances);
2132 write_uint32 (job->summary.per_class_data [id].reachable.bytes);
2133 write_uint32 (job->summary.per_class_data [id].unreachable.instances);
2134 write_uint32 (job->summary.per_class_data [id].unreachable.bytes);
2139 MONO_PROFILER_GET_CURRENT_COUNTER (end_counter);
2140 MONO_PROFILER_GET_CURRENT_TIME (end_time);
2141 write_uint64 (end_counter);
2142 write_uint64 (end_time);
2144 write_current_block (MONO_PROFILER_FILE_BLOCK_KIND_HEAP_SUMMARY);
2145 #if DEBUG_HEAP_PROFILER
2146 printf ("profiler_heap_shot_write_summary_block: writing job %p done.\n", job);
/*
 * Flush one heap-shot job to the output file: the per-class summary
 * block (when enabled), then the object data block (when unreachable
 * objects or full heap shots are enabled).  The job's buffers are freed
 * afterwards; the job struct itself is freed by the caller.
 */
2151 profiler_heap_shot_write_block (ProfilerHeapShotWriteJob *job) {
2152 #if DEBUG_HEAP_PROFILER
2153 printf ("profiler_heap_shot_write_block: working on job %p...\n", job);
2156 if (profiler->action_flags.collection_summary == TRUE) {
2157 profiler_heap_shot_write_summary_block (job);
2160 if ((profiler->action_flags.unreachable_objects == TRUE) || (profiler->action_flags.heap_shot == TRUE)) {
2161 profiler_heap_shot_write_data_block (job);
2164 profiler_heap_shot_write_job_free_buffers (job);
2165 #if DEBUG_HEAP_PROFILER
2166 printf ("profiler_heap_shot_write_block: work on job %p done.\n", job);
/*
 * Emit a LOADED file block for ELEMENT (assembly/module/appdomain,
 * selected by KIND): load start/end counters, the loading thread id and
 * the element name, then mark the element's load as written.
 */
2171 write_element_load_block (LoadedElement *element, guint8 kind, gsize thread_id) {
2173 write_uint64 (element->load_start_counter);
2174 write_uint64 (element->load_end_counter);
2175 write_uint64 (thread_id);
2176 write_string (element->name);
2177 write_current_block (MONO_PROFILER_FILE_BLOCK_KIND_LOADED);
2178 element->load_written = TRUE;
/* Same as above for the UNLOADED block, using the unload counters. */
2182 write_element_unload_block (LoadedElement *element, guint8 kind, gsize thread_id) {
2184 write_uint64 (element->unload_start_counter);
2185 write_uint64 (element->unload_end_counter);
2186 write_uint64 (thread_id);
2187 write_string (element->name);
2188 write_current_block (MONO_PROFILER_FILE_BLOCK_KIND_UNLOADED);
2189 element->unload_written = TRUE;
/* Sample the current counter and wall-clock time and append both to the
 * output as varints (used to bracket block payloads). */
2193 write_clock_data (void) {
2197 MONO_PROFILER_GET_CURRENT_COUNTER (counter);
2198 MONO_PROFILER_GET_CURRENT_TIME (time);
2200 write_uint64 (counter);
2201 write_uint64 (time);
/*
 * Emit a MAPPING file block translating internal ids to names: first all
 * classes created since the last flush (id + name), then all methods
 * (id + owning class id + name).  Names are freed once written and both
 * "unwritten" lists are reset.  No-op when nothing is pending.
 */
2205 write_mapping_block (gsize thread_id) {
2206 ClassIdMappingElement *current_class;
2207 MethodIdMappingElement *current_method;
2209 if ((profiler->classes->unwritten == NULL) && (profiler->methods->unwritten == NULL))
2212 #if (DEBUG_MAPPING_EVENTS || DEBUG_FILE_WRITES)
2213 printf ("[write_mapping_block][TID %ld] START\n", thread_id);
2216 write_clock_data ();
2217 write_uint64 (thread_id);
2219 for (current_class = profiler->classes->unwritten; current_class != NULL; current_class = current_class->next_unwritten) {
2220 write_uint32 (current_class->id);
2221 write_string (current_class->name);
2222 #if (DEBUG_MAPPING_EVENTS)
2223 printf ("mapping CLASS (%d => %s)\n", current_class->id, current_class->name);
/* Name is owned by the element; release it once serialized. */
2225 g_free (current_class->name);
2226 current_class->name = NULL;
2229 profiler->classes->unwritten = NULL;
2231 for (current_method = profiler->methods->unwritten; current_method != NULL; current_method = current_method->next_unwritten) {
2232 MonoMethod *method = current_method->method;
2233 MonoClass *klass = mono_method_get_class (method);
/* The class mapping must already exist for the method's owner. */
2234 ClassIdMappingElement *class_element = class_id_mapping_element_get (klass);
2235 g_assert (class_element != NULL);
2236 write_uint32 (current_method->id);
2237 write_uint32 (class_element->id);
2238 write_string (current_method->name);
2239 #if (DEBUG_MAPPING_EVENTS)
2240 printf ("mapping METHOD ([%d]%d => %s)\n", class_element?class_element->id:1, current_method->id, current_method->name);
2242 g_free (current_method->name);
2243 current_method->name = NULL;
2246 profiler->methods->unwritten = NULL;
2248 write_clock_data ();
2249 write_current_block (MONO_PROFILER_FILE_BLOCK_KIND_MAPPING);
2251 #if (DEBUG_MAPPING_EVENTS || DEBUG_FILE_WRITES)
2252 printf ("[write_mapping_block][TID %ld] END\n", thread_id);
/* On-disk event codes: frequent events (enter/exit/alloc) get dedicated
 * compact codes; everything else is emitted as a generic method/class/
 * other event carrying its own code and kind. */
2257 MONO_PROFILER_PACKED_EVENT_CODE_METHOD_ENTER = 1,
2258 MONO_PROFILER_PACKED_EVENT_CODE_METHOD_EXIT_IMPLICIT = 2,
2259 MONO_PROFILER_PACKED_EVENT_CODE_METHOD_EXIT_EXPLICIT = 3,
2260 MONO_PROFILER_PACKED_EVENT_CODE_CLASS_ALLOCATION = 4,
2261 MONO_PROFILER_PACKED_EVENT_CODE_METHOD_EVENT = 5,
2262 MONO_PROFILER_PACKED_EVENT_CODE_CLASS_EVENT = 6,
2263 MONO_PROFILER_PACKED_EVENT_CODE_OTHER_EVENT = 7
2264 } MonoProfilerPackedEventCode;
/* An event byte packs 3 bits of code and 5 bits of payload data. */
2265 #define MONO_PROFILER_PACKED_EVENT_CODE_BITS 3
2266 #define MONO_PROFILER_PACKED_EVENT_DATA_BITS (8-MONO_PROFILER_PACKED_EVENT_CODE_BITS)
2267 #define MONO_PROFILER_PACKED_EVENT_DATA_MASK ((1<<MONO_PROFILER_PACKED_EVENT_DATA_BITS)-1)
/* Build a packed event byte from BASE code plus the low data bits of
 * DATA; DATA is shifted down so remaining bits can follow as a varint. */
2269 #define MONO_PROFILER_EVENT_MAKE_PACKED_CODE(result,data,base) do {\
2270 result = ((base)|((data & MONO_PROFILER_PACKED_EVENT_DATA_MASK) << MONO_PROFILER_PACKED_EVENT_CODE_BITS));\
2271 data >>= MONO_PROFILER_PACKED_EVENT_DATA_BITS;\
/* Build a generic event byte embedding the event KIND and CODE. */
2273 #define MONO_PROFILER_EVENT_MAKE_FULL_CODE(result,code,kind,base) do {\
2274 result = ((base)|((((kind)<<4) | (code)) << MONO_PROFILER_PACKED_EVENT_CODE_BITS));\
/*
 * write_event:
 * Serializes one buffered ProfilerEventData record into the current output
 * block and returns a pointer to the first record that is still unwritten
 * (normally event + 1; an extended 64-bit value or an allocation-caller
 * record consumes the following slot as well).
 * NOTE(review): this chunk has extraction gaps (several original lines are
 * missing between the visible ones); comments describe only the visible code.
 */
2277 static ProfilerEventData*
2278 write_event (ProfilerEventData *event) {
2279 ProfilerEventData *next = event + 1;
2280 gboolean write_event_value = TRUE;
2283 guint64 event_value;
2284 gboolean write_event_value_extension_1 = FALSE;
2285 guint64 event_value_extension_1 = 0;
/* A value of MAX_EVENT_VALUE is a sentinel: the real 64-bit value is
 * stored in the next record slot. */
2287 event_value = event->value;
2288 if (event_value == MAX_EVENT_VALUE) {
2289 event_value = *((guint64*)next);
/* Methods and classes are emitted by their numeric mapping id (assigned by
 * the id-mapping tables and flushed in the MAPPING block), never by
 * raw pointer. */
2293 if (event->data_type == MONO_PROFILER_EVENT_DATA_TYPE_METHOD) {
2294 MethodIdMappingElement *element = method_id_mapping_element_get (event->data.address);
2295 g_assert (element != NULL);
2296 event_data = element->id;
/* Method enter/exit use the short packed encodings; everything else gets
 * the full (kind, code) encoding. */
2298 if (event->code == MONO_PROFILER_EVENT_METHOD_CALL) {
2299 if (event->kind == MONO_PROFILER_EVENT_KIND_START) {
2300 MONO_PROFILER_EVENT_MAKE_PACKED_CODE (event_code, event_data, MONO_PROFILER_PACKED_EVENT_CODE_METHOD_ENTER);
2302 MONO_PROFILER_EVENT_MAKE_PACKED_CODE (event_code, event_data, MONO_PROFILER_PACKED_EVENT_CODE_METHOD_EXIT_EXPLICIT);
2305 MONO_PROFILER_EVENT_MAKE_FULL_CODE (event_code, event->code, event->kind, MONO_PROFILER_PACKED_EVENT_CODE_METHOD_EVENT);
2307 } else if (event->data_type == MONO_PROFILER_EVENT_DATA_TYPE_CLASS) {
2308 ClassIdMappingElement *element = class_id_mapping_element_get (event->data.address);
2309 g_assert (element != NULL);
2310 event_data = element->id;
2312 if (event->code == MONO_PROFILER_EVENT_CLASS_ALLOCATION) {
/* When stack tracking is on, an allocation is followed by a second record
 * identifying the allocating (caller) method, emitted as extension_1. */
2313 if (! profiler->action_flags.track_stack) {
2314 MONO_PROFILER_EVENT_MAKE_PACKED_CODE (event_code, event_data, MONO_PROFILER_PACKED_EVENT_CODE_CLASS_ALLOCATION);
2316 MonoMethod *caller_method = next->data.address;
2318 if (next->code == MONO_PROFILER_EVENT_METHOD_ALLOCATION_CALLER) {
2319 MONO_PROFILER_EVENT_MAKE_PACKED_CODE (event_code, event_data, MONO_PROFILER_PACKED_EVENT_CODE_CLASS_ALLOCATION);
2320 } else if (next->code == MONO_PROFILER_EVENT_METHOD_ALLOCATION_JIT_TIME_CALLER) {
2321 MONO_PROFILER_EVENT_MAKE_FULL_CODE (event_code, MONO_PROFILER_EVENT_JIT_TIME_ALLOCATION, event->kind, MONO_PROFILER_PACKED_EVENT_CODE_OTHER_EVENT);
2323 /* If we are tracking the stack, the next event must be the caller method */
2324 g_assert_not_reached ();
2326 if (caller_method != NULL) {
2327 MethodIdMappingElement *caller = method_id_mapping_element_get (caller_method);
2328 g_assert (caller != NULL);
2329 event_value_extension_1 = caller->id;
2332 write_event_value_extension_1 = TRUE;
2336 MONO_PROFILER_EVENT_MAKE_FULL_CODE (event_code, event->code, event->kind, MONO_PROFILER_PACKED_EVENT_CODE_CLASS_EVENT);
/* "Other" events carry a plain number instead of a mapped pointer. */
2339 event_data = event->data.number;
2340 MONO_PROFILER_EVENT_MAKE_FULL_CODE (event_code, event->code, event->kind, MONO_PROFILER_PACKED_EVENT_CODE_OTHER_EVENT);
2343 /* Skip writing JIT events if the user did not ask for them */
2344 if ((event->code == MONO_PROFILER_EVENT_METHOD_JIT) && ! profiler->action_flags.jit_time) {
2348 #if (DEBUG_LOGGING_PROFILER)
2350 printf ("writing EVENT[%p] data_type:%d, kind:%d, code:%d (%d:%ld:%ld)\n", event,
2351 event->data_type, event->kind, event->code,
2352 event_code, event_data, event_value);
/* Emission order: code byte, data, then (optionally) value and the
 * caller-id extension. */
2355 WRITE_BYTE (event_code);
2356 write_uint64 (event_data);
2357 if (write_event_value) {
2358 write_uint64 (event_value);
2359 if (write_event_value_extension_1) {
2360 write_uint64 (event_value_extension_1);
/*
 * write_thread_data_block:
 * Flushes one thread's buffered events (the [first_unwritten_event,
 * first_unmapped_event) range) to the output file as an EVENTS block,
 * bracketed by clock samples and prefixed by the thread id and the
 * thread's starting event counter.
 */
2368 write_thread_data_block (ProfilerPerThreadData *data) {
2369 ProfilerEventData *start = data->first_unwritten_event;
/* Only events whose class/method ids have already been mapped (see
 * update_mapping) are eligible for writing. */
2370 ProfilerEventData *end = data->first_unmapped_event;
2374 #if (DEBUG_FILE_WRITES)
2375 printf ("write_thread_data_block: preparing buffer for thread %ld\n", (guint64) data->thread_id);
2377 write_clock_data ();
2378 write_uint64 (data->thread_id);
2380 write_uint64 (data->start_event_counter);
/* write_event may consume more than one record, so it returns the next
 * cursor position. */
2382 while (start < end) {
2383 start = write_event (start);
2386 data->first_unwritten_event = end;
2388 write_clock_data ();
2389 write_current_block (MONO_PROFILER_FILE_BLOCK_KIND_EVENTS);
2390 #if (DEBUG_FILE_WRITES)
2391 printf ("write_thread_data_block: buffer for thread %ld written\n", (guint64) data->thread_id);
2395 static ProfilerExecutableMemoryRegionData*
2396 profiler_executable_memory_region_new (gpointer *start, gpointer *end, guint32 file_offset, char *file_name, guint32 id) {
2397 ProfilerExecutableMemoryRegionData *result = g_new (ProfilerExecutableMemoryRegionData, 1);
2398 result->start = start;
2400 result->file_offset = file_offset;
2401 result->file_name = g_strdup (file_name);
2403 result->is_new = TRUE;
2405 result->file = NULL;
2406 result->symbols_capacity = id;
2407 result->symbols_count = id;
2408 result->symbols = NULL;
/* Forward declaration: destroying a region may drop its file reference. */
2414 executable_file_close (ProfilerExecutableMemoryRegionData *region);
/*
 * profiler_executable_memory_region_destroy:
 * Releases everything a region descriptor owns: its copied file name, its
 * symbol table, and (via executable_file_close) its reference to the
 * shared mapped executable file.
 */
2417 profiler_executable_memory_region_destroy (ProfilerExecutableMemoryRegionData *data) {
2418 if (data->file_name != NULL) {
2419 g_free (data->file_name);
2421 if (data->symbols != NULL) {
2422 g_free (data->symbols);
2424 if (data->file != NULL) {
2425 executable_file_close (data);
2430 static ProfilerExecutableMemoryRegions*
2431 profiler_executable_memory_regions_new (int next_id, int next_unmanaged_function_id) {
2432 ProfilerExecutableMemoryRegions *result = g_new (ProfilerExecutableMemoryRegions, 1);
2433 result->regions = g_new0 (ProfilerExecutableMemoryRegionData*, 32);
2434 result->regions_capacity = 32;
2435 result->regions_count = 0;
2436 result->next_id = next_id;
2437 result->next_unmanaged_function_id = next_unmanaged_function_id;
/*
 * profiler_executable_memory_regions_destroy:
 * Destroys every region descriptor in the table, then the table's
 * backing array itself.
 */
2442 profiler_executable_memory_regions_destroy (ProfilerExecutableMemoryRegions *regions) {
2445 for (i = 0; i < regions->regions_count; i++) {
2446 profiler_executable_memory_region_destroy (regions->regions [i]);
2448 g_free (regions->regions);
/*
 * find_address_region:
 * Binary-searches the (sorted, see sort_regions) region table for the
 * region containing @address. Returns the region, or NULL when the
 * address falls outside every known region.
 * NOTE(review): extraction gaps hide some lines (low_index declaration,
 * early return, loop breaks); comments describe only the visible code.
 */
2452 static ProfilerExecutableMemoryRegionData*
2453 find_address_region (ProfilerExecutableMemoryRegions *regions, gpointer address) {
2455 int high_index = regions->regions_count;
2456 int middle_index = 0;
2457 ProfilerExecutableMemoryRegionData *middle_region = regions->regions [0];
/* Fast reject: empty table, or address below the first / above the last
 * region. */
2459 if ((regions->regions_count == 0) || (regions->regions [low_index]->start > address) || (regions->regions [high_index - 1]->end < address)) {
2463 //printf ("find_address_region: Looking for address %p in %d regions (from %p to %p)\n", address, regions->regions_count, regions->regions [low_index]->start, regions->regions [high_index - 1]->end);
2465 while (low_index != high_index) {
2466 middle_index = low_index + ((high_index - low_index) / 2);
2467 middle_region = regions->regions [middle_index];
2469 //printf ("find_address_region: Looking for address %p, considering index %d[%p-%p] (%d-%d)\n", address, middle_index, middle_region->start, middle_region->end, low_index, high_index);
2471 if (middle_region->start > address) {
2472 if (middle_index > 0) {
2473 high_index = middle_index;
2477 } else if (middle_region->end < address) {
2478 if (middle_index < regions->regions_count - 1) {
2479 low_index = middle_index + 1;
2484 return middle_region;
/* Post-check: the loop can terminate without a hit; validate the final
 * candidate before returning it. */
2488 if ((middle_region == NULL) || (middle_region->start > address) || (middle_region->end < address)) {
2491 return middle_region;
/*
 * append_region:
 * Appends a newly parsed executable region to the table, growing the
 * backing array (doubling its capacity) when full, and assigns the
 * region the next available id.
 */
2496 append_region (ProfilerExecutableMemoryRegions *regions, gpointer *start, gpointer *end, guint32 file_offset, char *file_name) {
2497 if (regions->regions_count >= regions->regions_capacity) {
/* Manual grow-and-copy; the old array is freed, the descriptors it
 * pointed to are reused. */
2498 ProfilerExecutableMemoryRegionData **new_regions = g_new0 (ProfilerExecutableMemoryRegionData*, regions->regions_capacity * 2);
2499 memcpy (new_regions, regions->regions, regions->regions_capacity * sizeof (ProfilerExecutableMemoryRegionData*));
2500 g_free (regions->regions);
2501 regions->regions = new_regions;
2502 regions->regions_capacity = regions->regions_capacity * 2;
2504 regions->regions [regions->regions_count] = profiler_executable_memory_region_new (start, end, file_offset, file_name, regions->next_id);
2505 regions->regions_count ++;
2506 regions->next_id ++;
/*
 * restore_old_regions:
 * After a fresh scan of /proc/self/maps, swaps back any region that
 * already existed in the old table (same range, offset and file) so that
 * its id, symbol table and is_new=FALSE state are preserved. The freshly
 * parsed duplicate is parked in the old table, which is destroyed by the
 * caller (refresh_memory_regions).
 */
2510 restore_old_regions (ProfilerExecutableMemoryRegions *old_regions, ProfilerExecutableMemoryRegions *new_regions) {
2514 for (old_i = 0; old_i < old_regions->regions_count; old_i++) {
2515 ProfilerExecutableMemoryRegionData *old_region = old_regions->regions [old_i];
2516 for (new_i = 0; new_i < new_regions->regions_count; new_i++) {
2517 ProfilerExecutableMemoryRegionData *new_region = new_regions->regions [new_i];
2518 if ((old_region->start == new_region->start) &&
2519 (old_region->end == new_region->end) &&
2520 (old_region->file_offset == new_region->file_offset) &&
2521 ! strcmp (old_region->file_name, new_region->file_name)) {
/* Swap: the surviving (old) descriptor goes into the new table, the
 * duplicate goes into the old table to be freed with it. */
2522 new_regions->regions [new_i] = old_region;
2523 old_regions->regions [old_i] = new_region;
2525 // FIXME (sanity check)
2526 g_assert (new_region->is_new && ! old_region->is_new);
2533 compare_regions (const void *a1, const void *a2) {
2534 ProfilerExecutableMemoryRegionData *r1 = * (ProfilerExecutableMemoryRegionData**) a1;
2535 ProfilerExecutableMemoryRegionData *r2 = * (ProfilerExecutableMemoryRegionData**) a2;
2536 return (r1->start < r2->start)? -1 : ((r1->start > r2->start)? 1 : 0);
2540 sort_regions (ProfilerExecutableMemoryRegions *regions) {
2541 qsort (regions->regions, regions->regions_count, sizeof (ProfilerExecutableMemoryRegionData *), compare_regions);
/*
 * executable_file_add_region_reference:
 * Walks the ELF section headers of @file and records, for every executable
 * section that is fully covered by @region's file mapping, a link from
 * that section back to the region (used later to attribute symbols to
 * regions). Section 0 is the ELF null section and is skipped.
 */
2545 executable_file_add_region_reference (ProfilerExecutableFile *file, ProfilerExecutableMemoryRegionData *region) {
2546 guint8 *section_headers = file->data + file->header->e_shoff;
2549 for (section_index = 1; section_index < file->header->e_shnum; section_index ++) {
2550 ElfSection *section_header = (ElfSection*) (section_headers + (file->header->e_shentsize * section_index));
/* Match: section is loaded (sh_addr != 0), executable, and its file range
 * [sh_offset, sh_offset+sh_size) lies inside the region's mapped range. */
2552 if ((section_header->sh_addr != 0) && (section_header->sh_flags & ELF_SHF_EXECINSTR) &&
2553 (region->file_offset <= section_header->sh_offset) && (region->file_offset + (((guint8*)region->end)-((guint8*)region->start)) >= (section_header->sh_offset + section_header->sh_size))) {
2554 ProfilerExecutableFileSectionRegion *section_region = & (file->section_regions [section_index]);
2555 section_region->region = region;
2556 section_region->section_address = (gpointer) section_header->sh_addr;
2557 section_region->section_offset = section_header->sh_offset;
/*
 * executable_file_open:
 * Returns the (reference counted, cached per file name) ProfilerExecutableFile
 * for @region, creating it on first use: opens and mmaps the file, validates
 * the ELF identification (magic, word size, endianness), locates the symbol
 * tables (.symtab/.strtab preferred, .dynsym/.dynstr as fallback) and links
 * the region to the file's executable sections.
 * NOTE(review): extraction gaps hide several lines (cache-hit early path,
 * error-path gotos, some declarations); comments describe only visible code.
 */
2562 static ProfilerExecutableFile*
2563 executable_file_open (ProfilerExecutableMemoryRegionData *region) {
2564 ProfilerExecutableFiles *files = & (profiler->executable_files);
2565 ProfilerExecutableFile *file = (ProfilerExecutableFile*) g_hash_table_lookup (files->table, region->file_name);
/* Used below to detect host endianness at runtime. */
2567 guint16 test = 0x0102;
2568 struct stat stat_buffer;
2569 int symtab_index = 0;
2570 int strtab_index = 0;
2571 int dynsym_index = 0;
2572 int dynstr_index = 0;
2574 guint8 *section_headers;
2578 file = g_new0 (ProfilerExecutableFile, 1);
2579 region->file = file;
2580 file->reference_count ++;
2582 file->fd = open (region->file_name, O_RDONLY);
2583 if (file->fd == -1) {
2584 //g_warning ("Cannot open file '%s': '%s'", region->file_name, strerror (errno));
2587 if (fstat (file->fd, &stat_buffer) != 0) {
2588 //g_warning ("Cannot stat file '%s': '%s'", region->file_name, strerror (errno));
2591 size_t region_length = ((guint8*)region->end) - ((guint8*)region->start);
2592 file->length = stat_buffer.st_size;
/* If the whole file is already mapped by the region, reuse that mapping
 * instead of creating a second one. */
2594 if (file->length == region_length) {
2595 file->data = region->start;
2599 file->data = mmap (NULL, file->length, PROT_READ, MAP_PRIVATE, file->fd, 0);
2601 if (file->data == MAP_FAILED) {
2603 //g_warning ("Cannot map file '%s': '%s'", region->file_name, strerror (errno));
/* --- ELF identification checks --- */
2611 header = (ElfHeader*) file->data;
2613 if ((header->e_ident [EI_MAG0] != 0x7f) || (header->e_ident [EI_MAG1] != 'E') ||
2614 (header->e_ident [EI_MAG2] != 'L') || (header->e_ident [EI_MAG3] != 'F')) {
/* ELF class must match the build's pointer size. */
2618 if (sizeof (gsize) == 4) {
2619 if (header->e_ident [EI_CLASS] != ELF_CLASS_32) {
2620 g_warning ("Class is not ELF_CLASS_32 with gsize size %d", (int) sizeof (gsize));
2623 } else if (sizeof (gsize) == 8) {
2624 if (header->e_ident [EI_CLASS] != ELF_CLASS_64) {
2625 g_warning ("Class is not ELF_CLASS_64 with gsize size %d", (int) sizeof (gsize));
2629 g_warning ("Absurd gsize size %d", (int) sizeof (gsize));
/* ELF data encoding must match the host's endianness (probed via "test"). */
2633 if ((*(guint8*)(&test)) == 0x01) {
2634 if (header->e_ident [EI_DATA] != ELF_DATA_MSB) {
2635 g_warning ("Data is not ELF_DATA_MSB with first test byte 0x01");
2638 } else if ((*(guint8*)(&test)) == 0x02) {
2639 if (header->e_ident [EI_DATA] != ELF_DATA_LSB) {
2640 g_warning ("Data is not ELF_DATA_LSB with first test byte 0x02");
2644 g_warning ("Absurd test byte value");
2648 /* OK, this is a usable elf file... */
2649 file->header = header;
2650 section_headers = file->data + header->e_shoff;
/* e_shstrndx points at the section-name string table. */
2651 file->main_string_table = ((const char*) file->data) + (((ElfSection*) (section_headers + (header->e_shentsize * header->e_shstrndx)))->sh_offset);
/* Scan sections for symbol and string tables. */
2653 for (section_index = 0; section_index < header->e_shnum; section_index ++) {
2654 ElfSection *section_header = (ElfSection*) (section_headers + (header->e_shentsize * section_index));
2656 if (section_header->sh_type == ELF_SHT_SYMTAB) {
2657 symtab_index = section_index;
2658 } else if (section_header->sh_type == ELF_SHT_DYNSYM) {
2659 dynsym_index = section_index;
2660 } else if (section_header->sh_type == ELF_SHT_STRTAB) {
2661 if (! strcmp (file->main_string_table + section_header->sh_name, ".strtab")) {
2662 strtab_index = section_index;
2663 } else if (! strcmp (file->main_string_table + section_header->sh_name, ".dynstr")) {
2664 dynstr_index = section_index;
/* Prefer the full symbol table; fall back to the dynamic one. */
2669 if ((symtab_index != 0) && (strtab_index != 0)) {
2670 section_index = symtab_index;
2671 strings_index = strtab_index;
2672 } else if ((dynsym_index != 0) && (dynstr_index != 0)) {
2673 section_index = dynsym_index;
2674 strings_index = dynstr_index;
2680 if (section_index != 0) {
2681 ElfSection *section_header = (ElfSection*) (section_headers + (header->e_shentsize * section_index));
2682 file->symbol_size = section_header->sh_entsize;
2683 file->symbols_count = (guint32) (section_header->sh_size / section_header->sh_entsize);
2684 file->symbols_start = file->data + section_header->sh_offset;
2685 file->symbols_string_table = ((const char*) file->data) + (((ElfSection*) (section_headers + (header->e_shentsize * strings_index)))->sh_offset);
2688 file->section_regions = g_new0 (ProfilerExecutableFileSectionRegion, file->header->e_shnum);
/* Cache-hit path: just add another reference for this region. */
2690 region->file = file;
2691 file->reference_count ++;
2694 if (file->header != NULL) {
2695 executable_file_add_region_reference (file, region);
/* Queue the file on the new_files list so build_symbol_tables processes
 * it exactly once. */
2698 if (file->next_new_file == NULL) {
2699 file->next_new_file = files->new_files;
2700 files->new_files = file;
/*
 * executable_file_free:
 * Tears down a ProfilerExecutableFile: closes its descriptor, unmaps its
 * data and frees the per-section region links. Called when the last
 * referencing region is destroyed (see executable_file_close).
 */
2706 executable_file_free (ProfilerExecutableFile* file) {
2707 if (file->fd != -1) {
2708 if (close (file->fd) != 0) {
2709 g_warning ("Cannot close file: '%s'", strerror (errno));
/* NOTE(review): when file->data aliased the region's own mapping (see
 * executable_file_open), the hidden lines presumably guard this munmap -
 * confirm against the full source. */
2711 if (file->data != NULL) {
2712 if (munmap (file->data, file->length) != 0) {
2713 g_warning ("Cannot unmap file: '%s'", strerror (errno));
2717 if (file->section_regions != NULL) {
2718 g_free (file->section_regions);
/*
 * executable_file_close:
 * Drops @region's reference to its shared executable file; when the
 * reference count reaches zero the file is removed from the cache and
 * freed.
 */
2724 executable_file_close (ProfilerExecutableMemoryRegionData *region) {
2725 region->file->reference_count --;
2727 if (region->file->reference_count <= 0) {
2728 ProfilerExecutableFiles *files = & (profiler->executable_files);
2729 g_hash_table_remove (files->table, region->file_name);
2730 executable_file_free (region->file);
2731 region->file = NULL;
/*
 * executable_file_count_symbols:
 * First pass over @file's ELF symbols: for every function symbol that
 * belongs to a section linked to a region whose symbol table has not been
 * allocated yet, bumps that region's symbols_count. The counts are then
 * used by executable_memory_regions_prepare_symbol_tables to size the
 * per-region symbol arrays.
 */
2736 executable_file_count_symbols (ProfilerExecutableFile *file) {
2739 for (symbol_index = 0; symbol_index < file->symbols_count; symbol_index ++) {
2740 ElfSymbol *symbol = (ElfSymbol*) (file->symbols_start + (symbol_index * file->symbol_size));
/* Only defined function symbols with a valid section index count. */
2742 if ((ELF_ST_TYPE (symbol->st_info) == ELF_STT_FUNC) &&
2743 (symbol->st_shndx > 0) &&
2744 (symbol->st_shndx < file->header->e_shnum)) {
2745 int symbol_section_index = symbol->st_shndx;
2746 ProfilerExecutableMemoryRegionData *region = file->section_regions [symbol_section_index].region;
2747 if ((region != NULL) && (region->symbols == NULL)) {
2748 region->symbols_count ++;
/*
 * executable_memory_regions_prepare_symbol_tables:
 * Allocates each region's symbol array using the count produced by
 * executable_file_count_symbols, then resets symbols_count to 0 so the
 * second pass (executable_file_build_symbol_tables) can use it as an
 * insertion cursor.
 */
2755 executable_memory_regions_prepare_symbol_tables (ProfilerExecutableMemoryRegions *regions) {
2757 for (i = 0; i < regions->regions_count; i++) {
2758 ProfilerExecutableMemoryRegionData *region = regions->regions [i];
2759 if ((region->symbols_count > 0) && (region->symbols == NULL)) {
2760 region->symbols = g_new (ProfilerUnmanagedSymbol, region->symbols_count);
2761 region->symbols_capacity = region->symbols_count;
2762 region->symbols_count = 0;
2768 executable_region_symbol_get_name (ProfilerExecutableMemoryRegionData *region, ProfilerUnmanagedSymbol *symbol) {
2769 ElfSymbol *elf_symbol = (ElfSymbol*) (region->file->symbols_start + (symbol->index * region->file->symbol_size));
2770 return region->file->symbols_string_table + elf_symbol->st_name;
/*
 * executable_file_build_symbol_tables:
 * Second pass over @file's ELF symbols (after the arrays were sized by
 * the count pass): appends each function symbol to its region's symbol
 * array, recording the ELF symbol index, the size, and the symbol's
 * offset relative to the region start.
 */
2774 executable_file_build_symbol_tables (ProfilerExecutableFile *file) {
2777 for (symbol_index = 0; symbol_index < file->symbols_count; symbol_index ++) {
2778 ElfSymbol *symbol = (ElfSymbol*) (file->symbols_start + (symbol_index * file->symbol_size));
/* Same filter as executable_file_count_symbols: defined function symbols
 * with a valid section index. */
2780 if ((ELF_ST_TYPE (symbol->st_info) == ELF_STT_FUNC) &&
2781 (symbol->st_shndx > 0) &&
2782 (symbol->st_shndx < file->header->e_shnum)) {
2783 int symbol_section_index = symbol->st_shndx;
2784 ProfilerExecutableFileSectionRegion *section_region = & (file->section_regions [symbol_section_index]);
2785 ProfilerExecutableMemoryRegionData *region = section_region->region;
2787 if (region != NULL) {
2788 ProfilerUnmanagedSymbol *new_symbol = & (region->symbols [region->symbols_count]);
2789 region->symbols_count ++;
2792 new_symbol->index = symbol_index;
2793 new_symbol->size = symbol->st_size;
/* Translate the symbol's virtual address into an offset from the region
 * start, accounting for the section's load address vs file offset. */
2794 new_symbol->offset = (((guint8*) symbol->st_value) - section_region->section_address) - (region->file_offset - section_region->section_offset);
2801 compare_region_symbols (const void *p1, const void *p2) {
2802 const ProfilerUnmanagedSymbol *s1 = p1;
2803 const ProfilerUnmanagedSymbol *s2 = p2;
2804 return (s1->offset < s2->offset)? -1 : ((s1->offset > s2->offset)? 1 : 0);
/*
 * executable_memory_regions_sort_symbol_tables:
 * Sorts each new region's symbol array by offset so that
 * executable_memory_region_find_symbol can binary-search it.
 */
2808 executable_memory_regions_sort_symbol_tables (ProfilerExecutableMemoryRegions *regions) {
2810 for (i = 0; i < regions->regions_count; i++) {
2811 ProfilerExecutableMemoryRegionData *region = regions->regions [i];
2812 if ((region->is_new) && (region->symbols != NULL)) {
2813 qsort (region->symbols, region->symbols_count, sizeof (ProfilerUnmanagedSymbol), compare_region_symbols);
/*
 * build_symbol_tables:
 * Orchestrates symbol-table construction for all newly discovered regions:
 * open each new region's backing file, count symbols per region, allocate
 * the arrays, fill them, sort them, and finally drain the new_files list.
 */
2819 build_symbol_tables (ProfilerExecutableMemoryRegions *regions, ProfilerExecutableFiles *files) {
2821 ProfilerExecutableFile *file;
2823 for (i = 0; i < regions->regions_count; i++) {
2824 ProfilerExecutableMemoryRegionData *region = regions->regions [i];
2825 if ((region->is_new) && (region->file == NULL)) {
2826 executable_file_open (region);
/* Pass 1: size the per-region symbol arrays. */
2830 for (file = files->new_files; file != NULL; file = file->next_new_file) {
2831 executable_file_count_symbols (file);
2834 executable_memory_regions_prepare_symbol_tables (regions);
/* Pass 2: fill the arrays. */
2836 for (file = files->new_files; file != NULL; file = file->next_new_file) {
2837 executable_file_build_symbol_tables (file);
2840 executable_memory_regions_sort_symbol_tables (regions);
/* Unlink every file from the new_files list (next pointer must be
 * cleared so a later reopen can re-queue it). */
2842 file = files->new_files;
2843 while (file != NULL) {
2844 ProfilerExecutableFile *next_file = file->next_new_file;
2845 file->next_new_file = NULL;
2848 files->new_files = NULL;
/*
 * executable_memory_region_find_symbol:
 * Binary-searches the region's sorted symbol array for the symbol whose
 * [offset, offset+size) interval contains @offset; returns it, or NULL
 * when no symbol covers the offset.
 * NOTE(review): extraction gaps hide the loop's termination/return lines;
 * comments describe only the visible code.
 */
2852 executable_memory_region_find_symbol (ProfilerExecutableMemoryRegionData *region, guint32 offset) {
2853 if (region->symbols_count > 0) {
2854 ProfilerUnmanagedSymbol *low = region->symbols;
2855 ProfilerUnmanagedSymbol *high = region->symbols + (region->symbols_count - 1);
2856 int step = region->symbols_count >> 1;
2857 ProfilerUnmanagedSymbol *current = region->symbols + step;
2860 step = (high - low) >> 1;
2862 if (offset < current->offset) {
2864 current = high - step;
2865 } else if (offset >= current->offset) {
/* Offset beyond this symbol's extent: continue searching upward. */
2866 if (offset >= (current->offset + current->size)) {
2868 current = low + step;
/* Final containment check on the candidate found by the search. */
2875 if ((offset >= current->offset) && (offset < (current->offset + current->size))) {
2885 //FIXME: make also Win32 and BSD variants
/* Chunk size for streaming /proc/self/maps through a fixed buffer. */
2886 #define MAPS_BUFFER_SIZE 4096
/*
 * update_regions_buffer:
 * Refills @buffer with the next chunk from @fd. A short read marks the
 * end of data by NUL-terminating the buffer after the bytes read.
 */
2889 update_regions_buffer (int fd, char *buffer) {
2890 ssize_t result = read (fd, buffer, MAPS_BUFFER_SIZE);
2892 if (result == MAPS_BUFFER_SIZE) {
2894 } else if (result >= 0) {
2895 *(buffer + result) = 0;
/* Advances cursor (c) through buffer (b), transparently refilling the
 * buffer from (fd) when the cursor runs off the end or hits the NUL that
 * update_regions_buffer uses to mark a short read. */
2903 #define GOTO_NEXT_CHAR(c,b,fd) do {\
2905 if (((c) - (b) >= MAPS_BUFFER_SIZE) || ((*(c) == 0) && ((c) != (b)))) {\
2906 update_regions_buffer ((fd), (b));\
/*
 * Maps one hexadecimal character (either case) to its numeric value
 * 0-15; any non-hex character yields -1. Used by the /proc/self/maps
 * line parser for addresses and offsets.
 */
static int hex_digit_value (char c) {
	if ((c >= '0') && (c <= '9'))
		return c - '0';
	if ((c >= 'a') && (c <= 'f'))
		return 10 + (c - 'a');
	if ((c >= 'A') && (c <= 'F'))
		return 10 + (c - 'A');
	return -1;
}
/*
 * States of the /proc/self/maps line parser (see parse_map_line); they
 * follow the field order of a maps line:
 * start-end perms offset dev inode [pathname]
 */
2939 MAP_LINE_PARSER_STATE_INVALID,
2940 MAP_LINE_PARSER_STATE_START_ADDRESS,
2941 MAP_LINE_PARSER_STATE_END_ADDRESS,
2942 MAP_LINE_PARSER_STATE_PERMISSIONS,
2943 MAP_LINE_PARSER_STATE_OFFSET,
2944 MAP_LINE_PARSER_STATE_DEVICE,
2945 MAP_LINE_PARSER_STATE_INODE,
2946 MAP_LINE_PARSER_STATE_BLANK_BEFORE_FILENAME,
2947 MAP_LINE_PARSER_STATE_FILENAME,
2948 MAP_LINE_PARSER_STATE_DONE
2949 } MapLineParserState;
/* Human-readable state names (debug printing); must stay in sync with the
 * enum above. */
2951 const char *map_line_parser_state [] = {
2959 "BLANK_BEFORE_FILENAME",
/*
 * parse_map_line:
 * Parses one line of /proc/self/maps with a character-driven state machine
 * and, for executable file-backed mappings, appends a region to @regions.
 * Returns the cursor positioned after the parsed line (NULL-handling and
 * the return statement fall in extraction gaps - see full source).
 * NOTE(review): many case bodies are partially hidden by extraction gaps;
 * comments describe only the visible code.
 */
2965 parse_map_line (ProfilerExecutableMemoryRegions *regions, int fd, char *buffer, char *current) {
2966 MapLineParserState state = MAP_LINE_PARSER_STATE_START_ADDRESS;
2967 gsize start_address = 0;
2968 gsize end_address = 0;
2970 char *start_filename = NULL;
2971 char *end_filename = NULL;
2972 gboolean is_executable = FALSE;
2973 gboolean done = FALSE;
/* Accumulate the start address nibble by nibble until the '-' separator. */
2979 case MAP_LINE_PARSER_STATE_START_ADDRESS:
2981 start_address <<= 4;
2982 start_address |= hex_digit_value (c);
2983 } else if (c == '-') {
2984 state = MAP_LINE_PARSER_STATE_END_ADDRESS;
2986 state = MAP_LINE_PARSER_STATE_INVALID;
2989 case MAP_LINE_PARSER_STATE_END_ADDRESS:
2992 end_address |= hex_digit_value (c);
2993 } else if (isblank (c)) {
2994 state = MAP_LINE_PARSER_STATE_PERMISSIONS;
2996 state = MAP_LINE_PARSER_STATE_INVALID;
/* Only the execute bit matters here; 'x' marks the region executable. */
2999 case MAP_LINE_PARSER_STATE_PERMISSIONS:
3001 is_executable = TRUE;
3002 } else if (isblank (c)) {
3003 state = MAP_LINE_PARSER_STATE_OFFSET;
3004 } else if ((c != '-') && ! isalpha (c)) {
3005 state = MAP_LINE_PARSER_STATE_INVALID;
3008 case MAP_LINE_PARSER_STATE_OFFSET:
3011 offset |= hex_digit_value (c);
3012 } else if (isblank (c)) {
3013 state = MAP_LINE_PARSER_STATE_DEVICE;
3015 state = MAP_LINE_PARSER_STATE_INVALID;
/* Device is "major:minor" in hex; inode is decimal. */
3018 case MAP_LINE_PARSER_STATE_DEVICE:
3020 state = MAP_LINE_PARSER_STATE_INODE;
3021 } else if ((c != ':') && ! isxdigit (c)) {
3022 state = MAP_LINE_PARSER_STATE_INVALID;
3025 case MAP_LINE_PARSER_STATE_INODE:
3027 state = MAP_LINE_PARSER_STATE_BLANK_BEFORE_FILENAME;
3028 } else if (! isdigit (c)) {
3029 state = MAP_LINE_PARSER_STATE_INVALID;
/* A pathname starts with '/' (file) or '[' (pseudo region like [vdso]). */
3032 case MAP_LINE_PARSER_STATE_BLANK_BEFORE_FILENAME:
3033 if ((c == '/') || (c == '[')) {
3034 state = MAP_LINE_PARSER_STATE_FILENAME;
3035 start_filename = current;
3036 } else if (! isblank (c)) {
3037 state = MAP_LINE_PARSER_STATE_INVALID;
3040 case MAP_LINE_PARSER_STATE_FILENAME:
3042 state = MAP_LINE_PARSER_STATE_DONE;
3044 end_filename = current;
/* Executable file-backed mapping fully parsed: record it. */
3047 case MAP_LINE_PARSER_STATE_DONE:
3048 if (done && is_executable) {
3050 append_region (regions, (gpointer) start_address, (gpointer) end_address, offset, start_filename);
3053 case MAP_LINE_PARSER_STATE_INVALID:
3055 state = MAP_LINE_PARSER_STATE_DONE;
/* A newline terminates the line regardless of the current state. */
3062 } else if (c == '\n') {
3063 state = MAP_LINE_PARSER_STATE_DONE;
3066 GOTO_NEXT_CHAR(current, buffer, fd);
/*
 * scan_process_regions:
 * Reads /proc/self/maps and feeds each line to parse_map_line, populating
 * @regions with the process's executable mappings. (The fd error check,
 * close() and buffer free fall in extraction gaps - see full source.)
 */
3072 scan_process_regions (ProfilerExecutableMemoryRegions *regions) {
3077 fd = open ("/proc/self/maps", O_RDONLY);
3082 buffer = malloc (MAPS_BUFFER_SIZE);
3083 update_regions_buffer (fd, buffer);
3085 while (current != NULL) {
3086 current = parse_map_line (regions, fd, buffer, current);
/*
 * Sub-record codes inside a STATISTICAL file block. Each hit is written
 * as a uint32 whose low 3 bits hold one of these codes and whose upper
 * bits carry the payload (method id, symbol id, region id or call chain
 * depth - see write_statistical_hit / write_statistical_data_block).
 */
3097 MONO_PROFILER_STATISTICAL_CODE_END = 0,
3098 MONO_PROFILER_STATISTICAL_CODE_METHOD = 1,
3099 MONO_PROFILER_STATISTICAL_CODE_UNMANAGED_FUNCTION_ID = 2,
3100 MONO_PROFILER_STATISTICAL_CODE_UNMANAGED_FUNCTION_NEW_ID = 3,
3101 MONO_PROFILER_STATISTICAL_CODE_UNMANAGED_FUNCTION_OFFSET_IN_REGION = 4,
3102 MONO_PROFILER_STATISTICAL_CODE_CALL_CHAIN = 5,
3103 MONO_PROFILER_STATISTICAL_CODE_REGIONS = 7
3104 } MonoProfilerStatisticalCode;
/*
 * refresh_memory_regions:
 * Rebuilds the executable-region table from /proc/self/maps, carries over
 * unchanged regions from the old table, (re)builds symbol tables for new
 * regions, and writes a REGIONS sub-block recording removed and added
 * regions. Finally replaces profiler->executable_regions with the fresh
 * table and destroys the old one.
 */
3107 refresh_memory_regions (void) {
3108 ProfilerExecutableMemoryRegions *old_regions = profiler->executable_regions;
3109 ProfilerExecutableMemoryRegions *new_regions = profiler_executable_memory_regions_new (old_regions->next_id, old_regions->next_unmanaged_function_id);
3112 LOG_WRITER_THREAD ("Refreshing memory regions...");
3113 scan_process_regions (new_regions);
/* Keep descriptors (ids, symbols) of regions that did not change. */
3114 restore_old_regions (old_regions, new_regions);
3115 sort_regions (new_regions);
3116 LOG_WRITER_THREAD ("Refreshed memory regions.");
3118 LOG_WRITER_THREAD ("Building symbol tables...");
3119 build_symbol_tables (new_regions, & (profiler->executable_files));
/* Debug dump of the refreshed tables (guarded by a hidden #if in the
 * full source, judging by the surrounding extraction gaps). */
3121 printf ("Symbol tables done!\n");
3122 printf ("Region summary...\n");
3123 for (i = 0; i < new_regions->regions_count; i++) {
3124 ProfilerExecutableMemoryRegionData *region = new_regions->regions [i];
3125 printf ("Region %d[%d][NEW:%d] (%p-%p) at %d in file %s\n", i, region->id, region->is_new,
3126 region->start, region->end, region->file_offset, region->file_name);
3128 printf ("New symbol tables dump...\n");
3129 for (i = 0; i < new_regions->regions_count; i++) {
3130 ProfilerExecutableMemoryRegionData *region = new_regions->regions [i];
3132 if (region->is_new) {
3135 printf ("Region %d[%d][NEW:%d] (%p-%p) at %d in file %s\n", i, region->id, region->is_new,
3136 region->start, region->end, region->file_offset, region->file_name);
3137 for (symbol_index = 0; symbol_index < region->symbols_count; symbol_index ++) {
3138 ProfilerUnmanagedSymbol *symbol = & (region->symbols [symbol_index]);
3139 printf (" [%d] Symbol %s (offset %d, size %d)\n", symbol_index,
3140 executable_region_symbol_get_name (region, symbol),
3141 symbol->offset, symbol->size);
3146 LOG_WRITER_THREAD ("Built symbol tables.");
3148 // This marks the region "sub-block"
3149 write_uint32 (MONO_PROFILER_STATISTICAL_CODE_REGIONS);
3151 // First write the "removed" regions
/* A region still flagged is_new inside the OLD table was swapped there by
 * restore_old_regions, meaning its old counterpart disappeared. */
3152 for (i = 0; i < old_regions->regions_count; i++) {
3153 ProfilerExecutableMemoryRegionData *region = old_regions->regions [i];
3154 if (! region->is_new) {
3155 #if DEBUG_STATISTICAL_PROFILER
3156 printf ("[refresh_memory_regions] Invalidated region %d\n", region->id);
3158 write_uint32 (region->id);
3163 // Then write the new ones
3164 for (i = 0; i < new_regions->regions_count; i++) {
3165 ProfilerExecutableMemoryRegionData *region = new_regions->regions [i];
3166 if (region->is_new) {
3167 region->is_new = FALSE;
3169 #if DEBUG_STATISTICAL_PROFILER
3170 printf ("[refresh_memory_regions] Wrote region %d (%p-%p[%d] '%s')\n", region->id, region->start, region->end, region->file_offset, region->file_name);
3172 write_uint32 (region->id);
3173 write_uint64 (GPOINTER_TO_UINT (region->start));
3174 write_uint32 (GPOINTER_TO_UINT (region->end) - GPOINTER_TO_UINT (region->start));
3175 write_uint32 (region->file_offset);
3176 write_string (region->file_name);
3181 // Finally, free the old ones, and replace them
3182 profiler_executable_memory_regions_destroy (old_regions);
3183 profiler->executable_regions = new_regions;
/*
 * write_statistical_hit:
 * Writes one statistical (sampling) hit at @address. Managed code is
 * written as a method id; unmanaged code is resolved against the
 * executable region/symbol tables (refreshing them at most once per
 * flush, tracked by @regions_refreshed, which is also the return value).
 */
3187 write_statistical_hit (MonoDomain *domain, gpointer address, gboolean regions_refreshed) {
3188 MonoJitInfo *ji = (domain != NULL) ? mono_jit_info_table_find (domain, (char*) address) : NULL;
/* Managed hit: emit the JITted method's mapping id. */
3191 MonoMethod *method = mono_jit_info_get_method (ji);
3192 MethodIdMappingElement *element = method_id_mapping_element_get (method);
3194 if (element != NULL) {
3195 #if DEBUG_STATISTICAL_PROFILER
3196 printf ("[write_statistical_hit] Wrote method %d\n", element->id);
3198 write_uint32 ((element->id << 3) | MONO_PROFILER_STATISTICAL_CODE_METHOD);
3200 #if DEBUG_STATISTICAL_PROFILER
3201 printf ("[write_statistical_hit] Wrote unknown method %p\n", method);
/* Method id 0 (bare code) marks a JITted but unmapped method. */
3203 write_uint32 (MONO_PROFILER_STATISTICAL_CODE_METHOD);
/* Unmanaged hit: locate the executable region containing the address. */
3206 ProfilerExecutableMemoryRegionData *region = find_address_region (profiler->executable_regions, address);
3208 if (region == NULL && ! regions_refreshed) {
3209 #if DEBUG_STATISTICAL_PROFILER
3210 printf ("[write_statistical_hit] Cannot find region for address %p, refreshing...\n", address);
3212 refresh_memory_regions ();
3213 regions_refreshed = TRUE;
3214 region = find_address_region (profiler->executable_regions, address);
3217 if (region != NULL) {
3218 guint32 offset = ((guint8*)address) - ((guint8*)region->start);
3219 ProfilerUnmanagedSymbol *symbol = executable_memory_region_find_symbol (region, offset);
3221 if (symbol != NULL) {
/* Known symbol: either reuse its id or assign a fresh one and also emit
 * its name (NEW_ID record). */
3222 if (symbol->id > 0) {
3223 #if DEBUG_STATISTICAL_PROFILER
3224 printf ("[write_statistical_hit] Wrote unmanaged symbol %d\n", symbol->id);
3226 write_uint32 ((symbol->id << 3) | MONO_PROFILER_STATISTICAL_CODE_UNMANAGED_FUNCTION_ID);
3228 ProfilerExecutableMemoryRegions *regions = profiler->executable_regions;
3229 const char *symbol_name = executable_region_symbol_get_name (region, symbol);
3230 symbol->id = regions->next_unmanaged_function_id;
3231 regions->next_unmanaged_function_id ++;
3232 #if DEBUG_STATISTICAL_PROFILER
3233 printf ("[write_statistical_hit] Wrote new unmanaged symbol in region %d[%d]\n", region->id, offset);
3235 write_uint32 ((region->id << 3) | MONO_PROFILER_STATISTICAL_CODE_UNMANAGED_FUNCTION_NEW_ID);
3236 write_uint32 (symbol->id);
3237 write_string (symbol_name);
/* Region known but no symbol covers the offset: emit region + offset. */
3240 #if DEBUG_STATISTICAL_PROFILER
3241 printf ("[write_statistical_hit] Wrote unknown unmanaged hit in region %d[%d] (address %p)\n", region->id, offset, address);
3243 write_uint32 ((region->id << 3) | MONO_PROFILER_STATISTICAL_CODE_UNMANAGED_FUNCTION_OFFSET_IN_REGION);
3244 write_uint32 (offset);
/* No region at all: emit the raw address (region id 0). */
3247 #if DEBUG_STATISTICAL_PROFILER
3248 printf ("[write_statistical_hit] Wrote unknown unmanaged hit %p\n", address);
3250 write_uint32 (MONO_PROFILER_STATISTICAL_CODE_UNMANAGED_FUNCTION_OFFSET_IN_REGION);
3251 write_uint64 (GPOINTER_TO_UINT (address));
3255 return regions_refreshed;
/* Forward declaration (defined later in the file). */
3259 flush_all_mappings (void);
/*
 * write_statistical_data_block:
 * Flushes the pending range of sampled hits from the statistical ring
 * buffer to a STATISTICAL file block. Each slot holds the hit itself plus
 * up to statistical_call_chain_depth caller entries, emitted as a
 * CALL_CHAIN record followed by one hit record per caller.
 */
3262 write_statistical_data_block (ProfilerStatisticalData *data) {
3263 MonoThread *current_thread = mono_thread_current ();
3264 int start_index = data->first_unwritten_index;
3265 int end_index = data->next_free_index;
3266 gboolean regions_refreshed = FALSE;
3267 int call_chain_depth = profiler->statistical_call_chain_depth;
/* Clamp to the buffer end; nothing to do when the range is empty. */
3270 if (end_index > data->end_index)
3271 end_index = data->end_index;
3273 if (start_index == end_index)
3276 data->first_unwritten_index = end_index;
3278 write_clock_data ();
3280 #if DEBUG_STATISTICAL_PROFILER
3281 printf ("[write_statistical_data_block] Starting loop at index %d\n", start_index);
/* Each logical sample occupies (call_chain_depth + 1) hit slots. */
3284 for (index = start_index; index < end_index; index ++) {
3285 int base_index = index * (call_chain_depth + 1);
3286 ProfilerStatisticalHit hit = data->hits [base_index];
/* Domain lookups are only safe from an attached thread. */
3289 regions_refreshed = write_statistical_hit ((current_thread != NULL) ? hit.domain : NULL, hit.address, regions_refreshed);
/* First pass counts the callers (NULL address terminates the chain)... */
3292 for (callers_count = 0; callers_count < call_chain_depth; callers_count ++) {
3293 hit = data->hits [base_index + callers_count];
3294 if (hit.address == NULL) {
/* ...then the chain header and each caller hit are emitted. */
3299 if (callers_count > 0) {
3300 write_uint32 ((callers_count << 3) | MONO_PROFILER_STATISTICAL_CODE_CALL_CHAIN);
3302 for (callers_count = 0; callers_count < call_chain_depth; callers_count ++) {
3303 hit = data->hits [base_index + callers_count];
3304 if (hit.address != NULL) {
3305 regions_refreshed = write_statistical_hit ((current_thread != NULL) ? hit.domain : NULL, hit.address, regions_refreshed);
3312 write_uint32 (MONO_PROFILER_STATISTICAL_CODE_END);
3314 #if DEBUG_STATISTICAL_PROFILER
3315 printf ("[write_statistical_data_block] Ending loop at index %d\n", end_index);
3317 write_clock_data ();
3319 write_current_block (MONO_PROFILER_FILE_BLOCK_KIND_STATISTICAL);
/*
 * write_intro_block:
 * Emit the file header block: magic string, profiler flags, and the start
 * counter/time pair used as the time base for all later delta encodings.
 */
3323 write_intro_block (void) {
3325 write_string ("mono");
3326 write_uint32 (profiler->flags);
3327 write_uint64 (profiler->start_counter);
3328 write_uint64 (profiler->start_time);
3329 write_current_block (MONO_PROFILER_FILE_BLOCK_KIND_INTRO);
/*
 * write_end_block:
 * Emit the trailing END block with the final counter/time pair, closing the
 * profile file.
 */
3333 write_end_block (void) {
3335 write_uint64 (profiler->end_counter);
3336 write_uint64 (profiler->end_time);
3337 write_current_block (MONO_PROFILER_FILE_BLOCK_KIND_END);
/*
 * update_mapping:
 * Walk the thread's event buffer from first_unmapped_event up to (but not
 * including) next_free_event and make sure every class/method referenced by
 * an event has an id-mapping element allocated, so that ids can be written
 * out before the events that use them.  Advances first_unmapped_event.
 */
3341 update_mapping (ProfilerPerThreadData *data) {
3342 ProfilerEventData *start = data->first_unmapped_event;
3343 ProfilerEventData *end = data->next_free_event;
3344 data->first_unmapped_event = end;
3346 #if (DEBUG_LOGGING_PROFILER)
3347 printf ("[update_mapping][TID %ld] START\n", data->thread_id);
3349 while (start < end) {
3350 #if DEBUG_LOGGING_PROFILER
3351 printf ("Examining event %p[TID %ld] looking for a new mapping...\n", start, data->thread_id);
/* Create a class-id mapping on first sight of this class. */
3353 if (start->data_type == MONO_PROFILER_EVENT_DATA_TYPE_CLASS) {
3354 ClassIdMappingElement *element = class_id_mapping_element_get (start->data.address);
3355 if (element == NULL) {
3356 MonoClass *klass = start->data.address;
3357 class_id_mapping_element_new (klass);
/* Likewise for methods (NULL methods are possible and skipped). */
3359 } else if (start->data_type == MONO_PROFILER_EVENT_DATA_TYPE_METHOD) {
3360 MethodIdMappingElement *element = method_id_mapping_element_get (start->data.address);
3361 if (element == NULL) {
3362 MonoMethod *method = start->data.address;
3363 if (method != NULL) {
3364 method_id_mapping_element_new (method);
/* An event with value == MAX_EVENT_VALUE is followed by an extension slot
 * holding the full 64-bit value; it must be skipped here as well. */
3369 if (start->value == MAX_EVENT_VALUE) {
3374 #if (DEBUG_LOGGING_PROFILER)
3375 printf ("[update_mapping][TID %ld] END\n", data->thread_id);
/*
 * flush_all_mappings:
 * For every per-thread buffer: first build any missing class/method id
 * mappings (update_mapping), then write the mapping blocks.  Two separate
 * passes because id definitions referenced by one thread's events may have
 * been created while scanning another thread.
 */
3380 flush_all_mappings (void) {
3381 ProfilerPerThreadData *data;
3383 for (data = profiler->per_thread_data; data != NULL; data = data->next) {
3384 update_mapping (data);
3386 for (data = profiler->per_thread_data; data != NULL; data = data->next) {
3387 write_mapping_block (data->thread_id);
/*
 * flush_full_event_data_buffer:
 * Called when a thread's event buffer is (nearly) full: write all mappings
 * and this thread's events to the file, then reset the buffer pointers and
 * restart the event-counter time base.
 */
3392 flush_full_event_data_buffer (ProfilerPerThreadData *data) {
3395 // We flush all mappings because some id definitions could come
3396 // from other threads
3397 flush_all_mappings ();
/* NOTE(review): after flush_all_mappings, first_unmapped_event should equal
 * next_free_event; the ">=" direction of this assert looks inverted for a
 * "everything was mapped" check — confirm against the full file. */
3398 g_assert (data->first_unmapped_event >= data->next_free_event);
3400 write_thread_data_block (data);
/* Reset the buffer to empty and re-anchor the per-thread counter base. */
3402 data->next_free_event = data->events;
3403 data->first_unwritten_event = data->events;
3404 data->first_unmapped_event = data->events;
3405 MONO_PROFILER_GET_CURRENT_COUNTER (data->start_event_counter);
3406 data->last_event_counter = data->start_event_counter;
/*
 * GET_NEXT_FREE_EVENT(d,e): pop the next free ProfilerEventData slot from
 * per-thread data d into e, flushing the buffer first if it is full.
 * GET_NEXT_FREE_EVENT_LEAVING_ONE_FREE(d,e): same, but flushes early enough
 * to guarantee one extra slot remains (used when a 64-bit extension slot may
 * be needed right after the event).
 * (No comments inside the macros: they are backslash-continued.)
 */
3411 #define GET_NEXT_FREE_EVENT(d,e) {\
3412 if ((d)->next_free_event >= (d)->end_event) {\
3413 flush_full_event_data_buffer (d);\
3415 (e) = (d)->next_free_event;\
3416 (d)->next_free_event ++;\
3418 #define GET_NEXT_FREE_EVENT_LEAVING_ONE_FREE(d,e) {\
3419 if ((d)->next_free_event >= ((d)->end_event - 2)) {\
3420 flush_full_event_data_buffer (d);\
3422 (e) = (d)->next_free_event;\
3423 (d)->next_free_event ++;\
/*
 * flush_everything:
 * Write all pending output: id mappings, every thread's event buffer, and
 * the current statistical-sampling buffer.
 */
3427 flush_everything (void) {
3428 ProfilerPerThreadData *data;
3430 flush_all_mappings ();
3431 for (data = profiler->per_thread_data; data != NULL; data = data->next) {
3432 write_thread_data_block (data);
3434 write_statistical_data_block (profiler->statistical_data);
3437 /* This assumes the lock is held: it just offloads the work to the writer thread. */
/*
 * writer_thread_flush_everything:
 * If the dedicated writer thread exists, set the flush flag, wake it, and
 * block until it signals completion; otherwise just log that there is no
 * writer thread (the visible code performs no inline fallback flush here).
 */
3439 writer_thread_flush_everything (void) {
3440 if (CHECK_WRITER_THREAD ()) {
3441 profiler->writer_thread_flush_everything = TRUE;
3442 LOG_WRITER_THREAD ("writer_thread_flush_everything: raising event...");
3443 WRITER_EVENT_RAISE ();
3444 LOG_WRITER_THREAD ("writer_thread_flush_everything: waiting event...");
3445 WRITER_EVENT_DONE_WAIT ();
3446 LOG_WRITER_THREAD ("writer_thread_flush_everything: got event.");
3448 LOG_WRITER_THREAD ("writer_thread_flush_everything: no thread.");
/*
 * AppDomain / module / assembly load-unload profiler callbacks.
 * Each "start" records the element in the corresponding loaded_* table;
 * each "end" resolves a display name and writes a LOADED/UNLOADED block.
 * Unload-start callbacks also force a full flush through the writer thread,
 * since ids may become invalid after the unload completes.
 */
3452 #define RESULT_TO_LOAD_CODE(r) (((r)==MONO_PROFILE_OK)?MONO_PROFILER_LOADED_EVENT_SUCCESS:MONO_PROFILER_LOADED_EVENT_FAILURE)
/* AppDomain load started: just register it. */
3454 appdomain_start_load (MonoProfiler *profiler, MonoDomain *domain) {
3456 loaded_element_load_start (profiler->loaded_appdomains, domain);
/* AppDomain load finished: name it by its numeric domain id. */
3461 appdomain_end_load (MonoProfiler *profiler, MonoDomain *domain, int result) {
3463 LoadedElement *element;
3465 name = g_strdup_printf ("%d", mono_domain_get_id (domain));
3467 element = loaded_element_load_end (profiler->loaded_appdomains, domain, name);
3468 write_element_load_block (element, MONO_PROFILER_LOADED_EVENT_APPDOMAIN | RESULT_TO_LOAD_CODE (result), CURRENT_THREAD_ID ());
/* AppDomain unload started: flush everything before ids go away. */
3473 appdomain_start_unload (MonoProfiler *profiler, MonoDomain *domain) {
3475 loaded_element_unload_start (profiler->loaded_appdomains, domain);
3476 writer_thread_flush_everything ();
3481 appdomain_end_unload (MonoProfiler *profiler, MonoDomain *domain) {
3482 LoadedElement *element;
3485 element = loaded_element_unload_end (profiler->loaded_appdomains, domain);
3486 write_element_unload_block (element, MONO_PROFILER_LOADED_EVENT_APPDOMAIN, CURRENT_THREAD_ID ());
/* Module (MonoImage) load started. */
3491 module_start_load (MonoProfiler *profiler, MonoImage *module) {
3493 loaded_element_load_start (profiler->loaded_modules, module);
/* Module load finished: prefer the stringified assembly name, falling back
 * to a pointer-based placeholder for dynamic modules. */
3498 module_end_load (MonoProfiler *profiler, MonoImage *module, int result) {
3500 MonoAssemblyName aname;
3501 LoadedElement *element;
3503 if (mono_assembly_fill_assembly_name (module, &aname)) {
3504 name = mono_stringify_assembly_name (&aname);
3506 name = g_strdup_printf ("Dynamic module \"%p\"", module);
3509 element = loaded_element_load_end (profiler->loaded_modules, module, name);
3510 write_element_load_block (element, MONO_PROFILER_LOADED_EVENT_MODULE | RESULT_TO_LOAD_CODE (result), CURRENT_THREAD_ID ());
3515 module_start_unload (MonoProfiler *profiler, MonoImage *module) {
3517 loaded_element_unload_start (profiler->loaded_modules, module);
3518 writer_thread_flush_everything ();
3523 module_end_unload (MonoProfiler *profiler, MonoImage *module) {
3524 LoadedElement *element;
3527 element = loaded_element_unload_end (profiler->loaded_modules, module);
3528 write_element_unload_block (element, MONO_PROFILER_LOADED_EVENT_MODULE, CURRENT_THREAD_ID ());
/* Assembly load/unload: same pattern as modules, keyed on the assembly. */
3533 assembly_start_load (MonoProfiler *profiler, MonoAssembly *assembly) {
3535 loaded_element_load_start (profiler->loaded_assemblies, assembly);
3540 assembly_end_load (MonoProfiler *profiler, MonoAssembly *assembly, int result) {
3542 MonoAssemblyName aname;
3543 LoadedElement *element;
3545 if (mono_assembly_fill_assembly_name (mono_assembly_get_image (assembly), &aname)) {
3546 name = mono_stringify_assembly_name (&aname);
3548 name = g_strdup_printf ("Dynamic assembly \"%p\"", assembly);
3551 element = loaded_element_load_end (profiler->loaded_assemblies, assembly, name);
3552 write_element_load_block (element, MONO_PROFILER_LOADED_EVENT_ASSEMBLY | RESULT_TO_LOAD_CODE (result), CURRENT_THREAD_ID ());
3557 assembly_start_unload (MonoProfiler *profiler, MonoAssembly *assembly) {
3559 loaded_element_unload_start (profiler->loaded_assemblies, assembly);
3560 writer_thread_flush_everything ();
3564 assembly_end_unload (MonoProfiler *profiler, MonoAssembly *assembly) {
3565 LoadedElement *element;
3568 element = loaded_element_unload_end (profiler->loaded_assemblies, assembly);
3569 write_element_unload_block (element, MONO_PROFILER_LOADED_EVENT_ASSEMBLY, CURRENT_THREAD_ID ());
/*
 * Debug-only helpers (compiled only under DEBUG_LOGGING_PROFILER): map the
 * various profiler event enums to printable strings, and print_event_data
 * dumps one buffered event in human-readable form.  LOG_EVENT expands to
 * print_event_data in debug builds and to nothing otherwise.
 */
3573 #if (DEBUG_LOGGING_PROFILER)
3575 class_event_code_to_string (MonoProfilerClassEvents code) {
3577 case MONO_PROFILER_EVENT_CLASS_LOAD: return "LOAD";
3578 case MONO_PROFILER_EVENT_CLASS_UNLOAD: return "UNLOAD";
3579 case MONO_PROFILER_EVENT_CLASS_ALLOCATION: return "ALLOCATION";
3580 case MONO_PROFILER_EVENT_CLASS_EXCEPTION: return "EXCEPTION";
3581 default: g_assert_not_reached (); return "";
3585 method_event_code_to_string (MonoProfilerMethodEvents code) {
3587 case MONO_PROFILER_EVENT_METHOD_CALL: return "CALL";
3588 case MONO_PROFILER_EVENT_METHOD_JIT: return "JIT";
3589 case MONO_PROFILER_EVENT_METHOD_FREED: return "FREED";
3590 case MONO_PROFILER_EVENT_METHOD_ALLOCATION_CALLER: return "ALLOCATION_CALLER";
3591 case MONO_PROFILER_EVENT_METHOD_ALLOCATION_JIT_TIME_CALLER: return "ALLOCATION_JIT_TIME_CALLER";
3592 default: g_assert_not_reached (); return "";
3596 number_event_code_to_string (MonoProfilerEvents code) {
3598 case MONO_PROFILER_EVENT_THREAD: return "THREAD";
3599 case MONO_PROFILER_EVENT_GC_COLLECTION: return "GC_COLLECTION";
3600 case MONO_PROFILER_EVENT_GC_MARK: return "GC_MARK";
3601 case MONO_PROFILER_EVENT_GC_SWEEP: return "GC_SWEEP";
3602 case MONO_PROFILER_EVENT_GC_RESIZE: return "GC_RESIZE";
3603 case MONO_PROFILER_EVENT_GC_STOP_WORLD: return "GC_STOP_WORLD";
3604 case MONO_PROFILER_EVENT_GC_START_WORLD: return "GC_START_WORLD";
3605 default: g_assert_not_reached (); return "";
3609 event_result_to_string (MonoProfilerEventResult code) {
3611 case MONO_PROFILER_EVENT_RESULT_SUCCESS: return "SUCCESS";
3612 case MONO_PROFILER_EVENT_RESULT_FAILURE: return "FAILURE";
3613 default: g_assert_not_reached (); return "";
3617 event_kind_to_string (MonoProfilerEventKind code) {
3619 case MONO_PROFILER_EVENT_KIND_START: return "START";
3620 case MONO_PROFILER_EVENT_KIND_END: return "END";
3621 default: g_assert_not_reached (); return "";
/* Dump one event.  NOTE(review): "%ld" is used for gsize/guint64 arguments,
 * which is only correct on LP64 platforms — debug-only code, but worth
 * confirming if it is ever built on 32-bit or Windows. */
3625 print_event_data (gsize thread_id, ProfilerEventData *event, guint64 value) {
3626 if (event->data_type == MONO_PROFILER_EVENT_DATA_TYPE_CLASS) {
3627 printf ("[TID %ld] CLASS[%p] event [%p] %s:%s:%s[%d-%d-%d] %ld (%s.%s)\n",
3629 event->data.address,
3631 class_event_code_to_string (event->code & ~MONO_PROFILER_EVENT_RESULT_MASK),
3632 event_result_to_string (event->code & MONO_PROFILER_EVENT_RESULT_MASK),
3633 event_kind_to_string (event->kind),
3638 mono_class_get_namespace ((MonoClass*) event->data.address),
3639 mono_class_get_name ((MonoClass*) event->data.address));
3640 } else if (event->data_type == MONO_PROFILER_EVENT_DATA_TYPE_METHOD) {
3641 printf ("[TID %ld] METHOD[%p] event [%p] %s:%s:%s[%d-%d-%d] %ld (%s.%s:%s (?))\n",
3643 event->data.address,
3645 method_event_code_to_string (event->code & ~MONO_PROFILER_EVENT_RESULT_MASK),
3646 event_result_to_string (event->code & MONO_PROFILER_EVENT_RESULT_MASK),
3647 event_kind_to_string (event->kind),
3652 (event->data.address != NULL) ? mono_class_get_namespace (mono_method_get_class ((MonoMethod*) event->data.address)) : "<NULL>",
3653 (event->data.address != NULL) ? mono_class_get_name (mono_method_get_class ((MonoMethod*) event->data.address)) : "<NULL>",
3654 (event->data.address != NULL) ? mono_method_get_name ((MonoMethod*) event->data.address) : "<NULL>");
3656 printf ("[TID %ld] NUMBER[%ld] event [%p] %s:%s[%d-%d-%d] %ld\n",
3658 (guint64) event->data.number,
3660 number_event_code_to_string (event->code),
3661 event_kind_to_string (event->kind),
3668 #define LOG_EVENT(tid,ev,val) print_event_data ((tid),(ev),(val))
3670 #define LOG_EVENT(tid,ev,val)
/*
 * Event-recording macros.  Each STORE_EVENT_* grabs a free slot in the
 * current thread's buffer, fills in payload (address or number), data type,
 * code and kind, and stores either a counter delta (…_COUNTER variants) or a
 * caller-supplied value (…_VALUE variants).  Values >= MAX_EVENT_VALUE do
 * not fit in the 26-bit value field, so the next slot is consumed as a raw
 * 64-bit extension.  (Comments cannot be placed inside the macros: they are
 * backslash-continued.)
 */
3673 #define RESULT_TO_EVENT_CODE(r) (((r)==MONO_PROFILE_OK)?MONO_PROFILER_EVENT_RESULT_SUCCESS:MONO_PROFILER_EVENT_RESULT_FAILURE)
/* Store an item (class/method pointer) event timestamped with the delta from
 * the thread's last event counter. */
3675 #define STORE_EVENT_ITEM_COUNTER(p,i,dt,c,k) do {\
3676 ProfilerEventData *event;\
3679 GET_NEXT_FREE_EVENT (data, event);\
3680 MONO_PROFILER_GET_CURRENT_COUNTER (counter);\
3681 event->data.address = (i);\
3682 event->data_type = (dt);\
3685 delta = counter - data->last_event_counter;\
3686 if (delta < MAX_EVENT_VALUE) {\
3687 event->value = delta;\
3689 ProfilerEventData *extension = data->next_free_event;\
3690 data->next_free_event ++;\
3691 event->value = MAX_EVENT_VALUE;\
3692 *(guint64*)extension = delta;\
3694 data->last_event_counter = counter;\
3695 LOG_EVENT (data->thread_id, event, delta);\
3697 #define STORE_EVENT_ITEM_VALUE_NEXT(p,i,dt,c,k,v,NEXT_FREE_EVENT) do {\
3698 ProfilerEventData *event;\
3699 NEXT_FREE_EVENT (data, event);\
3700 event->data.address = (i);\
3701 event->data_type = (dt);\
3704 if ((v) < MAX_EVENT_VALUE) {\
3705 event->value = (v);\
3707 ProfilerEventData *extension = data->next_free_event;\
3708 data->next_free_event ++;\
3709 event->value = MAX_EVENT_VALUE;\
3710 *(guint64*)extension = (v);\
3712 LOG_EVENT (data->thread_id, event, (v));\
3714 #define STORE_EVENT_ITEM_VALUE(p,i,dt,c,k,v) STORE_EVENT_ITEM_VALUE_NEXT(p,i,dt,c,k,v,GET_NEXT_FREE_EVENT)
3715 #define STORE_EVENT_ITEM_VALUE_LEAVING_ONE_FREE(p,i,dt,c,k,v) STORE_EVENT_ITEM_VALUE_NEXT(p,i,dt,c,k,v,GET_NEXT_FREE_EVENT_LEAVING_ONE_FREE)
/* Same two patterns again, but the payload is a number (e.g. a thread id)
 * stored in event->data.number instead of a pointer. */
3716 #define STORE_EVENT_NUMBER_COUNTER(p,n,dt,c,k) do {\
3717 ProfilerEventData *event;\
3720 GET_NEXT_FREE_EVENT (data, event);\
3721 MONO_PROFILER_GET_CURRENT_COUNTER (counter);\
3722 event->data.number = (n);\
3723 event->data_type = (dt);\
3726 delta = counter - data->last_event_counter;\
3727 if (delta < MAX_EVENT_VALUE) {\
3728 event->value = delta;\
3730 ProfilerEventData *extension = data->next_free_event;\
3731 data->next_free_event ++;\
3732 event->value = MAX_EVENT_VALUE;\
3733 *(guint64*)extension = delta;\
3735 data->last_event_counter = counter;\
3736 LOG_EVENT (data->thread_id, event, delta);\
3738 #define STORE_EVENT_NUMBER_VALUE(p,n,dt,c,k,v) do {\
3739 ProfilerEventData *event;\
3740 GET_NEXT_FREE_EVENT (data, event);\
3741 event->data.number = (n);\
3742 event->data_type = (dt);\
3745 if ((v) < MAX_EVENT_VALUE) {\
3746 event->value = (v);\
3748 ProfilerEventData *extension = data->next_free_event;\
3749 data->next_free_event ++;\
3750 event->value = MAX_EVENT_VALUE;\
3751 *(guint64*)extension = (v);\
3753 LOG_EVENT (data->thread_id, event, (v));\
3757 class_start_load (MonoProfiler *profiler, MonoClass *klass) {
/* Class load/unload callbacks: record a timestamped CLASS event in the
 * current thread's buffer; the END events also fold in the success/failure
 * result code where one exists. */
3758 ProfilerPerThreadData *data;
3759 GET_PROFILER_THREAD_DATA (data);
3760 STORE_EVENT_ITEM_COUNTER (profiler, klass, MONO_PROFILER_EVENT_DATA_TYPE_CLASS, MONO_PROFILER_EVENT_CLASS_LOAD, MONO_PROFILER_EVENT_KIND_START);
3763 class_end_load (MonoProfiler *profiler, MonoClass *klass, int result) {
3764 ProfilerPerThreadData *data;
3765 GET_PROFILER_THREAD_DATA (data);
3766 STORE_EVENT_ITEM_COUNTER (profiler, klass, MONO_PROFILER_EVENT_DATA_TYPE_CLASS, MONO_PROFILER_EVENT_CLASS_LOAD | RESULT_TO_EVENT_CODE (result), MONO_PROFILER_EVENT_KIND_END);
3769 class_start_unload (MonoProfiler *profiler, MonoClass *klass) {
3770 ProfilerPerThreadData *data;
3771 GET_PROFILER_THREAD_DATA (data);
3772 STORE_EVENT_ITEM_COUNTER (profiler, klass, MONO_PROFILER_EVENT_DATA_TYPE_CLASS, MONO_PROFILER_EVENT_CLASS_UNLOAD, MONO_PROFILER_EVENT_KIND_START);
3775 class_end_unload (MonoProfiler *profiler, MonoClass *klass) {
3776 ProfilerPerThreadData *data;
3777 GET_PROFILER_THREAD_DATA (data);
3778 STORE_EVENT_ITEM_COUNTER (profiler, klass, MONO_PROFILER_EVENT_DATA_TYPE_CLASS, MONO_PROFILER_EVENT_CLASS_UNLOAD, MONO_PROFILER_EVENT_KIND_END);
/* JIT start: push the method on the shadow stack (marked as jitted) before
 * recording the event, so allocations during JIT are attributed to it. */
3782 method_start_jit (MonoProfiler *profiler, MonoMethod *method) {
3783 ProfilerPerThreadData *data;
3784 GET_PROFILER_THREAD_DATA (data);
3785 thread_stack_push_jitted_safely (&(data->stack), method, TRUE);
3786 STORE_EVENT_ITEM_COUNTER (profiler, method, MONO_PROFILER_EVENT_DATA_TYPE_METHOD, MONO_PROFILER_EVENT_METHOD_JIT, MONO_PROFILER_EVENT_KIND_START);
3789 method_end_jit (MonoProfiler *profiler, MonoMethod *method, int result) {
3790 ProfilerPerThreadData *data;
3791 GET_PROFILER_THREAD_DATA (data);
3792 STORE_EVENT_ITEM_COUNTER (profiler, method, MONO_PROFILER_EVENT_DATA_TYPE_METHOD, MONO_PROFILER_EVENT_METHOD_JIT | RESULT_TO_EVENT_CODE (result), MONO_PROFILER_EVENT_KIND_END);
3793 thread_stack_pop (&(data->stack));
/* On successful JIT, optionally report the native code range to oprofile. */
3798 method_jit_result (MonoProfiler *prof, MonoMethod *method, MonoJitInfo* jinfo, int result) {
3799 if (profiler->action_flags.oprofile && (result == MONO_PROFILE_OK)) {
3800 MonoClass *klass = mono_method_get_class (method);
3801 char *signature = mono_signature_get_desc (mono_method_signature (method), TRUE);
3802 char *name = g_strdup_printf ("%s.%s:%s (%s)", mono_class_get_namespace (klass), mono_class_get_name (klass), mono_method_get_name (method), signature);
3803 gpointer code_start = mono_jit_info_get_code_start (jinfo);
3804 int code_size = mono_jit_info_get_code_size (jinfo);
3806 if (op_write_native_code (name, code_start, code_size)) {
3807 g_warning ("Problem calling op_write_native_code\n");
/* Method enter/leave: optionally record a CALL event and maintain the
 * shadow stack, each gated by its own action flag. */
3818 method_enter (MonoProfiler *profiler, MonoMethod *method) {
3819 ProfilerPerThreadData *data;
3821 CHECK_PROFILER_ENABLED ();
3822 GET_PROFILER_THREAD_DATA (data);
3823 if (profiler->action_flags.track_calls) {
3824 STORE_EVENT_ITEM_COUNTER (profiler, method, MONO_PROFILER_EVENT_DATA_TYPE_METHOD, MONO_PROFILER_EVENT_METHOD_CALL, MONO_PROFILER_EVENT_KIND_START);
3826 if (profiler->action_flags.track_stack) {
3827 thread_stack_push_safely (&(data->stack), method);
3831 method_leave (MonoProfiler *profiler, MonoMethod *method) {
3832 ProfilerPerThreadData *data;
3834 CHECK_PROFILER_ENABLED ();
3835 GET_PROFILER_THREAD_DATA (data);
3836 if (profiler->action_flags.track_calls) {
3837 STORE_EVENT_ITEM_COUNTER (profiler, method, MONO_PROFILER_EVENT_DATA_TYPE_METHOD, MONO_PROFILER_EVENT_METHOD_CALL, MONO_PROFILER_EVENT_KIND_END);
3839 if (profiler->action_flags.track_stack) {
3840 thread_stack_pop (&(data->stack));
/* Method freed: kind argument is 0 (no START/END pairing for FREED). */
3845 method_free (MonoProfiler *profiler, MonoMethod *method) {
3846 ProfilerPerThreadData *data;
3847 GET_PROFILER_THREAD_DATA (data);
3848 STORE_EVENT_ITEM_COUNTER (profiler, method, MONO_PROFILER_EVENT_DATA_TYPE_METHOD, MONO_PROFILER_EVENT_METHOD_FREED, 0);
/* Thread start/end: recorded as NUMBER events carrying the thread id. */
3852 thread_start (MonoProfiler *profiler, gsize tid) {
3853 ProfilerPerThreadData *data;
3854 GET_PROFILER_THREAD_DATA (data);
3855 STORE_EVENT_NUMBER_COUNTER (profiler, tid, MONO_PROFILER_EVENT_DATA_TYPE_OTHER, MONO_PROFILER_EVENT_THREAD, MONO_PROFILER_EVENT_KIND_START);
3858 thread_end (MonoProfiler *profiler, gsize tid) {
3859 ProfilerPerThreadData *data;
3860 GET_PROFILER_THREAD_DATA (data);
3861 STORE_EVENT_NUMBER_COUNTER (profiler, tid, MONO_PROFILER_EVENT_DATA_TYPE_OTHER, MONO_PROFILER_EVENT_THREAD, MONO_PROFILER_EVENT_KIND_END);
/*
 * object_allocated:
 * Allocation callback: record a CLASS ALLOCATION event carrying the object
 * size (using the LEAVING_ONE_FREE variant so a caller event can follow in
 * the same buffer without an intervening flush).  Optionally remember the
 * object for heap-shot bookkeeping, and — when stack tracking is on — emit
 * the allocating caller, skipping managed-to-native wrapper frames.
 */
3865 object_allocated (MonoProfiler *profiler, MonoObject *obj, MonoClass *klass) {
3866 ProfilerPerThreadData *data;
3867 GET_PROFILER_THREAD_DATA (data);
3869 STORE_EVENT_ITEM_VALUE_LEAVING_ONE_FREE (profiler, klass, MONO_PROFILER_EVENT_DATA_TYPE_CLASS, MONO_PROFILER_EVENT_CLASS_ALLOCATION, 0, (guint64) mono_object_get_size (obj));
3870 if (profiler->action_flags.unreachable_objects || profiler->action_flags.heap_shot || profiler->action_flags.collection_summary) {
3871 STORE_ALLOCATED_OBJECT (data, obj);
3874 if (profiler->action_flags.track_stack) {
3875 MonoMethod *caller = thread_stack_top (&(data->stack));
3876 gboolean caller_is_jitted = thread_stack_top_is_jitted (&(data->stack));
/* Walk down past managed-to-native wrappers to find the real caller. */
3878 while ((caller != NULL) && (caller->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE)) {
3879 caller = thread_stack_index_from_top (&(data->stack), index);
3880 caller_is_jitted = thread_stack_index_from_top_is_jitted (&(data->stack), index);
/* Distinguish allocations made while the caller was being JITted. */
3883 if (! caller_is_jitted) {
3884 STORE_EVENT_ITEM_VALUE (profiler, caller, MONO_PROFILER_EVENT_DATA_TYPE_METHOD, MONO_PROFILER_EVENT_METHOD_ALLOCATION_CALLER, 0, 0);
3886 STORE_EVENT_ITEM_VALUE (profiler, caller, MONO_PROFILER_EVENT_DATA_TYPE_METHOD, MONO_PROFILER_EVENT_METHOD_ALLOCATION_JIT_TIME_CALLER, 0, 0);
/*
 * statistical_call_chain:
 * Signal-context sampling callback with call-chain capture.  Atomically
 * claims a group of (statistical_call_chain_depth + 1) hit slots, fills the
 * captured IPs, NULL-terminates the remainder, and — if this sample filled
 * the buffer — swaps in the second buffer and wakes the writer thread.
 * Runs lock-free; excess samples while both buffers are busy are dropped.
 */
3892 statistical_call_chain (MonoProfiler *profiler, int call_chain_depth, guchar **ips, void *context) {
3893 MonoDomain *domain = mono_domain_get ();
3894 ProfilerStatisticalData *data;
3897 CHECK_PROFILER_ENABLED ();
3899 data = profiler->statistical_data;
/* Atomically claim a slot group; index is 1-based after the increment. */
3900 index = InterlockedIncrement (&data->next_free_index);
3902 if (index <= data->end_index) {
3903 int base_index = (index - 1) * (profiler->statistical_call_chain_depth + 1);
3904 int call_chain_index = 0;
3906 //printf ("[statistical_call_chain] (%d)\n", call_chain_depth);
3907 while (call_chain_index < call_chain_depth) {
3908 ProfilerStatisticalHit *hit = & (data->hits [base_index + call_chain_index]);
3909 //printf ("[statistical_call_chain] [%d] = %p\n", base_index + call_chain_index, ips [call_chain_index]);
3910 hit->address = (gpointer) ips [call_chain_index];
3911 hit->domain = domain;
3912 call_chain_index ++;
/* Pad unused slots with NULL so the writer knows the chain length. */
3914 while (call_chain_index <= profiler->statistical_call_chain_depth) {
3915 ProfilerStatisticalHit *hit = & (data->hits [base_index + call_chain_index]);
3916 //printf ("[statistical_call_chain] [%d] = NULL\n", base_index + call_chain_index);
3917 hit->address = NULL;
3919 call_chain_index ++;
3922 /* Check if we are the one that must swap the buffers */
3923 if (index == data->end_index + 1) {
3924 ProfilerStatisticalData *new_data;
3926 /* In the *impossible* case that the writer thread has not finished yet, */
3927 /* loop waiting for it and meanwhile lose all statistical events... */
3929 /* First, wait that it consumed the ready buffer */
3930 while (profiler->statistical_data_ready != NULL);
3931 /* Then, wait that it produced the free buffer */
3932 new_data = profiler->statistical_data_second_buffer;
3933 } while (new_data == NULL);
3935 profiler->statistical_data_ready = data;
3936 profiler->statistical_data = new_data;
3937 profiler->statistical_data_second_buffer = NULL;
3938 WRITER_EVENT_RAISE ();
3941 /* Loop again, hoping to acquire a free slot this time */
3944 } while (data == NULL);
/*
 * statistical_hit:
 * Single-IP variant of statistical_call_chain: claims one hit slot, records
 * the sampled instruction pointer and current domain, and performs the same
 * lock-free double-buffer swap (waking the writer thread) when this sample
 * is the one that fills the buffer.
 */
3948 statistical_hit (MonoProfiler *profiler, guchar *ip, void *context) {
3949 MonoDomain *domain = mono_domain_get ();
3950 ProfilerStatisticalData *data;
3953 CHECK_PROFILER_ENABLED ();
3955 data = profiler->statistical_data;
3956 index = InterlockedIncrement (&data->next_free_index);
3958 if (index <= data->end_index) {
3959 ProfilerStatisticalHit *hit = & (data->hits [index - 1]);
3960 hit->address = (gpointer) ip;
3961 hit->domain = domain;
3963 /* Check if we are the one that must swap the buffers */
3964 if (index == data->end_index + 1) {
3965 ProfilerStatisticalData *new_data;
3967 /* In the *impossible* case that the writer thread has not finished yet, */
3968 /* loop waiting for it and meanwhile lose all statistical events... */
3970 /* First, wait that it consumed the ready buffer */
3971 while (profiler->statistical_data_ready != NULL);
3972 /* Then, wait that it produced the free buffer */
3973 new_data = profiler->statistical_data_second_buffer;
3974 } while (new_data == NULL);
3976 profiler->statistical_data_ready = data;
3977 profiler->statistical_data = new_data;
3978 profiler->statistical_data_second_buffer = NULL;
3979 WRITER_EVENT_RAISE ();
3982 /* Loop again, hoping to acquire a free slot this time */
3985 } while (data == NULL);
/*
 * gc_event_code_from_profiler_event:
 * Map a runtime MonoGCEvent to the profiler's file-format event code
 * (collection / mark / sweep / stop-world / start-world), collapsing the
 * START and END runtime variants onto a single code.
 */
3988 static MonoProfilerEvents
3989 gc_event_code_from_profiler_event (MonoGCEvent event) {
3991 case MONO_GC_EVENT_START:
3992 case MONO_GC_EVENT_END:
3993 return MONO_PROFILER_EVENT_GC_COLLECTION;
3994 case MONO_GC_EVENT_MARK_START:
3995 case MONO_GC_EVENT_MARK_END:
3996 return MONO_PROFILER_EVENT_GC_MARK;
3997 case MONO_GC_EVENT_RECLAIM_START:
3998 case MONO_GC_EVENT_RECLAIM_END:
3999 return MONO_PROFILER_EVENT_GC_SWEEP;
4000 case MONO_GC_EVENT_PRE_STOP_WORLD:
4001 case MONO_GC_EVENT_POST_STOP_WORLD:
4002 return MONO_PROFILER_EVENT_GC_STOP_WORLD;
4003 case MONO_GC_EVENT_PRE_START_WORLD:
4004 case MONO_GC_EVENT_POST_START_WORLD:
4005 return MONO_PROFILER_EVENT_GC_START_WORLD;
4007 g_assert_not_reached ();
/*
 * gc_event_kind_from_profiler_event:
 * Companion mapping: classify each MonoGCEvent as a START or an END event
 * for the file format.
 */
4012 static MonoProfilerEventKind
4013 gc_event_kind_from_profiler_event (MonoGCEvent event) {
4015 case MONO_GC_EVENT_START:
4016 case MONO_GC_EVENT_MARK_START:
4017 case MONO_GC_EVENT_RECLAIM_START:
4018 case MONO_GC_EVENT_PRE_STOP_WORLD:
4019 case MONO_GC_EVENT_PRE_START_WORLD:
4020 return MONO_PROFILER_EVENT_KIND_START;
4021 case MONO_GC_EVENT_END:
4022 case MONO_GC_EVENT_MARK_END:
4023 case MONO_GC_EVENT_RECLAIM_END:
4024 case MONO_GC_EVENT_POST_START_WORLD:
4025 case MONO_GC_EVENT_POST_STOP_WORLD:
4026 return MONO_PROFILER_EVENT_KIND_END;
4028 g_assert_not_reached ();
/*
 * profiler_heap_shot_process_command_file:
 * Poll a small user-writable command file for a heap-snapshot request: if
 * the file exists, fits the size limit, and was modified since the last
 * check, read it and store its integer content in dump_next_heap_snapshots.
 * NOTE(review): the mtime comparison uses st_mtim.tv_sec * 1000000 and
 * ignores tv_nsec — presumably the access-time field is in microseconds;
 * confirm against MONO_PROFILER_GET_CURRENT_TIME.
 */
4033 #define HEAP_SHOT_COMMAND_FILE_MAX_LENGTH 64
4035 profiler_heap_shot_process_command_file (void) {
4036 //FIXME: Port to Windows as well
4037 struct stat stat_buf;
4039 char buffer [HEAP_SHOT_COMMAND_FILE_MAX_LENGTH + 1];
4041 if (profiler->heap_shot_command_file_name == NULL)
4043 if (stat (profiler->heap_shot_command_file_name, &stat_buf) != 0)
4045 if (stat_buf.st_size > HEAP_SHOT_COMMAND_FILE_MAX_LENGTH)
4047 if ((stat_buf.st_mtim.tv_sec * 1000000) < profiler->heap_shot_command_file_access_time)
4050 fd = open (profiler->heap_shot_command_file_name, O_RDONLY);
4054 if (read (fd, &(buffer [0]), stat_buf.st_size) != stat_buf.st_size) {
4057 buffer [stat_buf.st_size] = 0;
4058 profiler->dump_next_heap_snapshots = atoi (buffer);
4059 MONO_PROFILER_GET_CURRENT_TIME (profiler->heap_shot_command_file_access_time);
/*
 * dump_current_heap_snapshot:
 * Decide whether the current GC should produce a heap snapshot: either a
 * pending signal requested one, or the command file left a positive
 * countdown (decremented here; negative means "always dump").
 */
4066 dump_current_heap_snapshot (void) {
4069 if (profiler->heap_shot_was_signalled) {
4072 profiler_heap_shot_process_command_file ();
4073 if (profiler->dump_next_heap_snapshots > 0) {
4074 profiler->dump_next_heap_snapshots--;
4076 } else if (profiler->dump_next_heap_snapshots < 0) {
/*
 * Heap-shot buffer management: a linked list of fixed-size slot buffers
 * tracking every live object seen by the profiler.
 * _setup allocates the first buffer; _clear just forgets the pointers;
 * _free walks the chain releasing each buffer and then clears.
 */
4087 profiler_heap_buffers_setup (ProfilerHeapShotHeapBuffers *heap) {
4088 heap->buffers = g_new (ProfilerHeapShotHeapBuffer, 1);
4089 heap->buffers->previous = NULL;
4090 heap->buffers->next = NULL;
4091 heap->buffers->start_slot = &(heap->buffers->buffer [0]);
4092 heap->buffers->end_slot = &(heap->buffers->buffer [PROFILER_HEAP_SHOT_HEAP_BUFFER_SIZE]);
4093 heap->last = heap->buffers;
4094 heap->current = heap->buffers;
4095 heap->first_free_slot = & (heap->buffers->buffer [0]);
4098 profiler_heap_buffers_clear (ProfilerHeapShotHeapBuffers *heap) {
4099 heap->buffers = NULL;
4101 heap->current = NULL;
4102 heap->first_free_slot = NULL;
4105 profiler_heap_buffers_free (ProfilerHeapShotHeapBuffers *heap) {
4106 ProfilerHeapShotHeapBuffer *current = heap->buffers;
4107 while (current != NULL) {
4108 ProfilerHeapShotHeapBuffer *next = current->next;
4112 profiler_heap_buffers_clear (heap);
/*
 * report_object_references:
 * Scan the reference slots of an object body (starting at `start`) using the
 * class layout bitmap: a compact 64-bit bitmap for small layouts, an
 * extended byte array otherwise.  Each live, non-NULL referenced object is
 * appended to the heap-shot write job.  Returns the number of references
 * written.
 */
4116 report_object_references (gpointer *start, ClassIdMappingElement *layout, ProfilerHeapShotWriteJob *job) {
4117 int reported_references = 0;
4120 for (slot = 0; slot < layout->data.layout.slots; slot ++) {
4121 gboolean slot_has_reference;
/* Small layouts use the packed bitmap; the check is loop-invariant but
 * evaluated per slot in this code. */
4122 if (layout->data.layout.slots <= CLASS_LAYOUT_PACKED_BITMAP_SIZE) {
4123 if (layout->data.bitmap.compact & (((guint64)1) << slot)) {
4124 slot_has_reference = TRUE;
4126 slot_has_reference = FALSE;
4129 if (layout->data.bitmap.extended [slot >> 3] & (1 << (slot & 7))) {
4130 slot_has_reference = TRUE;
4132 slot_has_reference = FALSE;
4136 if (slot_has_reference) {
4137 gpointer field = start [slot];
/* Only report references to objects still alive in this collection. */
4139 if ((field != NULL) && mono_object_is_alive (field)) {
4140 reported_references ++;
4141 WRITE_HEAP_SHOT_JOB_VALUE (job, field);
4146 return reported_references;
/*
 * profiler_heap_report_object_reachable:
 * Record a still-reachable object in the heap-shot write job.  Updates the
 * per-class reachable counters when a collection summary is requested, and
 * when full heap data is being dumped writes the object record followed by
 * its outgoing references (array elements, value-type array interiors, and
 * instance fields), back-patching the reference count placeholder at the end.
 */
4150 profiler_heap_report_object_reachable (ProfilerHeapShotWriteJob *job, MonoObject *obj) {
4152 MonoClass *klass = mono_object_get_class (obj);
4153 ClassIdMappingElement *class_id = class_id_mapping_element_get (klass);
4154 if (class_id == NULL) {
4155 printf ("profiler_heap_report_object_reachable: class %p (%s.%s) has no id\n", klass, mono_class_get_namespace (klass), mono_class_get_name (klass));
4157 g_assert (class_id != NULL);
/* Collection-summary accounting. */
4159 if (job->summary.capacity > 0) {
4160 guint32 id = class_id->id;
4161 g_assert (id < job->summary.capacity);
4163 job->summary.per_class_data [id].reachable.instances ++;
4164 job->summary.per_class_data [id].reachable.bytes += mono_object_get_size (obj);
4166 if (profiler->action_flags.heap_shot && job->dump_heap_data) {
4167 int reference_counter = 0;
4168 gpointer *reference_counter_location;
4170 WRITE_HEAP_SHOT_JOB_VALUE_WITH_CODE (job, obj, HEAP_CODE_OBJECT);
4171 #if DEBUG_HEAP_PROFILER
4172 printf ("profiler_heap_report_object_reachable: reported object %p at cursor %p\n", obj, (job->cursor - 1));
/* Reserve a slot for the reference count; patched below once known. */
4174 WRITE_HEAP_SHOT_JOB_VALUE (job, NULL);
4175 reference_counter_location = job->cursor - 1;
/* Arrays: rank != 0.  Reference-element arrays report each live element;
 * value-type element arrays with references scan each element body. */
4177 if (mono_class_get_rank (klass)) {
4178 MonoArray *array = (MonoArray *) obj;
4179 MonoClass *element_class = mono_class_get_element_class (klass);
4180 ClassIdMappingElement *element_id = class_id_mapping_element_get (element_class);
4182 g_assert (element_id != NULL);
4183 if (element_id->data.layout.slots == CLASS_LAYOUT_NOT_INITIALIZED) {
4184 class_id_mapping_element_build_layout_bitmap (element_class, element_id);
4186 if (! mono_class_is_valuetype (element_class)) {
4187 int length = mono_array_length (array);
4189 for (i = 0; i < length; i++) {
4190 MonoObject *array_element = mono_array_get (array, MonoObject*, i);
4191 if ((array_element != NULL) && mono_object_is_alive (array_element)) {
4192 reference_counter ++;
4193 WRITE_HEAP_SHOT_JOB_VALUE (job, array_element);
4196 } else if (element_id->data.layout.references > 0) {
4197 int length = mono_array_length (array);
4198 int array_element_size = mono_array_element_size (klass);
4200 for (i = 0; i < length; i++) {
4201 gpointer array_element_address = mono_array_addr_with_size (array, array_element_size, i);
4202 reference_counter += report_object_references (array_element_address, element_id, job);
/* Non-array objects: scan the instance fields after the MonoObject header. */
4206 if (class_id->data.layout.slots == CLASS_LAYOUT_NOT_INITIALIZED) {
4207 class_id_mapping_element_build_layout_bitmap (klass, class_id);
4209 if (class_id->data.layout.references > 0) {
4210 reference_counter += report_object_references ((gpointer)(((char*)obj) + sizeof (MonoObject)), class_id, job);
/* Back-patch the reserved slot with the final reference count. */
4214 *reference_counter_location = GINT_TO_POINTER (reference_counter);
4215 #if DEBUG_HEAP_PROFILER
4216 printf ("profiler_heap_report_object_reachable: updated reference_counter_location %p with value %d\n", reference_counter_location, reference_counter);
/*
 * profiler_heap_report_object_unreachable:
 * Record a garbage (unreachable) object: update the per-class unreachable
 * counters for the collection summary and, when requested, write a
 * FREE_OBJECT_CLASS record (class pointer + object size) to the job.
 */
4222 profiler_heap_report_object_unreachable (ProfilerHeapShotWriteJob *job, MonoObject *obj) {
4224 MonoClass *klass = mono_object_get_class (obj);
4225 guint32 size = mono_object_get_size (obj);
4227 if (job->summary.capacity > 0) {
4228 ClassIdMappingElement *class_id = class_id_mapping_element_get (klass);
4231 if (class_id == NULL) {
/* NOTE(review): this diagnostic says "..._reachable" but we are in the
 * unreachable path — looks like a copy-paste slip in the message. */
4232 printf ("profiler_heap_report_object_reachable: class %p (%s.%s) has no id\n", klass, mono_class_get_namespace (klass), mono_class_get_name (klass));
4234 g_assert (class_id != NULL);
4236 g_assert (id < job->summary.capacity);
4238 job->summary.per_class_data [id].unreachable.instances ++;
4239 job->summary.per_class_data [id].unreachable.bytes += size;
4241 if (profiler->action_flags.unreachable_objects && job->dump_heap_data) {
4242 #if DEBUG_HEAP_PROFILER
4243 printf ("profiler_heap_report_object_unreachable: at job %p writing klass %p\n", job, klass);
4245 WRITE_HEAP_SHOT_JOB_VALUE_WITH_CODE (job, klass, HEAP_CODE_FREE_OBJECT_CLASS);
4247 #if DEBUG_HEAP_PROFILER
4248 printf ("profiler_heap_report_object_unreachable: at job %p writing size %p\n", job, GUINT_TO_POINTER (size));
4250 WRITE_HEAP_SHOT_JOB_VALUE (job, GUINT_TO_POINTER (size));
/*
 * profiler_heap_add_object:
 * Append a newly-tracked object to the heap slot list (growing the buffer
 * chain on demand, reusing an existing next buffer when available) and
 * immediately report it as reachable to the current write job.
 */
4256 profiler_heap_add_object (ProfilerHeapShotHeapBuffers *heap, ProfilerHeapShotWriteJob *job, MonoObject *obj) {
4257 if (heap->first_free_slot >= heap->current->end_slot) {
4258 if (heap->current->next != NULL) {
4259 heap->current = heap->current->next;
4261 ProfilerHeapShotHeapBuffer *buffer = g_new (ProfilerHeapShotHeapBuffer, 1);
4262 buffer->previous = heap->last;
4263 buffer->next = NULL;
4264 buffer->start_slot = &(buffer->buffer [0]);
4265 buffer->end_slot = &(buffer->buffer [PROFILER_HEAP_SHOT_HEAP_BUFFER_SIZE]);
4266 heap->current = buffer;
4267 heap->last->next = buffer;
4268 heap->last = buffer;
4270 heap->first_free_slot = &(heap->current->buffer [0]);
4273 *(heap->first_free_slot) = obj;
4274 heap->first_free_slot ++;
4275 profiler_heap_report_object_reachable (job, obj);
/*
 * profiler_heap_pop_object_from_end:
 * Take objects from the end of the heap slot list (walking backwards across
 * buffers as needed) until one is found; the live one is reported reachable,
 * a dead one unreachable.  Used by profiler_heap_scan to back-fill the hole
 * left by a dead object at current_slot.
 */
4279 profiler_heap_pop_object_from_end (ProfilerHeapShotHeapBuffers *heap, ProfilerHeapShotWriteJob *job, MonoObject** current_slot) {
4280 while (heap->first_free_slot != current_slot) {
/* Step back one slot, crossing to the previous buffer when at its start. */
4283 if (heap->first_free_slot > heap->current->start_slot) {
4284 heap->first_free_slot --;
4286 heap->current = heap->current->previous;
4287 g_assert (heap->current != NULL);
4288 heap->first_free_slot = heap->current->end_slot - 1;
4291 obj = *(heap->first_free_slot);
4293 if (mono_object_is_alive (obj)) {
4294 profiler_heap_report_object_reachable (job, obj);
4297 profiler_heap_report_object_unreachable (job, obj);
/*
 * profiler_heap_scan:
 * Scans every tracked object in the heap buffer chain after a collection.
 * Live objects are reported as reachable; dead ones are reported as
 * unreachable and their slot is refilled with a live object popped from the
 * end of the chain (which keeps the chain dense).
 */
4304 profiler_heap_scan (ProfilerHeapShotHeapBuffers *heap, ProfilerHeapShotWriteJob *job) {
4305 ProfilerHeapShotHeapBuffer *current_buffer = heap->buffers;
4306 MonoObject** current_slot = current_buffer->start_slot;
4308 while (current_slot != heap->first_free_slot) {
4309 MonoObject *obj = *current_slot;
4310 if (mono_object_is_alive (obj)) {
4311 profiler_heap_report_object_reachable (job, obj);
/* Dead object: replace it with a live one taken from the end of the chain
 * (may be NULL if nothing live remains behind the cursor). */
4313 profiler_heap_report_object_unreachable (job, obj);
4314 *current_slot = profiler_heap_pop_object_from_end (heap, job, current_slot);
4317 if (*current_slot != NULL) {
/* Advance to the next buffer when this one is exhausted. */
4320 if (current_slot == current_buffer->end_slot) {
4321 current_buffer = current_buffer->next;
4322 g_assert (current_buffer != NULL);
4323 current_slot = current_buffer->start_slot;
/* A heap-shot write job is needed when a heap dump was requested for this
 * collection, or when the user asked for unreachable-object reporting or
 * per-collection summaries. */
4329 static inline gboolean
4330 heap_shot_write_job_should_be_created (gboolean dump_heap_data) {
4331 return dump_heap_data || profiler->action_flags.unreachable_objects || profiler->action_flags.collection_summary;
/*
 * handle_heap_profiling:
 * Per-GC-event heap-shot driver.  At PRE_STOP_WORLD it takes the profiler
 * lock; at POST_STOP_WORLD it decides whether this collection dumps heap
 * data and flushes mappings/event buffers; at MARK_END it builds the write
 * job, scans the heap, and wakes the writer thread.
 * NOTE(review): the switch header and break statements are elided in this
 * listing; `dump_heap_data` is static so the POST_STOP_WORLD decision is
 * carried over to the later MARK_END event of the same collection.
 */
4335 handle_heap_profiling (MonoProfiler *profiler, MonoGCEvent ev) {
4336 static gboolean dump_heap_data;
4339 case MONO_GC_EVENT_PRE_STOP_WORLD:
4340 // Get the lock, so we are sure nobody is flushing events during the collection,
4341 // and we can update all mappings (building the class descriptors).
4344 case MONO_GC_EVENT_POST_STOP_WORLD:
4345 dump_heap_data = dump_current_heap_snapshot ();
4346 if (heap_shot_write_job_should_be_created (dump_heap_data)) {
4347 ProfilerPerThreadData *data;
4348 // Update all mappings, so that we have built all the class descriptors.
4349 flush_all_mappings ();
4350 // Also write all event buffers, so that allocations are recorded.
4351 for (data = profiler->per_thread_data; data != NULL; data = data->next) {
4352 write_thread_data_block (data);
4358 case MONO_GC_EVENT_MARK_END: {
4359 ProfilerHeapShotWriteJob *job;
4360 ProfilerPerThreadData *data;
4362 if (heap_shot_write_job_should_be_created (dump_heap_data)) {
4363 job = profiler_heap_shot_write_job_new (profiler->heap_shot_was_signalled, dump_heap_data, profiler->garbage_collection_counter);
4364 profiler->heap_shot_was_signalled = FALSE;
4365 MONO_PROFILER_GET_CURRENT_COUNTER (job->start_counter);
4366 MONO_PROFILER_GET_CURRENT_TIME (job->start_time);
/* Classify every object already tracked in the profiler heap. */
4371 profiler_heap_scan (&(profiler->heap), job);
/* Process newly-allocated objects recorded in per-thread buffers since the
 * last collection: live ones join the tracked heap, dead ones are reported. */
4373 for (data = profiler->per_thread_data; data != NULL; data = data->next) {
4374 ProfilerHeapShotObjectBuffer *buffer;
4375 for (buffer = data->heap_shot_object_buffers; buffer != NULL; buffer = buffer->next) {
4376 MonoObject **cursor;
4377 for (cursor = buffer->first_unprocessed_slot; cursor < buffer->next_free_slot; cursor ++) {
4378 MonoObject *obj = *cursor;
4379 #if DEBUG_HEAP_PROFILER
4380 printf ("gc_event: in object buffer %p(%p-%p) cursor at %p has object %p ", buffer, &(buffer->buffer [0]), buffer->end, cursor, obj);
4382 if (mono_object_is_alive (obj)) {
4383 #if DEBUG_HEAP_PROFILER
4384 printf ("(object is alive, adding to heap)\n");
4386 profiler_heap_add_object (&(profiler->heap), job, obj);
4388 #if DEBUG_HEAP_PROFILER
4389 printf ("(object is unreachable, reporting in job)\n");
4391 profiler_heap_report_object_unreachable (job, obj);
/* Remember how far we processed so the next collection resumes here. */
4394 buffer->first_unprocessed_slot = cursor;
4399 MONO_PROFILER_GET_CURRENT_COUNTER (job->end_counter);
4400 MONO_PROFILER_GET_CURRENT_TIME (job->end_time);
/* Queue the finished job and wake the writer thread to emit it. */
4402 profiler_add_heap_shot_write_job (job);
4403 profiler_free_heap_shot_write_jobs ();
4404 WRITER_EVENT_RAISE ();
/*
 * gc_event:
 * Profiler GC callback.  Records a GC event number (collection counter in
 * the high bits, generation in the low byte) and, when any heap-profiling
 * mode is active, forwards the event to handle_heap_profiling.
 * POST_STOP_WORLD is forwarded BEFORE the event record is stored, all other
 * events AFTER it — presumably so event buffers are flushed in the right
 * order around the stop-the-world window (elided lines prevent confirming).
 */
4414 gc_event (MonoProfiler *profiler, MonoGCEvent ev, int generation) {
4415 ProfilerPerThreadData *data;
4416 gboolean do_heap_profiling = profiler->action_flags.unreachable_objects || profiler->action_flags.heap_shot || profiler->action_flags.collection_summary;
4417 guint32 event_value;
4419 GET_PROFILER_THREAD_DATA (data);
4421 if (ev == MONO_GC_EVENT_START) {
4422 profiler->garbage_collection_counter ++;
/* Pack counter and generation into one 32-bit value (generation in low 8 bits). */
4425 event_value = (profiler->garbage_collection_counter << 8) | generation;
4427 if (do_heap_profiling && (ev == MONO_GC_EVENT_POST_STOP_WORLD)) {
4428 handle_heap_profiling (profiler, ev);
4430 STORE_EVENT_NUMBER_COUNTER (profiler, event_value, MONO_PROFILER_EVENT_DATA_TYPE_OTHER, gc_event_code_from_profiler_event (ev), gc_event_kind_from_profiler_event (ev));
4431 if (do_heap_profiling && (ev != MONO_GC_EVENT_POST_STOP_WORLD)) {
4432 handle_heap_profiling (profiler, ev);
/*
 * gc_resize:
 * Profiler callback for heap-resize events: bumps the collection counter and
 * records the new heap size as an event.
 */
4437 gc_resize (MonoProfiler *profiler, gint64 new_size) {
4438 ProfilerPerThreadData *data;
4439 GET_PROFILER_THREAD_DATA (data);
4440 profiler->garbage_collection_counter ++;
4441 STORE_EVENT_NUMBER_VALUE (profiler, new_size, MONO_PROFILER_EVENT_DATA_TYPE_OTHER, MONO_PROFILER_EVENT_GC_RESIZE, 0, profiler->garbage_collection_counter);
/*
 * runtime_initialized:
 * Called once the runtime is up: enables the writer thread (raise the enable
 * event, then wait for its acknowledgement) and registers the managed
 * internal calls that let Mono.Profiler.RuntimeControls drive the profiler.
 */
4445 runtime_initialized (MonoProfiler *profiler) {
4446 LOG_WRITER_THREAD ("runtime_initialized: waking writer thread to enable it...\n");
4447 WRITER_EVENT_ENABLE_RAISE ();
4448 LOG_WRITER_THREAD ("runtime_initialized: waiting writer thread...\n");
4449 WRITER_EVENT_DONE_WAIT ();
4450 LOG_WRITER_THREAD ("runtime_initialized: writer thread enabled.\n");
4451 mono_add_internal_call ("Mono.Profiler.RuntimeControls::EnableProfiler", enable_profiler);
4452 mono_add_internal_call ("Mono.Profiler.RuntimeControls::DisableProfiler", disable_profiler);
4453 mono_add_internal_call ("Mono.Profiler.RuntimeControls::TakeHeapSnapshot", request_heap_snapshot);
4454 LOG_WRITER_THREAD ("runtime_initialized: initialized internal calls.\n");
4457 /* called at the end of the program */
/*
 * profiler_shutdown:
 * Tears the profiler down: disables event delivery, asks the writer thread
 * to terminate and waits for it, flushes remaining data, then frees every
 * profiler-owned resource (mappings, hash tables, per-thread data,
 * statistical buffers, heap buffers, write buffers, mutex).
 * NOTE(review): several cleanup lines are elided in this listing.
 */
4459 profiler_shutdown (MonoProfiler *prof)
4461 ProfilerPerThreadData* current_thread_data;
4462 ProfilerPerThreadData* next_thread_data;
4464 LOG_WRITER_THREAD ("profiler_shutdown: zeroing relevant flags");
/* Stop the runtime from delivering further profiler events. */
4465 mono_profiler_set_events (0);
4466 //profiler->flags = 0;
4467 //profiler->action_flags.unreachable_objects = FALSE;
4468 //profiler->action_flags.heap_shot = FALSE;
/* Terminate the writer thread: set the flag, wake it, wait for it to exit. */
4470 LOG_WRITER_THREAD ("profiler_shutdown: asking stats thread to exit");
4471 profiler->terminate_writer_thread = TRUE;
4472 WRITER_EVENT_RAISE ();
4473 LOG_WRITER_THREAD ("profiler_shutdown: waiting for stats thread to exit");
4474 WAIT_WRITER_THREAD ();
4475 LOG_WRITER_THREAD ("profiler_shutdown: stats thread should be dead now");
4476 WRITER_EVENT_DESTROY ();
/* Write whatever is still buffered and stamp the end time/counter. */
4479 flush_everything ();
4480 MONO_PROFILER_GET_CURRENT_TIME (profiler->end_time);
4481 MONO_PROFILER_GET_CURRENT_COUNTER (profiler->end_counter);
/* Free all profiler-owned allocations. */
4487 g_free (profiler->file_name);
4488 if (profiler->file_name_suffix != NULL) {
4489 g_free (profiler->file_name_suffix);
4492 method_id_mapping_destroy (profiler->methods);
4493 class_id_mapping_destroy (profiler->classes);
4494 g_hash_table_destroy (profiler->loaded_assemblies);
4495 g_hash_table_destroy (profiler->loaded_modules);
4496 g_hash_table_destroy (profiler->loaded_appdomains);
4498 FREE_PROFILER_THREAD_DATA ();
/* Walk the per-thread list saving `next` first, since each node is freed. */
4500 for (current_thread_data = profiler->per_thread_data; current_thread_data != NULL; current_thread_data = next_thread_data) {
4501 next_thread_data = current_thread_data->next;
4502 profiler_per_thread_data_destroy (current_thread_data);
4504 if (profiler->statistical_data != NULL) {
4505 profiler_statistical_data_destroy (profiler->statistical_data);
4507 if (profiler->statistical_data_ready != NULL) {
4508 profiler_statistical_data_destroy (profiler->statistical_data_ready);
4510 if (profiler->statistical_data_second_buffer != NULL) {
4511 profiler_statistical_data_destroy (profiler->statistical_data_second_buffer);
4513 if (profiler->executable_regions != NULL) {
4514 profiler_executable_memory_regions_destroy (profiler->executable_regions);
4517 profiler_heap_buffers_free (&(profiler->heap));
4518 if (profiler->heap_shot_command_file_name != NULL) {
4519 g_free (profiler->heap_shot_command_file_name);
4522 profiler_free_write_buffers ();
4523 profiler_destroy_heap_shot_write_jobs ();
4525 DELETE_PROFILER_MUTEX ();
/* Close the oprofile agent if it was opened in setup_user_options. */
4528 if (profiler->action_flags.oprofile) {
4537 #ifndef PLATFORM_WIN32
/*
 * parse_signal_name:
 * Maps a symbolic signal name (case-insensitive "SIGUSR1"/"SIGUSR2"/
 * "SIGPROF") to its signal number; anything else is parsed numerically with
 * atoi (so unparseable strings yield 0).
 * NOTE(review): the return statements for the named cases are elided in this
 * listing.
 */
4539 parse_signal_name (const char *signal_name) {
4540 if (! strcasecmp (signal_name, "SIGUSR1")) {
4542 } else if (! strcasecmp (signal_name, "SIGUSR2")) {
4544 } else if (! strcasecmp (signal_name, "SIGPROF")) {
4547 return atoi (signal_name);
/*
 * check_signal_number:
 * Validates a user-chosen signal: SIGUSR1 and SIGUSR2 are acceptable, and
 * SIGPROF only when statistical profiling is NOT enabled (it would otherwise
 * conflict with the sampling timer).  The body of the if (elided here)
 * handles the acceptable case.
 */
4551 check_signal_number (int signal_number) {
4552 if (((signal_number == SIGPROF) && ! (profiler->flags & MONO_PROFILE_STATISTICAL)) ||
4553 (signal_number == SIGUSR1) ||
4554 (signal_number == SIGUSR2)) {
4562 #define DEFAULT_ARGUMENTS "s"
/*
 * setup_user_options:
 * Parses the profiler argument string (comma-separated "name=value" options
 * and bare flags, with short aliases), filling in defaults first.  Also
 * installs the optional GC-request and toggle signal handlers and derives a
 * default output file name from the program name when none was given.
 *
 * Fix: the toggle-signal error path previously reported
 * gc_request_signal_number in its g_error message (copy-paste from the
 * gc-signal branch above); it now reports toggle_signal_number.
 */
4564 setup_user_options (const char *arguments) {
4565 gchar **arguments_array, **current_argument;
4566 #ifndef PLATFORM_WIN32
4567 int gc_request_signal_number = 0;
4568 int toggle_signal_number = 0;
4570 detect_fast_timer ();
/* Defaults; overridden below by the parsed options. */
4572 profiler->file_name = NULL;
4573 profiler->file_name_suffix = NULL;
4574 profiler->per_thread_buffer_size = 10000;
4575 profiler->statistical_buffer_size = 10000;
4576 profiler->statistical_call_chain_depth = 0;
4577 profiler->write_buffer_size = 1024;
4578 profiler->heap_shot_command_file_name = NULL;
4579 profiler->dump_next_heap_snapshots = 0;
4580 profiler->heap_shot_command_file_access_time = 0;
4581 profiler->heap_shot_was_signalled = FALSE;
4582 profiler->flags = MONO_PROFILE_APPDOMAIN_EVENTS|
4583 MONO_PROFILE_ASSEMBLY_EVENTS|
4584 MONO_PROFILE_MODULE_EVENTS|
4585 MONO_PROFILE_CLASS_EVENTS|
4586 MONO_PROFILE_METHOD_EVENTS|
4587 MONO_PROFILE_JIT_COMPILATION;
4588 profiler->profiler_enabled = TRUE;
/* Skip the "name:" prefix of the profiler description, if present. */
4590 if (arguments == NULL) {
4591 arguments = DEFAULT_ARGUMENTS;
4592 } else if (strstr (arguments, ":")) {
4593 arguments = strstr (arguments, ":") + 1;
4594 if (arguments [0] == 0) {
4595 arguments = DEFAULT_ARGUMENTS;
4599 arguments_array = g_strsplit (arguments, ",", -1);
4601 for (current_argument = arguments_array; ((current_argument != NULL) && (current_argument [0] != 0)); current_argument ++) {
4602 char *argument = *current_argument;
4603 char *equals = strstr (argument, "=");
/* "name=value" options: match the name (or an alias) up to the '='. */
4605 if (equals != NULL) {
4606 int equals_position = equals - argument;
4608 if (! (strncmp (argument, "per-thread-buffer-size", equals_position) && strncmp (argument, "tbs", equals_position))) {
4609 int value = atoi (equals + 1);
4611 profiler->per_thread_buffer_size = value;
4613 } else if (! (strncmp (argument, "statistical", equals_position) && strncmp (argument, "stat", equals_position) && strncmp (argument, "s", equals_position))) {
4614 int value = atoi (equals + 1);
4619 profiler->statistical_call_chain_depth = value;
4620 profiler->flags |= MONO_PROFILE_STATISTICAL|MONO_PROFILE_JIT_COMPILATION;
4622 } else if (! (strncmp (argument, "statistical-thread-buffer-size", equals_position) && strncmp (argument, "sbs", equals_position))) {
4623 int value = atoi (equals + 1);
4625 profiler->statistical_buffer_size = value;
4627 } else if (! (strncmp (argument, "write-buffer-size", equals_position) && strncmp (argument, "wbs", equals_position))) {
4628 int value = atoi (equals + 1);
4630 profiler->write_buffer_size = value;
4632 } else if (! (strncmp (argument, "output", equals_position) && strncmp (argument, "out", equals_position) && strncmp (argument, "o", equals_position) && strncmp (argument, "O", equals_position))) {
4633 if (strlen (equals + 1) > 0) {
4634 profiler->file_name = g_strdup (equals + 1);
4636 } else if (! (strncmp (argument, "output-suffix", equals_position) && strncmp (argument, "suffix", equals_position) && strncmp (argument, "os", equals_position) && strncmp (argument, "OS", equals_position))) {
4637 if (strlen (equals + 1) > 0) {
4638 profiler->file_name_suffix = g_strdup (equals + 1);
4640 } else if (! (strncmp (argument, "gc-commands", equals_position) && strncmp (argument, "gc-c", equals_position) && strncmp (argument, "gcc", equals_position))) {
4641 if (strlen (equals + 1) > 0) {
4642 profiler->heap_shot_command_file_name = g_strdup (equals + 1);
4644 } else if (! (strncmp (argument, "gc-dumps", equals_position) && strncmp (argument, "gc-d", equals_position) && strncmp (argument, "gcd", equals_position))) {
4645 if (strlen (equals + 1) > 0) {
4646 profiler->dump_next_heap_snapshots = atoi (equals + 1);
4648 #ifndef PLATFORM_WIN32
4649 } else if (! (strncmp (argument, "gc-signal", equals_position) && strncmp (argument, "gc-s", equals_position) && strncmp (argument, "gcs", equals_position))) {
4650 if (strlen (equals + 1) > 0) {
4651 char *signal_name = equals + 1;
4652 gc_request_signal_number = parse_signal_name (signal_name);
4654 } else if (! (strncmp (argument, "toggle-signal", equals_position) && strncmp (argument, "ts", equals_position))) {
4655 if (strlen (equals + 1) > 0) {
4656 char *signal_name = equals + 1;
4657 toggle_signal_number = parse_signal_name (signal_name);
4661 g_warning ("Cannot parse valued argument %s\n", argument);
/* Bare flag options (no '='). */
4664 if (! (strcmp (argument, "jit") && strcmp (argument, "j"))) {
4665 profiler->flags |= MONO_PROFILE_JIT_COMPILATION;
4666 profiler->action_flags.jit_time = TRUE;
4667 } else if (! (strcmp (argument, "allocations") && strcmp (argument, "alloc") && strcmp (argument, "a"))) {
4668 profiler->flags |= MONO_PROFILE_ALLOCATIONS|MONO_PROFILE_GC;
4669 } else if (! (strcmp (argument, "gc") && strcmp (argument, "g"))) {
4670 profiler->flags |= MONO_PROFILE_GC;
4671 } else if (! (strcmp (argument, "allocations-summary") && strcmp (argument, "as"))) {
4672 profiler->flags |= MONO_PROFILE_ALLOCATIONS|MONO_PROFILE_GC;
4673 profiler->action_flags.collection_summary = TRUE;
4674 } else if (! (strcmp (argument, "heap-shot") && strcmp (argument, "heap") && strcmp (argument, "h"))) {
4675 profiler->flags |= MONO_PROFILE_ALLOCATIONS|MONO_PROFILE_GC;
4676 profiler->action_flags.heap_shot = TRUE;
4677 } else if (! (strcmp (argument, "unreachable") && strcmp (argument, "free") && strcmp (argument, "f"))) {
4678 profiler->flags |= MONO_PROFILE_ALLOCATIONS|MONO_PROFILE_GC;
4679 profiler->action_flags.unreachable_objects = TRUE;
4680 } else if (! (strcmp (argument, "threads") && strcmp (argument, "t"))) {
4681 profiler->flags |= MONO_PROFILE_THREADS;
4682 } else if (! (strcmp (argument, "enter-leave") && strcmp (argument, "calls") && strcmp (argument, "c"))) {
4683 profiler->flags |= MONO_PROFILE_ENTER_LEAVE;
4684 profiler->action_flags.jit_time = TRUE;
4685 profiler->action_flags.track_calls = TRUE;
4686 } else if (! (strcmp (argument, "statistical") && strcmp (argument, "stat") && strcmp (argument, "s"))) {
4687 profiler->flags |= MONO_PROFILE_STATISTICAL;
4688 } else if (! (strcmp (argument, "track-stack") && strcmp (argument, "ts"))) {
4689 profiler->flags |= MONO_PROFILE_ENTER_LEAVE;
4690 profiler->action_flags.track_stack = TRUE;
4691 } else if (! (strcmp (argument, "start-enabled") && strcmp (argument, "se"))) {
4692 profiler->profiler_enabled = TRUE;
4693 } else if (! (strcmp (argument, "start-disabled") && strcmp (argument, "sd"))) {
4694 profiler->profiler_enabled = FALSE;
4695 } else if (! (strcmp (argument, "force-accurate-timer") && strcmp (argument, "fac"))) {
4696 use_fast_timer = FALSE;
4698 } else if (! (strcmp (argument, "oprofile") && strcmp (argument, "oprof"))) {
4699 profiler->flags |= MONO_PROFILE_JIT_COMPILATION;
4700 profiler->action_flags.oprofile = TRUE;
4701 if (op_open_agent ()) {
4702 g_warning ("Problem calling op_open_agent\n");
4705 } else if (strcmp (argument, "logging")) {
4706 g_warning ("Cannot parse flag argument %s\n", argument);
4711 g_free (arguments_array);
/* Install signal handlers; the two signals must be valid and distinct. */
4713 #ifndef PLATFORM_WIN32
4714 if (gc_request_signal_number != 0) {
4715 if (check_signal_number (gc_request_signal_number) && (gc_request_signal_number != toggle_signal_number)) {
4716 add_gc_request_handler (gc_request_signal_number);
4718 g_error ("Cannot use signal %d", gc_request_signal_number);
4721 if (toggle_signal_number != 0) {
4722 if (check_signal_number (toggle_signal_number) && (toggle_signal_number != gc_request_signal_number)) {
4723 add_toggle_handler (toggle_signal_number);
/* FIXED: report the toggle signal, not the gc-request signal. */
4725 g_error ("Cannot use signal %d", toggle_signal_number);
/* No output file given: derive "<basename>[-suffix].mprof" from the program
 * name (stripping directory components and the extension). */
4730 if (profiler->file_name == NULL) {
4731 char *program_name = g_get_prgname ();
4733 if (program_name != NULL) {
4734 char *name_buffer = g_strdup (program_name);
4735 char *name_start = name_buffer;
4738 /* Jump over the last '/' */
4739 cursor = strrchr (name_buffer, '/');
4740 if (cursor == NULL) {
4741 cursor = name_buffer;
4745 name_start = cursor;
4747 /* Then jump over the last '\\' */
4748 cursor = strrchr (name_start, '\\');
4749 if (cursor == NULL) {
4750 cursor = name_start;
4754 name_start = cursor;
4756 /* Finally, find the last '.' */
4757 cursor = strrchr (name_start, '.');
4758 if (cursor != NULL) {
4762 if (profiler->file_name_suffix == NULL) {
4763 profiler->file_name = g_strdup_printf ("%s.mprof", name_start);
4765 profiler->file_name = g_strdup_printf ("%s-%s.mprof", name_start, profiler->file_name_suffix);
4767 g_free (name_buffer);
4769 profiler->file_name = g_strdup_printf ("%s.mprof", "profiler-log");
/*
 * thread_detach_callback:
 * Manage-callback installed on the writer thread: sets the detach flag and
 * wakes the writer thread so it can flush its data and detach itself from
 * the runtime.
 */
4775 thread_detach_callback (MonoThread *thread) {
4776 LOG_WRITER_THREAD ("thread_detach_callback: asking writer thread to detach");
4777 profiler->detach_writer_thread = TRUE;
4778 WRITER_EVENT_RAISE ();
4779 LOG_WRITER_THREAD ("thread_detach_callback: done");
/*
 * data_writer_thread:
 * Body of the background writer thread.  After being enabled it attaches to
 * the root appdomain, then loops: sleep on the writer event; on wake-up it
 * may trigger a requested GC, flush everything on request, write mappings,
 * statistical data and heap-shot jobs, detach on request, and exit when
 * terminate_writer_thread is set.
 * NOTE(review): loop braces and lock/unlock lines are elided in this listing;
 * the flush path relies on the waking thread holding the profiler lock (see
 * comment at 4829).
 */
4784 data_writer_thread (gpointer nothing) {
4785 static gboolean thread_attached = FALSE;
4786 static gboolean thread_detached = FALSE;
4787 static MonoThread *this_thread = NULL;
4789 /* Wait for the OK to attach to the runtime */
4790 WRITER_EVENT_ENABLE_WAIT ();
4791 if (! profiler->terminate_writer_thread) {
4792 MonoDomain * root_domain = mono_get_root_domain ();
4793 if (root_domain != NULL) {
4794 LOG_WRITER_THREAD ("data_writer_thread: attaching thread");
4795 this_thread = mono_thread_attach (root_domain);
4796 mono_thread_set_manage_callback (this_thread, thread_detach_callback);
4797 thread_attached = TRUE;
4799 g_error ("Cannot get root domain\n");
4802 /* Execution was too short, pretend we attached and detached. */
4803 thread_attached = TRUE;
4804 thread_detached = TRUE;
4806 profiler->writer_thread_enabled = TRUE;
4807 /* Notify that we are attached to the runtime */
4808 WRITER_EVENT_DONE_RAISE ();
4811 ProfilerStatisticalData *statistical_data;
4814 LOG_WRITER_THREAD ("data_writer_thread: going to sleep");
4815 WRITER_EVENT_WAIT ();
4816 LOG_WRITER_THREAD ("data_writer_thread: just woke up");
/* A heap snapshot was requested from managed code or a signal: force a
 * full collection so the heap-shot machinery runs. */
4818 if (profiler->heap_shot_was_signalled) {
4819 LOG_WRITER_THREAD ("data_writer_thread: starting requested collection");
4820 mono_gc_collect (mono_gc_max_generation ());
4821 LOG_WRITER_THREAD ("data_writer_thread: requested collection done");
4824 statistical_data = profiler->statistical_data_ready;
4825 done = (statistical_data == NULL) && (profiler->heap_shot_write_jobs == NULL) && (profiler->writer_thread_flush_everything == FALSE);
4827 if ((!done) && thread_attached) {
4828 if (profiler->writer_thread_flush_everything) {
4829 /* Note that this assumes the lock is held by the thread that woke us up! */
4830 if (! thread_detached) {
4831 LOG_WRITER_THREAD ("data_writer_thread: flushing everything...");
4832 flush_everything ();
4833 profiler->writer_thread_flush_everything = FALSE;
4834 WRITER_EVENT_DONE_RAISE ();
4835 LOG_WRITER_THREAD ("data_writer_thread: flushed everything.");
4837 LOG_WRITER_THREAD ("data_writer_thread: flushing requested, but thread is detached...");
4838 profiler->writer_thread_flush_everything = FALSE;
4839 WRITER_EVENT_DONE_RAISE ();
4840 LOG_WRITER_THREAD ("data_writer_thread: done event raised.");
4843 LOG_WRITER_THREAD ("data_writer_thread: acquiring lock and writing data");
4846 // This makes sure that all method ids are in place
4847 LOG_WRITER_THREAD ("data_writer_thread: writing mapping...");
4848 flush_all_mappings ();
4849 LOG_WRITER_THREAD ("data_writer_thread: wrote mapping");
/* Write the ready statistical buffer, then recycle it as the second
 * (standby) buffer for the sampler to swap into. */
4851 if ((statistical_data != NULL) && ! thread_detached) {
4852 LOG_WRITER_THREAD ("data_writer_thread: writing statistical data...");
4853 profiler->statistical_data_ready = NULL;
4854 write_statistical_data_block (statistical_data);
4855 statistical_data->next_free_index = 0;
4856 statistical_data->first_unwritten_index = 0;
4857 profiler->statistical_data_second_buffer = statistical_data;
4858 LOG_WRITER_THREAD ("data_writer_thread: wrote statistical data");
4861 profiler_process_heap_shot_write_jobs ();
4864 LOG_WRITER_THREAD ("data_writer_thread: wrote data and released lock");
/* Not attached: still acknowledge flush requests so the waker isn't stuck. */
4867 if (profiler->writer_thread_flush_everything) {
4868 LOG_WRITER_THREAD ("data_writer_thread: flushing requested, but thread is not attached...");
4869 profiler->writer_thread_flush_everything = FALSE;
4870 WRITER_EVENT_DONE_RAISE ();
4871 LOG_WRITER_THREAD ("data_writer_thread: done event raised.");
/* Detach request (from thread_detach_callback): flush and detach once. */
4875 if (profiler->detach_writer_thread) {
4876 if (this_thread != NULL) {
4877 LOG_WRITER_THREAD ("data_writer_thread: detach requested, acquiring lock and flushing data");
4879 flush_everything ();
4881 LOG_WRITER_THREAD ("data_writer_thread: flushed data and released lock");
4882 LOG_WRITER_THREAD ("data_writer_thread: detaching thread");
4883 mono_thread_detach (this_thread);
4885 profiler->detach_writer_thread = FALSE;
4886 thread_detached = TRUE;
4888 LOG_WRITER_THREAD ("data_writer_thread: warning: thread has already been detached");
4892 if (profiler->terminate_writer_thread) {
4893 LOG_WRITER_THREAD ("data_writer_thread: exiting thread");
4894 CLEANUP_WRITER_THREAD ();
4902 mono_profiler_startup (const char *desc);
4904 /* the entry point (mono_profiler_load?) */
4906 mono_profiler_startup (const char *desc)
4908 profiler = g_new0 (MonoProfiler, 1);
4910 setup_user_options ((desc != NULL) ? desc : DEFAULT_ARGUMENTS);
4912 INITIALIZE_PROFILER_MUTEX ();
4913 MONO_PROFILER_GET_CURRENT_TIME (profiler->start_time);
4914 MONO_PROFILER_GET_CURRENT_COUNTER (profiler->start_counter);
4915 profiler->last_header_counter = 0;
4917 profiler->methods = method_id_mapping_new ();
4918 profiler->classes = class_id_mapping_new ();
4919 profiler->loaded_assemblies = g_hash_table_new_full (g_direct_hash, NULL, NULL, loaded_element_destroy);
4920 profiler->loaded_modules = g_hash_table_new_full (g_direct_hash, NULL, NULL, loaded_element_destroy);
4921 profiler->loaded_appdomains = g_hash_table_new_full (g_direct_hash, NULL, NULL, loaded_element_destroy);
4923 profiler->statistical_data = profiler_statistical_data_new (profiler);
4924 profiler->statistical_data_second_buffer = profiler_statistical_data_new (profiler);
4926 profiler->write_buffers = g_malloc (sizeof (ProfilerFileWriteBuffer) + PROFILER_FILE_WRITE_BUFFER_SIZE);
4927 profiler->write_buffers->next = NULL;
4928 profiler->current_write_buffer = profiler->write_buffers;
4929 profiler->current_write_position = 0;
4930 profiler->full_write_buffers = 0;
4932 profiler->executable_regions = profiler_executable_memory_regions_new (1, 1);
4934 profiler->executable_files.table = g_hash_table_new (g_str_hash, g_str_equal);
4935 profiler->executable_files.new_files = NULL;
4937 profiler->heap_shot_write_jobs = NULL;
4938 if (profiler->action_flags.unreachable_objects || profiler->action_flags.heap_shot || profiler->action_flags.collection_summary) {
4939 profiler_heap_buffers_setup (&(profiler->heap));
4941 profiler_heap_buffers_clear (&(profiler->heap));
4943 profiler->garbage_collection_counter = 0;
4945 WRITER_EVENT_INIT ();
4946 LOG_WRITER_THREAD ("mono_profiler_startup: creating writer thread");
4947 CREATE_WRITER_THREAD (data_writer_thread);
4948 LOG_WRITER_THREAD ("mono_profiler_startup: created writer thread");
4950 ALLOCATE_PROFILER_THREAD_DATA ();
4954 write_intro_block ();
4955 write_directives_block (TRUE);
4957 mono_profiler_install (profiler, profiler_shutdown);
4959 mono_profiler_install_appdomain (appdomain_start_load, appdomain_end_load,
4960 appdomain_start_unload, appdomain_end_unload);
4961 mono_profiler_install_assembly (assembly_start_load, assembly_end_load,
4962 assembly_start_unload, assembly_end_unload);
4963 mono_profiler_install_module (module_start_load, module_end_load,
4964 module_start_unload, module_end_unload);
4965 mono_profiler_install_class (class_start_load, class_end_load,
4966 class_start_unload, class_end_unload);
4967 mono_profiler_install_jit_compile (method_start_jit, method_end_jit);
4968 mono_profiler_install_enter_leave (method_enter, method_leave);
4969 mono_profiler_install_method_free (method_free);
4970 mono_profiler_install_thread (thread_start, thread_end);
4971 mono_profiler_install_allocation (object_allocated);
4972 mono_profiler_install_statistical (statistical_hit);
4973 mono_profiler_install_statistical_call_chain (statistical_call_chain, profiler->statistical_call_chain_depth);
4974 mono_profiler_install_gc (gc_event, gc_resize);
4975 mono_profiler_install_runtime_initialized (runtime_initialized);
4977 mono_profiler_install_jit_end (method_jit_result);
4980 mono_profiler_set_events (profiler->flags);