2 * mono-profiler-logging.c: Logging profiler for Mono.
5 * Massimiliano Mantione (massi@ximian.com)
7 * Copyright 2008-2009 Novell, Inc (http://www.novell.com)
10 #include <mono/metadata/profiler.h>
11 #include <mono/metadata/class.h>
12 #include <mono/metadata/class-internals.h>
13 #include <mono/metadata/assembly.h>
14 #include <mono/metadata/loader.h>
15 #include <mono/metadata/threads.h>
16 #include <mono/metadata/debug-helpers.h>
17 #include <mono/metadata/mono-gc.h>
18 #include <mono/io-layer/atomic.h>
27 #define HAS_OPROFILE 0
30 #include <libopagent.h>
33 // Needed for heap analysis
34 extern gboolean mono_object_is_alive (MonoObject* obj);
37 MONO_PROFILER_FILE_BLOCK_KIND_INTRO = 1,
38 MONO_PROFILER_FILE_BLOCK_KIND_END = 2,
39 MONO_PROFILER_FILE_BLOCK_KIND_MAPPING = 3,
40 MONO_PROFILER_FILE_BLOCK_KIND_LOADED = 4,
41 MONO_PROFILER_FILE_BLOCK_KIND_UNLOADED = 5,
42 MONO_PROFILER_FILE_BLOCK_KIND_EVENTS = 6,
43 MONO_PROFILER_FILE_BLOCK_KIND_STATISTICAL = 7,
44 MONO_PROFILER_FILE_BLOCK_KIND_HEAP_DATA = 8,
45 MONO_PROFILER_FILE_BLOCK_KIND_HEAP_SUMMARY = 9,
46 MONO_PROFILER_FILE_BLOCK_KIND_DIRECTIVES = 10
47 } MonoProfilerFileBlockKind;
50 MONO_PROFILER_DIRECTIVE_END = 0,
51 MONO_PROFILER_DIRECTIVE_ALLOCATIONS_CARRY_CALLER = 1,
52 MONO_PROFILER_DIRECTIVE_ALLOCATIONS_HAVE_STACK = 2,
53 MONO_PROFILER_DIRECTIVE_ALLOCATIONS_CARRY_ID = 3,
54 MONO_PROFILER_DIRECTIVE_LAST
55 } MonoProfilerDirectives;
58 #define MONO_PROFILER_LOADED_EVENT_MODULE 1
59 #define MONO_PROFILER_LOADED_EVENT_ASSEMBLY 2
60 #define MONO_PROFILER_LOADED_EVENT_APPDOMAIN 4
61 #define MONO_PROFILER_LOADED_EVENT_SUCCESS 8
62 #define MONO_PROFILER_LOADED_EVENT_FAILURE 16
65 MONO_PROFILER_EVENT_DATA_TYPE_OTHER = 0,
66 MONO_PROFILER_EVENT_DATA_TYPE_METHOD = 1,
67 MONO_PROFILER_EVENT_DATA_TYPE_CLASS = 2
68 } MonoProfilerEventDataType;
70 typedef struct _ProfilerEventData {
75 unsigned int data_type:2;
78 unsigned int value:25;
81 #define EVENT_VALUE_BITS (25)
82 #define MAX_EVENT_VALUE ((1<<EVENT_VALUE_BITS)-1)
85 MONO_PROFILER_EVENT_METHOD_JIT = 0,
86 MONO_PROFILER_EVENT_METHOD_FREED = 1,
87 MONO_PROFILER_EVENT_METHOD_CALL = 2,
88 MONO_PROFILER_EVENT_METHOD_ALLOCATION_CALLER = 3,
89 MONO_PROFILER_EVENT_METHOD_ALLOCATION_JIT_TIME_CALLER = 4
90 } MonoProfilerMethodEvents;
92 MONO_PROFILER_EVENT_CLASS_LOAD = 0,
93 MONO_PROFILER_EVENT_CLASS_UNLOAD = 1,
94 MONO_PROFILER_EVENT_CLASS_EXCEPTION = 2,
95 MONO_PROFILER_EVENT_CLASS_ALLOCATION = 3
96 } MonoProfilerClassEvents;
98 MONO_PROFILER_EVENT_RESULT_SUCCESS = 0,
99 MONO_PROFILER_EVENT_RESULT_FAILURE = 4
100 } MonoProfilerEventResult;
101 #define MONO_PROFILER_EVENT_RESULT_MASK MONO_PROFILER_EVENT_RESULT_FAILURE
103 MONO_PROFILER_EVENT_THREAD = 1,
104 MONO_PROFILER_EVENT_GC_COLLECTION = 2,
105 MONO_PROFILER_EVENT_GC_MARK = 3,
106 MONO_PROFILER_EVENT_GC_SWEEP = 4,
107 MONO_PROFILER_EVENT_GC_RESIZE = 5,
108 MONO_PROFILER_EVENT_GC_STOP_WORLD = 6,
109 MONO_PROFILER_EVENT_GC_START_WORLD = 7,
110 MONO_PROFILER_EVENT_JIT_TIME_ALLOCATION = 8,
111 MONO_PROFILER_EVENT_STACK_SECTION = 9,
112 MONO_PROFILER_EVENT_ALLOCATION_OBJECT_ID = 10
113 } MonoProfilerEvents;
115 MONO_PROFILER_EVENT_KIND_START = 0,
116 MONO_PROFILER_EVENT_KIND_END = 1
117 } MonoProfilerEventKind;
/* Reads the wall clock via gettimeofday() and stores it into (t) as
 * microseconds since the Unix epoch: (tv_sec * 1000000) + tv_usec.
 * Fix: "¤t_time" was mojibake — the "&curren" prefix of "&current_time"
 * had been swallowed as the HTML entity "&curren;" (¤) by a bad text
 * conversion; restore the address-of operator so gettimeofday() actually
 * fills in the local struct timeval. */
119 #define MONO_PROFILER_GET_CURRENT_TIME(t) {\
120 struct timeval current_time;\
121 gettimeofday (&current_time, NULL);\
122 (t) = (((guint64)current_time.tv_sec) * 1000000) + current_time.tv_usec;\
125 static gboolean use_fast_timer = FALSE;
127 #if (defined(__i386__) || defined(__x86_64__)) && ! defined(PLATFORM_WIN32)
129 #if defined(__i386__)
130 static const guchar cpuid_impl [] = {
131 0x55, /* push %ebp */
132 0x89, 0xe5, /* mov %esp,%ebp */
133 0x53, /* push %ebx */
134 0x8b, 0x45, 0x08, /* mov 0x8(%ebp),%eax */
135 0x0f, 0xa2, /* cpuid */
136 0x50, /* push %eax */
137 0x8b, 0x45, 0x10, /* mov 0x10(%ebp),%eax */
138 0x89, 0x18, /* mov %ebx,(%eax) */
139 0x8b, 0x45, 0x14, /* mov 0x14(%ebp),%eax */
140 0x89, 0x08, /* mov %ecx,(%eax) */
141 0x8b, 0x45, 0x18, /* mov 0x18(%ebp),%eax */
142 0x89, 0x10, /* mov %edx,(%eax) */
144 0x8b, 0x55, 0x0c, /* mov 0xc(%ebp),%edx */
145 0x89, 0x02, /* mov %eax,(%edx) */
151 typedef void (*CpuidFunc) (int id, int* p_eax, int* p_ebx, int* p_ecx, int* p_edx);
154 cpuid (int id, int* p_eax, int* p_ebx, int* p_ecx, int* p_edx) {
157 __asm__ __volatile__ (
160 "movl %%eax, %%edx\n"
161 "xorl $0x200000, %%eax\n"
166 "xorl %%edx, %%eax\n"
167 "andl $0x200000, %%eax\n"
189 CpuidFunc func = (CpuidFunc) cpuid_impl;
190 func (id, p_eax, p_ebx, p_ecx, p_edx);
192 * We use this approach because of issues with gcc and pic code, see:
193 * http://gcc.gnu.org/cgi-bin/gnatsweb.pl?cmd=view%20audit-trail&database=gcc&pr=7329
194 __asm__ __volatile__ ("cpuid"
195 : "=a" (*p_eax), "=b" (*p_ebx), "=c" (*p_ecx), "=d" (*p_edx)
203 static void detect_fast_timer (void) {
204 int p_eax, p_ebx, p_ecx, p_edx;
206 if (cpuid (0x1, &p_eax, &p_ebx, &p_ecx, &p_edx)) {
208 use_fast_timer = TRUE;
210 use_fast_timer = FALSE;
213 use_fast_timer = FALSE;
218 #if defined(__x86_64__)
219 static void detect_fast_timer (void) {
221 guint32 eax,ebx,ecx,edx;
222 __asm__ __volatile__ ("cpuid" : "=a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx) : "a"(op));
224 use_fast_timer = TRUE;
226 use_fast_timer = FALSE;
231 static __inline__ guint64 rdtsc(void) {
233 __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi));
234 return ((guint64) lo) | (((guint64) hi) << 32);
236 #define MONO_PROFILER_GET_CURRENT_COUNTER(c) {\
237 if (use_fast_timer) {\
240 MONO_PROFILER_GET_CURRENT_TIME ((c));\
244 static void detect_fast_timer (void) {
245 use_fast_timer = FALSE;
247 #define MONO_PROFILER_GET_CURRENT_COUNTER(c) MONO_PROFILER_GET_CURRENT_TIME ((c))
251 #define CLASS_LAYOUT_PACKED_BITMAP_SIZE 64
252 #define CLASS_LAYOUT_NOT_INITIALIZED (0xFFFF)
255 HEAP_CODE_OBJECT = 1,
256 HEAP_CODE_FREE_OBJECT_CLASS = 2,
258 } HeapProfilerJobValueCode;
259 typedef struct _MonoProfilerClassData {
268 } MonoProfilerClassData;
270 typedef struct _MonoProfilerMethodData {
273 } MonoProfilerMethodData;
275 typedef struct _ClassIdMappingElement {
279 struct _ClassIdMappingElement *next_unwritten;
280 MonoProfilerClassData data;
281 } ClassIdMappingElement;
283 typedef struct _MethodIdMappingElement {
287 struct _MethodIdMappingElement *next_unwritten;
288 MonoProfilerMethodData data;
289 } MethodIdMappingElement;
291 typedef struct _ClassIdMapping {
293 ClassIdMappingElement *unwritten;
297 typedef struct _MethodIdMapping {
299 MethodIdMappingElement *unwritten;
303 typedef struct _LoadedElement {
305 guint64 load_start_counter;
306 guint64 load_end_counter;
307 guint64 unload_start_counter;
308 guint64 unload_end_counter;
312 guint8 unload_written;
315 #define PROFILER_HEAP_SHOT_OBJECT_BUFFER_SIZE 1024
316 #define PROFILER_HEAP_SHOT_HEAP_BUFFER_SIZE 4096
317 #define PROFILER_HEAP_SHOT_WRITE_BUFFER_SIZE 4096
319 typedef struct _ProfilerHeapShotObjectBuffer {
320 struct _ProfilerHeapShotObjectBuffer *next;
321 MonoObject **next_free_slot;
323 MonoObject **first_unprocessed_slot;
324 MonoObject *buffer [PROFILER_HEAP_SHOT_OBJECT_BUFFER_SIZE];
325 } ProfilerHeapShotObjectBuffer;
327 typedef struct _ProfilerHeapShotHeapBuffer {
328 struct _ProfilerHeapShotHeapBuffer *next;
329 struct _ProfilerHeapShotHeapBuffer *previous;
330 MonoObject **start_slot;
331 MonoObject **end_slot;
332 MonoObject *buffer [PROFILER_HEAP_SHOT_HEAP_BUFFER_SIZE];
333 } ProfilerHeapShotHeapBuffer;
335 typedef struct _ProfilerHeapShotHeapBuffers {
336 ProfilerHeapShotHeapBuffer *buffers;
337 ProfilerHeapShotHeapBuffer *last;
338 ProfilerHeapShotHeapBuffer *current;
339 MonoObject **first_free_slot;
340 } ProfilerHeapShotHeapBuffers;
343 typedef struct _ProfilerHeapShotWriteBuffer {
344 struct _ProfilerHeapShotWriteBuffer *next;
345 gpointer buffer [PROFILER_HEAP_SHOT_WRITE_BUFFER_SIZE];
346 } ProfilerHeapShotWriteBuffer;
348 typedef struct _ProfilerHeapShotClassSummary {
357 } ProfilerHeapShotClassSummary;
359 typedef struct _ProfilerHeapShotCollectionSummary {
360 ProfilerHeapShotClassSummary *per_class_data;
362 } ProfilerHeapShotCollectionSummary;
364 typedef struct _ProfilerHeapShotWriteJob {
365 struct _ProfilerHeapShotWriteJob *next;
366 struct _ProfilerHeapShotWriteJob *next_unwritten;
370 ProfilerHeapShotWriteBuffer *buffers;
371 ProfilerHeapShotWriteBuffer **last_next;
372 guint32 full_buffers;
373 gboolean heap_shot_was_signalled;
374 guint64 start_counter;
379 ProfilerHeapShotCollectionSummary summary;
380 gboolean dump_heap_data;
381 } ProfilerHeapShotWriteJob;
383 typedef struct _ProfilerThreadStack {
386 guint32 last_saved_top;
387 guint32 last_written_frame;
389 guint8 *method_is_jitted;
390 guint32 *written_frames;
391 } ProfilerThreadStack;
393 typedef struct _ProfilerPerThreadData {
394 ProfilerEventData *events;
395 ProfilerEventData *next_free_event;
396 ProfilerEventData *end_event;
397 ProfilerEventData *first_unwritten_event;
398 ProfilerEventData *first_unmapped_event;
399 guint64 start_event_counter;
400 guint64 last_event_counter;
402 ProfilerHeapShotObjectBuffer *heap_shot_object_buffers;
403 ProfilerThreadStack stack;
404 struct _ProfilerPerThreadData* next;
405 } ProfilerPerThreadData;
407 typedef struct _ProfilerStatisticalHit {
410 } ProfilerStatisticalHit;
412 typedef struct _ProfilerStatisticalData {
413 ProfilerStatisticalHit *hits;
414 unsigned int next_free_index;
415 unsigned int end_index;
416 unsigned int first_unwritten_index;
417 } ProfilerStatisticalData;
419 typedef struct _ProfilerUnmanagedSymbol {
424 } ProfilerUnmanagedSymbol;
426 struct _ProfilerExecutableFile;
427 struct _ProfilerExecutableFileSectionRegion;
429 typedef struct _ProfilerExecutableMemoryRegionData {
437 struct _ProfilerExecutableFile *file;
438 struct _ProfilerExecutableFileSectionRegion *file_region_reference;
439 guint32 symbols_count;
440 guint32 symbols_capacity;
441 ProfilerUnmanagedSymbol *symbols;
442 } ProfilerExecutableMemoryRegionData;
444 typedef struct _ProfilerExecutableMemoryRegions {
445 ProfilerExecutableMemoryRegionData **regions;
446 guint32 regions_capacity;
447 guint32 regions_count;
449 guint32 next_unmanaged_function_id;
450 } ProfilerExecutableMemoryRegions;
452 /* Start of ELF definitions */
454 typedef guint16 ElfHalf;
455 typedef guint32 ElfWord;
456 typedef gsize ElfAddr;
457 typedef gsize ElfOff;
460 unsigned char e_ident[EI_NIDENT];
466 ElfOff e_shoff; // Section header table
468 ElfHalf e_ehsize; // Header size
471 ElfHalf e_shentsize; // Section header entry size
472 ElfHalf e_shnum; // Section header entries number
473 ElfHalf e_shstrndx; // String table index
476 #if (SIZEOF_VOID_P == 4)
481 ElfAddr sh_addr; // Address in memory
482 ElfOff sh_offset; // Offset in file
486 ElfWord sh_addralign;
493 unsigned char st_info; // Use ELF32_ST_TYPE to get symbol type
494 unsigned char st_other;
495 ElfHalf st_shndx; // Or one of SHN_ABS, SHN_COMMON or SHN_UNDEF.
497 #elif (SIZEOF_VOID_P == 8)
502 ElfAddr sh_addr; // Address in memory
503 ElfOff sh_offset; // Offset in file
512 unsigned char st_info; // Use ELF_ST_TYPE to get symbol type
513 unsigned char st_other;
514 ElfHalf st_shndx; // Or one of SHN_ABS, SHN_COMMON or SHN_UNDEF.
519 #error Bad size of void pointer
523 #define ELF_ST_BIND(i) ((i)>>4)
524 #define ELF_ST_TYPE(i) ((i)&0xf)
537 ELF_FILE_TYPE_NONE = 0,
538 ELF_FILE_TYPE_REL = 1,
539 ELF_FILE_TYPE_EXEC = 2,
540 ELF_FILE_TYPE_DYN = 3,
541 ELF_FILE_TYPE_CORE = 4
558 ELF_SHT_PROGBITS = 1,
582 ELF_SHF_EXECINSTR = 4,
585 #define ELF_SHN_UNDEF 0
586 #define ELF_SHN_LORESERVE 0xff00
587 #define ELF_SHN_LOPROC 0xff00
588 #define ELF_SHN_HIPROC 0xff1f
589 #define ELF_SHN_ABS 0xfff1
590 #define ELF_SHN_COMMON 0xfff2
591 #define ELF_SHN_HIRESERVE 0xffff
592 /* End of ELF definitions */
594 typedef struct _ProfilerExecutableFileSectionRegion {
595 ProfilerExecutableMemoryRegionData *region;
596 guint8 *section_address;
597 gsize section_offset;
598 } ProfilerExecutableFileSectionRegion;
600 typedef struct _ProfilerExecutableFile {
601 guint32 reference_count;
603 /* Used for mmap and munmap */
610 guint8 *symbols_start;
611 guint32 symbols_count;
613 const char *symbols_string_table;
614 const char *main_string_table;
616 ProfilerExecutableFileSectionRegion *section_regions;
618 struct _ProfilerExecutableFile *next_new_file;
619 } ProfilerExecutableFile;
621 typedef struct _ProfilerExecutableFiles {
623 ProfilerExecutableFile *new_files;
624 } ProfilerExecutableFiles;
627 #define CLEANUP_WRITER_THREAD() do {profiler->writer_thread_terminated = TRUE;} while (0)
628 #define CHECK_WRITER_THREAD() (! profiler->writer_thread_terminated)
630 #ifndef PLATFORM_WIN32
631 #include <sys/types.h>
632 #include <sys/time.h>
633 #include <sys/stat.h>
637 #include <semaphore.h>
639 #include <sys/mman.h>
640 #include <sys/types.h>
641 #include <sys/stat.h>
645 #define MUTEX_TYPE pthread_mutex_t
646 #define INITIALIZE_PROFILER_MUTEX() pthread_mutex_init (&(profiler->mutex), NULL)
647 #define DELETE_PROFILER_MUTEX() pthread_mutex_destroy (&(profiler->mutex))
648 #define LOCK_PROFILER() do {/*LOG_WRITER_THREAD ("LOCK_PROFILER");*/ pthread_mutex_lock (&(profiler->mutex));} while (0)
649 #define UNLOCK_PROFILER() do {/*LOG_WRITER_THREAD ("UNLOCK_PROFILER");*/ pthread_mutex_unlock (&(profiler->mutex));} while (0)
651 #define THREAD_TYPE pthread_t
652 #define CREATE_WRITER_THREAD(f) pthread_create (&(profiler->data_writer_thread), NULL, ((void*(*)(void*))f), NULL)
653 #define EXIT_THREAD() pthread_exit (NULL);
654 #define WAIT_WRITER_THREAD() do {\
655 if (CHECK_WRITER_THREAD ()) {\
656 pthread_join (profiler->data_writer_thread, NULL);\
659 #define CURRENT_THREAD_ID() (gsize) pthread_self ()
661 #ifndef HAVE_KW_THREAD
662 static pthread_key_t pthread_profiler_key;
663 static pthread_once_t profiler_pthread_once = PTHREAD_ONCE_INIT;
665 make_pthread_profiler_key (void) {
666 (void) pthread_key_create (&pthread_profiler_key, NULL);
668 #define LOOKUP_PROFILER_THREAD_DATA() ((ProfilerPerThreadData*) pthread_getspecific (pthread_profiler_key))
669 #define SET_PROFILER_THREAD_DATA(x) (void) pthread_setspecific (pthread_profiler_key, (x))
670 #define ALLOCATE_PROFILER_THREAD_DATA() (void) pthread_once (&profiler_pthread_once, make_pthread_profiler_key)
671 #define FREE_PROFILER_THREAD_DATA() (void) pthread_key_delete (pthread_profiler_key)
674 #define EVENT_TYPE sem_t
675 #define WRITER_EVENT_INIT() do {\
676 sem_init (&(profiler->enable_data_writer_event), 0, 0);\
677 sem_init (&(profiler->wake_data_writer_event), 0, 0);\
678 sem_init (&(profiler->done_data_writer_event), 0, 0);\
680 #define WRITER_EVENT_DESTROY() do {\
681 sem_destroy (&(profiler->enable_data_writer_event));\
682 sem_destroy (&(profiler->wake_data_writer_event));\
683 sem_destroy (&(profiler->done_data_writer_event));\
685 #define WRITER_EVENT_WAIT() (void) sem_wait (&(profiler->wake_data_writer_event))
686 #define WRITER_EVENT_RAISE() (void) sem_post (&(profiler->wake_data_writer_event))
687 #define WRITER_EVENT_ENABLE_WAIT() (void) sem_wait (&(profiler->enable_data_writer_event))
688 #define WRITER_EVENT_ENABLE_RAISE() (void) sem_post (&(profiler->enable_data_writer_event))
689 #define WRITER_EVENT_DONE_WAIT() do {\
690 if (CHECK_WRITER_THREAD ()) {\
691 (void) sem_wait (&(profiler->done_data_writer_event));\
694 #define WRITER_EVENT_DONE_RAISE() (void) sem_post (&(profiler->done_data_writer_event))
697 #define FILE_HANDLE_TYPE FILE*
698 #define OPEN_FILE() profiler->file = fopen (profiler->file_name, "wb");
699 #define WRITE_BUFFER(b,s) fwrite ((b), 1, (s), profiler->file)
700 #define FLUSH_FILE() fflush (profiler->file)
701 #define CLOSE_FILE() fclose (profiler->file);
703 #define FILE_HANDLE_TYPE int
704 #define OPEN_FILE() profiler->file = open (profiler->file_name, O_WRONLY|O_CREAT|O_TRUNC, 0664);
705 #define WRITE_BUFFER(b,s) write (profiler->file, (b), (s))
707 #define CLOSE_FILE() close (profiler->file);
714 #define MUTEX_TYPE CRITICAL_SECTION
715 #define INITIALIZE_PROFILER_MUTEX() InitializeCriticalSection (&(profiler->mutex))
716 #define DELETE_PROFILER_MUTEX() DeleteCriticalSection (&(profiler->mutex))
717 #define LOCK_PROFILER() EnterCriticalSection (&(profiler->mutex))
718 #define UNLOCK_PROFILER() LeaveCriticalSection (&(profiler->mutex))
720 #define THREAD_TYPE HANDLE
721 #define CREATE_WRITER_THREAD(f) CreateThread (NULL, (1*1024*1024), (f), NULL, 0, NULL);
722 #define EXIT_THREAD() ExitThread (0);
723 #define WAIT_WRITER_THREAD() do {\
724 if (CHECK_WRITER_THREAD ()) {\
725 WaitForSingleObject (profiler->data_writer_thread, INFINITE);\
728 #define CURRENT_THREAD_ID() (gsize) GetCurrentThreadId ()
730 #ifndef HAVE_KW_THREAD
731 static guint32 profiler_thread_id = -1;
732 #define LOOKUP_PROFILER_THREAD_DATA() ((ProfilerPerThreadData*)TlsGetValue (profiler_thread_id))
733 #define SET_PROFILER_THREAD_DATA(x) TlsSetValue (profiler_thread_id, (x));
734 #define ALLOCATE_PROFILER_THREAD_DATA() profiler_thread_id = TlsAlloc ()
735 #define FREE_PROFILER_THREAD_DATA() TlsFree (profiler_thread_id)
738 #define EVENT_TYPE HANDLE
739 #define WRITER_EVENT_INIT() (void) do {\
740 profiler->enable_data_writer_event = CreateEvent (NULL, FALSE, FALSE, NULL);\
741 profiler->wake_data_writer_event = CreateEvent (NULL, FALSE, FALSE, NULL);\
742 profiler->done_data_writer_event = CreateEvent (NULL, FALSE, FALSE, NULL);\
744 #define WRITER_EVENT_DESTROY() CloseHandle (profiler->statistical_data_writer_event)
745 #define WRITER_EVENT_INIT() (void) do {\
746 CloseHandle (profiler->enable_data_writer_event);\
747 CloseHandle (profiler->wake_data_writer_event);\
748 CloseHandle (profiler->done_data_writer_event);\
750 #define WRITER_EVENT_WAIT() WaitForSingleObject (profiler->wake_data_writer_event, INFINITE)
751 #define WRITER_EVENT_RAISE() SetEvent (profiler->wake_data_writer_event)
752 #define WRITER_EVENT_ENABLE_WAIT() WaitForSingleObject (profiler->enable_data_writer_event, INFINITE)
753 #define WRITER_EVENT_ENABLE_RAISE() SetEvent (profiler->enable_data_writer_event)
754 #define WRITER_EVENT_DONE_WAIT() do {\
755 if (CHECK_WRITER_THREAD ()) {\
756 WaitForSingleObject (profiler->done_data_writer_event, INFINITE);\
759 #define WRITER_EVENT_DONE_RAISE() SetEvent (profiler->done_data_writer_event)
761 #define FILE_HANDLE_TYPE FILE*
762 #define OPEN_FILE() profiler->file = fopen (profiler->file_name, "wb");
763 #define WRITE_BUFFER(b,s) fwrite ((b), 1, (s), profiler->file)
764 #define FLUSH_FILE() fflush (profiler->file)
765 #define CLOSE_FILE() fclose (profiler->file);
769 #ifdef HAVE_KW_THREAD
770 static __thread ProfilerPerThreadData * tls_profiler_per_thread_data;
771 #define LOOKUP_PROFILER_THREAD_DATA() ((ProfilerPerThreadData*) tls_profiler_per_thread_data)
772 #define SET_PROFILER_THREAD_DATA(x) tls_profiler_per_thread_data = (x)
773 #define ALLOCATE_PROFILER_THREAD_DATA() /* nop */
774 #define FREE_PROFILER_THREAD_DATA() /* nop */
777 #define GET_PROFILER_THREAD_DATA(data) do {\
778 ProfilerPerThreadData *_result = LOOKUP_PROFILER_THREAD_DATA ();\
780 _result = profiler_per_thread_data_new (profiler->per_thread_buffer_size);\
782 _result->next = profiler->per_thread_data;\
783 profiler->per_thread_data = _result;\
785 SET_PROFILER_THREAD_DATA (_result);\
790 #define PROFILER_FILE_WRITE_BUFFER_SIZE (profiler->write_buffer_size)
791 typedef struct _ProfilerFileWriteBuffer {
792 struct _ProfilerFileWriteBuffer *next;
794 } ProfilerFileWriteBuffer;
796 #define CHECK_PROFILER_ENABLED() do {\
797 if (! profiler->profiler_enabled)\
800 struct _MonoProfiler {
803 MonoProfileFlags flags;
804 gboolean profiler_enabled;
806 char *file_name_suffix;
807 FILE_HANDLE_TYPE file;
810 guint64 start_counter;
814 guint64 last_header_counter;
816 MethodIdMapping *methods;
817 ClassIdMapping *classes;
819 GHashTable *loaded_assemblies;
820 GHashTable *loaded_modules;
821 GHashTable *loaded_appdomains;
823 guint32 per_thread_buffer_size;
824 guint32 statistical_buffer_size;
825 ProfilerPerThreadData* per_thread_data;
826 ProfilerStatisticalData *statistical_data;
827 ProfilerStatisticalData *statistical_data_ready;
828 ProfilerStatisticalData *statistical_data_second_buffer;
829 int statistical_call_chain_depth;
831 THREAD_TYPE data_writer_thread;
832 EVENT_TYPE enable_data_writer_event;
833 EVENT_TYPE wake_data_writer_event;
834 EVENT_TYPE done_data_writer_event;
835 gboolean terminate_writer_thread;
836 gboolean writer_thread_terminated;
837 gboolean detach_writer_thread;
838 gboolean writer_thread_enabled;
839 gboolean writer_thread_flush_everything;
841 ProfilerFileWriteBuffer *write_buffers;
842 ProfilerFileWriteBuffer *current_write_buffer;
843 int write_buffer_size;
844 int current_write_position;
845 int full_write_buffers;
847 ProfilerHeapShotWriteJob *heap_shot_write_jobs;
848 ProfilerHeapShotHeapBuffers heap;
850 char *heap_shot_command_file_name;
851 int dump_next_heap_snapshots;
852 guint64 heap_shot_command_file_access_time;
853 gboolean heap_shot_was_signalled;
854 guint32 garbage_collection_counter;
856 ProfilerExecutableMemoryRegions *executable_regions;
857 ProfilerExecutableFiles executable_files;
864 gboolean unreachable_objects;
865 gboolean collection_summary;
867 gboolean track_stack;
868 gboolean track_calls;
869 gboolean save_allocation_caller;
870 gboolean save_allocation_stack;
871 gboolean allocations_carry_id;
874 static MonoProfiler *profiler;
876 #ifndef PLATFORM_WIN32
879 #ifdef MONO_ARCH_USE_SIGACTION
880 #define SIG_HANDLER_SIGNATURE(ftn) ftn (int _dummy, siginfo_t *info, void *context)
881 #elif defined(__sparc__)
882 #define SIG_HANDLER_SIGNATURE(ftn) ftn (int _dummy, void *sigctx)
884 #define SIG_HANDLER_SIGNATURE(ftn) ftn (int _dummy)
/* Requests a heap snapshot: flags the profiler, then forces a collection
 * of the highest generation so the heap-walk callbacks run.
 * NOTE(review): the listing skips original lines 887/891 (return-type
 * line and closing brace are not visible here). */
888 request_heap_snapshot (void) {
889 profiler->heap_shot_was_signalled = TRUE;
890 mono_gc_collect (mono_gc_max_generation ());
/* Signal handler used to trigger a heap snapshot from outside the runtime.
 * Runs in async-signal context, so it only sets a flag and wakes the
 * writer thread (WRITER_EVENT_RAISE is sem_post on POSIX). */
894 SIG_HANDLER_SIGNATURE (gc_request_handler) {
895 profiler->heap_shot_was_signalled = TRUE;
896 WRITER_EVENT_RAISE ();
/* Installs gc_request_handler for signal_number via sigaction().
 * Uses the three-argument SA_SIGINFO form when MONO_ARCH_USE_SIGACTION
 * is defined, the classic sa_handler form otherwise.
 * NOTE(review): the `struct sigaction sa;` declaration is on an original
 * line not shown in this listing. */
900 add_gc_request_handler (int signal_number)
904 #ifdef MONO_ARCH_USE_SIGACTION
905 sa.sa_sigaction = gc_request_handler;
906 sigemptyset (&sa.sa_mask);
907 sa.sa_flags = SA_SIGINFO;
909 sa.sa_handler = gc_request_handler;
910 sigemptyset (&sa.sa_mask);
914 g_assert (sigaction (signal_number, &sa, NULL) != -1);
/* Turns event recording on (checked by CHECK_PROFILER_ENABLED). */
918 enable_profiler (void) {
919 profiler->profiler_enabled = TRUE;
/* Turns event recording off; events are dropped while disabled. */
923 disable_profiler (void) {
924 profiler->profiler_enabled = FALSE;
/* Signal handler that flips profiler_enabled on each delivery.
 * NOTE(review): the `} else {` between original lines 932 and 934 is not
 * visible in this listing. */
930 SIG_HANDLER_SIGNATURE (toggle_handler) {
931 if (profiler->profiler_enabled) {
932 profiler->profiler_enabled = FALSE;
934 profiler->profiler_enabled = TRUE;
/* Installs toggle_handler for signal_number, mirroring
 * add_gc_request_handler: SA_SIGINFO form under MONO_ARCH_USE_SIGACTION,
 * plain sa_handler otherwise.  The `struct sigaction sa;` declaration is
 * on an original line not shown here. */
939 add_toggle_handler (int signal_number)
943 #ifdef MONO_ARCH_USE_SIGACTION
944 sa.sa_sigaction = toggle_handler;
945 sigemptyset (&sa.sa_mask);
946 sa.sa_flags = SA_SIGINFO;
948 sa.sa_handler = toggle_handler;
949 sigemptyset (&sa.sa_mask);
953 g_assert (sigaction (signal_number, &sa, NULL) != -1);
959 #define DEBUG_LOAD_EVENTS 0
960 #define DEBUG_MAPPING_EVENTS 0
961 #define DEBUG_LOGGING_PROFILER 0
962 #define DEBUG_HEAP_PROFILER 0
963 #define DEBUG_CLASS_BITMAPS 0
964 #define DEBUG_STATISTICAL_PROFILER 0
965 #define DEBUG_WRITER_THREAD 0
966 #define DEBUG_FILE_WRITES 0
967 #if (DEBUG_LOGGING_PROFILER || DEBUG_STATISTICAL_PROFILER || DEBUG_HEAP_PROFILER || DEBUG_WRITER_THREAD || DEBUG_FILE_WRITES)
968 #define LOG_WRITER_THREAD(m) printf ("WRITER-THREAD-LOG %s\n", m)
970 #define LOG_WRITER_THREAD(m)
973 #if DEBUG_LOGGING_PROFILER
974 static int event_counter = 0;
975 #define EVENT_MARK() printf ("[EVENT:%d]", ++ event_counter)
/* Puts a ProfilerThreadStack into the "no storage allocated" state:
 * bookkeeping counters zeroed, array pointers NULL.  Real storage is
 * allocated lazily by thread_stack_initialize.
 * NOTE(review): original lines 980-981/984 are missing from this listing
 * (presumably zeroing capacity/top and NULLing stack->stack). */
979 thread_stack_initialize_empty (ProfilerThreadStack *stack) {
982 stack->last_saved_top = 0;
983 stack->last_written_frame = 0;
985 stack->method_is_jitted = NULL;
986 stack->written_frames = NULL;
/* Releases the three parallel arrays of a ProfilerThreadStack and resets
 * its bookkeeping, leaving the struct in the same state as
 * thread_stack_initialize_empty.
 * NOTE(review): the `if (ptr != NULL)` guards are redundant — g_free(NULL)
 * is a documented no-op — but this listing is incomplete so the code is
 * left untouched. */
990 thread_stack_free (ProfilerThreadStack *stack) {
993 stack->last_saved_top = 0;
994 stack->last_written_frame = 0;
995 if (stack->stack != NULL) {
996 g_free (stack->stack);
999 if (stack->method_is_jitted != NULL) {
1000 g_free (stack->method_is_jitted);
1001 stack->method_is_jitted = NULL;
1003 if (stack->written_frames != NULL) {
1004 g_free (stack->written_frames);
1005 stack->written_frames = NULL;
/* Allocates zeroed storage for a thread stack of `capacity` frames:
 * one MonoMethod* slot, one is-jitted byte and one written-frame id per
 * frame.  Does NOT free previous storage — callers that grow the stack
 * (thread_stack_push_jitted) save and free the old arrays themselves. */
1010 thread_stack_initialize (ProfilerThreadStack *stack, guint32 capacity) {
1011 stack->capacity = capacity;
1013 stack->last_saved_top = 0;
1014 stack->last_written_frame = 0;
1015 stack->stack = g_new0 (MonoMethod*, capacity);
1016 stack->method_is_jitted = g_new0 (guint8, capacity);
1017 stack->written_frames = g_new0 (guint32, capacity);
/* Pushes `method` (with its is-jitted flag) onto the shadow stack,
 * doubling capacity on overflow.  The growth path saves the old arrays
 * and counters, re-initializes the stack at 2x capacity (which resets
 * the counters), memcpy's the old contents across, frees the old arrays
 * and restores the saved counters.
 * NOTE(review): original lines 1033 (likely g_free(old_stack)),
 * 1036 (restoring stack->top) and 1042 (the top++ after the stores)
 * are missing from this listing — confirm against the full file. */
1021 thread_stack_push_jitted (ProfilerThreadStack *stack, MonoMethod* method, gboolean method_is_jitted) {
1022 if (stack->top >= stack->capacity) {
1023 MonoMethod **old_stack = stack->stack;
1024 guint8 *old_method_is_jitted = stack->method_is_jitted;
1025 guint32 *old_written_frames = stack->written_frames;
1026 guint32 top = stack->top;
1027 guint32 last_saved_top = stack->last_saved_top;
1028 guint32 last_written_frame = stack->last_written_frame;
1029 thread_stack_initialize (stack, stack->capacity * 2);
1030 memcpy (stack->stack, old_stack, top * sizeof (MonoMethod*));
1031 memcpy (stack->method_is_jitted, old_method_is_jitted, top * sizeof (guint8));
1032 memcpy (stack->written_frames, old_written_frames, top * sizeof (guint32));
1034 g_free (old_method_is_jitted);
1035 g_free (old_written_frames);
1037 stack->last_saved_top = last_saved_top;
1038 stack->last_written_frame = last_written_frame;
1040 stack->stack [stack->top] = method;
1041 stack->method_is_jitted [stack->top] = method_is_jitted;
/* Convenience push: same as thread_stack_push_jitted with the
 * is-jitted flag forced to FALSE. */
1046 thread_stack_push (ProfilerThreadStack *stack, MonoMethod* method) {
1047 thread_stack_push_jitted (stack, method, FALSE);
/* Pops and returns the topmost method, clamping last_saved_top so it
 * never points above the new top; no-op on an empty stack.
 * NOTE(review): original line 1053 (the `stack->top --;`) is missing from
 * this listing — the `return stack->stack [stack->top]` below reads the
 * popped slot only because top was decremented first; confirm against
 * the full file. */
1051 thread_stack_pop (ProfilerThreadStack *stack) {
1052 if (stack->top > 0) {
1054 if (stack->last_saved_top > stack->top) {
1055 stack->last_saved_top = stack->top;
1057 return stack->stack [stack->top];
/* Returns the topmost method without popping (the else branch returning
 * NULL for an empty stack is on lines not shown in this listing). */
1064 thread_stack_top (ProfilerThreadStack *stack) {
1065 if (stack->top > 0) {
1066 return stack->stack [stack->top - 1];
/* Returns the is-jitted flag of the topmost frame without popping
 * (empty-stack else branch is on lines not shown in this listing). */
1073 thread_stack_top_is_jitted (ProfilerThreadStack *stack) {
1074 if (stack->top > 0) {
1075 return stack->method_is_jitted [stack->top - 1];
/* Returns the method `index` frames below the top (index 0 == top).
 * NOTE(review): `stack->top` is guint32 and `index` is int, so the
 * comparison promotes index to unsigned — a negative index would pass
 * the guard; callers appear to pass non-negative values. */
1082 thread_stack_index_from_top (ProfilerThreadStack *stack, int index) {
1083 if (stack->top > index) {
1084 return stack->stack [stack->top - (index + 1)];
/* Returns the is-jitted flag `index` frames below the top (index 0 ==
 * top).  Same signed/unsigned comparison caveat as
 * thread_stack_index_from_top. */
1091 thread_stack_index_from_top_is_jitted (ProfilerThreadStack *stack, int index) {
1092 if (stack->top > index) {
1093 return stack->method_is_jitted [stack->top - (index + 1)];
/* Push that tolerates an uninitialized stack: silently does nothing when
 * storage has not been allocated yet (stack->stack == NULL). */
1100 thread_stack_push_safely (ProfilerThreadStack *stack, MonoMethod* method) {
1101 if (stack->stack != NULL) {
1102 thread_stack_push (stack, method);
/* Jitted-flag variant of thread_stack_push_safely: no-op when the
 * stack's storage has not been allocated yet. */
1107 thread_stack_push_jitted_safely (ProfilerThreadStack *stack, MonoMethod* method, gboolean method_is_jitted) {
1108 if (stack->stack != NULL) {
1109 thread_stack_push_jitted (stack, method, method_is_jitted);
/* Number of frames pushed since the last save point, clamped to 0
 * (last_saved_top can exceed top only transiently; see
 * thread_stack_pop's clamp). */
1114 thread_stack_count_unsaved_frames (ProfilerThreadStack *stack) {
1115 int result = stack->top - stack->last_saved_top;
1116 return (result > 0) ? result : 0;
/* Accessor: index of the last frame flushed to the output file. */
1120 thread_stack_get_last_written_frame (ProfilerThreadStack *stack) {
1121 return stack->last_written_frame;
/* Mutator: records the index of the last frame flushed to the output. */
1125 thread_stack_set_last_written_frame (ProfilerThreadStack *stack, int last_written_frame) {
1126 stack->last_written_frame = last_written_frame;
/* Returns the packed method-id/is-jitted word recorded for frame
 * `index`; no bounds check — callers must pass a valid index. */
1129 static inline guint32
1130 thread_stack_written_frame_at_index (ProfilerThreadStack *stack, int index) {
1131 return stack->written_frames [index];
/* Stores the packed method-id/is-jitted word for frame `index`;
 * no bounds check — callers must pass a valid index. */
1135 thread_stack_write_frame_at_index (ProfilerThreadStack *stack, int index, guint32 method_id_and_is_jitted) {
1136 stack->written_frames [index] = method_id_and_is_jitted;
/* Looks up the profiler-side id mapping for a MonoClass in the global
 * class table; returns NULL when the class has not been mapped yet. */
1139 static ClassIdMappingElement*
1140 class_id_mapping_element_get (MonoClass *klass) {
1141 return g_hash_table_lookup (profiler->classes->table, (gconstpointer) klass);
/* Looks up the profiler-side id mapping for a MonoMethod in the global
 * method table; returns NULL when the method has not been mapped yet. */
1144 static MethodIdMappingElement*
1145 method_id_mapping_element_get (MonoMethod *method) {
1146 return g_hash_table_lookup (profiler->methods->table, (gconstpointer) method);
1149 #define BITS_TO_BYTES(v) do {\
/* Allocates and registers a new class-id mapping element: assigns the
 * next sequential id, links it on the unwritten list (so the writer
 * thread flushes it), marks its layout bitmap as not yet computed
 * (CLASS_LAYOUT_NOT_INITIALIZED) and inserts it into the class table.
 * NOTE(review): the `return result;` and closing brace are on original
 * lines not shown in this listing. */
1155 static ClassIdMappingElement*
1156 class_id_mapping_element_new (MonoClass *klass) {
1157 ClassIdMappingElement *result = g_new (ClassIdMappingElement, 1);
1159 result->name = mono_type_full_name (mono_class_get_type (klass));
1160 result->klass = klass;
1161 result->next_unwritten = profiler->classes->unwritten;
1162 profiler->classes->unwritten = result;
1163 result->id = profiler->classes->next_id;
1164 profiler->classes->next_id ++;
1166 result->data.bitmap.compact = 0;
1167 result->data.layout.slots = CLASS_LAYOUT_NOT_INITIALIZED;
1168 result->data.layout.references = CLASS_LAYOUT_NOT_INITIALIZED;
1170 g_hash_table_insert (profiler->classes->table, klass, result);
1172 #if (DEBUG_MAPPING_EVENTS)
1173 printf ("Created new CLASS mapping element \"%s\" (%p)[%d]\n", result->name, klass, result->id);
1179 class_id_mapping_element_build_layout_bitmap (MonoClass *klass, ClassIdMappingElement *klass_id) {
1180 MonoClass *parent_class = mono_class_get_parent (klass);
1181 int number_of_reference_fields = 0;
1182 int max_offset_of_reference_fields = 0;
1183 ClassIdMappingElement *parent_id;
1185 MonoClassField *field;
1187 #if (DEBUG_CLASS_BITMAPS)
1188 printf ("class_id_mapping_element_build_layout_bitmap: building layout for class %s.%s: ", mono_class_get_namespace (klass), mono_class_get_name (klass));
1191 if (parent_class != NULL) {
1192 parent_id = class_id_mapping_element_get (parent_class);
1193 g_assert (parent_id != NULL);
1195 if (parent_id->data.layout.slots == CLASS_LAYOUT_NOT_INITIALIZED) {
1196 #if (DEBUG_CLASS_BITMAPS)
1197 printf ("[recursively building bitmap for father class]\n");
1199 class_id_mapping_element_build_layout_bitmap (parent_class, parent_id);
1206 while ((field = mono_class_get_fields (klass, &iter)) != NULL) {
1207 MonoType* field_type = mono_field_get_type (field);
1208 // For now, skip static fields
1209 if (mono_field_get_flags (field) & 0x0010 /*FIELD_ATTRIBUTE_STATIC*/)
1212 if (MONO_TYPE_IS_REFERENCE (field_type)) {
1213 int field_offset = mono_field_get_offset (field) - sizeof (MonoObject);
1214 if (field_offset > max_offset_of_reference_fields) {
1215 max_offset_of_reference_fields = field_offset;
1217 number_of_reference_fields ++;
1219 MonoClass *field_class = mono_class_from_mono_type (field_type);
1220 if (field_class && mono_class_is_valuetype (field_class)) {
1221 ClassIdMappingElement *field_id = class_id_mapping_element_get (field_class);
1222 g_assert (field_id != NULL);
1224 if (field_id->data.layout.slots == CLASS_LAYOUT_NOT_INITIALIZED) {
1225 if (field_id != klass_id) {
1226 #if (DEBUG_CLASS_BITMAPS)
1227 printf ("[recursively building bitmap for field %s]\n", mono_field_get_name (field));
1229 class_id_mapping_element_build_layout_bitmap (field_class, field_id);
1231 #if (DEBUG_CLASS_BITMAPS)
1232 printf ("[breaking recursive bitmap build for field %s]", mono_field_get_name (field));
1235 klass_id->data.bitmap.compact = 0;
1236 klass_id->data.layout.slots = 0;
1237 klass_id->data.layout.references = 0;
1241 if (field_id->data.layout.references > 0) {
1242 int field_offset = mono_field_get_offset (field) - sizeof (MonoObject);
1243 int max_offset_reference_in_field = (field_id->data.layout.slots - 1) * sizeof (gpointer);
1245 if ((field_offset + max_offset_reference_in_field) > max_offset_of_reference_fields) {
1246 max_offset_of_reference_fields = field_offset + max_offset_reference_in_field;
1249 number_of_reference_fields += field_id->data.layout.references;
1255 #if (DEBUG_CLASS_BITMAPS)
1256 printf ("[allocating bitmap for class %s.%s (references %d, max offset %d, slots %d)]", mono_class_get_namespace (klass), mono_class_get_name (klass), number_of_reference_fields, max_offset_of_reference_fields, (int)(max_offset_of_reference_fields / sizeof (gpointer)) + 1);
1258 if ((number_of_reference_fields == 0) && ((parent_id == NULL) || (parent_id->data.layout.references == 0))) {
1259 #if (DEBUG_CLASS_BITMAPS)
1260 printf ("[no references at all]");
1262 klass_id->data.bitmap.compact = 0;
1263 klass_id->data.layout.slots = 0;
1264 klass_id->data.layout.references = 0;
1266 if ((parent_id != NULL) && (parent_id->data.layout.references > 0)) {
1267 #if (DEBUG_CLASS_BITMAPS)
1268 printf ("[parent %s.%s has %d references in %d slots]", mono_class_get_namespace (parent_class), mono_class_get_name (parent_class), parent_id->data.layout.references, parent_id->data.layout.slots);
1270 klass_id->data.layout.slots = parent_id->data.layout.slots;
1271 klass_id->data.layout.references = parent_id->data.layout.references;
1273 #if (DEBUG_CLASS_BITMAPS)
1274 printf ("[no references from parent]");
1276 klass_id->data.layout.slots = 0;
1277 klass_id->data.layout.references = 0;
1280 if (number_of_reference_fields > 0) {
1281 klass_id->data.layout.slots += ((max_offset_of_reference_fields / sizeof (gpointer)) + 1);
1282 klass_id->data.layout.references += number_of_reference_fields;
1283 #if (DEBUG_CLASS_BITMAPS)
1284 printf ("[adding data, going to %d references in %d slots]", klass_id->data.layout.references, klass_id->data.layout.slots);
1288 if (klass_id->data.layout.slots <= CLASS_LAYOUT_PACKED_BITMAP_SIZE) {
1289 #if (DEBUG_CLASS_BITMAPS)
1290 printf ("[zeroing bitmap]");
1292 klass_id->data.bitmap.compact = 0;
1293 if ((parent_id != NULL) && (parent_id->data.layout.references > 0)) {
1294 #if (DEBUG_CLASS_BITMAPS)
1295 printf ("[copying compact father bitmap]");
1297 klass_id->data.bitmap.compact = parent_id->data.bitmap.compact;
1300 int size_of_bitmap = klass_id->data.layout.slots;
1301 BITS_TO_BYTES (size_of_bitmap);
1302 #if (DEBUG_CLASS_BITMAPS)
1303 printf ("[allocating %d bytes for bitmap]", size_of_bitmap);
1305 klass_id->data.bitmap.extended = g_malloc0 (size_of_bitmap);
1306 if ((parent_id != NULL) && (parent_id->data.layout.references > 0)) {
1307 int size_of_father_bitmap = parent_id->data.layout.slots;
1308 if (size_of_father_bitmap <= CLASS_LAYOUT_PACKED_BITMAP_SIZE) {
1310 #if (DEBUG_CLASS_BITMAPS)
1311 printf ("[copying %d bits from father bitmap]", size_of_father_bitmap);
1313 for (father_slot = 0; father_slot < size_of_father_bitmap; father_slot ++) {
1314 if (parent_id->data.bitmap.compact & (((guint64)1) << father_slot)) {
1315 klass_id->data.bitmap.extended [father_slot >> 3] |= (1 << (father_slot & 7));
1319 BITS_TO_BYTES (size_of_father_bitmap);
1320 #if (DEBUG_CLASS_BITMAPS)
1321 printf ("[copying %d bytes from father bitmap]", size_of_father_bitmap);
1323 memcpy (klass_id->data.bitmap.extended, parent_id->data.bitmap.extended, size_of_father_bitmap);
1329 #if (DEBUG_CLASS_BITMAPS)
1330 printf ("[starting filling iteration]\n");
1333 while ((field = mono_class_get_fields (klass, &iter)) != NULL) {
1334 MonoType* field_type = mono_field_get_type (field);
1335 // For now, skip static fields
1336 if (mono_field_get_flags (field) & 0x0010 /*FIELD_ATTRIBUTE_STATIC*/)
1339 #if (DEBUG_CLASS_BITMAPS)
1340 printf ("[Working on field %s]", mono_field_get_name (field));
1342 if (MONO_TYPE_IS_REFERENCE (field_type)) {
1343 int field_offset = mono_field_get_offset (field) - sizeof (MonoObject);
1345 g_assert ((field_offset % sizeof (gpointer)) == 0);
1346 field_slot = field_offset / sizeof (gpointer);
1347 if (klass_id->data.layout.slots <= CLASS_LAYOUT_PACKED_BITMAP_SIZE) {
1348 klass_id->data.bitmap.compact |= (((guint64)1) << field_slot);
1350 klass_id->data.bitmap.extended [field_slot >> 3] |= (1 << (field_slot & 7));
1352 #if (DEBUG_CLASS_BITMAPS)
1353 printf ("[reference at offset %d, slot %d]", field_offset, field_slot);
1356 MonoClass *field_class = mono_class_from_mono_type (field_type);
1357 if (field_class && mono_class_is_valuetype (field_class)) {
1358 ClassIdMappingElement *field_id = class_id_mapping_element_get (field_class);
1362 g_assert (field_id != NULL);
1363 field_offset = mono_field_get_offset (field) - sizeof (MonoObject);
1364 g_assert ((field_id->data.layout.references == 0) || ((field_offset % sizeof (gpointer)) == 0));
1365 field_slot = field_offset / sizeof (gpointer);
1366 #if (DEBUG_CLASS_BITMAPS)
1367 printf ("[value type at offset %d, slot %d, with %d references in %d slots]", field_offset, field_slot, field_id->data.layout.references, field_id->data.layout.slots);
1370 if (field_id->data.layout.references > 0) {
1372 if (field_id->data.layout.slots <= CLASS_LAYOUT_PACKED_BITMAP_SIZE) {
1373 for (sub_field_slot = 0; sub_field_slot < field_id->data.layout.slots; sub_field_slot ++) {
1374 if (field_id->data.bitmap.compact & (((guint64)1) << sub_field_slot)) {
1375 int actual_slot = field_slot + sub_field_slot;
1376 if (klass_id->data.layout.slots <= CLASS_LAYOUT_PACKED_BITMAP_SIZE) {
1377 klass_id->data.bitmap.compact |= (((guint64)1) << actual_slot);
1379 klass_id->data.bitmap.extended [actual_slot >> 3] |= (1 << (actual_slot & 7));
1384 for (sub_field_slot = 0; sub_field_slot < field_id->data.layout.slots; sub_field_slot ++) {
1385 if (field_id->data.bitmap.extended [sub_field_slot >> 3] & (1 << (sub_field_slot & 7))) {
1386 int actual_slot = field_slot + sub_field_slot;
1387 if (klass_id->data.layout.slots <= CLASS_LAYOUT_PACKED_BITMAP_SIZE) {
1388 klass_id->data.bitmap.compact |= (((guint64)1) << actual_slot);
1390 klass_id->data.bitmap.extended [actual_slot >> 3] |= (1 << (actual_slot & 7));
1399 #if (DEBUG_CLASS_BITMAPS)
1402 printf ("\nLayot of class \"%s.%s\": references %d, slots %d, bitmap {", mono_class_get_namespace (klass), mono_class_get_name (klass), klass_id->data.layout.references, klass_id->data.layout.slots);
1403 for (slot = 0; slot < klass_id->data.layout.slots; slot ++) {
1404 if (klass_id->data.layout.slots <= CLASS_LAYOUT_PACKED_BITMAP_SIZE) {
1405 if (klass_id->data.bitmap.compact & (((guint64)1) << slot)) {
1411 if (klass_id->data.bitmap.extended [slot >> 3] & (1 << (slot & 7))) {
/*
 * Create and register a mapping element for METHOD: builds a display name of
 * the form "name (signature)", assigns the next sequential method id, links
 * the element onto the "unwritten" list (consumed by write_mapping_block) and
 * indexes it by MonoMethod* in the global method table.
 * NOTE(review): this excerpt elides some original lines (e.g. freeing the
 * temporary signature string and the return) — confirm against full source.
 */
1425 static MethodIdMappingElement*
1426 method_id_mapping_element_new (MonoMethod *method) {
1427 MethodIdMappingElement *result = g_new (MethodIdMappingElement, 1);
1428 char *signature = mono_signature_get_desc (mono_method_signature (method), TRUE);
/* Human-readable name written out in the mapping block. */
1430 result->name = g_strdup_printf ("%s (%s)", mono_method_get_name (method), signature);
1432 result->method = method;
/* Prepend to the unwritten list and take the next monotonically growing id. */
1433 result->next_unwritten = profiler->methods->unwritten;
1434 profiler->methods->unwritten = result;
1435 result->id = profiler->methods->next_id;
1436 profiler->methods->next_id ++;
1437 g_hash_table_insert (profiler->methods->table, method, result);
/* JIT info is filled in later, when the method is actually compiled. */
1439 result->data.code_start = NULL;
1440 result->data.code_size = 0;
1442 #if (DEBUG_MAPPING_EVENTS)
1443 printf ("Created new METHOD mapping element \"%s\" (%p)[%d]\n", result->name, method, result->id);
/* GDestroyNotify for entries of the method mapping hash table.
 * NOTE(review): the frees of the element itself (and its name) are in lines
 * elided from this excerpt. */
1450 method_id_mapping_element_destroy (gpointer element) {
1451 MethodIdMappingElement *e = (MethodIdMappingElement*) element;
/* GDestroyNotify for entries of the class mapping hash table; also releases
 * the heap-allocated layout bitmap when one was needed (i.e. when the slot
 * count exceeded the packed/compact bitmap size). */
1458 class_id_mapping_element_destroy (gpointer element) {
1459 ClassIdMappingElement *e = (ClassIdMappingElement*) element;
1462 if ((e->data.layout.slots != CLASS_LAYOUT_NOT_INITIALIZED) && (e->data.layout.slots > CLASS_LAYOUT_PACKED_BITMAP_SIZE))
1463 g_free (e->data.bitmap.extended);
/*
 * Constructors/destructors for the method and class id mapping containers.
 * Both use a pointer-keyed hash table (g_direct_hash) whose value-destroy
 * callback frees the mapping elements; ids start at 1 (0 is reserved).
 */
1467 static MethodIdMapping*
1468 method_id_mapping_new (void) {
1469 MethodIdMapping *result = g_new (MethodIdMapping, 1);
1470 //result->table = g_hash_table_new_full (mono_aligned_addr_hash, NULL, NULL, method_id_mapping_element_destroy);
1471 result->table = g_hash_table_new_full (g_direct_hash, NULL, NULL, method_id_mapping_element_destroy);
1472 result->unwritten = NULL;
1473 result->next_id = 1;
1477 static ClassIdMapping*
1478 class_id_mapping_new (void) {
1479 ClassIdMapping *result = g_new (ClassIdMapping, 1);
1480 //result->table = g_hash_table_new_full (mono_aligned_addr_hash, NULL, NULL, class_id_mapping_element_destroy);
1481 result->table = g_hash_table_new_full (g_direct_hash, NULL, NULL, class_id_mapping_element_destroy);
1482 result->unwritten = NULL;
1483 result->next_id = 1;
/* Destroying the table runs the per-element destroy callbacks above. */
1488 method_id_mapping_destroy (MethodIdMapping *map) {
1489 g_hash_table_destroy (map->table);
1494 class_id_mapping_destroy (ClassIdMapping *map) {
1495 g_hash_table_destroy (map->table);
1499 #if (DEBUG_LOAD_EVENTS)
1501 print_load_event (const char *event_name, GHashTable *table, gpointer item, LoadedElement *element);
/*
 * loaded_element_* family: tracks load/unload lifecycle of assemblies,
 * modules and appdomains.  Each LoadedElement records counter timestamps for
 * the start/end of load and unload, keyed by the runtime item pointer.
 */
1504 static LoadedElement*
1505 loaded_element_load_start (GHashTable *table, gpointer item) {
1506 LoadedElement *element = g_new0 (LoadedElement, 1);
1507 #if (DEBUG_LOAD_EVENTS)
1508 print_load_event ("LOAD START", table, item, element);
/* Stamp the load-start counter and register the element under ITEM. */
1510 MONO_PROFILER_GET_CURRENT_COUNTER (element->load_start_counter);
1511 g_hash_table_insert (table, item, element);
1515 static LoadedElement*
1516 loaded_element_load_end (GHashTable *table, gpointer item, char *name) {
1517 LoadedElement *element = g_hash_table_lookup (table, item);
1518 #if (DEBUG_LOAD_EVENTS)
1519 print_load_event ("LOAD END", table, item, element);
1521 g_assert (element != NULL);
1522 MONO_PROFILER_GET_CURRENT_COUNTER (element->load_end_counter);
/* Takes ownership of NAME; it is released in loaded_element_destroy. */
1523 element->name = name;
1524 element->loaded = TRUE;
1528 static LoadedElement*
1529 loaded_element_unload_start (GHashTable *table, gpointer item) {
1530 LoadedElement *element = g_hash_table_lookup (table, item);
1531 #if (DEBUG_LOAD_EVENTS)
1532 print_load_event ("UNLOAD START", table, item, element);
1534 g_assert (element != NULL);
1535 MONO_PROFILER_GET_CURRENT_COUNTER (element->unload_start_counter);
1539 static LoadedElement*
1540 loaded_element_unload_end (GHashTable *table, gpointer item) {
1541 LoadedElement *element = g_hash_table_lookup (table, item);
1542 #if (DEBUG_LOAD_EVENTS)
1543 print_load_event ("UNLOAD END", table, item, element);
1545 g_assert (element != NULL);
1546 MONO_PROFILER_GET_CURRENT_COUNTER (element->unload_end_counter);
1547 element->unloaded = TRUE;
/* GDestroyNotify for the loaded-element tables: frees the owned name.
 * (The `if (name)` guard is redundant — g_free(NULL) is a no-op — but the
 * code is kept byte-identical here.) */
1553 loaded_element_destroy (gpointer element) {
1554 if (((LoadedElement*)element)->name)
1555 g_free (((LoadedElement*)element)->name);
1559 #if (DEBUG_LOAD_EVENTS)
/* Debug-only tracer: identifies which loaded-element table ITEM belongs to
 * (assembly / module / appdomain) and prints the event with the element's
 * name, or "<NULL>" when the element has no name yet. */
1561 print_load_event (const char *event_name, GHashTable *table, gpointer item, LoadedElement *element) {
1562 const char* item_name;
1565 if (table == profiler->loaded_assemblies) {
1566 //item_info = g_strdup_printf("ASSEMBLY %p (dynamic %d)", item, mono_image_is_dynamic (mono_assembly_get_image((MonoAssembly*)item)));
1567 item_info = g_strdup_printf("ASSEMBLY %p", item);
1568 } else if (table == profiler->loaded_modules) {
1569 //item_info = g_strdup_printf("MODULE %p (dynamic %d)", item, mono_image_is_dynamic ((MonoImage*)item));
1570 item_info = g_strdup_printf("MODULE %p", item);
1571 } else if (table == profiler->loaded_appdomains) {
1572 item_info = g_strdup_printf("APPDOMAIN %p (id %d)", item, mono_domain_get_id ((MonoDomain*)item));
/* Any other table is a programming error. */
1575 g_assert_not_reached ();
1578 if (element != NULL) {
1579 item_name = element->name;
1581 item_name = "<NULL>";
1584 printf ("%s EVENT for %s (%s)\n", event_name, item_info, item_name);
/* Free an entire chain of heap-shot object buffers (list walk via ->next). */
1590 profiler_heap_shot_object_buffers_destroy (ProfilerHeapShotObjectBuffer *buffer) {
1591 while (buffer != NULL) {
1592 ProfilerHeapShotObjectBuffer *next = buffer->next;
1593 #if DEBUG_HEAP_PROFILER
1594 printf ("profiler_heap_shot_object_buffers_destroy: destroyed buffer %p (%p-%p)\n", buffer, & (buffer->buffer [0]), buffer->end);
/*
 * Allocate a fresh per-thread buffer for recording allocated objects and
 * prepend it to DATA's buffer list.  Afterwards, prune any fully-processed
 * trailing buffers (first_unprocessed_slot == end) so memory is reclaimed.
 */
1601 static ProfilerHeapShotObjectBuffer*
1602 profiler_heap_shot_object_buffer_new (ProfilerPerThreadData *data) {
1603 ProfilerHeapShotObjectBuffer *buffer;
1604 ProfilerHeapShotObjectBuffer *result = g_new (ProfilerHeapShotObjectBuffer, 1);
1605 result->next_free_slot = & (result->buffer [0]);
1606 result->end = & (result->buffer [PROFILER_HEAP_SHOT_OBJECT_BUFFER_SIZE]);
1607 result->first_unprocessed_slot = & (result->buffer [0]);
1608 result->next = data->heap_shot_object_buffers;
1609 data->heap_shot_object_buffers = result;
1610 #if DEBUG_HEAP_PROFILER
1611 printf ("profiler_heap_shot_object_buffer_new: created buffer %p (%p-%p)\n", result, result->next_free_slot, result->end);
/* Drop fully-processed buffers past the current one. */
1613 for (buffer = result; buffer != NULL; buffer = buffer->next) {
1614 ProfilerHeapShotObjectBuffer *last = buffer->next;
1615 if ((last != NULL) && (last->first_unprocessed_slot == last->end)) {
1616 buffer->next = NULL;
1617 profiler_heap_shot_object_buffers_destroy (last);
/*
 * Create a write job for one GC/heap-shot event.  A write buffer chain is
 * only allocated when object data will actually be dumped (unreachable-object
 * tracking or a full heap dump); a per-class summary array is only allocated
 * when collection summaries are enabled.  COLLECTION is the GC collection
 * number this job belongs to.
 */
1624 static ProfilerHeapShotWriteJob*
1625 profiler_heap_shot_write_job_new (gboolean heap_shot_was_signalled, gboolean dump_heap_data, guint32 collection) {
1626 ProfilerHeapShotWriteJob *job = g_new (ProfilerHeapShotWriteJob, 1);
1628 job->next_unwritten = NULL;
1630 if (profiler->action_flags.unreachable_objects || dump_heap_data) {
/* First buffer of the chain; cursor/start/end track the write position. */
1631 job->buffers = g_new (ProfilerHeapShotWriteBuffer, 1);
1632 job->buffers->next = NULL;
1633 job->last_next = & (job->buffers->next);
1634 job->start = & (job->buffers->buffer [0]);
1635 job->cursor = job->start;
1636 job->end = & (job->buffers->buffer [PROFILER_HEAP_SHOT_WRITE_BUFFER_SIZE]);
1638 job->buffers = NULL;
1639 job->last_next = NULL;
1644 job->full_buffers = 0;
1646 if (profiler->action_flags.collection_summary) {
/* One summary slot per known class id (capacity grows with next_id). */
1647 job->summary.capacity = profiler->classes->next_id;
1648 job->summary.per_class_data = g_new0 (ProfilerHeapShotClassSummary, job->summary.capacity);
1650 job->summary.capacity = 0;
1651 job->summary.per_class_data = NULL;
1654 job->heap_shot_was_signalled = heap_shot_was_signalled;
1655 job->collection = collection;
1656 job->dump_heap_data = dump_heap_data;
1657 #if DEBUG_HEAP_PROFILER
1658 printf ("profiler_heap_shot_write_job_new: created job %p with buffer %p(%p-%p) (collection %d, dump %d)\n", job, job->buffers, job->start, job->end, collection, dump_heap_data);
/* TRUE when the job has anything to write: object buffers or summary data. */
1664 profiler_heap_shot_write_job_has_data (ProfilerHeapShotWriteJob *job) {
1665 return ((job->buffers != NULL) || (job->summary.capacity > 0));
/*
 * Grow the job's buffer chain: append a new buffer, store VALUE as its first
 * slot, and reset cursor/start/end to point into the new buffer.  Called by
 * WRITE_HEAP_SHOT_JOB_VALUE when the current buffer is full.
 */
1669 profiler_heap_shot_write_job_add_buffer (ProfilerHeapShotWriteJob *job, gpointer value) {
1670 ProfilerHeapShotWriteBuffer *buffer = g_new (ProfilerHeapShotWriteBuffer, 1);
1671 buffer->next = NULL;
1672 *(job->last_next) = buffer;
1673 job->last_next = & (buffer->next);
1674 job->full_buffers ++;
/* VALUE occupies slot 0, so the cursor starts at slot 1. */
1675 buffer->buffer [0] = value;
1676 job->start = & (buffer->buffer [0]);
1677 job->cursor = & (buffer->buffer [1]);
1678 job->end = & (buffer->buffer [PROFILER_HEAP_SHOT_WRITE_BUFFER_SIZE]);
1679 #if DEBUG_HEAP_PROFILER
1680 printf ("profiler_heap_shot_write_job_add_buffer: in job %p, added buffer %p(%p-%p) with value %p at address %p (cursor now %p)\n", job, buffer, job->start, job->end, value, &(buffer->buffer [0]), job->cursor);
1682 ProfilerHeapShotWriteBuffer *current_buffer;
1683 for (current_buffer = job->buffers; current_buffer != NULL; current_buffer = current_buffer->next) {
1684 printf ("profiler_heap_shot_write_job_add_buffer: now job %p has buffer %p\n", job, current_buffer);
/* Release the job's buffer chain and its per-class summary array. */
1691 profiler_heap_shot_write_job_free_buffers (ProfilerHeapShotWriteJob *job) {
1692 ProfilerHeapShotWriteBuffer *buffer = job->buffers;
1694 while (buffer != NULL) {
1695 ProfilerHeapShotWriteBuffer *next = buffer->next;
1696 #if DEBUG_HEAP_PROFILER
1697 printf ("profiler_heap_shot_write_job_free_buffers: in job %p, freeing buffer %p\n", job, buffer);
1703 job->buffers = NULL;
1705 if (job->summary.per_class_data != NULL) {
1706 g_free (job->summary.per_class_data);
1707 job->summary.per_class_data = NULL;
1709 job->summary.capacity = 0;
1713 profiler_heap_shot_write_block (ProfilerHeapShotWriteJob *job);
/*
 * Writer-thread worker: walks the list of pending heap-shot jobs (via
 * next_unwritten) and flushes each job that has data with
 * profiler_heap_shot_write_block; written jobs are unlinked from the
 * unwritten chain so they are not processed again.
 * NOTE(review): the loop that re-checks `done` is partially elided in this
 * excerpt — confirm the outer-loop structure against the full source.
 */
1716 profiler_process_heap_shot_write_jobs (void) {
1717 gboolean done = FALSE;
1720 ProfilerHeapShotWriteJob *current_job = profiler->heap_shot_write_jobs;
1721 ProfilerHeapShotWriteJob *previous_job = NULL;
1722 ProfilerHeapShotWriteJob *next_job;
1725 while (current_job != NULL) {
1726 next_job = current_job->next_unwritten;
1728 if (next_job != NULL) {
1729 if (profiler_heap_shot_write_job_has_data (current_job)) {
1732 if (! profiler_heap_shot_write_job_has_data (next_job)) {
/* Successor has nothing to write: truncate the unwritten chain here. */
1733 current_job->next_unwritten = NULL;
1737 if (profiler_heap_shot_write_job_has_data (current_job)) {
1738 LOG_WRITER_THREAD ("profiler_process_heap_shot_write_jobs: writing...");
1739 profiler_heap_shot_write_block (current_job);
1740 LOG_WRITER_THREAD ("profiler_process_heap_shot_write_jobs: done");
1741 if (previous_job != NULL) {
1742 previous_job->next_unwritten = NULL;
1747 previous_job = current_job;
1748 current_job = next_job;
/*
 * Free all heap-shot jobs that have already been written: skips down the
 * next_unwritten chain (those jobs must be kept), detaches the tail of the
 * ->next list past the last unwritten job, then frees that tail.
 */
1754 profiler_free_heap_shot_write_jobs (void) {
1755 ProfilerHeapShotWriteJob *current_job = profiler->heap_shot_write_jobs;
1756 ProfilerHeapShotWriteJob *next_job;
1758 if (current_job != NULL) {
1759 while (current_job->next_unwritten != NULL) {
1760 #if DEBUG_HEAP_PROFILER
1761 printf ("profiler_free_heap_shot_write_jobs: job %p must not be freed\n", current_job);
1763 current_job = current_job->next_unwritten;
/* current_job is the last unwritten job: cut the list after it. */
1766 next_job = current_job->next;
1767 current_job->next = NULL;
1768 current_job = next_job;
1770 while (current_job != NULL) {
1771 #if DEBUG_HEAP_PROFILER
1772 printf ("profiler_free_heap_shot_write_jobs: job %p will be freed\n", current_job);
1774 next_job = current_job->next;
1775 profiler_heap_shot_write_job_free_buffers (current_job);
1776 g_free (current_job);
1777 current_job = next_job;
/* Shutdown path: unconditionally free every job in the list. */
1783 profiler_destroy_heap_shot_write_jobs (void) {
1784 ProfilerHeapShotWriteJob *current_job = profiler->heap_shot_write_jobs;
1785 ProfilerHeapShotWriteJob *next_job;
1787 while (current_job != NULL) {
1788 next_job = current_job->next;
1789 profiler_heap_shot_write_job_free_buffers (current_job);
1790 g_free (current_job);
1791 current_job = next_job;
/* Push JOB onto the global job list; it is initially also unwritten. */
1796 profiler_add_heap_shot_write_job (ProfilerHeapShotWriteJob *job) {
1797 job->next = profiler->heap_shot_write_jobs;
1798 job->next_unwritten = job->next;
1799 profiler->heap_shot_write_jobs = job;
1800 #if DEBUG_HEAP_PROFILER
1801 printf ("profiler_add_heap_shot_write_job: added job %p\n", job);
/* Debug tracing hooks for STORE_ALLOCATED_OBJECT; compiled to no-ops when
 * DEBUG_HEAP_PROFILER is off. */
1805 #if DEBUG_HEAP_PROFILER
1806 #define STORE_ALLOCATED_OBJECT_MESSAGE1(d,o) printf ("STORE_ALLOCATED_OBJECT[TID %ld]: storing object %p at address %p\n", (d)->thread_id, (o), (d)->heap_shot_object_buffers->next_free_slot)
1807 #define STORE_ALLOCATED_OBJECT_MESSAGE2(d,o) printf ("STORE_ALLOCATED_OBJECT[TID %ld]: storing object %p at address %p in new buffer %p\n", (d)->thread_id, (o), buffer->next_free_slot, buffer)
1809 #define STORE_ALLOCATED_OBJECT_MESSAGE1(d,o)
1810 #define STORE_ALLOCATED_OBJECT_MESSAGE2(d,o)
/* Record object O in thread D's heap-shot object buffer, allocating a new
 * buffer when the current one is full.  do/while(0) keeps the multi-statement
 * macro statement-safe. */
1812 #define STORE_ALLOCATED_OBJECT(d,o) do {\
1813 if ((d)->heap_shot_object_buffers->next_free_slot < (d)->heap_shot_object_buffers->end) {\
1814 STORE_ALLOCATED_OBJECT_MESSAGE1 ((d), (o));\
1815 *((d)->heap_shot_object_buffers->next_free_slot) = (o);\
1816 (d)->heap_shot_object_buffers->next_free_slot ++;\
1818 ProfilerHeapShotObjectBuffer *buffer = profiler_heap_shot_object_buffer_new (d);\
1819 STORE_ALLOCATED_OBJECT_MESSAGE2 ((d), (o));\
1820 *((buffer)->next_free_slot) = (o);\
1821 (buffer)->next_free_slot ++;\
/*
 * Allocate the per-thread profiler state: the event ring buffer (of
 * BUFFER_SIZE entries, with end_event at the last usable slot), counter
 * baselines, the current thread id, and — when any heap-related action is
 * enabled — an initial heap-shot object buffer.  The shadow call stack is
 * sized only when stack tracking is active.
 */
1825 static ProfilerPerThreadData*
1826 profiler_per_thread_data_new (guint32 buffer_size)
1828 ProfilerPerThreadData *data = g_new (ProfilerPerThreadData, 1);
1830 data->events = g_new0 (ProfilerEventData, buffer_size);
1831 data->next_free_event = data->events;
/* Last usable slot, kept free as a sentinel. */
1832 data->end_event = data->events + (buffer_size - 1);
1833 data->first_unwritten_event = data->events;
1834 data->first_unmapped_event = data->events;
1835 MONO_PROFILER_GET_CURRENT_COUNTER (data->start_event_counter);
1836 data->last_event_counter = data->start_event_counter;
1837 data->thread_id = CURRENT_THREAD_ID ();
1838 data->heap_shot_object_buffers = NULL;
1839 if ((profiler->action_flags.unreachable_objects == TRUE) ||
1840 (profiler->action_flags.heap_shot == TRUE) ||
1841 (profiler->action_flags.collection_summary == TRUE)) {
1842 profiler_heap_shot_object_buffer_new (data);
1844 if (profiler->action_flags.track_stack) {
1845 thread_stack_initialize (&(data->stack), 64);
1847 thread_stack_initialize_empty (&(data->stack));
/* Tear down per-thread state: events, heap-shot buffers, shadow stack. */
1853 profiler_per_thread_data_destroy (ProfilerPerThreadData *data) {
1854 g_free (data->events);
1855 profiler_heap_shot_object_buffers_destroy (data->heap_shot_object_buffers);
1856 thread_stack_free (&(data->stack));
/*
 * Allocate a statistical (sampling) hit buffer.  The raw hit array is sized
 * for the full call-chain depth per sample (+1 for the leaf), while end_index
 * counts whole samples (statistical_buffer_size).
 */
1860 static ProfilerStatisticalData*
1861 profiler_statistical_data_new (MonoProfiler *profiler) {
1862 int buffer_size = profiler->statistical_buffer_size * (profiler->statistical_call_chain_depth + 1);
1863 ProfilerStatisticalData *data = g_new (ProfilerStatisticalData, 1);
1865 data->hits = g_new0 (ProfilerStatisticalHit, buffer_size);
1866 data->next_free_index = 0;
1867 data->end_index = profiler->statistical_buffer_size;
1868 data->first_unwritten_index = 0;
1874 profiler_statistical_data_destroy (ProfilerStatisticalData *data) {
1875 g_free (data->hits);
/*
 * Advance to the next file-write buffer, allocating one if the chain has no
 * spare buffer; used by WRITE_BYTE when the current buffer fills up.
 * Buffers are kept and reused after write_current_block resets the chain.
 */
1880 profiler_add_write_buffer (void) {
1881 if (profiler->current_write_buffer->next == NULL) {
1882 profiler->current_write_buffer->next = g_malloc (sizeof (ProfilerFileWriteBuffer) + PROFILER_FILE_WRITE_BUFFER_SIZE);
1883 profiler->current_write_buffer->next->next = NULL;
1885 //printf ("Added next buffer %p, to buffer %p\n", profiler->current_write_buffer->next, profiler->current_write_buffer);
1888 profiler->current_write_buffer = profiler->current_write_buffer->next;
1889 profiler->current_write_position = 0;
1890 profiler->full_write_buffers ++;
/* Free the entire chain of file-write buffers (shutdown path). */
1894 profiler_free_write_buffers (void) {
1895 ProfilerFileWriteBuffer *current_buffer = profiler->write_buffers;
1896 while (current_buffer != NULL) {
1897 ProfilerFileWriteBuffer *next_buffer = current_buffer->next;
1899 //printf ("Freeing write buffer %p, next is %p\n", current_buffer, next_buffer);
1901 g_free (current_buffer);
1902 current_buffer = next_buffer;
/* Append byte B to the current write buffer, rolling over to a new buffer
 * via profiler_add_write_buffer when full. */
1906 #define WRITE_BYTE(b) do {\
1907 if (profiler->current_write_position >= PROFILER_FILE_WRITE_BUFFER_SIZE) {\
1908 profiler_add_write_buffer ();\
1910 profiler->current_write_buffer->buffer [profiler->current_write_position] = (b);\
1911 profiler->current_write_position ++;\
/*
 * Flush all buffered bytes to the output file as one block.  The 10-byte
 * little-endian header is: block code (2 bytes), payload size (4 bytes), and
 * the counter delta since the previous block header (4 bytes).  After
 * writing, the buffer chain is rewound for reuse.
 * NOTE(review): the declaration of `header` and the counter_delta=0 branch
 * are in lines elided from this excerpt.
 */
1916 write_current_block (guint16 code) {
1917 guint32 size = (profiler->full_write_buffers * PROFILER_FILE_WRITE_BUFFER_SIZE) + profiler->current_write_position;
1918 ProfilerFileWriteBuffer *current_buffer = profiler->write_buffers;
1919 guint64 current_counter;
1920 guint32 counter_delta;
1923 MONO_PROFILER_GET_CURRENT_COUNTER (current_counter);
1924 if (profiler->last_header_counter != 0) {
1925 counter_delta = current_counter - profiler->last_header_counter;
1929 profiler->last_header_counter = current_counter;
/* Serialize the header fields little-endian, byte by byte. */
1931 header [0] = code & 0xff;
1932 header [1] = (code >> 8) & 0xff;
1933 header [2] = size & 0xff;
1934 header [3] = (size >> 8) & 0xff;
1935 header [4] = (size >> 16) & 0xff;
1936 header [5] = (size >> 24) & 0xff;
1937 header [6] = counter_delta & 0xff;
1938 header [7] = (counter_delta >> 8) & 0xff;
1939 header [8] = (counter_delta >> 16) & 0xff;
1940 header [9] = (counter_delta >> 24) & 0xff;
1942 #if (DEBUG_FILE_WRITES)
1943 printf ("write_current_block: writing header (code %d)\n", code);
1945 WRITE_BUFFER (& (header [0]), 10);
/* Emit every completely filled buffer, then the partial tail buffer. */
1947 while ((current_buffer != NULL) && (profiler->full_write_buffers > 0)) {
1948 #if (DEBUG_FILE_WRITES)
1949 printf ("write_current_block: writing buffer (size %d)\n", PROFILER_FILE_WRITE_BUFFER_SIZE);
1951 WRITE_BUFFER (& (current_buffer->buffer [0]), PROFILER_FILE_WRITE_BUFFER_SIZE);
1952 profiler->full_write_buffers --;
1953 current_buffer = current_buffer->next;
1955 if (profiler->current_write_position > 0) {
1956 #if (DEBUG_FILE_WRITES)
1957 printf ("write_current_block: writing last buffer (size %d)\n", profiler->current_write_position);
1959 WRITE_BUFFER (& (current_buffer->buffer [0]), profiler->current_write_position);
1962 #if (DEBUG_FILE_WRITES)
1963 printf ("write_current_block: buffers flushed\n");
/* Rewind: reuse the same buffer chain for the next block. */
1966 profiler->current_write_buffer = profiler->write_buffers;
1967 profiler->current_write_position = 0;
1968 profiler->full_write_buffers = 0;
/* Variable-length integer encoding: 7 data bits per byte; the high bit set
 * marks the FINAL byte of the value (note: inverse of the common LEB128
 * "continuation bit" convention). */
1972 #define SEVEN_BITS_MASK (0x7f)
1973 #define EIGHT_BIT_MASK (0x80)
/* Emit VALUE as a little-endian 7-bit-per-byte varint. */
1976 write_uint32 (guint32 value) {
1977 while (value > SEVEN_BITS_MASK) {
1978 WRITE_BYTE (value & SEVEN_BITS_MASK);
1981 WRITE_BYTE (value | EIGHT_BIT_MASK);
/* Same encoding as write_uint32, for 64-bit values. */
1984 write_uint64 (guint64 value) {
1985 while (value > SEVEN_BITS_MASK) {
1986 WRITE_BYTE (value & SEVEN_BITS_MASK);
1989 WRITE_BYTE (value | EIGHT_BIT_MASK);
/* Emit STRING's bytes; the terminator write is elided from this excerpt. */
1992 write_string (const char *string) {
1993 while (*string != 0) {
1994 WRITE_BYTE (*string);
2000 static void write_clock_data (void);
/*
 * Write a DIRECTIVES block describing which optional record layouts the
 * reader must expect (caller info, stacks, object ids on allocations).
 * Emitted at START of the session; clock data brackets the directive list
 * and the list is terminated by MONO_PROFILER_DIRECTIVE_END.
 */
2002 write_directives_block (gboolean start) {
2003 write_clock_data ();
2006 if (profiler->action_flags.save_allocation_caller) {
2007 write_uint32 (MONO_PROFILER_DIRECTIVE_ALLOCATIONS_CARRY_CALLER);
2009 if (profiler->action_flags.save_allocation_stack || profiler->action_flags.track_calls) {
2010 write_uint32 (MONO_PROFILER_DIRECTIVE_ALLOCATIONS_HAVE_STACK);
2012 if (profiler->action_flags.allocations_carry_id) {
2013 write_uint32 (MONO_PROFILER_DIRECTIVE_ALLOCATIONS_CARRY_ID);
2016 write_uint32 (MONO_PROFILER_DIRECTIVE_END);
2018 write_clock_data ();
2019 write_current_block (MONO_PROFILER_FILE_BLOCK_KIND_DIRECTIVES);
2022 #if DEBUG_HEAP_PROFILER
2023 #define WRITE_HEAP_SHOT_JOB_VALUE_MESSAGE(v,c) printf ("WRITE_HEAP_SHOT_JOB_VALUE: writing value %p at cursor %p\n", (v), (c))
2025 #define WRITE_HEAP_SHOT_JOB_VALUE_MESSAGE(v,c)
/* Store pointer-sized value V into job J's buffer chain, growing the chain
 * via profiler_heap_shot_write_job_add_buffer when the cursor hits the end. */
2027 #define WRITE_HEAP_SHOT_JOB_VALUE(j,v) do {\
2028 if ((j)->cursor < (j)->end) {\
2029 WRITE_HEAP_SHOT_JOB_VALUE_MESSAGE ((v), ((j)->cursor));\
2030 *((j)->cursor) = (v);\
2033 profiler_heap_shot_write_job_add_buffer (j, v);\
/* Override GLib's GUINT_TO_POINTER/GPOINTER_TO_UINT with pointer-width-exact
 * versions so the low tag bits of 64-bit pointers are not truncated. */
2038 #undef GUINT_TO_POINTER
2039 #undef GPOINTER_TO_UINT
2040 #if (SIZEOF_VOID_P == 4)
2041 #define GUINT_TO_POINTER(u) ((void*)(guint32)(u))
2042 #define GPOINTER_TO_UINT(p) ((guint32)(void*)(p))
2043 #elif (SIZEOF_VOID_P == 8)
2044 #define GUINT_TO_POINTER(u) ((void*)(guint64)(u))
2045 #define GPOINTER_TO_UINT(p) ((guint64)(void*)(p))
2047 #error Bad size of void pointer
/* Tag value V with heap code C in its low bits before storing it. */
2050 #define WRITE_HEAP_SHOT_JOB_VALUE_WITH_CODE(j,v,c) WRITE_HEAP_SHOT_JOB_VALUE (j, GUINT_TO_POINTER (GPOINTER_TO_UINT (v)|(c)))
2052 #if DEBUG_HEAP_PROFILER
2053 #define UPDATE_JOB_BUFFER_CURSOR_MESSAGE() printf ("profiler_heap_shot_write_block[UPDATE_JOB_BUFFER_CURSOR]: in job %p, moving to buffer %p and cursor %p\n", job, buffer, cursor)
2055 #define UPDATE_JOB_BUFFER_CURSOR_MESSAGE()
/* Reader-side cursor advance over the buffer chain: step to the next buffer
 * when the current one is exhausted; only a non-final buffer spans the full
 * PROFILER_HEAP_SHOT_WRITE_BUFFER_SIZE (the last one ends at job->cursor —
 * the shorter-end branch is elided in this excerpt). */
2057 #define UPDATE_JOB_BUFFER_CURSOR() do {\
2059 if (cursor >= end) {\
2060 buffer = buffer->next;\
2061 if (buffer != NULL) {\
2062 cursor = & (buffer->buffer [0]);\
2063 if (buffer->next != NULL) {\
2064 end = & (buffer->buffer [PROFILER_HEAP_SHOT_WRITE_BUFFER_SIZE]);\
2072 UPDATE_JOB_BUFFER_CURSOR_MESSAGE ();\
/*
 * Serialize one heap-shot job as a HEAP_DATA block.  The job's buffers hold
 * pointer-sized values whose low bits (HEAP_CODE_MASK) tag the record kind:
 *   HEAP_CODE_FREE_OBJECT_CLASS — unreachable object: class id + size;
 *   HEAP_CODE_OBJECT            — live object: address, class id, size,
 *                                  reference count, then that many refs.
 * The payload is bracketed by the job's own timestamps and by write-time
 * clock data.
 */
2076 profiler_heap_shot_write_data_block (ProfilerHeapShotWriteJob *job) {
2077 ProfilerHeapShotWriteBuffer *buffer;
2080 guint64 start_counter;
2082 guint64 end_counter;
2085 write_uint64 (job->start_counter);
2086 write_uint64 (job->start_time);
2087 write_uint64 (job->end_counter);
2088 write_uint64 (job->end_time);
2089 write_uint32 (job->collection);
2090 MONO_PROFILER_GET_CURRENT_COUNTER (start_counter);
2091 MONO_PROFILER_GET_CURRENT_TIME (start_time);
2092 write_uint64 (start_counter);
2093 write_uint64 (start_time);
2094 #if DEBUG_HEAP_PROFILER
2095 printf ("profiler_heap_shot_write_data_block: start writing job %p (start %p, end %p)...\n", job, & (job->buffers->buffer [0]), job->cursor);
/* Position the read cursor at the first buffer; a buffer that is not the
 * last spans the full size, otherwise reading stops at job->cursor. */
2097 buffer = job->buffers;
2098 cursor = & (buffer->buffer [0]);
2099 if (buffer->next != NULL) {
2100 end = & (buffer->buffer [PROFILER_HEAP_SHOT_WRITE_BUFFER_SIZE]);
2104 if (cursor >= end) {
2107 #if DEBUG_HEAP_PROFILER
2108 printf ("profiler_heap_shot_write_data_block: in job %p, starting at buffer %p and cursor %p\n", job, buffer, cursor);
2110 while (cursor != NULL) {
2111 gpointer value = *cursor;
/* The record kind lives in the low bits of the stored pointer. */
2112 HeapProfilerJobValueCode code = GPOINTER_TO_UINT (value) & HEAP_CODE_MASK;
2113 #if DEBUG_HEAP_PROFILER
2114 printf ("profiler_heap_shot_write_data_block: got value %p and code %d\n", value, code);
2117 UPDATE_JOB_BUFFER_CURSOR ();
2118 if (code == HEAP_CODE_FREE_OBJECT_CLASS) {
/* Strip the tag bits to recover the MonoClass pointer. */
2119 MonoClass *klass = GUINT_TO_POINTER (GPOINTER_TO_UINT (value) & (~ (guint64) HEAP_CODE_MASK));
2120 //MonoClass *klass = GUINT_TO_POINTER (GPOINTER_TO_UINT (value) % 4);
2121 ClassIdMappingElement *class_id;
2124 class_id = class_id_mapping_element_get (klass);
2125 if (class_id == NULL) {
2126 printf ("profiler_heap_shot_write_data_block: unknown class %p", klass);
2128 g_assert (class_id != NULL);
/* Class id is shifted so its low 2 bits can carry the record code. */
2129 write_uint32 ((class_id->id << 2) | HEAP_CODE_FREE_OBJECT_CLASS);
2131 size = GPOINTER_TO_UINT (*cursor);
2132 UPDATE_JOB_BUFFER_CURSOR ();
2133 write_uint32 (size);
2134 #if DEBUG_HEAP_PROFILER
2135 printf ("profiler_heap_shot_write_data_block: wrote unreachable object of class %p (id %d, size %d)\n", klass, class_id->id, size);
2137 } else if (code == HEAP_CODE_OBJECT) {
2138 MonoObject *object = GUINT_TO_POINTER (GPOINTER_TO_UINT (value) & (~ (guint64) HEAP_CODE_MASK));
2139 MonoClass *klass = mono_object_get_class (object);
2140 ClassIdMappingElement *class_id = class_id_mapping_element_get (klass);
2141 guint32 size = mono_object_get_size (object);
/* Slot following the object holds the number of reference slots. */
2142 guint32 references = GPOINTER_TO_UINT (*cursor);
2143 UPDATE_JOB_BUFFER_CURSOR ();
2145 if (class_id == NULL) {
2146 printf ("profiler_heap_shot_write_data_block: unknown class %p", klass);
2148 g_assert (class_id != NULL);
2150 write_uint64 (GPOINTER_TO_UINT (value));
2151 write_uint32 (class_id->id);
2152 write_uint32 (size);
2153 write_uint32 (references);
2154 #if DEBUG_HEAP_PROFILER
2155 printf ("profiler_heap_shot_write_data_block: writing object %p (references %d)\n", value, references);
/* Each referenced object address follows in the buffer stream. */
2158 while (references > 0) {
2159 gpointer reference = *cursor;
2160 write_uint64 (GPOINTER_TO_UINT (reference));
2161 UPDATE_JOB_BUFFER_CURSOR ();
2163 #if DEBUG_HEAP_PROFILER
2164 printf ("profiler_heap_shot_write_data_block: inside object %p, wrote reference %p)\n", value, reference);
2168 #if DEBUG_HEAP_PROFILER
2169 printf ("profiler_heap_shot_write_data_block: unknown code %d in value %p\n", code, value);
2171 g_assert_not_reached ();
2176 MONO_PROFILER_GET_CURRENT_COUNTER (end_counter);
2177 MONO_PROFILER_GET_CURRENT_TIME (end_time);
2178 write_uint64 (end_counter);
2179 write_uint64 (end_time);
2181 write_current_block (MONO_PROFILER_FILE_BLOCK_KIND_HEAP_DATA);
2182 #if DEBUG_HEAP_PROFILER
2183 printf ("profiler_heap_shot_write_data_block: writing job %p done.\n", job);
/*
 * Serialize the per-class collection summary of JOB as a HEAP_SUMMARY block:
 * for every class id with at least one reachable or unreachable instance,
 * write instance and byte counts for both categories.  The class-id write
 * preceding the four counters is elided in this excerpt.
 */
2187 profiler_heap_shot_write_summary_block (ProfilerHeapShotWriteJob *job) {
2188 guint64 start_counter;
2190 guint64 end_counter;
2194 #if DEBUG_HEAP_PROFILER
2195 printf ("profiler_heap_shot_write_summary_block: start writing job %p...\n", job);
2197 MONO_PROFILER_GET_CURRENT_COUNTER (start_counter);
2198 MONO_PROFILER_GET_CURRENT_TIME (start_time);
2199 write_uint64 (start_counter);
2200 write_uint64 (start_time);
2202 write_uint32 (job->collection);
2204 for (id = 0; id < job->summary.capacity; id ++) {
/* Skip classes with no instances at all in this collection. */
2205 if ((job->summary.per_class_data [id].reachable.instances > 0) || (job->summary.per_class_data [id].unreachable.instances > 0)) {
2207 write_uint32 (job->summary.per_class_data [id].reachable.instances);
2208 write_uint32 (job->summary.per_class_data [id].reachable.bytes);
2209 write_uint32 (job->summary.per_class_data [id].unreachable.instances);
2210 write_uint32 (job->summary.per_class_data [id].unreachable.bytes);
2215 MONO_PROFILER_GET_CURRENT_COUNTER (end_counter);
2216 MONO_PROFILER_GET_CURRENT_TIME (end_time);
2217 write_uint64 (end_counter);
2218 write_uint64 (end_time);
2220 write_current_block (MONO_PROFILER_FILE_BLOCK_KIND_HEAP_SUMMARY);
2221 #if DEBUG_HEAP_PROFILER
2222 printf ("profiler_heap_shot_write_summary_block: writing job %p done.\n", job);
/*
 * Write everything a heap-shot job produced: the per-class summary (if
 * collection summaries are enabled) and/or the raw heap data (if unreachable
 * objects or full heap shots are enabled), then release the job's buffers.
 */
2227 profiler_heap_shot_write_block (ProfilerHeapShotWriteJob *job) {
2228 #if DEBUG_HEAP_PROFILER
2229 printf ("profiler_heap_shot_write_block: working on job %p...\n", job);
2232 if (profiler->action_flags.collection_summary == TRUE) {
2233 profiler_heap_shot_write_summary_block (job);
2236 if ((profiler->action_flags.unreachable_objects == TRUE) || (profiler->action_flags.heap_shot == TRUE)) {
2237 profiler_heap_shot_write_data_block (job);
2240 profiler_heap_shot_write_job_free_buffers (job);
2241 #if DEBUG_HEAP_PROFILER
2242 printf ("profiler_heap_shot_write_block: work on job %p done.\n", job);
/* Emits a LOADED block for one element (module/assembly/appdomain):
 * load start/end counters, loading thread id, and the element name.
 * NOTE(review): the `kind` byte is presumably written on a line missing
 * from this listing — confirm against the full source. */
2247 write_element_load_block (LoadedElement *element, guint8 kind, gsize thread_id) {
2249 write_uint64 (element->load_start_counter);
2250 write_uint64 (element->load_end_counter);
2251 write_uint64 (thread_id);
2252 write_string (element->name);
2253 write_current_block (MONO_PROFILER_FILE_BLOCK_KIND_LOADED);
/* Remember that this load event has been flushed so it is not re-emitted. */
2254 element->load_written = TRUE;
/* Emits an UNLOADED block for one element, mirroring
 * write_element_load_block but using the unload counters.
 * NOTE(review): the `kind` byte is presumably written on a line missing
 * from this listing — confirm against the full source. */
2258 write_element_unload_block (LoadedElement *element, guint8 kind, gsize thread_id) {
2260 write_uint64 (element->unload_start_counter);
2261 write_uint64 (element->unload_end_counter);
2262 write_uint64 (thread_id);
2263 write_string (element->name);
2264 write_current_block (MONO_PROFILER_FILE_BLOCK_KIND_UNLOADED);
/* Mark the unload event as flushed. */
2265 element->unload_written = TRUE;
/* Writes the current raw counter and wall-clock time into the output
 * buffer; used to bracket blocks so a reader can correlate event counters
 * with real time. */
2269 write_clock_data (void) {
2273 MONO_PROFILER_GET_CURRENT_COUNTER (counter);
2274 MONO_PROFILER_GET_CURRENT_TIME (time);
2276 write_uint64 (counter);
2277 write_uint64 (time);
/* Flushes all class and method id->name mappings accumulated since the
 * last MAPPING block.  Readers need these to resolve the numeric ids used
 * by subsequent event blocks.  No-op when nothing is pending. */
2281 write_mapping_block (gsize thread_id) {
2282 ClassIdMappingElement *current_class;
2283 MethodIdMappingElement *current_method;
2285 if ((profiler->classes->unwritten == NULL) && (profiler->methods->unwritten == NULL))
2288 #if (DEBUG_MAPPING_EVENTS || DEBUG_FILE_WRITES)
2289 printf ("[write_mapping_block][TID %ld] START\n", thread_id);
2292 write_clock_data ();
2293 write_uint64 (thread_id);
/* First the classes: id followed by name; the name was strdup'ed at
 * registration time and is freed as soon as it has been written. */
2295 for (current_class = profiler->classes->unwritten; current_class != NULL; current_class = current_class->next_unwritten) {
2296 write_uint32 (current_class->id);
2297 write_string (current_class->name);
2298 #if (DEBUG_MAPPING_EVENTS)
2299 printf ("mapping CLASS (%d => %s)\n", current_class->id, current_class->name);
2301 g_free (current_class->name);
2302 current_class->name = NULL;
2305 profiler->classes->unwritten = NULL;
/* Then the methods: method id, owning class id, then the method name.
 * The class mapping must already exist (asserted below). */
2307 for (current_method = profiler->methods->unwritten; current_method != NULL; current_method = current_method->next_unwritten) {
2308 MonoMethod *method = current_method->method;
2309 MonoClass *klass = mono_method_get_class (method);
2310 ClassIdMappingElement *class_element = class_id_mapping_element_get (klass);
2311 g_assert (class_element != NULL);
2312 write_uint32 (current_method->id);
2313 write_uint32 (class_element->id);
2314 write_string (current_method->name);
2315 #if (DEBUG_MAPPING_EVENTS)
2316 printf ("mapping METHOD ([%d]%d => %s)\n", class_element?class_element->id:1, current_method->id, current_method->name);
2318 g_free (current_method->name);
2319 current_method->name = NULL;
2322 profiler->methods->unwritten = NULL;
2324 write_clock_data ();
2325 write_current_block (MONO_PROFILER_FILE_BLOCK_KIND_MAPPING);
2327 #if (DEBUG_MAPPING_EVENTS || DEBUG_FILE_WRITES)
2328 printf ("[write_mapping_block][TID %ld] END\n", thread_id);
/* On-disk event encoding: each serialized event starts with one byte whose
 * low 3 bits are one of these codes; the remaining 5 bits carry the first
 * chunk of the event's data (see the MAKE_PACKED/FULL_CODE macros below). */
2333 MONO_PROFILER_PACKED_EVENT_CODE_METHOD_ENTER = 1,
2334 MONO_PROFILER_PACKED_EVENT_CODE_METHOD_EXIT_IMPLICIT = 2,
2335 MONO_PROFILER_PACKED_EVENT_CODE_METHOD_EXIT_EXPLICIT = 3,
2336 MONO_PROFILER_PACKED_EVENT_CODE_CLASS_ALLOCATION = 4,
2337 MONO_PROFILER_PACKED_EVENT_CODE_METHOD_EVENT = 5,
2338 MONO_PROFILER_PACKED_EVENT_CODE_CLASS_EVENT = 6,
2339 MONO_PROFILER_PACKED_EVENT_CODE_OTHER_EVENT = 7
2340 } MonoProfilerPackedEventCode;
/* 3 code bits + 5 data bits = 8 bits in the leading byte. */
2341 #define MONO_PROFILER_PACKED_EVENT_CODE_BITS 3
2342 #define MONO_PROFILER_PACKED_EVENT_DATA_BITS (8-MONO_PROFILER_PACKED_EVENT_CODE_BITS)
2343 #define MONO_PROFILER_PACKED_EVENT_DATA_MASK ((1<<MONO_PROFILER_PACKED_EVENT_DATA_BITS)-1)
/* PACKED: folds the low data bits into the lead byte and shifts them out of
 * `data` (note: `data` is modified).  FULL: stores an explicit (kind,code)
 * pair above the 3 code bits instead of data. */
2345 #define MONO_PROFILER_EVENT_MAKE_PACKED_CODE(result,data,base) do {\
2346 result = ((base)|((data & MONO_PROFILER_PACKED_EVENT_DATA_MASK) << MONO_PROFILER_PACKED_EVENT_CODE_BITS));\
2347 data >>= MONO_PROFILER_PACKED_EVENT_DATA_BITS;\
2349 #define MONO_PROFILER_EVENT_MAKE_FULL_CODE(result,code,kind,base) do {\
2350 result = ((base)|((((kind)<<4) | (code)) << MONO_PROFILER_PACKED_EVENT_CODE_BITS));\
/* Re-emits the thread's last written stack section at the start of a new
 * block so each block is self-contained: a reader can reconstruct the
 * stack without consulting earlier blocks. */
2354 rewrite_last_written_stack (ProfilerThreadStack *stack) {
2356 int i = thread_stack_get_last_written_frame (stack);
2358 MONO_PROFILER_EVENT_MAKE_FULL_CODE (event_code, MONO_PROFILER_EVENT_STACK_SECTION, 0, MONO_PROFILER_PACKED_EVENT_CODE_OTHER_EVENT);
2359 WRITE_BYTE (event_code);
/* Replays the previously written frames (loop header is on a line missing
 * from this listing). */
2365 write_uint32 (thread_stack_written_frame_at_index (stack, i));
/* Serializes a STACK_SECTION event: the index of the last frame already
 * written plus the number of new frames, followed by one uint32 per frame.
 * Returns the pointer past the consumed event records.  Also updates the
 * thread's stack-cache so rewrite_last_written_stack can replay it. */
2370 static ProfilerEventData*
2371 write_stack_section_event (ProfilerEventData *events, ProfilerPerThreadData *data) {
2372 int last_saved_frame = events->data.number;
2373 int saved_frames = events->value;
2377 MONO_PROFILER_EVENT_MAKE_FULL_CODE (event_code, MONO_PROFILER_EVENT_STACK_SECTION, 0, MONO_PROFILER_PACKED_EVENT_CODE_OTHER_EVENT);
2378 WRITE_BYTE (event_code);
2379 write_uint32 (last_saved_frame);
2380 write_uint32 (saved_frames);
2381 thread_stack_set_last_written_frame (&(data->stack), last_saved_frame + saved_frames);
/* Each following event record holds one frame; the code distinguishes
 * plain callers from JIT-time callers (encoded in the low bit below). */
2384 for (i = 0; i < saved_frames; i++) {
2385 guint8 code = events->code;
2387 MethodIdMappingElement *method;
2388 guint32 frame_value;
2390 if (code == MONO_PROFILER_EVENT_METHOD_ALLOCATION_CALLER) {
2392 } else if (code == MONO_PROFILER_EVENT_METHOD_ALLOCATION_JIT_TIME_CALLER) {
2395 g_assert_not_reached ();
2399 method = method_id_mapping_element_get (events->data.address);
2400 g_assert (method != NULL);
/* Frame encoding: method id shifted left, JIT flag in bit 0. */
2401 frame_value = (method->id << 1) | jit_flag;
2402 write_uint32 (frame_value);
/* Frames arrive outermost-last; store them so index 0 is the stack bottom. */
2403 thread_stack_write_frame_at_index (&(data->stack), last_saved_frame + saved_frames - (1 + i), frame_value);
/* Serializes one in-memory event record (plus any extension records that
 * follow it) into the output buffer.  Returns the pointer past all
 * consumed records.  Extension records are used for: 64-bit overflow
 * values (value == MAX_EVENT_VALUE), the allocation caller method, and
 * the allocated object id. */
2410 static ProfilerEventData*
2411 write_event (ProfilerEventData *event, ProfilerPerThreadData *data) {
2412 ProfilerEventData *next = event + 1;
2413 gboolean write_event_value = TRUE;
2416 guint64 event_value;
2417 gboolean write_event_value_extension_1 = FALSE;
2418 guint64 event_value_extension_1 = 0;
2419 gboolean write_event_value_extension_2 = FALSE;
2420 guint64 event_value_extension_2 = 0;
/* A value of MAX_EVENT_VALUE is a sentinel: the real 64-bit value is
 * stored in the following record. */
2422 event_value = event->value;
2423 if (event_value == MAX_EVENT_VALUE) {
2424 event_value = *((guint64*)next);
/* Method events: translate the MonoMethod* into its numeric id; plain
 * enter/exit get the compact packed encoding. */
2428 if (event->data_type == MONO_PROFILER_EVENT_DATA_TYPE_METHOD) {
2429 MethodIdMappingElement *element = method_id_mapping_element_get (event->data.address);
2430 g_assert (element != NULL);
2431 event_data = element->id;
2433 if (event->code == MONO_PROFILER_EVENT_METHOD_CALL) {
2434 if (event->kind == MONO_PROFILER_EVENT_KIND_START) {
2435 MONO_PROFILER_EVENT_MAKE_PACKED_CODE (event_code, event_data, MONO_PROFILER_PACKED_EVENT_CODE_METHOD_ENTER);
2437 MONO_PROFILER_EVENT_MAKE_PACKED_CODE (event_code, event_data, MONO_PROFILER_PACKED_EVENT_CODE_METHOD_EXIT_EXPLICIT);
2440 MONO_PROFILER_EVENT_MAKE_FULL_CODE (event_code, event->code, event->kind, MONO_PROFILER_PACKED_EVENT_CODE_METHOD_EVENT);
/* Class events: translate the MonoClass* into its numeric id. */
2442 } else if (event->data_type == MONO_PROFILER_EVENT_DATA_TYPE_CLASS) {
2443 ClassIdMappingElement *element = class_id_mapping_element_get (event->data.address);
2444 g_assert (element != NULL);
2445 event_data = element->id;
2447 if (event->code == MONO_PROFILER_EVENT_CLASS_ALLOCATION) {
/* Allocations during JIT get a distinct full code; others are packed. */
2448 if ((! profiler->action_flags.save_allocation_caller) || (! (next->code == MONO_PROFILER_EVENT_METHOD_ALLOCATION_JIT_TIME_CALLER))) {
2449 MONO_PROFILER_EVENT_MAKE_PACKED_CODE (event_code, event_data, MONO_PROFILER_PACKED_EVENT_CODE_CLASS_ALLOCATION);
2451 MONO_PROFILER_EVENT_MAKE_FULL_CODE (event_code, MONO_PROFILER_EVENT_JIT_TIME_ALLOCATION, event->kind, MONO_PROFILER_PACKED_EVENT_CODE_OTHER_EVENT);
/* Extension 1: the caller method id (0 when the caller is unknown). */
2454 if (profiler->action_flags.save_allocation_caller) {
2455 MonoMethod *caller_method = next->data.address;
2457 if ((next->code != MONO_PROFILER_EVENT_METHOD_ALLOCATION_CALLER) && (next->code != MONO_PROFILER_EVENT_METHOD_ALLOCATION_JIT_TIME_CALLER)) {
2458 g_assert_not_reached ();
2461 if (caller_method != NULL) {
2462 MethodIdMappingElement *caller = method_id_mapping_element_get (caller_method);
2463 g_assert (caller != NULL);
2464 event_value_extension_1 = caller->id;
2467 write_event_value_extension_1 = TRUE;
/* Extension 2: the object id of the allocated object. */
2471 if (profiler->action_flags.allocations_carry_id) {
2472 event_value_extension_2 = GPOINTER_TO_UINT (next->data.address);
2474 if (next->code != MONO_PROFILER_EVENT_ALLOCATION_OBJECT_ID) {
2475 g_assert_not_reached ();
2478 write_event_value_extension_2 = TRUE;
2482 MONO_PROFILER_EVENT_MAKE_FULL_CODE (event_code, event->code, event->kind, MONO_PROFILER_PACKED_EVENT_CODE_CLASS_EVENT);
/* "Other" events: stack sections are delegated to the specialized writer. */
2485 if (event->code == MONO_PROFILER_EVENT_STACK_SECTION) {
2486 return write_stack_section_event (event, data);
2488 event_data = event->data.number;
2489 MONO_PROFILER_EVENT_MAKE_FULL_CODE (event_code, event->code, event->kind, MONO_PROFILER_PACKED_EVENT_CODE_OTHER_EVENT);
2493 /* Skip writing JIT events if the user did not ask for them */
2494 if ((event->code == MONO_PROFILER_EVENT_METHOD_JIT) && ! profiler->action_flags.jit_time) {
2498 #if (DEBUG_LOGGING_PROFILER)
2500 printf ("writing EVENT[%p] data_type:%d, kind:%d, code:%d (%d:%ld:%ld)\n", event,
2501 event->data_type, event->kind, event->code,
2502 event_code, event_data, event_value);
/* Emit the lead byte, then data, then the optional value/extensions. */
2505 WRITE_BYTE (event_code);
2506 write_uint64 (event_data);
2507 if (write_event_value) {
2508 write_uint64 (event_value);
2509 if (write_event_value_extension_1) {
2510 write_uint64 (event_value_extension_1);
2512 if (write_event_value_extension_2) {
2513 write_uint64 (event_value_extension_2);
/* Writes one EVENTS block containing all not-yet-written events of the
 * given thread (from first_unwritten_event up to first_unmapped_event). */
2521 write_thread_data_block (ProfilerPerThreadData *data) {
2522 ProfilerEventData *start = data->first_unwritten_event;
2523 ProfilerEventData *end = data->first_unmapped_event;
2527 #if (DEBUG_FILE_WRITES)
2528 printf ("write_thread_data_block: preparing buffer for thread %ld\n", (guint64) data->thread_id);
2530 write_clock_data ();
2531 write_uint64 (data->thread_id);
2533 write_uint64 (data->start_event_counter);
2535 /* Make sure that stack sections can be fully reconstructed even reading only one block */
2536 rewrite_last_written_stack (&(data->stack));
/* write_event consumes one or more records per call and returns the
 * pointer past them. */
2538 while (start < end) {
2539 start = write_event (start, data);
2542 data->first_unwritten_event = end;
2544 write_clock_data ();
2545 write_current_block (MONO_PROFILER_FILE_BLOCK_KIND_EVENTS);
2546 #if (DEBUG_FILE_WRITES)
2547 printf ("write_thread_data_block: buffer for thread %ld written\n", (guint64) data->thread_id);
2551 static ProfilerExecutableMemoryRegionData*
2552 profiler_executable_memory_region_new (gpointer *start, gpointer *end, guint32 file_offset, char *file_name, guint32 id) {
2553 ProfilerExecutableMemoryRegionData *result = g_new (ProfilerExecutableMemoryRegionData, 1);
2554 result->start = start;
2556 result->file_offset = file_offset;
2557 result->file_name = g_strdup (file_name);
2559 result->is_new = TRUE;
2561 result->file = NULL;
2562 result->file_region_reference = NULL;
2563 result->symbols_capacity = id;
2564 result->symbols_count = id;
2565 result->symbols = NULL;
/* Forward declaration: executable_file_close is defined further below but
 * is needed by the destructor here. */
2571 executable_file_close (ProfilerExecutableMemoryRegionData *region);
/* Releases everything owned by a region descriptor: its reference to the
 * backing executable file, its symbol table, and its copied file name. */
2574 profiler_executable_memory_region_destroy (ProfilerExecutableMemoryRegionData *data) {
2575 if (data->file != NULL) {
2576 executable_file_close (data);
2579 if (data->symbols != NULL) {
2580 g_free (data->symbols);
2581 data->symbols = NULL;
2583 if (data->file_name != NULL) {
2584 g_free (data->file_name);
2585 data->file_name = NULL;
/* Allocates an empty region collection with an initial capacity of 32.
 * The id counters are carried over from the previous collection so region
 * and unmanaged-function ids stay unique across refreshes. */
2590 static ProfilerExecutableMemoryRegions*
2591 profiler_executable_memory_regions_new (int next_id, int next_unmanaged_function_id) {
2592 ProfilerExecutableMemoryRegions *result = g_new (ProfilerExecutableMemoryRegions, 1);
2593 result->regions = g_new0 (ProfilerExecutableMemoryRegionData*, 32);
2594 result->regions_capacity = 32;
2595 result->regions_count = 0;
2596 result->next_id = next_id;
2597 result->next_unmanaged_function_id = next_unmanaged_function_id;
/* Destroys every region in the collection, then the pointer array itself. */
2602 profiler_executable_memory_regions_destroy (ProfilerExecutableMemoryRegions *regions) {
2605 for (i = 0; i < regions->regions_count; i++) {
2606 profiler_executable_memory_region_destroy (regions->regions [i]);
2608 g_free (regions->regions);
/* Binary search over the (sorted, non-overlapping) region array for the
 * region containing `address`.  Returns NULL when the address falls
 * outside all regions (checked both up front and after the loop). */
2612 static ProfilerExecutableMemoryRegionData*
2613 find_address_region (ProfilerExecutableMemoryRegions *regions, gpointer address) {
2615 int high_index = regions->regions_count;
2616 int middle_index = 0;
2617 ProfilerExecutableMemoryRegionData *middle_region = regions->regions [0];
/* Fast reject: empty array, or address below the first / above the last region. */
2619 if ((regions->regions_count == 0) || (regions->regions [low_index]->start > address) || (regions->regions [high_index - 1]->end < address)) {
2623 //printf ("find_address_region: Looking for address %p in %d regions (from %p to %p)\n", address, regions->regions_count, regions->regions [low_index]->start, regions->regions [high_index - 1]->end);
2625 while (low_index != high_index) {
2626 middle_index = low_index + ((high_index - low_index) / 2);
2627 middle_region = regions->regions [middle_index];
2629 //printf ("find_address_region: Looking for address %p, considering index %d[%p-%p] (%d-%d)\n", address, middle_index, middle_region->start, middle_region->end, low_index, high_index);
2631 if (middle_region->start > address) {
2632 if (middle_index > 0) {
2633 high_index = middle_index;
2637 } else if (middle_region->end < address) {
2638 if (middle_index < regions->regions_count - 1) {
2639 low_index = middle_index + 1;
2644 return middle_region;
/* Post-check: the loop can exit without having hit an exact match. */
2648 if ((middle_region == NULL) || (middle_region->start > address) || (middle_region->end < address)) {
2651 return middle_region;
/* Appends a newly parsed region to the collection, doubling the pointer
 * array when full, and assigns it the next free region id. */
2656 append_region (ProfilerExecutableMemoryRegions *regions, gpointer *start, gpointer *end, guint32 file_offset, char *file_name) {
2657 if (regions->regions_count >= regions->regions_capacity) {
2658 ProfilerExecutableMemoryRegionData **new_regions = g_new0 (ProfilerExecutableMemoryRegionData*, regions->regions_capacity * 2);
2659 memcpy (new_regions, regions->regions, regions->regions_capacity * sizeof (ProfilerExecutableMemoryRegionData*));
2660 g_free (regions->regions);
2661 regions->regions = new_regions;
2662 regions->regions_capacity = regions->regions_capacity * 2;
2664 regions->regions [regions->regions_count] = profiler_executable_memory_region_new (start, end, file_offset, file_name, regions->next_id);
2665 regions->regions_count ++;
2666 regions->next_id ++;
/* Two regions are equivalent when bounds, file offset and backing file
 * name all match — i.e. the same mapping observed in two scans. */
2670 regions_are_equivalent (ProfilerExecutableMemoryRegionData *region1, ProfilerExecutableMemoryRegionData *region2) {
2671 if ((region1->start == region2->start) &&
2672 (region1->end == region2->end) &&
2673 (region1->file_offset == region2->file_offset) &&
2674 ! strcmp (region1->file_name, region2->file_name)) {
/* qsort comparator: orders region pointers by ascending start address. */
2682 compare_regions (const void *a1, const void *a2) {
2683 ProfilerExecutableMemoryRegionData *r1 = * (ProfilerExecutableMemoryRegionData**) a1;
2684 ProfilerExecutableMemoryRegionData *r2 = * (ProfilerExecutableMemoryRegionData**) a2;
2685 return (r1->start < r2->start)? -1 : ((r1->start > r2->start)? 1 : 0);
/* For every new region that is equivalent to one from the previous scan,
 * swaps the descriptors so the (already processed) old descriptor lives
 * in the new collection — preserving its id, symbols and file reference —
 * while the fresh duplicate ends up in the old collection, whose
 * destruction will then free it. */
2689 restore_old_regions (ProfilerExecutableMemoryRegions *old_regions, ProfilerExecutableMemoryRegions *new_regions) {
2693 for (new_i = 0; new_i < new_regions->regions_count; new_i++) {
2694 ProfilerExecutableMemoryRegionData *new_region = new_regions->regions [new_i];
2695 for (old_i = 0; old_i < old_regions->regions_count; old_i++) {
2696 ProfilerExecutableMemoryRegionData *old_region = old_regions->regions [old_i];
2697 if ( regions_are_equivalent (old_region, new_region)) {
2698 new_regions->regions [new_i] = old_region;
2699 old_regions->regions [old_i] = new_region;
2701 // FIXME (sanity check)
2702 g_assert (new_region->is_new && ! old_region->is_new);
/* Sorts regions by start address, then removes adjacent duplicates
 * (equivalent regions).  When deduplicating, the non-new descriptor is
 * kept so state carried over from previous scans survives. */
2709 sort_regions (ProfilerExecutableMemoryRegions *regions) {
2710 if (regions->regions_count > 1) {
2713 qsort (regions->regions, regions->regions_count, sizeof (ProfilerExecutableMemoryRegionData *), compare_regions);
2716 while (i < regions->regions_count) {
2717 ProfilerExecutableMemoryRegionData *current_region = regions->regions [i];
2718 ProfilerExecutableMemoryRegionData *previous_region = regions->regions [i - 1];
2720 if (regions_are_equivalent (previous_region, current_region)) {
/* Keep whichever of the pair is not new; destroy the other. */
2723 if (! current_region->is_new) {
2724 profiler_executable_memory_region_destroy (previous_region);
2725 regions->regions [i - 1] = current_region;
2727 profiler_executable_memory_region_destroy (current_region);
/* Compact the array over the removed slot. */
2730 for (j = i + 1; j < regions->regions_count; j++) {
2731 regions->regions [j - 1] = regions->regions [j];
2734 regions->regions_count --;
/* After restore_old_regions swapped descriptors around, makes each ELF
 * section-region back-reference point at the descriptor actually stored
 * in the (new) collection. */
2743 fix_region_references (ProfilerExecutableMemoryRegions *regions) {
2745 for (i = 0; i < regions->regions_count; i++) {
2746 ProfilerExecutableMemoryRegionData *region = regions->regions [i];
2747 if (region->file_region_reference != NULL) {
2748 region->file_region_reference->region = region;
/* Walks the ELF section headers and, for every executable, loaded section
 * wholly contained in this memory region's file range, links the section
 * slot and the region to each other.  Section 0 is skipped (reserved). */
2754 executable_file_add_region_reference (ProfilerExecutableFile *file, ProfilerExecutableMemoryRegionData *region) {
2755 guint8 *section_headers = file->data + file->header->e_shoff;
2758 for (section_index = 1; section_index < file->header->e_shnum; section_index ++) {
2759 ElfSection *section_header = (ElfSection*) (section_headers + (file->header->e_shentsize * section_index));
/* Containment test: section's file span inside the region's file span. */
2761 if ((section_header->sh_addr != 0) && (section_header->sh_flags & ELF_SHF_EXECINSTR) &&
2762 (region->file_offset <= section_header->sh_offset) && (region->file_offset + (((guint8*)region->end)-((guint8*)region->start)) >= (section_header->sh_offset + section_header->sh_size))) {
2763 ProfilerExecutableFileSectionRegion *section_region = & (file->section_regions [section_index]);
2764 section_region->region = region;
2765 section_region->section_address = (gpointer) section_header->sh_addr;
2766 section_region->section_offset = section_header->sh_offset;
2767 region->file_region_reference = section_region;
/* Validates that the mapped file is an ELF image usable by this process:
 * correct magic, ELF class matching the native pointer size, and byte
 * order matching the host (probed via the 0x0102 test word). */
2772 static gboolean check_elf_header (ElfHeader* header) {
2773 guint16 test = 0x0102;
2775 if ((header->e_ident [EI_MAG0] != 0x7f) || (header->e_ident [EI_MAG1] != 'E') ||
2776 (header->e_ident [EI_MAG2] != 'L') || (header->e_ident [EI_MAG3] != 'F')) {
2780 if (sizeof (gsize) == 4) {
2781 if (header->e_ident [EI_CLASS] != ELF_CLASS_32) {
2782 g_warning ("Class is not ELF_CLASS_32 with gsize size %d", (int) sizeof (gsize));
2785 } else if (sizeof (gsize) == 8) {
2786 if (header->e_ident [EI_CLASS] != ELF_CLASS_64) {
2787 g_warning ("Class is not ELF_CLASS_64 with gsize size %d", (int) sizeof (gsize));
2791 g_warning ("Absurd gsize size %d", (int) sizeof (gsize));
/* First byte of the 0x0102 word is 0x01 on big-endian hosts. */
2795 if ((*(guint8*)(&test)) == 0x01) {
2796 if (header->e_ident [EI_DATA] != ELF_DATA_MSB) {
2797 g_warning ("Data is not ELF_DATA_MSB with first test byte 0x01");
2800 } else if ((*(guint8*)(&test)) == 0x02) {
2801 if (header->e_ident [EI_DATA] != ELF_DATA_LSB) {
2802 g_warning ("Data is not ELF_DATA_LSB with first test byte 0x02");
2806 g_warning ("Absurd test byte value");
/* Reads the ELF header from an open fd and validates it.
 * NOTE(review): the lines freeing `header` and returning are missing from
 * this listing — confirm the buffer is freed on all paths in the full
 * source. */
2813 static gboolean check_elf_file (int fd) {
2814 void *header = malloc (sizeof (ElfHeader));
2815 ssize_t read_result = read (fd, header, sizeof (ElfHeader));
2818 if (read_result != sizeof (ElfHeader)) {
2821 result = check_elf_header ((ElfHeader*) header);
/* Opens (or reuses from the per-profiler cache) the executable file
 * backing a memory region, mmaps it, locates its symbol/string tables and
 * records the region<->section cross references.  Files are shared between
 * regions via a hash table keyed by file name and reference counted. */
2828 static ProfilerExecutableFile*
2829 executable_file_open (ProfilerExecutableMemoryRegionData *region) {
2830 ProfilerExecutableFiles *files = & (profiler->executable_files);
2831 ProfilerExecutableFile *file = region->file;
/* Cache lookup first; only build a new descriptor on a miss. */
2834 file = (ProfilerExecutableFile*) g_hash_table_lookup (files->table, region->file_name);
2837 struct stat stat_buffer;
2838 int symtab_index = 0;
2839 int strtab_index = 0;
2840 int dynsym_index = 0;
2841 int dynstr_index = 0;
2843 guint8 *section_headers;
2847 file = g_new0 (ProfilerExecutableFile, 1);
2848 region->file = file;
2849 g_hash_table_insert (files->table, region->file_name, file);
2850 file->reference_count ++;
/* Newly opened files are chained so build_symbol_tables can process them. */
2851 file->next_new_file = files->new_files;
2852 files->new_files = file;
2854 file->fd = open (region->file_name, O_RDONLY);
2855 if (file->fd == -1) {
2856 //g_warning ("Cannot open file '%s': '%s'", region->file_name, strerror (errno));
2859 if (fstat (file->fd, &stat_buffer) != 0) {
2860 //g_warning ("Cannot stat file '%s': '%s'", region->file_name, strerror (errno));
2862 } else if (! check_elf_file (file->fd)) {
/* If the mapping covers the whole file we can read it in place;
 * otherwise mmap a private read-only view. */
2865 size_t region_length = ((guint8*)region->end) - ((guint8*)region->start);
2866 file->length = stat_buffer.st_size;
2868 if (file->length == region_length) {
2869 file->data = region->start;
2873 file->data = mmap (NULL, file->length, PROT_READ, MAP_PRIVATE, file->fd, 0);
2875 if (file->data == MAP_FAILED) {
2877 //g_warning ("Cannot map file '%s': '%s'", region->file_name, strerror (errno));
2885 /* OK, this is a usable elf file, and we mmapped it... */
2886 header = (ElfHeader*) file->data;
2887 file->header = header;
2888 section_headers = file->data + file->header->e_shoff;
2889 file->main_string_table = ((const char*) file->data) + (((ElfSection*) (section_headers + (header->e_shentsize * header->e_shstrndx)))->sh_offset);
/* Scan all sections for symbol tables and their string tables. */
2891 for (section_index = 0; section_index < header->e_shnum; section_index ++) {
2892 ElfSection *section_header = (ElfSection*) (section_headers + (header->e_shentsize * section_index));
2894 if (section_header->sh_type == ELF_SHT_SYMTAB) {
2895 symtab_index = section_index;
2896 } else if (section_header->sh_type == ELF_SHT_DYNSYM) {
2897 dynsym_index = section_index;
2898 } else if (section_header->sh_type == ELF_SHT_STRTAB) {
2899 if (! strcmp (file->main_string_table + section_header->sh_name, ".strtab")) {
2900 strtab_index = section_index;
2901 } else if (! strcmp (file->main_string_table + section_header->sh_name, ".dynstr")) {
2902 dynstr_index = section_index;
/* Prefer the full symtab/strtab pair; fall back to dynsym/dynstr. */
2907 if ((symtab_index != 0) && (strtab_index != 0)) {
2908 section_index = symtab_index;
2909 strings_index = strtab_index;
2910 } else if ((dynsym_index != 0) && (dynstr_index != 0)) {
2911 section_index = dynsym_index;
2912 strings_index = dynstr_index;
2918 if (section_index != 0) {
2919 ElfSection *section_header = (ElfSection*) (section_headers + (header->e_shentsize * section_index));
2920 file->symbol_size = section_header->sh_entsize;
2921 file->symbols_count = (guint32) (section_header->sh_size / section_header->sh_entsize);
2922 file->symbols_start = file->data + section_header->sh_offset;
2923 file->symbols_string_table = ((const char*) file->data) + (((ElfSection*) (section_headers + (header->e_shentsize * strings_index)))->sh_offset);
2926 file->section_regions = g_new0 (ProfilerExecutableFileSectionRegion, file->header->e_shnum);
/* Cache hit path: attach the cached file to this region too. */
2928 region->file = file;
2929 file->reference_count ++;
2933 if (file->header != NULL) {
2934 executable_file_add_region_reference (file, region);
/* Releases all OS resources held by a file descriptor struct: the open
 * fd, the mmapped view and the section-region array. */
2941 executable_file_free (ProfilerExecutableFile* file) {
2942 if (file->fd != -1) {
2943 if (close (file->fd) != 0) {
2944 g_warning ("Cannot close file: '%s'", strerror (errno));
/* Note: data may alias the region's own mapping (see executable_file_open);
 * munmap is only correct for the mmap'ed case — handled by callers/flags
 * on lines not visible in this listing. */
2946 if (file->data != NULL) {
2947 if (munmap (file->data, file->length) != 0) {
2948 g_warning ("Cannot unmap file: '%s'", strerror (errno));
2952 if (file->section_regions != NULL) {
2953 g_free (file->section_regions);
2954 file->section_regions = NULL;
/* Drops one region's reference to its backing file: clears the mutual
 * section<->region links, and when the reference count reaches zero,
 * removes the file from the cache and frees it. */
2960 executable_file_close (ProfilerExecutableMemoryRegionData *region) {
2961 region->file->reference_count --;
2963 if ((region->file_region_reference != NULL) && (region->file_region_reference->region == region)) {
2964 region->file_region_reference->region = NULL;
2965 region->file_region_reference->section_address = 0;
2966 region->file_region_reference->section_offset = 0;
2969 if (region->file->reference_count <= 0) {
2970 ProfilerExecutableFiles *files = & (profiler->executable_files);
2971 g_hash_table_remove (files->table, region->file_name);
2972 executable_file_free (region->file);
2973 region->file = NULL;
/* First pass of symbol-table construction: for every FUNC symbol that
 * lives in a section mapped to a region, bumps that region's
 * symbols_count.  Only regions whose table has not been allocated yet
 * (symbols == NULL) are counted. */
2978 executable_file_count_symbols (ProfilerExecutableFile *file) {
2981 for (symbol_index = 0; symbol_index < file->symbols_count; symbol_index ++) {
2982 ElfSymbol *symbol = (ElfSymbol*) (file->symbols_start + (symbol_index * file->symbol_size));
2984 if ((ELF_ST_TYPE (symbol->st_info) == ELF_STT_FUNC) &&
2985 (symbol->st_shndx > 0) &&
2986 (symbol->st_shndx < file->header->e_shnum)) {
2987 int symbol_section_index = symbol->st_shndx;
2988 ProfilerExecutableMemoryRegionData *region = file->section_regions [symbol_section_index].region;
2989 if ((region != NULL) && (region->symbols == NULL)) {
2990 region->symbols_count ++;
/* Between the counting and filling passes: allocates each region's symbol
 * array sized by the count from pass one, then resets symbols_count to 0
 * so the fill pass can use it as the insertion index. */
2997 executable_memory_regions_prepare_symbol_tables (ProfilerExecutableMemoryRegions *regions) {
2999 for (i = 0; i < regions->regions_count; i++) {
3000 ProfilerExecutableMemoryRegionData *region = regions->regions [i];
3001 if ((region->symbols_count > 0) && (region->symbols == NULL)) {
3002 region->symbols = g_new (ProfilerUnmanagedSymbol, region->symbols_count);
3003 region->symbols_capacity = region->symbols_count;
3004 region->symbols_count = 0;
/* Resolves a region symbol back to its name: looks the ELF symbol record
 * up by index in the backing file and indexes its string table. */
3010 executable_region_symbol_get_name (ProfilerExecutableMemoryRegionData *region, ProfilerUnmanagedSymbol *symbol) {
3011 ElfSymbol *elf_symbol = (ElfSymbol*) (region->file->symbols_start + (symbol->index * region->file->symbol_size));
3012 return region->file->symbols_string_table + elf_symbol->st_name;
/* Second pass of symbol-table construction: stores one entry per FUNC
 * symbol into its region's (pre-allocated) array, recording the symbol's
 * index, size, and its offset relative to the start of the region. */
3016 executable_file_build_symbol_tables (ProfilerExecutableFile *file) {
3019 for (symbol_index = 0; symbol_index < file->symbols_count; symbol_index ++) {
3020 ElfSymbol *symbol = (ElfSymbol*) (file->symbols_start + (symbol_index * file->symbol_size));
3022 if ((ELF_ST_TYPE (symbol->st_info) == ELF_STT_FUNC) &&
3023 (symbol->st_shndx > 0) &&
3024 (symbol->st_shndx < file->header->e_shnum)) {
3025 int symbol_section_index = symbol->st_shndx;
3026 ProfilerExecutableFileSectionRegion *section_region = & (file->section_regions [symbol_section_index]);
3027 ProfilerExecutableMemoryRegionData *region = section_region->region;
3029 if (region != NULL) {
3030 ProfilerUnmanagedSymbol *new_symbol = & (region->symbols [region->symbols_count]);
3031 region->symbols_count ++;
3034 new_symbol->index = symbol_index;
3035 new_symbol->size = symbol->st_size;
/* Translate the symbol's virtual address into an offset from the start
 * of the memory region, via the section's address/file-offset pair. */
3036 new_symbol->offset = (((guint8*) symbol->st_value) - section_region->section_address) - (region->file_offset - section_region->section_offset);
3043 compare_region_symbols (const void *p1, const void *p2) {
3044 const ProfilerUnmanagedSymbol *s1 = p1;
3045 const ProfilerUnmanagedSymbol *s2 = p2;
3046 return (s1->offset < s2->offset)? -1 : ((s1->offset > s2->offset)? 1 : 0);
/* Sorts each freshly built symbol table by offset so lookups can use
 * binary search (see executable_memory_region_find_symbol). */
3050 executable_memory_regions_sort_symbol_tables (ProfilerExecutableMemoryRegions *regions) {
3052 for (i = 0; i < regions->regions_count; i++) {
3053 ProfilerExecutableMemoryRegionData *region = regions->regions [i];
3054 if ((region->is_new) && (region->symbols != NULL)) {
3055 qsort (region->symbols, region->symbols_count, sizeof (ProfilerUnmanagedSymbol), compare_region_symbols);
/* Orchestrates symbol-table construction for all new regions:
 * open backing files, count symbols per region, allocate the tables,
 * fill them, sort them, and finally drain the new-files list. */
3061 build_symbol_tables (ProfilerExecutableMemoryRegions *regions, ProfilerExecutableFiles *files) {
3063 ProfilerExecutableFile *file;
3065 for (i = 0; i < regions->regions_count; i++) {
3066 ProfilerExecutableMemoryRegionData *region = regions->regions [i];
3067 if ((region->is_new) && (region->file == NULL)) {
3068 executable_file_open (region);
/* Pass one: count symbols per region. */
3072 for (file = files->new_files; file != NULL; file = file->next_new_file) {
3073 executable_file_count_symbols (file);
3076 executable_memory_regions_prepare_symbol_tables (regions);
/* Pass two: fill the allocated tables. */
3078 for (file = files->new_files; file != NULL; file = file->next_new_file) {
3079 executable_file_build_symbol_tables (file);
3082 executable_memory_regions_sort_symbol_tables (regions);
/* Unlink the new-files chain; the files stay cached in the hash table. */
3084 file = files->new_files;
3085 while (file != NULL) {
3086 ProfilerExecutableFile *next_file = file->next_new_file;
3087 file->next_new_file = NULL;
3090 files->new_files = NULL;
/* Binary search over the region's offset-sorted symbol table for the
 * symbol whose [offset, offset + size) range contains the given offset.
 * Returns NULL when no symbol covers it (handled on lines not visible
 * in this listing). */
3093 static ProfilerUnmanagedSymbol*
3094 executable_memory_region_find_symbol (ProfilerExecutableMemoryRegionData *region, guint32 offset) {
3095 if (region->symbols_count > 0) {
3096 ProfilerUnmanagedSymbol *low = region->symbols;
3097 ProfilerUnmanagedSymbol *high = region->symbols + (region->symbols_count - 1);
3098 int step = region->symbols_count >> 1;
3099 ProfilerUnmanagedSymbol *current = region->symbols + step;
3102 step = (high - low) >> 1;
3104 if (offset < current->offset) {
3106 current = high - step;
3107 } else if (offset >= current->offset) {
/* Past this symbol's extent: continue searching above it. */
3108 if (offset >= (current->offset + current->size)) {
3110 current = low + step;
/* Final containment check before returning the candidate. */
3117 if ((offset >= current->offset) && (offset < (current->offset + current->size))) {
3127 //FIXME: make also Win32 and BSD variants
/* Buffer sizes for parsing /proc/self/maps (Linux-specific). */
3128 #define MAPS_BUFFER_SIZE 4096
3129 #define MAPS_FILENAME_SIZE 2048
/* Refills `buffer` from fd and NUL-terminates it; short reads (and the
 * error path, on lines not visible here) leave a terminated buffer. */
3132 update_regions_buffer (int fd, char *buffer) {
3133 ssize_t result = read (fd, buffer, MAPS_BUFFER_SIZE);
3135 if (result == MAPS_BUFFER_SIZE) {
3137 } else if (result >= 0) {
3138 *(buffer + result) = 0;
/* Advances the cursor `c` through `b`, refilling the buffer from fd when
 * the cursor runs off the end or hits the NUL terminator. */
3146 #define GOTO_NEXT_CHAR(c,b,fd) do {\
3148 if (((c) - (b) >= MAPS_BUFFER_SIZE) || ((*(c) == 0) && ((c) != (b)))) {\
3149 update_regions_buffer ((fd), (b));\
/* Decodes one ASCII hex digit ('0'-'9', 'a'-'f', 'A'-'F') to its numeric
 * value 0-15; any other character yields -1.  Callers in the maps parser
 * only pass characters already validated as hex digits. */
static int hex_digit_value (char c) {
	if (('0' <= c) && (c <= '9'))
		return c - '0';
	if (('a' <= c) && (c <= 'f'))
		return 10 + (c - 'a');
	if (('A' <= c) && (c <= 'F'))
		return 10 + (c - 'A');
	return -1;
}
/* States of the /proc/self/maps line parser, in the order the fields
 * appear on a line: start-end perms offset device inode [filename]. */
3182 MAP_LINE_PARSER_STATE_INVALID,
3183 MAP_LINE_PARSER_STATE_START_ADDRESS,
3184 MAP_LINE_PARSER_STATE_END_ADDRESS,
3185 MAP_LINE_PARSER_STATE_PERMISSIONS,
3186 MAP_LINE_PARSER_STATE_OFFSET,
3187 MAP_LINE_PARSER_STATE_DEVICE,
3188 MAP_LINE_PARSER_STATE_INODE,
3189 MAP_LINE_PARSER_STATE_BLANK_BEFORE_FILENAME,
3190 MAP_LINE_PARSER_STATE_FILENAME,
3191 MAP_LINE_PARSER_STATE_DONE
3192 } MapLineParserState;
/* Human-readable state names (debug printing); must stay in sync with the
 * enum above.  Several entries are on lines missing from this listing. */
3194 const char *map_line_parser_state [] = {
3202 "BLANK_BEFORE_FILENAME",
/* Parses one line of /proc/self/maps with a character-driven state
 * machine.  Executable, file-backed mappings are appended to `regions`.
 * `current` is the cursor into `buffer`; the GOTO_NEXT_CHAR macro refills
 * the buffer from fd as needed.  Returns the updated cursor (NULL at EOF
 * — return statements are on lines missing from this listing). */
3208 parse_map_line (ProfilerExecutableMemoryRegions *regions, int fd, char *buffer, char *filename, char *current) {
3209 MapLineParserState state = MAP_LINE_PARSER_STATE_START_ADDRESS;
3210 gsize start_address = 0;
3211 gsize end_address = 0;
3213 int filename_index = 0;
3214 gboolean is_executable = FALSE;
3215 gboolean done = FALSE;
/* Hex digits accumulate into start_address until the '-' separator. */
3221 case MAP_LINE_PARSER_STATE_START_ADDRESS:
3223 start_address <<= 4;
3224 start_address |= hex_digit_value (c);
3225 } else if (c == '-') {
3226 state = MAP_LINE_PARSER_STATE_END_ADDRESS;
3228 state = MAP_LINE_PARSER_STATE_INVALID;
3231 case MAP_LINE_PARSER_STATE_END_ADDRESS:
3234 end_address |= hex_digit_value (c);
3235 } else if (isblank (c)) {
3236 state = MAP_LINE_PARSER_STATE_PERMISSIONS;
3238 state = MAP_LINE_PARSER_STATE_INVALID;
/* Only the 'x' permission matters; other flag letters are skipped. */
3241 case MAP_LINE_PARSER_STATE_PERMISSIONS:
3243 is_executable = TRUE;
3244 } else if (isblank (c)) {
3245 state = MAP_LINE_PARSER_STATE_OFFSET;
3246 } else if ((c != '-') && ! isalpha (c)) {
3247 state = MAP_LINE_PARSER_STATE_INVALID;
3250 case MAP_LINE_PARSER_STATE_OFFSET:
3253 offset |= hex_digit_value (c);
3254 } else if (isblank (c)) {
3255 state = MAP_LINE_PARSER_STATE_DEVICE;
3257 state = MAP_LINE_PARSER_STATE_INVALID;
3260 case MAP_LINE_PARSER_STATE_DEVICE:
3262 state = MAP_LINE_PARSER_STATE_INODE;
3263 } else if ((c != ':') && ! isxdigit (c)) {
3264 state = MAP_LINE_PARSER_STATE_INVALID;
3267 case MAP_LINE_PARSER_STATE_INODE:
3269 state = MAP_LINE_PARSER_STATE_BLANK_BEFORE_FILENAME;
3270 } else if (! isdigit (c)) {
3271 state = MAP_LINE_PARSER_STATE_INVALID;
/* Filenames start with '/' (paths) or '[' (pseudo entries like [vdso]). */
3274 case MAP_LINE_PARSER_STATE_BLANK_BEFORE_FILENAME:
3275 if ((c == '/') || (c == '[')) {
3276 state = MAP_LINE_PARSER_STATE_FILENAME;
3277 filename [filename_index] = *current;
3279 } else if (! isblank (c)) {
3280 state = MAP_LINE_PARSER_STATE_INVALID;
3283 case MAP_LINE_PARSER_STATE_FILENAME:
3284 if (filename_index < MAPS_FILENAME_SIZE) {
3286 state = MAP_LINE_PARSER_STATE_DONE;
3288 filename [filename_index] = 0;
3290 filename [filename_index] = *current;
3294 filename [filename_index] = 0;
3295 g_warning ("ELF filename too long: \"%s\"...\n", filename);
/* End of line: record the region only when it was marked executable. */
3298 case MAP_LINE_PARSER_STATE_DONE:
3299 if (done && is_executable) {
3300 filename [filename_index] = 0;
3301 append_region (regions, (gpointer) start_address, (gpointer) end_address, offset, filename);
3304 case MAP_LINE_PARSER_STATE_INVALID:
3306 state = MAP_LINE_PARSER_STATE_DONE;
3313 } else if (c == '\n') {
3314 state = MAP_LINE_PARSER_STATE_DONE;
3317 GOTO_NEXT_CHAR(current, buffer, fd);
/* Reads /proc/self/maps line by line and appends every executable region
 * to the collection.  Cleanup (free/close) is on lines missing from this
 * listing. */
3323 scan_process_regions (ProfilerExecutableMemoryRegions *regions) {
3329 fd = open ("/proc/self/maps", O_RDONLY);
3334 buffer = malloc (MAPS_BUFFER_SIZE);
3335 filename = malloc (MAPS_FILENAME_SIZE);
3336 update_regions_buffer (fd, buffer);
/* parse_map_line returns NULL when the maps stream is exhausted. */
3338 while (current != NULL) {
3339 current = parse_map_line (regions, fd, buffer, filename, current);
/* Sub-block codes used inside STATISTICAL blocks in the output file.
 * NOTE(review): value 6 is skipped here (REGIONS = 7) — confirm this
 * matches the file-format reader before changing anything. */
3351 MONO_PROFILER_STATISTICAL_CODE_END = 0,
3352 MONO_PROFILER_STATISTICAL_CODE_METHOD = 1,
3353 MONO_PROFILER_STATISTICAL_CODE_UNMANAGED_FUNCTION_ID = 2,
3354 MONO_PROFILER_STATISTICAL_CODE_UNMANAGED_FUNCTION_NEW_ID = 3,
3355 MONO_PROFILER_STATISTICAL_CODE_UNMANAGED_FUNCTION_OFFSET_IN_REGION = 4,
3356 MONO_PROFILER_STATISTICAL_CODE_CALL_CHAIN = 5,
3357 MONO_PROFILER_STATISTICAL_CODE_REGIONS = 7
3358 } MonoProfilerStatisticalCode;
// Rebuilds the table of executable memory regions (old ids are preserved
// via restore_old_regions), rebuilds the unmanaged symbol tables, and
// emits a REGIONS sub-block to the output file: first the ids of regions
// that disappeared, then the full description of newly seen regions.
// Runs on the writer thread (see the LOG_WRITER_THREAD trace calls).
3361 refresh_memory_regions (void) {
3362 ProfilerExecutableMemoryRegions *old_regions = profiler->executable_regions;
3363 ProfilerExecutableMemoryRegions *new_regions = profiler_executable_memory_regions_new (old_regions->next_id, old_regions->next_unmanaged_function_id);
3366 LOG_WRITER_THREAD ("Refreshing memory regions...");
// Scan /proc/self/maps, sort, carry over ids from the old table, and fix
// cross references before the old table is discarded.
3367 scan_process_regions (new_regions);
3368 sort_regions (new_regions);
3369 restore_old_regions (old_regions, new_regions);
3370 fix_region_references (new_regions);
3371 LOG_WRITER_THREAD ("Refreshed memory regions.");
3373 LOG_WRITER_THREAD ("Building symbol tables...");
3374 build_symbol_tables (new_regions, & (profiler->executable_files));
// Debug dump of the new region table and the symbols of each new region
// (this printf section is presumably under a DEBUG guard elided here).
3376 printf ("Symbol tables done!\n");
3377 printf ("Region summary...\n");
3378 for (i = 0; i < new_regions->regions_count; i++) {
3379 ProfilerExecutableMemoryRegionData *region = new_regions->regions [i];
3380 printf ("Region %d[%d][NEW:%d] (%p-%p) at %d in file %s\n", i, region->id, region->is_new,
3381 region->start, region->end, region->file_offset, region->file_name);
3383 printf ("New symbol tables dump...\n");
3384 for (i = 0; i < new_regions->regions_count; i++) {
3385 ProfilerExecutableMemoryRegionData *region = new_regions->regions [i];
3387 if (region->is_new) {
3390 printf ("Region %d[%d][NEW:%d] (%p-%p) at %d in file %s\n", i, region->id, region->is_new,
3391 region->start, region->end, region->file_offset, region->file_name);
3392 for (symbol_index = 0; symbol_index < region->symbols_count; symbol_index ++) {
3393 ProfilerUnmanagedSymbol *symbol = & (region->symbols [symbol_index]);
3394 printf (" [%d] Symbol %s (offset %d, size %d)\n", symbol_index,
3395 executable_region_symbol_get_name (region, symbol),
3396 symbol->offset, symbol->size);
3401 LOG_WRITER_THREAD ("Built symbol tables.");
3403 // This marks the region "sub-block"
3404 write_uint32 (MONO_PROFILER_STATISTICAL_CODE_REGIONS);
3406 // First write the "removed" regions
// A region still marked !is_new in the OLD table was not matched by the
// rescan, i.e. it is gone: only its id is written (invalidation record).
3407 for (i = 0; i < old_regions->regions_count; i++) {
3408 ProfilerExecutableMemoryRegionData *region = old_regions->regions [i];
3409 if (! region->is_new) {
3410 #if DEBUG_STATISTICAL_PROFILER
3411 printf ("[refresh_memory_regions] Invalidated region %d\n", region->id);
3413 write_uint32 (region->id);
3418 // Then write the new ones
3419 for (i = 0; i < new_regions->regions_count; i++) {
3420 ProfilerExecutableMemoryRegionData *region = new_regions->regions [i];
3421 if (region->is_new) {
// Clear the flag so the region is written to the file only once.
3422 region->is_new = FALSE;
3424 #if DEBUG_STATISTICAL_PROFILER
3425 printf ("[refresh_memory_regions] Wrote region %d (%p-%p[%d] '%s')\n", region->id, region->start, region->end, region->file_offset, region->file_name);
// Record format: id, start address (64 bit), size, file offset, file name.
3427 write_uint32 (region->id);
3428 write_uint64 (GPOINTER_TO_UINT (region->start));
3429 write_uint32 (GPOINTER_TO_UINT (region->end) - GPOINTER_TO_UINT (region->start));
3430 write_uint32 (region->file_offset);
3431 write_string (region->file_name);
3436 // Finally, free the old ones, and replace them
3437 profiler_executable_memory_regions_destroy (old_regions);
3438 profiler->executable_regions = new_regions;
// Writes one statistical (sampling) hit to the output file.
// If the address belongs to JITted code, a METHOD record is written;
// otherwise the hit is resolved against the unmanaged region/symbol
// tables (refreshing them at most once per batch, tracked by the
// 'regions_refreshed' flag which is returned to the caller).
3442 write_statistical_hit (MonoDomain *domain, gpointer address, gboolean regions_refreshed) {
3443 MonoJitInfo *ji = (domain != NULL) ? mono_jit_info_table_find (domain, (char*) address) : NULL;
3446 MonoMethod *method = mono_jit_info_get_method (ji);
3447 MethodIdMappingElement *element = method_id_mapping_element_get (method);
3449 if (element != NULL) {
3450 #if DEBUG_STATISTICAL_PROFILER
3451 printf ("[write_statistical_hit] Wrote method %d\n", element->id);
// Known method: id in the high bits, METHOD code in the low 3 bits.
3453 write_uint32 ((element->id << 3) | MONO_PROFILER_STATISTICAL_CODE_METHOD);
3455 #if DEBUG_STATISTICAL_PROFILER
3456 printf ("[write_statistical_hit] Wrote unknown method %p\n", method);
// Method not yet mapped: bare METHOD code (id 0) marks an unknown method.
3458 write_uint32 (MONO_PROFILER_STATISTICAL_CODE_METHOD);
// Not managed code: try the unmanaged executable region table.
3461 ProfilerExecutableMemoryRegionData *region = find_address_region (profiler->executable_regions, address);
// Unknown address: refresh the region table once (dlopen etc. may have
// mapped new code since the last scan) and retry the lookup.
3463 if (region == NULL && ! regions_refreshed) {
3464 #if DEBUG_STATISTICAL_PROFILER
3465 printf ("[write_statistical_hit] Cannot find region for address %p, refreshing...\n", address);
3467 refresh_memory_regions ();
3468 regions_refreshed = TRUE;
3469 region = find_address_region (profiler->executable_regions, address);
3472 if (region != NULL) {
3473 guint32 offset = ((guint8*)address) - ((guint8*)region->start);
3474 ProfilerUnmanagedSymbol *symbol = executable_memory_region_find_symbol (region, offset);
3476 if (symbol != NULL) {
// id > 0 means this symbol was already assigned an id and written out.
3477 if (symbol->id > 0) {
3478 #if DEBUG_STATISTICAL_PROFILER
3479 printf ("[write_statistical_hit] Wrote unmanaged symbol %d\n", symbol->id);
3481 write_uint32 ((symbol->id << 3) | MONO_PROFILER_STATISTICAL_CODE_UNMANAGED_FUNCTION_ID);
// First hit on this symbol: allocate a fresh id and emit a NEW_ID
// record carrying region id, symbol id and symbol name.
3483 ProfilerExecutableMemoryRegions *regions = profiler->executable_regions;
3484 const char *symbol_name = executable_region_symbol_get_name (region, symbol);
3485 symbol->id = regions->next_unmanaged_function_id;
3486 regions->next_unmanaged_function_id ++;
3487 #if DEBUG_STATISTICAL_PROFILER
3488 printf ("[write_statistical_hit] Wrote new unmanaged symbol in region %d[%d]\n", region->id, offset);
3490 write_uint32 ((region->id << 3) | MONO_PROFILER_STATISTICAL_CODE_UNMANAGED_FUNCTION_NEW_ID);
3491 write_uint32 (symbol->id);
3492 write_string (symbol_name);
// Region known but no symbol: record the raw offset within the region.
3495 #if DEBUG_STATISTICAL_PROFILER
3496 printf ("[write_statistical_hit] Wrote unknown unmanaged hit in region %d[%d] (address %p)\n", region->id, offset, address);
3498 write_uint32 ((region->id << 3) | MONO_PROFILER_STATISTICAL_CODE_UNMANAGED_FUNCTION_OFFSET_IN_REGION);
3499 write_uint32 (offset);
// Completely unknown address: region id 0 plus the absolute address.
3502 #if DEBUG_STATISTICAL_PROFILER
3503 printf ("[write_statistical_hit] Wrote unknown unmanaged hit %p\n", address);
3505 write_uint32 (MONO_PROFILER_STATISTICAL_CODE_UNMANAGED_FUNCTION_OFFSET_IN_REGION);
3506 write_uint64 (GPOINTER_TO_UINT (address));
// Return the (possibly updated) flag so callers refresh at most once.
3510 return regions_refreshed;
3514 flush_all_mappings (void);
// Drains the not-yet-written portion of a statistical hits buffer into a
// STATISTICAL file block: clock data, one record per hit (plus optional
// call chains), a terminating END code, clock data again.
3517 write_statistical_data_block (ProfilerStatisticalData *data) {
3518 MonoThread *current_thread = mono_thread_current ();
3519 int start_index = data->first_unwritten_index;
3520 int end_index = data->next_free_index;
3521 gboolean regions_refreshed = FALSE;
3522 int call_chain_depth = profiler->statistical_call_chain_depth;
// next_free_index can overshoot end_index when writers raced past the
// buffer end (see statistical_hit); clamp to the real capacity.
3525 if (end_index > data->end_index)
3526 end_index = data->end_index;
3528 if (start_index == end_index)
3531 data->first_unwritten_index = end_index;
3533 write_clock_data ();
3535 #if DEBUG_STATISTICAL_PROFILER
3536 printf ("[write_statistical_data_block] Starting loop at index %d\n", start_index);
// Each logical sample occupies (call_chain_depth + 1) consecutive slots:
// the leaf hit followed by its (possibly NULL-terminated) caller chain.
3539 for (index = start_index; index < end_index; index ++) {
3540 int base_index = index * (call_chain_depth + 1);
3541 ProfilerStatisticalHit hit = data->hits [base_index];
// Domain is only trusted when a managed thread is attached.
3544 regions_refreshed = write_statistical_hit ((current_thread != NULL) ? hit.domain : NULL, hit.address, regions_refreshed);
// First pass: count the callers up to the first NULL address.
// NOTE(review): indexing starts at base_index + 0, which is the slot of
// the leaf hit written just above — confirm against the elided lines
// whether callers are meant to start at base_index + 1.
3547 for (callers_count = 0; callers_count < call_chain_depth; callers_count ++) {
3548 hit = data->hits [base_index + callers_count];
3549 if (hit.address == NULL) {
// Second pass: emit a CALL_CHAIN header with the count, then the hits.
3554 if (callers_count > 0) {
3555 write_uint32 ((callers_count << 3) | MONO_PROFILER_STATISTICAL_CODE_CALL_CHAIN);
3557 for (callers_count = 0; callers_count < call_chain_depth; callers_count ++) {
3558 hit = data->hits [base_index + callers_count];
3559 if (hit.address != NULL) {
3560 regions_refreshed = write_statistical_hit ((current_thread != NULL) ? hit.domain : NULL, hit.address, regions_refreshed);
3567 write_uint32 (MONO_PROFILER_STATISTICAL_CODE_END);
3569 #if DEBUG_STATISTICAL_PROFILER
3570 printf ("[write_statistical_data_block] Ending loop at index %d\n", end_index);
3572 write_clock_data ();
3574 write_current_block (MONO_PROFILER_FILE_BLOCK_KIND_STATISTICAL);
// Writes the INTRO file block: magic string, profiler flags, and the
// start counter/time recorded when profiling began.
3578 write_intro_block (void) {
3580 write_string ("mono");
3581 write_uint32 (profiler->flags);
3582 write_uint64 (profiler->start_counter);
3583 write_uint64 (profiler->start_time);
3584 write_current_block (MONO_PROFILER_FILE_BLOCK_KIND_INTRO);
// Writes the END file block: the counter/time recorded at shutdown.
3588 write_end_block (void) {
3590 write_uint64 (profiler->end_counter);
3591 write_uint64 (profiler->end_time);
3592 write_current_block (MONO_PROFILER_FILE_BLOCK_KIND_END);
// Walks the per-thread events that have not yet been "mapped" and makes
// sure every class/method they reference has an id-mapping element, so
// the mapping block can be written before the event data that uses the
// ids.  Advances first_unmapped_event to the end of the scanned range.
3596 update_mapping (ProfilerPerThreadData *data) {
3597 ProfilerEventData *start = data->first_unmapped_event;
3598 ProfilerEventData *end = data->next_free_event;
3599 data->first_unmapped_event = end;
3601 #if (DEBUG_LOGGING_PROFILER)
3602 printf ("[update_mapping][TID %ld] START\n", data->thread_id);
3604 while (start < end) {
3605 #if DEBUG_LOGGING_PROFILER
3606 printf ("Examining event %p[TID %ld] looking for a new mapping...\n", start, data->thread_id);
// Create a class id mapping the first time a class is referenced.
3608 if (start->data_type == MONO_PROFILER_EVENT_DATA_TYPE_CLASS) {
3609 ClassIdMappingElement *element = class_id_mapping_element_get (start->data.address);
3610 if (element == NULL) {
3611 MonoClass *klass = start->data.address;
3612 class_id_mapping_element_new (klass);
// Same for methods; a NULL method pointer is tolerated and skipped.
3614 } else if (start->data_type == MONO_PROFILER_EVENT_DATA_TYPE_METHOD) {
3615 MethodIdMappingElement *element = method_id_mapping_element_get (start->data.address);
3616 if (element == NULL) {
3617 MonoMethod *method = start->data.address;
3618 if (method != NULL) {
3619 method_id_mapping_element_new (method);
// value == MAX_EVENT_VALUE means the real 64-bit value lives in the next
// event slot (an "extended value"); skip that extra slot while iterating.
3624 if (start->value == MAX_EVENT_VALUE) {
3629 #if (DEBUG_LOGGING_PROFILER)
3630 printf ("[update_mapping][TID %ld] END\n", data->thread_id);
// Updates the id mappings for every registered thread, then writes one
// mapping block per thread.  Two separate passes: all mappings must
// exist before any mapping block is emitted, because events on one
// thread can reference ids first seen on another.
3635 flush_all_mappings (void) {
3636 ProfilerPerThreadData *data;
3638 for (data = profiler->per_thread_data; data != NULL; data = data->next) {
3639 update_mapping (data);
3641 for (data = profiler->per_thread_data; data != NULL; data = data->next) {
3642 write_mapping_block (data->thread_id);
// Called when a per-thread event buffer is (nearly) full: flush all id
// mappings, write this thread's event block, and reset the buffer
// pointers and the event counter baseline.
3647 flush_full_event_data_buffer (ProfilerPerThreadData *data) {
3650 // We flush all mappings because some id definitions could come
3651 // from other threads
3652 flush_all_mappings ();
3653 g_assert (data->first_unmapped_event >= data->next_free_event);
3655 write_thread_data_block (data);
// Reset the buffer: everything written, nothing pending.
3657 data->next_free_event = data->events;
3658 data->first_unwritten_event = data->events;
3659 data->first_unmapped_event = data->events;
// Re-baseline the counter so subsequent event deltas stay small.
3660 MONO_PROFILER_GET_CURRENT_COUNTER (data->start_event_counter);
3661 data->last_event_counter = data->start_event_counter;
// RESERVE_EVENTS(d, e, count): make sure 'count' consecutive event slots
// are available in per-thread data 'd' (flushing the buffer if not) and
// point 'e' at the first reserved slot.  GET_NEXT_FREE_EVENT reserves a
// single slot.  (No comments inside the macro bodies: a line without the
// trailing backslash would terminate the #define.)
3666 /* The ">=" operator is intentional, to leave one spare slot for "extended values" */
3667 #define RESERVE_EVENTS(d,e,count) {\
3668 if ((d)->next_free_event >= ((d)->end_event - (count))) {\
3669 flush_full_event_data_buffer (d);\
3671 (e) = (d)->next_free_event;\
3672 (d)->next_free_event += (count);\
3674 #define GET_NEXT_FREE_EVENT(d,e) RESERVE_EVENTS ((d),(e),1)
// Writes out everything currently buffered: all id mappings, every
// thread's event block, and the pending statistical data.
3677 flush_everything (void) {
3678 ProfilerPerThreadData *data;
3680 flush_all_mappings ();
3681 for (data = profiler->per_thread_data; data != NULL; data = data->next) {
3682 write_thread_data_block (data);
3684 write_statistical_data_block (profiler->statistical_data);
3687 /* This assumes the lock is held: it just offloads the work to the writer thread. */
// Sets the flush request flag, wakes the writer thread, and blocks until
// the writer signals completion.  If no writer thread exists, the request
// is silently dropped (only the trace message is emitted).
3689 writer_thread_flush_everything (void) {
3690 if (CHECK_WRITER_THREAD ()) {
3691 profiler->writer_thread_flush_everything = TRUE;
3692 LOG_WRITER_THREAD ("writer_thread_flush_everything: raising event...");
3693 WRITER_EVENT_RAISE ();
3694 LOG_WRITER_THREAD ("writer_thread_flush_everything: waiting event...");
// Synchronous: wait for the writer to acknowledge the flush.
3695 WRITER_EVENT_DONE_WAIT ();
3696 LOG_WRITER_THREAD ("writer_thread_flush_everything: got event.");
3698 LOG_WRITER_THREAD ("writer_thread_flush_everything: no thread.");
// Maps a profiler result code to the SUCCESS/FAILURE bit of a "loaded"
// event record.
3702 #define RESULT_TO_LOAD_CODE(r) (((r)==MONO_PROFILE_OK)?MONO_PROFILER_LOADED_EVENT_SUCCESS:MONO_PROFILER_LOADED_EVENT_FAILURE)
// AppDomain load started: record the start in the loaded-elements table.
3704 appdomain_start_load (MonoProfiler *profiler, MonoDomain *domain) {
3706 loaded_element_load_start (profiler->loaded_appdomains, domain);
// AppDomain load finished: the domain's numeric id is used as its name,
// and an APPDOMAIN load record (with success/failure bit) is written.
3711 appdomain_end_load (MonoProfiler *profiler, MonoDomain *domain, int result) {
3713 LoadedElement *element;
3715 name = g_strdup_printf ("%d", mono_domain_get_id (domain));
3717 element = loaded_element_load_end (profiler->loaded_appdomains, domain, name);
3718 write_element_load_block (element, MONO_PROFILER_LOADED_EVENT_APPDOMAIN | RESULT_TO_LOAD_CODE (result), CURRENT_THREAD_ID ());
// AppDomain unload starting: flush buffered data first, since events may
// reference classes/methods that are about to disappear.
3723 appdomain_start_unload (MonoProfiler *profiler, MonoDomain *domain) {
3725 loaded_element_unload_start (profiler->loaded_appdomains, domain);
3726 writer_thread_flush_everything ();
// AppDomain unload finished: write the unload record.
3731 appdomain_end_unload (MonoProfiler *profiler, MonoDomain *domain) {
3732 LoadedElement *element;
3735 element = loaded_element_unload_end (profiler->loaded_appdomains, domain);
3736 write_element_unload_block (element, MONO_PROFILER_LOADED_EVENT_APPDOMAIN, CURRENT_THREAD_ID ());
// Module (MonoImage) load started.
3741 module_start_load (MonoProfiler *profiler, MonoImage *module) {
3743 loaded_element_load_start (profiler->loaded_modules, module);
// Module load finished: name it from its assembly name when available,
// otherwise fall back to a pointer-based "Dynamic module" label, then
// write the MODULE load record with the success/failure bit.
3748 module_end_load (MonoProfiler *profiler, MonoImage *module, int result) {
3750 MonoAssemblyName aname;
3751 LoadedElement *element;
3753 if (mono_assembly_fill_assembly_name (module, &aname)) {
3754 name = mono_stringify_assembly_name (&aname);
3756 name = g_strdup_printf ("Dynamic module \"%p\"", module);
3759 element = loaded_element_load_end (profiler->loaded_modules, module, name);
3760 write_element_load_block (element, MONO_PROFILER_LOADED_EVENT_MODULE | RESULT_TO_LOAD_CODE (result), CURRENT_THREAD_ID ());
// Module unload starting: flush buffered data before the image goes away.
3765 module_start_unload (MonoProfiler *profiler, MonoImage *module) {
3767 loaded_element_unload_start (profiler->loaded_modules, module);
3768 writer_thread_flush_everything ();
// Module unload finished: write the unload record.
3773 module_end_unload (MonoProfiler *profiler, MonoImage *module) {
3774 LoadedElement *element;
3777 element = loaded_element_unload_end (profiler->loaded_modules, module);
3778 write_element_unload_block (element, MONO_PROFILER_LOADED_EVENT_MODULE, CURRENT_THREAD_ID ());
// Assembly load started.
3783 assembly_start_load (MonoProfiler *profiler, MonoAssembly *assembly) {
3785 loaded_element_load_start (profiler->loaded_assemblies, assembly);
// Assembly load finished: same naming strategy as modules (stringified
// assembly name, or a pointer-based "Dynamic assembly" fallback).
3790 assembly_end_load (MonoProfiler *profiler, MonoAssembly *assembly, int result) {
3792 MonoAssemblyName aname;
3793 LoadedElement *element;
3795 if (mono_assembly_fill_assembly_name (mono_assembly_get_image (assembly), &aname)) {
3796 name = mono_stringify_assembly_name (&aname);
3798 name = g_strdup_printf ("Dynamic assembly \"%p\"", assembly);
3801 element = loaded_element_load_end (profiler->loaded_assemblies, assembly, name);
3802 write_element_load_block (element, MONO_PROFILER_LOADED_EVENT_ASSEMBLY | RESULT_TO_LOAD_CODE (result), CURRENT_THREAD_ID ());
// Assembly unload starting: flush buffered data before unload proceeds.
3807 assembly_start_unload (MonoProfiler *profiler, MonoAssembly *assembly) {
3809 loaded_element_unload_start (profiler->loaded_assemblies, assembly);
3810 writer_thread_flush_everything ();
// Assembly unload finished: write the unload record.
3814 assembly_end_unload (MonoProfiler *profiler, MonoAssembly *assembly) {
3815 LoadedElement *element;
3818 element = loaded_element_unload_end (profiler->loaded_assemblies, assembly);
3819 write_element_unload_block (element, MONO_PROFILER_LOADED_EVENT_ASSEMBLY, CURRENT_THREAD_ID ());
// Debug-only helpers: human-readable names for event codes, plus
// print_event_data which traces every stored event.  Compiled only when
// DEBUG_LOGGING_PROFILER is set; otherwise LOG_EVENT expands to nothing.
3823 #if (DEBUG_LOGGING_PROFILER)
3825 class_event_code_to_string (MonoProfilerClassEvents code) {
3827 case MONO_PROFILER_EVENT_CLASS_LOAD: return "LOAD";
3828 case MONO_PROFILER_EVENT_CLASS_UNLOAD: return "UNLOAD";
3829 case MONO_PROFILER_EVENT_CLASS_ALLOCATION: return "ALLOCATION";
3830 case MONO_PROFILER_EVENT_CLASS_EXCEPTION: return "EXCEPTION";
3831 default: g_assert_not_reached (); return "";
3835 method_event_code_to_string (MonoProfilerMethodEvents code) {
3837 case MONO_PROFILER_EVENT_METHOD_CALL: return "CALL";
3838 case MONO_PROFILER_EVENT_METHOD_JIT: return "JIT";
3839 case MONO_PROFILER_EVENT_METHOD_FREED: return "FREED";
3840 case MONO_PROFILER_EVENT_METHOD_ALLOCATION_CALLER: return "ALLOCATION_CALLER";
3841 case MONO_PROFILER_EVENT_METHOD_ALLOCATION_JIT_TIME_CALLER: return "ALLOCATION_JIT_TIME_CALLER";
3842 case MONO_PROFILER_EVENT_ALLOCATION_OBJECT_ID: return "ALLOCATION_OBJECT_ID";
3843 default: g_assert_not_reached (); return "";
3847 number_event_code_to_string (MonoProfilerEvents code) {
3849 case MONO_PROFILER_EVENT_THREAD: return "THREAD";
3850 case MONO_PROFILER_EVENT_GC_COLLECTION: return "GC_COLLECTION";
3851 case MONO_PROFILER_EVENT_GC_MARK: return "GC_MARK";
3852 case MONO_PROFILER_EVENT_GC_SWEEP: return "GC_SWEEP";
3853 case MONO_PROFILER_EVENT_GC_RESIZE: return "GC_RESIZE";
3854 case MONO_PROFILER_EVENT_GC_STOP_WORLD: return "GC_STOP_WORLD";
3855 case MONO_PROFILER_EVENT_GC_START_WORLD: return "GC_START_WORLD";
3856 case MONO_PROFILER_EVENT_JIT_TIME_ALLOCATION: return "JIT_TIME_ALLOCATION";
3857 case MONO_PROFILER_EVENT_STACK_SECTION: return "STACK_SECTION";
3858 case MONO_PROFILER_EVENT_ALLOCATION_OBJECT_ID: return "ALLOCATION_OBJECT_ID";
3859 default: g_assert_not_reached (); return "";
3863 event_result_to_string (MonoProfilerEventResult code) {
3865 case MONO_PROFILER_EVENT_RESULT_SUCCESS: return "SUCCESS";
3866 case MONO_PROFILER_EVENT_RESULT_FAILURE: return "FAILURE";
3867 default: g_assert_not_reached (); return "";
3871 event_kind_to_string (MonoProfilerEventKind code) {
3873 case MONO_PROFILER_EVENT_KIND_START: return "START";
3874 case MONO_PROFILER_EVENT_KIND_END: return "END";
3875 default: g_assert_not_reached (); return "";
// Dispatches on data_type to print class, method, or plain-number events.
3879 print_event_data (ProfilerPerThreadData *data, ProfilerEventData *event, guint64 value) {
3880 if (event->data_type == MONO_PROFILER_EVENT_DATA_TYPE_CLASS) {
3881 printf ("STORE EVENT [TID %ld][EVENT %ld] CLASS[%p] %s:%s:%s[%d-%d-%d] %ld (%s.%s)\n",
3883 event - data->events,
3884 event->data.address,
3885 class_event_code_to_string (event->code & ~MONO_PROFILER_EVENT_RESULT_MASK),
3886 event_result_to_string (event->code & MONO_PROFILER_EVENT_RESULT_MASK),
3887 event_kind_to_string (event->kind),
3892 mono_class_get_namespace ((MonoClass*) event->data.address),
3893 mono_class_get_name ((MonoClass*) event->data.address));
3894 } else if (event->data_type == MONO_PROFILER_EVENT_DATA_TYPE_METHOD) {
3895 printf ("STORE EVENT [TID %ld][EVENT %ld] METHOD[%p] %s:%s:%s[%d-%d-%d] %ld (%s.%s:%s (?))\n",
3897 event - data->events,
3898 event->data.address,
3899 method_event_code_to_string (event->code & ~MONO_PROFILER_EVENT_RESULT_MASK),
3900 event_result_to_string (event->code & MONO_PROFILER_EVENT_RESULT_MASK),
3901 event_kind_to_string (event->kind),
// Method pointer can legitimately be NULL (see update_mapping above).
3906 (event->data.address != NULL) ? mono_class_get_namespace (mono_method_get_class ((MonoMethod*) event->data.address)) : "<NULL>",
3907 (event->data.address != NULL) ? mono_class_get_name (mono_method_get_class ((MonoMethod*) event->data.address)) : "<NULL>",
3908 (event->data.address != NULL) ? mono_method_get_name ((MonoMethod*) event->data.address) : "<NULL>",
3910 printf ("STORE EVENT [TID %ld][EVENT %ld] NUMBER[%ld] %s:%s[%d-%d-%d] %ld\n",
3912 event - data->events,
3913 (guint64) event->data.number,
3914 number_event_code_to_string (event->code),
3915 event_kind_to_string (event->kind),
3922 #define LOG_EVENT(data,ev,val) print_event_data ((data),(ev),(val))
3924 #define LOG_EVENT(data,ev,val)
// Maps a profiler result code to the SUCCESS/FAILURE event-code bit.
3927 #define RESULT_TO_EVENT_CODE(r) (((r)==MONO_PROFILE_OK)?MONO_PROFILER_EVENT_RESULT_SUCCESS:MONO_PROFILER_EVENT_RESULT_FAILURE)
// STORE_EVENT_ITEM_COUNTER(event, p, i, dt, c, k): store a pointer-payload
// event ('i' in data.address) whose value is the counter delta since the
// thread's last event.  Deltas >= MAX_EVENT_VALUE don't fit in the 25-bit
// value field, so the sentinel MAX_EVENT_VALUE is stored and the full
// 64-bit delta goes in the next event slot ("extended value"; one spare
// slot is guaranteed by RESERVE_EVENTS' ">=" check).
// (No comments inside the macro bodies: a line without the trailing
// backslash would terminate the #define.)
3929 #define STORE_EVENT_ITEM_COUNTER(event,p,i,dt,c,k) do {\
3932 MONO_PROFILER_GET_CURRENT_COUNTER (counter);\
3933 (event)->data.address = (i);\
3934 (event)->data_type = (dt);\
3935 (event)->code = (c);\
3936 (event)->kind = (k);\
3937 delta = counter - data->last_event_counter;\
3938 if (delta < MAX_EVENT_VALUE) {\
3939 (event)->value = delta;\
3941 ProfilerEventData *extension = data->next_free_event;\
3942 data->next_free_event ++;\
3943 (event)->value = MAX_EVENT_VALUE;\
3944 *(guint64*)extension = delta;\
3946 data->last_event_counter = counter;\
3947 LOG_EVENT (data, (event), delta);\
// STORE_EVENT_ITEM_VALUE: like the above, but stores an explicit value 'v'
// instead of a counter delta (same extended-value overflow handling).
3949 #define STORE_EVENT_ITEM_VALUE(event,p,i,dt,c,k,v) do {\
3950 (event)->data.address = (i);\
3951 (event)->data_type = (dt);\
3952 (event)->code = (c);\
3953 (event)->kind = (k);\
3954 if ((v) < MAX_EVENT_VALUE) {\
3955 (event)->value = (v);\
3957 ProfilerEventData *extension = data->next_free_event;\
3958 data->next_free_event ++;\
3959 (event)->value = MAX_EVENT_VALUE;\
3960 *(guint64*)extension = (v);\
3962 LOG_EVENT (data, (event), (v));\
// STORE_EVENT_NUMBER_COUNTER: counter-delta variant with a numeric payload
// ('n' in data.number) instead of a pointer.
3964 #define STORE_EVENT_NUMBER_COUNTER(event,p,n,dt,c,k) do {\
3967 MONO_PROFILER_GET_CURRENT_COUNTER (counter);\
3968 (event)->data.number = (n);\
3969 (event)->data_type = (dt);\
3970 (event)->code = (c);\
3971 (event)->kind = (k);\
3972 delta = counter - data->last_event_counter;\
3973 if (delta < MAX_EVENT_VALUE) {\
3974 (event)->value = delta;\
3976 ProfilerEventData *extension = data->next_free_event;\
3977 data->next_free_event ++;\
3978 (event)->value = MAX_EVENT_VALUE;\
3979 *(guint64*)extension = delta;\
3981 data->last_event_counter = counter;\
3982 LOG_EVENT (data, (event), delta);\
// STORE_EVENT_NUMBER_VALUE: explicit-value variant with a numeric payload.
3984 #define STORE_EVENT_NUMBER_VALUE(event,p,n,dt,c,k,v) do {\
3985 (event)->data.number = (n);\
3986 (event)->data_type = (dt);\
3987 (event)->code = (c);\
3988 (event)->kind = (k);\
3989 if ((v) < MAX_EVENT_VALUE) {\
3990 (event)->value = (v);\
3992 ProfilerEventData *extension = data->next_free_event;\
3993 data->next_free_event ++;\
3994 (event)->value = MAX_EVENT_VALUE;\
3995 *(guint64*)extension = (v);\
3997 LOG_EVENT (data, (event), (v));\
// Class load/unload callbacks: each stores one timestamped (counter-delta)
// event in the calling thread's buffer.  The *_end_load variant also
// encodes the success/failure result into the event code.
4001 class_start_load (MonoProfiler *profiler, MonoClass *klass) {
4002 ProfilerPerThreadData *data;
4003 ProfilerEventData *event;
4004 GET_PROFILER_THREAD_DATA (data);
4005 GET_NEXT_FREE_EVENT (data, event);
4006 STORE_EVENT_ITEM_COUNTER (event, profiler, klass, MONO_PROFILER_EVENT_DATA_TYPE_CLASS, MONO_PROFILER_EVENT_CLASS_LOAD, MONO_PROFILER_EVENT_KIND_START);
4009 class_end_load (MonoProfiler *profiler, MonoClass *klass, int result) {
4010 ProfilerPerThreadData *data;
4011 ProfilerEventData *event;
4012 GET_PROFILER_THREAD_DATA (data);
4013 GET_NEXT_FREE_EVENT (data, event);
4014 STORE_EVENT_ITEM_COUNTER (event, profiler, klass, MONO_PROFILER_EVENT_DATA_TYPE_CLASS, MONO_PROFILER_EVENT_CLASS_LOAD | RESULT_TO_EVENT_CODE (result), MONO_PROFILER_EVENT_KIND_END);
4017 class_start_unload (MonoProfiler *profiler, MonoClass *klass) {
4018 ProfilerPerThreadData *data;
4019 ProfilerEventData *event;
4020 GET_PROFILER_THREAD_DATA (data);
4021 GET_NEXT_FREE_EVENT (data, event);
4022 STORE_EVENT_ITEM_COUNTER (event, profiler, klass, MONO_PROFILER_EVENT_DATA_TYPE_CLASS, MONO_PROFILER_EVENT_CLASS_UNLOAD, MONO_PROFILER_EVENT_KIND_START);
4025 class_end_unload (MonoProfiler *profiler, MonoClass *klass) {
4026 ProfilerPerThreadData *data;
4027 ProfilerEventData *event;
4028 GET_PROFILER_THREAD_DATA (data);
4029 GET_NEXT_FREE_EVENT (data, event);
4030 STORE_EVENT_ITEM_COUNTER (event, profiler, klass, MONO_PROFILER_EVENT_DATA_TYPE_CLASS, MONO_PROFILER_EVENT_CLASS_UNLOAD, MONO_PROFILER_EVENT_KIND_END);
// JIT start: record the event and push the method on the shadow stack as
// a "jitted" frame so allocation-caller tracking can tell JIT-time
// allocations apart from regular ones.
4034 method_start_jit (MonoProfiler *profiler, MonoMethod *method) {
4035 ProfilerPerThreadData *data;
4036 ProfilerEventData *event;
4037 GET_PROFILER_THREAD_DATA (data);
4038 GET_NEXT_FREE_EVENT (data, event);
4039 thread_stack_push_jitted_safely (&(data->stack), method, TRUE);
4040 STORE_EVENT_ITEM_COUNTER (event, profiler, method, MONO_PROFILER_EVENT_DATA_TYPE_METHOD, MONO_PROFILER_EVENT_METHOD_JIT, MONO_PROFILER_EVENT_KIND_START);
// JIT end: record the event (with result bit) and pop the shadow stack.
4043 method_end_jit (MonoProfiler *profiler, MonoMethod *method, int result) {
4044 ProfilerPerThreadData *data;
4045 ProfilerEventData *event;
4046 GET_PROFILER_THREAD_DATA (data);
4047 GET_NEXT_FREE_EVENT (data, event);
4048 STORE_EVENT_ITEM_COUNTER (event, profiler, method, MONO_PROFILER_EVENT_DATA_TYPE_METHOD, MONO_PROFILER_EVENT_METHOD_JIT | RESULT_TO_EVENT_CODE (result), MONO_PROFILER_EVENT_KIND_END);
4049 thread_stack_pop (&(data->stack));
// Reports successfully JITted native code to oprofile (HAS_OPROFILE
// builds; this body is presumably #if-guarded in the elided lines).
4054 method_jit_result (MonoProfiler *prof, MonoMethod *method, MonoJitInfo* jinfo, int result) {
4055 if (profiler->action_flags.oprofile && (result == MONO_PROFILE_OK)) {
4056 MonoClass *klass = mono_method_get_class (method);
4057 char *signature = mono_signature_get_desc (mono_method_signature (method), TRUE);
4058 char *name = g_strdup_printf ("%s.%s:%s (%s)", mono_class_get_namespace (klass), mono_class_get_name (klass), mono_method_get_name (method), signature);
4059 gpointer code_start = mono_jit_info_get_code_start (jinfo);
4060 int code_size = mono_jit_info_get_code_size (jinfo);
4062 if (op_write_native_code (name, code_start, code_size)) {
4063 g_warning ("Problem calling op_write_native_code\n");
// Method enter callback: optionally records a CALL/START event
// (track_calls) and pushes the method on the shadow stack (track_stack).
// Both are skipped entirely while the profiler is disabled.
4074 method_enter (MonoProfiler *profiler, MonoMethod *method) {
4075 ProfilerPerThreadData *data;
4077 CHECK_PROFILER_ENABLED ();
4078 GET_PROFILER_THREAD_DATA (data);
4079 if (profiler->action_flags.track_calls) {
4080 ProfilerEventData *event;
4081 GET_NEXT_FREE_EVENT (data, event);
4082 STORE_EVENT_ITEM_COUNTER (event, profiler, method, MONO_PROFILER_EVENT_DATA_TYPE_METHOD, MONO_PROFILER_EVENT_METHOD_CALL, MONO_PROFILER_EVENT_KIND_START);
4084 if (profiler->action_flags.track_stack) {
4085 thread_stack_push_safely (&(data->stack), method);
// Method leave callback: mirror of method_enter (CALL/END event, pop).
4089 method_leave (MonoProfiler *profiler, MonoMethod *method) {
4090 ProfilerPerThreadData *data;
4092 CHECK_PROFILER_ENABLED ();
4093 GET_PROFILER_THREAD_DATA (data);
4094 if (profiler->action_flags.track_calls) {
4095 ProfilerEventData *event;
4096 GET_NEXT_FREE_EVENT (data, event);
4097 STORE_EVENT_ITEM_COUNTER (event, profiler, method, MONO_PROFILER_EVENT_DATA_TYPE_METHOD, MONO_PROFILER_EVENT_METHOD_CALL, MONO_PROFILER_EVENT_KIND_END);
4099 if (profiler->action_flags.track_stack) {
4100 thread_stack_pop (&(data->stack));
// Method freed callback: records a FREED event (kind 0 — freeing has no
// start/end pairing).
4105 method_free (MonoProfiler *profiler, MonoMethod *method) {
4106 ProfilerPerThreadData *data;
4107 ProfilerEventData *event;
4108 GET_PROFILER_THREAD_DATA (data);
4109 GET_NEXT_FREE_EVENT (data, event);
4110 STORE_EVENT_ITEM_COUNTER (event, profiler, method, MONO_PROFILER_EVENT_DATA_TYPE_METHOD, MONO_PROFILER_EVENT_METHOD_FREED, 0);
// Thread start/end callbacks: record a numeric THREAD event carrying the
// OS thread id.
4114 thread_start (MonoProfiler *profiler, gsize tid) {
4115 ProfilerPerThreadData *data;
4116 ProfilerEventData *event;
4117 GET_PROFILER_THREAD_DATA (data);
4118 GET_NEXT_FREE_EVENT (data, event);
4119 STORE_EVENT_NUMBER_COUNTER (event, profiler, tid, MONO_PROFILER_EVENT_DATA_TYPE_OTHER, MONO_PROFILER_EVENT_THREAD, MONO_PROFILER_EVENT_KIND_START);
4122 thread_end (MonoProfiler *profiler, gsize tid) {
4123 ProfilerPerThreadData *data;
4124 ProfilerEventData *event;
4125 GET_PROFILER_THREAD_DATA (data);
4126 GET_NEXT_FREE_EVENT (data, event);
4127 STORE_EVENT_NUMBER_COUNTER (event, profiler, tid, MONO_PROFILER_EVENT_DATA_TYPE_OTHER, MONO_PROFILER_EVENT_THREAD, MONO_PROFILER_EVENT_KIND_END);
// Object allocation callback.  Reserves enough event slots up front for
// everything this allocation may emit (stack section + unsaved frames,
// caller, object id), then stores: optional stack-section events, the
// CLASS_ALLOCATION event with the object size, optional caller event,
// optional object-id event.
4131 object_allocated (MonoProfiler *profiler, MonoObject *obj, MonoClass *klass) {
4132 ProfilerPerThreadData *data;
4133 ProfilerEventData *events;
4135 int event_slot_count;
4137 GET_PROFILER_THREAD_DATA (data);
// Compute how many slots this allocation needs before reserving them.
4138 event_slot_count = 1;
4139 if (profiler->action_flags.save_allocation_caller) {
4140 event_slot_count ++;
4142 if (profiler->action_flags.allocations_carry_id) {
4143 event_slot_count ++;
4145 if (profiler->action_flags.save_allocation_stack) {
4146 unsaved_frames = thread_stack_count_unsaved_frames (&(data->stack));
4147 event_slot_count += (unsaved_frames + 1);
4151 RESERVE_EVENTS (data, events, event_slot_count);
// Emit the not-yet-saved portion of the shadow stack: a STACK_SECTION
// header followed by one event per frame, distinguishing frames that
// were pushed at JIT time from normal calls.
4153 if (profiler->action_flags.save_allocation_stack) {
4156 STORE_EVENT_NUMBER_VALUE (events, profiler, data->stack.last_saved_top, MONO_PROFILER_EVENT_DATA_TYPE_OTHER, MONO_PROFILER_EVENT_STACK_SECTION, 0, unsaved_frames);
4158 for (i = 0; i < unsaved_frames; i++) {
4159 if (! thread_stack_index_from_top_is_jitted (&(data->stack), i)) {
4160 STORE_EVENT_ITEM_VALUE (events, profiler, thread_stack_index_from_top (&(data->stack), i), MONO_PROFILER_EVENT_DATA_TYPE_METHOD, MONO_PROFILER_EVENT_METHOD_ALLOCATION_CALLER, 0, 0);
4162 STORE_EVENT_ITEM_VALUE (events, profiler, thread_stack_index_from_top (&(data->stack), i), MONO_PROFILER_EVENT_DATA_TYPE_METHOD, MONO_PROFILER_EVENT_METHOD_ALLOCATION_JIT_TIME_CALLER, 0, 0);
4167 data->stack.last_saved_top = data->stack.top;
// The allocation event itself: class pointer plus the object's size.
4170 STORE_EVENT_ITEM_VALUE (events, profiler, klass, MONO_PROFILER_EVENT_DATA_TYPE_CLASS, MONO_PROFILER_EVENT_CLASS_ALLOCATION, 0, (guint64) mono_object_get_size (obj));
// Remember the object itself when a later heap pass needs to inspect it.
4171 if (profiler->action_flags.unreachable_objects || profiler->action_flags.heap_shot || profiler->action_flags.collection_summary) {
4172 STORE_ALLOCATED_OBJECT (data, obj);
// Walk down past managed-to-native wrapper frames so the recorded caller
// is the real managed method that triggered the allocation.
4175 if (profiler->action_flags.save_allocation_caller) {
4176 MonoMethod *caller = thread_stack_top (&(data->stack));
4177 gboolean caller_is_jitted = thread_stack_top_is_jitted (&(data->stack));
4181 while ((caller != NULL) && (caller->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE)) {
4182 caller = thread_stack_index_from_top (&(data->stack), index);
4183 caller_is_jitted = thread_stack_index_from_top_is_jitted (&(data->stack), index);
4186 if (! caller_is_jitted) {
4187 STORE_EVENT_ITEM_VALUE (events, profiler, caller, MONO_PROFILER_EVENT_DATA_TYPE_METHOD, MONO_PROFILER_EVENT_METHOD_ALLOCATION_CALLER, 0, 0);
4189 STORE_EVENT_ITEM_VALUE (events, profiler, caller, MONO_PROFILER_EVENT_DATA_TYPE_METHOD, MONO_PROFILER_EVENT_METHOD_ALLOCATION_JIT_TIME_CALLER, 0, 0);
4192 if (profiler->action_flags.allocations_carry_id) {
4194 STORE_EVENT_ITEM_VALUE (events, profiler, obj, MONO_PROFILER_EVENT_DATA_TYPE_OTHER, MONO_PROFILER_EVENT_ALLOCATION_OBJECT_ID, 0, 0);
// Sampling callback with call-chain capture.  Atomically claims a group
// of (statistical_call_chain_depth + 1) hit slots via InterlockedIncrement,
// fills them with the sampled IPs (NULL-padding the unused tail), and —
// when this sample happens to fill the buffer — swaps in the second
// buffer and wakes the writer thread.  Samples that arrive while the
// buffer is full are dropped.
4199 statistical_call_chain (MonoProfiler *profiler, int call_chain_depth, guchar **ips, void *context) {
4200 MonoDomain *domain = mono_domain_get ();
4201 ProfilerStatisticalData *data;
4204 CHECK_PROFILER_ENABLED ();
4206 data = profiler->statistical_data;
// Returns the post-increment value, so 'index' is 1-based here.
4207 index = InterlockedIncrement ((int*) &data->next_free_index);
4209 if (index <= data->end_index) {
4210 unsigned int base_index = (index - 1) * (profiler->statistical_call_chain_depth + 1);
4211 unsigned int call_chain_index = 0;
4213 //printf ("[statistical_call_chain] (%d)\n", call_chain_depth);
// Copy the captured chain...
4214 while (call_chain_index < call_chain_depth) {
4215 ProfilerStatisticalHit *hit = & (data->hits [base_index + call_chain_index]);
4216 //printf ("[statistical_call_chain] [%d] = %p\n", base_index + call_chain_index, ips [call_chain_index]);
4217 hit->address = (gpointer) ips [call_chain_index];
4218 hit->domain = domain;
4219 call_chain_index ++;
// ...then NULL-fill the remaining slots (NULL terminates the chain for
// the writer, see write_statistical_data_block).
4221 while (call_chain_index <= profiler->statistical_call_chain_depth) {
4222 ProfilerStatisticalHit *hit = & (data->hits [base_index + call_chain_index]);
4223 //printf ("[statistical_call_chain] [%d] = NULL\n", base_index + call_chain_index);
4224 hit->address = NULL;
4226 call_chain_index ++;
4229 /* Check if we are the one that must swap the buffers */
// Exactly one sampler observes index == end_index + 1: it hands the full
// buffer to the writer and installs the spare one.
4230 if (index == data->end_index + 1) {
4231 ProfilerStatisticalData *new_data;
4233 /* In the *impossible* case that the writer thread has not finished yet, */
4234 /* loop waiting for it and meanwhile lose all statistical events... */
4236 /* First, wait that it consumed the ready buffer */
4237 while (profiler->statistical_data_ready != NULL);
4238 /* Then, wait that it produced the free buffer */
4239 new_data = profiler->statistical_data_second_buffer;
4240 } while (new_data == NULL);
4242 profiler->statistical_data_ready = data;
4243 profiler->statistical_data = new_data;
4244 profiler->statistical_data_second_buffer = NULL;
4245 WRITER_EVENT_RAISE ();
4246 /* Otherwise exit from the handler and drop the event... */
4251 /* Loop again, hoping to acquire a free slot this time (otherwise the event will be dropped) */
4254 } while (data == NULL);
// Single-IP sampling callback (no call chain): same slot-claiming and
// buffer-swap protocol as statistical_call_chain, but each sample takes
// exactly one hit slot.
4258 statistical_hit (MonoProfiler *profiler, guchar *ip, void *context) {
4259 MonoDomain *domain = mono_domain_get ();
4260 ProfilerStatisticalData *data;
4263 CHECK_PROFILER_ENABLED ();
4265 data = profiler->statistical_data;
// Post-increment value: 'index' is 1-based, hence the [index - 1] below.
4266 index = InterlockedIncrement ((int*) &data->next_free_index);
4268 if (index <= data->end_index) {
4269 ProfilerStatisticalHit *hit = & (data->hits [index - 1]);
4270 hit->address = (gpointer) ip;
4271 hit->domain = domain;
4273 /* Check if we are the one that must swap the buffers */
4274 if (index == data->end_index + 1) {
4275 ProfilerStatisticalData *new_data;
4277 /* In the *impossible* case that the writer thread has not finished yet, */
4278 /* loop waiting for it and meanwhile lose all statistical events... */
4280 /* First, wait that it consumed the ready buffer */
4281 while (profiler->statistical_data_ready != NULL);
4282 /* Then, wait that it produced the free buffer */
4283 new_data = profiler->statistical_data_second_buffer;
4284 } while (new_data == NULL);
// Publish the full buffer and wake the writer thread.
4286 profiler->statistical_data_ready = data;
4287 profiler->statistical_data = new_data;
4288 profiler->statistical_data_second_buffer = NULL;
4289 WRITER_EVENT_RAISE ();
4292 /* Loop again, hoping to acquire a free slot this time */
4295 } while (data == NULL);
/*
 * gc_event_code_from_profiler_event: maps a runtime MonoGCEvent (paired
 * START/END phases) to the single profiler-file event code used for both
 * phases of that GC stage.
 * NOTE(review): the "switch (event) {" header, default label and closing
 * braces are missing from this chunk (extraction gaps).
 */
4298 static MonoProfilerEvents
4299 gc_event_code_from_profiler_event (MonoGCEvent event) {
4301 case MONO_GC_EVENT_START:
4302 case MONO_GC_EVENT_END:
4303 return MONO_PROFILER_EVENT_GC_COLLECTION;
4304 case MONO_GC_EVENT_MARK_START:
4305 case MONO_GC_EVENT_MARK_END:
4306 return MONO_PROFILER_EVENT_GC_MARK;
4307 case MONO_GC_EVENT_RECLAIM_START:
4308 case MONO_GC_EVENT_RECLAIM_END:
4309 return MONO_PROFILER_EVENT_GC_SWEEP;
4310 case MONO_GC_EVENT_PRE_STOP_WORLD:
4311 case MONO_GC_EVENT_POST_STOP_WORLD:
4312 return MONO_PROFILER_EVENT_GC_STOP_WORLD;
4313 case MONO_GC_EVENT_PRE_START_WORLD:
4314 case MONO_GC_EVENT_POST_START_WORLD:
4315 return MONO_PROFILER_EVENT_GC_START_WORLD;
/* Unknown event values are a programming error. */
4317 g_assert_not_reached ();
/*
 * gc_event_kind_from_profiler_event: maps a MonoGCEvent to a START or END
 * marker, complementing gc_event_code_from_profiler_event (which picks the
 * stage).  PRE_* events open an interval, POST_*/ *_END events close it.
 * NOTE(review): "switch (event) {", the default label and closing braces are
 * missing from this chunk (extraction gaps).
 */
4322 static MonoProfilerEventKind
4323 gc_event_kind_from_profiler_event (MonoGCEvent event) {
4325 case MONO_GC_EVENT_START:
4326 case MONO_GC_EVENT_MARK_START:
4327 case MONO_GC_EVENT_RECLAIM_START:
4328 case MONO_GC_EVENT_PRE_STOP_WORLD:
4329 case MONO_GC_EVENT_PRE_START_WORLD:
4330 return MONO_PROFILER_EVENT_KIND_START;
4331 case MONO_GC_EVENT_END:
4332 case MONO_GC_EVENT_MARK_END:
4333 case MONO_GC_EVENT_RECLAIM_END:
4334 case MONO_GC_EVENT_POST_START_WORLD:
4335 case MONO_GC_EVENT_POST_STOP_WORLD:
4336 return MONO_PROFILER_EVENT_KIND_END;
4338 g_assert_not_reached ();
/* Maximum accepted size (bytes) of the heap-shot command file contents. */
4343 #define HEAP_SHOT_COMMAND_FILE_MAX_LENGTH 64
/*
 * profiler_heap_shot_process_command_file: polls the user-supplied command
 * file; if it changed since the last read, its integer contents replace
 * profiler->dump_next_heap_snapshots (how many snapshots to dump next).
 * NOTE(review): guard bodies (the early "return"s), the fd declaration and
 * close(fd) calls are in extraction gaps here.
 * NOTE(review): st_mtim is POSIX.1-2008 (st_mtime on older systems), the
 * tv_sec * 1000000 product can overflow 32-bit time math, and atoi() gives
 * no error reporting (strtol would) — worth confirming upstream.
 */
4345 profiler_heap_shot_process_command_file (void) {
4346 //FIXME: Port to Windows as well
4347 struct stat stat_buf;
4349 char buffer [HEAP_SHOT_COMMAND_FILE_MAX_LENGTH + 1];
/* No command file configured: nothing to do. */
4351 if (profiler->heap_shot_command_file_name == NULL)
4353 if (stat (profiler->heap_shot_command_file_name, &stat_buf) != 0)
/* Reject oversized files so the fixed buffer below cannot overflow. */
4355 if (stat_buf.st_size > HEAP_SHOT_COMMAND_FILE_MAX_LENGTH)
/* Skip files not modified since the last successful read. */
4357 if ((stat_buf.st_mtim.tv_sec * 1000000) < profiler->heap_shot_command_file_access_time)
4360 fd = open (profiler->heap_shot_command_file_name, O_RDONLY);
4364 if (read (fd, &(buffer [0]), stat_buf.st_size) != stat_buf.st_size) {
/* NUL-terminate before parsing the count. */
4367 buffer [stat_buf.st_size] = 0;
4368 profiler->dump_next_heap_snapshots = atoi (buffer);
/* Remember when we consumed the file, for the modification-time check above. */
4369 MONO_PROFILER_GET_CURRENT_TIME (profiler->heap_shot_command_file_access_time);
/*
 * dump_current_heap_snapshot: decides whether the heap should be dumped at
 * the current collection — TRUE if a snapshot was explicitly signalled, or
 * if the command file / options left a positive (counted) or negative
 * (dump-always) value in dump_next_heap_snapshots.
 * NOTE(review): the result variable, return statements and closing braces
 * are in extraction gaps here.
 */
4376 dump_current_heap_snapshot (void) {
4379 if (profiler->heap_shot_was_signalled) {
/* Re-read the command file so a freshly written count takes effect now. */
4382 profiler_heap_shot_process_command_file ();
4383 if (profiler->dump_next_heap_snapshots > 0) {
/* Positive: consume one snapshot from the budget. */
4384 profiler->dump_next_heap_snapshots--;
/* Negative: dump on every collection, without decrementing. */
4386 } else if (profiler->dump_next_heap_snapshots < 0) {
/*
 * profiler_heap_buffers_setup: initializes the heap-shot object tracking
 * structure with a single empty buffer; start/end slot pointers delimit the
 * usable slot array and first_free_slot is the write cursor.
 */
4397 profiler_heap_buffers_setup (ProfilerHeapShotHeapBuffers *heap) {
4398 heap->buffers = g_new (ProfilerHeapShotHeapBuffer, 1);
4399 heap->buffers->previous = NULL;
4400 heap->buffers->next = NULL;
4401 heap->buffers->start_slot = &(heap->buffers->buffer [0]);
/* end_slot is one-past-the-last valid slot (exclusive bound). */
4402 heap->buffers->end_slot = &(heap->buffers->buffer [PROFILER_HEAP_SHOT_HEAP_BUFFER_SIZE]);
4403 heap->last = heap->buffers;
4404 heap->current = heap->buffers;
4405 heap->first_free_slot = & (heap->buffers->buffer [0]);
/*
 * profiler_heap_buffers_clear: resets all tracking pointers to NULL without
 * freeing anything (used after ownership of the buffers has been handed off,
 * and by profiler_heap_buffers_free after it releases the chain).
 */
4408 profiler_heap_buffers_clear (ProfilerHeapShotHeapBuffers *heap) {
4409 heap->buffers = NULL;
4411 heap->current = NULL;
4412 heap->first_free_slot = NULL;
/*
 * profiler_heap_buffers_free: walks the doubly-linked buffer chain freeing
 * each node, then clears the tracking pointers.
 * NOTE(review): the g_free (current) / current = next lines fall in an
 * extraction gap between the "next" capture and the clear call.
 */
4415 profiler_heap_buffers_free (ProfilerHeapShotHeapBuffers *heap) {
4416 ProfilerHeapShotHeapBuffer *current = heap->buffers;
4417 while (current != NULL) {
/* Capture the successor before the node is released. */
4418 ProfilerHeapShotHeapBuffer *next = current->next;
4422 profiler_heap_buffers_clear (heap);
/*
 * report_object_references: scans the reference slots of an object (or of
 * one value-type array element) starting at "start", using the class layout
 * bitmap, and writes each live referenced object into the heap-shot job.
 * Returns the number of references written.
 * NOTE(review): the "} else {" separating the compact-bitmap path (slot
 * count <= CLASS_LAYOUT_PACKED_BITMAP_SIZE, inline guint64 mask) from the
 * extended-bitmap path (byte array, bit per slot) is in an extraction gap,
 * as are several closing braces.
 */
4426 report_object_references (gpointer *start, ClassIdMappingElement *layout, ProfilerHeapShotWriteJob *job) {
4427 int reported_references = 0;
4430 for (slot = 0; slot < layout->data.layout.slots; slot ++) {
4431 gboolean slot_has_reference;
4432 if (layout->data.layout.slots <= CLASS_LAYOUT_PACKED_BITMAP_SIZE) {
/* Compact form: one bit per slot packed in a single guint64. */
4433 if (layout->data.bitmap.compact & (((guint64)1) << slot)) {
4434 slot_has_reference = TRUE;
4436 slot_has_reference = FALSE;
/* Extended form: bit (slot & 7) of byte (slot >> 3). */
4439 if (layout->data.bitmap.extended [slot >> 3] & (1 << (slot & 7))) {
4440 slot_has_reference = TRUE;
4442 slot_has_reference = FALSE;
4446 if (slot_has_reference) {
4447 gpointer field = start [slot];
/* Only record references to objects that survived this collection. */
4449 if ((field != NULL) && mono_object_is_alive (field)) {
4450 reported_references ++;
4451 WRITE_HEAP_SHOT_JOB_VALUE (job, field);
4456 return reported_references;
/*
 * profiler_heap_report_object_reachable: records a still-reachable object in
 * the heap-shot write job.  Updates the per-class summary counters when a
 * summary is requested, and — when a full heap dump is active — writes the
 * object, a back-patched reference count, and all its outgoing references
 * (array elements or instance fields, via the class layout bitmaps).
 * NOTE(review): several closing braces, #endif lines for the
 * DEBUG_HEAP_PROFILER conditionals and loop-variable declarations ("int i;")
 * are in extraction gaps here.
 */
4460 profiler_heap_report_object_reachable (ProfilerHeapShotWriteJob *job, MonoObject *obj) {
4462 MonoClass *klass = mono_object_get_class (obj);
4463 ClassIdMappingElement *class_id = class_id_mapping_element_get (klass);
/* A missing class id means the mapping flush did not run: hard error. */
4464 if (class_id == NULL) {
4465 printf ("profiler_heap_report_object_reachable: class %p (%s.%s) has no id\n", klass, mono_class_get_namespace (klass), mono_class_get_name (klass));
4467 g_assert (class_id != NULL);
/* Collection-summary mode: bump per-class reachable instance/byte counters. */
4469 if (job->summary.capacity > 0) {
4470 guint32 id = class_id->id;
4471 g_assert (id < job->summary.capacity);
4473 job->summary.per_class_data [id].reachable.instances ++;
4474 job->summary.per_class_data [id].reachable.bytes += mono_object_get_size (obj);
/* Full heap dump: emit the object record and its reference list. */
4476 if (profiler->action_flags.heap_shot && job->dump_heap_data) {
4477 int reference_counter = 0;
4478 gpointer *reference_counter_location;
4480 WRITE_HEAP_SHOT_JOB_VALUE_WITH_CODE (job, obj, HEAP_CODE_OBJECT);
4481 #if DEBUG_HEAP_PROFILER
4482 printf ("profiler_heap_report_object_reachable: reported object %p at cursor %p\n", obj, (job->cursor - 1));
/* Reserve a slot for the reference count; it is back-patched below once
 * the actual number of live references is known. */
4484 WRITE_HEAP_SHOT_JOB_VALUE (job, NULL);
4485 reference_counter_location = job->cursor - 1;
/* Arrays (rank != 0): walk elements instead of instance fields. */
4487 if (mono_class_get_rank (klass)) {
4488 MonoArray *array = (MonoArray *) obj;
4489 MonoClass *element_class = mono_class_get_element_class (klass);
4490 ClassIdMappingElement *element_id = class_id_mapping_element_get (element_class);
4492 g_assert (element_id != NULL);
/* Lazily build the element class layout bitmap on first use. */
4493 if (element_id->data.layout.slots == CLASS_LAYOUT_NOT_INITIALIZED) {
4494 class_id_mapping_element_build_layout_bitmap (element_class, element_id);
/* Reference-type elements: each live element is itself a reference. */
4496 if (! mono_class_is_valuetype (element_class)) {
4497 int length = mono_array_length (array);
4499 for (i = 0; i < length; i++) {
4500 MonoObject *array_element = mono_array_get (array, MonoObject*, i);
4501 if ((array_element != NULL) && mono_object_is_alive (array_element)) {
4502 reference_counter ++;
4503 WRITE_HEAP_SHOT_JOB_VALUE (job, array_element);
/* Value-type elements containing references: scan each element's slots. */
4506 } else if (element_id->data.layout.references > 0) {
4507 int length = mono_array_length (array);
4508 int array_element_size = mono_array_element_size (klass);
4510 for (i = 0; i < length; i++) {
4511 gpointer array_element_address = mono_array_addr_with_size (array, array_element_size, i);
4512 reference_counter += report_object_references (array_element_address, element_id, job);
/* Plain objects: scan instance fields (data starts after the MonoObject
 * header). */
4516 if (class_id->data.layout.slots == CLASS_LAYOUT_NOT_INITIALIZED) {
4517 class_id_mapping_element_build_layout_bitmap (klass, class_id);
4519 if (class_id->data.layout.references > 0) {
4520 reference_counter += report_object_references ((gpointer)(((char*)obj) + sizeof (MonoObject)), class_id, job);
/* Back-patch the reserved slot with the final reference count. */
4524 *reference_counter_location = GINT_TO_POINTER (reference_counter);
4525 #if DEBUG_HEAP_PROFILER
4526 printf ("profiler_heap_report_object_reachable: updated reference_counter_location %p with value %d\n", reference_counter_location, reference_counter);
/*
 * profiler_heap_report_object_unreachable: records a garbage object in the
 * heap-shot write job — per-class unreachable counters when a summary is
 * requested, and a (class, size) pair when unreachable-object dumping is on.
 * NOTE(review): the printf below says "profiler_heap_report_object_reachable"
 * — copy-paste from the sibling function; the message should name this
 * function.  (String left untouched here; fix upstream.)
 * NOTE(review): the "guint32 id = class_id->id;" line, #endif lines and some
 * closing braces are in extraction gaps.
 */
4532 profiler_heap_report_object_unreachable (ProfilerHeapShotWriteJob *job, MonoObject *obj) {
4534 MonoClass *klass = mono_object_get_class (obj);
4535 guint32 size = mono_object_get_size (obj);
4537 if (job->summary.capacity > 0) {
4538 ClassIdMappingElement *class_id = class_id_mapping_element_get (klass);
4541 if (class_id == NULL) {
4542 printf ("profiler_heap_report_object_reachable: class %p (%s.%s) has no id\n", klass, mono_class_get_namespace (klass), mono_class_get_name (klass));
4544 g_assert (class_id != NULL);
4546 g_assert (id < job->summary.capacity);
4548 job->summary.per_class_data [id].unreachable.instances ++;
4549 job->summary.per_class_data [id].unreachable.bytes += size;
/* Dump mode: emit a FREE_OBJECT_CLASS record followed by the object size. */
4551 if (profiler->action_flags.unreachable_objects && job->dump_heap_data) {
4552 #if DEBUG_HEAP_PROFILER
4553 printf ("profiler_heap_report_object_unreachable: at job %p writing klass %p\n", job, klass);
4555 WRITE_HEAP_SHOT_JOB_VALUE_WITH_CODE (job, klass, HEAP_CODE_FREE_OBJECT_CLASS);
4557 #if DEBUG_HEAP_PROFILER
4558 printf ("profiler_heap_report_object_unreachable: at job %p writing size %p\n", job, GUINT_TO_POINTER (size));
4560 WRITE_HEAP_SHOT_JOB_VALUE (job, GUINT_TO_POINTER (size));
/*
 * profiler_heap_add_object: appends a live object to the heap tracking
 * buffers (growing the chain with a new buffer when the current one is
 * full) and reports it as reachable in the current write job.
 */
4566 profiler_heap_add_object (ProfilerHeapShotHeapBuffers *heap, ProfilerHeapShotWriteJob *job, MonoObject *obj) {
/* Current buffer exhausted: advance to the next one, allocating if needed. */
4567 if (heap->first_free_slot >= heap->current->end_slot) {
4568 if (heap->current->next != NULL) {
4569 heap->current = heap->current->next;
/* No spare buffer: allocate one and link it at the tail of the chain. */
4571 ProfilerHeapShotHeapBuffer *buffer = g_new (ProfilerHeapShotHeapBuffer, 1);
4572 buffer->previous = heap->last;
4573 buffer->next = NULL;
4574 buffer->start_slot = &(buffer->buffer [0]);
4575 buffer->end_slot = &(buffer->buffer [PROFILER_HEAP_SHOT_HEAP_BUFFER_SIZE]);
4576 heap->current = buffer;
4577 heap->last->next = buffer;
4578 heap->last = buffer;
4580 heap->first_free_slot = &(heap->current->buffer [0]);
/* Store the object and advance the write cursor. */
4583 *(heap->first_free_slot) = obj;
4584 heap->first_free_slot ++;
4585 profiler_heap_report_object_reachable (job, obj);
/*
 * profiler_heap_pop_object_from_end: pops objects off the end of the heap
 * tracking buffers until a live one is found (reporting each popped object
 * as reachable or unreachable along the way); used by profiler_heap_scan to
 * back-fill a slot whose object died.  Stops when the cursor reaches
 * current_slot.
 * NOTE(review): the return statements (live object / NULL) and some braces
 * are in extraction gaps here.
 */
4589 profiler_heap_pop_object_from_end (ProfilerHeapShotHeapBuffers *heap, ProfilerHeapShotWriteJob *job, MonoObject** current_slot) {
4590 while (heap->first_free_slot != current_slot) {
/* Step the cursor back one slot, crossing to the previous buffer when the
 * current buffer's start is reached. */
4593 if (heap->first_free_slot > heap->current->start_slot) {
4594 heap->first_free_slot --;
4596 heap->current = heap->current->previous;
4597 g_assert (heap->current != NULL);
4598 heap->first_free_slot = heap->current->end_slot - 1;
4601 obj = *(heap->first_free_slot);
4603 if (mono_object_is_alive (obj)) {
4604 profiler_heap_report_object_reachable (job, obj);
4607 profiler_heap_report_object_unreachable (job, obj);
/*
 * profiler_heap_scan: walks every tracked object after a collection.  Live
 * objects are reported reachable; dead ones are reported unreachable and
 * their slot is compacted by pulling a live object from the end of the
 * buffers (profiler_heap_pop_object_from_end), keeping the slot array dense.
 * NOTE(review): the slot-advance statement and closing braces are in
 * extraction gaps here.
 */
4614 profiler_heap_scan (ProfilerHeapShotHeapBuffers *heap, ProfilerHeapShotWriteJob *job) {
4615 ProfilerHeapShotHeapBuffer *current_buffer = heap->buffers;
4616 MonoObject** current_slot = current_buffer->start_slot;
4618 while (current_slot != heap->first_free_slot) {
4619 MonoObject *obj = *current_slot;
4620 if (mono_object_is_alive (obj)) {
4621 profiler_heap_report_object_reachable (job, obj);
4623 profiler_heap_report_object_unreachable (job, obj);
/* Dead slot: back-fill it with a live object popped from the end. */
4624 *current_slot = profiler_heap_pop_object_from_end (heap, job, current_slot);
/* Only advance past slots that now hold a live object. */
4627 if (*current_slot != NULL) {
/* End of this buffer: continue scanning in the next one. */
4630 if (current_slot == current_buffer->end_slot) {
4631 current_buffer = current_buffer->next;
4632 g_assert (current_buffer != NULL);
4633 current_slot = current_buffer->start_slot;
/*
 * heap_shot_write_job_should_be_created: TRUE when this collection must
 * produce a write job — i.e. a heap dump was requested, or unreachable
 * objects or a per-class collection summary are being recorded.
 */
4639 static inline gboolean
4640 heap_shot_write_job_should_be_created (gboolean dump_heap_data) {
4641 return dump_heap_data || profiler->action_flags.unreachable_objects || profiler->action_flags.collection_summary;
/*
 * handle_heap_profiling: per-GC-phase heap profiling driver.
 * PRE_STOP_WORLD takes the profiler lock; POST_STOP_WORLD decides whether to
 * dump and flushes mappings/event buffers; MARK_END builds the write job,
 * scans the heap and newly allocated objects, then wakes the writer thread.
 * dump_heap_data is static on purpose: the decision made at POST_STOP_WORLD
 * must still be visible when the same function runs again at MARK_END.
 * NOTE(review): the "switch (ev) {", break statements, LOCK/UNLOCK calls and
 * several braces are in extraction gaps here.
 */
4645 handle_heap_profiling (MonoProfiler *profiler, MonoGCEvent ev) {
4646 static gboolean dump_heap_data;
4649 case MONO_GC_EVENT_PRE_STOP_WORLD:
4650 // Get the lock, so we are sure nobody is flushing events during the collection,
4651 // and we can update all mappings (building the class descriptors).
4654 case MONO_GC_EVENT_POST_STOP_WORLD:
4655 dump_heap_data = dump_current_heap_snapshot ();
4656 if (heap_shot_write_job_should_be_created (dump_heap_data)) {
4657 ProfilerPerThreadData *data;
4658 // Update all mappings, so that we have built all the class descriptors.
4659 flush_all_mappings ();
4660 // Also write all event buffers, so that allocations are recorded.
4661 for (data = profiler->per_thread_data; data != NULL; data = data->next) {
4662 write_thread_data_block (data);
4668 case MONO_GC_EVENT_MARK_END: {
4669 ProfilerHeapShotWriteJob *job;
4670 ProfilerPerThreadData *data;
4672 if (heap_shot_write_job_should_be_created (dump_heap_data)) {
4673 job = profiler_heap_shot_write_job_new (profiler->heap_shot_was_signalled, dump_heap_data, profiler->garbage_collection_counter);
4674 profiler->heap_shot_was_signalled = FALSE;
4675 MONO_PROFILER_GET_CURRENT_COUNTER (job->start_counter);
4676 MONO_PROFILER_GET_CURRENT_TIME (job->start_time);
/* Classify every object already tracked in the heap buffers. */
4681 profiler_heap_scan (&(profiler->heap), job);
/* Then classify objects allocated since the last scan, which are still
 * sitting in the per-thread allocation buffers. */
4683 for (data = profiler->per_thread_data; data != NULL; data = data->next) {
4684 ProfilerHeapShotObjectBuffer *buffer;
4685 for (buffer = data->heap_shot_object_buffers; buffer != NULL; buffer = buffer->next) {
4686 MonoObject **cursor;
4687 for (cursor = buffer->first_unprocessed_slot; cursor < buffer->next_free_slot; cursor ++) {
4688 MonoObject *obj = *cursor;
4689 #if DEBUG_HEAP_PROFILER
4690 printf ("gc_event: in object buffer %p(%p-%p) cursor at %p has object %p ", buffer, &(buffer->buffer [0]), buffer->end, cursor, obj);
4692 if (mono_object_is_alive (obj)) {
4693 #if DEBUG_HEAP_PROFILER
4694 printf ("(object is alive, adding to heap)\n");
4696 profiler_heap_add_object (&(profiler->heap), job, obj);
4698 #if DEBUG_HEAP_PROFILER
4699 printf ("(object is unreachable, reporting in job)\n");
4701 profiler_heap_report_object_unreachable (job, obj);
/* Mark this buffer's contents as consumed. */
4704 buffer->first_unprocessed_slot = cursor;
4709 MONO_PROFILER_GET_CURRENT_COUNTER (job->end_counter);
4710 MONO_PROFILER_GET_CURRENT_TIME (job->end_time);
/* Queue the finished job and wake the writer thread to flush it. */
4712 profiler_add_heap_shot_write_job (job);
4713 profiler_free_heap_shot_write_jobs ();
4714 WRITER_EVENT_RAISE ();
/*
 * gc_event: profiler callback for GC phase notifications.  Records the
 * event (collection counter in the upper bits, generation in the low byte)
 * and dispatches to handle_heap_profiling when heap data is being tracked.
 * Ordering: heap handling runs BEFORE the event record for POST_STOP_WORLD
 * (so mappings are flushed first) and AFTER it for every other phase —
 * see the two mirrored conditionals below.
 */
4724 gc_event (MonoProfiler *profiler, MonoGCEvent ev, int generation) {
4725 ProfilerPerThreadData *data;
4726 ProfilerEventData *event;
4727 gboolean do_heap_profiling = profiler->action_flags.unreachable_objects || profiler->action_flags.heap_shot || profiler->action_flags.collection_summary;
4728 guint32 event_value;
4730 if (ev == MONO_GC_EVENT_START) {
4731 profiler->garbage_collection_counter ++;
/* Pack collection number and generation into one event value. */
4734 event_value = (profiler->garbage_collection_counter << 8) | generation;
4736 if (do_heap_profiling && (ev == MONO_GC_EVENT_POST_STOP_WORLD)) {
4737 handle_heap_profiling (profiler, ev);
4740 GET_PROFILER_THREAD_DATA (data);
4741 GET_NEXT_FREE_EVENT (data, event);
4742 STORE_EVENT_NUMBER_COUNTER (event, profiler, event_value, MONO_PROFILER_EVENT_DATA_TYPE_OTHER, gc_event_code_from_profiler_event (ev), gc_event_kind_from_profiler_event (ev));
4744 if (do_heap_profiling && (ev != MONO_GC_EVENT_POST_STOP_WORLD)) {
4745 handle_heap_profiling (profiler, ev);
/*
 * gc_resize: profiler callback fired when the GC heap is resized; logs the
 * new heap size tagged with the current collection counter.
 * NOTE(review): incrementing garbage_collection_counter here (as well as in
 * gc_event's START path) means resizes consume collection numbers — matches
 * the original code, but worth confirming it is intentional.
 */
4750 gc_resize (MonoProfiler *profiler, gint64 new_size) {
4751 ProfilerPerThreadData *data;
4752 ProfilerEventData *event;
4753 GET_PROFILER_THREAD_DATA (data);
4754 GET_NEXT_FREE_EVENT (data, event);
4755 profiler->garbage_collection_counter ++;
4756 STORE_EVENT_NUMBER_VALUE (event, profiler, new_size, MONO_PROFILER_EVENT_DATA_TYPE_OTHER, MONO_PROFILER_EVENT_GC_RESIZE, 0, profiler->garbage_collection_counter);
/*
 * runtime_initialized: callback invoked once the runtime is fully up.
 * Enables the writer thread (handshake: raise the enable event, then wait
 * for the thread's done signal) and registers the managed internal calls
 * that let Mono.Profiler.RuntimeControls drive the profiler.
 */
4760 runtime_initialized (MonoProfiler *profiler) {
4761 LOG_WRITER_THREAD ("runtime_initialized: waking writer thread to enable it...\n");
4762 WRITER_EVENT_ENABLE_RAISE ();
4763 LOG_WRITER_THREAD ("runtime_initialized: waiting writer thread...\n");
4764 WRITER_EVENT_DONE_WAIT ();
4765 LOG_WRITER_THREAD ("runtime_initialized: writer thread enabled.\n");
4766 mono_add_internal_call ("Mono.Profiler.RuntimeControls::EnableProfiler", enable_profiler);
4767 mono_add_internal_call ("Mono.Profiler.RuntimeControls::DisableProfiler", disable_profiler);
4768 mono_add_internal_call ("Mono.Profiler.RuntimeControls::TakeHeapSnapshot", request_heap_snapshot);
4769 LOG_WRITER_THREAD ("runtime_initialized: initialized internal calls.\n");
4772 /* called at the end of the program */
/*
 * profiler_shutdown: tears the profiler down — stops event delivery, joins
 * the writer thread, flushes remaining data, records end timestamps, and
 * frees every owned structure (mappings, hash tables, per-thread data,
 * statistical buffers, heap buffers, write buffers).
 * NOTE(review): the NULL checks before g_free below are redundant —
 * g_free(NULL) is a no-op — but harmless.
 * NOTE(review): several lines (LOCK/UNLOCK, file close, oprofile shutdown
 * body and closing braces) are in extraction gaps here.
 */
4774 profiler_shutdown (MonoProfiler *prof)
4776 ProfilerPerThreadData* current_thread_data;
4777 ProfilerPerThreadData* next_thread_data;
4779 LOG_WRITER_THREAD ("profiler_shutdown: zeroing relevant flags");
/* Unhook all profiler callbacks before tearing state down. */
4780 mono_profiler_set_events (0);
4781 //profiler->flags = 0;
4782 //profiler->action_flags.unreachable_objects = FALSE;
4783 //profiler->action_flags.heap_shot = FALSE;
4785 LOG_WRITER_THREAD ("profiler_shutdown: asking stats thread to exit");
4786 profiler->terminate_writer_thread = TRUE;
4787 WRITER_EVENT_RAISE ();
4788 LOG_WRITER_THREAD ("profiler_shutdown: waiting for stats thread to exit");
4789 WAIT_WRITER_THREAD ();
4790 LOG_WRITER_THREAD ("profiler_shutdown: stats thread should be dead now");
4791 WRITER_EVENT_DESTROY ();
/* Final flush and end-of-run timestamps. */
4794 flush_everything ();
4795 MONO_PROFILER_GET_CURRENT_TIME (profiler->end_time);
4796 MONO_PROFILER_GET_CURRENT_COUNTER (profiler->end_counter);
4802 g_free (profiler->file_name);
4803 if (profiler->file_name_suffix != NULL) {
4804 g_free (profiler->file_name_suffix);
4807 method_id_mapping_destroy (profiler->methods);
4808 class_id_mapping_destroy (profiler->classes);
4809 g_hash_table_destroy (profiler->loaded_assemblies);
4810 g_hash_table_destroy (profiler->loaded_modules);
4811 g_hash_table_destroy (profiler->loaded_appdomains);
4813 FREE_PROFILER_THREAD_DATA ();
4815 for (current_thread_data = profiler->per_thread_data; current_thread_data != NULL; current_thread_data = next_thread_data) {
/* Capture next before the node is destroyed. */
4816 next_thread_data = current_thread_data->next;
4817 profiler_per_thread_data_destroy (current_thread_data);
4819 if (profiler->statistical_data != NULL) {
4820 profiler_statistical_data_destroy (profiler->statistical_data);
4822 if (profiler->statistical_data_ready != NULL) {
4823 profiler_statistical_data_destroy (profiler->statistical_data_ready);
4825 if (profiler->statistical_data_second_buffer != NULL) {
4826 profiler_statistical_data_destroy (profiler->statistical_data_second_buffer);
4828 if (profiler->executable_regions != NULL) {
4829 profiler_executable_memory_regions_destroy (profiler->executable_regions);
4832 profiler_heap_buffers_free (&(profiler->heap));
4833 if (profiler->heap_shot_command_file_name != NULL) {
4834 g_free (profiler->heap_shot_command_file_name);
4837 profiler_free_write_buffers ();
4838 profiler_destroy_heap_shot_write_jobs ();
4840 DELETE_PROFILER_MUTEX ();
/* Detach from oprofile if it was enabled via the "oprofile" option. */
4843 if (profiler->action_flags.oprofile) {
4852 #ifndef PLATFORM_WIN32
/*
 * parse_signal_name: maps the symbolic names "SIGUSR1"/"SIGUSR2"/"SIGPROF"
 * (case-insensitive) to their signal numbers; anything else is parsed as a
 * numeric signal with atoi (0 on parse failure, which callers treat as
 * "no signal").  The return statements for the named branches are in
 * extraction gaps here.
 */
4854 parse_signal_name (const char *signal_name) {
4855 if (! strcasecmp (signal_name, "SIGUSR1")) {
4857 } else if (! strcasecmp (signal_name, "SIGUSR2")) {
4859 } else if (! strcasecmp (signal_name, "SIGPROF")) {
4862 return atoi (signal_name);
/*
 * check_signal_number: validates a user-chosen signal for profiler use.
 * SIGPROF is only acceptable when the statistical (sampling) profiler is
 * NOT active (it owns SIGPROF); SIGUSR1/SIGUSR2 are always candidates.
 * The TRUE/FALSE returns fall in an extraction gap after the condition.
 */
4866 check_signal_number (int signal_number) {
4867 if (((signal_number == SIGPROF) && ! (profiler->flags & MONO_PROFILE_STATISTICAL)) ||
4868 (signal_number == SIGUSR1) ||
4869 (signal_number == SIGUSR2)) {
/*
 * Argument-parsing helper macros for setup_user_options.  They assume the
 * locals "failure_message", "has_minus" and the label "failure_handling"
 * are in scope at the expansion site.
 * NOTE(review): the "} while (0)" terminators of the do{...} macros are in
 * extraction gaps here; no comments are inserted between the backslash
 * continuation lines to avoid breaking the macro bodies further.
 */
4877 #define FAIL_ARGUMENT_CHECK(message) do {\
4878 failure_message = (message);\
4879 goto failure_handling;\
/* Shorthand failures for "name=value" and bare flag arguments. */
4881 #define FAIL_PARSING_VALUED_ARGUMENT FAIL_ARGUMENT_CHECK("cannot parse valued argument %s")
4882 #define FAIL_PARSING_FLAG_ARGUMENT FAIL_ARGUMENT_CHECK("cannot parse flag argument %s")
4883 #define CHECK_CONDITION(condition,message) do {\
4884 gboolean result = (condition);\
4886 FAIL_ARGUMENT_CHECK (message);\
/* Reject a '-' prefix where it makes no sense (valued arguments). */
4889 #define FAIL_IF_HAS_MINUS CHECK_CONDITION(has_minus,"minus ('-') modifier not allowed for argument %s")
/* TRUE unless the argument carried a leading '-' (i.e. "disable"). */
4890 #define TRUE_IF_NOT_MINUS ((!has_minus)?TRUE:FALSE)
/* Options used when none are supplied: just "s" (statistical profiling). */
4892 #define DEFAULT_ARGUMENTS "s"
/*
 * setup_user_options: parses the comma-separated profiler option string
 * (everything after the first ':'), fills in profiler defaults, registers
 * optional signal handlers, reconciles action flags with the MONO_PROFILE_*
 * event mask, and derives the output file name.
 * NOTE(review): many interior lines (the has_plus/has_minus handling, value
 * range checks, #endif lines, else branches and closing braces) are in
 * extraction gaps throughout this function.
 */
4894 setup_user_options (const char *arguments) {
4895 gchar **arguments_array, **current_argument;
4896 #ifndef PLATFORM_WIN32
4897 int gc_request_signal_number = 0;
4898 int toggle_signal_number = 0;
4900 detect_fast_timer ();
/* Defaults; individual options below override these. */
4902 profiler->file_name = NULL;
4903 profiler->file_name_suffix = NULL;
4904 profiler->per_thread_buffer_size = 10000;
4905 profiler->statistical_buffer_size = 10000;
4906 profiler->statistical_call_chain_depth = 0;
4907 profiler->write_buffer_size = 1024;
4908 profiler->heap_shot_command_file_name = NULL;
4909 profiler->dump_next_heap_snapshots = 0;
4910 profiler->heap_shot_command_file_access_time = 0;
4911 profiler->heap_shot_was_signalled = FALSE;
4912 profiler->flags = MONO_PROFILE_APPDOMAIN_EVENTS|
4913 MONO_PROFILE_ASSEMBLY_EVENTS|
4914 MONO_PROFILE_MODULE_EVENTS|
4915 MONO_PROFILE_CLASS_EVENTS|
4916 MONO_PROFILE_METHOD_EVENTS|
4917 MONO_PROFILE_JIT_COMPILATION;
4918 profiler->profiler_enabled = TRUE;
/* The option string is everything after the first ':' (e.g.
 * "--profile=logging:opts"); empty or absent means DEFAULT_ARGUMENTS. */
4920 if (arguments == NULL) {
4921 arguments = DEFAULT_ARGUMENTS;
4922 } else if (strstr (arguments, ":")) {
4923 arguments = strstr (arguments, ":") + 1;
4924 if (arguments [0] == 0) {
4925 arguments = DEFAULT_ARGUMENTS;
4929 arguments_array = g_strsplit (arguments, ",", -1);
4931 for (current_argument = arguments_array; ((current_argument != NULL) && (current_argument [0] != 0)); current_argument ++) {
4932 char *argument = *current_argument;
4933 char *equals = strstr (argument, "=");
4934 const char *failure_message = NULL;
/* Leading '+'/'-' modifiers enable/disable the option; the lines that set
 * has_plus/has_minus and advance past the modifier are in a gap here. */
4938 if (*argument == '+') {
4942 } else if (*argument == '-') {
/* "name=value" options. */
4951 if (equals != NULL) {
4952 int equals_position = equals - argument;
4954 if (! (strncmp (argument, "per-thread-buffer-size", equals_position) && strncmp (argument, "tbs", equals_position))) {
4955 int value = atoi (equals + 1);
4958 profiler->per_thread_buffer_size = value;
4960 } else if (! (strncmp (argument, "statistical", equals_position) && strncmp (argument, "stat", equals_position) && strncmp (argument, "s", equals_position))) {
4961 int value = atoi (equals + 1);
/* The value is the sampled call-chain depth; range checks are in a gap. */
4967 profiler->statistical_call_chain_depth = value;
4968 profiler->flags |= MONO_PROFILE_STATISTICAL;
4970 } else if (! (strncmp (argument, "statistical-thread-buffer-size", equals_position) && strncmp (argument, "sbs", equals_position))) {
4971 int value = atoi (equals + 1);
4974 profiler->statistical_buffer_size = value;
4976 } else if (! (strncmp (argument, "write-buffer-size", equals_position) && strncmp (argument, "wbs", equals_position))) {
4977 int value = atoi (equals + 1);
4980 profiler->write_buffer_size = value;
4982 } else if (! (strncmp (argument, "output", equals_position) && strncmp (argument, "out", equals_position) && strncmp (argument, "o", equals_position) && strncmp (argument, "O", equals_position))) {
4984 if (strlen (equals + 1) > 0) {
4985 profiler->file_name = g_strdup (equals + 1);
4987 } else if (! (strncmp (argument, "output-suffix", equals_position) && strncmp (argument, "suffix", equals_position) && strncmp (argument, "os", equals_position) && strncmp (argument, "OS", equals_position))) {
4989 if (strlen (equals + 1) > 0) {
4990 profiler->file_name_suffix = g_strdup (equals + 1);
4992 } else if (! (strncmp (argument, "heap-shot", equals_position) && strncmp (argument, "heap", equals_position) && strncmp (argument, "h", equals_position))) {
4993 char *parameter = equals + 1;
/* "heap-shot=all" dumps on every GC; otherwise the value names a signal. */
4994 if (! strcmp (parameter, "all")) {
4995 profiler->dump_next_heap_snapshots = -1;
4997 gc_request_signal_number = parse_signal_name (parameter);
5001 profiler->action_flags.save_allocation_caller = TRUE;
5002 profiler->action_flags.save_allocation_stack = TRUE;
5003 profiler->action_flags.allocations_carry_id = TRUE_IF_NOT_MINUS;
5005 profiler->action_flags.heap_shot = TRUE_IF_NOT_MINUS;
5006 } else if (! (strncmp (argument, "gc-commands", equals_position) && strncmp (argument, "gc-c", equals_position) && strncmp (argument, "gcc", equals_position))) {
5008 if (strlen (equals + 1) > 0) {
5009 profiler->heap_shot_command_file_name = g_strdup (equals + 1);
5011 } else if (! (strncmp (argument, "gc-dumps", equals_position) && strncmp (argument, "gc-d", equals_position) && strncmp (argument, "gcd", equals_position))) {
5013 if (strlen (equals + 1) > 0) {
5014 profiler->dump_next_heap_snapshots = atoi (equals + 1);
5016 #ifndef PLATFORM_WIN32
5017 } else if (! (strncmp (argument, "gc-signal", equals_position) && strncmp (argument, "gc-s", equals_position) && strncmp (argument, "gcs", equals_position))) {
5019 if (strlen (equals + 1) > 0) {
5020 char *signal_name = equals + 1;
5021 gc_request_signal_number = parse_signal_name (signal_name);
5023 } else if (! (strncmp (argument, "toggle-signal", equals_position) && strncmp (argument, "ts", equals_position))) {
5025 if (strlen (equals + 1) > 0) {
5026 char *signal_name = equals + 1;
5027 toggle_signal_number = parse_signal_name (signal_name);
5031 FAIL_PARSING_VALUED_ARGUMENT;
/* Bare flag options (no '='). */
5034 if (! (strcmp (argument, "jit") && strcmp (argument, "j"))) {
5035 profiler->action_flags.jit_time = TRUE_IF_NOT_MINUS;
5036 } else if (! (strcmp (argument, "allocations") && strcmp (argument, "alloc") && strcmp (argument, "a"))) {
5039 profiler->action_flags.save_allocation_caller = TRUE;
5040 profiler->action_flags.save_allocation_stack = TRUE;
5043 profiler->flags |= MONO_PROFILE_ALLOCATIONS;
5045 profiler->flags &= ~MONO_PROFILE_ALLOCATIONS;
5047 } else if (! (strcmp (argument, "gc") && strcmp (argument, "g"))) {
5049 profiler->flags |= MONO_PROFILE_GC;
5050 } else if (! (strcmp (argument, "allocations-summary") && strcmp (argument, "as"))) {
5051 profiler->action_flags.collection_summary = TRUE_IF_NOT_MINUS;
5052 } else if (! (strcmp (argument, "heap-shot") && strcmp (argument, "heap") && strcmp (argument, "h"))) {
5055 profiler->action_flags.save_allocation_caller = TRUE;
5056 profiler->action_flags.save_allocation_stack = TRUE;
5057 profiler->action_flags.allocations_carry_id = TRUE_IF_NOT_MINUS;
5059 profiler->action_flags.heap_shot = TRUE_IF_NOT_MINUS;
5060 } else if (! (strcmp (argument, "unreachable") && strcmp (argument, "free") && strcmp (argument, "f"))) {
5061 profiler->action_flags.unreachable_objects = TRUE_IF_NOT_MINUS;
5062 } else if (! (strcmp (argument, "threads") && strcmp (argument, "t"))) {
5064 profiler->flags |= MONO_PROFILE_THREADS;
5066 profiler->flags &= ~MONO_PROFILE_THREADS;
5068 } else if (! (strcmp (argument, "enter-leave") && strcmp (argument, "calls") && strcmp (argument, "c"))) {
5069 profiler->action_flags.track_calls = TRUE_IF_NOT_MINUS;
5070 } else if (! (strcmp (argument, "statistical") && strcmp (argument, "stat") && strcmp (argument, "s"))) {
5072 profiler->flags |= MONO_PROFILE_STATISTICAL;
5074 profiler->flags &= ~MONO_PROFILE_STATISTICAL;
5076 } else if (! (strcmp (argument, "save-allocation-caller") && strcmp (argument, "sac"))) {
5077 profiler->action_flags.save_allocation_caller = TRUE_IF_NOT_MINUS;
5078 } else if (! (strcmp (argument, "save-allocation-stack") && strcmp (argument, "sas"))) {
5079 profiler->action_flags.save_allocation_stack = TRUE_IF_NOT_MINUS;
5080 } else if (! (strcmp (argument, "allocations-carry-id") && strcmp (argument, "aci"))) {
5081 profiler->action_flags.allocations_carry_id = TRUE_IF_NOT_MINUS;
5082 } else if (! (strcmp (argument, "start-enabled") && strcmp (argument, "se"))) {
5083 profiler->profiler_enabled = TRUE_IF_NOT_MINUS;
5084 } else if (! (strcmp (argument, "start-disabled") && strcmp (argument, "sd"))) {
/* NOTE(review): BUG? This is identical to the "start-enabled" branch;
 * "start-disabled"/"sd" should presumably set profiler_enabled to the
 * opposite value (e.g. ! TRUE_IF_NOT_MINUS) — confirm against upstream. */
5085 profiler->profiler_enabled = TRUE_IF_NOT_MINUS;
5086 } else if (! (strcmp (argument, "force-accurate-timer") && strcmp (argument, "fac"))) {
5087 use_fast_timer = TRUE_IF_NOT_MINUS;
5089 } else if (! (strcmp (argument, "oprofile") && strcmp (argument, "oprof"))) {
5090 profiler->flags |= MONO_PROFILE_JIT_COMPILATION;
5091 profiler->action_flags.oprofile = TRUE;
5092 if (op_open_agent ()) {
5093 FAIL_ARGUMENT_CHECK ("problem calling op_open_agent");
5096 } else if (strcmp (argument, "logging")) {
5097 FAIL_PARSING_FLAG_ARGUMENT;
/* failure_handling label target: warn about the bad argument, keep going. */
5102 if (failure_message != NULL) {
5103 g_warning (failure_message, argument);
5104 failure_message = NULL;
5108 g_free (arguments_array);
5110 #ifndef PLATFORM_WIN32
5111 if (gc_request_signal_number != 0) {
5112 if (check_signal_number (gc_request_signal_number) && (gc_request_signal_number != toggle_signal_number)) {
5113 add_gc_request_handler (gc_request_signal_number);
5115 g_error ("Cannot use signal %d", gc_request_signal_number);
5118 if (toggle_signal_number != 0) {
5119 if (check_signal_number (toggle_signal_number) && (toggle_signal_number != gc_request_signal_number)) {
5120 add_toggle_handler (toggle_signal_number);
/* NOTE(review): BUG — copy-paste: this error is about the toggle signal
 * but reports gc_request_signal_number; should print toggle_signal_number. */
5122 g_error ("Cannot use signal %d", gc_request_signal_number);
5127 /* Ensure that the profiler flags needed to support required action flags are active */
5128 if (profiler->action_flags.jit_time) {
5129 profiler->flags |= MONO_PROFILE_JIT_COMPILATION;
5131 if (profiler->action_flags.save_allocation_caller || profiler->action_flags.save_allocation_stack || profiler->action_flags.allocations_carry_id) {
5132 profiler->flags |= MONO_PROFILE_ALLOCATIONS;
5134 if (profiler->action_flags.collection_summary || profiler->action_flags.heap_shot || profiler->action_flags.unreachable_objects) {
5135 profiler->flags |= MONO_PROFILE_ALLOCATIONS;
5137 if (profiler->action_flags.track_calls) {
5138 profiler->flags |= MONO_PROFILE_ENTER_LEAVE;
5139 profiler->action_flags.jit_time = TRUE;
5141 if (profiler->action_flags.save_allocation_caller || profiler->action_flags.save_allocation_stack) {
5142 profiler->action_flags.track_stack = TRUE;
5143 profiler->flags |= MONO_PROFILE_ENTER_LEAVE;
5146 /* Without JIT events the stat profiler will not find method IDs... */
/* NOTE(review): BUG — bitwise OR ('|') in the condition is always nonzero,
 * so this branch runs unconditionally; the flag test must use '&'. */
5147 if (profiler->flags | MONO_PROFILE_STATISTICAL) {
5148 profiler->flags |= MONO_PROFILE_JIT_COMPILATION;
5150 /* Profiling allocations without knowing which gc we are doing is not nice... */
/* NOTE(review): BUG — same '|' vs '&' mistake as above: always true. */
5151 if (profiler->flags | MONO_PROFILE_ALLOCATIONS) {
5152 profiler->flags |= MONO_PROFILE_GC;
/* Derive the output file name from the program name when none was given:
 * strip directory components ('/' then '\\') and the trailing extension. */
5156 if (profiler->file_name == NULL) {
5157 char *program_name = g_get_prgname ();
5159 if (program_name != NULL) {
5160 char *name_buffer = g_strdup (program_name);
5161 char *name_start = name_buffer;
5164 /* Jump over the last '/' */
5165 cursor = strrchr (name_buffer, '/');
5166 if (cursor == NULL) {
5167 cursor = name_buffer;
5171 name_start = cursor;
5173 /* Then jump over the last '\\' */
5174 cursor = strrchr (name_start, '\\');
5175 if (cursor == NULL) {
5176 cursor = name_start;
5180 name_start = cursor;
5182 /* Finally, find the last '.' */
5183 cursor = strrchr (name_start, '.');
5184 if (cursor != NULL) {
5188 if (profiler->file_name_suffix == NULL) {
5189 profiler->file_name = g_strdup_printf ("%s.mprof", name_start);
5191 profiler->file_name = g_strdup_printf ("%s-%s.mprof", name_start, profiler->file_name_suffix);
5193 g_free (name_buffer);
/* No program name available: fall back to a fixed file name. */
5195 profiler->file_name = g_strdup_printf ("%s.mprof", "profiler-log");
/*
 * thread_detach_callback: manage callback installed on the writer thread's
 * MonoThread; asks the writer thread to detach from the runtime and wakes
 * it so the request is seen.  (Return statement is in an extraction gap.)
 */
5201 thread_detach_callback (MonoThread *thread) {
5202 LOG_WRITER_THREAD ("thread_detach_callback: asking writer thread to detach");
5203 profiler->detach_writer_thread = TRUE;
5204 WRITER_EVENT_RAISE ();
5205 LOG_WRITER_THREAD ("thread_detach_callback: done");
/*
 * data_writer_thread:
 * Body of the dedicated profiler writer thread.  Lifecycle:
 *   1. Wait (WRITER_EVENT_ENABLE_WAIT) for permission to attach to the
 *      runtime; attach to the root domain and install
 *      thread_detach_callback as the manage callback, then signal the
 *      creator via WRITER_EVENT_DONE_RAISE.
 *   2. On each wakeup (WRITER_EVENT_WAIT): force a full GC if a heap
 *      shot was signalled, then write pending data to the output file —
 *      id mappings first, then any ready statistical buffer, then
 *      heap-shot write jobs.
 *   3. Honor explicit "flush everything" requests, acknowledging each
 *      with WRITER_EVENT_DONE_RAISE so the requesting thread can
 *      continue (per the note below, that thread holds the lock).
 *   4. Detach from the runtime when detach_writer_thread is set, and
 *      exit when terminate_writer_thread is set.
 */
5210 data_writer_thread (gpointer nothing) {
/* Attach/detach state of this thread with respect to the runtime;
 * static, matching the original code (the thread body runs once). */
5211 static gboolean thread_attached = FALSE;
5212 static gboolean thread_detached = FALSE;
5213 static MonoThread *this_thread = NULL;
5215 /* Wait for the OK to attach to the runtime */
5216 WRITER_EVENT_ENABLE_WAIT ();
5217 if (! profiler->terminate_writer_thread) {
5218 MonoDomain * root_domain = mono_get_root_domain ();
5219 if (root_domain != NULL) {
5220 LOG_WRITER_THREAD ("data_writer_thread: attaching thread");
5221 this_thread = mono_thread_attach (root_domain);
/* Lets the runtime request our detach via thread_detach_callback. */
5222 mono_thread_set_manage_callback (this_thread, thread_detach_callback);
5223 thread_attached = TRUE;
/* No root domain: cannot profile at all, abort hard. */
5225 g_error ("Cannot get root domain\n");
5228 /* Execution was too short, pretend we attached and detached. */
5229 thread_attached = TRUE;
5230 thread_detached = TRUE;
5232 profiler->writer_thread_enabled = TRUE;
5233 /* Notify that we are attached to the runtime */
5234 WRITER_EVENT_DONE_RAISE ();
5237 ProfilerStatisticalData *statistical_data;
5240 LOG_WRITER_THREAD ("data_writer_thread: going to sleep");
5241 WRITER_EVENT_WAIT ();
5242 LOG_WRITER_THREAD ("data_writer_thread: just woke up");
/* A heap shot was requested: force a full collection so the heap
 * data gathered below is up to date. */
5244 if (profiler->heap_shot_was_signalled) {
5245 LOG_WRITER_THREAD ("data_writer_thread: starting requested collection");
5246 mono_gc_collect (mono_gc_max_generation ());
5247 LOG_WRITER_THREAD ("data_writer_thread: requested collection done");
/* Take the statistical buffer published by the sampling side, if any. */
5250 statistical_data = profiler->statistical_data_ready;
/* "done" means: nothing pending at all (no statistical buffer, no
 * heap-shot jobs, no flush request), so this wakeup is a no-op. */
5251 done = (statistical_data == NULL) && (profiler->heap_shot_write_jobs == NULL) && (profiler->writer_thread_flush_everything == FALSE);
5253 if ((!done) && thread_attached) {
5254 if (profiler->writer_thread_flush_everything) {
5255 /* Note that this assumes the lock is held by the thread that woke us up! */
5256 if (! thread_detached) {
5257 LOG_WRITER_THREAD ("data_writer_thread: flushing everything...");
5258 flush_everything ();
5259 profiler->writer_thread_flush_everything = FALSE;
/* Acknowledge the flush so the requester can proceed. */
5260 WRITER_EVENT_DONE_RAISE ();
5261 LOG_WRITER_THREAD ("data_writer_thread: flushed everything.");
/* Already detached: cannot flush, but still acknowledge the
 * request so the waiting thread is not blocked forever. */
5263 LOG_WRITER_THREAD ("data_writer_thread: flushing requested, but thread is detached...");
5264 profiler->writer_thread_flush_everything = FALSE;
5265 WRITER_EVENT_DONE_RAISE ();
5266 LOG_WRITER_THREAD ("data_writer_thread: done event raised.");
5269 LOG_WRITER_THREAD ("data_writer_thread: acquiring lock and writing data");
5272 // This makes sure that all method ids are in place
5273 LOG_WRITER_THREAD ("data_writer_thread: writing mapping...");
5274 flush_all_mappings ();
5275 LOG_WRITER_THREAD ("data_writer_thread: wrote mapping");
5277 if ((statistical_data != NULL) && ! thread_detached) {
5278 LOG_WRITER_THREAD ("data_writer_thread: writing statistical data...");
5279 profiler->statistical_data_ready = NULL;
5280 write_statistical_data_block (statistical_data);
/* Reset and recycle the written buffer as the spare (second)
 * buffer for the sampling side. */
5281 statistical_data->next_free_index = 0;
5282 statistical_data->first_unwritten_index = 0;
5283 profiler->statistical_data_second_buffer = statistical_data;
5284 LOG_WRITER_THREAD ("data_writer_thread: wrote statistical data");
5287 profiler_process_heap_shot_write_jobs ();
5290 LOG_WRITER_THREAD ("data_writer_thread: wrote data and released lock");
/* Flush requested but we never attached to the runtime: nothing to
 * write, yet the requester must still be released. */
5293 if (profiler->writer_thread_flush_everything) {
5294 LOG_WRITER_THREAD ("data_writer_thread: flushing requested, but thread is not attached...");
5295 profiler->writer_thread_flush_everything = FALSE;
5296 WRITER_EVENT_DONE_RAISE ();
5297 LOG_WRITER_THREAD ("data_writer_thread: done event raised.");
/* Detach request (set by thread_detach_callback): flush what we
 * have, then detach this thread from the runtime. */
5301 if (profiler->detach_writer_thread) {
5302 if (this_thread != NULL) {
5303 LOG_WRITER_THREAD ("data_writer_thread: detach requested, acquiring lock and flushing data");
5305 flush_everything ();
5307 LOG_WRITER_THREAD ("data_writer_thread: flushed data and released lock");
5308 LOG_WRITER_THREAD ("data_writer_thread: detaching thread");
5309 mono_thread_detach (this_thread);
5311 profiler->detach_writer_thread = FALSE;
5312 thread_detached = TRUE;
5314 LOG_WRITER_THREAD ("data_writer_thread: warning: thread has already been detached");
/* Final shutdown: release writer-thread resources and leave. */
5318 if (profiler->terminate_writer_thread) {
5319 LOG_WRITER_THREAD ("data_writer_thread: exiting thread");
5320 CLEANUP_WRITER_THREAD ();
5328 mono_profiler_startup (const char *desc);
5330 /* the profiler module entry point, invoked by the runtime when the profiler module is loaded */
5332 mono_profiler_startup (const char *desc)
5334 profiler = g_new0 (MonoProfiler, 1);
5336 setup_user_options ((desc != NULL) ? desc : DEFAULT_ARGUMENTS);
5338 INITIALIZE_PROFILER_MUTEX ();
5339 MONO_PROFILER_GET_CURRENT_TIME (profiler->start_time);
5340 MONO_PROFILER_GET_CURRENT_COUNTER (profiler->start_counter);
5341 profiler->last_header_counter = 0;
5343 profiler->methods = method_id_mapping_new ();
5344 profiler->classes = class_id_mapping_new ();
5345 profiler->loaded_assemblies = g_hash_table_new_full (g_direct_hash, NULL, NULL, loaded_element_destroy);
5346 profiler->loaded_modules = g_hash_table_new_full (g_direct_hash, NULL, NULL, loaded_element_destroy);
5347 profiler->loaded_appdomains = g_hash_table_new_full (g_direct_hash, NULL, NULL, loaded_element_destroy);
5349 profiler->statistical_data = profiler_statistical_data_new (profiler);
5350 profiler->statistical_data_second_buffer = profiler_statistical_data_new (profiler);
5352 profiler->write_buffers = g_malloc (sizeof (ProfilerFileWriteBuffer) + PROFILER_FILE_WRITE_BUFFER_SIZE);
5353 profiler->write_buffers->next = NULL;
5354 profiler->current_write_buffer = profiler->write_buffers;
5355 profiler->current_write_position = 0;
5356 profiler->full_write_buffers = 0;
5358 profiler->executable_regions = profiler_executable_memory_regions_new (1, 1);
5360 profiler->executable_files.table = g_hash_table_new (g_str_hash, g_str_equal);
5361 profiler->executable_files.new_files = NULL;
5363 profiler->heap_shot_write_jobs = NULL;
5364 if (profiler->action_flags.unreachable_objects || profiler->action_flags.heap_shot || profiler->action_flags.collection_summary) {
5365 profiler_heap_buffers_setup (&(profiler->heap));
5367 profiler_heap_buffers_clear (&(profiler->heap));
5369 profiler->garbage_collection_counter = 0;
5371 WRITER_EVENT_INIT ();
5372 LOG_WRITER_THREAD ("mono_profiler_startup: creating writer thread");
5373 CREATE_WRITER_THREAD (data_writer_thread);
5374 LOG_WRITER_THREAD ("mono_profiler_startup: created writer thread");
5376 ALLOCATE_PROFILER_THREAD_DATA ();
5380 write_intro_block ();
5381 write_directives_block (TRUE);
5383 mono_profiler_install (profiler, profiler_shutdown);
5385 mono_profiler_install_appdomain (appdomain_start_load, appdomain_end_load,
5386 appdomain_start_unload, appdomain_end_unload);
5387 mono_profiler_install_assembly (assembly_start_load, assembly_end_load,
5388 assembly_start_unload, assembly_end_unload);
5389 mono_profiler_install_module (module_start_load, module_end_load,
5390 module_start_unload, module_end_unload);
5391 mono_profiler_install_class (class_start_load, class_end_load,
5392 class_start_unload, class_end_unload);
5393 mono_profiler_install_jit_compile (method_start_jit, method_end_jit);
5394 mono_profiler_install_enter_leave (method_enter, method_leave);
5395 mono_profiler_install_method_free (method_free);
5396 mono_profiler_install_thread (thread_start, thread_end);
5397 mono_profiler_install_allocation (object_allocated);
5398 mono_profiler_install_statistical (statistical_hit);
5399 mono_profiler_install_statistical_call_chain (statistical_call_chain, profiler->statistical_call_chain_depth);
5400 mono_profiler_install_gc (gc_event, gc_resize);
5401 mono_profiler_install_runtime_initialized (runtime_initialized);
5403 mono_profiler_install_jit_end (method_jit_result);
5406 mono_profiler_set_events (profiler->flags);