#define USE_DL_PREFIX 1
#include "mono-codeman.h"
#include "mono-mmap.h"
#include <mono/metadata/class-internals.h>
#include <mono/metadata/profiler-private.h>
#ifdef HAVE_VALGRIND_MEMCHECK_H
#include <valgrind/memcheck.h>
#if defined(__native_client_codegen__) && defined(__native_client__)
#include <sys/nacl_syscalls.h>
 * AMD64 processors maintain icache coherency only for pages which are
 * marked executable. Also, Windows DEP requires us to obtain executable memory from
 * malloc when using dynamic code managers. The system malloc can't do this, so we use a
 * slightly modified version of Doug Lea's Malloc package for this purpose:
 * http://g.oswego.edu/dl/html/malloc.html
#if defined(__ia64__) || defined(__x86_64__)
 * We require 16-byte alignment on amd64 so the fp literals embedded in the code are
 * properly aligned for SSE2.
#ifdef __native_client_codegen__
/* For Google Native Client, all targets of indirect control flow need to */
/* be aligned to a 32-byte boundary. MIN_ALIGN was updated to 32 to force */
/* alignment for calls from tramp-x86.c to mono_global_codeman_reserve() */
/* and mono_domain_code_reserve(). */
/* if a chunk has less than this amount of free space it's considered full */
#define MAX_WASTAGE 32
#define ARCH_MAP_FLAGS MONO_MMAP_32BIT
#define ARCH_MAP_FLAGS 0
#define MONO_PROT_RWX (MONO_MMAP_READ|MONO_MMAP_WRITE|MONO_MMAP_EXEC)
typedef struct _CodeChunk CodeChunk;
unsigned int flags: 8;
/* this number of bytes is reserved for binding branches that cannot
 * reach their target with the immediate displacement */
unsigned int bsize: 24;
struct _MonoCodeManager {
#if defined(__native_client_codegen__) && defined(__native_client__)
#define ALIGN_INT(val,alignment) (((val) + ((alignment) - 1)) & ~((alignment) - 1))
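/* For example (illustrative values): ALIGN_INT (13, 16) == 16 and
 * ALIGN_INT (32, 16) == 32: val is rounded up to the next multiple of the
 * power-of-two alignment. */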
#if defined(__native_client_codegen__) && defined(__native_client__)
/* End of text segment, set by linker.
 * Dynamic text starts on the next allocated page.
char *next_dynamic_code_addr = NULL;
 * This routine gets the next available bundle-aligned
 * pointer in the dynamic code section. It does not check
 * for the section end; this error will be caught in the
allocate_code (intptr_t increment)
if (increment < 0) return NULL;
increment = (increment & kNaClBundleMask) ? ((increment & ~kNaClBundleMask) + kNaClBundleSize) : increment;
addr = next_dynamic_code_addr;
next_dynamic_code_addr += increment;
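/* A rough worked example (assuming kNaClBundleSize == 32, so
 * kNaClBundleMask == 31): allocate_code (33) rounds the increment up to 64,
 * returns the current next_dynamic_code_addr and advances it by 64 bytes,
 * so the next caller also gets a bundle-aligned pointer. */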
nacl_is_code_address (void *target)
return (char *)target < next_dynamic_code_addr;
const int kMaxPatchDepth = 32;
__thread unsigned char **patch_source_base = NULL;
__thread unsigned char **patch_dest_base = NULL;
__thread int *patch_alloc_size = NULL;
__thread int patch_current_depth = -1;
__thread int allow_target_modification = 1;
nacl_allow_target_modification (int val)
allow_target_modification = val;
nacl_jit_check_init (void)
if (patch_source_base == NULL) {
patch_source_base = g_malloc (kMaxPatchDepth * sizeof(unsigned char *));
patch_dest_base = g_malloc (kMaxPatchDepth * sizeof(unsigned char *));
patch_alloc_size = g_malloc (kMaxPatchDepth * sizeof(int));
/* Given a patch target, modify the target such that patching will work when
 * the code is copied to the data section.
nacl_modify_patch_target (unsigned char *target)
/* This seems like a bit of an ugly way to do this, but the advantage
 * is that we don't have to worry about all the conditions in
 * mono_resolve_patch_target, and it can be used by all the bare uses
if (!allow_target_modification) return target;
nacl_jit_check_init ();
sb = patch_source_base[patch_current_depth];
db = patch_dest_base[patch_current_depth];
if (target >= sb && (target < sb + patch_alloc_size[patch_current_depth])) {
/* Do nothing: target is in the section being generated;
 * no need to modify it, the displacement will be the same either way.
int target_offset = target - db;
target = sb + target_offset;
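/* Illustrative (hypothetical addresses): with sb == 0x100000 and
 * db == 0x200000, a target of 0x200040 pointing into the destination (code)
 * region is rewritten to 0x100040 in the source (data) buffer, so the
 * displacements computed while patching the buffer match the ones needed
 * once the code reaches its destination. */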
nacl_inverse_modify_patch_target (unsigned char *target)
if (!allow_target_modification) return target;
nacl_jit_check_init ();
sb = patch_source_base[patch_current_depth];
db = patch_dest_base[patch_current_depth];
target_offset = target - sb;
target = db + target_offset;
#endif /* __native_client_codegen && __native_client__ */
 * mono_code_manager_new:
 * Creates a new code manager. A code manager can be used to allocate memory
 * suitable for storing native code that can later be executed.
 * A code manager allocates memory from the operating system in large chunks
 * (typically 64KB in size) so that many methods can be allocated inside them
 * close together, improving cache locality.
 * Returns: the new code manager
mono_code_manager_new (void)
MonoCodeManager *cman = malloc (sizeof (MonoCodeManager));
cman->current = NULL;
#if defined(__native_client_codegen__) && defined(__native_client__)
if (next_dynamic_code_addr == NULL) {
const guint kPageMask = 0xFFFF; /* 64K pages */
next_dynamic_code_addr = (char *)((uintptr_t)(etext + kPageMask) & ~kPageMask);
/* Work around a bug in the service runtime: we are unable to */
/* allocate from the first page in the dynamic code section. */
next_dynamic_code_addr += (uintptr_t)0x10000;
cman->hash = mono_g_hash_table_new (NULL, NULL);
/* Keep the hash table from being collected */
mono_gc_register_root (&cman->hash, sizeof (void*), NULL);
if (patch_source_base == NULL) {
patch_source_base = g_malloc (kMaxPatchDepth * sizeof(unsigned char *));
patch_dest_base = g_malloc (kMaxPatchDepth * sizeof(unsigned char *));
patch_alloc_size = g_malloc (kMaxPatchDepth * sizeof(int));
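/*
 * A minimal usage sketch (illustrative only; emit_native_code is a
 * hypothetical helper, not part of this file):
 *
 *   MonoCodeManager *cman = mono_code_manager_new ();
 *   guint8 *code = mono_code_manager_reserve (cman, 256);
 *   int used = emit_native_code (code, 256);
 *   mono_code_manager_commit (cman, code, 256, used);
 *   ...
 *   mono_code_manager_destroy (cman);
 */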
 * mono_code_manager_new_dynamic:
 * Creates a new code manager suitable for holding native code that can be
 * used for single or small methods that need to be deallocated independently
 * of other native code.
 * Returns: the new code manager
mono_code_manager_new_dynamic (void)
MonoCodeManager *cman = mono_code_manager_new ();
free_chunklist (CodeChunk *chunk)
#if defined(HAVE_VALGRIND_MEMCHECK_H) && defined (VALGRIND_JIT_UNREGISTER_MAP)
int valgrind_unregister = 0;
if (RUNNING_ON_VALGRIND)
valgrind_unregister = 1;
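/* Note: the function-like macro defined below shares its name with the
 * valgrind_unregister flag variable above; the macro only expands where the
 * name is followed by a parenthesis, so the `if (valgrind_unregister)' in
 * its body still refers to the variable. */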
#define valgrind_unregister(x) do { if (valgrind_unregister) { VALGRIND_JIT_UNREGISTER_MAP(NULL,x); } } while (0)
#define valgrind_unregister(x)
mono_profiler_code_chunk_destroy ((gpointer) dead->data);
if (dead->flags == CODE_FLAG_MMAP) {
mono_vfree (dead->data, dead->size);
/* valgrind_unregister(dead->data); */
} else if (dead->flags == CODE_FLAG_MALLOC) {
 * mono_code_manager_destroy:
 * @cman: a code manager
 * Free all the memory associated with the code manager @cman.
mono_code_manager_destroy (MonoCodeManager *cman)
free_chunklist (cman->full);
free_chunklist (cman->current);
 * mono_code_manager_invalidate:
 * @cman: a code manager
 * Fill all the memory with an invalid native code value
 * so that any attempt to execute code allocated in the code
 * manager @cman will fail. This is used for debugging purposes.
mono_code_manager_invalidate (MonoCodeManager *cman)
#if defined(__i386__) || defined(__x86_64__)
int fill_value = 0xcc; /* x86 int3 breakpoint */
int fill_value = 0x2a;
for (chunk = cman->current; chunk; chunk = chunk->next)
memset (chunk->data, fill_value, chunk->size);
for (chunk = cman->full; chunk; chunk = chunk->next)
memset (chunk->data, fill_value, chunk->size);
 * mono_code_manager_set_read_only:
 * @cman: a code manager
 * Make the code manager read-only, so that further allocation requests trigger an assertion.
mono_code_manager_set_read_only (MonoCodeManager *cman)
cman->read_only = TRUE;
 * mono_code_manager_foreach:
 * @cman: a code manager
 * @func: a callback function pointer
 * @user_data: additional data to pass to @func
 * Invokes the callback @func for each different chunk of memory allocated
 * in the code manager @cman.
mono_code_manager_foreach (MonoCodeManager *cman, MonoCodeManagerFunc func, void *user_data)
for (chunk = cman->current; chunk; chunk = chunk->next) {
if (func (chunk->data, chunk->size, chunk->bsize, user_data))
for (chunk = cman->full; chunk; chunk = chunk->next) {
if (func (chunk->data, chunk->size, chunk->bsize, user_data))
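/*
 * An example callback (hypothetical, for illustration); judging from the
 * loops above, a non-zero return value from @func stops the iteration:
 *
 *   static int
 *   count_chunks (void *data, int size, int bsize, void *user_data)
 *   {
 *           ++*(int *)user_data;
 *           return 0;
 *   }
 *
 *   int nchunks = 0;
 *   mono_code_manager_foreach (cman, count_chunks, &nchunks);
 */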
/* BIND_ROOM is the divisor of the chunk size that gives the portion dedicated
 * to binding branches (branches not reachable with the immediate displacement):
 * bind_size = size / BIND_ROOM;
 * we should reduce it and make MIN_PAGES bigger for such systems
#if defined(__ppc__) || defined(__powerpc__)
new_codechunk (int dynamic, int size)
int minsize, flags = CODE_FLAG_MMAP;
int chunk_size, bsize = 0;
flags = CODE_FLAG_MALLOC;
pagesize = mono_pagesize ();
flags = CODE_FLAG_MALLOC;
minsize = pagesize * MIN_PAGES;
chunk_size = minsize;
chunk_size += pagesize - 1;
chunk_size &= ~ (pagesize - 1);
bsize = chunk_size / BIND_ROOM;
if (bsize < MIN_BSIZE)
bsize += MIN_ALIGN - 1;
bsize &= ~ (MIN_ALIGN - 1);
if (chunk_size - size < bsize) {
chunk_size = size + bsize;
chunk_size += pagesize - 1;
chunk_size &= ~ (pagesize - 1);
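/* Worked example (illustrative numbers): with a 4096-byte page,
 * MIN_PAGES == 8 and BIND_ROOM == 8, chunk_size starts at 32768 and bsize
 * at 32768 / 8 == 4096; a request for size == 30000 leaves only
 * 32768 - 30000 == 2768 < 4096 bytes of thunk room, so chunk_size is
 * regrown to 30000 + 4096 == 34096 and page-aligned up to 36864. */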
if (flags == CODE_FLAG_MALLOC) {
ptr = dlmemalign (MIN_ALIGN, chunk_size + MIN_ALIGN - 1);
/* Allocate MIN_ALIGN-1 more than we need so we can still */
/* guarantee MIN_ALIGN alignment for individual allocs */
/* from mono_code_manager_reserve_align. */
ptr = mono_valloc (NULL, chunk_size + MIN_ALIGN - 1, MONO_PROT_RWX | ARCH_MAP_FLAGS);
if (flags == CODE_FLAG_MALLOC) {
/* Make sure the thunks area is zeroed */
memset (ptr, 0, bsize);
chunk = malloc (sizeof (CodeChunk));
if (flags == CODE_FLAG_MALLOC)
mono_vfree (ptr, chunk_size);
chunk->size = chunk_size;
chunk->flags = flags;
chunk->bsize = bsize;
mono_profiler_code_chunk_new ((gpointer) chunk->data, chunk->size);
/* printf ("code chunk at: %p\n", ptr); */
 * mono_code_manager_reserve_align:
 * @cman: a code manager
 * @size: size of memory to allocate
 * @alignment: power of two alignment value
 * Allocates at least @size bytes of memory inside the code manager @cman.
 * Returns: the pointer to the allocated memory or #NULL on failure
mono_code_manager_reserve_align (MonoCodeManager *cman, int size, int alignment)
#if !defined(__native_client__) || !defined(__native_client_codegen__)
CodeChunk *chunk, *prev;
guint32 align_mask = alignment - 1;
g_assert (!cman->read_only);
/* eventually allow bigger alignments, but we need to fix the dynamic alloc code to
g_assert (alignment <= MIN_ALIGN);
++mono_stats.dynamic_code_alloc_count;
mono_stats.dynamic_code_bytes_count += size;
if (!cman->current) {
cman->current = new_codechunk (cman->dynamic, size);
for (chunk = cman->current; chunk; chunk = chunk->next) {
if (ALIGN_INT (chunk->pos, alignment) + size <= chunk->size) {
chunk->pos = ALIGN_INT (chunk->pos, alignment);
/* Align the chunk->data pointer that chunk->pos is added to, or we */
/* cannot guarantee proper alignment of the returned pointer. */
ptr = (void*)((((uintptr_t)chunk->data + align_mask) & ~align_mask) + chunk->pos);
chunk->pos = ((char*)ptr - chunk->data) + size;
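/* e.g. (illustrative): with chunk->data == 0x10004, alignment == 16 and an
 * already-aligned chunk->pos of 32, the data pointer is first rounded up to
 * 0x10010, so ptr == 0x10030 is 16-byte aligned even though the chunk base
 * itself was not. */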
 * no room found, move one filled chunk to cman->full
 * to keep cman->current from growing too much
for (chunk = cman->current; chunk; prev = chunk, chunk = chunk->next) {
if (chunk->pos + MIN_ALIGN * 4 <= chunk->size)
prev->next = chunk->next;
cman->current = chunk->next;
chunk->next = cman->full;
chunk = new_codechunk (cman->dynamic, size);
chunk->next = cman->current;
cman->current = chunk;
chunk->pos = ALIGN_INT (chunk->pos, alignment);
/* Align the chunk->data pointer that chunk->pos is added to, or we */
/* cannot guarantee proper alignment of the returned pointer. */
ptr = (void*)((((uintptr_t)chunk->data + align_mask) & ~align_mask) + chunk->pos);
chunk->pos = ((char*)ptr - chunk->data) + size;
unsigned char *temp_ptr, *code_ptr;
/* Round up size to next bundle */
alignment = kNaClBundleSize;
size = (size + kNaClBundleSize) & (~kNaClBundleMask);
/* Allocate a temp buffer */
temp_ptr = memalign (alignment, size);
g_assert (((uintptr_t)temp_ptr & kNaClBundleMask) == 0);
/* Allocate code space from the service runtime */
code_ptr = allocate_code (size);
/* Insert pointer to code space in hash, keyed by buffer ptr */
mono_g_hash_table_insert (cman->hash, temp_ptr, code_ptr);
nacl_jit_check_init ();
patch_current_depth++;
patch_source_base[patch_current_depth] = temp_ptr;
patch_dest_base[patch_current_depth] = code_ptr;
patch_alloc_size[patch_current_depth] = size;
g_assert (patch_current_depth < kMaxPatchDepth);
 * mono_code_manager_reserve:
 * @cman: a code manager
 * @size: size of memory to allocate
 * Allocates at least @size bytes of memory inside the code manager @cman.
 * Returns: the pointer to the allocated memory or #NULL on failure
mono_code_manager_reserve (MonoCodeManager *cman, int size)
return mono_code_manager_reserve_align (cman, size, MIN_ALIGN);
 * mono_code_manager_commit:
 * @cman: a code manager
 * @data: the pointer returned by mono_code_manager_reserve ()
 * @size: the size requested in the call to mono_code_manager_reserve ()
 * @newsize: the new size to reserve
 * If more room was reserved for a method than it needed, and nothing else
 * has been allocated from the code manager since, the excess allocation
 * can be returned for later use in the code manager.
mono_code_manager_commit (MonoCodeManager *cman, void *data, int size, int newsize)
#if !defined(__native_client__) || !defined(__native_client_codegen__)
g_assert (newsize <= size);
if (cman->current && (size != newsize) && (data == cman->current->data + cman->current->pos - size)) {
cman->current->pos -= size - newsize;
g_assert (newsize <= size);
code = mono_g_hash_table_lookup (cman->hash, data);
g_assert (code != NULL);
/* Pad the space after the code with HLT (0xf4) instructions */
/* TODO: this is x86/amd64 specific */
while (newsize & kNaClBundleMask) {
*((char *)data + newsize) = 0xf4;
status = nacl_dyncode_create (code, data, newsize);
g_assert_not_reached ();
mono_g_hash_table_remove (cman->hash, data);
g_assert (data == patch_source_base[patch_current_depth]);
g_assert (code == patch_dest_base[patch_current_depth]);
patch_current_depth--;
g_assert (patch_current_depth >= -1);
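/*
 * Typical reserve/commit pattern (sketch; the 1024-byte reservation is
 * illustrative):
 *
 *   guint8 *start, *code;
 *   start = code = mono_code_manager_reserve (cman, 1024);
 *   ... emit instructions, advancing code ...
 *   mono_code_manager_commit (cman, start, 1024, code - start);
 */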
#if defined(__native_client_codegen__) && defined(__native_client__)
nacl_code_manager_get_code_dest (MonoCodeManager *cman, void *data)
return mono_g_hash_table_lookup (cman->hash, data);
 * mono_code_manager_size:
 * @cman: a code manager
 * @used_size: pointer to an integer for the result
 * This function can be used to get statistics about a code manager:
 * the integer pointed to by @used_size will contain how much
 * memory is actually used inside the code manager @cman.
 * Returns: the amount of memory allocated in @cman
mono_code_manager_size (MonoCodeManager *cman, int *used_size)
for (chunk = cman->current; chunk; chunk = chunk->next) {
for (chunk = cman->full; chunk; chunk = chunk->next) {