#define USE_DL_PREFIX 1
#include "mono-codeman.h"
#include "mono-mmap.h"
#include "mono-counters.h"
#include <mono/metadata/class-internals.h>
#include <mono/metadata/profiler-private.h>
#ifdef HAVE_VALGRIND_MEMCHECK_H
#include <valgrind/memcheck.h>
#if defined(__native_client_codegen__) && defined(__native_client__)
#include <nacl/nacl_dyncode.h>
static uintptr_t code_memory_used = 0;
 * AMD64 processors maintain icache coherency only for pages which are
 * marked executable. Also, Windows DEP requires us to obtain executable memory from
 * malloc when using dynamic code managers. The system malloc can't do this so we use a
 * slightly modified version of Doug Lea's Malloc package for this purpose:
 * http://g.oswego.edu/dl/html/malloc.html
#if defined(__ia64__) || defined(__x86_64__)
 * We require 16-byte alignment on amd64 so the fp literals embedded in the code are
 * properly aligned for SSE2.
#ifdef __native_client_codegen__
/* For Google Native Client, all targets of indirect control flow need to */
/* be aligned to a 32-byte boundary. MIN_ALIGN was updated to 32 to force */
/* alignment for calls from tramp-x86.c to mono_global_codeman_reserve() */
/* and mono_domain_code_reserve(). */
/* if a chunk has less than this amount of free space it's considered full */
#define MAX_WASTAGE 32
#define ARCH_MAP_FLAGS MONO_MMAP_32BIT
#define ARCH_MAP_FLAGS 0
#define MONO_PROT_RWX (MONO_MMAP_READ|MONO_MMAP_WRITE|MONO_MMAP_EXEC)
typedef struct _CodeChunk CodeChunk;
	unsigned int flags: 8;
	/* this many bytes at the start of the chunk are reserved for branch
	 * thunks (targets out of reach of an immediate displacement) */
	unsigned int bsize: 24;
struct _MonoCodeManager {
#if defined(__native_client_codegen__) && defined(__native_client__)
#define ALIGN_INT(val,alignment) (((val) + (alignment - 1)) & ~(alignment - 1))
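/*
 * A quick illustration of the round-up macro above (illustrative values, not
 * from the original source): ALIGN_INT (13, 16) == 16 and ALIGN_INT (32, 16)
 * == 32, i.e. the value is rounded up to the next multiple of a power-of-two
 * alignment by adding alignment - 1 and masking off the low bits.
 */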
#if defined(__native_client_codegen__) && defined(__native_client__)
/* End of text segment, set by linker.
 * Dynamic text starts on the next allocated page.
char *next_dynamic_code_addr = NULL;
 * This routine gets the next available bundle-aligned
 * pointer in the dynamic code section. It does not check
 * for the section end; that error will be caught in the
allocate_code (intptr_t increment)
	if (increment < 0) return NULL;
	increment = increment & kNaClBundleMask ? (increment & ~kNaClBundleMask) + kNaClBundleSize : increment;
	addr = next_dynamic_code_addr;
	next_dynamic_code_addr += increment;
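	/*
	 * Illustration (assuming kNaClBundleSize == 32, matching the 32-byte
	 * bundle alignment described above): a request of 100 bytes is rounded
	 * up to 128, so next_dynamic_code_addr always stays bundle aligned.
	 */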
nacl_is_code_address (void *target)
	return (char *)target < next_dynamic_code_addr;
const int kMaxPatchDepth = 32;
__thread unsigned char **patch_source_base = NULL;
__thread unsigned char **patch_dest_base = NULL;
__thread int *patch_alloc_size = NULL;
__thread int patch_current_depth = -1;
__thread int allow_target_modification = 1;
nacl_allow_target_modification (int val)
	allow_target_modification = val;
nacl_jit_check_init ()
	if (patch_source_base == NULL) {
		patch_source_base = g_malloc (kMaxPatchDepth * sizeof(unsigned char *));
		patch_dest_base = g_malloc (kMaxPatchDepth * sizeof(unsigned char *));
		patch_alloc_size = g_malloc (kMaxPatchDepth * sizeof(int));
/* Given a patch target, modify the target such that patching will work when
 * the code is copied to the data section.
nacl_modify_patch_target (unsigned char *target)
	/* This seems like a bit of an ugly way to do this, but the advantage
	 * is that we don't have to worry about all the conditions in
	 * mono_resolve_patch_target, and it can be used by all the bare uses
	if (!allow_target_modification) return target;
	nacl_jit_check_init ();
	sb = patch_source_base[patch_current_depth];
	db = patch_dest_base[patch_current_depth];
	if (target >= sb && (target < sb + patch_alloc_size[patch_current_depth])) {
		/* Do nothing: target is in the section being generated;
		 * no need to modify it, the disp will be the same either way.
		int target_offset = target - db;
		target = sb + target_offset;
nacl_inverse_modify_patch_target (unsigned char *target)
	if (!allow_target_modification) return target;
	nacl_jit_check_init ();
	sb = patch_source_base[patch_current_depth];
	db = patch_dest_base[patch_current_depth];
	target_offset = target - sb;
	target = db + target_offset;
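	/*
	 * Illustrative summary (not from the original source): with sb the
	 * temporary generation buffer and db the final code section,
	 * nacl_modify_patch_target maps a target inside the code section back
	 * to sb + (target - db), while this inverse maps a buffer address
	 * forward to db + (target - sb); the offset within the allocation is
	 * preserved in both directions.
	 */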
#endif /* __native_client_codegen && __native_client__ */
 * mono_code_manager_new:
 * Creates a new code manager. A code manager can be used to allocate memory
 * suitable for storing native code that can later be executed.
 * A code manager allocates memory from the operating system in large chunks
 * (typically 64KB in size) so that many methods can be allocated inside them
 * close together, improving cache locality.
 * Returns: the new code manager
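 *
 * A minimal usage sketch (illustrative only; bytes_emitted is a hypothetical
 * count of the bytes actually written, and error checking is omitted):
 *
 *   MonoCodeManager *cman = mono_code_manager_new ();
 *   void *code = mono_code_manager_reserve (cman, 64);
 *   // ... emit at most 64 bytes of native code at code ...
 *   mono_code_manager_commit (cman, code, 64, bytes_emitted);
 *   mono_code_manager_destroy (cman);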
mono_code_manager_new (void)
	MonoCodeManager *cman = malloc (sizeof (MonoCodeManager));
	cman->current = NULL;
#if defined(__native_client_codegen__) && defined(__native_client__)
	if (next_dynamic_code_addr == NULL) {
		const guint kPageMask = 0xFFFF; /* 64K pages */
		next_dynamic_code_addr = (char *)(((uintptr_t)etext + kPageMask) & ~kPageMask);
		/* Work around a bug in the service runtime: it is unable to */
		/* allocate from the first page of the dynamic code section. */
		next_dynamic_code_addr += (uintptr_t)0x10000;
	cman->hash = mono_g_hash_table_new (NULL, NULL);
	/* Keep the hash table from being collected */
	mono_gc_register_root (&cman->hash, sizeof (void*), NULL);
	if (patch_source_base == NULL) {
		patch_source_base = g_malloc (kMaxPatchDepth * sizeof(unsigned char *));
		patch_dest_base = g_malloc (kMaxPatchDepth * sizeof(unsigned char *));
		patch_alloc_size = g_malloc (kMaxPatchDepth * sizeof(int));
 * mono_code_manager_new_dynamic:
 * Creates a new code manager suitable for holding native code that can be
 * used for single or small methods that need to be deallocated independently
 * of other native code.
 * Returns: the new code manager
mono_code_manager_new_dynamic (void)
	MonoCodeManager *cman = mono_code_manager_new ();
free_chunklist (CodeChunk *chunk)
#if defined(HAVE_VALGRIND_MEMCHECK_H) && defined (VALGRIND_JIT_UNREGISTER_MAP)
	int valgrind_unregister = 0;
	if (RUNNING_ON_VALGRIND)
		valgrind_unregister = 1;
#define valgrind_unregister(x) do { if (valgrind_unregister) { VALGRIND_JIT_UNREGISTER_MAP(NULL,x); } } while (0)
#define valgrind_unregister(x)
		mono_profiler_code_chunk_destroy ((gpointer) dead->data);
		if (dead->flags == CODE_FLAG_MMAP) {
			mono_vfree (dead->data, dead->size);
			/* valgrind_unregister(dead->data); */
		} else if (dead->flags == CODE_FLAG_MALLOC) {
		code_memory_used -= dead->size;
 * mono_code_manager_destroy:
 * @cman: a code manager
 * Free all the memory associated with the code manager @cman.
mono_code_manager_destroy (MonoCodeManager *cman)
	free_chunklist (cman->full);
	free_chunklist (cman->current);
 * mono_code_manager_invalidate:
 * @cman: a code manager
 * Fill all the memory with an invalid native code value
 * so that any attempt to execute code allocated in the code
 * manager @cman will fail. This is used for debugging purposes.
mono_code_manager_invalidate (MonoCodeManager *cman)
#if defined(__i386__) || defined(__x86_64__)
	int fill_value = 0xcc; /* x86 int3 breakpoint */
	int fill_value = 0x2a;
	for (chunk = cman->current; chunk; chunk = chunk->next)
		memset (chunk->data, fill_value, chunk->size);
	for (chunk = cman->full; chunk; chunk = chunk->next)
		memset (chunk->data, fill_value, chunk->size);
 * mono_code_manager_set_read_only:
 * @cman: a code manager
 * Make the code manager read-only, so that further allocation requests cause an assert.
mono_code_manager_set_read_only (MonoCodeManager *cman)
	cman->read_only = TRUE;
 * mono_code_manager_foreach:
 * @cman: a code manager
 * @func: a callback function pointer
 * @user_data: additional data to pass to @func
 * Invokes the callback @func for each different chunk of memory allocated
 * in the code manager @cman.
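 *
 * Illustrative use (a sketch, not from the original source: sum_chunk_size is
 * a hypothetical callback whose signature is assumed to match the
 * (data, size, bsize, user_data) call below; a non-zero return value stops
 * the iteration):
 *
 *   static int
 *   sum_chunk_size (void *data, int size, int bsize, void *user_data)
 *   {
 *           *(int *)user_data += size;
 *           return 0;
 *   }
 *
 *   int total = 0;
 *   mono_code_manager_foreach (cman, sum_chunk_size, &total);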
mono_code_manager_foreach (MonoCodeManager *cman, MonoCodeManagerFunc func, void *user_data)
	for (chunk = cman->current; chunk; chunk = chunk->next) {
		if (func (chunk->data, chunk->size, chunk->bsize, user_data))
	for (chunk = cman->full; chunk; chunk = chunk->next) {
		if (func (chunk->data, chunk->size, chunk->bsize, user_data))
/* BIND_ROOM is the divisor for the chunk of code size dedicated
 * to binding branches (branches not reachable with the immediate displacement);
 * bind_size = size/BIND_ROOM;
 * we should reduce it and make MIN_PAGES bigger for such systems
#if defined(__ppc__) || defined(__powerpc__)
new_codechunk (int dynamic, int size)
	int minsize, flags = CODE_FLAG_MMAP;
	int chunk_size, bsize = 0;
	flags = CODE_FLAG_MALLOC;
	pagesize = mono_pagesize ();
		flags = CODE_FLAG_MALLOC;
		minsize = pagesize * MIN_PAGES;
			chunk_size = minsize;
			chunk_size += pagesize - 1;
			chunk_size &= ~ (pagesize - 1);
	bsize = chunk_size / BIND_ROOM;
	if (bsize < MIN_BSIZE)
	bsize += MIN_ALIGN - 1;
	bsize &= ~ (MIN_ALIGN - 1);
	if (chunk_size - size < bsize) {
		chunk_size = size + bsize;
		chunk_size += pagesize - 1;
		chunk_size &= ~ (pagesize - 1);
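	/*
	 * Worked example with illustrative numbers (BIND_ROOM and MIN_BSIZE are
	 * defined elsewhere; assume BIND_ROOM == 8, MIN_BSIZE <= 512, a 4096 byte
	 * page and size == 3800): the initial 4096 byte chunk gives bsize == 512;
	 * since 4096 - 3800 < 512, chunk_size grows to 3800 + 512 = 4312 and is
	 * then rounded up to 8192, leaving the first 512 bytes for branch thunks.
	 */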
	if (flags == CODE_FLAG_MALLOC) {
		ptr = dlmemalign (MIN_ALIGN, chunk_size + MIN_ALIGN - 1);
		/* Allocate MIN_ALIGN-1 more than we need so we can still */
		/* guarantee MIN_ALIGN alignment for individual allocs */
		/* from mono_code_manager_reserve_align. */
		ptr = mono_valloc (NULL, chunk_size + MIN_ALIGN - 1, MONO_PROT_RWX | ARCH_MAP_FLAGS);
	if (flags == CODE_FLAG_MALLOC) {
		/* Make sure the thunks area is zeroed */
		memset (ptr, 0, bsize);
	chunk = malloc (sizeof (CodeChunk));
		if (flags == CODE_FLAG_MALLOC)
			dlfree (ptr);
		else
			mono_vfree (ptr, chunk_size);
	chunk->size = chunk_size;
	chunk->flags = flags;
	chunk->bsize = bsize;
	mono_profiler_code_chunk_new ((gpointer) chunk->data, chunk->size);
	code_memory_used += chunk_size;
	mono_runtime_resource_check_limit (MONO_RESOURCE_JIT_CODE, code_memory_used);
	/*printf ("code chunk at: %p\n", ptr);*/
 * mono_code_manager_reserve_align:
 * @cman: a code manager
 * @size: size of memory to allocate
 * @alignment: power-of-two alignment value
 * Allocates at least @size bytes of memory inside the code manager @cman.
 * Returns: the pointer to the allocated memory or #NULL on failure
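 *
 * For example (illustrative only), mono_code_manager_reserve_align (cman, 24, 16)
 * returns a pointer whose address is a multiple of 16 with at least 24 bytes
 * available behind it; note that @alignment may not exceed MIN_ALIGN (see the
 * assert below).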
mono_code_manager_reserve_align (MonoCodeManager *cman, int size, int alignment)
#if !defined(__native_client__) || !defined(__native_client_codegen__)
	CodeChunk *chunk, *prev;
	guint32 align_mask = alignment - 1;
	g_assert (!cman->read_only);
	/* eventually allow bigger alignments, but we need to fix the dynamic alloc code to
	g_assert (alignment <= MIN_ALIGN);
		++mono_stats.dynamic_code_alloc_count;
		mono_stats.dynamic_code_bytes_count += size;
	if (!cman->current) {
		cman->current = new_codechunk (cman->dynamic, size);
	for (chunk = cman->current; chunk; chunk = chunk->next) {
		if (ALIGN_INT (chunk->pos, alignment) + size <= chunk->size) {
			chunk->pos = ALIGN_INT (chunk->pos, alignment);
			/* Align the chunk->data pointer that chunk->pos is added to, */
			/* or we can't guarantee proper alignment. */
			ptr = (void*)((((uintptr_t)chunk->data + align_mask) & ~(uintptr_t)align_mask) + chunk->pos);
			chunk->pos = ((char*)ptr - chunk->data) + size;
	 * no room found, move one filled chunk to cman->full
	 * to keep cman->current from growing too much
	for (chunk = cman->current; chunk; prev = chunk, chunk = chunk->next) {
		if (chunk->pos + MIN_ALIGN * 4 <= chunk->size)
			prev->next = chunk->next;
			cman->current = chunk->next;
		chunk->next = cman->full;
	chunk = new_codechunk (cman->dynamic, size);
	chunk->next = cman->current;
	cman->current = chunk;
	chunk->pos = ALIGN_INT (chunk->pos, alignment);
	/* Align the chunk->data pointer that chunk->pos is added to, */
	/* or we can't guarantee proper alignment. */
	ptr = (void*)((((uintptr_t)chunk->data + align_mask) & ~(uintptr_t)align_mask) + chunk->pos);
	chunk->pos = ((char*)ptr - chunk->data) + size;
	unsigned char *temp_ptr, *code_ptr;
	/* Round up size to next bundle */
	alignment = kNaClBundleSize;
	size = (size + kNaClBundleSize) & (~kNaClBundleMask);
	/* Allocate a temp buffer */
	temp_ptr = memalign (alignment, size);
	g_assert (((uintptr_t)temp_ptr & kNaClBundleMask) == 0);
	/* Allocate code space from the service runtime */
	code_ptr = allocate_code (size);
	/* Insert pointer to code space in hash, keyed by buffer ptr */
	mono_g_hash_table_insert (cman->hash, temp_ptr, code_ptr);
	nacl_jit_check_init ();
	patch_current_depth++;
	patch_source_base[patch_current_depth] = temp_ptr;
	patch_dest_base[patch_current_depth] = code_ptr;
	patch_alloc_size[patch_current_depth] = size;
	g_assert (patch_current_depth < kMaxPatchDepth);
 * mono_code_manager_reserve:
 * @cman: a code manager
 * @size: size of memory to allocate
 * Allocates at least @size bytes of memory inside the code manager @cman.
 * Returns: the pointer to the allocated memory or #NULL on failure
mono_code_manager_reserve (MonoCodeManager *cman, int size)
	return mono_code_manager_reserve_align (cman, size, MIN_ALIGN);
 * mono_code_manager_commit:
 * @cman: a code manager
 * @data: the pointer returned by mono_code_manager_reserve ()
 * @size: the size requested in the call to mono_code_manager_reserve ()
 * @newsize: the new size to reserve
 * If we reserved too much room for a method and we haven't allocated
 * anything else from the code manager since, the excess allocation is
 * returned for later use in the code manager.
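 *
 * For example (illustrative values), if 256 bytes were reserved at p but only
 * 100 were actually emitted, mono_code_manager_commit (cman, p, 256, 100)
 * gives the trailing 156 bytes back to the current chunk, provided p was the
 * most recent reservation.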
mono_code_manager_commit (MonoCodeManager *cman, void *data, int size, int newsize)
#if !defined(__native_client__) || !defined(__native_client_codegen__)
	g_assert (newsize <= size);
	if (cman->current && (size != newsize) && (data == cman->current->data + cman->current->pos - size)) {
		cman->current->pos -= size - newsize;
	g_assert (newsize <= size);
	code = mono_g_hash_table_lookup (cman->hash, data);
	g_assert (code != NULL);
	/* Pad space after code with HLTs */
	/* TODO: this is x86/amd64 specific */
	while (newsize & kNaClBundleMask) {
		*((char *)data + newsize) = 0xf4;
		newsize++;
	status = nacl_dyncode_create (code, data, newsize);
		g_assert_not_reached ();
	mono_g_hash_table_remove (cman->hash, data);
	g_assert (data == patch_source_base[patch_current_depth]);
	g_assert (code == patch_dest_base[patch_current_depth]);
	patch_current_depth--;
	g_assert (patch_current_depth >= -1);
#if defined(__native_client_codegen__) && defined(__native_client__)
nacl_code_manager_get_code_dest (MonoCodeManager *cman, void *data)
	return mono_g_hash_table_lookup (cman->hash, data);
 * mono_code_manager_size:
 * @cman: a code manager
 * @used_size: pointer to an integer for the result
 * This function can be used to get statistics about a code manager:
 * the integer pointed to by @used_size will contain how much
 * memory is actually used inside the code manager @cman.
 * Returns: the amount of memory allocated in @cman
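 *
 * Illustrative call (not from the original source; an int return value is
 * assumed, consistent with @used_size, and the total covers whole chunks so
 * it is at least as large as the used part):
 *
 *   int used = 0;
 *   int total = mono_code_manager_size (cman, &used);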
mono_code_manager_size (MonoCodeManager *cman, int *used_size)
	for (chunk = cman->current; chunk; chunk = chunk->next) {
	for (chunk = cman->full; chunk; chunk = chunk->next) {