X-Git-Url: http://wien.tomnetworks.com/gitweb/?a=blobdiff_plain;f=mono%2Futils%2Fmono-codeman.c;h=83c2bdcc690dcf1847789d4491897350cf6dea90;hb=68be3904cf770be9f98a6ce0e8d71899cb94f189;hp=5c2a83b118965861560e3b60e89874c61c96e2e4;hpb=20d845ca07479ece43da9e344c060e49887fd369;p=mono.git diff --git a/mono/utils/mono-codeman.c b/mono/utils/mono-codeman.c index 5c2a83b1189..83c2bdcc690 100644 --- a/mono/utils/mono-codeman.c +++ b/mono/utils/mono-codeman.c @@ -21,12 +21,7 @@ #include #endif -#if defined(__native_client_codegen__) && defined(__native_client__) -#include -#include -#include -#endif -#include +#include static uintptr_t code_memory_used = 0; @@ -52,16 +47,6 @@ static size_t dynamic_code_frees_count; #define MIN_ALIGN 16 #else #define MIN_ALIGN 8 -#endif -#ifdef __native_client_codegen__ -/* For Google Native Client, all targets of indirect control flow need to */ -/* be aligned to bundle boundary. 16 bytes on ARM, 32 bytes on x86. - * MIN_ALIGN was updated to force alignment for calls from - * tramp-.c to mono_global_codeman_reserve() */ -/* and mono_domain_code_reserve(). */ -#undef MIN_ALIGN -#define MIN_ALIGN kNaClBundleSize - #endif /* if a chunk has less than this amount of free space it's considered full */ @@ -99,138 +84,10 @@ struct _MonoCodeManager { CodeChunk *current; CodeChunk *full; CodeChunk *last; -#if defined(__native_client_codegen__) && defined(__native_client__) - GHashTable *hash; -#endif }; #define ALIGN_INT(val,alignment) (((val) + (alignment - 1)) & ~(alignment - 1)) -#if defined(__native_client_codegen__) && defined(__native_client__) -/* End of text segment, set by linker. - * Dynamic text starts on the next allocated page. - */ -extern char etext[]; -char *next_dynamic_code_addr = NULL; - -/* - * This routine gets the next available bundle aligned - * pointer in the dynamic code section. It does not check - * for the section end, this error will be caught in the - * service runtime. - */ -void* -allocate_code(intptr_t increment) -{ - char *addr; - if (increment < 0) return NULL; - increment = increment & kNaClBundleMask ? (increment & ~kNaClBundleMask) + kNaClBundleSize : increment; - addr = next_dynamic_code_addr; - next_dynamic_code_addr += increment; - return addr; -} - -int -nacl_is_code_address (void *target) -{ - return (char *)target < next_dynamic_code_addr; -} - -/* Fill code buffer with arch-specific NOPs. */ -void -mono_nacl_fill_code_buffer (guint8 *data, int size); - -#ifndef USE_JUMP_TABLES -const int kMaxPatchDepth = 32; -__thread unsigned char **patch_source_base = NULL; -__thread unsigned char **patch_dest_base = NULL; -__thread int *patch_alloc_size = NULL; -__thread int patch_current_depth = -1; -__thread int allow_target_modification = 1; - -static void -nacl_jit_check_init () -{ - if (patch_source_base == NULL) { - patch_source_base = g_malloc (kMaxPatchDepth * sizeof(unsigned char *)); - patch_dest_base = g_malloc (kMaxPatchDepth * sizeof(unsigned char *)); - patch_alloc_size = g_malloc (kMaxPatchDepth * sizeof(int)); - } -} -#endif - -void -nacl_allow_target_modification (int val) -{ -#ifndef USE_JUMP_TABLES - allow_target_modification = val; -#endif /* USE_JUMP_TABLES */ -} - -/* Given a patch target, modify the target such that patching will work when - * the code is copied to the data section. - */ -void* -nacl_modify_patch_target (unsigned char *target) -{ - /* - * There's no need in patch tricks for jumptables, - * as we always patch same jumptable. 
- */ -#ifndef USE_JUMP_TABLES - /* This seems like a bit of an ugly way to do this but the advantage - * is we don't have to worry about all the conditions in - * mono_resolve_patch_target, and it can be used by all the bare uses - * of _patch. - */ - unsigned char *sb; - unsigned char *db; - - if (!allow_target_modification) return target; - - nacl_jit_check_init (); - sb = patch_source_base[patch_current_depth]; - db = patch_dest_base[patch_current_depth]; - - if (target >= sb && (target < sb + patch_alloc_size[patch_current_depth])) { - /* Do nothing. target is in the section being generated. - * no need to modify, the disp will be the same either way. - */ - } else { - int target_offset = target - db; - target = sb + target_offset; - } -#endif - return target; -} - -void* -nacl_inverse_modify_patch_target (unsigned char *target) -{ - /* - * There's no need in patch tricks for jumptables, - * as we always patch same jumptable. - */ -#ifndef USE_JUMP_TABLES - unsigned char *sb; - unsigned char *db; - int target_offset; - - if (!allow_target_modification) return target; - - nacl_jit_check_init (); - sb = patch_source_base[patch_current_depth]; - db = patch_dest_base[patch_current_depth]; - - target_offset = target - sb; - target = db + target_offset; -#endif - return target; -} - - -#endif /* __native_client_codegen && __native_client__ */ - #define VALLOC_FREELIST_SIZE 16 static mono_mutex_t valloc_mutex; @@ -243,15 +100,15 @@ codechunk_valloc (void *preferred, guint32 size) GSList *freelist; if (!valloc_freelists) { - mono_mutex_init_recursive (&valloc_mutex); + mono_os_mutex_init_recursive (&valloc_mutex); valloc_freelists = g_hash_table_new (NULL, NULL); } /* * Keep a small freelist of memory blocks to decrease pressure on the kernel memory subsystem to avoid #3321. 
*/ - mono_mutex_lock (&valloc_mutex); - freelist = g_hash_table_lookup (valloc_freelists, GUINT_TO_POINTER (size)); + mono_os_mutex_lock (&valloc_mutex); + freelist = (GSList *) g_hash_table_lookup (valloc_freelists, GUINT_TO_POINTER (size)); if (freelist) { ptr = freelist->data; memset (ptr, 0, size); @@ -262,7 +119,7 @@ codechunk_valloc (void *preferred, guint32 size) if (!ptr && preferred) ptr = mono_valloc (NULL, size, MONO_PROT_RWX | ARCH_MAP_FLAGS); } - mono_mutex_unlock (&valloc_mutex); + mono_os_mutex_unlock (&valloc_mutex); return ptr; } @@ -271,15 +128,15 @@ codechunk_vfree (void *ptr, guint32 size) { GSList *freelist; - mono_mutex_lock (&valloc_mutex); - freelist = g_hash_table_lookup (valloc_freelists, GUINT_TO_POINTER (size)); + mono_os_mutex_lock (&valloc_mutex); + freelist = (GSList *) g_hash_table_lookup (valloc_freelists, GUINT_TO_POINTER (size)); if (!freelist || g_slist_length (freelist) < VALLOC_FREELIST_SIZE) { freelist = g_slist_prepend (freelist, ptr); g_hash_table_insert (valloc_freelists, GUINT_TO_POINTER (size), freelist); } else { mono_vfree (ptr, size); } - mono_mutex_unlock (&valloc_mutex); + mono_os_mutex_unlock (&valloc_mutex); } static void @@ -292,7 +149,7 @@ codechunk_cleanup (void) return; g_hash_table_iter_init (&iter, valloc_freelists); while (g_hash_table_iter_next (&iter, &key, &value)) { - GSList *freelist = value; + GSList *freelist = (GSList *) value; GSList *l; for (l = freelist; l; l = l->next) { @@ -331,32 +188,7 @@ mono_code_manager_cleanup (void) MonoCodeManager* mono_code_manager_new (void) { - MonoCodeManager *cman = g_malloc0 (sizeof (MonoCodeManager)); - if (!cman) - return NULL; -#if defined(__native_client_codegen__) && defined(__native_client__) - if (next_dynamic_code_addr == NULL) { - const guint kPageMask = 0xFFFF; /* 64K pages */ - next_dynamic_code_addr = (uintptr_t)(etext + kPageMask) & ~kPageMask; -#if defined (__GLIBC__) - /* TODO: For now, just jump 64MB ahead to avoid dynamic libraries. */ - next_dynamic_code_addr += (uintptr_t)0x4000000; -#else - /* Workaround bug in service runtime, unable to allocate */ - /* from the first page in the dynamic code section. 
*/ - next_dynamic_code_addr += (uintptr_t)0x10000; -#endif - } - cman->hash = g_hash_table_new (NULL, NULL); -# ifndef USE_JUMP_TABLES - if (patch_source_base == NULL) { - patch_source_base = g_malloc (kMaxPatchDepth * sizeof(unsigned char *)); - patch_dest_base = g_malloc (kMaxPatchDepth * sizeof(unsigned char *)); - patch_alloc_size = g_malloc (kMaxPatchDepth * sizeof(int)); - } -# endif -#endif - return cman; + return (MonoCodeManager *) g_malloc0 (sizeof (MonoCodeManager)); } /** @@ -488,11 +320,8 @@ mono_code_manager_foreach (MonoCodeManager *cman, MonoCodeManagerFunc func, void #if defined(__ppc__) || defined(__powerpc__) #define BIND_ROOM 4 #endif -#if defined(__arm__) -#define BIND_ROOM 8 -#endif #if defined(TARGET_ARM64) -#define BIND_ROOM 8 +#define BIND_ROOM 4 #endif static CodeChunk* @@ -529,15 +358,21 @@ new_codechunk (CodeChunk *last, int dynamic, int size) } } #ifdef BIND_ROOM - bsize = chunk_size / BIND_ROOM; + if (dynamic) + /* Reserve more space since there are no other chunks we might use if this one gets full */ + bsize = (chunk_size * 2) / BIND_ROOM; + else + bsize = chunk_size / BIND_ROOM; if (bsize < MIN_BSIZE) bsize = MIN_BSIZE; bsize += MIN_ALIGN -1; bsize &= ~ (MIN_ALIGN - 1); if (chunk_size - size < bsize) { chunk_size = size + bsize; - chunk_size += pagesize - 1; - chunk_size &= ~ (pagesize - 1); + if (!dynamic) { + chunk_size += pagesize - 1; + chunk_size &= ~ (pagesize - 1); + } } #endif @@ -563,7 +398,7 @@ new_codechunk (CodeChunk *last, int dynamic, int size) #endif } - chunk = malloc (sizeof (CodeChunk)); + chunk = (CodeChunk *) malloc (sizeof (CodeChunk)); if (!chunk) { if (flags == CODE_FLAG_MALLOC) dlfree (ptr); @@ -573,7 +408,7 @@ new_codechunk (CodeChunk *last, int dynamic, int size) } chunk->next = NULL; chunk->size = chunk_size; - chunk->data = ptr; + chunk->data = (char *) ptr; chunk->flags = flags; chunk->pos = bsize; chunk->bsize = bsize; @@ -598,7 +433,6 @@ new_codechunk (CodeChunk *last, int dynamic, int size) void* mono_code_manager_reserve_align (MonoCodeManager *cman, int size, int alignment) { -#if !defined(__native_client__) || !defined(__native_client_codegen__) CodeChunk *chunk, *prev; void *ptr; guint32 align_mask = alignment - 1; @@ -661,31 +495,6 @@ mono_code_manager_reserve_align (MonoCodeManager *cman, int size, int alignment) ptr = (void*)((((uintptr_t)chunk->data + align_mask) & ~(uintptr_t)align_mask) + chunk->pos); chunk->pos = ((char*)ptr - chunk->data) + size; return ptr; -#else - unsigned char *temp_ptr, *code_ptr; - /* Round up size to next bundle */ - alignment = kNaClBundleSize; - size = (size + kNaClBundleSize) & (~kNaClBundleMask); - /* Allocate a temp buffer */ - temp_ptr = memalign (alignment, size); - g_assert (((uintptr_t)temp_ptr & kNaClBundleMask) == 0); - /* Allocate code space from the service runtime */ - code_ptr = allocate_code (size); - /* Insert pointer to code space in hash, keyed by buffer ptr */ - g_hash_table_insert (cman->hash, temp_ptr, code_ptr); - -#ifndef USE_JUMP_TABLES - nacl_jit_check_init (); - - patch_current_depth++; - patch_source_base[patch_current_depth] = temp_ptr; - patch_dest_base[patch_current_depth] = code_ptr; - patch_alloc_size[patch_current_depth] = size; - g_assert (patch_current_depth < kMaxPatchDepth); -#endif - - return temp_ptr; -#endif } /** @@ -717,51 +526,13 @@ mono_code_manager_reserve (MonoCodeManager *cman, int size) void mono_code_manager_commit (MonoCodeManager *cman, void *data, int size, int newsize) { -#if !defined(__native_client__) || 
!defined(__native_client_codegen__) g_assert (newsize <= size); if (cman->current && (size != newsize) && (data == cman->current->data + cman->current->pos - size)) { cman->current->pos -= size - newsize; } -#else - unsigned char *code; - int status; - g_assert (NACL_BUNDLE_ALIGN_UP(newsize) <= size); - code = g_hash_table_lookup (cman->hash, data); - g_assert (code != NULL); - mono_nacl_fill_code_buffer ((uint8_t*)data + newsize, size - newsize); - newsize = NACL_BUNDLE_ALIGN_UP(newsize); - g_assert ((GPOINTER_TO_UINT (data) & kNaClBundleMask) == 0); - g_assert ((newsize & kNaClBundleMask) == 0); - status = nacl_dyncode_create (code, data, newsize); - if (status != 0) { - unsigned char *codep; - fprintf(stderr, "Error creating Native Client dynamic code section attempted to be\n" - "emitted at %p (hex dissasembly of code follows):\n", code); - for (codep = data; codep < data + newsize; codep++) - fprintf(stderr, "%02x ", *codep); - fprintf(stderr, "\n"); - g_assert_not_reached (); - } - g_hash_table_remove (cman->hash, data); -# ifndef USE_JUMP_TABLES - g_assert (data == patch_source_base[patch_current_depth]); - g_assert (code == patch_dest_base[patch_current_depth]); - patch_current_depth--; - g_assert (patch_current_depth >= -1); -# endif - free (data); -#endif } -#if defined(__native_client_codegen__) && defined(__native_client__) -void * -nacl_code_manager_get_code_dest (MonoCodeManager *cman, void *data) -{ - return g_hash_table_lookup (cman->hash, data); -} -#endif - /** * mono_code_manager_size: * @cman: a code manager @@ -791,27 +562,3 @@ mono_code_manager_size (MonoCodeManager *cman, int *used_size) *used_size = used; return size; } - -#ifdef __native_client_codegen__ -# if defined(TARGET_ARM) -/* Fill empty space with UDF instruction used as halt on ARM. */ -void -mono_nacl_fill_code_buffer (guint8 *data, int size) -{ - guint32* data32 = (guint32*)data; - int i; - g_assert(size % 4 == 0); - for (i = 0; i < size / 4; i++) - data32[i] = 0xE7FEDEFF; -} -# elif (defined(TARGET_X86) || defined(TARGET_AMD64)) -/* Fill empty space with HLT instruction */ -void -mono_nacl_fill_code_buffer(guint8 *data, int size) -{ - memset (data, 0xf4, size); -} -# else -# error "Not ported" -# endif -#endif
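
The most substantive behavioural change in this patch is the BIND_ROOM sizing in new_codechunk(): for a dynamic code manager the reserved bind/thunk area (bsize, which becomes the initial chunk->pos) is doubled, because a dynamic manager has no other chunks to fall back on once this one fills up, and the resulting chunk size is no longer rounded up to a whole page. Below is a minimal standalone sketch of that arithmetic as it reads after the patch; it is not the mono function itself. chunk_size_for() is a hypothetical helper invented for illustration, MIN_BSIZE is assumed to be 32 (it is defined elsewhere in mono-codeman.c and not visible in this hunk), and the pagesize and BIND_ROOM values used in the demo are merely illustrative.

#include <stdio.h>

#define MIN_ALIGN  8       /* fallback MIN_ALIGN value from the file */
#define MIN_BSIZE  32      /* assumed; not shown in this hunk */
#define BIND_ROOM  4       /* e.g. ppc or arm64 after this patch */

/* Hypothetical standalone helper mirroring the BIND_ROOM block of new_codechunk(). */
static int
chunk_size_for (int size, int chunk_size, int pagesize, int dynamic)
{
	int bsize;

	if (dynamic)
		/* No other chunks to use if this one gets full: reserve twice as much. */
		bsize = (chunk_size * 2) / BIND_ROOM;
	else
		bsize = chunk_size / BIND_ROOM;
	if (bsize < MIN_BSIZE)
		bsize = MIN_BSIZE;
	bsize += MIN_ALIGN - 1;
	bsize &= ~(MIN_ALIGN - 1);

	/* Grow the chunk if the request would not fit next to the bind room. */
	if (chunk_size - size < bsize) {
		chunk_size = size + bsize;
		if (!dynamic) {
			/* Only non-dynamic chunks are still rounded up to a page. */
			chunk_size += pagesize - 1;
			chunk_size &= ~(pagesize - 1);
		}
	}
	return chunk_size;
}

int
main (void)
{
	/* Dynamic request: 128 bytes + 64 bytes of bind room = 192, no page rounding. */
	printf ("dynamic: %d\n", chunk_size_for (128, 128, 4096, 1));
	/* Regular chunk: the bind room already fits, so the size is unchanged. */
	printf ("regular: %d\n", chunk_size_for (128, 4 * 4096, 4096, 0));
	return 0;
}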
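
The codechunk_valloc()/codechunk_vfree() pair that this patch touches (the mono_mutex_* to mono_os_mutex_* rename and the explicit GSList casts) implements a small cache of RWX mappings: freed blocks are parked on a size-keyed free list instead of being unmapped immediately, so hot allocate/free cycles do not hammer the kernel memory subsystem (the "#3321" issue cited in the comment). The following is a deliberately simplified, self-contained illustration of that caching strategy, not mono's implementation: it handles a single block size, uses a pthread mutex instead of mono_os_mutex_*, stores the free list intrusively rather than in a GHashTable of GSLists, and the names block_alloc/block_free are invented for the sketch.

#include <pthread.h>
#include <string.h>
#include <sys/mman.h>

#define FREELIST_MAX 16   /* mirrors VALLOC_FREELIST_SIZE */

typedef struct Block { struct Block *next; } Block;

static pthread_mutex_t freelist_mutex = PTHREAD_MUTEX_INITIALIZER;
static Block *freelist;
static int freelist_len;

static void *
block_alloc (size_t size)
{
	void *ptr = NULL;

	pthread_mutex_lock (&freelist_mutex);
	if (freelist) {
		/* Reuse a cached mapping instead of asking the kernel again. */
		ptr = freelist;
		freelist = freelist->next;
		freelist_len--;
		memset (ptr, 0, size);
	}
	pthread_mutex_unlock (&freelist_mutex);

	if (!ptr) {
		ptr = mmap (NULL, size, PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (ptr == MAP_FAILED)
			ptr = NULL;
	}
	return ptr;
}

static void
block_free (void *ptr, size_t size)
{
	pthread_mutex_lock (&freelist_mutex);
	if (freelist_len < FREELIST_MAX) {
		/* Park the block for later reuse rather than unmapping it. */
		((Block *) ptr)->next = freelist;
		freelist = (Block *) ptr;
		freelist_len++;
		ptr = NULL;
	}
	pthread_mutex_unlock (&freelist_mutex);

	if (ptr)
		munmap (ptr, size);
}

int
main (void)
{
	void *p = block_alloc (4096);
	block_free (p, 4096);                      /* goes onto the free list */
	return block_alloc (4096) == p ? 0 : 1;    /* the same mapping is reused */
}

In the real code the GHashTable is keyed by the allocation size, so each distinct chunk size gets its own free list capped at VALLOC_FREELIST_SIZE entries.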