#include "mono-mmap.h"
#include "mono-counters.h"
#include "dlmalloc.h"
-#include <mono/metadata/class-internals.h>
+#include <mono/io-layer/io-layer.h>
#include <mono/metadata/profiler-private.h>
#ifdef HAVE_VALGRIND_MEMCHECK_H
#include <valgrind/memcheck.h>
#if defined(__native_client_codegen__) && defined(__native_client__)
#include <malloc.h>
#include <nacl/nacl_dyncode.h>
+#include <mono/mini/mini.h>
#endif
static uintptr_t code_memory_used = 0;
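+/* Counters for dynamic-method code; they replace the old mono_stats */
+/* fields and are registered with mono_counters in mono_code_manager_init (). */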
+static size_t dynamic_code_alloc_count;
+static size_t dynamic_code_bytes_count;
+static size_t dynamic_code_frees_count;
/*
* AMD64 processors maintain icache coherency only for pages which are
#define MIN_PAGES 16
-#if defined(__ia64__) || defined(__x86_64__)
+#if defined(__ia64__) || defined(__x86_64__) || defined(_WIN64)
/*
* We require 16 byte alignment on amd64 so the fp literals embedded in the code are
* properly aligned for SSE2.
#endif
#ifdef __native_client_codegen__
/* For Google Native Client, all targets of indirect control flow need to */
-/* be aligned to a 32-byte boundary. MIN_ALIGN was updated to 32 to force */
-/* alignment for calls from tramp-x86.c to mono_global_codeman_reserve() */
+/* be aligned to a bundle boundary: 16 bytes on ARM, 32 bytes on x86. */
+/* MIN_ALIGN was updated to force this alignment for calls from */
+/* tramp-<arch>.c to mono_global_codeman_reserve() */
/* and mono_domain_code_reserve(). */
#undef MIN_ALIGN
-#define MIN_ALIGN 32
+#define MIN_ALIGN kNaClBundleSize
+
#endif
/* if a chunk has less than this amount of free space it's considered full */
int read_only;
CodeChunk *current;
CodeChunk *full;
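+ /* The most recently allocated chunk; used as an address hint so */
+ /* new chunks can be mapped right after it (see new_codechunk). */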
+ CodeChunk *last;
#if defined(__native_client_codegen__) && defined(__native_client__)
GHashTable *hash;
#endif
return (char *)target < next_dynamic_code_addr;
}
+/* Fill code buffer with arch-specific halt instructions (see below). */
+void
+mono_nacl_fill_code_buffer (guint8 *data, int size);
+
+#ifndef USE_JUMP_TABLES
const int kMaxPatchDepth = 32;
__thread unsigned char **patch_source_base = NULL;
__thread unsigned char **patch_dest_base = NULL;
__thread int patch_current_depth = -1;
__thread int allow_target_modification = 1;
-void
-nacl_allow_target_modification (int val)
-{
- allow_target_modification = val;
-}
-
static void
nacl_jit_check_init ()
{
patch_alloc_size = g_malloc (kMaxPatchDepth * sizeof(int));
}
}
+#endif
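+/* With USE_JUMP_TABLES every patch goes through the same jump table, */
+/* so the per-target modification tracking above is compiled out. */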
+void
+nacl_allow_target_modification (int val)
+{
+#ifndef USE_JUMP_TABLES
+ allow_target_modification = val;
+#endif /* USE_JUMP_TABLES */
+}
/* Given a patch target, modify the target such that patching will work when
* the code is copied to the data section.
void*
nacl_modify_patch_target (unsigned char *target)
{
+ /*
+ * There is no need for patch tricks with jump tables,
+ * as we always patch the same jump table.
+ */
+#ifndef USE_JUMP_TABLES
/* This seems like a bit of an ugly way to do this but the advantage
* is we don't have to worry about all the conditions in
* mono_resolve_patch_target, and it can be used by all the bare uses
int target_offset = target - db;
target = sb + target_offset;
}
+#endif
return target;
}
void*
nacl_inverse_modify_patch_target (unsigned char *target)
{
+ /*
+ * There is no need for patch tricks with jump tables,
+ * as we always patch the same jump table.
+ */
+#ifndef USE_JUMP_TABLES
unsigned char *sb;
unsigned char *db;
int target_offset;
target_offset = target - sb;
target = db + target_offset;
+#endif
return target;
}
static GHashTable *valloc_freelists;
static void*
-codechunk_valloc (guint32 size)
+codechunk_valloc (void *preferred, guint32 size)
{
void *ptr;
GSList *freelist;
if (freelist) {
ptr = freelist->data;
memset (ptr, 0, size);
- freelist = g_slist_remove_link (freelist, freelist);
+ freelist = g_slist_delete_link (freelist, freelist);
g_hash_table_insert (valloc_freelists, GUINT_TO_POINTER (size), freelist);
} else {
- ptr = mono_valloc (NULL, size + MIN_ALIGN - 1, MONO_PROT_RWX | ARCH_MAP_FLAGS);
+ ptr = mono_valloc (preferred, size, MONO_PROT_RWX | ARCH_MAP_FLAGS);
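+ /* The preferred address is only a hint; if the OS cannot map */
+ /* there, retry anywhere. */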
+ if (!ptr && preferred)
+ ptr = mono_valloc (NULL, size, MONO_PROT_RWX | ARCH_MAP_FLAGS);
}
LeaveCriticalSection (&valloc_mutex);
return ptr;
void
mono_code_manager_init (void)
{
+ mono_counters_register ("Dynamic code allocs", MONO_COUNTER_JIT | MONO_COUNTER_WORD, &dynamic_code_alloc_count);
+ mono_counters_register ("Dynamic code bytes", MONO_COUNTER_JIT | MONO_COUNTER_WORD, &dynamic_code_bytes_count);
+ mono_counters_register ("Dynamic code frees", MONO_COUNTER_JIT | MONO_COUNTER_WORD, &dynamic_code_frees_count);
}
void
MonoCodeManager*
mono_code_manager_new (void)
{
- MonoCodeManager *cman = malloc (sizeof (MonoCodeManager));
+ MonoCodeManager *cman = g_malloc0 (sizeof (MonoCodeManager));
if (!cman)
return NULL;
- cman->current = NULL;
- cman->full = NULL;
- cman->dynamic = 0;
- cman->read_only = 0;
#if defined(__native_client_codegen__) && defined(__native_client__)
if (next_dynamic_code_addr == NULL) {
const guint kPageMask = 0xFFFF; /* 64K pages */
#endif
}
cman->hash = g_hash_table_new (NULL, NULL);
+# ifndef USE_JUMP_TABLES
if (patch_source_base == NULL) {
patch_source_base = g_malloc (kMaxPatchDepth * sizeof(unsigned char *));
patch_dest_base = g_malloc (kMaxPatchDepth * sizeof(unsigned char *));
patch_alloc_size = g_malloc (kMaxPatchDepth * sizeof(int));
}
+# endif
#endif
return cman;
}
#if defined(__arm__)
#define BIND_ROOM 8
#endif
+#if defined(TARGET_ARM64)
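+/* Same bind room as 32-bit ARM. */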
+#define BIND_ROOM 8
+#endif
static CodeChunk*
-new_codechunk (int dynamic, int size)
+new_codechunk (CodeChunk *last, int dynamic, int size)
{
int minsize, flags = CODE_FLAG_MMAP;
int chunk_size, bsize = 0;
if (size < minsize)
chunk_size = minsize;
else {
+ /* Round the size up to a multiple of MIN_ALIGN so we can still */
+ /* guarantee MIN_ALIGN alignment for individual allocs */
+ /* from mono_code_manager_reserve_align. */
+ size += MIN_ALIGN - 1;
+ size &= ~(MIN_ALIGN - 1);
chunk_size = size;
chunk_size += pagesize - 1;
chunk_size &= ~ (pagesize - 1);
if (!ptr)
return NULL;
} else {
- /* Allocate MIN_ALIGN-1 more than we need so we can still */
- /* guarantee MIN_ALIGN alignment for individual allocs */
- /* from mono_code_manager_reserve_align. */
- ptr = codechunk_valloc (chunk_size);
+ /* Try to allocate code chunks next to each other to help the VM */
+ if (last)
+ ptr = codechunk_valloc ((guint8*)last->data + last->size, chunk_size);
+ else
+ ptr = codechunk_valloc (NULL, chunk_size);
if (!ptr)
return NULL;
}
g_assert (alignment <= MIN_ALIGN);
if (cman->dynamic) {
- ++mono_stats.dynamic_code_alloc_count;
- mono_stats.dynamic_code_bytes_count += size;
+ ++dynamic_code_alloc_count;
+ dynamic_code_bytes_count += size;
}
if (!cman->current) {
- cman->current = new_codechunk (cman->dynamic, size);
+ cman->current = new_codechunk (cman->last, cman->dynamic, size);
if (!cman->current)
return NULL;
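+ /* Track the newest chunk so the next allocation can try to be */
+ /* placed contiguously after it (see new_codechunk). */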
+ cman->last = cman->current;
}
for (chunk = cman->current; chunk; chunk = chunk->next) {
cman->full = chunk;
break;
}
- chunk = new_codechunk (cman->dynamic, size);
+ chunk = new_codechunk (cman->last, cman->dynamic, size);
if (!chunk)
return NULL;
chunk->next = cman->current;
cman->current = chunk;
+ cman->last = cman->current;
chunk->pos = ALIGN_INT (chunk->pos, alignment);
/* Align the chunk->data we add to chunk->pos */
/* or we can't guarantee proper alignment */
/* Insert pointer to code space in hash, keyed by buffer ptr */
g_hash_table_insert (cman->hash, temp_ptr, code_ptr);
+#ifndef USE_JUMP_TABLES
nacl_jit_check_init ();
patch_current_depth++;
patch_dest_base[patch_current_depth] = code_ptr;
patch_alloc_size[patch_current_depth] = size;
g_assert (patch_current_depth < kMaxPatchDepth);
+#endif
+
return temp_ptr;
#endif
}
#else
unsigned char *code;
int status;
- g_assert (newsize <= size);
+ g_assert (NACL_BUNDLE_ALIGN_UP(newsize) <= size);
code = g_hash_table_lookup (cman->hash, data);
g_assert (code != NULL);
- /* Pad space after code with HLTs */
- /* TODO: this is x86/amd64 specific */
- while (newsize & kNaClBundleMask) {
- *((char *)data + newsize) = 0xf4;
- newsize++;
- }
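+ /* Pad the unused tail with the architecture's halt filler, then */
+ /* round the validated size up to a bundle boundary. */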
+ mono_nacl_fill_code_buffer ((uint8_t*)data + newsize, size - newsize);
+ newsize = NACL_BUNDLE_ALIGN_UP(newsize);
+ g_assert ((GPOINTER_TO_UINT (data) & kNaClBundleMask) == 0);
+ g_assert ((newsize & kNaClBundleMask) == 0);
status = nacl_dyncode_create (code, data, newsize);
if (status != 0) {
unsigned char *codep;
g_assert_not_reached ();
}
g_hash_table_remove (cman->hash, data);
+# ifndef USE_JUMP_TABLES
g_assert (data == patch_source_base[patch_current_depth]);
g_assert (code == patch_dest_base[patch_current_depth]);
patch_current_depth--;
g_assert (patch_current_depth >= -1);
+# endif
free (data);
#endif
}
return size;
}
+#ifdef __native_client_codegen__
+# if defined(TARGET_ARM)
+/* Fill empty space with UDF instruction used as halt on ARM. */
+void
+mono_nacl_fill_code_buffer (guint8 *data, int size)
+{
+ guint32* data32 = (guint32*)data;
+ int i;
+ g_assert (size % 4 == 0);
+ for (i = 0; i < size / 4; i++)
+ data32[i] = 0xE7FEDEFF;
+}
+# elif (defined(TARGET_X86) || defined(TARGET_AMD64))
+/* Fill empty space with HLT instruction */
+void
+mono_nacl_fill_code_buffer (guint8 *data, int size)
+{
+ memset (data, 0xf4, size);
+}
+# else
+# error "Not ported"
+# endif
+#endif