#include "config.h"
+
+#ifdef HAVE_UNISTD_H
#include <unistd.h>
+#endif
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <glib.h>
-#ifdef PLATFORM_WIN32
-#include <windows.h>
-#include <io.h>
-#else
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <sys/mman.h>
-#include <fcntl.h>
-#endif
+/* For dlmalloc.h */
+#define USE_DL_PREFIX 1
#include "mono-codeman.h"
+#include "mono-mmap.h"
+#include "mono-counters.h"
+#include "dlmalloc.h"
+#include <mono/metadata/class-internals.h>
+#include <mono/metadata/profiler-private.h>
+#ifdef HAVE_VALGRIND_MEMCHECK_H
+#include <valgrind/memcheck.h>
+#endif
-#ifdef PLATFORM_WIN32
-#define FORCE_MALLOC
+#if defined(__native_client_codegen__) && defined(__native_client__)
+#include <malloc.h>
+#include <nacl/nacl_dyncode.h>
#endif
+static uintptr_t code_memory_used = 0;
+
+/*
+ * AMD64 processors maintain icache coherency only for pages which are
+ * marked executable. Also, windows DEP requires us to obtain executable memory from
+ * malloc when using dynamic code managers. The system malloc can't do this so we use a
+ * slightly modified version of Doug Lea's Malloc package for this purpose:
+ * http://g.oswego.edu/dl/html/malloc.html
+ */
+
#define MIN_PAGES 16
#if defined(__ia64__) || defined(__x86_64__)
#else
#define MIN_ALIGN 8
#endif
+#ifdef __native_client_codegen__
+/* For Google Native Client, all targets of indirect control flow need to */
+/* be aligned to a 32-byte boundary. MIN_ALIGN was updated to 32 to force */
+/* alignment for calls from tramp-x86.c to mono_global_codeman_reserve() */
+/* and mono_domain_code_reserve(). */
+#undef MIN_ALIGN
+#define MIN_ALIGN 32
+#endif
/* if a chunk has less than this amount of free space it's considered full */
#define MAX_WASTAGE 32
#define MIN_BSIZE 32
-#ifndef MAP_ANONYMOUS
-#ifdef MAP_ANON
-#define MAP_ANONYMOUS MAP_ANON
-#else
-#define FORCE_MALLOC
-#endif
-#endif
-
#ifdef __x86_64__
-#define ARCH_MAP_FLAGS MAP_32BIT
+#define ARCH_MAP_FLAGS MONO_MMAP_32BIT
#else
#define ARCH_MAP_FLAGS 0
#endif
+#define MONO_PROT_RWX (MONO_MMAP_READ|MONO_MMAP_WRITE|MONO_MMAP_EXEC)
+
typedef struct _CodeChunck CodeChunk;
enum {
struct _MonoCodeManager {
int dynamic;
+ int read_only;
CodeChunk *current;
CodeChunk *full;
+#if defined(__native_client_codegen__) && defined(__native_client__)
+ GHashTable *hash;
+#endif
};
+#define ALIGN_INT(val,alignment) (((val) + (alignment - 1)) & ~(alignment - 1))
+
+#if defined(__native_client_codegen__) && defined(__native_client__)
+/* End of text segment, set by linker.
+ * Dynamic text starts on the next allocated page.
+ */
+extern char etext[];
+char *next_dynamic_code_addr = NULL;
+
+/*
+ * This routine gets the next available bundle aligned
+ * pointer in the dynamic code section. It does not check
+ * for the section end, this error will be caught in the
+ * service runtime.
+ */
+void*
+allocate_code(intptr_t increment)
+{
+	char *addr;
+	/* Reject negative requests outright. */
+	if (increment < 0) return NULL;
+	/* Round the request up to a whole NaCl bundle so the next
+	 * allocation also starts on a bundle boundary. */
+	increment = increment & kNaClBundleMask ? (increment & ~kNaClBundleMask) + kNaClBundleSize : increment;
+	/* Bump allocator: hand out the current cursor, then advance it.
+	 * NOTE(review): no locking is performed here — confirm callers
+	 * serialize access to next_dynamic_code_addr. */
+	addr = next_dynamic_code_addr;
+	next_dynamic_code_addr += increment;
+	return addr;
+}
+
+/* Returns non-zero if `target` lies below the current dynamic-code
+ * allocation cursor. NOTE(review): only the upper bound is checked —
+ * any address below the dynamic code region also reports as code;
+ * confirm this is acceptable for the callers. */
+int
+nacl_is_code_address (void *target)
+{
+	return (char *)target < next_dynamic_code_addr;
+}
+
+const int kMaxPatchDepth = 32;
+__thread unsigned char **patch_source_base = NULL;
+__thread unsigned char **patch_dest_base = NULL;
+__thread int *patch_alloc_size = NULL;
+__thread int patch_current_depth = -1;
+__thread int allow_target_modification = 1;
+
+/* Toggle the per-thread flag consulted by nacl_modify_patch_target ()
+ * and nacl_inverse_modify_patch_target (); when zero, both return
+ * their argument unchanged. */
+void
+nacl_allow_target_modification (int val)
+{
+	allow_target_modification = val;
+}
+
+/* Lazily allocate the per-thread patch-tracking arrays (source base,
+ * destination base, allocation size), each kMaxPatchDepth entries.
+ * NOTE(review): these g_malloc'd arrays are never freed — presumably
+ * acceptable as per-thread one-time allocations; confirm. */
+static void
+nacl_jit_check_init ()
+{
+	if (patch_source_base == NULL) {
+		patch_source_base = g_malloc (kMaxPatchDepth * sizeof(unsigned char *));
+		patch_dest_base = g_malloc (kMaxPatchDepth * sizeof(unsigned char *));
+		patch_alloc_size = g_malloc (kMaxPatchDepth * sizeof(int));
+	}
+}
+
+
+/* Given a patch target, modify the target such that patching will work when
+ * the code is copied to the data section.
+ */
+void*
+nacl_modify_patch_target (unsigned char *target)
+{
+	/* This seems like a bit of an ugly way to do this but the advantage
+	 * is we don't have to worry about all the conditions in
+	 * mono_resolve_patch_target, and it can be used by all the bare uses
+	 * of <arch>_patch.
+	 */
+	unsigned char *sb;
+	unsigned char *db;
+
+	/* Pass-through when target modification is disabled for this thread. */
+	if (!allow_target_modification) return target;
+
+	nacl_jit_check_init ();
+	/* NOTE(review): patch_current_depth starts at -1; this indexes the
+	 * arrays unconditionally — confirm a reserve () always precedes
+	 * calls to this function. */
+	sb = patch_source_base[patch_current_depth];
+	db = patch_dest_base[patch_current_depth];
+
+	if (target >= sb && (target < sb + patch_alloc_size[patch_current_depth])) {
+		/* Do nothing. target is in the section being generated.
+		 * no need to modify, the disp will be the same either way.
+		 */
+	} else {
+		/* Translate a destination-region address back into the
+		 * temporary source buffer so relative patches computed
+		 * against the buffer land correctly after the copy. */
+		int target_offset = target - db;
+		target = sb + target_offset;
+	}
+	return target;
+}
+
+/* Inverse of nacl_modify_patch_target (): translate an address in the
+ * temporary source buffer into the corresponding address in the final
+ * code destination region, using the per-thread patch bases. */
+void*
+nacl_inverse_modify_patch_target (unsigned char *target)
+{
+	unsigned char *sb;
+	unsigned char *db;
+	int target_offset;
+
+	/* Pass-through when target modification is disabled for this thread. */
+	if (!allow_target_modification) return target;
+
+	nacl_jit_check_init ();
+	sb = patch_source_base[patch_current_depth];
+	db = patch_dest_base[patch_current_depth];
+
+	target_offset = target - sb;
+	target = db + target_offset;
+	return target;
+}
+
+
+#endif /* __native_client_codegen && __native_client__ */
+
+/**
+ * mono_code_manager_new:
+ *
+ * Creates a new code manager. A code manager can be used to allocate memory
+ * suitable for storing native code that can be later executed.
+ * A code manager allocates memory from the operating system in large chunks
+ * (typically 64KB in size) so that many methods can be allocated inside them
+ * close together, improving cache locality.
+ *
+ * Returns: the new code manager
+ */
MonoCodeManager*
mono_code_manager_new (void)
{
	cman->current = NULL;
	cman->full = NULL;
	cman->dynamic = 0;
+	cman->read_only = 0;
+#if defined(__native_client_codegen__) && defined(__native_client__)
+	/* One-time setup of the dynamic-code allocation cursor, starting
+	 * at the first 64K-aligned page past the end of the text segment. */
+	if (next_dynamic_code_addr == NULL) {
+		const guint kPageMask = 0xFFFF; /* 64K pages */
+		/* NOTE(review): integer result assigned to a char* without a
+		 * cast back to a pointer type — confirm this compiles cleanly. */
+		next_dynamic_code_addr = (uintptr_t)(etext + kPageMask) & ~kPageMask;
+#if defined (__GLIBC__)
+		/* TODO: For now, just jump 64MB ahead to avoid dynamic libraries. */
+		next_dynamic_code_addr += (uintptr_t)0x4000000;
+#else
+		/* Workaround bug in service runtime, unable to allocate */
+		/* from the first page in the dynamic code section. */
+		next_dynamic_code_addr += (uintptr_t)0x10000;
+#endif
+	}
+	/* Maps temp code buffers to their final destination addresses;
+	 * entries live from reserve () until commit (). */
+	cman->hash = g_hash_table_new (NULL, NULL);
+	/* Same lazy initialization as nacl_jit_check_init (). */
+	if (patch_source_base == NULL) {
+		patch_source_base = g_malloc (kMaxPatchDepth * sizeof(unsigned char *));
+		patch_dest_base = g_malloc (kMaxPatchDepth * sizeof(unsigned char *));
+		patch_alloc_size = g_malloc (kMaxPatchDepth * sizeof(int));
+	}
+#endif
	return cman;
}
+/**
+ * mono_code_manager_new_dynamic:
+ *
+ * Creates a new code manager suitable for holding native code that can be
+ * used for single or small methods that need to be deallocated independently
+ * of other native code.
+ *
+ * Returns: the new code manager
+ */
MonoCodeManager*
mono_code_manager_new_dynamic (void)
{
free_chunklist (CodeChunk *chunk)
{
CodeChunk *dead;
+
+#if defined(HAVE_VALGRIND_MEMCHECK_H) && defined (VALGRIND_JIT_UNREGISTER_MAP)
+ int valgrind_unregister = 0;
+ if (RUNNING_ON_VALGRIND)
+ valgrind_unregister = 1;
+#define valgrind_unregister(x) do { if (valgrind_unregister) { VALGRIND_JIT_UNREGISTER_MAP(NULL,x); } } while (0)
+#else
+#define valgrind_unregister(x)
+#endif
+
for (; chunk; ) {
dead = chunk;
+ mono_profiler_code_chunk_destroy ((gpointer) dead->data);
chunk = chunk->next;
if (dead->flags == CODE_FLAG_MMAP) {
-#ifndef FORCE_MALLOC
- munmap (dead->data, dead->size);
-#endif
+ mono_vfree (dead->data, dead->size);
+ /* valgrind_unregister(dead->data); */
} else if (dead->flags == CODE_FLAG_MALLOC) {
- free (dead->data);
+ dlfree (dead->data);
}
+ code_memory_used -= dead->size;
free (dead);
}
}
+/**
+ * mono_code_manager_destroy:
+ * @cman: a code manager
+ *
+ * Free all the memory associated with the code manager @cman.
+ */
void
mono_code_manager_destroy (MonoCodeManager *cman)
{
free (cman);
}
-/* fill all the memory with the 0x2a (42) value */
+/**
+ * mono_code_manager_invalidate:
+ * @cman: a code manager
+ *
+ * Fill all the memory with an invalid native code value
+ * so that any attempt to execute code allocated in the code
+ * manager @cman will fail. This is used for debugging purposes.
+ */
void
mono_code_manager_invalidate (MonoCodeManager *cman)
{
memset (chunk->data, fill_value, chunk->size);
}
+/**
+ * mono_code_manager_set_read_only:
+ * @cman: a code manager
+ *
+ * Make the code manager read only, so further allocation requests cause an assert.
+ */
+void
+mono_code_manager_set_read_only (MonoCodeManager *cman)
+{
+	/* Checked by a g_assert in mono_code_manager_reserve_align (). */
+	cman->read_only = TRUE;
+}
+
+/**
+ * mono_code_manager_foreach:
+ * @cman: a code manager
+ * @func: a callback function pointer
+ * @user_data: additional data to pass to @func
+ *
+ * Invokes the callback @func for each different chunk of memory allocated
+ * in the code manager @cman.
+ */
void
mono_code_manager_foreach (MonoCodeManager *cman, MonoCodeManagerFunc func, void *user_data)
{
}
}
-static int
-query_pagesize (void)
-{
-#ifdef PLATFORM_WIN32
- SYSTEM_INFO info;
- GetSystemInfo (&info);
- return info.dwAllocationGranularity;
-#else
- return getpagesize ();
-#endif
-}
-
/* BIND_ROOM is the divisor for the chunck of code size dedicated
* to binding branches (branches not reachable with the immediate displacement)
* bind_size = size/BIND_ROOM;
static CodeChunk*
new_codechunk (int dynamic, int size)
{
- static int pagesize = 0;
int minsize, flags = CODE_FLAG_MMAP;
int chunk_size, bsize = 0;
+ int pagesize;
CodeChunk *chunk;
void *ptr;
flags = CODE_FLAG_MALLOC;
#endif
- if (!pagesize)
- pagesize = query_pagesize ();
+ pagesize = mono_pagesize ();
if (dynamic) {
chunk_size = size;
flags = CODE_FLAG_MALLOC;
- }
- else {
+ } else {
minsize = pagesize * MIN_PAGES;
if (size < minsize)
chunk_size = minsize;
}
#endif
- /* does it make sense to use the mmap-like API? */
if (flags == CODE_FLAG_MALLOC) {
- ptr = malloc (chunk_size);
+ ptr = dlmemalign (MIN_ALIGN, chunk_size + MIN_ALIGN - 1);
+ if (!ptr)
+ return NULL;
+ } else {
+ /* Allocate MIN_ALIGN-1 more than we need so we can still */
+ /* guarantee MIN_ALIGN alignment for individual allocs */
+ /* from mono_code_manager_reserve_align. */
+ ptr = mono_valloc (NULL, chunk_size + MIN_ALIGN - 1, MONO_PROT_RWX | ARCH_MAP_FLAGS);
if (!ptr)
return NULL;
-
- }
- else {
-#ifndef FORCE_MALLOC
- ptr = mmap (0, chunk_size, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE|MAP_ANONYMOUS|ARCH_MAP_FLAGS, -1, 0);
- if (ptr == (void*)-1) {
- int fd = open ("/dev/zero", O_RDONLY);
- if (fd != -1) {
- ptr = mmap (0, chunk_size, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE|ARCH_MAP_FLAGS, fd, 0);
- close (fd);
- }
- if (ptr == (void*)-1) {
- ptr = malloc (chunk_size);
- if (!ptr)
- return NULL;
- flags = CODE_FLAG_MALLOC;
- }
- }
-#else
- return NULL;
-#endif
}
if (flags == CODE_FLAG_MALLOC) {
- /*
- * AMD64 processors maintain icache coherency only for pages which are
- * marked executable.
- */
-#ifndef PLATFORM_WIN32
- {
- char *page_start = (char *) (((gssize) (ptr)) & ~ (pagesize - 1));
- int pages = ((char*)ptr + chunk_size - page_start + pagesize - 1) / pagesize;
- int err = mprotect (page_start, pages * pagesize, PROT_READ | PROT_WRITE | PROT_EXEC);
- assert (!err);
- }
-#else
- {
- DWORD oldp;
- int err = VirtualProtect (ptr, chunk_size, PAGE_EXECUTE_READWRITE, &oldp);
- assert (err);
- }
-#endif
-
#ifdef BIND_ROOM
- /* Make sure the thunks area is zeroed */
- memset (ptr, 0, bsize);
+ /* Make sure the thunks area is zeroed */
+ memset (ptr, 0, bsize);
#endif
}
chunk = malloc (sizeof (CodeChunk));
if (!chunk) {
if (flags == CODE_FLAG_MALLOC)
- free (ptr);
-#ifndef FORCE_MALLOC
+ dlfree (ptr);
else
- munmap (ptr, chunk_size);
-#endif
+ mono_vfree (ptr, chunk_size);
return NULL;
}
chunk->next = NULL;
chunk->flags = flags;
chunk->pos = bsize;
chunk->bsize = bsize;
+ mono_profiler_code_chunk_new((gpointer) chunk->data, chunk->size);
+ code_memory_used += chunk_size;
+ mono_runtime_resource_check_limit (MONO_RESOURCE_JIT_CODE, code_memory_used);
/*printf ("code chunk at: %p\n", ptr);*/
return chunk;
}
+/**
+ * mono_code_manager_reserve:
+ * @cman: a code manager
+ * @size: size of memory to allocate
+ * @alignment: power of two alignment value
+ *
+ * Allocates at least @size bytes of memory inside the code manager @cman.
+ *
+ * Returns: the pointer to the allocated memory or #NULL on failure
+ */
void*
-mono_code_manager_reserve (MonoCodeManager *cman, int size)
+mono_code_manager_reserve_align (MonoCodeManager *cman, int size, int alignment)
{
+#if !defined(__native_client__) || !defined(__native_client_codegen__)
	CodeChunk *chunk, *prev;
	void *ptr;
-
-	size += MIN_ALIGN;
-	size &= ~ (MIN_ALIGN - 1);
+	guint32 align_mask = alignment - 1;
+
+	/* Allocation is forbidden after mono_code_manager_set_read_only (). */
+	g_assert (!cman->read_only);
+
+	/* eventually allow bigger alignments, but we need to fix the dynamic alloc code to
+	 * handle this before
+	 */
+	g_assert (alignment <= MIN_ALIGN);
+
+	if (cman->dynamic) {
+		++mono_stats.dynamic_code_alloc_count;
+		mono_stats.dynamic_code_bytes_count += size;
+	}
	if (!cman->current) {
		cman->current = new_codechunk (cman->dynamic, size);
	}
	for (chunk = cman->current; chunk; chunk = chunk->next) {
+		/* First-fit scan: take the first chunk whose aligned cursor
+		 * still leaves room for `size` bytes. */
-		if (chunk->pos + size <= chunk->size) {
-			ptr = chunk->data + chunk->pos;
-			chunk->pos += size;
+		if (ALIGN_INT (chunk->pos, alignment) + size <= chunk->size) {
+			chunk->pos = ALIGN_INT (chunk->pos, alignment);
+			/* Align the chunk->data we add to chunk->pos */
+			/* or we can't guarantee proper alignment */
+			ptr = (void*)((((uintptr_t)chunk->data + align_mask) & ~(uintptr_t)align_mask) + chunk->pos);
+			chunk->pos = ((char*)ptr - chunk->data) + size;
			return ptr;
		}
	}
	return NULL;
	chunk->next = cman->current;
	cman->current = chunk;
-	ptr = chunk->data + chunk->pos;
-	chunk->pos += size;
+	chunk->pos = ALIGN_INT (chunk->pos, alignment);
+	/* Align the chunk->data we add to chunk->pos */
+	/* or we can't guarantee proper alignment */
+	ptr = (void*)((((uintptr_t)chunk->data + align_mask) & ~(uintptr_t)align_mask) + chunk->pos);
+	chunk->pos = ((char*)ptr - chunk->data) + size;
	return ptr;
+#else
+	unsigned char *temp_ptr, *code_ptr;
+	/* Round up size to next bundle */
+	alignment = kNaClBundleSize;
+	size = (size + kNaClBundleSize) & (~kNaClBundleMask);
+	/* Allocate a temp buffer */
+	/* NOTE(review): memalign result is not checked for NULL before the
+	 * alignment assert — confirm OOM policy here. */
+	temp_ptr = memalign (alignment, size);
+	g_assert (((uintptr_t)temp_ptr & kNaClBundleMask) == 0);
+	/* Allocate code space from the service runtime */
+	code_ptr = allocate_code (size);
+	/* Insert pointer to code space in hash, keyed by buffer ptr */
+	/* (the temp buffer is freed in mono_code_manager_commit ()). */
+	g_hash_table_insert (cman->hash, temp_ptr, code_ptr);
+
+	nacl_jit_check_init ();
+
+	patch_current_depth++;
+	patch_source_base[patch_current_depth] = temp_ptr;
+	patch_dest_base[patch_current_depth] = code_ptr;
+	patch_alloc_size[patch_current_depth] = size;
+	g_assert (patch_current_depth < kMaxPatchDepth);
+	return temp_ptr;
+#endif
+}
+
+/**
+ * mono_code_manager_reserve:
+ * @cman: a code manager
+ * @size: size of memory to allocate
+ *
+ * Allocates at least @size bytes of memory inside the code manager @cman.
+ *
+ * Returns: the pointer to the allocated memory or #NULL on failure
+ */
+void*
+mono_code_manager_reserve (MonoCodeManager *cman, int size)
+{
+	/* Convenience wrapper: reserve with the default MIN_ALIGN alignment. */
+	return mono_code_manager_reserve_align (cman, size, MIN_ALIGN);
+}
-/*
- * if we reserved too much room for a method and we didn't allocate
- * already from the code manager, we can get back the excess allocation.
+/**
+ * mono_code_manager_commit:
+ * @cman: a code manager
+ * @data: the pointer returned by mono_code_manager_reserve ()
+ * @size: the size requested in the call to mono_code_manager_reserve ()
+ * @newsize: the new size to reserve
+ *
+ * If we reserved too much room for a method and we didn't allocate
+ * already from the code manager, we can get back the excess allocation
+ * for later use in the code manager.
*/
void
mono_code_manager_commit (MonoCodeManager *cman, void *data, int size, int newsize)
{
-	newsize += MIN_ALIGN;
-	newsize &= ~ (MIN_ALIGN - 1);
-	size += MIN_ALIGN;
-	size &= ~ (MIN_ALIGN - 1);
+#if !defined(__native_client__) || !defined(__native_client_codegen__)
+	g_assert (newsize <= size);
+	/* Give back the tail of the reservation, but only if this was the
+	 * most recent allocation from the current chunk. */
	if (cman->current && (size != newsize) && (data == cman->current->data + cman->current->pos - size)) {
		cman->current->pos -= size - newsize;
	}
+#else
+	unsigned char *code;
+	int status;
+	g_assert (newsize <= size);
+	/* Look up the final code destination recorded by reserve (). */
+	code = g_hash_table_lookup (cman->hash, data);
+	g_assert (code != NULL);
+	/* Pad space after code with HLTs */
+	/* TODO: this is x86/amd64 specific */
+	while (newsize & kNaClBundleMask) {
+		*((char *)data + newsize) = 0xf4;
+		newsize++;
+	}
+	/* Hand the validated code to the service runtime for installation. */
+	status = nacl_dyncode_create (code, data, newsize);
+	if (status != 0) {
+		unsigned char *codep;
+		fprintf(stderr, "Error creating Native Client dynamic code section attempted to be\n"
+		                "emitted at %p (hex disassembly of code follows):\n", code);
+		for (codep = data; codep < data + newsize; codep++)
+			fprintf(stderr, "%02x ", *codep);
+		fprintf(stderr, "\n");
+		g_assert_not_reached ();
+	}
+	/* Drop the mapping and unwind the patch-tracking stack entry made
+	 * by the matching reserve (). */
+	g_hash_table_remove (cman->hash, data);
+	g_assert (data == patch_source_base[patch_current_depth]);
+	g_assert (code == patch_dest_base[patch_current_depth]);
+	patch_current_depth--;
+	g_assert (patch_current_depth >= -1);
+	/* Temp buffer was memalign'd in reserve (); release it now. */
+	free (data);
+#endif
+}
+
+#if defined(__native_client_codegen__) && defined(__native_client__)
+/* Return the final code destination recorded for the temp buffer
+ * `data` by mono_code_manager_reserve_align (); presumably NULL once
+ * the buffer has been committed (the entry is removed in commit ()) —
+ * verify against callers. */
+void *
+nacl_code_manager_get_code_dest (MonoCodeManager *cman, void *data)
+{
+	return g_hash_table_lookup (cman->hash, data);
+}
+#endif
+
+/**
+ * mono_code_manager_size:
+ * @cman: a code manager
+ * @used_size: pointer to an integer for the result
+ *
+ * This function can be used to get statistics about a code manager:
+ * the integer pointed to by @used_size will contain how much
+ * memory is actually used inside the code managed @cman.
+ *
+ * Returns: the amount of memory allocated in @cman
+ */
+int
+mono_code_manager_size (MonoCodeManager *cman, int *used_size)
+{
+	CodeChunk *chunk;
+	guint32 size = 0;
+	guint32 used = 0;
+	/* Sum capacity and in-use bytes over both the active chunk list
+	 * and the list of full chunks. */
+	for (chunk = cman->current; chunk; chunk = chunk->next) {
+		size += chunk->size;
+		used += chunk->pos;
+	}
+	for (chunk = cman->full; chunk; chunk = chunk->next) {
+		size += chunk->size;
+		used += chunk->pos;
+	}
+	/* used_size is optional; skip the write when the caller passes NULL. */
+	if (used_size)
+		*used_size = used;
+	return size;
+}