+/**
+ * \file
+ */
+
#include "config.h"
#ifdef HAVE_UNISTD_H
#include "mono-mmap.h"
#include "mono-counters.h"
#include "dlmalloc.h"
-#include <mono/io-layer/io-layer.h>
#include <mono/metadata/profiler-private.h>
#ifdef HAVE_VALGRIND_MEMCHECK_H
#include <valgrind/memcheck.h>
#endif
-#if defined(__native_client_codegen__) && defined(__native_client__)
-#include <malloc.h>
-#include <nacl/nacl_dyncode.h>
-#include <mono/mini/mini.h>
-#endif
#include <mono/utils/mono-os-mutex.h>
static size_t dynamic_code_alloc_count;
static size_t dynamic_code_bytes_count;
static size_t dynamic_code_frees_count;
+static MonoCodeManagerCallbacks code_manager_callbacks;
/*
* AMD64 processors maintain icache coherency only for pages which are
#define MIN_PAGES 16
-#if defined(__ia64__) || defined(__x86_64__) || defined (_WIN64)
+#if defined(__x86_64__) || defined (_WIN64)
/*
* We require 16 byte alignment on amd64 so the fp literals embedded in the code are
* properly aligned for SSE2.
#define MIN_ALIGN 16
#else
#define MIN_ALIGN 8
-#endif
-#ifdef __native_client_codegen__
-/* For Google Native Client, all targets of indirect control flow need to */
-/* be aligned to bundle boundary. 16 bytes on ARM, 32 bytes on x86.
- * MIN_ALIGN was updated to force alignment for calls from
- * tramp-<arch>.c to mono_global_codeman_reserve() */
-/* and mono_domain_code_reserve(). */
-#undef MIN_ALIGN
-#define MIN_ALIGN kNaClBundleSize
-
#endif
/* if a chunk has less than this amount of free space it's considered full */
CodeChunk *current;
CodeChunk *full;
CodeChunk *last;
-#if defined(__native_client_codegen__) && defined(__native_client__)
- GHashTable *hash;
-#endif
};
#define ALIGN_INT(val,alignment) (((val) + (alignment - 1)) & ~(alignment - 1))
-#if defined(__native_client_codegen__) && defined(__native_client__)
-/* End of text segment, set by linker.
- * Dynamic text starts on the next allocated page.
- */
-extern char etext[];
-char *next_dynamic_code_addr = NULL;
-
-/*
- * This routine gets the next available bundle aligned
- * pointer in the dynamic code section. It does not check
- * for the section end, this error will be caught in the
- * service runtime.
- */
-void*
-allocate_code(intptr_t increment)
-{
- char *addr;
- if (increment < 0) return NULL;
- increment = increment & kNaClBundleMask ? (increment & ~kNaClBundleMask) + kNaClBundleSize : increment;
- addr = next_dynamic_code_addr;
- next_dynamic_code_addr += increment;
- return addr;
-}
-
-int
-nacl_is_code_address (void *target)
-{
- return (char *)target < next_dynamic_code_addr;
-}
-
-/* Fill code buffer with arch-specific NOPs. */
-void
-mono_nacl_fill_code_buffer (guint8 *data, int size);
-
-#ifndef USE_JUMP_TABLES
-const int kMaxPatchDepth = 32;
-__thread unsigned char **patch_source_base = NULL;
-__thread unsigned char **patch_dest_base = NULL;
-__thread int *patch_alloc_size = NULL;
-__thread int patch_current_depth = -1;
-__thread int allow_target_modification = 1;
-
-static void
-nacl_jit_check_init ()
-{
- if (patch_source_base == NULL) {
- patch_source_base = g_malloc (kMaxPatchDepth * sizeof(unsigned char *));
- patch_dest_base = g_malloc (kMaxPatchDepth * sizeof(unsigned char *));
- patch_alloc_size = g_malloc (kMaxPatchDepth * sizeof(int));
- }
-}
-#endif
-
-void
-nacl_allow_target_modification (int val)
-{
-#ifndef USE_JUMP_TABLES
- allow_target_modification = val;
-#endif /* USE_JUMP_TABLES */
-}
-
-/* Given a patch target, modify the target such that patching will work when
- * the code is copied to the data section.
- */
-void*
-nacl_modify_patch_target (unsigned char *target)
-{
- /*
- * There's no need in patch tricks for jumptables,
- * as we always patch same jumptable.
- */
-#ifndef USE_JUMP_TABLES
- /* This seems like a bit of an ugly way to do this but the advantage
- * is we don't have to worry about all the conditions in
- * mono_resolve_patch_target, and it can be used by all the bare uses
- * of <arch>_patch.
- */
- unsigned char *sb;
- unsigned char *db;
-
- if (!allow_target_modification) return target;
-
- nacl_jit_check_init ();
- sb = patch_source_base[patch_current_depth];
- db = patch_dest_base[patch_current_depth];
-
- if (target >= sb && (target < sb + patch_alloc_size[patch_current_depth])) {
- /* Do nothing. target is in the section being generated.
- * no need to modify, the disp will be the same either way.
- */
- } else {
- int target_offset = target - db;
- target = sb + target_offset;
- }
-#endif
- return target;
-}
-
-void*
-nacl_inverse_modify_patch_target (unsigned char *target)
-{
- /*
- * There's no need in patch tricks for jumptables,
- * as we always patch same jumptable.
- */
-#ifndef USE_JUMP_TABLES
- unsigned char *sb;
- unsigned char *db;
- int target_offset;
-
- if (!allow_target_modification) return target;
-
- nacl_jit_check_init ();
- sb = patch_source_base[patch_current_depth];
- db = patch_dest_base[patch_current_depth];
-
- target_offset = target - sb;
- target = db + target_offset;
-#endif
- return target;
-}
-
-
-#endif /* __native_client_codegen && __native_client__ */
-
#define VALLOC_FREELIST_SIZE 16
static mono_mutex_t valloc_mutex;
freelist = g_slist_delete_link (freelist, freelist);
g_hash_table_insert (valloc_freelists, GUINT_TO_POINTER (size), freelist);
} else {
- ptr = mono_valloc (preferred, size, MONO_PROT_RWX | ARCH_MAP_FLAGS);
+ ptr = mono_valloc (preferred, size, MONO_PROT_RWX | ARCH_MAP_FLAGS, MONO_MEM_ACCOUNT_CODE);
if (!ptr && preferred)
- ptr = mono_valloc (NULL, size, MONO_PROT_RWX | ARCH_MAP_FLAGS);
+ ptr = mono_valloc (NULL, size, MONO_PROT_RWX | ARCH_MAP_FLAGS, MONO_MEM_ACCOUNT_CODE);
}
mono_os_mutex_unlock (&valloc_mutex);
return ptr;
freelist = g_slist_prepend (freelist, ptr);
g_hash_table_insert (valloc_freelists, GUINT_TO_POINTER (size), freelist);
} else {
- mono_vfree (ptr, size);
+ mono_vfree (ptr, size, MONO_MEM_ACCOUNT_CODE);
}
mono_os_mutex_unlock (&valloc_mutex);
}
GSList *l;
for (l = freelist; l; l = l->next) {
- mono_vfree (l->data, GPOINTER_TO_UINT (key));
+ mono_vfree (l->data, GPOINTER_TO_UINT (key), MONO_MEM_ACCOUNT_CODE);
}
g_slist_free (freelist);
}
codechunk_cleanup ();
}
+void
+mono_code_manager_install_callbacks (MonoCodeManagerCallbacks* callbacks)
+{
+ code_manager_callbacks = *callbacks;
+}
+
/**
* mono_code_manager_new:
*
MonoCodeManager*
mono_code_manager_new (void)
{
- MonoCodeManager *cman = (MonoCodeManager *) g_malloc0 (sizeof (MonoCodeManager));
- if (!cman)
- return NULL;
-#if defined(__native_client_codegen__) && defined(__native_client__)
- if (next_dynamic_code_addr == NULL) {
- const guint kPageMask = 0xFFFF; /* 64K pages */
- next_dynamic_code_addr = (uintptr_t)(etext + kPageMask) & ~kPageMask;
-#if defined (__GLIBC__)
- /* TODO: For now, just jump 64MB ahead to avoid dynamic libraries. */
- next_dynamic_code_addr += (uintptr_t)0x4000000;
-#else
- /* Workaround bug in service runtime, unable to allocate */
- /* from the first page in the dynamic code section. */
- next_dynamic_code_addr += (uintptr_t)0x10000;
-#endif
- }
- cman->hash = g_hash_table_new (NULL, NULL);
-# ifndef USE_JUMP_TABLES
- if (patch_source_base == NULL) {
- patch_source_base = g_malloc (kMaxPatchDepth * sizeof(unsigned char *));
- patch_dest_base = g_malloc (kMaxPatchDepth * sizeof(unsigned char *));
- patch_alloc_size = g_malloc (kMaxPatchDepth * sizeof(int));
- }
-# endif
-#endif
- return cman;
+ return (MonoCodeManager *) g_malloc0 (sizeof (MonoCodeManager));
}
/**
for (; chunk; ) {
dead = chunk;
- mono_profiler_code_chunk_destroy ((gpointer) dead->data);
+ MONO_PROFILER_RAISE (jit_chunk_destroyed, ((mono_byte *) dead->data));
+ if (code_manager_callbacks.chunk_destroy)
+ code_manager_callbacks.chunk_destroy ((gpointer)dead->data);
chunk = chunk->next;
if (dead->flags == CODE_FLAG_MMAP) {
codechunk_vfree (dead->data, dead->size);
dlfree (dead->data);
}
code_memory_used -= dead->size;
- free (dead);
+ g_free (dead);
}
}
/**
* mono_code_manager_destroy:
- * @cman: a code manager
- *
- * Free all the memory associated with the code manager @cman.
+ * \param cman a code manager
+ * Free all the memory associated with the code manager \p cman.
*/
void
mono_code_manager_destroy (MonoCodeManager *cman)
{
free_chunklist (cman->full);
free_chunklist (cman->current);
- free (cman);
+ g_free (cman);
}
/**
* mono_code_manager_invalidate:
- * @cman: a code manager
- *
+ * \param cman a code manager
* Fill all the memory with an invalid native code value
* so that any attempt to execute code allocated in the code
- * manager @cman will fail. This is used for debugging purposes.
+ * manager \p cman will fail. This is used for debugging purposes.
*/
void
mono_code_manager_invalidate (MonoCodeManager *cman)
/**
* mono_code_manager_set_read_only:
- * @cman: a code manager
- *
+ * \param cman a code manager
* Make the code manager read only, so further allocation requests cause an assert.
*/
void
/**
* mono_code_manager_foreach:
- * @cman: a code manager
- * @func: a callback function pointer
- * @user_data: additional data to pass to @func
- *
- * Invokes the callback @func for each different chunk of memory allocated
- * in the code manager @cman.
+ * \param cman a code manager
+ * \param func a callback function pointer
+ * \param user_data additional data to pass to \p func
+ * Invokes the callback \p func for each different chunk of memory allocated
+ * in the code manager \p cman.
*/
void
mono_code_manager_foreach (MonoCodeManager *cman, MonoCodeManagerFunc func, void *user_data)
{
int minsize, flags = CODE_FLAG_MMAP;
int chunk_size, bsize = 0;
- int pagesize;
+ int pagesize, valloc_granule;
CodeChunk *chunk;
void *ptr;
#endif
pagesize = mono_pagesize ();
+ valloc_granule = mono_valloc_granule ();
if (dynamic) {
chunk_size = size;
flags = CODE_FLAG_MALLOC;
} else {
- minsize = pagesize * MIN_PAGES;
+ minsize = MAX (pagesize * MIN_PAGES, valloc_granule);
if (size < minsize)
chunk_size = minsize;
else {
size += MIN_ALIGN - 1;
size &= ~(MIN_ALIGN - 1);
chunk_size = size;
- chunk_size += pagesize - 1;
- chunk_size &= ~ (pagesize - 1);
+ chunk_size += valloc_granule - 1;
+ chunk_size &= ~ (valloc_granule - 1);
}
}
#ifdef BIND_ROOM
if (chunk_size - size < bsize) {
chunk_size = size + bsize;
if (!dynamic) {
- chunk_size += pagesize - 1;
- chunk_size &= ~ (pagesize - 1);
+ chunk_size += valloc_granule - 1;
+ chunk_size &= ~ (valloc_granule - 1);
}
}
#endif
#endif
}
- chunk = (CodeChunk *) malloc (sizeof (CodeChunk));
+ chunk = (CodeChunk *) g_malloc (sizeof (CodeChunk));
if (!chunk) {
if (flags == CODE_FLAG_MALLOC)
dlfree (ptr);
else
- mono_vfree (ptr, chunk_size);
+ mono_vfree (ptr, chunk_size, MONO_MEM_ACCOUNT_CODE);
return NULL;
}
chunk->next = NULL;
chunk->flags = flags;
chunk->pos = bsize;
chunk->bsize = bsize;
- mono_profiler_code_chunk_new((gpointer) chunk->data, chunk->size);
+ if (code_manager_callbacks.chunk_new)
+ code_manager_callbacks.chunk_new ((gpointer)chunk->data, chunk->size);
+ MONO_PROFILER_RAISE (jit_chunk_created, ((mono_byte *) chunk->data, chunk->size));
code_memory_used += chunk_size;
mono_runtime_resource_check_limit (MONO_RESOURCE_JIT_CODE, code_memory_used);
}
/**
- * mono_code_manager_reserve:
- * @cman: a code manager
- * @size: size of memory to allocate
- * @alignment: power of two alignment value
- *
- * Allocates at least @size bytes of memory inside the code manager @cman.
- *
- * Returns: the pointer to the allocated memory or #NULL on failure
+ * mono_code_manager_reserve_align:
+ * \param cman a code manager
+ * \param size size of memory to allocate
+ * \param alignment power of two alignment value
+ * Allocates at least \p size bytes of memory inside the code manager \p cman.
+ * \returns the pointer to the allocated memory or NULL on failure
*/
void*
mono_code_manager_reserve_align (MonoCodeManager *cman, int size, int alignment)
{
-#if !defined(__native_client__) || !defined(__native_client_codegen__)
CodeChunk *chunk, *prev;
void *ptr;
guint32 align_mask = alignment - 1;
ptr = (void*)((((uintptr_t)chunk->data + align_mask) & ~(uintptr_t)align_mask) + chunk->pos);
chunk->pos = ((char*)ptr - chunk->data) + size;
return ptr;
-#else
- unsigned char *temp_ptr, *code_ptr;
- /* Round up size to next bundle */
- alignment = kNaClBundleSize;
- size = (size + kNaClBundleSize) & (~kNaClBundleMask);
- /* Allocate a temp buffer */
- temp_ptr = memalign (alignment, size);
- g_assert (((uintptr_t)temp_ptr & kNaClBundleMask) == 0);
- /* Allocate code space from the service runtime */
- code_ptr = allocate_code (size);
- /* Insert pointer to code space in hash, keyed by buffer ptr */
- g_hash_table_insert (cman->hash, temp_ptr, code_ptr);
-
-#ifndef USE_JUMP_TABLES
- nacl_jit_check_init ();
-
- patch_current_depth++;
- patch_source_base[patch_current_depth] = temp_ptr;
- patch_dest_base[patch_current_depth] = code_ptr;
- patch_alloc_size[patch_current_depth] = size;
- g_assert (patch_current_depth < kMaxPatchDepth);
-#endif
-
- return temp_ptr;
-#endif
}
/**
* mono_code_manager_reserve:
- * @cman: a code manager
- * @size: size of memory to allocate
- *
- * Allocates at least @size bytes of memory inside the code manager @cman.
- *
- * Returns: the pointer to the allocated memory or #NULL on failure
+ * \param cman a code manager
+ * \param size size of memory to allocate
+ * Allocates at least \p size bytes of memory inside the code manager \p cman.
+ * \returns the pointer to the allocated memory or NULL on failure
*/
void*
mono_code_manager_reserve (MonoCodeManager *cman, int size)
/**
* mono_code_manager_commit:
- * @cman: a code manager
- * @data: the pointer returned by mono_code_manager_reserve ()
- * @size: the size requested in the call to mono_code_manager_reserve ()
- * @newsize: the new size to reserve
- *
+ * \param cman a code manager
+ * \param data the pointer returned by mono_code_manager_reserve ()
+ * \param size the size requested in the call to mono_code_manager_reserve ()
+ * \param newsize the new size to reserve
* If we reserved too much room for a method and we didn't allocate
* already from the code manager, we can get back the excess allocation
* for later use in the code manager.
void
mono_code_manager_commit (MonoCodeManager *cman, void *data, int size, int newsize)
{
-#if !defined(__native_client__) || !defined(__native_client_codegen__)
g_assert (newsize <= size);
if (cman->current && (size != newsize) && (data == cman->current->data + cman->current->pos - size)) {
cman->current->pos -= size - newsize;
}
-#else
- unsigned char *code;
- int status;
- g_assert (NACL_BUNDLE_ALIGN_UP(newsize) <= size);
- code = g_hash_table_lookup (cman->hash, data);
- g_assert (code != NULL);
- mono_nacl_fill_code_buffer ((uint8_t*)data + newsize, size - newsize);
- newsize = NACL_BUNDLE_ALIGN_UP(newsize);
- g_assert ((GPOINTER_TO_UINT (data) & kNaClBundleMask) == 0);
- g_assert ((newsize & kNaClBundleMask) == 0);
- status = nacl_dyncode_create (code, data, newsize);
- if (status != 0) {
- unsigned char *codep;
- fprintf(stderr, "Error creating Native Client dynamic code section attempted to be\n"
- "emitted at %p (hex dissasembly of code follows):\n", code);
- for (codep = data; codep < data + newsize; codep++)
- fprintf(stderr, "%02x ", *codep);
- fprintf(stderr, "\n");
- g_assert_not_reached ();
- }
- g_hash_table_remove (cman->hash, data);
-# ifndef USE_JUMP_TABLES
- g_assert (data == patch_source_base[patch_current_depth]);
- g_assert (code == patch_dest_base[patch_current_depth]);
- patch_current_depth--;
- g_assert (patch_current_depth >= -1);
-# endif
- free (data);
-#endif
}
-#if defined(__native_client_codegen__) && defined(__native_client__)
-void *
-nacl_code_manager_get_code_dest (MonoCodeManager *cman, void *data)
-{
- return g_hash_table_lookup (cman->hash, data);
-}
-#endif
-
/**
* mono_code_manager_size:
- * @cman: a code manager
- * @used_size: pointer to an integer for the result
- *
+ * \param cman a code manager
+ * \param used_size pointer to an integer for the result
* This function can be used to get statistics about a code manager:
- * the integer pointed to by @used_size will contain how much
- * memory is actually used inside the code managed @cman.
- *
- * Returns: the amount of memory allocated in @cman
+ * the integer pointed to by \p used_size will contain how much
+ * memory is actually used inside the code manager \p cman.
+ * \returns the amount of memory allocated in \p cman
*/
int
mono_code_manager_size (MonoCodeManager *cman, int *used_size)
*used_size = used;
return size;
}
-
-#ifdef __native_client_codegen__
-# if defined(TARGET_ARM)
-/* Fill empty space with UDF instruction used as halt on ARM. */
-void
-mono_nacl_fill_code_buffer (guint8 *data, int size)
-{
- guint32* data32 = (guint32*)data;
- int i;
- g_assert(size % 4 == 0);
- for (i = 0; i < size / 4; i++)
- data32[i] = 0xE7FEDEFF;
-}
-# elif (defined(TARGET_X86) || defined(TARGET_AMD64))
-/* Fill empty space with HLT instruction */
-void
-mono_nacl_fill_code_buffer(guint8 *data, int size)
-{
- memset (data, 0xf4, size);
-}
-# else
-# error "Not ported"
-# endif
-#endif