/*
* mini.c: The new Mono code generator.
*
- * Author:
+ * Authors:
* Paolo Molaro (lupus@ximian.com)
* Dietmar Maurer (dietmar@ximian.com)
*
- * (C) 2002 Ximian, Inc.
+ * Copyright 2002-2003 Ximian, Inc.
+ * Copyright 2003-2010 Novell, Inc.
*/
+#define MONO_LLVM_IN_MINI 1
#include <config.h>
#include <signal.h>
#ifdef HAVE_ALLOCA_H
#include <sys/time.h>
#endif
-#ifdef HAVE_VALGRIND_MEMCHECK_H
-#include <valgrind/memcheck.h>
-#endif
+#include <mono/utils/memcheck.h>
#include <mono/metadata/assembly.h>
#include <mono/metadata/loader.h>
#include <mono/metadata/verify-internals.h>
#include <mono/metadata/mempool-internals.h>
#include <mono/metadata/attach.h>
+#include <mono/metadata/runtime.h>
#include <mono/utils/mono-math.h>
#include <mono/utils/mono-compiler.h>
#include <mono/utils/mono-counters.h>
-#include <mono/utils/mono-logger.h>
+#include <mono/utils/mono-logger-internal.h>
#include <mono/utils/mono-mmap.h>
#include <mono/utils/dtrace.h>
#include "mini.h"
+#include "mini-llvm.h"
#include "tasklets.h"
#include <string.h>
#include <ctype.h>
#include "mini-gc.h"
#include "debugger-agent.h"
+#if defined(HAVE_KW_THREAD)
+#define MINI_FAST_TLS_SET(x,y) x = y
+#define MINI_FAST_TLS_GET(x) x
+#define MINI_FAST_TLS_INIT(x)
+#define MINI_FAST_TLS_DECLARE(x) static __thread gpointer x MONO_TLS_FAST;
+#define MINI_HAVE_FAST_TLS
+#define MINI_THREAD_VAR_OFFSET(x,y) MONO_THREAD_VAR_OFFSET(x,y)
+#elif (defined(__APPLE__) && defined(__i386__))
+#define MINI_FAST_TLS_SET(x,y) pthread_setspecific(x, y)
+#define MINI_FAST_TLS_GET(x) pthread_getspecific(x)
+#define MINI_FAST_TLS_INIT(x) pthread_key_create(&x, NULL)
+#define MINI_FAST_TLS_DECLARE(x) static pthread_key_t x;
+#define MINI_HAVE_FAST_TLS
+#define MINI_THREAD_VAR_OFFSET(x,y) y = (gint32) x
+#else
+#define MINI_THREAD_VAR_OFFSET(x,y) MONO_THREAD_VAR_OFFSET(x,y)
+#endif
+
static gpointer mono_jit_compile_method_with_opt (MonoMethod *method, guint32 opt, MonoException **ex);
-/* helper methods signature */
-/* FIXME: Make these static again */
-MonoMethodSignature *helper_sig_class_init_trampoline = NULL;
-MonoMethodSignature *helper_sig_domain_get = NULL;
-MonoMethodSignature *helper_sig_generic_class_init_trampoline = NULL;
-MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline = NULL;
-MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline = NULL;
static guint32 default_opt = 0;
static gboolean default_opt_set = FALSE;
guint32 mono_jit_tls_id = -1;
-#ifdef HAVE_KW_THREAD
-static __thread gpointer mono_jit_tls MONO_TLS_FAST;
+#ifdef MINI_HAVE_FAST_TLS
+MINI_FAST_TLS_DECLARE(mono_jit_tls);
+#endif
+
+#ifndef MONO_ARCH_MONITOR_ENTER_ADJUSTMENT
+#define MONO_ARCH_MONITOR_ENTER_ADJUSTMENT 1
#endif
MonoTraceSpec *mono_jit_trace_calls = NULL;
static gboolean mono_using_xdebug;
static int mini_verbose = 0;
-/* Statistics */
-#ifdef ENABLE_LLVM
-static int methods_with_llvm, methods_without_llvm;
-#endif
-
/*
- * This flag controls whenever the runtime uses LLVM compiled code.
- * Enabling this causes different/slower code paths to be used, which is why it
- * defaults to FALSE if ENABLE_LLVM is not defined, i.e. the runtime is only capable of
- * running AOT code compiled by LLVM.
- * Changes when this flag is set include:
- * - a per method vtable trampoline is used to handle virtual calls, instead of only
- * one trampoline.
- * - fast generic virtual calls are not supported.
+ * This flag controls whether the runtime uses LLVM for JIT compilation, and whether
+ * it can load AOT code compiled by LLVM.
*/
-#ifdef ENABLE_LLVM
-gboolean mono_use_llvm = TRUE;
-#else
gboolean mono_use_llvm = FALSE;
-#endif
#define mono_jit_lock() EnterCriticalSection (&jit_mutex)
#define mono_jit_unlock() LeaveCriticalSection (&jit_mutex)
static MonoCodeManager *global_codeman = NULL;
-/* FIXME: Make this static again */
-GHashTable *jit_icall_name_hash = NULL;
+static GHashTable *jit_icall_name_hash = NULL;
static MonoDebugOptions debug_options;
gboolean mono_dont_free_global_codeman;
+gpointer
+mono_realloc_native_code (MonoCompile *cfg)
+{
+#if defined(__default_codegen__)
+ return g_realloc (cfg->native_code, cfg->code_size);
+#elif defined(__native_client_codegen__)
+ guint old_padding;
+ gpointer native_code;
+ guint alignment_check;
+
+ /* Save the old alignment offset so we can re-align after the realloc. */
+ old_padding = (guint)(cfg->native_code - cfg->native_code_alloc);
+
+ cfg->native_code_alloc = g_realloc ( cfg->native_code_alloc,
+ cfg->code_size + kNaClAlignment );
+
+ /* Align native_code to next nearest kNaClAlignment byte. */
+ native_code = (guint)cfg->native_code_alloc + kNaClAlignment;
+ native_code = (guint)native_code & ~kNaClAlignmentMask;
+
+ /* Shift the data to be 32-byte aligned again. */
+ memmove (native_code, cfg->native_code_alloc + old_padding, cfg->code_size);
+
+ alignment_check = (guint)native_code & kNaClAlignmentMask;
+ g_assert (alignment_check == 0);
+ return native_code;
+#else
+ g_assert_not_reached ();
+ return cfg->native_code;
+#endif
+}
+
+#ifdef __native_client_codegen__
+
+/* Prevent instructions from straddling a 32-byte alignment boundary. */
+/* Instructions longer than 32 bytes must be aligned internally. */
+/* IN: pcode, instlen */
+/* OUT: pcode */
+void mono_nacl_align_inst(guint8 **pcode, int instlen) {
+ int space_in_block;
+
+ space_in_block = kNaClAlignment - ((uintptr_t)(*pcode) & kNaClAlignmentMask);
+
+ if (G_UNLIKELY (instlen >= kNaClAlignment)) {
+ g_assert_not_reached();
+ } else if (instlen > space_in_block) {
+ *pcode = mono_arch_nacl_pad(*pcode, space_in_block);
+ }
+}
+
+/* Move emitted call sequence to the end of a kNaClAlignment-byte block. */
+/* IN: start pointer to start of call sequence */
+/* IN: pcode pointer to end of call sequence (current "IP") */
+/* OUT: start pointer to the start of the call sequence after padding */
+/* OUT: pcode pointer to the end of the call sequence after padding */
+void mono_nacl_align_call(guint8 **start, guint8 **pcode) {
+ const size_t MAX_NACL_CALL_LENGTH = kNaClAlignment;
+ guint8 copy_of_call[MAX_NACL_CALL_LENGTH];
+ guint8 *temp;
+
+ const size_t length = (size_t)((*pcode)-(*start));
+ g_assert(length < MAX_NACL_CALL_LENGTH);
+
+ memcpy(copy_of_call, *start, length);
+ temp = mono_nacl_pad_call(*start, (guint8)length);
+ memcpy(temp, copy_of_call, length);
+ (*start) = temp;
+ (*pcode) = temp + length;
+}
+
+/* mono_nacl_pad_call(): Insert padding for Native Client call instructions */
+/* code pointer to buffer for emitting code */
+/* ilength length of call instruction */
+guint8 *mono_nacl_pad_call(guint8 *code, guint8 ilength) {
+ int freeSpaceInBlock = kNaClAlignment - ((uintptr_t)code & kNaClAlignmentMask);
+ int padding = freeSpaceInBlock - ilength;
+
+ if (padding < 0) {
+ /* There isn't enough space in this block for the instruction. */
+ /* Fill this block and start a new one. */
+ code = mono_arch_nacl_pad(code, freeSpaceInBlock);
+ freeSpaceInBlock = kNaClAlignment;
+ padding = freeSpaceInBlock - ilength;
+ }
+ g_assert(ilength > 0);
+ g_assert(padding >= 0);
+ g_assert(padding < kNaClAlignment);
+ if (0 == padding) return code;
+ return mono_arch_nacl_pad(code, padding);
+}
+
+guint8 *mono_nacl_align(guint8 *code) {
+ int padding = kNaClAlignment - ((uintptr_t)code & kNaClAlignmentMask);
+ if (padding != kNaClAlignment) code = mono_arch_nacl_pad(code, padding);
+ return code;
+}
+
+void mono_nacl_fix_patches(const guint8 *code, MonoJumpInfo *ji)
+{
+ MonoJumpInfo *patch_info;
+ for (patch_info = ji; patch_info; patch_info = patch_info->next) {
+ unsigned char *ip = patch_info->ip.i + code;
+ ip = mono_arch_nacl_skip_nops(ip);
+ patch_info->ip.i = ip - code;
+ }
+}
+#endif /* __native_client_codegen__ */
+
gboolean
mono_running_on_valgrind (void)
{
-#ifdef HAVE_VALGRIND_MEMCHECK_H
- if (RUNNING_ON_VALGRIND){
+ if (RUNNING_ON_VALGRIND){
#ifdef VALGRIND_JIT_REGISTER_MAP
- valgrind_register = TRUE;
+ valgrind_register = TRUE;
#endif
- return TRUE;
- } else
- return FALSE;
-#else
+ return TRUE;
+ } else
return FALSE;
-#endif
}
+typedef struct {
+ MonoExceptionClause *clause;
+ MonoBasicBlock *basic_block;
+ int start_offset;
+} TryBlockHole;
+
typedef struct {
void *ip;
MonoMethod *method;
* output. Unlike mono_pmip which returns a string, this routine
* prints the value on the standard output.
*/
+#ifdef __GNUC__
+/* Prevent the linker from optimizing this away in embedding setups to help debugging */
+ __attribute__((used))
+#endif
void
mono_print_method_from_ip (void *ip)
{
}
}
+#if defined(__native_client_codegen__) && defined(__native_client__)
+/* Given the temporary buffer (allocated by mono_global_codeman_reserve) into
+ * which we are generating code, return a pointer to the destination in the
+ * dynamic code segment into which the code will be copied when
+ * mono_global_codeman_commit is called.
+ * LOCKING: Acquires the jit lock.
+ */
+void*
+nacl_global_codeman_get_dest (void *data)
+{
+ void *dest;
+ mono_jit_lock ();
+ dest = nacl_code_manager_get_code_dest (global_codeman, data);
+ mono_jit_unlock ();
+ return dest;
+}
+
+void
+mono_global_codeman_commit (void *data, int size, int newsize)
+{
+ mono_jit_lock ();
+ mono_code_manager_commit (global_codeman, data, size, newsize);
+ mono_jit_unlock ();
+}
+
+/*
+ * Convenience function which calls mono_global_codeman_commit to validate and
+ * copy the code. The caller sets *buf_base and *buf_size to the start and size
+ * of the buffer (allocated by mono_global_codeman_reserve), and *code_end to
+ * the byte after the last instruction byte. On return, *buf_base will point to
+ * the start of the copied code in the code segment, and *code_end will point after
+ * the end of the copied code.
+ */
+void
+nacl_global_codeman_validate (guint8 **buf_base, int buf_size, guint8 **code_end)
+{
+ guint8 *tmp = nacl_global_codeman_get_dest (*buf_base);
+ mono_global_codeman_commit (*buf_base, buf_size, *code_end - *buf_base);
+ *code_end = tmp + (*code_end - *buf_base);
+ *buf_base = tmp;
+}
+#else
+/* no-op versions of Native Client functions */
+void*
+nacl_global_codeman_get_dest (void *data)
+{
+ return data;
+}
+
+void
+mono_global_codeman_commit (void *data, int size, int newsize)
+{
+}
+
+void
+nacl_global_codeman_validate (guint8 **buf_base, int buf_size, guint8 **code_end)
+{
+}
+
+#endif /* __native_client__ */
+
/**
* mono_create_unwind_op:
*
{
return mono_jump_info_token_new2 (mp, image, token, NULL);
}
+
+/*
+ * mono_tramp_info_create:
+ *
+ * Create a MonoTrampInfo structure from the arguments. This function assumes ownership
+ * of NAME, JI, and UNWIND_OPS.
+ */
+MonoTrampInfo*
+mono_tramp_info_create (const char *name, guint8 *code, guint32 code_size, MonoJumpInfo *ji, GSList *unwind_ops)
+{
+ MonoTrampInfo *info = g_new0 (MonoTrampInfo, 1);
+
+ info->name = (char*)name;
+ info->code = code;
+ info->code_size = code_size;
+ info->ji = ji;
+ info->unwind_ops = unwind_ops;
+
+ return info;
+}
+
+void
+mono_tramp_info_free (MonoTrampInfo *info)
+{
+ GSList *l;
+
+ g_free (info->name);
+
+ // FIXME: ji
+ for (l = info->unwind_ops; l; l = l->next)
+ g_free (l->data);
+ g_slist_free (info->unwind_ops);
+ g_free (info);
+}
#define MONO_INIT_VARINFO(vi,id) do { \
(vi)->range.first_use.pos.bid = 0xffff; \
static int
mono_find_block_region_notry (MonoCompile *cfg, int offset)
{
- MonoMethod *method = cfg->method;
- MonoMethodHeader *header = mono_method_get_header (method);
+ MonoMethodHeader *header = cfg->header;
MonoExceptionClause *clause;
int i;
mono_get_block_region_notry (MonoCompile *cfg, int region)
{
if ((region & (0xf << 4)) == MONO_REGION_TRY) {
- MonoMethodHeader *header = mono_method_get_header (cfg->method);
+ MonoMethodHeader *header = cfg->header;
/*
* This can happen if a try clause is nested inside a finally clause.
if (type->byref)
return OP_LOAD_MEMBASE;
- switch (mono_type_get_underlying_type (type)->type) {
+ type = mono_type_get_underlying_type (type);
+
+ switch (type->type) {
case MONO_TYPE_I1:
return OP_LOADI1_MEMBASE;
case MONO_TYPE_U1:
inst->backend.is_pinvoke = 0;
inst->dreg = vreg;
+ if (cfg->compute_gc_maps) {
+ if (type->byref) {
+ mono_mark_vreg_as_mp (cfg, vreg);
+ } else {
+ MonoType *t = mini_type_get_underlying_type (NULL, type);
+ if ((MONO_TYPE_ISSTRUCT (t) && inst->klass->has_references) || MONO_TYPE_IS_REFERENCE (t)) {
+ inst->flags |= MONO_INST_GC_TRACK;
+ mono_mark_vreg_as_ref (cfg, vreg);
+ }
+ }
+ }
+
cfg->varinfo [num] = inst;
MONO_INIT_VARINFO (&cfg->vars [num], num);
#endif
+void
+mono_mark_vreg_as_ref (MonoCompile *cfg, int vreg)
+{
+ if (vreg >= cfg->vreg_is_ref_len) {
+ gboolean *tmp = cfg->vreg_is_ref;
+ int size = cfg->vreg_is_ref_len;
+
+ while (vreg >= cfg->vreg_is_ref_len)
+ cfg->vreg_is_ref_len = cfg->vreg_is_ref_len ? cfg->vreg_is_ref_len * 2 : 32;
+ cfg->vreg_is_ref = mono_mempool_alloc0 (cfg->mempool, sizeof (gboolean) * cfg->vreg_is_ref_len);
+ if (size)
+ memcpy (cfg->vreg_is_ref, tmp, size * sizeof (gboolean));
+ }
+ cfg->vreg_is_ref [vreg] = TRUE;
+}
+
+void
+mono_mark_vreg_as_mp (MonoCompile *cfg, int vreg)
+{
+ if (vreg >= cfg->vreg_is_mp_len) {
+ gboolean *tmp = cfg->vreg_is_mp;
+ int size = cfg->vreg_is_mp_len;
+
+ while (vreg >= cfg->vreg_is_mp_len)
+ cfg->vreg_is_mp_len = cfg->vreg_is_mp_len ? cfg->vreg_is_mp_len * 2 : 32;
+ cfg->vreg_is_mp = mono_mempool_alloc0 (cfg->mempool, sizeof (gboolean) * cfg->vreg_is_mp_len);
+ if (size)
+ memcpy (cfg->vreg_is_mp, tmp, size * sizeof (gboolean));
+ }
+ cfg->vreg_is_mp [vreg] = TRUE;
+}
+
static MonoType*
type_from_stack_type (MonoInst *ins) {
switch (ins->type) {
return FALSE;
}
-static MonoJitICallInfo **emul_opcode_map = NULL;
-
-MonoJitICallInfo *
-mono_find_jit_opcode_emulation (int opcode)
-{
- g_assert (opcode >= 0 && opcode <= OP_LAST);
- if (emul_opcode_map)
- return emul_opcode_map [opcode];
- else
- return NULL;
-}
-
gboolean
mini_assembly_can_skip_verification (MonoDomain *domain, MonoMethod *method)
{
MonoAssembly *assembly = method->klass->image->assembly;
- if (method->wrapper_type != MONO_WRAPPER_NONE)
+ if (method->wrapper_type != MONO_WRAPPER_NONE && method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
return FALSE;
if (assembly->in_gac || assembly->image == mono_defaults.corlib)
return FALSE;
*
* Returns true if the method is invalid.
*/
-gboolean
-mini_method_verify (MonoCompile *cfg, MonoMethod *method)
+static gboolean
+mini_method_verify (MonoCompile *cfg, MonoMethod *method, gboolean fail_compile)
{
GSList *tmp, *res;
gboolean is_fulltrust;
res = mono_method_verify_with_current_settings (method, cfg->skip_visibility);
if ((error = mono_loader_get_last_error ())) {
- cfg->exception_type = error->exception_type;
+ if (fail_compile)
+ cfg->exception_type = error->exception_type;
+ else
+ mono_loader_clear_error ();
if (res)
mono_free_verify_list (res);
return TRUE;
for (tmp = res; tmp; tmp = tmp->next) {
MonoVerifyInfoExtended *info = (MonoVerifyInfoExtended *)tmp->data;
if (info->info.status == MONO_VERIFY_ERROR) {
- cfg->exception_type = info->exception_type;
- cfg->exception_message = g_strdup (info->info.message);
+ if (fail_compile) {
+ char *method_name = mono_method_full_name (method, TRUE);
+ cfg->exception_type = info->exception_type;
+ cfg->exception_message = g_strdup_printf ("Error verifying %s: %s", method_name, info->info.message);
+ g_free (method_name);
+ }
mono_free_verify_list (res);
return TRUE;
}
if (info->info.status == MONO_VERIFY_NOT_VERIFIABLE && (!is_fulltrust || info->exception_type == MONO_EXCEPTION_METHOD_ACCESS || info->exception_type == MONO_EXCEPTION_FIELD_ACCESS)) {
- cfg->exception_type = info->exception_type;
- cfg->exception_message = g_strdup (info->info.message);
+ if (fail_compile) {
+ char *method_name = mono_method_full_name (method, TRUE);
+ cfg->exception_type = info->exception_type;
+ cfg->exception_message = g_strdup_printf ("Error verifying %s: %s", method_name, info->info.message);
+ g_free (method_name);
+ }
mono_free_verify_list (res);
return TRUE;
}
return FALSE;
}
-static void
-create_helper_signature (void)
+/* Returns TRUE if verification failed, i.e. something went wrong. */
+gboolean
+mono_compile_is_broken (MonoCompile *cfg, MonoMethod *method, gboolean fail_compile)
{
- helper_sig_domain_get = mono_create_icall_signature ("ptr");
- helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
- helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
- helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
- helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
+ MonoMethod *method_definition = method;
+ gboolean dont_verify = mini_assembly_can_skip_verification (cfg->domain, method);
+ dont_verify |= method->klass->image->assembly->corlib_internal;
+
+ while (method_definition->is_inflated) {
+ MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
+ method_definition = imethod->declaring;
+ }
+
+ return !dont_verify && mini_method_verify (cfg, method_definition, fail_compile);
}
static gconstpointer
StackSlotInfo *scalar_stack_slots, *vtype_stack_slots, *slot_info;
MonoType *t;
int nvtypes;
+ gboolean reuse_slot;
LSCAN_DEBUG (printf ("Allocate Stack Slots 2 for %s:\n", mono_method_full_name (cfg->method, TRUE)));
align = 16;
}
+ reuse_slot = TRUE;
+ if (cfg->disable_reuse_stack_slots)
+ reuse_slot = FALSE;
+
t = mono_type_get_underlying_type (inst->inst_vtype);
switch (t->type) {
case MONO_TYPE_GENERICINST:
slot_info = &vtype_stack_slots [nvtypes];
nvtypes ++;
}
+ if (cfg->disable_reuse_ref_stack_slots)
+ reuse_slot = FALSE;
break;
case MONO_TYPE_PTR:
case MONO_TYPE_I:
case MONO_TYPE_U:
-#if SIZEOF_REGISTER == 4
+#if SIZEOF_VOID_P == 4
case MONO_TYPE_I4:
#else
case MONO_TYPE_I8:
#endif
-#ifdef HAVE_SGEN_GC
- slot_info = &scalar_stack_slots [MONO_TYPE_I];
- break;
-#else
+ if (cfg->disable_ref_noref_stack_slot_share) {
+ slot_info = &scalar_stack_slots [MONO_TYPE_I];
+ break;
+ }
/* Fall through */
-#endif
case MONO_TYPE_CLASS:
case MONO_TYPE_OBJECT:
case MONO_TYPE_STRING:
/* Share non-float stack slots of the same size */
slot_info = &scalar_stack_slots [MONO_TYPE_CLASS];
+ if (cfg->disable_reuse_ref_stack_slots)
+ reuse_slot = FALSE;
break;
default:
LSCAN_DEBUG (printf ("R%d %s -> 0x%x\n", inst->dreg, mono_type_full_name (t), slot));
+ if (!reuse_slot)
+ slot = 0xffffff;
+
if (slot == 0xffffff) {
/*
* Allways allocate valuetypes to sizeof (gpointer) to allow more
StackSlotInfo *scalar_stack_slots, *vtype_stack_slots, *slot_info;
MonoType *t;
int nvtypes;
+ gboolean reuse_slot;
if ((cfg->num_varinfo > 0) && MONO_VARINFO (cfg, 0)->interval)
return mono_allocate_stack_slots_full2 (cfg, backward, stack_size, stack_align);
vars = mono_varlist_sort (cfg, vars, 0);
offset = 0;
- *stack_align = sizeof (gpointer);
+ *stack_align = sizeof(mgreg_t);
for (l = vars; l; l = l->next) {
vmv = l->data;
inst = cfg->varinfo [vmv->idx];
align = 16;
}
+ reuse_slot = TRUE;
+ if (cfg->disable_reuse_stack_slots)
+ reuse_slot = FALSE;
+
t = mono_type_get_underlying_type (inst->inst_vtype);
if (t->byref) {
slot_info = &scalar_stack_slots [MONO_TYPE_I];
slot_info = &vtype_stack_slots [nvtypes];
nvtypes ++;
}
+ if (cfg->disable_reuse_ref_stack_slots)
+ reuse_slot = FALSE;
break;
case MONO_TYPE_PTR:
case MONO_TYPE_I:
case MONO_TYPE_U:
-#if SIZEOF_REGISTER == 4
+#if SIZEOF_VOID_P == 4
case MONO_TYPE_I4:
#else
case MONO_TYPE_I8:
#endif
-#ifdef HAVE_SGEN_GC
- slot_info = &scalar_stack_slots [MONO_TYPE_I];
- break;
-#else
+ if (cfg->disable_ref_noref_stack_slot_share) {
+ slot_info = &scalar_stack_slots [MONO_TYPE_I];
+ break;
+ }
/* Fall through */
-#endif
case MONO_TYPE_CLASS:
case MONO_TYPE_OBJECT:
case MONO_TYPE_STRING:
/* Share non-float stack slots of the same size */
slot_info = &scalar_stack_slots [MONO_TYPE_CLASS];
+ if (cfg->disable_reuse_ref_stack_slots)
+ reuse_slot = FALSE;
break;
default:
*/
}
- if (cfg->disable_reuse_stack_slots)
+ if (!reuse_slot)
slot = 0xffffff;
if (slot == 0xffffff) {
return mono_allocate_stack_slots_full (m, TRUE, stack_size, stack_align);
}
+#define EMUL_HIT_SHIFT 3
+#define EMUL_HIT_MASK ((1 << EMUL_HIT_SHIFT) - 1)
+/* Small bitmap cache: a set bit means an emulation may be registered for that opcode. */
+static mono_byte emul_opcode_hit_cache [(OP_LAST>>EMUL_HIT_SHIFT) + 1] = {0};
+static short emul_opcode_num = 0;
+static short emul_opcode_alloced = 0;
+static short *emul_opcode_opcodes = NULL;
+static MonoJitICallInfo **emul_opcode_map = NULL;
+
+MonoJitICallInfo *
+mono_find_jit_opcode_emulation (int opcode)
+{
+ g_assert (opcode >= 0 && opcode <= OP_LAST);
+ if (emul_opcode_hit_cache [opcode >> (EMUL_HIT_SHIFT + 3)] & (1 << (opcode & EMUL_HIT_MASK))) {
+ int i;
+ for (i = 0; i < emul_opcode_num; ++i) {
+ if (emul_opcode_opcodes [i] == opcode)
+ return emul_opcode_map [i];
+ }
+ }
+ return NULL;
+}
+
void
mono_register_opcode_emulation (int opcode, const char *name, const char *sigstr, gpointer func, gboolean no_throw)
{
MonoJitICallInfo *info;
MonoMethodSignature *sig = mono_create_icall_signature (sigstr);
- if (!emul_opcode_map)
- emul_opcode_map = g_new0 (MonoJitICallInfo*, OP_LAST + 1);
-
g_assert (!sig->hasthis);
g_assert (sig->param_count < 3);
info = mono_register_jit_icall (func, name, sig, no_throw);
- emul_opcode_map [opcode] = info;
+ if (emul_opcode_num >= emul_opcode_alloced) {
+ int incr = emul_opcode_alloced? emul_opcode_alloced/2: 16;
+ emul_opcode_alloced += incr;
+ emul_opcode_map = g_realloc (emul_opcode_map, sizeof (emul_opcode_map [0]) * emul_opcode_alloced);
+ emul_opcode_opcodes = g_realloc (emul_opcode_opcodes, sizeof (emul_opcode_opcodes [0]) * emul_opcode_alloced);
+ }
+ emul_opcode_map [emul_opcode_num] = info;
+ emul_opcode_opcodes [emul_opcode_num] = opcode;
+ emul_opcode_num++;
+ emul_opcode_hit_cache [opcode >> (EMUL_HIT_SHIFT + 3)] |= (1 << (opcode & EMUL_HIT_MASK));
}
static void
MonoBasicBlock *bb;
MonoInst *c;
- g_print ("IR code for method %s\n", mono_method_full_name (cfg->method, TRUE));
+ {
+ char *method_name = mono_method_full_name (cfg->method, TRUE);
+ g_print ("IR code for method %s\n", method_name);
+ g_free (method_name);
+ }
for (i = 0; i < cfg->num_bblocks; ++i) {
bb = cfg->bblocks [i];
{
if (ins == NULL) {
ins = bb->code;
+ if (ins)
+ ins->prev = ins_to_insert;
bb->code = ins_to_insert;
ins_to_insert->next = ins;
if (bb->last_ins == NULL)
void
mono_destroy_compile (MonoCompile *cfg)
{
+ GSList *l;
+
+ if (cfg->header)
+ mono_metadata_free_mh (cfg->header);
//mono_mempool_stats (cfg->mempool);
mono_free_loop_info (cfg);
if (cfg->rs)
g_hash_table_destroy (cfg->spvars);
if (cfg->exvars)
g_hash_table_destroy (cfg->exvars);
- mono_mempool_destroy (cfg->mempool);
+ for (l = cfg->headers_to_free; l; l = l->next)
+ mono_metadata_free_mh (l->data);
g_list_free (cfg->ldstr_list);
g_hash_table_destroy (cfg->token_info_hash);
if (cfg->abs_patches)
g_hash_table_destroy (cfg->abs_patches);
+ mono_mempool_destroy (cfg->mempool);
+
+ mono_debug_free_method (cfg);
g_free (cfg->varinfo);
g_free (cfg->vars);
g_free (cfg);
}
-#ifdef HAVE_KW_THREAD
-static __thread gpointer mono_lmf_addr MONO_TLS_FAST;
+#ifdef MINI_HAVE_FAST_TLS
+MINI_FAST_TLS_DECLARE(mono_lmf_addr);
#ifdef MONO_ARCH_ENABLE_MONO_LMF_VAR
/*
* When this is defined, the current lmf is stored in this tls variable instead of in
* jit_tls->lmf.
*/
-static __thread gpointer mono_lmf MONO_TLS_FAST;
+MINI_FAST_TLS_DECLARE(mono_lmf);
#endif
#endif
gint32
mono_get_jit_tls_offset (void)
{
-#ifdef HAVE_KW_THREAD
+#ifdef MINI_HAVE_FAST_TLS
int offset;
- MONO_THREAD_VAR_OFFSET (mono_jit_tls, offset);
+ MINI_THREAD_VAR_OFFSET (mono_jit_tls, offset);
return offset;
#else
return -1;
gint32
mono_get_lmf_tls_offset (void)
{
-#if defined(HAVE_KW_THREAD) && defined(MONO_ARCH_ENABLE_MONO_LMF_VAR)
+#if defined(MINI_HAVE_FAST_TLS) && defined(MONO_ARCH_ENABLE_MONO_LMF_VAR)
int offset;
- MONO_THREAD_VAR_OFFSET(mono_lmf,offset);
+ MINI_THREAD_VAR_OFFSET(mono_lmf,offset);
return offset;
#else
return -1;
mono_get_lmf_addr_tls_offset (void)
{
int offset;
- MONO_THREAD_VAR_OFFSET(mono_lmf_addr,offset);
+ MINI_THREAD_VAR_OFFSET(mono_lmf_addr,offset);
return offset;
}
MonoLMF *
mono_get_lmf (void)
{
-#if defined(HAVE_KW_THREAD) && defined(MONO_ARCH_ENABLE_MONO_LMF_VAR)
- return mono_lmf;
+#if defined(MINI_HAVE_FAST_TLS) && defined(MONO_ARCH_ENABLE_MONO_LMF_VAR)
+ return MINI_FAST_TLS_GET (mono_lmf);
#else
MonoJitTlsData *jit_tls;
if ((jit_tls = TlsGetValue (mono_jit_tls_id)))
return jit_tls->lmf;
-
- g_assert_not_reached ();
+ /*
+ * We do not assert here because this function can be called from
+ * mini-gc.c on a thread that has not executed any managed code, yet
+ * (the thread object allocation can trigger a collection).
+ */
return NULL;
#endif
}
MonoLMF **
mono_get_lmf_addr (void)
{
-#ifdef HAVE_KW_THREAD
- return mono_lmf_addr;
+#ifdef MINI_HAVE_FAST_TLS
+ return MINI_FAST_TLS_GET (mono_lmf_addr);
#else
MonoJitTlsData *jit_tls;
void
mono_set_lmf (MonoLMF *lmf)
{
-#if defined(HAVE_KW_THREAD) && defined(MONO_ARCH_ENABLE_MONO_LMF_VAR)
- mono_lmf = lmf;
+#if defined(MINI_HAVE_FAST_TLS) && defined(MONO_ARCH_ENABLE_MONO_LMF_VAR)
+ MINI_FAST_TLS_SET (mono_lmf, lmf);
#endif
(*mono_get_lmf_addr ()) = lmf;
}
+static void
+mono_set_jit_tls (MonoJitTlsData *jit_tls)
+{
+ TlsSetValue (mono_jit_tls_id, jit_tls);
+
+#ifdef MINI_HAVE_FAST_TLS
+ MINI_FAST_TLS_SET (mono_jit_tls, jit_tls);
+#endif
+}
+
+static void
+mono_set_lmf_addr (gpointer lmf_addr)
+{
+#ifdef MINI_HAVE_FAST_TLS
+ MINI_FAST_TLS_SET (mono_lmf_addr, lmf_addr);
+#endif
+}
+
/* Called by native->managed wrappers */
void
mono_jit_thread_attach (MonoDomain *domain)
*/
domain = mono_get_root_domain ();
-#ifdef HAVE_KW_THREAD
- if (!mono_lmf_addr) {
+#ifdef MINI_HAVE_FAST_TLS
+ if (!MINI_FAST_TLS_GET (mono_lmf_addr)) {
mono_thread_attach (domain);
}
#else
jit_tls = g_new0 (MonoJitTlsData, 1);
- TlsSetValue (mono_jit_tls_id, jit_tls);
-
-#ifdef HAVE_KW_THREAD
- mono_jit_tls = jit_tls;
-#endif
-
jit_tls->abort_func = abort_func;
jit_tls->end_of_stack = stack_start;
+ mono_set_jit_tls (jit_tls);
+
lmf = g_new0 (MonoLMF, 1);
-#ifdef MONO_ARCH_INIT_TOP_LMF_ENTRY
MONO_ARCH_INIT_TOP_LMF_ENTRY (lmf);
-#else
- lmf->ebp = -1;
-#endif
jit_tls->first_lmf = lmf;
-#if defined(HAVE_KW_THREAD) && defined(MONO_ARCH_ENABLE_MONO_LMF_VAR)
+#if defined(MINI_HAVE_FAST_TLS) && defined(MONO_ARCH_ENABLE_MONO_LMF_VAR)
/* jit_tls->lmf is unused */
- mono_lmf = lmf;
- mono_lmf_addr = &mono_lmf;
+ MINI_FAST_TLS_SET (mono_lmf, lmf);
+ mono_set_lmf_addr (&mono_lmf);
#else
-#if defined(HAVE_KW_THREAD)
- mono_lmf_addr = &jit_tls->lmf;
-#endif
+ mono_set_lmf_addr (&jit_tls->lmf);
jit_tls->lmf = lmf;
#endif
}
static void
-mono_thread_start_cb (gsize tid, gpointer stack_start, gpointer func)
+free_jit_tls_data (MonoJitTlsData *jit_tls)
+{
+ mono_arch_free_jit_tls_data (jit_tls);
+ mono_free_altstack (jit_tls);
+
+ g_free (jit_tls->first_lmf);
+ g_free (jit_tls);
+}
+
+static void
+mono_thread_start_cb (intptr_t tid, gpointer stack_start, gpointer func)
{
MonoInternalThread *thread;
void *jit_tls = setup_jit_tls_data (stack_start, mono_thread_abort);
mono_debugger_thread_created (tid, thread->root_domain_thread, jit_tls, func);
if (thread)
thread->jit_data = jit_tls;
+
+ mono_arch_cpu_init ();
}
void (*mono_thread_attach_aborted_cb ) (MonoObject *obj) = NULL;
}
static void
-mono_thread_attach_cb (gsize tid, gpointer stack_start)
+mono_thread_attach_cb (intptr_t tid, gpointer stack_start)
{
MonoInternalThread *thread;
void *jit_tls = setup_jit_tls_data (stack_start, mono_thread_abort_dummy);
thread->jit_data = jit_tls;
if (mono_profiler_get_events () & MONO_PROFILE_STATISTICAL)
mono_runtime_setup_stat_profiler ();
+
+ mono_arch_cpu_init ();
}
static void
-mini_thread_cleanup (MonoThread *thread)
+mini_thread_cleanup (MonoInternalThread *thread)
{
- MonoInternalThread *internal = thread->internal_thread;
- MonoJitTlsData *jit_tls = internal->jit_data;
+ MonoJitTlsData *jit_tls = thread->jit_data;
if (jit_tls) {
mono_debugger_thread_cleanup (jit_tls);
- mono_arch_free_jit_tls_data (jit_tls);
-
- mono_free_altstack (jit_tls);
- g_free (jit_tls->first_lmf);
- g_free (jit_tls);
- internal->jit_data = NULL;
/* We can't clean up tls information if we are on another thread, it will clean up the wrong stuff
* It would be nice to issue a warning when this happens outside of the shutdown sequence. but it's
*
* The current offender is mono_thread_manage which cleanup threads from the outside.
*/
- if (internal == mono_thread_internal_current ()) {
- TlsSetValue (mono_jit_tls_id, NULL);
-
-#ifdef HAVE_KW_THREAD
- mono_jit_tls = NULL;
- mono_lmf_addr = NULL;
-#if defined(MONO_ARCH_ENABLE_MONO_LMF_VAR)
- mono_lmf = NULL;
-#endif
-#endif
+ if (thread == mono_thread_internal_current ()) {
+ mono_set_lmf (NULL);
+ mono_set_jit_tls (NULL);
+ mono_set_lmf_addr (NULL);
}
+
+ free_jit_tls_data (jit_tls);
+
+ thread->jit_data = NULL;
}
}
mono_create_tls_get (MonoCompile *cfg, int offset)
{
#ifdef MONO_ARCH_HAVE_TLS_GET
- MonoInst* ins;
-
- if (offset == -1)
- return NULL;
-
- MONO_INST_NEW (cfg, ins, OP_TLS_GET);
- ins->dreg = mono_alloc_preg (cfg);
- ins->inst_offset = offset;
- return ins;
-#else
- return NULL;
+ if (MONO_ARCH_HAVE_TLS_GET) {
+ MonoInst* ins;
+
+ if (offset == -1)
+ return NULL;
+
+ MONO_INST_NEW (cfg, ins, OP_TLS_GET);
+ ins->dreg = mono_alloc_preg (cfg);
+ ins->inst_offset = offset;
+ return ins;
+ }
#endif
+ return NULL;
}
MonoInst*
return mono_create_tls_get (cfg, mono_domain_get_tls_offset ());
}
+MonoInst*
+mono_get_thread_intrinsic (MonoCompile* cfg)
+{
+ return mono_create_tls_get (cfg, mono_thread_get_tls_offset ());
+}
+
void
mono_add_patch_info (MonoCompile *cfg, int ip, MonoJumpInfoType type, gconstpointer target)
{
switch (ji->type) {
case MONO_PATCH_INFO_RVA:
case MONO_PATCH_INFO_LDSTR:
- case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
case MONO_PATCH_INFO_LDTOKEN:
case MONO_PATCH_INFO_DECLSEC:
return (ji->type << 8) | ji->data.token->token;
+ case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
+ return (ji->type << 8) | ji->data.token->token | (ji->data.token->has_context ? (gsize)ji->data.token->context.class_inst : 0);
case MONO_PATCH_INFO_INTERNAL_METHOD:
return (ji->type << 8) | g_str_hash (ji->data.name);
case MONO_PATCH_INFO_VTABLE:
switch (patch_info->type) {
case MONO_PATCH_INFO_BB:
- g_assert (patch_info->data.bb->native_offset);
+ /*
+ * FIXME: This could be hit for methods without a prolog. Should use -1
+ * but too much code depends on a 0 initial value.
+ */
+ //g_assert (patch_info->data.bb->native_offset);
target = patch_info->data.bb->native_offset + code;
break;
case MONO_PATCH_INFO_ABS:
target = patch_info->data.inst->inst_c0 + code;
break;
case MONO_PATCH_INFO_IP:
+#if defined(__native_client__) && defined(__native_client_codegen__)
+ /* Need to transform to the destination address, it's */
+ /* emitted as an immediate in the code. */
+ target = nacl_inverse_modify_patch_target(ip);
+#else
target = ip;
+#endif
break;
case MONO_PATCH_INFO_METHOD_REL:
target = code + patch_info->data.offset;
}
case MONO_PATCH_INFO_METHOD_JUMP:
target = mono_create_jump_trampoline (domain, patch_info->data.method, FALSE);
+#if defined(__native_client__) && defined(__native_client_codegen__)
+#if defined(TARGET_AMD64)
+ /* This target is an absolute address, not relative to the */
+ /* current code being emitted on AMD64. */
+ target = nacl_inverse_modify_patch_target(target);
+#endif
+#endif
break;
case MONO_PATCH_INFO_METHOD:
if (patch_info->data.method == method) {
gpointer *jump_table;
int i;
+#if defined(__native_client__) && defined(__native_client_codegen__)
+ /* This memory will leak, but we don't care if we're */
+ /* not deleting JIT'd methods anyway */
+ jump_table = g_malloc0 (sizeof(gpointer) * patch_info->data.table->table_size);
+#else
if (method && method->dynamic) {
jump_table = mono_code_manager_reserve (mono_dynamic_code_hash_lookup (domain, method)->code_mp, sizeof (gpointer) * patch_info->data.table->table_size);
} else {
jump_table = mono_domain_code_reserve (domain, sizeof (gpointer) * patch_info->data.table->table_size);
}
}
+#endif
- for (i = 0; i < patch_info->data.table->table_size; i++)
+ for (i = 0; i < patch_info->data.table->table_size; i++) {
+#if defined(__native_client__) && defined(__native_client_codegen__)
+ /* 'code' is relative to the current code blob, we */
+ /* need to do this transform on it to make the */
+ /* pointers in this table absolute */
+ jump_table [i] = nacl_inverse_modify_patch_target (code) + GPOINTER_TO_INT (patch_info->data.table->table [i]);
+#else
jump_table [i] = code + GPOINTER_TO_INT (patch_info->data.table->table [i]);
+#endif
+ }
+
+#if defined(__native_client__) && defined(__native_client_codegen__)
+ /* jump_table is in the data section, we need to transform */
+ /* it here so when it gets modified in amd64_patch it will */
+ /* then point back to the absolute data address */
+ target = nacl_inverse_modify_patch_target (jump_table);
+#else
target = jump_table;
+#endif
break;
}
case MONO_PATCH_INFO_METHODCONST:
case MONO_PATCH_INFO_SFLDA: {
MonoVTable *vtable = mono_class_vtable (domain, patch_info->data.field->parent);
- g_assert (vtable);
- if (!vtable->initialized && !(vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) && (method && mono_class_needs_cctor_run (vtable->klass, method)))
- /* Done by the generated code */
- ;
+ if (mono_class_field_is_special_static (patch_info->data.field)) {
+ gpointer addr = NULL;
+
+ mono_domain_lock (domain);
+ if (domain->special_static_fields)
+ addr = g_hash_table_lookup (domain->special_static_fields, patch_info->data.field);
+ mono_domain_unlock (domain);
+ g_assert (addr);
+ return addr;
+ }
+
+ g_assert (vtable);
+ if (!vtable->initialized && !(vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) && (method && mono_class_needs_cctor_run (vtable->klass, method)))
+ /* Done by the generated code */
+ ;
else {
if (run_cctors)
mono_runtime_class_init (vtable);
g_assert_not_reached ();
#endif
break;
+ case MONO_PATCH_INFO_GC_CARD_TABLE_ADDR: {
+ int card_table_shift_bits;
+ gpointer card_table_mask;
+
+ target = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
+ break;
+ }
+ case MONO_PATCH_INFO_CASTCLASS_CACHE: {
+ target = mono_domain_alloc0 (domain, sizeof (gpointer));
+ break;
+ }
default:
g_assert_not_reached ();
}
return (gpointer)target;
}
+void
+mono_add_seq_point (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins, int native_offset)
+{
+ /*
+  * Record the native code offset of the sequence point INS and register it
+  * both with the owning bblock and with the per-method seq point array.
+  */
+ ins->inst_offset = native_offset;
+ bb->seq_points = g_slist_prepend_mempool (cfg->mempool, bb->seq_points, ins);
+ bb->last_seq_point = ins;
+ g_ptr_array_add (cfg->seq_points, ins);
+}
+
+#ifndef DISABLE_JIT
+
static void
mono_compile_create_vars (MonoCompile *cfg)
{
MonoMethodHeader *header;
int i;
- header = mono_method_get_header (cfg->method);
+ header = cfg->header;
sig = mono_method_signature (cfg->method);
mono_arch_create_vars (cfg);
}
+#endif /* #ifndef DISABLE_JIT */
+
void
mono_print_code (MonoCompile *cfg, const char* msg)
{
}
case MONO_PATCH_INFO_SWITCH: {
gpointer *table;
+#if defined(__native_client__) && defined(__native_client_codegen__)
+ /* This memory will leak. */
+ /* TODO: can we free this when */
+ /* making the final jump table? */
+ table = g_malloc0 (sizeof(gpointer) * patch_info->data.table->table_size);
+#else
if (cfg->method->dynamic) {
table = mono_code_manager_reserve (cfg->dynamic_info->code_mp, sizeof (gpointer) * patch_info->data.table->table_size);
} else {
table = mono_domain_code_reserve (cfg->domain, sizeof (gpointer) * patch_info->data.table->table_size);
}
+#endif
for (i = 0; i < patch_info->data.table->table_size; i++) {
/* Might be NULL if the switch is eliminated */
GSList *list;
MonoDomain *domain = cfg->domain;
unsigned char *ip = cfg->native_code + patch_info->ip.i;
+#if defined(__native_client__) && defined(__native_client_codegen__)
+ /* When this jump target gets evaluated, the method */
+ /* will be installed in the dynamic code section, */
+ /* not at the location of cfg->native_code. */
+ ip = nacl_inverse_modify_patch_target (cfg->native_code) + patch_info->ip.i;
+#endif
mono_domain_lock (domain);
if (!domain_jit_info (domain)->jump_target_hash)
}
}
+/*
+ * collect_pred_seq_points:
+ *
+ *   Record INS as a possible successor of the last sequence point of each
+ * predecessor bblock of BB. Sequence points are identified by the index
+ * previously stashed in ins->backend.size (see mono_save_seq_point_info),
+ * and the successor index is appended to NEXT [predecessor's index].
+ * Predecessors with no seq point of their own are searched recursively;
+ * DEPTH bounds that recursion to 5 levels so CFG cycles cannot loop forever.
+ */
+static void
+collect_pred_seq_points (MonoBasicBlock *bb, MonoInst *ins, GSList **next, int depth)
+{
+ int i;
+ MonoBasicBlock *in_bb;
+
+ for (i = 0; i < bb->in_count; ++i) {
+ in_bb = bb->in_bb [i];
+
+ if (in_bb->last_seq_point) {
+ /* Link this predecessor's last seq point to INS */
+ next [in_bb->last_seq_point->backend.size] = g_slist_append (next [in_bb->last_seq_point->backend.size], GUINT_TO_POINTER (ins->backend.size));
+ } else {
+ /* Have to look at its predecessors */
+ if (depth < 5)
+ collect_pred_seq_points (in_bb, ins, next, depth + 1);
+ }
+ }
+}
+
+/*
+ * mono_save_seq_point_info:
+ *
+ *   Convert the sequence point instructions collected during compilation
+ * (cfg->seq_points) into a MonoSeqPointInfo, compute for every seq point the
+ * set of seq points which can immediately follow it (needed by the debugger
+ * agent to implement 'step over'), and register the result in the domain's
+ * seq point table. Frees cfg->seq_points when done.
+ */
+static void
+mono_save_seq_point_info (MonoCompile *cfg)
+{
+ MonoBasicBlock *bb;
+ GSList *bb_seq_points, *l;
+ MonoInst *last;
+ MonoDomain *domain = cfg->domain;
+ int i;
+ MonoSeqPointInfo *info;
+ GSList **next;
+
+ if (!cfg->seq_points)
+ return;
+
+ /* MonoSeqPointInfo ends in a MONO_ZERO_LEN_ARRAY of SeqPoint, hence the size math */
+ info = g_malloc0 (sizeof (MonoSeqPointInfo) + (cfg->seq_points->len - MONO_ZERO_LEN_ARRAY) * sizeof (SeqPoint));
+ info->len = cfg->seq_points->len;
+ for (i = 0; i < cfg->seq_points->len; ++i) {
+ SeqPoint *sp = &info->seq_points [i];
+ MonoInst *ins = g_ptr_array_index (cfg->seq_points, i);
+
+ sp->il_offset = ins->inst_imm;
+ sp->native_offset = ins->inst_offset;
+
+ /* Used below */
+ ins->backend.size = i;
+ }
+
+ /*
+ * For each sequence point, compute the list of sequence points immediately
+ * following it, this is needed to implement 'step over' in the debugger agent.
+ */
+ next = g_new0 (GSList*, cfg->seq_points->len);
+ for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
+ /* NOTE(review): g_slist_reverse reverses in place, so bb->seq_points is left
+  * pointing at the old head afterwards; appears to be the last use — confirm. */
+ bb_seq_points = g_slist_reverse (bb->seq_points);
+ last = NULL;
+ for (l = bb_seq_points; l; l = l->next) {
+ MonoInst *ins = l->data;
+
+ if (!(ins->flags & MONO_INST_SINGLE_STEP_LOC))
+ continue;
+
+ if (last != NULL) {
+ /* Link with the previous seq point in the same bb */
+ next [last->backend.size] = g_slist_append (next [last->backend.size], GUINT_TO_POINTER (ins->backend.size));
+ } else {
+ /* Link with the last bb in the previous bblocks */
+ collect_pred_seq_points (bb, ins, next, 0);
+ }
+
+ last = ins;
+ }
+ }
+
+ if (cfg->verbose_level > 2) {
+ printf ("\nSEQ POINT MAP: \n");
+ }
+
+ /* Flatten the per-seq-point successor lists into the SeqPoint entries */
+ for (i = 0; i < cfg->seq_points->len; ++i) {
+ SeqPoint *sp = &info->seq_points [i];
+ GSList *l;
+ int j, next_index;
+
+ sp->next_len = g_slist_length (next [i]);
+ sp->next = g_new (int, sp->next_len);
+ j = 0;
+ if (cfg->verbose_level > 2 && next [i]) {
+ printf ("\t0x%x ->", sp->il_offset);
+ for (l = next [i]; l; l = l->next) {
+ next_index = GPOINTER_TO_UINT (l->data);
+ printf (" 0x%x", info->seq_points [next_index].il_offset);
+ }
+ printf ("\n");
+ }
+ for (l = next [i]; l; l = l->next) {
+ next_index = GPOINTER_TO_UINT (l->data);
+ sp->next [j ++] = next_index;
+ }
+ g_slist_free (next [i]);
+ }
+ g_free (next);
+
+ cfg->seq_point_info = info;
+
+ // FIXME: dynamic methods
+ if (!cfg->compile_aot) {
+ /* Register the info under the method the debugger will look it up by */
+ mono_domain_lock (domain);
+ // FIXME: How can the lookup succeed ?
+ if (!g_hash_table_lookup (domain_jit_info (domain)->seq_points, cfg->method_to_register))
+ g_hash_table_insert (domain_jit_info (domain)->seq_points, cfg->method_to_register, info);
+ mono_domain_unlock (domain);
+ }
+
+ g_ptr_array_free (cfg->seq_points, TRUE);
+ cfg->seq_points = NULL;
+}
+
void
mono_codegen (MonoCompile *cfg)
{
MonoBasicBlock *bb;
int max_epilog_size;
guint8 *code;
+ MonoDomain *code_domain;
+
+ if (mono_using_xdebug)
+ /*
+ * Recent gdb versions have trouble processing symbol files containing
+ * overlapping address ranges, so allocate all code from the code manager
+ * of the root domain. (#666152).
+ */
+ code_domain = mono_get_root_domain ();
+ else
+ code_domain = cfg->domain;
+
+#if defined(__native_client_codegen__) && defined(__native_client__)
+ void *code_dest;
+
+ /* This keeps patch targets from being transformed during
+ * ordinary method compilation, for local branches and jumps.
+ */
+ nacl_allow_target_modification (FALSE);
+#endif
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
cfg->spill_count = 0;
/* emit code all basic blocks */
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
bb->native_offset = cfg->code_len;
+ bb->real_native_offset = cfg->code_len;
//if ((bb == cfg->bb_entry) || !(bb->region == -1 && !bb->dfn))
mono_arch_output_basic_block (cfg, bb);
+ bb->native_length = cfg->code_len - bb->native_offset;
if (bb == cfg->bb_exit) {
cfg->epilog_begin = cfg->code_len;
}
}
+#ifdef __native_client_codegen__
+ mono_nacl_fix_patches (cfg->native_code, cfg->patch_info);
+#endif
mono_arch_emit_exceptions (cfg);
max_epilog_size = 0;
mono_dynamic_code_hash_insert (cfg->domain, cfg->method, cfg->dynamic_info);
mono_domain_unlock (cfg->domain);
- code = mono_code_manager_reserve (cfg->dynamic_info->code_mp, cfg->code_size + unwindlen);
+ if (mono_using_xdebug)
+ /* See the comment for cfg->code_domain */
+ code = mono_domain_code_reserve (code_domain, cfg->code_size + unwindlen);
+ else
+ code = mono_code_manager_reserve (cfg->dynamic_info->code_mp, cfg->code_size + unwindlen);
} else {
guint unwindlen = 0;
#ifdef MONO_ARCH_HAVE_UNWIND_TABLE
unwindlen = mono_arch_unwindinfo_get_size (cfg->arch.unwindinfo);
#endif
- code = mono_domain_code_reserve (cfg->domain, cfg->code_size + unwindlen);
+ code = mono_domain_code_reserve (code_domain, cfg->code_size + unwindlen);
}
+#if defined(__native_client_codegen__) && defined(__native_client__)
+ nacl_allow_target_modification (TRUE);
+#endif
memcpy (code, cfg->native_code, cfg->code_len);
+#if defined(__default_codegen__)
g_free (cfg->native_code);
+#elif defined(__native_client_codegen__)
+ if (cfg->native_code_alloc) {
+ g_free (cfg->native_code_alloc);
+ cfg->native_code_alloc = 0;
+ }
+ else if (cfg->native_code) {
+ g_free (cfg->native_code);
+ }
+#endif /* __native_client_codegen__ */
cfg->native_code = code;
code = cfg->native_code + cfg->code_len;
#ifdef MONO_ARCH_HAVE_SAVE_UNWIND_INFO
mono_arch_save_unwind_info (cfg);
#endif
-
+
+#if defined(__native_client_codegen__) && defined(__native_client__)
+ if (!cfg->compile_aot) {
+ if (cfg->method->dynamic) {
+ code_dest = nacl_code_manager_get_code_dest(cfg->dynamic_info->code_mp, cfg->native_code);
+ } else {
+ code_dest = nacl_domain_get_code_dest(cfg->domain, cfg->native_code);
+ }
+ }
+#endif
+
+#if defined(__native_client_codegen__)
+ mono_nacl_fix_patches (cfg->native_code, cfg->patch_info);
+#endif
+
mono_arch_patch_code (cfg->method, cfg->domain, cfg->native_code, cfg->patch_info, cfg->run_cctors);
if (cfg->method->dynamic) {
- mono_code_manager_commit (cfg->dynamic_info->code_mp, cfg->native_code, cfg->code_size, cfg->code_len);
+ if (mono_using_xdebug)
+ mono_domain_code_commit (code_domain, cfg->native_code, cfg->code_size, cfg->code_len);
+ else
+ mono_code_manager_commit (cfg->dynamic_info->code_mp, cfg->native_code, cfg->code_size, cfg->code_len);
} else {
- mono_domain_code_commit (cfg->domain, cfg->native_code, cfg->code_size, cfg->code_len);
+ mono_domain_code_commit (code_domain, cfg->native_code, cfg->code_size, cfg->code_len);
}
+#if defined(__native_client_codegen__) && defined(__native_client__)
+ cfg->native_code = code_dest;
+#endif
mono_profiler_code_buffer_new (cfg->native_code, cfg->code_len, MONO_PROFILER_CODE_BUFFER_METHOD, cfg->method);
mono_arch_flush_icache (cfg->native_code, cfg->code_len);
}
}
+/*
+ * mono_handle_out_of_line_bblock:
+ *
+ *   Make fall-through into out-of-line bblocks explicit: when a bblock would
+ * otherwise fall through into its (out-of-line) successor, append an
+ * unconditional OP_BR to it.
+ */
+static void
+mono_handle_out_of_line_bblock (MonoCompile *cfg)
+{
+ MonoBasicBlock *bb;
+
+ for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
+ MonoInst *jump;
+
+ if (!bb->next_bb || !bb->next_bb->out_of_line)
+ continue;
+ /* Already ends in a branch, nothing to do */
+ if (!bb->last_ins || MONO_IS_BRANCH_OP (bb->last_ins))
+ continue;
+ MONO_INST_NEW (cfg, jump, OP_BR);
+ MONO_ADD_INS (bb, jump);
+ jump->inst_target_bb = bb->next_bb;
+ }
+}
+
+/*
+ * create_jit_info:
+ *
+ *   Build the MonoJitInfo describing the freshly compiled method: code
+ * bounds, generic sharing info, the try-block-hole table and the exception
+ * clause table. The info is heap allocated for dynamic methods (so it can be
+ * freed with them), otherwise it comes from the domain mempool.
+ */
+static MonoJitInfo*
+create_jit_info (MonoCompile *cfg, MonoMethod *method_to_compile)
+{
+ GSList *tmp;
+ MonoMethodHeader *header;
+ MonoJitInfo *jinfo;
+ int num_clauses;
+ int generic_info_size;
+ int holes_size = 0, num_holes = 0;
+
+ g_assert (method_to_compile == cfg->method);
+ header = cfg->header;
+
+ if (cfg->generic_sharing_context)
+ generic_info_size = sizeof (MonoGenericJitInfo);
+ else
+ generic_info_size = 0;
+
+ /* Count the holes which need an explicit table entry; this must match the
+  * filling loop below since it determines the allocation size. */
+ if (cfg->try_block_holes) {
+ for (tmp = cfg->try_block_holes; tmp; tmp = tmp->next) {
+ TryBlockHole *hole = tmp->data;
+ MonoExceptionClause *ec = hole->clause;
+ int hole_end = hole->basic_block->native_offset + hole->basic_block->native_length;
+ MonoBasicBlock *clause_last_bb = cfg->cil_offset_to_bb [ec->try_offset + ec->try_len];
+ g_assert (clause_last_bb);
+
+ /* Holes at the end of a try region can be represented by simply reducing the size of the block itself.*/
+ if (clause_last_bb->native_offset != hole_end)
+ ++num_holes;
+ }
+ if (num_holes)
+ holes_size = sizeof (MonoTryBlockHoleTableJitInfo) + num_holes * sizeof (MonoTryBlockHoleJitInfo);
+ if (G_UNLIKELY (cfg->verbose_level >= 4))
+ printf ("Number of try block holes %d\n", num_holes);
+ }
+
+ /* When compiled by LLVM the EH clauses come from the LLVM exception info */
+ if (COMPILE_LLVM (cfg))
+ num_clauses = cfg->llvm_ex_info_len;
+ else
+ num_clauses = header->num_clauses;
+
+ if (cfg->method->dynamic) {
+ jinfo = g_malloc0 (MONO_SIZEOF_JIT_INFO + (num_clauses * sizeof (MonoJitExceptionInfo)) +
+ generic_info_size + holes_size);
+ } else {
+ jinfo = mono_domain_alloc0 (cfg->domain, MONO_SIZEOF_JIT_INFO +
+ (num_clauses * sizeof (MonoJitExceptionInfo)) +
+ generic_info_size + holes_size);
+ }
+
+ jinfo->method = cfg->method_to_register;
+ jinfo->code_start = cfg->native_code;
+ jinfo->code_size = cfg->code_len;
+ jinfo->used_regs = cfg->used_int_regs;
+ jinfo->domain_neutral = (cfg->opt & MONO_OPT_SHARED) != 0;
+ jinfo->cas_inited = FALSE; /* initialization delayed at the first stack walk using this method */
+ jinfo->num_clauses = num_clauses;
+ if (COMPILE_LLVM (cfg))
+ jinfo->from_llvm = TRUE;
+
+ /* Record where the debugger/stack walker can find 'this' and the rgctx */
+ if (cfg->generic_sharing_context) {
+ MonoInst *inst;
+ MonoGenericJitInfo *gi;
+
+ jinfo->has_generic_jit_info = 1;
+
+ gi = mono_jit_info_get_generic_jit_info (jinfo);
+ g_assert (gi);
+
+ gi->generic_sharing_context = cfg->generic_sharing_context;
+
+ if ((method_to_compile->flags & METHOD_ATTRIBUTE_STATIC) ||
+ mini_method_get_context (method_to_compile)->method_inst ||
+ method_to_compile->klass->valuetype) {
+ g_assert (cfg->rgctx_var);
+ }
+
+ gi->has_this = 1;
+
+ if ((method_to_compile->flags & METHOD_ATTRIBUTE_STATIC) ||
+ mini_method_get_context (method_to_compile)->method_inst ||
+ method_to_compile->klass->valuetype) {
+ inst = cfg->rgctx_var;
+ if (!COMPILE_LLVM (cfg))
+ g_assert (inst->opcode == OP_REGOFFSET);
+ } else {
+ inst = cfg->args [0];
+ }
+
+ if (COMPILE_LLVM (cfg)) {
+ g_assert (cfg->llvm_this_reg != -1);
+ gi->this_in_reg = 0;
+ gi->this_reg = cfg->llvm_this_reg;
+ gi->this_offset = cfg->llvm_this_offset;
+ } else if (inst->opcode == OP_REGVAR) {
+ gi->this_in_reg = 1;
+ gi->this_reg = inst->dreg;
+ } else {
+ g_assert (inst->opcode == OP_REGOFFSET);
+#ifdef TARGET_X86
+ g_assert (inst->inst_basereg == X86_EBP);
+#elif defined(TARGET_AMD64)
+ g_assert (inst->inst_basereg == X86_EBP || inst->inst_basereg == X86_ESP);
+#endif
+ g_assert (inst->inst_offset >= G_MININT32 && inst->inst_offset <= G_MAXINT32);
+
+ gi->this_in_reg = 0;
+ gi->this_reg = inst->inst_basereg;
+ gi->this_offset = inst->inst_offset;
+ }
+ }
+
+ /* Fill the hole table; must produce exactly num_holes entries (asserted below) */
+ if (num_holes) {
+ MonoTryBlockHoleTableJitInfo *table;
+ int i;
+
+ jinfo->has_try_block_holes = 1;
+ table = mono_jit_info_get_try_block_hole_table_info (jinfo);
+ table->num_holes = (guint16)num_holes;
+ i = 0;
+ for (tmp = cfg->try_block_holes; tmp; tmp = tmp->next) {
+ guint32 start_bb_offset;
+ MonoTryBlockHoleJitInfo *hole;
+ TryBlockHole *hole_data = tmp->data;
+ MonoExceptionClause *ec = hole_data->clause;
+ int hole_end = hole_data->basic_block->native_offset + hole_data->basic_block->native_length;
+ MonoBasicBlock *clause_last_bb = cfg->cil_offset_to_bb [ec->try_offset + ec->try_len];
+ g_assert (clause_last_bb);
+
+ /* Holes at the end of a try region can be represented by simply reducing the size of the block itself.*/
+ if (clause_last_bb->native_offset == hole_end)
+ continue;
+
+ start_bb_offset = hole_data->start_offset - hole_data->basic_block->native_offset;
+ hole = &table->holes [i++];
+ hole->clause = hole_data->clause - &header->clauses [0];
+ hole->offset = (guint32)hole_data->start_offset;
+ hole->length = (guint16)(hole_data->basic_block->native_length - start_bb_offset);
+
+ if (G_UNLIKELY (cfg->verbose_level >= 4))
+ printf ("\tTry block hole at eh clause %d offset %x length %x\n", hole->clause, hole->offset, hole->length);
+ }
+ g_assert (i == num_holes);
+ }
+
+ if (COMPILE_LLVM (cfg)) {
+ if (num_clauses)
+ memcpy (&jinfo->clauses [0], &cfg->llvm_ex_info [0], num_clauses * sizeof (MonoJitExceptionInfo));
+ } else if (header->num_clauses) {
+ int i;
+
+ /* Translate each IL exception clause into native code addresses */
+ for (i = 0; i < header->num_clauses; i++) {
+ MonoExceptionClause *ec = &header->clauses [i];
+ MonoJitExceptionInfo *ei = &jinfo->clauses [i];
+ MonoBasicBlock *tblock;
+ MonoInst *exvar;
+
+ ei->flags = ec->flags;
+
+ exvar = mono_find_exvar_for_offset (cfg, ec->handler_offset);
+ ei->exvar_offset = exvar ? exvar->inst_offset : 0;
+
+ if (ei->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
+ tblock = cfg->cil_offset_to_bb [ec->data.filter_offset];
+ g_assert (tblock);
+ ei->data.filter = cfg->native_code + tblock->native_offset;
+ } else {
+ ei->data.catch_class = ec->data.catch_class;
+ }
+
+ tblock = cfg->cil_offset_to_bb [ec->try_offset];
+ g_assert (tblock);
+ g_assert (tblock->native_offset);
+ ei->try_start = cfg->native_code + tblock->native_offset;
+ if (tblock->extend_try_block) {
+ /*
+ * Extend the try block backwards to include parts of the previous call
+ * instruction.
+ */
+ ei->try_start = (guint8*)ei->try_start - MONO_ARCH_MONITOR_ENTER_ADJUSTMENT;
+ }
+ tblock = cfg->cil_offset_to_bb [ec->try_offset + ec->try_len];
+ g_assert (tblock);
+ /* The bb at the end of the try region may have been eliminated;
+  * scan backwards for the nearest bblock with emitted code. */
+ if (!tblock->native_offset) {
+ int j, end;
+ for (j = ec->try_offset + ec->try_len, end = ec->try_offset; j >= end; --j) {
+ MonoBasicBlock *bb = cfg->cil_offset_to_bb [j];
+ if (bb && bb->native_offset) {
+ tblock = bb;
+ break;
+ }
+ }
+ }
+ ei->try_end = cfg->native_code + tblock->native_offset;
+ g_assert (tblock->native_offset);
+ tblock = cfg->cil_offset_to_bb [ec->handler_offset];
+ g_assert (tblock);
+ ei->handler_start = cfg->native_code + tblock->native_offset;
+
+ /* Shorten try blocks whose hole reaches their end instead of
+  * emitting a separate hole table entry (see counting loop above) */
+ for (tmp = cfg->try_block_holes; tmp; tmp = tmp->next) {
+ TryBlockHole *hole = tmp->data;
+ gpointer hole_end = cfg->native_code + (hole->basic_block->native_offset + hole->basic_block->native_length);
+ if (hole->clause == ec && hole_end == ei->try_end) {
+ if (G_UNLIKELY (cfg->verbose_level >= 4))
+ printf ("\tShortening try block %d from %x to %x\n", i, (int)((guint8*)ei->try_end - cfg->native_code), hole->start_offset);
+
+ ei->try_end = cfg->native_code + hole->start_offset;
+ break;
+ }
+ }
+
+ if (ec->flags == MONO_EXCEPTION_CLAUSE_FINALLY) {
+ int end_offset;
+ if (ec->handler_offset + ec->handler_len < header->code_size) {
+ tblock = cfg->cil_offset_to_bb [ec->handler_offset + ec->handler_len];
+ g_assert (tblock);
+ end_offset = tblock->native_offset;
+ } else {
+ /* Handler extends to the end of the method */
+ end_offset = cfg->epilog_begin;
+ }
+ ei->data.handler_end = cfg->native_code + end_offset;
+ }
+ }
+ }
+
+ if (G_UNLIKELY (cfg->verbose_level >= 4)) {
+ int i;
+ for (i = 0; i < jinfo->num_clauses; i++) {
+ MonoJitExceptionInfo *ei = &jinfo->clauses [i];
+ int start = (guint8*)ei->try_start - cfg->native_code;
+ int end = (guint8*)ei->try_end - cfg->native_code;
+ int handler = (guint8*)ei->handler_start - cfg->native_code;
+
+ printf ("JitInfo EH clause %d flags %x try %x-%x handler %x\n", i, ei->flags, start, end, handler);
+ }
+ }
+
+ /*
+ * It's possible to generate dwarf unwind info for xdebug etc, but not actually
+ * using it during runtime, hence the define.
+ */
+#ifdef MONO_ARCH_HAVE_XP_UNWIND
+ if (cfg->encoded_unwind_ops) {
+ jinfo->used_regs = mono_cache_unwind_info (cfg->encoded_unwind_ops, cfg->encoded_unwind_ops_len);
+ g_free (cfg->encoded_unwind_ops);
+ } else if (cfg->unwind_ops) {
+ guint32 info_len;
+ guint8 *unwind_info = mono_unwind_ops_encode (cfg->unwind_ops, &info_len);
+
+ jinfo->used_regs = mono_cache_unwind_info (unwind_info, info_len);
+ g_free (unwind_info);
+ }
+#endif
+
+ return jinfo;
+}
+#endif
+
+/*
+ * get_shared_inst:
+ *
+ *   Build the generic inst used for (partial) sharing from INST: reference
+ * types and type parameters are replaced with the corresponding argument of
+ * SHARED, value type arguments are kept as-is.
+ */
+static MonoGenericInst*
+get_shared_inst (MonoGenericInst *inst, MonoGenericInst *shared)
+{
+ MonoGenericInst *res;
+ MonoType **type_argv;
+ int i;
+
+ type_argv = g_new0 (MonoType*, inst->type_argc);
+ for (i = 0; i < inst->type_argc; ++i) {
+ if (MONO_TYPE_IS_REFERENCE (inst->type_argv [i]) || inst->type_argv [i]->type == MONO_TYPE_VAR || inst->type_argv [i]->type == MONO_TYPE_MVAR)
+ type_argv [i] = shared->type_argv [i];
+ else
+ type_argv [i] = inst->type_argv [i];
+ }
+
+ res = mono_metadata_get_generic_inst (inst->type_argc, type_argv);
+ g_free (type_argv);
+ return res;
+}
+
+/*
+ * mini_get_shared_method:
+ *
+ * Return the method which is actually compiled/registered when doing generic sharing.
+ */
+MonoMethod*
+mini_get_shared_method (MonoMethod *method)
+{
+ MonoGenericContext shared_context;
+ MonoMethod *declaring_method, *res;
+ gboolean partial = FALSE;
+
+ if (method->is_generic || method->klass->generic_container)
+ declaring_method = method;
+ else
+ declaring_method = mono_method_get_declaring_generic_method (method);
+
+ /* Start from the fully open context of the declaring method/class */
+ if (declaring_method->is_generic)
+ shared_context = mono_method_get_generic_container (declaring_method)->context;
+ else
+ shared_context = declaring_method->klass->generic_container->context;
+
+ /* Handle partial sharing */
+ if (method != declaring_method && method->is_inflated && !mono_method_is_generic_sharable_impl_full (method, FALSE, FALSE)) {
+ MonoGenericContext *context = mono_method_get_context (method);
+
+ /*
+ * Create the shared context by replacing the ref type arguments with
+ * type parameters, and keeping the rest.
+ */
+ partial = TRUE;
+ if (context->class_inst)
+ shared_context.class_inst = get_shared_inst (context->class_inst, shared_context.class_inst);
+ if (context->method_inst)
+ shared_context.method_inst = get_shared_inst (context->method_inst, shared_context.method_inst);
+ }
+
+ res = mono_class_inflate_generic_method (declaring_method, &shared_context);
+ if (!partial) {
+ /* The result should be an inflated method whose parent is not inflated */
+ g_assert (!res->klass->is_inflated);
+ }
+ return res;
+}
+
+#ifndef DISABLE_JIT
/*
* mini_method_compile:
* @method: the method to compile
mini_method_compile (MonoMethod *method, guint32 opts, MonoDomain *domain, gboolean run_cctors, gboolean compile_aot, int parts)
{
MonoMethodHeader *header;
+ MonoMethodSignature *sig;
+ MonoError err;
guint8 *ip;
MonoCompile *cfg;
- MonoJitInfo *jinfo;
int dfn, i, code_size_ratio;
gboolean deadce_has_run = FALSE;
- gboolean try_generic_shared, try_llvm;
+ gboolean try_generic_shared, try_llvm = FALSE;
MonoMethod *method_to_compile, *method_to_register;
- int generic_info_size;
mono_jit_stats.methods_compiled++;
if (mono_profiler_get_events () & MONO_PROFILE_JIT_COMPILATION)
mono_profiler_method_jit (method);
if (MONO_PROBE_METHOD_COMPILE_BEGIN_ENABLED ())
MONO_PROBE_METHOD_COMPILE_BEGIN (method);
-
+
if (compile_aot)
- /* We are passed the original generic method definition */
+ /*
+ * We might get passed the original generic method definition or
+ * instances with type parameters.
+ * FIXME: Remove the method->klass->generic_class limitation.
+ */
try_generic_shared = mono_class_generic_sharing_enabled (method->klass) &&
- (opts & MONO_OPT_GSHARED) && (method->is_generic || method->klass->generic_container);
+ (opts & MONO_OPT_GSHARED) && ((method->is_generic || method->klass->generic_container) || (!method->klass->generic_class && mono_method_is_generic_sharable_impl (method, TRUE)));
else
try_generic_shared = mono_class_generic_sharing_enabled (method->klass) &&
(opts & MONO_OPT_GSHARED) && mono_method_is_generic_sharable_impl (method, FALSE);
mono_stats.generics_unsharable_methods++;
}
- try_llvm = TRUE;
-
-#ifndef ENABLE_LLVM
- try_llvm = FALSE;
+#ifdef ENABLE_LLVM
+ try_llvm = mono_use_llvm;
#endif
restart_compile:
if (try_generic_shared) {
- MonoMethod *declaring_method;
- MonoGenericContext *shared_context;
-
- if (compile_aot) {
- declaring_method = method;
- } else {
- declaring_method = mono_method_get_declaring_generic_method (method);
- if (method->klass->generic_class)
- g_assert (method->klass->generic_class->container_class == declaring_method->klass);
- else
- g_assert (method->klass == declaring_method->klass);
- }
-
- if (declaring_method->is_generic)
- shared_context = &(mono_method_get_generic_container (declaring_method)->context);
- else
- shared_context = &declaring_method->klass->generic_container->context;
-
- method_to_compile = mono_class_inflate_generic_method (declaring_method, shared_context);
+ method_to_compile = mini_get_shared_method (method);
g_assert (method_to_compile);
} else {
method_to_compile = method;
cfg = g_new0 (MonoCompile, 1);
cfg->method = method_to_compile;
+ cfg->header = mono_method_get_header (cfg->method);
cfg->mempool = mono_mempool_new ();
cfg->opt = opts;
cfg->prof_options = mono_profiler_get_events ();
cfg->compile_llvm = try_llvm;
cfg->token_info_hash = g_hash_table_new (NULL, NULL);
+ if (cfg->gen_seq_points)
+ cfg->seq_points = g_ptr_array_new ();
+
if (cfg->compile_aot && !try_generic_shared && (method->is_generic || method->klass->generic_container)) {
cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED;
return cfg;
}
if (cfg->generic_sharing_context) {
- MonoGenericContext object_context = mono_method_construct_object_context (method_to_compile);
-
- method_to_register = mono_class_inflate_generic_method (method_to_compile, &object_context);
+ method_to_register = method_to_compile;
} else {
g_assert (method == method_to_compile);
method_to_register = method;
}
cfg->method_to_register = method_to_register;
- /* No way to obtain the location info for 'this' */
- if (try_generic_shared) {
- cfg->exception_message = g_strdup ("gshared");
- cfg->disable_llvm = TRUE;
- }
-
- if (cfg->method->save_lmf) {
- cfg->exception_message = g_strdup ("lmf");
- cfg->disable_llvm = TRUE;
- }
-
- /* FIXME: */
- if (cfg->method->dynamic) {
- cfg->exception_message = g_strdup ("dynamic.");
- cfg->disable_llvm = TRUE;
+ mono_error_init (&err);
+ sig = mono_method_signature_checked (cfg->method, &err);
+ if (!sig) {
+ cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
+ cfg->exception_message = g_strdup (mono_error_get_message (&err));
+ mono_error_cleanup (&err);
+ if (MONO_PROBE_METHOD_COMPILE_END_ENABLED ())
+ MONO_PROBE_METHOD_COMPILE_END (method, FALSE);
+ return cfg;
}
- header = mono_method_get_header (method_to_compile);
+ header = cfg->header;
if (!header) {
MonoLoaderError *error;
return cfg;
}
- if (header->clauses) {
- /*
- * Cannot be enabled until LLVM supports implicit exceptions, or we use
- * explicit checks, or we disable this for methods which might throw implicit
- * exceptions inside clauses.
- */
- cfg->exception_message = g_strdup ("clauses");
- cfg->disable_llvm = TRUE;
- }
-
#ifdef ENABLE_LLVM
{
static gboolean inited;
if (!inited) {
- mono_counters_register ("Methods JITted using LLVM", MONO_COUNTER_JIT | MONO_COUNTER_INT, &methods_with_llvm);
- mono_counters_register ("Methods JITted using mono JIT", MONO_COUNTER_JIT | MONO_COUNTER_INT, &methods_without_llvm);
inited = TRUE;
}
-
+
/*
* Check for methods which cannot be compiled by LLVM early, to avoid
* the extra compilation pass.
*/
- if (COMPILE_LLVM (cfg) && cfg->disable_llvm) {
- if (cfg->verbose_level >= 1) {
- //nm = mono_method_full_name (cfg->method, TRUE);
- printf ("LLVM failed for '%s': %s\n", method->name, cfg->exception_message);
- //g_free (nm);
+ if (COMPILE_LLVM (cfg)) {
+ mono_llvm_check_method_supported (cfg);
+ if (cfg->disable_llvm) {
+ if (cfg->verbose_level >= 1) {
+ //nm = mono_method_full_name (cfg->method, TRUE);
+ printf ("LLVM failed for '%s': %s\n", method->name, cfg->exception_message);
+ //g_free (nm);
+ }
+ mono_destroy_compile (cfg);
+ try_llvm = FALSE;
+ goto restart_compile;
}
- InterlockedIncrement (&methods_without_llvm);
- mono_destroy_compile (cfg);
- try_llvm = FALSE;
- goto restart_compile;
}
}
#endif
cfg->opt &= ~MONO_OPT_COPYPROP;
cfg->opt &= ~MONO_OPT_CONSPROP;
cfg->opt &= ~MONO_OPT_GSHARED;
+
+ /* This is needed for the soft debugger, which doesn't like code after the epilog */
+ cfg->disable_out_of_line_bblocks = TRUE;
}
if (mono_using_xdebug) {
cfg->disable_reuse_registers = TRUE;
cfg->disable_reuse_stack_slots = TRUE;
cfg->extend_live_ranges = TRUE;
+ cfg->compute_precise_live_ranges = TRUE;
}
+ mini_gc_init_cfg (cfg);
+
if (COMPILE_LLVM (cfg)) {
cfg->opt |= MONO_OPT_ABCREM;
}
if (getenv ("MONO_VERBOSE_METHOD")) {
- if (strcmp (cfg->method->name, getenv ("MONO_VERBOSE_METHOD")) == 0)
- cfg->verbose_level = 4;
+ char *name = getenv ("MONO_VERBOSE_METHOD");
+
+ if ((strchr (name, '.') > name) || strchr (name, ':')) {
+ MonoMethodDesc *desc;
+
+ desc = mono_method_desc_new (name, TRUE);
+ if (mono_method_desc_full_match (desc, cfg->method)) {
+ cfg->verbose_level = 4;
+ }
+ mono_method_desc_free (desc);
+ } else {
+ if (strcmp (cfg->method->name, getenv ("MONO_VERBOSE_METHOD")) == 0)
+ cfg->verbose_level = 4;
+ }
}
ip = (guint8 *)header->code;
cfg->intvars = mono_mempool_alloc0 (cfg->mempool, sizeof (guint16) * STACK_MAX * header->max_stack);
if (cfg->verbose_level > 0) {
+ char *method_name;
if (COMPILE_LLVM (cfg))
- g_print ("converting llvm method %s\n", mono_method_full_name (method, TRUE));
+ g_print ("converting llvm method %s\n", method_name = mono_method_full_name (method, TRUE));
else if (cfg->generic_sharing_context)
- g_print ("converting shared method %s\n", mono_method_full_name (method, TRUE));
+ g_print ("converting shared method %s\n", method_name = mono_method_full_name (method_to_compile, TRUE));
else
- g_print ("converting method %s\n", mono_method_full_name (method, TRUE));
+ g_print ("converting method %s\n", method_name = mono_method_full_name (method, TRUE));
+ g_free (method_name);
}
if (cfg->opt & (MONO_OPT_ABCREM | MONO_OPT_SSAPRE))
*/
//cfg->enable_extended_bblocks = TRUE;
+ /*We must verify the method before doing any IR generation as mono_compile_create_vars can assert.*/
+ if (mono_compile_is_broken (cfg, cfg->method, TRUE)) {
+ if (mini_get_debug_options ()->break_on_unverified)
+ G_BREAKPOINT ();
+ return cfg;
+ }
+
/*
* create MonoInst* which represents arguments and local variables
*/
*/
mono_liveness_handle_exception_clauses (cfg);
+ mono_handle_out_of_line_bblock (cfg);
+
/*g_print ("numblocks = %d\n", cfg->num_bblocks);*/
if (!COMPILE_LLVM (cfg))
#endif
if (cfg->comp_done & MONO_COMP_SSA && COMPILE_LLVM (cfg)) {
- if ((cfg->flags & (MONO_CFG_HAS_LDELEMA|MONO_CFG_HAS_CHECK_THIS)) && (cfg->opt & MONO_OPT_ABCREM))
+ /* This removes MONO_INST_FAULT flags too so perform it unconditionally */
+ if (cfg->opt & MONO_OPT_ABCREM)
mono_perform_abc_removal (cfg);
}
mono_decompose_array_access_opts (cfg);
if (cfg->got_var) {
+#ifndef MONO_ARCH_GOT_REG
GList *regs;
+#endif
+ int got_reg;
g_assert (cfg->got_var_allocated);
* branches problem. Testcase: mcs crash in
* System.MonoCustomAttrs:GetCustomAttributes.
*/
+#ifdef MONO_ARCH_GOT_REG
+ got_reg = MONO_ARCH_GOT_REG;
+#else
regs = mono_arch_get_global_int_regs (cfg);
g_assert (regs);
+ got_reg = GPOINTER_TO_INT (regs->data);
+ g_list_free (regs);
+#endif
cfg->got_var->opcode = OP_REGVAR;
- cfg->got_var->dreg = GPOINTER_TO_INT (regs->data);
+ cfg->got_var->dreg = got_reg;
cfg->used_int_regs |= 1LL << cfg->got_var->dreg;
-
- g_list_free (regs);
}
/*
}
if ((cfg->opt & MONO_OPT_LINEARS) && !cfg->globalra) {
- GList *vars, *regs;
+ GList *vars, *regs, *l;
/* fixme: maybe we can avoid to compute livenesss here if already computed ? */
cfg->comp_done &= ~MONO_COMP_LIVENESS;
if ((vars = mono_arch_get_allocatable_int_vars (cfg))) {
regs = mono_arch_get_global_int_regs (cfg);
- if (cfg->got_var)
- regs = g_list_delete_link (regs, regs);
+ /* Remove the reg reserved for holding the GOT address */
+ if (cfg->got_var) {
+ for (l = regs; l; l = l->next) {
+ if (GPOINTER_TO_UINT (l->data) == cfg->got_var->dreg) {
+ regs = g_list_delete_link (regs, l);
+ break;
+ }
+ }
+ }
mono_linear_scan (cfg, vars, regs, &cfg->used_int_regs);
}
}
//print_dfn (cfg);
/* variables are allocated after decompose, since decompose could create temps */
- if (!cfg->globalra && !COMPILE_LLVM (cfg))
+ if (!cfg->globalra && !COMPILE_LLVM (cfg)) {
mono_arch_allocate_vars (cfg);
+ if (cfg->exception_type)
+ return cfg;
+ }
{
MonoBasicBlock *bb;
continue;
for (; tree; tree = tree->next) {
mono_print_ins_index (-1, tree);
- }
- }
- }
-
- /* FIXME: */
- for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
- bb->max_vreg = cfg->next_vreg;
- }
- }
-
- if (COMPILE_LLVM (cfg)) {
-#ifdef ENABLE_LLVM
- char *nm;
-
- /* The IR has to be in SSA form for LLVM */
- if (!(cfg->comp_done & MONO_COMP_SSA)) {
- cfg->exception_message = g_strdup ("SSA disabled.");
- cfg->disable_llvm = TRUE;
- }
-
- if (cfg->flags & MONO_CFG_HAS_ARRAY_ACCESS)
- mono_decompose_array_access_opts (cfg);
-
- if (!cfg->disable_llvm)
- mono_llvm_emit_method (cfg);
- if (!cfg->disable_llvm && header->num_clauses && !cfg->compile_aot && cfg->llvm_ex_info_len != header->num_clauses) {
- cfg->exception_message = g_strdup ("clause num mismatch.");
- cfg->disable_llvm = TRUE;
- }
- if (cfg->disable_llvm) {
- if (cfg->verbose_level >= 1) {
- //nm = mono_method_full_name (cfg->method, TRUE);
- printf ("LLVM failed for '%s': %s\n", method->name, cfg->exception_message);
- //g_free (nm);
- }
- InterlockedIncrement (&methods_without_llvm);
- mono_destroy_compile (cfg);
- try_llvm = FALSE;
- goto restart_compile;
- }
-
- InterlockedIncrement (&methods_with_llvm);
-
- if (cfg->verbose_level > 0 && !cfg->compile_aot) {
- nm = mono_method_full_name (cfg->method, TRUE);
- g_print ("LLVM Method %s emitted at %p to %p (code length %d) [%s]\n",
- nm,
- cfg->native_code, cfg->native_code + cfg->code_len, cfg->code_len, cfg->domain->friendly_name);
- g_free (nm);
- }
-#endif
- } else {
- mono_codegen (cfg);
- }
-
- if (cfg->verbose_level >= 2) {
- char *id = mono_method_full_name (cfg->method, FALSE);
- mono_disassemble_code (cfg, cfg->native_code, cfg->code_len, id + 3);
- g_free (id);
- }
-
- if (cfg->generic_sharing_context)
- generic_info_size = sizeof (MonoGenericJitInfo);
- else
- generic_info_size = 0;
-
- if (cfg->method->dynamic) {
- jinfo = g_malloc0 (MONO_SIZEOF_JIT_INFO + (header->num_clauses * sizeof (MonoJitExceptionInfo)) +
- generic_info_size);
- } else {
- jinfo = mono_domain_alloc0 (cfg->domain, MONO_SIZEOF_JIT_INFO +
- (header->num_clauses * sizeof (MonoJitExceptionInfo)) +
- generic_info_size);
- }
-
- jinfo->method = method_to_register;
- jinfo->code_start = cfg->native_code;
- jinfo->code_size = cfg->code_len;
- jinfo->used_regs = cfg->used_int_regs;
- jinfo->domain_neutral = (cfg->opt & MONO_OPT_SHARED) != 0;
- jinfo->cas_inited = FALSE; /* initialization delayed at the first stalk walk using this method */
- jinfo->num_clauses = header->num_clauses;
- if (COMPILE_LLVM (cfg))
- jinfo->from_llvm = TRUE;
-
- if (cfg->generic_sharing_context) {
- MonoInst *inst;
- MonoGenericJitInfo *gi;
-
- jinfo->has_generic_jit_info = 1;
-
- gi = mono_jit_info_get_generic_jit_info (jinfo);
- g_assert (gi);
-
- gi->generic_sharing_context = cfg->generic_sharing_context;
-
- if ((method_to_compile->flags & METHOD_ATTRIBUTE_STATIC) ||
- mini_method_get_context (method_to_compile)->method_inst ||
- method_to_compile->klass->valuetype) {
- g_assert (cfg->rgctx_var);
- }
-
- gi->has_this = 1;
-
- if ((method_to_compile->flags & METHOD_ATTRIBUTE_STATIC) ||
- mini_method_get_context (method_to_compile)->method_inst ||
- method_to_compile->klass->valuetype) {
- inst = cfg->rgctx_var;
- g_assert (inst->opcode == OP_REGOFFSET);
- } else {
- inst = cfg->args [0];
+ }
+ }
}
- if (inst->opcode == OP_REGVAR) {
- gi->this_in_reg = 1;
- gi->this_reg = inst->dreg;
- } else {
- g_assert (inst->opcode == OP_REGOFFSET);
-#ifdef TARGET_X86
- g_assert (inst->inst_basereg == X86_EBP);
-#elif defined(TARGET_AMD64)
- g_assert (inst->inst_basereg == X86_EBP || inst->inst_basereg == X86_ESP);
-#endif
- g_assert (inst->inst_offset >= G_MININT32 && inst->inst_offset <= G_MAXINT32);
-
- gi->this_in_reg = 0;
- gi->this_reg = inst->inst_basereg;
- gi->this_offset = inst->inst_offset;
+ /* FIXME: */
+ for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
+ bb->max_vreg = cfg->next_vreg;
}
}
- if (header->num_clauses) {
- int i;
-
- for (i = 0; i < header->num_clauses; i++) {
- MonoExceptionClause *ec = &header->clauses [i];
- MonoJitExceptionInfo *ei = &jinfo->clauses [i];
- MonoBasicBlock *tblock;
- MonoInst *exvar;
+ if (COMPILE_LLVM (cfg)) {
+#ifdef ENABLE_LLVM
+ char *nm;
- ei->flags = ec->flags;
+ /* The IR has to be in SSA form for LLVM */
+ if (!(cfg->comp_done & MONO_COMP_SSA)) {
+ cfg->exception_message = g_strdup ("SSA disabled.");
+ cfg->disable_llvm = TRUE;
+ }
- exvar = mono_find_exvar_for_offset (cfg, ec->handler_offset);
- ei->exvar_offset = exvar ? exvar->inst_offset : 0;
+ if (cfg->flags & MONO_CFG_HAS_ARRAY_ACCESS)
+ mono_decompose_array_access_opts (cfg);
- if (ei->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
- tblock = cfg->cil_offset_to_bb [ec->data.filter_offset];
- g_assert (tblock);
- ei->data.filter = cfg->native_code + tblock->native_offset;
- } else {
- ei->data.catch_class = ec->data.catch_class;
+ if (!cfg->disable_llvm)
+ mono_llvm_emit_method (cfg);
+ if (cfg->disable_llvm) {
+ if (cfg->verbose_level >= 1) {
+ //nm = mono_method_full_name (cfg->method, TRUE);
+ printf ("LLVM failed for '%s': %s\n", method->name, cfg->exception_message);
+ //g_free (nm);
}
+ mono_destroy_compile (cfg);
+ try_llvm = FALSE;
+ goto restart_compile;
+ }
- if (COMPILE_LLVM (cfg)) {
- if (!cfg->compile_aot) {
- g_assert (cfg->llvm_ex_info && i < cfg->llvm_ex_info_len);
- ei->try_start = cfg->llvm_ex_info [i].try_start;
- ei->try_end = cfg->llvm_ex_info [i].try_end;
- ei->handler_start = cfg->llvm_ex_info [i].handler_start;
- }
- } else {
- tblock = cfg->cil_offset_to_bb [ec->try_offset];
- g_assert (tblock);
- ei->try_start = cfg->native_code + tblock->native_offset;
- g_assert (tblock->native_offset);
- tblock = cfg->cil_offset_to_bb [ec->try_offset + ec->try_len];
- g_assert (tblock);
- ei->try_end = cfg->native_code + tblock->native_offset;
- g_assert (tblock->native_offset);
- tblock = cfg->cil_offset_to_bb [ec->handler_offset];
- g_assert (tblock);
- ei->handler_start = cfg->native_code + tblock->native_offset;
- }
+ if (cfg->verbose_level > 0 && !cfg->compile_aot) {
+ nm = mono_method_full_name (cfg->method, TRUE);
+ g_print ("LLVM Method %s emitted at %p to %p (code length %d) [%s]\n",
+ nm,
+ cfg->native_code, cfg->native_code + cfg->code_len, cfg->code_len, cfg->domain->friendly_name);
+ g_free (nm);
}
+#endif
+ } else {
+ mono_codegen (cfg);
}
- /*
- * Its possible to generate dwarf unwind info for xdebug etc, but not actually
- * using it during runtime, hence the define.
- */
-#ifdef MONO_ARCH_HAVE_XP_UNWIND
- if (cfg->encoded_unwind_ops) {
- jinfo->used_regs = mono_cache_unwind_info (cfg->encoded_unwind_ops, cfg->encoded_unwind_ops_len);
- g_free (cfg->encoded_unwind_ops);
- } else if (cfg->unwind_ops) {
- guint32 info_len;
- guint8 *unwind_info = mono_unwind_ops_encode (cfg->unwind_ops, &info_len);
-
- jinfo->used_regs = mono_cache_unwind_info (unwind_info, info_len);
- g_free (unwind_info);
- }
-#endif
+ if (COMPILE_LLVM (cfg))
+ InterlockedIncrement (&mono_jit_stats.methods_with_llvm);
+ else
+ InterlockedIncrement (&mono_jit_stats.methods_without_llvm);
- cfg->jit_info = jinfo;
+ cfg->jit_info = create_jit_info (cfg, method_to_compile);
#ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
if (cfg->extend_live_ranges) {
}
#endif
- mono_save_xdebug_info (cfg);
+ if (!cfg->compile_aot)
+ mono_save_xdebug_info (cfg);
mini_gc_create_gc_map (cfg);
- if (cfg->seq_points) {
- // FIXME: dynamic methods
- mono_domain_lock (domain);
- g_hash_table_insert (domain_jit_info (domain)->seq_points, method_to_register, cfg->seq_points);
- mono_domain_unlock (domain);
+ mono_save_seq_point_info (cfg);
+
+ if (cfg->verbose_level >= 2) {
+ char *id = mono_method_full_name (cfg->method, FALSE);
+ mono_disassemble_code (cfg, cfg->native_code, cfg->code_len, id + 3);
+ g_free (id);
}
if (!cfg->compile_aot) {
mono_domain_lock (cfg->domain);
- mono_jit_info_table_add (cfg->domain, jinfo);
+ mono_jit_info_table_add (cfg->domain, cfg->jit_info);
if (cfg->method->dynamic)
- mono_dynamic_code_hash_lookup (cfg->domain, cfg->method)->ji = jinfo;
+ mono_dynamic_code_hash_lookup (cfg->domain, cfg->method)->ji = cfg->jit_info;
mono_domain_unlock (cfg->domain);
}
g_free (mono_jit_stats.biggest_method);
mono_jit_stats.biggest_method = g_strdup_printf ("%s::%s)", method->klass->name, method->name);
}
- code_size_ratio = (code_size_ratio * 100) / mono_method_get_header (method)->code_size;
+ code_size_ratio = (code_size_ratio * 100) / header->code_size;
if (code_size_ratio > mono_jit_stats.max_code_size_ratio && mono_jit_stats.enabled) {
mono_jit_stats.max_code_size_ratio = code_size_ratio;
g_free (mono_jit_stats.max_ratio_method);
#endif /* DISABLE_JIT */
-static MonoJitInfo*
-lookup_generic_method (MonoDomain *domain, MonoMethod *method)
+/*
+ * mono_domain_lookup_shared_generic:
+ *
+ * Look up the JIT info of the shared (generic) version of METHOD in
+ * DOMAIN's jit code hash.  Returns NULL when the shared method has not
+ * been compiled, or when the cached entry carries no generic jit info
+ * (such an entry cannot be reused for sharing, so it counts as a miss).
+ * Also maintains the "Shared generic lookups" / "Failed shared generic
+ * lookups" counters.
+ * NOTE(review): the static counters and the one-shot registration are
+ * updated without atomics; presumably callers serialize via the
+ * domain/JIT lock -- confirm.
+ */
+MonoJitInfo*
+mono_domain_lookup_shared_generic (MonoDomain *domain, MonoMethod *method)
{
-	MonoMethod *open_method;
+	static gboolean inited = FALSE;
+	static int lookups = 0;
+	static int failed_lookups = 0;
+	MonoJitInfo *ji;
-	if (!mono_method_is_generic_sharable_impl (method, FALSE))
-		return NULL;
+	/* The shared method is the canonical instantiation used as the hash key. */
+	ji = mono_internal_hash_table_lookup (&domain->jit_code_hash, mini_get_shared_method (method));
+	if (ji && !ji->has_generic_jit_info)
+		ji = NULL;
-	open_method = mono_method_get_declaring_generic_method (method);
+	/* Lazily register the counters on the first call. */
+	if (!inited) {
+		mono_counters_register ("Shared generic lookups", MONO_COUNTER_INT|MONO_COUNTER_GENERICS, &lookups);
+		mono_counters_register ("Failed shared generic lookups", MONO_COUNTER_INT|MONO_COUNTER_GENERICS, &failed_lookups);
+		inited = TRUE;
+	}
+
+	++lookups;
+	if (!ji)
+		++failed_lookups;
-	return mono_domain_lookup_shared_generic (domain, open_method);
+	return ji;
}
/*
if (ji)
return ji;
- return lookup_generic_method (domain, method);
+ if (!mono_method_is_generic_sharable_impl (method, FALSE))
+ return NULL;
+ return mono_domain_lookup_shared_generic (domain, method);
}
static MonoJitInfo*
return info;
}
+/*
+ * Support for emitting a /tmp/perf-<pid>.map file, which lets the Linux
+ * `perf` tool symbolize JIT-compiled code.  The file format is one line
+ * per code region: "<start-addr-hex> <size-hex> <symbol name>".
+ */
+#if ENABLE_JIT_MAP
+/* NULL until mono_enable_jit_map () succeeds; doubles as the enable flag. */
+static FILE* perf_map_file = NULL;
+
+/*
+ * mono_enable_jit_map:
+ *
+ * Open the perf map file for this process, removing any stale file with
+ * the same name first.  Idempotent: a second call is a no-op.  If fopen
+ * fails, perf_map_file stays NULL and the feature remains disabled.
+ */
+void
+mono_enable_jit_map (void)
+{
+	if (!perf_map_file) {
+		char name [64];
+		g_snprintf (name, sizeof (name), "/tmp/perf-%d.map", getpid ());
+		unlink (name);
+		perf_map_file = fopen (name, "w");
+	}
+}
+
+/*
+ * mono_emit_jit_tramp:
+ *
+ * Record a code region [START, START+SIZE) under the name DESC in the
+ * perf map file.  No-op when the map is not enabled.
+ */
+void
+mono_emit_jit_tramp (void *start, int size, const char *desc)
+{
+	if (perf_map_file)
+		fprintf (perf_map_file, "%llx %x %s\n", (long long unsigned int)(gsize)start, size, desc);
+}
+
+/*
+ * mono_emit_jit_map:
+ *
+ * Record a freshly JITted method (described by JINFO) in the perf map
+ * file, using its full managed name as the symbol.
+ */
+void
+mono_emit_jit_map (MonoJitInfo *jinfo)
+{
+	if (perf_map_file) {
+		char *name = mono_method_full_name (jinfo->method, TRUE);
+		mono_emit_jit_tramp (jinfo->code_start, jinfo->code_size, name);
+		g_free (name);
+	}
+}
+
+/*
+ * mono_jit_map_is_enabled:
+ *
+ * Return TRUE if the perf map file was opened successfully.
+ */
+gboolean
+mono_jit_map_is_enabled (void)
+{
+	return perf_map_file != NULL;
+}
+
+#endif
+
static gpointer
mono_jit_compile_method_inner (MonoMethod *method, MonoDomain *target_domain, int opt, MonoException **jit_ex)
{
MonoVTable *vtable;
MonoException *ex = NULL;
guint32 prof_options;
+ GTimer *jit_timer;
+ MonoMethod *prof_method;
#ifdef MONO_USE_AOT_COMPILER
if (opt & MONO_OPT_AOT) {
mono_lookup_pinvoke_call (method, NULL, NULL);
}
nm = mono_marshal_get_native_wrapper (method, check_for_pending_exc, FALSE);
- return mono_get_addr_from_ftnptr (mono_compile_method (nm));
+ code = mono_get_addr_from_ftnptr (mono_compile_method (nm));
+ jinfo = mono_jit_info_table_find (target_domain, code);
+ if (!jinfo)
+ jinfo = mono_jit_info_table_find (mono_domain_get (), code);
+ if (jinfo)
+ mono_profiler_method_end_jit (method, jinfo, MONO_PROFILE_OK);
+ return code;
//if (mono_debug_format != MONO_DEBUG_FORMAT_NONE)
//mono_debug_add_wrapper (method, nm);
} else if ((method->iflags & METHOD_IMPL_ATTRIBUTE_RUNTIME)) {
const char *name = method->name;
+ char *full_name, *msg;
MonoMethod *nm;
if (method->klass->parent == mono_defaults.multicastdelegate_class) {
return mono_get_addr_from_ftnptr (mono_compile_method (nm));
}
}
+
+ full_name = mono_method_full_name (method, TRUE);
+ msg = g_strdup_printf ("Unrecognizable runtime implemented method '%s'", full_name);
+ *jit_ex = mono_exception_from_name_msg (mono_defaults.corlib, "System", "InvalidProgramException", msg);
+ g_free (full_name);
+ g_free (msg);
return NULL;
}
return NULL;
}
+ jit_timer = g_timer_new ();
+
cfg = mini_method_compile (method, opt, target_domain, TRUE, FALSE, 0);
+ prof_method = cfg->method;
+
+ g_timer_stop (jit_timer);
+ mono_jit_stats.jit_time += g_timer_elapsed (jit_timer, NULL);
+ g_timer_destroy (jit_timer);
switch (cfg->exception_type) {
case MONO_EXCEPTION_NONE:
ex = exp;
break;
}
+ case MONO_EXCEPTION_OUT_OF_MEMORY:
+ ex = mono_domain_get ()->out_of_memory_ex;
+ break;
default:
g_assert_not_reached ();
}
if (ex) {
- mono_destroy_compile (cfg);
- *jit_ex = ex;
-
if (cfg->prof_options & MONO_PROFILE_JIT_COMPILATION)
mono_profiler_method_end_jit (method, NULL, MONO_PROFILE_FAILED);
+ mono_destroy_compile (cfg);
+ *jit_ex = ex;
+
return NULL;
}
mono_destroy_compile (cfg);
+#ifndef DISABLE_JIT
if (domain_jit_info (target_domain)->jump_target_hash) {
MonoJumpInfo patch_info;
GSList *list, *tmp;
g_slist_free (list);
}
+ mono_emit_jit_map (jinfo);
+#endif
mono_domain_unlock (target_domain);
mono_loader_unlock ();
vtable = mono_class_vtable (target_domain, method->klass);
if (!vtable) {
- MonoException *exc;
- exc = mono_class_get_exception_for_failure (method->klass);
- g_assert (exc);
- mono_raise_exception (exc);
+ ex = mono_class_get_exception_for_failure (method->klass);
+ g_assert (ex);
+ *jit_ex = ex;
+ return NULL;
}
- if (prof_options & MONO_PROFILE_JIT_COMPILATION)
+ if (prof_options & MONO_PROFILE_JIT_COMPILATION) {
+ if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
+ if (mono_marshal_method_from_wrapper (method)) {
+ /* Native func wrappers have no method */
+ /* The profiler doesn't know about wrappers, so pass the original icall method */
+ mono_profiler_method_end_jit (mono_marshal_method_from_wrapper (method), jinfo, MONO_PROFILE_OK);
+ }
+ }
mono_profiler_method_end_jit (method, jinfo, MONO_PROFILE_OK);
+ if (prof_method != method) {
+ mono_profiler_method_end_jit (prof_method, jinfo, MONO_PROFILE_OK);
+ }
+ }
- mono_runtime_class_init (vtable);
+ ex = mono_runtime_class_init_full (vtable, FALSE);
+ if (ex) {
+ *jit_ex = ex;
+ return NULL;
+ }
return code;
}
/* We can't use a domain specific method in another domain */
if (! ((domain != target_domain) && !info->domain_neutral)) {
MonoVTable *vtable;
+ MonoException *tmpEx;
mono_jit_stats.methods_lookups++;
vtable = mono_class_vtable (domain, method->klass);
g_assert (vtable);
- mono_runtime_class_init (vtable);
+ tmpEx = mono_runtime_class_init_full (vtable, ex == NULL);
+ if (tmpEx) {
+ *ex = tmpEx;
+ return NULL;
+ }
return mono_create_ftnptr (target_domain, info->code_start);
}
}
* We need this here because mono_marshal_get_runtime_invoke can place
* the helper method in System.Object and not the target class.
*/
- mono_runtime_class_init (info->vtable);
+ if (exc) {
+ *exc = (MonoObject*)mono_runtime_class_init_full (info->vtable, FALSE);
+ if (*exc)
+ return NULL;
+ } else {
+ mono_runtime_class_init (info->vtable);
+ }
+
+ /* The wrappers expect this to be initialized to NULL */
+ if (exc)
+ *exc = NULL;
#ifdef MONO_ARCH_DYN_CALL_SUPPORTED
if (info->dyn_call_info) {
#if defined(MONO_ARCH_HAVE_IS_INT_OVERFLOW)
if (mono_arch_is_int_overflow (ctx, info))
- exc = mono_get_exception_arithmetic ();
+ /*
+ * The spec says this throws ArithmeticException, but MS throws the derived
+ * OverflowException.
+ */
+ exc = mono_get_exception_overflow ();
else
exc = mono_get_exception_divide_by_zero ();
#else
mono_arch_handle_exception (ctx, exc, FALSE);
}
+#if defined(MONO_ARCH_USE_SIGACTION) || defined(HOST_WIN32)
+#define HAVE_SIG_INFO
+#endif
+
void
SIG_HANDLER_SIGNATURE (mono_sigsegv_signal_handler)
{
-#ifndef MONO_ARCH_SIGSEGV_ON_ALTSTACK
- MonoException *exc = NULL;
-#endif
MonoJitInfo *ji;
MonoJitTlsData *jit_tls = TlsGetValue (mono_jit_tls_id);
+ gpointer fault_addr = NULL;
GET_CONTEXT;
-#ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED
+#if defined(MONO_ARCH_SOFT_DEBUG_SUPPORTED) && defined(HAVE_SIG_INFO)
if (mono_arch_is_single_step_event (info, ctx)) {
mono_debugger_agent_single_step_event (ctx);
return;
}
#endif
+#if !defined(HOST_WIN32) && defined(HAVE_SIG_INFO)
+ fault_addr = info->si_addr;
+ if (mono_aot_is_pagefault (info->si_addr)) {
+ mono_aot_handle_pagefault (info->si_addr);
+ return;
+ }
+#endif
+
/* The thread might no be registered with the runtime */
if (!mono_domain_get () || !jit_tls) {
if (mono_chain_signal (SIG_HANDLER_PARAMS))
if (mono_handle_soft_stack_ovf (jit_tls, ji, ctx, (guint8*)info->si_addr))
return;
- /* The hard-guard page has been hit: there is not much we can do anymore
- * Print a hopefully clear message and abort.
- */
+#ifdef MONO_ARCH_HAVE_SIGCTX_TO_MONOCTX
+ /* info->si_addr seems to be NULL on some kernels when handling stack overflows */
+ fault_addr = info->si_addr;
+ if (fault_addr == NULL) {
+ MonoContext mctx;
+
+ mono_arch_sigctx_to_monoctx (ctx, &mctx);
+
+ fault_addr = MONO_CONTEXT_GET_SP (&mctx);
+ }
+#endif
+
if (jit_tls->stack_size &&
- ABS ((guint8*)info->si_addr - ((guint8*)jit_tls->end_of_stack - jit_tls->stack_size)) < 32768) {
- const char *method;
- /* we don't do much now, but we can warn the user with a useful message */
- fprintf (stderr, "Stack overflow: IP: %p, fault addr: %p\n", mono_arch_ip_from_context (ctx), (gpointer)info->si_addr);
- if (ji && ji->method)
- method = mono_method_full_name (ji->method, TRUE);
- else
- method = "Unmanaged";
- fprintf (stderr, "At %s\n", method);
- _exit (1);
+ ABS ((guint8*)fault_addr - ((guint8*)jit_tls->end_of_stack - jit_tls->stack_size)) < 8192 * sizeof (gpointer)) {
+ /*
+ * The hard-guard page has been hit: there is not much we can do anymore
+ * Print a hopefully clear message and abort.
+ */
+ mono_handle_hard_stack_ovf (jit_tls, ji, ctx, (guint8*)info->si_addr);
+ g_assert_not_reached ();
} else {
/* The original handler might not like that it is executed on an altstack... */
if (!ji && mono_chain_signal (SIG_HANDLER_PARAMS))
mono_handle_native_sigsegv (SIGSEGV, ctx);
}
- mono_arch_handle_exception (ctx, exc, FALSE);
+ mono_arch_handle_exception (ctx, NULL, FALSE);
#endif
}
guint8 *addr = NULL;
if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && mono_method_signature (method)->generic_param_count) {
- return mono_arch_create_specific_trampoline (method, MONO_TRAMPOLINE_GENERIC_VIRTUAL_REMOTING,
+ return mono_create_specific_trampoline (method, MONO_TRAMPOLINE_GENERIC_VIRTUAL_REMOTING,
domain, NULL);
}
return mono_get_addr_from_ftnptr (addr);
}
-#ifdef MONO_ARCH_HAVE_IMT
-static G_GNUC_UNUSED gpointer
-mini_get_imt_trampoline (void)
+/*
+ * Lazily-grown table of per-slot vcall trampolines.  Index 0 corresponds
+ * to vtable slot -MONO_IMT_SIZE, so negative (IMT) slot indexes map into
+ * the front of the table.  Protected by the JIT lock for writes; reads
+ * on the fast path are lock-free.
+ */
+static gpointer *vtable_trampolines;
+static int vtable_trampolines_size;
+
+/*
+ * mini_get_vtable_trampoline:
+ *
+ * Return a trampoline for vtable slot SLOT_INDEX, creating it on demand.
+ * SLOT_INDEX may be negative down to -MONO_IMT_SIZE (IMT slots).
+ */
+gpointer
+mini_get_vtable_trampoline (int slot_index)
{
-	static gpointer tramp = NULL;
-	if (!tramp)
-		tramp = mono_create_specific_trampoline (MONO_FAKE_IMT_METHOD, MONO_TRAMPOLINE_JIT, mono_get_root_domain (), NULL);
-	return tramp;
+	/* Bias the slot index so IMT (negative) slots become valid array indexes. */
+	int index = slot_index + MONO_IMT_SIZE;
+
+	g_assert (slot_index >= - MONO_IMT_SIZE);
+	/* Unlocked size check first; re-checked under the lock below. */
+	if (!vtable_trampolines || slot_index + MONO_IMT_SIZE >= vtable_trampolines_size) {
+		mono_jit_lock ();
+		if (!vtable_trampolines || index >= vtable_trampolines_size) {
+			int new_size;
+			gpointer new_table;
+
+			/* Double the capacity until INDEX fits; start at 128 entries. */
+			new_size = vtable_trampolines_size ? vtable_trampolines_size * 2 : 128;
+			while (new_size <= index)
+				new_size *= 2;
+			new_table = g_new0 (gpointer, new_size);
+
+			if (vtable_trampolines)
+				memcpy (new_table, vtable_trampolines, vtable_trampolines_size * sizeof (gpointer));
+			/*
+			 * NOTE(review): the old table is freed here while readers on the
+			 * lock-free fast path may still be dereferencing it; presumably
+			 * this window is considered benign in practice -- confirm.
+			 */
+			g_free (vtable_trampolines);
+			/* Publish the table before the new size becomes visible. */
+			mono_memory_barrier ();
+			vtable_trampolines = new_table;
+			vtable_trampolines_size = new_size;
+		}
+		mono_jit_unlock ();
+	}
+
+	/* Create the slot's trampoline on first use; stores of the same value race benignly. */
+	if (!vtable_trampolines [index])
+		vtable_trampolines [index] = mono_create_specific_trampoline (GUINT_TO_POINTER (slot_index), MONO_TRAMPOLINE_VCALL, mono_get_root_domain (), NULL);
+	return vtable_trampolines [index];
}
-#endif
-gpointer
-mini_get_vtable_trampoline (void)
+/*
+ * mini_get_imt_trampoline:
+ *
+ * Return a trampoline for IMT slot SLOT_INDEX.  IMT slots are mapped to
+ * the negative vtable slot range [-MONO_IMT_SIZE, 0) handled by
+ * mini_get_vtable_trampoline ().
+ */
+static gpointer
+mini_get_imt_trampoline (int slot_index)
{
-	static gpointer tramp = NULL;
-	if (!tramp)
-		tramp = mono_create_specific_trampoline (MONO_FAKE_VTABLE_METHOD, MONO_TRAMPOLINE_JIT, mono_get_root_domain (), NULL);
-	return tramp;
+	return mini_get_vtable_trampoline (slot_index - MONO_IMT_SIZE);
}
static void
debug_options.handle_sigint = TRUE;
else if (!strcmp (arg, "keep-delegates"))
debug_options.keep_delegates = TRUE;
+ else if (!strcmp (arg, "reverse-pinvoke-exceptions"))
+ debug_options.reverse_pinvoke_exceptions = TRUE;
else if (!strcmp (arg, "collect-pagefault-stats"))
debug_options.collect_pagefault_stats = TRUE;
else if (!strcmp (arg, "break-on-unverified"))
debug_options.no_gdb_backtrace = TRUE;
else if (!strcmp (arg, "suspend-on-sigsegv"))
debug_options.suspend_on_sigsegv = TRUE;
+ else if (!strcmp (arg, "suspend-on-unhandled"))
+ debug_options.suspend_on_unhandled = TRUE;
else if (!strcmp (arg, "dont-free-domains"))
mono_dont_free_domains = TRUE;
else if (!strcmp (arg, "dyn-runtime-invoke"))
debug_options.gdb = TRUE;
else if (!strcmp (arg, "explicit-null-checks"))
debug_options.explicit_null_checks = TRUE;
+ else if (!strcmp (arg, "gen-seq-points"))
+ debug_options.gen_seq_points = TRUE;
+ else if (!strcmp (arg, "init-stacks"))
+ debug_options.init_stacks = TRUE;
+ else if (!strcmp (arg, "casts"))
+ debug_options.better_cast_details = TRUE;
else {
fprintf (stderr, "Invalid option for the MONO_DEBUG env variable: %s\n", arg);
- fprintf (stderr, "Available options: 'handle-sigint', 'keep-delegates', 'collect-pagefault-stats', 'break-on-unverified', 'no-gdb-backtrace', 'dont-free-domains', 'suspend-on-sigsegv', 'dyn-runtime-invoke', 'gdb', 'explicit-null-checks'\n");
+ fprintf (stderr, "Available options: 'handle-sigint', 'keep-delegates', 'reverse-pinvoke-exceptions', 'collect-pagefault-stats', 'break-on-unverified', 'no-gdb-backtrace', 'dont-free-domains', 'suspend-on-sigsegv', 'suspend-on-unhandled', 'dyn-runtime-invoke', 'gdb', 'explicit-null-checks', 'init-stacks'\n");
exit (1);
}
}
#endif
}
+/*
+ * register_jit_stats:
+ *
+ * Register the JIT statistics fields of mono_jit_stats with the mono
+ * counters subsystem so they appear in --stats / counters output.
+ */
+static void
+register_jit_stats (void)
+{
+	mono_counters_register ("Compiled methods", MONO_COUNTER_JIT | MONO_COUNTER_WORD, &mono_jit_stats.methods_compiled);
+	mono_counters_register ("Methods from AOT", MONO_COUNTER_JIT | MONO_COUNTER_WORD, &mono_jit_stats.methods_aot);
+	mono_counters_register ("Methods JITted using LLVM", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.methods_with_llvm);
+	mono_counters_register ("Methods JITted using mono JIT", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.methods_without_llvm);
+	mono_counters_register ("Total time spent JITting (sec)", MONO_COUNTER_JIT | MONO_COUNTER_DOUBLE, &mono_jit_stats.jit_time);
+}
+
static void runtime_invoke_info_free (gpointer value);
static void
info->delegate_trampoline_hash = g_hash_table_new (mono_aligned_addr_hash, NULL);
info->llvm_vcall_trampoline_hash = g_hash_table_new (mono_aligned_addr_hash, NULL);
info->runtime_invoke_hash = g_hash_table_new_full (mono_aligned_addr_hash, NULL, NULL, runtime_invoke_info_free);
- info->seq_points = g_hash_table_new (mono_aligned_addr_hash, NULL);
+ info->seq_points = g_hash_table_new_full (mono_aligned_addr_hash, NULL, NULL, g_free);
info->arch_seq_points = g_hash_table_new (mono_aligned_addr_hash, NULL);
domain->runtime_info = info;
g_hash_table_destroy (info->static_rgctx_trampoline_hash);
g_hash_table_destroy (info->llvm_vcall_trampoline_hash);
g_hash_table_destroy (info->runtime_invoke_hash);
+ g_hash_table_destroy (info->seq_points);
+ g_hash_table_destroy (info->arch_seq_points);
if (info->agent_info)
mono_debugger_agent_free_domain_info (domain);
MONO_PROBE_VES_INIT_BEGIN ();
-#ifdef __linux__
+#if defined(__linux__) && !defined(__native_client__)
if (access ("/proc/self/maps", F_OK) != 0) {
g_print ("Mono requires /proc to be mounted.\n");
exit (1);
mini_debugger_init ();
#endif
+#ifdef MINI_HAVE_FAST_TLS
+ MINI_FAST_TLS_INIT (mono_jit_tls);
+ MINI_FAST_TLS_INIT (mono_lmf_addr);
+#ifdef MONO_ARCH_ENABLE_MONO_LMF_VAR
+ MINI_FAST_TLS_INIT (mono_lmf);
+#endif
+#endif
+
#ifdef MONO_ARCH_HAVE_TLS_GET
- mono_runtime_set_has_tls_get (TRUE);
+ mono_runtime_set_has_tls_get (MONO_ARCH_HAVE_TLS_GET);
#else
mono_runtime_set_has_tls_get (FALSE);
#endif
callbacks.create_ftnptr = mini_create_ftnptr;
callbacks.get_addr_from_ftnptr = mini_get_addr_from_ftnptr;
callbacks.get_runtime_build_info = mono_get_runtime_build_info;
+ callbacks.set_cast_details = mono_set_cast_details;
+
+#ifdef MONO_ARCH_HAVE_IMT
+ if (mono_use_imt) {
+ callbacks.get_vtable_trampoline = mini_get_vtable_trampoline;
+ callbacks.get_imt_trampoline = mini_get_imt_trampoline;
+ }
+#endif
mono_install_callbacks (&callbacks);
+
+ if (getenv ("MONO_DEBUG") != NULL)
+ mini_parse_debug_options ();
mono_arch_cpu_init ();
mono_unwind_init ();
- mini_gc_init ();
-
- if (getenv ("MONO_DEBUG") != NULL)
- mini_parse_debug_options ();
-
if (getenv ("MONO_XDEBUG")) {
char *xdebug_opts = getenv ("MONO_XDEBUG");
mono_xdebug_init (xdebug_opts);
}
#ifdef ENABLE_LLVM
- mono_llvm_init ();
+ if (mono_use_llvm) {
+ if (!mono_llvm_load (NULL)) {
+ mono_use_llvm = FALSE;
+ fprintf (stderr, "Mono Warning: llvm support could not be loaded.\n");
+ }
+ }
+ if (mono_use_llvm)
+ mono_llvm_init ();
#endif
mono_trampolines_init ();
if (!g_thread_supported ())
g_thread_init (NULL);
- mono_gc_base_init ();
-
mono_jit_tls_id = TlsAlloc ();
setup_jit_tls_data ((gpointer)-1, mono_thread_abort);
#ifdef JIT_TRAMPOLINES_WORK
mono_install_compile_method (mono_jit_compile_method);
mono_install_free_method (mono_jit_free_method);
-#ifdef MONO_ARCH_LLVM_SUPPORTED
- if (mono_use_llvm)
- /* The runtime currently only uses this for filling out vtables */
- mono_install_trampoline (mono_create_llvm_vcall_trampoline);
- else
- mono_install_trampoline (mono_create_jit_trampoline);
-#else
mono_install_trampoline (mono_create_jit_trampoline);
-#endif
mono_install_jump_trampoline (mono_create_jump_trampoline);
mono_install_remoting_trampoline (mono_jit_create_remoting_trampoline);
mono_install_delegate_trampoline (mono_create_delegate_trampoline);
mono_install_get_class_from_name (mono_aot_get_class_from_name);
mono_install_jit_info_find_in_aot (mono_aot_find_jit_info);
+ if (debug_options.collect_pagefault_stats) {
+ mono_aot_set_make_unreadable (TRUE);
+ }
+
if (runtime_version)
domain = mono_init_version (filename, runtime_version);
else
mono_install_imt_thunk_builder (mono_aot_get_imt_thunk);
else
mono_install_imt_thunk_builder (mono_arch_build_imt_thunk);
- if (!mono_use_llvm) {
- /* LLVM needs a per-method vtable trampoline */
- mono_install_vtable_trampoline (mini_get_vtable_trampoline ());
- /*
- * The imt code in mono_magic_trampoline () can't handle LLVM code. By disabling
- * this, we force iface calls to go through the llvm vcall trampoline.
- */
- mono_install_imt_trampoline (mini_get_imt_trampoline ());
- }
}
#endif
mono_icall_init ();
+ /* This should come after mono_init () too */
+ mini_gc_init ();
+
mono_add_internal_call ("System.Diagnostics.StackFrame::get_frame_info",
ves_icall_get_frame_info);
mono_add_internal_call ("System.Diagnostics.StackTrace::get_trace",
mono_add_internal_call ("Mono.Runtime::mono_runtime_install_handlers",
mono_runtime_install_handlers);
+ mono_create_helper_signatures ();
- create_helper_signature ();
+ register_jit_stats ();
#define JIT_CALLS_WORK
#ifdef JIT_CALLS_WORK
register_icall (mono_get_throw_exception (), "mono_arch_throw_exception", "void object", TRUE);
register_icall (mono_get_rethrow_exception (), "mono_arch_rethrow_exception", "void object", TRUE);
- register_icall (mono_get_throw_exception_by_name (), "mono_arch_throw_exception_by_name", "void ptr", TRUE);
-#if MONO_ARCH_HAVE_THROW_CORLIB_EXCEPTION
register_icall (mono_get_throw_corlib_exception (), "mono_arch_throw_corlib_exception",
"void ptr", TRUE);
-#endif
register_icall (mono_thread_get_undeniable_exception, "mono_thread_get_undeniable_exception", "object", FALSE);
register_icall (mono_thread_interruption_checkpoint, "mono_thread_interruption_checkpoint", "void", FALSE);
register_icall (mono_thread_force_interruption_checkpoint, "mono_thread_force_interruption_checkpoint", "void", FALSE);
register_icall (mono_load_remote_field_new, "mono_load_remote_field_new", "object object ptr ptr", FALSE);
register_icall (mono_store_remote_field_new, "mono_store_remote_field_new", "void object ptr ptr object", FALSE);
+#if defined(__native_client__) || defined(__native_client_codegen__)
+ register_icall (mono_nacl_gc, "mono_nacl_gc", "void", TRUE);
+#endif
/*
* NOTE, NOTE, NOTE, NOTE:
* when adding emulation for some opcodes, remember to also add a dummy
mono_register_opcode_emulation (OP_LCONV_TO_R_UN, "__emul_lconv_to_r8_un", "double long", mono_lconv_to_r8_un, FALSE);
#endif
#ifdef MONO_ARCH_EMULATE_FREM
+#if defined(__default_codegen__)
mono_register_opcode_emulation (OP_FREM, "__emul_frem", "double double double", fmod, FALSE);
+#elif defined(__native_client_codegen__)
+ mono_register_opcode_emulation (OP_FREM, "__emul_frem", "double double double", mono_fmod, FALSE);
+#endif
#endif
#ifdef MONO_ARCH_SOFT_FLOAT
register_icall (mono_isfinite, "mono_isfinite", "uint32 double", FALSE);
#endif
+#ifdef COMPRESSED_INTERFACE_BITMAP
+ register_icall (mono_class_interface_match, "mono_class_interface_match", "uint32 ptr int32", TRUE);
+#endif
+
#if SIZEOF_REGISTER == 4
mono_register_opcode_emulation (OP_FCONV_TO_U, "__emul_fconv_to_u", "uint32 double", mono_fconv_u4, TRUE);
#endif
register_icall (mono_create_corlib_exception_2, "mono_create_corlib_exception_2", "object int object object", TRUE);
register_icall (mono_array_new_1, "mono_array_new_1", "object ptr int", FALSE);
register_icall (mono_array_new_2, "mono_array_new_2", "object ptr int int", FALSE);
+ register_icall (mono_array_new_3, "mono_array_new_3", "object ptr int int int", FALSE);
+ register_icall (mono_get_native_calli_wrapper, "mono_get_native_calli_wrapper", "ptr ptr ptr ptr", FALSE);
+ register_icall (mono_resume_unwind, "mono_resume_unwind", "void", TRUE);
+
+ register_icall (mono_gc_wbarrier_value_copy_bitmap, "mono_gc_wbarrier_value_copy_bitmap", "void ptr ptr int int", FALSE);
+
+ register_icall (mono_object_castclass_with_cache, "mono_object_castclass_with_cache", "object object ptr ptr", FALSE);
+ register_icall (mono_object_isinst_with_cache, "mono_object_isinst_with_cache", "object object ptr ptr", FALSE);
+
#endif
mono_generic_sharing_init ();
{
if (mono_jit_stats.enabled) {
g_print ("Mono Jit statistics\n");
- g_print ("Compiled methods: %ld\n", mono_jit_stats.methods_compiled);
- g_print ("Methods from AOT: %ld\n", mono_jit_stats.methods_aot);
g_print ("Methods cache lookup: %ld\n", mono_jit_stats.methods_lookups);
- g_print ("Method trampolines: %ld\n", mono_jit_stats.method_trampolines);
g_print ("Basic blocks: %ld\n", mono_jit_stats.basic_blocks);
g_print ("Max basic blocks: %ld\n", mono_jit_stats.max_basic_blocks);
g_print ("Allocated vars: %ld\n", mono_jit_stats.allocate_var);
g_print ("JIT info table lookups: %ld\n", mono_stats.jit_info_table_lookup_count);
g_print ("Hazardous pointers: %ld\n", mono_stats.hazardous_pointer_count);
-#ifdef HAVE_SGEN_GC
g_print ("Minor GC collections: %ld\n", mono_stats.minor_gc_count);
-#endif
g_print ("Major GC collections: %ld\n", mono_stats.major_gc_count);
-#ifdef HAVE_SGEN_GC
g_print ("Minor GC time in msecs: %lf\n", (double)mono_stats.minor_gc_time_usecs / 1000.0);
-#endif
g_print ("Major GC time in msecs: %lf\n", (double)mono_stats.major_gc_time_usecs / 1000.0);
if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
g_print ("\nDecl security check : %ld\n", mono_jit_stats.cas_declsec_check);
#endif
#ifndef MONO_CROSS_COMPILE
+ mono_runtime_shutdown ();
/*
* mono_runtime_cleanup() and mono_domain_finalize () need to
* be called early since they need the execution engine still
mono_runtime_cleanup (domain);
#endif
+ free_jit_tls_data (TlsGetValue (mono_jit_tls_id));
+
mono_icall_cleanup ();
mono_runtime_cleanup_handlers ();
mono_debugger_cleanup ();
#ifdef ENABLE_LLVM
- mono_llvm_cleanup ();
+ if (mono_use_llvm)
+ mono_llvm_cleanup ();
#endif
+ mono_aot_cleanup ();
+
mono_trampolines_cleanup ();
mono_unwind_cleanup ();
mono_code_manager_destroy (global_codeman);
g_hash_table_destroy (jit_icall_name_hash);
g_free (emul_opcode_map);
+ g_free (emul_opcode_opcodes);
+ g_free (vtable_trampolines);
mono_arch_cleanup ();
+ mono_generic_sharing_cleanup ();
+
mono_cleanup ();
mono_trace_cleanup ();
mono_get_runtime_build_info (void)
{
if (mono_build_date)
- return g_strdup_printf ("%s %s", FULL_VERSION, mono_build_date);
+ return g_strdup_printf ("%s (%s %s)", VERSION, FULL_VERSION, mono_build_date);
else
- return g_strdup_printf ("%s", FULL_VERSION);
+ return g_strdup_printf ("%s (%s)", VERSION, FULL_VERSION);
}
static void
g_hash_table_destroy (assemblies);
}
+#ifndef DISABLE_JIT
+
+/*
+ * Thin convenience wrapper: forwards to mono_arch_instrument_epilog_full ()
+ * with the extra trailing flag fixed to FALSE.
+ */
void*
mono_arch_instrument_epilog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments) {
	return mono_arch_instrument_epilog_full (cfg, func, p, enable_arguments, FALSE);
}
+
+/*
+ * mono_cfg_add_try_hole:
+ *
+ *   Record a "hole" in CLAUSE's try range: a region of native code, starting
+ * at START (stored as an offset from cfg->native_code) and associated with
+ * basic block BB, that should be excluded from the clause's coverage.
+ * The hole record is allocated from CFG's mempool and appended to
+ * cfg->try_block_holes, so it shares the compile's lifetime and needs no
+ * separate freeing.
+ */
+void
+mono_cfg_add_try_hole (MonoCompile *cfg, MonoExceptionClause *clause, guint8 *start, MonoBasicBlock *bb)
+{
+	TryBlockHole *hole = mono_mempool_alloc (cfg->mempool, sizeof (TryBlockHole));
+	hole->clause = clause;
+	hole->start_offset = start - cfg->native_code;
+	hole->basic_block = bb;
+
+	cfg->try_block_holes = g_slist_append_mempool (cfg->mempool, cfg->try_block_holes, hole);
+}
+
+/*
+ * mono_cfg_set_exception:
+ *
+ *   Record exception TYPE on CFG by setting cfg->exception_type.
+ * NOTE(review): presumably used to flag a failed/aborted compilation so the
+ * caller can raise the corresponding managed exception — confirm against
+ * the MONO_EXCEPTION_* users of cfg->exception_type.
+ */
+void
+mono_cfg_set_exception (MonoCompile *cfg, int type)
+{
+	cfg->exception_type = type;
+}
+
+#endif