[runtime] Fix test_op_il_seq_point in amd64.
#define mono_mini_arch_unlock() mono_mutex_unlock (&mini_arch_mutex)
static mono_mutex_t mini_arch_mutex;
-MonoBreakpointInfo
-mono_breakpoint_info [MONO_BREAKPOINT_ARRAY_SIZE];
-
/*
* The code generated for sequence points reads from this location, which is
* made read-only when single stepping is enabled.
return code [0] == 0xe8;
}
+ static inline gboolean
+ amd64_use_imm32 (gint64 val)
+ {
+ if (mini_get_debug_options()->single_imm_size)
+ return FALSE;
+
+ return amd64_is_imm32 (val);
+ }
+
#ifdef __native_client_codegen__
/* Keep track of instruction "depth", that is, the level of sub-instruction */
break;
case OP_COMPARE_IMM:
case OP_LCOMPARE_IMM:
- if (!amd64_is_imm32 (ins->inst_imm)) {
+ if (!amd64_use_imm32 (ins->inst_imm)) {
NEW_INS (cfg, ins, temp, OP_I8CONST);
temp->inst_c0 = ins->inst_imm;
temp->dreg = mono_alloc_ireg (cfg);
#ifndef __native_client_codegen__
/* Don't generate memindex opcodes (to simplify */
/* read sandboxing) */
- if (!amd64_is_imm32 (ins->inst_offset)) {
+ if (!amd64_use_imm32 (ins->inst_offset)) {
NEW_INS (cfg, ins, temp, OP_I8CONST);
temp->inst_c0 = ins->inst_offset;
temp->dreg = mono_alloc_ireg (cfg);
case OP_STORE_MEMBASE_IMM:
#endif
case OP_STOREI8_MEMBASE_IMM:
- if (!amd64_is_imm32 (ins->inst_imm)) {
+ if (!amd64_use_imm32 (ins->inst_imm)) {
NEW_INS (cfg, ins, temp, OP_I8CONST);
temp->inst_c0 = ins->inst_imm;
temp->dreg = mono_alloc_ireg (cfg);
#endif
case OP_LOADI8_MEM:
// FIXME: Decompose this earlier
- if (amd64_is_imm32 (ins->inst_imm))
+ if (amd64_use_imm32 (ins->inst_imm))
amd64_mov_reg_mem (code, ins->dreg, ins->inst_imm, 8);
else {
- amd64_mov_reg_imm (code, ins->dreg, ins->inst_imm);
+ amd64_mov_reg_imm_size (code, ins->dreg, ins->inst_imm, sizeof(gpointer));
amd64_mov_reg_membase (code, ins->dreg, ins->dreg, 0, 8);
}
break;
break;
case OP_LOADU4_MEM:
// FIXME: Decompose this earlier
- if (amd64_is_imm32 (ins->inst_imm))
+ if (amd64_use_imm32 (ins->inst_imm))
amd64_mov_reg_mem (code, ins->dreg, ins->inst_imm, 4);
else {
- amd64_mov_reg_imm (code, ins->dreg, ins->inst_imm);
+ amd64_mov_reg_imm_size (code, ins->dreg, ins->inst_imm, sizeof(gpointer));
amd64_mov_reg_membase (code, ins->dreg, ins->dreg, 0, 4);
}
break;
case OP_ICONST:
case OP_I8CONST:
- if ((((guint64)ins->inst_c0) >> 32) == 0)
+ if ((((guint64)ins->inst_c0) >> 32) == 0 && !mini_get_debug_options()->single_imm_size)
amd64_mov_reg_imm_size (code, ins->dreg, ins->inst_c0, 4);
else
amd64_mov_reg_imm_size (code, ins->dreg, ins->inst_c0, 8);
gboolean
mono_arch_is_inst_imm (gint64 imm)
{
- return amd64_is_imm32 (imm);
+ return amd64_use_imm32 (imm);
}
/*
#define MAX_ARCH_DELEGATE_PARAMS 10
static gpointer
-get_delegate_invoke_impl (gboolean has_target, guint32 param_count, guint32 *code_len)
+get_delegate_invoke_impl (MonoTrampInfo **info, gboolean has_target, guint32 param_count)
{
guint8 *code, *start;
+ GSList *unwind_ops = NULL;
int i;
+ unwind_ops = mono_arch_get_cie_program ();
+
if (has_target) {
start = code = mono_global_codeman_reserve (64);
nacl_global_codeman_validate (&start, 64, &code);
mono_arch_flush_icache (start, code - start);
- if (code_len)
- *code_len = code - start;
+ if (has_target) {
+ *info = mono_tramp_info_create ("delegate_invoke_impl_has_target", start, code - start, NULL, unwind_ops);
+ } else {
+ char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", param_count);
+ *info = mono_tramp_info_create (name, start, code - start, NULL, unwind_ops);
+ g_free (name);
+ }
if (mono_jit_map_is_enabled ()) {
char *buff;
#define MAX_VIRTUAL_DELEGATE_OFFSET 32
static gpointer
-get_delegate_virtual_invoke_impl (gboolean load_imt_reg, int offset, guint32 *code_len)
+get_delegate_virtual_invoke_impl (MonoTrampInfo **info, gboolean load_imt_reg, int offset)
{
guint8 *code, *start;
int size = 20;
+ char *tramp_name;
+ GSList *unwind_ops;
- if (offset / sizeof (gpointer) > MAX_VIRTUAL_DELEGATE_OFFSET)
+ if (offset / (int)sizeof (gpointer) > MAX_VIRTUAL_DELEGATE_OFFSET)
return NULL;
start = code = mono_global_codeman_reserve (size);
+ unwind_ops = mono_arch_get_cie_program ();
+
/* Replace the this argument with the target */
amd64_mov_reg_reg (code, AMD64_RAX, AMD64_ARG_REG1, 8);
amd64_mov_reg_membase (code, AMD64_ARG_REG1, AMD64_RAX, MONO_STRUCT_OFFSET (MonoDelegate, target), 8);
amd64_jump_membase (code, AMD64_RAX, offset);
mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL);
- if (code_len)
- *code_len = code - start;
+ if (load_imt_reg)
+ tramp_name = g_strdup_printf ("delegate_virtual_invoke_imt_%d", - offset / sizeof (gpointer));
+ else
+ tramp_name = g_strdup_printf ("delegate_virtual_invoke_%d", offset / sizeof (gpointer));
+ *info = mono_tramp_info_create (tramp_name, start, code - start, NULL, unwind_ops);
+ g_free (tramp_name);
return start;
}
mono_arch_get_delegate_invoke_impls (void)
{
GSList *res = NULL;
- guint8 *code;
- guint32 code_len;
+ MonoTrampInfo *info;
int i;
- char *tramp_name;
- code = get_delegate_invoke_impl (TRUE, 0, &code_len);
- res = g_slist_prepend (res, mono_tramp_info_create ("delegate_invoke_impl_has_target", code, code_len, NULL, NULL));
+ get_delegate_invoke_impl (&info, TRUE, 0);
+ res = g_slist_prepend (res, info);
- for (i = 0; i < MAX_ARCH_DELEGATE_PARAMS; ++i) {
- code = get_delegate_invoke_impl (FALSE, i, &code_len);
- tramp_name = g_strdup_printf ("delegate_invoke_impl_target_%d", i);
- res = g_slist_prepend (res, mono_tramp_info_create (tramp_name, code, code_len, NULL, NULL));
- g_free (tramp_name);
+ for (i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) {
+ get_delegate_invoke_impl (&info, FALSE, i);
+ res = g_slist_prepend (res, info);
}
- for (i = 0; i < MAX_VIRTUAL_DELEGATE_OFFSET; ++i) {
- code = get_delegate_virtual_invoke_impl (TRUE, i * SIZEOF_VOID_P, &code_len);
- tramp_name = g_strdup_printf ("delegate_virtual_invoke_imt_%d", i);
- res = g_slist_prepend (res, mono_tramp_info_create (tramp_name, code, code_len, NULL, NULL));
- g_free (tramp_name);
+ for (i = 0; i <= MAX_VIRTUAL_DELEGATE_OFFSET; ++i) {
+ get_delegate_virtual_invoke_impl (&info, TRUE, - i * SIZEOF_VOID_P);
+ res = g_slist_prepend (res, info);
- code = get_delegate_virtual_invoke_impl (FALSE, i * SIZEOF_VOID_P, &code_len);
- tramp_name = g_strdup_printf ("delegate_virtual_invoke_%d", i);
- res = g_slist_prepend (res, mono_tramp_info_create (tramp_name, code, code_len, NULL, NULL));
- g_free (tramp_name);
+ get_delegate_virtual_invoke_impl (&info, FALSE, i * SIZEOF_VOID_P);
+ res = g_slist_prepend (res, info);
}
return res;
if (cached)
return cached;
- if (mono_aot_only)
+ if (mono_aot_only) {
start = mono_aot_get_trampoline ("delegate_invoke_impl_has_target");
- else
- start = get_delegate_invoke_impl (TRUE, 0, NULL);
+ } else {
+ MonoTrampInfo *info;
+ start = get_delegate_invoke_impl (&info, TRUE, 0);
+ mono_tramp_info_register (info, NULL);
+ }
mono_memory_barrier ();
start = mono_aot_get_trampoline (name);
g_free (name);
} else {
- start = get_delegate_invoke_impl (FALSE, sig->param_count, NULL);
+ MonoTrampInfo *info;
+ start = get_delegate_invoke_impl (&info, FALSE, sig->param_count);
+ mono_tramp_info_register (info, NULL);
}
mono_memory_barrier ();
gpointer
mono_arch_get_delegate_virtual_invoke_impl (MonoMethodSignature *sig, MonoMethod *method, int offset, gboolean load_imt_reg)
{
- return get_delegate_virtual_invoke_impl (load_imt_reg, offset, NULL);
+ MonoTrampInfo *info;
+ gpointer code;
+
+ code = get_delegate_virtual_invoke_impl (&info, load_imt_reg, offset);
+ if (code)
+ mono_tramp_info_register (info, NULL);
+ return code;
}
void
int size = 0;
guint8 *code, *start;
gboolean vtable_is_32bit = ((gsize)(vtable) == (gsize)(int)(gsize)(vtable));
+ GSList *unwind_ops;
for (i = 0; i < count; ++i) {
MonoIMTCheckItem *item = imt_entries [i];
if (item->is_equals) {
if (item->check_target_idx) {
if (!item->compare_done) {
- if (amd64_is_imm32 (item->key))
+ if (amd64_use_imm32 ((gint64)item->key))
item->chunk_size += CMP_SIZE;
else
item->chunk_size += MOV_REG_IMM_SIZE + CMP_REG_REG_SIZE;
}
}
} else {
- if (amd64_is_imm32 (item->key))
+ if (amd64_use_imm32 ((gint64)item->key))
item->chunk_size += CMP_SIZE;
else
item->chunk_size += MOV_REG_IMM_SIZE + CMP_REG_REG_SIZE;
code = mono_domain_code_reserve (domain, size);
#endif
start = code;
+
+ unwind_ops = mono_arch_get_cie_program ();
+
for (i = 0; i < count; ++i) {
MonoIMTCheckItem *item = imt_entries [i];
item->code_target = code;
if (item->check_target_idx || fail_case) {
if (!item->compare_done || fail_case) {
- if (amd64_is_imm32 (item->key))
+ if (amd64_use_imm32 ((gint64)item->key))
amd64_alu_reg_imm_size (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)(gssize)item->key, sizeof(gpointer));
else {
- amd64_mov_reg_imm (code, MONO_ARCH_IMT_SCRATCH_REG, item->key);
+ amd64_mov_reg_imm_size (code, MONO_ARCH_IMT_SCRATCH_REG, item->key, sizeof(gpointer));
amd64_alu_reg_reg (code, X86_CMP, MONO_ARCH_IMT_REG, MONO_ARCH_IMT_SCRATCH_REG);
}
}
#endif
}
} else {
- if (amd64_is_imm32 (item->key))
+ if (amd64_use_imm32 ((gint64)item->key))
amd64_alu_reg_imm_size (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)(gssize)item->key, sizeof (gpointer));
else {
- amd64_mov_reg_imm (code, MONO_ARCH_IMT_SCRATCH_REG, item->key);
+ amd64_mov_reg_imm_size (code, MONO_ARCH_IMT_SCRATCH_REG, item->key, sizeof (gpointer));
amd64_alu_reg_reg (code, X86_CMP, MONO_ARCH_IMT_REG, MONO_ARCH_IMT_SCRATCH_REG);
}
item->jmp_code = code;
nacl_domain_code_validate(domain, &start, size, &code);
mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_IMT_TRAMPOLINE, NULL);
+ mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, unwind_ops), domain);
+
return start;
}
#include <mono/utils/dtrace.h>
#include <mono/utils/mono-signal-handler.h>
#include <mono/utils/mono-threads.h>
+#include <mono/utils/checked-build.h>
#include <mono/io-layer/io-layer.h>
#include "mini.h"
gboolean
mono_running_on_valgrind (void)
{
+#ifndef HOST_WIN32
if (RUNNING_ON_VALGRIND){
#ifdef VALGRIND_JIT_REGISTER_MAP
valgrind_register = TRUE;
#endif
return TRUE;
} else
+#endif
return FALSE;
}
if (!domain)
domain = mono_get_root_domain ();
- ji = mono_jit_info_table_find (domain, ip);
+ ji = mono_jit_info_table_find_internal (domain, ip, TRUE, TRUE);
if (!ji) {
user_data.ip = ip;
user_data.method = NULL;
}
else
return NULL;
+ } else if (ji->is_trampoline) {
+ res = g_strdup_printf ("<%p - %s trampoline>", ip, ((MonoTrampInfo*)ji->d.tramp_info)->name);
+ return res;
}
+
method = mono_method_full_name (jinfo_get_method (ji), TRUE);
/* FIXME: unused ? */
location = mono_debug_lookup_source_location (jinfo_get_method (ji), (guint32)((guint8*)ip - (guint8*)ji->code_start), domain);
{
MonoMethod *cmethod;
- if (!caller || !callee)
+ if (!caller || caller->is_trampoline || !callee || callee->is_trampoline)
return FALSE;
/*
void
mono_tramp_info_free (MonoTrampInfo *info)
{
- GSList *l;
-
g_free (info->name);
// FIXME: ji
- for (l = info->unwind_ops; l; l = l->next)
- g_free (l->data);
- g_slist_free (info->unwind_ops);
+ mono_free_unwind_info (info->unwind_ops);
g_free (info);
}
mono_jit_info_init (ji, NULL, info->code, info->code_size, 0, 0, 0);
ji->d.tramp_info = info;
ji->is_trampoline = TRUE;
- // FIXME: Unwind info
+
+ ji->unwind_info = mono_cache_unwind_info (info->uw_info, info->uw_info_len);
mono_jit_info_table_add (domain, ji);
}
* Frees INFO.
*/
void
-mono_tramp_info_register (MonoTrampInfo *info)
+mono_tramp_info_register (MonoTrampInfo *info, MonoDomain *domain)
{
MonoTrampInfo *copy;
if (!info)
return;
+ if (!domain)
+ domain = mono_get_root_domain ();
+
copy = g_new0 (MonoTrampInfo, 1);
copy->code = info->code;
copy->code_size = info->code_size;
copy->name = g_strdup (info->name);
+ if (info->unwind_ops) {
+ copy->uw_info = mono_unwind_ops_encode (info->unwind_ops, &copy->uw_info_len);
+ } else {
+ /* Trampolines from aot have the unwind ops already encoded */
+ copy->uw_info = info->uw_info;
+ copy->uw_info_len = info->uw_info_len;
+ }
+
mono_jit_lock ();
tramp_infos = g_slist_prepend (tramp_infos, copy);
mono_jit_unlock ();
mono_save_trampoline_xdebug_info (info);
- if (mono_get_root_domain ())
- register_trampoline_jit_info (mono_get_root_domain (), copy);
+ /* Only register trampolines that have unwind infos */
+ if (mono_get_root_domain () && copy->uw_info)
+ register_trampoline_jit_info (domain, copy);
if (mono_jit_map_is_enabled ())
mono_emit_jit_tramp (info->code, info->code_size, info->name);
MonoDomain *domain = mono_get_root_domain ();
gboolean check_exc = TRUE;
- if (callinfo->wrapper) {
+ if (callinfo->wrapper)
return callinfo->wrapper;
- }
if (callinfo->trampoline)
return callinfo->trampoline;
- /*
- * We use the lock on the root domain instead of the JIT lock to protect
- * callinfo->trampoline, since we do a lot of stuff inside the critical section.
- */
- mono_loader_lock (); /*FIXME mono_compile_method requires the loader lock, by large.*/
- mono_domain_lock (domain);
-
- if (callinfo->trampoline) {
- mono_domain_unlock (domain);
- mono_loader_unlock ();
- return callinfo->trampoline;
- }
-
if (!strcmp (callinfo->name, "mono_thread_interruption_checkpoint"))
/* This icall is used to check for exceptions, so don't check in the wrapper */
check_exc = FALSE;
trampoline = mono_compile_method (wrapper);
else
trampoline = mono_create_ftnptr (domain, mono_create_jit_trampoline_in_domain (domain, wrapper));
- mono_register_jit_icall_wrapper (callinfo, trampoline);
- callinfo->trampoline = trampoline;
-
- mono_domain_unlock (domain);
+ mono_loader_lock ();
+ if (!callinfo->trampoline) {
+ mono_register_jit_icall_wrapper (callinfo, trampoline);
+ callinfo->trampoline = trampoline;
+ }
mono_loader_unlock ();
return callinfo->trampoline;
static void
register_opcode_emulation (int opcode, const char *name, const char *sigstr, gpointer func, const char *symbol, gboolean no_throw)
{
+#ifndef DISABLE_JIT
mini_register_opcode_emulation (opcode, name, sigstr, func, symbol, no_throw);
+#endif
}
/*
case MONO_PATCH_INFO_GC_CARD_TABLE_ADDR:
case MONO_PATCH_INFO_GC_NURSERY_START:
case MONO_PATCH_INFO_JIT_TLS_ID:
- case MONO_PATCH_INFO_MONITOR_ENTER:
- case MONO_PATCH_INFO_MONITOR_ENTER_V4:
- case MONO_PATCH_INFO_MONITOR_EXIT:
case MONO_PATCH_INFO_GOT_OFFSET:
case MONO_PATCH_INFO_GC_SAFE_POINT_FLAG:
return (ji->type << 8);
target = mono_create_rgctx_lazy_fetch_trampoline (slot);
break;
}
- case MONO_PATCH_INFO_MONITOR_ENTER:
- target = mono_create_monitor_enter_trampoline ();
- break;
- case MONO_PATCH_INFO_MONITOR_ENTER_V4:
- target = mono_create_monitor_enter_v4_trampoline ();
- break;
- case MONO_PATCH_INFO_MONITOR_EXIT:
- target = mono_create_monitor_exit_trampoline ();
- break;
#ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED
case MONO_PATCH_INFO_SEQ_POINT_INFO:
if (!run_cctors)
mono_internal_hash_table_remove (&domain->jit_code_hash, method);
mono_domain_jit_code_hash_unlock (domain);
g_hash_table_remove (domain_jit_info (domain)->jump_trampoline_hash, method);
+
+ /* requires the domain lock - took above */
mono_conc_hashtable_remove (domain_jit_info (domain)->runtime_invoke_hash, method);
/* Remove jump targets in this method */
if (!info->dyn_call_info)
info->runtime_invoke = mono_jit_compile_method (invoke);
+ mono_domain_lock (domain);
info2 = mono_conc_hashtable_insert (domain_info->runtime_invoke_hash, method, info);
+ mono_domain_unlock (domain);
if (info2) {
g_free (info);
info = info2;
MONO_SIG_HANDLER_INFO_TYPE *info = MONO_SIG_HANDLER_GET_INFO ();
MONO_SIG_HANDLER_GET_CONTEXT;
- ji = mono_jit_info_table_find (mono_domain_get (), mono_arch_ip_from_context (ctx));
+ ji = mono_jit_info_table_find_internal (mono_domain_get (), mono_arch_ip_from_context (ctx), TRUE, TRUE);
#if defined(MONO_ARCH_HAVE_IS_INT_OVERFLOW)
if (mono_arch_is_int_overflow (ctx, info))
}
#endif
- ji = mono_jit_info_table_find (mono_domain_get (), mono_arch_ip_from_context (ctx));
+ ji = mono_jit_info_table_find_internal (mono_domain_get (), mono_arch_ip_from_context (ctx), TRUE, TRUE);
#ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
if (mono_handle_soft_stack_ovf (jit_tls, ji, ctx, info, (guint8*)info->si_addr))
/* FIXME Support more cases */
if (mono_aot_only) {
char tramp_name [256];
+ const char *imt = load_imt_reg ? "_imt" : "";
+ int ind = (load_imt_reg ? (-offset) : offset) / SIZEOF_VOID_P;
- sprintf (tramp_name, "delegate_virtual_invoke%s_%d", load_imt_reg ? "_imt" : "", offset / SIZEOF_VOID_P);
+ sprintf (tramp_name, "delegate_virtual_invoke%s_%d", imt, ind);
cache [idx] = mono_aot_get_trampoline (tramp_name);
g_assert (cache [idx]);
} else {
debug_options.gen_sdb_seq_points = TRUE;
else if (!strcmp (arg, "gen-compact-seq-points"))
debug_options.gen_seq_points_compact_data = TRUE;
+ else if (!strcmp (arg, "single-imm-size"))
+ debug_options.single_imm_size = TRUE;
else if (!strcmp (arg, "init-stacks"))
debug_options.init_stacks = TRUE;
else if (!strcmp (arg, "casts"))
info->jit_trampoline_hash = g_hash_table_new (mono_aligned_addr_hash, NULL);
info->delegate_trampoline_hash = g_hash_table_new (class_method_pair_hash, class_method_pair_equal);
info->llvm_vcall_trampoline_hash = g_hash_table_new (mono_aligned_addr_hash, NULL);
- info->runtime_invoke_hash = mono_conc_hashtable_new_full (&domain->lock, mono_aligned_addr_hash, NULL, NULL, runtime_invoke_info_free);
+ info->runtime_invoke_hash = mono_conc_hashtable_new_full (mono_aligned_addr_hash, NULL, NULL, runtime_invoke_info_free);
info->seq_points = g_hash_table_new_full (mono_aligned_addr_hash, NULL, NULL, mono_seq_point_info_free);
info->arch_seq_points = g_hash_table_new (mono_aligned_addr_hash, NULL);
info->jump_target_hash = g_hash_table_new (NULL, NULL);
MONO_VES_INIT_BEGIN ();
+ CHECKED_MONO_INIT ();
+
#if defined(__linux__) && !defined(__native_client__)
if (access ("/proc/self/maps", F_OK) != 0) {
g_print ("Mono requires /proc to be mounted.\n");
ticallbacks.setup_async_callback = mono_setup_async_callback;
ticallbacks.thread_state_init_from_sigctx = mono_thread_state_init_from_sigctx;
ticallbacks.thread_state_init_from_handle = mono_thread_state_init_from_handle;
+ ticallbacks.thread_state_init = mono_thread_state_init;
mono_counters_init ();
ves_icall_get_frame_info);
mono_add_internal_call ("System.Diagnostics.StackTrace::get_trace",
ves_icall_get_trace);
- mono_add_internal_call ("System.Exception::get_trace",
- ves_icall_System_Exception_get_trace);
mono_add_internal_call ("Mono.Runtime::mono_runtime_install_handlers",
mono_runtime_install_handlers);
mono_mutex_destroy (&jit_mutex);
- mono_mutex_destroy (&mono_delegate_section);
-
mono_code_manager_cleanup ();
#ifdef USE_JUMP_TABLES
g_hash_table_destroy (assemblies);
}
+/*
+ * Used by LLVM.
+ * Have to export this for AOT.
+ */
+void
+mono_personality (void)
+{
+ /* Not used */
+ g_assert_not_reached ();
+}
+
#ifdef USE_JUMP_TABLES
#define DEFAULT_JUMPTABLE_CHUNK_ELEMENTS 128
#endif
/* Version number of the AOT file format */
-#define MONO_AOT_FILE_VERSION 120
+#define MONO_AOT_FILE_VERSION 121
//TODO: This is x86/amd64 specific.
#define mono_simd_shuffle_mask(a,b,c,d) ((a) | ((b) << 2) | ((c) << 4) | ((d) << 6))
/* maps MonoMethod -> MonoJitDynamicMethodInfo */
GHashTable *dynamic_code_hash;
GHashTable *method_code_hash;
- /* Maps methods to a RuntimeInvokeInfo structure */
+ /* Maps methods to a RuntimeInvokeInfo structure, protected by the associated MonoDomain lock */
MonoConcurrentHashTable *runtime_invoke_hash;
/* Maps MonoMethod to a GPtrArray containing sequence point locations */
+ /* Protected by the domain lock */
GHashTable *seq_points;
/* Debugger agent data */
gpointer agent_info;
MONO_TRAMPOLINE_DELEGATE,
MONO_TRAMPOLINE_RESTORE_STACK_PROT,
MONO_TRAMPOLINE_GENERIC_VIRTUAL_REMOTING,
- MONO_TRAMPOLINE_MONITOR_ENTER,
- MONO_TRAMPOLINE_MONITOR_ENTER_V4,
- MONO_TRAMPOLINE_MONITOR_EXIT,
MONO_TRAMPOLINE_VCALL,
MONO_TRAMPOLINE_HANDLER_BLOCK_GUARD,
MONO_TRAMPOLINE_NUM
/* These trampolines return normally to their caller */
#define MONO_TRAMPOLINE_TYPE_MUST_RETURN(t) \
((t) == MONO_TRAMPOLINE_RESTORE_STACK_PROT || \
- (t) == MONO_TRAMPOLINE_RGCTX_LAZY_FETCH || \
- (t) == MONO_TRAMPOLINE_MONITOR_ENTER || \
- (t) == MONO_TRAMPOLINE_MONITOR_ENTER_V4 || \
- (t) == MONO_TRAMPOLINE_MONITOR_EXIT)
+ (t) == MONO_TRAMPOLINE_RGCTX_LAZY_FETCH)
/* These trampolines receive an argument directly in a register */
#define MONO_TRAMPOLINE_TYPE_HAS_ARG(t) \
- ((t) == MONO_TRAMPOLINE_MONITOR_ENTER || \
- (t) == MONO_TRAMPOLINE_MONITOR_ENTER_V4 || \
- (t) == MONO_TRAMPOLINE_MONITOR_EXIT || \
- (t) == MONO_TRAMPOLINE_HANDLER_BLOCK_GUARD)
+ ((t) == MONO_TRAMPOLINE_HANDLER_BLOCK_GUARD)
/* optimization flags */
#define OPTFLAG(id,shift,name,descr) MONO_OPT_ ## id = 1 << shift,
*/
gboolean gen_sdb_seq_points;
gboolean gen_seq_points_compact_data;
+ /*
+ * Setting single_imm_size should guarantee that each time managed code is compiled
+ * the same instructions and registers are used, regardless of the size of used values.
+ */
+ gboolean single_imm_size;
gboolean explicit_null_checks;
/*
* Fill stack frames with 0x2a in method prologs. This helps with the
MonoInst* mono_compile_create_var (MonoCompile *cfg, MonoType *type, int opcode);
MonoInst* mono_compile_create_var_for_vreg (MonoCompile *cfg, MonoType *type, int opcode, int vreg);
void mono_compile_make_var_load (MonoCompile *cfg, MonoInst *dest, gssize var_index);
-MonoInst* mono_compile_create_var_load (MonoCompile *cfg, gssize var_index);
-MonoInst* mono_compile_create_var_store (MonoCompile *cfg, gssize var_index, MonoInst *value);
MonoInst* mini_get_int_to_float_spill_area (MonoCompile *cfg);
MonoType* mono_type_from_stack_type (MonoInst *ins);
guint32 mono_alloc_ireg (MonoCompile *cfg) MONO_LLVM_INTERNAL;
int val);
MonoTrampInfo* mono_tramp_info_create (const char *name, guint8 *code, guint32 code_size, MonoJumpInfo *ji, GSList *unwind_ops);
void mono_tramp_info_free (MonoTrampInfo *info);
-void mono_tramp_info_register (MonoTrampInfo *info);
+void mono_tramp_info_register (MonoTrampInfo *info, MonoDomain *domain);
int mini_exception_id_by_name (const char *name);
gboolean mini_type_is_hfa (MonoType *t, int *out_nfields, int *out_esize) MONO_LLVM_INTERNAL;
void mono_walk_stack_with_state (MonoJitStackWalk func, MonoThreadUnwindState *state, MonoUnwindOptions unwind_options, void *user_data);
void mono_walk_stack (MonoJitStackWalk func, MonoUnwindOptions options, void *user_data);
gboolean mono_thread_state_init_from_sigctx (MonoThreadUnwindState *ctx, void *sigctx);
+void mono_thread_state_init (MonoThreadUnwindState *ctx);
gboolean mono_thread_state_init_from_current (MonoThreadUnwindState *ctx);
gboolean mono_thread_state_init_from_monoctx (MonoThreadUnwindState *ctx, MonoContext *mctx);
MonoReflectionMethod **method,
gint32 *iloffset, gint32 *native_offset,
MonoString **file, gint32 *line, gint32 *column);
-MonoString *ves_icall_System_Exception_get_trace (MonoException *exc);
void mono_set_cast_details (MonoClass *from, MonoClass *to);
/* Installs a function which is called when the runtime encounters an unhandled exception.