#define mono_mini_arch_unlock() mono_mutex_unlock (&mini_arch_mutex)
static mono_mutex_t mini_arch_mutex;
-MonoBreakpointInfo
-mono_breakpoint_info [MONO_BREAKPOINT_ARRAY_SIZE];
-
/*
* The code generated for sequence points reads from this location, which is
* made read-only when single stepping is enabled.
return code [0] == 0xe8;
}
+static inline gboolean
+amd64_use_imm32 (gint64 val)
+{
+ if (mini_get_debug_options()->single_imm_size)
+ return FALSE;
+
+ return amd64_is_imm32 (val);
+}
+
#ifdef __native_client_codegen__
/* Keep track of instruction "depth", that is, the level of sub-instruction */
break;
case OP_COMPARE_IMM:
case OP_LCOMPARE_IMM:
- if (!amd64_is_imm32 (ins->inst_imm)) {
+ if (!amd64_use_imm32 (ins->inst_imm)) {
NEW_INS (cfg, ins, temp, OP_I8CONST);
temp->inst_c0 = ins->inst_imm;
temp->dreg = mono_alloc_ireg (cfg);
#ifndef __native_client_codegen__
/* Don't generate memindex opcodes (to simplify */
/* read sandboxing) */
- if (!amd64_is_imm32 (ins->inst_offset)) {
+ if (!amd64_use_imm32 (ins->inst_offset)) {
NEW_INS (cfg, ins, temp, OP_I8CONST);
temp->inst_c0 = ins->inst_offset;
temp->dreg = mono_alloc_ireg (cfg);
case OP_STORE_MEMBASE_IMM:
#endif
case OP_STOREI8_MEMBASE_IMM:
- if (!amd64_is_imm32 (ins->inst_imm)) {
+ if (!amd64_use_imm32 (ins->inst_imm)) {
NEW_INS (cfg, ins, temp, OP_I8CONST);
temp->inst_c0 = ins->inst_imm;
temp->dreg = mono_alloc_ireg (cfg);
#endif
case OP_LOADI8_MEM:
// FIXME: Decompose this earlier
- if (amd64_is_imm32 (ins->inst_imm))
+ if (amd64_use_imm32 (ins->inst_imm))
amd64_mov_reg_mem (code, ins->dreg, ins->inst_imm, 8);
else {
- amd64_mov_reg_imm (code, ins->dreg, ins->inst_imm);
+ amd64_mov_reg_imm_size (code, ins->dreg, ins->inst_imm, sizeof(gpointer));
amd64_mov_reg_membase (code, ins->dreg, ins->dreg, 0, 8);
}
break;
break;
case OP_LOADU4_MEM:
// FIXME: Decompose this earlier
- if (amd64_is_imm32 (ins->inst_imm))
+ if (amd64_use_imm32 (ins->inst_imm))
amd64_mov_reg_mem (code, ins->dreg, ins->inst_imm, 4);
else {
- amd64_mov_reg_imm (code, ins->dreg, ins->inst_imm);
+ amd64_mov_reg_imm_size (code, ins->dreg, ins->inst_imm, sizeof(gpointer));
amd64_mov_reg_membase (code, ins->dreg, ins->dreg, 0, 4);
}
break;
case OP_ICONST:
case OP_I8CONST:
- if ((((guint64)ins->inst_c0) >> 32) == 0)
+ if ((((guint64)ins->inst_c0) >> 32) == 0 && !mini_get_debug_options()->single_imm_size)
amd64_mov_reg_imm_size (code, ins->dreg, ins->inst_c0, 4);
else
amd64_mov_reg_imm_size (code, ins->dreg, ins->inst_c0, 8);
gboolean
mono_arch_is_inst_imm (gint64 imm)
{
- return amd64_is_imm32 (imm);
+ return amd64_use_imm32 (imm);
}
/*
#define MAX_ARCH_DELEGATE_PARAMS 10
static gpointer
-get_delegate_invoke_impl (gboolean has_target, guint32 param_count, guint32 *code_len)
+get_delegate_invoke_impl (MonoTrampInfo **info, gboolean has_target, guint32 param_count)
{
guint8 *code, *start;
+ GSList *unwind_ops = NULL;
int i;
+ unwind_ops = mono_arch_get_cie_program ();
+
if (has_target) {
start = code = mono_global_codeman_reserve (64);
nacl_global_codeman_validate (&start, 64, &code);
mono_arch_flush_icache (start, code - start);
- if (code_len)
- *code_len = code - start;
+ if (has_target) {
+ *info = mono_tramp_info_create ("delegate_invoke_impl_has_target", start, code - start, NULL, unwind_ops);
+ } else {
+ char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", param_count);
+ *info = mono_tramp_info_create (name, start, code - start, NULL, unwind_ops);
+ g_free (name);
+ }
if (mono_jit_map_is_enabled ()) {
char *buff;
#define MAX_VIRTUAL_DELEGATE_OFFSET 32
static gpointer
-get_delegate_virtual_invoke_impl (gboolean load_imt_reg, int offset, guint32 *code_len)
+get_delegate_virtual_invoke_impl (MonoTrampInfo **info, gboolean load_imt_reg, int offset)
{
guint8 *code, *start;
int size = 20;
+ char *tramp_name;
+ GSList *unwind_ops;
- if (offset / sizeof (gpointer) > MAX_VIRTUAL_DELEGATE_OFFSET)
+ if (offset / (int)sizeof (gpointer) > MAX_VIRTUAL_DELEGATE_OFFSET)
return NULL;
start = code = mono_global_codeman_reserve (size);
+ unwind_ops = mono_arch_get_cie_program ();
+
/* Replace the this argument with the target */
amd64_mov_reg_reg (code, AMD64_RAX, AMD64_ARG_REG1, 8);
amd64_mov_reg_membase (code, AMD64_ARG_REG1, AMD64_RAX, MONO_STRUCT_OFFSET (MonoDelegate, target), 8);
amd64_jump_membase (code, AMD64_RAX, offset);
mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL);
- if (code_len)
- *code_len = code - start;
+ if (load_imt_reg)
+ tramp_name = g_strdup_printf ("delegate_virtual_invoke_imt_%d", - offset / (int)sizeof (gpointer));
+ else
+ tramp_name = g_strdup_printf ("delegate_virtual_invoke_%d", offset / (int)sizeof (gpointer));
+ *info = mono_tramp_info_create (tramp_name, start, code - start, NULL, unwind_ops);
+ g_free (tramp_name);
return start;
}
mono_arch_get_delegate_invoke_impls (void)
{
GSList *res = NULL;
- guint8 *code;
- guint32 code_len;
+ MonoTrampInfo *info;
int i;
- char *tramp_name;
- code = get_delegate_invoke_impl (TRUE, 0, &code_len);
- res = g_slist_prepend (res, mono_tramp_info_create ("delegate_invoke_impl_has_target", code, code_len, NULL, NULL));
+ get_delegate_invoke_impl (&info, TRUE, 0);
+ res = g_slist_prepend (res, info);
- for (i = 0; i < MAX_ARCH_DELEGATE_PARAMS; ++i) {
- code = get_delegate_invoke_impl (FALSE, i, &code_len);
- tramp_name = g_strdup_printf ("delegate_invoke_impl_target_%d", i);
- res = g_slist_prepend (res, mono_tramp_info_create (tramp_name, code, code_len, NULL, NULL));
- g_free (tramp_name);
+ for (i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) {
+ get_delegate_invoke_impl (&info, FALSE, i);
+ res = g_slist_prepend (res, info);
}
- for (i = 0; i < MAX_VIRTUAL_DELEGATE_OFFSET; ++i) {
- code = get_delegate_virtual_invoke_impl (TRUE, i * SIZEOF_VOID_P, &code_len);
- tramp_name = g_strdup_printf ("delegate_virtual_invoke_imt_%d", i);
- res = g_slist_prepend (res, mono_tramp_info_create (tramp_name, code, code_len, NULL, NULL));
- g_free (tramp_name);
+ for (i = 0; i <= MAX_VIRTUAL_DELEGATE_OFFSET; ++i) {
+ get_delegate_virtual_invoke_impl (&info, TRUE, - i * SIZEOF_VOID_P);
+ res = g_slist_prepend (res, info);
- code = get_delegate_virtual_invoke_impl (FALSE, i * SIZEOF_VOID_P, &code_len);
- tramp_name = g_strdup_printf ("delegate_virtual_invoke_%d", i);
- res = g_slist_prepend (res, mono_tramp_info_create (tramp_name, code, code_len, NULL, NULL));
- g_free (tramp_name);
+ get_delegate_virtual_invoke_impl (&info, FALSE, i * SIZEOF_VOID_P);
+ res = g_slist_prepend (res, info);
}
return res;
if (cached)
return cached;
- if (mono_aot_only)
+ if (mono_aot_only) {
start = mono_aot_get_trampoline ("delegate_invoke_impl_has_target");
- else
- start = get_delegate_invoke_impl (TRUE, 0, NULL);
+ } else {
+ MonoTrampInfo *info;
+ start = get_delegate_invoke_impl (&info, TRUE, 0);
+ mono_tramp_info_register (info, NULL);
+ }
mono_memory_barrier ();
start = mono_aot_get_trampoline (name);
g_free (name);
} else {
- start = get_delegate_invoke_impl (FALSE, sig->param_count, NULL);
+ MonoTrampInfo *info;
+ start = get_delegate_invoke_impl (&info, FALSE, sig->param_count);
+ mono_tramp_info_register (info, NULL);
}
mono_memory_barrier ();
gpointer
mono_arch_get_delegate_virtual_invoke_impl (MonoMethodSignature *sig, MonoMethod *method, int offset, gboolean load_imt_reg)
{
- return get_delegate_virtual_invoke_impl (load_imt_reg, offset, NULL);
+ MonoTrampInfo *info;
+ gpointer code;
+
+ code = get_delegate_virtual_invoke_impl (&info, load_imt_reg, offset);
+ if (code)
+ mono_tramp_info_register (info, NULL);
+ return code;
}
void
int size = 0;
guint8 *code, *start;
gboolean vtable_is_32bit = ((gsize)(vtable) == (gsize)(int)(gsize)(vtable));
+ GSList *unwind_ops;
for (i = 0; i < count; ++i) {
MonoIMTCheckItem *item = imt_entries [i];
if (item->is_equals) {
if (item->check_target_idx) {
if (!item->compare_done) {
- if (amd64_is_imm32 (item->key))
+ if (amd64_use_imm32 ((gint64)item->key))
item->chunk_size += CMP_SIZE;
else
item->chunk_size += MOV_REG_IMM_SIZE + CMP_REG_REG_SIZE;
}
}
} else {
- if (amd64_is_imm32 (item->key))
+ if (amd64_use_imm32 ((gint64)item->key))
item->chunk_size += CMP_SIZE;
else
item->chunk_size += MOV_REG_IMM_SIZE + CMP_REG_REG_SIZE;
code = mono_domain_code_reserve (domain, size);
#endif
start = code;
+
+ unwind_ops = mono_arch_get_cie_program ();
+
for (i = 0; i < count; ++i) {
MonoIMTCheckItem *item = imt_entries [i];
item->code_target = code;
if (item->check_target_idx || fail_case) {
if (!item->compare_done || fail_case) {
- if (amd64_is_imm32 (item->key))
+ if (amd64_use_imm32 ((gint64)item->key))
amd64_alu_reg_imm_size (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)(gssize)item->key, sizeof(gpointer));
else {
- amd64_mov_reg_imm (code, MONO_ARCH_IMT_SCRATCH_REG, item->key);
+ amd64_mov_reg_imm_size (code, MONO_ARCH_IMT_SCRATCH_REG, item->key, sizeof(gpointer));
amd64_alu_reg_reg (code, X86_CMP, MONO_ARCH_IMT_REG, MONO_ARCH_IMT_SCRATCH_REG);
}
}
#endif
}
} else {
- if (amd64_is_imm32 (item->key))
+ if (amd64_use_imm32 ((gint64)item->key))
amd64_alu_reg_imm_size (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)(gssize)item->key, sizeof (gpointer));
else {
- amd64_mov_reg_imm (code, MONO_ARCH_IMT_SCRATCH_REG, item->key);
+ amd64_mov_reg_imm_size (code, MONO_ARCH_IMT_SCRATCH_REG, item->key, sizeof (gpointer));
amd64_alu_reg_reg (code, X86_CMP, MONO_ARCH_IMT_REG, MONO_ARCH_IMT_SCRATCH_REG);
}
item->jmp_code = code;
nacl_domain_code_validate(domain, &start, size, &code);
mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_IMT_TRAMPOLINE, NULL);
+ mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, unwind_ops), domain);
+
return start;
}