#include "mini-arm.h"
#include "cpu-arm.h"
#include "trace.h"
+#include "ir-emit.h"
#ifdef ARM_FPU_FPA
#include "mono/arch/arm/arm-fpa-codegen.h"
#elif defined(ARM_FPU_VFP)
#include "mono/arch/arm/arm-vfp-codegen.h"
#endif
+#if defined(__ARM_EABI__) && defined(__linux__) && !defined(PLATFORM_ANDROID)
+#define HAVE_AEABI_READ_TP 1
+#endif
+
+static gint lmf_tls_offset = -1;
+static gint lmf_addr_tls_offset = -1;
+
/* This mutex protects architecture specific caches */
#define mono_mini_arch_lock() EnterCriticalSection (&mini_arch_mutex)
#define mono_mini_arch_unlock() LeaveCriticalSection (&mini_arch_mutex)
#ifdef ARM_FPU_FPA
if (ins->dreg != ARM_FPA_F0)
ARM_MVFD (code, ins->dreg, ARM_FPA_F0);
+#elif defined(ARM_FPU_VFP)
+ if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4) {
+ ARM_FMSR (code, ins->dreg, ARMREG_R0);
+ ARM_CVTS (code, ins->dreg, ins->dreg);
+ } else {
+ ARM_FMDRR (code, ARMREG_R0, ARMREG_R1, ins->dreg);
+ }
#endif
break;
}
return NULL;
}
-gpointer*
-mono_arch_get_vcall_slot_addr (guint8* code, gpointer *regs)
+#define MAX_ARCH_DELEGATE_PARAMS 3
+
+static gpointer
+get_delegate_invoke_impl (gboolean has_target, gboolean param_count, guint32 *code_size)
{
- gpointer vt;
- int displacement;
- vt = mono_arch_get_vcall_slot (code, regs, &displacement);
- if (!vt)
- return NULL;
- return (gpointer*)((char*)vt + displacement);
+ guint8 *code, *start;
+
+ if (has_target) {
+ start = code = mono_global_codeman_reserve (12);
+
+ /* Replace the this argument with the target */
+ ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
+ ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, target));
+ ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
+
+ g_assert ((code - start) <= 12);
+
+ mono_arch_flush_icache (start, 12);
+ } else {
+ int size, i;
+
+ size = 8 + param_count * 4;
+ start = code = mono_global_codeman_reserve (size);
+
+ ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
+ /* slide down the arguments */
+ for (i = 0; i < param_count; ++i) {
+ ARM_MOV_REG_REG (code, (ARMREG_R0 + i), (ARMREG_R0 + i + 1));
+ }
+ ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
+
+ g_assert ((code - start) <= size);
+
+ mono_arch_flush_icache (start, size);
+ }
+
+ if (code_size)
+ *code_size = code - start;
+
+ return start;
}
-#define MAX_ARCH_DELEGATE_PARAMS 3
+/*
+ * mono_arch_get_delegate_invoke_impls:
+ *
+ * Return a list of MonoAotTrampInfo structures for the delegate invoke impl
+ * trampolines.
+ */
+GSList*
+mono_arch_get_delegate_invoke_impls (void)
+{
+ GSList *res = NULL;
+ guint8 *code;
+ guint32 code_len;
+ int i;
+
+ code = get_delegate_invoke_impl (TRUE, 0, &code_len);
+ res = g_slist_prepend (res, mono_aot_tramp_info_create (g_strdup ("delegate_invoke_impl_has_target"), code, code_len));
+
+ for (i = 0; i < MAX_ARCH_DELEGATE_PARAMS; ++i) {
+ code = get_delegate_invoke_impl (FALSE, i, &code_len);
+ res = g_slist_prepend (res, mono_aot_tramp_info_create (g_strdup_printf ("delegate_invoke_impl_target_%d", i), code, code_len));
+ }
+
+ return res;
+}
gpointer
mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
mono_mini_arch_unlock ();
return cached;
}
-
- start = code = mono_global_codeman_reserve (12);
-
- /* Replace the this argument with the target */
- ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
- ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, target));
- ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
- g_assert ((code - start) <= 12);
-
- mono_arch_flush_icache (code, 12);
+ if (mono_aot_only)
+ start = mono_aot_get_named_code ("delegate_invoke_impl_has_target");
+ else
+ start = get_delegate_invoke_impl (TRUE, 0, NULL);
cached = start;
mono_mini_arch_unlock ();
return cached;
} else {
static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
- int size, i;
+ int i;
if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
return NULL;
return code;
}
- size = 8 + sig->param_count * 4;
- start = code = mono_global_codeman_reserve (size);
-
- ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
- /* slide down the arguments */
- for (i = 0; i < sig->param_count; ++i) {
- ARM_MOV_REG_REG (code, (ARMREG_R0 + i), (ARMREG_R0 + i + 1));
+ if (mono_aot_only) {
+ char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count);
+ start = mono_aot_get_named_code (name);
+ g_free (name);
+ } else {
+ start = get_delegate_invoke_impl (FALSE, sig->param_count, NULL);
}
- ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
-
- g_assert ((code - start) <= size);
-
- mono_arch_flush_icache (code, size);
cache [sig->param_count] = start;
mono_mini_arch_unlock ();
return start;
return 2;
}
+#ifndef __GNUC_PREREQ
+#define __GNUC_PREREQ(maj, min) (0)
+#endif
+
void
mono_arch_flush_icache (guint8 *code, gint size)
{
#if __APPLE__
sys_icache_invalidate (code, size);
+#elif __GNUC_PREREQ(4, 1)
+ __clear_cache (code, code + size);
+#elif defined(PLATFORM_ANDROID)
+ const int syscall = 0xf0002;
+ __asm __volatile (
+ "mov r0, %0\n"
+ "mov r1, %1\n"
+ "mov r7, %2\n"
+ "mov r2, #0x0\n"
+ "svc 0x00000000\n"
+ :
+ : "r" (code), "r" (code + size), "r" (syscall)
+ : "r0", "r1", "r7"
+ );
#else
__asm __volatile ("mov r0, %0\n"
"mov r1, %1\n"
MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
return;
}
+#elif defined(ARM_FPU_VFP)
+ if (ret->type == MONO_TYPE_R8 || ret->type == MONO_TYPE_R4) {
+ MonoInst *ins;
+
+ MONO_INST_NEW (cfg, ins, OP_SETFRET);
+ ins->dreg = cfg->ret->dreg;
+ ins->sreg1 = val->dreg;
+ MONO_ADD_INS (cfg->cbb, ins);
+ return;
+ }
+#else
+ if (ret->type == MONO_TYPE_R4 || ret->type == MONO_TYPE_R8) {
+ MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
+ return;
+ }
#endif
}
MonoInst *ins, *temp, *last_ins = NULL;
int rot_amount, imm8, low_imm;
- /* setup the virtual reg allocator */
- if (bb->max_vreg > cfg->rs->next_vreg)
- cfg->rs->next_vreg = bb->max_vreg;
-
MONO_BB_FOR_EACH_INS (bb, ins) {
loop_start:
switch (ins->opcode) {
if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount)) < 0) {
NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_imm;
- temp->dreg = mono_regstate_next_int (cfg->rs);
+ temp->dreg = mono_alloc_ireg (cfg);
ins->sreg2 = temp->dreg;
ins->opcode = mono_op_imm_to_op (ins->opcode);
}
- break;
+ if (ins->opcode == OP_SBB || ins->opcode == OP_ISBB || ins->opcode == OP_SUBCC)
+ goto loop_start;
+ else
+ break;
case OP_MUL_IMM:
case OP_IMUL_IMM:
if (ins->inst_imm == 1) {
}
NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_imm;
- temp->dreg = mono_regstate_next_int (cfg->rs);
+ temp->dreg = mono_alloc_ireg (cfg);
ins->sreg2 = temp->dreg;
ins->opcode = OP_IMUL;
break;
+ case OP_SBB:
+ case OP_ISBB:
+ case OP_SUBCC:
+ case OP_ISUBCC:
+ if (ins->next && (ins->next->opcode == OP_COND_EXC_C || ins->next->opcode == OP_COND_EXC_IC))
+ /* ARM sets the C flag to 1 if there was _no_ overflow */
+ ins->next->opcode = OP_COND_EXC_NC;
+ break;
case OP_LOCALLOC_IMM:
NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_imm;
- temp->dreg = mono_regstate_next_int (cfg->rs);
+ temp->dreg = mono_alloc_ireg (cfg);
ins->sreg1 = temp->dreg;
ins->opcode = OP_LOCALLOC;
break;
break;
NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_offset;
- temp->dreg = mono_regstate_next_int (cfg->rs);
+ temp->dreg = mono_alloc_ireg (cfg);
ins->sreg2 = temp->dreg;
ins->opcode = map_to_reg_reg_op (ins->opcode);
break;
break;
NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_offset;
- temp->dreg = mono_regstate_next_int (cfg->rs);
+ temp->dreg = mono_alloc_ireg (cfg);
ins->sreg2 = temp->dreg;
ins->opcode = map_to_reg_reg_op (ins->opcode);
break;
NEW_INS (cfg, temp, OP_ADD_IMM);
temp->inst_imm = ins->inst_offset & ~0x1ff;
temp->sreg1 = ins->inst_basereg;
- temp->dreg = mono_regstate_next_int (cfg->rs);
+ temp->dreg = mono_alloc_ireg (cfg);
ins->inst_basereg = temp->dreg;
ins->inst_offset = low_imm;
break;
break;
NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_offset;
- temp->dreg = mono_regstate_next_int (cfg->rs);
+ temp->dreg = mono_alloc_ireg (cfg);
ins->sreg2 = temp->dreg;
ins->opcode = map_to_reg_reg_op (ins->opcode);
break;
break;
NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_offset;
- temp->dreg = mono_regstate_next_int (cfg->rs);
+ temp->dreg = mono_alloc_ireg (cfg);
ins->sreg2 = temp->dreg;
ins->opcode = map_to_reg_reg_op (ins->opcode);
break;
NEW_INS (cfg, temp, OP_ADD_IMM);
temp->inst_imm = ins->inst_offset & ~0x1ff;
temp->sreg1 = ins->inst_destbasereg;
- temp->dreg = mono_regstate_next_int (cfg->rs);
+ temp->dreg = mono_alloc_ireg (cfg);
ins->inst_destbasereg = temp->dreg;
ins->inst_offset = low_imm;
break;
case OP_STOREI4_MEMBASE_IMM:
NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_imm;
- temp->dreg = mono_regstate_next_int (cfg->rs);
+ temp->dreg = mono_alloc_ireg (cfg);
ins->sreg1 = temp->dreg;
ins->opcode = map_to_reg_reg_op (ins->opcode);
last_ins = temp;
last_ins = ins;
}
bb->last_ins = last_ins;
- bb->max_vreg = cfg->rs->next_vreg;
-
+ bb->max_vreg = cfg->next_vreg;
}
static guchar*
pdata.found = 0;
mono_domain_lock (domain);
- mono_code_manager_foreach (domain->code_mp, search_thunk_slot, &pdata);
+ mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
if (!pdata.found) {
/* this uses the first available slot */
pdata.found = 2;
- mono_code_manager_foreach (domain->code_mp, search_thunk_slot, &pdata);
+ mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
}
mono_domain_unlock (domain);
// FIXME:
NOT_IMPLEMENTED;
} else if (ainfo->regtype == RegTypeBase) {
- // FIXME:
- NOT_IMPLEMENTED;
+ /* Nothing to do */
} else if (ainfo->regtype == RegTypeFP) {
g_assert_not_reached ();
} else if (ainfo->regtype == RegTypeStructByVal) {
case OP_MEMORY_BARRIER:
break;
case OP_TLS_GET:
+#ifdef HAVE_AEABI_READ_TP
+ mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
+ (gpointer)"__aeabi_read_tp");
+ code = emit_call_seq (cfg, code);
+
+ ARM_LDR_IMM (code, ins->dreg, ARMREG_R0, ins->inst_offset);
+#else
g_assert_not_reached ();
+#endif
break;
/*case OP_BIGMUL:
ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
case OP_IMUL_OVF:
/* FIXME: handle ovf/ sreg2 != dreg */
ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
+ /* FIXME: MUL doesn't set the C/O flags on ARM */
break;
case OP_IMUL_OVF_UN:
/* FIXME: handle ovf/ sreg2 != dreg */
ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
+ /* FIXME: MUL doesn't set the C/O flags on ARM */
break;
case OP_ICONST:
code = mono_arm_emit_load_imm (code, ins->dreg, ins->inst_c0);
code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage);
ARM_POP_NWB (code, cfg->used_int_regs | ((1 << ARMREG_SP)) | ((1 << ARMREG_LR)));
mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
- ARM_B (code, 0);
+ if (cfg->compile_aot) {
+ ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
+ ARM_B (code, 0);
+ *(gpointer*)code = NULL;
+ code += 4;
+ ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_IP);
+ } else {
+ ARM_B (code, 0);
+ }
break;
case OP_CHECK_THIS:
/* ensure ins->sreg1 is not NULL */
}
code = emit_move_return_value (cfg, ins, code);
break;
- case OP_OUTARG:
- g_assert_not_reached ();
- break;
case OP_LOCALLOC: {
/* keep alignment */
int alloca_waste = cfg->param_area;
EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_IEQ, ins->inst_p1);
break;
case OP_COND_EXC_C:
- case OP_COND_EXC_OV:
- case OP_COND_EXC_NC:
- case OP_COND_EXC_NO:
case OP_COND_EXC_IC:
+ EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CS, ins->inst_p1);
+ break;
+ case OP_COND_EXC_OV:
case OP_COND_EXC_IOV:
+ EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, ins->inst_p1);
+ break;
+ case OP_COND_EXC_NC:
case OP_COND_EXC_INC:
+ EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CC, ins->inst_p1);
+ break;
+ case OP_COND_EXC_NO:
case OP_COND_EXC_INO:
- /* FIXME: */
+ EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VC, ins->inst_p1);
break;
case OP_IBEQ:
case OP_IBNE_UN:
/* This is generated by the local regalloc pass which runs after the lowering pass */
if (!arm_is_fpimm8 (ins->inst_offset)) {
code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
+ ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_destbasereg);
ARM_STFD (code, ins->sreg1, ARMREG_LR, 0);
} else {
ARM_STFD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
/* This is generated by the local regalloc pass which runs after the lowering pass */
if (!arm_is_fpimm8 (ins->inst_offset)) {
code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
+ ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_basereg);
ARM_LDFD (code, ins->dreg, ARMREG_LR, 0);
} else {
ARM_LDFD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
case OP_ICONV_TO_R8:
ARM_FLTD (code, ins->dreg, ins->sreg1);
break;
+
#elif defined(ARM_FPU_VFP)
+
case OP_R8CONST:
if (cfg->compile_aot) {
ARM_FLDD (code, ins->dreg, ARMREG_PC, 0);
}
break;
case OP_STORER8_MEMBASE_REG:
- g_assert (arm_is_fpimm8 (ins->inst_offset));
- ARM_FSTD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
+ /* This is generated by the local regalloc pass which runs after the lowering pass */
+ if (!arm_is_fpimm8 (ins->inst_offset)) {
+ code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
+ ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_destbasereg);
+ ARM_FSTD (code, ins->sreg1, ARMREG_LR, 0);
+ } else {
+ ARM_FSTD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
+ }
break;
case OP_LOADR8_MEMBASE:
- g_assert (arm_is_fpimm8 (ins->inst_offset));
- ARM_FLDD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
+ /* This is generated by the local regalloc pass which runs after the lowering pass */
+ if (!arm_is_fpimm8 (ins->inst_offset)) {
+ code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
+ ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_basereg);
+ ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
+ } else {
+ ARM_FLDD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
+ }
break;
case OP_STORER4_MEMBASE_REG:
g_assert (arm_is_fpimm8 (ins->inst_offset));
- ARM_FSTS (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
+ ARM_CVTD (code, ARM_VFP_F0, ins->sreg1);
+ ARM_FSTS (code, ARM_VFP_F0, ins->inst_destbasereg, ins->inst_offset);
break;
case OP_LOADR4_MEMBASE:
g_assert (arm_is_fpimm8 (ins->inst_offset));
- ARM_FLDS (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
+ ARM_FLDS (code, ARM_VFP_F0, ins->inst_basereg, ins->inst_offset);
+ ARM_CVTS (code, ins->dreg, ARM_VFP_F0);
break;
case OP_ICONV_TO_R_UN: {
g_assert_not_reached ();
break;
}
case OP_ICONV_TO_R4:
- g_assert_not_reached ();
- //ARM_FLTS (code, ins->dreg, ins->sreg1);
+ ARM_FMSR (code, ARM_VFP_F0, ins->sreg1);
+ ARM_FSITOS (code, ARM_VFP_F0, ARM_VFP_F0);
+ ARM_CVTS (code, ins->dreg, ARM_VFP_F0);
break;
case OP_ICONV_TO_R8:
- g_assert_not_reached ();
- //ARM_FLTD (code, ins->dreg, ins->sreg1);
+ ARM_FMSR (code, ARM_VFP_F0, ins->sreg1);
+ ARM_FSITOD (code, ins->dreg, ARM_VFP_F0);
+ break;
+
+ case OP_SETFRET:
+ if (mono_method_signature (cfg->method)->ret->type == MONO_TYPE_R4) {
+ ARM_CVTD (code, ARM_VFP_F0, ins->sreg1);
+ ARM_FMRS (code, ARMREG_R0, ARM_VFP_F0);
+ } else {
+ ARM_FMRRD (code, ARMREG_R0, ARMREG_R1, ins->sreg1);
+ }
break;
+
#endif
+
case OP_FCONV_TO_I1:
code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
break;
ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
#elif defined(ARM_FPU_VFP)
ARM_CMPD (code, ins->sreg1, ins->sreg2);
+ ARM_FMSTAT (code);
#endif
break;
case OP_FCEQ:
ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
#elif defined(ARM_FPU_VFP)
ARM_CMPD (code, ins->sreg1, ins->sreg2);
+ ARM_FMSTAT (code);
#endif
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
#elif defined(ARM_FPU_VFP)
ARM_CMPD (code, ins->sreg1, ins->sreg2);
+ ARM_FMSTAT (code);
#endif
ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
#elif defined(ARM_FPU_VFP)
ARM_CMPD (code, ins->sreg1, ins->sreg2);
+ ARM_FMSTAT (code);
#endif
ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
#elif defined(ARM_FPU_VFP)
ARM_CMPD (code, ins->sreg2, ins->sreg1);
+ ARM_FMSTAT (code);
#endif
ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
#elif defined(ARM_FPU_VFP)
ARM_CMPD (code, ins->sreg2, ins->sreg1);
+ ARM_FMSTAT (code);
#endif
ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
g_assert_not_reached ();
break;
case OP_FBGE:
+ /* FIXME does VFP requires both conds?
+ * FPA requires EQ even thou the docs suggests that just CS is enough
+ */
+ EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_EQ);
EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_CS);
break;
case OP_FBGE_UN:
#ifdef ARM_FPU_FPA
if (ins->dreg != ins->sreg1)
ARM_MVFD (code, ins->dreg, ins->sreg1);
-#else
- g_assert_not_reached ();
+#elif defined(ARM_FPU_VFP)
+ ARM_CPYD (code, ins->dreg, ins->sreg1);
#endif
break;
}
#endif /* DISABLE_JIT */
+#ifdef HAVE_AEABI_READ_TP
+void __aeabi_read_tp (void);
+#endif
+
void
mono_arch_register_lowlevel_calls (void)
{
/* The signature doesn't matter */
mono_register_jit_icall (mono_arm_throw_exception, "mono_arm_throw_exception", mono_create_icall_signature ("void"), TRUE);
+ mono_register_jit_icall (mono_arm_throw_exception_by_token, "mono_arm_throw_exception_by_token", mono_create_icall_signature ("void"), TRUE);
+
+#ifdef HAVE_AEABI_READ_TP
+ mono_register_jit_icall (__aeabi_read_tp, "__aeabi_read_tp", mono_create_icall_signature ("void"), TRUE);
+#endif
}
#define patch_lis_ori(ip,val) do {\
CallInfo *cinfo;
int tracing = 0;
int lmf_offset = 0;
- int prev_sp_offset;
+ int prev_sp_offset, reg_offset;
if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
tracing = 1;
cfg->code_size = 256 + sig->param_count * 20;
code = cfg->native_code = g_malloc (cfg->code_size);
+ mono_emit_unwind_op_def_cfa (cfg, code, ARMREG_SP, 0);
+
ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_SP);
alloc_size = cfg->stack_offset;
pos = 0;
if (!method->save_lmf) {
+ /* We save SP by storing it into IP and saving IP */
ARM_PUSH (code, (cfg->used_int_regs | (1 << ARMREG_IP) | (1 << ARMREG_LR)));
prev_sp_offset = 8; /* ip and lr */
for (i = 0; i < 16; ++i) {
if (cfg->used_int_regs & (1 << i))
prev_sp_offset += 4;
}
+ mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
+ reg_offset = 0;
+ for (i = 0; i < 16; ++i) {
+ if ((cfg->used_int_regs & (1 << i)) || (i == ARMREG_IP) || (i == ARMREG_LR)) {
+ mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
+ reg_offset += 4;
+ }
+ }
} else {
ARM_PUSH (code, 0x5ff0);
prev_sp_offset = 4 * 10; /* all but r0-r3, sp and pc */
+ mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
+ reg_offset = 0;
+ for (i = 0; i < 16; ++i) {
+ if ((i > ARMREG_R3) && (i != ARMREG_SP) && (i != ARMREG_PC)) {
+ mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
+ reg_offset += 4;
+ }
+ }
pos += sizeof (MonoLMF) - prev_sp_offset;
lmf_offset = pos;
}
code = mono_arm_emit_load_imm (code, ARMREG_IP, alloc_size);
ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
}
+ mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset + alloc_size);
}
- if (cfg->frame_reg != ARMREG_SP)
+ if (cfg->frame_reg != ARMREG_SP) {
ARM_MOV_REG_REG (code, cfg->frame_reg, ARMREG_SP);
+ mono_emit_unwind_op_def_cfa_reg (cfg, code, cfg->frame_reg);
+ }
//g_print ("prev_sp_offset: %d, alloc_size:%d\n", prev_sp_offset, alloc_size);
prev_sp_offset += alloc_size;
}
if (method->save_lmf) {
+ gboolean get_lmf_fast = FALSE;
- mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
- (gpointer)"mono_get_lmf_addr");
- code = emit_call_seq (cfg, code);
+#ifdef HAVE_AEABI_READ_TP
+ gint32 lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
+
+ if (lmf_addr_tls_offset != -1) {
+ get_lmf_fast = TRUE;
+
+ mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
+ (gpointer)"__aeabi_read_tp");
+ code = emit_call_seq (cfg, code);
+
+ ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, lmf_addr_tls_offset);
+ get_lmf_fast = TRUE;
+ }
+#endif
+ if (!get_lmf_fast) {
+ mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
+ (gpointer)"mono_get_lmf_addr");
+ code = emit_call_seq (cfg, code);
+ }
/* we build the MonoLMF structure on the stack - see mini-arm.h */
/* lmf_offset is the offset from the previous stack pointer,
* alloc_size is the total stack space allocated, so the offset
}
+static gboolean tls_offset_inited = FALSE;
+
void
mono_arch_setup_jit_tls_data (MonoJitTlsData *tls)
{
+ if (!tls_offset_inited) {
+ tls_offset_inited = TRUE;
+
+ lmf_tls_offset = mono_get_lmf_tls_offset ();
+ lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
+ }
}
void
return 0;
}
-MonoInst* mono_arch_get_domain_intrinsic (MonoCompile* cfg)
+MonoInst*
+mono_arch_get_domain_intrinsic (MonoCompile* cfg)
{
- return NULL;
+ return mono_get_domain_intrinsic (cfg);
}
-MonoInst*
+MonoInst*
mono_arch_get_thread_intrinsic (MonoCompile* cfg)
{
- return NULL;
+ return mono_get_thread_intrinsic (cfg);
}
guint32
void
mono_arch_fixup_jinfo (MonoCompile *cfg)
{
- /* max encoded stack usage is 64KB * 4 */
- g_assert ((cfg->stack_usage & ~(0xffff << 2)) == 0);
- cfg->jit_info->used_regs |= cfg->stack_usage << 14;
}
#ifdef MONO_ARCH_HAVE_IMT
mono_arch_emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
{
if (cfg->compile_aot) {
- int method_reg = mono_regstate_next_int (cfg->rs);
+ int method_reg = mono_alloc_ireg (cfg);
MonoInst *ins;
call->dynamic_imt_arg = TRUE;
{
int size, i, extra_space = 0;
arminstr_t *code, *start, *vtable_target = NULL;
+ gboolean large_offsets = FALSE;
+ guint32 **constant_pool_starts;
+
size = BASE_SIZE;
+ constant_pool_starts = g_new0 (guint32*, count);
- g_assert (!fail_tramp);
+ /*
+ * We might be called with a fail_tramp from the IMT builder code even if
+ * MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK is not defined.
+ */
+ //g_assert (!fail_tramp);
for (i = 0; i < count; ++i) {
MonoIMTCheckItem *item = imt_entries [i];
if (item->is_equals) {
- g_assert (arm_is_imm12 (DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot])));
+ if (!arm_is_imm12 (DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]))) {
+ item->chunk_size += 32;
+ large_offsets = TRUE;
+ }
if (item->check_target_idx) {
if (!item->compare_done)
size += item->chunk_size;
}
- start = code = mono_code_manager_reserve (domain->code_mp, size);
+ if (large_offsets)
+ size += 4 * count; /* The ARM_ADD_REG_IMM to pop the stack */
+
+ start = code = mono_domain_code_reserve (domain, size);
#if DEBUG_IMT
printf ("building IMT thunk for class %s %s entries %d code size %d code at %p end %p vtable %p\n", vtable->klass->name_space, vtable->klass->name, count, size, start, ((guint8*)start) + size, vtable);
}
#endif
- ARM_PUSH2 (code, ARMREG_R0, ARMREG_R1);
+ if (large_offsets)
+ ARM_PUSH4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
+ else
+ ARM_PUSH2 (code, ARMREG_R0, ARMREG_R1);
ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, -4);
vtable_target = code;
ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
for (i = 0; i < count; ++i) {
MonoIMTCheckItem *item = imt_entries [i];
- arminstr_t *imt_method = NULL;
+ arminstr_t *imt_method = NULL, *vtable_offset_ins = NULL;
+ gint32 vtable_offset;
+
item->code_target = (guint8*)code;
if (item->is_equals) {
}
item->jmp_code = (guint8*)code;
ARM_B_COND (code, ARMCOND_NE, 0);
-
- ARM_POP2 (code, ARMREG_R0, ARMREG_R1);
- ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]));
} else {
/*Enable the commented code to assert on wrong method*/
#if ENABLE_WRONG_METHOD_CHECK
ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
ARM_B_COND (code, ARMCOND_NE, 1);
-#endif
- ARM_POP2 (code, ARMREG_R0, ARMREG_R1);
- ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]));
-#if ENABLE_WRONG_METHOD_CHECK
ARM_DBRK (code);
#endif
}
+ vtable_offset = DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]);
+ if (!arm_is_imm12 (vtable_offset)) {
+ /*
+ * We need to branch to a computed address but we don't have
+ * a free register to store it, since IP must contain the
+ * vtable address. So we push the two values to the stack, and
+ * load them both using LDM.
+ */
+ /* Compute target address */
+ vtable_offset_ins = code;
+ ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
+ ARM_LDR_REG_REG (code, ARMREG_R1, ARMREG_IP, ARMREG_R1);
+ /* Save it to the fourth slot */
+ ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
+ /* Restore registers and branch */
+ ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
+
+ code = arm_emit_value_and_patch_ldr (code, vtable_offset_ins, vtable_offset);
+ } else {
+ ARM_POP2 (code, ARMREG_R0, ARMREG_R1);
+ if (large_offsets)
+ ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 2 * sizeof (gpointer));
+ ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, vtable_offset);
+ }
+
if (imt_method)
code = arm_emit_value_and_patch_ldr (code, imt_method, (guint32)item->key);
}
/*We reserve the space for bsearch IMT values after the first entry with an absolute jump*/
+ constant_pool_starts [i] = code;
if (extra_space) {
code += extra_space;
extra_space = 0;
}
if (i > 0 && item->is_equals) {
int j;
- arminstr_t *space_start = (arminstr_t*)(item->code_target + item->chunk_size);
+ arminstr_t *space_start = constant_pool_starts [i];
for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j) {
space_start = arm_emit_value_and_patch_ldr (space_start, (arminstr_t*)imt_entries [j]->code_target, (guint32)imt_entries [j]->key);
}
}
#endif
+ g_free (constant_pool_starts);
+
mono_arch_flush_icache ((guint8*)start, size);
mono_stats.imt_thunks_size += code - start;
gpointer
mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
{
- return ctx->regs [reg];
+ if (reg >= 4 && reg <= 11)
+ return (gpointer)ctx->regs [reg - 4];
+ else if (reg == ARMREG_IP)
+ return (gpointer)ctx->regs [8];
+ else if (reg == ARMREG_LR)
+ return (gpointer)ctx->regs [9];
+ else {
+ g_assert_not_reached ();
+ return NULL;
+ }
}