#include <mono/metadata/appdomain.h>
#include <mono/metadata/debug-helpers.h>
-#include <mono/metadata/gc-internal.h>
#include <mono/utils/mono-mmap.h>
#include <mono/utils/mono-hwcap-arm.h>
void sys_icache_invalidate (void *start, size_t len);
#endif
-static gint lmf_tls_offset = -1;
-static gint lmf_addr_tls_offset = -1;
-
/* This mutex protects architecture specific caches */
#define mono_mini_arch_lock() EnterCriticalSection (&mini_arch_mutex)
#define mono_mini_arch_unlock() LeaveCriticalSection (&mini_arch_mutex)
static MonoArmFPU arm_fpu;
#if defined(ARM_FPU_VFP_HARD)
-static int vfp_scratch1 = ARM_VFP_F28;
-static int vfp_scratch2 = ARM_VFP_F30;
+/*
+ * On armhf, d0-d7 are used for argument passing and d8-d15
+ * must be preserved across calls, which leaves us no room
+ * for scratch registers. So we use d14-d15 but back up their
+ * previous contents to a stack slot before using them - see
+ * mono_arm_emit_vfp_scratch_save/_restore ().
+ */
+static int vfp_scratch1 = ARM_VFP_D14;
+static int vfp_scratch2 = ARM_VFP_D15;
#else
+/*
+ * On armel, d0-d7 do not need to be preserved, so we can
+ * freely make use of them as scratch registers.
+ */
static int vfp_scratch1 = ARM_VFP_D0;
static int vfp_scratch2 = ARM_VFP_D1;
#endif
}
#endif
-static guint8*
-emit_aotconst (MonoCompile *cfg, guint8 *start, guint8 *code, int dreg, int tramp_type, gconstpointer target)
-{
- /* Load the GOT offset */
- mono_add_patch_info (cfg, code - start, tramp_type, target);
- ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
- ARM_B (code, 0);
- *(gpointer*)code = NULL;
- code += 4;
- /* Load the value from the GOT */
- ARM_LDR_REG_REG (code, dreg, ARMREG_PC, dreg);
-
- return code;
-}
-
static guint8*
emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
{
for (list = inst->float_args; list; list = list->next) {
FloatArgData *fad = list->data;
MonoInst *var = get_vreg_to_inst (cfg, fad->vreg);
+ gboolean imm = arm_is_fpimm8 (var->inst_offset);
+
+ /* 4+1 insns for emit_big_add () and 1 for FLDS. */
+ if (!imm)
+ *max_len += 20 + 4;
*max_len += 4;
code = cfg->native_code + *offset;
}
- ARM_FLDS (code, fad->hreg, var->inst_basereg, var->inst_offset);
+ if (!imm) {
+ code = emit_big_add (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
+ ARM_FLDS (code, fad->hreg, ARMREG_LR, 0);
+ } else
+ ARM_FLDS (code, fad->hreg, var->inst_basereg, var->inst_offset);
*offset = code - cfg->native_code;
}
return code;
}
+/*
+ * mono_arm_emit_vfp_scratch_save:
+ *
+ *   Spill the VFP scratch register REG (vfp_scratch1 or vfp_scratch2) to its
+ * pre-allocated stack slot so that its previous contents survive our use of it.
+ * On armhf (IS_HARD_FLOAT) the scratch regs are d14/d15, which the AAPCS makes
+ * callee-saved, hence the spill; on armel the scratch regs are d0/d1 and no
+ * code is emitted.  Pairs with mono_arm_emit_vfp_scratch_restore ().
+ */
+static guint8 *
+mono_arm_emit_vfp_scratch_save (MonoCompile *cfg, guint8 *code, int reg)
+{
+	MonoInst *inst;
+
+	g_assert (reg == vfp_scratch1 || reg == vfp_scratch2);
+
+	/* Slot 0 backs vfp_scratch1, slot 1 backs vfp_scratch2 (allocated in mono_arch_create_vars ()). */
+	inst = (MonoInst *) cfg->arch.vfp_scratch_slots [reg == vfp_scratch1 ? 0 : 1];
+
+	if (IS_HARD_FLOAT) {
+		/* FSTD's immediate offset field is limited; build the address in LR when it doesn't fit. */
+		if (!arm_is_fpimm8 (inst->inst_offset)) {
+			code = emit_big_add (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
+			ARM_FSTD (code, reg, ARMREG_LR, 0);
+		} else
+			ARM_FSTD (code, reg, inst->inst_basereg, inst->inst_offset);
+	}
+
+	return code;
+}
+
+/*
+ * mono_arm_emit_vfp_scratch_restore:
+ *
+ *   Reload the VFP scratch register REG (vfp_scratch1 or vfp_scratch2) from
+ * the stack slot filled by mono_arm_emit_vfp_scratch_save (), undoing the
+ * spill.  As with the save, this is only needed on armhf (IS_HARD_FLOAT);
+ * on armel it emits nothing.
+ */
+static guint8 *
+mono_arm_emit_vfp_scratch_restore (MonoCompile *cfg, guint8 *code, int reg)
+{
+	MonoInst *inst;
+
+	g_assert (reg == vfp_scratch1 || reg == vfp_scratch2);
+
+	/* Slot 0 backs vfp_scratch1, slot 1 backs vfp_scratch2 (allocated in mono_arch_create_vars ()). */
+	inst = (MonoInst *) cfg->arch.vfp_scratch_slots [reg == vfp_scratch1 ? 0 : 1];
+
+	if (IS_HARD_FLOAT) {
+		/* FLDD's immediate offset field is limited; build the address in LR when it doesn't fit. */
+		if (!arm_is_fpimm8 (inst->inst_offset)) {
+			code = emit_big_add (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
+			ARM_FLDD (code, reg, ARMREG_LR, 0);
+		} else
+			ARM_FLDD (code, reg, inst->inst_basereg, inst->inst_offset);
+	}
+
+	return code;
+}
+
/*
- * emit_save_lmf:
+ * emit_restore_lmf:
*
* Emit code to pop an LMF structure from the LMF stack.
*/
mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
{
guint8 *code, *start;
+ MonoType *sig_ret;
/* FIXME: Support more cases */
- if (MONO_TYPE_ISSTRUCT (sig->ret))
+ sig_ret = mini_type_get_underlying_type (NULL, sig->ret);
+ if (MONO_TYPE_ISSTRUCT (sig_ret))
return NULL;
if (has_target) {
gr += n_in_regs;
nwords -= n_in_regs;
}
+ if (sig->call_convention == MONO_CALL_VARARG)
+ /* This matches the alignment in mono_ArgIterator_IntGetNextArg () */
+ stack_size = ALIGN_TO (stack_size, align);
ainfo->offset = stack_size;
/*g_print ("offset for arg %d at %d\n", n, stack_size);*/
stack_size += nwords * sizeof (gpointer);
return cinfo;
}
+
+/*
+ * mono_arch_tail_call_supported:
+ *
+ *   Decide whether a call from a method with CALLER_SIG to one with
+ * CALLEE_SIG can be compiled as OP_TAILCALL on ARM.  Rejects the call when
+ * the callee needs more argument stack than the caller provides, when the
+ * callee returns a struct through a hidden return-buffer argument, or when
+ * the callee's stack argument area exceeds 16 words.
+ */
+gboolean
+mono_arch_tail_call_supported (MonoCompile *cfg, MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig)
+{
+	MonoType *callee_ret;
+	CallInfo *c1, *c2;
+	gboolean res;
+
+	/* NOTE(review): regular AOT is rejected but full AOT is allowed through — confirm this asymmetry is intended. */
+	if (cfg->compile_aot && !cfg->full_aot)
+		/* OP_TAILCALL doesn't work with AOT */
+		return FALSE;
+
+	/* NULL mempool: the CallInfos are heap-allocated, hence the explicit g_free below. */
+	c1 = get_call_info (NULL, NULL, caller_sig);
+	c2 = get_call_info (NULL, NULL, callee_sig);
+
+	/*
+	 * Tail calls with more callee stack usage than the caller cannot be supported, since
+	 * the extra stack space would be left on the stack after the tail call.
+	 */
+	res = c1->stack_usage >= c2->stack_usage;
+	callee_ret = mini_replace_type (callee_sig->ret);
+	if (callee_ret && MONO_TYPE_ISSTRUCT (callee_ret) && c2->ret.storage != RegTypeStructByVal)
+		/* An address on the callee's stack is passed as the first argument */
+		res = FALSE;
+
+	/* 16 words max — presumably bounds the argument copy emitted for OP_TAILCALL; keep in sync with the emitter. */
+	if (c2->stack_usage > 16 * 4)
+		res = FALSE;
+
+	g_free (c1);
+	g_free (c2);
+
+	return res;
+}
+
#ifndef DISABLE_JIT
static gboolean
MonoMethodSignature *sig;
MonoMethodHeader *header;
MonoInst *ins;
+ MonoType *sig_ret;
int i, offset, size, align, curinst;
CallInfo *cinfo;
guint32 ualign;
if (!cfg->arch.cinfo)
cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
cinfo = cfg->arch.cinfo;
+ sig_ret = mini_replace_type (sig->ret);
mono_arch_compute_omit_fp (cfg);
offset = 0;
curinst = 0;
- if (!MONO_TYPE_ISSTRUCT (sig->ret) && !cinfo->vtype_retaddr) {
- if (sig->ret->type != MONO_TYPE_VOID) {
+ if (!MONO_TYPE_ISSTRUCT (sig_ret) && !cinfo->vtype_retaddr) {
+ if (sig_ret->type != MONO_TYPE_VOID) {
cfg->ret->opcode = OP_REGVAR;
cfg->ret->inst_c0 = ARMREG_R0;
}
offset += size;
}
+ if (cfg->has_atomic_exchange_i4 || cfg->has_atomic_cas_i4 || cfg->has_atomic_add_new_i4) {
+ /* Allocate a temporary used by the atomic ops */
+ size = 4;
+ align = 4;
+
+	/* Reserve an aligned stack slot for the atomic ops temporary */
+ offset += align - 1;
+ offset &= ~(align - 1);
+ cfg->arch.atomic_tmp_offset = offset;
+ offset += size;
+ } else {
+ cfg->arch.atomic_tmp_offset = -1;
+ }
+
cfg->locals_min_stack_offset = offset;
curinst = cfg->locals_start;
{
MonoMethodSignature *sig;
CallInfo *cinfo;
+ int i;
sig = mono_method_signature (cfg->method);
cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
cinfo = cfg->arch.cinfo;
+ if (IS_HARD_FLOAT) {
+ for (i = 0; i < 2; i++) {
+ MonoInst *inst = mono_compile_create_var (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL);
+ inst->flags |= MONO_INST_VOLATILE;
+
+ cfg->arch.vfp_scratch_slots [i] = (gpointer) inst;
+ }
+ }
+
if (cinfo->ret.storage == RegTypeStructByVal)
cfg->ret_var_is_local = TRUE;
{
ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
MonoMethodSignature *sig = ((ArchDynCallInfo*)info)->sig;
+ MonoType *ptype;
guint8 *ret = ((DynCallArgs*)buf)->ret;
mgreg_t res = ((DynCallArgs*)buf)->res;
mgreg_t res2 = ((DynCallArgs*)buf)->res2;
- switch (mono_type_get_underlying_type (sig->ret)->type) {
+ ptype = mini_type_get_underlying_type (NULL, sig->ret);
+ switch (ptype->type) {
case MONO_TYPE_VOID:
*(gpointer*)ret = NULL;
break;
((gint32*)ret) [1] = res2;
break;
case MONO_TYPE_GENERICINST:
- if (MONO_TYPE_IS_REFERENCE (sig->ret)) {
+ if (MONO_TYPE_IS_REFERENCE (ptype)) {
*(gpointer*)ret = (gpointer)res;
break;
} else {
SAVE_STRUCT,
SAVE_ONE,
SAVE_TWO,
- SAVE_FP
+ SAVE_ONE_FP,
+ SAVE_TWO_FP
};
void*
int save_mode = SAVE_NONE;
int offset;
MonoMethod *method = cfg->method;
- int rtype = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret)->type;
+ MonoType *ret_type = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret);
+ int rtype = ret_type->type;
int save_offset = cfg->param_area;
save_offset += 7;
save_offset &= ~7;
save_mode = SAVE_TWO;
break;
case MONO_TYPE_R4:
+ if (IS_HARD_FLOAT)
+ save_mode = SAVE_ONE_FP;
+ else
+ save_mode = SAVE_ONE;
+ break;
case MONO_TYPE_R8:
- save_mode = SAVE_FP;
+ if (IS_HARD_FLOAT)
+ save_mode = SAVE_TWO_FP;
+ else
+ save_mode = SAVE_TWO;
break;
+ case MONO_TYPE_GENERICINST:
+ if (!mono_type_generic_inst_is_valuetype (ret_type)) {
+ save_mode = SAVE_ONE;
+ break;
+ }
+ /* Fall through */
case MONO_TYPE_VALUETYPE:
save_mode = SAVE_STRUCT;
break;
ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
}
break;
- case SAVE_FP:
- /* FIXME: what reg? */
+ case SAVE_ONE_FP:
+ ARM_FSTS (code, ARM_VFP_F0, cfg->frame_reg, save_offset);
if (enable_arguments) {
- /* FIXME: what reg? */
+ ARM_FMRS (code, ARMREG_R1, ARM_VFP_F0);
+ }
+ break;
+ case SAVE_TWO_FP:
+ ARM_FSTD (code, ARM_VFP_D0, cfg->frame_reg, save_offset);
+ if (enable_arguments) {
+ ARM_FMDRR (code, ARMREG_R1, ARMREG_R2, ARM_VFP_D0);
}
break;
case SAVE_STRUCT:
case SAVE_ONE:
ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
break;
- case SAVE_FP:
- /* FIXME */
+ case SAVE_ONE_FP:
+ ARM_FLDS (code, ARM_VFP_F0, cfg->frame_reg, save_offset);
+ break;
+ case SAVE_TWO_FP:
+ ARM_FLDD (code, ARM_VFP_D0, cfg->frame_reg, save_offset);
break;
case SAVE_NONE:
default:
{
/* sreg is a float, dreg is an integer reg */
if (IS_VFP) {
+ code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
if (is_signed)
ARM_TOSIZD (code, vfp_scratch1, sreg);
else
ARM_TOUIZD (code, vfp_scratch1, sreg);
ARM_FMRS (code, dreg, vfp_scratch1);
+ code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
}
if (!is_signed) {
if (size == 1)
g_assert_not_reached ();
#endif
break;
- case OP_CARD_TABLE_WBARRIER: {
- int card_table_shift;
- gpointer card_table_mask;
- gboolean card_table_nursery_check = mono_gc_card_table_nursery_check ();
- int ptr = ins->sreg1;
- int value = ins->sreg2;
- guint8 *br = NULL;
-
- mono_gc_get_card_table (&card_table_shift, &card_table_mask);
-
- if (card_table_nursery_check) {
- code = emit_aotconst (cfg, cfg->native_code, code, ARMREG_LR, MONO_PATCH_INFO_NURSERY_START_SHIFTED, NULL);
- code = emit_aotconst (cfg, cfg->native_code, code, ARMREG_IP, MONO_PATCH_INFO_NURSERY_SHIFT, NULL);
- ARM_SHR_REG (code, ARMREG_IP, value, ARMREG_IP);
- ARM_CMP_REG_REG (code, ARMREG_LR, ARMREG_IP);
- br = code;
+ case OP_ATOMIC_EXCHANGE_I4:
+ case OP_ATOMIC_CAS_I4:
+ case OP_ATOMIC_ADD_NEW_I4: {
+ int tmpreg;
+ guint8 *buf [16];
+
+ g_assert (v7_supported);
+
+ /* Free up a reg */
+ if (ins->sreg1 != ARMREG_IP && ins->sreg2 != ARMREG_IP && ins->sreg3 != ARMREG_IP)
+ tmpreg = ARMREG_IP;
+ else if (ins->sreg1 != ARMREG_R0 && ins->sreg2 != ARMREG_R0 && ins->sreg3 != ARMREG_R0)
+ tmpreg = ARMREG_R0;
+ else if (ins->sreg1 != ARMREG_R1 && ins->sreg2 != ARMREG_R1 && ins->sreg3 != ARMREG_R1)
+ tmpreg = ARMREG_R1;
+ else
+ tmpreg = ARMREG_R2;
+ g_assert (cfg->arch.atomic_tmp_offset != -1);
+ ARM_STR_IMM (code, tmpreg, cfg->frame_reg, cfg->arch.atomic_tmp_offset);
+
+ switch (ins->opcode) {
+ case OP_ATOMIC_EXCHANGE_I4:
+ buf [0] = code;
+ ARM_DMB (code, ARM_DMB_SY);
+ ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
+ ARM_STREX_REG (code, tmpreg, ins->sreg2, ins->sreg1);
+ ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
+ buf [1] = code;
ARM_B_COND (code, ARMCOND_NE, 0);
- //ARM_B (code, 0);
- }
-
- code = emit_aotconst (cfg, cfg->native_code, code, ARMREG_LR, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
- ARM_SHR_IMM (code, ARMREG_IP, ptr, card_table_shift);
- if (card_table_mask) {
- imm8 = mono_arm_is_rotated_imm8 ((gsize)card_table_mask, &rot_amount);
- g_assert (imm8 >= 0);
- ARM_AND_REG_IMM (code, ARMREG_IP, ARMREG_IP, imm8, rot_amount);
+ arm_patch (buf [1], buf [0]);
+ break;
+ case OP_ATOMIC_CAS_I4:
+ ARM_DMB (code, ARM_DMB_SY);
+ buf [0] = code;
+ ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
+ ARM_CMP_REG_REG (code, ARMREG_LR, ins->sreg3);
+ buf [1] = code;
+ ARM_B_COND (code, ARMCOND_NE, 0);
+ ARM_STREX_REG (code, tmpreg, ins->sreg2, ins->sreg1);
+ ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
+ buf [2] = code;
+ ARM_B_COND (code, ARMCOND_NE, 0);
+ arm_patch (buf [2], buf [1]);
+ arm_patch (buf [1], code);
+ break;
+ case OP_ATOMIC_ADD_NEW_I4:
+ buf [0] = code;
+ ARM_DMB (code, ARM_DMB_SY);
+ ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
+ ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->sreg2);
+ ARM_STREX_REG (code, tmpreg, ARMREG_LR, ins->sreg1);
+ ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
+ buf [1] = code;
+ ARM_B_COND (code, ARMCOND_NE, 0);
+ arm_patch (buf [1], buf [0]);
+ break;
+ default:
+ g_assert_not_reached ();
}
- ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ARMREG_IP);
- code = mono_arm_emit_load_imm (code, ARMREG_IP, 1);
- ARM_STRB_IMM (code, ARMREG_IP, 0, ARMREG_LR);
- arm_patch (br, code);
+ ARM_DMB (code, ARM_DMB_SY);
+ if (tmpreg != ins->dreg)
+ ARM_LDR_IMM (code, tmpreg, cfg->frame_reg, cfg->arch.atomic_tmp_offset);
+ ARM_MOV_REG_REG (code, ins->dreg, ARMREG_LR);
break;
}
+
/*case OP_BIGMUL:
ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
ppc_mulhw (code, ppc_r3, ins->sreg1, ins->sreg2);
case OP_NOP:
case OP_DUMMY_USE:
case OP_DUMMY_STORE:
+ case OP_DUMMY_ICONST:
+ case OP_DUMMY_R8CONST:
case OP_NOT_REACHED:
case OP_NOT_NULL:
break;
code = mono_arm_patchable_b (code, ARMCOND_AL);
}
break;
+ case OP_TAILCALL: {
+ MonoCallInst *call = (MonoCallInst*)ins;
+
+ /*
+ * The stack looks like the following:
+ * <caller argument area>
+ * <saved regs etc>
+ * <rest of frame>
+ * <callee argument area>
+ * Need to copy the arguments from the callee argument area to
+ * the caller argument area, and pop the frame.
+ */
+ if (call->stack_usage) {
+ int i, prev_sp_offset = 0;
+
+ /* Compute size of saved registers restored below */
+ if (iphone_abi)
+ prev_sp_offset = 2 * 4;
+ else
+ prev_sp_offset = 1 * 4;
+ for (i = 0; i < 16; ++i) {
+ if (cfg->used_int_regs & (1 << i))
+ prev_sp_offset += 4;
+ }
+
+ code = emit_big_add (code, ARMREG_IP, cfg->frame_reg, cfg->stack_usage + prev_sp_offset);
+
+ /* Copy arguments on the stack to our argument area */
+ for (i = 0; i < call->stack_usage; i += sizeof (mgreg_t)) {
+ ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, i);
+ ARM_STR_IMM (code, ARMREG_LR, ARMREG_IP, i);
+ }
+ }
+
+ /*
+ * Keep in sync with mono_arch_emit_epilog
+ */
+ g_assert (!cfg->method->save_lmf);
+
+ code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage);
+ if (iphone_abi) {
+ if (cfg->used_int_regs)
+ ARM_POP (code, cfg->used_int_regs);
+ ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
+ } else {
+ ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_LR));
+ }
+
+ mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, call->method);
+ if (cfg->compile_aot) {
+ ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
+ ARM_B (code, 0);
+ *(gpointer*)code = NULL;
+ code += 4;
+ ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_IP);
+ } else {
+ code = mono_arm_patchable_b (code, ARMCOND_AL);
+ }
+ break;
+ }
case OP_CHECK_THIS:
/* ensure ins->sreg1 is not NULL */
ARM_LDRB_IMM (code, ARMREG_LR, ins->sreg1, 0);
ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_HI);
break;
+ case OP_ICNEQ:
+ ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_NE);
+ ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_EQ);
+ break;
+ case OP_ICGE:
+ ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
+ ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_LT);
+ break;
+ case OP_ICLE:
+ ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
+ ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_GT);
+ break;
+ case OP_ICGE_UN:
+ case OP_ICLE_UN:
+ ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
+ ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_LO);
+ break;
case OP_COND_EXC_EQ:
case OP_COND_EXC_NE_UN:
case OP_COND_EXC_LT:
break;
case OP_STORER4_MEMBASE_REG:
g_assert (arm_is_fpimm8 (ins->inst_offset));
+ code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
ARM_CVTD (code, vfp_scratch1, ins->sreg1);
ARM_FSTS (code, vfp_scratch1, ins->inst_destbasereg, ins->inst_offset);
+ code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
break;
case OP_LOADR4_MEMBASE:
g_assert (arm_is_fpimm8 (ins->inst_offset));
+ code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
ARM_FLDS (code, vfp_scratch1, ins->inst_basereg, ins->inst_offset);
ARM_CVTS (code, ins->dreg, vfp_scratch1);
+ code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
break;
case OP_ICONV_TO_R_UN: {
g_assert_not_reached ();
break;
}
case OP_ICONV_TO_R4:
+ code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
ARM_FMSR (code, vfp_scratch1, ins->sreg1);
ARM_FSITOS (code, vfp_scratch1, vfp_scratch1);
ARM_CVTS (code, ins->dreg, vfp_scratch1);
+ code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
break;
case OP_ICONV_TO_R8:
+ code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
ARM_FMSR (code, vfp_scratch1, ins->sreg1);
ARM_FSITOD (code, ins->dreg, vfp_scratch1);
+ code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
break;
case OP_SETFRET:
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
break;
+ case OP_FCNEQ:
+ if (IS_VFP) {
+ ARM_CMPD (code, ins->sreg1, ins->sreg2);
+ ARM_FMSTAT (code);
+ }
+ ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_NE);
+ ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_EQ);
+ break;
+ case OP_FCGE:
+ if (IS_VFP) {
+ ARM_CMPD (code, ins->sreg1, ins->sreg2);
+ ARM_FMSTAT (code);
+ }
+ ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
+ ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_MI);
+ break;
+ case OP_FCLE:
+ if (IS_VFP) {
+ ARM_CMPD (code, ins->sreg2, ins->sreg1);
+ ARM_FMSTAT (code);
+ }
+ ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
+ ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_MI);
+ break;
+
/* ARM FPA flags table:
* N Less than ARMCOND_MI
* Z Equal ARMCOND_EQ
case OP_CKFINITE: {
if (IS_VFP) {
+ code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
+ code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch2);
+
#ifdef USE_JUMP_TABLES
{
gpointer *jte = mono_jumptable_add_entries (2);
ARM_FMSTAT (code);
EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, "ArithmeticException");
ARM_CPYD (code, ins->dreg, ins->sreg1);
+
+ code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
+ code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch2);
}
break;
}
break;
}
} else if (ainfo->storage == RegTypeFP) {
- code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
- ARM_ADD_REG_REG (code, ARMREG_IP, ARMREG_IP, inst->inst_basereg);
+ int imm8, rot_amount;
+
+ if ((imm8 = mono_arm_is_rotated_imm8 (inst->inst_offset, &rot_amount)) == -1) {
+ code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
+ ARM_ADD_REG_REG (code, ARMREG_IP, ARMREG_IP, inst->inst_basereg);
+ } else
+ ARM_ADD_REG_IMM (code, ARMREG_IP, inst->inst_basereg, imm8, rot_amount);
if (ainfo->size == 8)
ARM_FSTD (code, ainfo->reg, ARMREG_IP, 0);
void
mono_arch_finish_init (void)
{
-	lmf_tls_offset = mono_get_lmf_tls_offset ();
-	lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
+	/* Nothing to do: the LMF TLS offset caching (lmf_tls_offset / lmf_addr_tls_offset) was removed. */
}
void
eabi_supported = TRUE;
}
+/*
+ * mono_arch_opcode_supported:
+ *
+ *   Return TRUE if the backend can emit inline code for OPCODE on the
+ * current CPU.  The 32-bit atomic opcodes are implemented with
+ * LDREX/STREX/DMB and so require an ARMv7 core (v7_supported).
+ */
+gboolean
+mono_arch_opcode_supported (int opcode)
+{
+	switch (opcode) {
+	case OP_ATOMIC_EXCHANGE_I4:
+	case OP_ATOMIC_CAS_I4:
+	case OP_ATOMIC_ADD_NEW_I4:
+		return v7_supported;
+	default:
+		return FALSE;
+	}
+}
+
#if defined(ENABLE_GSHAREDVT)
#include "../../../mono-extensions/mono/mini/mini-arm-gsharedvt.c"