void sys_icache_invalidate (void *start, size_t len);
#endif
-static gint lmf_tls_offset = -1;
-static gint lmf_addr_tls_offset = -1;
-
/* This mutex protects architecture specific caches;
 * acquire with mono_mini_arch_lock () and release with mono_mini_arch_unlock (). */
#define mono_mini_arch_lock() EnterCriticalSection (&mini_arch_mutex)
#define mono_mini_arch_unlock() LeaveCriticalSection (&mini_arch_mutex)
gboolean imm = arm_is_fpimm8 (var->inst_offset);
/* 4+1 insns for emit_big_add () and 1 for FLDS. */
- if (imm)
+ if (!imm)
*max_len += 20 + 4;
*max_len += 4;
}
/*
- * emit_save_lmf:
+ * emit_restore_lmf:
*
* Emit code to pop an LMF structure from the LMF stack.
*/
mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
{
guint8 *code, *start;
+ MonoType *sig_ret;
/* FIXME: Support more cases */
- if (MONO_TYPE_ISSTRUCT (sig->ret))
+ sig_ret = mini_type_get_underlying_type (NULL, sig->ret);
+ if (MONO_TYPE_ISSTRUCT (sig_ret))
return NULL;
if (has_target) {
gr += n_in_regs;
nwords -= n_in_regs;
}
+ if (sig->call_convention == MONO_CALL_VARARG)
+ /* This matches the alignment in mono_ArgIterator_IntGetNextArg () */
+ stack_size = ALIGN_TO (stack_size, align);
ainfo->offset = stack_size;
/*g_print ("offset for arg %d at %d\n", n, stack_size);*/
stack_size += nwords * sizeof (gpointer);
MonoMethodSignature *sig;
MonoMethodHeader *header;
MonoInst *ins;
+ MonoType *sig_ret;
int i, offset, size, align, curinst;
CallInfo *cinfo;
guint32 ualign;
if (!cfg->arch.cinfo)
cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
cinfo = cfg->arch.cinfo;
+ sig_ret = mini_replace_type (sig->ret);
mono_arch_compute_omit_fp (cfg);
offset = 0;
curinst = 0;
- if (!MONO_TYPE_ISSTRUCT (sig->ret) && !cinfo->vtype_retaddr) {
- if (sig->ret->type != MONO_TYPE_VOID) {
+ if (!MONO_TYPE_ISSTRUCT (sig_ret) && !cinfo->vtype_retaddr) {
+ if (sig_ret->type != MONO_TYPE_VOID) {
cfg->ret->opcode = OP_REGVAR;
cfg->ret->inst_c0 = ARMREG_R0;
}
offset += size;
}
+ if (cfg->has_atomic_exchange_i4 || cfg->has_atomic_cas_i4 || cfg->has_atomic_add_new_i4) {
+ /* Allocate a temporary used by the atomic ops */
+ size = 4;
+ align = 4;
+
+		/* Align the offset and reserve the slot for the atomic-op temporary */
+ offset += align - 1;
+ offset &= ~(align - 1);
+ cfg->arch.atomic_tmp_offset = offset;
+ offset += size;
+ } else {
+ cfg->arch.atomic_tmp_offset = -1;
+ }
+
cfg->locals_min_stack_offset = offset;
curinst = cfg->locals_start;
{
ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
MonoMethodSignature *sig = ((ArchDynCallInfo*)info)->sig;
+ MonoType *ptype;
guint8 *ret = ((DynCallArgs*)buf)->ret;
mgreg_t res = ((DynCallArgs*)buf)->res;
mgreg_t res2 = ((DynCallArgs*)buf)->res2;
- switch (mono_type_get_underlying_type (sig->ret)->type) {
+ ptype = mini_type_get_underlying_type (NULL, sig->ret);
+ switch (ptype->type) {
case MONO_TYPE_VOID:
*(gpointer*)ret = NULL;
break;
((gint32*)ret) [1] = res2;
break;
case MONO_TYPE_GENERICINST:
- if (MONO_TYPE_IS_REFERENCE (sig->ret)) {
+ if (MONO_TYPE_IS_REFERENCE (ptype)) {
*(gpointer*)ret = (gpointer)res;
break;
} else {
SAVE_STRUCT,
SAVE_ONE,
SAVE_TWO,
- SAVE_FP
+ SAVE_ONE_FP,
+ SAVE_TWO_FP
};
void*
int save_mode = SAVE_NONE;
int offset;
MonoMethod *method = cfg->method;
- int rtype = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret)->type;
+ MonoType *ret_type = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret);
+ int rtype = ret_type->type;
int save_offset = cfg->param_area;
save_offset += 7;
save_offset &= ~7;
save_mode = SAVE_TWO;
break;
case MONO_TYPE_R4:
+ if (IS_HARD_FLOAT)
+ save_mode = SAVE_ONE_FP;
+ else
+ save_mode = SAVE_ONE;
+ break;
case MONO_TYPE_R8:
- save_mode = SAVE_FP;
+ if (IS_HARD_FLOAT)
+ save_mode = SAVE_TWO_FP;
+ else
+ save_mode = SAVE_TWO;
break;
+ case MONO_TYPE_GENERICINST:
+ if (!mono_type_generic_inst_is_valuetype (ret_type)) {
+ save_mode = SAVE_ONE;
+ break;
+ }
+ /* Fall through */
case MONO_TYPE_VALUETYPE:
save_mode = SAVE_STRUCT;
break;
ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
}
break;
- case SAVE_FP:
- /* FIXME: what reg? */
+ case SAVE_ONE_FP:
+ ARM_FSTS (code, ARM_VFP_F0, cfg->frame_reg, save_offset);
+ if (enable_arguments) {
+ ARM_FMRS (code, ARMREG_R1, ARM_VFP_F0);
+ }
+ break;
+ case SAVE_TWO_FP:
+ ARM_FSTD (code, ARM_VFP_D0, cfg->frame_reg, save_offset);
if (enable_arguments) {
- /* FIXME: what reg? */
+ ARM_FMDRR (code, ARMREG_R1, ARMREG_R2, ARM_VFP_D0);
}
break;
case SAVE_STRUCT:
case SAVE_ONE:
ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
break;
- case SAVE_FP:
- /* FIXME */
+ case SAVE_ONE_FP:
+ ARM_FLDS (code, ARM_VFP_F0, cfg->frame_reg, save_offset);
+ break;
+ case SAVE_TWO_FP:
+ ARM_FLDD (code, ARM_VFP_D0, cfg->frame_reg, save_offset);
break;
case SAVE_NONE:
default:
g_assert_not_reached ();
#endif
break;
+ case OP_ATOMIC_EXCHANGE_I4:
+ case OP_ATOMIC_CAS_I4:
+ case OP_ATOMIC_ADD_NEW_I4: {
+ int tmpreg;
+ guint8 *buf [16];
+
+ g_assert (v7_supported);
+
+ /* Free up a reg */
+ if (ins->sreg1 != ARMREG_IP && ins->sreg2 != ARMREG_IP && ins->sreg3 != ARMREG_IP)
+ tmpreg = ARMREG_IP;
+ else if (ins->sreg1 != ARMREG_R0 && ins->sreg2 != ARMREG_R0 && ins->sreg3 != ARMREG_R0)
+ tmpreg = ARMREG_R0;
+ else if (ins->sreg1 != ARMREG_R1 && ins->sreg2 != ARMREG_R1 && ins->sreg3 != ARMREG_R1)
+ tmpreg = ARMREG_R1;
+ else
+ tmpreg = ARMREG_R2;
+ g_assert (cfg->arch.atomic_tmp_offset != -1);
+ ARM_STR_IMM (code, tmpreg, cfg->frame_reg, cfg->arch.atomic_tmp_offset);
+
+ switch (ins->opcode) {
+ case OP_ATOMIC_EXCHANGE_I4:
+ buf [0] = code;
+ ARM_DMB (code, ARM_DMB_SY);
+ ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
+ ARM_STREX_REG (code, tmpreg, ins->sreg2, ins->sreg1);
+ ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
+ buf [1] = code;
+ ARM_B_COND (code, ARMCOND_NE, 0);
+ arm_patch (buf [1], buf [0]);
+ break;
+ case OP_ATOMIC_CAS_I4:
+ ARM_DMB (code, ARM_DMB_SY);
+ buf [0] = code;
+ ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
+ ARM_CMP_REG_REG (code, ARMREG_LR, ins->sreg3);
+ buf [1] = code;
+ ARM_B_COND (code, ARMCOND_NE, 0);
+ ARM_STREX_REG (code, tmpreg, ins->sreg2, ins->sreg1);
+ ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
+ buf [2] = code;
+ ARM_B_COND (code, ARMCOND_NE, 0);
+ arm_patch (buf [2], buf [1]);
+ arm_patch (buf [1], code);
+ break;
+ case OP_ATOMIC_ADD_NEW_I4:
+ buf [0] = code;
+ ARM_DMB (code, ARM_DMB_SY);
+ ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
+ ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->sreg2);
+ ARM_STREX_REG (code, tmpreg, ARMREG_LR, ins->sreg1);
+ ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
+ buf [1] = code;
+ ARM_B_COND (code, ARMCOND_NE, 0);
+ arm_patch (buf [1], buf [0]);
+ break;
+ default:
+ g_assert_not_reached ();
+ }
+
+ ARM_DMB (code, ARM_DMB_SY);
+ if (tmpreg != ins->dreg)
+ ARM_LDR_IMM (code, tmpreg, cfg->frame_reg, cfg->arch.atomic_tmp_offset);
+ ARM_MOV_REG_REG (code, ins->dreg, ARMREG_LR);
+ break;
+ }
+
/*case OP_BIGMUL:
ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
ppc_mulhw (code, ppc_r3, ins->sreg1, ins->sreg2);
case OP_NOP:
case OP_DUMMY_USE:
case OP_DUMMY_STORE:
+ case OP_DUMMY_ICONST:
+ case OP_DUMMY_R8CONST:
case OP_NOT_REACHED:
case OP_NOT_NULL:
break;
ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_HI);
break;
+ case OP_ICNEQ:
+ ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_NE);
+ ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_EQ);
+ break;
+ case OP_ICGE:
+ ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
+ ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_LT);
+ break;
+ case OP_ICLE:
+ ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
+ ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_GT);
+ break;
+ case OP_ICGE_UN:
+ case OP_ICLE_UN:
+ ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
+ ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_LO);
+ break;
case OP_COND_EXC_EQ:
case OP_COND_EXC_NE_UN:
case OP_COND_EXC_LT:
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
break;
+ case OP_FCNEQ:
+ if (IS_VFP) {
+ ARM_CMPD (code, ins->sreg1, ins->sreg2);
+ ARM_FMSTAT (code);
+ }
+ ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_NE);
+ ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_EQ);
+ break;
+ case OP_FCGE:
+ if (IS_VFP) {
+ ARM_CMPD (code, ins->sreg1, ins->sreg2);
+ ARM_FMSTAT (code);
+ }
+ ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
+ ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_MI);
+ break;
+ case OP_FCLE:
+ if (IS_VFP) {
+ ARM_CMPD (code, ins->sreg2, ins->sreg1);
+ ARM_FMSTAT (code);
+ }
+ ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
+ ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_MI);
+ break;
+
/* ARM FPA flags table:
* N Less than ARMCOND_MI
* Z Equal ARMCOND_EQ
/*
 * mono_arch_finish_init:
 *
 *   Perform arch-specific initialization which has to run after the
 * generic mini initialization.
 */
void
mono_arch_finish_init (void)
{
	/*
	 * Nothing to do: the cached LMF TLS offsets which used to be
	 * initialized here were removed.
	 */
}
void
eabi_supported = TRUE;
}
+gboolean
+mono_arch_opcode_supported (int opcode)
+{
+ switch (opcode) {
+ case OP_ATOMIC_EXCHANGE_I4:
+ case OP_ATOMIC_CAS_I4:
+ case OP_ATOMIC_ADD_NEW_I4:
+ return v7_supported;
+ default:
+ return FALSE;
+ }
+}
+
#if defined(ENABLE_GSHAREDVT)
#include "../../../mono-extensions/mono/mini/mini-arm-gsharedvt.c"