#include "inssel.h"
#include "cpu-arm.h"
#include "trace.h"
+#ifdef ARM_FPU_FPA
#include "mono/arch/arm/arm-fpa-codegen.h"
+#elif defined(ARM_FPU_VFP)
+#include "mono/arch/arm/arm-vfp-codegen.h"
+#endif
+
+/* This mutex protects architecture specific caches */
+#define mono_mini_arch_lock() EnterCriticalSection (&mini_arch_mutex)
+#define mono_mini_arch_unlock() LeaveCriticalSection (&mini_arch_mutex)
+static CRITICAL_SECTION mini_arch_mutex;
+
+/* CPU features detected by parsing /proc/cpuinfo (see mono_arch_cpu_optimizazions) */
+static int v5_supported = 0;
+static int thumb_supported = 0;
+
+static int mono_arm_is_rotated_imm8 (guint32 val, gint *rot_amount);
/*
* TODO:
#define arm_is_imm8(v) ((v) > -256 && (v) < 256)
#define arm_is_fpimm8(v) ((v) >= -1020 && (v) <= 1020)
+/* Mask/value pair matching an A32 "ldr pc, [rX, #imm]" encoding,
+ * ignoring the base register, the offset and the U (add/subtract) bit */
+#define LDR_MASK ((0xf << ARMCOND_SHIFT) | (3 << 26) | (1 << 22) | (1 << 20) | (15 << 12))
+#define LDR_PC_VAL ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 26) | (0 << 22) | (1 << 20) | (15 << 12))
+#define IS_LDR_PC(val) (((val) & LDR_MASK) == LDR_PC_VAL)
+
+/* Exact encodings of "add lr, pc, #4" and "mov lr, pc", used to
+ * recognize IMT/virtual call sites in mono_arch_get_vcall_slot_addr () */
+#define ADD_LR_PC_4 ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 25) | (1 << 23) | (ARMREG_PC << 16) | (ARMREG_LR << 12) | 4)
+#define MOV_LR_PC ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 24) | (0xa << 20) | (ARMREG_LR << 12) | ARMREG_PC)
+#define DEBUG_IMT 0
+
const char*
mono_arch_regname (int reg) {
static const char * rnames[] = {
arm_patch (code - 4, start_loop);
return code;
}
- g_assert (arm_is_imm12 (doffset));
- g_assert (arm_is_imm12 (doffset + size));
- g_assert (arm_is_imm12 (soffset));
- g_assert (arm_is_imm12 (soffset + size));
- while (size >= 4) {
- ARM_LDR_IMM (code, ARMREG_LR, sreg, soffset);
- ARM_STR_IMM (code, ARMREG_LR, dreg, doffset);
- doffset += 4;
- soffset += 4;
- size -= 4;
+ if (arm_is_imm12 (doffset) && arm_is_imm12 (doffset + size) &&
+ arm_is_imm12 (soffset) && arm_is_imm12 (soffset + size)) {
+ while (size >= 4) {
+ ARM_LDR_IMM (code, ARMREG_LR, sreg, soffset);
+ ARM_STR_IMM (code, ARMREG_LR, dreg, doffset);
+ doffset += 4;
+ soffset += 4;
+ size -= 4;
+ }
+ } else if (size) {
+ code = emit_big_add (code, ARMREG_R0, sreg, soffset);
+ code = emit_big_add (code, ARMREG_R1, dreg, doffset);
+ doffset = soffset = 0;
+ while (size >= 4) {
+ ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R0, soffset);
+ ARM_STR_IMM (code, ARMREG_LR, ARMREG_R1, doffset);
+ doffset += 4;
+ soffset += 4;
+ size -= 4;
+ }
}
g_assert (size == 0);
return code;
}
+/*
+ * emit_call_reg:
+ * Emit an indirect call through register REG, using the best sequence
+ * for the detected CPU: a single BLX on ARMv5+, otherwise LR is loaded
+ * manually and control transferred with BX (when thumb interworking is
+ * supported) or a plain MOV to PC.
+ * Returns the updated code pointer.
+ */
+static guint8*
+emit_call_reg (guint8 *code, int reg)
+{
+	if (v5_supported) {
+		ARM_BLX_REG (code, reg);
+	} else {
+		/* reading PC yields the address of the instruction after the
+		 * following branch, i.e. the correct return address for LR */
+		ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
+		if (thumb_supported)
+			ARM_BX (code, reg);
+		else
+			ARM_MOV_REG_REG (code, ARMREG_PC, reg);
+	}
+	return code;
+}
+
+/*
+ * emit_call_seq:
+ * Emit a call whose target is filled in later by the patcher.
+ * For dynamic methods the target is kept as an inline literal (loaded
+ * into IP, branched over, then called through IP); otherwise a plain
+ * BL with a zero displacement is emitted and fixed up by arm_patch ().
+ * Returns the updated code pointer.
+ */
+static guint8*
+emit_call_seq (MonoCompile *cfg, guint8 *code)
+{
+	if (cfg->method->dynamic) {
+		ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
+		ARM_B (code, 0);
+		/* placeholder word for the call target, patched later */
+		*(gpointer*)code = NULL;
+		code += 4;
+		code = emit_call_reg (code, ARMREG_IP);
+	} else {
+		ARM_BL (code, 0);
+	}
+	return code;
+}
+
/*
* mono_arch_get_argument_info:
* @csig: a method signature
if (csig->pinvoke)
size = mono_type_native_stack_size (csig->params [k], &align);
else
- size = mono_type_stack_size (csig->params [k], &align);
+ size = mini_type_stack_size (NULL, csig->params [k], &align);
/* ignore alignment for now */
align = 1;
return frame_size;
}
+/*
+ * decode_vcall_slot_from_ldr:
+ * Given the raw encoding of an "ldr pc, [rX, #offset]" instruction and
+ * the saved register values REGS, reconstruct the address of the
+ * vtable slot the call loaded its target from.
+ */
+static gpointer*
+decode_vcall_slot_from_ldr (guint32 ldr, gpointer *regs)
+{
+	char *o = NULL;
+	int reg, offset = 0;
+	/* bits 16-19 hold the base register, bits 0-11 the immediate offset */
+	reg = (ldr >> 16 ) & 0xf;
+	offset = ldr & 0xfff;
+	if (((ldr >> 23) & 1) == 0) /*U bit, 0 means negative and 1 positive*/
+		offset = -offset;
+	/*g_print ("found vcall at r%d + %d for code at %p 0x%x\n", reg, offset, code, *code);*/
+	o = regs [reg];
+	return (gpointer*)(o + offset);
+}
+
+/*
+ * mono_arch_get_vcall_slot_addr:
+ * Return the address of the vtable (or IMT) slot used by the indirect
+ * call that ends right before CODE_PTR, or NULL when the call was a
+ * direct branch and has no slot. REGS holds the register values saved
+ * at the call site.
+ */
+gpointer*
+mono_arch_get_vcall_slot_addr (guint8 *code_ptr, gpointer *regs)
+{
+	guint32* code = (guint32*)code_ptr;
+
+	/* Locate the address of the method-specific trampoline. The call using
+	the vtable slot that took the processing flow to 'arch_create_jit_trampoline'
+	looks something like this:
+
+		ldr rA, rX, #offset
+		mov lr, pc
+		mov pc, rA
+	or better:
+		mov lr, pc
+		ldr pc, rX, #offset
+
+	The call sequence could be also:
+		ldr ip, pc, 0
+		b skip
+		function pointer literal
+		skip:
+		mov lr, pc
+		mov pc, ip
+	Note that on ARM5+ we can use one instruction instead of the last two.
+	Therefore, we need to locate the 'ldr rA' instruction to know which
+	register was used to hold the method addrs.
+	*/
+
+	/* This is the instruction after "ldr pc, xxx", "mov pc, xxx" or "bl xxx" could be either the IMT value or some other instruction*/
+	--code;
+
+	/* Three possible code sequences can happen here:
+	 * interface call:
+	 *
+	 * add lr, [pc + #4]
+	 * ldr pc, [rX - #offset]
+	 * .word IMT value
+	 *
+	 * virtual call:
+	 *
+	 * mov lr, pc
+	 * ldr pc, [rX - #offset]
+	 *
+	 * direct branch with bl:
+	 *
+	 * bl #offset
+	 *
+	 * direct branch with mov:
+	 *
+	 * mv pc, rX
+	 *
+	 * We only need to identify interface and virtual calls, the others can be ignored.
+	 *
+	 */
+	if (IS_LDR_PC (code [-1]) && code [-2] == ADD_LR_PC_4)
+		return decode_vcall_slot_from_ldr (code [-1], regs);
+
+	if (IS_LDR_PC (code [0]) && code [-1] == MOV_LR_PC)
+		return decode_vcall_slot_from_ldr (code [0], regs);
+
+	return NULL;
+}
+
+/* Max integer arguments handled by the cached non-target invoke stubs */
+#define MAX_ARCH_DELEGATE_PARAMS 3
+
+/*
+ * mono_arch_get_delegate_invoke_impl:
+ * Return a small native stub implementing delegate Invoke for SIG, or
+ * NULL when no fast path applies (struct return, more than
+ * MAX_ARCH_DELEGATE_PARAMS arguments, or a non-regsize argument).
+ * Stubs are cached, guarded by mini_arch_mutex: one stub for the
+ * has_target case, one per parameter count otherwise.
+ */
+gpointer
+mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
+{
+	guint8 *code, *start;
+
+	/* FIXME: Support more cases */
+	if (MONO_TYPE_ISSTRUCT (sig->ret))
+		return NULL;
+
+	if (has_target) {
+		static guint8* cached = NULL;
+		mono_mini_arch_lock ();
+		if (cached) {
+			mono_mini_arch_unlock ();
+			return cached;
+		}
+
+		start = code = mono_global_codeman_reserve (12);
+
+		/* Replace the this argument with the target */
+		ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
+		ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, target));
+		ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
+
+		g_assert ((code - start) <= 12);
+
+		mono_arch_flush_icache (code, 12);
+		cached = start;
+		mono_mini_arch_unlock ();
+		return cached;
+	} else {
+		static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
+		int size, i;
+
+		if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
+			return NULL;
+		for (i = 0; i < sig->param_count; ++i)
+			if (!mono_is_regsize_var (sig->params [i]))
+				return NULL;
+
+		mono_mini_arch_lock ();
+		code = cache [sig->param_count];
+		if (code) {
+			mono_mini_arch_unlock ();
+			return code;
+		}
+
+		/* one insn to load method_ptr, one mov per argument, plus the jump */
+		size = 8 + sig->param_count * 4;
+		start = code = mono_global_codeman_reserve (size);
+
+		ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
+		/* slide down the arguments */
+		for (i = 0; i < sig->param_count; ++i) {
+			ARM_MOV_REG_REG (code, (ARMREG_R0 + i), (ARMREG_R0 + i + 1));
+		}
+		ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
+
+		g_assert ((code - start) <= size);
+
+		mono_arch_flush_icache (code, size);
+		cache [sig->param_count] = start;
+		mono_mini_arch_unlock ();
+		return start;
+	}
+
+	/* not reached: both branches above return */
+	return NULL;
+}
+
+/*
+ * mono_arch_get_this_arg_from_call:
+ * Extract the 'this' pointer from the saved argument registers REGS at
+ * a call site. When SIG returns a struct, r0 carries the hidden
+ * return-value address, so 'this' is shifted to r1.
+ */
+gpointer
+mono_arch_get_this_arg_from_call (MonoMethodSignature *sig, gssize *regs, guint8 *code)
+{
+	/* FIXME: handle returning a struct */
+	if (MONO_TYPE_ISSTRUCT (sig->ret))
+		return (gpointer)regs [ARMREG_R1];
+	return (gpointer)regs [ARMREG_R0];
+}
+
/*
* Initialize the cpu to execute managed code.
*/
{
}
+/*
+ * Initialize architecture specific code: sets up the mutex that guards
+ * the arch specific caches (e.g. the delegate invoke stub caches).
+ */
+void
+mono_arch_init (void)
+{
+	InitializeCriticalSection (&mini_arch_mutex);
+}
+
+/*
+ * Cleanup architecture specific code.
+ * NOTE(review): mini_arch_mutex is intentionally left alive here —
+ * presumably because cleanup runs at process shutdown; confirm before
+ * adding a DeleteCriticalSection call.
+ */
+void
+mono_arch_cleanup (void)
+{
+}
+
/*
* This function returns the optimizations supported on this cpu.
*/
mono_arch_cpu_optimizazions (guint32 *exclude_mask)
{
guint32 opts = 0;
+ char buf [512];
+ char *line;
+ FILE *file = fopen ("/proc/cpuinfo", "r");
+ if (file) {
+ while ((line = fgets (buf, 512, file))) {
+ if (strncmp (line, "Processor", 9) == 0) {
+ char *ver = strstr (line, "(v");
+ if (ver && (ver [2] == '5' || ver [2] == '6' || ver [2] == '7')) {
+ v5_supported = TRUE;
+ }
+ continue;
+ }
+ if (strncmp (line, "Features", 8) == 0) {
+ char *th = strstr (line, "thumb");
+ if (th) {
+ thumb_supported = TRUE;
+ if (v5_supported)
+ break;
+ }
+ continue;
+ }
+ }
+ fclose (file);
+ /*printf ("features: v5: %d, thumb: %d\n", v5_supported, thumb_supported);*/
+ }
/* no arm-specific optimizations yet */
*exclude_mask = 0;
enum {
RegTypeGeneral,
RegTypeBase,
+ RegTypeBaseGen,
RegTypeFP,
RegTypeStructByVal,
RegTypeStructByAddr
ainfo->reg = *gr;
}
} else {
- if (*gr > ARMREG_R2) {
- /**stack_size += 7;
- *stack_size &= ~7;*/
+ if (*gr == ARMREG_R3
+#ifdef __ARM_EABI__
+ && 0
+#endif
+ ) {
+ /* first word in r3 and the second on the stack */
+ ainfo->offset = *stack_size;
+ ainfo->reg = ARMREG_SP; /* in the caller */
+ ainfo->regtype = RegTypeBaseGen;
+ *stack_size += 4;
+ } else if (*gr > ARMREG_R3) {
+#ifdef __ARM_EABI__
+ *stack_size += 7;
+ *stack_size &= ~7;
+#endif
ainfo->offset = *stack_size;
ainfo->reg = ARMREG_SP; /* in the caller */
ainfo->regtype = RegTypeBase;
*stack_size += 8;
} else {
- /*if ((*gr) & 1)
- (*gr) ++;*/
+#ifdef __ARM_EABI__
+ if ((*gr) & 1)
+ (*gr) ++;
+#endif
ainfo->reg = *gr;
}
(*gr) ++;
if ((inst->flags & MONO_INST_IS_DEAD) || inst->opcode == OP_REGVAR)
continue;
- /* inst->unused indicates native sized value types, this is used by the
+ /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
* pinvoke wrappers when they call functions returning structure */
- if (inst->unused && MONO_TYPE_ISSTRUCT (inst->inst_vtype) && inst->inst_vtype->type != MONO_TYPE_TYPEDBYREF)
+ if (inst->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (inst->inst_vtype) && inst->inst_vtype->type != MONO_TYPE_TYPEDBYREF)
size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), &align);
else
size = mono_type_size (inst->inst_vtype, &align);
curinst = 0;
if (sig->hasthis) {
- inst = m->varinfo [curinst];
+ inst = m->args [curinst];
if (inst->opcode != OP_REGVAR) {
inst->opcode = OP_REGOFFSET;
inst->inst_basereg = frame_reg;
}
for (i = 0; i < sig->param_count; ++i) {
- inst = m->varinfo [curinst];
+ inst = m->args [curinst];
if (inst->opcode != OP_REGVAR) {
inst->opcode = OP_REGOFFSET;
inst->inst_basereg = frame_reg;
arg->next = call->out_args;
call->out_args = arg;
if (ainfo->regtype == RegTypeGeneral) {
- arg->unused = ainfo->reg;
+ arg->backend.reg3 = ainfo->reg;
call->used_iregs |= 1 << ainfo->reg;
if (arg->type == STACK_I8)
call->used_iregs |= 1 << (ainfo->reg + 1);
if (arg->type == STACK_R8) {
if (ainfo->size == 4) {
+#ifndef MONO_ARCH_SOFT_FLOAT
arg->opcode = OP_OUTARG_R4;
+#endif
} else {
call->used_iregs |= 1 << (ainfo->reg + 1);
}
}
} else if (ainfo->regtype == RegTypeStructByAddr) {
/* FIXME: where si the data allocated? */
- arg->unused = ainfo->reg;
+ arg->backend.reg3 = ainfo->reg;
call->used_iregs |= 1 << ainfo->reg;
g_assert_not_reached ();
} else if (ainfo->regtype == RegTypeStructByVal) {
arg->opcode = OP_OUTARG_VT;
/* vtsize and offset have just 12 bits of encoding in number of words */
g_assert (((ainfo->vtsize | (ainfo->offset / 4)) & 0xfffff000) == 0);
- arg->unused = ainfo->reg | (ainfo->size << 4) | (ainfo->vtsize << 8) | ((ainfo->offset / 4) << 20);
+ arg->backend.arg_info = ainfo->reg | (ainfo->size << 4) | (ainfo->vtsize << 8) | ((ainfo->offset / 4) << 20);
} else if (ainfo->regtype == RegTypeBase) {
arg->opcode = OP_OUTARG_MEMBASE;
- arg->unused = (ainfo->offset << 8) | ainfo->size;
+ arg->backend.arg_info = (ainfo->offset << 8) | ainfo->size;
+ } else if (ainfo->regtype == RegTypeBaseGen) {
+ call->used_iregs |= 1 << ARMREG_R3;
+ arg->opcode = OP_OUTARG_MEMBASE;
+ arg->backend.arg_info = (ainfo->offset << 8) | 0xff;
+ if (arg->type == STACK_R8)
+ cfg->flags |= MONO_CFG_HAS_FPOUT;
} else if (ainfo->regtype == RegTypeFP) {
- arg->unused = ainfo->reg;
- /* FPA args are passed in int regs */
+ arg->backend.reg3 = ainfo->reg;
+ /* FP args are passed in int regs */
call->used_iregs |= 1 << ainfo->reg;
if (ainfo->size == 8) {
arg->opcode = OP_OUTARG_R8;
code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
ARM_MOV_REG_IMM8 (code, ARMREG_R1, 0); /* NULL ebp for now */
code = mono_arm_emit_load_imm (code, ARMREG_R2, (guint32)func);
- ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
- ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_R2);
+ code = emit_call_reg (code, ARMREG_R2);
return code;
}
cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
code = cfg->native_code + offset;
}
-handle_enum:
switch (rtype) {
case MONO_TYPE_VOID:
/* special case string .ctor icall */
code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
code = mono_arm_emit_load_imm (code, ARMREG_IP, (guint32)func);
- ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
- ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
+ code = emit_call_reg (code, ARMREG_IP);
switch (save_mode) {
case SAVE_TWO:
if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
ins->inst_basereg == last_ins->inst_destbasereg &&
ins->inst_offset == last_ins->inst_offset) {
- if (ins->dreg == last_ins->sreg1) {
- last_ins->next = ins->next;
- ins = ins->next;
- continue;
- } else {
- //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
- ins->opcode = OP_MOVE;
- ins->sreg1 = last_ins->sreg1;
- }
+ ins->opcode = (ins->opcode == OP_LOADI1_MEMBASE) ? CEE_CONV_I1 : CEE_CONV_U1;
+ ins->sreg1 = last_ins->sreg1;
}
break;
case OP_LOADU2_MEMBASE:
if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
ins->inst_basereg == last_ins->inst_destbasereg &&
ins->inst_offset == last_ins->inst_offset) {
- if (ins->dreg == last_ins->sreg1) {
- last_ins->next = ins->next;
- ins = ins->next;
- continue;
- } else {
- //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
- ins->opcode = OP_MOVE;
- ins->sreg1 = last_ins->sreg1;
- }
+ ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? CEE_CONV_I2 : CEE_CONV_U2;
+ ins->sreg1 = last_ins->sreg1;
}
break;
case CEE_CONV_I4:
static void
mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
{
- MonoInst *ins, *next, *temp, *last_ins = NULL;
+ MonoInst *ins, *temp, *last_ins = NULL;
int rot_amount, imm8, low_imm;
/* setup the virtual reg allocator */
- if (bb->max_ireg > cfg->rs->next_vireg)
- cfg->rs->next_vireg = bb->max_ireg;
+ if (bb->max_vreg > cfg->rs->next_vreg)
+ cfg->rs->next_vreg = bb->max_vreg;
ins = bb->code;
while (ins) {
ins->inst_offset = low_imm;
break;
}
- /* FPA doesn't have indexed load instructions */
+ /* VFP/FPA doesn't have indexed load instructions */
g_assert_not_reached ();
break;
case OP_STORE_MEMBASE_REG:
break;
}
/*g_print ("fail with: %d (%d, %d)\n", ins->inst_offset, ins->inst_offset & ~0x1ff, low_imm);*/
- /* FPA doesn't have indexed store instructions */
+ /* VFP/FPA doesn't have indexed store instructions */
g_assert_not_reached ();
break;
case OP_STORE_MEMBASE_IMM:
ins = ins->next;
}
bb->last_ins = last_ins;
- bb->max_ireg = cfg->rs->next_vireg;
+ bb->max_vreg = cfg->rs->next_vreg;
}
emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
{
/* sreg is a float, dreg is an integer reg */
+#ifdef ARM_FPU_FPA
ARM_FIXZ (code, dreg, sreg);
+#elif defined(ARM_FPU_VFP)
+ if (is_signed)
+ ARM_TOSIZD (code, ARM_VFP_F0, sreg);
+ else
+ ARM_TOUIZD (code, ARM_VFP_F0, sreg);
+ ARM_FMRS (code, dreg, ARM_VFP_F0);
+#endif
if (!is_signed) {
if (size == 1)
ARM_AND_REG_IMM8 (code, dreg, dreg, 0xff);
guchar *code = data;
guint32 *thunks = data;
guint32 *endthunks = (guint32*)(code + bsize);
- int i, count = 0;
+ int count = 0;
int difflow, diffhigh;
/* always ensure a call from pdata->code can reach to the thunks without further thunks */
return 1;
} else if ((thunks [0] == 0) && (thunks [1] == 0) && (thunks [2] == 0)) {
/* found a free slot instead: emit thunk */
+ /* ARMREG_IP is fine to use since this can't be an IMT call
+ * which is indirect
+ */
code = (guchar*)thunks;
ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
- ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
+ if (thumb_supported)
+ ARM_BX (code, ARMREG_IP);
+ else
+ ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
thunks [2] = (guint32)pdata->target;
mono_arch_flush_icache ((guchar*)thunks, 12);
void
arm_patch (guchar *code, const guchar *target)
{
- guint32 ins = *(guint32*)code;
+ guint32 *code32 = (void*)code;
+ guint32 ins = *code32;
guint32 prim = (ins >> 25) & 7;
- guint32 ovf;
+ guint32 tval = GPOINTER_TO_UINT (target);
//g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
if (prim == 5) { /* 101b */
/* the diff starts 8 bytes from the branch opcode */
gint diff = target - code - 8;
+ gint tbits;
+ gint tmask = 0xffffffff;
+ if (tval & 1) { /* entering thumb mode */
+ diff = target - 1 - code - 8;
+ g_assert (thumb_supported);
+ tbits = 0xf << 28; /* bl->blx bit pattern */
+ g_assert ((ins & (1 << 24))); /* it must be a bl, not b instruction */
+ /* this low bit of the displacement is moved to bit 24 in the instruction encoding */
+ if (diff & 2) {
+ tbits |= 1 << 24;
+ }
+ tmask = ~(1 << 24); /* clear the link bit */
+ /*g_print ("blx to thumb: target: %p, code: %p, diff: %d, mask: %x\n", target, code, diff, tmask);*/
+ } else {
+ tbits = 0;
+ }
if (diff >= 0) {
if (diff <= 33554431) {
diff >>= 2;
ins = (ins & 0xff000000) | diff;
- *(guint32*)code = ins;
+ ins &= tmask;
+ *code32 = ins | tbits;
return;
}
} else {
if (diff >= -33554432) {
diff >>= 2;
ins = (ins & 0xff000000) | (diff & ~0xff000000);
- *(guint32*)code = ins;
+ ins &= tmask;
+ *code32 = ins | tbits;
return;
}
}
return;
}
-
+ /*
+ * The alternative call sequences looks like this:
+ *
+ * ldr ip, [pc] // loads the address constant
+ * b 1f // jumps around the constant
+ * address constant embedded in the code
+ * 1f:
+ * mov lr, pc
+ * mov pc, ip
+ *
+ * There are two cases for patching:
+ * a) at the end of method emission: in this case code points to the start
+ * of the call sequence
+ * b) during runtime patching of the call site: in this case code points
+ * to the mov pc, ip instruction
+ *
+ * We have to handle also the thunk jump code sequence:
+ *
+ * ldr ip, [pc]
+ * mov pc, ip
+ * address constant // execution never reaches here
+ */
if ((ins & 0x0ffffff0) == 0x12fff10) {
/* branch and exchange: the address is constructed in a reg */
g_assert_not_reached ();
} else {
- guint32 ccode [3];
+ guint32 ccode [4];
guint32 *tmp = ccode;
- ARM_LDR_IMM (tmp, ARMREG_IP, ARMREG_PC, 0);
- ARM_MOV_REG_REG (tmp, ARMREG_LR, ARMREG_PC);
- ARM_MOV_REG_REG (tmp, ARMREG_PC, ARMREG_IP);
+ guint8 *emit = (guint8*)tmp;
+ ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
+ ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
+ ARM_MOV_REG_REG (emit, ARMREG_PC, ARMREG_IP);
+ ARM_BX (emit, ARMREG_IP);
if (ins == ccode [2]) {
- tmp = (guint32*)code;
- tmp [-1] = (guint32)target;
+ g_assert_not_reached (); // should be -2 ...
+ code32 [-1] = (guint32)target;
return;
}
if (ins == ccode [0]) {
- tmp = (guint32*)code;
- tmp [2] = (guint32)target;
+ /* handles both thunk jump code and the far call sequence */
+ code32 [2] = (guint32)target;
return;
}
g_assert_not_reached ();
* to be used with the emit macros.
* Return -1 otherwise.
*/
-int
+static int
mono_arm_is_rotated_imm8 (guint32 val, gint *rot_amount)
{
guint32 res, i;
while (ins) {
offset = code - cfg->native_code;
- max_len = ((guint8 *)arm_cpu_desc [ins->opcode])[MONO_INST_LEN];
+ max_len = ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
if (offset > (cfg->code_size - max_len - 16)) {
cfg->code_size *= 2;
g_assert (imm8 >= 0);
ARM_CMP_REG_IMM (code, ins->sreg1, imm8, rot_amount);
break;
- case OP_X86_TEST_NULL:
- g_assert_not_reached ();
- break;
- case CEE_BREAK:
+ case OP_BREAK:
*(int*)code = 0xe7f001f0;
*(int*)code = 0xef9f0001;
code += 4;
}
case OP_SETFREG:
case OP_FMOVE:
+#ifdef ARM_FPU_FPA
ARM_MVFD (code, ins->dreg, ins->sreg1);
+#elif defined(ARM_FPU_VFP)
+ ARM_CPYD (code, ins->dreg, ins->sreg1);
+#endif
break;
case OP_FCONV_TO_R4:
+#ifdef ARM_FPU_FPA
ARM_MVFS (code, ins->dreg, ins->sreg1);
+#elif defined(ARM_FPU_VFP)
+ ARM_CVTD (code, ins->dreg, ins->sreg1);
+ ARM_CVTS (code, ins->dreg, ins->dreg);
+#endif
break;
- case CEE_JMP:
+ case OP_JMP:
/*
* Keep in sync with mono_arch_emit_epilog
*/
mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_METHOD, call->method);
else
mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, call->fptr);
- if (cfg->method->dynamic) {
- ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
- ARM_B (code, 0);
- *(gpointer*)code = NULL;
- code += 4;
- ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
- ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
- } else {
- ARM_BL (code, 0);
- }
+ code = emit_call_seq (cfg, code);
break;
case OP_FCALL_REG:
case OP_LCALL_REG:
case OP_VCALL_REG:
case OP_VOIDCALL_REG:
case OP_CALL_REG:
- ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
- ARM_MOV_REG_REG (code, ARMREG_PC, ins->sreg1);
+ code = emit_call_reg (code, ins->sreg1);
break;
case OP_FCALL_MEMBASE:
case OP_LCALL_MEMBASE:
case OP_CALL_MEMBASE:
g_assert (arm_is_imm12 (ins->inst_offset));
g_assert (ins->sreg1 != ARMREG_LR);
- ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
- ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
+ call = (MonoCallInst*)ins;
+ if (call->method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
+ ARM_ADD_REG_IMM8 (code, ARMREG_LR, ARMREG_PC, 4);
+ ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
+ *((gpointer*)code) = (gpointer)call->method;
+ code += 4;
+ } else {
+ ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
+ ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
+ }
break;
case OP_OUTARG:
g_assert_not_reached ();
g_assert_not_reached ();
ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_LR);
break;
- case CEE_THROW: {
+ case OP_THROW: {
if (ins->sreg1 != ARMREG_R0)
ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
(gpointer)"mono_arch_throw_exception");
- if (cfg->method->dynamic) {
- ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
- ARM_B (code, 0);
- *(gpointer*)code = NULL;
- code += 4;
- ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
- ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
- } else {
- ARM_BL (code, 0);
- }
+ code = emit_call_seq (cfg, code);
break;
}
case OP_RETHROW: {
ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
(gpointer)"mono_arch_rethrow_exception");
- if (cfg->method->dynamic) {
- ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
- ARM_B (code, 0);
- *(gpointer*)code = NULL;
- code += 4;
- ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
- ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
- } else {
- ARM_BL (code, 0);
- }
+ code = emit_call_seq (cfg, code);
break;
}
case OP_START_HANDLER:
}
ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
break;
- case CEE_ENDFINALLY:
+ case OP_ENDFINALLY:
if (arm_is_imm12 (ins->inst_left->inst_offset)) {
ARM_LDR_IMM (code, ARMREG_IP, ins->inst_left->inst_basereg, ins->inst_left->inst_offset);
} else {
case OP_LABEL:
ins->inst_c0 = code - cfg->native_code;
break;
- case CEE_BR:
+ case OP_BR:
if (ins->flags & MONO_INST_BRLABEL) {
/*if (ins->inst_i0->inst_c0) {
ARM_B (code, 0);
break;
/* floating point opcodes */
+#ifdef ARM_FPU_FPA
case OP_R8CONST:
/* FIXME: we can optimize the imm load by dealing with part of
* the displacement in LDFD (aligning to 512).
case CEE_CONV_R8:
ARM_FLTD (code, ins->dreg, ins->sreg1);
break;
- case OP_X86_FP_LOAD_I8:
+#elif defined(ARM_FPU_VFP)
+ case OP_R8CONST:
+ /* FIXME: we can optimize the imm load by dealing with part of
+ * the displacement in LDFD (aligning to 512).
+ */
+ code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
+ ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
+ break;
+ case OP_R4CONST:
+ code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
+ ARM_FLDS (code, ins->dreg, ARMREG_LR, 0);
+ ARM_CVTS (code, ins->dreg, ins->dreg);
+ break;
+ case OP_STORER8_MEMBASE_REG:
+ g_assert (arm_is_fpimm8 (ins->inst_offset));
+ ARM_FSTD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
+ break;
+ case OP_LOADR8_MEMBASE:
+ g_assert (arm_is_fpimm8 (ins->inst_offset));
+ ARM_FLDD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
+ break;
+ case OP_STORER4_MEMBASE_REG:
+ g_assert (arm_is_fpimm8 (ins->inst_offset));
+ ARM_FSTS (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
+ break;
+ case OP_LOADR4_MEMBASE:
+ g_assert (arm_is_fpimm8 (ins->inst_offset));
+ ARM_FLDS (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
+ break;
+ case CEE_CONV_R_UN: {
g_assert_not_reached ();
- /*x86_fild_membase (code, ins->inst_basereg, ins->inst_offset, TRUE);*/
break;
- case OP_X86_FP_LOAD_I4:
+ }
+ case CEE_CONV_R4:
+ g_assert_not_reached ();
+ //ARM_FLTS (code, ins->dreg, ins->sreg1);
+ break;
+ case CEE_CONV_R8:
g_assert_not_reached ();
- /*x86_fild_membase (code, ins->inst_basereg, ins->inst_offset, FALSE);*/
+ //ARM_FLTD (code, ins->dreg, ins->sreg1);
break;
+#endif
case OP_FCONV_TO_I1:
code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
break;
ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
break;
}
+#ifdef ARM_FPU_FPA
case OP_FADD:
ARM_FPA_ADFD (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
break;
case OP_FNEG:
ARM_MNFD (code, ins->dreg, ins->sreg1);
+ break;
+#elif defined(ARM_FPU_VFP)
+ case OP_FADD:
+ ARM_VFP_ADDD (code, ins->dreg, ins->sreg1, ins->sreg2);
+ break;
+ case OP_FSUB:
+ ARM_VFP_SUBD (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
+ case OP_FMUL:
+ ARM_VFP_MULD (code, ins->dreg, ins->sreg1, ins->sreg2);
+ break;
+ case OP_FDIV:
+ ARM_VFP_DIVD (code, ins->dreg, ins->sreg1, ins->sreg2);
+ break;
+ case OP_FNEG:
+ ARM_NEGD (code, ins->dreg, ins->sreg1);
+ break;
+#endif
case OP_FREM:
/* emulated */
g_assert_not_reached ();
case OP_FCOMPARE:
/* each fp compare op needs to do its own */
g_assert_not_reached ();
- ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
+ //ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
break;
case OP_FCEQ:
+#ifdef ARM_FPU_FPA
ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
+#elif defined(ARM_FPU_VFP)
+ ARM_CMPD (code, ins->sreg1, ins->sreg2);
+#endif
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
break;
case OP_FCLT:
+#ifdef ARM_FPU_FPA
ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
+#elif defined(ARM_FPU_VFP)
+ ARM_CMPD (code, ins->sreg1, ins->sreg2);
+#endif
ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
break;
case OP_FCLT_UN:
+#ifdef ARM_FPU_FPA
ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
+#elif defined(ARM_FPU_VFP)
+ ARM_CMPD (code, ins->sreg1, ins->sreg2);
+#endif
ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
break;
case OP_FCGT:
/* swapped */
+#ifdef ARM_FPU_FPA
ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
+#elif defined(ARM_FPU_VFP)
+ ARM_CMPD (code, ins->sreg2, ins->sreg1);
+#endif
ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
break;
case OP_FCGT_UN:
/* swapped */
+#ifdef ARM_FPU_FPA
ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
+#elif defined(ARM_FPU_VFP)
+ ARM_CMPD (code, ins->sreg2, ins->sreg1);
+#endif
ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
* V Unordered ARMCOND_VS
*/
case OP_FBEQ:
+#ifdef ARM_FPU_FPA
ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
+#elif defined(ARM_FPU_VFP)
+ ARM_CMPD (code, ins->sreg1, ins->sreg2);
+#endif
EMIT_COND_BRANCH (ins, CEE_BEQ - CEE_BEQ);
break;
case OP_FBNE_UN:
+#ifdef ARM_FPU_FPA
ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
+#elif defined(ARM_FPU_VFP)
+ ARM_CMPD (code, ins->sreg1, ins->sreg2);
+#endif
EMIT_COND_BRANCH (ins, CEE_BNE_UN - CEE_BEQ);
break;
case OP_FBLT:
+#ifdef ARM_FPU_FPA
ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
+#elif defined(ARM_FPU_VFP)
+ ARM_CMPD (code, ins->sreg1, ins->sreg2);
+#endif
EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
break;
case OP_FBLT_UN:
+#ifdef ARM_FPU_FPA
ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
+#elif defined(ARM_FPU_VFP)
+ ARM_CMPD (code, ins->sreg1, ins->sreg2);
+#endif
EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
break;
case OP_FBGT:
+#ifdef ARM_FPU_FPA
ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
+#elif defined(ARM_FPU_VFP)
+ ARM_CMPD (code, ins->sreg2, ins->sreg1);
+#endif
EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set, swapped args */
break;
case OP_FBGT_UN:
+#ifdef ARM_FPU_FPA
ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
+#elif defined(ARM_FPU_VFP)
+ ARM_CMPD (code, ins->sreg2, ins->sreg1);
+#endif
EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set, swapped args */
break;
case OP_FBGE:
+#ifdef ARM_FPU_FPA
ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
+#elif defined(ARM_FPU_VFP)
+ ARM_CMPD (code, ins->sreg1, ins->sreg2);
+#endif
EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_CS);
break;
case OP_FBGE_UN:
+#ifdef ARM_FPU_FPA
ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
+#elif defined(ARM_FPU_VFP)
+ ARM_CMPD (code, ins->sreg1, ins->sreg2);
+#endif
EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
break;
case OP_FBLE:
+#ifdef ARM_FPU_FPA
ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
+#elif defined(ARM_FPU_VFP)
+ ARM_CMPD (code, ins->sreg2, ins->sreg1);
+#endif
EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_CS); /* swapped */
break;
case OP_FBLE_UN:
+#ifdef ARM_FPU_FPA
ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
+#elif defined(ARM_FPU_VFP)
+ ARM_CMPD (code, ins->sreg2, ins->sreg1);
+#endif
EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE); /* swapped */
break;
- case CEE_CKFINITE: {
+ case OP_CKFINITE: {
/*ppc_stfd (code, ins->sreg1, -8, ppc_sp);
ppc_lwz (code, ppc_r11, -8, ppc_sp);
ppc_rlwinm (code, ppc_r11, ppc_r11, 0, 1, 31);
const unsigned char *target;
if (patch_info->type == MONO_PATCH_INFO_SWITCH) {
- gpointer *table = (gpointer *)patch_info->data.table->table;
gpointer *jt = (gpointer*)(ip + 8);
int i;
/* jt is the inlined jump table, 2 instructions after ip
max_offset += 6;
while (ins) {
- max_offset += ((guint8 *)arm_cpu_desc [ins->opcode])[MONO_INST_LEN];
+ max_offset += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
ins = ins->next;
}
}
}
for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
ArgInfo *ainfo = cinfo->args + i;
- inst = cfg->varinfo [pos];
+ inst = cfg->args [pos];
if (cfg->verbose_level > 2)
g_print ("Saving argument %d (type: %d)\n", i, ainfo->regtype);
}
break;
case 2:
- g_assert (arm_is_imm8 (inst->inst_offset));
- ARM_STRH_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
+ if (arm_is_imm8 (inst->inst_offset)) {
+ ARM_STRH_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
+ } else {
+ code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
+ ARM_ADD_REG_REG (code, ARMREG_IP, ARMREG_IP, inst->inst_basereg);
+ ARM_STRH_IMM (code, ainfo->reg, ARMREG_IP, 0);
+ }
break;
case 8:
g_assert (arm_is_imm12 (inst->inst_offset));
}
break;
}
+ } else if (ainfo->regtype == RegTypeBaseGen) {
+ g_assert (arm_is_imm12 (prev_sp_offset + ainfo->offset));
+ g_assert (arm_is_imm12 (inst->inst_offset));
+ ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
+ ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
+ ARM_STR_IMM (code, ARMREG_R3, inst->inst_basereg, inst->inst_offset);
} else if (ainfo->regtype == RegTypeBase) {
g_assert (arm_is_imm12 (prev_sp_offset + ainfo->offset));
switch (ainfo->size) {
break;
case 2:
ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
- g_assert (arm_is_imm8 (inst->inst_offset));
- ARM_STRH_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
+ if (arm_is_imm8 (inst->inst_offset)) {
+ ARM_STRH_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
+ } else {
+ code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
+ ARM_ADD_REG_REG (code, ARMREG_IP, ARMREG_IP, inst->inst_basereg);
+ ARM_STRH_IMM (code, ARMREG_LR, ARMREG_IP, 0);
+ }
break;
case 8:
g_assert (arm_is_imm12 (inst->inst_offset));
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
(gpointer)"mono_get_lmf_addr");
- if (cfg->method->dynamic) {
- ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
- ARM_B (code, 0);
- *(gpointer*)code = NULL;
- code += 4;
- ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
- ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
- } else {
- ARM_BL (code, 0);
- }
+ code = emit_call_seq (cfg, code);
/* we build the MonoLMF structure on the stack - see mini-arm.h */
/* lmf_offset is the offset from the previous stack pointer,
* alloc_size is the total stack space allocated, so the offset
/* *(lmf_addr) = r1 */
ARM_STR_IMM (code, ARMREG_R1, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
/* save method info */
- code = mono_arm_emit_load_imm (code, ARMREG_R2, method);
+ code = mono_arm_emit_load_imm (code, ARMREG_R2, GPOINTER_TO_INT (method));
ARM_STR_IMM (code, ARMREG_R2, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, method));
ARM_STR_IMM (code, ARMREG_SP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, ebp));
/* save the current IP */
void
mono_arch_emit_epilog (MonoCompile *cfg)
{
- MonoJumpInfo *patch_info;
MonoMethod *method = cfg->method;
int pos, i, rot_amount;
int max_epilog_size = 16 + 20*4;
}
/*
- * Keep in sync with CEE_JMP
+ * Keep in sync with OP_JMP
*/
code = cfg->native_code + cfg->code_len;
code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->stack_usage);
ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
}
+ /* FIXME: add v4 thumb interworking support */
ARM_POP_NWB (code, cfg->used_int_regs | ((1 << ARMREG_SP) | (1 << ARMREG_PC)));
}
if (strcmp (name, "ArrayTypeMismatchException") == 0)
return MONO_EXC_ARRAY_TYPE_MISMATCH;
g_error ("Unknown intrinsic exception %s\n", name);
+ return -1;
}
void
mono_arch_emit_exceptions (MonoCompile *cfg)
{
MonoJumpInfo *patch_info;
- int nthrows, i;
+ int i;
guint8 *code;
const guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM] = {NULL};
guint8 exc_throw_found [MONO_EXC_INTRINS_NUM] = {0};
- guint32 code_size;
- int exc_count = 0;
int max_epilog_size = 50;
/* count the number of exception infos */
patch_info->data.name = "mono_arch_throw_exception_by_name";
patch_info->ip.i = code - cfg->native_code;
ARM_B (code, 0);
- *(gpointer*)code = ex_name;
+ *(gconstpointer*)code = ex_name;
code += 4;
break;
}
this->sreg1 = this_reg;
this->dreg = mono_regstate_next_int (cfg->rs);
mono_bblock_add_inst (cfg->cbb, this);
- mono_call_inst_add_outarg_reg (inst, this->dreg, this_dreg, FALSE);
+ mono_call_inst_add_outarg_reg (cfg, inst, this->dreg, this_dreg, FALSE);
}
if (vt_reg != -1) {
vtarg->sreg1 = vt_reg;
vtarg->dreg = mono_regstate_next_int (cfg->rs);
mono_bblock_add_inst (cfg->cbb, vtarg);
- mono_call_inst_add_outarg_reg (inst, vtarg->dreg, ARMREG_R0, FALSE);
+ mono_call_inst_add_outarg_reg (cfg, inst, vtarg->dreg, ARMREG_R0, FALSE);
}
}
cfg->jit_info->used_regs |= cfg->stack_usage << 14;
}
+#ifdef MONO_ARCH_HAVE_IMT
+
+void
+mono_arch_emit_imt_argument (MonoCompile *cfg, MonoCallInst *call)
+{
+	/* No-op on ARM: the IMT method pointer is not passed in a register,
+	 * it is embedded in the call instruction stream at the call site and
+	 * recovered from there by mono_arch_find_imt_method (). */
+}
+
+MonoMethod*
+mono_arch_find_imt_method (gpointer *regs, guint8 *code)
+{
+	guint32 *code_ptr = (guint32*)code;
+	/* The call site ends with a PC-loading LDR two words back; the IMT
+	 * method pointer is stored in the code stream right after that LDR. */
+	code_ptr -= 2;
+	if (!IS_LDR_PC (code_ptr [0])) {
+		/* Diagnostic fixed: the expected instruction is an LDR (see
+		 * IS_LDR_PC/ARM_LDR_IMM), not an LDC (coprocessor load). */
+		g_warning ("invalid code stream, instruction before IMT value is not a LDR in %s() (code %p value 0: 0x%x -1: 0x%x -2: 0x%x)", __FUNCTION__, code, code_ptr [2], code_ptr [1], code_ptr [0]);
+		g_assert (IS_LDR_PC (code_ptr [0]));
+	}
+	return (MonoMethod*) code_ptr [1];
+}
+
+MonoObject*
+mono_arch_find_this_argument (gpointer *regs, MonoMethod *method)
+{
+	/* Recover the receiver ("this") of @method from the saved argument
+	 * registers in @regs. */
+	MonoMethodSignature *sig = mono_method_signature (method);
+	return mono_arch_get_this_arg_from_call (sig, (gssize*)regs, NULL);
+}
+
+
+#define ENABLE_WRONG_METHOD_CHECK 0
+#define BASE_SIZE (4 * 4)
+#define BSEARCH_ENTRY_SIZE (4 * 4)
+#define CMP_SIZE (3 * 4)
+#define BRANCH_SIZE (1 * 4)
+#define CALL_SIZE (2 * 4)
+#define WMC_SIZE (5 * 4)
+#define DISTANCE(A, B) (((gint32)(B)) - ((gint32)(A)))
+
+/*
+ * Store @value at @code and patch the PC-relative LDR at @target so its
+ * 12-bit immediate offset addresses the stored word.  Returns the next
+ * free code slot.
+ */
+static arminstr_t *
+arm_emit_value_and_patch_ldr (arminstr_t *code, arminstr_t *target, guint32 value)
+{
+	/* Use a signed type: with the original guint32 the "delta >= 0"
+	 * assertion below was a tautology and could never fire. */
+	gint32 delta = DISTANCE (target, code);
+	/* On ARM the PC reads 8 bytes ahead of the executing instruction. */
+	delta -= 8;
+	g_assert (delta >= 0 && delta <= 0xFFF);
+	*target = *target | delta;
+	*code = value;
+	return code + 1;
+}
+
+/*
+ * Build an IMT (interface method table) dispatch thunk: the emitted code
+ * compares the IMT method pointer recovered from the call site against
+ * the thunk's entries and, on a match, jumps through the corresponding
+ * vtable slot.  Non-equals entries form a binary-search skeleton.
+ */
+gpointer
+mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count)
+{
+	int size, i, extra_space = 0;
+	arminstr_t *code, *start, *vtable_target = NULL;
+	size = BASE_SIZE;
+
+	/* First pass: compute each entry's chunk size so we know how much
+	 * code space to reserve before emitting anything. */
+	for (i = 0; i < count; ++i) {
+		MonoIMTCheckItem *item = imt_entries [i];
+		if (item->is_equals) {
+			/* The vtable-slot displacement must fit the 12-bit LDR immediate. */
+			g_assert (arm_is_imm12 (DISTANCE (vtable, &vtable->vtable[item->vtable_slot])));
+
+			if (item->check_target_idx) {
+				if (!item->compare_done)
+					item->chunk_size += CMP_SIZE;
+				item->chunk_size += BRANCH_SIZE;
+			} else {
+#if ENABLE_WRONG_METHOD_CHECK
+				item->chunk_size += WMC_SIZE;
+#endif
+			}
+			item->chunk_size += CALL_SIZE;
+		} else {
+			item->chunk_size += BSEARCH_ENTRY_SIZE;
+			/* The bsearch node performs the compare for its target. */
+			imt_entries [item->check_target_idx]->compare_done = TRUE;
+		}
+		size += item->chunk_size;
+	}
+
+	start = code = mono_code_manager_reserve (domain->code_mp, size);
+
+#if DEBUG_IMT
+	printf ("building IMT thunk for class %s %s entries %d code size %d code at %p end %p vtable %p\n", vtable->klass->name_space, vtable->klass->name, count, size, start, ((guint8*)start) + size, vtable);
+	for (i = 0; i < count; ++i) {
+		MonoIMTCheckItem *item = imt_entries [i];
+		printf ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i, item->method, item->method->name, &vtable->vtable [item->vtable_slot], item->is_equals, item->chunk_size);
+	}
+#endif
+
+	ARM_PUSH2 (code, ARMREG_R0, ARMREG_R1);
+	/* The IMT method pointer is embedded at the call site just before
+	 * the return address: load it from [lr, #-4] into r0. */
+	ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, -4);
+	vtable_target = code;
+	/* PC-relative load of the vtable address into ip; the offset is
+	 * patched later by arm_emit_value_and_patch_ldr (). */
+	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
+
+	/* Second pass: emit the compare/branch/jump code for each entry. */
+	for (i = 0; i < count; ++i) {
+		MonoIMTCheckItem *item = imt_entries [i];
+		arminstr_t *imt_method = NULL;
+		item->code_target = (guint8*)code;
+
+		if (item->is_equals) {
+			if (item->check_target_idx) {
+				if (!item->compare_done) {
+					/* Compare r0 against this entry's method (value patched in below). */
+					imt_method = code;
+					ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
+					ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
+				}
+				item->jmp_code = (guint8*)code;
+				ARM_B_COND (code, ARMCOND_NE, 0);
+
+				/* Match: restore scratch regs and jump through the vtable slot. */
+				ARM_POP2 (code, ARMREG_R0, ARMREG_R1);
+				ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, DISTANCE (vtable, &vtable->vtable[item->vtable_slot]));
+			} else {
+				/*Enable the commented code to assert on wrong method*/
+#if ENABLE_WRONG_METHOD_CHECK
+				imt_method = code;
+				ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
+				ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
+				ARM_B_COND (code, ARMCOND_NE, 1);
+#endif
+				ARM_POP2 (code, ARMREG_R0, ARMREG_R1);
+				ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, DISTANCE (vtable, &vtable->vtable[item->vtable_slot]));
+
+#if ENABLE_WRONG_METHOD_CHECK
+				ARM_DBRK (code);
+#endif
+			}
+
+			/* Emit this entry's method pointer and patch the LDR above. */
+			if (imt_method)
+				code = arm_emit_value_and_patch_ldr (code, imt_method, (guint32)item->method);
+
+			/*must emit after unconditional branch*/
+			if (vtable_target) {
+				code = arm_emit_value_and_patch_ldr (code, vtable_target, (guint32)vtable);
+				item->chunk_size += 4;
+				vtable_target = NULL;
+			}
+
+			/*We reserve the space for bsearch IMT values after the first entry with an absolute jump*/
+			if (extra_space) {
+				code += extra_space;
+				extra_space = 0;
+			}
+		} else {
+			/* Binary-search node: compare and branch on >=; its method
+			 * value word is emitted later, after an unconditional jump. */
+			ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
+			ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
+
+			item->jmp_code = (guint8*)code;
+			ARM_B_COND (code, ARMCOND_GE, 0);
+			++extra_space;
+		}
+	}
+
+	/* Third pass: resolve forward branches and emit the deferred
+	 * bsearch method-value words, patching their PC-relative LDRs. */
+	for (i = 0; i < count; ++i) {
+		MonoIMTCheckItem *item = imt_entries [i];
+		if (item->jmp_code) {
+			if (item->check_target_idx)
+				arm_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
+		}
+		if (i > 0 && item->is_equals) {
+			int j;
+			arminstr_t *space_start = (arminstr_t*)(item->code_target + item->chunk_size);
+			for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j) {
+				space_start = arm_emit_value_and_patch_ldr (space_start, (arminstr_t*)imt_entries [j]->code_target, (guint32)imt_entries [j]->method);
+			}
+		}
+	}
+
+#if DEBUG_IMT
+	{
+		char *buff = g_strdup_printf ("thunk_for_class_%s_%s_entries_%d", vtable->klass->name_space, vtable->klass->name, count);
+		mono_disassemble_code (NULL, (guint8*)start, size, buff);
+		g_free (buff);
+	}
+#endif
+
+	mono_arch_flush_icache ((guint8*)start, size);
+	mono_stats.imt_thunks_size += code - start;
+
+	g_assert (DISTANCE (start, code) <= size);
+	return start;
+}
+
+#endif
+
+