#include <mono/metadata/appdomain.h>
#include <mono/metadata/debug-helpers.h>
+#include <mono/utils/mono-mmap.h>
#include "mini-arm.h"
-#include "inssel.h"
#include "cpu-arm.h"
#include "trace.h"
+#include "ir-emit.h"
#ifdef ARM_FPU_FPA
#include "mono/arch/arm/arm-fpa-codegen.h"
#elif defined(ARM_FPU_VFP)
#include "mono/arch/arm/arm-vfp-codegen.h"
#endif
+#if defined(__ARM_EABI__) && defined(__linux__) && !defined(PLATFORM_ANDROID)
+#define HAVE_AEABI_READ_TP 1
+#endif
+
+static gint lmf_tls_offset = -1;
+static gint lmf_addr_tls_offset = -1;
+
/* This mutex protects architecture specific caches */
#define mono_mini_arch_lock() EnterCriticalSection (&mini_arch_mutex)
#define mono_mini_arch_unlock() LeaveCriticalSection (&mini_arch_mutex)
static CRITICAL_SECTION mini_arch_mutex;
static int v5_supported = 0;
+static int v7_supported = 0;
static int thumb_supported = 0;
-static int mono_arm_is_rotated_imm8 (guint32 val, gint *rot_amount);
+/*
+ * The code generated for sequence points reads from this location, which is
+ * made read-only when single stepping is enabled.
+ */
+static gpointer ss_trigger_page;
+
+/* Enabled breakpoints read from this trigger page */
+static gpointer bp_trigger_page;
+
+/* Structure used by the sequence points in AOTed code */
+typedef struct {
+ gpointer ss_trigger_page;
+ gpointer bp_trigger_page;
+ guint8* bp_addrs [MONO_ZERO_LEN_ARRAY];
+} SeqPointInfo;
/*
* TODO:
* 2) softfloat: the compiler emulates all the fp ops. Usually uses the
* ugly swapped double format (I guess a softfloat-vfp exists, too, though).
* 3) VFP: the new and actually sensible and useful FP support. Implemented
- * in HW or kernel-emulated, requires new tools. I think this ios what symbian uses.
+ * in HW or kernel-emulated, requires new tools. I think this is what symbian uses.
*
* The plan is to write the FPA support first. softfloat can be tested in a chroot.
*/
#define ADD_LR_PC_4 ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 25) | (1 << 23) | (ARMREG_PC << 16) | (ARMREG_LR << 12) | 4)
#define MOV_LR_PC ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 24) | (0xa << 20) | (ARMREG_LR << 12) | ARMREG_PC)
#define DEBUG_IMT 0
+
+/* A variant of ARM_LDR_IMM which can handle large offsets */
+#define ARM_LDR_IMM_GENERAL(code, dreg, basereg, offset, scratch_reg) do { \
+ if (arm_is_imm12 ((offset))) { \
+ ARM_LDR_IMM (code, (dreg), (basereg), (offset)); \
+ } else { \
+ g_assert ((scratch_reg) != (basereg)); \
+ code = mono_arm_emit_load_imm (code, (scratch_reg), (offset)); \
+ ARM_LDR_REG_REG (code, (dreg), (basereg), (scratch_reg)); \
+ } \
+ } while (0)
+
+/* A variant of ARM_STR_IMM which can handle large offsets: when OFFSET does
+ * not fit in an imm12 encoding, it is first materialized into SCRATCH_REG
+ * (which must differ from BASEREG) and a register-offset store is used. */
+#define ARM_STR_IMM_GENERAL(code, dreg, basereg, offset, scratch_reg) do { \
+ if (arm_is_imm12 ((offset))) { \
+ ARM_STR_IMM (code, (dreg), (basereg), (offset)); \
+ } else { \
+ g_assert ((scratch_reg) != (basereg)); \
+ code = mono_arm_emit_load_imm (code, (scratch_reg), (offset)); \
+ ARM_STR_REG_REG (code, (dreg), (basereg), (scratch_reg)); \
+ } \
+ } while (0)
const char*
-mono_arch_regname (int reg) {
+mono_arch_regname (int reg)
+{
static const char * rnames[] = {
"arm_r0", "arm_r1", "arm_r2", "arm_r3", "arm_v1",
"arm_v2", "arm_v3", "arm_v4", "arm_v5", "arm_v6",
}
const char*
-mono_arch_fregname (int reg) {
+mono_arch_fregname (int reg)
+{
static const char * rnames[] = {
"arm_f0", "arm_f1", "arm_f2", "arm_f3", "arm_f4",
"arm_f5", "arm_f6", "arm_f7", "arm_f8", "arm_f9",
return "unknown";
}
+#ifndef DISABLE_JIT
+
static guint8*
emit_big_add (guint8 *code, int dreg, int sreg, int imm)
{
return code;
}
+/*
+ * emit_move_return_value:
+ *
+ *   Emit code moving the return value of the call INS into ins->dreg.
+ * Only FP calls need any work here; integer results are already in the
+ * core registers callers read them from.
+ */
+static guint8*
+emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
+{
+ switch (ins->opcode) {
+ case OP_FCALL:
+ case OP_FCALL_REG:
+ case OP_FCALL_MEMBASE:
+#ifdef ARM_FPU_FPA
+ /* FPA: the result is returned in f0; copy it only if dreg differs */
+ if (ins->dreg != ARM_FPA_F0)
+ ARM_MVFD (code, ins->dreg, ARM_FPA_F0);
+#elif defined(ARM_FPU_VFP)
+ /* soft-float ABI: an R4 result arrives in r0 — move it into a VFP reg
+  * and widen it (dregs presumably hold doubles — TODO confirm); an R8
+  * result arrives in the r0/r1 pair and is packed with FMDRR. */
+ if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4) {
+ ARM_FMSR (code, ins->dreg, ARMREG_R0);
+ ARM_CVTS (code, ins->dreg, ins->dreg);
+ } else {
+ ARM_FMDRR (code, ARMREG_R0, ARMREG_R1, ins->dreg);
+ }
+#endif
+ break;
+ }
+
+ return code;
+}
+
+#endif /* #ifndef DISABLE_JIT */
+
/*
* mono_arch_get_argument_info:
* @csig: a method signature
mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
{
int k, frame_size = 0;
- int size, align, pad;
+ guint32 size, align, pad;
int offset = 8;
if (MONO_TYPE_ISSTRUCT (csig->ret)) {
arg_info [0].size = frame_size;
for (k = 0; k < param_count; k++) {
-
- if (csig->pinvoke)
- size = mono_type_native_stack_size (csig->params [k], &align);
- else
- size = mini_type_stack_size (NULL, csig->params [k], &align);
+ size = mini_type_stack_size_full (NULL, csig->params [k], &align, csig->pinvoke);
/* ignore alignment for now */
align = 1;
return frame_size;
}
+#define MAX_ARCH_DELEGATE_PARAMS 3
+
+/*
+ * get_delegate_invoke_impl:
+ *
+ *   Generate the native thunk used to invoke a delegate. With HAS_TARGET,
+ * the thunk loads the delegate's method_ptr into ip, replaces the 'this'
+ * argument (r0) with the delegate's target and jumps to ip; otherwise it
+ * slides the PARAM_COUNT register arguments down by one register and jumps.
+ * Returns the start of the generated code; *CODE_SIZE receives its length
+ * when non-NULL.
+ */
static gpointer
-decode_vcall_slot_from_ldr (guint32 ldr, gpointer *regs, int *displacement)
+get_delegate_invoke_impl (gboolean has_target, gboolean param_count, guint32 *code_size)
{
+ /* NOTE(review): param_count is declared gboolean but used as an int count
+  * (0..MAX_ARCH_DELEGATE_PARAMS); this works because gboolean is gint, but
+  * the type looks wrong — verify/clean up. */
- char *o = NULL;
- int reg, offset = 0;
- reg = (ldr >> 16 ) & 0xf;
- offset = ldr & 0xfff;
- if (((ldr >> 23) & 1) == 0) /*U bit, 0 means negative and 1 positive*/
- offset = -offset;
- /*g_print ("found vcall at r%d + %d for code at %p 0x%x\n", reg, offset, code, *code);*/
- o = regs [reg];
+ guint8 *code, *start;
- *displacement = offset;
- return o;
-}
+ if (has_target) {
+ start = code = mono_global_codeman_reserve (12);
-gpointer
-mono_arch_get_vcall_slot (guint8 *code_ptr, gpointer *regs, int *displacement)
-{
- guint32* code = (guint32*)code_ptr;
-
- /* Locate the address of the method-specific trampoline. The call using
- the vtable slot that took the processing flow to 'arch_create_jit_trampoline'
- looks something like this:
-
- ldr rA, rX, #offset
- mov lr, pc
- mov pc, rA
- or better:
- mov lr, pc
- ldr pc, rX, #offset
-
- The call sequence could be also:
- ldr ip, pc, 0
- b skip
- function pointer literal
- skip:
- mov lr, pc
- mov pc, ip
- Note that on ARM5+ we can use one instruction instead of the last two.
- Therefore, we need to locate the 'ldr rA' instruction to know which
- register was used to hold the method addrs.
- */
-
- /* This is the instruction after "ldc pc, xxx", "mov pc, xxx" or "bl xxx" could be either the IMT value or some other instruction*/
- --code;
-
- /* Three possible code sequences can happen here:
- * interface call:
- *
- * add lr, [pc + #4]
- * ldr pc, [rX - #offset]
- * .word IMT value
- *
- * virtual call:
- *
- * mov lr, pc
- * ldr pc, [rX - #offset]
- *
- * direct branch with bl:
- *
- * bl #offset
- *
- * direct branch with mov:
- *
- * mv pc, rX
- *
- * We only need to identify interface and virtual calls, the others can be ignored.
- *
- */
- if (IS_LDR_PC (code [-1]) && code [-2] == ADD_LR_PC_4)
- return decode_vcall_slot_from_ldr (code [-1], regs, displacement);
+ /* Replace the this argument with the target */
+ ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
+ ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, target));
+ ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
+
+ g_assert ((code - start) <= 12);
- if (IS_LDR_PC (code [0]) && code [-1] == MOV_LR_PC)
- return decode_vcall_slot_from_ldr (code [0], regs, displacement);
+ mono_arch_flush_icache (start, 12);
+ } else {
+ int size, i;
- return NULL;
+ size = 8 + param_count * 4;
+ start = code = mono_global_codeman_reserve (size);
+
+ ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
+ /* slide down the arguments */
+ for (i = 0; i < param_count; ++i) {
+ ARM_MOV_REG_REG (code, (ARMREG_R0 + i), (ARMREG_R0 + i + 1));
+ }
+ ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
+
+ g_assert ((code - start) <= size);
+
+ mono_arch_flush_icache (start, size);
+ }
+
+ if (code_size)
+ *code_size = code - start;
+
+ return start;
}
-gpointer*
-mono_arch_get_vcall_slot_addr (guint8* code, gpointer *regs)
+/*
+ * mono_arch_get_delegate_invoke_impls:
+ *
+ * Return a list of MonoAotTrampInfo structures for the delegate invoke impl
+ * trampolines.
+ */
+GSList*
+mono_arch_get_delegate_invoke_impls (void)
{
- gpointer vt;
- int displacement;
- vt = mono_arch_get_vcall_slot (code, regs, &displacement);
- if (!vt)
- return NULL;
- return (gpointer*)((char*)vt + displacement);
-}
+ GSList *res = NULL;
+ guint8 *code;
+ guint32 code_len;
+ int i;
-#define MAX_ARCH_DELEGATE_PARAMS 3
+ /* the has-target variant first, then the no-target variants for 0..MAX params */
+ code = get_delegate_invoke_impl (TRUE, 0, &code_len);
+ res = g_slist_prepend (res, mono_tramp_info_create (g_strdup ("delegate_invoke_impl_has_target"), code, code_len, NULL, NULL));
+
+ for (i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) {
+ /* NOTE(review): these no-target variants are named "..._target_%d";
+  * the name reads oddly but must match the AOT trampoline lookup in
+  * mono_arch_get_delegate_invoke_impl — keep them in sync. */
+ code = get_delegate_invoke_impl (FALSE, i, &code_len);
+ res = g_slist_prepend (res, mono_tramp_info_create (g_strdup_printf ("delegate_invoke_impl_target_%d", i), code, code_len, NULL, NULL));
+ }
+
+ return res;
+}
gpointer
mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
mono_mini_arch_unlock ();
return cached;
}
-
- start = code = mono_global_codeman_reserve (12);
-
- /* Replace the this argument with the target */
- ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
- ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, target));
- ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
-
- g_assert ((code - start) <= 12);
- mono_arch_flush_icache (code, 12);
+ if (mono_aot_only)
+ start = mono_aot_get_trampoline ("delegate_invoke_impl_has_target");
+ else
+ start = get_delegate_invoke_impl (TRUE, 0, NULL);
cached = start;
mono_mini_arch_unlock ();
return cached;
} else {
static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
- int size, i;
+ int i;
if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
return NULL;
return code;
}
- size = 8 + sig->param_count * 4;
- start = code = mono_global_codeman_reserve (size);
-
- ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
- /* slide down the arguments */
- for (i = 0; i < sig->param_count; ++i) {
- ARM_MOV_REG_REG (code, (ARMREG_R0 + i), (ARMREG_R0 + i + 1));
+ if (mono_aot_only) {
+ char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count);
+ start = mono_aot_get_trampoline (name);
+ g_free (name);
+ } else {
+ start = get_delegate_invoke_impl (FALSE, sig->param_count, NULL);
}
- ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
-
- g_assert ((code - start) <= size);
-
- mono_arch_flush_icache (code, size);
cache [sig->param_count] = start;
mono_mini_arch_unlock ();
return start;
}
gpointer
-mono_arch_get_this_arg_from_call (MonoMethodSignature *sig, gssize *regs, guint8 *code)
+mono_arch_get_this_arg_from_call (mgreg_t *regs, guint8 *code)
{
- /* FIXME: handle returning a struct */
- if (MONO_TYPE_ISSTRUCT (sig->ret))
- return (gpointer)regs [ARMREG_R1];
+ /* 'this' is always passed in r0; the vret arg, if any, is emitted after
+  * it (see the comment in get_call_info), so no signature check is needed */
return (gpointer)regs [ARMREG_R0];
}
void
mono_arch_init (void)
{
- InitializeCriticalSection (&mini_arch_mutex);
+ InitializeCriticalSection (&mini_arch_mutex);
+
+ /* Allocate the trigger pages (see the comments at the definitions of
+  * ss_trigger_page/bp_trigger_page); bp_trigger_page is immediately
+  * protected with no access so that reads from it fault once a
+  * breakpoint is enabled. */
+ ss_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
+ bp_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
+ mono_mprotect (bp_trigger_page, mono_pagesize (), 0);
+
+ /* make the ARM throw-exception helpers resolvable by name from AOT'ed code */
+ mono_aot_register_jit_icall ("mono_arm_throw_exception", mono_arm_throw_exception);
+ mono_aot_register_jit_icall ("mono_arm_throw_exception_by_token", mono_arm_throw_exception_by_token);
}
/*
mono_arch_cpu_optimizazions (guint32 *exclude_mask)
{
guint32 opts = 0;
+ const char *cpu_arch = getenv ("MONO_CPU_ARCH");
+ if (cpu_arch != NULL) {
+ thumb_supported = strstr (cpu_arch, "thumb") != NULL;
+ if (strncmp (cpu_arch, "armv", 4) == 0) {
+ v5_supported = cpu_arch [4] >= '5';
+ v7_supported = cpu_arch [4] >= '7';
+ }
+ } else {
+#if __APPLE__
+ thumb_supported = TRUE;
+ v5_supported = TRUE;
+#else
char buf [512];
char *line;
FILE *file = fopen ("/proc/cpuinfo", "r");
while ((line = fgets (buf, 512, file))) {
if (strncmp (line, "Processor", 9) == 0) {
char *ver = strstr (line, "(v");
- if (ver && (ver [2] == '5' || ver [2] == '6' || ver [2] == '7')) {
+ if (ver && (ver [2] == '5' || ver [2] == '6' || ver [2] == '7'))
v5_supported = TRUE;
- }
+ if (ver && (ver [2] == '7'))
+ v7_supported = TRUE;
continue;
}
if (strncmp (line, "Features", 8) == 0) {
fclose (file);
/*printf ("features: v5: %d, thumb: %d\n", v5_supported, thumb_supported);*/
}
+#endif
+ }
/* no arm-specific optimizations yet */
*exclude_mask = 0;
return opts;
}
+#ifndef DISABLE_JIT
+
static gboolean
is_regsize_var (MonoType *t) {
if (t->byref)
return TRUE;
- t = mono_type_get_underlying_type (t);
+ t = mini_type_get_underlying_type (NULL, t);
switch (t->type) {
case MONO_TYPE_I4:
case MONO_TYPE_U4:
mono_arch_get_global_int_regs (MonoCompile *cfg)
{
GList *regs = NULL;
+
+ /*
+ * FIXME: Interface calls might go through a static rgctx trampoline which
+ * sets V5, but it doesn't save it, so we need to save it ourselves, and
+ * avoid using it.
+ */
+ if (cfg->flags & MONO_CFG_HAS_CALLS)
+ cfg->uses_rgctx_reg = TRUE;
+
regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V1));
regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V2));
regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V3));
regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V4));
- regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V5));
+ if (!(cfg->compile_aot || cfg->uses_rgctx_reg || COMPILE_LLVM (cfg)))
+ /* V5 is reserved for passing the vtable/rgctx/IMT method */
+ regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V5));
/*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V6));*/
/*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));*/
return 2;
}
+#endif /* #ifndef DISABLE_JIT */
+
+#ifndef __GNUC_PREREQ
+#define __GNUC_PREREQ(maj, min) (0)
+#endif
+
void
mono_arch_flush_icache (guint8 *code, gint size)
{
+#if __APPLE__
+ sys_icache_invalidate (code, size);
+#elif __GNUC_PREREQ(4, 1)
+ /* GCC >= 4.1 provides a builtin for flushing the range [code, code+size) */
+ __clear_cache (code, code + size);
+#elif defined(PLATFORM_ANDROID)
+ /* presumably the __ARM_NR_cacheflush syscall (0xf0002), with r2 = flags = 0
+  * — TODO confirm against the kernel ABI */
+ const int syscall = 0xf0002;
+ __asm __volatile (
+ "mov r0, %0\n"
+ "mov r1, %1\n"
+ "mov r7, %2\n"
+ "mov r2, #0x0\n"
+ "svc 0x00000000\n"
+ :
+ : "r" (code), "r" (code + size), "r" (syscall)
+ : "r0", "r1", "r7", "r2"
+ );
+#else
+ /* NOTE(review): this fallback sets r2 but the clobber list names r3, not
+  * r2 — looks suspicious, verify */
__asm __volatile ("mov r0, %0\n"
"mov r1, %1\n"
"mov r2, %2\n"
: /* no outputs */
: "r" (code), "r" (code + size), "r" (0)
: "r0", "r1", "r3" );
-
+#endif
}
-enum {
+typedef enum {
+ RegTypeNone,
RegTypeGeneral,
+ RegTypeIRegPair,
RegTypeBase,
RegTypeBaseGen,
RegTypeFP,
RegTypeStructByVal,
RegTypeStructByAddr
-};
+} ArgStorage;
typedef struct {
gint32 offset;
guint16 vtsize; /* in param area */
guint8 reg;
- guint8 regtype : 4; /* 0 general, 1 basereg, 2 floating point register, see RegType* */
+ ArgStorage storage;
guint8 size : 4; /* 1, 2, 4, 8, or regs used by RegTypeStructByVal */
} ArgInfo;
typedef struct {
int nargs;
guint32 stack_usage;
- guint32 struct_ret;
+ gboolean vtype_retaddr;
+ /* The index of the vret arg in the argument list */
+ int vret_arg_index;
ArgInfo ret;
ArgInfo sig_cookie;
ArgInfo args [1];
#define DEBUG(a)
+#ifndef __GNUC__
+/*#define __alignof__(a) sizeof(a)*/
+#define __alignof__(type) G_STRUCT_OFFSET(struct { char c; type x; }, x)
+#endif
+
+#define PARAM_REGS 4
+
static void inline
add_general (guint *gr, guint *stack_size, ArgInfo *ainfo, gboolean simple)
{
if (*gr > ARMREG_R3) {
ainfo->offset = *stack_size;
ainfo->reg = ARMREG_SP; /* in the caller */
- ainfo->regtype = RegTypeBase;
+ ainfo->storage = RegTypeBase;
*stack_size += 4;
} else {
+ ainfo->storage = RegTypeGeneral;
ainfo->reg = *gr;
}
} else {
- if (*gr == ARMREG_R3
-#ifdef __ARM_EABI__
- && 0
+#if defined(__APPLE__) && defined(MONO_CROSS_COMPILE)
+ int i8_align = 4;
+#else
+ int i8_align = __alignof__ (gint64);
#endif
- ) {
+
+#if __ARM_EABI__
+ gboolean split = i8_align == 4;
+#else
+ gboolean split = TRUE;
+#endif
+
+ if (*gr == ARMREG_R3 && split) {
/* first word in r3 and the second on the stack */
ainfo->offset = *stack_size;
ainfo->reg = ARMREG_SP; /* in the caller */
- ainfo->regtype = RegTypeBaseGen;
+ ainfo->storage = RegTypeBaseGen;
*stack_size += 4;
} else if (*gr >= ARMREG_R3) {
#ifdef __ARM_EABI__
- *stack_size += 7;
- *stack_size &= ~7;
+ /* darwin aligns longs to 4 byte only */
+ if (i8_align == 8) {
+ *stack_size += 7;
+ *stack_size &= ~7;
+ }
#endif
ainfo->offset = *stack_size;
ainfo->reg = ARMREG_SP; /* in the caller */
- ainfo->regtype = RegTypeBase;
+ ainfo->storage = RegTypeBase;
*stack_size += 8;
} else {
#ifdef __ARM_EABI__
- if ((*gr) & 1)
+ if (i8_align == 8 && ((*gr) & 1))
(*gr) ++;
#endif
+ ainfo->storage = RegTypeIRegPair;
ainfo->reg = *gr;
}
(*gr) ++;
}
static CallInfo*
-calculate_sizes (MonoMethodSignature *sig, gboolean is_pinvoke)
+get_call_info (MonoGenericSharingContext *gsctx, MonoMemPool *mp, MonoMethodSignature *sig, gboolean is_pinvoke)
{
- guint i, gr;
+ guint i, gr, pstart;
int n = sig->hasthis + sig->param_count;
- guint32 simpletype;
+ MonoType *simpletype;
guint32 stack_size = 0;
- CallInfo *cinfo = g_malloc0 (sizeof (CallInfo) + sizeof (ArgInfo) * n);
+ CallInfo *cinfo;
+
+ if (mp)
+ cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n));
+ else
+ cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
+ cinfo->nargs = n;
gr = ARMREG_R0;
/* FIXME: handle returning a struct */
if (MONO_TYPE_ISSTRUCT (sig->ret)) {
- add_general (&gr, &stack_size, &cinfo->ret, TRUE);
- cinfo->struct_ret = ARMREG_R0;
+ guint32 align;
+
+ if (is_pinvoke && mono_class_native_size (mono_class_from_mono_type (sig->ret), &align) <= sizeof (gpointer)) {
+ cinfo->ret.storage = RegTypeStructByVal;
+ } else {
+ cinfo->vtype_retaddr = TRUE;
+ }
}
+ pstart = 0;
n = 0;
- if (sig->hasthis) {
- add_general (&gr, &stack_size, cinfo->args + n, TRUE);
- n++;
+ /*
+ * To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after
+ * the first argument, allowing 'this' to always be passed in the first arg reg.
+ * Also do this if the first argument is a reference type, since virtual calls
+ * are sometimes made using calli without sig->hasthis set, like in the delegate
+ * invoke wrappers.
+ */
+ if (cinfo->vtype_retaddr && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (mini_type_get_underlying_type (gsctx, sig->params [0]))))) {
+ if (sig->hasthis) {
+ add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
+ } else {
+ add_general (&gr, &stack_size, &cinfo->args [sig->hasthis + 0], TRUE);
+ pstart = 1;
+ }
+ n ++;
+ add_general (&gr, &stack_size, &cinfo->ret, TRUE);
+ cinfo->vret_arg_index = 1;
+ } else {
+ /* this */
+ if (sig->hasthis) {
+ add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
+ n ++;
+ }
+
+ if (cinfo->vtype_retaddr)
+ add_general (&gr, &stack_size, &cinfo->ret, TRUE);
}
- DEBUG(printf("params: %d\n", sig->param_count));
- for (i = 0; i < sig->param_count; ++i) {
+
+ DEBUG(printf("params: %d\n", sig->param_count));
+ for (i = pstart; i < sig->param_count; ++i) {
if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
- /* Prevent implicit arguments and sig_cookie from
+ /* Prevent implicit arguments and sig_cookie from
being passed in registers */
- gr = ARMREG_R3 + 1;
- /* Emit the signature cookie just before the implicit arguments */
- add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
- }
- DEBUG(printf("param %d: ", i));
+ gr = ARMREG_R3 + 1;
+ /* Emit the signature cookie just before the implicit arguments */
+ add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
+ }
+ DEBUG(printf("param %d: ", i));
if (sig->params [i]->byref) {
DEBUG(printf("byref\n"));
add_general (&gr, &stack_size, cinfo->args + n, TRUE);
n++;
continue;
}
- simpletype = mono_type_get_underlying_type (sig->params [i])->type;
- switch (simpletype) {
+ simpletype = mini_type_get_underlying_type (NULL, sig->params [i]);
+ switch (simpletype->type) {
case MONO_TYPE_BOOLEAN:
case MONO_TYPE_I1:
case MONO_TYPE_U1:
n++;
break;
case MONO_TYPE_GENERICINST:
- if (!mono_type_generic_inst_is_valuetype (sig->params [i])) {
+ if (!mono_type_generic_inst_is_valuetype (simpletype)) {
cinfo->args [n].size = sizeof (gpointer);
add_general (&gr, &stack_size, cinfo->args + n, TRUE);
n++;
gint size;
int align_size;
int nwords;
+ guint32 align;
- if (simpletype == MONO_TYPE_TYPEDBYREF) {
+ if (simpletype->type == MONO_TYPE_TYPEDBYREF) {
size = sizeof (MonoTypedRef);
+ align = sizeof (gpointer);
} else {
MonoClass *klass = mono_class_from_mono_type (sig->params [i]);
if (is_pinvoke)
- size = mono_class_native_size (klass, NULL);
+ size = mono_class_native_size (klass, &align);
else
- size = mono_class_value_size (klass, NULL);
+ size = mono_class_value_size (klass, &align);
}
DEBUG(printf ("load %d bytes struct\n",
mono_class_native_size (sig->params [i]->data.klass, NULL)));
align_size += (sizeof (gpointer) - 1);
align_size &= ~(sizeof (gpointer) - 1);
nwords = (align_size + sizeof (gpointer) -1 ) / sizeof (gpointer);
- cinfo->args [n].regtype = RegTypeStructByVal;
- /* FIXME: align gr and stack_size if needed */
+ cinfo->args [n].storage = RegTypeStructByVal;
+ /* FIXME: align stack_size if needed */
+#ifdef __ARM_EABI__
+ if (align >= 8 && (gr & 1))
+ gr ++;
+#endif
if (gr > ARMREG_R3) {
cinfo->args [n].size = 0;
cinfo->args [n].vtsize = nwords;
} else {
int rest = ARMREG_R3 - gr + 1;
int n_in_regs = rest >= nwords? nwords: rest;
+
cinfo->args [n].size = n_in_regs;
cinfo->args [n].vtsize = nwords - n_in_regs;
cinfo->args [n].reg = gr;
gr += n_in_regs;
+ nwords -= n_in_regs;
}
cinfo->args [n].offset = stack_size;
/*g_print ("offset for arg %d at %d\n", n, stack_size);*/
}
}
+ /* Handle the case where there are no implicit arguments */
+ if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
+ /* Prevent implicit arguments and sig_cookie from
+ being passed in registers */
+ gr = ARMREG_R3 + 1;
+ /* Emit the signature cookie just before the implicit arguments */
+ add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
+ }
+
{
- simpletype = mono_type_get_underlying_type (sig->ret)->type;
- switch (simpletype) {
+ simpletype = mini_type_get_underlying_type (NULL, sig->ret);
+ switch (simpletype->type) {
case MONO_TYPE_BOOLEAN:
case MONO_TYPE_I1:
case MONO_TYPE_U1:
case MONO_TYPE_SZARRAY:
case MONO_TYPE_ARRAY:
case MONO_TYPE_STRING:
+ cinfo->ret.storage = RegTypeGeneral;
cinfo->ret.reg = ARMREG_R0;
break;
case MONO_TYPE_U8:
case MONO_TYPE_I8:
+ cinfo->ret.storage = RegTypeIRegPair;
cinfo->ret.reg = ARMREG_R0;
break;
case MONO_TYPE_R4:
case MONO_TYPE_R8:
+ cinfo->ret.storage = RegTypeFP;
cinfo->ret.reg = ARMREG_R0;
/* FIXME: cinfo->ret.reg = ???;
- cinfo->ret.regtype = RegTypeFP;*/
+ cinfo->ret.storage = RegTypeFP;*/
break;
case MONO_TYPE_GENERICINST:
- if (!mono_type_generic_inst_is_valuetype (sig->ret)) {
+ if (!mono_type_generic_inst_is_valuetype (simpletype)) {
+ cinfo->ret.storage = RegTypeGeneral;
cinfo->ret.reg = ARMREG_R0;
break;
}
- break;
+ /* Fall through */
case MONO_TYPE_VALUETYPE:
- break;
case MONO_TYPE_TYPEDBYREF:
+ if (cinfo->ret.storage != RegTypeStructByVal)
+ cinfo->ret.storage = RegTypeStructByAddr;
+ break;
case MONO_TYPE_VOID:
break;
default:
return cinfo;
}
+#ifndef DISABLE_JIT
/*
* Set var information according to the calling convention. arm version.
* The locals var stuff should most likely be split in another method.
*/
void
-mono_arch_allocate_vars (MonoCompile *m)
+mono_arch_allocate_vars (MonoCompile *cfg)
{
MonoMethodSignature *sig;
MonoMethodHeader *header;
- MonoInst *inst;
+ MonoInst *ins;
int i, offset, size, align, curinst;
int frame_reg = ARMREG_FP;
+ CallInfo *cinfo;
+ guint32 ualign;
+
+ sig = mono_method_signature (cfg->method);
+
+ if (!cfg->arch.cinfo)
+ cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig, sig->pinvoke);
+ cinfo = cfg->arch.cinfo;
/* FIXME: this will change when we use FP as gcc does */
- m->flags |= MONO_CFG_HAS_SPILLUP;
+ cfg->flags |= MONO_CFG_HAS_SPILLUP;
/* allow room for the vararg method args: void* and long/double */
- if (mono_jit_trace_calls != NULL && mono_trace_eval (m->method))
- m->param_area = MAX (m->param_area, sizeof (gpointer)*8);
+ if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
+ cfg->param_area = MAX (cfg->param_area, sizeof (gpointer)*8);
- header = mono_method_get_header (m->method);
+ header = cfg->header;
/*
* We use the frame register also for any method that has
* filters get called before stack unwinding happens) when the filter
* code would call any method (this also applies to finally etc.).
*/
- if ((m->flags & MONO_CFG_HAS_ALLOCA) || header->num_clauses)
+ if ((cfg->flags & MONO_CFG_HAS_ALLOCA) || header->num_clauses)
frame_reg = ARMREG_FP;
- m->frame_reg = frame_reg;
+ cfg->frame_reg = frame_reg;
if (frame_reg != ARMREG_SP) {
- m->used_int_regs |= 1 << frame_reg;
+ cfg->used_int_regs |= 1 << frame_reg;
}
- sig = mono_method_signature (m->method);
-
+ if (cfg->compile_aot || cfg->uses_rgctx_reg || COMPILE_LLVM (cfg))
+ /* V5 is reserved for passing the vtable/rgctx/IMT method */
+ cfg->used_int_regs |= (1 << ARMREG_V5);
+
offset = 0;
curinst = 0;
- if (MONO_TYPE_ISSTRUCT (sig->ret)) {
- m->ret->opcode = OP_REGVAR;
- m->ret->inst_c0 = ARMREG_R0;
- } else {
- /* FIXME: handle long and FP values */
- switch (mono_type_get_underlying_type (sig->ret)->type) {
+ if (!MONO_TYPE_ISSTRUCT (sig->ret)) {
+ switch (mini_type_get_underlying_type (NULL, sig->ret)->type) {
case MONO_TYPE_VOID:
break;
default:
- m->ret->opcode = OP_REGVAR;
- m->ret->inst_c0 = ARMREG_R0;
+ cfg->ret->opcode = OP_REGVAR;
+ cfg->ret->inst_c0 = ARMREG_R0;
break;
}
}
//offset &= ~(8 - 1);
/* add parameter area size for called functions */
- offset += m->param_area;
+ offset += cfg->param_area;
offset += 8 - 1;
offset &= ~(8 - 1);
- if (m->flags & MONO_CFG_HAS_FPOUT)
+ if (cfg->flags & MONO_CFG_HAS_FPOUT)
offset += 8;
/* allow room to save the return value */
- if (mono_jit_trace_calls != NULL && mono_trace_eval (m->method))
+ if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
offset += 8;
/* the MonoLMF structure is stored just below the stack pointer */
-
- if (sig->call_convention == MONO_CALL_VARARG) {
- m->sig_cookie = 0;
- }
-
if (MONO_TYPE_ISSTRUCT (sig->ret)) {
- inst = m->ret;
- offset += sizeof(gpointer) - 1;
- offset &= ~(sizeof(gpointer) - 1);
- inst->inst_offset = offset;
- inst->opcode = OP_REGOFFSET;
- inst->inst_basereg = frame_reg;
+ if (cinfo->ret.storage == RegTypeStructByVal) {
+ cfg->ret->opcode = OP_REGOFFSET;
+ cfg->ret->inst_basereg = cfg->frame_reg;
+ offset += sizeof (gpointer) - 1;
+ offset &= ~(sizeof (gpointer) - 1);
+ cfg->ret->inst_offset = - offset;
+ } else {
+ ins = cfg->vret_addr;
+ offset += sizeof(gpointer) - 1;
+ offset &= ~(sizeof(gpointer) - 1);
+ ins->inst_offset = offset;
+ ins->opcode = OP_REGOFFSET;
+ ins->inst_basereg = frame_reg;
+ if (G_UNLIKELY (cfg->verbose_level > 1)) {
+ printf ("vret_addr =");
+ mono_print_ins (cfg->vret_addr);
+ }
+ }
offset += sizeof(gpointer);
- if (sig->call_convention == MONO_CALL_VARARG)
- m->sig_cookie += sizeof (gpointer);
}
- curinst = m->locals_start;
- for (i = curinst; i < m->num_varinfo; ++i) {
- inst = m->varinfo [i];
- if ((inst->flags & MONO_INST_IS_DEAD) || inst->opcode == OP_REGVAR)
+ /* Allocate these first so they have a small offset, OP_SEQ_POINT depends on this */
+ if (cfg->arch.seq_point_info_var) {
+ MonoInst *ins;
+
+ ins = cfg->arch.seq_point_info_var;
+
+ size = 4;
+ align = 4;
+ offset += align - 1;
+ offset &= ~(align - 1);
+ ins->opcode = OP_REGOFFSET;
+ ins->inst_basereg = frame_reg;
+ ins->inst_offset = offset;
+ offset += size;
+
+ ins = cfg->arch.ss_trigger_page_var;
+ size = 4;
+ align = 4;
+ offset += align - 1;
+ offset &= ~(align - 1);
+ ins->opcode = OP_REGOFFSET;
+ ins->inst_basereg = frame_reg;
+ ins->inst_offset = offset;
+ offset += size;
+ }
+
+ curinst = cfg->locals_start;
+ for (i = curinst; i < cfg->num_varinfo; ++i) {
+ ins = cfg->varinfo [i];
+ if ((ins->flags & MONO_INST_IS_DEAD) || ins->opcode == OP_REGVAR || ins->opcode == OP_REGOFFSET)
continue;
/* inst->backend.is_pinvoke indicates native sized value types, this is used by the
* pinvoke wrappers when they call functions returning structure */
- if (inst->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (inst->inst_vtype) && inst->inst_vtype->type != MONO_TYPE_TYPEDBYREF)
- size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), &align);
+ if (ins->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (ins->inst_vtype) && ins->inst_vtype->type != MONO_TYPE_TYPEDBYREF) {
+ size = mono_class_native_size (mono_class_from_mono_type (ins->inst_vtype), &ualign);
+ align = ualign;
+ }
else
- size = mono_type_size (inst->inst_vtype, &align);
+ size = mono_type_size (ins->inst_vtype, &align);
/* FIXME: if a structure is misaligned, our memcpy doesn't work,
* since it loads/stores misaligned words, which don't do the right thing.
align = 4;
offset += align - 1;
offset &= ~(align - 1);
- inst->inst_offset = offset;
- inst->opcode = OP_REGOFFSET;
- inst->inst_basereg = frame_reg;
+ ins->opcode = OP_REGOFFSET;
+ ins->inst_offset = offset;
+ ins->inst_basereg = frame_reg;
offset += size;
//g_print ("allocating local %d to %d\n", i, inst->inst_offset);
}
curinst = 0;
if (sig->hasthis) {
- inst = m->args [curinst];
- if (inst->opcode != OP_REGVAR) {
- inst->opcode = OP_REGOFFSET;
- inst->inst_basereg = frame_reg;
+ ins = cfg->args [curinst];
+ if (ins->opcode != OP_REGVAR) {
+ ins->opcode = OP_REGOFFSET;
+ ins->inst_basereg = frame_reg;
offset += sizeof (gpointer) - 1;
offset &= ~(sizeof (gpointer) - 1);
- inst->inst_offset = offset;
+ ins->inst_offset = offset;
offset += sizeof (gpointer);
- if (sig->call_convention == MONO_CALL_VARARG)
- m->sig_cookie += sizeof (gpointer);
}
curinst++;
}
+ if (sig->call_convention == MONO_CALL_VARARG) {
+ size = 4;
+ align = 4;
+
+ /* Allocate a local slot to hold the sig cookie address */
+ offset += align - 1;
+ offset &= ~(align - 1);
+ cfg->sig_cookie = offset;
+ offset += size;
+ }
+
for (i = 0; i < sig->param_count; ++i) {
- inst = m->args [curinst];
- if (inst->opcode != OP_REGVAR) {
- inst->opcode = OP_REGOFFSET;
- inst->inst_basereg = frame_reg;
- size = mono_type_size (sig->params [i], &align);
+ ins = cfg->args [curinst];
+
+ if (ins->opcode != OP_REGVAR) {
+ ins->opcode = OP_REGOFFSET;
+ ins->inst_basereg = frame_reg;
+ size = mini_type_stack_size_full (NULL, sig->params [i], &ualign, sig->pinvoke);
+ align = ualign;
/* FIXME: if a structure is misaligned, our memcpy doesn't work,
* since it loads/stores misaligned words, which don't do the right thing.
*/
if (align < 4 && size >= 4)
align = 4;
+ /* The code in the prolog () stores words when storing vtypes received in a register */
+ if (MONO_TYPE_ISSTRUCT (sig->params [i]))
+ align = 4;
offset += align - 1;
offset &= ~(align - 1);
- inst->inst_offset = offset;
+ ins->inst_offset = offset;
offset += size;
- if ((sig->call_convention == MONO_CALL_VARARG) && (i < sig->sentinelpos))
- m->sig_cookie += size;
}
curinst++;
}
offset &= ~(8 - 1);
/* change sign? */
- m->stack_offset = offset;
-
+ cfg->stack_offset = offset;
}
-/* Fixme: we need an alignment solution for enter_method and mono_arch_call_opcode,
- * currently alignment in mono_arch_call_opcode is computed without arch_get_argument_info
- */
-
-/*
- * take the arguments and generate the arch-specific
- * instructions to properly call the function in call.
- * This includes pushing, moving arguments to the right register
- * etc.
- * Issue: who does the spilling if needed, and when?
- */
-MonoCallInst*
-mono_arch_call_opcode (MonoCompile *cfg, MonoBasicBlock* bb, MonoCallInst *call, int is_virtual) {
- MonoInst *arg, *in;
+void
+mono_arch_create_vars (MonoCompile *cfg)
+{
MonoMethodSignature *sig;
- int i, n;
CallInfo *cinfo;
- ArgInfo *ainfo;
- sig = call->signature;
- n = sig->param_count + sig->hasthis;
-
- cinfo = calculate_sizes (sig, sig->pinvoke);
- if (cinfo->struct_ret)
- call->used_iregs |= 1 << cinfo->struct_ret;
+ sig = mono_method_signature (cfg->method);
- for (i = 0; i < n; ++i) {
- ainfo = cinfo->args + i;
- if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
- MonoInst *sig_arg;
- cfg->disable_aot = TRUE;
-
- MONO_INST_NEW (cfg, sig_arg, OP_ICONST);
- sig_arg->inst_p0 = call->signature;
-
- MONO_INST_NEW (cfg, arg, OP_OUTARG);
- arg->inst_imm = cinfo->sig_cookie.offset;
- arg->inst_left = sig_arg;
- MONO_INST_LIST_ADD_TAIL (&arg->node, &call->out_args);
- }
- if (is_virtual && i == 0) {
- /* the argument will be attached to the call instrucion */
- in = call->args [i];
- call->used_iregs |= 1 << ainfo->reg;
- } else {
- MONO_INST_NEW (cfg, arg, OP_OUTARG);
- in = call->args [i];
- arg->cil_code = in->cil_code;
- arg->inst_left = in;
- arg->inst_right = (MonoInst*)call;
- arg->type = in->type;
- MONO_INST_LIST_ADD_TAIL (&arg->node, &call->out_args);
- if (ainfo->regtype == RegTypeGeneral) {
- arg->backend.reg3 = ainfo->reg;
- call->used_iregs |= 1 << ainfo->reg;
- if (arg->type == STACK_I8)
- call->used_iregs |= 1 << (ainfo->reg + 1);
- if (arg->type == STACK_R8) {
- if (ainfo->size == 4) {
-#ifndef MONO_ARCH_SOFT_FLOAT
- arg->opcode = OP_OUTARG_R4;
-#endif
- } else {
- call->used_iregs |= 1 << (ainfo->reg + 1);
- }
- cfg->flags |= MONO_CFG_HAS_FPOUT;
- }
- } else if (ainfo->regtype == RegTypeStructByAddr) {
- /* FIXME: where si the data allocated? */
- arg->backend.reg3 = ainfo->reg;
- call->used_iregs |= 1 << ainfo->reg;
- g_assert_not_reached ();
- } else if (ainfo->regtype == RegTypeStructByVal) {
- int cur_reg;
- /* mark the used regs */
- for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
- call->used_iregs |= 1 << (ainfo->reg + cur_reg);
- }
- arg->opcode = OP_OUTARG_VT;
- /* vtsize and offset have just 12 bits of encoding in number of words */
- g_assert (((ainfo->vtsize | (ainfo->offset / 4)) & 0xfffff000) == 0);
- arg->backend.arg_info = ainfo->reg | (ainfo->size << 4) | (ainfo->vtsize << 8) | ((ainfo->offset / 4) << 20);
- } else if (ainfo->regtype == RegTypeBase) {
- arg->opcode = OP_OUTARG_MEMBASE;
- arg->backend.arg_info = (ainfo->offset << 8) | ainfo->size;
- } else if (ainfo->regtype == RegTypeBaseGen) {
- call->used_iregs |= 1 << ARMREG_R3;
- arg->opcode = OP_OUTARG_MEMBASE;
- arg->backend.arg_info = (ainfo->offset << 8) | 0xff;
- if (arg->type == STACK_R8)
- cfg->flags |= MONO_CFG_HAS_FPOUT;
- } else if (ainfo->regtype == RegTypeFP) {
- arg->backend.reg3 = ainfo->reg;
- /* FP args are passed in int regs */
- call->used_iregs |= 1 << ainfo->reg;
- if (ainfo->size == 8) {
- arg->opcode = OP_OUTARG_R8;
- call->used_iregs |= 1 << (ainfo->reg + 1);
- } else {
- arg->opcode = OP_OUTARG_R4;
- }
- cfg->flags |= MONO_CFG_HAS_FPOUT;
- } else {
- g_assert_not_reached ();
- }
+ if (!cfg->arch.cinfo)
+ cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig, sig->pinvoke);
+ cinfo = cfg->arch.cinfo;
+
+ if (cinfo->ret.storage == RegTypeStructByVal)
+ cfg->ret_var_is_local = TRUE;
+
+ if (MONO_TYPE_ISSTRUCT (sig->ret) && cinfo->ret.storage != RegTypeStructByVal) {
+ cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
+ if (G_UNLIKELY (cfg->verbose_level > 1)) {
+ printf ("vret_addr = ");
+ mono_print_ins (cfg->vret_addr);
}
}
- call->stack_usage = cinfo->stack_usage;
- cfg->param_area = MAX (cfg->param_area, cinfo->stack_usage);
- cfg->flags |= MONO_CFG_HAS_CALLS;
- /*
- * should set more info in call, such as the stack space
- * used by the args that needs to be added back to esp
- */
- g_free (cinfo);
- return call;
-}
+ if (cfg->gen_seq_points && cfg->compile_aot) {
+ MonoInst *ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
+ ins->flags |= MONO_INST_VOLATILE;
+ cfg->arch.seq_point_info_var = ins;
-/*
- * Allow tracing to work with this interface (with an optional argument)
- */
+ /* Allocate a separate variable for this to save 1 load per seq point */
+ ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
+ ins->flags |= MONO_INST_VOLATILE;
+ cfg->arch.ss_trigger_page_var = ins;
+ }
+}
-void*
-mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
+static void
+emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
{
- guchar *code = p;
+ MonoMethodSignature *tmp_sig;
+ MonoInst *sig_arg;
- code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
- ARM_MOV_REG_IMM8 (code, ARMREG_R1, 0); /* NULL ebp for now */
- code = mono_arm_emit_load_imm (code, ARMREG_R2, (guint32)func);
- code = emit_call_reg (code, ARMREG_R2);
- return code;
-}
+ if (call->tail_call)
+ NOT_IMPLEMENTED;
-enum {
- SAVE_NONE,
- SAVE_STRUCT,
- SAVE_ONE,
- SAVE_TWO,
- SAVE_FP
-};
+ /* FIXME: Add support for signature tokens to AOT */
+ cfg->disable_aot = TRUE;
-void*
-mono_arch_instrument_epilog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
-{
- guchar *code = p;
+ g_assert (cinfo->sig_cookie.storage == RegTypeBase);
+
+ /*
+ * mono_ArgIterator_Setup assumes the signature cookie is
+ * passed first and all the arguments which were before it are
+ * passed on the stack after the signature. So compensate by
+ * passing a different signature.
+ */
+ tmp_sig = mono_metadata_signature_dup (call->signature);
+ tmp_sig->param_count -= call->signature->sentinelpos;
+ tmp_sig->sentinelpos = 0;
+ memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
+
+ MONO_INST_NEW (cfg, sig_arg, OP_ICONST);
+ sig_arg->dreg = mono_alloc_ireg (cfg);
+ sig_arg->inst_p0 = tmp_sig;
+ MONO_ADD_INS (cfg->cbb, sig_arg);
+
+ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, cinfo->sig_cookie.offset, sig_arg->dreg);
+}
+
+#ifdef ENABLE_LLVM
+LLVMCallInfo*
+mono_arch_get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig)
+{
+ int i, n;
+ CallInfo *cinfo;
+ ArgInfo *ainfo;
+ LLVMCallInfo *linfo;
+
+ n = sig->param_count + sig->hasthis;
+
+ cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig, sig->pinvoke);
+
+ linfo = mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMCallInfo) + (sizeof (LLVMArgInfo) * n));
+
+ /*
+ * LLVM always uses the native ABI while we use our own ABI, the
+ * only difference is the handling of vtypes:
+ * - we only pass/receive them in registers in some cases, and only
+ * in 1 or 2 integer registers.
+ */
+ if (cinfo->ret.storage != RegTypeGeneral && cinfo->ret.storage != RegTypeNone && cinfo->ret.storage != RegTypeFP && cinfo->ret.storage != RegTypeIRegPair) {
+ cfg->exception_message = g_strdup ("unknown ret conv");
+ cfg->disable_llvm = TRUE;
+ return linfo;
+ }
+
+ for (i = 0; i < n; ++i) {
+ ainfo = cinfo->args + i;
+
+ linfo->args [i].storage = LLVMArgNone;
+
+ switch (ainfo->storage) {
+ case RegTypeGeneral:
+ case RegTypeIRegPair:
+ case RegTypeBase:
+ linfo->args [i].storage = LLVMArgInIReg;
+ break;
+ default:
+ cfg->exception_message = g_strdup_printf ("ainfo->storage (%d)", ainfo->storage);
+ cfg->disable_llvm = TRUE;
+ break;
+ }
+ }
+
+ return linfo;
+}
+#endif
+
+void
+mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
+{
+ MonoInst *in, *ins;
+ MonoMethodSignature *sig;
+ int i, n;
+ CallInfo *cinfo;
+
+ sig = call->signature;
+ n = sig->param_count + sig->hasthis;
+
+ cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig, sig->pinvoke);
+
+ for (i = 0; i < n; ++i) {
+ ArgInfo *ainfo = cinfo->args + i;
+ MonoType *t;
+
+ if (i >= sig->hasthis)
+ t = sig->params [i - sig->hasthis];
+ else
+ t = &mono_defaults.int_class->byval_arg;
+ t = mini_type_get_underlying_type (NULL, t);
+
+ if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
+ /* Emit the signature cookie just before the implicit arguments */
+ emit_sig_cookie (cfg, call, cinfo);
+ }
+
+ in = call->args [i];
+
+ switch (ainfo->storage) {
+ case RegTypeGeneral:
+ case RegTypeIRegPair:
+ if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
+ MONO_INST_NEW (cfg, ins, OP_MOVE);
+ ins->dreg = mono_alloc_ireg (cfg);
+ ins->sreg1 = in->dreg + 1;
+ MONO_ADD_INS (cfg->cbb, ins);
+ mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
+
+ MONO_INST_NEW (cfg, ins, OP_MOVE);
+ ins->dreg = mono_alloc_ireg (cfg);
+ ins->sreg1 = in->dreg + 2;
+ MONO_ADD_INS (cfg->cbb, ins);
+ mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
+ } else if (!t->byref && ((t->type == MONO_TYPE_R8) || (t->type == MONO_TYPE_R4))) {
+#ifndef MONO_ARCH_SOFT_FLOAT
+ int creg;
+#endif
+
+ if (ainfo->size == 4) {
+#ifdef MONO_ARCH_SOFT_FLOAT
+				/* mono_emit_call_args () has already done the r8->r4 conversion */
+ /* The converted value is in an int vreg */
+ MONO_INST_NEW (cfg, ins, OP_MOVE);
+ ins->dreg = mono_alloc_ireg (cfg);
+ ins->sreg1 = in->dreg;
+ MONO_ADD_INS (cfg->cbb, ins);
+ mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
+#else
+ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
+ creg = mono_alloc_ireg (cfg);
+ MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
+ mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
+#endif
+ } else {
+#ifdef MONO_ARCH_SOFT_FLOAT
+ MONO_INST_NEW (cfg, ins, OP_FGETLOW32);
+ ins->dreg = mono_alloc_ireg (cfg);
+ ins->sreg1 = in->dreg;
+ MONO_ADD_INS (cfg->cbb, ins);
+ mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
+
+ MONO_INST_NEW (cfg, ins, OP_FGETHIGH32);
+ ins->dreg = mono_alloc_ireg (cfg);
+ ins->sreg1 = in->dreg;
+ MONO_ADD_INS (cfg->cbb, ins);
+ mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
+#else
+ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
+ creg = mono_alloc_ireg (cfg);
+ MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
+ mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
+ creg = mono_alloc_ireg (cfg);
+ MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8 + 4));
+ mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg + 1, FALSE);
+#endif
+ }
+ cfg->flags |= MONO_CFG_HAS_FPOUT;
+ } else {
+ MONO_INST_NEW (cfg, ins, OP_MOVE);
+ ins->dreg = mono_alloc_ireg (cfg);
+ ins->sreg1 = in->dreg;
+ MONO_ADD_INS (cfg->cbb, ins);
+
+ mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
+ }
+ break;
+ case RegTypeStructByAddr:
+ NOT_IMPLEMENTED;
+#if 0
+		/* FIXME: where is the data allocated? */
+ arg->backend.reg3 = ainfo->reg;
+ call->used_iregs |= 1 << ainfo->reg;
+ g_assert_not_reached ();
+#endif
+ break;
+ case RegTypeStructByVal:
+ MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
+ ins->opcode = OP_OUTARG_VT;
+ ins->sreg1 = in->dreg;
+ ins->klass = in->klass;
+ ins->inst_p0 = call;
+ ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
+ memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
+ MONO_ADD_INS (cfg->cbb, ins);
+ break;
+ case RegTypeBase:
+ if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
+ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
+ } else if (!t->byref && ((t->type == MONO_TYPE_R4) || (t->type == MONO_TYPE_R8))) {
+ if (t->type == MONO_TYPE_R8) {
+ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
+ } else {
+#ifdef MONO_ARCH_SOFT_FLOAT
+ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
+#else
+ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
+#endif
+ }
+ } else {
+ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
+ }
+ break;
+ case RegTypeBaseGen:
+ if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
+ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, (G_BYTE_ORDER == G_BIG_ENDIAN) ? in->dreg + 1 : in->dreg + 2);
+ MONO_INST_NEW (cfg, ins, OP_MOVE);
+ ins->dreg = mono_alloc_ireg (cfg);
+ ins->sreg1 = G_BYTE_ORDER == G_BIG_ENDIAN ? in->dreg + 2 : in->dreg + 1;
+ MONO_ADD_INS (cfg->cbb, ins);
+ mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ARMREG_R3, FALSE);
+ } else if (!t->byref && (t->type == MONO_TYPE_R8)) {
+ int creg;
+
+#ifdef MONO_ARCH_SOFT_FLOAT
+ g_assert_not_reached ();
+#endif
+
+ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
+ creg = mono_alloc_ireg (cfg);
+ mono_call_inst_add_outarg_reg (cfg, call, creg, ARMREG_R3, FALSE);
+ MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
+ creg = mono_alloc_ireg (cfg);
+ MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 4));
+ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, creg);
+ cfg->flags |= MONO_CFG_HAS_FPOUT;
+ } else {
+ g_assert_not_reached ();
+ }
+ break;
+ case RegTypeFP: {
+ /* FIXME: */
+ NOT_IMPLEMENTED;
+#if 0
+ arg->backend.reg3 = ainfo->reg;
+ /* FP args are passed in int regs */
+ call->used_iregs |= 1 << ainfo->reg;
+ if (ainfo->size == 8) {
+ arg->opcode = OP_OUTARG_R8;
+ call->used_iregs |= 1 << (ainfo->reg + 1);
+ } else {
+ arg->opcode = OP_OUTARG_R4;
+ }
+#endif
+ cfg->flags |= MONO_CFG_HAS_FPOUT;
+ break;
+ }
+ default:
+ g_assert_not_reached ();
+ }
+ }
+
+ /* Handle the case where there are no implicit arguments */
+ if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos))
+ emit_sig_cookie (cfg, call, cinfo);
+
+ if (sig->ret && MONO_TYPE_ISSTRUCT (sig->ret)) {
+ MonoInst *vtarg;
+
+ if (cinfo->ret.storage == RegTypeStructByVal) {
+ /* The JIT will transform this into a normal call */
+ call->vret_in_reg = TRUE;
+ } else {
+ MONO_INST_NEW (cfg, vtarg, OP_MOVE);
+ vtarg->sreg1 = call->vret_var->dreg;
+ vtarg->dreg = mono_alloc_preg (cfg);
+ MONO_ADD_INS (cfg->cbb, vtarg);
+
+ mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE);
+ }
+ }
+
+ call->stack_usage = cinfo->stack_usage;
+
+ g_free (cinfo);
+}
+
+void
+mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
+{
+ MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
+ ArgInfo *ainfo = ins->inst_p1;
+ int ovf_size = ainfo->vtsize;
+ int doffset = ainfo->offset;
+ int i, soffset, dreg;
+
+ soffset = 0;
+ for (i = 0; i < ainfo->size; ++i) {
+ dreg = mono_alloc_ireg (cfg);
+ MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, soffset);
+ mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg + i, FALSE);
+ soffset += sizeof (gpointer);
+ }
+ //g_print ("vt size: %d at R%d + %d\n", doffset, vt->inst_basereg, vt->inst_offset);
+ if (ovf_size != 0)
+ mini_emit_memcpy (cfg, ARMREG_SP, doffset, src->dreg, soffset, ovf_size * sizeof (gpointer), 0);
+}
+
+void
+mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
+{
+ MonoType *ret = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret);
+
+ if (!ret->byref) {
+ if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
+ MonoInst *ins;
+
+ if (COMPILE_LLVM (cfg)) {
+ MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
+ } else {
+ MONO_INST_NEW (cfg, ins, OP_SETLRET);
+ ins->sreg1 = val->dreg + 1;
+ ins->sreg2 = val->dreg + 2;
+ MONO_ADD_INS (cfg->cbb, ins);
+ }
+ return;
+ }
+#ifdef MONO_ARCH_SOFT_FLOAT
+ if (ret->type == MONO_TYPE_R8) {
+ MonoInst *ins;
+
+ MONO_INST_NEW (cfg, ins, OP_SETFRET);
+ ins->dreg = cfg->ret->dreg;
+ ins->sreg1 = val->dreg;
+ MONO_ADD_INS (cfg->cbb, ins);
+ return;
+ }
+ if (ret->type == MONO_TYPE_R4) {
+ /* Already converted to an int in method_to_ir () */
+ MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
+ return;
+ }
+#elif defined(ARM_FPU_VFP)
+ if (ret->type == MONO_TYPE_R8 || ret->type == MONO_TYPE_R4) {
+ MonoInst *ins;
+
+ MONO_INST_NEW (cfg, ins, OP_SETFRET);
+ ins->dreg = cfg->ret->dreg;
+ ins->sreg1 = val->dreg;
+ MONO_ADD_INS (cfg->cbb, ins);
+ return;
+ }
+#else
+ if (ret->type == MONO_TYPE_R4 || ret->type == MONO_TYPE_R8) {
+ MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
+ return;
+ }
+#endif
+ }
+
+ /* FIXME: */
+ MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
+}
+
+#endif /* #ifndef DISABLE_JIT */
+
+gboolean
+mono_arch_is_inst_imm (gint64 imm)
+{
+ return TRUE;
+}
+
+#define DYN_CALL_STACK_ARGS 6
+
+typedef struct {
+ MonoMethodSignature *sig;
+ CallInfo *cinfo;
+} ArchDynCallInfo;
+
+typedef struct {
+ mgreg_t regs [PARAM_REGS + DYN_CALL_STACK_ARGS];
+ mgreg_t res, res2;
+ guint8 *ret;
+} DynCallArgs;
+
+static gboolean
+dyn_call_supported (CallInfo *cinfo, MonoMethodSignature *sig)
+{
+ int i;
+
+ if (sig->hasthis + sig->param_count > PARAM_REGS + DYN_CALL_STACK_ARGS)
+ return FALSE;
+
+ switch (cinfo->ret.storage) {
+ case RegTypeNone:
+ case RegTypeGeneral:
+ case RegTypeIRegPair:
+ case RegTypeStructByAddr:
+ break;
+ case RegTypeFP:
+#ifdef ARM_FPU_FPA
+ return FALSE;
+#elif defined(ARM_FPU_VFP)
+ break;
+#else
+ return FALSE;
+#endif
+ default:
+ return FALSE;
+ }
+
+ for (i = 0; i < cinfo->nargs; ++i) {
+ switch (cinfo->args [i].storage) {
+ case RegTypeGeneral:
+ break;
+ case RegTypeIRegPair:
+ break;
+ case RegTypeBase:
+ if (cinfo->args [i].offset >= (DYN_CALL_STACK_ARGS * sizeof (gpointer)))
+ return FALSE;
+ break;
+ case RegTypeStructByVal:
+ if (cinfo->args [i].reg + cinfo->args [i].vtsize >= PARAM_REGS + DYN_CALL_STACK_ARGS)
+ return FALSE;
+ break;
+ default:
+ return FALSE;
+ }
+ }
+
+ // FIXME: Can't use cinfo only as it doesn't contain info about I8/float */
+ for (i = 0; i < sig->param_count; ++i) {
+ MonoType *t = sig->params [i];
+
+ if (t->byref)
+ continue;
+
+ switch (t->type) {
+ case MONO_TYPE_R4:
+ case MONO_TYPE_R8:
+#ifdef MONO_ARCH_SOFT_FLOAT
+ return FALSE;
+#else
+ break;
+#endif
+ /*
+ case MONO_TYPE_I8:
+ case MONO_TYPE_U8:
+ return FALSE;
+ */
+ default:
+ break;
+ }
+ }
+
+ return TRUE;
+}
+
+MonoDynCallInfo*
+mono_arch_dyn_call_prepare (MonoMethodSignature *sig)
+{
+ ArchDynCallInfo *info;
+ CallInfo *cinfo;
+
+ cinfo = get_call_info (NULL, NULL, sig, FALSE);
+
+ if (!dyn_call_supported (cinfo, sig)) {
+ g_free (cinfo);
+ return NULL;
+ }
+
+ info = g_new0 (ArchDynCallInfo, 1);
+ // FIXME: Preprocess the info to speed up start_dyn_call ()
+ info->sig = sig;
+ info->cinfo = cinfo;
+
+ return (MonoDynCallInfo*)info;
+}
+
+void
+mono_arch_dyn_call_free (MonoDynCallInfo *info)
+{
+ ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
+
+ g_free (ainfo->cinfo);
+ g_free (ainfo);
+}
+
+void
+mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, guint8 *buf, int buf_len)
+{
+ ArchDynCallInfo *dinfo = (ArchDynCallInfo*)info;
+ DynCallArgs *p = (DynCallArgs*)buf;
+ int arg_index, greg, i, j;
+ MonoMethodSignature *sig = dinfo->sig;
+
+ g_assert (buf_len >= sizeof (DynCallArgs));
+
+ p->res = 0;
+ p->ret = ret;
+
+ arg_index = 0;
+ greg = 0;
+
+ if (dinfo->cinfo->vtype_retaddr)
+ p->regs [greg ++] = (mgreg_t)ret;
+
+ if (sig->hasthis)
+ p->regs [greg ++] = (mgreg_t)*(args [arg_index ++]);
+
+ for (i = 0; i < sig->param_count; i++) {
+ MonoType *t = mono_type_get_underlying_type (sig->params [i]);
+ gpointer *arg = args [arg_index ++];
+ ArgInfo *ainfo = &dinfo->cinfo->args [i + sig->hasthis];
+ int slot = -1;
+
+ if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair || ainfo->storage == RegTypeStructByVal)
+ slot = ainfo->reg;
+ else if (ainfo->storage == RegTypeBase)
+ slot = PARAM_REGS + (ainfo->offset / 4);
+ else
+ g_assert_not_reached ();
+
+ if (t->byref) {
+ p->regs [slot] = (mgreg_t)*arg;
+ continue;
+ }
+
+ switch (t->type) {
+ case MONO_TYPE_STRING:
+ case MONO_TYPE_CLASS:
+ case MONO_TYPE_ARRAY:
+ case MONO_TYPE_SZARRAY:
+ case MONO_TYPE_OBJECT:
+ case MONO_TYPE_PTR:
+ case MONO_TYPE_I:
+ case MONO_TYPE_U:
+ p->regs [slot] = (mgreg_t)*arg;
+ break;
+ case MONO_TYPE_BOOLEAN:
+ case MONO_TYPE_U1:
+ p->regs [slot] = *(guint8*)arg;
+ break;
+ case MONO_TYPE_I1:
+ p->regs [slot] = *(gint8*)arg;
+ break;
+ case MONO_TYPE_I2:
+ p->regs [slot] = *(gint16*)arg;
+ break;
+ case MONO_TYPE_U2:
+ case MONO_TYPE_CHAR:
+ p->regs [slot] = *(guint16*)arg;
+ break;
+ case MONO_TYPE_I4:
+ p->regs [slot] = *(gint32*)arg;
+ break;
+ case MONO_TYPE_U4:
+ p->regs [slot] = *(guint32*)arg;
+ break;
+ case MONO_TYPE_I8:
+ case MONO_TYPE_U8:
+ p->regs [slot ++] = (mgreg_t)arg [0];
+ p->regs [slot] = (mgreg_t)arg [1];
+ break;
+ case MONO_TYPE_R4:
+ p->regs [slot] = *(mgreg_t*)arg;
+ break;
+ case MONO_TYPE_R8:
+ p->regs [slot ++] = (mgreg_t)arg [0];
+ p->regs [slot] = (mgreg_t)arg [1];
+ break;
+ case MONO_TYPE_GENERICINST:
+ if (MONO_TYPE_IS_REFERENCE (t)) {
+ p->regs [slot] = (mgreg_t)*arg;
+ break;
+ } else {
+				/* Fall through */
+ }
+ case MONO_TYPE_VALUETYPE:
+ g_assert (ainfo->storage == RegTypeStructByVal);
+
+ if (ainfo->size == 0)
+ slot = PARAM_REGS + (ainfo->offset / 4);
+ else
+ slot = ainfo->reg;
+
+ for (j = 0; j < ainfo->size + ainfo->vtsize; ++j)
+ p->regs [slot ++] = ((mgreg_t*)arg) [j];
+ break;
+ default:
+ g_assert_not_reached ();
+ }
+ }
+}
+
+void
+mono_arch_finish_dyn_call (MonoDynCallInfo *info, guint8 *buf)
+{
+ ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
+ MonoMethodSignature *sig = ((ArchDynCallInfo*)info)->sig;
+ guint8 *ret = ((DynCallArgs*)buf)->ret;
+ mgreg_t res = ((DynCallArgs*)buf)->res;
+ mgreg_t res2 = ((DynCallArgs*)buf)->res2;
+
+ switch (mono_type_get_underlying_type (sig->ret)->type) {
+ case MONO_TYPE_VOID:
+ *(gpointer*)ret = NULL;
+ break;
+ case MONO_TYPE_STRING:
+ case MONO_TYPE_CLASS:
+ case MONO_TYPE_ARRAY:
+ case MONO_TYPE_SZARRAY:
+ case MONO_TYPE_OBJECT:
+ case MONO_TYPE_I:
+ case MONO_TYPE_U:
+ case MONO_TYPE_PTR:
+ *(gpointer*)ret = (gpointer)res;
+ break;
+ case MONO_TYPE_I1:
+ *(gint8*)ret = res;
+ break;
+ case MONO_TYPE_U1:
+ case MONO_TYPE_BOOLEAN:
+ *(guint8*)ret = res;
+ break;
+ case MONO_TYPE_I2:
+ *(gint16*)ret = res;
+ break;
+ case MONO_TYPE_U2:
+ case MONO_TYPE_CHAR:
+ *(guint16*)ret = res;
+ break;
+ case MONO_TYPE_I4:
+ *(gint32*)ret = res;
+ break;
+ case MONO_TYPE_U4:
+ *(guint32*)ret = res;
+ break;
+ case MONO_TYPE_I8:
+ case MONO_TYPE_U8:
+ /* This handles endianness as well */
+ ((gint32*)ret) [0] = res;
+ ((gint32*)ret) [1] = res2;
+ break;
+ case MONO_TYPE_GENERICINST:
+ if (MONO_TYPE_IS_REFERENCE (sig->ret)) {
+ *(gpointer*)ret = (gpointer)res;
+ break;
+ } else {
+			/* Fall through */
+ }
+ case MONO_TYPE_VALUETYPE:
+ g_assert (ainfo->cinfo->vtype_retaddr);
+ /* Nothing to do */
+ break;
+#if defined(ARM_FPU_VFP)
+ case MONO_TYPE_R4:
+ *(float*)ret = *(float*)&res;
+ break;
+ case MONO_TYPE_R8: {
+ mgreg_t regs [2];
+
+ regs [0] = res;
+ regs [1] = res2;
+
+		*(double*)ret = *(double*)&regs;
+ break;
+ }
+#endif
+ default:
+ g_assert_not_reached ();
+ }
+}
+
+#ifndef DISABLE_JIT
+
+/*
+ * Allow tracing to work with this interface (with an optional argument)
+ */
+
+void*
+mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
+{
+ guchar *code = p;
+
+ code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
+ ARM_MOV_REG_IMM8 (code, ARMREG_R1, 0); /* NULL ebp for now */
+ code = mono_arm_emit_load_imm (code, ARMREG_R2, (guint32)func);
+ code = emit_call_reg (code, ARMREG_R2);
+ return code;
+}
+
+enum {
+ SAVE_NONE,
+ SAVE_STRUCT,
+ SAVE_ONE,
+ SAVE_TWO,
+ SAVE_FP
+};
+
+void*
+mono_arch_instrument_epilog_full (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments, gboolean preserve_argument_registers)
+{
+ guchar *code = p;
int save_mode = SAVE_NONE;
int offset;
MonoMethod *method = cfg->method;
- int rtype = mono_type_get_underlying_type (mono_method_signature (method)->ret)->type;
+ int rtype = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret)->type;
int save_offset = cfg->param_area;
save_offset += 7;
save_offset &= ~7;
* The immediate field for cond branches is big enough for all reasonable methods
*/
#define EMIT_COND_BRANCH_FLAGS(ins,condcode) \
-if (ins->flags & MONO_INST_BRLABEL) { \
- if (0 && ins->inst_i0->inst_c0) { \
- ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_i0->inst_c0) & 0xffffff); \
- } else { \
- mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_LABEL, ins->inst_i0); \
- ARM_B_COND (code, (condcode), 0); \
- } \
+if (0 && ins->inst_true_bb->native_offset) { \
+ ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffffff); \
} else { \
- if (0 && ins->inst_true_bb->native_offset) { \
- ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffffff); \
- } else { \
- mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
- ARM_B_COND (code, (condcode), 0); \
- } \
+ mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
+ ARM_B_COND (code, (condcode), 0); \
}
#define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_cc_table [(cond)])
void
mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
{
- MonoInst *ins, *n;
-
- MONO_INST_LIST_FOR_EACH_ENTRY_SAFE (ins, n, &bb->ins_list, node) {
- MonoInst *last_ins = mono_inst_list_prev (&ins->node, &bb->ins_list);
+ MonoInst *ins, *n, *last_ins = NULL;
+ MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
switch (ins->opcode) {
case OP_MUL_IMM:
- /* remove unnecessary multiplication with 1 */
- if (ins->inst_imm == 1) {
- if (ins->dreg != ins->sreg1) {
- ins->opcode = OP_MOVE;
- } else {
- MONO_DEL_INS (ins);
- continue;
- }
- } else {
- int power2 = mono_is_power_of_two (ins->inst_imm);
- if (power2 > 0) {
- ins->opcode = OP_SHL_IMM;
- ins->inst_imm = power2;
- }
- }
+ case OP_IMUL_IMM:
+ /* Already done by an arch-independent pass */
break;
case OP_LOAD_MEMBASE:
case OP_LOADI4_MEMBASE:
ins->inst_basereg == last_ins->inst_destbasereg &&
ins->inst_offset == last_ins->inst_offset) {
if (ins->dreg == last_ins->sreg1) {
- MONO_DEL_INS (ins);
+ MONO_DELETE_INS (bb, ins);
continue;
} else {
//static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
ins->inst_offset == last_ins->inst_offset) {
if (ins->dreg == last_ins->dreg) {
- MONO_DEL_INS (ins);
+ MONO_DELETE_INS (bb, ins);
continue;
} else {
ins->opcode = OP_MOVE;
if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
ins->inst_basereg == last_ins->inst_destbasereg &&
ins->inst_offset == last_ins->inst_offset) {
- ins->opcode = (ins->opcode == OP_LOADI1_MEMBASE) ? CEE_CONV_I1 : CEE_CONV_U1;
+ ins->opcode = (ins->opcode == OP_LOADI1_MEMBASE) ? OP_ICONV_TO_I1 : OP_ICONV_TO_U1;
ins->sreg1 = last_ins->sreg1;
}
break;
if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
ins->inst_basereg == last_ins->inst_destbasereg &&
ins->inst_offset == last_ins->inst_offset) {
- ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? CEE_CONV_I2 : CEE_CONV_U2;
+ ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? OP_ICONV_TO_I2 : OP_ICONV_TO_U2;
ins->sreg1 = last_ins->sreg1;
}
break;
- case CEE_CONV_I4:
- case CEE_CONV_U4:
case OP_MOVE:
ins->opcode = OP_MOVE;
/*
* OP_MOVE reg, reg
*/
if (ins->dreg == ins->sreg1) {
- MONO_DEL_INS (ins);
+ MONO_DELETE_INS (bb, ins);
continue;
}
/*
if (last_ins && last_ins->opcode == OP_MOVE &&
ins->sreg1 == last_ins->dreg &&
ins->dreg == last_ins->sreg1) {
- MONO_DEL_INS (ins);
+ MONO_DELETE_INS (bb, ins);
continue;
}
break;
}
+ last_ins = ins;
+ ins = ins->next;
}
+ bb->last_ins = last_ins;
}
/*
ARMCOND_LO
};
-
-#define NEW_INS(cfg,ins,dest,op) do { \
- (dest) = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoInst)); \
- (dest)->opcode = (op); \
- MONO_INST_LIST_ADD_TAIL (&(dest)->node, &(ins)->node); \
+#define NEW_INS(cfg,dest,op) do { \
+ MONO_INST_NEW ((cfg), (dest), (op)); \
+ mono_bblock_insert_before_ins (bb, ins, (dest)); \
} while (0)
static int
{
switch (op) {
case OP_ADD_IMM:
- return CEE_ADD;
+ return OP_IADD;
case OP_SUB_IMM:
- return CEE_SUB;
+ return OP_ISUB;
case OP_AND_IMM:
- return CEE_AND;
+ return OP_IAND;
case OP_COMPARE_IMM:
return OP_COMPARE;
+ case OP_ICOMPARE_IMM:
+ return OP_ICOMPARE;
case OP_ADDCC_IMM:
return OP_ADDCC;
case OP_ADC_IMM:
case OP_SBB_IMM:
return OP_SBB;
case OP_OR_IMM:
- return CEE_OR;
+ return OP_IOR;
case OP_XOR_IMM:
- return CEE_XOR;
+ return OP_IXOR;
case OP_LOAD_MEMBASE:
return OP_LOAD_MEMINDEX;
case OP_LOADI4_MEMBASE:
void
mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
{
+ MonoInst *ins, *temp, *last_ins = NULL;
int rot_amount, imm8, low_imm;
- MonoInst *ins, *temp;
-
- /* setup the virtual reg allocator */
- if (bb->max_vreg > cfg->rs->next_vreg)
- cfg->rs->next_vreg = bb->max_vreg;
MONO_BB_FOR_EACH_INS (bb, ins) {
- MonoInst *last_ins;
-
loop_start:
- last_ins = mono_inst_list_prev (&ins->node, &bb->ins_list);
switch (ins->opcode) {
case OP_ADD_IMM:
case OP_SUB_IMM:
case OP_AND_IMM:
case OP_COMPARE_IMM:
+ case OP_ICOMPARE_IMM:
case OP_ADDCC_IMM:
case OP_ADC_IMM:
case OP_SUBCC_IMM:
case OP_SBB_IMM:
case OP_OR_IMM:
case OP_XOR_IMM:
+ case OP_IADD_IMM:
+ case OP_ISUB_IMM:
+ case OP_IAND_IMM:
+ case OP_IADC_IMM:
+ case OP_ISBB_IMM:
+ case OP_IOR_IMM:
+ case OP_IXOR_IMM:
if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount)) < 0) {
- NEW_INS (cfg, ins, temp, OP_ICONST);
+ NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_imm;
- temp->dreg = mono_regstate_next_int (cfg->rs);
+ temp->dreg = mono_alloc_ireg (cfg);
ins->sreg2 = temp->dreg;
- ins->opcode = map_to_reg_reg_op (ins->opcode);
+ ins->opcode = mono_op_imm_to_op (ins->opcode);
}
- break;
+ if (ins->opcode == OP_SBB || ins->opcode == OP_ISBB || ins->opcode == OP_SUBCC)
+ goto loop_start;
+ else
+ break;
case OP_MUL_IMM:
+ case OP_IMUL_IMM:
if (ins->inst_imm == 1) {
ins->opcode = OP_MOVE;
break;
ins->inst_imm = imm8;
break;
}
- NEW_INS (cfg, ins, temp, OP_ICONST);
+ NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_imm;
- temp->dreg = mono_regstate_next_int (cfg->rs);
+ temp->dreg = mono_alloc_ireg (cfg);
ins->sreg2 = temp->dreg;
- ins->opcode = CEE_MUL;
+ ins->opcode = OP_IMUL;
+ break;
+ case OP_SBB:
+ case OP_ISBB:
+ case OP_SUBCC:
+ case OP_ISUBCC:
+ if (ins->next && (ins->next->opcode == OP_COND_EXC_C || ins->next->opcode == OP_COND_EXC_IC))
+ /* ARM sets the C flag to 1 if there was _no_ overflow */
+ ins->next->opcode = OP_COND_EXC_NC;
+ break;
+ case OP_LOCALLOC_IMM:
+ NEW_INS (cfg, temp, OP_ICONST);
+ temp->inst_c0 = ins->inst_imm;
+ temp->dreg = mono_alloc_ireg (cfg);
+ ins->sreg1 = temp->dreg;
+ ins->opcode = OP_LOCALLOC;
break;
case OP_LOAD_MEMBASE:
case OP_LOADI4_MEMBASE:
*/
if (arm_is_imm12 (ins->inst_offset))
break;
- NEW_INS (cfg, ins, temp, OP_ICONST);
+ NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_offset;
- temp->dreg = mono_regstate_next_int (cfg->rs);
+ temp->dreg = mono_alloc_ireg (cfg);
ins->sreg2 = temp->dreg;
ins->opcode = map_to_reg_reg_op (ins->opcode);
break;
case OP_LOADI1_MEMBASE:
if (arm_is_imm8 (ins->inst_offset))
break;
- NEW_INS (cfg, ins, temp, OP_ICONST);
+ NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_offset;
- temp->dreg = mono_regstate_next_int (cfg->rs);
+ temp->dreg = mono_alloc_ireg (cfg);
ins->sreg2 = temp->dreg;
ins->opcode = map_to_reg_reg_op (ins->opcode);
break;
break;
low_imm = ins->inst_offset & 0x1ff;
if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~0x1ff, &rot_amount)) >= 0) {
- NEW_INS (cfg, ins, temp, OP_ADD_IMM);
+ NEW_INS (cfg, temp, OP_ADD_IMM);
temp->inst_imm = ins->inst_offset & ~0x1ff;
temp->sreg1 = ins->inst_basereg;
- temp->dreg = mono_regstate_next_int (cfg->rs);
+ temp->dreg = mono_alloc_ireg (cfg);
ins->inst_basereg = temp->dreg;
ins->inst_offset = low_imm;
break;
case OP_STOREI1_MEMBASE_REG:
if (arm_is_imm12 (ins->inst_offset))
break;
- NEW_INS (cfg, ins, temp, OP_ICONST);
+ NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_offset;
- temp->dreg = mono_regstate_next_int (cfg->rs);
+ temp->dreg = mono_alloc_ireg (cfg);
ins->sreg2 = temp->dreg;
ins->opcode = map_to_reg_reg_op (ins->opcode);
break;
case OP_STOREI2_MEMBASE_REG:
if (arm_is_imm8 (ins->inst_offset))
break;
- NEW_INS (cfg, ins, temp, OP_ICONST);
+ NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_offset;
- temp->dreg = mono_regstate_next_int (cfg->rs);
+ temp->dreg = mono_alloc_ireg (cfg);
ins->sreg2 = temp->dreg;
ins->opcode = map_to_reg_reg_op (ins->opcode);
break;
break;
low_imm = ins->inst_offset & 0x1ff;
if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~ 0x1ff, &rot_amount)) >= 0 && arm_is_fpimm8 (low_imm)) {
- NEW_INS (cfg, ins, temp, OP_ADD_IMM);
+ NEW_INS (cfg, temp, OP_ADD_IMM);
temp->inst_imm = ins->inst_offset & ~0x1ff;
temp->sreg1 = ins->inst_destbasereg;
- temp->dreg = mono_regstate_next_int (cfg->rs);
+ temp->dreg = mono_alloc_ireg (cfg);
ins->inst_destbasereg = temp->dreg;
ins->inst_offset = low_imm;
break;
case OP_STOREI1_MEMBASE_IMM:
case OP_STOREI2_MEMBASE_IMM:
case OP_STOREI4_MEMBASE_IMM:
- NEW_INS (cfg, ins, temp, OP_ICONST);
+ NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_imm;
- temp->dreg = mono_regstate_next_int (cfg->rs);
+ temp->dreg = mono_alloc_ireg (cfg);
ins->sreg1 = temp->dreg;
ins->opcode = map_to_reg_reg_op (ins->opcode);
+ last_ins = temp;
goto loop_start; /* make it handle the possibly big ins->inst_offset */
+ case OP_FCOMPARE: {
+ gboolean swap = FALSE;
+ int reg;
+
+ if (!ins->next) {
+ /* Optimized away */
+ NULLIFY_INS (ins);
+ break;
+ }
+
+ /* Some fp compares require swapped operands */
+ switch (ins->next->opcode) {
+ case OP_FBGT:
+ ins->next->opcode = OP_FBLT;
+ swap = TRUE;
+ break;
+ case OP_FBGT_UN:
+ ins->next->opcode = OP_FBLT_UN;
+ swap = TRUE;
+ break;
+ case OP_FBLE:
+ ins->next->opcode = OP_FBGE;
+ swap = TRUE;
+ break;
+ case OP_FBLE_UN:
+ ins->next->opcode = OP_FBGE_UN;
+ swap = TRUE;
+ break;
+ default:
+ break;
+ }
+ if (swap) {
+ reg = ins->sreg1;
+ ins->sreg1 = ins->sreg2;
+ ins->sreg2 = reg;
+ }
+ break;
}
+ }
+
+ last_ins = ins;
+ }
+ bb->last_ins = last_ins;
+ bb->max_vreg = cfg->next_vreg;
+}
+
+void
+mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *long_ins)
+{
+ MonoInst *ins;
+
+ if (long_ins->opcode == OP_LNEG) {
+ ins = long_ins;
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSBS_IMM, ins->dreg + 1, ins->sreg1 + 1, 0);
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSC_IMM, ins->dreg + 2, ins->sreg1 + 2, 0);
+ NULLIFY_INS (ins);
}
- bb->max_vreg = cfg->rs->next_vreg;
}
static guchar*
return code;
}
+#endif /* #ifndef DISABLE_JIT */
+
typedef struct {
guchar *code;
const guchar *target;
}
static void
-handle_thunk (int absolute, guchar *code, const guchar *target) {
- MonoDomain *domain = mono_domain_get ();
+handle_thunk (MonoDomain *domain, int absolute, guchar *code, const guchar *target)
+{
PatchData pdata;
+ if (!domain)
+ domain = mono_domain_get ();
+
pdata.code = code;
pdata.target = target;
pdata.absolute = absolute;
pdata.found = 0;
mono_domain_lock (domain);
- mono_code_manager_foreach (domain->code_mp, search_thunk_slot, &pdata);
+ mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
if (!pdata.found) {
/* this uses the first available slot */
pdata.found = 2;
- mono_code_manager_foreach (domain->code_mp, search_thunk_slot, &pdata);
+ mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
}
mono_domain_unlock (domain);
g_assert (pdata.found == 1);
}
-void
-arm_patch (guchar *code, const guchar *target)
+static void
+arm_patch_general (MonoDomain *domain, guchar *code, const guchar *target)
{
guint32 *code32 = (void*)code;
guint32 ins = *code32;
}
}
- handle_thunk (TRUE, code, target);
+ handle_thunk (domain, TRUE, code, target);
return;
}
// g_print ("patched with 0x%08x\n", ins);
}
+void
+arm_patch (guchar *code, const guchar *target)
+{
+ arm_patch_general (NULL, code, target);
+}
+
/*
* Return the >= 0 uimm8 value if val can be represented with a byte + rotation
* (with the rotation amount in *rot_amount. rot_amount is already adjusted
* to be used with the emit macros.
* Return -1 otherwise.
*/
-static int
+int
mono_arm_is_rotated_imm8 (guint32 val, gint *rot_amount)
{
guint32 res, i;
} else if ((imm8 = mono_arm_is_rotated_imm8 (~val, &rot_amount)) >= 0) {
ARM_MVN_REG_IMM (code, dreg, imm8, rot_amount);
} else {
+ if (v7_supported) {
+ ARM_MOVW_REG_IMM (code, dreg, val & 0xffff);
+ if (val >> 16)
+ ARM_MOVT_REG_IMM (code, dreg, (val >> 16) & 0xffff);
+ return code;
+ }
if (val & 0xFF) {
ARM_MOV_REG_IMM8 (code, dreg, (val & 0xFF));
if (val & 0xFF00) {
return code;
}
+gboolean
+mono_arm_thumb_supported (void)
+{
+ return thumb_supported;
+}
+
+#ifndef DISABLE_JIT
+
+/*
+ * emit_load_volatile_arguments:
+ *
+ * Load volatile arguments from the stack to the original input registers.
+ * Required before a tail call.
+ */
+static guint8*
+emit_load_volatile_arguments (MonoCompile *cfg, guint8 *code)
+{
+ MonoMethod *method = cfg->method;
+ MonoMethodSignature *sig;
+ MonoInst *inst;
+ CallInfo *cinfo;
+ guint32 i, pos;
+
+ /* FIXME: Generate intermediate code instead */
+
+ sig = mono_method_signature (method);
+
+ /* This is the opposite of the code in emit_prolog */
+
+ pos = 0;
+
+ cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig, sig->pinvoke);
+
+ if (MONO_TYPE_ISSTRUCT (sig->ret)) {
+ ArgInfo *ainfo = &cinfo->ret;
+ inst = cfg->vret_addr;
+ g_assert (arm_is_imm12 (inst->inst_offset));
+ ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
+ }
+ for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
+ ArgInfo *ainfo = cinfo->args + i;
+ inst = cfg->args [pos];
+
+ if (cfg->verbose_level > 2)
+ g_print ("Loading argument %d (type: %d)\n", i, ainfo->storage);
+ if (inst->opcode == OP_REGVAR) {
+ if (ainfo->storage == RegTypeGeneral)
+ ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
+ else if (ainfo->storage == RegTypeFP) {
+ g_assert_not_reached ();
+ } else if (ainfo->storage == RegTypeBase) {
+ // FIXME:
+ NOT_IMPLEMENTED;
+ /*
+ if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
+ ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
+ } else {
+ code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
+ ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
+ }
+ */
+ } else
+ g_assert_not_reached ();
+ } else {
+ if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair) {
+ switch (ainfo->size) {
+ case 1:
+ case 2:
+ // FIXME:
+ NOT_IMPLEMENTED;
+ break;
+ case 8:
+ g_assert (arm_is_imm12 (inst->inst_offset));
+ ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
+ g_assert (arm_is_imm12 (inst->inst_offset + 4));
+ ARM_LDR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
+ break;
+ default:
+ if (arm_is_imm12 (inst->inst_offset)) {
+ ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
+ } else {
+ code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
+ ARM_LDR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
+ }
+ break;
+ }
+ } else if (ainfo->storage == RegTypeBaseGen) {
+ // FIXME:
+ NOT_IMPLEMENTED;
+ } else if (ainfo->storage == RegTypeBase) {
+ /* Nothing to do */
+ } else if (ainfo->storage == RegTypeFP) {
+ g_assert_not_reached ();
+ } else if (ainfo->storage == RegTypeStructByVal) {
+ int doffset = inst->inst_offset;
+ int soffset = 0;
+ int cur_reg;
+ int size = 0;
+ if (mono_class_from_mono_type (inst->inst_vtype))
+ size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), NULL);
+ for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
+ if (arm_is_imm12 (doffset)) {
+ ARM_LDR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
+ } else {
+ code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
+ ARM_LDR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
+ }
+ soffset += sizeof (gpointer);
+ doffset += sizeof (gpointer);
+ }
+ if (ainfo->vtsize)
+ // FIXME:
+ NOT_IMPLEMENTED;
+ } else if (ainfo->storage == RegTypeStructByAddr) {
+ } else {
+ // FIXME:
+ NOT_IMPLEMENTED;
+ }
+ }
+ pos ++;
+ }
+
+ g_free (cinfo);
+
+ return code;
+}
+
void
mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
{
MonoCallInst *call;
guint offset;
guint8 *code = cfg->native_code + cfg->code_len;
+ MonoInst *last_ins = NULL;
guint last_offset = 0;
int max_len, cpos;
int imm8, rot_amount;
//x86_inc_mem (code, &cov->data [bb->dfn].count);
}
+ if (mono_break_at_bb_method && mono_method_desc_full_match (mono_break_at_bb_method, cfg->method) && bb->block_num == mono_break_at_bb_bb_num) {
+ mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
+ (gpointer)"mono_break");
+ code = emit_call_seq (cfg, code);
+ }
+
MONO_BB_FOR_EACH_INS (bb, ins) {
offset = code - cfg->native_code;
case OP_MEMORY_BARRIER:
break;
case OP_TLS_GET:
+#ifdef HAVE_AEABI_READ_TP
+ mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
+ (gpointer)"__aeabi_read_tp");
+ code = emit_call_seq (cfg, code);
+
+ ARM_LDR_IMM (code, ins->dreg, ARMREG_R0, ins->inst_offset);
+#else
g_assert_not_reached ();
+#endif
break;
/*case OP_BIGMUL:
ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
ARM_STRB_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
break;
case OP_STOREI2_MEMINDEX:
- /* note: the args are reversed in the macro */
- ARM_STRH_REG_REG (code, ins->inst_destbasereg, ins->sreg1, ins->sreg2);
+ ARM_STRH_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
break;
case OP_STORE_MEMINDEX:
case OP_STOREI4_MEMINDEX:
ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
break;
case OP_LOADI1_MEMINDEX:
- /* note: the args are reversed in the macro */
- ARM_LDRSB_REG_REG (code, ins->inst_basereg, ins->dreg, ins->sreg2);
+ ARM_LDRSB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
break;
case OP_LOADU1_MEMINDEX:
ARM_LDRB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
break;
case OP_LOADI2_MEMINDEX:
- /* note: the args are reversed in the macro */
- ARM_LDRSH_REG_REG (code, ins->inst_basereg, ins->dreg, ins->sreg2);
+ ARM_LDRSH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
break;
case OP_LOADU2_MEMINDEX:
- /* note: the args are reversed in the macro */
- ARM_LDRH_REG_REG (code, ins->inst_basereg, ins->dreg, ins->sreg2);
+ ARM_LDRH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
break;
case OP_LOAD_MEMBASE:
case OP_LOADI4_MEMBASE:
g_assert (arm_is_imm8 (ins->inst_offset));
ARM_LDRSH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
break;
- case CEE_CONV_I1:
+ case OP_ICONV_TO_I1:
ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 24);
ARM_SAR_IMM (code, ins->dreg, ins->dreg, 24);
break;
- case CEE_CONV_I2:
+ case OP_ICONV_TO_I2:
ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
ARM_SAR_IMM (code, ins->dreg, ins->dreg, 16);
break;
- case CEE_CONV_U1:
+ case OP_ICONV_TO_U1:
ARM_AND_REG_IMM8 (code, ins->dreg, ins->sreg1, 0xff);
break;
- case CEE_CONV_U2:
+ case OP_ICONV_TO_U2:
ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
ARM_SHR_IMM (code, ins->dreg, ins->dreg, 16);
break;
case OP_COMPARE:
+ case OP_ICOMPARE:
ARM_CMP_REG_REG (code, ins->sreg1, ins->sreg2);
break;
case OP_COMPARE_IMM:
+ case OP_ICOMPARE_IMM:
imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
g_assert (imm8 >= 0);
ARM_CMP_REG_IMM (code, ins->sreg1, imm8, rot_amount);
break;
case OP_BREAK:
- *(int*)code = 0xe7f001f0;
- *(int*)code = 0xef9f0001;
- code += 4;
+ /*
+ * gdb does not like encountering the hw breakpoint ins in the debugged code.
+	 * So instead of emitting a trap, we emit a call to a C function and place a
+ * breakpoint there.
+ */
+ //*(int*)code = 0xef9f0001;
+ //code += 4;
//ARM_DBRK (code);
+ mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
+ (gpointer)"mono_break");
+ code = emit_call_seq (cfg, code);
+ break;
+ case OP_RELAXED_NOP:
+ ARM_NOP (code);
+ break;
+ case OP_NOP:
+ case OP_DUMMY_USE:
+ case OP_DUMMY_STORE:
+ case OP_NOT_REACHED:
+ case OP_NOT_NULL:
+ break;
+ case OP_SEQ_POINT: {
+ int i;
+ MonoInst *info_var = cfg->arch.seq_point_info_var;
+ MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
+ MonoInst *var;
+ int dreg = ARMREG_LR;
+
+ /*
+ * For AOT, we use one got slot per method, which will point to a
+ * SeqPointInfo structure, containing all the information required
+ * by the code below.
+ */
+ if (cfg->compile_aot) {
+ g_assert (info_var);
+ g_assert (info_var->opcode == OP_REGOFFSET);
+ g_assert (arm_is_imm12 (info_var->inst_offset));
+ }
+
+ /*
+ * Read from the single stepping trigger page. This will cause a
+ * SIGSEGV when single stepping is enabled.
+ * We do this _before_ the breakpoint, so single stepping after
+ * a breakpoint is hit will step to the next IL offset.
+ */
+ g_assert (((guint64)(gsize)ss_trigger_page >> 32) == 0);
+
+ if (ins->flags & MONO_INST_SINGLE_STEP_LOC) {
+ if (cfg->compile_aot) {
+ /* Load the trigger page addr from the variable initialized in the prolog */
+ var = ss_trigger_page_var;
+ g_assert (var);
+ g_assert (var->opcode == OP_REGOFFSET);
+ g_assert (arm_is_imm12 (var->inst_offset));
+ ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
+ } else {
+ ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
+ ARM_B (code, 0);
+ *(int*)code = (int)ss_trigger_page;
+ code += 4;
+ }
+ ARM_LDR_IMM (code, dreg, dreg, 0);
+ }
+
+ mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
+
+ if (cfg->compile_aot) {
+ guint32 offset = code - cfg->native_code;
+ guint32 val;
+
+ ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
+ /* Add the offset */
+ val = ((offset / 4) * sizeof (guint8*)) + G_STRUCT_OFFSET (SeqPointInfo, bp_addrs);
+ ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF), 0);
+ /*
+ * Have to emit nops to keep the difference between the offset
+ * stored in seq_points and breakpoint instruction constant,
+ * mono_arch_get_ip_for_breakpoint () depends on this.
+ */
+ if (val & 0xFF00)
+ ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
+ else
+ ARM_NOP (code);
+ if (val & 0xFF0000)
+ ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
+ else
+ ARM_NOP (code);
+ g_assert (!(val & 0xFF000000));
+ /* Load the info->bp_addrs [offset], which is either 0 or the address of a trigger page */
+ ARM_LDR_IMM (code, dreg, dreg, 0);
+
+ /* What is faster, a branch or a load ? */
+ ARM_CMP_REG_IMM (code, dreg, 0, 0);
+ /* The breakpoint instruction */
+ ARM_LDR_IMM_COND (code, dreg, dreg, 0, ARMCOND_NE);
+ } else {
+ /*
+ * A placeholder for a possible breakpoint inserted by
+ * mono_arch_set_breakpoint ().
+ */
+ for (i = 0; i < 4; ++i)
+ ARM_NOP (code);
+ }
break;
+ }
case OP_ADDCC:
+ case OP_IADDCC:
ARM_ADDS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
- case CEE_ADD:
+ case OP_IADD:
ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_ADC:
+ case OP_IADC:
ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_ADDCC_IMM:
ARM_ADDS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
break;
case OP_ADD_IMM:
+ case OP_IADD_IMM:
imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
g_assert (imm8 >= 0);
ARM_ADD_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
break;
case OP_ADC_IMM:
+ case OP_IADC_IMM:
imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
g_assert (imm8 >= 0);
ARM_ADCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
break;
- case CEE_ADD_OVF:
+ case OP_IADD_OVF:
ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
//EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
break;
- case CEE_ADD_OVF_UN:
+ case OP_IADD_OVF_UN:
ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
//EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
break;
- case CEE_SUB_OVF:
+ case OP_ISUB_OVF:
ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
//EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
break;
- case CEE_SUB_OVF_UN:
+ case OP_ISUB_OVF_UN:
ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
//EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
break;
//EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
break;
case OP_SUBCC:
+ case OP_ISUBCC:
ARM_SUBS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_SUBCC_IMM:
g_assert (imm8 >= 0);
ARM_SUBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
break;
- case CEE_SUB:
+ case OP_ISUB:
ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_SBB:
+ case OP_ISBB:
ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_SUB_IMM:
+ case OP_ISUB_IMM:
imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
g_assert (imm8 >= 0);
ARM_SUB_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
break;
case OP_SBB_IMM:
+ case OP_ISBB_IMM:
imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
g_assert (imm8 >= 0);
ARM_SBCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
g_assert (imm8 >= 0);
ARM_RSC_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
break;
- case CEE_AND:
+ case OP_IAND:
ARM_AND_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_AND_IMM:
+ case OP_IAND_IMM:
imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
g_assert (imm8 >= 0);
ARM_AND_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
break;
- case CEE_DIV:
- case CEE_DIV_UN:
+ case OP_IDIV:
+ case OP_IDIV_UN:
case OP_DIV_IMM:
- case CEE_REM:
- case CEE_REM_UN:
+ case OP_IREM:
+ case OP_IREM_UN:
case OP_REM_IMM:
/* crappy ARM arch doesn't have a DIV instruction */
g_assert_not_reached ();
- case CEE_OR:
+ case OP_IOR:
ARM_ORR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_OR_IMM:
+ case OP_IOR_IMM:
imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
g_assert (imm8 >= 0);
ARM_ORR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
break;
- case CEE_XOR:
+ case OP_IXOR:
ARM_EOR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_XOR_IMM:
+ case OP_IXOR_IMM:
imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
g_assert (imm8 >= 0);
ARM_EOR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
break;
- case CEE_SHL:
+ case OP_ISHL:
ARM_SHL_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_SHL_IMM:
+ case OP_ISHL_IMM:
if (ins->inst_imm)
ARM_SHL_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
+ else if (ins->dreg != ins->sreg1)
+ ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
break;
- case CEE_SHR:
+ case OP_ISHR:
ARM_SAR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_SHR_IMM:
+ case OP_ISHR_IMM:
if (ins->inst_imm)
ARM_SAR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
+ else if (ins->dreg != ins->sreg1)
+ ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
break;
case OP_SHR_UN_IMM:
+ case OP_ISHR_UN_IMM:
if (ins->inst_imm)
ARM_SHR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
+ else if (ins->dreg != ins->sreg1)
+ ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
break;
- case CEE_SHR_UN:
+ case OP_ISHR_UN:
ARM_SHR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
- case CEE_NOT:
+ case OP_INOT:
ARM_MVN_REG_REG (code, ins->dreg, ins->sreg1);
break;
- case CEE_NEG:
+ case OP_INEG:
ARM_RSB_REG_IMM8 (code, ins->dreg, ins->sreg1, 0);
break;
- case CEE_MUL:
+ case OP_IMUL:
if (ins->dreg == ins->sreg2)
ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
else
case OP_MUL_IMM:
g_assert_not_reached ();
break;
- case CEE_MUL_OVF:
+ case OP_IMUL_OVF:
/* FIXME: handle ovf/ sreg2 != dreg */
ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
+ /* FIXME: MUL doesn't set the C/O flags on ARM */
break;
- case CEE_MUL_OVF_UN:
+ case OP_IMUL_OVF_UN:
/* FIXME: handle ovf/ sreg2 != dreg */
ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
+ /* FIXME: MUL doesn't set the C/O flags on ARM */
break;
case OP_ICONST:
code = mono_arm_emit_load_imm (code, ins->dreg, ins->inst_c0);
/* Load the value from the GOT */
ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
break;
- case CEE_CONV_I4:
- case CEE_CONV_U4:
+ case OP_ICONV_TO_I4:
+ case OP_ICONV_TO_U4:
case OP_MOVE:
if (ins->dreg != ins->sreg1)
ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
* Keep in sync with mono_arch_emit_epilog
*/
g_assert (!cfg->method->save_lmf);
+
+ code = emit_load_volatile_arguments (cfg, code);
+
code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage);
ARM_POP_NWB (code, cfg->used_int_regs | ((1 << ARMREG_SP)) | ((1 << ARMREG_LR)));
mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
- ARM_B (code, 0);
+ if (cfg->compile_aot) {
+ ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
+ ARM_B (code, 0);
+ *(gpointer*)code = NULL;
+ code += 4;
+ ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_IP);
+ } else {
+ ARM_B (code, 0);
+ }
break;
case OP_CHECK_THIS:
/* ensure ins->sreg1 is not NULL */
ARM_LDR_IMM (code, ARMREG_LR, ins->sreg1, 0);
break;
case OP_ARGLIST: {
-#if ARM_PORT
- if (ppc_is_imm16 (cfg->sig_cookie + cfg->stack_usage)) {
- ppc_addi (code, ppc_r11, cfg->frame_reg, cfg->sig_cookie + cfg->stack_usage);
- } else {
- ppc_load (code, ppc_r11, cfg->sig_cookie + cfg->stack_usage);
- ppc_add (code, ppc_r11, cfg->frame_reg, ppc_r11);
- }
- ppc_stw (code, ppc_r11, 0, ins->sreg1);
-#endif
+ g_assert (cfg->sig_cookie < 128);
+ ARM_LDR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
+ ARM_STR_IMM (code, ARMREG_IP, ins->sreg1, 0);
break;
}
case OP_FCALL:
case OP_LCALL:
case OP_VCALL:
+ case OP_VCALL2:
case OP_VOIDCALL:
case OP_CALL:
call = (MonoCallInst*)ins;
else
mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, call->fptr);
code = emit_call_seq (cfg, code);
+ code = emit_move_return_value (cfg, ins, code);
break;
case OP_FCALL_REG:
case OP_LCALL_REG:
case OP_VCALL_REG:
+ case OP_VCALL2_REG:
case OP_VOIDCALL_REG:
case OP_CALL_REG:
code = emit_call_reg (code, ins->sreg1);
+ code = emit_move_return_value (cfg, ins, code);
break;
case OP_FCALL_MEMBASE:
case OP_LCALL_MEMBASE:
case OP_VCALL_MEMBASE:
+ case OP_VCALL2_MEMBASE:
case OP_VOIDCALL_MEMBASE:
case OP_CALL_MEMBASE:
g_assert (arm_is_imm12 (ins->inst_offset));
g_assert (ins->sreg1 != ARMREG_LR);
call = (MonoCallInst*)ins;
- if (call->method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
- if (cfg->compile_aot)
- /* FIXME: */
- cfg->disable_aot = 1;
+ if (call->dynamic_imt_arg || call->method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
ARM_ADD_REG_IMM8 (code, ARMREG_LR, ARMREG_PC, 4);
ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
- *((gpointer*)code) = (gpointer)call->method;
+ /*
+ * We can't embed the method in the code stream in PIC code, or
+ * in gshared code.
+ * Instead, we put it in V5 in code emitted by
+ * mono_arch_emit_imt_argument (), and embed NULL here to
+ * signal the IMT thunk that the value is in V5.
+ */
+ if (call->dynamic_imt_arg)
+ *((gpointer*)code) = NULL;
+ else
+ *((gpointer*)code) = (gpointer)call->method;
code += 4;
} else {
ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
}
- break;
- case OP_OUTARG:
- g_assert_not_reached ();
+ code = emit_move_return_value (cfg, ins, code);
break;
case OP_LOCALLOC: {
/* keep alignment */
ARM_ADD_REG_IMM8 (code, ins->dreg, ARMREG_SP, alloca_waste);
break;
}
+ case OP_DYN_CALL: {
+ int i;
+ MonoInst *var = cfg->dyn_call_var;
+
+ g_assert (var->opcode == OP_REGOFFSET);
+ g_assert (arm_is_imm12 (var->inst_offset));
+
+ /* lr = args buffer filled by mono_arch_get_dyn_call_args () */
+ ARM_MOV_REG_REG( code, ARMREG_LR, ins->sreg1);
+ /* ip = ftn */
+ ARM_MOV_REG_REG( code, ARMREG_IP, ins->sreg2);
+
+ /* Save args buffer */
+ ARM_STR_IMM (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
+
+ /* Set stack slots using R0 as scratch reg */
+ /* MONO_ARCH_DYN_CALL_PARAM_AREA gives the size of stack space available */
+ for (i = 0; i < DYN_CALL_STACK_ARGS; ++i) {
+ ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, (PARAM_REGS + i) * sizeof (gpointer));
+ ARM_STR_IMM (code, ARMREG_R0, ARMREG_SP, i * sizeof (gpointer));
+ }
+
+ /* Set argument registers */
+ for (i = 0; i < PARAM_REGS; ++i)
+ ARM_LDR_IMM (code, i, ARMREG_LR, i * sizeof (gpointer));
+
+ /* Make the call */
+ ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
+ ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
+
+ /* Save result */
+ ARM_LDR_IMM (code, ARMREG_IP, var->inst_basereg, var->inst_offset);
+ ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, G_STRUCT_OFFSET (DynCallArgs, res));
+ ARM_STR_IMM (code, ARMREG_R1, ARMREG_IP, G_STRUCT_OFFSET (DynCallArgs, res2));
+ break;
+ }
case OP_THROW: {
if (ins->sreg1 != ARMREG_R0)
ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
code = emit_call_seq (cfg, code);
break;
}
- case OP_START_HANDLER:
- if (arm_is_imm12 (ins->inst_left->inst_offset)) {
- ARM_STR_IMM (code, ARMREG_LR, ins->inst_left->inst_basereg, ins->inst_left->inst_offset);
+ case OP_START_HANDLER: {
+ MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
+
+ if (arm_is_imm12 (spvar->inst_offset)) {
+ ARM_STR_IMM (code, ARMREG_LR, spvar->inst_basereg, spvar->inst_offset);
} else {
- code = mono_arm_emit_load_imm (code, ARMREG_IP, ins->inst_left->inst_offset);
- ARM_STR_REG_REG (code, ARMREG_LR, ins->inst_left->inst_basereg, ARMREG_IP);
+ code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
+ ARM_STR_REG_REG (code, ARMREG_LR, spvar->inst_basereg, ARMREG_IP);
}
break;
- case OP_ENDFILTER:
+ }
+ case OP_ENDFILTER: {
+ MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
+
if (ins->sreg1 != ARMREG_R0)
ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
- if (arm_is_imm12 (ins->inst_left->inst_offset)) {
- ARM_LDR_IMM (code, ARMREG_IP, ins->inst_left->inst_basereg, ins->inst_left->inst_offset);
+ if (arm_is_imm12 (spvar->inst_offset)) {
+ ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
} else {
- g_assert (ARMREG_IP != ins->inst_left->inst_basereg);
- code = mono_arm_emit_load_imm (code, ARMREG_IP, ins->inst_left->inst_offset);
- ARM_LDR_REG_REG (code, ARMREG_IP, ins->inst_left->inst_basereg, ARMREG_IP);
+ g_assert (ARMREG_IP != spvar->inst_basereg);
+ code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
+ ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
}
ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
break;
- case OP_ENDFINALLY:
- if (arm_is_imm12 (ins->inst_left->inst_offset)) {
- ARM_LDR_IMM (code, ARMREG_IP, ins->inst_left->inst_basereg, ins->inst_left->inst_offset);
+ }
+ case OP_ENDFINALLY: {
+ MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
+
+ if (arm_is_imm12 (spvar->inst_offset)) {
+ ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
} else {
- g_assert (ARMREG_IP != ins->inst_left->inst_basereg);
- code = mono_arm_emit_load_imm (code, ARMREG_IP, ins->inst_left->inst_offset);
- ARM_LDR_REG_REG (code, ARMREG_IP, ins->inst_left->inst_basereg, ARMREG_IP);
+ g_assert (ARMREG_IP != spvar->inst_basereg);
+ code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
+ ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
}
ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
break;
+ }
case OP_CALL_HANDLER:
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
ARM_BL (code, 0);
+ mono_cfg_add_try_hole (cfg, ins->inst_eh_block, code, bb);
break;
case OP_LABEL:
ins->inst_c0 = code - cfg->native_code;
break;
case OP_BR:
- if (ins->flags & MONO_INST_BRLABEL) {
- /*if (ins->inst_i0->inst_c0) {
- ARM_B (code, 0);
- //x86_jump_code (code, cfg->native_code + ins->inst_i0->inst_c0);
- } else*/ {
- mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_LABEL, ins->inst_i0);
- ARM_B (code, 0);
- }
- } else {
- /*if (ins->inst_target_bb->native_offset) {
- ARM_B (code, 0);
- //x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
- } else*/ {
- mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
- ARM_B (code, 0);
- }
- }
+ /*if (ins->inst_target_bb->native_offset) {
+ ARM_B (code, 0);
+ //x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
+ } else*/ {
+ mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
+ ARM_B (code, 0);
+ }
break;
case OP_BR_REG:
ARM_MOV_REG_REG (code, ARMREG_PC, ins->sreg1);
* After follows the data.
* FIXME: add aot support.
*/
+ mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_SWITCH, ins->inst_p0);
max_len += 4 * GPOINTER_TO_INT (ins->klass);
- if (offset > (cfg->code_size - max_len - 16)) {
+ if (offset + max_len > (cfg->code_size - 16)) {
cfg->code_size += max_len;
cfg->code_size *= 2;
cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
code += 4 * GPOINTER_TO_INT (ins->klass);
break;
case OP_CEQ:
+ case OP_ICEQ:
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
break;
case OP_CLT:
+ case OP_ICLT:
ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LT);
break;
case OP_CLT_UN:
+ case OP_ICLT_UN:
ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LO);
break;
case OP_CGT:
+ case OP_ICGT:
ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_GT);
break;
case OP_CGT_UN:
+ case OP_ICGT_UN:
ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_HI);
break;
case OP_COND_EXC_LE_UN:
EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_EQ, ins->inst_p1);
break;
+ case OP_COND_EXC_IEQ:
+ case OP_COND_EXC_INE_UN:
+ case OP_COND_EXC_ILT:
+ case OP_COND_EXC_ILT_UN:
+ case OP_COND_EXC_IGT:
+ case OP_COND_EXC_IGT_UN:
+ case OP_COND_EXC_IGE:
+ case OP_COND_EXC_IGE_UN:
+ case OP_COND_EXC_ILE:
+ case OP_COND_EXC_ILE_UN:
+ EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_IEQ, ins->inst_p1);
+ break;
case OP_COND_EXC_C:
+ case OP_COND_EXC_IC:
+ EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CS, ins->inst_p1);
+ break;
case OP_COND_EXC_OV:
+ case OP_COND_EXC_IOV:
+ EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, ins->inst_p1);
+ break;
case OP_COND_EXC_NC:
- case OP_COND_EXC_NO:
- g_assert_not_reached ();
+ case OP_COND_EXC_INC:
+ EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CC, ins->inst_p1);
break;
- case CEE_BEQ:
- case CEE_BNE_UN:
- case CEE_BLT:
- case CEE_BLT_UN:
- case CEE_BGT:
- case CEE_BGT_UN:
- case CEE_BGE:
- case CEE_BGE_UN:
- case CEE_BLE:
- case CEE_BLE_UN:
- EMIT_COND_BRANCH (ins, ins->opcode - CEE_BEQ);
+ case OP_COND_EXC_NO:
+ case OP_COND_EXC_INO:
+ EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VC, ins->inst_p1);
+ break;
+ case OP_IBEQ:
+ case OP_IBNE_UN:
+ case OP_IBLT:
+ case OP_IBLT_UN:
+ case OP_IBGT:
+ case OP_IBGT_UN:
+ case OP_IBGE:
+ case OP_IBGE_UN:
+ case OP_IBLE:
+ case OP_IBLE_UN:
+ EMIT_COND_BRANCH (ins, ins->opcode - OP_IBEQ);
break;
/* floating point opcodes */
}
break;
case OP_STORER8_MEMBASE_REG:
- g_assert (arm_is_fpimm8 (ins->inst_offset));
- ARM_STFD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
+ /* This is generated by the local regalloc pass which runs after the lowering pass */
+ if (!arm_is_fpimm8 (ins->inst_offset)) {
+ code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
+ ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_destbasereg);
+ ARM_STFD (code, ins->sreg1, ARMREG_LR, 0);
+ } else {
+ ARM_STFD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
+ }
break;
case OP_LOADR8_MEMBASE:
- g_assert (arm_is_fpimm8 (ins->inst_offset));
- ARM_LDFD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
+ /* This is generated by the local regalloc pass which runs after the lowering pass */
+ if (!arm_is_fpimm8 (ins->inst_offset)) {
+ code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
+ ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_basereg);
+ ARM_LDFD (code, ins->dreg, ARMREG_LR, 0);
+ } else {
+ ARM_LDFD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
+ }
break;
case OP_STORER4_MEMBASE_REG:
g_assert (arm_is_fpimm8 (ins->inst_offset));
g_assert (arm_is_fpimm8 (ins->inst_offset));
ARM_LDFS (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
break;
- case CEE_CONV_R_UN: {
+ case OP_ICONV_TO_R_UN: {
int tmpreg;
tmpreg = ins->dreg == 0? 1: 0;
ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
*/
break;
}
- case CEE_CONV_R4:
+ case OP_ICONV_TO_R4:
ARM_FLTS (code, ins->dreg, ins->sreg1);
break;
- case CEE_CONV_R8:
+ case OP_ICONV_TO_R8:
ARM_FLTD (code, ins->dreg, ins->sreg1);
break;
+
#elif defined(ARM_FPU_VFP)
+
case OP_R8CONST:
if (cfg->compile_aot) {
- ARM_LDFD (code, ins->dreg, ARMREG_PC, 0);
+ ARM_FLDD (code, ins->dreg, ARMREG_PC, 0);
ARM_B (code, 1);
*(guint32*)code = ((guint32*)(ins->inst_p0))[0];
code += 4;
}
break;
case OP_STORER8_MEMBASE_REG:
- g_assert (arm_is_fpimm8 (ins->inst_offset));
- ARM_FSTD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
+ /* This is generated by the local regalloc pass which runs after the lowering pass */
+ if (!arm_is_fpimm8 (ins->inst_offset)) {
+ code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
+ ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_destbasereg);
+ ARM_FSTD (code, ins->sreg1, ARMREG_LR, 0);
+ } else {
+ ARM_FSTD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
+ }
break;
case OP_LOADR8_MEMBASE:
- g_assert (arm_is_fpimm8 (ins->inst_offset));
- ARM_FLDD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
+ /* This is generated by the local regalloc pass which runs after the lowering pass */
+ if (!arm_is_fpimm8 (ins->inst_offset)) {
+ code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
+ ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_basereg);
+ ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
+ } else {
+ ARM_FLDD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
+ }
break;
case OP_STORER4_MEMBASE_REG:
g_assert (arm_is_fpimm8 (ins->inst_offset));
- ARM_FSTS (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
+ ARM_CVTD (code, ARM_VFP_F0, ins->sreg1);
+ ARM_FSTS (code, ARM_VFP_F0, ins->inst_destbasereg, ins->inst_offset);
break;
case OP_LOADR4_MEMBASE:
g_assert (arm_is_fpimm8 (ins->inst_offset));
- ARM_FLDS (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
+ ARM_FLDS (code, ARM_VFP_F0, ins->inst_basereg, ins->inst_offset);
+ ARM_CVTS (code, ins->dreg, ARM_VFP_F0);
break;
- case CEE_CONV_R_UN: {
+ case OP_ICONV_TO_R_UN: {
g_assert_not_reached ();
break;
}
- case CEE_CONV_R4:
- g_assert_not_reached ();
- //ARM_FLTS (code, ins->dreg, ins->sreg1);
+ case OP_ICONV_TO_R4:
+ ARM_FMSR (code, ARM_VFP_F0, ins->sreg1);
+ ARM_FSITOS (code, ARM_VFP_F0, ARM_VFP_F0);
+ ARM_CVTS (code, ins->dreg, ARM_VFP_F0);
break;
- case CEE_CONV_R8:
- g_assert_not_reached ();
- //ARM_FLTD (code, ins->dreg, ins->sreg1);
+ case OP_ICONV_TO_R8:
+ ARM_FMSR (code, ARM_VFP_F0, ins->sreg1);
+ ARM_FSITOD (code, ins->dreg, ARM_VFP_F0);
+ break;
+
+ case OP_SETFRET:
+ if (mono_method_signature (cfg->method)->ret->type == MONO_TYPE_R4) {
+ ARM_CVTD (code, ARM_VFP_F0, ins->sreg1);
+ ARM_FMRS (code, ARMREG_R0, ARM_VFP_F0);
+ } else {
+ ARM_FMRRD (code, ARMREG_R0, ARMREG_R1, ins->sreg1);
+ }
break;
+
#endif
+
case OP_FCONV_TO_I1:
code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
break;
g_assert_not_reached ();
/* Implemented as helper calls */
break;
- case OP_LCONV_TO_OVF_I: {
-#if ARM_PORT
- guint32 *negative_branch, *msword_positive_branch, *msword_negative_branch, *ovf_ex_target;
- // Check if its negative
- ppc_cmpi (code, 0, 0, ins->sreg1, 0);
- negative_branch = code;
- ppc_bc (code, PPC_BR_TRUE, PPC_BR_LT, 0);
- // Its positive msword == 0
- ppc_cmpi (code, 0, 0, ins->sreg2, 0);
- msword_positive_branch = code;
- ppc_bc (code, PPC_BR_TRUE, PPC_BR_EQ, 0);
-
- ovf_ex_target = code;
- //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_ALWAYS, 0, "OverflowException");
- // Negative
- ppc_patch (negative_branch, code);
- ppc_cmpi (code, 0, 0, ins->sreg2, -1);
- msword_negative_branch = code;
- ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 0);
- ppc_patch (msword_negative_branch, ovf_ex_target);
+ case OP_LCONV_TO_OVF_I4_2: {
+ guint8 *high_bit_not_set, *valid_negative, *invalid_negative, *valid_positive;
+ /*
+	 * Valid ints: 0xffffffff:80000000 to 00000000:0x7fffffff
+ */
+
+ ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
+ high_bit_not_set = code;
+ ARM_B_COND (code, ARMCOND_GE, 0); /*branch if bit 31 of the lower part is not set*/
+
+		ARM_CMN_REG_IMM8 (code, ins->sreg2, 1); /*This has the same effect as CMP reg, 0xFFFFFFFF */
+ valid_negative = code;
+ ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0xFFFFFFFF (lower part has bit 31 set) */
+ invalid_negative = code;
+ ARM_B_COND (code, ARMCOND_AL, 0);
- ppc_patch (msword_positive_branch, code);
- if (ins->dreg != ins->sreg1)
- ppc_mr (code, ins->dreg, ins->sreg1);
-#endif
+ arm_patch (high_bit_not_set, code);
+
+ ARM_CMP_REG_IMM8 (code, ins->sreg2, 0);
+ valid_positive = code;
+ ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0 (lower part has bit 31 clear)*/
+
+ arm_patch (invalid_negative, code);
+ EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_AL, "OverflowException");
+
+ arm_patch (valid_negative, code);
+ arm_patch (valid_positive, code);
+
if (ins->dreg != ins->sreg1)
ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
break;
g_assert_not_reached ();
break;
case OP_FCOMPARE:
- /* each fp compare op needs to do its own */
- g_assert_not_reached ();
- //ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
+#ifdef ARM_FPU_FPA
+ ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
+#elif defined(ARM_FPU_VFP)
+ ARM_CMPD (code, ins->sreg1, ins->sreg2);
+ ARM_FMSTAT (code);
+#endif
break;
case OP_FCEQ:
#ifdef ARM_FPU_FPA
ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
#elif defined(ARM_FPU_VFP)
ARM_CMPD (code, ins->sreg1, ins->sreg2);
+ ARM_FMSTAT (code);
#endif
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
#elif defined(ARM_FPU_VFP)
ARM_CMPD (code, ins->sreg1, ins->sreg2);
+ ARM_FMSTAT (code);
#endif
ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
#elif defined(ARM_FPU_VFP)
ARM_CMPD (code, ins->sreg1, ins->sreg2);
+ ARM_FMSTAT (code);
#endif
ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
#elif defined(ARM_FPU_VFP)
ARM_CMPD (code, ins->sreg2, ins->sreg1);
+ ARM_FMSTAT (code);
#endif
ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
#elif defined(ARM_FPU_VFP)
ARM_CMPD (code, ins->sreg2, ins->sreg1);
+ ARM_FMSTAT (code);
#endif
ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
* V Unordered ARMCOND_VS
*/
case OP_FBEQ:
-#ifdef ARM_FPU_FPA
- ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
-#elif defined(ARM_FPU_VFP)
- ARM_CMPD (code, ins->sreg1, ins->sreg2);
-#endif
- EMIT_COND_BRANCH (ins, CEE_BEQ - CEE_BEQ);
+ EMIT_COND_BRANCH (ins, OP_IBEQ - OP_IBEQ);
break;
case OP_FBNE_UN:
-#ifdef ARM_FPU_FPA
- ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
-#elif defined(ARM_FPU_VFP)
- ARM_CMPD (code, ins->sreg1, ins->sreg2);
-#endif
- EMIT_COND_BRANCH (ins, CEE_BNE_UN - CEE_BEQ);
+ EMIT_COND_BRANCH (ins, OP_IBNE_UN - OP_IBEQ);
break;
case OP_FBLT:
-#ifdef ARM_FPU_FPA
- ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
-#elif defined(ARM_FPU_VFP)
- ARM_CMPD (code, ins->sreg1, ins->sreg2);
-#endif
EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
break;
case OP_FBLT_UN:
-#ifdef ARM_FPU_FPA
- ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
-#elif defined(ARM_FPU_VFP)
- ARM_CMPD (code, ins->sreg1, ins->sreg2);
-#endif
EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
break;
case OP_FBGT:
-#ifdef ARM_FPU_FPA
- ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
-#elif defined(ARM_FPU_VFP)
- ARM_CMPD (code, ins->sreg2, ins->sreg1);
-#endif
- EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set, swapped args */
- break;
case OP_FBGT_UN:
-#ifdef ARM_FPU_FPA
- ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
-#elif defined(ARM_FPU_VFP)
- ARM_CMPD (code, ins->sreg2, ins->sreg1);
-#endif
- EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
- EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set, swapped args */
+ case OP_FBLE:
+ case OP_FBLE_UN:
+ g_assert_not_reached ();
break;
case OP_FBGE:
-#ifdef ARM_FPU_FPA
- ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
-#elif defined(ARM_FPU_VFP)
- ARM_CMPD (code, ins->sreg1, ins->sreg2);
-#endif
+#ifdef ARM_FPU_VFP
+ EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
+#else
+		/* FPA requires EQ even though the docs suggest that just CS is enough */
+ EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_EQ);
EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_CS);
+#endif
break;
case OP_FBGE_UN:
-#ifdef ARM_FPU_FPA
- ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
-#elif defined(ARM_FPU_VFP)
- ARM_CMPD (code, ins->sreg1, ins->sreg2);
-#endif
EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
break;
- case OP_FBLE:
-#ifdef ARM_FPU_FPA
- ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
-#elif defined(ARM_FPU_VFP)
- ARM_CMPD (code, ins->sreg2, ins->sreg1);
-#endif
- EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_CS); /* swapped */
- break;
- case OP_FBLE_UN:
+
+ case OP_CKFINITE: {
#ifdef ARM_FPU_FPA
- ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
+ if (ins->dreg != ins->sreg1)
+ ARM_MVFD (code, ins->dreg, ins->sreg1);
#elif defined(ARM_FPU_VFP)
- ARM_CMPD (code, ins->sreg2, ins->sreg1);
+ ARM_ABSD (code, ARM_VFP_D1, ins->sreg1);
+ ARM_FLDD (code, ARM_VFP_D0, ARMREG_PC, 0);
+ ARM_B (code, 1);
+ *(guint32*)code = 0xffffffff;
+ code += 4;
+ *(guint32*)code = 0x7fefffff;
+ code += 4;
+ ARM_CMPD (code, ARM_VFP_D1, ARM_VFP_D0);
+ ARM_FMSTAT (code);
+ EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_GT, "ArithmeticException");
+ ARM_CMPD (code, ins->sreg1, ins->sreg1);
+ ARM_FMSTAT (code);
+ EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, "ArithmeticException");
+
+ ARM_CPYD (code, ins->dreg, ins->sreg1);
#endif
- EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
- EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE); /* swapped */
- break;
- case OP_CKFINITE: {
- /*ppc_stfd (code, ins->sreg1, -8, ppc_sp);
- ppc_lwz (code, ppc_r11, -8, ppc_sp);
- ppc_rlwinm (code, ppc_r11, ppc_r11, 0, 1, 31);
- ppc_addis (code, ppc_r11, ppc_r11, -32752);
- ppc_rlwinmd (code, ppc_r11, ppc_r11, 1, 31, 31);
- EMIT_COND_SYSTEM_EXCEPTION (CEE_BEQ - CEE_BEQ, "ArithmeticException");*/
- g_assert_not_reached ();
break;
}
default:
cpos += max_len;
+ last_ins = ins;
last_offset = offset;
}
cfg->code_len = code - cfg->native_code;
}
+#endif /* DISABLE_JIT */
+
+#ifdef HAVE_AEABI_READ_TP
+void __aeabi_read_tp (void);
+#endif
+
void
mono_arch_register_lowlevel_calls (void)
{
+ /* The signature doesn't matter */
+ mono_register_jit_icall (mono_arm_throw_exception, "mono_arm_throw_exception", mono_create_icall_signature ("void"), TRUE);
+ mono_register_jit_icall (mono_arm_throw_exception_by_token, "mono_arm_throw_exception_by_token", mono_create_icall_signature ("void"), TRUE);
+
+#ifndef MONO_CROSS_COMPILE
+#ifdef HAVE_AEABI_READ_TP
+ mono_register_jit_icall (__aeabi_read_tp, "__aeabi_read_tp", mono_create_icall_signature ("void"), TRUE);
+#endif
+#endif
}
#define patch_lis_ori(ip,val) do {\
default:
break;
}
- arm_patch (ip, target);
+ arm_patch_general (domain, ip, target);
}
}
+#ifndef DISABLE_JIT
+
/*
* Stack frame layout:
*
CallInfo *cinfo;
int tracing = 0;
int lmf_offset = 0;
- int prev_sp_offset;
+ int prev_sp_offset, reg_offset;
if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
tracing = 1;
cfg->code_size = 256 + sig->param_count * 20;
code = cfg->native_code = g_malloc (cfg->code_size);
+ mono_emit_unwind_op_def_cfa (cfg, code, ARMREG_SP, 0);
+
ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_SP);
alloc_size = cfg->stack_offset;
pos = 0;
if (!method->save_lmf) {
+ /* We save SP by storing it into IP and saving IP */
ARM_PUSH (code, (cfg->used_int_regs | (1 << ARMREG_IP) | (1 << ARMREG_LR)));
prev_sp_offset = 8; /* ip and lr */
for (i = 0; i < 16; ++i) {
if (cfg->used_int_regs & (1 << i))
prev_sp_offset += 4;
}
+ mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
+ reg_offset = 0;
+ for (i = 0; i < 16; ++i) {
+ if ((cfg->used_int_regs & (1 << i)) || (i == ARMREG_IP) || (i == ARMREG_LR)) {
+ mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
+ reg_offset += 4;
+ }
+ }
} else {
ARM_PUSH (code, 0x5ff0);
prev_sp_offset = 4 * 10; /* all but r0-r3, sp and pc */
+ mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
+ reg_offset = 0;
+ for (i = 0; i < 16; ++i) {
+ if ((i > ARMREG_R3) && (i != ARMREG_SP) && (i != ARMREG_PC)) {
+ mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
+ reg_offset += 4;
+ }
+ }
pos += sizeof (MonoLMF) - prev_sp_offset;
lmf_offset = pos;
}
code = mono_arm_emit_load_imm (code, ARMREG_IP, alloc_size);
ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
}
+ mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset + alloc_size);
}
- if (cfg->frame_reg != ARMREG_SP)
+ if (cfg->frame_reg != ARMREG_SP) {
ARM_MOV_REG_REG (code, cfg->frame_reg, ARMREG_SP);
+ mono_emit_unwind_op_def_cfa_reg (cfg, code, cfg->frame_reg);
+ }
//g_print ("prev_sp_offset: %d, alloc_size:%d\n", prev_sp_offset, alloc_size);
prev_sp_offset += alloc_size;
*/
max_offset = 0;
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
- MonoInst *ins;
+ MonoInst *ins = bb->code;
bb->max_offset = max_offset;
if (cfg->prof_options & MONO_PROFILE_COVERAGE)
max_offset += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
}
+ /* store runtime generic context */
+ if (cfg->rgctx_var) {
+ MonoInst *ins = cfg->rgctx_var;
+
+ g_assert (ins->opcode == OP_REGOFFSET);
+
+ if (arm_is_imm12 (ins->inst_offset)) {
+ ARM_STR_IMM (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ins->inst_offset);
+ } else {
+ code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
+ ARM_STR_REG_REG (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ARMREG_LR);
+ }
+ }
+
/* load arguments allocated to register from the stack */
pos = 0;
- cinfo = calculate_sizes (sig, sig->pinvoke);
+ cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig, sig->pinvoke);
- if (MONO_TYPE_ISSTRUCT (sig->ret)) {
+ if (MONO_TYPE_ISSTRUCT (sig->ret) && cinfo->ret.storage != RegTypeStructByVal) {
ArgInfo *ainfo = &cinfo->ret;
- inst = cfg->ret;
+ inst = cfg->vret_addr;
g_assert (arm_is_imm12 (inst->inst_offset));
ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
}
+
+ if (sig->call_convention == MONO_CALL_VARARG) {
+ ArgInfo *cookie = &cinfo->sig_cookie;
+
+ /* Save the sig cookie address */
+ g_assert (cookie->storage == RegTypeBase);
+
+ g_assert (arm_is_imm12 (prev_sp_offset + cookie->offset));
+ g_assert (arm_is_imm12 (cfg->sig_cookie));
+ ARM_ADD_REG_IMM8 (code, ARMREG_IP, cfg->frame_reg, prev_sp_offset + cookie->offset);
+ ARM_STR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
+ }
+
for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
ArgInfo *ainfo = cinfo->args + i;
inst = cfg->args [pos];
if (cfg->verbose_level > 2)
- g_print ("Saving argument %d (type: %d)\n", i, ainfo->regtype);
+ g_print ("Saving argument %d (type: %d)\n", i, ainfo->storage);
if (inst->opcode == OP_REGVAR) {
- if (ainfo->regtype == RegTypeGeneral)
+ if (ainfo->storage == RegTypeGeneral)
ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
- else if (ainfo->regtype == RegTypeFP) {
+ else if (ainfo->storage == RegTypeFP) {
g_assert_not_reached ();
- } else if (ainfo->regtype == RegTypeBase) {
- g_assert (arm_is_imm12 (prev_sp_offset + ainfo->offset));
- ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
+ } else if (ainfo->storage == RegTypeBase) {
+ if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
+ ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
+ } else {
+ code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
+ ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
+ }
} else
g_assert_not_reached ();
g_print ("Argument %d assigned to register %s\n", pos, mono_arch_regname (inst->dreg));
} else {
/* the argument should be put on the stack: FIXME handle size != word */
- if (ainfo->regtype == RegTypeGeneral) {
+ if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair) {
switch (ainfo->size) {
case 1:
if (arm_is_imm12 (inst->inst_offset))
ARM_STRH_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
} else {
code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
- ARM_ADD_REG_REG (code, ARMREG_IP, ARMREG_IP, inst->inst_basereg);
- ARM_STRH_IMM (code, ainfo->reg, ARMREG_IP, 0);
+ ARM_STRH_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
}
break;
case 8:
}
break;
}
- } else if (ainfo->regtype == RegTypeBaseGen) {
+ } else if (ainfo->storage == RegTypeBaseGen) {
g_assert (arm_is_imm12 (prev_sp_offset + ainfo->offset));
g_assert (arm_is_imm12 (inst->inst_offset));
ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
ARM_STR_IMM (code, ARMREG_R3, inst->inst_basereg, inst->inst_offset);
- } else if (ainfo->regtype == RegTypeBase) {
- g_assert (arm_is_imm12 (prev_sp_offset + ainfo->offset));
+ } else if (ainfo->storage == RegTypeBase) {
+ if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
+ ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
+ } else {
+ code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
+ ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
+ }
+
switch (ainfo->size) {
case 1:
- ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
- g_assert (arm_is_imm12 (inst->inst_offset));
- ARM_STRB_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
+ if (arm_is_imm8 (inst->inst_offset)) {
+ ARM_STRB_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
+ } else {
+ code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
+ ARM_STRB_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
+ }
break;
case 2:
- ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
if (arm_is_imm8 (inst->inst_offset)) {
ARM_STRH_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
} else {
code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
- ARM_ADD_REG_REG (code, ARMREG_IP, ARMREG_IP, inst->inst_basereg);
- ARM_STRH_IMM (code, ARMREG_LR, ARMREG_IP, 0);
+ ARM_STRH_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
}
break;
case 8:
- g_assert (arm_is_imm12 (inst->inst_offset));
- ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
- ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
- g_assert (arm_is_imm12 (prev_sp_offset + ainfo->offset + 4));
- g_assert (arm_is_imm12 (inst->inst_offset + 4));
- ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset + 4));
- ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
+ if (arm_is_imm12 (inst->inst_offset)) {
+ ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
+ } else {
+ code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
+ ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
+ }
+ if (arm_is_imm12 (prev_sp_offset + ainfo->offset + 4)) {
+ ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset + 4));
+ } else {
+ code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset + 4);
+ ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
+ }
+ if (arm_is_imm12 (inst->inst_offset + 4)) {
+ ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
+ } else {
+ code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
+ ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
+ }
break;
default:
- g_assert (arm_is_imm12 (inst->inst_offset));
- ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
- ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
+ if (arm_is_imm12 (inst->inst_offset)) {
+ ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
+ } else {
+ code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
+ ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
+ }
break;
}
- } else if (ainfo->regtype == RegTypeFP) {
+ } else if (ainfo->storage == RegTypeFP) {
g_assert_not_reached ();
- } else if (ainfo->regtype == RegTypeStructByVal) {
+ } else if (ainfo->storage == RegTypeStructByVal) {
int doffset = inst->inst_offset;
int soffset = 0;
int cur_reg;
int size = 0;
- if (mono_class_from_mono_type (inst->inst_vtype))
- size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), NULL);
+ size = mini_type_stack_size_full (cfg->generic_sharing_context, inst->inst_vtype, NULL, sig->pinvoke);
for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
- g_assert (arm_is_imm12 (doffset));
- ARM_STR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
+ if (arm_is_imm12 (doffset)) {
+ ARM_STR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
+ } else {
+ code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
+ ARM_STR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
+ }
soffset += sizeof (gpointer);
doffset += sizeof (gpointer);
}
//g_print ("emit_memcpy (prev_sp_ofs: %d, ainfo->offset: %d, soffset: %d)\n", prev_sp_offset, ainfo->offset, soffset);
code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, doffset, ARMREG_SP, prev_sp_offset + ainfo->offset);
}
- } else if (ainfo->regtype == RegTypeStructByAddr) {
+ } else if (ainfo->storage == RegTypeStructByAddr) {
g_assert_not_reached ();
/* FIXME: handle overrun! with struct sizes not multiple of 4 */
code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, inst->inst_offset, ainfo->reg, 0);
pos++;
}
- if (method->save_lmf) {
-
+ if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
+ if (cfg->compile_aot)
+ /* AOT code is only used in the root domain */
+ code = mono_arm_emit_load_imm (code, ARMREG_R0, 0);
+ else
+ code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->domain);
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
- (gpointer)"mono_get_lmf_addr");
+ (gpointer)"mono_jit_thread_attach");
code = emit_call_seq (cfg, code);
+ }
+
+ if (method->save_lmf) {
+ gboolean get_lmf_fast = FALSE;
+
+#ifdef HAVE_AEABI_READ_TP
+ gint32 lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
+
+ if (lmf_addr_tls_offset != -1) {
+ get_lmf_fast = TRUE;
+
+ mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
+ (gpointer)"__aeabi_read_tp");
+ code = emit_call_seq (cfg, code);
+
+ ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, lmf_addr_tls_offset);
+ get_lmf_fast = TRUE;
+ }
+#endif
+ if (!get_lmf_fast) {
+ mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
+ (gpointer)"mono_get_lmf_addr");
+ code = emit_call_seq (cfg, code);
+ }
/* we build the MonoLMF structure on the stack - see mini-arm.h */
/* lmf_offset is the offset from the previous stack pointer,
* alloc_size is the total stack space allocated, so the offset
ARM_STR_IMM (code, ARMREG_R2, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
/* *(lmf_addr) = r1 */
ARM_STR_IMM (code, ARMREG_R1, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
- /* save method info */
- code = mono_arm_emit_load_imm (code, ARMREG_R2, GPOINTER_TO_INT (method));
- ARM_STR_IMM (code, ARMREG_R2, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, method));
- ARM_STR_IMM (code, ARMREG_SP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, ebp));
+ /* Skip method (only needed for trampoline LMF frames) */
+ ARM_STR_IMM (code, ARMREG_SP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, esp));
/* save the current IP */
ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_PC);
ARM_STR_IMM (code, ARMREG_R2, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, eip));
if (tracing)
code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
+ if (cfg->arch.seq_point_info_var) {
+ MonoInst *ins = cfg->arch.seq_point_info_var;
+
+ /* Initialize the variable from a GOT slot */
+ mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_SEQ_POINT_INFO, cfg->method);
+ ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
+ ARM_B (code, 0);
+ *(gpointer*)code = NULL;
+ code += 4;
+ ARM_LDR_REG_REG (code, ARMREG_R0, ARMREG_PC, ARMREG_R0);
+
+ g_assert (ins->opcode == OP_REGOFFSET);
+
+ if (arm_is_imm12 (ins->inst_offset)) {
+ ARM_STR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
+ } else {
+ code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
+ ARM_STR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
+ }
+ }
+
+ /* Initialize ss_trigger_page_var */
+ {
+ MonoInst *info_var = cfg->arch.seq_point_info_var;
+ MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
+ int dreg = ARMREG_LR;
+
+ if (info_var) {
+ g_assert (info_var->opcode == OP_REGOFFSET);
+ g_assert (arm_is_imm12 (info_var->inst_offset));
+
+ ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
+ /* Load the trigger page addr */
+ ARM_LDR_IMM (code, dreg, dreg, G_STRUCT_OFFSET (SeqPointInfo, ss_trigger_page));
+ ARM_STR_IMM (code, dreg, ss_trigger_page_var->inst_basereg, ss_trigger_page_var->inst_offset);
+ }
+ }
+
cfg->code_len = code - cfg->native_code;
g_assert (cfg->code_len < cfg->code_size);
g_free (cinfo);
int pos, i, rot_amount;
int max_epilog_size = 16 + 20*4;
guint8 *code;
+ CallInfo *cinfo;
if (cfg->method->save_lmf)
max_epilog_size += 128;
}
pos = 0;
+ /* Load returned vtypes into registers if needed */
+ cinfo = cfg->arch.cinfo;
+ if (cinfo->ret.storage == RegTypeStructByVal) {
+ MonoInst *ins = cfg->ret;
+
+ if (arm_is_imm12 (ins->inst_offset)) {
+ ARM_LDR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
+ } else {
+ code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
+ ARM_LDR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
+ }
+ }
+
if (method->save_lmf) {
int lmf_offset;
/* all but r0-r3, sp and pc */
return MONO_EXC_NULL_REF;
if (strcmp (name, "ArrayTypeMismatchException") == 0)
return MONO_EXC_ARRAY_TYPE_MISMATCH;
+ if (strcmp (name, "ArgumentException") == 0)
+ return MONO_EXC_ARGUMENT;
g_error ("Unknown intrinsic exception %s\n", name);
return -1;
}
MonoJumpInfo *patch_info;
int i;
guint8 *code;
- const guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM] = {NULL};
- guint8 exc_throw_found [MONO_EXC_INTRINS_NUM] = {0};
+ guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM];
+ guint8 exc_throw_found [MONO_EXC_INTRINS_NUM];
int max_epilog_size = 50;
+ for (i = 0; i < MONO_EXC_INTRINS_NUM; i++) {
+ exc_throw_pos [i] = NULL;
+ exc_throw_found [i] = 0;
+ }
+
/* count the number of exception infos */
/*
* make sure we have enough space for exceptions
- * 12 is the simulated call to throw_exception_by_name
*/
for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
if (patch_info->type == MONO_PATCH_INFO_EXC) {
i = exception_id_by_name (patch_info->data.target);
if (!exc_throw_found [i]) {
- max_epilog_size += 12;
+ max_epilog_size += 32;
exc_throw_found [i] = TRUE;
}
}
for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
switch (patch_info->type) {
case MONO_PATCH_INFO_EXC: {
+ MonoClass *exc_class;
unsigned char *ip = patch_info->ip.i + cfg->native_code;
- const char *ex_name = patch_info->data.target;
+
i = exception_id_by_name (patch_info->data.target);
if (exc_throw_pos [i]) {
arm_patch (ip, exc_throw_pos [i]);
exc_throw_pos [i] = code;
}
arm_patch (ip, code);
- //*(int*)code = 0xef9f0001;
- //code += 4;
- ARM_NOP (code);
- /*mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_EXC_NAME, patch_info->data.target);*/
+
+ exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
+ g_assert (exc_class);
+
+ ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_LR);
ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
- /* we got here from a conditional call, so the calling ip is set in lr already */
patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
- patch_info->data.name = "mono_arch_throw_exception_by_name";
+ patch_info->data.name = "mono_arch_throw_corlib_exception";
patch_info->ip.i = code - cfg->native_code;
- ARM_B (code, 0);
- *(gconstpointer*)code = ex_name;
+ ARM_BL (code, 0);
+ *(guint32*)(gpointer)code = exc_class->type_token;
code += 4;
break;
}
}
+#endif /* #ifndef DISABLE_JIT */
+
+static gboolean tls_offset_inited = FALSE;
+
void
mono_arch_setup_jit_tls_data (MonoJitTlsData *tls)
{
-}
+ if (!tls_offset_inited) {
+ tls_offset_inited = TRUE;
-void
-mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
-{
+ lmf_tls_offset = mono_get_lmf_tls_offset ();
+ lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
+ }
}
void
-mono_arch_emit_this_vret_args (MonoCompile *cfg, MonoCallInst *inst, int this_reg, int this_type, int vt_reg)
+mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
{
-
- int this_dreg = ARMREG_R0;
-
- if (vt_reg != -1)
- this_dreg = ARMREG_R1;
-
- /* add the this argument */
- if (this_reg != -1) {
- MonoInst *this;
- MONO_INST_NEW (cfg, this, OP_MOVE);
- this->type = this_type;
- this->sreg1 = this_reg;
- this->dreg = mono_regstate_next_int (cfg->rs);
- mono_bblock_add_inst (cfg->cbb, this);
- mono_call_inst_add_outarg_reg (cfg, inst, this->dreg, this_dreg, FALSE);
- }
-
- if (vt_reg != -1) {
- MonoInst *vtarg;
- MONO_INST_NEW (cfg, vtarg, OP_MOVE);
- vtarg->type = STACK_MP;
- vtarg->sreg1 = vt_reg;
- vtarg->dreg = mono_regstate_next_int (cfg->rs);
- mono_bblock_add_inst (cfg->cbb, vtarg);
- mono_call_inst_add_outarg_reg (cfg, inst, vtarg->dreg, ARMREG_R0, FALSE);
- }
}
MonoInst*
-mono_arch_get_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
+mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
+ /* FIXME: */
return NULL;
}
return 0;
}
-MonoInst* mono_arch_get_domain_intrinsic (MonoCompile* cfg)
-{
- return NULL;
-}
-
-MonoInst*
-mono_arch_get_thread_intrinsic (MonoCompile* cfg)
+MonoInst*
+mono_arch_get_domain_intrinsic (MonoCompile* cfg)
{
- return NULL;
+ return mono_get_domain_intrinsic (cfg);
}
guint32
{
}
-void
-mono_arch_fixup_jinfo (MonoCompile *cfg)
-{
- /* max encoded stack usage is 64KB * 4 */
- g_assert ((cfg->stack_usage & ~(0xffff << 2)) == 0);
- cfg->jit_info->used_regs |= cfg->stack_usage << 14;
-}
-
#ifdef MONO_ARCH_HAVE_IMT
+#ifndef DISABLE_JIT
+
void
-mono_arch_emit_imt_argument (MonoCompile *cfg, MonoCallInst *call)
+mono_arch_emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
{
+ if (cfg->compile_aot) {
+ int method_reg = mono_alloc_ireg (cfg);
+ MonoInst *ins;
+
+ call->dynamic_imt_arg = TRUE;
+
+ if (imt_arg) {
+ mono_call_inst_add_outarg_reg (cfg, call, imt_arg->dreg, ARMREG_V5, FALSE);
+ } else {
+ MONO_INST_NEW (cfg, ins, OP_AOTCONST);
+ ins->dreg = method_reg;
+ ins->inst_p0 = call->method;
+ ins->inst_c1 = MONO_PATCH_INFO_METHODCONST;
+ MONO_ADD_INS (cfg->cbb, ins);
+
+ mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
+ }
+ } else if (cfg->generic_context || imt_arg || mono_use_llvm) {
+
+ /* Always pass in a register for simplicity */
+ call->dynamic_imt_arg = TRUE;
+
+ cfg->uses_rgctx_reg = TRUE;
+
+ if (imt_arg) {
+ mono_call_inst_add_outarg_reg (cfg, call, imt_arg->dreg, ARMREG_V5, FALSE);
+ } else {
+ MonoInst *ins;
+ int method_reg = mono_alloc_preg (cfg);
+
+ MONO_INST_NEW (cfg, ins, OP_PCONST);
+ ins->inst_p0 = call->method;
+ ins->dreg = method_reg;
+ MONO_ADD_INS (cfg->cbb, ins);
+
+ mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
+ }
+ }
}
+#endif /* DISABLE_JIT */
+
MonoMethod*
-mono_arch_find_imt_method (gpointer *regs, guint8 *code)
+mono_arch_find_imt_method (mgreg_t *regs, guint8 *code)
{
guint32 *code_ptr = (guint32*)code;
code_ptr -= 2;
+
+ if (mono_use_llvm)
+ /* Passed in V5 */
+ return (MonoMethod*)regs [ARMREG_V5];
+
/* The IMT value is stored in the code stream right after the LDC instruction. */
if (!IS_LDR_PC (code_ptr [0])) {
g_warning ("invalid code stream, instruction before IMT value is not a LDC in %s() (code %p value 0: 0x%x -1: 0x%x -2: 0x%x)", __FUNCTION__, code, code_ptr [2], code_ptr [1], code_ptr [0]);
g_assert (IS_LDR_PC (code_ptr [0]));
}
- return (MonoMethod*) code_ptr [1];
+ if (code_ptr [1] == 0)
+ /* This is AOTed code, the IMT method is in V5 */
+ return (MonoMethod*)regs [ARMREG_V5];
+ else
+ return (MonoMethod*) code_ptr [1];
}
-MonoObject*
-mono_arch_find_this_argument (gpointer *regs, MonoMethod *method)
+MonoVTable*
+mono_arch_find_static_call_vtable (mgreg_t *regs, guint8 *code)
{
- return mono_arch_get_this_arg_from_call (mono_method_signature (method), (gssize*)regs, NULL);
+ return (MonoVTable*) regs [MONO_ARCH_RGCTX_REG];
}
-
#define ENABLE_WRONG_METHOD_CHECK 0
-#define BASE_SIZE (4 * 4)
+#define BASE_SIZE (6 * 4)
#define BSEARCH_ENTRY_SIZE (4 * 4)
#define CMP_SIZE (3 * 4)
#define BRANCH_SIZE (1 * 4)
}
gpointer
-mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count)
+mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
+	gpointer fail_tramp)
{
int size, i, extra_space = 0;
arminstr_t *code, *start, *vtable_target = NULL;
+ gboolean large_offsets = FALSE;
+ guint32 **constant_pool_starts;
+
size = BASE_SIZE;
+ /* Remember where each entry's constant pool begins so bsearch values can be back-patched */
+ constant_pool_starts = g_new0 (guint32*, count);
for (i = 0; i < count; ++i) {
MonoIMTCheckItem *item = imt_entries [i];
if (item->is_equals) {
- g_assert (arm_is_imm12 (DISTANCE (vtable, &vtable->vtable[item->vtable_slot])));
+ gboolean fail_case = !item->check_target_idx && fail_tramp;
+
+ /* Entries with explicit target code or a vtable offset too large for an
+ * imm12 LDR need the longer stub that spills through the stack. */
+ if (item->has_target_code || !arm_is_imm12 (DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]))) {
+ item->chunk_size += 32;
+ large_offsets = TRUE;
+ }
- if (item->check_target_idx) {
- if (!item->compare_done)
+ if (item->check_target_idx || fail_case) {
+ if (!item->compare_done || fail_case)
item->chunk_size += CMP_SIZE;
item->chunk_size += BRANCH_SIZE;
} else {
item->chunk_size += WMC_SIZE;
#endif
}
+ if (fail_case) {
+ item->chunk_size += 16;
+ large_offsets = TRUE;
+ }
item->chunk_size += CALL_SIZE;
} else {
item->chunk_size += BSEARCH_ENTRY_SIZE;
size += item->chunk_size;
}
- start = code = mono_code_manager_reserve (domain->code_mp, size);
+ if (large_offsets)
+ size += 4 * count; /* The ARM_ADD_REG_IMM to pop the stack */
+
+ /* Thunks with a fail trampoline are allocated from the generic virtual thunk area */
+ if (fail_tramp)
+ code = mono_method_alloc_generic_virtual_thunk (domain, size);
+ else
+ code = mono_domain_code_reserve (domain, size);
+ start = code;
#if DEBUG_IMT
printf ("building IMT thunk for class %s %s entries %d code size %d code at %p end %p vtable %p\n", vtable->klass->name_space, vtable->klass->name, count, size, start, ((guint8*)start) + size, vtable);
for (i = 0; i < count; ++i) {
MonoIMTCheckItem *item = imt_entries [i];
- printf ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i, item->method, item->method->name, &vtable->vtable [item->vtable_slot], item->is_equals, item->chunk_size);
+ printf ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i, item->key, item->key->name, &vtable->vtable [item->value.vtable_slot], item->is_equals, item->chunk_size);
}
#endif
- ARM_PUSH2 (code, ARMREG_R0, ARMREG_R1);
+ /* Large-offset thunks also save IP and a PC slot, which the stubs overwrite and pop into PC */
+ if (large_offsets)
+ ARM_PUSH4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
+ else
+ ARM_PUSH2 (code, ARMREG_R0, ARMREG_R1);
ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, -4);
vtable_target = code;
ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
+ if (mono_use_llvm) {
+ /* LLVM always passes the IMT method in R5 */
+ ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_V5);
+ } else {
+ /* R0 == 0 means we are called from AOT code. In this case, V5 contains the IMT method */
+ ARM_CMP_REG_IMM8 (code, ARMREG_R0, 0);
+ ARM_MOV_REG_REG_COND (code, ARMREG_R0, ARMREG_V5, ARMCOND_EQ);
+ }
+
for (i = 0; i < count; ++i) {
MonoIMTCheckItem *item = imt_entries [i];
- arminstr_t *imt_method = NULL;
+ arminstr_t *imt_method = NULL, *vtable_offset_ins = NULL, *target_code_ins = NULL;
+ gint32 vtable_offset;
+
item->code_target = (guint8*)code;
if (item->is_equals) {
- if (item->check_target_idx) {
- if (!item->compare_done) {
+ gboolean fail_case = !item->check_target_idx && fail_tramp;
+
+ if (item->check_target_idx || fail_case) {
+ if (!item->compare_done || fail_case) {
imt_method = code;
ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
}
item->jmp_code = (guint8*)code;
ARM_B_COND (code, ARMCOND_NE, 0);
-
- ARM_POP2 (code, ARMREG_R0, ARMREG_R1);
- ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, DISTANCE (vtable, &vtable->vtable[item->vtable_slot]));
} else {
/*Enable the commented code to assert on wrong method*/
#if ENABLE_WRONG_METHOD_CHECK
ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
ARM_B_COND (code, ARMCOND_NE, 1);
-#endif
- ARM_POP2 (code, ARMREG_R0, ARMREG_R1);
- ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, DISTANCE (vtable, &vtable->vtable[item->vtable_slot]));
-#if ENABLE_WRONG_METHOD_CHECK
ARM_DBRK (code);
#endif
}
+ if (item->has_target_code) {
+ target_code_ins = code;
+ /* Load target address */
+ ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
+ /* Save it to the fourth slot */
+ ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
+ /* Restore registers and branch */
+ ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
+
+ code = arm_emit_value_and_patch_ldr (code, target_code_ins, (gsize)item->value.target_code);
+ } else {
+ vtable_offset = DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]);
+ if (!arm_is_imm12 (vtable_offset)) {
+ /*
+ * We need to branch to a computed address but we don't have
+ * a free register to store it, since IP must contain the
+ * vtable address. So we push the two values to the stack, and
+ * load them both using LDM.
+ */
+ /* Compute target address */
+ vtable_offset_ins = code;
+ ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
+ ARM_LDR_REG_REG (code, ARMREG_R1, ARMREG_IP, ARMREG_R1);
+ /* Save it to the fourth slot */
+ ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
+ /* Restore registers and branch */
+ ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
+
+ code = arm_emit_value_and_patch_ldr (code, vtable_offset_ins, vtable_offset);
+ } else {
+ ARM_POP2 (code, ARMREG_R0, ARMREG_R1);
+ if (large_offsets)
+ ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 2 * sizeof (gpointer));
+ ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, vtable_offset);
+ }
+ }
+
+ /* On a compare miss, the conditional branch lands here and jumps to the fail trampoline */
+ if (fail_case) {
+ arm_patch (item->jmp_code, (guchar*)code);
+
+ target_code_ins = code;
+ /* Load target address */
+ ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
+ /* Save it to the fourth slot */
+ ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
+ /* Restore registers and branch */
+ ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
+
+ code = arm_emit_value_and_patch_ldr (code, target_code_ins, (gsize)fail_tramp);
+ item->jmp_code = NULL;
+ }
+
if (imt_method)
- code = arm_emit_value_and_patch_ldr (code, imt_method, (guint32)item->method);
+ code = arm_emit_value_and_patch_ldr (code, imt_method, (guint32)item->key);
/*must emit after unconditional branch*/
if (vtable_target) {
}
/*We reserve the space for bsearch IMT values after the first entry with an absolute jump*/
+ constant_pool_starts [i] = code;
if (extra_space) {
code += extra_space;
extra_space = 0;
}
if (i > 0 && item->is_equals) {
int j;
- arminstr_t *space_start = (arminstr_t*)(item->code_target + item->chunk_size);
+ arminstr_t *space_start = constant_pool_starts [i];
for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j) {
- space_start = arm_emit_value_and_patch_ldr (space_start, (arminstr_t*)imt_entries [j]->code_target, (guint32)imt_entries [j]->method);
+ space_start = arm_emit_value_and_patch_ldr (space_start, (arminstr_t*)imt_entries [j]->code_target, (guint32)imt_entries [j]->key);
}
}
}
}
#endif
+ g_free (constant_pool_starts);
+
mono_arch_flush_icache ((guint8*)start, size);
mono_stats.imt_thunks_size += code - start;
#endif
+/*
+ * mono_arch_context_get_int_reg:
+ *
+ *   Return the value of integer register REG from CTX. SP lives in its
+ * own context field rather than the general register array.
+ */
+gpointer
+mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
+{
+ return (gpointer)(reg == ARMREG_SP ? ctx->esp : ctx->regs [reg]);
+}
+
+/*
+ * mono_arch_set_breakpoint:
+ *
+ * Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET.
+ * The location should contain code emitted by OP_SEQ_POINT.
+ */
+void
+mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
+{
+ guint8 *code = ip;
+ guint32 native_offset = ip - (guint8*)ji->code_start;
+
+ if (ji->from_aot) {
+ /* AOT code reads through a per-offset table slot; arm it by pointing
+ * the slot at the breakpoint trigger page. */
+ SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
+
+ g_assert (native_offset % 4 == 0);
+ g_assert (info->bp_addrs [native_offset / 4] == 0);
+ info->bp_addrs [native_offset / 4] = bp_trigger_page;
+ } else {
+ int dreg = ARMREG_LR;
+
+ /* Read from another trigger page */
+ /* Emit: ldr lr, [pc]; b past the literal; .word bp_trigger_page; ldr lr, [lr] */
+ ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
+ ARM_B (code, 0);
+ *(int*)code = (int)bp_trigger_page;
+ code += 4;
+ ARM_LDR_IMM (code, dreg, dreg, 0);
+
+ /* 16 bytes were written (3 instructions + 1 literal word) */
+ mono_arch_flush_icache (code - 16, 16);
+
+#if 0
+ /* This is currently implemented by emitting an SWI instruction, which
+ * qemu/linux seems to convert to a SIGILL.
+ */
+ *(int*)code = (0xef << 24) | 8;
+ code += 4;
+ mono_arch_flush_icache (code - 4, 4);
+#endif
+ }
+}
+
+/*
+ * mono_arch_clear_breakpoint:
+ *
+ * Clear the breakpoint at IP.
+ */
+void
+mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
+{
+ guint8 *code = ip;
+ int i;
+
+ if (ji->from_aot) {
+ /* Disarm the table slot the AOTed sequence point reads through */
+ guint32 native_offset = ip - (guint8*)ji->code_start;
+ SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
+
+ g_assert (native_offset % 4 == 0);
+ g_assert (info->bp_addrs [native_offset / 4] == bp_trigger_page);
+ info->bp_addrs [native_offset / 4] = 0;
+ } else {
+ /* Overwrite the 4 word breakpoint sequence with NOPs */
+ for (i = 0; i < 4; ++i)
+ ARM_NOP (code);
+
+ mono_arch_flush_icache (ip, code - ip);
+ }
+}
+
+/*
+ * mono_arch_start_single_stepping:
+ *
+ * Start single stepping.
+ */
+void
+mono_arch_start_single_stepping (void)
+{
+ /* Revoke all access so sequence-point reads of the trigger page fault */
+ mono_mprotect (ss_trigger_page, mono_pagesize (), 0);
+}
+
+/*
+ * mono_arch_stop_single_stepping:
+ *
+ * Stop single stepping.
+ */
+void
+mono_arch_stop_single_stepping (void)
+{
+ /* Make the trigger page readable again so sequence points stop faulting */
+ mono_mprotect (ss_trigger_page, mono_pagesize (), MONO_MMAP_READ);
+}
+
+#if __APPLE__
+#define DBG_SIGNAL SIGBUS
+#else
+#define DBG_SIGNAL SIGSEGV
+#endif
+
+/*
+ * mono_arch_is_single_step_event:
+ *
+ * Return whether the machine state in SIGCTX corresponds to a single
+ * step event, i.e. a fault on the single-step trigger page.
+ */
+gboolean
+mono_arch_is_single_step_event (void *info, void *sigctx)
+{
+ siginfo_t *sinfo = info;
+ guint8 *addr = (guint8*)sinfo->si_addr;
+
+ /* Sometimes the address is off by 4 */
+ return addr >= (guint8*)ss_trigger_page && addr <= (guint8*)ss_trigger_page + 128;
+}
+
+/*
+ * mono_arch_is_breakpoint_event:
+ *
+ * Return whether the machine state in SIGCTX corresponds to a breakpoint
+ * event, i.e. a DBG_SIGNAL fault on the breakpoint trigger page.
+ */
+gboolean
+mono_arch_is_breakpoint_event (void *info, void *sigctx)
+{
+ siginfo_t *sinfo = info;
+ guint8 *addr;
+
+ if (sinfo->si_signo != DBG_SIGNAL)
+ return FALSE;
+
+ /* Sometimes the address is off by 4 */
+ addr = (guint8*)sinfo->si_addr;
+ return addr >= (guint8*)bp_trigger_page && addr <= (guint8*)bp_trigger_page + 128;
+}
+
+/*
+ * mono_arch_get_ip_for_breakpoint:
+ *
+ *   Return the start of the breakpoint sequence containing the faulting IP;
+ * AOT and JIT sequences have different lengths.
+ */
+guint8*
+mono_arch_get_ip_for_breakpoint (MonoJitInfo *ji, MonoContext *ctx)
+{
+ guint8 *ip = MONO_CONTEXT_GET_IP (ctx);
+
+ return ip - (ji->from_aot ? 6 * 4 : 12);
+}
+
+/*
+ * mono_arch_get_ip_for_single_step:
+ *
+ *   Return the faulting IP advanced by one 4 byte instruction.
+ */
+guint8*
+mono_arch_get_ip_for_single_step (MonoJitInfo *ji, MonoContext *ctx)
+{
+ return ((guint8*)MONO_CONTEXT_GET_IP (ctx)) + 4;
+}
+
+/*
+ * mono_arch_skip_breakpoint:
+ *
+ * See mini-amd64.c for docs.
+ */
+void
+mono_arch_skip_breakpoint (MonoContext *ctx)
+{
+ /* Resume at the next 4 byte instruction, past the faulting load */
+ MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
+}
+
+/*
+ * mono_arch_skip_single_step:
+ *
+ * See mini-amd64.c for docs.
+ */
+void
+mono_arch_skip_single_step (MonoContext *ctx)
+{
+ /* Resume at the next 4 byte instruction, past the faulting load */
+ MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
+}
+
+/*
+ * mono_arch_get_seq_point_info:
+ *
+ * See mini-amd64.c for docs.
+ */
+gpointer
+mono_arch_get_seq_point_info (MonoDomain *domain, guint8 *code)
+{
+ SeqPointInfo *info;
+ MonoJitInfo *ji;
+
+ // FIXME: Add a free function
+
+ /* The per-domain hash table caches one SeqPointInfo per method code start */
+ mono_domain_lock (domain);
+ info = g_hash_table_lookup (domain_jit_info (domain)->arch_seq_points,
+ code);
+ mono_domain_unlock (domain);
+
+ if (!info) {
+ ji = mono_jit_info_table_find (domain, (char*)code);
+ g_assert (ji);
+
+ /* bp_addrs[] is a flexible array with one slot per 4 bytes of code */
+ info = g_malloc0 (sizeof (SeqPointInfo) + ji->code_size);
+
+ info->ss_trigger_page = ss_trigger_page;
+ info->bp_trigger_page = bp_trigger_page;
+
+ /* NOTE(review): two racing callers may both build an info; the second
+ * insert overwrites the first — confirm this is acceptable here. */
+ mono_domain_lock (domain);
+ g_hash_table_insert (domain_jit_info (domain)->arch_seq_points,
+ code, info);
+ mono_domain_unlock (domain);
+ }
+
+ return info;
+}