* Dietmar Maurer (dietmar@ximian.com)
*
* (C) 2003 Ximian, Inc.
+ * Copyright 2003-2011 Novell, Inc (http://www.novell.com)
+ * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
*/
#include "mini.h"
#include <string.h>
#include "cpu-arm.h"
#include "trace.h"
#include "ir-emit.h"
-#ifdef ARM_FPU_FPA
+#include "debugger-agent.h"
+#include "mini-gc.h"
#include "mono/arch/arm/arm-fpa-codegen.h"
-#elif defined(ARM_FPU_VFP)
#include "mono/arch/arm/arm-vfp-codegen.h"
-#endif
#if defined(__ARM_EABI__) && defined(__linux__) && !defined(PLATFORM_ANDROID)
#define HAVE_AEABI_READ_TP 1
#endif
+#ifdef ARM_FPU_VFP_HARD
+#define ARM_FPU_VFP 1
+#endif
+
+#ifdef ARM_FPU_FPA
+#define IS_FPA 1
+#else
+#define IS_FPA 0
+#endif
+
+#ifdef ARM_FPU_VFP
+#define IS_VFP 1
+#else
+#define IS_VFP 0
+#endif
+
+#ifdef MONO_ARCH_SOFT_FLOAT
+#define IS_SOFT_FLOAT 1
+#else
+#define IS_SOFT_FLOAT 0
+#endif
+
+#define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
+
static gint lmf_tls_offset = -1;
static gint lmf_addr_tls_offset = -1;
static CRITICAL_SECTION mini_arch_mutex;
static int v5_supported = 0;
+static int v6_supported = 0;
static int v7_supported = 0;
static int thumb_supported = 0;
+/*
+ * Whether to use the ARM EABI.
+ */
+static int eabi_supported = 0;
+
+/*
+ * Whether we are on arm/darwin aka the iphone.
+ */
+static int darwin = 0;
+/*
+ * Whether to use the iphone ABI extensions:
+ * http://developer.apple.com/library/ios/documentation/Xcode/Conceptual/iPhoneOSABIReference/index.html
+ * Basically, r7 is used as a frame pointer and it should point to the saved r7 + lr.
+ * This is required for debugging/profiling tools to work, but it has some overhead so it should
+ * only be turned on in debug builds.
+ */
+static int iphone_abi = 0;
+
+/*
+ * The FPU we are generating code for. This is NOT runtime configurable right now,
+ * since some things like MONO_ARCH_CALLEE_FREGS still depend on defines.
+ */
+static MonoArmFPU arm_fpu;
+
+static int i8_align;
+
+static volatile int ss_trigger_var = 0;
+
+static gpointer single_step_func_wrapper;
+static gpointer breakpoint_func_wrapper;
/*
* The code generated for sequence points reads from this location, which is
} \
} while (0)
+static void mono_arch_compute_omit_fp (MonoCompile *cfg);
+
const char*
mono_arch_regname (int reg)
{
case OP_FCALL:
case OP_FCALL_REG:
case OP_FCALL_MEMBASE:
-#ifdef ARM_FPU_FPA
- if (ins->dreg != ARM_FPA_F0)
- ARM_MVFD (code, ins->dreg, ARM_FPA_F0);
-#elif defined(ARM_FPU_VFP)
- if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4) {
- ARM_FMSR (code, ins->dreg, ARMREG_R0);
- ARM_CVTS (code, ins->dreg, ins->dreg);
- } else {
- ARM_FMDRR (code, ARMREG_R0, ARMREG_R1, ins->dreg);
+ if (IS_FPA) {
+ if (ins->dreg != ARM_FPA_F0)
+ ARM_FPA_MVFD (code, ins->dreg, ARM_FPA_F0);
+ } else if (IS_VFP) {
+ if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4) {
+ ARM_FMSR (code, ins->dreg, ARMREG_R0);
+ ARM_CVTS (code, ins->dreg, ins->dreg);
+ } else {
+ ARM_FMDRR (code, ARMREG_R0, ARMREG_R1, ins->dreg);
+ }
}
-#endif
break;
}
return code;
}
+/*
+ * emit_save_lmf:
+ *
+ * Emit code to push an LMF structure on the LMF stack.
+ * On arm, this is intermixed with the initialization of other fields of the structure.
+ * CODE is the current output position; the updated position is returned.
+ * LMF_OFFSET is the offset of the MonoLMF structure from the stack pointer.
+ */
+static guint8*
+emit_save_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset)
+{
+ gboolean get_lmf_fast = FALSE;
+ int i;
+
+#ifdef HAVE_AEABI_READ_TP
+ gint32 lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
+
+ if (lmf_addr_tls_offset != -1) {
+ get_lmf_fast = TRUE;
+
+ mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
+ (gpointer)"__aeabi_read_tp");
+ code = emit_call_seq (cfg, code);
+
+ /* load the LMF address out of the thread's TLS block into r0 */
+ ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, lmf_addr_tls_offset);
+ /* NOTE(review): redundant, get_lmf_fast was already set to TRUE above */
+ get_lmf_fast = TRUE;
+ }
+#endif
+ /* slow path: call mono_get_lmf_addr (), leaving the result in r0 */
+ if (!get_lmf_fast) {
+ mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
+ (gpointer)"mono_get_lmf_addr");
+ code = emit_call_seq (cfg, code);
+ }
+ /* we build the MonoLMF structure on the stack - see mini-arm.h */
+ /* lmf_offset is the offset from the previous stack pointer,
+ * alloc_size is the total stack space allocated, so the offset
+ * of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
+ * The pointer to the struct is put in r1 (new_lmf).
+ * ip is used as scratch
+ * The callee-saved registers are already in the MonoLMF structure
+ */
+ code = emit_big_add (code, ARMREG_R1, ARMREG_SP, lmf_offset);
+ /* r0 is the result from mono_get_lmf_addr () */
+ ARM_STR_IMM (code, ARMREG_R0, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, lmf_addr));
+ /* new_lmf->previous_lmf = *lmf_addr */
+ ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
+ ARM_STR_IMM (code, ARMREG_IP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
+ /* *(lmf_addr) = r1 */
+ ARM_STR_IMM (code, ARMREG_R1, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
+ /* Skip method (only needed for trampoline LMF frames) */
+ ARM_STR_IMM (code, ARMREG_SP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, sp));
+ ARM_STR_IMM (code, ARMREG_FP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, fp));
+ /* save the current IP */
+ ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_PC);
+ ARM_STR_IMM (code, ARMREG_IP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, ip));
+
+ /* tell the GC map that none of the LMF stack slots hold GC references */
+ for (i = 0; i < sizeof (MonoLMF); i += sizeof (mgreg_t))
+ mini_gc_set_slot_type_from_fp (cfg, lmf_offset + i, SLOT_NOREF);
+
+ return code;
+}
+
+/*
+ * emit_restore_lmf:
+ *
+ * Emit code to pop an LMF structure from the LMF stack.
+ * Unlinks the LMF at LMF_OFFSET (relative to cfg->frame_reg) by storing its
+ * previous_lmf back into *lmf_addr. Clobbers ip, lr and possibly r2.
+ */
+static guint8*
+emit_restore_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset)
+{
+ int basereg, offset;
+
+ /* for small offsets address the LMF off the frame register directly;
+ * otherwise materialize its address into r2 first */
+ if (lmf_offset < 32) {
+ basereg = cfg->frame_reg;
+ offset = lmf_offset;
+ } else {
+ basereg = ARMREG_R2;
+ offset = 0;
+ code = emit_big_add (code, ARMREG_R2, cfg->frame_reg, lmf_offset);
+ }
+
+ /* ip = previous_lmf */
+ ARM_LDR_IMM (code, ARMREG_IP, basereg, offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf));
+ /* lr = lmf_addr */
+ ARM_LDR_IMM (code, ARMREG_LR, basereg, offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr));
+ /* *(lmf_addr) = previous_lmf */
+ ARM_STR_IMM (code, ARMREG_IP, ARMREG_LR, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
+
+ return code;
+}
+
#endif /* #ifndef DISABLE_JIT */
/*
return frame_size;
}
-static gpointer
-decode_vcall_slot_from_ldr (guint32 ldr, mgreg_t *regs, int *displacement)
-{
- char *o = NULL;
- int reg, offset = 0;
- reg = (ldr >> 16 ) & 0xf;
- offset = ldr & 0xfff;
- if (((ldr >> 23) & 1) == 0) /*U bit, 0 means negative and 1 positive*/
- offset = -offset;
- /*g_print ("found vcall at r%d + %d for code at %p 0x%x\n", reg, offset, code, *code);*/
- o = (gpointer)regs [reg];
-
- *displacement = offset;
- return o;
-}
-
-gpointer
-mono_arch_get_vcall_slot (guint8 *code_ptr, mgreg_t *regs, int *displacement)
-{
- guint32* code = (guint32*)code_ptr;
-
- /* Locate the address of the method-specific trampoline. The call using
- the vtable slot that took the processing flow to 'arch_create_jit_trampoline'
- looks something like this:
-
- ldr rA, rX, #offset
- mov lr, pc
- mov pc, rA
- or better:
- mov lr, pc
- ldr pc, rX, #offset
-
- The call sequence could be also:
- ldr ip, pc, 0
- b skip
- function pointer literal
- skip:
- mov lr, pc
- mov pc, ip
- Note that on ARM5+ we can use one instruction instead of the last two.
- Therefore, we need to locate the 'ldr rA' instruction to know which
- register was used to hold the method addrs.
- */
-
- /* This is the instruction after "ldc pc, xxx", "mov pc, xxx" or "bl xxx" could be either the IMT value or some other instruction*/
- --code;
-
- /* Three possible code sequences can happen here:
- * interface call:
- *
- * add lr, [pc + #4]
- * ldr pc, [rX - #offset]
- * .word IMT value
- *
- * virtual call:
- *
- * mov lr, pc
- * ldr pc, [rX - #offset]
- *
- * direct branch with bl:
- *
- * bl #offset
- *
- * direct branch with mov:
- *
- * mv pc, rX
- *
- * We only need to identify interface and virtual calls, the others can be ignored.
- *
- */
- if (IS_LDR_PC (code [-1]) && code [-2] == ADD_LR_PC_4)
- return decode_vcall_slot_from_ldr (code [-1], regs, displacement);
-
- if (IS_LDR_PC (code [0]) && code [-1] == MOV_LR_PC)
- return decode_vcall_slot_from_ldr (code [0], regs, displacement);
-
- return NULL;
-}
-
#define MAX_ARCH_DELEGATE_PARAMS 3
static gpointer
}
gpointer
-mono_arch_get_this_arg_from_call (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, mgreg_t *regs, guint8 *code)
+mono_arch_get_this_arg_from_call (mgreg_t *regs, guint8 *code)
{
+	/* the 'this' argument is read from r0 */
return (gpointer)regs [ARMREG_R0];
}
void
mono_arch_cpu_init (void)
{
+#if defined(__ARM_EABI__)
+ eabi_supported = TRUE;
+#endif
+#if defined(__APPLE__) && defined(MONO_CROSS_COMPILE)
+ /* cross builds cannot query the target's alignment with __alignof__;
+ * darwin aligns gint64 to 4 bytes only */
+ i8_align = 4;
+#else
+ i8_align = __alignof__ (gint64);
+#endif
+}
+
+/*
+ * create_function_wrapper:
+ *
+ *   Create a native wrapper around FUNCTION which saves the caller's register
+ * state into a MonoContext structure on the stack, calls FUNCTION with a
+ * pointer to that context, then restores every register - including pc - from
+ * the (possibly modified) context. Used for the soft debugger entry points.
+ */
+static gpointer
+create_function_wrapper (gpointer function)
+{
+ guint8 *start, *code;
+
+ start = code = mono_global_codeman_reserve (96);
+
+ /*
+ * Construct the MonoContext structure on the stack.
+ */
+
+ ARM_SUB_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, sizeof (MonoContext));
+
+ /* save ip, lr and pc into their corresponding ctx.regs slots. */
+ ARM_STR_IMM (code, ARMREG_IP, ARMREG_SP, G_STRUCT_OFFSET (MonoContext, regs) + sizeof (mgreg_t) * ARMREG_IP);
+ ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, G_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_LR);
+ /* lr holds this wrapper's return address, so it doubles as the saved pc */
+ ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, G_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_PC);
+
+ /* save r0..r10 and fp */
+ ARM_ADD_REG_IMM8 (code, ARMREG_IP, ARMREG_SP, G_STRUCT_OFFSET (MonoContext, regs));
+ ARM_STM (code, ARMREG_IP, 0x0fff);
+
+ /* now we can update fp. */
+ ARM_MOV_REG_REG (code, ARMREG_FP, ARMREG_SP);
+
+ /* make ctx.esp hold the actual value of sp at the beginning of this method. */
+ ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_FP, sizeof (MonoContext));
+ /* NOTE(review): ip still equals fp + offsetof(regs), so this store and the
+ * next one appear to write the same regs[sp] slot - confirm one is needed */
+ ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, 4 * ARMREG_SP);
+ ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, G_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_SP);
+
+ /* make ctx.eip hold the address of the call. */
+ ARM_SUB_REG_IMM8 (code, ARMREG_LR, ARMREG_LR, 4);
+ ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, G_STRUCT_OFFSET (MonoContext, pc));
+
+ /* r0 now points to the MonoContext */
+ ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_FP);
+
+ /* call FUNCTION: its address sits in an inline literal pool word which the
+ * unconditional branch jumps over */
+ ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
+ ARM_B (code, 0);
+ *(gpointer*)code = function;
+ code += 4;
+ ARM_BLX_REG (code, ARMREG_IP);
+
+ /* we're back; save ctx.eip and ctx.esp into the corresponding regs slots. */
+ ARM_LDR_IMM (code, ARMREG_R0, ARMREG_FP, G_STRUCT_OFFSET (MonoContext, pc));
+ ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, G_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_LR);
+ ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, G_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_PC);
+
+ /* make ip point to the regs array, then restore everything, including pc. */
+ ARM_ADD_REG_IMM8 (code, ARMREG_IP, ARMREG_FP, G_STRUCT_OFFSET (MonoContext, regs));
+ ARM_LDM (code, ARMREG_IP, 0xffff);
+
+ mono_arch_flush_icache (start, code - start);
+
+ return start;
+}
/*
{
InitializeCriticalSection (&mini_arch_mutex);
- ss_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
- bp_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
- mono_mprotect (bp_trigger_page, mono_pagesize (), 0);
+ if (mini_get_debug_options ()->soft_breakpoints) {
+ single_step_func_wrapper = create_function_wrapper (debugger_agent_single_step_from_context);
+ breakpoint_func_wrapper = create_function_wrapper (debugger_agent_breakpoint_from_context);
+ } else {
+ ss_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
+ bp_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
+ mono_mprotect (bp_trigger_page, mono_pagesize (), 0);
+ }
mono_aot_register_jit_icall ("mono_arm_throw_exception", mono_arm_throw_exception);
mono_aot_register_jit_icall ("mono_arm_throw_exception_by_token", mono_arm_throw_exception_by_token);
+ mono_aot_register_jit_icall ("mono_arm_resume_unwind", mono_arm_resume_unwind);
+
+#ifdef ARM_FPU_FPA
+ arm_fpu = MONO_ARM_FPU_FPA;
+#elif defined(ARM_FPU_VFP_HARD)
+ arm_fpu = MONO_ARM_FPU_VFP_HARD;
+#elif defined(ARM_FPU_VFP)
+ arm_fpu = MONO_ARM_FPU_VFP;
+#else
+ arm_fpu = MONO_ARM_FPU_NONE;
+#endif
}
/*
thumb_supported = strstr (cpu_arch, "thumb") != NULL;
if (strncmp (cpu_arch, "armv", 4) == 0) {
v5_supported = cpu_arch [4] >= '5';
+ v6_supported = cpu_arch [4] >= '6';
v7_supported = cpu_arch [4] >= '7';
}
} else {
#if __APPLE__
thumb_supported = TRUE;
v5_supported = TRUE;
+ darwin = TRUE;
+ iphone_abi = TRUE;
#else
char buf [512];
char *line;
char *ver = strstr (line, "(v");
if (ver && (ver [2] == '5' || ver [2] == '6' || ver [2] == '7'))
v5_supported = TRUE;
+ if (ver && (ver [2] == '6' || ver [2] == '7'))
+ v6_supported = TRUE;
if (ver && (ver [2] == '7'))
v7_supported = TRUE;
continue;
{
GList *regs = NULL;
+ mono_arch_compute_omit_fp (cfg);
+
/*
* FIXME: Interface calls might go through a static rgctx trampoline which
* sets V5, but it doesn't save it, so we need to save it ourselves, and
if (cfg->flags & MONO_CFG_HAS_CALLS)
cfg->uses_rgctx_reg = TRUE;
+ if (cfg->arch.omit_fp)
+ regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_FP));
regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V1));
regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V2));
regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V3));
- regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V4));
- if (!(cfg->compile_aot || cfg->uses_rgctx_reg))
+ if (darwin)
+ /* V4=R7 is used as a frame pointer, but V7=R10 is preserved */
+ regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));
+ else
+ regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V4));
+ if (!(cfg->compile_aot || cfg->uses_rgctx_reg || COMPILE_LLVM (cfg)))
/* V5 is reserved for passing the vtable/rgctx/IMT method */
regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V5));
/*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V6));*/
guint16 vtsize; /* in param area */
guint8 reg;
ArgStorage storage;
+ gint32 struct_size;
guint8 size : 4; /* 1, 2, 4, 8, or regs used by RegTypeStructByVal */
} ArgInfo;
ainfo->reg = *gr;
}
} else {
-#if defined(__APPLE__) && defined(MONO_CROSS_COMPILE)
- int i8_align = 4;
-#else
- int i8_align = __alignof__ (gint64);
-#endif
+ gboolean split;
-#if __ARM_EABI__
- gboolean split = i8_align == 4;
-#else
- gboolean split = TRUE;
-#endif
+ if (eabi_supported)
+ split = i8_align == 4;
+ else
+ split = TRUE;
if (*gr == ARMREG_R3 && split) {
/* first word in r3 and the second on the stack */
ainfo->storage = RegTypeBaseGen;
*stack_size += 4;
} else if (*gr >= ARMREG_R3) {
-#ifdef __ARM_EABI__
- /* darwin aligns longs to 4 byte only */
- if (i8_align == 8) {
- *stack_size += 7;
- *stack_size &= ~7;
+ if (eabi_supported) {
+ /* darwin aligns longs to 4 byte only */
+ if (i8_align == 8) {
+ *stack_size += 7;
+ *stack_size &= ~7;
+ }
}
-#endif
ainfo->offset = *stack_size;
ainfo->reg = ARMREG_SP; /* in the caller */
ainfo->storage = RegTypeBase;
*stack_size += 8;
} else {
-#ifdef __ARM_EABI__
- if (i8_align == 8 && ((*gr) & 1))
- (*gr) ++;
-#endif
+ if (eabi_supported) {
+ if (i8_align == 8 && ((*gr) & 1))
+ (*gr) ++;
+ }
ainfo->storage = RegTypeIRegPair;
ainfo->reg = *gr;
}
}
static CallInfo*
-get_call_info (MonoMemPool *mp, MonoMethodSignature *sig, gboolean is_pinvoke)
+get_call_info (MonoGenericSharingContext *gsctx, MonoMemPool *mp, MonoMethodSignature *sig)
{
guint i, gr, pstart;
int n = sig->hasthis + sig->param_count;
MonoType *simpletype;
guint32 stack_size = 0;
CallInfo *cinfo;
+ gboolean is_pinvoke = sig->pinvoke;
if (mp)
cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n));
* are sometimes made using calli without sig->hasthis set, like in the delegate
* invoke wrappers.
*/
- if (cinfo->vtype_retaddr && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (sig->params [0])))) {
+ if (cinfo->vtype_retaddr && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (mini_type_get_underlying_type (gsctx, sig->params [0]))))) {
if (sig->hasthis) {
add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
} else {
align_size &= ~(sizeof (gpointer) - 1);
nwords = (align_size + sizeof (gpointer) -1 ) / sizeof (gpointer);
cinfo->args [n].storage = RegTypeStructByVal;
+ cinfo->args [n].struct_size = size;
/* FIXME: align stack_size if needed */
-#ifdef __ARM_EABI__
- if (align >= 8 && (gr & 1))
- gr ++;
-#endif
+ if (eabi_supported) {
+ if (align >= 8 && (gr & 1))
+ gr ++;
+ }
if (gr > ARMREG_R3) {
cinfo->args [n].size = 0;
cinfo->args [n].vtsize = nwords;
#ifndef DISABLE_JIT
+/*
+ * break_count:
+ *
+ *   Empty hook: set a native breakpoint on this function to stop at the exact
+ * iteration selected by the COUNT env var (see debug_count ()).
+ */
+G_GNUC_UNUSED static void
+break_count (void)
+{
+}
+
+/*
+ * debug_count:
+ *
+ *   Bisection helper for tracking down codegen bugs: returns TRUE for the
+ * first COUNT invocations (COUNT read from the environment) and FALSE
+ * afterwards, so a change can be enabled for only the first N cases.
+ * Returns TRUE unconditionally when COUNT is not set.
+ */
+G_GNUC_UNUSED static gboolean
+debug_count (void)
+{
+ static int count = 0;
+ count ++;
+
+ if (!getenv ("COUNT"))
+ return TRUE;
+
+ if (count == atoi (getenv ("COUNT"))) {
+ /* breakpoint anchor for the selected iteration */
+ break_count ();
+ }
+
+ if (count > atoi (getenv ("COUNT"))) {
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+/*
+ * debug_omit_fp:
+ *
+ *   Whether frame pointer elimination should be attempted at all; flip the
+ * #if to bisect omit-fp problems with debug_count ().
+ */
+static gboolean
+debug_omit_fp (void)
+{
+#if 0
+ return debug_count ();
+#else
+ return TRUE;
+#endif
+}
+
+/**
+ * mono_arch_compute_omit_fp:
+ *
+ * Determine whether the frame pointer can be eliminated; the result is
+ * cached in cfg->arch.omit_fp / cfg->arch.omit_fp_computed.
+ */
+static void
+mono_arch_compute_omit_fp (MonoCompile *cfg)
+{
+ MonoMethodSignature *sig;
+ MonoMethodHeader *header;
+ int i, locals_size;
+ CallInfo *cinfo;
+
+ if (cfg->arch.omit_fp_computed)
+ return;
+
+ header = cfg->header;
+
+ sig = mono_method_signature (cfg->method);
+
+ if (!cfg->arch.cinfo)
+ cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
+ cinfo = cfg->arch.cinfo;
+
+ /*
+ * FIXME: Remove some of the restrictions.
+ */
+ cfg->arch.omit_fp = TRUE;
+ cfg->arch.omit_fp_computed = TRUE;
+
+ if (cfg->disable_omit_fp)
+ cfg->arch.omit_fp = FALSE;
+ if (!debug_omit_fp ())
+ cfg->arch.omit_fp = FALSE;
+ /*
+ if (cfg->method->save_lmf)
+ cfg->arch.omit_fp = FALSE;
+ */
+ if (cfg->flags & MONO_CFG_HAS_ALLOCA)
+ cfg->arch.omit_fp = FALSE;
+ if (header->num_clauses)
+ cfg->arch.omit_fp = FALSE;
+ if (cfg->param_area)
+ cfg->arch.omit_fp = FALSE;
+ if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG))
+ cfg->arch.omit_fp = FALSE;
+ if ((mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method)) ||
+ (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE))
+ cfg->arch.omit_fp = FALSE;
+ for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
+ ArgInfo *ainfo = &cinfo->args [i];
+
+ if (ainfo->storage == RegTypeBase || ainfo->storage == RegTypeBaseGen || ainfo->storage == RegTypeStructByVal) {
+ /*
+ * The stack offset can only be determined when the frame
+ * size is known.
+ */
+ cfg->arch.omit_fp = FALSE;
+ }
+ }
+
+ /* NOTE(review): locals_size is computed below but never used - dead code? */
+ locals_size = 0;
+ for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
+ MonoInst *ins = cfg->varinfo [i];
+ int ialign;
+
+ locals_size += mono_type_size (ins->inst_vtype, &ialign);
+ }
+}
+
/*
* Set var information according to the calling convention. arm version.
* The locals var stuff should most likely be split in another method.
MonoMethodHeader *header;
MonoInst *ins;
int i, offset, size, align, curinst;
- int frame_reg = ARMREG_FP;
CallInfo *cinfo;
guint32 ualign;
sig = mono_method_signature (cfg->method);
if (!cfg->arch.cinfo)
- cfg->arch.cinfo = get_call_info (cfg->mempool, sig, sig->pinvoke);
+ cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
cinfo = cfg->arch.cinfo;
- /* FIXME: this will change when we use FP as gcc does */
+ mono_arch_compute_omit_fp (cfg);
+
+ if (cfg->arch.omit_fp)
+ cfg->frame_reg = ARMREG_SP;
+ else
+ cfg->frame_reg = ARMREG_FP;
+
cfg->flags |= MONO_CFG_HAS_SPILLUP;
/* allow room for the vararg method args: void* and long/double */
header = cfg->header;
- /*
- * We use the frame register also for any method that has
- * exception clauses. This way, when the handlers are called,
- * the code will reference local variables using the frame reg instead of
- * the stack pointer: if we had to restore the stack pointer, we'd
- * corrupt the method frames that are already on the stack (since
- * filters get called before stack unwinding happens) when the filter
- * code would call any method (this also applies to finally etc.).
- */
- if ((cfg->flags & MONO_CFG_HAS_ALLOCA) || header->num_clauses)
- frame_reg = ARMREG_FP;
- cfg->frame_reg = frame_reg;
- if (frame_reg != ARMREG_SP) {
- cfg->used_int_regs |= 1 << frame_reg;
- }
+ /* See mono_arch_get_global_int_regs () */
+ if (cfg->flags & MONO_CFG_HAS_CALLS)
+ cfg->uses_rgctx_reg = TRUE;
+
+ if (cfg->frame_reg != ARMREG_SP)
+ cfg->used_int_regs |= 1 << cfg->frame_reg;
- if (cfg->compile_aot || cfg->uses_rgctx_reg)
+ if (cfg->compile_aot || cfg->uses_rgctx_reg || COMPILE_LLVM (cfg))
/* V5 is reserved for passing the vtable/rgctx/IMT method */
cfg->used_int_regs |= (1 << ARMREG_V5);
offset &= ~(sizeof(gpointer) - 1);
ins->inst_offset = offset;
ins->opcode = OP_REGOFFSET;
- ins->inst_basereg = frame_reg;
+ ins->inst_basereg = cfg->frame_reg;
if (G_UNLIKELY (cfg->verbose_level > 1)) {
printf ("vret_addr =");
mono_print_ins (cfg->vret_addr);
offset += align - 1;
offset &= ~(align - 1);
ins->opcode = OP_REGOFFSET;
- ins->inst_basereg = frame_reg;
+ ins->inst_basereg = cfg->frame_reg;
ins->inst_offset = offset;
offset += size;
offset += align - 1;
offset &= ~(align - 1);
ins->opcode = OP_REGOFFSET;
- ins->inst_basereg = frame_reg;
+ ins->inst_basereg = cfg->frame_reg;
+ ins->inst_offset = offset;
+ offset += size;
+ }
+
+ if (cfg->arch.seq_point_read_var) {
+ MonoInst *ins;
+
+ ins = cfg->arch.seq_point_read_var;
+
+ size = 4;
+ align = 4;
+ offset += align - 1;
+ offset &= ~(align - 1);
+ ins->opcode = OP_REGOFFSET;
+ ins->inst_basereg = cfg->frame_reg;
+ ins->inst_offset = offset;
+ offset += size;
+
+ ins = cfg->arch.seq_point_ss_method_var;
+ size = 4;
+ align = 4;
+ offset += align - 1;
+ offset &= ~(align - 1);
+ ins->opcode = OP_REGOFFSET;
+ ins->inst_basereg = cfg->frame_reg;
+ ins->inst_offset = offset;
+ offset += size;
+
+ ins = cfg->arch.seq_point_bp_method_var;
+ size = 4;
+ align = 4;
+ offset += align - 1;
+ offset &= ~(align - 1);
+ ins->opcode = OP_REGOFFSET;
+ ins->inst_basereg = cfg->frame_reg;
ins->inst_offset = offset;
offset += size;
}
+ cfg->locals_min_stack_offset = offset;
+
curinst = cfg->locals_start;
for (i = curinst; i < cfg->num_varinfo; ++i) {
ins = cfg->varinfo [i];
*/
if (align < 4 && size >= 4)
align = 4;
+ if (ALIGN_TO (offset, align) > ALIGN_TO (offset, 4))
+ mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
offset += align - 1;
offset &= ~(align - 1);
ins->opcode = OP_REGOFFSET;
ins->inst_offset = offset;
- ins->inst_basereg = frame_reg;
+ ins->inst_basereg = cfg->frame_reg;
offset += size;
//g_print ("allocating local %d to %d\n", i, inst->inst_offset);
}
+ cfg->locals_max_stack_offset = offset;
+
curinst = 0;
if (sig->hasthis) {
ins = cfg->args [curinst];
if (ins->opcode != OP_REGVAR) {
ins->opcode = OP_REGOFFSET;
- ins->inst_basereg = frame_reg;
+ ins->inst_basereg = cfg->frame_reg;
offset += sizeof (gpointer) - 1;
offset &= ~(sizeof (gpointer) - 1);
ins->inst_offset = offset;
if (ins->opcode != OP_REGVAR) {
ins->opcode = OP_REGOFFSET;
- ins->inst_basereg = frame_reg;
+ ins->inst_basereg = cfg->frame_reg;
size = mini_type_stack_size_full (NULL, sig->params [i], &ualign, sig->pinvoke);
align = ualign;
/* FIXME: if a structure is misaligned, our memcpy doesn't work,
/* The code in the prolog () stores words when storing vtypes received in a register */
if (MONO_TYPE_ISSTRUCT (sig->params [i]))
align = 4;
+ if (ALIGN_TO (offset, align) > ALIGN_TO (offset, 4))
+ mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
offset += align - 1;
offset &= ~(align - 1);
ins->inst_offset = offset;
}
/* align the offset to 8 bytes */
+ if (ALIGN_TO (offset, 8) > ALIGN_TO (offset, 4))
+ mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
offset += 8 - 1;
offset &= ~(8 - 1);
sig = mono_method_signature (cfg->method);
if (!cfg->arch.cinfo)
- cfg->arch.cinfo = get_call_info (cfg->mempool, sig, sig->pinvoke);
+ cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
cinfo = cfg->arch.cinfo;
if (cinfo->ret.storage == RegTypeStructByVal)
}
}
- if (cfg->gen_seq_points && cfg->compile_aot) {
- MonoInst *ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
- ins->flags |= MONO_INST_VOLATILE;
- cfg->arch.seq_point_info_var = ins;
-
- /* Allocate a separate variable for this to save 1 load per seq point */
- ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
- ins->flags |= MONO_INST_VOLATILE;
- cfg->arch.ss_trigger_page_var = ins;
+ if (cfg->gen_seq_points) {
+ if (cfg->soft_breakpoints) {
+ MonoInst *ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
+ ins->flags |= MONO_INST_VOLATILE;
+ cfg->arch.seq_point_read_var = ins;
+
+ ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
+ ins->flags |= MONO_INST_VOLATILE;
+ cfg->arch.seq_point_ss_method_var = ins;
+
+ ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
+ ins->flags |= MONO_INST_VOLATILE;
+ cfg->arch.seq_point_bp_method_var = ins;
+
+ g_assert (!cfg->compile_aot);
+ } else if (cfg->compile_aot) {
+ MonoInst *ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
+ ins->flags |= MONO_INST_VOLATILE;
+ cfg->arch.seq_point_info_var = ins;
+
+ /* Allocate a separate variable for this to save 1 load per seq point */
+ ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
+ ins->flags |= MONO_INST_VOLATILE;
+ cfg->arch.ss_trigger_page_var = ins;
+ }
}
}
emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
{
MonoMethodSignature *tmp_sig;
- MonoInst *sig_arg;
+ int sig_reg;
if (call->tail_call)
NOT_IMPLEMENTED;
- /* FIXME: Add support for signature tokens to AOT */
- cfg->disable_aot = TRUE;
-
g_assert (cinfo->sig_cookie.storage == RegTypeBase);
/*
tmp_sig->sentinelpos = 0;
memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
- MONO_INST_NEW (cfg, sig_arg, OP_ICONST);
- sig_arg->dreg = mono_alloc_ireg (cfg);
- sig_arg->inst_p0 = tmp_sig;
- MONO_ADD_INS (cfg->cbb, sig_arg);
+ sig_reg = mono_alloc_ireg (cfg);
+ MONO_EMIT_NEW_SIGNATURECONST (cfg, sig_reg, tmp_sig);
- MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, cinfo->sig_cookie.offset, sig_arg->dreg);
+ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, cinfo->sig_cookie.offset, sig_reg);
}
#ifdef ENABLE_LLVM
n = sig->param_count + sig->hasthis;
- cinfo = get_call_info (cfg->mempool, sig, sig->pinvoke);
+ cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
linfo = mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMCallInfo) + (sizeof (LLVMArgInfo) * n));
* - we only pass/receive them in registers in some cases, and only
* in 1 or 2 integer registers.
*/
- if (cinfo->ret.storage != RegTypeGeneral && cinfo->ret.storage != RegTypeNone && cinfo->ret.storage != RegTypeFP && cinfo->ret.storage != RegTypeIRegPair) {
+ if (cinfo->vtype_retaddr) {
+ /* Vtype returned using a hidden argument */
+ linfo->ret.storage = LLVMArgVtypeRetAddr;
+ linfo->vret_arg_index = cinfo->vret_arg_index;
+ } else if (cinfo->ret.storage != RegTypeGeneral && cinfo->ret.storage != RegTypeNone && cinfo->ret.storage != RegTypeFP && cinfo->ret.storage != RegTypeIRegPair) {
cfg->exception_message = g_strdup ("unknown ret conv");
cfg->disable_llvm = TRUE;
return linfo;
case RegTypeBase:
linfo->args [i].storage = LLVMArgInIReg;
break;
+ case RegTypeStructByVal:
+ // FIXME: Passing entirely on the stack or split reg/stack
+ if (ainfo->vtsize == 0 && ainfo->size <= 2) {
+ linfo->args [i].storage = LLVMArgVtypeInReg;
+ linfo->args [i].pair_storage [0] = LLVMArgInIReg;
+ if (ainfo->size == 2)
+ linfo->args [i].pair_storage [1] = LLVMArgInIReg;
+ else
+ linfo->args [i].pair_storage [1] = LLVMArgNone;
+ } else {
+ cfg->exception_message = g_strdup_printf ("vtype-by-val on stack");
+ cfg->disable_llvm = TRUE;
+ }
+ break;
default:
cfg->exception_message = g_strdup_printf ("ainfo->storage (%d)", ainfo->storage);
cfg->disable_llvm = TRUE;
sig = call->signature;
n = sig->param_count + sig->hasthis;
- cinfo = get_call_info (NULL, sig, sig->pinvoke);
+ cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig);
for (i = 0; i < n; ++i) {
ArgInfo *ainfo = cinfo->args + i;
MONO_ADD_INS (cfg->cbb, ins);
mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
} else if (!t->byref && ((t->type == MONO_TYPE_R8) || (t->type == MONO_TYPE_R4))) {
-#ifndef MONO_ARCH_SOFT_FLOAT
- int creg;
-#endif
-
if (ainfo->size == 4) {
-#ifdef MONO_ARCH_SOFT_FLOAT
- /* mono_emit_call_args () have already done the r8->r4 conversion */
- /* The converted value is in an int vreg */
- MONO_INST_NEW (cfg, ins, OP_MOVE);
- ins->dreg = mono_alloc_ireg (cfg);
- ins->sreg1 = in->dreg;
- MONO_ADD_INS (cfg->cbb, ins);
- mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
-#else
- MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
- creg = mono_alloc_ireg (cfg);
- MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
- mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
-#endif
+ if (IS_SOFT_FLOAT) {
+ /* mono_emit_call_args () have already done the r8->r4 conversion */
+ /* The converted value is in an int vreg */
+ MONO_INST_NEW (cfg, ins, OP_MOVE);
+ ins->dreg = mono_alloc_ireg (cfg);
+ ins->sreg1 = in->dreg;
+ MONO_ADD_INS (cfg->cbb, ins);
+ mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
+ } else {
+ int creg;
+
+ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
+ creg = mono_alloc_ireg (cfg);
+ MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
+ mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
+ }
} else {
-#ifdef MONO_ARCH_SOFT_FLOAT
- MONO_INST_NEW (cfg, ins, OP_FGETLOW32);
- ins->dreg = mono_alloc_ireg (cfg);
- ins->sreg1 = in->dreg;
- MONO_ADD_INS (cfg->cbb, ins);
- mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
-
- MONO_INST_NEW (cfg, ins, OP_FGETHIGH32);
- ins->dreg = mono_alloc_ireg (cfg);
- ins->sreg1 = in->dreg;
- MONO_ADD_INS (cfg->cbb, ins);
- mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
-#else
- MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
- creg = mono_alloc_ireg (cfg);
- MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
- mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
- creg = mono_alloc_ireg (cfg);
- MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8 + 4));
- mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg + 1, FALSE);
-#endif
+ if (IS_SOFT_FLOAT) {
+ MONO_INST_NEW (cfg, ins, OP_FGETLOW32);
+ ins->dreg = mono_alloc_ireg (cfg);
+ ins->sreg1 = in->dreg;
+ MONO_ADD_INS (cfg->cbb, ins);
+ mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
+
+ MONO_INST_NEW (cfg, ins, OP_FGETHIGH32);
+ ins->dreg = mono_alloc_ireg (cfg);
+ ins->sreg1 = in->dreg;
+ MONO_ADD_INS (cfg->cbb, ins);
+ mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
+ } else {
+ int creg;
+
+ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
+ creg = mono_alloc_ireg (cfg);
+ MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
+ mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
+ creg = mono_alloc_ireg (cfg);
+ MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8 + 4));
+ mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg + 1, FALSE);
+ }
}
cfg->flags |= MONO_CFG_HAS_FPOUT;
} else {
ins->inst_p0 = call;
ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
+ mono_call_inst_add_outarg_vt (cfg, call, ins);
MONO_ADD_INS (cfg->cbb, ins);
break;
case RegTypeBase:
if (t->type == MONO_TYPE_R8) {
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
} else {
-#ifdef MONO_ARCH_SOFT_FLOAT
- MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
-#else
- MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
-#endif
+ if (IS_SOFT_FLOAT)
+ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
+ else
+ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
}
} else {
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
} else if (!t->byref && (t->type == MONO_TYPE_R8)) {
int creg;
-#ifdef MONO_ARCH_SOFT_FLOAT
- g_assert_not_reached ();
-#endif
+ /* This should work for soft-float as well */
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
creg = mono_alloc_ireg (cfg);
ArgInfo *ainfo = ins->inst_p1;
int ovf_size = ainfo->vtsize;
int doffset = ainfo->offset;
- int i, soffset, dreg;
+ int struct_size = ainfo->struct_size;
+ int i, soffset, dreg, tmpreg;
soffset = 0;
for (i = 0; i < ainfo->size; ++i) {
dreg = mono_alloc_ireg (cfg);
- MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, soffset);
+ switch (struct_size) {
+ case 1:
+ MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, src->dreg, soffset);
+ break;
+ case 2:
+ MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, dreg, src->dreg, soffset);
+ break;
+ case 3:
+ tmpreg = mono_alloc_ireg (cfg);
+ MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, src->dreg, soffset);
+ MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, tmpreg, src->dreg, soffset + 1);
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, tmpreg, tmpreg, 8);
+ MONO_EMIT_NEW_BIALU (cfg, OP_IOR, dreg, dreg, tmpreg);
+ MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, tmpreg, src->dreg, soffset + 2);
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, tmpreg, tmpreg, 16);
+ MONO_EMIT_NEW_BIALU (cfg, OP_IOR, dreg, dreg, tmpreg);
+ break;
+ default:
+ MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, soffset);
+ break;
+ }
mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg + i, FALSE);
soffset += sizeof (gpointer);
+ struct_size -= sizeof (gpointer);
}
//g_print ("vt size: %d at R%d + %d\n", doffset, vt->inst_basereg, vt->inst_offset);
if (ovf_size != 0)
- mini_emit_memcpy (cfg, ARMREG_SP, doffset, src->dreg, soffset, ovf_size * sizeof (gpointer), 0);
+ mini_emit_memcpy (cfg, ARMREG_SP, doffset, src->dreg, soffset, MIN (ovf_size * sizeof (gpointer), struct_size), struct_size < 4 ? 1 : 4);
}
void
}
return;
}
-#ifdef MONO_ARCH_SOFT_FLOAT
- if (ret->type == MONO_TYPE_R8) {
- MonoInst *ins;
-
- MONO_INST_NEW (cfg, ins, OP_SETFRET);
- ins->dreg = cfg->ret->dreg;
- ins->sreg1 = val->dreg;
- MONO_ADD_INS (cfg->cbb, ins);
- return;
- }
- if (ret->type == MONO_TYPE_R4) {
- /* Already converted to an int in method_to_ir () */
- MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
- return;
- }
-#elif defined(ARM_FPU_VFP)
- if (ret->type == MONO_TYPE_R8 || ret->type == MONO_TYPE_R4) {
- MonoInst *ins;
+ switch (arm_fpu) {
+ case MONO_ARM_FPU_NONE:
+ if (ret->type == MONO_TYPE_R8) {
+ MonoInst *ins;
+
+ MONO_INST_NEW (cfg, ins, OP_SETFRET);
+ ins->dreg = cfg->ret->dreg;
+ ins->sreg1 = val->dreg;
+ MONO_ADD_INS (cfg->cbb, ins);
+ return;
+ }
+ if (ret->type == MONO_TYPE_R4) {
+ /* Already converted to an int in method_to_ir () */
+ MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
+ return;
+ }
+ break;
+ case MONO_ARM_FPU_VFP:
+ if (ret->type == MONO_TYPE_R8 || ret->type == MONO_TYPE_R4) {
+ MonoInst *ins;
- MONO_INST_NEW (cfg, ins, OP_SETFRET);
- ins->dreg = cfg->ret->dreg;
- ins->sreg1 = val->dreg;
- MONO_ADD_INS (cfg->cbb, ins);
- return;
- }
-#else
- if (ret->type == MONO_TYPE_R4 || ret->type == MONO_TYPE_R8) {
- MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
- return;
+ MONO_INST_NEW (cfg, ins, OP_SETFRET);
+ ins->dreg = cfg->ret->dreg;
+ ins->sreg1 = val->dreg;
+ MONO_ADD_INS (cfg->cbb, ins);
+ return;
+ }
+ break;
+ case MONO_ARM_FPU_FPA:
+ if (ret->type == MONO_TYPE_R4 || ret->type == MONO_TYPE_R8) {
+ MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
+ return;
+ }
+ break;
+ default:
+ g_assert_not_reached ();
}
-#endif
}
- /* FIXME: */
MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
}
case RegTypeStructByAddr:
break;
case RegTypeFP:
-#ifdef ARM_FPU_FPA
- return FALSE;
-#elif defined(ARM_FPU_VFP)
- break;
-#else
- return FALSE;
-#endif
+ if (IS_FPA)
+ return FALSE;
+ else if (IS_VFP)
+ break;
+ else
+ return FALSE;
default:
return FALSE;
}
switch (t->type) {
case MONO_TYPE_R4:
case MONO_TYPE_R8:
-#ifdef MONO_ARCH_SOFT_FLOAT
- return FALSE;
-#else
- break;
-#endif
+ if (IS_SOFT_FLOAT)
+ return FALSE;
+ else
+ break;
/*
case MONO_TYPE_I8:
case MONO_TYPE_U8:
ArchDynCallInfo *info;
CallInfo *cinfo;
- cinfo = get_call_info (NULL, sig, FALSE);
+ cinfo = get_call_info (NULL, NULL, sig);
if (!dyn_call_supported (cinfo, sig)) {
g_free (cinfo);
{
ArchDynCallInfo *dinfo = (ArchDynCallInfo*)info;
DynCallArgs *p = (DynCallArgs*)buf;
- int arg_index, greg, i, j;
+ int arg_index, greg, i, j, pindex;
MonoMethodSignature *sig = dinfo->sig;
g_assert (buf_len >= sizeof (DynCallArgs));
arg_index = 0;
greg = 0;
+ pindex = 0;
+
+ if (sig->hasthis || dinfo->cinfo->vret_arg_index == 1) {
+ p->regs [greg ++] = (mgreg_t)*(args [arg_index ++]);
+ if (!sig->hasthis)
+ pindex = 1;
+ }
if (dinfo->cinfo->vtype_retaddr)
p->regs [greg ++] = (mgreg_t)ret;
- if (sig->hasthis)
- p->regs [greg ++] = (mgreg_t)*(args [arg_index ++]);
-
- for (i = 0; i < sig->param_count; i++) {
+ for (i = pindex; i < sig->param_count; i++) {
MonoType *t = mono_type_get_underlying_type (sig->params [i]);
gpointer *arg = args [arg_index ++];
ArgInfo *ainfo = &dinfo->cinfo->args [i + sig->hasthis];
g_assert (ainfo->cinfo->vtype_retaddr);
/* Nothing to do */
break;
-#if defined(ARM_FPU_VFP)
case MONO_TYPE_R4:
+ g_assert (IS_VFP);
*(float*)ret = *(float*)&res;
break;
case MONO_TYPE_R8: {
mgreg_t regs [2];
+ g_assert (IS_VFP);
regs [0] = res;
regs [1] = res2;
*(double*)ret = *(double*)®s;
break;
}
-#endif
default:
g_assert_not_reached ();
}
emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
{
/* sreg is a float, dreg is an integer reg */
-#ifdef ARM_FPU_FPA
- ARM_FIXZ (code, dreg, sreg);
-#elif defined(ARM_FPU_VFP)
- if (is_signed)
- ARM_TOSIZD (code, ARM_VFP_F0, sreg);
- else
- ARM_TOUIZD (code, ARM_VFP_F0, sreg);
- ARM_FMRS (code, dreg, ARM_VFP_F0);
-#endif
+ if (IS_FPA)
+ ARM_FPA_FIXZ (code, dreg, sreg);
+ else if (IS_VFP) {
+ if (is_signed)
+ ARM_TOSIZD (code, ARM_VFP_F0, sreg);
+ else
+ ARM_TOUIZD (code, ARM_VFP_F0, sreg);
+ ARM_FMRS (code, dreg, ARM_VFP_F0);
+ }
if (!is_signed) {
if (size == 1)
ARM_AND_REG_IMM8 (code, dreg, dreg, 0xff);
}
static void
-handle_thunk (MonoDomain *domain, int absolute, guchar *code, const guchar *target)
+handle_thunk (MonoDomain *domain, int absolute, guchar *code, const guchar *target, MonoCodeManager *dyn_code_mp)
{
PatchData pdata;
pdata.absolute = absolute;
pdata.found = 0;
- mono_domain_lock (domain);
- mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
+ if (dyn_code_mp) {
+ mono_code_manager_foreach (dyn_code_mp, search_thunk_slot, &pdata);
+ }
- if (!pdata.found) {
- /* this uses the first available slot */
- pdata.found = 2;
+ if (pdata.found != 1) {
+ mono_domain_lock (domain);
mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
+
+ if (!pdata.found) {
+ /* this uses the first available slot */
+ pdata.found = 2;
+ mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
+ }
+ mono_domain_unlock (domain);
}
- mono_domain_unlock (domain);
+ if (pdata.found != 1) {
+ GHashTable *hash;
+ GHashTableIter iter;
+ MonoJitDynamicMethodInfo *ji;
+
+ /*
+ * This might be a dynamic method, search its code manager. We can only
+ * use the dynamic method containing CODE, since the others might be freed later.
+ */
+ pdata.found = 0;
+
+ mono_domain_lock (domain);
+ hash = domain_jit_info (domain)->dynamic_code_hash;
+ if (hash) {
+ /* FIXME: Speed this up */
+ g_hash_table_iter_init (&iter, hash);
+ while (g_hash_table_iter_next (&iter, NULL, (gpointer*)&ji)) {
+ mono_code_manager_foreach (ji->code_mp, search_thunk_slot, &pdata);
+ if (pdata.found == 1)
+ break;
+ }
+ }
+ mono_domain_unlock (domain);
+ }
if (pdata.found != 1)
g_print ("thunk failed for %p from %p\n", target, code);
g_assert (pdata.found == 1);
}
static void
-arm_patch_general (MonoDomain *domain, guchar *code, const guchar *target)
+arm_patch_general (MonoDomain *domain, guchar *code, const guchar *target, MonoCodeManager *dyn_code_mp)
{
guint32 *code32 = (void*)code;
guint32 ins = *code32;
}
}
- handle_thunk (domain, TRUE, code, target);
+ handle_thunk (domain, TRUE, code, target, dyn_code_mp);
return;
}
void
arm_patch (guchar *code, const guchar *target)
{
- arm_patch_general (NULL, code, target);
+ arm_patch_general (NULL, code, target, NULL);
}
/*
pos = 0;
- cinfo = get_call_info (NULL, sig, sig->pinvoke);
+ cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig);
if (MONO_TYPE_ISSTRUCT (sig->ret)) {
ArgInfo *ainfo = &cinfo->ret;
switch (ins->opcode) {
case OP_MEMORY_BARRIER:
+ if (v6_supported) {
+ ARM_MOV_REG_IMM8 (code, ARMREG_R0, 0);
+ ARM_MCR (code, 15, 0, ARMREG_R0, 7, 10, 5);
+ }
break;
case OP_TLS_GET:
#ifdef HAVE_AEABI_READ_TP
int i;
MonoInst *info_var = cfg->arch.seq_point_info_var;
MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
+ MonoInst *ss_read_var = cfg->arch.seq_point_read_var;
+ MonoInst *ss_method_var = cfg->arch.seq_point_ss_method_var;
+ MonoInst *bp_method_var = cfg->arch.seq_point_bp_method_var;
MonoInst *var;
int dreg = ARMREG_LR;
+ if (cfg->soft_breakpoints) {
+ g_assert (!cfg->compile_aot);
+ }
+
/*
* For AOT, we use one got slot per method, which will point to a
* SeqPointInfo structure, containing all the information required
g_assert (arm_is_imm12 (info_var->inst_offset));
}
- /*
- * Read from the single stepping trigger page. This will cause a
- * SIGSEGV when single stepping is enabled.
- * We do this _before_ the breakpoint, so single stepping after
- * a breakpoint is hit will step to the next IL offset.
- */
- g_assert (((guint64)(gsize)ss_trigger_page >> 32) == 0);
+ if (!cfg->soft_breakpoints) {
+ /*
+ * Read from the single stepping trigger page. This will cause a
+ * SIGSEGV when single stepping is enabled.
+ * We do this _before_ the breakpoint, so single stepping after
+ * a breakpoint is hit will step to the next IL offset.
+ */
+ g_assert (((guint64)(gsize)ss_trigger_page >> 32) == 0);
+ }
if (ins->flags & MONO_INST_SINGLE_STEP_LOC) {
- if (cfg->compile_aot) {
- /* Load the trigger page addr from the variable initialized in the prolog */
- var = ss_trigger_page_var;
+ if (cfg->soft_breakpoints) {
+ /* Load the address of the sequence point trigger variable. */
+ var = ss_read_var;
+ g_assert (var);
+ g_assert (var->opcode == OP_REGOFFSET);
+ g_assert (arm_is_imm12 (var->inst_offset));
+ ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
+
+ /* Read the value and check whether it is non-zero. */
+ ARM_LDR_IMM (code, dreg, dreg, 0);
+ ARM_CMP_REG_IMM (code, dreg, 0, 0);
+
+ /* Load the address of the sequence point method. */
+ var = ss_method_var;
g_assert (var);
g_assert (var->opcode == OP_REGOFFSET);
g_assert (arm_is_imm12 (var->inst_offset));
ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
+
+ /* Call it conditionally. */
+ ARM_BLX_REG_COND (code, ARMCOND_NE, dreg);
} else {
- ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
- ARM_B (code, 0);
- *(int*)code = (int)ss_trigger_page;
- code += 4;
+ if (cfg->compile_aot) {
+ /* Load the trigger page addr from the variable initialized in the prolog */
+ var = ss_trigger_page_var;
+ g_assert (var);
+ g_assert (var->opcode == OP_REGOFFSET);
+ g_assert (arm_is_imm12 (var->inst_offset));
+ ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
+ } else {
+ ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
+ ARM_B (code, 0);
+ *(int*)code = (int)ss_trigger_page;
+ code += 4;
+ }
+ ARM_LDR_IMM (code, dreg, dreg, 0);
}
- ARM_LDR_IMM (code, dreg, dreg, 0);
}
mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
- if (cfg->compile_aot) {
+ if (cfg->soft_breakpoints) {
+ /* Load the address of the breakpoint method into ip. */
+ var = bp_method_var;
+ g_assert (var);
+ g_assert (var->opcode == OP_REGOFFSET);
+ g_assert (arm_is_imm12 (var->inst_offset));
+ ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
+
+ /*
+ * A placeholder for a possible breakpoint inserted by
+ * mono_arch_set_breakpoint ().
+ */
+ ARM_NOP (code);
+ } else if (cfg->compile_aot) {
guint32 offset = code - cfg->native_code;
guint32 val;
/* Add the offset */
val = ((offset / 4) * sizeof (guint8*)) + G_STRUCT_OFFSET (SeqPointInfo, bp_addrs);
ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF), 0);
- /*
- * Have to emit nops to keep the difference between the offset
- * stored in seq_points and breakpoint instruction constant,
- * mono_arch_get_ip_for_breakpoint () depends on this.
- */
if (val & 0xFF00)
ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
- else
- ARM_NOP (code);
if (val & 0xFF0000)
ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
- else
- ARM_NOP (code);
g_assert (!(val & 0xFF000000));
/* Load the info->bp_addrs [offset], which is either 0 or the address of a trigger page */
ARM_LDR_IMM (code, dreg, dreg, 0);
break;
}
case OP_FMOVE:
-#ifdef ARM_FPU_FPA
- ARM_MVFD (code, ins->dreg, ins->sreg1);
-#elif defined(ARM_FPU_VFP)
- ARM_CPYD (code, ins->dreg, ins->sreg1);
-#endif
+ if (IS_FPA)
+ ARM_FPA_MVFD (code, ins->dreg, ins->sreg1);
+ else if (IS_VFP)
+ ARM_CPYD (code, ins->dreg, ins->sreg1);
break;
case OP_FCONV_TO_R4:
-#ifdef ARM_FPU_FPA
- ARM_MVFS (code, ins->dreg, ins->sreg1);
-#elif defined(ARM_FPU_VFP)
- ARM_CVTD (code, ins->dreg, ins->sreg1);
- ARM_CVTS (code, ins->dreg, ins->dreg);
-#endif
+ if (IS_FPA)
+ ARM_FPA_MVFS (code, ins->dreg, ins->sreg1);
+ else if (IS_VFP) {
+ ARM_CVTD (code, ins->dreg, ins->sreg1);
+ ARM_CVTS (code, ins->dreg, ins->dreg);
+ }
break;
case OP_JMP:
/*
code = emit_load_volatile_arguments (cfg, code);
code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage);
- ARM_POP_NWB (code, cfg->used_int_regs | ((1 << ARMREG_SP)) | ((1 << ARMREG_LR)));
+ if (iphone_abi) {
+ if (cfg->used_int_regs)
+ ARM_POP (code, cfg->used_int_regs);
+ ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
+ } else {
+ ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_LR));
+ }
mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
if (cfg->compile_aot) {
ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
break;
case OP_CHECK_THIS:
/* ensure ins->sreg1 is not NULL */
- ARM_LDR_IMM (code, ARMREG_LR, ins->sreg1, 0);
+ ARM_LDRB_IMM (code, ARMREG_LR, ins->sreg1, 0);
break;
case OP_ARGLIST: {
g_assert (cfg->sig_cookie < 128);
else
mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, call->fptr);
code = emit_call_seq (cfg, code);
+ ins->flags |= MONO_INST_GC_CALLSITE;
+ ins->backend.pc_offset = code - cfg->native_code;
code = emit_move_return_value (cfg, ins, code);
break;
case OP_FCALL_REG:
case OP_VOIDCALL_REG:
case OP_CALL_REG:
code = emit_call_reg (code, ins->sreg1);
+ ins->flags |= MONO_INST_GC_CALLSITE;
+ ins->backend.pc_offset = code - cfg->native_code;
code = emit_move_return_value (cfg, ins, code);
break;
case OP_FCALL_MEMBASE:
ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
}
+ ins->flags |= MONO_INST_GC_CALLSITE;
+ ins->backend.pc_offset = code - cfg->native_code;
code = emit_move_return_value (cfg, ins, code);
break;
case OP_LOCALLOC: {
ARM_STR_REG_REG (code, ARMREG_LR, ARMREG_SP, ins->dreg);
arm_patch (branch_to_cond, code);
/* decrement by 4 and set flags */
- ARM_SUBS_REG_IMM8 (code, ins->dreg, ins->dreg, 4);
+ ARM_SUBS_REG_IMM8 (code, ins->dreg, ins->dreg, sizeof (mgreg_t));
ARM_B_COND (code, ARMCOND_GE, 0);
arm_patch (code - 4, start_loop);
}
/* Set stack slots using R0 as scratch reg */
/* MONO_ARCH_DYN_CALL_PARAM_AREA gives the size of stack space available */
for (i = 0; i < DYN_CALL_STACK_ARGS; ++i) {
- ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, (PARAM_REGS + i) * sizeof (gpointer));
- ARM_STR_IMM (code, ARMREG_R0, ARMREG_SP, i * sizeof (gpointer));
+ ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, (PARAM_REGS + i) * sizeof (mgreg_t));
+ ARM_STR_IMM (code, ARMREG_R0, ARMREG_SP, i * sizeof (mgreg_t));
}
/* Set argument registers */
for (i = 0; i < PARAM_REGS; ++i)
- ARM_LDR_IMM (code, i, ARMREG_LR, i * sizeof (gpointer));
+ ARM_LDR_IMM (code, i, ARMREG_LR, i * sizeof (mgreg_t));
/* Make the call */
ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
}
case OP_START_HANDLER: {
MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
+ int i, rot_amount;
+
+ /* Reserve a param area, see filter-stack.exe */
+ if (cfg->param_area) {
+ if ((i = mono_arm_is_rotated_imm8 (cfg->param_area, &rot_amount)) >= 0) {
+ ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
+ } else {
+ code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->param_area);
+ ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
+ }
+ }
if (arm_is_imm12 (spvar->inst_offset)) {
ARM_STR_IMM (code, ARMREG_LR, spvar->inst_basereg, spvar->inst_offset);
}
case OP_ENDFILTER: {
MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
+ int i, rot_amount;
+
+ /* Free the param area */
+ if (cfg->param_area) {
+ if ((i = mono_arm_is_rotated_imm8 (cfg->param_area, &rot_amount)) >= 0) {
+ ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
+ } else {
+ code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->param_area);
+ ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
+ }
+ }
if (ins->sreg1 != ARMREG_R0)
ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
}
case OP_ENDFINALLY: {
MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
+ int i, rot_amount;
+
+ /* Free the param area */
+ if (cfg->param_area) {
+ if ((i = mono_arm_is_rotated_imm8 (cfg->param_area, &rot_amount)) >= 0) {
+ ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
+ } else {
+ code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->param_area);
+ ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
+ }
+ }
if (arm_is_imm12 (spvar->inst_offset)) {
ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
*/
mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_SWITCH, ins->inst_p0);
max_len += 4 * GPOINTER_TO_INT (ins->klass);
- if (offset > (cfg->code_size - max_len - 16)) {
+ if (offset + max_len > (cfg->code_size - 16)) {
cfg->code_size += max_len;
cfg->code_size *= 2;
cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
#ifdef ARM_FPU_FPA
case OP_R8CONST:
if (cfg->compile_aot) {
- ARM_LDFD (code, ins->dreg, ARMREG_PC, 0);
+ ARM_FPA_LDFD (code, ins->dreg, ARMREG_PC, 0);
ARM_B (code, 1);
*(guint32*)code = ((guint32*)(ins->inst_p0))[0];
code += 4;
* the displacement in LDFD (aligning to 512).
*/
code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
- ARM_LDFD (code, ins->dreg, ARMREG_LR, 0);
+ ARM_FPA_LDFD (code, ins->dreg, ARMREG_LR, 0);
}
break;
case OP_R4CONST:
if (cfg->compile_aot) {
- ARM_LDFS (code, ins->dreg, ARMREG_PC, 0);
+ ARM_FPA_LDFS (code, ins->dreg, ARMREG_PC, 0);
ARM_B (code, 0);
*(guint32*)code = ((guint32*)(ins->inst_p0))[0];
code += 4;
} else {
code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
- ARM_LDFS (code, ins->dreg, ARMREG_LR, 0);
+ ARM_FPA_LDFS (code, ins->dreg, ARMREG_LR, 0);
}
break;
case OP_STORER8_MEMBASE_REG:
if (!arm_is_fpimm8 (ins->inst_offset)) {
code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_destbasereg);
- ARM_STFD (code, ins->sreg1, ARMREG_LR, 0);
+ ARM_FPA_STFD (code, ins->sreg1, ARMREG_LR, 0);
} else {
- ARM_STFD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
+ ARM_FPA_STFD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
}
break;
case OP_LOADR8_MEMBASE:
if (!arm_is_fpimm8 (ins->inst_offset)) {
code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_basereg);
- ARM_LDFD (code, ins->dreg, ARMREG_LR, 0);
+ ARM_FPA_LDFD (code, ins->dreg, ARMREG_LR, 0);
} else {
- ARM_LDFD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
+ ARM_FPA_LDFD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
}
break;
case OP_STORER4_MEMBASE_REG:
g_assert (arm_is_fpimm8 (ins->inst_offset));
- ARM_STFS (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
+ ARM_FPA_STFS (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
break;
case OP_LOADR4_MEMBASE:
g_assert (arm_is_fpimm8 (ins->inst_offset));
- ARM_LDFS (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
+ ARM_FPA_LDFS (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
break;
case OP_ICONV_TO_R_UN: {
int tmpreg;
tmpreg = ins->dreg == 0? 1: 0;
ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
- ARM_FLTD (code, ins->dreg, ins->sreg1);
+ ARM_FPA_FLTD (code, ins->dreg, ins->sreg1);
ARM_B_COND (code, ARMCOND_GE, 8);
/* save the temp register */
ARM_SUB_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 8);
- ARM_STFD (code, tmpreg, ARMREG_SP, 0);
- ARM_LDFD (code, tmpreg, ARMREG_PC, 12);
+ ARM_FPA_STFD (code, tmpreg, ARMREG_SP, 0);
+ ARM_FPA_LDFD (code, tmpreg, ARMREG_PC, 12);
ARM_FPA_ADFD (code, ins->dreg, ins->dreg, tmpreg);
- ARM_LDFD (code, tmpreg, ARMREG_SP, 0);
+ ARM_FPA_LDFD (code, tmpreg, ARMREG_SP, 0);
ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 8);
/* skip the constant pool */
ARM_B (code, 8);
break;
}
case OP_ICONV_TO_R4:
- ARM_FLTS (code, ins->dreg, ins->sreg1);
+ ARM_FPA_FLTS (code, ins->dreg, ins->sreg1);
break;
case OP_ICONV_TO_R8:
- ARM_FLTD (code, ins->dreg, ins->sreg1);
+ ARM_FPA_FLTD (code, ins->dreg, ins->sreg1);
break;
#elif defined(ARM_FPU_VFP)
ARM_FPA_DVFD (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_FNEG:
- ARM_MNFD (code, ins->dreg, ins->sreg1);
+ ARM_FPA_MNFD (code, ins->dreg, ins->sreg1);
break;
#elif defined(ARM_FPU_VFP)
case OP_FADD:
g_assert_not_reached ();
break;
case OP_FCOMPARE:
-#ifdef ARM_FPU_FPA
- ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
-#elif defined(ARM_FPU_VFP)
- ARM_CMPD (code, ins->sreg1, ins->sreg2);
- ARM_FMSTAT (code);
-#endif
+ if (IS_FPA) {
+ ARM_FPA_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
+ } else if (IS_VFP) {
+ ARM_CMPD (code, ins->sreg1, ins->sreg2);
+ ARM_FMSTAT (code);
+ }
break;
case OP_FCEQ:
-#ifdef ARM_FPU_FPA
- ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
-#elif defined(ARM_FPU_VFP)
- ARM_CMPD (code, ins->sreg1, ins->sreg2);
- ARM_FMSTAT (code);
-#endif
+ if (IS_FPA) {
+ ARM_FPA_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
+ } else if (IS_VFP) {
+ ARM_CMPD (code, ins->sreg1, ins->sreg2);
+ ARM_FMSTAT (code);
+ }
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
break;
case OP_FCLT:
-#ifdef ARM_FPU_FPA
- ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
-#elif defined(ARM_FPU_VFP)
- ARM_CMPD (code, ins->sreg1, ins->sreg2);
- ARM_FMSTAT (code);
-#endif
+ if (IS_FPA) {
+ ARM_FPA_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
+ } else {
+ ARM_CMPD (code, ins->sreg1, ins->sreg2);
+ ARM_FMSTAT (code);
+ }
ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
break;
case OP_FCLT_UN:
-#ifdef ARM_FPU_FPA
- ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
-#elif defined(ARM_FPU_VFP)
- ARM_CMPD (code, ins->sreg1, ins->sreg2);
- ARM_FMSTAT (code);
-#endif
+ if (IS_FPA) {
+ ARM_FPA_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
+ } else if (IS_VFP) {
+ ARM_CMPD (code, ins->sreg1, ins->sreg2);
+ ARM_FMSTAT (code);
+ }
ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
break;
case OP_FCGT:
/* swapped */
-#ifdef ARM_FPU_FPA
- ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
-#elif defined(ARM_FPU_VFP)
- ARM_CMPD (code, ins->sreg2, ins->sreg1);
- ARM_FMSTAT (code);
-#endif
+ if (IS_FPA) {
+ ARM_FPA_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
+ } else if (IS_VFP) {
+ ARM_CMPD (code, ins->sreg2, ins->sreg1);
+ ARM_FMSTAT (code);
+ }
ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
break;
case OP_FCGT_UN:
/* swapped */
-#ifdef ARM_FPU_FPA
- ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
-#elif defined(ARM_FPU_VFP)
- ARM_CMPD (code, ins->sreg2, ins->sreg1);
- ARM_FMSTAT (code);
-#endif
+ if (IS_FPA) {
+ ARM_FPA_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
+ } else if (IS_VFP) {
+ ARM_CMPD (code, ins->sreg2, ins->sreg1);
+ ARM_FMSTAT (code);
+ }
ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
g_assert_not_reached ();
break;
case OP_FBGE:
-#ifdef ARM_FPU_VFP
- EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
-#else
- /* FPA requires EQ even thou the docs suggests that just CS is enough */
- EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_EQ);
- EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_CS);
-#endif
+ if (IS_VFP) {
+ EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
+ } else {
+			/* FPA requires EQ even though the docs suggest that just CS is enough */
+ EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_EQ);
+ EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_CS);
+ }
break;
case OP_FBGE_UN:
EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
break;
case OP_CKFINITE: {
-#ifdef ARM_FPU_FPA
- if (ins->dreg != ins->sreg1)
- ARM_MVFD (code, ins->dreg, ins->sreg1);
-#elif defined(ARM_FPU_VFP)
- ARM_ABSD (code, ARM_VFP_D1, ins->sreg1);
- ARM_FLDD (code, ARM_VFP_D0, ARMREG_PC, 0);
- ARM_B (code, 1);
- *(guint32*)code = 0xffffffff;
- code += 4;
- *(guint32*)code = 0x7fefffff;
- code += 4;
- ARM_CMPD (code, ARM_VFP_D1, ARM_VFP_D0);
- ARM_FMSTAT (code);
- EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_GT, "ArithmeticException");
- ARM_CMPD (code, ins->sreg1, ins->sreg1);
- ARM_FMSTAT (code);
- EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, "ArithmeticException");
-
- ARM_CPYD (code, ins->dreg, ins->sreg1);
-#endif
+ if (IS_FPA) {
+ if (ins->dreg != ins->sreg1)
+ ARM_FPA_MVFD (code, ins->dreg, ins->sreg1);
+ } else if (IS_VFP) {
+ ARM_ABSD (code, ARM_VFP_D1, ins->sreg1);
+ ARM_FLDD (code, ARM_VFP_D0, ARMREG_PC, 0);
+ ARM_B (code, 1);
+ *(guint32*)code = 0xffffffff;
+ code += 4;
+ *(guint32*)code = 0x7fefffff;
+ code += 4;
+ ARM_CMPD (code, ARM_VFP_D1, ARM_VFP_D0);
+ ARM_FMSTAT (code);
+ EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_GT, "ArithmeticException");
+ ARM_CMPD (code, ins->sreg1, ins->sreg1);
+ ARM_FMSTAT (code);
+ EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, "ArithmeticException");
+ ARM_CPYD (code, ins->dreg, ins->sreg1);
+ }
break;
}
+
+ case OP_GC_LIVENESS_DEF:
+ case OP_GC_LIVENESS_USE:
+ case OP_GC_PARAM_SLOT_LIVENESS_DEF:
+ ins->backend.pc_offset = code - cfg->native_code;
+ break;
+ case OP_GC_SPILL_SLOT_LIVENESS_DEF:
+ ins->backend.pc_offset = code - cfg->native_code;
+ bb->spill_slot_defs = g_slist_prepend_mempool (cfg->mempool, bb->spill_slot_defs, ins);
+ break;
+
default:
g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
g_assert_not_reached ();
} while (0)
void
-mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gboolean run_cctors)
+mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, MonoCodeManager *dyn_code_mp, gboolean run_cctors)
{
MonoJumpInfo *patch_info;
gboolean compile_aot = !run_cctors;
default:
break;
}
- arm_patch_general (domain, ip, target);
+ arm_patch_general (domain, ip, target, dyn_code_mp);
}
}
MonoBasicBlock *bb;
MonoMethodSignature *sig;
MonoInst *inst;
- int alloc_size, pos, max_offset, i, rot_amount;
+ int alloc_size, orig_alloc_size, pos, max_offset, i, rot_amount;
guint8 *code;
CallInfo *cinfo;
int tracing = 0;
tracing = 1;
sig = mono_method_signature (method);
- cfg->code_size = 256 + sig->param_count * 20;
+ cfg->code_size = 256 + sig->param_count * 64;
code = cfg->native_code = g_malloc (cfg->code_size);
mono_emit_unwind_op_def_cfa (cfg, code, ARMREG_SP, 0);
- ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_SP);
-
alloc_size = cfg->stack_offset;
pos = 0;
+ prev_sp_offset = 0;
if (!method->save_lmf) {
- /* We save SP by storing it into IP and saving IP */
- ARM_PUSH (code, (cfg->used_int_regs | (1 << ARMREG_IP) | (1 << ARMREG_LR)));
- prev_sp_offset = 8; /* ip and lr */
+ if (iphone_abi) {
+ /*
+ * The iphone uses R7 as the frame pointer, and it points at the saved
+ * r7+lr:
+ * <lr>
+ * r7 -> <r7>
+ * <rest of frame>
+ * We can't use r7 as a frame pointer since it points into the middle of
+ * the frame, so we keep using our own frame pointer.
+ * FIXME: Optimize this.
+ */
+ g_assert (darwin);
+ ARM_PUSH (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
+ ARM_MOV_REG_REG (code, ARMREG_R7, ARMREG_SP);
+ prev_sp_offset += 8; /* r7 and lr */
+ mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
+ mono_emit_unwind_op_offset (cfg, code, ARMREG_R7, (- prev_sp_offset) + 0);
+
+ /* No need to push LR again */
+ if (cfg->used_int_regs)
+ ARM_PUSH (code, cfg->used_int_regs);
+ } else {
+ ARM_PUSH (code, cfg->used_int_regs | (1 << ARMREG_LR));
+ prev_sp_offset += 4;
+ }
for (i = 0; i < 16; ++i) {
if (cfg->used_int_regs & (1 << i))
prev_sp_offset += 4;
mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
reg_offset = 0;
for (i = 0; i < 16; ++i) {
- if ((cfg->used_int_regs & (1 << i)) || (i == ARMREG_IP) || (i == ARMREG_LR)) {
+ if ((cfg->used_int_regs & (1 << i))) {
mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
+ mini_gc_set_slot_type_from_cfa (cfg, (- prev_sp_offset) + reg_offset, SLOT_NOREF);
reg_offset += 4;
}
}
+ if (iphone_abi) {
+ mono_emit_unwind_op_offset (cfg, code, ARMREG_LR, -4);
+ mini_gc_set_slot_type_from_cfa (cfg, -4, SLOT_NOREF);
+ } else {
+ mono_emit_unwind_op_offset (cfg, code, ARMREG_LR, -4);
+ mini_gc_set_slot_type_from_cfa (cfg, -4, SLOT_NOREF);
+ }
} else {
+ ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_SP);
ARM_PUSH (code, 0x5ff0);
- prev_sp_offset = 4 * 10; /* all but r0-r3, sp and pc */
+ prev_sp_offset += 4 * 10; /* all but r0-r3, sp and pc */
mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
reg_offset = 0;
for (i = 0; i < 16; ++i) {
lmf_offset = pos;
}
alloc_size += pos;
+ orig_alloc_size = alloc_size;
// align to MONO_ARCH_FRAME_ALIGNMENT bytes
if (alloc_size & (MONO_ARCH_FRAME_ALIGNMENT - 1)) {
alloc_size += MONO_ARCH_FRAME_ALIGNMENT - 1;
//g_print ("prev_sp_offset: %d, alloc_size:%d\n", prev_sp_offset, alloc_size);
prev_sp_offset += alloc_size;
+ for (i = 0; i < alloc_size - orig_alloc_size; i += 4)
+ mini_gc_set_slot_type_from_cfa (cfg, (- prev_sp_offset) + orig_alloc_size + i, SLOT_NOREF);
+
/* compute max_offset in order to use short forward jumps
* we could skip do it on arm because the immediate displacement
* for jumps is large enough, it may be useful later for constant pools
/* load arguments allocated to register from the stack */
pos = 0;
- cinfo = get_call_info (NULL, sig, sig->pinvoke);
+ cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig);
if (MONO_TYPE_ISSTRUCT (sig->ret) && cinfo->ret.storage != RegTypeStructByVal) {
ArgInfo *ainfo = &cinfo->ret;
code = emit_call_seq (cfg, code);
}
- if (method->save_lmf) {
- gboolean get_lmf_fast = FALSE;
-
-#ifdef HAVE_AEABI_READ_TP
- gint32 lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
-
- if (lmf_addr_tls_offset != -1) {
- get_lmf_fast = TRUE;
-
- mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
- (gpointer)"__aeabi_read_tp");
- code = emit_call_seq (cfg, code);
-
- ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, lmf_addr_tls_offset);
- get_lmf_fast = TRUE;
- }
-#endif
- if (!get_lmf_fast) {
- mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
- (gpointer)"mono_get_lmf_addr");
- code = emit_call_seq (cfg, code);
- }
- /* we build the MonoLMF structure on the stack - see mini-arm.h */
- /* lmf_offset is the offset from the previous stack pointer,
- * alloc_size is the total stack space allocated, so the offset
- * of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
- * The pointer to the struct is put in r1 (new_lmf).
- * r2 is used as scratch
- * The callee-saved registers are already in the MonoLMF structure
- */
- code = emit_big_add (code, ARMREG_R1, ARMREG_SP, alloc_size - lmf_offset);
- /* r0 is the result from mono_get_lmf_addr () */
- ARM_STR_IMM (code, ARMREG_R0, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, lmf_addr));
- /* new_lmf->previous_lmf = *lmf_addr */
- ARM_LDR_IMM (code, ARMREG_R2, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
- ARM_STR_IMM (code, ARMREG_R2, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
- /* *(lmf_addr) = r1 */
- ARM_STR_IMM (code, ARMREG_R1, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
- /* Skip method (only needed for trampoline LMF frames) */
- ARM_STR_IMM (code, ARMREG_SP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, ebp));
- /* save the current IP */
- ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_PC);
- ARM_STR_IMM (code, ARMREG_R2, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, eip));
- }
+ if (method->save_lmf)
+ code = emit_save_lmf (cfg, code, alloc_size - lmf_offset);
if (tracing)
code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
}
/* Initialize ss_trigger_page_var */
- {
+ if (!cfg->soft_breakpoints) {
MonoInst *info_var = cfg->arch.seq_point_info_var;
MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
int dreg = ARMREG_LR;
}
}
+ if (cfg->arch.seq_point_read_var) {
+ MonoInst *read_ins = cfg->arch.seq_point_read_var;
+ MonoInst *ss_method_ins = cfg->arch.seq_point_ss_method_var;
+ MonoInst *bp_method_ins = cfg->arch.seq_point_bp_method_var;
+
+ g_assert (read_ins->opcode == OP_REGOFFSET);
+ g_assert (arm_is_imm12 (read_ins->inst_offset));
+ g_assert (ss_method_ins->opcode == OP_REGOFFSET);
+ g_assert (arm_is_imm12 (ss_method_ins->inst_offset));
+ g_assert (bp_method_ins->opcode == OP_REGOFFSET);
+ g_assert (arm_is_imm12 (bp_method_ins->inst_offset));
+
+ ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
+ ARM_B (code, 2);
+ *(volatile int **)code = &ss_trigger_var;
+ code += 4;
+ *(gpointer*)code = single_step_func_wrapper;
+ code += 4;
+ *(gpointer*)code = breakpoint_func_wrapper;
+ code += 4;
+
+ ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 0);
+ ARM_STR_IMM (code, ARMREG_IP, read_ins->inst_basereg, read_ins->inst_offset);
+ ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 4);
+ ARM_STR_IMM (code, ARMREG_IP, ss_method_ins->inst_basereg, ss_method_ins->inst_offset);
+ ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 8);
+ ARM_STR_IMM (code, ARMREG_IP, bp_method_ins->inst_basereg, bp_method_ins->inst_offset);
+ }
+
cfg->code_len = code - cfg->native_code;
g_assert (cfg->code_len < cfg->code_size);
g_free (cinfo);
while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
cfg->code_size *= 2;
cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
- mono_jit_stats.code_reallocs++;
+ cfg->stat_code_reallocs++;
}
/*
}
if (method->save_lmf) {
- int lmf_offset;
+ int lmf_offset, reg, sp_adj, regmask;
/* all but r0-r3, sp and pc */
- pos += sizeof (MonoLMF) - (4 * 10);
+ pos += sizeof (MonoLMF) - (MONO_ARM_NUM_SAVED_REGS * sizeof (mgreg_t));
lmf_offset = pos;
- /* r2 contains the pointer to the current LMF */
- code = emit_big_add (code, ARMREG_R2, cfg->frame_reg, cfg->stack_usage - lmf_offset);
- /* ip = previous_lmf */
- ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R2, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
- /* lr = lmf_addr */
- ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R2, G_STRUCT_OFFSET (MonoLMF, lmf_addr));
- /* *(lmf_addr) = previous_lmf */
- ARM_STR_IMM (code, ARMREG_IP, ARMREG_LR, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
- /* FIXME: speedup: there is no actual need to restore the registers if
- * we didn't actually change them (idea from Zoltan).
- */
- /* restore iregs */
+
+ code = emit_restore_lmf (cfg, code, cfg->stack_usage - lmf_offset);
+
+ /* This points to r4 inside MonoLMF->iregs */
+ sp_adj = (sizeof (MonoLMF) - MONO_ARM_NUM_SAVED_REGS * sizeof (mgreg_t));
+ reg = ARMREG_R4;
+ regmask = 0x9ff0; /* restore lr to pc */
+ /* Skip caller saved registers not used by the method */
+ while (!(cfg->used_int_regs & (1 << reg)) && reg < ARMREG_FP) {
+ regmask &= ~(1 << reg);
+ sp_adj += 4;
+ reg ++;
+ }
/* point sp at the registers to restore: 10 is 14 -4, because we skip r0-r3 */
- ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_R2, (sizeof (MonoLMF) - 10 * sizeof (gulong)));
- ARM_POP_NWB (code, 0xaff0); /* restore ip to sp and lr to pc */
+ code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage - lmf_offset + sp_adj);
+ /* restore iregs */
+ ARM_POP (code, regmask);
} else {
if ((i = mono_arm_is_rotated_imm8 (cfg->stack_usage, &rot_amount)) >= 0) {
ARM_ADD_REG_IMM (code, ARMREG_SP, cfg->frame_reg, i, rot_amount);
} else {
code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->stack_usage);
- ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
+ ARM_ADD_REG_REG (code, ARMREG_SP, cfg->frame_reg, ARMREG_IP);
+ }
+
+ if (iphone_abi) {
+ /* Restore saved gregs */
+ if (cfg->used_int_regs)
+ ARM_POP (code, cfg->used_int_regs);
+ /* Restore saved r7, restore LR to PC */
+ ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_PC));
+ } else {
+ ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_PC));
}
- /* FIXME: add v4 thumb interworking support */
- ARM_POP_NWB (code, cfg->used_int_regs | ((1 << ARMREG_SP) | (1 << ARMREG_PC)));
}
cfg->code_len = code - cfg->native_code;
return MONO_EXC_NULL_REF;
if (strcmp (name, "ArrayTypeMismatchException") == 0)
return MONO_EXC_ARRAY_TYPE_MISMATCH;
+ if (strcmp (name, "ArgumentException") == 0)
+ return MONO_EXC_ARGUMENT;
g_error ("Unknown intrinsic exception %s\n", name);
return -1;
}
while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
cfg->code_size *= 2;
cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
- mono_jit_stats.code_reallocs++;
+ cfg->stat_code_reallocs++;
}
code = cfg->native_code + cfg->code_len;
mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
}
- } else if (cfg->generic_context || imt_arg) {
+ } else if (cfg->generic_context || imt_arg || mono_use_llvm) {
/* Always pass in a register for simplicity */
call->dynamic_imt_arg = TRUE;
{
guint32 *code_ptr = (guint32*)code;
code_ptr -= 2;
+
+ if (mono_use_llvm)
+ /* Passed in V5 */
+ return (MonoMethod*)regs [ARMREG_V5];
+
/* The IMT value is stored in the code stream right after the LDC instruction. */
if (!IS_LDR_PC (code_ptr [0])) {
g_warning ("invalid code stream, instruction before IMT value is not a LDC in %s() (code %p value 0: 0x%x -1: 0x%x -2: 0x%x)", __FUNCTION__, code, code_ptr [2], code_ptr [1], code_ptr [0]);
vtable_target = code;
ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
- /* R0 == 0 means we are called from AOT code. In this case, V5 contains the IMT method */
- ARM_CMP_REG_IMM8 (code, ARMREG_R0, 0);
- ARM_MOV_REG_REG_COND (code, ARMREG_R0, ARMREG_V5, ARMCOND_EQ);
+ if (mono_use_llvm) {
+ /* LLVM always passes the IMT method in R5 */
+ ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_V5);
+ } else {
+ /* R0 == 0 means we are called from AOT code. In this case, V5 contains the IMT method */
+ ARM_CMP_REG_IMM8 (code, ARMREG_R0, 0);
+ ARM_MOV_REG_REG_COND (code, ARMREG_R0, ARMREG_V5, ARMCOND_EQ);
+ }
for (i = 0; i < count; ++i) {
MonoIMTCheckItem *item = imt_entries [i];
#endif
-gpointer
+mgreg_t
 mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
 {
-	if (reg == ARMREG_SP)
-		return (gpointer)ctx->esp;
-	else
-		return (gpointer)ctx->regs [reg];
+	/* regs [] now holds every integer register, including SP, so the
+	 * old special case for ARMREG_SP (the separate esp field) is gone. */
+	return ctx->regs [reg];
+}
+
+/*
+ * mono_arch_context_set_int_reg:
+ *
+ *   Set the value of the integer register REG in CTX to VAL.
+ * Counterpart of mono_arch_context_get_int_reg ().
+ */
+void
+mono_arch_context_set_int_reg (MonoContext *ctx, int reg, mgreg_t val)
+{
+	ctx->regs [reg] = val;
+}
+
+/*
+ * mono_arch_get_trampolines:
+ *
+ * Return a list of MonoTrampInfo structures describing arch specific trampolines
+ * for AOT.
+ */
+GSList *
+mono_arch_get_trampolines (gboolean aot)
+{
+	/* On ARM, only the exception trampolines are emitted this way */
+	return mono_arm_get_exception_trampolines (aot);
 }
/*
{
guint8 *code = ip;
guint32 native_offset = ip - (guint8*)ji->code_start;
+ MonoDebugOptions *opt = mini_get_debug_options ();
- if (ji->from_aot) {
+ if (opt->soft_breakpoints) {
+ g_assert (!ji->from_aot);
+ code += 4;
+ ARM_BLX_REG (code, ARMREG_LR);
+ mono_arch_flush_icache (code - 4, 4);
+ } else if (ji->from_aot) {
SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
g_assert (native_offset % 4 == 0);
void
mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
{
+ MonoDebugOptions *opt = mini_get_debug_options ();
guint8 *code = ip;
int i;
- if (ji->from_aot) {
+ if (opt->soft_breakpoints) {
+ g_assert (!ji->from_aot);
+ code += 4;
+ ARM_NOP (code);
+ mono_arch_flush_icache (code - 4, 4);
+ } else if (ji->from_aot) {
guint32 native_offset = ip - (guint8*)ji->code_start;
SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
 void
 mono_arch_start_single_stepping (void)
 {
-	mono_mprotect (ss_trigger_page, mono_pagesize (), 0);
+	/* Hardware-assisted stepping: revoke all access to the trigger page so
+	 * the generated read of it faults. If no trigger page was allocated
+	 * (soft breakpoints mode), set the flag polled by the generated code
+	 * instead (its address is embedded in the prologue, see above). */
+	if (ss_trigger_page)
+		mono_mprotect (ss_trigger_page, mono_pagesize (), 0);
+	else
+		ss_trigger_var = 1;
 }
/*
 void
 mono_arch_stop_single_stepping (void)
 {
-	mono_mprotect (ss_trigger_page, mono_pagesize (), MONO_MMAP_READ);
+	/* Mirror of mono_arch_start_single_stepping (): make the trigger page
+	 * readable again, or clear the soft single-step flag. */
+	if (ss_trigger_page)
+		mono_mprotect (ss_trigger_page, mono_pagesize (), MONO_MMAP_READ);
+	else
+		ss_trigger_var = 0;
 }
#if __APPLE__
{
siginfo_t *sinfo = info;
+ if (!ss_trigger_page)
+ return FALSE;
+
/* Sometimes the address is off by 4 */
if (sinfo->si_addr >= ss_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)ss_trigger_page + 128)
return TRUE;
{
siginfo_t *sinfo = info;
+ if (!ss_trigger_page)
+ return FALSE;
+
if (sinfo->si_signo == DBG_SIGNAL) {
/* Sometimes the address is off by 4 */
if (sinfo->si_addr >= bp_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)bp_trigger_page + 128)
}
}
-guint8*
-mono_arch_get_ip_for_breakpoint (MonoJitInfo *ji, MonoContext *ctx)
-{
- guint8 *ip = MONO_CONTEXT_GET_IP (ctx);
-
- if (ji->from_aot)
- ip -= 6 * 4;
- else
- ip -= 12;
-
- return ip;
-}
-
-guint8*
-mono_arch_get_ip_for_single_step (MonoJitInfo *ji, MonoContext *ctx)
-{
- guint8 *ip = MONO_CONTEXT_GET_IP (ctx);
-
- ip += 4;
-
- return ip;
-}
-
 /*
  * mono_arch_skip_breakpoint:
  *
  * See mini-amd64.c for docs.
  */
 void
-mono_arch_skip_breakpoint (MonoContext *ctx)
+mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji)
 {
+	/* Advance the IP past the 4 byte breakpoint instruction to resume.
+	 * The new JI argument is unused here; it was added to match the
+	 * cross-arch signature. */
 	MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
 }
return info;
}
+
+/*
+ * mono_arch_set_target:
+ *
+ *   Set the target architecture the JIT backend should generate code for, in the form
+ * of a GNU target triplet. Only used in AOT mode.
+ */
+void
+mono_arch_set_target (char *mtriple)
+{
+	/* The GNU target triple format is not very well documented */
+	if (strstr (mtriple, "armv7")) {
+		/* armv7 implies all the earlier architecture levels */
+		v5_supported = TRUE;
+		v6_supported = TRUE;
+		v7_supported = TRUE;
+	}
+	if (strstr (mtriple, "armv6")) {
+		/* armv6 implies armv5 (BLX etc. are available from ARMv5T) */
+		v5_supported = TRUE;
+		v6_supported = TRUE;
+	}
+	if (strstr (mtriple, "darwin")) {
+		/* iOS devices are at least ARMv5 with Thumb support */
+		v5_supported = TRUE;
+		thumb_supported = TRUE;
+		darwin = TRUE;
+		iphone_abi = TRUE;
+	}
+	if (strstr (mtriple, "gnueabi"))
+		eabi_supported = TRUE;
+}