return "unknown";
}
+#ifndef DISABLE_JIT
+
static guint8*
emit_big_add (guint8 *code, int dreg, int sreg, int imm)
{
return code;
}
+#endif /* #ifndef DISABLE_JIT */
+
/*
* mono_arch_get_argument_info:
* @csig: a method signature
return frame_size;
}
-static gpointer
-decode_vcall_slot_from_ldr (guint32 ldr, mgreg_t *regs, int *displacement)
-{
- char *o = NULL;
- int reg, offset = 0;
- reg = (ldr >> 16 ) & 0xf;
- offset = ldr & 0xfff;
- if (((ldr >> 23) & 1) == 0) /*U bit, 0 means negative and 1 positive*/
- offset = -offset;
- /*g_print ("found vcall at r%d + %d for code at %p 0x%x\n", reg, offset, code, *code);*/
- o = (gpointer)regs [reg];
-
- *displacement = offset;
- return o;
-}
-
-gpointer
-mono_arch_get_vcall_slot (guint8 *code_ptr, mgreg_t *regs, int *displacement)
-{
- guint32* code = (guint32*)code_ptr;
-
- /* Locate the address of the method-specific trampoline. The call using
- the vtable slot that took the processing flow to 'arch_create_jit_trampoline'
- looks something like this:
-
- ldr rA, rX, #offset
- mov lr, pc
- mov pc, rA
- or better:
- mov lr, pc
- ldr pc, rX, #offset
-
- The call sequence could be also:
- ldr ip, pc, 0
- b skip
- function pointer literal
- skip:
- mov lr, pc
- mov pc, ip
- Note that on ARM5+ we can use one instruction instead of the last two.
- Therefore, we need to locate the 'ldr rA' instruction to know which
- register was used to hold the method addrs.
- */
-
- /* This is the instruction after "ldc pc, xxx", "mov pc, xxx" or "bl xxx" could be either the IMT value or some other instruction*/
- --code;
-
- /* Three possible code sequences can happen here:
- * interface call:
- *
- * add lr, [pc + #4]
- * ldr pc, [rX - #offset]
- * .word IMT value
- *
- * virtual call:
- *
- * mov lr, pc
- * ldr pc, [rX - #offset]
- *
- * direct branch with bl:
- *
- * bl #offset
- *
- * direct branch with mov:
- *
- * mv pc, rX
- *
- * We only need to identify interface and virtual calls, the others can be ignored.
- *
- */
- if (IS_LDR_PC (code [-1]) && code [-2] == ADD_LR_PC_4)
- return decode_vcall_slot_from_ldr (code [-1], regs, displacement);
-
- if (IS_LDR_PC (code [0]) && code [-1] == MOV_LR_PC)
- return decode_vcall_slot_from_ldr (code [0], regs, displacement);
-
- return NULL;
-}
-
#define MAX_ARCH_DELEGATE_PARAMS 3
static gpointer
int i;
code = get_delegate_invoke_impl (TRUE, 0, &code_len);
- res = g_slist_prepend (res, mono_aot_tramp_info_create (g_strdup ("delegate_invoke_impl_has_target"), code, code_len));
+ res = g_slist_prepend (res, mono_tramp_info_create (g_strdup ("delegate_invoke_impl_has_target"), code, code_len, NULL, NULL));
for (i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) {
code = get_delegate_invoke_impl (FALSE, i, &code_len);
- res = g_slist_prepend (res, mono_aot_tramp_info_create (g_strdup_printf ("delegate_invoke_impl_target_%d", i), code, code_len));
+ res = g_slist_prepend (res, mono_tramp_info_create (g_strdup_printf ("delegate_invoke_impl_target_%d", i), code, code_len, NULL, NULL));
}
return res;
}
if (mono_aot_only)
- start = mono_aot_get_named_code ("delegate_invoke_impl_has_target");
+ start = mono_aot_get_trampoline ("delegate_invoke_impl_has_target");
else
start = get_delegate_invoke_impl (TRUE, 0, NULL);
cached = start;
if (mono_aot_only) {
char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count);
- start = mono_aot_get_named_code (name);
+ start = mono_aot_get_trampoline (name);
g_free (name);
} else {
start = get_delegate_invoke_impl (FALSE, sig->param_count, NULL);
}
gpointer
-mono_arch_get_this_arg_from_call (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, mgreg_t *regs, guint8 *code)
+mono_arch_get_this_arg_from_call (mgreg_t *regs, guint8 *code)
{
- /* FIXME: handle returning a struct */
- if (MONO_TYPE_ISSTRUCT (sig->ret))
- return (gpointer)regs [ARMREG_R1];
return (gpointer)regs [ARMREG_R0];
}
ss_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
bp_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
mono_mprotect (bp_trigger_page, mono_pagesize (), 0);
+
+ mono_aot_register_jit_icall ("mono_arm_throw_exception", mono_arm_throw_exception);
+ mono_aot_register_jit_icall ("mono_arm_throw_exception_by_token", mono_arm_throw_exception_by_token);
}
/*
mono_arch_cpu_optimizazions (guint32 *exclude_mask)
{
guint32 opts = 0;
+ const char *cpu_arch = getenv ("MONO_CPU_ARCH");
+ if (cpu_arch != NULL) {
+ thumb_supported = strstr (cpu_arch, "thumb") != NULL;
+ if (strncmp (cpu_arch, "armv", 4) == 0) {
+ v5_supported = cpu_arch [4] >= '5';
+ v7_supported = cpu_arch [4] >= '7';
+ }
+ } else {
#if __APPLE__
thumb_supported = TRUE;
v5_supported = TRUE;
/*printf ("features: v5: %d, thumb: %d\n", v5_supported, thumb_supported);*/
}
#endif
+ }
/* no arm-specific optimizations yet */
*exclude_mask = 0;
return opts;
}
+#ifndef DISABLE_JIT
+
static gboolean
is_regsize_var (MonoType *t) {
if (t->byref)
regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V2));
regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V3));
regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V4));
- if (!(cfg->compile_aot || cfg->uses_rgctx_reg))
+ if (!(cfg->compile_aot || cfg->uses_rgctx_reg || COMPILE_LLVM (cfg)))
/* V5 is reserved for passing the vtable/rgctx/IMT method */
regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V5));
/*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V6));*/
return 2;
}
+#endif /* #ifndef DISABLE_JIT */
+
#ifndef __GNUC_PREREQ
#define __GNUC_PREREQ(maj, min) (0)
#endif
typedef struct {
int nargs;
guint32 stack_usage;
- guint32 struct_ret;
gboolean vtype_retaddr;
+ /* The index of the vret arg in the argument list */
+ int vret_arg_index;
ArgInfo ret;
ArgInfo sig_cookie;
ArgInfo args [1];
}
static CallInfo*
-get_call_info (MonoMemPool *mp, MonoMethodSignature *sig, gboolean is_pinvoke)
+get_call_info (MonoGenericSharingContext *gsctx, MonoMemPool *mp, MonoMethodSignature *sig, gboolean is_pinvoke)
{
- guint i, gr;
+ guint i, gr, pstart;
int n = sig->hasthis + sig->param_count;
MonoType *simpletype;
guint32 stack_size = 0;
if (is_pinvoke && mono_class_native_size (mono_class_from_mono_type (sig->ret), &align) <= sizeof (gpointer)) {
cinfo->ret.storage = RegTypeStructByVal;
} else {
- add_general (&gr, &stack_size, &cinfo->ret, TRUE);
- cinfo->struct_ret = ARMREG_R0;
cinfo->vtype_retaddr = TRUE;
}
}
+ pstart = 0;
n = 0;
- if (sig->hasthis) {
- add_general (&gr, &stack_size, cinfo->args + n, TRUE);
- n++;
+ /*
+ * To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after
+ * the first argument, allowing 'this' to be always passed in the first arg reg.
+ * Also do this if the first argument is a reference type, since virtual calls
+ * are sometimes made using calli without sig->hasthis set, like in the delegate
+ * invoke wrappers.
+ */
+ if (cinfo->vtype_retaddr && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (mini_type_get_underlying_type (gsctx, sig->params [0]))))) {
+ if (sig->hasthis) {
+ add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
+ } else {
+ add_general (&gr, &stack_size, &cinfo->args [sig->hasthis + 0], TRUE);
+ pstart = 1;
+ }
+ n ++;
+ add_general (&gr, &stack_size, &cinfo->ret, TRUE);
+ cinfo->vret_arg_index = 1;
+ } else {
+ /* this */
+ if (sig->hasthis) {
+ add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
+ n ++;
+ }
+
+ if (cinfo->vtype_retaddr)
+ add_general (&gr, &stack_size, &cinfo->ret, TRUE);
}
- DEBUG(printf("params: %d\n", sig->param_count));
- for (i = 0; i < sig->param_count; ++i) {
+
+ DEBUG(printf("params: %d\n", sig->param_count));
+ for (i = pstart; i < sig->param_count; ++i) {
if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
/* Prevent implicit arguments and sig_cookie from
being passed in registers */
n++;
break;
case MONO_TYPE_GENERICINST:
- if (!mono_type_generic_inst_is_valuetype (sig->params [i])) {
+ if (!mono_type_generic_inst_is_valuetype (simpletype)) {
cinfo->args [n].size = sizeof (gpointer);
add_general (&gr, &stack_size, cinfo->args + n, TRUE);
n++;
cinfo->ret.storage = RegTypeFP;*/
break;
case MONO_TYPE_GENERICINST:
- if (!mono_type_generic_inst_is_valuetype (sig->ret)) {
+ if (!mono_type_generic_inst_is_valuetype (simpletype)) {
cinfo->ret.storage = RegTypeGeneral;
cinfo->ret.reg = ARMREG_R0;
break;
return cinfo;
}
+#ifndef DISABLE_JIT
/*
* Set var information according to the calling convention. arm version.
{
MonoMethodSignature *sig;
MonoMethodHeader *header;
- MonoInst *inst;
+ MonoInst *ins;
int i, offset, size, align, curinst;
int frame_reg = ARMREG_FP;
CallInfo *cinfo;
sig = mono_method_signature (cfg->method);
if (!cfg->arch.cinfo)
- cfg->arch.cinfo = get_call_info (cfg->mempool, sig, sig->pinvoke);
+ cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig, sig->pinvoke);
cinfo = cfg->arch.cinfo;
/* FIXME: this will change when we use FP as gcc does */
if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
cfg->param_area = MAX (cfg->param_area, sizeof (gpointer)*8);
- header = mono_method_get_header (cfg->method);
+ header = cfg->header;
/*
* We use the frame register also for any method that has
cfg->used_int_regs |= 1 << frame_reg;
}
- if (cfg->compile_aot || cfg->uses_rgctx_reg)
+ if (cfg->compile_aot || cfg->uses_rgctx_reg || COMPILE_LLVM (cfg))
/* V5 is reserved for passing the vtable/rgctx/IMT method */
cfg->used_int_regs |= (1 << ARMREG_V5);
offset &= ~(sizeof (gpointer) - 1);
cfg->ret->inst_offset = - offset;
} else {
- inst = cfg->vret_addr;
+ ins = cfg->vret_addr;
offset += sizeof(gpointer) - 1;
offset &= ~(sizeof(gpointer) - 1);
- inst->inst_offset = offset;
- inst->opcode = OP_REGOFFSET;
- inst->inst_basereg = frame_reg;
+ ins->inst_offset = offset;
+ ins->opcode = OP_REGOFFSET;
+ ins->inst_basereg = frame_reg;
if (G_UNLIKELY (cfg->verbose_level > 1)) {
printf ("vret_addr =");
mono_print_ins (cfg->vret_addr);
offset += sizeof(gpointer);
}
+ /* Allocate these first so they have a small offset, OP_SEQ_POINT depends on this */
+ if (cfg->arch.seq_point_info_var) {
+ MonoInst *ins;
+
+ ins = cfg->arch.seq_point_info_var;
+
+ size = 4;
+ align = 4;
+ offset += align - 1;
+ offset &= ~(align - 1);
+ ins->opcode = OP_REGOFFSET;
+ ins->inst_basereg = frame_reg;
+ ins->inst_offset = offset;
+ offset += size;
+
+ ins = cfg->arch.ss_trigger_page_var;
+ size = 4;
+ align = 4;
+ offset += align - 1;
+ offset &= ~(align - 1);
+ ins->opcode = OP_REGOFFSET;
+ ins->inst_basereg = frame_reg;
+ ins->inst_offset = offset;
+ offset += size;
+ }
+
curinst = cfg->locals_start;
for (i = curinst; i < cfg->num_varinfo; ++i) {
- inst = cfg->varinfo [i];
- if ((inst->flags & MONO_INST_IS_DEAD) || inst->opcode == OP_REGVAR)
+ ins = cfg->varinfo [i];
+ if ((ins->flags & MONO_INST_IS_DEAD) || ins->opcode == OP_REGVAR || ins->opcode == OP_REGOFFSET)
continue;
/* inst->backend.is_pinvoke indicates native sized value types, this is used by the
* pinvoke wrappers when they call functions returning structure */
- if (inst->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (inst->inst_vtype) && inst->inst_vtype->type != MONO_TYPE_TYPEDBYREF) {
- size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), &ualign);
+ if (ins->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (ins->inst_vtype) && ins->inst_vtype->type != MONO_TYPE_TYPEDBYREF) {
+ size = mono_class_native_size (mono_class_from_mono_type (ins->inst_vtype), &ualign);
align = ualign;
}
else
- size = mono_type_size (inst->inst_vtype, &align);
+ size = mono_type_size (ins->inst_vtype, &align);
/* FIXME: if a structure is misaligned, our memcpy doesn't work,
* since it loads/stores misaligned words, which don't do the right thing.
align = 4;
offset += align - 1;
offset &= ~(align - 1);
- inst->inst_offset = offset;
- inst->opcode = OP_REGOFFSET;
- inst->inst_basereg = frame_reg;
+ ins->opcode = OP_REGOFFSET;
+ ins->inst_offset = offset;
+ ins->inst_basereg = frame_reg;
offset += size;
//g_print ("allocating local %d to %d\n", i, inst->inst_offset);
}
curinst = 0;
if (sig->hasthis) {
- inst = cfg->args [curinst];
- if (inst->opcode != OP_REGVAR) {
- inst->opcode = OP_REGOFFSET;
- inst->inst_basereg = frame_reg;
+ ins = cfg->args [curinst];
+ if (ins->opcode != OP_REGVAR) {
+ ins->opcode = OP_REGOFFSET;
+ ins->inst_basereg = frame_reg;
offset += sizeof (gpointer) - 1;
offset &= ~(sizeof (gpointer) - 1);
- inst->inst_offset = offset;
+ ins->inst_offset = offset;
offset += sizeof (gpointer);
}
curinst++;
}
for (i = 0; i < sig->param_count; ++i) {
- inst = cfg->args [curinst];
+ ins = cfg->args [curinst];
- if (inst->opcode != OP_REGVAR) {
- inst->opcode = OP_REGOFFSET;
- inst->inst_basereg = frame_reg;
+ if (ins->opcode != OP_REGVAR) {
+ ins->opcode = OP_REGOFFSET;
+ ins->inst_basereg = frame_reg;
size = mini_type_stack_size_full (NULL, sig->params [i], &ualign, sig->pinvoke);
align = ualign;
/* FIXME: if a structure is misaligned, our memcpy doesn't work,
align = 4;
offset += align - 1;
offset &= ~(align - 1);
- inst->inst_offset = offset;
+ ins->inst_offset = offset;
offset += size;
}
curinst++;
sig = mono_method_signature (cfg->method);
if (!cfg->arch.cinfo)
- cfg->arch.cinfo = get_call_info (cfg->mempool, sig, sig->pinvoke);
+ cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig, sig->pinvoke);
cinfo = cfg->arch.cinfo;
if (cinfo->ret.storage == RegTypeStructByVal)
n = sig->param_count + sig->hasthis;
- cinfo = get_call_info (cfg->mempool, sig, sig->pinvoke);
+ cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig, sig->pinvoke);
linfo = mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMCallInfo) + (sizeof (LLVMArgInfo) * n));
* - we only pass/receive them in registers in some cases, and only
* in 1 or 2 integer registers.
*/
- if (cinfo->ret.storage != RegTypeGeneral && cinfo->ret.storage != RegTypeNone && cinfo->ret.storage != RegTypeFP) {
+ if (cinfo->ret.storage != RegTypeGeneral && cinfo->ret.storage != RegTypeNone && cinfo->ret.storage != RegTypeFP && cinfo->ret.storage != RegTypeIRegPair) {
cfg->exception_message = g_strdup ("unknown ret conv");
cfg->disable_llvm = TRUE;
return linfo;
switch (ainfo->storage) {
case RegTypeGeneral:
case RegTypeIRegPair:
+ case RegTypeBase:
linfo->args [i].storage = LLVMArgInIReg;
break;
default:
sig = call->signature;
n = sig->param_count + sig->hasthis;
- cinfo = get_call_info (NULL, sig, sig->pinvoke);
+ cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig, sig->pinvoke);
for (i = 0; i < n; ++i) {
ArgInfo *ainfo = cinfo->args + i;
if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
MonoInst *ins;
- MONO_INST_NEW (cfg, ins, OP_SETLRET);
- ins->sreg1 = val->dreg + 1;
- ins->sreg2 = val->dreg + 2;
- MONO_ADD_INS (cfg->cbb, ins);
+ if (COMPILE_LLVM (cfg)) {
+ MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
+ } else {
+ MONO_INST_NEW (cfg, ins, OP_SETLRET);
+ ins->sreg1 = val->dreg + 1;
+ ins->sreg2 = val->dreg + 2;
+ MONO_ADD_INS (cfg->cbb, ins);
+ }
return;
}
#ifdef MONO_ARCH_SOFT_FLOAT
MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
}
+#endif /* #ifndef DISABLE_JIT */
+
gboolean
mono_arch_is_inst_imm (gint64 imm)
{
ArchDynCallInfo *info;
CallInfo *cinfo;
- cinfo = get_call_info (NULL, sig, FALSE);
+ cinfo = get_call_info (NULL, NULL, sig, FALSE);
if (!dyn_call_supported (cinfo, sig)) {
g_free (cinfo);
}
}
+#ifndef DISABLE_JIT
+
/*
* Allow tracing to work with this interface (with an optional argument)
*/
return code;
}
+#endif /* #ifndef DISABLE_JIT */
+
typedef struct {
guchar *code;
const guchar *target;
return thumb_supported;
}
+#ifndef DISABLE_JIT
+
/*
* emit_load_volatile_arguments:
*
pos = 0;
- cinfo = get_call_info (NULL, sig, sig->pinvoke);
+ cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig, sig->pinvoke);
if (MONO_TYPE_ISSTRUCT (sig->ret)) {
ArgInfo *ainfo = &cinfo->ret;
return code;
}
-#ifndef DISABLE_JIT
-
void
mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
{
case OP_NOT_NULL:
break;
case OP_SEQ_POINT: {
- int i, il_offset;
+ int i;
MonoInst *info_var = cfg->arch.seq_point_info_var;
MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
MonoInst *var;
ARM_LDR_IMM (code, dreg, dreg, 0);
}
- il_offset = ins->inst_imm;
-
- if (!cfg->seq_points)
- cfg->seq_points = g_ptr_array_new ();
- g_ptr_array_add (cfg->seq_points, GUINT_TO_POINTER (il_offset));
- g_ptr_array_add (cfg->seq_points, GUINT_TO_POINTER (code - cfg->native_code));
+ mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
if (cfg->compile_aot) {
guint32 offset = code - cfg->native_code;
g_assert (arm_is_imm12 (ins->inst_offset));
g_assert (ins->sreg1 != ARMREG_LR);
call = (MonoCallInst*)ins;
- if (call->method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
+ if (call->dynamic_imt_arg || call->method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
ARM_ADD_REG_IMM8 (code, ARMREG_LR, ARMREG_PC, 4);
ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
/*
case OP_CALL_HANDLER:
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
ARM_BL (code, 0);
+ mono_cfg_add_try_hole (cfg, ins->inst_eh_block, code, bb);
break;
case OP_LABEL:
ins->inst_c0 = code - cfg->native_code;
*/
mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_SWITCH, ins->inst_p0);
max_len += 4 * GPOINTER_TO_INT (ins->klass);
- if (offset > (cfg->code_size - max_len - 16)) {
+ if (offset + max_len > (cfg->code_size - 16)) {
cfg->code_size += max_len;
cfg->code_size *= 2;
cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
mono_register_jit_icall (mono_arm_throw_exception, "mono_arm_throw_exception", mono_create_icall_signature ("void"), TRUE);
mono_register_jit_icall (mono_arm_throw_exception_by_token, "mono_arm_throw_exception_by_token", mono_create_icall_signature ("void"), TRUE);
+#ifndef MONO_CROSS_COMPILE
#ifdef HAVE_AEABI_READ_TP
mono_register_jit_icall (__aeabi_read_tp, "__aeabi_read_tp", mono_create_icall_signature ("void"), TRUE);
#endif
+#endif
}
#define patch_lis_ori(ip,val) do {\
}
}
+#ifndef DISABLE_JIT
+
/*
* Stack frame layout:
*
/* load arguments allocated to register from the stack */
pos = 0;
- cinfo = get_call_info (NULL, sig, sig->pinvoke);
+ cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig, sig->pinvoke);
if (MONO_TYPE_ISSTRUCT (sig->ret) && cinfo->ret.storage != RegTypeStructByVal) {
ArgInfo *ainfo = &cinfo->ret;
/* *(lmf_addr) = r1 */
ARM_STR_IMM (code, ARMREG_R1, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
/* Skip method (only needed for trampoline LMF frames) */
- ARM_STR_IMM (code, ARMREG_SP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, ebp));
+ ARM_STR_IMM (code, ARMREG_SP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, esp));
/* save the current IP */
ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_PC);
ARM_STR_IMM (code, ARMREG_R2, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, eip));
return MONO_EXC_NULL_REF;
if (strcmp (name, "ArrayTypeMismatchException") == 0)
return MONO_EXC_ARRAY_TYPE_MISMATCH;
+ if (strcmp (name, "ArgumentException") == 0)
+ return MONO_EXC_ARGUMENT;
g_error ("Unknown intrinsic exception %s\n", name);
return -1;
}
MonoJumpInfo *patch_info;
int i;
guint8 *code;
- const guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM] = {NULL};
- guint8 exc_throw_found [MONO_EXC_INTRINS_NUM] = {0};
+ guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM];
+ guint8 exc_throw_found [MONO_EXC_INTRINS_NUM];
int max_epilog_size = 50;
+ for (i = 0; i < MONO_EXC_INTRINS_NUM; i++) {
+ exc_throw_pos [i] = NULL;
+ exc_throw_found [i] = 0;
+ }
+
/* count the number of exception infos */
/*
}
+#endif /* #ifndef DISABLE_JIT */
+
static gboolean tls_offset_inited = FALSE;
void
#ifdef MONO_ARCH_HAVE_IMT
+#ifndef DISABLE_JIT
+
void
mono_arch_emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
{
mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
}
- } else if (cfg->generic_context) {
+ } else if (cfg->generic_context || imt_arg || mono_use_llvm) {
/* Always pass in a register for simplicity */
call->dynamic_imt_arg = TRUE;
}
}
+#endif /* #ifndef DISABLE_JIT */
+
MonoMethod*
mono_arch_find_imt_method (mgreg_t *regs, guint8 *code)
{
guint32 *code_ptr = (guint32*)code;
code_ptr -= 2;
+
+ if (mono_use_llvm)
+ /* Passed in V5 */
+ return (MonoMethod*)regs [ARMREG_V5];
+
/* The IMT value is stored in the code stream right after the LDC instruction. */
if (!IS_LDR_PC (code_ptr [0])) {
g_warning ("invalid code stream, instruction before IMT value is not a LDC in %s() (code %p value 0: 0x%x -1: 0x%x -2: 0x%x)", __FUNCTION__, code, code_ptr [2], code_ptr [1], code_ptr [0]);
return (MonoMethod*) code_ptr [1];
}
-MonoObject*
-mono_arch_find_this_argument (mgreg_t *regs, MonoMethod *method, MonoGenericSharingContext *gsctx)
-{
- return mono_arch_get_this_arg_from_call (gsctx, mono_method_signature (method), regs, NULL);
-}
-
MonoVTable*
mono_arch_find_static_call_vtable (mgreg_t *regs, guint8 *code)
{
size = BASE_SIZE;
constant_pool_starts = g_new0 (guint32*, count);
- /*
- * We might be called with a fail_tramp from the IMT builder code even if
- * MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK is not defined.
- */
- //g_assert (!fail_tramp);
-
for (i = 0; i < count; ++i) {
MonoIMTCheckItem *item = imt_entries [i];
if (item->is_equals) {
- if (!arm_is_imm12 (DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]))) {
+ gboolean fail_case = !item->check_target_idx && fail_tramp;
+
+ if (item->has_target_code || !arm_is_imm12 (DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]))) {
item->chunk_size += 32;
large_offsets = TRUE;
}
- if (item->check_target_idx) {
- if (!item->compare_done)
+ if (item->check_target_idx || fail_case) {
+ if (!item->compare_done || fail_case)
item->chunk_size += CMP_SIZE;
item->chunk_size += BRANCH_SIZE;
} else {
item->chunk_size += WMC_SIZE;
#endif
}
+ if (fail_case) {
+ item->chunk_size += 16;
+ large_offsets = TRUE;
+ }
item->chunk_size += CALL_SIZE;
} else {
item->chunk_size += BSEARCH_ENTRY_SIZE;
if (large_offsets)
size += 4 * count; /* The ARM_ADD_REG_IMM to pop the stack */
- start = code = mono_domain_code_reserve (domain, size);
+ if (fail_tramp)
+ code = mono_method_alloc_generic_virtual_thunk (domain, size);
+ else
+ code = mono_domain_code_reserve (domain, size);
+ start = code;
#if DEBUG_IMT
printf ("building IMT thunk for class %s %s entries %d code size %d code at %p end %p vtable %p\n", vtable->klass->name_space, vtable->klass->name, count, size, start, ((guint8*)start) + size, vtable);
vtable_target = code;
ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
- /* R0 == 0 means we are called from AOT code. In this case, V5 contains the IMT method */
- ARM_CMP_REG_IMM8 (code, ARMREG_R0, 0);
- ARM_MOV_REG_REG_COND (code, ARMREG_R0, ARMREG_V5, ARMCOND_EQ);
+ if (mono_use_llvm) {
+ /* LLVM always passes the IMT method in R5 */
+ ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_V5);
+ } else {
+ /* R0 == 0 means we are called from AOT code. In this case, V5 contains the IMT method */
+ ARM_CMP_REG_IMM8 (code, ARMREG_R0, 0);
+ ARM_MOV_REG_REG_COND (code, ARMREG_R0, ARMREG_V5, ARMCOND_EQ);
+ }
for (i = 0; i < count; ++i) {
MonoIMTCheckItem *item = imt_entries [i];
- arminstr_t *imt_method = NULL, *vtable_offset_ins = NULL;
+ arminstr_t *imt_method = NULL, *vtable_offset_ins = NULL, *target_code_ins = NULL;
gint32 vtable_offset;
item->code_target = (guint8*)code;
if (item->is_equals) {
- if (item->check_target_idx) {
- if (!item->compare_done) {
+ gboolean fail_case = !item->check_target_idx && fail_tramp;
+
+ if (item->check_target_idx || fail_case) {
+ if (!item->compare_done || fail_case) {
imt_method = code;
ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
#endif
}
- vtable_offset = DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]);
- if (!arm_is_imm12 (vtable_offset)) {
- /*
- * We need to branch to a computed address but we don't have
- * a free register to store it, since IP must contain the
- * vtable address. So we push the two values to the stack, and
- * load them both using LDM.
- */
- /* Compute target address */
- vtable_offset_ins = code;
+ if (item->has_target_code) {
+ target_code_ins = code;
+ /* Load target address */
ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
- ARM_LDR_REG_REG (code, ARMREG_R1, ARMREG_IP, ARMREG_R1);
/* Save it to the fourth slot */
ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
/* Restore registers and branch */
ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
- code = arm_emit_value_and_patch_ldr (code, vtable_offset_ins, vtable_offset);
+ code = arm_emit_value_and_patch_ldr (code, target_code_ins, (gsize)item->value.target_code);
} else {
- ARM_POP2 (code, ARMREG_R0, ARMREG_R1);
- if (large_offsets)
- ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 2 * sizeof (gpointer));
- ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, vtable_offset);
+ vtable_offset = DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]);
+ if (!arm_is_imm12 (vtable_offset)) {
+ /*
+ * We need to branch to a computed address but we don't have
+ * a free register to store it, since IP must contain the
+ * vtable address. So we push the two values to the stack, and
+ * load them both using LDM.
+ */
+ /* Compute target address */
+ vtable_offset_ins = code;
+ ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
+ ARM_LDR_REG_REG (code, ARMREG_R1, ARMREG_IP, ARMREG_R1);
+ /* Save it to the fourth slot */
+ ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
+ /* Restore registers and branch */
+ ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
+
+ code = arm_emit_value_and_patch_ldr (code, vtable_offset_ins, vtable_offset);
+ } else {
+ ARM_POP2 (code, ARMREG_R0, ARMREG_R1);
+ if (large_offsets)
+ ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 2 * sizeof (gpointer));
+ ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, vtable_offset);
+ }
+ }
+
+ if (fail_case) {
+ arm_patch (item->jmp_code, (guchar*)code);
+
+ target_code_ins = code;
+ /* Load target address */
+ ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
+ /* Save it to the fourth slot */
+ ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
+ /* Restore registers and branch */
+ ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
+
+ code = arm_emit_value_and_patch_ldr (code, target_code_ins, (gsize)fail_tramp);
+ item->jmp_code = NULL;
}
if (imt_method)