#define HAVE_AEABI_READ_TP 1
#endif
+#define THUNK_SIZE (3 * 4)
+
#ifdef __native_client_codegen__
const guint kNaClAlignment = kNaClAlignmentARM;
const guint kNaClAlignmentMask = kNaClAlignmentMaskARM;
static volatile int ss_trigger_var = 0;
-static gpointer single_step_func_wrapper;
-static gpointer breakpoint_func_wrapper;
+static gpointer single_step_tramp, breakpoint_tramp;
/*
* The code generated for sequence points reads from this location, which is
} else {
ARM_BL (code, 0);
}
+ cfg->thunk_area += THUNK_SIZE;
#endif
return code;
}
}
#endif
-static guint8*
-emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
-{
- switch (ins->opcode) {
- case OP_FCALL:
- case OP_FCALL_REG:
- case OP_FCALL_MEMBASE:
- if (IS_VFP) {
- MonoType *sig_ret = mini_type_get_underlying_type (NULL, ((MonoCallInst*)ins)->signature->ret);
- if (sig_ret->type == MONO_TYPE_R4) {
- if (IS_HARD_FLOAT) {
- ARM_CVTS (code, ins->dreg, ARM_VFP_F0);
- } else {
- ARM_FMSR (code, ins->dreg, ARMREG_R0);
- ARM_CVTS (code, ins->dreg, ins->dreg);
- }
- } else {
- if (IS_HARD_FLOAT) {
- ARM_CPYD (code, ins->dreg, ARM_VFP_D0);
- } else {
- ARM_FMDRR (code, ARMREG_R0, ARMREG_R1, ins->dreg);
- }
- }
- }
- break;
- }
-
- return code;
-}
-
/*
* emit_save_lmf:
*
{
GSList *list;
+ g_assert (!cfg->r4fp);
+
for (list = inst->float_args; list; list = list->next) {
FloatArgData *fad = list->data;
MonoInst *var = get_vreg_to_inst (cfg, fad->vreg);
#endif
}
-static gpointer
-create_function_wrapper (gpointer function)
-{
- guint8 *start, *code;
-
- start = code = mono_global_codeman_reserve (96);
-
- /*
- * Construct the MonoContext structure on the stack.
- */
-
- ARM_SUB_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, sizeof (MonoContext));
-
- /* save ip, lr and pc into their correspodings ctx.regs slots. */
- ARM_STR_IMM (code, ARMREG_IP, ARMREG_SP, MONO_STRUCT_OFFSET (MonoContext, regs) + sizeof (mgreg_t) * ARMREG_IP);
- ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, MONO_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_LR);
- ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, MONO_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_PC);
-
- /* save r0..r10 and fp */
- ARM_ADD_REG_IMM8 (code, ARMREG_IP, ARMREG_SP, MONO_STRUCT_OFFSET (MonoContext, regs));
- ARM_STM (code, ARMREG_IP, 0x0fff);
-
- /* now we can update fp. */
- ARM_MOV_REG_REG (code, ARMREG_FP, ARMREG_SP);
-
- /* make ctx.esp hold the actual value of sp at the beginning of this method. */
- ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_FP, sizeof (MonoContext));
- ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, 4 * ARMREG_SP);
- ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, MONO_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_SP);
-
- /* make ctx.eip hold the address of the call. */
- ARM_SUB_REG_IMM8 (code, ARMREG_LR, ARMREG_LR, 4);
- ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, MONO_STRUCT_OFFSET (MonoContext, pc));
-
- /* r0 now points to the MonoContext */
- ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_FP);
-
- /* call */
-#ifdef USE_JUMP_TABLES
- {
- gpointer *jte = mono_jumptable_add_entry ();
- code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
- jte [0] = function;
- }
-#else
- ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
- ARM_B (code, 0);
- *(gpointer*)code = function;
- code += 4;
-#endif
- ARM_BLX_REG (code, ARMREG_IP);
-
- /* we're back; save ctx.eip and ctx.esp into the corresponding regs slots. */
- ARM_LDR_IMM (code, ARMREG_R0, ARMREG_FP, MONO_STRUCT_OFFSET (MonoContext, pc));
- ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, MONO_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_LR);
- ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, MONO_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_PC);
-
- /* make ip point to the regs array, then restore everything, including pc. */
- ARM_ADD_REG_IMM8 (code, ARMREG_IP, ARMREG_FP, MONO_STRUCT_OFFSET (MonoContext, regs));
- ARM_LDM (code, ARMREG_IP, 0xffff);
-
- mono_arch_flush_icache (start, code - start);
- mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_HELPER, NULL);
-
- return start;
-}
-
/*
* Initialize architecture specific code.
*/
const char *cpu_arch;
mono_mutex_init_recursive (&mini_arch_mutex);
-#ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED
if (mini_get_debug_options ()->soft_breakpoints) {
- single_step_func_wrapper = create_function_wrapper (debugger_agent_single_step_from_context);
- breakpoint_func_wrapper = create_function_wrapper (debugger_agent_breakpoint_from_context);
+ single_step_tramp = mini_get_single_step_trampoline ();
+ breakpoint_tramp = mini_get_breakpoint_trampoline ();
} else {
-#else
- {
-#endif
ss_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
bp_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
mono_mprotect (bp_trigger_page, mono_pagesize (), 0);
arm_fpu = MONO_ARM_FPU_VFP;
#if defined(ARM_FPU_NONE) && !defined(__APPLE__)
- /* If we're compiling with a soft float fallback and it
- turns out that no VFP unit is available, we need to
- switch to soft float. We don't do this for iOS, since
- iOS devices always have a VFP unit. */
+ /*
+ * If we're compiling with a soft float fallback and it
+ * turns out that no VFP unit is available, we need to
+ * switch to soft float. We don't do this for iOS, since
+ * iOS devices always have a VFP unit.
+ */
if (!mono_hwcap_arm_has_vfp)
arm_fpu = MONO_ARM_FPU_NONE;
+
+ /*
+ * This environment variable can be useful in testing
+ * environments to make sure the soft float fallback
+ * works. Most ARM devices have VFP units these days, so
+ * normally soft float code would not be exercised much.
+ */
+ const char *soft = g_getenv ("MONO_ARM_FORCE_SOFT_FLOAT");
+
+ if (soft && !strncmp (soft, "1", 1))
+ arm_fpu = MONO_ARM_FPU_NONE;
#endif
#endif
RegTypeGSharedVtInReg,
/* gsharedvt argument passed by addr on stack */
RegTypeGSharedVtOnStack,
+ RegTypeHFA
} ArgStorage;
typedef struct {
gint32 offset;
guint16 vtsize; /* in param area */
+ /* RegTypeHFA: size in bytes of each element (4 or 8) */
+ int esize;
+ /* RegTypeHFA: number of fp fields/registers used */
+ int nregs;
guint8 reg;
ArgStorage storage;
gint32 struct_size;
typedef struct {
int nargs;
guint32 stack_usage;
- gboolean vtype_retaddr;
- /* The index of the vret arg in the argument list */
+ /* The index of the vret arg in the argument list for RegTypeStructByAddr */
int vret_arg_index;
ArgInfo ret;
ArgInfo sig_cookie;
}
}
+static gboolean
+is_hfa (MonoType *t, int *out_nfields, int *out_esize)
+{
+ MonoClass *klass;
+ gpointer iter;
+ MonoClassField *field;
+ MonoType *ftype, *prev_ftype = NULL;
+ int nfields = 0;
+
+ klass = mono_class_from_mono_type (t);
+ iter = NULL;
+ while ((field = mono_class_get_fields (klass, &iter))) {
+ if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
+ continue;
+ ftype = mono_field_get_type (field);
+ ftype = mini_type_get_underlying_type (NULL, ftype);
+
+ if (MONO_TYPE_ISSTRUCT (ftype)) {
+ int nested_nfields, nested_esize;
+
+ if (!is_hfa (ftype, &nested_nfields, &nested_esize))
+ return FALSE;
+ if (nested_esize == 4)
+ ftype = &mono_defaults.single_class->byval_arg;
+ else
+ ftype = &mono_defaults.double_class->byval_arg;
+ if (prev_ftype && prev_ftype->type != ftype->type)
+ return FALSE;
+ prev_ftype = ftype;
+ nfields += nested_nfields;
+ } else {
+ if (!(!ftype->byref && (ftype->type == MONO_TYPE_R4 || ftype->type == MONO_TYPE_R8)))
+ return FALSE;
+ if (prev_ftype && prev_ftype->type != ftype->type)
+ return FALSE;
+ prev_ftype = ftype;
+ nfields ++;
+ }
+ }
+ if (nfields == 0 || nfields > 4)
+ return FALSE;
+ *out_nfields = nfields;
+ *out_esize = prev_ftype->type == MONO_TYPE_R4 ? 4 : 8;
+ return TRUE;
+}
+
static CallInfo*
get_call_info (MonoGenericSharingContext *gsctx, MonoMemPool *mp, MonoMethodSignature *sig)
{
guint i, gr, fpr, pstart;
gint float_spare;
int n = sig->hasthis + sig->param_count;
- MonoType *simpletype;
+ int nfields, esize;
+ guint32 align;
+ MonoType *t;
guint32 stack_size = 0;
CallInfo *cinfo;
gboolean is_pinvoke = sig->pinvoke;
- MonoType *t;
+ gboolean vtype_retaddr = FALSE;
if (mp)
cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n));
float_spare = -1;
t = mini_type_get_underlying_type (gsctx, sig->ret);
- if (MONO_TYPE_ISSTRUCT (t)) {
- guint32 align;
+ switch (t->type) {
+ case MONO_TYPE_I1:
+ case MONO_TYPE_U1:
+ case MONO_TYPE_I2:
+ case MONO_TYPE_U2:
+ case MONO_TYPE_I4:
+ case MONO_TYPE_U4:
+ case MONO_TYPE_I:
+ case MONO_TYPE_U:
+ case MONO_TYPE_PTR:
+ case MONO_TYPE_FNPTR:
+ case MONO_TYPE_CLASS:
+ case MONO_TYPE_OBJECT:
+ case MONO_TYPE_SZARRAY:
+ case MONO_TYPE_ARRAY:
+ case MONO_TYPE_STRING:
+ cinfo->ret.storage = RegTypeGeneral;
+ cinfo->ret.reg = ARMREG_R0;
+ break;
+ case MONO_TYPE_U8:
+ case MONO_TYPE_I8:
+ cinfo->ret.storage = RegTypeIRegPair;
+ cinfo->ret.reg = ARMREG_R0;
+ break;
+ case MONO_TYPE_R4:
+ case MONO_TYPE_R8:
+ cinfo->ret.storage = RegTypeFP;
- if (is_pinvoke && mono_class_native_size (mono_class_from_mono_type (t), &align) <= sizeof (gpointer)) {
- cinfo->ret.storage = RegTypeStructByVal;
+ if (IS_HARD_FLOAT) {
+ cinfo->ret.reg = ARM_VFP_F0;
+ } else {
+ cinfo->ret.reg = ARMREG_R0;
+ }
+ break;
+ case MONO_TYPE_GENERICINST:
+ if (!mono_type_generic_inst_is_valuetype (t)) {
+ cinfo->ret.storage = RegTypeGeneral;
+ cinfo->ret.reg = ARMREG_R0;
+ break;
+ }
+ // FIXME: Only for variable types
+ if (mini_is_gsharedvt_type_gsctx (gsctx, t)) {
+ cinfo->ret.storage = RegTypeStructByAddr;
+ break;
+ }
+ /* Fall through */
+ case MONO_TYPE_VALUETYPE:
+ case MONO_TYPE_TYPEDBYREF:
+ if (IS_HARD_FLOAT && sig->pinvoke && is_hfa (t, &nfields, &esize)) {
+ cinfo->ret.storage = RegTypeHFA;
+ cinfo->ret.reg = 0;
+ cinfo->ret.nregs = nfields;
+ cinfo->ret.esize = esize;
} else {
- cinfo->vtype_retaddr = TRUE;
+ if (is_pinvoke && mono_class_native_size (mono_class_from_mono_type (t), &align) <= sizeof (gpointer))
+ cinfo->ret.storage = RegTypeStructByVal;
+ else
+ cinfo->ret.storage = RegTypeStructByAddr;
}
- } else if (!(t->type == MONO_TYPE_GENERICINST && !mono_type_generic_inst_is_valuetype (t)) && mini_is_gsharedvt_type_gsctx (gsctx, t)) {
- cinfo->vtype_retaddr = TRUE;
+ break;
+ case MONO_TYPE_VAR:
+ case MONO_TYPE_MVAR:
+ g_assert (mini_is_gsharedvt_type_gsctx (gsctx, t));
+ cinfo->ret.storage = RegTypeStructByAddr;
+ break;
+ case MONO_TYPE_VOID:
+ break;
+ default:
+ g_error ("Can't handle as return value 0x%x", sig->ret->type);
}
+ vtype_retaddr = cinfo->ret.storage == RegTypeStructByAddr;
+
pstart = 0;
n = 0;
/*
* are sometimes made using calli without sig->hasthis set, like in the delegate
* invoke wrappers.
*/
- if (cinfo->vtype_retaddr && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (mini_type_get_underlying_type (gsctx, sig->params [0]))))) {
+ if (vtype_retaddr && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (mini_type_get_underlying_type (gsctx, sig->params [0]))))) {
if (sig->hasthis) {
add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
} else {
pstart = 1;
}
n ++;
- add_general (&gr, &stack_size, &cinfo->ret, TRUE);
+ cinfo->ret.reg = gr;
+ gr ++;
cinfo->vret_arg_index = 1;
} else {
/* this */
add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
n ++;
}
-
- if (cinfo->vtype_retaddr)
- add_general (&gr, &stack_size, &cinfo->ret, TRUE);
+ if (vtype_retaddr) {
+ cinfo->ret.reg = gr;
+ gr ++;
+ }
}
DEBUG(printf("params: %d\n", sig->param_count));
n++;
continue;
}
- simpletype = mini_type_get_underlying_type (gsctx, sig->params [i]);
- switch (simpletype->type) {
- case MONO_TYPE_BOOLEAN:
+ t = mini_type_get_underlying_type (gsctx, sig->params [i]);
+ switch (t->type) {
case MONO_TYPE_I1:
case MONO_TYPE_U1:
cinfo->args [n].size = 1;
add_general (&gr, &stack_size, ainfo, TRUE);
- n++;
break;
- case MONO_TYPE_CHAR:
case MONO_TYPE_I2:
case MONO_TYPE_U2:
cinfo->args [n].size = 2;
add_general (&gr, &stack_size, ainfo, TRUE);
- n++;
break;
case MONO_TYPE_I4:
case MONO_TYPE_U4:
cinfo->args [n].size = 4;
add_general (&gr, &stack_size, ainfo, TRUE);
- n++;
break;
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_ARRAY:
cinfo->args [n].size = sizeof (gpointer);
add_general (&gr, &stack_size, ainfo, TRUE);
- n++;
break;
case MONO_TYPE_GENERICINST:
- if (!mono_type_generic_inst_is_valuetype (simpletype)) {
+ if (!mono_type_generic_inst_is_valuetype (t)) {
cinfo->args [n].size = sizeof (gpointer);
add_general (&gr, &stack_size, ainfo, TRUE);
- n++;
break;
}
- if (mini_is_gsharedvt_type_gsctx (gsctx, simpletype)) {
+ if (mini_is_gsharedvt_type_gsctx (gsctx, t)) {
/* gsharedvt arguments are passed by ref */
- g_assert (mini_is_gsharedvt_type_gsctx (gsctx, simpletype));
+ g_assert (mini_is_gsharedvt_type_gsctx (gsctx, t));
add_general (&gr, &stack_size, ainfo, TRUE);
switch (ainfo->storage) {
case RegTypeGeneral:
default:
g_assert_not_reached ();
}
- n++;
break;
}
/* Fall through */
case MONO_TYPE_VALUETYPE: {
gint size;
int align_size;
- int nwords;
+ int nwords, nfields, esize;
guint32 align;
- if (simpletype->type == MONO_TYPE_TYPEDBYREF) {
+ if (IS_HARD_FLOAT && sig->pinvoke && is_hfa (t, &nfields, &esize)) {
+ if (fpr + nfields < ARM_VFP_F16) {
+ ainfo->storage = RegTypeHFA;
+ ainfo->reg = fpr;
+ ainfo->nregs = nfields;
+ ainfo->esize = esize;
+ fpr += nfields;
+ break;
+ } else {
+ fpr = ARM_VFP_F16;
+ }
+ }
+
+ if (t->type == MONO_TYPE_TYPEDBYREF) {
size = sizeof (MonoTypedRef);
align = sizeof (gpointer);
} else {
if (is_pinvoke)
size = mono_class_native_size (klass, &align);
else
- size = mini_type_stack_size_full (gsctx, simpletype, &align, FALSE);
+ size = mini_type_stack_size_full (gsctx, t, &align, FALSE);
}
DEBUG(printf ("load %d bytes struct\n", size));
align_size = size;
ainfo->offset = stack_size;
/*g_print ("offset for arg %d at %d\n", n, stack_size);*/
stack_size += nwords * sizeof (gpointer);
- n++;
break;
}
case MONO_TYPE_U8:
case MONO_TYPE_I8:
ainfo->size = 8;
add_general (&gr, &stack_size, ainfo, FALSE);
- n++;
break;
case MONO_TYPE_R4:
ainfo->size = 4;
add_float (&fpr, &stack_size, ainfo, FALSE, &float_spare);
else
add_general (&gr, &stack_size, ainfo, TRUE);
-
- n++;
break;
case MONO_TYPE_R8:
ainfo->size = 8;
add_float (&fpr, &stack_size, ainfo, TRUE, &float_spare);
else
add_general (&gr, &stack_size, ainfo, FALSE);
-
- n++;
break;
case MONO_TYPE_VAR:
case MONO_TYPE_MVAR:
/* gsharedvt arguments are passed by ref */
- g_assert (mini_is_gsharedvt_type_gsctx (gsctx, simpletype));
+ g_assert (mini_is_gsharedvt_type_gsctx (gsctx, t));
add_general (&gr, &stack_size, ainfo, TRUE);
switch (ainfo->storage) {
case RegTypeGeneral:
default:
g_assert_not_reached ();
}
- n++;
break;
default:
- g_error ("Can't trampoline 0x%x", sig->params [i]->type);
+ g_error ("Can't handle 0x%x", sig->params [i]->type);
}
+ n ++;
}
/* Handle the case where there are no implicit arguments */
add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
}
- {
- simpletype = mini_type_get_underlying_type (gsctx, sig->ret);
- switch (simpletype->type) {
- case MONO_TYPE_BOOLEAN:
- case MONO_TYPE_I1:
- case MONO_TYPE_U1:
- case MONO_TYPE_I2:
- case MONO_TYPE_U2:
- case MONO_TYPE_CHAR:
- case MONO_TYPE_I4:
- case MONO_TYPE_U4:
- case MONO_TYPE_I:
- case MONO_TYPE_U:
- case MONO_TYPE_PTR:
- case MONO_TYPE_FNPTR:
- case MONO_TYPE_CLASS:
- case MONO_TYPE_OBJECT:
- case MONO_TYPE_SZARRAY:
- case MONO_TYPE_ARRAY:
- case MONO_TYPE_STRING:
- cinfo->ret.storage = RegTypeGeneral;
- cinfo->ret.reg = ARMREG_R0;
- break;
- case MONO_TYPE_U8:
- case MONO_TYPE_I8:
- cinfo->ret.storage = RegTypeIRegPair;
- cinfo->ret.reg = ARMREG_R0;
- break;
- case MONO_TYPE_R4:
- case MONO_TYPE_R8:
- cinfo->ret.storage = RegTypeFP;
-
- if (IS_HARD_FLOAT) {
- cinfo->ret.reg = ARM_VFP_F0;
- } else {
- cinfo->ret.reg = ARMREG_R0;
- }
-
- break;
- case MONO_TYPE_GENERICINST:
- if (!mono_type_generic_inst_is_valuetype (simpletype)) {
- cinfo->ret.storage = RegTypeGeneral;
- cinfo->ret.reg = ARMREG_R0;
- break;
- }
- // FIXME: Only for variable types
- if (mini_is_gsharedvt_type_gsctx (gsctx, simpletype)) {
- cinfo->ret.storage = RegTypeStructByAddr;
- g_assert (cinfo->vtype_retaddr);
- break;
- }
- /* Fall through */
- case MONO_TYPE_VALUETYPE:
- case MONO_TYPE_TYPEDBYREF:
- if (cinfo->ret.storage != RegTypeStructByVal)
- cinfo->ret.storage = RegTypeStructByAddr;
- break;
- case MONO_TYPE_VAR:
- case MONO_TYPE_MVAR:
- g_assert (mini_is_gsharedvt_type_gsctx (gsctx, simpletype));
- cinfo->ret.storage = RegTypeStructByAddr;
- g_assert (cinfo->vtype_retaddr);
- break;
- case MONO_TYPE_VOID:
- break;
- default:
- g_error ("Can't handle as return value 0x%x", sig->ret->type);
- }
- }
-
/* align stack size to 8 */
DEBUG (printf (" stack size: %d (%d)\n", (stack_size + 15) & ~15, stack_size));
stack_size = (stack_size + 7) & ~7;
* the extra stack space would be left on the stack after the tail call.
*/
res = c1->stack_usage >= c2->stack_usage;
- callee_ret = mini_replace_type (callee_sig->ret);
+ callee_ret = mini_get_underlying_type (cfg, callee_sig->ret);
if (callee_ret && MONO_TYPE_ISSTRUCT (callee_ret) && c2->ret.storage != RegTypeStructByVal)
/* An address on the callee's stack is passed as the first argument */
res = FALSE;
MonoType *sig_ret;
int i, offset, size, align, curinst;
CallInfo *cinfo;
+ ArgInfo *ainfo;
guint32 ualign;
sig = mono_method_signature (cfg->method);
if (!cfg->arch.cinfo)
cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
cinfo = cfg->arch.cinfo;
- sig_ret = mini_replace_type (sig->ret);
+ sig_ret = mini_get_underlying_type (cfg, sig->ret);
mono_arch_compute_omit_fp (cfg);
if (cfg->compile_aot || cfg->uses_rgctx_reg || COMPILE_LLVM (cfg))
/* V5 is reserved for passing the vtable/rgctx/IMT method */
- cfg->used_int_regs |= (1 << ARMREG_V5);
+ cfg->used_int_regs |= (1 << MONO_ARCH_IMT_REG);
offset = 0;
curinst = 0;
- if (!MONO_TYPE_ISSTRUCT (sig_ret) && !cinfo->vtype_retaddr) {
+ if (!MONO_TYPE_ISSTRUCT (sig_ret) && cinfo->ret.storage != RegTypeStructByAddr) {
if (sig_ret->type != MONO_TYPE_VOID) {
cfg->ret->opcode = OP_REGVAR;
cfg->ret->inst_c0 = ARMREG_R0;
if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
offset += 8;
- /* the MonoLMF structure is stored just below the stack pointer */
- if (cinfo->ret.storage == RegTypeStructByVal) {
+ switch (cinfo->ret.storage) {
+ case RegTypeStructByVal:
cfg->ret->opcode = OP_REGOFFSET;
cfg->ret->inst_basereg = cfg->frame_reg;
offset += sizeof (gpointer) - 1;
offset &= ~(sizeof (gpointer) - 1);
cfg->ret->inst_offset = - offset;
offset += sizeof(gpointer);
- } else if (cinfo->vtype_retaddr) {
+ break;
+ case RegTypeHFA:
+ /* Allocate a local to hold the result; the epilog will copy it to the correct place */
+ offset = ALIGN_TO (offset, 8);
+ cfg->ret->opcode = OP_REGOFFSET;
+ cfg->ret->inst_basereg = cfg->frame_reg;
+ cfg->ret->inst_offset = offset;
+ // FIXME:
+ offset += 32;
+ break;
+ case RegTypeStructByAddr:
ins = cfg->vret_addr;
offset += sizeof(gpointer) - 1;
offset &= ~(sizeof(gpointer) - 1);
mono_print_ins (cfg->vret_addr);
}
offset += sizeof(gpointer);
+ break;
+ default:
+ break;
}
/* Allocate these first so they have a small offset, OP_SEQ_POINT depends on this */
}
for (i = 0; i < sig->param_count; ++i) {
+ ainfo = cinfo->args + i;
+
ins = cfg->args [curinst];
+ switch (ainfo->storage) {
+ case RegTypeHFA:
+ offset = ALIGN_TO (offset, 8);
+ ins->opcode = OP_REGOFFSET;
+ ins->inst_basereg = cfg->frame_reg;
+ /* These arguments are saved to the stack in the prolog */
+ ins->inst_offset = offset;
+ if (cfg->verbose_level >= 2)
+ printf ("arg %d allocated to %s+0x%0x.\n", i, mono_arch_regname (ins->inst_basereg), (int)ins->inst_offset);
+ // FIXME:
+ offset += 32;
+ break;
+ default:
+ break;
+ }
+
if (ins->opcode != OP_REGVAR) {
ins->opcode = OP_REGOFFSET;
ins->inst_basereg = cfg->frame_reg;
if (cinfo->ret.storage == RegTypeStructByVal)
cfg->ret_var_is_local = TRUE;
- if (cinfo->vtype_retaddr) {
+ if (cinfo->ret.storage == RegTypeStructByAddr) {
cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
if (G_UNLIKELY (cfg->verbose_level > 1)) {
printf ("vret_addr = ");
}
}
- if (cfg->gen_seq_points_debug_data) {
+ if (cfg->gen_sdb_seq_points) {
if (cfg->soft_breakpoints) {
MonoInst *ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
ins->flags |= MONO_INST_VOLATILE;
* - we only pass/receive them in registers in some cases, and only
* in 1 or 2 integer registers.
*/
- if (cinfo->vtype_retaddr) {
+ if (cinfo->ret.storage == RegTypeStructByAddr) {
/* Vtype returned using a hidden argument */
linfo->ret.storage = LLVMArgVtypeRetAddr;
linfo->vret_arg_index = cinfo->vret_arg_index;
linfo->args [i].storage = LLVMArgInIReg;
break;
case RegTypeStructByVal:
- // FIXME: Passing entirely on the stack or split reg/stack
- if (ainfo->vtsize == 0 && ainfo->size <= 2) {
- linfo->args [i].storage = LLVMArgVtypeInReg;
- linfo->args [i].pair_storage [0] = LLVMArgInIReg;
- if (ainfo->size == 2)
- linfo->args [i].pair_storage [1] = LLVMArgInIReg;
- else
- linfo->args [i].pair_storage [1] = LLVMArgNone;
- } else {
- cfg->exception_message = g_strdup_printf ("vtype-by-val on stack");
- cfg->disable_llvm = TRUE;
- }
+ linfo->args [i].storage = LLVMArgAsIArgs;
+ linfo->args [i].nslots = ainfo->struct_size / sizeof (gpointer);
break;
default:
cfg->exception_message = g_strdup_printf ("ainfo->storage (%d)", ainfo->storage);
sig = call->signature;
n = sig->param_count + sig->hasthis;
- cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig);
+ cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
+
+ switch (cinfo->ret.storage) {
+ case RegTypeStructByVal:
+ /* The JIT will transform this into a normal call */
+ call->vret_in_reg = TRUE;
+ break;
+ case RegTypeHFA:
+ /*
+ * The vtype is returned in registers. Save the return area address in a local; after the
+ * call, emit_move_return_value () stores the vtype into the location it points to.
+ */
+ if (!cfg->arch.vret_addr_loc) {
+ cfg->arch.vret_addr_loc = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
+ /* Prevent it from being register allocated or optimized away */
+ ((MonoInst*)cfg->arch.vret_addr_loc)->flags |= MONO_INST_VOLATILE;
+ }
+
+ MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, ((MonoInst*)cfg->arch.vret_addr_loc)->dreg, call->vret_var->dreg);
+ break;
+ case RegTypeStructByAddr: {
+ MonoInst *vtarg;
+ MONO_INST_NEW (cfg, vtarg, OP_MOVE);
+ vtarg->sreg1 = call->vret_var->dreg;
+ vtarg->dreg = mono_alloc_preg (cfg);
+ MONO_ADD_INS (cfg->cbb, vtarg);
+
+ mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE);
+ break;
+ }
+ default:
+ break;
+ }
for (i = 0; i < n; ++i) {
ArgInfo *ainfo = cinfo->args + i;
case RegTypeStructByVal:
case RegTypeGSharedVtInReg:
case RegTypeGSharedVtOnStack:
+ case RegTypeHFA:
MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
ins->opcode = OP_OUTARG_VT;
ins->sreg1 = in->dreg;
if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos))
emit_sig_cookie (cfg, call, cinfo);
- if (cinfo->ret.storage == RegTypeStructByVal) {
- /* The JIT will transform this into a normal call */
- call->vret_in_reg = TRUE;
- } else if (cinfo->vtype_retaddr) {
- MonoInst *vtarg;
- MONO_INST_NEW (cfg, vtarg, OP_MOVE);
- vtarg->sreg1 = call->vret_var->dreg;
- vtarg->dreg = mono_alloc_preg (cfg);
- MONO_ADD_INS (cfg->cbb, vtarg);
-
- mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE);
- }
-
+ call->call_info = cinfo;
call->stack_usage = cinfo->stack_usage;
+}
- g_free (cinfo);
+static void
+add_outarg_reg (MonoCompile *cfg, MonoCallInst *call, ArgStorage storage, int reg, MonoInst *arg)
+{
+ MonoInst *ins;
+
+ switch (storage) {
+ case RegTypeFP:
+ MONO_INST_NEW (cfg, ins, OP_FMOVE);
+ ins->dreg = mono_alloc_freg (cfg);
+ ins->sreg1 = arg->dreg;
+ MONO_ADD_INS (cfg->cbb, ins);
+ mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, reg, TRUE);
+ break;
+ default:
+ g_assert_not_reached ();
+ break;
+ }
}
void
mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
{
MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
+ MonoInst *load;
ArgInfo *ainfo = ins->inst_p1;
int ovf_size = ainfo->vtsize;
int doffset = ainfo->offset;
int struct_size = ainfo->struct_size;
int i, soffset, dreg, tmpreg;
- if (ainfo->storage == RegTypeGSharedVtInReg) {
+ switch (ainfo->storage) {
+ case RegTypeGSharedVtInReg:
/* Pass by addr */
mono_call_inst_add_outarg_reg (cfg, call, src->dreg, ainfo->reg, FALSE);
- return;
- }
- if (ainfo->storage == RegTypeGSharedVtOnStack) {
+ break;
+ case RegTypeGSharedVtOnStack:
/* Pass by addr on stack */
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, src->dreg);
- return;
- }
+ break;
+ case RegTypeHFA:
+ for (i = 0; i < ainfo->nregs; ++i) {
+ if (ainfo->esize == 4)
+ MONO_INST_NEW (cfg, load, OP_LOADR4_MEMBASE);
+ else
+ MONO_INST_NEW (cfg, load, OP_LOADR8_MEMBASE);
+ load->dreg = mono_alloc_freg (cfg);
+ load->inst_basereg = src->dreg;
+ load->inst_offset = i * ainfo->esize;
+ MONO_ADD_INS (cfg->cbb, load);
- soffset = 0;
- for (i = 0; i < ainfo->size; ++i) {
- dreg = mono_alloc_ireg (cfg);
- switch (struct_size) {
- case 1:
- MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, src->dreg, soffset);
- break;
- case 2:
- MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, dreg, src->dreg, soffset);
- break;
- case 3:
- tmpreg = mono_alloc_ireg (cfg);
- MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, src->dreg, soffset);
- MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, tmpreg, src->dreg, soffset + 1);
- MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, tmpreg, tmpreg, 8);
- MONO_EMIT_NEW_BIALU (cfg, OP_IOR, dreg, dreg, tmpreg);
- MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, tmpreg, src->dreg, soffset + 2);
- MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, tmpreg, tmpreg, 16);
- MONO_EMIT_NEW_BIALU (cfg, OP_IOR, dreg, dreg, tmpreg);
- break;
- default:
- MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, soffset);
- break;
+ if (ainfo->esize == 4) {
+ FloatArgData *fad;
+
+ /* See RegTypeFP in mono_arch_emit_call () */
+ MonoInst *float_arg = mono_compile_create_var (cfg, &mono_defaults.single_class->byval_arg, OP_LOCAL);
+ float_arg->flags |= MONO_INST_VOLATILE;
+ MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, float_arg->dreg, load->dreg);
+
+ fad = mono_mempool_alloc0 (cfg->mempool, sizeof (FloatArgData));
+ fad->vreg = float_arg->dreg;
+ fad->hreg = ainfo->reg + i;
+
+ call->float_args = g_slist_append_mempool (cfg->mempool, call->float_args, fad);
+ } else {
+ add_outarg_reg (cfg, call, RegTypeFP, ainfo->reg + i, load);
+ }
}
- mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg + i, FALSE);
- soffset += sizeof (gpointer);
- struct_size -= sizeof (gpointer);
+ break;
+ default:
+ soffset = 0;
+ for (i = 0; i < ainfo->size; ++i) {
+ dreg = mono_alloc_ireg (cfg);
+ switch (struct_size) {
+ case 1:
+ MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, src->dreg, soffset);
+ break;
+ case 2:
+ MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, dreg, src->dreg, soffset);
+ break;
+ case 3:
+ tmpreg = mono_alloc_ireg (cfg);
+ MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, src->dreg, soffset);
+ MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, tmpreg, src->dreg, soffset + 1);
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, tmpreg, tmpreg, 8);
+ MONO_EMIT_NEW_BIALU (cfg, OP_IOR, dreg, dreg, tmpreg);
+ MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, tmpreg, src->dreg, soffset + 2);
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, tmpreg, tmpreg, 16);
+ MONO_EMIT_NEW_BIALU (cfg, OP_IOR, dreg, dreg, tmpreg);
+ break;
+ default:
+ MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, soffset);
+ break;
+ }
+ mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg + i, FALSE);
+ soffset += sizeof (gpointer);
+ struct_size -= sizeof (gpointer);
+ }
+ //g_print ("vt size: %d at R%d + %d\n", doffset, vt->inst_basereg, vt->inst_offset);
+ if (ovf_size != 0)
+ mini_emit_memcpy (cfg, ARMREG_SP, doffset, src->dreg, soffset, MIN (ovf_size * sizeof (gpointer), struct_size), struct_size < 4 ? 1 : 4);
+ break;
}
- //g_print ("vt size: %d at R%d + %d\n", doffset, vt->inst_basereg, vt->inst_offset);
- if (ovf_size != 0)
- mini_emit_memcpy (cfg, ARMREG_SP, doffset, src->dreg, soffset, MIN (ovf_size * sizeof (gpointer), struct_size), struct_size < 4 ? 1 : 4);
}
void
// FIXME: Preprocess the info to speed up start_dyn_call ()
info->sig = sig;
info->cinfo = cinfo;
- info->rtype = mini_replace_type (sig->ret);
+ info->rtype = mini_type_get_underlying_type (NULL, sig->ret);
info->param_types = g_new0 (MonoType*, sig->param_count);
for (i = 0; i < sig->param_count; ++i)
- info->param_types [i] = mini_replace_type (sig->params [i]);
+ info->param_types [i] = mini_type_get_underlying_type (NULL, sig->params [i]);
return (MonoDynCallInfo*)info;
}
pindex = 1;
}
- if (dinfo->cinfo->vtype_retaddr)
+ if (dinfo->cinfo->ret.storage == RegTypeStructByAddr)
p->regs [greg ++] = (mgreg_t)ret;
for (i = pindex; i < sig->param_count; i++) {
case MONO_TYPE_U:
p->regs [slot] = (mgreg_t)*arg;
break;
- case MONO_TYPE_BOOLEAN:
case MONO_TYPE_U1:
p->regs [slot] = *(guint8*)arg;
break;
p->regs [slot] = *(gint16*)arg;
break;
case MONO_TYPE_U2:
- case MONO_TYPE_CHAR:
p->regs [slot] = *(guint16*)arg;
break;
case MONO_TYPE_I4:
*(gint8*)ret = res;
break;
case MONO_TYPE_U1:
- case MONO_TYPE_BOOLEAN:
*(guint8*)ret = res;
break;
case MONO_TYPE_I2:
*(gint16*)ret = res;
break;
case MONO_TYPE_U2:
- case MONO_TYPE_CHAR:
*(guint16*)ret = res;
break;
case MONO_TYPE_I4:
/* Fall though */
}
case MONO_TYPE_VALUETYPE:
- g_assert (ainfo->cinfo->vtype_retaddr);
+ g_assert (ainfo->cinfo->ret.storage == RegTypeStructByAddr);
/* Nothing to do */
break;
case MONO_TYPE_R4:
void
mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
{
- MonoInst *ins, *n, *last_ins = NULL;
+ MonoInst *ins, *n;
MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
+ MonoInst *last_ins = mono_inst_prev (ins, FILTER_IL_SEQ_POINT);
+
switch (ins->opcode) {
case OP_MUL_IMM:
case OP_IMUL_IMM:
}
break;
}
- last_ins = ins;
- ins = ins->next;
}
- bb->last_ins = last_ins;
}
/*
ins->opcode = map_to_reg_reg_op (ins->opcode);
last_ins = temp;
goto loop_start; /* make it handle the possibly big ins->inst_offset */
- case OP_FCOMPARE: {
+ case OP_FCOMPARE:
+ case OP_RCOMPARE: {
gboolean swap = FALSE;
int reg;
return code;
}
-#endif /* #ifndef DISABLE_JIT */
+static guchar*
+emit_r4_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
+{
+ /* sreg is a float, dreg is an integer reg */
+ g_assert (IS_VFP);
+ code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
+ if (is_signed)
+ ARM_TOSIZS (code, vfp_scratch1, sreg);
+ else
+ ARM_TOUIZS (code, vfp_scratch1, sreg);
+ ARM_FMRS (code, dreg, vfp_scratch1);
+ code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
-typedef struct {
- guchar *code;
- const guchar *target;
- int absolute;
- int found;
-} PatchData;
+ if (!is_signed) {
+ if (size == 1)
+ ARM_AND_REG_IMM8 (code, dreg, dreg, 0xff);
+ else if (size == 2) {
+ ARM_SHL_IMM (code, dreg, dreg, 16);
+ ARM_SHR_IMM (code, dreg, dreg, 16);
+ }
+ } else {
+ if (size == 1) {
+ ARM_SHL_IMM (code, dreg, dreg, 24);
+ ARM_SAR_IMM (code, dreg, dreg, 24);
+ } else if (size == 2) {
+ ARM_SHL_IMM (code, dreg, dreg, 16);
+ ARM_SAR_IMM (code, dreg, dreg, 16);
+ }
+ }
+ return code;
+}
+
+#endif /* #ifndef DISABLE_JIT */
#define is_call_imm(diff) ((gint)(diff) >= -33554432 && (gint)(diff) <= 33554431)
-static int
-search_thunk_slot (void *data, int csize, int bsize, void *user_data) {
- PatchData *pdata = (PatchData*)user_data;
- guchar *code = data;
- guint32 *thunks = data;
- guint32 *endthunks = (guint32*)(code + bsize);
- int count = 0;
- int difflow, diffhigh;
-
- /* always ensure a call from pdata->code can reach to the thunks without further thunks */
- difflow = (char*)pdata->code - (char*)thunks;
- diffhigh = (char*)pdata->code - (char*)endthunks;
- if (!((is_call_imm (thunks) && is_call_imm (endthunks)) || (is_call_imm (difflow) && is_call_imm (diffhigh))))
- return 0;
+/*
+ * emit_thunk:
+ *
+ *   Write a THUNK_SIZE (3 word) call thunk at CODE:
+ *     ldr ip, [pc, #0]   ; load TARGET literal
+ *     bx ip / mov pc, ip ; branch to it
+ *     .word TARGET
+ * and flush the icache for the emitted range.
+ */
+static void
+emit_thunk (guint8 *code, gconstpointer target)
+{
+ guint8 *p = code;
- /*
- * The thunk is composed of 3 words:
- * load constant from thunks [2] into ARM_IP
- * bx to ARM_IP
- * address constant
- * Note that the LR register is already setup
- */
- //g_print ("thunk nentries: %d\n", ((char*)endthunks - (char*)thunks)/16);
- if ((pdata->found == 2) || (pdata->code >= code && pdata->code <= code + csize)) {
- while (thunks < endthunks) {
- //g_print ("looking for target: %p at %p (%08x-%08x)\n", pdata->target, thunks, thunks [0], thunks [1]);
- if (thunks [2] == (guint32)pdata->target) {
- arm_patch (pdata->code, (guchar*)thunks);
- mono_arch_flush_icache (pdata->code, 4);
- pdata->found = 1;
- return 1;
- } else if ((thunks [0] == 0) && (thunks [1] == 0) && (thunks [2] == 0)) {
- /* found a free slot instead: emit thunk */
- /* ARMREG_IP is fine to use since this can't be an IMT call
- * which is indirect
- */
- code = (guchar*)thunks;
- ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
- if (thumb_supported)
- ARM_BX (code, ARMREG_IP);
- else
- ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
- thunks [2] = (guint32)pdata->target;
- mono_arch_flush_icache ((guchar*)thunks, 12);
-
- arm_patch (pdata->code, (guchar*)thunks);
- mono_arch_flush_icache (pdata->code, 4);
- pdata->found = 1;
- return 1;
- }
- /* skip 12 bytes, the size of the thunk */
- thunks += 3;
- count++;
- }
- //g_print ("failed thunk lookup for %p from %p at %p (%d entries)\n", pdata->target, pdata->code, data, count);
- }
- return 0;
+ /* ldr ip, [pc, #0]: in ARM state pc reads as this insn + 8, i.e. the literal word below */
+ ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
+ if (thumb_supported)
+ ARM_BX (code, ARMREG_IP);
+ else
+ ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
+ /* the literal word read by the ldr above */
+ *(guint32*)code = (guint32)target;
+ code += 4;
+ mono_arch_flush_icache (p, code - p);
}
+/*
+ * handle_thunk:
+ *
+ *   Redirect the call at CODE to TARGET through a thunk, used when the
+ * direct branch displacement cannot reach TARGET.  During JITting
+ * (CFG != NULL) a fresh slot is consumed from the per-method thunk area
+ * (cfg->thunks / cfg->thunk_area); at runtime the thunk area is located via
+ * the method's MonoJitInfo and a free (or already matching) slot is reused
+ * under the arch lock.
+ */
static void
-handle_thunk (MonoDomain *domain, int absolute, guchar *code, const guchar *target, MonoCodeManager *dyn_code_mp)
+handle_thunk (MonoCompile *cfg, MonoDomain *domain, guchar *code, const guchar *target)
{
- PatchData pdata;
+ MonoJitInfo *ji = NULL;
+ MonoThunkJitInfo *info;
+ guint8 *thunks, *p;
+ int thunks_size;
+ guint8 *orig_target;
+ guint8 *target_thunk;
if (!domain)
domain = mono_domain_get ();
- pdata.code = code;
- pdata.target = target;
- pdata.absolute = absolute;
- pdata.found = 0;
+ if (cfg) {
+ /*
+ * This can be called multiple times during JITting,
+ * save the current position in cfg->arch to avoid
+ * doing a O(n^2) search.
+ */
+ if (!cfg->arch.thunks) {
+ cfg->arch.thunks = cfg->thunks;
+ cfg->arch.thunks_size = cfg->thunk_area;
+ }
+ thunks = cfg->arch.thunks;
+ thunks_size = cfg->arch.thunks_size;
+ if (!thunks_size) {
+ g_print ("thunk failed %p->%p, thunk space=%d method %s", code, target, thunks_size, mono_method_full_name (cfg->method, TRUE));
+ g_assert_not_reached ();
+ }
- if (dyn_code_mp) {
- mono_code_manager_foreach (dyn_code_mp, search_thunk_slot, &pdata);
- }
+ /* slots are consumed in order, so the next slot must still be empty */
+ g_assert (*(guint32*)thunks == 0);
+ emit_thunk (thunks, target);
+ arm_patch (code, thunks);
- if (pdata.found != 1) {
- mono_domain_lock (domain);
- mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
+ cfg->arch.thunks += THUNK_SIZE;
+ cfg->arch.thunks_size -= THUNK_SIZE;
+ } else {
+ ji = mini_jit_info_table_find (domain, (char*)code, NULL);
+ g_assert (ji);
+ info = mono_jit_info_get_thunk_info (ji);
+ g_assert (info);
- if (!pdata.found) {
- /* this uses the first available slot */
- pdata.found = 2;
- mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
- }
- mono_domain_unlock (domain);
- }
+ thunks = (guint8*)ji->code_start + info->thunks_offset;
+ thunks_size = info->thunks_size;
- if (pdata.found != 1) {
- GHashTable *hash;
- GHashTableIter iter;
- MonoJitDynamicMethodInfo *ji;
+ orig_target = mono_arch_get_call_target (code + 4);
- /*
- * This might be a dynamic method, search its code manager. We can only
- * use the dynamic method containing CODE, since the others might be freed later.
- */
- pdata.found = 0;
+ mono_mini_arch_lock ();
- mono_domain_lock (domain);
- hash = domain_jit_info (domain)->dynamic_code_hash;
- if (hash) {
- /* FIXME: Speed this up */
- g_hash_table_iter_init (&iter, hash);
- while (g_hash_table_iter_next (&iter, NULL, (gpointer*)&ji)) {
- mono_code_manager_foreach (ji->code_mp, search_thunk_slot, &pdata);
- if (pdata.found == 1)
+ target_thunk = NULL;
+ if (orig_target >= thunks && orig_target < thunks + thunks_size) {
+ /* The call already points to a thunk, because of trampolines etc. */
+ target_thunk = orig_target;
+ } else {
+ /* linear scan for a free (zeroed) slot */
+ for (p = thunks; p < thunks + thunks_size; p += THUNK_SIZE) {
+ if (((guint32*)p) [0] == 0) {
+ /* Free entry */
+ target_thunk = p;
break;
+ }
}
}
- mono_domain_unlock (domain);
+
+ //printf ("THUNK: %p %p %p\n", code, target, target_thunk);
+
+ if (!target_thunk) {
+ mono_mini_arch_unlock ();
+ /* NOTE(review): cfg is always NULL on this path, so the ternary always picks the ji branch */
+ g_print ("thunk failed %p->%p, thunk space=%d method %s", code, target, thunks_size, cfg ? mono_method_full_name (cfg->method, TRUE) : mono_method_full_name (jinfo_get_method (ji), TRUE));
+ g_assert_not_reached ();
+ }
+
+ emit_thunk (target_thunk, target);
+ arm_patch (code, target_thunk);
+ mono_arch_flush_icache (code, 4);
+
+ mono_mini_arch_unlock ();
}
- if (pdata.found != 1)
- g_print ("thunk failed for %p from %p\n", target, code);
- g_assert (pdata.found == 1);
}
static void
-arm_patch_general (MonoDomain *domain, guchar *code, const guchar *target, MonoCodeManager *dyn_code_mp)
+arm_patch_general (MonoCompile *cfg, MonoDomain *domain, guchar *code, const guchar *target)
{
guint32 *code32 = (void*)code;
guint32 ins = *code32;
}
}
- handle_thunk (domain, TRUE, code, target, dyn_code_mp);
+ handle_thunk (cfg, domain, code, target);
return;
}
void
arm_patch (guchar *code, const guchar *target)
{
- arm_patch_general (NULL, code, target, NULL);
+ arm_patch_general (NULL, NULL, code, target);
}
/*
cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig);
- if (cinfo->vtype_retaddr) {
+ if (cinfo->ret.storage == RegTypeStructByAddr) {
ArgInfo *ainfo = &cinfo->ret;
inst = cfg->vret_addr;
g_assert (arm_is_imm12 (inst->inst_offset));
NOT_IMPLEMENTED;
}
}
- pos ++;
+ pos ++;
+ }
+
+ g_free (cinfo);
+
+ return code;
+}
+
+/*
+ * emit_move_return_value:
+ *
+ *   Emit code moving the return value of the call INS from its ABI return
+ * location into ins->dreg.  HFA returns are stored through the saved vret
+ * address (cfg->arch.vret_addr_loc); R8/R4 returns are moved out of the VFP
+ * return registers (hard float) or r0/r0:r1 (soft float).
+ */
+static guint8*
+emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
+{
+ CallInfo *cinfo;
+ MonoCallInst *call;
+
+ call = (MonoCallInst*)ins;
+ cinfo = call->call_info;
+
+ switch (cinfo->ret.storage) {
+ case RegTypeHFA: {
+ MonoInst *loc = cfg->arch.vret_addr_loc;
+ int i;
+
+ /* Load the destination address */
+ g_assert (loc && loc->opcode == OP_REGOFFSET);
+
+ if (arm_is_imm12 (loc->inst_offset)) {
+ ARM_LDR_IMM (code, ARMREG_LR, loc->inst_basereg, loc->inst_offset);
+ } else {
+ code = mono_arm_emit_load_imm (code, ARMREG_LR, loc->inst_offset);
+ ARM_LDR_REG_REG (code, ARMREG_LR, loc->inst_basereg, ARMREG_LR);
+ }
+ /* store each HFA element (esize 4 = float, else double) to the vret buffer */
+ for (i = 0; i < cinfo->ret.nregs; ++i) {
+ if (cinfo->ret.esize == 4)
+ ARM_FSTS (code, cinfo->ret.reg + i, ARMREG_LR, i * 4);
+ else
+ ARM_FSTD (code, cinfo->ret.reg + (i * 2), ARMREG_LR, i * 8);
+ }
+ break;
+ }
+ default:
+ break;
+ }
+
+ switch (ins->opcode) {
+ case OP_FCALL:
+ case OP_FCALL_REG:
+ case OP_FCALL_MEMBASE:
+ if (IS_VFP) {
+ MonoType *sig_ret = mini_type_get_underlying_type (NULL, ((MonoCallInst*)ins)->signature->ret);
+ if (sig_ret->type == MONO_TYPE_R4) {
+ /* r4 result widened to r8 in dreg */
+ if (IS_HARD_FLOAT) {
+ ARM_CVTS (code, ins->dreg, ARM_VFP_F0);
+ } else {
+ ARM_FMSR (code, ins->dreg, ARMREG_R0);
+ ARM_CVTS (code, ins->dreg, ins->dreg);
+ }
+ } else {
+ if (IS_HARD_FLOAT) {
+ ARM_CPYD (code, ins->dreg, ARM_VFP_D0);
+ } else {
+ ARM_FMDRR (code, ARMREG_R0, ARMREG_R1, ins->dreg);
+ }
+ }
+ }
+ break;
+ case OP_RCALL:
+ case OP_RCALL_REG:
+ case OP_RCALL_MEMBASE: {
+ MonoType *sig_ret;
+
+ g_assert (IS_VFP);
+
+ /* r4 result kept in single precision (r4fp mode) */
+ sig_ret = mini_type_get_underlying_type (NULL, ((MonoCallInst*)ins)->signature->ret);
+ g_assert (sig_ret->type == MONO_TYPE_R4);
+ if (IS_HARD_FLOAT) {
+ ARM_CPYS (code, ins->dreg, ARM_VFP_F0);
+ } else {
+ ARM_FMSR (code, ins->dreg, ARMREG_R0);
+ ARM_CPYS (code, ins->dreg, ins->dreg);
+ }
+ break;
+ }
+ default:
+ break;
+ }
-
- g_free (cinfo);
return code;
}
break;
case OP_ATOMIC_LOAD_R4:
code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
+ ARM_ADD_REG_REG (code, ARMREG_LR, ins->inst_basereg, ARMREG_LR);
ARM_FLDS (code, vfp_scratch1, ARMREG_LR, 0);
ARM_CVTS (code, ins->dreg, vfp_scratch1);
code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
break;
case OP_ATOMIC_STORE_R4:
code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
+ ARM_ADD_REG_REG (code, ARMREG_LR, ins->inst_destbasereg, ARMREG_LR);
ARM_CVTD (code, vfp_scratch1, ins->sreg1);
- ARM_FSTS (code, vfp_scratch1, ins->inst_destbasereg, ins->inst_offset);
+ ARM_FSTS (code, vfp_scratch1, ARMREG_LR, 0);
code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
break;
case OP_ATOMIC_STORE_R8:
break;
}
case OP_FMOVE:
- if (IS_VFP)
+ if (IS_VFP && ins->dreg != ins->sreg1)
ARM_CPYD (code, ins->dreg, ins->sreg1);
break;
+ case OP_MOVE_F_TO_I4:
+ code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
+ ARM_CVTD (code, vfp_scratch1, ins->sreg1);
+ ARM_FMRS (code, ins->dreg, vfp_scratch1);
+ code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
+ break;
+ case OP_MOVE_I4_TO_F:
+ ARM_FMSR (code, ins->dreg, ins->sreg1);
+ ARM_CVTS (code, ins->dreg, ins->dreg);
+ break;
case OP_FCONV_TO_R4:
if (IS_VFP) {
- ARM_CVTD (code, ins->dreg, ins->sreg1);
- ARM_CVTS (code, ins->dreg, ins->dreg);
+ if (cfg->r4fp) {
+ ARM_CVTD (code, ins->dreg, ins->sreg1);
+ } else {
+ ARM_CVTD (code, ins->dreg, ins->sreg1);
+ ARM_CVTS (code, ins->dreg, ins->dreg);
+ }
}
break;
case OP_JMP:
break;
}
case OP_FCALL:
+ case OP_RCALL:
case OP_LCALL:
case OP_VCALL:
case OP_VCALL2:
code = emit_move_return_value (cfg, ins, code);
break;
case OP_FCALL_REG:
+ case OP_RCALL_REG:
case OP_LCALL_REG:
case OP_VCALL_REG:
case OP_VCALL2_REG:
code = emit_move_return_value (cfg, ins, code);
break;
case OP_FCALL_MEMBASE:
+ case OP_RCALL_MEMBASE:
case OP_LCALL_MEMBASE:
case OP_VCALL_MEMBASE:
case OP_VCALL2_MEMBASE:
case OP_VOIDCALL_MEMBASE:
case OP_CALL_MEMBASE: {
- gboolean imt_arg = FALSE;
-
g_assert (ins->sreg1 != ARMREG_LR);
call = (MonoCallInst*)ins;
if (IS_HARD_FLOAT)
code = emit_float_args (cfg, call, code, &max_len, &offset);
-
- if (call->dynamic_imt_arg || call->method->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
- imt_arg = TRUE;
if (!arm_is_imm12 (ins->inst_offset))
code = mono_arm_emit_load_imm (code, ARMREG_IP, ins->inst_offset);
-#ifdef USE_JUMP_TABLES
-#define LR_BIAS 0
-#else
-#define LR_BIAS 4
-#endif
- if (imt_arg)
- ARM_ADD_REG_IMM8 (code, ARMREG_LR, ARMREG_PC, LR_BIAS);
- else
- ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
-#undef LR_BIAS
+ ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
if (!arm_is_imm12 (ins->inst_offset))
ARM_LDR_REG_REG (code, ARMREG_PC, ins->sreg1, ARMREG_IP);
else
ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
- if (imt_arg) {
- /*
- * We can't embed the method in the code stream in PIC code, or
- * in gshared code.
- * Instead, we put it in V5 in code emitted by
- * mono_arch_emit_imt_argument (), and embed NULL here to
- * signal the IMT thunk that the value is in V5.
- */
-#ifdef USE_JUMP_TABLES
- /* In case of jumptables we always use value in V5. */
-#else
-
- if (call->dynamic_imt_arg)
- *((gpointer*)code) = NULL;
- else
- *((gpointer*)code) = (gpointer)call->method;
- code += 4;
-#endif
- }
ins->flags |= MONO_INST_GC_CALLSITE;
ins->backend.pc_offset = code - cfg->native_code;
code = emit_move_return_value (cfg, ins, code);
case OP_CALL_HANDLER:
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
code = mono_arm_patchable_bl (code, ARMCOND_AL);
+ cfg->thunk_area += THUNK_SIZE;
mono_cfg_add_try_hole (cfg, ins->inst_eh_block, code, bb);
break;
+ case OP_GET_EX_OBJ:
+ if (ins->dreg != ARMREG_R0)
+ ARM_MOV_REG_REG (code, ins->dreg, ARMREG_R0);
+ break;
+
case OP_LABEL:
ins->inst_c0 = code - cfg->native_code;
break;
ARM_B (code, 0);
*(guint32*)code = ((guint32*)(ins->inst_p0))[0];
code += 4;
- ARM_CVTS (code, ins->dreg, ins->dreg);
+ if (!cfg->r4fp)
+ ARM_CVTS (code, ins->dreg, ins->dreg);
} else {
code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
ARM_FLDS (code, ins->dreg, ARMREG_LR, 0);
- ARM_CVTS (code, ins->dreg, ins->dreg);
+ if (!cfg->r4fp)
+ ARM_CVTS (code, ins->dreg, ins->dreg);
}
break;
case OP_STORER8_MEMBASE_REG:
break;
case OP_STORER4_MEMBASE_REG:
g_assert (arm_is_fpimm8 (ins->inst_offset));
- code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
- ARM_CVTD (code, vfp_scratch1, ins->sreg1);
- ARM_FSTS (code, vfp_scratch1, ins->inst_destbasereg, ins->inst_offset);
- code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
+ if (cfg->r4fp) {
+ ARM_FSTS (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
+ } else {
+ code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
+ ARM_CVTD (code, vfp_scratch1, ins->sreg1);
+ ARM_FSTS (code, vfp_scratch1, ins->inst_destbasereg, ins->inst_offset);
+ code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
+ }
break;
case OP_LOADR4_MEMBASE:
- g_assert (arm_is_fpimm8 (ins->inst_offset));
- code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
- ARM_FLDS (code, vfp_scratch1, ins->inst_basereg, ins->inst_offset);
- ARM_CVTS (code, ins->dreg, vfp_scratch1);
- code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
+ if (cfg->r4fp) {
+ ARM_FLDS (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
+ } else {
+ g_assert (arm_is_fpimm8 (ins->inst_offset));
+ code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
+ ARM_FLDS (code, vfp_scratch1, ins->inst_basereg, ins->inst_offset);
+ ARM_CVTS (code, ins->dreg, vfp_scratch1);
+ code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
+ }
break;
case OP_ICONV_TO_R_UN: {
g_assert_not_reached ();
break;
}
case OP_ICONV_TO_R4:
- code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
- ARM_FMSR (code, vfp_scratch1, ins->sreg1);
- ARM_FSITOS (code, vfp_scratch1, vfp_scratch1);
- ARM_CVTS (code, ins->dreg, vfp_scratch1);
- code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
+ if (cfg->r4fp) {
+ ARM_FMSR (code, ins->dreg, ins->sreg1);
+ ARM_FSITOS (code, ins->dreg, ins->dreg);
+ } else {
+ code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
+ ARM_FMSR (code, vfp_scratch1, ins->sreg1);
+ ARM_FSITOS (code, vfp_scratch1, vfp_scratch1);
+ ARM_CVTS (code, ins->dreg, vfp_scratch1);
+ code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
+ }
break;
case OP_ICONV_TO_R8:
code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
case OP_SETFRET: {
MonoType *sig_ret = mini_type_get_underlying_type (NULL, mono_method_signature (cfg->method)->ret);
if (sig_ret->type == MONO_TYPE_R4) {
- ARM_CVTD (code, ARM_VFP_F0, ins->sreg1);
+ if (cfg->r4fp) {
+ g_assert (!IS_HARD_FLOAT);
+ ARM_FMRS (code, ARMREG_R0, ins->sreg1);
+ } else {
+ ARM_CVTD (code, ARM_VFP_F0, ins->sreg1);
- if (!IS_HARD_FLOAT) {
- ARM_FMRS (code, ARMREG_R0, ARM_VFP_F0);
+ if (!IS_HARD_FLOAT)
+ ARM_FMRS (code, ARMREG_R0, ARM_VFP_F0);
}
} else {
- if (IS_HARD_FLOAT) {
+ if (IS_HARD_FLOAT)
ARM_CPYD (code, ARM_VFP_D0, ins->sreg1);
- } else {
+ else
ARM_FMRRD (code, ARMREG_R0, ARMREG_R1, ins->sreg1);
- }
}
break;
}
ARM_FMSTAT (code);
}
break;
+ case OP_RCOMPARE:
+ g_assert (IS_VFP);
+ ARM_CMPS (code, ins->sreg1, ins->sreg2);
+ ARM_FMSTAT (code);
+ break;
case OP_FCEQ:
if (IS_VFP) {
ARM_CMPD (code, ins->sreg1, ins->sreg2);
break;
}
+ case OP_RCONV_TO_I1:
+ code = emit_r4_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
+ break;
+ case OP_RCONV_TO_U1:
+ code = emit_r4_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
+ break;
+ case OP_RCONV_TO_I2:
+ code = emit_r4_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
+ break;
+ case OP_RCONV_TO_U2:
+ code = emit_r4_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
+ break;
+ case OP_RCONV_TO_I4:
+ code = emit_r4_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
+ break;
+ case OP_RCONV_TO_U4:
+ code = emit_r4_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
+ break;
+ case OP_RCONV_TO_R4:
+ g_assert (IS_VFP);
+ if (ins->dreg != ins->sreg1)
+ ARM_CPYS (code, ins->dreg, ins->sreg1);
+ break;
+ case OP_RCONV_TO_R8:
+ g_assert (IS_VFP);
+ ARM_CVTS (code, ins->dreg, ins->sreg1);
+ break;
+ case OP_RADD:
+ ARM_VFP_ADDS (code, ins->dreg, ins->sreg1, ins->sreg2);
+ break;
+ case OP_RSUB:
+ ARM_VFP_SUBS (code, ins->dreg, ins->sreg1, ins->sreg2);
+ break;
+ case OP_RMUL:
+ ARM_VFP_MULS (code, ins->dreg, ins->sreg1, ins->sreg2);
+ break;
+ case OP_RDIV:
+ ARM_VFP_DIVS (code, ins->dreg, ins->sreg1, ins->sreg2);
+ break;
+ case OP_RNEG:
+ ARM_NEGS (code, ins->dreg, ins->sreg1);
+ break;
+ case OP_RCEQ:
+ if (IS_VFP) {
+ ARM_CMPS (code, ins->sreg1, ins->sreg2);
+ ARM_FMSTAT (code);
+ }
+ ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
+ ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
+ break;
+ case OP_RCLT:
+ if (IS_VFP) {
+ ARM_CMPS (code, ins->sreg1, ins->sreg2);
+ ARM_FMSTAT (code);
+ }
+ ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
+ ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
+ break;
+ case OP_RCLT_UN:
+ if (IS_VFP) {
+ ARM_CMPS (code, ins->sreg1, ins->sreg2);
+ ARM_FMSTAT (code);
+ }
+ ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
+ ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
+ ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
+ break;
+ case OP_RCGT:
+ if (IS_VFP) {
+ ARM_CMPS (code, ins->sreg2, ins->sreg1);
+ ARM_FMSTAT (code);
+ }
+ ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
+ ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
+ break;
+ case OP_RCGT_UN:
+ if (IS_VFP) {
+ ARM_CMPS (code, ins->sreg2, ins->sreg1);
+ ARM_FMSTAT (code);
+ }
+ ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
+ ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
+ ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
+ break;
+ case OP_RCNEQ:
+ if (IS_VFP) {
+ ARM_CMPS (code, ins->sreg1, ins->sreg2);
+ ARM_FMSTAT (code);
+ }
+ ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_NE);
+ ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_EQ);
+ break;
+ case OP_RCGE:
+ if (IS_VFP) {
+ ARM_CMPS (code, ins->sreg1, ins->sreg2);
+ ARM_FMSTAT (code);
+ }
+ ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
+ ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_MI);
+ break;
+ case OP_RCLE:
+ if (IS_VFP) {
+ ARM_CMPS (code, ins->sreg2, ins->sreg1);
+ ARM_FMSTAT (code);
+ }
+ ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
+ ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_MI);
+ break;
+
case OP_GC_LIVENESS_DEF:
case OP_GC_LIVENESS_USE:
case OP_GC_PARAM_SLOT_LIVENESS_DEF:
} while (0)
+/*
+ * mono_arch_patch_code_new:
+ *
+ *   Apply the single patch JI to the method at CODE, resolving it to TARGET.
+ * Inline jump tables are filled with absolute addresses; everything else is
+ * dispatched to arm_patch_general (), which may install a thunk if the
+ * branch displacement cannot reach TARGET.
+ */
void
-mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, MonoCodeManager *dyn_code_mp, gboolean run_cctors)
+mono_arch_patch_code_new (MonoCompile *cfg, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gpointer target)
{
- MonoJumpInfo *patch_info;
- gboolean compile_aot = !run_cctors;
+ unsigned char *ip = ji->ip.i + code;
- for (patch_info = ji; patch_info; patch_info = patch_info->next) {
- unsigned char *ip = patch_info->ip.i + code;
- const unsigned char *target;
-
- if (patch_info->type == MONO_PATCH_INFO_SWITCH && !compile_aot) {
+ switch (ji->type) {
+ case MONO_PATCH_INFO_SWITCH: {
#ifdef USE_JUMP_TABLES
- gpointer *jt = mono_jumptable_get_entry (ip);
+ gpointer *jt = mono_jumptable_get_entry (ip);
#else
- gpointer *jt = (gpointer*)(ip + 8);
+ gpointer *jt = (gpointer*)(ip + 8);
#endif
- int i;
- /* jt is the inlined jump table, 2 instructions after ip
- * In the normal case we store the absolute addresses,
- * otherwise the displacements.
- */
- for (i = 0; i < patch_info->data.table->table_size; i++)
- jt [i] = code + (int)patch_info->data.table->table [i];
- continue;
- }
-
- if (compile_aot) {
- switch (patch_info->type) {
- case MONO_PATCH_INFO_BB:
- case MONO_PATCH_INFO_LABEL:
- break;
- default:
- /* No need to patch these */
- continue;
- }
- }
-
- target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
-
- switch (patch_info->type) {
- case MONO_PATCH_INFO_IP:
- g_assert_not_reached ();
- patch_lis_ori (ip, ip);
- continue;
- case MONO_PATCH_INFO_METHOD_REL:
- g_assert_not_reached ();
- *((gpointer *)(ip)) = code + patch_info->data.offset;
- continue;
- case MONO_PATCH_INFO_METHODCONST:
- case MONO_PATCH_INFO_CLASS:
- case MONO_PATCH_INFO_IMAGE:
- case MONO_PATCH_INFO_FIELD:
- case MONO_PATCH_INFO_VTABLE:
- case MONO_PATCH_INFO_IID:
- case MONO_PATCH_INFO_SFLDA:
- case MONO_PATCH_INFO_LDSTR:
- case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
- case MONO_PATCH_INFO_LDTOKEN:
- g_assert_not_reached ();
- /* from OP_AOTCONST : lis + ori */
- patch_lis_ori (ip, target);
- continue;
- case MONO_PATCH_INFO_R4:
- case MONO_PATCH_INFO_R8:
- g_assert_not_reached ();
- *((gconstpointer *)(ip + 2)) = patch_info->data.target;
- continue;
- case MONO_PATCH_INFO_EXC_NAME:
- g_assert_not_reached ();
- *((gconstpointer *)(ip + 1)) = patch_info->data.name;
- continue;
- case MONO_PATCH_INFO_NONE:
- case MONO_PATCH_INFO_BB_OVF:
- case MONO_PATCH_INFO_EXC_OVF:
- /* everything is dealt with at epilog output time */
- continue;
- default:
- break;
- }
- arm_patch_general (domain, ip, target, dyn_code_mp);
+ int i;
+ /* jt is the inlined jump table, 2 instructions after ip
+ * In the normal case we store the absolute addresses,
+ * otherwise the displacements.
+ */
+ for (i = 0; i < ji->data.table->table_size; i++)
+ jt [i] = code + (int)ji->data.table->table [i];
+ break;
+ }
+ case MONO_PATCH_INFO_IP:
+ g_assert_not_reached ();
+ patch_lis_ori (ip, ip);
+ break;
+ case MONO_PATCH_INFO_METHOD_REL:
+ g_assert_not_reached ();
+ *((gpointer *)(ip)) = target;
+ break;
+ case MONO_PATCH_INFO_METHODCONST:
+ case MONO_PATCH_INFO_CLASS:
+ case MONO_PATCH_INFO_IMAGE:
+ case MONO_PATCH_INFO_FIELD:
+ case MONO_PATCH_INFO_VTABLE:
+ case MONO_PATCH_INFO_IID:
+ case MONO_PATCH_INFO_SFLDA:
+ case MONO_PATCH_INFO_LDSTR:
+ case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
+ case MONO_PATCH_INFO_LDTOKEN:
+ g_assert_not_reached ();
+ /* from OP_AOTCONST : lis + ori */
+ patch_lis_ori (ip, target);
+ break;
+ case MONO_PATCH_INFO_R4:
+ case MONO_PATCH_INFO_R8:
+ g_assert_not_reached ();
+ *((gconstpointer *)(ip + 2)) = target;
+ break;
+ case MONO_PATCH_INFO_EXC_NAME:
+ g_assert_not_reached ();
+ *((gconstpointer *)(ip + 1)) = target;
+ break;
+ case MONO_PATCH_INFO_NONE:
+ case MONO_PATCH_INFO_BB_OVF:
+ case MONO_PATCH_INFO_EXC_OVF:
+ /* everything is dealt with at epilog output time */
+ break;
+ default:
+ arm_patch_general (cfg, domain, ip, target);
+ break;
}
}
MonoBasicBlock *bb;
MonoMethodSignature *sig;
MonoInst *inst;
- int alloc_size, orig_alloc_size, pos, max_offset, i, rot_amount;
+ int alloc_size, orig_alloc_size, pos, max_offset, i, rot_amount, part;
guint8 *code;
CallInfo *cinfo;
int tracing = 0;
cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig);
- if (cinfo->vtype_retaddr) {
+ if (cinfo->ret.storage == RegTypeStructByAddr) {
ArgInfo *ainfo = &cinfo->ret;
inst = cfg->vret_addr;
g_assert (arm_is_imm12 (inst->inst_offset));
if (cfg->verbose_level > 2)
g_print ("Saving argument %d (type: %d)\n", i, ainfo->storage);
+
if (inst->opcode == OP_REGVAR) {
if (ainfo->storage == RegTypeGeneral)
ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
if (cfg->verbose_level > 2)
g_print ("Argument %d assigned to register %s\n", pos, mono_arch_regname (inst->dreg));
} else {
- /* the argument should be put on the stack: FIXME handle size != word */
- if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair || ainfo->storage == RegTypeGSharedVtInReg) {
+ switch (ainfo->storage) {
+ case RegTypeHFA:
+ for (part = 0; part < ainfo->nregs; part ++) {
+ if (ainfo->esize == 4)
+ ARM_FSTS (code, ainfo->reg + part, inst->inst_basereg, inst->inst_offset + (part * ainfo->esize));
+ else
+ ARM_FSTD (code, ainfo->reg + (part * 2), inst->inst_basereg, inst->inst_offset + (part * ainfo->esize));
+ }
+ break;
+ case RegTypeGeneral:
+ case RegTypeIRegPair:
+ case RegTypeGSharedVtInReg:
switch (ainfo->size) {
case 1:
if (arm_is_imm12 (inst->inst_offset))
}
break;
}
- } else if (ainfo->storage == RegTypeBaseGen) {
+ break;
+ case RegTypeBaseGen:
if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
} else {
code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
ARM_STR_REG_REG (code, ARMREG_R3, inst->inst_basereg, ARMREG_IP);
}
- } else if (ainfo->storage == RegTypeBase || ainfo->storage == RegTypeGSharedVtOnStack) {
+ break;
+ case RegTypeBase:
+ case RegTypeGSharedVtOnStack:
if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
} else {
}
break;
}
- } else if (ainfo->storage == RegTypeFP) {
+ break;
+ case RegTypeFP: {
int imm8, rot_amount;
if ((imm8 = mono_arm_is_rotated_imm8 (inst->inst_offset, &rot_amount)) == -1) {
ARM_FSTD (code, ainfo->reg, ARMREG_IP, 0);
else
ARM_FSTS (code, ainfo->reg, ARMREG_IP, 0);
- } else if (ainfo->storage == RegTypeStructByVal) {
+ break;
+ }
+ case RegTypeStructByVal: {
int doffset = inst->inst_offset;
int soffset = 0;
int cur_reg;
//g_print ("emit_memcpy (prev_sp_ofs: %d, ainfo->offset: %d, soffset: %d)\n", prev_sp_offset, ainfo->offset, soffset);
code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, doffset, ARMREG_SP, prev_sp_offset + ainfo->offset);
}
- } else if (ainfo->storage == RegTypeStructByAddr) {
+ break;
+ }
+ case RegTypeStructByAddr:
g_assert_not_reached ();
/* FIXME: handle overrun! with struct sizes not multiple of 4 */
code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, inst->inst_offset, ainfo->reg, 0);
- } else
+ default:
g_assert_not_reached ();
+ break;
+ }
}
pos++;
}
#ifdef USE_JUMP_TABLES
jte = mono_jumptable_add_entries (3);
jte [0] = (gpointer)&ss_trigger_var;
- jte [1] = single_step_func_wrapper;
- jte [2] = breakpoint_func_wrapper;
+ jte [1] = single_step_tramp;
+ jte [2] = breakpoint_tramp;
code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_LR);
#else
ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
ARM_B (code, 2);
*(volatile int **)code = &ss_trigger_var;
code += 4;
- *(gpointer*)code = single_step_func_wrapper;
+ *(gpointer*)code = single_step_tramp;
code += 4;
- *(gpointer*)code = breakpoint_func_wrapper;
+ *(gpointer*)code = breakpoint_tramp;
code += 4;
#endif
/* Load returned vtypes into registers if needed */
cinfo = cfg->arch.cinfo;
- if (cinfo->ret.storage == RegTypeStructByVal) {
+ switch (cinfo->ret.storage) {
+ case RegTypeStructByVal: {
MonoInst *ins = cfg->ret;
if (arm_is_imm12 (ins->inst_offset)) {
code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
ARM_LDR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
}
+ break;
+ }
+ case RegTypeHFA: {
+ MonoInst *ins = cfg->ret;
+
+ for (i = 0; i < cinfo->ret.nregs; ++i) {
+ if (cinfo->ret.esize == 4)
+ ARM_FLDS (code, cinfo->ret.reg + i, ins->inst_basereg, ins->inst_offset + (i * cinfo->ret.esize));
+ else
+ ARM_FLDD (code, cinfo->ret.reg + (i * 2), ins->inst_basereg, ins->inst_offset + (i * cinfo->ret.esize));
+ }
+ break;
+ }
+ default:
+ break;
}
if (method->save_lmf) {
patch_info->data.name = "mono_arch_throw_corlib_exception";
patch_info->ip.i = code - cfg->native_code;
ARM_BL (code, 0);
+ cfg->thunk_area += THUNK_SIZE;
*(guint32*)(gpointer)code = exc_class->type_token;
code += 4;
#endif
{
}
-#ifndef DISABLE_JIT
-
-void
-mono_arch_emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
-{
- int method_reg = mono_alloc_ireg (cfg);
-#ifdef USE_JUMP_TABLES
- int use_jumptables = TRUE;
-#else
- int use_jumptables = FALSE;
-#endif
-
- if (cfg->compile_aot) {
- MonoInst *ins;
-
- call->dynamic_imt_arg = TRUE;
-
- if (imt_arg) {
- MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
- } else {
- MONO_INST_NEW (cfg, ins, OP_AOTCONST);
- ins->dreg = method_reg;
- ins->inst_p0 = call->method;
- ins->inst_c1 = MONO_PATCH_INFO_METHODCONST;
- MONO_ADD_INS (cfg->cbb, ins);
- }
- mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
- } else if (cfg->generic_context || imt_arg || mono_use_llvm || use_jumptables) {
- /* Always pass in a register for simplicity */
- call->dynamic_imt_arg = TRUE;
-
- cfg->uses_rgctx_reg = TRUE;
-
- if (imt_arg) {
- MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
- } else {
- MonoInst *ins;
-
- MONO_INST_NEW (cfg, ins, OP_PCONST);
- ins->inst_p0 = call->method;
- ins->dreg = method_reg;
- MONO_ADD_INS (cfg->cbb, ins);
- }
-
- mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
- }
-}
-
-#endif /* DISABLE_JIT */
-
MonoMethod*
mono_arch_find_imt_method (mgreg_t *regs, guint8 *code)
{
-#ifdef USE_JUMP_TABLES
- return (MonoMethod*)regs [ARMREG_V5];
-#else
- gpointer method;
- guint32 *code_ptr = (guint32*)code;
- code_ptr -= 2;
- method = GUINT_TO_POINTER (code_ptr [1]);
-
- if (mono_use_llvm)
- /* Passed in V5 */
- return (MonoMethod*)regs [ARMREG_V5];
-
- /* The IMT value is stored in the code stream right after the LDC instruction. */
- /* This is no longer true for the gsharedvt_in trampoline */
- /*
- if (!IS_LDR_PC (code_ptr [0])) {
- g_warning ("invalid code stream, instruction before IMT value is not a LDC in %s() (code %p value 0: 0x%x -1: 0x%x -2: 0x%x)", __FUNCTION__, code, code_ptr [2], code_ptr [1], code_ptr [0]);
- g_assert (IS_LDR_PC (code_ptr [0]));
- }
- */
- if (method == 0)
- /* This is AOTed code, or the gsharedvt trampoline, the IMT method is in V5 */
- return (MonoMethod*)regs [ARMREG_V5];
- else
- return (MonoMethod*) method;
-#endif
+ /* The IMT method is now always passed in a register; the old
+ * scheme of embedding it in the code stream after the call was removed. */
+ return (MonoMethod*)regs [MONO_ARCH_IMT_REG];
}
MonoVTable*
#ifdef USE_JUMP_TABLES
ARM_PUSH3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
- /* If jumptables we always pass the IMT method in R5 */
- ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_V5);
#define VTABLE_JTI 0
#define IMT_METHOD_OFFSET 0
#define TARGET_CODE_OFFSET 1
ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, -4);
vtable_target = code;
ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
-
- if (mono_use_llvm) {
- /* LLVM always passes the IMT method in R5 */
- ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_V5);
- } else {
- /* R0 == 0 means we are called from AOT code. In this case, V5 contains the IMT method */
- ARM_CMP_REG_IMM8 (code, ARMREG_R0, 0);
- ARM_MOV_REG_REG_COND (code, ARMREG_R0, ARMREG_V5, ARMCOND_EQ);
- }
#endif
+ ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_V5);
for (i = 0; i < count; ++i) {
MonoIMTCheckItem *item = imt_entries [i];