if (type->byref)
return OP_MOVE;
- type = mini_replace_type (type);
+ type = mini_get_underlying_type (cfg, type);
handle_enum:
switch (type->type) {
case MONO_TYPE_I1:
case MONO_TYPE_U1:
- case MONO_TYPE_BOOLEAN:
return OP_MOVE;
case MONO_TYPE_I2:
case MONO_TYPE_U2:
- case MONO_TYPE_CHAR:
return OP_MOVE;
case MONO_TYPE_I4:
case MONO_TYPE_U4:
return OP_LMOVE;
#endif
case MONO_TYPE_R4:
- return OP_FMOVE;
+ return cfg->r4fp ? OP_RMOVE : OP_FMOVE;
case MONO_TYPE_R8:
return OP_FMOVE;
case MONO_TYPE_VALUETYPE:
if (mini_type_var_is_vt (cfg, type))
return OP_VMOVE;
else
- return OP_MOVE;
+ return mono_type_to_regmove (cfg, mini_get_underlying_type (cfg, type));
default:
g_error ("unknown type 0x%02x in type_to_regstore", type->type);
}
} while (0)
#endif
+/* Emit conversions so both operands of a binary opcode are of the same type */
+static void
+add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref)
+{
+ MonoInst *arg1 = *arg1_ref;
+ MonoInst *arg2 = *arg2_ref;
+
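+ /*
+ * When cfg->r4fp is set, r4 values are kept in single precision on the eval
+ * stack, so a binop mixing r4 and r8 operands needs its r4 side widened to r8.
+ */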
+ if (cfg->r4fp &&
+ ((arg1->type == STACK_R4 && arg2->type == STACK_R8) ||
+ (arg1->type == STACK_R8 && arg2->type == STACK_R4))) {
+ MonoInst *conv;
+
+ /* Mixing r4/r8 is allowed by the spec */
+ if (arg1->type == STACK_R4) {
+ int dreg = alloc_freg (cfg);
+
+ EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg);
+ conv->type = STACK_R8;
+ ins->sreg1 = dreg;
+ *arg1_ref = conv;
+ }
+ if (arg2->type == STACK_R4) {
+ int dreg = alloc_freg (cfg);
+
+ EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg);
+ conv->type = STACK_R8;
+ ins->sreg2 = dreg;
+ *arg2_ref = conv;
+ }
+ }
+
#if SIZEOF_REGISTER == 8
-#define ADD_WIDEN_OP(ins, arg1, arg2) do { \
- /* FIXME: Need to add many more cases */ \
- if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
- MonoInst *widen; \
- int dr = alloc_preg (cfg); \
- EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
- (ins)->sreg2 = widen->dreg; \
- } \
- } while (0)
-#else
-#define ADD_WIDEN_OP(ins, arg1, arg2)
+ /* FIXME: Need to add many more cases */
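+ /* On 64 bit targets, a STACK_I4 operand is sign extended to pointer width before being combined with a STACK_PTR operand */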
+ if (arg1->type == STACK_PTR && arg2->type == STACK_I4) {
+ MonoInst *widen;
+ int dr = alloc_preg (cfg);
+
+ EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, arg2->dreg);
+ ins->sreg2 = widen->dreg;
+ }
#endif
+}
#define ADD_BINOP(op) do { \
MONO_INST_NEW (cfg, ins, (op)); \
sp -= 2; \
ins->sreg1 = sp [0]->dreg; \
ins->sreg2 = sp [1]->dreg; \
- type_from_op (ins, sp [0], sp [1]); \
+ type_from_op (cfg, ins, sp [0], sp [1]); \
CHECK_TYPE (ins); \
/* Have to insert a widening op */ \
- ADD_WIDEN_OP (ins, sp [0], sp [1]); \
+ add_widen_op (cfg, ins, &sp [0], &sp [1]); \
ins->dreg = alloc_dreg ((cfg), (ins)->type); \
MONO_ADD_INS ((cfg)->cbb, (ins)); \
- *sp++ = mono_decompose_opcode ((cfg), (ins)); \
+ *sp++ = mono_decompose_opcode ((cfg), (ins), &bblock); \
} while (0)
#define ADD_UNOP(op) do { \
MONO_INST_NEW (cfg, ins, (op)); \
sp--; \
ins->sreg1 = sp [0]->dreg; \
- type_from_op (ins, sp [0], NULL); \
+ type_from_op (cfg, ins, sp [0], NULL); \
CHECK_TYPE (ins); \
(ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
MONO_ADD_INS ((cfg)->cbb, (ins)); \
- *sp++ = mono_decompose_opcode (cfg, ins); \
+ *sp++ = mono_decompose_opcode (cfg, ins, &bblock); \
} while (0)
#define ADD_BINCOND(next_block) do { \
MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
cmp->sreg1 = sp [0]->dreg; \
cmp->sreg2 = sp [1]->dreg; \
- type_from_op (cmp, sp [0], sp [1]); \
+ type_from_op (cfg, cmp, sp [0], sp [1]); \
CHECK_TYPE (cmp); \
- type_from_op (ins, sp [0], sp [1]); \
+ add_widen_op (cfg, cmp, &sp [0], &sp [1]); \
+ type_from_op (cfg, ins, sp [0], sp [1]); \
ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
GET_BBLOCK (cfg, tblock, target); \
link_bblock (cfg, bblock, tblock); \
{
MonoClass *klass;
- type = mini_replace_type (type);
+ type = mini_get_underlying_type (cfg, type);
inst->klass = klass = mono_class_from_mono_type (type);
if (type->byref) {
inst->type = STACK_MP;
return;
case MONO_TYPE_I1:
case MONO_TYPE_U1:
- case MONO_TYPE_BOOLEAN:
case MONO_TYPE_I2:
case MONO_TYPE_U2:
- case MONO_TYPE_CHAR:
case MONO_TYPE_I4:
case MONO_TYPE_U4:
inst->type = STACK_I4;
inst->type = STACK_I8;
return;
case MONO_TYPE_R4:
+ inst->type = cfg->r4_stack_type;
+ break;
case MONO_TYPE_R8:
inst->type = STACK_R8;
return;
g_assert (cfg->gsharedvt);
inst->type = STACK_VTYPE;
} else {
- inst->type = STACK_OBJ;
+ type_to_eval_stack_type (cfg, mini_get_underlying_type (cfg, type), inst);
}
return;
default:
{STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
- {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
+ {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R8},
{STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
- {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
+ {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
+ {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4}
};
static const char
neg_table [] = {
- STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
+ STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4
};
/* reduce the size of this table */
static const char
bin_comp_table [STACK_MAX] [STACK_MAX] = {
-/* Inv i L p F & O vt */
+/* Inv i L p F & O vt r4 */
{0},
{0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
{0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
{0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
- {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
+ {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
{0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
{0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
{0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
+ {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
};
/* reduce the size of this table */
/* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
static const guint16
binops_op_map [STACK_MAX] = {
- 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
+ 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD
};
/* handles from CEE_NEG to CEE_CONV_U8 */
static const guint16
unops_op_map [STACK_MAX] = {
- 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
+ 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG
};
/* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
static const guint16
ovfops_op_map [STACK_MAX] = {
- 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
+ 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2
};
/* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
static const guint16
ovf2ops_op_map [STACK_MAX] = {
- 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
+ 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
};
/* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
static const guint16
ovf3ops_op_map [STACK_MAX] = {
- 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
+ 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1
};
/* handles from CEE_BEQ to CEE_BLT_UN */
static const guint16
beqops_op_map [STACK_MAX] = {
- 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
+ 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ
};
/* handles from CEE_CEQ to CEE_CLT_UN */
static const guint16
ceqops_op_map [STACK_MAX] = {
- 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
+ 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ
};
/*
* it should set it to invalid for some types (a conv.x on an object)
*/
static void
-type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
-
+type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
+{
switch (ins->opcode) {
/* binops */
case CEE_ADD:
ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
ins->opcode = OP_LCOMPARE;
+ else if (src1->type == STACK_R4)
+ ins->opcode = OP_RCOMPARE;
else if (src1->type == STACK_R8)
ins->opcode = OP_FCOMPARE;
else
ins->opcode += ovf2ops_op_map [src1->type];
break;
case CEE_CONV_R4:
+ ins->type = cfg->r4_stack_type;
+ ins->opcode += unops_op_map [src1->type];
+ break;
case CEE_CONV_R8:
ins->type = STACK_R8;
ins->opcode += unops_op_map [src1->type];
ins->type = STACK_I8;
break;
case OP_LOADR4_MEMBASE:
+ ins->type = cfg->r4_stack_type;
+ break;
case OP_LOADR8_MEMBASE:
ins->type = STACK_R8;
break;
case STACK_I4: return &mono_defaults.int32_class->byval_arg;
case STACK_I8: return &mono_defaults.int64_class->byval_arg;
case STACK_PTR: return &mono_defaults.int_class->byval_arg;
+ case STACK_R4: return &mono_defaults.single_class->byval_arg;
case STACK_R8: return &mono_defaults.double_class->byval_arg;
case STACK_MP:
return &ins->klass->this_arg;
}
static G_GNUC_UNUSED int
-type_to_stack_type (MonoType *t)
+type_to_stack_type (MonoCompile *cfg, MonoType *t)
{
- t = mono_type_get_underlying_type (t);
+ t = mini_get_underlying_type (cfg, t);
switch (t->type) {
case MONO_TYPE_I1:
case MONO_TYPE_U1:
- case MONO_TYPE_BOOLEAN:
case MONO_TYPE_I2:
case MONO_TYPE_U2:
- case MONO_TYPE_CHAR:
case MONO_TYPE_I4:
case MONO_TYPE_U4:
return STACK_I4;
case MONO_TYPE_U8:
return STACK_I8;
case MONO_TYPE_R4:
+ return cfg->r4_stack_type;
case MONO_TYPE_R8:
return STACK_R8;
case MONO_TYPE_VALUETYPE:
}
static int
-ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
+ret_type_to_call_opcode (MonoCompile *cfg, MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
{
- if (type->byref)
- return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
-
handle_enum:
- type = mini_get_basic_type_from_generic (gsctx, type);
- type = mini_replace_type (type);
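+ /* Note: mini_get_underlying_type () maps byref types to MONO_TYPE_I */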
+ type = mini_get_underlying_type (cfg, type);
switch (type->type) {
case MONO_TYPE_VOID:
return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
case MONO_TYPE_I1:
case MONO_TYPE_U1:
- case MONO_TYPE_BOOLEAN:
case MONO_TYPE_I2:
case MONO_TYPE_U2:
- case MONO_TYPE_CHAR:
case MONO_TYPE_I4:
case MONO_TYPE_U4:
return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
case MONO_TYPE_U8:
return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
case MONO_TYPE_R4:
+ if (cfg->r4fp)
+ return calli? OP_RCALL_REG: virt? OP_RCALL_MEMBASE: OP_RCALL;
+ else
+ return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
case MONO_TYPE_R8:
return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
case MONO_TYPE_VALUETYPE:
MonoType *simple_type;
MonoClass *klass;
- target = mini_replace_type (target);
if (target->byref) {
/* FIXME: check that the pointed to types match */
if (arg->type == STACK_MP)
return 1;
}
- simple_type = mono_type_get_underlying_type (target);
+ simple_type = mini_get_underlying_type (cfg, target);
switch (simple_type->type) {
case MONO_TYPE_VOID:
return 1;
case MONO_TYPE_I1:
case MONO_TYPE_U1:
- case MONO_TYPE_BOOLEAN:
case MONO_TYPE_I2:
case MONO_TYPE_U2:
- case MONO_TYPE_CHAR:
case MONO_TYPE_I4:
case MONO_TYPE_U4:
if (arg->type != STACK_I4 && arg->type != STACK_PTR)
return 1;
return 0;
case MONO_TYPE_R4:
+ if (arg->type != cfg->r4_stack_type)
+ return 1;
+ return 0;
case MONO_TYPE_R8:
if (arg->type != STACK_R8)
return 1;
return 1;
continue;
}
- simple_type = sig->params [i];
- simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
+ simple_type = mini_get_underlying_type (cfg, sig->params [i]);
handle_enum:
switch (simple_type->type) {
case MONO_TYPE_VOID:
continue;
case MONO_TYPE_I1:
case MONO_TYPE_U1:
- case MONO_TYPE_BOOLEAN:
case MONO_TYPE_I2:
case MONO_TYPE_U2:
- case MONO_TYPE_CHAR:
case MONO_TYPE_I4:
case MONO_TYPE_U4:
if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
return 1;
continue;
case MONO_TYPE_R4:
+ if (args [i]->type != cfg->r4_stack_type)
+ return 1;
+ continue;
case MONO_TYPE_R8:
if (args [i]->type != STACK_R8)
return 1;
return OP_VOIDCALL;
case OP_FCALL_MEMBASE:
return OP_FCALL;
+ case OP_RCALL_MEMBASE:
+ return OP_RCALL;
case OP_VCALL_MEMBASE:
return OP_VCALL;
case OP_LCALL_MEMBASE:
MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
} else
- MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
+ MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (cfg, sig->ret, calli, virtual, cfg->generic_sharing_context));
call->args = args;
call->signature = sig;
call->rgctx_reg = rgctx;
- sig_ret = mini_replace_type (sig->ret);
+ sig_ret = mini_get_underlying_type (cfg, sig->ret);
type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
((MonoCallInst*)ins)->fptr_is_patch = TRUE;
return ins;
}
+
+static gboolean
+direct_icalls_enabled (MonoCompile *cfg)
+{
+ /* LLVM on amd64 can't handle calls to non-32 bit addresses */
+#ifdef TARGET_AMD64
+ if (cfg->compile_llvm)
+ return FALSE;
+#endif
+ if (cfg->gen_seq_points_debug_data || cfg->disable_direct_icalls)
+ return FALSE;
+ return TRUE;
+}
+
+MonoInst*
+mono_emit_jit_icall_by_info (MonoCompile *cfg, MonoJitICallInfo *info, MonoInst **args, MonoBasicBlock **out_cbb)
+{
+ /*
+ * Call the jit icall without a wrapper if possible.
+ * The wrapper is needed for the following reasons:
+ * - to handle exceptions thrown using mono_raise_exception () from the
+ * icall function. The EH code needs the lmf frame pushed by the
+ * wrapper to be able to unwind back to managed code.
+ * - to be able to do stack walks for asynchronously suspended
+ * threads when debugging.
+ */
+ if (info->no_raise && direct_icalls_enabled (cfg)) {
+ char *name;
+ int costs;
+
+ if (!info->wrapper_method) {
+ name = g_strdup_printf ("__icall_wrapper_%s", info->name);
+ info->wrapper_method = mono_marshal_get_icall_wrapper (info->sig, name, info->func, TRUE);
+ g_free (name);
+ mono_memory_barrier ();
+ }
+
+ /*
+ * Inline the wrapper method, which is basically a call to the C icall, and
+ * an exception check.
+ */
+ costs = inline_method (cfg, info->wrapper_method, NULL,
+ args, NULL, cfg->real_offset, TRUE, out_cbb);
+ g_assert (costs > 0);
+ g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));
+
+ return args [0];
+ } else {
+ return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
+ }
+}
static MonoInst*
mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
wbarrier->sreg1 = ptr->dreg;
wbarrier->sreg2 = value->dreg;
MONO_ADD_INS (cfg->cbb, wbarrier);
- } else if (card_table) {
+ } else if (card_table && !cfg->compile_aot && !mono_gc_card_table_nursery_check ()) {
int offset_reg = alloc_preg (cfg);
int card_reg = alloc_preg (cfg);
MonoInst *ins;
mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
{
MonoInst *iargs [4];
- int context_used, n;
+ int n;
guint32 align = 0;
MonoMethod *memcpy_method;
MonoInst *size_ins = NULL;
MonoInst *memcpy_ins = NULL;
g_assert (klass);
+ if (cfg->generic_sharing_context)
+ klass = mono_class_from_mono_type (mini_get_underlying_type (cfg, &klass->byval_arg));
+
/*
* This check breaks with spilled vars... need to handle it during verification anyway.
* g_assert (klass && klass == src->klass && klass == dest->klass);
if (mini_is_gsharedvt_klass (cfg, klass)) {
g_assert (!native);
- context_used = mini_class_check_context_used (cfg, klass);
size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
}
}
}
- if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
+ if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 8) {
/* FIXME: Optimize the case when src/dest is OP_LDADDR */
mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
} else {
mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
{
MonoInst *iargs [3];
- int n, context_used;
+ int n;
guint32 align;
MonoMethod *memset_method;
MonoInst *size_ins = NULL;
static MonoMethod *bzero_method;
/* FIXME: Optimize this for the case when dest is an LDADDR */
-
mono_class_init (klass);
if (mini_is_gsharedvt_klass (cfg, klass)) {
- context_used = mini_class_check_context_used (cfg, klass);
size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
if (!bzero_method)
n = mono_class_value_size (klass, &align);
- if (n <= sizeof (gpointer) * 5) {
+ if (n <= sizeof (gpointer) * 8) {
mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
}
else {
g_assert (klass->rank == 0);
element_class = emit_get_rgctx_klass (cfg, context_used,
- klass->element_class, MONO_RGCTX_INFO_KLASS);
+ klass, MONO_RGCTX_INFO_ELEMENT_KLASS);
MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
MonoInst *unbox_call;
MonoMethodSignature *unbox_sig;
- MonoInst *var;
-
- var = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
unbox_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
unbox_sig->ret = &klass->byval_arg;
MonoInst *data;
int rgctx_info;
MonoInst *iargs [2];
+ gboolean known_instance_size = !mini_is_gsharedvt_klass (cfg, klass);
- MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box);
+ MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);
if (cfg->opt & MONO_OPT_SHARED)
rgctx_info = MONO_RGCTX_INFO_KLASS;
alloc_ftn = mono_object_new_specific;
}
- if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED))
+ if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
+ if (known_instance_size) {
+ int size = mono_class_instance_size (klass);
+
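+ /* Pass the aligned object size as the managed allocator's second argument */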
+ EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (size));
+ }
return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
+ }
return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
}
}
#ifndef MONO_CROSS_COMPILE
- managed_alloc = mono_gc_get_managed_allocator (klass, for_box);
+ managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);
#endif
if (managed_alloc) {
+ int size = mono_class_instance_size (klass);
+
EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
+ EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (size));
return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
}
alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
if (mini_is_gsharedvt_klass (cfg, klass)) {
MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
MonoInst *res, *is_ref, *src_var, *addr;
- int addr_reg, dreg;
+ int dreg;
dreg = alloc_ireg (cfg);
/* Ref case */
MONO_START_BB (cfg, is_ref_bb);
- addr_reg = alloc_ireg (cfg);
/* val is a vtype, so has to load the value manually */
src_var = get_vreg_to_inst (cfg, val->dreg);
}
}
-
static gboolean
mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
{
return FALSE;
}
+static GHashTable* direct_icall_type_hash;
+
+static gboolean
+icall_is_direct_callable (MonoCompile *cfg, MonoMethod *cmethod)
+{
+ /* Bail out if direct icalls are disabled, e.g. for LLVM on amd64 or when debugging */
+ if (!direct_icalls_enabled (cfg))
+ return FALSE;
+
+ /*
+ * An icall is directly callable if it doesn't directly or indirectly call mono_raise_exception ().
+ * Whitelist a few icalls for now.
+ */
+ if (!direct_icall_type_hash) {
+ GHashTable *h = g_hash_table_new (g_str_hash, g_str_equal);
+
+ g_hash_table_insert (h, (char*)"Decimal", GUINT_TO_POINTER (1));
+ g_hash_table_insert (h, (char*)"Number", GUINT_TO_POINTER (1));
+ g_hash_table_insert (h, (char*)"Buffer", GUINT_TO_POINTER (1));
+ mono_memory_barrier ();
+ direct_icall_type_hash = h;
+ }
+
+ if (cmethod->klass == mono_defaults.math_class)
+ return TRUE;
+ /* No locking needed */
+ if (cmethod->klass->image == mono_defaults.corlib && g_hash_table_lookup (direct_icall_type_hash, cmethod->klass->name))
+ return TRUE;
+ return FALSE;
+}
+
#define is_complex_isinst(klass) ((klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
static MonoInst*
return res;
}
+static int
+get_castclass_cache_idx (MonoCompile *cfg)
+{
+ /* Each CASTCLASS_CACHE patch needs a unique index which identifies the call site */
+ cfg->castclass_cache_index ++;
+ return (cfg->method_index << 16) | cfg->castclass_cache_index;
+}
+
static MonoInst*
emit_castclass_with_cache_nonshared (MonoCompile *cfg, MonoInst *obj, MonoClass *klass, MonoBasicBlock **out_bblock)
{
/* inline cache*/
if (cfg->compile_aot) {
- /* Each CASTCLASS_CACHE patch needs a unique index which identifies the call site */
- cfg->castclass_cache_index ++;
- idx = (cfg->method_index << 16) | cfg->castclass_cache_index;
+ idx = get_castclass_cache_idx (cfg);
EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
} else {
EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
return ins;
}
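+/*
+ * handle_enum_has_flag:
+ *
+ *   Emit IR computing ((*enum_this) & enum_flag) == enum_flag, where ENUM_THIS holds
+ * the address of the enum value, using 32 or 64 bit operations depending on the
+ * underlying type of the enum.
+ */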
+static G_GNUC_UNUSED MonoInst*
+handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, MonoInst *enum_flag)
+{
+ MonoType *enum_type = mono_type_get_underlying_type (&klass->byval_arg);
+ guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
+ gboolean is_i4;
+
+ switch (enum_type->type) {
+ case MONO_TYPE_I8:
+ case MONO_TYPE_U8:
+#if SIZEOF_REGISTER == 8
+ case MONO_TYPE_I:
+ case MONO_TYPE_U:
+#endif
+ is_i4 = FALSE;
+ break;
+ default:
+ is_i4 = TRUE;
+ break;
+ }
+
+ {
+ MonoInst *load, *and, *cmp, *ceq;
+ int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
+ int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
+ int dest_reg = alloc_ireg (cfg);
+
+ EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
+ EMIT_NEW_BIALU (cfg, and, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
+ EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
+ EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);
+
+ ceq->type = STACK_I4;
+
+ if (!is_i4) {
+ load = mono_decompose_opcode (cfg, load, NULL);
+ and = mono_decompose_opcode (cfg, and, NULL);
+ cmp = mono_decompose_opcode (cfg, cmp, NULL);
+ ceq = mono_decompose_opcode (cfg, ceq, NULL);
+ }
+
+ return ceq;
+ }
+}
+
/*
* Returns NULL and sets the cfg exception on error.
*/
return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
}
+/*
+ * handle_constrained_gsharedvt_call:
+ *
+ * Handle constrained calls where the receiver is a gsharedvt type.
+ * Return the instruction representing the call. Set the cfg exception on failure.
+ */
+static MonoInst*
+handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_class,
+ gboolean *ref_emit_widen, MonoBasicBlock **ref_bblock)
+{
+ MonoInst *ins = NULL;
+ MonoBasicBlock *bblock = *ref_bblock;
+ gboolean emit_widen = *ref_emit_widen;
+
+ /*
+ * Constrained calls need to behave differently at runtime depending on whether the receiver is instantiated as a ref type or as a vtype.
+ * This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
+ * pack the arguments into an array, and do the rest of the work in an icall.
+ */
+ if (((cmethod->klass == mono_defaults.object_class) || (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
+ (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mini_is_gsharedvt_type (cfg, fsig->ret)) &&
+ (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || fsig->params [0]->byref || mini_is_gsharedvt_type (cfg, fsig->params [0]))))) {
+ MonoInst *args [16];
+
+ /*
+ * This case handles calls to
+ * - object:ToString()/Equals()/GetHashCode(),
+ * - System.IComparable<T>:CompareTo()
+ * - System.IEquatable<T>:Equals ()
+ * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
+ */
+
+ args [0] = sp [0];
+ if (mono_method_check_context_used (cmethod))
+ args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
+ else
+ EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
+ args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS);
+
+ /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
+ if (fsig->hasthis && fsig->param_count) {
+ /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
+ MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
+ ins->dreg = alloc_preg (cfg);
+ ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
+ MONO_ADD_INS (cfg->cbb, ins);
+ args [4] = ins;
+
+ if (mini_is_gsharedvt_type (cfg, fsig->params [0])) {
+ int addr_reg;
+
+ args [3] = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
+
+ EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
+ addr_reg = ins->dreg;
+ EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
+ } else {
+ EMIT_NEW_ICONST (cfg, args [3], 0);
+ EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
+ }
+ } else {
+ EMIT_NEW_ICONST (cfg, args [3], 0);
+ EMIT_NEW_ICONST (cfg, args [4], 0);
+ }
+ ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
+ emit_widen = FALSE;
+
+ if (mini_is_gsharedvt_type (cfg, fsig->ret)) {
+ ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins, &bblock);
+ } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret)) {
+ MonoInst *add;
+
+ /* Unbox */
+ NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
+ MONO_ADD_INS (cfg->cbb, add);
+ /* Load value */
+ NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
+ MONO_ADD_INS (cfg->cbb, ins);
+ /* ins represents the call result */
+ }
+ } else {
+ GSHAREDVT_FAILURE (CEE_CALLVIRT);
+ }
+
+ *ref_emit_widen = emit_widen;
+ *ref_bblock = bblock;
+
+ return ins;
+
+ exception_exit:
+ return NULL;
+}
+
static void
mono_emit_load_got_addr (MonoCompile *cfg)
{
MonoInst *addr;
MonoMethod *addr_method;
int element_size;
+ MonoClass *eclass = cmethod->klass->element_class;
rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
if (rank == 1)
- return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
+ return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE);
#ifndef MONO_ARCH_EMULATE_MUL_DIV
/* emit_ldelema_2 depends on OP_LMUL */
- if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
- return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
+ if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (cfg, eclass)) {
+ return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]);
}
#endif
- element_size = mono_class_array_element_size (cmethod->klass->element_class);
+ if (mini_is_gsharedvt_variable_klass (cfg, eclass))
+ element_size = 0;
+ else
+ element_size = mono_class_array_element_size (eclass);
addr_method = mono_marshal_get_array_address (rank, element_size);
addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count == 2) {
int dreg = alloc_ireg (cfg);
int index_reg = alloc_preg (cfg);
- int mult_reg = alloc_preg (cfg);
int add_reg = alloc_preg (cfg);
#if SIZEOF_REGISTER == 8
#if defined(TARGET_X86) || defined(TARGET_AMD64)
EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
add_reg = ins->dreg;
- /* Avoid a warning */
- mult_reg = 0;
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
add_reg, 0);
#else
+ int mult_reg = alloc_preg (cfg);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
#endif
- type_from_op (ins, NULL, NULL);
+ type_from_op (cfg, ins, NULL, NULL);
return ins;
} else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count == 1) {
int dreg = alloc_ireg (cfg);
int vt_reg = alloc_preg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
- type_from_op (ins, NULL, NULL);
+ type_from_op (cfg, ins, NULL, NULL);
return ins;
#if !defined(MONO_ARCH_EMULATE_MUL_DIV)
args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
- type_from_op (ins, NULL, NULL);
+ type_from_op (cfg, ins, NULL, NULL);
return ins;
} else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count == 1) {
EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
- type_from_op (ins, NULL, NULL);
+ type_from_op (cfg, ins, NULL, NULL);
return ins;
} else
return ins;
} else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) {
return emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
+ } else if (!strcmp (cmethod->name, "VolatileRead") && fsig->param_count == 1) {
+ guint32 opcode = 0;
+ gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
+
+ if (fsig->params [0]->type == MONO_TYPE_I1)
+ opcode = OP_LOADI1_MEMBASE;
+ else if (fsig->params [0]->type == MONO_TYPE_U1)
+ opcode = OP_LOADU1_MEMBASE;
+ else if (fsig->params [0]->type == MONO_TYPE_I2)
+ opcode = OP_LOADI2_MEMBASE;
+ else if (fsig->params [0]->type == MONO_TYPE_U2)
+ opcode = OP_LOADU2_MEMBASE;
+ else if (fsig->params [0]->type == MONO_TYPE_I4)
+ opcode = OP_LOADI4_MEMBASE;
+ else if (fsig->params [0]->type == MONO_TYPE_U4)
+ opcode = OP_LOADU4_MEMBASE;
+ else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
+ opcode = OP_LOADI8_MEMBASE;
+ else if (fsig->params [0]->type == MONO_TYPE_R4)
+ opcode = OP_LOADR4_MEMBASE;
+ else if (fsig->params [0]->type == MONO_TYPE_R8)
+ opcode = OP_LOADR8_MEMBASE;
+ else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
+ opcode = OP_LOAD_MEMBASE;
+
+ if (opcode) {
+ MONO_INST_NEW (cfg, ins, opcode);
+ ins->inst_basereg = args [0]->dreg;
+ ins->inst_offset = 0;
+ MONO_ADD_INS (cfg->cbb, ins);
+
+ switch (fsig->params [0]->type) {
+ case MONO_TYPE_I1:
+ case MONO_TYPE_U1:
+ case MONO_TYPE_I2:
+ case MONO_TYPE_U2:
+ case MONO_TYPE_I4:
+ case MONO_TYPE_U4:
+ ins->dreg = mono_alloc_ireg (cfg);
+ ins->type = STACK_I4;
+ break;
+ case MONO_TYPE_I8:
+ case MONO_TYPE_U8:
+ ins->dreg = mono_alloc_lreg (cfg);
+ ins->type = STACK_I8;
+ break;
+ case MONO_TYPE_I:
+ case MONO_TYPE_U:
+ ins->dreg = mono_alloc_ireg (cfg);
+#if SIZEOF_REGISTER == 8
+ ins->type = STACK_I8;
+#else
+ ins->type = STACK_I4;
+#endif
+ break;
+ case MONO_TYPE_R4:
+ case MONO_TYPE_R8:
+ ins->dreg = mono_alloc_freg (cfg);
+ ins->type = STACK_R8;
+ break;
+ default:
+ g_assert (mini_type_is_reference (cfg, fsig->params [0]));
+ ins->dreg = mono_alloc_ireg_ref (cfg);
+ ins->type = STACK_OBJ;
+ break;
+ }
+
+ if (opcode == OP_LOADI8_MEMBASE)
+ ins = mono_decompose_opcode (cfg, ins, NULL);
+
+ emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
+
+ return ins;
+ }
+ } else if (!strcmp (cmethod->name, "VolatileWrite") && fsig->param_count == 2) {
+ guint32 opcode = 0;
+ gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
+
+ if (fsig->params [0]->type == MONO_TYPE_I1 || fsig->params [0]->type == MONO_TYPE_U1)
+ opcode = OP_STOREI1_MEMBASE_REG;
+ else if (fsig->params [0]->type == MONO_TYPE_I2 || fsig->params [0]->type == MONO_TYPE_U2)
+ opcode = OP_STOREI2_MEMBASE_REG;
+ else if (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_U4)
+ opcode = OP_STOREI4_MEMBASE_REG;
+ else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
+ opcode = OP_STOREI8_MEMBASE_REG;
+ else if (fsig->params [0]->type == MONO_TYPE_R4)
+ opcode = OP_STORER4_MEMBASE_REG;
+ else if (fsig->params [0]->type == MONO_TYPE_R8)
+ opcode = OP_STORER8_MEMBASE_REG;
+ else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
+ opcode = OP_STORE_MEMBASE_REG;
+
+ if (opcode) {
+ emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
+
+ MONO_INST_NEW (cfg, ins, opcode);
+ ins->sreg1 = args [1]->dreg;
+ ins->inst_destbasereg = args [0]->dreg;
+ ins->inst_offset = 0;
+ MONO_ADD_INS (cfg->cbb, ins);
+
+ if (opcode == OP_STOREI8_MEMBASE_REG)
+ ins = mono_decompose_opcode (cfg, ins, NULL);
+
+ return ins;
+ }
}
} else if (cmethod->klass == mono_defaults.monitor_class) {
#if defined(MONO_ARCH_MONITOR_OBJECT_REG)
MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8);
ins->dreg = mono_alloc_preg (cfg);
ins->sreg1 = args [0]->dreg;
+ ins->type = STACK_I8;
ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_SEQ;
MONO_ADD_INS (cfg->cbb, ins);
} else {
load_ins->dreg = mono_alloc_preg (cfg);
load_ins->inst_basereg = args [0]->dreg;
load_ins->inst_offset = 0;
+ load_ins->type = STACK_I8;
MONO_ADD_INS (cfg->cbb, load_ins);
emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
}
}
else if (strcmp (cmethod->name, "Exchange") == 0 && fsig->param_count == 2) {
- guint32 opcode;
+ MonoInst *f2i = NULL, *i2f;
+ guint32 opcode, f2i_opcode, i2f_opcode;
gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
+ gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
- if (fsig->params [0]->type == MONO_TYPE_I4) {
+ if (fsig->params [0]->type == MONO_TYPE_I4 ||
+ fsig->params [0]->type == MONO_TYPE_R4) {
opcode = OP_ATOMIC_EXCHANGE_I4;
+ f2i_opcode = OP_MOVE_F_TO_I4;
+ i2f_opcode = OP_MOVE_I4_TO_F;
cfg->has_atomic_exchange_i4 = TRUE;
}
#if SIZEOF_REGISTER == 8
- else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
- (fsig->params [0]->type == MONO_TYPE_I))
+ else if (is_ref ||
+ fsig->params [0]->type == MONO_TYPE_I8 ||
+ fsig->params [0]->type == MONO_TYPE_R8 ||
+ fsig->params [0]->type == MONO_TYPE_I) {
opcode = OP_ATOMIC_EXCHANGE_I8;
+ f2i_opcode = OP_MOVE_F_TO_I8;
+ i2f_opcode = OP_MOVE_I8_TO_F;
+ }
#else
- else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I)) {
+ else if (is_ref || fsig->params [0]->type == MONO_TYPE_I) {
opcode = OP_ATOMIC_EXCHANGE_I4;
cfg->has_atomic_exchange_i4 = TRUE;
}
if (!mono_arch_opcode_supported (opcode))
return NULL;
+ if (is_float) {
+ /* TODO: Decompose these opcodes instead of bailing here. */
+ if (COMPILE_SOFT_FLOAT (cfg))
+ return NULL;
+
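+ /* Bitcast the float argument into an integer vreg so the integer atomic exchange opcode can be used; the result is bitcast back to float below */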
+ MONO_INST_NEW (cfg, f2i, f2i_opcode);
+ f2i->dreg = mono_alloc_ireg (cfg);
+ f2i->sreg1 = args [1]->dreg;
+ if (f2i_opcode == OP_MOVE_F_TO_I4)
+ f2i->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
+ MONO_ADD_INS (cfg->cbb, f2i);
+ }
+
MONO_INST_NEW (cfg, ins, opcode);
ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
ins->inst_basereg = args [0]->dreg;
ins->inst_offset = 0;
- ins->sreg2 = args [1]->dreg;
+ ins->sreg2 = is_float ? f2i->dreg : args [1]->dreg;
MONO_ADD_INS (cfg->cbb, ins);
switch (fsig->params [0]->type) {
ins->type = STACK_I4;
break;
case MONO_TYPE_I8:
+ ins->type = STACK_I8;
+ break;
case MONO_TYPE_I:
+#if SIZEOF_REGISTER == 8
ins->type = STACK_I8;
+#else
+ ins->type = STACK_I4;
+#endif
+ break;
+ case MONO_TYPE_R4:
+ case MONO_TYPE_R8:
+ ins->type = STACK_R8;
break;
default:
g_assert (mini_type_is_reference (cfg, fsig->params [0]));
break;
}
+ if (is_float) {
+ MONO_INST_NEW (cfg, i2f, i2f_opcode);
+ i2f->dreg = mono_alloc_freg (cfg);
+ i2f->sreg1 = ins->dreg;
+ i2f->type = STACK_R8;
+ if (i2f_opcode == OP_MOVE_I4_TO_F)
+ i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
+ MONO_ADD_INS (cfg->cbb, i2f);
+
+ ins = i2f;
+ }
+
if (cfg->gen_write_barriers && is_ref)
emit_write_barrier (cfg, args [0], args [1]);
}
else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 3) {
- int size = 0;
+ MonoInst *f2i_new = NULL, *f2i_cmp = NULL, *i2f;
+ guint32 opcode, f2i_opcode, i2f_opcode;
gboolean is_ref = mini_type_is_reference (cfg, fsig->params [1]);
+ gboolean is_float = fsig->params [1]->type == MONO_TYPE_R4 || fsig->params [1]->type == MONO_TYPE_R8;
- if (fsig->params [1]->type == MONO_TYPE_I4)
- size = 4;
- else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
- size = sizeof (gpointer);
+ if (fsig->params [1]->type == MONO_TYPE_I4 ||
+ fsig->params [1]->type == MONO_TYPE_R4) {
+ opcode = OP_ATOMIC_CAS_I4;
+ f2i_opcode = OP_MOVE_F_TO_I4;
+ i2f_opcode = OP_MOVE_I4_TO_F;
+ cfg->has_atomic_cas_i4 = TRUE;
+ }
#if SIZEOF_REGISTER == 8
- else if (fsig->params [1]->type == MONO_TYPE_I8)
- size = 8;
+ else if (is_ref ||
+ fsig->params [1]->type == MONO_TYPE_I8 ||
+ fsig->params [1]->type == MONO_TYPE_R8 ||
+ fsig->params [1]->type == MONO_TYPE_I) {
+ opcode = OP_ATOMIC_CAS_I8;
+ f2i_opcode = OP_MOVE_F_TO_I8;
+ i2f_opcode = OP_MOVE_I8_TO_F;
+ }
+#else
+ else if (is_ref || fsig->params [1]->type == MONO_TYPE_I) {
+ opcode = OP_ATOMIC_CAS_I4;
+ cfg->has_atomic_cas_i4 = TRUE;
+ }
#endif
+ else
+ return NULL;
- if (size == 4) {
- if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
- return NULL;
- MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
- ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
- ins->sreg1 = args [0]->dreg;
- ins->sreg2 = args [1]->dreg;
- ins->sreg3 = args [2]->dreg;
- MONO_ADD_INS (cfg->cbb, ins);
- cfg->has_atomic_cas_i4 = TRUE;
- } else if (size == 8) {
- if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I8))
+ if (!mono_arch_opcode_supported (opcode))
+ return NULL;
+
+ if (is_float) {
+ /* TODO: Decompose these opcodes instead of bailing here. */
+ if (COMPILE_SOFT_FLOAT (cfg))
return NULL;
- MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
- ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
- ins->sreg1 = args [0]->dreg;
- ins->sreg2 = args [1]->dreg;
- ins->sreg3 = args [2]->dreg;
- MONO_ADD_INS (cfg->cbb, ins);
- } else {
- /* g_assert_not_reached (); */
+
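+ /* Bitcast both the new value and the comparand to integer vregs; the CAS itself is performed on the integer representation */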
+ MONO_INST_NEW (cfg, f2i_new, f2i_opcode);
+ f2i_new->dreg = mono_alloc_ireg (cfg);
+ f2i_new->sreg1 = args [1]->dreg;
+ if (f2i_opcode == OP_MOVE_F_TO_I4)
+ f2i_new->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
+ MONO_ADD_INS (cfg->cbb, f2i_new);
+
+ MONO_INST_NEW (cfg, f2i_cmp, f2i_opcode);
+ f2i_cmp->dreg = mono_alloc_ireg (cfg);
+ f2i_cmp->sreg1 = args [2]->dreg;
+ if (f2i_opcode == OP_MOVE_F_TO_I4)
+ f2i_cmp->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
+ MONO_ADD_INS (cfg->cbb, f2i_cmp);
}
- if (ins) {
- switch (fsig->params [0]->type) {
- case MONO_TYPE_I4:
- ins->type = STACK_I4;
- break;
- case MONO_TYPE_I8:
- case MONO_TYPE_I:
- ins->type = STACK_I8;
- break;
- default:
- g_assert (mini_type_is_reference (cfg, fsig->params [0]));
- ins->type = STACK_OBJ;
- break;
- }
+ MONO_INST_NEW (cfg, ins, opcode);
+ ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
+ ins->sreg1 = args [0]->dreg;
+ ins->sreg2 = is_float ? f2i_new->dreg : args [1]->dreg;
+ ins->sreg3 = is_float ? f2i_cmp->dreg : args [2]->dreg;
+ MONO_ADD_INS (cfg->cbb, ins);
+
+ switch (fsig->params [1]->type) {
+ case MONO_TYPE_I4:
+ ins->type = STACK_I4;
+ break;
+ case MONO_TYPE_I8:
+ ins->type = STACK_I8;
+ break;
+ case MONO_TYPE_I:
+#if SIZEOF_REGISTER == 8
+ ins->type = STACK_I8;
+#else
+ ins->type = STACK_I4;
+#endif
+ break;
+ case MONO_TYPE_R4:
+ case MONO_TYPE_R8:
+ ins->type = STACK_R8;
+ break;
+ default:
+ g_assert (mini_type_is_reference (cfg, fsig->params [1]));
+ ins->type = STACK_OBJ;
+ break;
+ }
+
+ if (is_float) {
+ MONO_INST_NEW (cfg, i2f, i2f_opcode);
+ i2f->dreg = mono_alloc_freg (cfg);
+ i2f->sreg1 = ins->dreg;
+ i2f->type = STACK_R8;
+ if (i2f_opcode == OP_MOVE_I4_TO_F)
+ i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
+ MONO_ADD_INS (cfg->cbb, i2f);
+
+ ins = i2f;
}
if (cfg->gen_write_barriers && is_ref)
cmp->type = STACK_I4;
MONO_ADD_INS (cfg->cbb, cmp);
- MONO_INST_NEW (cfg, ceq, OP_CEQ);
+ MONO_INST_NEW (cfg, ceq, OP_ICEQ);
ceq->dreg = alloc_ireg (cfg);
ceq->type = STACK_I4;
MONO_ADD_INS (cfg->cbb, ceq);
if (!strcmp (cmethod->name, "Read") && fsig->param_count == 1) {
guint32 opcode = 0;
gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
+ gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
if (fsig->params [0]->type == MONO_TYPE_I1)
opcode = OP_ATOMIC_LOAD_I1;
opcode = OP_ATOMIC_LOAD_I4;
else if (fsig->params [0]->type == MONO_TYPE_U4)
opcode = OP_ATOMIC_LOAD_U4;
+ else if (fsig->params [0]->type == MONO_TYPE_R4)
+ opcode = OP_ATOMIC_LOAD_R4;
+ else if (fsig->params [0]->type == MONO_TYPE_R8)
+ opcode = OP_ATOMIC_LOAD_R8;
#if SIZEOF_REGISTER == 8
else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_I)
opcode = OP_ATOMIC_LOAD_I8;
return NULL;
MONO_INST_NEW (cfg, ins, opcode);
- ins->dreg = mono_alloc_ireg (cfg);
+ ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : (is_float ? mono_alloc_freg (cfg) : mono_alloc_ireg (cfg));
ins->sreg1 = args [0]->dreg;
ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ;
MONO_ADD_INS (cfg->cbb, ins);
+
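+ /* Set the eval stack type matching the loaded value */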
+ switch (fsig->params [0]->type) {
+ case MONO_TYPE_BOOLEAN:
+ case MONO_TYPE_I1:
+ case MONO_TYPE_U1:
+ case MONO_TYPE_I2:
+ case MONO_TYPE_U2:
+ case MONO_TYPE_I4:
+ case MONO_TYPE_U4:
+ ins->type = STACK_I4;
+ break;
+ case MONO_TYPE_I8:
+ case MONO_TYPE_U8:
+ ins->type = STACK_I8;
+ break;
+ case MONO_TYPE_I:
+ case MONO_TYPE_U:
+#if SIZEOF_REGISTER == 8
+ ins->type = STACK_I8;
+#else
+ ins->type = STACK_I4;
+#endif
+ break;
+ case MONO_TYPE_R4:
+ case MONO_TYPE_R8:
+ ins->type = STACK_R8;
+ break;
+ default:
+ g_assert (mini_type_is_reference (cfg, fsig->params [0]));
+ ins->type = STACK_OBJ;
+ break;
+ }
}
}
opcode = OP_ATOMIC_STORE_I4;
else if (fsig->params [0]->type == MONO_TYPE_U4)
opcode = OP_ATOMIC_STORE_U4;
+ else if (fsig->params [0]->type == MONO_TYPE_R4)
+ opcode = OP_ATOMIC_STORE_R4;
+ else if (fsig->params [0]->type == MONO_TYPE_R8)
+ opcode = OP_ATOMIC_STORE_R8;
#if SIZEOF_REGISTER == 8
else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_I)
opcode = OP_ATOMIC_STORE_I8;
g_assert (vtable); /*Should not fail since it System.String*/
#ifndef MONO_CROSS_COMPILE
- managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE);
+ managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);
#endif
if (!managed_alloc)
return NULL;
emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
{
static double r8_0 = 0.0;
+ static float r4_0 = 0.0;
MonoInst *ins;
int t;
- rtype = mini_replace_type (rtype);
+ rtype = mini_get_underlying_type (cfg, rtype);
t = rtype->type;
if (rtype->byref) {
MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
} else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
+ } else if (cfg->r4fp && t == MONO_TYPE_R4) {
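+ /* With r4fp, an R4 local is zero-initialized with a single precision constant instead of a double one */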
+ MONO_INST_NEW (cfg, ins, OP_R4CONST);
+ ins->type = STACK_R4;
+ ins->inst_p0 = (void*)&r4_0;
+ ins->dreg = dreg;
+ MONO_ADD_INS (cfg->cbb, ins);
} else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
MONO_INST_NEW (cfg, ins, OP_R8CONST);
ins->type = STACK_R8;
{
int t;
- rtype = mini_replace_type (rtype);
+ rtype = mini_get_underlying_type (cfg, rtype);
t = rtype->type;
if (rtype->byref) {
MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
} else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
+ } else if (cfg->r4fp && t == MONO_TYPE_R4) {
+ MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST);
} else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
} else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
return 0;
#endif
+ if (!fsig)
+ fsig = mono_method_signature (cmethod);
+
if (cfg->verbose_level > 2)
printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
prev_ret_var_set = cfg->ret_var_set;
prev_disable_inline = cfg->disable_inline;
- if (*ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
+ if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
virtual = TRUE;
costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual);
cfg->cbb = ebblock;
}
- *out_cbb = cfg->cbb;
+ if (out_cbb)
+ *out_cbb = cfg->cbb;
if (rvar) {
/*
token = read32 (ip + 2);
klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
CHECK_TYPELOAD (klass);
- type = mini_replace_type (&klass->byval_arg);
+ type = mini_get_underlying_type (cfg, &klass->byval_arg);
emit_init_local (cfg, local, type, TRUE);
return ip + 6;
}
for (i = 0; i < attrs->num_attrs; ++i) {
MonoCustomAttrEntry *attr = &attrs->attrs [i];
const gchar *p;
- int len;
MonoMethodSignature *sig;
if (!attr->ctor || attr->ctor->klass != klass)
continue;
/* Decode the attribute. See reflection.c */
- len = attr->data_size;
p = (const char*)attr->data;
g_assert (read16 (p) == 0x0001);
p += 2;
MonoImage *image;
guint32 token, ins_flag;
MonoClass *klass;
- MonoClass *constrained_call = NULL;
+ MonoClass *constrained_class = NULL;
unsigned char *ip, *end, *target, *err_pos;
MonoMethodSignature *sig;
MonoGenericContext *generic_context = NULL;
dreg = alloc_freg (cfg);
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
- ins->type = STACK_R8;
+ ins->type = cfg->r4_stack_type;
} else {
MONO_INST_NEW (cfg, ins, OP_R4CONST);
- ins->type = STACK_R8;
+ ins->type = cfg->r4_stack_type;
ins->dreg = alloc_dreg (cfg, STACK_R8);
ins->inst_p0 = f;
MONO_ADD_INS (bblock, ins);
start_new_bblock = 1;
break;
}
- case CEE_CALLI:
+ case CEE_CALLI: {
+ MonoInst *addr;
+ MonoMethodSignature *fsig;
+
+ CHECK_OPSIZE (5);
+ token = read32 (ip + 1);
+
+ ins = NULL;
+
+ //GSHAREDVT_FAILURE (*ip);
+ cmethod = NULL;
+ CHECK_STACK (1);
+ --sp;
+ addr = *sp;
+ fsig = mini_get_signature (method, token, generic_context);
+
+ if (method->dynamic && fsig->pinvoke) {
+ MonoInst *args [3];
+
+ /*
+ * This is a call through a function pointer using a pinvoke
+ * signature. Have to create a wrapper and call that instead.
+ * FIXME: This is very slow, need to create a wrapper at JIT time
+ * instead based on the signature.
+ */
+ EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
+ EMIT_NEW_PCONST (cfg, args [1], fsig);
+ args [2] = addr;
+ addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
+ }
+
+ n = fsig->param_count + fsig->hasthis;
+
+ CHECK_STACK (n);
+
+ //g_assert (!virtual || fsig->hasthis);
+
+ sp -= n;
+
+ inline_costs += 10 * num_calls++;
+
+ /*
+ * Making generic calls out of gsharedvt methods.
+ * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
+ * patching gshared method addresses into a gsharedvt method.
+ */
+ if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
+ /*
+ * We pass the address to the gsharedvt trampoline in the rgctx reg
+ */
+ MonoInst *callee = addr;
+
+ if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
+ /* Not tested */
+ GSHAREDVT_FAILURE (*ip);
+
+ addr = emit_get_rgctx_sig (cfg, context_used,
+ fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
+ ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
+ goto calli_end;
+ }
+
+ /* Prevent inlining of methods with indirect calls */
+ INLINE_FAILURE ("indirect call");
+
+ if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
+ int info_type;
+ gpointer info_data;
+
+ /*
+ * Instead of emitting an indirect call, emit a direct call
+ * with the contents of the aotconst as the patch info.
+ */
+ if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
+ info_type = addr->inst_c1;
+ info_data = addr->inst_p0;
+ } else {
+ info_type = addr->inst_right->inst_c1;
+ info_data = addr->inst_right->inst_left;
+ }
+
+ if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
+ ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
+ NULLIFY_INS (addr);
+ goto calli_end;
+ }
+ }
+ ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
+
+ calli_end:
+
+ /* End of call, INS should contain the result of the call, if any */
+
+ if (!MONO_TYPE_IS_VOID (fsig->ret)) {
+ g_assert (ins);
+ *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
+ }
+
+ CHECK_CFG_EXCEPTION;
+
+ ip += 5;
+ ins_flag = 0;
+ constrained_class = NULL;
+ break;
+ }
case CEE_CALL:
case CEE_CALLVIRT: {
MonoInst *addr = NULL;
MonoMethodSignature *fsig = NULL;
int array_rank = 0;
int virtual = *ip == CEE_CALLVIRT;
- int calli = *ip == CEE_CALLI;
gboolean pass_imt_from_rgctx = FALSE;
MonoInst *imt_arg = NULL;
MonoInst *keep_this_alive = NULL;
gboolean push_res = TRUE;
gboolean skip_ret = FALSE;
gboolean delegate_invoke = FALSE;
+ gboolean direct_icall = FALSE;
+ MonoMethod *cil_method;
CHECK_OPSIZE (5);
token = read32 (ip + 1);
ins = NULL;
- if (calli) {
- //GSHAREDVT_FAILURE (*ip);
- cmethod = NULL;
- CHECK_STACK (1);
- --sp;
- addr = *sp;
- fsig = mini_get_signature (method, token, generic_context);
- n = fsig->param_count + fsig->hasthis;
-
- if (method->dynamic && fsig->pinvoke) {
- MonoInst *args [3];
-
- /*
- * This is a call through a function pointer using a pinvoke
- * signature. Have to create a wrapper and call that instead.
- * FIXME: This is very slow, need to create a wrapper at JIT time
- * instead based on the signature.
- */
- EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
- EMIT_NEW_PCONST (cfg, args [1], fsig);
- args [2] = addr;
- addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
- }
- } else {
- MonoMethod *cil_method;
-
- cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
- cil_method = cmethod;
+ cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
+ cil_method = cmethod;
- if (constrained_call) {
- if (method->wrapper_type != MONO_WRAPPER_NONE) {
- if (cfg->verbose_level > 2)
- printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_call));
- if (!((constrained_call->byval_arg.type == MONO_TYPE_VAR ||
- constrained_call->byval_arg.type == MONO_TYPE_MVAR) &&
- cfg->generic_sharing_context)) {
- cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_call, generic_context, &cfg->error);
- CHECK_CFG_ERROR;
- }
- } else {
- if (cfg->verbose_level > 2)
- printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_call));
-
- if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
- /*
- * This is needed since get_method_constrained can't find
- * the method in klass representing a type var.
- * The type var is guaranteed to be a reference type in this
- * case.
- */
- if (!mini_is_gsharedvt_klass (cfg, constrained_call))
- g_assert (!cmethod->klass->valuetype);
- } else {
- cmethod = mono_get_method_constrained_checked (image, token, constrained_call, generic_context, &cil_method, &cfg->error);
- CHECK_CFG_ERROR;
+ if (constrained_class) {
+ if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
+ if (!mini_is_gsharedvt_klass (cfg, constrained_class)) {
+ g_assert (!cmethod->klass->valuetype);
+ if (!mini_type_is_reference (cfg, &constrained_class->byval_arg)) {
+ /* FIXME: gshared type constrained to a primitive type */
+ GENERIC_SHARING_FAILURE (CEE_CALL);
}
}
}
-
- if (!cmethod || mono_loader_get_last_error ())
- LOAD_ERROR;
- if (!dont_verify && !cfg->skip_visibility) {
- MonoMethod *target_method = cil_method;
- if (method->is_inflated) {
- target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
- }
- if (!mono_method_can_access_method (method_definition, target_method) &&
- !mono_method_can_access_method (method, cil_method))
- METHOD_ACCESS_FAILURE (method, cil_method);
- }
-
- if (mono_security_core_clr_enabled ())
- ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
-
- if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
- /* MS.NET seems to silently convert this to a callvirt */
- virtual = 1;
-
- {
- /*
- * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
- * converts to a callvirt.
- *
- * tests/bug-515884.il is an example of this behavior
- */
- const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
- const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
- if (!virtual && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
- virtual = 1;
- }
-
- if (!cmethod->klass->inited)
- if (!mono_class_init (cmethod->klass))
- TYPE_LOAD_ERROR (cmethod->klass);
- if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
- mini_class_is_system_array (cmethod->klass)) {
- array_rank = cmethod->klass->rank;
- fsig = mono_method_signature (cmethod);
+ if (method->wrapper_type != MONO_WRAPPER_NONE) {
+ if (cfg->verbose_level > 2)
+ printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class));
+ if (!((constrained_class->byval_arg.type == MONO_TYPE_VAR ||
+ constrained_class->byval_arg.type == MONO_TYPE_MVAR) &&
+ cfg->generic_sharing_context)) {
+ cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_class, generic_context, &cfg->error);
+ CHECK_CFG_ERROR;
+ }
} else {
- fsig = mono_method_signature (cmethod);
-
- if (!fsig)
- LOAD_ERROR;
+ if (cfg->verbose_level > 2)
+ printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class));
- if (fsig->pinvoke) {
- MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
- check_for_pending_exc, cfg->compile_aot);
- fsig = mono_method_signature (wrapper);
- } else if (constrained_call) {
- fsig = mono_method_signature (cmethod);
+ if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
+ /*
+ * This is needed since get_method_constrained can't find
+ * the method in klass representing a type var.
+ * The type var is guaranteed to be a reference type in this
+ * case.
+ */
+ if (!mini_is_gsharedvt_klass (cfg, constrained_class))
+ g_assert (!cmethod->klass->valuetype);
} else {
- fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
+ cmethod = mono_get_method_constrained_checked (image, token, constrained_class, generic_context, &cil_method, &cfg->error);
CHECK_CFG_ERROR;
}
}
+ }
+
+ if (!cmethod || mono_loader_get_last_error ())
+ LOAD_ERROR;
+ if (!dont_verify && !cfg->skip_visibility) {
+ MonoMethod *target_method = cil_method;
+ if (method->is_inflated) {
+ target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
+ }
+ if (!mono_method_can_access_method (method_definition, target_method) &&
+ !mono_method_can_access_method (method, cil_method))
+ METHOD_ACCESS_FAILURE (method, cil_method);
+ }
- mono_save_token_info (cfg, image, token, cil_method);
-
- if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
- need_seq_point = TRUE;
+ if (mono_security_core_clr_enabled ())
+ ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
- n = fsig->param_count + fsig->hasthis;
+ if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
+ /* MS.NET seems to silently convert this to a callvirt */
+ virtual = 1;
- /* Don't support calls made using type arguments for now */
+ {
/*
- if (cfg->gsharedvt) {
- if (mini_is_gsharedvt_signature (cfg, fsig))
- GSHAREDVT_FAILURE (*ip);
- }
- */
+ * MS.NET accepts non-virtual calls to virtual final methods of transparent proxy classes and
+ * converts them to a callvirt.
+ *
+ * tests/bug-515884.il is an example of this behavior
+ */
+ const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
+ const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
+ if (!virtual && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
+ virtual = 1;
+ }
- if (mono_security_cas_enabled ()) {
- if (check_linkdemand (cfg, method, cmethod))
- INLINE_FAILURE ("linkdemand");
- CHECK_CFG_EXCEPTION;
- }
+ if (!cmethod->klass->inited)
+ if (!mono_class_init (cmethod->klass))
+ TYPE_LOAD_ERROR (cmethod->klass);
- if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
- g_assert_not_reached ();
+ fsig = mono_method_signature (cmethod);
+ if (!fsig)
+ LOAD_ERROR;
+ if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
+ mini_class_is_system_array (cmethod->klass)) {
+ array_rank = cmethod->klass->rank;
+ } else if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && icall_is_direct_callable (cfg, cmethod)) {
+ direct_icall = TRUE;
+ } else if (fsig->pinvoke) {
+ MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
+ check_for_pending_exc, cfg->compile_aot);
+ fsig = mono_method_signature (wrapper);
+ } else if (constrained_class) {
+ } else {
+ fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
+ CHECK_CFG_ERROR;
+ }
+
+ mono_save_token_info (cfg, image, token, cil_method);
+
+ if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
+ need_seq_point = TRUE;
+
+ /* Don't support calls made using type arguments for now */
+ /*
+ if (cfg->gsharedvt) {
+ if (mini_is_gsharedvt_signature (cfg, fsig))
+ GSHAREDVT_FAILURE (*ip);
+ }
+ */
+
+ if (mono_security_cas_enabled ()) {
+ if (check_linkdemand (cfg, method, cmethod))
+ INLINE_FAILURE ("linkdemand");
+ CHECK_CFG_EXCEPTION;
}
- if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
+ if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
+ g_assert_not_reached ();
+
+ n = fsig->param_count + fsig->hasthis;
+
+ if (!cfg->generic_sharing_context && cmethod->klass->generic_container)
UNVERIFIED;
- if (!cfg->generic_sharing_context && cmethod)
+ if (!cfg->generic_sharing_context)
g_assert (!mono_method_check_context_used (cmethod));
CHECK_STACK (n);
sp -= n;
- if (constrained_call) {
- if (mini_is_gsharedvt_klass (cfg, constrained_call)) {
- /*
- * Constrained calls need to behave differently at runtime dependending on whenever the receiver is instantiated as ref type or as a vtype.
- */
- if ((cmethod->klass != mono_defaults.object_class) && constrained_call->valuetype && cmethod->klass->valuetype) {
+ if (constrained_class) {
+ if (mini_is_gsharedvt_klass (cfg, constrained_class)) {
+ if ((cmethod->klass != mono_defaults.object_class) && constrained_class->valuetype && cmethod->klass->valuetype) {
/* The 'Own method' case below */
} else if (cmethod->klass->image != mono_defaults.corlib && !(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !cmethod->klass->valuetype) {
/* 'The type parameter is instantiated as a reference type' case below. */
- } else if (((cmethod->klass == mono_defaults.object_class) || (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
- (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mini_is_gsharedvt_type (cfg, fsig->ret)) &&
- (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || mini_is_gsharedvt_type (cfg, fsig->params [0]))))) {
- MonoInst *args [16];
-
- /*
- * This case handles calls to
- * - object:ToString()/Equals()/GetHashCode(),
- * - System.IComparable<T>:CompareTo()
- * - System.IEquatable<T>:Equals ()
- * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
- */
-
- args [0] = sp [0];
- if (mono_method_check_context_used (cmethod))
- args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
- else
- EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
- args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_call), constrained_call, MONO_RGCTX_INFO_KLASS);
-
- /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
- if (fsig->hasthis && fsig->param_count) {
- /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
- MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
- ins->dreg = alloc_preg (cfg);
- ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
- MONO_ADD_INS (cfg->cbb, ins);
- args [4] = ins;
-
- if (mini_is_gsharedvt_type (cfg, fsig->params [0])) {
- int addr_reg;
-
- args [3] = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
-
- EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
- addr_reg = ins->dreg;
- EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
- } else {
- EMIT_NEW_ICONST (cfg, args [3], 0);
- EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
- }
- } else {
- EMIT_NEW_ICONST (cfg, args [3], 0);
- EMIT_NEW_ICONST (cfg, args [4], 0);
- }
- ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
- emit_widen = FALSE;
-
- if (mini_is_gsharedvt_type (cfg, fsig->ret)) {
- ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins, &bblock);
- } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret)) {
- MonoInst *add;
-
- /* Unbox */
- NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
- MONO_ADD_INS (cfg->cbb, add);
- /* Load value */
- NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
- MONO_ADD_INS (cfg->cbb, ins);
- /* ins represents the call result */
- }
-
- goto call_end;
} else {
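+ /*
+ * Whether the receiver is instantiated as a ref type or as a vtype is
+ * only known at runtime here, so emit a call to a helper which
+ * dispatches on the actual instantiation.
+ */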
- GSHAREDVT_FAILURE (*ip);
+ ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, &emit_widen, &bblock);
+ CHECK_CFG_EXCEPTION;
+ g_assert (ins);
+ goto call_end;
}
}
+
/*
* We have the `constrained.' prefix opcode.
*/
- if (constrained_call->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
+ if (constrained_class->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
/*
* The type parameter is instantiated as a valuetype,
* but that type doesn't override the method we're
* calling, so we need to box `this'.
*/
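+ /*
+ * E.g. a constrained. call to Object::ToString () on a struct that does
+ * not override it must box the receiver before dispatching.
+ */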
- EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
- ins->klass = constrained_call;
- sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call), &bblock);
+ EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
+ ins->klass = constrained_class;
+ sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class), &bblock);
CHECK_CFG_EXCEPTION;
- } else if (!constrained_call->valuetype) {
+ } else if (!constrained_class->valuetype) {
int dreg = alloc_ireg_ref (cfg);
/* Interface method */
int ioffset, slot;
- mono_class_setup_vtable (constrained_call);
- CHECK_TYPELOAD (constrained_call);
- ioffset = mono_class_interface_offset (constrained_call, cmethod->klass);
+ mono_class_setup_vtable (constrained_class);
+ CHECK_TYPELOAD (constrained_class);
+ ioffset = mono_class_interface_offset (constrained_class, cmethod->klass);
if (ioffset == -1)
- TYPE_LOAD_ERROR (constrained_call);
+ TYPE_LOAD_ERROR (constrained_class);
slot = mono_method_get_vtable_slot (cmethod);
if (slot == -1)
TYPE_LOAD_ERROR (cmethod->klass);
- cmethod = constrained_call->vtable [ioffset + slot];
+ cmethod = constrained_class->vtable [ioffset + slot];
if (cmethod->klass == mono_defaults.enum_class) {
/* Enum implements some interfaces, so treat this as the first case */
- EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
- ins->klass = constrained_call;
- sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call), &bblock);
+ EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
+ ins->klass = constrained_class;
+ sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class), &bblock);
CHECK_CFG_EXCEPTION;
}
}
virtual = 0;
}
- constrained_call = NULL;
+ constrained_class = NULL;
}
- if (!calli && check_call_signature (cfg, fsig, sp))
+ if (check_call_signature (cfg, fsig, sp))
UNVERIFIED;
#ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
- if (cmethod && (cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
+ if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
delegate_invoke = TRUE;
#endif
- if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
+ if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
bblock = cfg->cbb;
if (!MONO_TYPE_IS_VOID (fsig->ret)) {
type_to_eval_stack_type ((cfg), fsig->ret, ins);
* If the callee is a shared method, then its static cctor
* might not get called after the call was patched.
*/
- if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
+ if (cfg->generic_sharing_context && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
emit_generic_class_init (cfg, cmethod->klass);
CHECK_TYPELOAD (cmethod->klass);
}
- if (cmethod)
- check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
+ check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
- if (cfg->generic_sharing_context && cmethod) {
+ if (cfg->generic_sharing_context) {
MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
context_used = mini_method_check_context_used (cfg, cmethod);
if (pass_imt_from_rgctx) {
g_assert (!pass_vtable);
- g_assert (cmethod);
imt_arg = emit_get_rgctx_method (cfg, context_used,
cmethod, MONO_RGCTX_INFO_METHOD);
MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
/* Calling virtual generic methods */
- if (cmethod && virtual &&
- (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
+ if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
!(MONO_METHOD_IS_FINAL (cmethod) &&
cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
fsig->generic_param_count &&
* To work around this, we extend such try blocks to include the last x bytes
* of the Monitor.Enter () call.
*/
- if (cmethod && cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
+ if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
MonoBasicBlock *tbb;
GET_BBLOCK (cfg, tbb, ip + 5);
}
/* Conversion to a JIT intrinsic */
- if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
+ if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
bblock = cfg->cbb;
if (!MONO_TYPE_IS_VOID (fsig->ret)) {
type_to_eval_stack_type ((cfg), fsig->ret, ins);
}
/* Inlining */
- if (cmethod && (cfg->opt & MONO_OPT_INLINE) &&
+ if ((cfg->opt & MONO_OPT_INLINE) &&
(!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
mono_method_check_inlining (cfg, cmethod)) {
int costs;
* This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
* patching gshared method addresses into a gsharedvt method.
*/
- if (cmethod && cfg->gsharedvt && (mini_is_gsharedvt_signature (cfg, fsig) || cmethod->is_inflated || cmethod->klass->generic_class)) {
+ if (cfg->gsharedvt && (mini_is_gsharedvt_signature (cfg, fsig) || cmethod->is_inflated || cmethod->klass->generic_class) &&
+ !(cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY)) {
MonoRgctxInfoType info_type;
if (virtual) {
}
}
- if (cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY)
- /* test_0_multi_dim_arrays () in gshared.cs */
- GSHAREDVT_FAILURE (*ip);
-
if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
keep_this_alive = sp [0];
ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
goto call_end;
- } else if (calli && cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
- /*
- * We pass the address to the gsharedvt trampoline in the rgctx reg
- */
- MonoInst *callee = addr;
-
- if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
- /* Not tested */
- GSHAREDVT_FAILURE (*ip);
-
- addr = emit_get_rgctx_sig (cfg, context_used,
- fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
- ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
- goto call_end;
}
/* Generic sharing */
goto call_end;
}
- /* Indirect calls */
- if (addr) {
- if (call_opcode == CEE_CALL)
- g_assert (context_used);
- else if (call_opcode == CEE_CALLI)
- g_assert (!vtable_arg);
- else
- /* FIXME: what the hell is this??? */
- g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
- !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
+ /* Direct calls to icalls */
+ if (direct_icall) {
+ MonoMethod *wrapper;
+ int costs;
- /* Prevent inlining of methods with indirect calls */
- INLINE_FAILURE ("indirect call");
+ /* Inline the wrapper */
+ wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
- if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
- int info_type;
- gpointer info_data;
+ costs = inline_method (cfg, wrapper, fsig, sp, ip, cfg->real_offset, TRUE, &bblock);
+ g_assert (costs > 0);
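+ /* The call site is 5 bytes: the call opcode plus a 32-bit method token */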
+ cfg->real_offset += 5;
- /*
- * Instead of emitting an indirect call, emit a direct call
- * with the contents of the aotconst as the patch info.
- */
- if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
- info_type = addr->inst_c1;
- info_data = addr->inst_p0;
- } else {
- info_type = addr->inst_right->inst_c1;
- info_data = addr->inst_right->inst_left;
- }
-
- if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
- ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
- NULLIFY_INS (addr);
- goto call_end;
- }
+ if (!MONO_TYPE_IS_VOID (fsig->ret)) {
+ /* *sp is already set by inline_method */
+ sp++;
+ push_res = FALSE;
}
- ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
+
+ inline_costs += costs;
+
goto call_end;
}
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
emit_write_barrier (cfg, addr, val);
+ if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cfg, cmethod->klass))
+ GSHAREDVT_FAILURE (*ip);
} else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
/* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
/* FIXME: runtime generic context pointer for jumps? */
/* FIXME: handle this for generic sharing eventually */
- if (cmethod && (ins_flag & MONO_INST_TAILCALL) &&
+ if ((ins_flag & MONO_INST_TAILCALL) &&
!vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
supported_tail_call = TRUE;
ip += 1;
}
ins_flag = 0;
- constrained_call = NULL;
+ constrained_class = NULL;
if (need_seq_point)
emit_seq_point (cfg, method, ip, FALSE, TRUE);
break;
emit_pop_lmf (cfg);
if (cfg->ret) {
- MonoType *ret_type = mini_replace_type (mono_method_signature (method)->ret);
+ MonoType *ret_type = mini_get_underlying_type (cfg, mono_method_signature (method)->ret);
if (seq_points && !sym_seq_points) {
/*
MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
cmp->sreg1 = sp [0]->dreg;
- type_from_op (cmp, sp [0], NULL);
+ type_from_op (cfg, cmp, sp [0], NULL);
CHECK_TYPE (cmp);
#if SIZEOF_REGISTER == 4
MONO_ADD_INS (bblock, cmp);
MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
- type_from_op (ins, sp [0], NULL);
+ type_from_op (cfg, ins, sp [0], NULL);
MONO_ADD_INS (bblock, ins);
ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
GET_BBLOCK (cfg, tblock, target);
NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
ins->type = ldind_type [*ip - CEE_LDIND_I1];
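+ /* With cfg->r4fp the backend keeps r4 values in single precision, so an r4 load pushes STACK_R4 instead of widening to r8 */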
+ if (*ip == CEE_LDIND_R4)
+ ins->type = cfg->r4_stack_type;
ins->flags |= ins_flag;
MONO_ADD_INS (bblock, ins);
*sp++ = ins;
sp -= 2;
ins->sreg1 = sp [0]->dreg;
ins->sreg2 = sp [1]->dreg;
- type_from_op (ins, sp [0], sp [1]);
+ type_from_op (cfg, ins, sp [0], sp [1]);
CHECK_TYPE (ins);
ins->dreg = alloc_dreg ((cfg), (ins)->type);
MONO_ADD_INS ((cfg)->cbb, (ins));
- *sp++ = mono_decompose_opcode (cfg, ins);
+ *sp++ = mono_decompose_opcode (cfg, ins, &bblock);
ip++;
break;
case CEE_ADD:
sp -= 2;
ins->sreg1 = sp [0]->dreg;
ins->sreg2 = sp [1]->dreg;
- type_from_op (ins, sp [0], sp [1]);
+ type_from_op (cfg, ins, sp [0], sp [1]);
CHECK_TYPE (ins);
- ADD_WIDEN_OP (ins, sp [0], sp [1]);
+ add_widen_op (cfg, ins, &sp [0], &sp [1]);
ins->dreg = alloc_dreg ((cfg), (ins)->type);
/* FIXME: Pass opcode to is_inst_imm */
ins->inst_imm = (gssize)(sp [1]->inst_c0);
ins->sreg2 = -1;
- /* Might be followed by an instruction added by ADD_WIDEN_OP */
+ /* Might be followed by an instruction added by add_widen_op */
if (sp [1]->next == NULL)
NULLIFY_INS (sp [1]);
}
}
MONO_ADD_INS ((cfg)->cbb, (ins));
- *sp++ = mono_decompose_opcode (cfg, ins);
+ *sp++ = mono_decompose_opcode (cfg, ins, &bblock);
ip++;
break;
case CEE_NEG:
case CEE_CONV_OVF_U:
CHECK_STACK (1);
- if (sp [-1]->type == STACK_R8) {
+ if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
ADD_UNOP (CEE_CONV_OVF_I8);
ADD_UNOP (*ip);
} else {
case CEE_CONV_OVF_U4:
CHECK_STACK (1);
- if (sp [-1]->type == STACK_R8) {
+ if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
ADD_UNOP (CEE_CONV_OVF_U8);
ADD_UNOP (*ip);
} else {
if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
MonoInst *args [3];
+ int idx;
/* obj */
args [0] = *sp;
EMIT_NEW_CLASSCONST (cfg, args [1], klass);
/* inline cache*/
- if (cfg->compile_aot)
- EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
- else
+ if (cfg->compile_aot) {
+ idx = get_castclass_cache_idx (cfg);
+ EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
+ } else {
EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
+ }
*sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
ip += 5;
token = read32 (ip + 1);
klass = mini_get_class (method, token, generic_context);
CHECK_TYPELOAD (klass);
-
+
mono_save_token_info (cfg, image, token, klass);
context_used = mini_class_check_context_used (cfg, klass);
}
case CEE_BOX: {
MonoInst *val;
+ MonoClass *enum_class;
+ MonoMethod *has_flag;
CHECK_STACK (1);
--sp;
UNVERIFIED;
/* frequent check in generic code: box (struct), brtrue */
+ /*
+ * Look for:
+ *
+ * <push int/long ptr>
+ * <push int/long>
+ * box MyFlags
+ * constrained. MyFlags
+ * callvirt instance bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
+ *
+ * If we find this sequence and the operand types on box and constrained
+ * are equal, we can emit a specialized instruction sequence instead of
+ * the very slow HasFlag () call.
+ */
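+ /*
+ * E.g. (for a hypothetical [Flags] enum MyFlags) C# such as
+ *     if (x.HasFlag (MyFlags.Foo)) ...
+ * is typically compiled to exactly this box/constrained./callvirt shape.
+ */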
+ if ((cfg->opt & MONO_OPT_INTRINS) &&
+ /* Cheap checks first. */
+ ip + 5 + 6 + 5 < end &&
+ ip [5] == CEE_PREFIX1 &&
+ ip [6] == CEE_CONSTRAINED_ &&
+ ip [11] == CEE_CALLVIRT &&
+ ip_in_bb (cfg, bblock, ip + 5 + 6 + 5) &&
+ mono_class_is_enum (klass) &&
+ (enum_class = mini_get_class (method, read32 (ip + 7), generic_context)) &&
+ (has_flag = mini_get_method (cfg, method, read32 (ip + 12), NULL, generic_context)) &&
+ has_flag->klass == mono_defaults.enum_class &&
+ !strcmp (has_flag->name, "HasFlag") &&
+ has_flag->signature->hasthis &&
+ has_flag->signature->param_count == 1) {
+ CHECK_TYPELOAD (enum_class);
+
+ if (enum_class == klass) {
+ MonoInst *enum_this, *enum_flag;
+
+ ip += 5 + 6 + 5;
+ --sp;
+
+ enum_this = sp [0];
+ enum_flag = sp [1];
+
+ *sp++ = handle_enum_has_flag (cfg, klass, enum_this, enum_flag);
+ break;
+ }
+ }
+
// FIXME: LLVM can't handle the inconsistent bb linking
if (!mono_class_is_nullable (klass) &&
ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
ins->type = STACK_I4;
ins->dreg = alloc_ireg (cfg);
MONO_ADD_INS (cfg->cbb, ins);
- *sp = mono_decompose_opcode (cfg, ins);
+ *sp = mono_decompose_opcode (cfg, ins, &bblock);
}
if (context_used) {
ins->type = STACK_R8;
MONO_ADD_INS (bblock, ins);
- *sp++ = mono_decompose_opcode (cfg, ins);
+ *sp++ = mono_decompose_opcode (cfg, ins, &bblock);
++ip;
break;
break;
}
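+ /*
+ * The three opcodes below push well-known runtime pointers. Under AOT
+ * the pointer is not known at compile time, so a patch-info constant is
+ * emitted and resolved at load time instead.
+ */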
+ case CEE_MONO_LDPTR_CARD_TABLE: {
+ int shift_bits;
+ gpointer card_mask;
+ CHECK_STACK_OVF (1);
+
+ if (cfg->compile_aot)
+ EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
+ else
+ EMIT_NEW_PCONST (cfg, ins, mono_gc_get_card_table (&shift_bits, &card_mask));
+
+ *sp++ = ins;
+ ip += 2;
+ inline_costs += 10 * num_calls++;
+ break;
+ }
+ case CEE_MONO_LDPTR_NURSERY_START: {
+ int shift_bits;
+ size_t size;
+ CHECK_STACK_OVF (1);
+
+ if (cfg->compile_aot)
+ EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_GC_NURSERY_START, NULL);
+ else
+ EMIT_NEW_PCONST (cfg, ins, mono_gc_get_nursery (&shift_bits, &size));
+
+ *sp++ = ins;
+ ip += 2;
+ inline_costs += 10 * num_calls++;
+ break;
+ }
+ case CEE_MONO_LDPTR_INT_REQ_FLAG: {
+ CHECK_STACK_OVF (1);
+
+ if (cfg->compile_aot)
+ EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
+ else
+ EMIT_NEW_PCONST (cfg, ins, mono_thread_interruption_request_flag ());
+
+ *sp++ = ins;
+ ip += 2;
+ inline_costs += 10 * num_calls++;
+ break;
+ }
case CEE_MONO_LDPTR: {
gpointer ptr;
token = read32 (ip + 2);
ptr = mono_method_get_wrapper_data (method, token);
- /* FIXME: Generalize this */
- if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
- EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
- *sp++ = ins;
- ip += 6;
- break;
- }
EMIT_NEW_PCONST (cfg, ins, ptr);
*sp++ = ins;
ip += 6;
case CEE_CGT_UN:
case CEE_CLT:
case CEE_CLT_UN: {
- MonoInst *cmp;
+ MonoInst *cmp, *arg1, *arg2;
+
CHECK_STACK (2);
+ sp -= 2;
+ arg1 = sp [0];
+ arg2 = sp [1];
+
/*
* The following transforms:
* CEE_CEQ into OP_CEQ
* CEE_CLT_UN into OP_CLT_UN
*/
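+ /*
+ * E.g. ip [1] == CEE_CLT yields (OP_CEQ - CEE_CEQ) + CEE_CLT == OP_CLT,
+ * since the OP_ comparison opcodes mirror the CEE_ ordering.
+ */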
MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
-
+
MONO_INST_NEW (cfg, ins, cmp->opcode);
- sp -= 2;
- cmp->sreg1 = sp [0]->dreg;
- cmp->sreg2 = sp [1]->dreg;
- type_from_op (cmp, sp [0], sp [1]);
+ cmp->sreg1 = arg1->dreg;
+ cmp->sreg2 = arg2->dreg;
+ type_from_op (cfg, cmp, arg1, arg2);
CHECK_TYPE (cmp);
- if ((sp [0]->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
+ add_widen_op (cfg, cmp, &arg1, &arg2);
+ if ((arg1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP))))
cmp->opcode = OP_LCOMPARE;
- else if (sp [0]->type == STACK_R8)
+ else if (arg1->type == STACK_R4)
+ cmp->opcode = OP_RCOMPARE;
+ else if (arg1->type == STACK_R8)
cmp->opcode = OP_FCOMPARE;
else
cmp->opcode = OP_ICOMPARE;
MONO_ADD_INS (bblock, cmp);
ins->type = STACK_I4;
ins->dreg = alloc_dreg (cfg, ins->type);
- type_from_op (ins, sp [0], sp [1]);
+ type_from_op (cfg, ins, arg1, arg2);
- if (cmp->opcode == OP_FCOMPARE) {
+ if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) {
/*
* The backends expect the fceq opcodes to do the
* comparison too.
break;
case CEE_ENDFILTER: {
MonoExceptionClause *clause, *nearest;
- int cc, nearest_num;
+ int cc;
CHECK_STACK (1);
--sp;
ip += 2;
nearest = NULL;
- nearest_num = 0;
for (cc = 0; cc < header->num_clauses; ++cc) {
clause = &header->clauses [cc];
if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
- (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
+ (!nearest || (clause->data.filter_offset < nearest->data.filter_offset)))
nearest = clause;
- nearest_num = cc;
- }
}
g_assert (nearest);
if ((ip - header->code) != nearest->handler_offset)
case CEE_CONSTRAINED_:
CHECK_OPSIZE (6);
token = read32 (ip + 2);
- constrained_call = mini_get_class (method, token, generic_context);
- CHECK_TYPELOAD (constrained_call);
+ constrained_class = mini_get_class (method, token, generic_context);
+ CHECK_TYPELOAD (constrained_class);
ip += 6;
break;
case CEE_CPBLK:
ins->inst_destbasereg = var->inst_basereg;
ins->inst_offset = var->inst_offset;
spec = INS_INFO (ins->opcode);
- } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
+ } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) {
ins->opcode = store_opcode;
ins->inst_destbasereg = var->inst_basereg;
ins->inst_offset = var->inst_offset;