#include <sys/time.h>
#endif
+#ifdef HAVE_ALLOCA_H
+#include <alloca.h>
+#endif
+
#ifdef HAVE_VALGRIND_MEMCHECK_H
#include <valgrind/memcheck.h>
#endif
#include <mono/metadata/monitor.h>
#include <mono/utils/mono-compiler.h>
-#define NEW_IR
#include "mini.h"
#include "trace.h"
#include "jit-icalls.h"
-#include "aliasing.h"
-
#define BRANCH_COST 100
#define INLINE_LENGTH_LIMIT 20
#define INLINE_FAILURE do {\
void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
-int mono_method_to_ir2 (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
- MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
- guint inline_offset, gboolean is_virtual_call);
-
/* helper methods signature */
extern MonoMethodSignature *helper_sig_class_init_trampoline;
extern MonoMethodSignature *helper_sig_domain_get;
#ifdef MINI_OP
#undef MINI_OP
#endif
-#define MINI_OP(a,b,dest,src1,src2) dest, src1, src2,
+#ifdef MINI_OP3
+#undef MINI_OP3
+#endif
+#define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
+#define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
#define NONE ' '
#define IREG 'i'
#define FREG 'f'
#define VREG 'v'
#define XREG 'x'
-#if SIZEOF_VOID_P == 8
+#if SIZEOF_REGISTER == 8
#define LREG IREG
#else
#define LREG 'l'
#include "mini-ops.h"
};
#undef MINI_OP
+#undef MINI_OP3
+
+#define MINI_OP(a,b,dest,src1,src2) (((src1) != NONE) + ((src2) != NONE)),
+#define MINI_OP3(a,b,dest,src1,src2,src3) (((src1) != NONE) + ((src2) != NONE) + ((src3) != NONE)),
+const gint8 ins_sreg_counts[] = {
+#include "mini-ops.h"
+};
+#undef MINI_OP
+#undef MINI_OP3
extern GHashTable *jit_icall_name_hash;
(vi)->idx = (id); \
} while (0)
+/*
+ * mono_inst_set_src_registers:
+ *
+ *   Copy the three source registers from REGS into INS (sreg1/sreg2/sreg3).
+ *   REGS must point to at least three ints, matching the new 3-sreg layout
+ *   introduced together with ins_sreg_counts/MINI_OP3.
+ */
+void
+mono_inst_set_src_registers (MonoInst *ins, int *regs)
+{
+ ins->sreg1 = regs [0];
+ ins->sreg2 = regs [1];
+ ins->sreg3 = regs [2];
+}
+
guint32
mono_alloc_ireg (MonoCompile *cfg)
{
return OP_MOVE;
case MONO_TYPE_I8:
case MONO_TYPE_U8:
-#if SIZEOF_VOID_P == 8
+#if SIZEOF_REGISTER == 8
return OP_MOVE;
#else
return OP_LMOVE;
return OP_FMOVE;
case MONO_TYPE_VALUETYPE:
if (type->data.klass->enumtype) {
- type = type->data.klass->enum_basetype;
+ type = mono_class_enum_basetype (type->data.klass);
goto handle_enum;
}
if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
} \
} while (0)
-#ifndef MONO_ARCH_EMIT_BOUNDS_CHECK
-#define MONO_ARCH_EMIT_BOUNDS_CHECK(cfg, array_reg, offset, index_reg) do { \
- int _length_reg = alloc_ireg (cfg); \
- MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, _length_reg, array_reg, offset); \
- MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, _length_reg, index_reg); \
- MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException"); \
- } while (0)
-#endif
-
-#define MONO_EMIT_BOUNDS_CHECK(cfg, array_reg, array_type, array_length_field, index_reg) do { \
- if (!(cfg->opt & MONO_OPT_ABCREM)) { \
- MONO_ARCH_EMIT_BOUNDS_CHECK ((cfg), (array_reg), G_STRUCT_OFFSET (array_type, array_length_field), (index_reg)); \
- } else { \
- MonoInst *ins; \
- MONO_INST_NEW ((cfg), ins, OP_BOUNDS_CHECK); \
- ins->sreg1 = array_reg; \
- ins->sreg2 = index_reg; \
- ins->inst_imm = G_STRUCT_OFFSET (array_type, array_length_field); \
- MONO_ADD_INS ((cfg)->cbb, ins); \
- (cfg)->flags |= MONO_CFG_HAS_ARRAY_ACCESS; \
- (cfg)->cbb->has_array_access = TRUE; \
- } \
- } while (0)
-
#if defined(__i386__) || defined(__x86_64__)
#define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
} while (0)
#endif
-#if SIZEOF_VOID_P == 8
+#if SIZEOF_REGISTER == 8
#define ADD_WIDEN_OP(ins, arg1, arg2) do { \
/* FIXME: Need to add many more cases */ \
- if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
+ if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
MonoInst *widen; \
int dr = alloc_preg (cfg); \
EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
MonoExceptionClause *clause;
int i;
- /* first search for handlers and filters */
for (i = 0; i < header->num_clauses; ++i) {
clause = &header->clauses [i];
if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
else
return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
}
- }
- /* search the try blocks */
- for (i = 0; i < header->num_clauses; ++i) {
- clause = &header->clauses [i];
if (MONO_OFFSET_IN_CLAUSE (clause, offset))
return ((i + 1) << 8) | clause->flags;
}
return;
case MONO_TYPE_VALUETYPE:
if (type->data.klass->enumtype) {
- type = type->data.klass->enum_basetype;
+ type = mono_class_enum_basetype (type->data.klass);
goto handle_enum;
} else {
inst->klass = klass;
case OP_LCOMPARE:
case OP_ICOMPARE:
ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
- if ((src1->type == STACK_I8) || ((sizeof (gpointer) == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
+ if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
ins->opcode = OP_LCOMPARE;
else if (src1->type == STACK_R8)
ins->opcode = OP_FCOMPARE;
break;
case OP_ICOMPARE_IMM:
ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
- if ((src1->type == STACK_I8) || ((sizeof (gpointer) == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
+ if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
ins->opcode = OP_LCOMPARE_IMM;
break;
case CEE_BEQ:
break;
case STACK_PTR:
case STACK_MP:
-#if SIZEOF_VOID_P == 8
+#if SIZEOF_REGISTER == 8
ins->opcode = OP_LCONV_TO_U;
#else
ins->opcode = OP_MOVE;
switch (size) {
case 1:
MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
- break;
+ return;
case 2:
MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
- break;
+ return;
case 4:
MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
- break;
-#if SIZEOF_VOID_P == 8
+ return;
+#if SIZEOF_REGISTER == 8
case 8:
MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
+ return;
#endif
}
- return;
}
val_reg = alloc_preg (cfg);
- if (sizeof (gpointer) == 8)
+ if (SIZEOF_REGISTER == 8)
MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
else
MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
}
#if !NO_UNALIGNED_ACCESS
- if (sizeof (gpointer) == 8) {
+ if (SIZEOF_REGISTER == 8) {
if (offset % 8) {
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
offset += 4;
#endif /* DISABLE_JIT */
void
-mini_emit_memcpy2 (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
+mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
{
int cur_reg;
}
#if !NO_UNALIGNED_ACCESS
- if (sizeof (gpointer) == 8) {
+ if (SIZEOF_REGISTER == 8) {
while (size >= 8) {
cur_reg = alloc_preg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
case MONO_TYPE_VALUETYPE:
if (type->data.klass->enumtype) {
- type = type->data.klass->enum_basetype;
+ type = mono_class_enum_basetype (type->data.klass);
goto handle_enum;
} else
return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
continue;
case MONO_TYPE_VALUETYPE:
if (simple_type->data.klass->enumtype) {
- simple_type = simple_type->data.klass->enum_basetype;
+ simple_type = mono_class_enum_basetype (simple_type->data.klass);
goto handle_enum;
}
if (args [i]->type != STACK_VTYPE)
{
#ifdef MONO_ARCH_RGCTX_REG
MonoCallInst *call;
- int rgctx_reg;
+ int rgctx_reg = -1;
if (rgctx_arg) {
rgctx_reg = mono_alloc_preg (cfg);
if (method->string_ctor) {
/* Create the real signature */
/* FIXME: Cache these */
- MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_full (cfg->mempool, sig);
+ MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
ctor_sig->ret = &mono_defaults.string_class->byval_arg;
sig = ctor_sig;
this_reg = this->dreg;
+#ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
+ if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
+ /* Make a call to delegate->invoke_impl */
+ call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
+ call->inst.inst_basereg = this_reg;
+ call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
+ MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
+
+ return (MonoInst*)call;
+ }
+#endif
+
if ((!cfg->compile_aot || enable_for_aot) &&
(!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
- ((method->flags & METHOD_ATTRIBUTE_FINAL) &&
+ (MONO_METHOD_IS_FINAL (method) &&
method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK))) {
/*
* the method is not virtual, we just need to ensure this is not null
return (MonoInst*)call;
}
-#ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
- if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
- /* Make a call to delegate->invoke_impl */
- call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
- call->inst.inst_basereg = this_reg;
- call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
- MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
-
- return (MonoInst*)call;
- }
-#endif
-
- if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
- ((method->flags & METHOD_ATTRIBUTE_FINAL) ||
- (method->klass && method->klass->flags & TYPE_ATTRIBUTE_SEALED))) {
+ if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
/*
* the method is virtual, but we can statically dispatch since either
* it's class or the method itself are sealed.
slot_reg = vtable_reg;
call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
(mono_method_get_vtable_index (method) * SIZEOF_VOID_P);
+#ifdef MONO_ARCH_HAVE_IMT
if (imt_arg) {
g_assert (mono_method_signature (method)->generic_param_count);
emit_imt_argument (cfg, call, imt_arg);
}
+#endif
}
call->inst.sreg1 = slot_reg;
else
n = mono_class_value_size (klass, &align);
+#if HAVE_WRITE_BARRIERS
+ /* if native is true there should be no references in the struct */
+ if (klass->has_references && !native) {
+ /* Avoid barriers when storing to the stack */
+ if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
+ (dest->opcode == OP_LDADDR))) {
+ iargs [0] = dest;
+ iargs [1] = src;
+ EMIT_NEW_PCONST (cfg, iargs [2], klass);
+
+ mono_emit_jit_icall (cfg, mono_value_copy, iargs);
+ }
+ }
+#endif
+
if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
/* FIXME: Optimize the case when src/dest is OP_LDADDR */
- mini_emit_memcpy2 (cfg, dest->dreg, 0, src->dreg, 0, n, align);
+ mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
} else {
iargs [0] = dest;
iargs [1] = src;
EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
alloc_ftn = mono_object_new;
- } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib) {
+ } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
/* This happens often in argument checking code, eg. throw new FooException... */
/* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
MONO_START_BB (cfg, false_bb);
- MONO_EMIT_NEW_ICONST (cfg, res_reg, 0);
+ MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
MONO_START_BB (cfg, is_null_bb);
}
/* Set invoke_impl field */
- trampoline = mono_create_delegate_trampoline (klass);
- EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_ABS, trampoline);
+ if (cfg->compile_aot) {
+ EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
+ } else {
+ trampoline = mono_create_delegate_trampoline (klass);
+ EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
+ }
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
/* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
static gboolean
mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
{
- MonoMethodHeader *header = mono_method_get_header (method);
+ MonoMethodHeader *header;
MonoVTable *vtable;
#ifdef MONO_ARCH_SOFT_FLOAT
MonoMethodSignature *sig = mono_method_signature (method);
return TRUE;
#endif
+ if (method->is_inflated)
+ /* Avoid inflating the header */
+ header = mono_method_get_header (((MonoMethodInflated*)method)->declaring);
+ else
+ header = mono_method_get_header (method);
+
if ((method->iflags & METHOD_IMPL_ATTRIBUTE_RUNTIME) ||
(method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
(method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
array_reg = arr->dreg;
index_reg = index->dreg;
-#if SIZEOF_VOID_P == 8
+#if SIZEOF_REGISTER == 8
/* The array reg is 64 bits but the index reg is only 32 */
index2_reg = alloc_preg (cfg);
MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
#else
- index2_reg = index_reg;
+ if (index->type == STACK_I8) {
+ index2_reg = alloc_preg (cfg);
+ MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
+ } else {
+ index2_reg = index_reg;
+ }
#endif
MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
int mult_reg = alloc_preg (cfg);
int add_reg = alloc_preg (cfg);
-#if SIZEOF_VOID_P == 8
+#if SIZEOF_REGISTER == 8
/* The array reg is 64 bits but the index reg is only 32 */
MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
#else
(strcmp (cmethod->klass->name, "Interlocked") == 0)) {
ins = NULL;
-#if SIZEOF_VOID_P == 8
+#if SIZEOF_REGISTER == 8
if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
/* 64 bit reads are already atomic */
MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
if (fsig->params [0]->type == MONO_TYPE_I4)
opcode = OP_ATOMIC_ADD_NEW_I4;
-#if SIZEOF_VOID_P == 8
+#if SIZEOF_REGISTER == 8
else if (fsig->params [0]->type == MONO_TYPE_I8)
opcode = OP_ATOMIC_ADD_NEW_I8;
#endif
if (fsig->params [0]->type == MONO_TYPE_I4)
opcode = OP_ATOMIC_ADD_NEW_I4;
-#if SIZEOF_VOID_P == 8
+#if SIZEOF_REGISTER == 8
else if (fsig->params [0]->type == MONO_TYPE_I8)
opcode = OP_ATOMIC_ADD_NEW_I8;
#endif
if (fsig->params [0]->type == MONO_TYPE_I4)
opcode = OP_ATOMIC_ADD_NEW_I4;
-#if SIZEOF_VOID_P == 8
+#if SIZEOF_REGISTER == 8
else if (fsig->params [0]->type == MONO_TYPE_I8)
opcode = OP_ATOMIC_ADD_NEW_I8;
#endif
ins->inst_basereg = args [0]->dreg;
ins->inst_offset = 0;
ins->sreg2 = args [1]->dreg;
- ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
+ ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
MONO_ADD_INS (cfg->cbb, ins);
}
}
if (fsig->params [0]->type == MONO_TYPE_I4)
opcode = OP_ATOMIC_EXCHANGE_I4;
-#if SIZEOF_VOID_P == 8
+#if SIZEOF_REGISTER == 8
else if ((fsig->params [0]->type == MONO_TYPE_I8) ||
(fsig->params [0]->type == MONO_TYPE_I) ||
(fsig->params [0]->type == MONO_TYPE_OBJECT))
}
#endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
-#ifdef MONO_ARCH_HAVE_ATOMIC_CAS_IMM
- /*
- * Can't implement CompareExchange methods this way since they have
- * three arguments. We can implement one of the common cases, where the new
- * value is a constant.
- */
+#ifdef MONO_ARCH_HAVE_ATOMIC_CAS
if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
- if ((fsig->params [1]->type == MONO_TYPE_I4 ||
- (sizeof (gpointer) == 4 && fsig->params [1]->type == MONO_TYPE_I))
- && args [2]->opcode == OP_ICONST) {
- MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_IMM_I4);
+ int size = 0;
+ if (fsig->params [1]->type == MONO_TYPE_I4)
+ size = 4;
+ else if (fsig->params [1]->type == MONO_TYPE_I || MONO_TYPE_IS_REFERENCE (fsig->params [1]))
+ size = sizeof (gpointer);
+ else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I4)
+ size = 8;
+ if (size == 4) {
+ MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
ins->dreg = alloc_ireg (cfg);
ins->sreg1 = args [0]->dreg;
ins->sreg2 = args [1]->dreg;
- ins->backend.data = GINT_TO_POINTER (args [2]->inst_c0);
+ ins->sreg3 = args [2]->dreg;
ins->type = STACK_I4;
MONO_ADD_INS (cfg->cbb, ins);
+ } else if (size == 8) {
+ MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
+ ins->dreg = alloc_ireg (cfg);
+ ins->sreg1 = args [0]->dreg;
+ ins->sreg2 = args [1]->dreg;
+ ins->sreg3 = args [2]->dreg;
+ ins->type = STACK_I8;
+ MONO_ADD_INS (cfg->cbb, ins);
+ } else {
+ /* g_assert_not_reached (); */
}
- /* The I8 case is hard to detect, since the arg might be a conv.i8 (iconst) tree */
}
-#endif /* MONO_ARCH_HAVE_ATOMIC_CAS_IMM */
+#endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
if (ins)
return ins;
guint32 prev_cil_offset_to_bb_len;
MonoMethod *prev_current_method;
MonoGenericContext *prev_generic_context;
+ gboolean ret_var_set, prev_ret_var_set;
g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
prev_cbb = cfg->cbb;
prev_current_method = cfg->current_method;
prev_generic_context = cfg->generic_context;
+ prev_ret_var_set = cfg->ret_var_set;
- costs = mono_method_to_ir2 (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
+ costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
+
+ ret_var_set = cfg->ret_var_set;
cfg->inlined_method = prev_inlined_method;
cfg->real_offset = prev_real_offset;
cfg->arg_types = prev_arg_types;
cfg->current_method = prev_current_method;
cfg->generic_context = prev_generic_context;
+ cfg->ret_var_set = prev_ret_var_set;
if ((costs >= 0 && costs < 60) || inline_allways) {
if (cfg->verbose_level > 2)
* If the inlined method contains only a throw, then the ret var is not
* set, so set it to a dummy value.
*/
- if (!cfg->ret_var_set) {
+ if (!ret_var_set) {
static double r8_0 = 0.0;
switch (rvar->type) {
}
+/*
+ * field_access_exception:
+ *
+ *   Return the (cached) SecurityManager helper method used to raise a
+ *   CoreCLR FieldAccessException. The 2-argument overload is looked up;
+ *   presumably it takes (caller, field) — see emit_throw_field_access_exception.
+ */
static MonoMethod*
-verification_exception (void)
+field_access_exception (void)
{
static MonoMethod *method = NULL;
if (!method) {
MonoSecurityManager *secman = mono_security_manager_get_methods ();
method = mono_class_get_method_from_name (secman->securitymanager,
-			"VerificationException", 0);
+			"FieldAccessException", 2);
}
g_assert (method);
return method;
}
+/*
+ * emit_throw_field_access_exception:
+ *
+ *   Emit IR which calls the SecurityManager thrower so that a
+ *   FieldAccessException is raised for FIELD when accessed from CALLER.
+ */
static void
-emit_throw_verification_exception (MonoCompile *cfg, MonoBasicBlock *bblock, unsigned char *ip)
+emit_throw_field_access_exception (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
+						  MonoBasicBlock *bblock, unsigned char *ip)
{
-	MonoMethod *thrower = verification_exception ();
+	MonoMethod *thrower = field_access_exception ();
+	MonoInst *args [2];
-	mono_emit_method_call (cfg, thrower, NULL, NULL);
+	EMIT_NEW_METHODCONST (cfg, args [0], caller);
+	/* NOTE(review): EMIT_NEW_METHODCONST is reused for the field pointer —
+	 * relies on it emitting a plain pointer constant; confirm intended. */
+	EMIT_NEW_METHODCONST (cfg, args [1], field);
+	mono_emit_method_call (cfg, thrower, args, NULL);
+}
+
+/*
+ * Return the original method if a wrapper is specified. We can only access
+ * the custom attributes from the original method. Returns NULL when no
+ * original method can be determined (native-to-managed wrappers).
+ */
+static MonoMethod*
+get_original_method (MonoMethod *method)
+{
+ if (method->wrapper_type == MONO_WRAPPER_NONE)
+ return method;
+
+ /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
+ if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
+ return NULL;
+
+ /* in other cases we need to find the original method */
+ return mono_marshal_method_from_wrapper (method);
}
+/*
+ * ensure_method_is_allowed_to_access_field:
+ *
+ *   CoreCLR security check: if FIELD's declaring class is Critical and the
+ *   (unwrapped) CALLER is Transparent, emit IR throwing FieldAccessException.
+ *   SafeCritical/Transparent fields and SafeCritical/Critical callers pass.
+ */
static void
-ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
-							 MonoBasicBlock *bblock, unsigned char *ip)
+ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
+						MonoBasicBlock *bblock, unsigned char *ip)
{
-	MonoSecurityCoreCLRLevel caller_level = mono_security_core_clr_method_level (caller, TRUE);
-	MonoSecurityCoreCLRLevel callee_level = mono_security_core_clr_method_level (callee, TRUE);
-	gboolean is_safe = TRUE;
+	/* there's no restriction to access Transparent or SafeCritical fields, so we only check calls to Critical methods */
+	if (mono_security_core_clr_class_level (mono_field_get_parent (field)) != MONO_SECURITY_CORE_CLR_CRITICAL)
+		return;
-	if (!(caller_level >= callee_level ||
-			caller_level == MONO_SECURITY_CORE_CLR_SAFE_CRITICAL ||
-			callee_level == MONO_SECURITY_CORE_CLR_SAFE_CRITICAL)) {
-		is_safe = FALSE;
-	}
+	/* we can't get the coreclr security level on wrappers since they don't have the attributes */
+	caller = get_original_method (caller);
+	if (!caller)
+		return;
-	if (!is_safe)
-		emit_throw_method_access_exception (cfg, caller, callee, bblock, ip);
+	/* caller is Critical! only SafeCritical and Critical callers can access the field, so we throw if caller is Transparent */
+	if (mono_security_core_clr_method_level (caller, TRUE) == MONO_SECURITY_CORE_CLR_TRANSPARENT)
+		emit_throw_field_access_exception (cfg, caller, field, bblock, ip);
}
+/*
+ * ensure_method_is_allowed_to_call_method:
+ *
+ *   CoreCLR security check: if CALLEE is Critical and the (unwrapped) CALLER
+ *   is Transparent, emit IR throwing MethodAccessException. Replaces the old
+ *   stubbed-out method_is_safe () check.
+ */
-static gboolean
-method_is_safe (MonoMethod *method)
+static void
+ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
+						MonoBasicBlock *bblock, unsigned char *ip)
{
-	/*
-	if (strcmp (method->name, "unsafeMethod") == 0)
-		return FALSE;
-	*/
-	return TRUE;
+	/* there's no restriction to call Transparent or SafeCritical code, so we only check calls to Critical methods */
+	if (mono_security_core_clr_method_level (callee, TRUE) != MONO_SECURITY_CORE_CLR_CRITICAL)
+		return;
+
+	/* we can't get the coreclr security level on wrappers since they don't have the attributes */
+	caller = get_original_method (caller);
+	if (!caller)
+		return;
+
+	/* caller is Critical! only SafeCritical and Critical callers can call it, so we throw if the caller is Transparent */
+	if (mono_security_core_clr_method_level (caller, TRUE) == MONO_SECURITY_CORE_CLR_TRANSPARENT)
+		emit_throw_method_access_exception (cfg, caller, callee, bblock, ip);
}
/*
restart = TRUE;
break;
}
+ case OP_CKFINITE: {
+ MonoInst *iargs [2];
+ MonoInst *call, *cmp;
+
+ /* Convert to icall+icompare+cond_exc+move */
+
+ /* Create dummy MonoInst's for the arguments */
+ MONO_INST_NEW (cfg, iargs [0], OP_ARG);
+ iargs [0]->dreg = ins->sreg1;
+
+ call = mono_emit_jit_icall (cfg, mono_isfinite, iargs);
+
+ MONO_INST_NEW (cfg, cmp, OP_ICOMPARE_IMM);
+ cmp->sreg1 = call->dreg;
+ cmp->inst_imm = 1;
+ MONO_ADD_INS (cfg->cbb, cmp);
+
+ MONO_EMIT_NEW_COND_EXC (cfg, INE_UN, "ArithmeticException");
+
+ /* Do the assignment if the value is finite */
+ MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, ins->dreg, ins->sreg1);
+
+ restart = TRUE;
+ break;
+ }
default:
if (spec [MONO_INST_SRC1] == 'f' || spec [MONO_INST_SRC2] == 'f' || spec [MONO_INST_DEST] == 'f') {
mono_print_ins (ins);
{
MonoInst *ins;
guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
- if ((opcode == OP_MOVE) && ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
+ if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
+ ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
/* Optimize reg-reg moves away */
/*
* Can't optimize other opcodes, since sp[0] might point to
return NULL;
}
+/*
+ * is_exception_class:
+ *
+ *   Return TRUE if CLASS is System.Exception or derives from it, walking
+ *   the parent chain; used to restrict the out-of-line newobj fast path.
+ */
+static gboolean
+is_exception_class (MonoClass *class)
+{
+ while (class) {
+ if (class == mono_defaults.exception_class)
+ return TRUE;
+ class = class->parent;
+ }
+ return FALSE;
+}
+
/*
- * mono_method_to_ir: translates IL into basic blocks containing trees
+ * mono_method_to_ir:
+ *
+ * Translate the .net IL into linear IR.
*/
int
-mono_method_to_ir2 (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
+mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
guint inline_offset, gboolean is_virtual_call)
{
clause->data.catch_class &&
cfg->generic_sharing_context &&
mono_class_check_context_used (clause->data.catch_class)) {
- if (mono_method_get_context (method)->method_inst)
- GENERIC_SHARING_FAILURE (CEE_NOP);
-
/*
* In shared generic code with catch
* clauses containing type variables
}
}
} else {
- arg_array = alloca (sizeof (MonoInst *) * num_args);
+ arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
cfg->cbb = start_bblock;
cfg->args = arg_array;
mono_save_args (cfg, sig, inline_args);
}
}
}
- if (!method_is_safe (method))
- emit_throw_verification_exception (cfg, bblock, ip);
}
if (header->code_size == 0)
}
class_inits = NULL;
+ /* We force the vtable variable here for all shared methods
+ for the possibility that they might show up in a stack
+ trace where their exact instantiation is needed. */
+ if (cfg->generic_sharing_context && method == cfg->method) {
+ if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
+ mini_method_get_context (method)->method_inst ||
+ method->klass->valuetype) {
+ mono_get_vtable_var (cfg);
+ } else {
+ /* FIXME: Is there a better way to do this?
+ We need the variable live for the duration
+ of the whole method. */
+ cfg->args [0]->flags |= MONO_INST_INDIRECT;
+ }
+ }
+
/* add a check for this != NULL to inlined methods */
if (is_virtual_call) {
MonoInst *arg_ins;
case CEE_LDC_R4: {
float *f;
/* FIXME: we should really allocate this only late in the compilation process */
- mono_domain_lock (cfg->domain);
f = mono_domain_alloc (cfg->domain, sizeof (float));
- mono_domain_unlock (cfg->domain);
CHECK_OPSIZE (5);
CHECK_STACK_OVF (1);
MONO_INST_NEW (cfg, ins, OP_R4CONST);
case CEE_LDC_R8: {
double *d;
/* FIXME: we should really allocate this only late in the compilation process */
- mono_domain_lock (cfg->domain);
d = mono_domain_alloc (cfg->domain, sizeof (double));
- mono_domain_unlock (cfg->domain);
CHECK_OPSIZE (9);
CHECK_STACK_OVF (1);
MONO_INST_NEW (cfg, ins, OP_R8CONST);
case CEE_JMP: {
MonoCallInst *call;
+ INLINE_FAILURE;
+
CHECK_OPSIZE (5);
if (stack_start != sp)
UNVERIFIED;
if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
GENERIC_SHARING_FAILURE (CEE_JMP);
- if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
- if (check_linkdemand (cfg, method, cmethod))
- INLINE_FAILURE;
+ if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
CHECK_CFG_EXCEPTION;
- }
#ifdef __x86_64__
{
MonoMethodSignature *fsig = mono_method_signature (cmethod);
int i, n;
- /* FIXME: Remove OP_JMP from mini-amd64.c when the old JIT is removed */
-
/* Handle tail calls similarly to calls */
n = fsig->param_count + fsig->hasthis;
cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
cil_method = cmethod;
} else if (constrained_call) {
- cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
+ if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
+ /*
+ * This is needed since get_method_constrained can't find
+ * the method in klass representing a type var.
+ * The type var is guaranteed to be a reference type in this
+ * case.
+ */
+ cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
+ cil_method = cmethod;
+ g_assert (!cmethod->klass->valuetype);
+ } else {
+ cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
+ }
} else {
cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
cil_method = cmethod;
CHECK_CFG_EXCEPTION;
}
- if (cmethod->string_ctor)
+ if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
g_assert_not_reached ();
}
}
if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
- (cmethod->flags & METHOD_ATTRIBUTE_FINAL)) {
+ MONO_METHOD_IS_FINAL (cmethod)) {
if (virtual)
check_this = TRUE;
virtual = 0;
/* Calling virtual generic methods */
if (cmethod && virtual &&
(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
- !((cmethod->flags & METHOD_ATTRIBUTE_FINAL) &&
+ !(MONO_METHOD_IS_FINAL (cmethod) &&
cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
mono_method_signature (cmethod)->generic_param_count) {
MonoInst *this_temp, *this_arg_temp, *store;
INLINE_FAILURE;
#if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK
- if (!(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) &&
- cmethod->wrapper_type == MONO_WRAPPER_NONE) {
+ if (cmethod->wrapper_type == MONO_WRAPPER_NONE) {
g_assert (!imt_arg);
if (context_used) {
imt_arg = emit_get_rgctx_method (cfg, context_used,
- cmethod, MONO_RGCTX_INFO_METHOD_CONTEXT);
+ cmethod, MONO_RGCTX_INFO_METHOD);
} else {
- // FIXME:
- cfg->disable_aot = TRUE;
g_assert (cmethod->is_inflated);
- EMIT_NEW_PCONST (cfg, imt_arg,
- ((MonoMethodInflated*)cmethod)->context.method_inst);
+ EMIT_NEW_METHODCONST (cfg, imt_arg, cmethod);
}
ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg);
} else
/* FIXME: runtime generic context pointer for jumps? */
/* FIXME: handle this for generic sharing eventually */
if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) &&
- (mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)))) {
+ (mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod))) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret)) {
MonoCallInst *call;
/* Prevent inlining of methods with tail calls (the call stack would be altered) */
/* Inlining */
if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
- (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || (cmethod->flags & METHOD_ATTRIBUTE_FINAL)) &&
+ (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
mono_method_check_inlining (cfg, cmethod) &&
!g_list_find (dont_inline, cmethod)) {
int costs;
if (context_used && !imt_arg && !array_rank &&
(!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
!mono_class_generic_sharing_enabled (cmethod->klass)) &&
- (!virtual || cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
+ (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
INLINE_FAILURE;
else if (*ip == CEE_CALLI)
g_assert (!vtable_arg);
else
+ /* FIXME: what the hell is this??? */
g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
!(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
NOT_IMPLEMENTED;
#endif
} else {
- ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
+ if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
+ /*
+ * Instead of emitting an indirect call, emit a direct call
+ * with the contents of the aotconst as the patch info.
+ */
+ ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
+ NULLIFY_INS (addr);
+ } else {
+ ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
+ }
}
if (!MONO_TYPE_IS_VOID (fsig->ret)) {
if (fsig->pinvoke && !fsig->ret->byref) {
type_from_op (cmp, sp [0], NULL);
CHECK_TYPE (cmp);
-#if SIZEOF_VOID_P == 4
+#if SIZEOF_REGISTER == 4
if (cmp->opcode == OP_LCOMPARE_IMM) {
/* Convert it to OP_LCOMPARE */
MONO_INST_NEW (cfg, ins, OP_I8CONST);
else
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
-#if SIZEOF_VOID_P == 8
+#if SIZEOF_REGISTER == 8
/* The upper word might not be zero, and we add it to a 64 bit address later */
MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
#endif
CHECK_STACK (2);
sp -= 2;
+#if HAVE_WRITE_BARRIERS
+ if (*ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0))) {
+ /* insert call to write barrier */
+ MonoMethod *write_barrier = mono_gc_get_write_barrier ();
+ mono_emit_method_call (cfg, write_barrier, sp, NULL);
+ ins_flag = 0;
+ ip++;
+ break;
+ }
+#endif
+
NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
ins->flags |= ins_flag;
ins_flag = 0;
if (imm_opcode != -1) {
ins->opcode = imm_opcode;
if (sp [1]->opcode == OP_I8CONST) {
-#if SIZEOF_VOID_P == 8
+#if SIZEOF_REGISTER == 8
ins->inst_imm = sp [1]->inst_l;
#else
ins->inst_ls_word = sp [1]->inst_ls_word;
ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
ins->sreg2 = -1;
- sp [1]->opcode = OP_NOP;
+ /* Might be followed by an instruction added by ADD_WIDEN_OP */
+ if (sp [1]->next == NULL)
+ sp [1]->opcode = OP_NOP;
}
}
MONO_ADD_INS ((cfg)->cbb, (ins));
int data = sp [-1]->inst_c0;
sp [-1]->opcode = OP_I8CONST;
sp [-1]->type = STACK_I8;
-#if SIZEOF_VOID_P == 8
+#if SIZEOF_REGISTER == 8
if ((*ip) == CEE_CONV_U8)
sp [-1]->inst_c0 = (guint32)data;
else
if (bblock->out_of_line) {
MonoInst *iargs [2];
- if (cfg->method->klass->image == mono_defaults.corlib) {
+ if (image == mono_defaults.corlib) {
/*
* Avoid relocations in AOT and save some space by using a
* version of helper_ldstr specialized to mscorlib.
* Generate smaller code for the common newobj <exception> instruction in
* argument checking code.
*/
- if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib && n <= 2 &&
+ if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
+ is_exception_class (cmethod->klass) && n <= 2 &&
((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
MonoInst *iargs [3];
iargs [0] = NULL;
if (mini_class_is_system_array (cmethod->klass)) {
- if (context_used)
- GENERIC_SHARING_FAILURE (*ip);
- g_assert (!context_used);
g_assert (!vtable_arg);
- EMIT_NEW_METHODCONST (cfg, *sp, cmethod);
+
+ if (context_used) {
+ *sp = emit_get_rgctx_method (cfg, context_used,
+ cmethod, MONO_RGCTX_INFO_METHOD);
+ } else {
+ EMIT_NEW_METHODCONST (cfg, *sp, cmethod);
+ }
/* Avoid varargs in the common case */
if (fsig->param_count == 1)
!mono_class_generic_sharing_enabled (cmethod->klass))) {
MonoInst *cmethod_addr;
- g_assert (!callvirt_this_arg);
-
cmethod_addr = emit_get_rgctx_method (cfg, context_used,
cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
mono_emit_rgctx_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
} else {
INLINE_FAILURE;
- mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
- callvirt_this_arg, NULL, vtable_arg);
+ ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
+ callvirt_this_arg, NULL, vtable_arg);
+ if (mono_method_is_generic_sharable_impl (cmethod, TRUE) && ((MonoCallInst*)ins)->method->wrapper_type == MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)
+ GENERIC_SHARING_FAILURE (*ip);
}
}
FIELD_ACCESS_FAILURE;
mono_class_init (klass);
+ /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exist) have
+ any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
+ if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
+ ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
+ */
+
foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
if (*ip == CEE_STFLD) {
if (target_type_is_incompatible (cfg, field->type, sp [1]))
} else {
MonoInst *store;
+#if HAVE_WRITE_BARRIERS
+ if (mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
+ /* insert call to write barrier */
+ MonoMethod *write_barrier = mono_gc_get_write_barrier ();
+ MonoInst *iargs [2];
+ int dreg;
+
+ dreg = alloc_preg (cfg);
+ EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
+ iargs [1] = sp [1];
+ mono_emit_method_call (cfg, write_barrier, iargs, NULL);
+ }
+#endif
+
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
store->flags |= ins_flag;
if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
FIELD_ACCESS_FAILURE;
+ /* if the class is Critical then transparent code cannot access its fields */
+ if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
+ ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
+
/*
* We can only support shared generic static
* field access on architectures where the
if (*ip == CEE_LDSFLDA) {
ins->klass = mono_class_from_mono_type (field->type);
+ ins->type = STACK_PTR;
*sp++ = ins;
} else if (*ip == CEE_STSFLD) {
MonoInst *store;
gpointer addr = (char*)vtable->data + field->offset;
int ro_type = field->type->type;
if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
- ro_type = field->type->data.klass->enum_basetype->type;
+ ro_type = mono_class_enum_basetype (field->type->data.klass)->type;
}
/* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
is_const = TRUE;
EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
sp++;
break;
+#ifndef HAVE_MOVING_COLLECTOR
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_STRING:
type_to_eval_stack_type ((cfg), field->type, *sp);
sp++;
break;
+#endif
case MONO_TYPE_I8:
case MONO_TYPE_U8:
EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
goto load_error;
mono_class_init (handle_class);
if (cfg->generic_sharing_context) {
- if (handle_class == mono_defaults.typehandle_class) {
+ if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
+ mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
+ /* This case handles ldtoken
+ of an open type, like for
+ typeof(Gen<>). */
+ context_used = 0;
+ } else if (handle_class == mono_defaults.typehandle_class) {
/* If we get a MONO_TYPE_CLASS
then we need to provide the
open type, not an
cmp->sreg2 = sp [1]->dreg;
type_from_op (cmp, sp [0], sp [1]);
CHECK_TYPE (cmp);
- if ((sp [0]->type == STACK_I8) || ((sizeof (gpointer) == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
+ if ((sp [0]->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
cmp->opcode = OP_LCOMPARE;
else if (sp [0]->type == STACK_R8)
cmp->opcode = OP_FCOMPARE;
}
case CEE_LDFTN: {
MonoInst *argconst;
- MonoMethod *cil_method, *ctor_method;
+ MonoMethod *cil_method;
gboolean needs_static_rgctx_invoke;
CHECK_STACK_OVF (1);
/* FIXME: SGEN support */
/* FIXME: handle shared static generic methods */
/* FIXME: handle this in shared code */
- if (!needs_static_rgctx_invoke && !context_used && (sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context)) && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
- MonoInst *target_ins;
+ if (!needs_static_rgctx_invoke && !context_used && (sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
+ MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
+ if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
+ MonoInst *target_ins;
+ MonoMethod *invoke;
- ip += 6;
- if (cfg->verbose_level > 3)
- g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
- target_ins = sp [-1];
- sp --;
- *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod);
- ip += 5;
- sp ++;
- break;
+ invoke = mono_get_delegate_invoke (ctor_method->klass);
+ if (!invoke || !mono_method_signature (invoke))
+ goto load_error;
+
+ ip += 6;
+ if (cfg->verbose_level > 3)
+ g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
+ target_ins = sp [-1];
+ sp --;
+ *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod);
+ ip += 5;
+ sp ++;
+ break;
+ }
}
#endif
sp -= 3;
if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
- mini_emit_memcpy2 (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
+ mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
} else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
/* emit_memset only works when val == 0 */
mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
MonoInst *store;
cfg->cbb = init_localsbb;
- cfg->ip = header->code;
+ cfg->ip = NULL;
for (i = 0; i < header->num_locals; ++i) {
MonoType *ptype = header->locals [i];
int t = ptype->type;
dreg = cfg->locals [i]->dreg;
if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
- t = ptype->data.klass->enum_basetype->type;
+ t = mono_class_enum_basetype (ptype->data.klass)->type;
if (ptype->byref) {
MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
} else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
return OP_LOADI4_MEM;
case OP_LOADU4_MEMBASE:
return OP_LOADU4_MEM;
-#if SIZEOF_VOID_P == 8
+#if SIZEOF_REGISTER == 8
case OP_LOADI8_MEMBASE:
return OP_LOADI8_MEM;
#endif
mono_op_to_op_imm_noemul (int opcode)
{
switch (opcode) {
-#if SIZEOF_VOID_P == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
+#if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
case OP_LSHR:
case OP_LSHL:
case OP_LSHR_UN:
g_assert (ins->opcode >= MONO_CEE_LAST);
- for (regindex = 0; regindex < 3; regindex ++) {
+ for (regindex = 0; regindex < 4; regindex ++) {
int vreg;
if (regindex == 0) {
if (regtype == ' ')
continue;
vreg = ins->sreg1;
- } else {
+ } else if (regindex == 2) {
regtype = spec [MONO_INST_SRC2];
if (regtype == ' ')
continue;
vreg = ins->sreg2;
+ } else if (regindex == 3) {
+ regtype = spec [MONO_INST_SRC3];
+ if (regtype == ' ')
+ continue;
+ vreg = ins->sreg3;
}
-#if SIZEOF_VOID_P == 4
+#if SIZEOF_REGISTER == 4
if (regtype == 'l') {
/*
* Since some instructions reference the original long vreg,
case STACK_PTR:
case STACK_MP:
case STACK_VTYPE:
-#if SIZEOF_VOID_P == 8
+#if SIZEOF_REGISTER == 8
case STACK_I8:
#endif
#if !defined(__i386__) && !defined(MONO_ARCH_SOFT_FLOAT)
cfg->varinfo [pos]->inst_c0 = pos;
memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
cfg->vars [pos].idx = pos;
-#if SIZEOF_VOID_P == 4
+#if SIZEOF_REGISTER == 4
if (cfg->varinfo [pos]->type == STACK_I8) {
/* Modify the two component vars too */
MonoInst *var1;
guint32 i, lvregs_len;
gboolean dest_has_lvreg = FALSE;
guint32 stacktypes [128];
+ MonoInst **live_range_start, **live_range_end;
+ MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
*need_local_opts = FALSE;
stacktypes ['x'] = STACK_VTYPE;
#endif
-#if SIZEOF_VOID_P == 4
+#if SIZEOF_REGISTER == 4
/* Create MonoInsts for longs */
for (i = 0; i < cfg->num_varinfo; i++) {
MonoInst *ins = cfg->varinfo [i];
vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
lvregs_len = 0;
+
+ /*
+ * These arrays contain the first and last instructions accessing a given
+ * variable.
+ * Since we emit bblocks in the same order we process them here, and we
+ * don't split live ranges, these will precisely describe the live range of
+ * the variable, i.e. the instruction range where a valid value can be found
+ * in the variable's location.
+ */
+ /* FIXME: Only do this if debugging info is requested */
+ live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
+ live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
+ live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
+ live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
/* Add spill loads/stores */
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
cfg->cbb = bb;
MONO_BB_FOR_EACH_INS (bb, ins) {
const char *spec = INS_INFO (ins->opcode);
- int regtype, srcindex, sreg, tmp_reg, prev_dreg;
+ int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
gboolean store, no_lvreg;
+ int sregs [MONO_MAX_SRC_REGS];
if (G_UNLIKELY (cfg->verbose_level > 2))
mono_print_ins (ins);
spec2 [MONO_INST_DEST] = ' ';
spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
+ spec2 [MONO_INST_SRC3] = ' ';
spec = spec2;
} else if (MONO_IS_STORE_MEMINDEX (ins))
g_assert_not_reached ();
store = FALSE;
no_lvreg = FALSE;
- if (G_UNLIKELY (cfg->verbose_level > 2))
- printf ("\t %.3s %d %d %d\n", spec, ins->dreg, ins->sreg1, ins->sreg2);
+ if (G_UNLIKELY (cfg->verbose_level > 2)) {
+ printf ("\t %.3s %d", spec, ins->dreg);
+ num_sregs = mono_inst_get_src_registers (ins, sregs);
+ for (srcindex = 0; srcindex < 3; ++srcindex)
+ printf (" %d", sregs [srcindex]);
+ printf ("\n");
+ }
/***************/
/* DREG */
MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
MonoInst *store_ins;
int store_opcode;
+ MonoInst *def_ins = ins;
+ int dreg = ins->dreg; /* The original vreg */
store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
g_assert (var->opcode == OP_REGOFFSET);
if (ins->opcode == OP_MOVE) {
NULLIFY_INS (ins);
+ def_ins = NULL;
} else {
ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
ins->inst_basereg = var->inst_basereg;
mono_bblock_insert_after_ins (bb, ins, store_ins);
NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
mono_bblock_insert_after_ins (bb, ins, store_ins);
+ def_ins = store_ins;
}
else {
g_assert (store_opcode != OP_STOREV_MEMBASE);
ins->inst_imm = ins->inst_c0;
ins->inst_destbasereg = var->inst_basereg;
ins->inst_offset = var->inst_offset;
+ spec = INS_INFO (ins->opcode);
} else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
ins->opcode = store_opcode;
ins->inst_destbasereg = var->inst_basereg;
spec2 [MONO_INST_DEST] = ' ';
spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
+ spec2 [MONO_INST_SRC3] = ' ';
spec = spec2;
} else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
// FIXME: The backends expect the base reg to be in inst_basereg
/* Insert it after the instruction */
mono_bblock_insert_after_ins (bb, ins, store_ins);
+ def_ins = store_ins;
+
/*
* We can't assign ins->dreg to var->dreg here, since the
* sregs could use it. So set a flag, and do it after
}
}
}
+
+ if (def_ins && !live_range_start [dreg]) {
+ live_range_start [dreg] = def_ins;
+ live_range_start_bb [dreg] = bb;
+ }
}
/************/
/* SREGS */
/************/
- for (srcindex = 0; srcindex < 2; ++srcindex) {
- regtype = spec [(srcindex == 0) ? MONO_INST_SRC1 : MONO_INST_SRC2];
- sreg = srcindex == 0 ? ins->sreg1 : ins->sreg2;
+ num_sregs = mono_inst_get_src_registers (ins, sregs);
+ for (srcindex = 0; srcindex < 3; ++srcindex) {
+ regtype = spec [MONO_INST_SRC1 + srcindex];
+ sreg = sregs [srcindex];
g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
MonoInst *var = get_vreg_to_inst (cfg, sreg);
+ MonoInst *use_ins = ins;
MonoInst *load_ins;
guint32 load_opcode;
if (var->opcode == OP_REGVAR) {
- if (srcindex == 0)
- ins->sreg1 = var->dreg;
- else
- ins->sreg2 = var->dreg;
+ sregs [srcindex] = var->dreg;
+ //mono_inst_set_src_registers (ins, sregs);
+ live_range_end [sreg] = use_ins;
+ live_range_end_bb [sreg] = bb;
continue;
}
g_assert (load_opcode != OP_LOADV_MEMBASE);
if (vreg_to_lvreg [sreg]) {
+ g_assert (vreg_to_lvreg [sreg] != -1);
+
/* The variable is already loaded to an lvreg */
if (G_UNLIKELY (cfg->verbose_level > 2))
printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
- if (srcindex == 0)
- ins->sreg1 = vreg_to_lvreg [sreg];
- else
- ins->sreg2 = vreg_to_lvreg [sreg];
+ sregs [srcindex] = vreg_to_lvreg [sreg];
+ //mono_inst_set_src_registers (ins, sregs);
continue;
}
/* Try to fuse the load into the instruction */
if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
- ins->inst_basereg = var->inst_basereg;
+ sregs [0] = var->inst_basereg;
+ //mono_inst_set_src_registers (ins, sregs);
ins->inst_offset = var->inst_offset;
} else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
- ins->sreg2 = var->inst_basereg;
+ sregs [1] = var->inst_basereg;
+ //mono_inst_set_src_registers (ins, sregs);
ins->inst_offset = var->inst_offset;
} else {
if (MONO_IS_REAL_MOVE (ins)) {
*/
sreg = ins->dreg;
}
+ g_assert (sreg != -1);
vreg_to_lvreg [var->dreg] = sreg;
g_assert (lvregs_len < 1024);
lvregs [lvregs_len ++] = var->dreg;
}
}
- if (srcindex == 0)
- ins->sreg1 = sreg;
- else
- ins->sreg2 = sreg;
+ sregs [srcindex] = sreg;
+ //mono_inst_set_src_registers (ins, sregs);
if (regtype == 'l') {
NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
mono_bblock_insert_before_ins (bb, ins, load_ins);
NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
mono_bblock_insert_before_ins (bb, ins, load_ins);
+ use_ins = load_ins;
}
else {
-#if SIZEOF_VOID_P == 4
+#if SIZEOF_REGISTER == 4
g_assert (load_opcode != OP_LOADI8_MEMBASE);
#endif
NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
mono_bblock_insert_before_ins (bb, ins, load_ins);
+ use_ins = load_ins;
}
}
+
+ if (var->dreg < orig_next_vreg) {
+ live_range_end [var->dreg] = use_ins;
+ live_range_end_bb [var->dreg] = bb;
+ }
}
}
+ mono_inst_set_src_registers (ins, sregs);
if (dest_has_lvreg) {
+ g_assert (ins->dreg != -1);
vreg_to_lvreg [prev_dreg] = ins->dreg;
g_assert (lvregs_len < 1024);
lvregs [lvregs_len ++] = prev_dreg;
for (i = 0; i < lvregs_len; i++)
vreg_to_lvreg [lvregs [i]] = 0;
lvregs_len = 0;
+ } else if (ins->opcode == OP_NOP) {
+ ins->dreg = -1;
+ MONO_INST_NULLIFY_SREGS (ins);
}
if (cfg->verbose_level > 2)
mono_print_ins_index (1, ins);
}
}
+
+#ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
+ /*
+ * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
+ * by storing the current native offset into MonoMethodVar->live_range_start/end.
+ */
+ for (i = 0; i < cfg->num_varinfo; ++i) {
+ int vreg = MONO_VARINFO (cfg, i)->vreg;
+ MonoInst *ins;
+
+ if (live_range_start [vreg]) {
+ MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
+ ins->inst_c0 = i;
+ ins->inst_c1 = vreg;
+ mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
+ }
+ if (live_range_end [vreg]) {
+ MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
+ ins->inst_c0 = i;
+ ins->inst_c1 = vreg;
+ mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
+ }
+ }
+#endif
+
+ g_free (live_range_start);
+ g_free (live_range_end);
+ g_free (live_range_start_bb);
+ g_free (live_range_end_bb);
}
/**
* fcompare + branchCC.
* - create a helper function for allocating a stack slot, taking into account
* MONO_CFG_HAS_SPILLUP.
- * - merge new GC changes in mini.c.
* - merge r68207.
* - merge the ia64 switch changes.
- * - merge the mips conditional changes.
- * - remove unused opcodes from mini-ops.h, remove "op_" from the opcode names,
- * remove the op_ opcodes from the cpu-..md files, clean up the cpu-..md files.
- * - make the cpu_ tables smaller when the usage of the cee_ opcodes is removed.
* - optimize mono_regstate2_alloc_int/float.
* - fix the pessimistic handling of variables accessed in exception handler blocks.
* - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
* - LAST MERGE: 108395.
* - when returning vtypes in registers, generate IR and append it to the end of the
* last bb instead of doing it in the epilog.
- * - when the new JIT is done, use the ins emission macros in ir-emit.h instead of the
- * ones in inssel.h.
* - change the store opcodes so they use sreg1 instead of dreg to store the base register.
*/