#include <sys/time.h>
#endif
+#ifdef HAVE_ALLOCA_H
+#include <alloca.h>
+#endif
+
#ifdef HAVE_VALGRIND_MEMCHECK_H
#include <valgrind/memcheck.h>
#endif
#include <mono/metadata/security-manager.h>
#include <mono/metadata/threads-types.h>
#include <mono/metadata/security-core-clr.h>
+#include <mono/metadata/monitor.h>
#include <mono/utils/mono-compiler.h>
-#define NEW_IR
#include "mini.h"
#include "trace.h"
#include "jit-icalls.h"
-#include "aliasing.h"
-
#define BRANCH_COST 100
#define INLINE_LENGTH_LIMIT 20
#define INLINE_FAILURE do {\
void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
-int mono_method_to_ir2 (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
- MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
- guint inline_offset, gboolean is_virtual_call);
-
/* helper methods signature */
extern MonoMethodSignature *helper_sig_class_init_trampoline;
extern MonoMethodSignature *helper_sig_domain_get;
extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
+extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
/*
* Instruction metadata
#define FREG 'f'
#define VREG 'v'
#define XREG 'x'
-#if SIZEOF_VOID_P == 8
+#if SIZEOF_REGISTER == 8
#define LREG IREG
#else
#define LREG 'l'
return OP_MOVE;
case MONO_TYPE_I8:
case MONO_TYPE_U8:
-#if SIZEOF_VOID_P == 8
+#if SIZEOF_REGISTER == 8
return OP_MOVE;
#else
return OP_LMOVE;
return OP_FMOVE;
case MONO_TYPE_VALUETYPE:
if (type->data.klass->enumtype) {
- type = type->data.klass->enum_basetype;
+ type = mono_class_enum_basetype (type->data.klass);
goto handle_enum;
}
if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
} \
} while (0)
-#ifndef MONO_ARCH_EMIT_BOUNDS_CHECK
-#define MONO_ARCH_EMIT_BOUNDS_CHECK(cfg, array_reg, offset, index_reg) do { \
- int _length_reg = alloc_ireg (cfg); \
- MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, _length_reg, array_reg, offset); \
- MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, _length_reg, index_reg); \
- MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException"); \
- } while (0)
-#endif
-
-#define MONO_EMIT_BOUNDS_CHECK(cfg, array_reg, array_type, array_length_field, index_reg) do { \
- if (!(cfg->opt & MONO_OPT_ABCREM)) { \
- MONO_ARCH_EMIT_BOUNDS_CHECK ((cfg), (array_reg), G_STRUCT_OFFSET (array_type, array_length_field), (index_reg)); \
- } else { \
- MonoInst *ins; \
- MONO_INST_NEW ((cfg), ins, OP_BOUNDS_CHECK); \
- ins->sreg1 = array_reg; \
- ins->sreg2 = index_reg; \
- ins->inst_imm = G_STRUCT_OFFSET (array_type, array_length_field); \
- MONO_ADD_INS ((cfg)->cbb, ins); \
- (cfg)->flags |= MONO_CFG_HAS_ARRAY_ACCESS; \
- (cfg)->cbb->has_array_access = TRUE; \
- } \
- } while (0)
-
#if defined(__i386__) || defined(__x86_64__)
#define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
} while (0)
#endif
-#if SIZEOF_VOID_P == 8
+#if SIZEOF_REGISTER == 8
#define ADD_WIDEN_OP(ins, arg1, arg2) do { \
/* FIXME: Need to add many more cases */ \
- if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
+ if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
MonoInst *widen; \
int dr = alloc_preg (cfg); \
EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
return;
case MONO_TYPE_VALUETYPE:
if (type->data.klass->enumtype) {
- type = type->data.klass->enum_basetype;
+ type = mono_class_enum_basetype (type->data.klass);
goto handle_enum;
} else {
inst->klass = klass;
case OP_LCOMPARE:
case OP_ICOMPARE:
ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
- if ((src1->type == STACK_I8) || ((sizeof (gpointer) == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
+ if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
ins->opcode = OP_LCOMPARE;
else if (src1->type == STACK_R8)
ins->opcode = OP_FCOMPARE;
break;
case OP_ICOMPARE_IMM:
ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
- if ((src1->type == STACK_I8) || ((sizeof (gpointer) == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
+ if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
ins->opcode = OP_LCOMPARE_IMM;
break;
case CEE_BEQ:
ins->type = STACK_PTR;
switch (src1->type) {
case STACK_I4:
- ins->opcode = OP_MOVE;
+ ins->opcode = OP_ICONV_TO_U;
break;
case STACK_PTR:
case STACK_MP:
-#if SIZEOF_VOID_P == 8
+#if SIZEOF_REGISTER == 8
ins->opcode = OP_LCONV_TO_U;
#else
ins->opcode = OP_MOVE;
/*
* Don't use this if a generic_context is set, since that means AOT can't
* look up the method using just the image+token.
+ * table == 0 means this is a reference made from a wrapper.
*/
- if (cfg->compile_aot && !cfg->generic_context) {
+ if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
jump_info_token->image = image;
jump_info_token->token = token;
case 4:
MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
break;
-#if SIZEOF_VOID_P == 8
+#if SIZEOF_REGISTER == 8
case 8:
MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
#endif
val_reg = alloc_preg (cfg);
- if (sizeof (gpointer) == 8)
+ if (SIZEOF_REGISTER == 8)
MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
else
MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
}
#if !NO_UNALIGNED_ACCESS
- if (sizeof (gpointer) == 8) {
+ if (SIZEOF_REGISTER == 8) {
if (offset % 8) {
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
offset += 4;
#endif /* DISABLE_JIT */
void
-mini_emit_memcpy2 (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
+mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
{
int cur_reg;
}
#if !NO_UNALIGNED_ACCESS
- if (sizeof (gpointer) == 8) {
+ if (SIZEOF_REGISTER == 8) {
while (size >= 8) {
cur_reg = alloc_preg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
case MONO_TYPE_VALUETYPE:
if (type->data.klass->enumtype) {
- type = type->data.klass->enum_basetype;
+ type = mono_class_enum_basetype (type->data.klass);
goto handle_enum;
} else
return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
continue;
case MONO_TYPE_VALUETYPE:
if (simple_type->data.klass->enumtype) {
- simple_type = simple_type->data.klass->enum_basetype;
+ simple_type = mono_class_enum_basetype (simple_type->data.klass);
goto handle_enum;
}
if (args [i]->type != STACK_VTYPE)
{
#ifdef MONO_ARCH_RGCTX_REG
MonoCallInst *call;
- int rgctx_reg;
+ int rgctx_reg = -1;
if (rgctx_arg) {
rgctx_reg = mono_alloc_preg (cfg);
if (method->string_ctor) {
/* Create the real signature */
/* FIXME: Cache these */
- MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_full (cfg->mempool, sig);
+ MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
ctor_sig->ret = &mono_defaults.string_class->byval_arg;
sig = ctor_sig;
this_reg = this->dreg;
+#ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
+ if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
+ /* Make a call to delegate->invoke_impl */
+ call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
+ call->inst.inst_basereg = this_reg;
+ call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
+ MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
+
+ return (MonoInst*)call;
+ }
+#endif
+
if ((!cfg->compile_aot || enable_for_aot) &&
(!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
- ((method->flags & METHOD_ATTRIBUTE_FINAL) &&
+ (MONO_METHOD_IS_FINAL (method) &&
method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK))) {
/*
* the method is not virtual, we just need to ensure this is not null
return (MonoInst*)call;
}
-#ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
- if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
- /* Make a call to delegate->invoke_impl */
- call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
- call->inst.inst_basereg = this_reg;
- call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
- MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
-
- return (MonoInst*)call;
- }
-#endif
-
- if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
- ((method->flags & METHOD_ATTRIBUTE_FINAL) ||
- (method->klass && method->klass->flags & TYPE_ATTRIBUTE_SEALED))) {
+ if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
/*
* the method is virtual, but we can statically dispatch since either
 * its class or the method itself is sealed.
call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
- /* Initialize method->slot */
- mono_class_setup_vtable (method->klass);
-
vtable_reg = alloc_preg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
slot_reg = vtable_reg;
call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
(mono_method_get_vtable_index (method) * SIZEOF_VOID_P);
+#ifdef MONO_ARCH_HAVE_IMT
if (imt_arg) {
g_assert (mono_method_signature (method)->generic_param_count);
emit_imt_argument (cfg, call, imt_arg);
}
+#endif
}
call->inst.sreg1 = slot_reg;
else
n = mono_class_value_size (klass, &align);
+#if HAVE_WRITE_BARRIERS
+ /* if native is true there should be no references in the struct */
+ if (klass->has_references && !native) {
+ /* Avoid barriers when storing to the stack */
+ if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
+ (dest->opcode == OP_LDADDR))) {
+ iargs [0] = dest;
+ iargs [1] = src;
+ EMIT_NEW_PCONST (cfg, iargs [2], klass);
+
+ mono_emit_jit_icall (cfg, mono_value_copy, iargs);
+ }
+ }
+#endif
+
if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
/* FIXME: Optimize the case when src/dest is OP_LDADDR */
- mini_emit_memcpy2 (cfg, dest->dreg, 0, src->dreg, 0, n, align);
+ mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
} else {
iargs [0] = dest;
iargs [1] = src;
EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
alloc_ftn = mono_object_new;
- } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib) {
+ } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
/* This happens often in argument checking code, eg. throw new FooException... */
/* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
MONO_START_BB (cfg, false_bb);
- MONO_EMIT_NEW_ICONST (cfg, res_reg, 0);
+ MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
MONO_START_BB (cfg, is_null_bb);
}
/* Set invoke_impl field */
- trampoline = mono_create_delegate_trampoline (klass);
- EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_ABS, trampoline);
+ if (cfg->compile_aot) {
+ EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
+ } else {
+ trampoline = mono_create_delegate_trampoline (klass);
+ EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
+ }
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
/* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
MONO_ADD_INS (cfg->bb_exit, dummy_use);
}
+static int inline_limit;
+static gboolean inline_limit_inited;
+
static gboolean
mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
{
- MonoMethodHeader *header = mono_method_get_header (method);
+ MonoMethodHeader *header;
MonoVTable *vtable;
#ifdef MONO_ARCH_SOFT_FLOAT
MonoMethodSignature *sig = mono_method_signature (method);
return TRUE;
#endif
+ if (method->is_inflated)
+ /* Avoid inflating the header */
+ header = mono_method_get_header (((MonoMethodInflated*)method)->declaring);
+ else
+ header = mono_method_get_header (method);
+
if ((method->iflags & METHOD_IMPL_ATTRIBUTE_RUNTIME) ||
(method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
(method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
/* also consider num_locals? */
/* Do the size check early to avoid creating vtables */
- if (getenv ("MONO_INLINELIMIT")) {
- if (header->code_size >= atoi (getenv ("MONO_INLINELIMIT"))) {
- return FALSE;
- }
- } else if (header->code_size >= INLINE_LENGTH_LIMIT)
+ if (!inline_limit_inited) {
+ if (getenv ("MONO_INLINELIMIT"))
+ inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
+ else
+ inline_limit = INLINE_LENGTH_LIMIT;
+ inline_limit_inited = TRUE;
+ }
+ if (header->code_size >= inline_limit)
return FALSE;
/*
array_reg = arr->dreg;
index_reg = index->dreg;
-#if SIZEOF_VOID_P == 8
+#if SIZEOF_REGISTER == 8
/* The array reg is 64 bits but the index reg is only 32 */
index2_reg = alloc_preg (cfg);
MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
#else
- index2_reg = index_reg;
+ if (index->type == STACK_I8) {
+ index2_reg = alloc_preg (cfg);
+ MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
+ } else {
+ index2_reg = index_reg;
+ }
#endif
MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
int mult_reg = alloc_preg (cfg);
int add_reg = alloc_preg (cfg);
-#if SIZEOF_VOID_P == 8
+#if SIZEOF_REGISTER == 8
/* The array reg is 64 bits but the index reg is only 32 */
MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
#else
MONO_ADD_INS (cfg->cbb, ins);
return ins;
}
+ } else if (cmethod->klass == mono_defaults.monitor_class) {
+#if defined(MONO_ARCH_MONITOR_OBJECT_REG)
+ if (strcmp (cmethod->name, "Enter") == 0) {
+ MonoCallInst *call;
+
+ call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
+ NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
+ mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
+ MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
+
+ return (MonoInst*)call;
+ } else if (strcmp (cmethod->name, "Exit") == 0) {
+ MonoCallInst *call;
+
+ call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
+ NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
+ mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
+ MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
+
+ return (MonoInst*)call;
+ }
+#elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
+ MonoMethod *fast_method = NULL;
+
+ /* Avoid infinite recursion */
+ if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
+ (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
+ strcmp (cfg->method->name, "FastMonitorExit") == 0))
+ return NULL;
+
+ if (strcmp (cmethod->name, "Enter") == 0 ||
+ strcmp (cmethod->name, "Exit") == 0)
+ fast_method = mono_monitor_get_fast_path (cmethod);
+ if (!fast_method)
+ return NULL;
+
+ return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
+#endif
} else if (mini_class_is_system_array (cmethod->klass) &&
strcmp (cmethod->name, "GetGenericValueImpl") == 0) {
MonoInst *addr, *store, *load;
(strcmp (cmethod->klass->name, "Interlocked") == 0)) {
ins = NULL;
-#if SIZEOF_VOID_P == 8
+#if SIZEOF_REGISTER == 8
if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
/* 64 bit reads are already atomic */
MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
if (fsig->params [0]->type == MONO_TYPE_I4)
opcode = OP_ATOMIC_ADD_NEW_I4;
-#if SIZEOF_VOID_P == 8
+#if SIZEOF_REGISTER == 8
else if (fsig->params [0]->type == MONO_TYPE_I8)
opcode = OP_ATOMIC_ADD_NEW_I8;
#endif
if (fsig->params [0]->type == MONO_TYPE_I4)
opcode = OP_ATOMIC_ADD_NEW_I4;
-#if SIZEOF_VOID_P == 8
+#if SIZEOF_REGISTER == 8
else if (fsig->params [0]->type == MONO_TYPE_I8)
opcode = OP_ATOMIC_ADD_NEW_I8;
#endif
if (fsig->params [0]->type == MONO_TYPE_I4)
opcode = OP_ATOMIC_ADD_NEW_I4;
-#if SIZEOF_VOID_P == 8
+#if SIZEOF_REGISTER == 8
else if (fsig->params [0]->type == MONO_TYPE_I8)
opcode = OP_ATOMIC_ADD_NEW_I8;
#endif
ins->inst_basereg = args [0]->dreg;
ins->inst_offset = 0;
ins->sreg2 = args [1]->dreg;
- ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
+ ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
MONO_ADD_INS (cfg->cbb, ins);
}
}
if (fsig->params [0]->type == MONO_TYPE_I4)
opcode = OP_ATOMIC_EXCHANGE_I4;
-#if SIZEOF_VOID_P == 8
+#if SIZEOF_REGISTER == 8
else if ((fsig->params [0]->type == MONO_TYPE_I8) ||
(fsig->params [0]->type == MONO_TYPE_I) ||
(fsig->params [0]->type == MONO_TYPE_OBJECT))
* value is a constant.
*/
if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
- if (fsig->params [1]->type == MONO_TYPE_I4 && args [2]->opcode == OP_ICONST) {
+ if ((fsig->params [1]->type == MONO_TYPE_I4 ||
+ (sizeof (gpointer) == 4 && fsig->params [1]->type == MONO_TYPE_I))
+ && args [2]->opcode == OP_ICONST) {
MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_IMM_I4);
ins->dreg = alloc_ireg (cfg);
ins->sreg1 = args [0]->dreg;
guint32 prev_cil_offset_to_bb_len;
MonoMethod *prev_current_method;
MonoGenericContext *prev_generic_context;
+ gboolean ret_var_set, prev_ret_var_set;
g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
prev_cbb = cfg->cbb;
prev_current_method = cfg->current_method;
prev_generic_context = cfg->generic_context;
+ prev_ret_var_set = cfg->ret_var_set;
- costs = mono_method_to_ir2 (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
+ costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
+
+ ret_var_set = cfg->ret_var_set;
cfg->inlined_method = prev_inlined_method;
cfg->real_offset = prev_real_offset;
cfg->arg_types = prev_arg_types;
cfg->current_method = prev_current_method;
cfg->generic_context = prev_generic_context;
+ cfg->ret_var_set = prev_ret_var_set;
if ((costs >= 0 && costs < 60) || inline_allways) {
if (cfg->verbose_level > 2)
MonoBasicBlock *prev = ebblock->in_bb [0];
mono_merge_basic_blocks (cfg, prev, ebblock);
cfg->cbb = prev;
+ if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
+ mono_merge_basic_blocks (cfg, prev_cbb, prev);
+ cfg->cbb = prev_cbb;
+ }
} else {
cfg->cbb = ebblock;
}
* If the inlined method contains only a throw, then the ret var is not
* set, so set it to a dummy value.
*/
- if (!cfg->ret_var_set) {
+ if (!ret_var_set) {
static double r8_0 = 0.0;
switch (rvar->type) {
* sequence and return the pointer to the data and the size.
*/
static const char*
-initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size)
+initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
{
/*
* newarr[System.Int32]
if (!field)
return NULL;
+ *out_field_token = field_token;
+
cmethod = mini_get_method (NULL, method, token, NULL, NULL);
if (!cmethod)
return NULL;
} else {
/*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
g_assert (!aot);
- data_ptr = field->data;
+ data_ptr = mono_field_get_data (field);
}
return data_ptr;
}
restart = TRUE;
break;
}
+ case OP_CKFINITE: {
+ MonoInst *iargs [2];
+ MonoInst *call, *cmp;
+
+ /* Convert to icall+icompare+cond_exc+move */
+
+ /* Create dummy MonoInst's for the arguments */
+ MONO_INST_NEW (cfg, iargs [0], OP_ARG);
+ iargs [0]->dreg = ins->sreg1;
+
+ call = mono_emit_jit_icall (cfg, mono_isfinite, iargs);
+
+ MONO_INST_NEW (cfg, cmp, OP_ICOMPARE_IMM);
+ cmp->sreg1 = call->dreg;
+ cmp->inst_imm = 1;
+ MONO_ADD_INS (cfg->cbb, cmp);
+
+ MONO_EMIT_NEW_COND_EXC (cfg, INE_UN, "ArithmeticException");
+
+ /* Do the assignment if the value is finite */
+ MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, ins->dreg, ins->sreg1);
+
+ restart = TRUE;
+ break;
+ }
default:
if (spec [MONO_INST_SRC1] == 'f' || spec [MONO_INST_SRC2] == 'f' || spec [MONO_INST_DEST] == 'f') {
mono_print_ins (ins);
{
MonoInst *ins;
guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
- if ((opcode == OP_MOVE) && ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
+ if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
+ ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
/* Optimize reg-reg moves away */
/*
* Can't optimize other opcodes, since sp[0] might point to
return NULL;
}
+/*
+ * is_exception_class:
+ *
+ *   Return TRUE if CLASS is System.Exception or derives from it,
+ * walking up the parent-class chain; FALSE otherwise (including
+ * when CLASS is NULL).
+ */
+static gboolean
+is_exception_class (MonoClass *class)
+{
+	while (class) {
+		if (class == mono_defaults.exception_class)
+			return TRUE;
+		class = class->parent;
+	}
+	return FALSE;
+}
+
/*
- * mono_method_to_ir: translates IL into basic blocks containing trees
+ * mono_method_to_ir:
+ *
+ * Translate the .net IL into linear IR.
*/
int
-mono_method_to_ir2 (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
+mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
guint inline_offset, gboolean is_virtual_call)
{
if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
+ tblock->flags |= BB_EXCEPTION_HANDLER;
tblock->real_offset = clause->data.filter_offset;
tblock->in_scount = 1;
tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
clause->data.catch_class &&
cfg->generic_sharing_context &&
mono_class_check_context_used (clause->data.catch_class)) {
- if (mono_method_get_context (method)->method_inst)
- GENERIC_SHARING_FAILURE (CEE_NOP);
-
/*
* In shared generic code with catch
* clauses containing type variables
}
}
} else {
- arg_array = alloca (sizeof (MonoInst *) * num_args);
+ arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
cfg->cbb = start_bblock;
cfg->args = arg_array;
mono_save_args (cfg, sig, inline_args);
}
class_inits = NULL;
+ /* We force the vtable variable here for all shared methods
+ for the possibility that they might show up in a stack
+ trace where their exact instantiation is needed. */
+ if (cfg->generic_sharing_context)
+ mono_get_vtable_var (cfg);
+
/* add a check for this != NULL to inlined methods */
if (is_virtual_call) {
MonoInst *arg_ins;
case CEE_LDC_R4: {
float *f;
/* FIXME: we should really allocate this only late in the compilation process */
- mono_domain_lock (cfg->domain);
f = mono_domain_alloc (cfg->domain, sizeof (float));
- mono_domain_unlock (cfg->domain);
CHECK_OPSIZE (5);
CHECK_STACK_OVF (1);
MONO_INST_NEW (cfg, ins, OP_R4CONST);
case CEE_LDC_R8: {
double *d;
/* FIXME: we should really allocate this only late in the compilation process */
- mono_domain_lock (cfg->domain);
d = mono_domain_alloc (cfg->domain, sizeof (double));
- mono_domain_unlock (cfg->domain);
CHECK_OPSIZE (9);
CHECK_STACK_OVF (1);
MONO_INST_NEW (cfg, ins, OP_R8CONST);
MonoMethodSignature *fsig = mono_method_signature (cmethod);
int i, n;
- /* FIXME: Remove OP_JMP from mini-amd64.c when the old JIT is removed */
-
/* Handle tail calls similarly to calls */
n = fsig->param_count + fsig->hasthis;
cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
cil_method = cmethod;
} else if (constrained_call) {
- cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
+ if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
+ /*
+ * This is needed since get_method_constrained can't find
+ * the method in klass representing a type var.
+ * The type var is guaranteed to be a reference type in this
+ * case.
+ */
+ cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
+ cil_method = cmethod;
+ g_assert (!cmethod->klass->valuetype);
+ } else {
+ cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
+ }
} else {
cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
cil_method = cmethod;
CHECK_CFG_EXCEPTION;
}
- if (cmethod->string_ctor)
+ if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
g_assert_not_reached ();
}
}
if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
- (cmethod->flags & METHOD_ATTRIBUTE_FINAL)) {
+ MONO_METHOD_IS_FINAL (cmethod)) {
if (virtual)
check_this = TRUE;
virtual = 0;
/* Calling virtual generic methods */
if (cmethod && virtual &&
(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
- !((cmethod->flags & METHOD_ATTRIBUTE_FINAL) &&
+ !(MONO_METHOD_IS_FINAL (cmethod) &&
cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
mono_method_signature (cmethod)->generic_param_count) {
MonoInst *this_temp, *this_arg_temp, *store;
/* Inlining */
if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
- (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || (cmethod->flags & METHOD_ATTRIBUTE_FINAL)) &&
+ (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
mono_method_check_inlining (cfg, cmethod) &&
!g_list_find (dont_inline, cmethod)) {
int costs;
if (context_used && !imt_arg && !array_rank &&
(!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
!mono_class_generic_sharing_enabled (cmethod->klass)) &&
- (!virtual || cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
+ (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
INLINE_FAILURE;
else if (*ip == CEE_CALLI)
g_assert (!vtable_arg);
else
+ /* FIXME: what the hell is this??? */
g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
!(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
NOT_IMPLEMENTED;
#endif
} else {
- ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
+ if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
+ /*
+ * Instead of emitting an indirect call, emit a direct call
+ * with the contents of the aotconst as the patch info.
+ */
+ ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
+ NULLIFY_INS (addr);
+ } else {
+ ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
+ }
}
if (!MONO_TYPE_IS_VOID (fsig->ret)) {
if (fsig->pinvoke && !fsig->ret->byref) {
type_from_op (cmp, sp [0], NULL);
CHECK_TYPE (cmp);
-#if SIZEOF_VOID_P == 4
+#if SIZEOF_REGISTER == 4
if (cmp->opcode == OP_LCOMPARE_IMM) {
/* Convert it to OP_LCOMPARE */
MONO_INST_NEW (cfg, ins, OP_I8CONST);
MonoBasicBlock **targets;
MonoBasicBlock *default_bblock;
MonoJumpInfoBBTable *table;
-#ifndef __arm__
int offset_reg = alloc_preg (cfg);
int target_reg = alloc_preg (cfg);
int table_reg = alloc_preg (cfg);
int sum_reg = alloc_preg (cfg);
-#endif
+ gboolean use_op_switch;
CHECK_OPSIZE (5);
CHECK_STACK (1);
table->table = targets;
table->table_size = n;
+ use_op_switch = FALSE;
#ifdef __arm__
/* ARM implements SWITCH statements differently */
/* FIXME: Make it use the generic implementation */
- /* the backend code will deal with aot vs normal case */
- MONO_INST_NEW (cfg, ins, OP_SWITCH);
- ins->sreg1 = src1->dreg;
- ins->inst_p0 = table;
- ins->inst_many_bb = targets;
- ins->klass = GUINT_TO_POINTER (n);
- MONO_ADD_INS (cfg->cbb, ins);
-#else
- if (sizeof (gpointer) == 8)
- MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
- else
- MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
-
-#if SIZEOF_VOID_P == 8
- /* The upper word might not be zero, and we add it to a 64 bit address later */
- MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
+ if (!cfg->compile_aot)
+ use_op_switch = TRUE;
#endif
-
- if (cfg->compile_aot) {
- MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
- } else {
- MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
- ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
+
+ if (use_op_switch) {
+ MONO_INST_NEW (cfg, ins, OP_SWITCH);
+ ins->sreg1 = src1->dreg;
ins->inst_p0 = table;
- ins->dreg = table_reg;
+ ins->inst_many_bb = targets;
+ ins->klass = GUINT_TO_POINTER (n);
MONO_ADD_INS (cfg->cbb, ins);
- }
+ } else {
+ if (sizeof (gpointer) == 8)
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
+ else
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
- /* FIXME: Use load_memindex */
- MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
- MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
- MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
+#if SIZEOF_REGISTER == 8
+ /* The upper word might not be zero, and we add it to a 64 bit address later */
+ MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
#endif
+
+ if (cfg->compile_aot) {
+ MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
+ } else {
+ MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
+ ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
+ ins->inst_p0 = table;
+ ins->dreg = table_reg;
+ MONO_ADD_INS (cfg->cbb, ins);
+ }
+
+ /* FIXME: Use load_memindex */
+ MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
+ MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
+ MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
+ }
start_new_bblock = 1;
inline_costs += (BRANCH_COST * 2);
break;
CHECK_STACK (2);
sp -= 2;
+#if HAVE_WRITE_BARRIERS
+ if (*ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0))) {
+ /* insert call to write barrier */
+ MonoMethod *write_barrier = mono_gc_get_write_barrier ();
+ mono_emit_method_call (cfg, write_barrier, sp, NULL);
+ ins_flag = 0;
+ ip++;
+ break;
+ }
+#endif
+
NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
ins->flags |= ins_flag;
ins_flag = 0;
if (imm_opcode != -1) {
ins->opcode = imm_opcode;
if (sp [1]->opcode == OP_I8CONST) {
-#if SIZEOF_VOID_P == 8
+#if SIZEOF_REGISTER == 8
ins->inst_imm = sp [1]->inst_l;
#else
ins->inst_ls_word = sp [1]->inst_ls_word;
ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
ins->sreg2 = -1;
- sp [1]->opcode = OP_NOP;
+ /* Might be followed by an instruction added by ADD_WIDEN_OP */
+ if (sp [1]->next == NULL)
+ sp [1]->opcode = OP_NOP;
}
}
MONO_ADD_INS ((cfg)->cbb, (ins));
int data = sp [-1]->inst_c0;
sp [-1]->opcode = OP_I8CONST;
sp [-1]->type = STACK_I8;
-#if SIZEOF_VOID_P == 8
+#if SIZEOF_REGISTER == 8
if ((*ip) == CEE_CONV_U8)
sp [-1]->inst_c0 = (guint32)data;
else
if (bblock->out_of_line) {
MonoInst *iargs [2];
- if (cfg->method->klass->image == mono_defaults.corlib) {
+ if (image == mono_defaults.corlib) {
/*
* Avoid relocations in AOT and save some space by using a
* version of helper_ldstr specialized to mscorlib.
* Generate smaller code for the common newobj <exception> instruction in
* argument checking code.
*/
- if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib && n <= 2 &&
+ if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
+ is_exception_class (cmethod->klass) && n <= 2 &&
((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
MonoInst *iargs [3];
iargs [0] = NULL;
if (mini_class_is_system_array (cmethod->klass)) {
- if (context_used)
- GENERIC_SHARING_FAILURE (*ip);
- g_assert (!context_used);
g_assert (!vtable_arg);
- EMIT_NEW_METHODCONST (cfg, *sp, cmethod);
+
+ if (context_used) {
+ *sp = emit_get_rgctx_method (cfg, context_used,
+ cmethod, MONO_RGCTX_INFO_METHOD);
+ } else {
+ EMIT_NEW_METHODCONST (cfg, *sp, cmethod);
+ }
/* Avoid varargs in the common case */
if (fsig->param_count == 1)
!mono_class_generic_sharing_enabled (cmethod->klass))) {
MonoInst *cmethod_addr;
- g_assert (!callvirt_this_arg);
-
cmethod_addr = emit_get_rgctx_method (cfg, context_used,
cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
mono_emit_rgctx_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
} else {
INLINE_FAILURE;
- mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
- callvirt_this_arg, NULL, vtable_arg);
+ ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
+ callvirt_this_arg, NULL, vtable_arg);
+ if (mono_method_is_generic_sharable_impl (cmethod, TRUE) && ((MonoCallInst*)ins)->method->wrapper_type == MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)
+ GENERIC_SHARING_FAILURE (*ip);
}
}
ins->inst_target_bb = tblock;
GET_BBLOCK (cfg, tblock, ip);
/*
- * This leads to some inconsistency, since the two bblocks are not
- * really connected, but it is needed for handling stack arguments
- * correct (See test_0_box_brtrue_opt_regress_81102).
+ * This leads to some inconsistency, since the two bblocks are
+ * not really connected, but it is needed for handling stack
+ * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
+ * FIXME: This should only be needed if sp != stack_start, but that
+ * doesn't work for some reason (test failure in mcs/tests on x86).
*/
link_bblock (cfg, bblock, tblock);
if (sp != stack_start) {
} else {
MonoInst *store;
+#if HAVE_WRITE_BARRIERS
+ if (mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
+ /* insert call to write barrier */
+ MonoMethod *write_barrier = mono_gc_get_write_barrier ();
+ MonoInst *iargs [2];
+ int dreg;
+
+ dreg = alloc_preg (cfg);
+ EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
+ iargs [1] = sp [1];
+ mono_emit_method_call (cfg, write_barrier, iargs, NULL);
+ }
+#endif
+
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
store->flags |= ins_flag;
if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
if (cfg->verbose_level > 2)
- printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, field->name);
+ printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
class_inits = g_slist_prepend (class_inits, vtable);
} else {
if (cfg->run_cctors) {
if (*ip == CEE_LDSFLDA) {
ins->klass = mono_class_from_mono_type (field->type);
+ ins->type = STACK_PTR;
*sp++ = ins;
} else if (*ip == CEE_STSFLD) {
MonoInst *store;
gpointer addr = (char*)vtable->data + field->offset;
int ro_type = field->type->type;
if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
- ro_type = field->type->data.klass->enum_basetype->type;
+ ro_type = mono_class_enum_basetype (field->type->data.klass)->type;
}
- /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, field->name);*/
+ /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
is_const = TRUE;
switch (ro_type) {
case MONO_TYPE_BOOLEAN:
EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
sp++;
break;
+#ifndef HAVE_MOVING_COLLECTOR
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_STRING:
type_to_eval_stack_type ((cfg), field->type, *sp);
sp++;
break;
+#endif
case MONO_TYPE_I8:
case MONO_TYPE_U8:
EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
MonoInst *len_ins;
const char *data_ptr;
int data_size = 0;
+ guint32 field_token;
CHECK_STACK (1);
--sp;
* for small sizes open code the memcpy
* ensure the rva field is big enough
*/
- if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size))) {
+ if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
MonoMethod *memcpy_method = get_memcpy_method ();
MonoInst *iargs [3];
int add_reg = alloc_preg (cfg);
EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
if (cfg->compile_aot) {
- EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(data_ptr), STACK_PTR, NULL);
+ EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
} else {
EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
}
CHECK_OPSIZE (5);
n = read32 (ip + 1);
- if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
+ if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
+ method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
handle = mono_method_get_wrapper_data (method, n);
handle_class = mono_method_get_wrapper_data (method, n + 1);
if (handle_class == mono_defaults.typehandle_class)
goto load_error;
mono_class_init (handle_class);
if (cfg->generic_sharing_context) {
- if (handle_class == mono_defaults.typehandle_class) {
+ if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
+ mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
+ /* This case handles ldtoken
+ of an open type, like for
+ typeof(Gen<>). */
+ context_used = 0;
+ } else if (handle_class == mono_defaults.typehandle_class) {
/* If we get a MONO_TYPE_CLASS
then we need to provide the
open type, not an
g_assert_not_reached ();
}
- if (cfg->opt & MONO_OPT_SHARED) {
+ if ((cfg->opt & MONO_OPT_SHARED) &&
+ method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
+ method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
MonoInst *addr, *vtvar, *iargs [3];
int method_context_used;
cmp->sreg2 = sp [1]->dreg;
type_from_op (cmp, sp [0], sp [1]);
CHECK_TYPE (cmp);
- if ((sp [0]->type == STACK_I8) || ((sizeof (gpointer) == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
+ if ((sp [0]->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
cmp->opcode = OP_LCOMPARE;
else if (sp [0]->type == STACK_R8)
cmp->opcode = OP_FCOMPARE;
}
case CEE_LDFTN: {
MonoInst *argconst;
- MonoMethod *cil_method, *ctor_method;
+ MonoMethod *cil_method;
gboolean needs_static_rgctx_invoke;
CHECK_STACK_OVF (1);
/* FIXME: SGEN support */
/* FIXME: handle shared static generic methods */
/* FIXME: handle this in shared code */
- if (!needs_static_rgctx_invoke && !context_used && (sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context)) && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
- MonoInst *target_ins;
+ if (!needs_static_rgctx_invoke && !context_used && (sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
+ MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
+ if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
+ MonoInst *target_ins;
- ip += 6;
- if (cfg->verbose_level > 3)
- g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
- target_ins = sp [-1];
- sp --;
- *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod);
- ip += 5;
- sp ++;
- break;
+ ip += 6;
+ if (cfg->verbose_level > 3)
+ g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
+ target_ins = sp [-1];
+ sp --;
+ *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod);
+ ip += 5;
+ sp ++;
+ break;
+ }
}
#endif
sp -= 3;
if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
- mini_emit_memcpy2 (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
+ mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
} else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
/* emit_memset only works when val == 0 */
mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
MonoInst *store;
cfg->cbb = init_localsbb;
- cfg->ip = header->code;
+ cfg->ip = NULL;
for (i = 0; i < header->num_locals; ++i) {
MonoType *ptype = header->locals [i];
int t = ptype->type;
dreg = cfg->locals [i]->dreg;
if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
- t = ptype->data.klass->enum_basetype->type;
+ t = mono_class_enum_basetype (ptype->data.klass)->type;
if (ptype->byref) {
MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
} else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
MONO_ADD_INS (init_localsbb, ins);
EMIT_NEW_LOCSTORE (cfg, store, i, ins);
} else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
-+ ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
+ ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
} else {
MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
return OP_LOADI4_MEM;
case OP_LOADU4_MEMBASE:
return OP_LOADU4_MEM;
-#if SIZEOF_VOID_P == 8
+#if SIZEOF_REGISTER == 8
case OP_LOADI8_MEMBASE:
return OP_LOADI8_MEM;
#endif
mono_op_to_op_imm_noemul (int opcode)
{
switch (opcode) {
-#if SIZEOF_VOID_P == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
+#if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
case OP_LSHR:
case OP_LSHL:
case OP_LSHR_UN:
vreg = ins->sreg2;
}
-#if SIZEOF_VOID_P == 4
+#if SIZEOF_REGISTER == 4
if (regtype == 'l') {
/*
* Since some instructions reference the original long vreg,
case STACK_PTR:
case STACK_MP:
case STACK_VTYPE:
-#if SIZEOF_VOID_P == 8
+#if SIZEOF_REGISTER == 8
case STACK_I8:
#endif
#if !defined(__i386__) && !defined(MONO_ARCH_SOFT_FLOAT)
cfg->varinfo [pos]->inst_c0 = pos;
memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
cfg->vars [pos].idx = pos;
-#if SIZEOF_VOID_P == 4
+#if SIZEOF_REGISTER == 4
if (cfg->varinfo [pos]->type == STACK_I8) {
/* Modify the two component vars too */
MonoInst *var1;
guint32 i, lvregs_len;
gboolean dest_has_lvreg = FALSE;
guint32 stacktypes [128];
+ MonoInst **live_range_start, **live_range_end;
+ MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
*need_local_opts = FALSE;
stacktypes ['x'] = STACK_VTYPE;
#endif
-#if SIZEOF_VOID_P == 4
+#if SIZEOF_REGISTER == 4
/* Create MonoInsts for longs */
for (i = 0; i < cfg->num_varinfo; i++) {
MonoInst *ins = cfg->varinfo [i];
vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
lvregs_len = 0;
+
+ /*
+ * These arrays contain the first and last instructions accessing a given
+ * variable.
+ * Since we emit bblocks in the same order we process them here, and we
+ * don't split live ranges, these will precisely describe the live range of
+ * the variable, i.e. the instruction range where a valid value can be found
+ * in the variables location.
+ */
+ /* FIXME: Only do this if debugging info is requested */
+ live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
+ live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
+ live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
+ live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
/* Add spill loads/stores */
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
MonoInst *store_ins;
int store_opcode;
+ MonoInst *def_ins = ins;
+ int dreg = ins->dreg; /* The original vreg */
store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
g_assert (var->opcode == OP_REGOFFSET);
if (ins->opcode == OP_MOVE) {
NULLIFY_INS (ins);
+ def_ins = NULL;
} else {
ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
ins->inst_basereg = var->inst_basereg;
mono_bblock_insert_after_ins (bb, ins, store_ins);
NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
mono_bblock_insert_after_ins (bb, ins, store_ins);
+ def_ins = store_ins;
}
else {
g_assert (store_opcode != OP_STOREV_MEMBASE);
/* Insert it after the instruction */
mono_bblock_insert_after_ins (bb, ins, store_ins);
+ def_ins = store_ins;
+
/*
* We can't assign ins->dreg to var->dreg here, since the
* sregs could use it. So set a flag, and do it after
}
}
}
+
+ if (def_ins && !live_range_start [dreg]) {
+ live_range_start [dreg] = def_ins;
+ live_range_start_bb [dreg] = bb;
+ }
}
/************/
g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
MonoInst *var = get_vreg_to_inst (cfg, sreg);
+ MonoInst *use_ins = ins;
MonoInst *load_ins;
guint32 load_opcode;
ins->sreg1 = var->dreg;
else
ins->sreg2 = var->dreg;
+ live_range_end [var->dreg] = use_ins;
+ live_range_end_bb [var->dreg] = bb;
continue;
}
mono_bblock_insert_before_ins (bb, ins, load_ins);
NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
mono_bblock_insert_before_ins (bb, ins, load_ins);
+ use_ins = load_ins;
}
else {
-#if SIZEOF_VOID_P == 4
+#if SIZEOF_REGISTER == 4
g_assert (load_opcode != OP_LOADI8_MEMBASE);
#endif
NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
mono_bblock_insert_before_ins (bb, ins, load_ins);
+ use_ins = load_ins;
}
}
+
+ if (var->dreg < orig_next_vreg) {
+ live_range_end [var->dreg] = use_ins;
+ live_range_end_bb [var->dreg] = bb;
+ }
}
}
mono_print_ins_index (1, ins);
}
}
+
+#ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
+ /*
+ * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
+ * by storing the current native offset into MonoMethodVar->live_range_start/end.
+ */
+ for (i = 0; i < cfg->num_varinfo; ++i) {
+ int vreg = MONO_VARINFO (cfg, i)->vreg;
+ MonoInst *ins;
+
+ if (live_range_start [vreg]) {
+ MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
+ ins->inst_c0 = i;
+ mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
+ }
+ if (live_range_end [vreg]) {
+ MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
+ ins->inst_c0 = i;
+ mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
+ }
+ }
+#endif
+
+ g_free (live_range_start);
+ g_free (live_range_end);
+ g_free (live_range_start_bb);
+ g_free (live_range_end_bb);
}
/**
* fcompare + branchCC.
* - create a helper function for allocating a stack slot, taking into account
* MONO_CFG_HAS_SPILLUP.
- * - merge new GC changes in mini.c.
* - merge r68207.
* - merge the ia64 switch changes.
- * - merge the mips conditional changes.
- * - remove unused opcodes from mini-ops.h, remove "op_" from the opcode names,
- * remove the op_ opcodes from the cpu-..md files, clean up the cpu-..md files.
- * - make the cpu_ tables smaller when the usage of the cee_ opcodes is removed.
* - optimize mono_regstate2_alloc_int/float.
* - fix the pessimistic handling of variables accessed in exception handler blocks.
* - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
* - LAST MERGE: 108395.
* - when returning vtypes in registers, generate IR and append it to the end of the
* last bb instead of doing it in the epilog.
- * - when the new JIT is done, use the ins emission macros in ir-emit.h instead of the
- * ones in inssel.h.
* - change the store opcodes so they use sreg1 instead of dreg to store the base register.
*/