return alloc_dreg (cfg, stack_type);
}
+/*
+ * mono_alloc_ireg_ref:
+ *
+ * Allocate an IREG, and mark it as holding a GC ref.
+ */
+guint32
+mono_alloc_ireg_ref (MonoCompile *cfg)
+{
+ return alloc_ireg_ref (cfg);
+}
+
+/*
+ * mono_alloc_ireg_mp:
+ *
+ * Allocate an IREG, and mark it as holding a managed pointer.
+ */
+guint32
+mono_alloc_ireg_mp (MonoCompile *cfg)
+{
+ return alloc_ireg_mp (cfg);
+}
+
+/*
+ * mono_alloc_ireg_copy:
+ *
+ * Allocate an IREG with the same GC type as VREG.
+ */
+guint32
+mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
+{
+ if (vreg_is_ref (cfg, vreg))
+ return alloc_ireg_ref (cfg);
+ else if (vreg_is_mp (cfg, vreg))
+ return alloc_ireg_mp (cfg);
+ else
+ return alloc_ireg (cfg);
+}
+
guint
mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
{
#if defined(TARGET_X86) || defined(TARGET_AMD64)
#define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
- (dest)->dreg = alloc_preg ((cfg)); \
+ (dest)->dreg = alloc_ireg_mp ((cfg)); \
(dest)->sreg1 = (sr1); \
(dest)->sreg2 = (sr2); \
(dest)->inst_imm = (imm); \
case OP_LCOMPARE:
case OP_ICOMPARE:
ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
- if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
+ if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
ins->opcode = OP_LCOMPARE;
else if (src1->type == STACK_R8)
ins->opcode = OP_FCOMPARE;
break;
case OP_ICOMPARE_IMM:
ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
- if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
+ if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
ins->opcode = OP_LCOMPARE_IMM;
break;
case CEE_BEQ:
break;
case STACK_PTR:
case STACK_MP:
-#if SIZEOF_REGISTER == 8
+#if SIZEOF_VOID_P == 8
ins->opcode = OP_LCONV_TO_U;
#else
ins->opcode = OP_MOVE;
static inline void
mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
{
- return mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
+ mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
}
static inline void
static void
mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
{
- return mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
+ mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
}
static void
MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
}
- if (rgctx_arg)
- printf ("MOO!\n");
-
call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE);
call->inst.sreg1 = addr->dreg;
sig = ctor_sig;
}
+ context_used = mono_method_check_context_used (method);
+
might_be_remote = this && sig->hasthis &&
(method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
- !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this);
+ !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this) || context_used);
- context_used = mono_method_check_context_used (method);
if (might_be_remote && context_used) {
MonoInst *addr;
#ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
+ MonoInst *dummy_use;
+
MONO_EMIT_NULL_CHECK (cfg, this_reg);
/* Make a call to delegate->invoke_impl */
call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
+ /* We must emit a dummy use here because the delegate trampoline will
+ replace the 'this' argument with the delegate target making this activation
+ no longer a root for the delegate.
+ This is an issue for delegates that target collectible code such as dynamic
+ methods of GC'able assemblies.
+
+ For a test case look into #667921.
+
+ FIXME: a dummy use is not the best way to do it as the local register allocator
+ will put it on a caller save register and spill it around the call.
+ Ideally, we would either put it on a callee save register or only do the store part.
+ */
+ EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
+
return (MonoInst*)call;
}
#endif
MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
call->inst.opcode = callvirt_to_call (call->inst.opcode);
-
- MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
-
- return (MonoInst*)call;
- }
-
- if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
+ } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
/*
* the method is virtual, but we can statically dispatch since either
* it's class or the method itself are sealed.
MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
call->inst.opcode = callvirt_to_call (call->inst.opcode);
- MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
-
- return (MonoInst*)call;
- }
-
- call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
+ } else {
+ call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
- vtable_reg = alloc_preg (cfg);
- MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
- if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
- slot_reg = -1;
+ vtable_reg = alloc_preg (cfg);
+ MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
+ if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
+ slot_reg = -1;
#ifdef MONO_ARCH_HAVE_IMT
- if (mono_use_imt) {
- guint32 imt_slot = mono_method_get_imt_slot (method);
- emit_imt_argument (cfg, call, imt_arg);
- slot_reg = vtable_reg;
- call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
- }
+ if (mono_use_imt) {
+ guint32 imt_slot = mono_method_get_imt_slot (method);
+ emit_imt_argument (cfg, call, imt_arg);
+ slot_reg = vtable_reg;
+ call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
+ }
#endif
- if (slot_reg == -1) {
- slot_reg = alloc_preg (cfg);
- mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
- call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
- }
- } else {
- slot_reg = vtable_reg;
- call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
- ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
+ if (slot_reg == -1) {
+ slot_reg = alloc_preg (cfg);
+ mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
+ call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
+ }
+ } else {
+ slot_reg = vtable_reg;
+ call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
+ ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
#ifdef MONO_ARCH_HAVE_IMT
- if (imt_arg) {
- g_assert (mono_method_signature (method)->generic_param_count);
- emit_imt_argument (cfg, call, imt_arg);
- }
+ if (imt_arg) {
+ g_assert (mono_method_signature (method)->generic_param_count);
+ emit_imt_argument (cfg, call, imt_arg);
+ }
#endif
- }
+ }
- call->inst.sreg1 = slot_reg;
- call->virtual = TRUE;
+ call->inst.sreg1 = slot_reg;
+ call->virtual = TRUE;
+ }
}
MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
}
static void
-create_write_barrier_bitmap (MonoClass *klass, unsigned *wb_bitmap, int offset)
+create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
{
MonoClassField *field;
gpointer iter = NULL;
if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
continue;
foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
- if (mono_type_is_reference (field->type)) {
+ if (mini_type_is_reference (cfg, mono_field_get_type (field))) {
g_assert ((foffset % SIZEOF_VOID_P) == 0);
*wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
} else {
- /*FIXME support nested value types so this works for: struct X { Y y; int z;} struct Y { object a,b; }*/
MonoClass *field_class = mono_class_from_mono_type (field->type);
if (field_class->has_references)
- create_write_barrier_bitmap (field_class, wb_bitmap, offset + foffset);
+ create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
}
}
}
if (size > 32 * SIZEOF_VOID_P)
return FALSE;
- create_write_barrier_bitmap (klass, &need_wb, 0);
+ create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
/* We don't unroll more than 5 stores to avoid code bloat. */
if (size > 5 * SIZEOF_VOID_P) {
return vtable_var;
} else {
MonoInst *ins;
- int vtable_reg, res_reg;
+ int vtable_reg;
vtable_reg = alloc_preg (cfg);
- res_reg = alloc_preg (cfg);
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
return ins;
}
addr = emit_get_rgctx_method (cfg, context_used, method,
MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
- rgctx = emit_get_rgctx (cfg, method, context_used);
+ rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
} else {
reset_cast_details (cfg);
}
- NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
+ NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
MONO_ADD_INS (cfg->cbb, add);
add->type = STACK_MP;
add->klass = klass;
static gboolean
-mini_class_has_reference_variant_generic_argument (MonoClass *klass, int context_used)
+mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
{
int i;
MonoGenericContainer *container;
if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
continue;
type = ginst->type_argv [i];
- if (MONO_TYPE_IS_REFERENCE (type))
- return TRUE;
-
- if (context_used && (type->type == MONO_TYPE_VAR || type->type == MONO_TYPE_MVAR))
+ if (mini_type_is_reference (cfg, type))
return TRUE;
}
return FALSE;
if (context_used) {
MonoInst *args [3];
- if(mini_class_has_reference_variant_generic_argument (klass, context_used)) {
+ if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
MonoInst *cache_ins;
MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
int obj_reg = src->dreg;
int vtable_reg = alloc_preg (cfg);
- int res_reg = alloc_preg (cfg);
+ int res_reg = alloc_ireg_ref (cfg);
MonoInst *klass_inst = NULL;
if (context_used) {
MonoInst *args [3];
- if(mini_class_has_reference_variant_generic_argument (klass, context_used)) {
+ if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
MonoInst *cache_ins;
static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
- ins->type = STACK_PTR;
+ ins->klass = mono_class_get_element_class (klass);
+ ins->type = STACK_MP;
return ins;
}
#endif
- add_reg = alloc_preg (cfg);
+ add_reg = alloc_ireg_mp (cfg);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
- ins->type = STACK_PTR;
+ ins->klass = mono_class_get_element_class (klass);
+ ins->type = STACK_MP;
MONO_ADD_INS (cfg->cbb, ins);
return ins;
mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
{
int bounds_reg = alloc_preg (cfg);
- int add_reg = alloc_preg (cfg);
+ int add_reg = alloc_ireg_mp (cfg);
int mult_reg = alloc_preg (cfg);
int mult2_reg = alloc_preg (cfg);
int low1_reg = alloc_preg (cfg);
} else if (cmethod->klass == mono_defaults.object_class) {
if (strcmp (cmethod->name, "GetType") == 0) {
- int dreg = alloc_preg (cfg);
+ int dreg = alloc_ireg_ref (cfg);
int vt_reg = alloc_preg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
*/
if ((strcmp (cmethod->name, "GetLength") == 0 || strcmp (cmethod->name, "GetLowerBound") == 0) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
int dreg = alloc_ireg (cfg);
- int bounds_reg = alloc_ireg (cfg);
+ int bounds_reg = alloc_ireg_mp (cfg);
MonoBasicBlock *end_bb, *szarray_bb;
gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
return NULL;
MONO_INST_NEW (cfg, ins, opcode);
- ins->dreg = mono_alloc_ireg (cfg);
+ ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
ins->inst_basereg = args [0]->dreg;
ins->inst_offset = 0;
ins->sreg2 = args [1]->dreg;
#ifdef MONO_ARCH_HAVE_ATOMIC_CAS
if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
int size = 0;
- gboolean is_ref = MONO_TYPE_IS_REFERENCE (fsig->params [1]);
+ gboolean is_ref = mini_type_is_reference (cfg, fsig->params [1]);
if (fsig->params [1]->type == MONO_TYPE_I4)
size = 4;
else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
size = 8;
if (size == 4) {
MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
- ins->dreg = alloc_ireg (cfg);
+ ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
ins->sreg1 = args [0]->dreg;
ins->sreg2 = args [1]->dreg;
ins->sreg3 = args [2]->dreg;
MONO_ADD_INS (cfg->cbb, ins);
} else if (size == 8) {
MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
- ins->dreg = alloc_ireg (cfg);
+ ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
ins->sreg1 = args [0]->dreg;
ins->sreg2 = args [1]->dreg;
ins->sreg3 = args [2]->dreg;
guint32 prev_cil_offset_to_bb_len;
MonoMethod *prev_current_method;
MonoGenericContext *prev_generic_context;
- gboolean ret_var_set, prev_ret_var_set;
+ gboolean ret_var_set, prev_ret_var_set, virtual = FALSE;
g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
if (!cmethod->inline_info) {
- mono_jit_stats.inlineable_methods++;
+ cfg->stat_inlineable_methods++;
cmethod->inline_info = 1;
}
rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
}
-
prev_locals = cfg->locals;
cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
for (i = 0; i < cheader->num_locals; ++i)
prev_generic_context = cfg->generic_context;
prev_ret_var_set = cfg->ret_var_set;
- costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
+ if (*ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
+ virtual = TRUE;
+
+ costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, virtual);
ret_var_set = cfg->ret_var_set;
if (cfg->verbose_level > 2)
printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
- mono_jit_stats.inlined_methods++;
+ cfg->stat_inlined_methods++;
/* always add some code to avoid block split failures */
MONO_INST_NEW (cfg, ins, OP_NOP);
static gboolean
generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
{
- MonoType *type;
-
- if (cfg->generic_sharing_context)
- type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
- else
- type = &klass->byval_arg;
- return MONO_TYPE_IS_REFERENCE (type);
+ return mini_type_is_reference (cfg, &klass->byval_arg);
}
static void
token = read32 (ip + 2);
klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
CHECK_TYPELOAD (klass);
- if (generic_class_is_reference_type (cfg, klass)) {
- MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
- } else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
+ if (mini_type_is_reference (cfg, &klass->byval_arg)) {
MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
} else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
if (ass->jit_optimizer_disabled_inited)
return ass->jit_optimizer_disabled;
- klass = mono_class_from_name_cached (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
+ if (!klass)
+ klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
+ if (!klass) {
+ /* Linked away */
+ ass->jit_optimizer_disabled = FALSE;
+ mono_memory_barrier ();
+ ass->jit_optimizer_disabled_inited = TRUE;
+ return FALSE;
+ }
attrs = mono_custom_attrs_from_assembly (ass);
if (attrs) {
return supported_tail_call;
}
+/* the JIT intercepts ldflda instructions to the tlsdata field in ThreadLocal<T> and redirects
+ * it to the thread local value based on the tls_offset field. Every other kind of access to
+ * the field causes an assert.
+ */
+static gboolean
+is_magic_tls_access (MonoClassField *field)
+{
+ if (strcmp (field->name, "tlsdata"))
+ return FALSE;
+ if (strcmp (field->parent->name, "ThreadLocal`1"))
+ return FALSE;
+ return field->parent->image == mono_defaults.corlib;
+}
+
+/* emits the code needed to access a managed tls var (like ThreadStatic)
+ * with the value of the tls offset in offset_reg. thread_ins represents the MonoInternalThread
+ * pointer for the current thread.
+ * Returns the MonoInst* representing the address of the tls var.
+ */
+static MonoInst*
+emit_managed_static_data_access (MonoCompile *cfg, MonoInst *thread_ins, int offset_reg)
+{
+ MonoInst *addr;
+ int static_data_reg, array_reg, dreg;
+ int offset2_reg, idx_reg;
+ // inlined access to the tls data
+ // idx = (offset >> 24) - 1;
+ // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
+ static_data_reg = alloc_ireg (cfg);
+ MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
+ idx_reg = alloc_ireg (cfg);
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
+ MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
+ array_reg = alloc_ireg (cfg);
+ MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
+ offset2_reg = alloc_ireg (cfg);
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
+ dreg = alloc_ireg (cfg);
+ EMIT_NEW_BIALU (cfg, addr, OP_PADD, dreg, array_reg, offset2_reg);
+ return addr;
+}
+
+/*
+ * redirect access to the tlsdata field to the tls var given by the tls_offset field.
+ * this address is cached per-method in cached_tls_addr.
+ */
+static MonoInst*
+create_magic_tls_access (MonoCompile *cfg, MonoClassField *tls_field, MonoInst **cached_tls_addr, MonoInst *thread_local)
+{
+ MonoInst *load, *addr, *temp, *store, *thread_ins;
+ MonoClassField *offset_field;
+
+ if (*cached_tls_addr) {
+ EMIT_NEW_TEMPLOAD (cfg, addr, (*cached_tls_addr)->inst_c0);
+ return addr;
+ }
+ thread_ins = mono_get_thread_intrinsic (cfg);
+ offset_field = mono_class_get_field_from_name (tls_field->parent, "tls_offset");
+
+ EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, offset_field->type, thread_local->dreg, offset_field->offset);
+ if (thread_ins) {
+ MONO_ADD_INS (cfg->cbb, thread_ins);
+ } else {
+ MonoMethod *thread_method;
+ thread_method = mono_class_get_method_from_name (mono_get_thread_class(), "CurrentInternalThread_internal", 0);
+ thread_ins = mono_emit_method_call (cfg, thread_method, NULL, NULL);
+ }
+ addr = emit_managed_static_data_access (cfg, thread_ins, load->dreg);
+ addr->klass = mono_class_from_mono_type (tls_field->type);
+ addr->type = STACK_MP;
+ *cached_tls_addr = temp = mono_compile_create_var (cfg, type_from_stack_type (addr), OP_LOCAL);
+ EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, addr);
+
+ EMIT_NEW_TEMPLOAD (cfg, addr, temp->inst_c0);
+ return addr;
+}
+
/*
* mono_method_to_ir:
*
int context_used;
gboolean init_locals, seq_points, skip_dead_blocks;
gboolean disable_inline;
+ MonoInst *cached_tls_addr = NULL;
disable_inline = is_jit_optimizer_disabled (method);
dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
+ dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
image = method->klass->image;
header = mono_method_get_header (method);
ip = (unsigned char*)header->code;
cfg->cil_start = ip;
end = ip + header->code_size;
- mono_jit_stats.cil_code_size += header->code_size;
+ cfg->stat_cil_code_size += header->code_size;
init_locals = header->init_locals;
seq_points = cfg->gen_seq_points && cfg->method == method;
cfg->bb_entry = start_bblock;
start_bblock->cil_code = NULL;
start_bblock->cil_length = 0;
+#if defined(__native_client_codegen__)
+ MONO_INST_NEW (cfg, ins, OP_NACL_GC_SAFE_POINT);
+ ins->dreg = alloc_dreg (cfg, STACK_I4);
+ MONO_ADD_INS (start_bblock, ins);
+#endif
/* EXIT BLOCK */
NEW_BBLOCK (cfg, end_bblock);
if (security && (cfg->method == method)) {
MonoInst *args [2];
- mono_jit_stats.cas_demand_generation++;
+ cfg->stat_cas_demand_generation++;
if (actions.demand.blob) {
/* Add code for SecurityAction.Demand */
sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call));
CHECK_CFG_EXCEPTION;
} else if (!constrained_call->valuetype) {
- int dreg = alloc_preg (cfg);
+ int dreg = alloc_ireg_ref (cfg);
/*
* The type parameter is instantiated as a reference
case CEE_LDIND_I8:
dreg = alloc_lreg (cfg);
break;
+ case CEE_LDIND_REF:
+ dreg = alloc_ireg_ref (cfg);
+ break;
default:
dreg = alloc_preg (cfg);
}
sp -= 2;
if (generic_class_is_reference_type (cfg, klass)) {
MonoInst *store, *load;
- int dreg = alloc_preg (cfg);
+ int dreg = alloc_ireg_ref (cfg);
NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
load->flags |= ins_flag;
if (cfg->generic_sharing_context)
context_used = mono_class_check_context_used (klass);
- if (!context_used && mini_class_has_reference_variant_generic_argument (klass, context_used)) {
+ if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
MonoInst *args [3];
/* inline cache*/
/*FIXME AOT support*/
- EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
+ if (cfg->compile_aot)
+ EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
+ else
+ EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
/*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
*sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
if (cfg->generic_sharing_context)
context_used = mono_class_check_context_used (klass);
- if (!context_used && mini_class_has_reference_variant_generic_argument (klass, context_used)) {
+ if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
MonoInst *args [3];
if (generic_class_is_reference_type (cfg, klass)) {
/* CASTCLASS FIXME kill this huge slice of duplicated code*/
- if (!context_used && mini_class_has_reference_variant_generic_argument (klass, context_used)) {
+ if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
MonoInst *args [3];
/* inline cache*/
/*FIXME AOT support*/
- EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
+ if (cfg->compile_aot)
+ EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
+ else
+ EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
/*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
*sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
FIELD_ACCESS_FAILURE;
mono_class_init (klass);
+ if (*ip != CEE_LDFLDA && is_magic_tls_access (field))
+ UNVERIFIED;
/* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
MonoInst *ptr;
int dreg;
- dreg = alloc_preg (cfg);
+ dreg = alloc_ireg_mp (cfg);
EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
emit_write_barrier (cfg, ptr, sp [1], -1);
}
}
if (*ip == CEE_LDFLDA) {
- if (sp [0]->type == STACK_OBJ) {
- MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
- MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
- }
+ if (is_magic_tls_access (field)) {
+ ins = sp [0];
+ *sp++ = create_magic_tls_access (cfg, field, &cached_tls_addr, ins);
+ } else {
+ if (sp [0]->type == STACK_OBJ) {
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
+ MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
+ }
- dreg = alloc_preg (cfg);
+ dreg = alloc_ireg_mp (cfg);
- EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
- ins->klass = mono_class_from_mono_type (field->type);
- ins->type = STACK_MP;
- *sp++ = ins;
+ EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
+ ins->klass = mono_class_from_mono_type (field->type);
+ ins->type = STACK_MP;
+ *sp++ = ins;
+ }
} else {
MonoInst *load;
CHECK_TYPELOAD (klass);
if (!addr) {
- if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
- mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
- if (cfg->verbose_level > 2)
- printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
- class_inits = g_slist_prepend (class_inits, vtable);
+ if (mini_field_access_needs_cctor_run (cfg, method, vtable)) {
+ if (!(g_slist_find (class_inits, vtable))) {
+ mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
+ if (cfg->verbose_level > 2)
+ printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
+ class_inits = g_slist_prepend (class_inits, vtable);
+ }
} else {
if (cfg->run_cctors) {
MonoException *ex;
CHECK_TYPELOAD (array_type);
MONO_INST_NEW (cfg, ins, OP_NEWARR);
- ins->dreg = alloc_preg (cfg);
+ ins->dreg = alloc_ireg_ref (cfg);
ins->sreg1 = sp [0]->dreg;
ins->inst_newa_class = klass;
ins->type = STACK_OBJ;
if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
MonoMethod *memcpy_method = get_memcpy_method ();
MonoInst *iargs [3];
- int add_reg = alloc_preg (cfg);
+ int add_reg = alloc_ireg_mp (cfg);
EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
if (cfg->compile_aot) {
if (sp [0]->type != STACK_OBJ)
UNVERIFIED;
- dreg = alloc_preg (cfg);
MONO_INST_NEW (cfg, ins, OP_LDLEN);
ins->dreg = alloc_preg (cfg);
ins->sreg1 = sp [0]->dreg;
CHECK_STACK (1);
--sp;
MONO_INST_NEW (cfg, ins, OP_MOVE);
- ins->dreg = alloc_preg (cfg);
+ ins->dreg = alloc_ireg_mp (cfg);
ins->sreg1 = sp [0]->dreg;
ins->type = STACK_MP;
MONO_ADD_INS (cfg->cbb, ins);
cmp->sreg2 = sp [1]->dreg;
type_from_op (cmp, sp [0], sp [1]);
CHECK_TYPE (cmp);
- if ((sp [0]->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
+ if ((sp [0]->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
cmp->opcode = OP_LCOMPARE;
else if (sp [0]->type == STACK_R8)
cmp->opcode = OP_FCOMPARE;
target_ins = sp [-1];
+ if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
+ ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
+
if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
/*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
CHECK_STACK_OVF (1);
CHECK_OPSIZE (6);
token = read32 (ip + 2);
- if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic) {
+ if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic && !generic_context) {
MonoType *type = mono_type_create_from_typespec (image, token);
token = mono_type_size (type, &ialign);
} else {
switch (opcode) {
case OP_X86_PUSH:
+#ifdef __mono_ilp32__
+ if (load_opcode == OP_LOADI8_MEMBASE)
+#else
if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
+#endif
return OP_X86_PUSH_MEMBASE;
break;
/* FIXME: This only works for 32 bit immediates
break;
case OP_COMPARE:
case OP_LCOMPARE:
+#ifdef __mono_ilp32__
+ if (load_opcode == OP_LOAD_MEMBASE)
+ return OP_AMD64_ICOMPARE_MEMBASE_REG;
+ if (load_opcode == OP_LOADI8_MEMBASE)
+#else
if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
+#endif
return OP_AMD64_COMPARE_MEMBASE_REG;
break;
case OP_ICOMPARE:
#endif
#ifdef TARGET_AMD64
+#ifdef __mono_ilp32__
+ if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
+#else
if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
+#endif
switch (opcode) {
case OP_ICOMPARE:
return OP_AMD64_ICOMPARE_REG_MEMBASE;
case OP_IXOR:
return OP_X86_XOR_REG_MEMBASE;
}
+#ifdef __mono_ilp32__
+ } else if (load_opcode == OP_LOADI8_MEMBASE) {
+#else
} else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
+#endif
switch (opcode) {
case OP_COMPARE:
case OP_LCOMPARE:
switch (regtype) {
case 'i':
- mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
+ if (vreg_is_ref (cfg, vreg))
+ mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
+ else
+ mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
break;
case 'l':
mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
}
#endif
+ if (cfg->compute_gc_maps) {
+ /* registers need liveness info even for non-ref values */
+ for (i = 0; i < cfg->num_varinfo; i++) {
+ MonoInst *ins = cfg->varinfo [i];
+
+ if (ins->opcode == OP_REGVAR)
+ ins->flags |= MONO_INST_GC_TRACK;
+ }
+ }
+
/* FIXME: widening and truncation */
/*
live_range_start_bb [dreg] = bb;
}
- // FIXME: Only for stack objects
- if (cfg->compute_gc_maps && def_ins) {
+ if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
MonoInst *tmp;
MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
tmp->inst_c1 = dreg;
- mono_bblock_insert_after_ins (bb, ins, tmp);
+ mono_bblock_insert_after_ins (bb, def_ins, tmp);
}
}
//mono_inst_set_src_registers (ins, sregs);
live_range_end [sreg] = use_ins;
live_range_end_bb [sreg] = bb;
+
+ if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
+ MonoInst *tmp;
+
+ MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
+ /* var->dreg is a hreg */
+ tmp->inst_c1 = sreg;
+ mono_bblock_insert_after_ins (bb, ins, tmp);
+ }
+
continue;
}
live_range_end_bb [var->dreg] = bb;
}
- // FIXME: Only for stack objects
- if (cfg->compute_gc_maps && var->dreg < orig_next_vreg) {
+ if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
MonoInst *tmp;
MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);