static void
emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
{
+ int method_reg;
+
+ if (COMPILE_LLVM (cfg)) {
+ method_reg = alloc_preg (cfg);
+
+ if (imt_arg) {
+ MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
+ } else if (cfg->compile_aot) {
+ MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
+ } else {
+ MonoInst *ins;
+ MONO_INST_NEW (cfg, ins, OP_PCONST);
+ ins->inst_p0 = call->method;
+ ins->dreg = method_reg;
+ MONO_ADD_INS (cfg->cbb, ins);
+ }
+
+#ifdef ENABLE_LLVM
+ call->imt_arg_reg = method_reg;
+#endif
#ifdef MONO_ARCH_IMT_REG
- int method_reg = alloc_preg (cfg);
+ mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
+#else
+ /* Need this to keep the IMT arg alive */
+ mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
+#endif
+ return;
+ }
+
+#ifdef MONO_ARCH_IMT_REG
+ method_reg = alloc_preg (cfg);
if (imt_arg) {
MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
MONO_ADD_INS (cfg->cbb, ins);
}
-#ifdef ENABLE_LLVM
- if (COMPILE_LLVM (cfg))
- call->imt_arg_reg = method_reg;
-#endif
mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
#else
mono_arch_emit_imt_argument (cfg, call, imt_arg);
return memcpy_method;
}
+/*
+ * create_write_barrier_bitmap:
+ *
+ *   Set a bit in *WB_BITMAP for every pointer-sized slot of KLASS, located at
+ * byte OFFSET from the start of the enclosing object, which holds a GC
+ * reference. The caller uses the bitmap to emit write barriers only for the
+ * slots that actually contain references. Recurses into embedded valuetype
+ * fields which themselves contain references.
+ */
+static void
+create_write_barrier_bitmap (MonoClass *klass, unsigned *wb_bitmap, int offset)
+{
+ MonoClassField *field;
+ gpointer iter = NULL;
+
+ while ((field = mono_class_get_fields (klass, &iter))) {
+ int foffset;
+
+ /* Static fields are not part of the instance layout. */
+ if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
+ continue;
+ /* For valuetypes, field->offset includes the object header; strip it. */
+ foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
+ if (mono_type_is_reference (field->type)) {
+ /* The bitmap encoding requires references to be pointer aligned. */
+ g_assert ((foffset % SIZEOF_VOID_P) == 0);
+ *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
+ } else {
+ /*FIXME support nested value types so this works for: struct X { Y y; int z;} struct Y { object a,b; }*/
+ MonoClass *field_class = mono_class_from_mono_type (field->type);
+ if (field_class->has_references)
+ create_write_barrier_bitmap (field_class, wb_bitmap, offset + foffset);
+ }
+ }
+}
+
+/*
+ * mono_emit_wb_aware_memcpy:
+ *
+ *   Try to emit an inline, write-barrier-aware copy of a valuetype of type
+ * KLASS of SIZE bytes from the address in iargs [1] to the address in
+ * iargs [0]. Returns TRUE if code was emitted, FALSE if the caller must fall
+ * back to a generic copy path (alignment below pointer size, or the type is
+ * too large for the 32 bit reference bitmap).
+ */
+static gboolean
+mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
+{
+ int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
+ unsigned need_wb = 0;
+
+ if (align == 0)
+ align = 4;
+
+ /*types with references can't have alignment smaller than sizeof(void*) */
+ if (align < SIZEOF_VOID_P)
+ return FALSE;
+
+ /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
+ if (size > 32 * SIZEOF_VOID_P)
+ return FALSE;
+
+ /* One bit per pointer-sized slot that holds a reference. */
+ create_write_barrier_bitmap (klass, &need_wb, 0);
+
+ /* We don't unroll more than 5 stores to avoid code bloat. */
+ if (size > 5 * SIZEOF_VOID_P) {
+ /* Rounding up to a whole number of pointer slots is harmless and
+ * simplifies mono_gc_wbarrier_value_copy_bitmap. */
+ size += (SIZEOF_VOID_P - 1);
+ size &= ~(SIZEOF_VOID_P - 1);
+
+ /* Call the runtime helper: (dest, src, size, bitmap). */
+ EMIT_NEW_ICONST (cfg, iargs [2], size);
+ EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
+ mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
+ return TRUE;
+ }
+
+ destreg = iargs [0]->dreg;
+ srcreg = iargs [1]->dreg;
+ offset = 0;
+
+ dest_ptr_reg = alloc_preg (cfg);
+ tmp_reg = alloc_preg (cfg);
+
+ /*tmp = dreg*/
+ EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
+
+ /* Copy pointer-sized slots one by one, barriering the reference ones. */
+ while (size >= SIZEOF_VOID_P) {
+ MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, tmp_reg, srcreg, offset);
+ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
+
+ /* Low bit of need_wb corresponds to the current slot. */
+ if (need_wb & 0x1) {
+ MonoInst *dummy_use;
+
+ MonoMethod *write_barrier = mono_gc_get_write_barrier ();
+ mono_emit_method_call (cfg, write_barrier, &iargs [0], NULL);
+
+ /* Keep dest_ptr_reg alive across the barrier call. */
+ MONO_INST_NEW (cfg, dummy_use, OP_DUMMY_USE);
+ dummy_use->sreg1 = dest_ptr_reg;
+ MONO_ADD_INS (cfg->cbb, dummy_use);
+ }
+
+
+ offset += SIZEOF_VOID_P;
+ size -= SIZEOF_VOID_P;
+ need_wb >>= 1;
+
+ /*tmp += sizeof (void*)*/
+ if (size >= SIZEOF_VOID_P) {
+ NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
+ MONO_ADD_INS (cfg->cbb, iargs [0]);
+ }
+ }
+
+ /* Those cannot be references since size < sizeof (void*) */
+ while (size >= 4) {
+ MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
+ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
+ offset += 4;
+ size -= 4;
+ }
+
+ while (size >= 2) {
+ MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
+ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
+ offset += 2;
+ size -= 2;
+ }
+
+ while (size >= 1) {
+ MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
+ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
+ offset += 1;
+ size -= 1;
+ }
+
+ return TRUE;
+}
+
/*
* Emit code to copy a valuetype of type @klass whose address is stored in
* @src->dreg to memory whose address is stored at @dest->dreg.
void
mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
{
- MonoInst *iargs [3];
+ MonoInst *iargs [4];
int n;
guint32 align = 0;
MonoMethod *memcpy_method;
else
n = mono_class_value_size (klass, &align);
-#if HAVE_WRITE_BARRIERS
/* if native is true there should be no references in the struct */
- if (klass->has_references && !native) {
+ if (cfg->gen_write_barriers && klass->has_references && !native) {
/* Avoid barriers when storing to the stack */
if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
(dest->opcode == OP_LDADDR))) {
if (cfg->generic_sharing_context)
context_used = mono_class_check_context_used (klass);
- if (context_used) {
+
+ /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
+ if ((cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
+ return;
+ } else if (context_used) {
iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
- } else {
+ } else {
if (cfg->compile_aot) {
EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
} else {
}
}
- /* FIXME: this does the memcpy as well (or
- should), so we don't need the memcpy
- afterwards */
mono_emit_jit_icall (cfg, mono_value_copy, iargs);
+ return;
}
}
-#endif
if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
/* FIXME: Optimize the case when src/dest is OP_LDADDR */
g_assert_not_reached ();
}
-#if HAVE_WRITE_BARRIERS
- if (is_ref) {
+ if (cfg->gen_write_barriers && is_ref) {
MonoInst *dummy_use;
MonoMethod *write_barrier = mono_gc_get_write_barrier ();
mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [1]);
}
-#endif
}
#endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
} else {
/* g_assert_not_reached (); */
}
-#if HAVE_WRITE_BARRIERS
- if (is_ref) {
+ if (cfg->gen_write_barriers && is_ref) {
MonoInst *dummy_use;
MonoMethod *write_barrier = mono_gc_get_write_barrier ();
mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [1]);
}
-#endif
}
#endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
image = method->klass->image;
header = mono_method_get_header (method);
+ if (!header) {
+ MonoLoaderError *error;
+
+ if ((error = mono_loader_get_last_error ())) {
+ cfg->exception_type = error->exception_type;
+ } else {
+ cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
+ cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
+ }
+ goto exception_exit;
+ }
generic_container = mono_method_get_generic_container (method);
sig = mono_method_signature (method);
num_args = sig->hasthis + sig->param_count;
INLINE_FAILURE;
#if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
- /* The llvm vcall trampolines doesn't support generic virtual calls yet */
- if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt && !mono_use_llvm) {
+ if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt) {
g_assert (!imt_arg);
if (!context_used)
g_assert (cmethod->is_inflated);
ins_flag = 0;
MONO_ADD_INS (bblock, ins);
-#if HAVE_WRITE_BARRIERS
- if (*ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0))) {
+ if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0))) {
MonoInst *dummy_use;
/* insert call to write barrier */
MonoMethod *write_barrier = mono_gc_get_write_barrier ();
mono_emit_method_call (cfg, write_barrier, sp, NULL);
EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
}
-#endif
inline_costs += 1;
++ip;
store->flags |= ins_flag;
MONO_ADD_INS (cfg->cbb, store);
-#if HAVE_WRITE_BARRIERS
- if (cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER) {
+ if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER) {
MonoInst *dummy_use;
MonoMethod *write_barrier = mono_gc_get_write_barrier ();
mono_emit_method_call (cfg, write_barrier, sp, NULL);
EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
}
-#endif
} else {
mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
}
MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
- store->flags |= MONO_INST_FAULT;
+ if (sp [0]->opcode != OP_LDADDR)
+ store->flags |= MONO_INST_FAULT;
-#if HAVE_WRITE_BARRIERS
- if (mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
+ if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
/* insert call to write barrier */
MonoMethod *write_barrier = mono_gc_get_write_barrier ();
MonoInst *iargs [2], *dummy_use;
EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
}
-#endif
store->flags |= ins_flag;
}
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
load->flags |= ins_flag;
- load->flags |= MONO_INST_FAULT;
+ if (sp [0]->opcode != OP_LDADDR)
+ load->flags |= MONO_INST_FAULT;
*sp++ = load;
}
}
CHECK_TYPELOAD (klass);
/* FIXME: should check item at sp [1] is compatible with the type of the store. */
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
-#if HAVE_WRITE_BARRIERS
- if (cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
+ if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
generic_class_is_reference_type (cfg, klass)) {
MonoInst *dummy_use;
/* insert call to write barrier */
mono_emit_method_call (cfg, write_barrier, sp, NULL);
EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
}
-#endif
ins_flag = 0;
ip += 5;
inline_costs += 1;
if (cfg->generic_sharing_context)
invoke_context_used = mono_method_check_context_used (invoke);
-#if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE) && !defined(HAVE_WRITE_BARRIERS)
+#if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
/* FIXME: SGEN support */
- if (invoke_context_used == 0) {
+ if (!cfg->gen_write_barriers && invoke_context_used == 0) {
MonoInst *target_ins;
ip += 6;