#include "jit.h"
#include "debugger-agent.h"
-#define BRANCH_COST 100
+#define BRANCH_COST 10
#define INLINE_LENGTH_LIMIT 20
#define INLINE_FAILURE do {\
if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
static void
emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
{
+ int method_reg;
+
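+	/* Under LLVM, materialize the IMT/method argument into a vreg and record it on the call instruction (a descriptive note on the code below) */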
+ if (COMPILE_LLVM (cfg)) {
+ method_reg = alloc_preg (cfg);
+
+ if (imt_arg) {
+ MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
+ } else if (cfg->compile_aot) {
+ MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
+ } else {
+ MonoInst *ins;
+ MONO_INST_NEW (cfg, ins, OP_PCONST);
+ ins->inst_p0 = call->method;
+ ins->dreg = method_reg;
+ MONO_ADD_INS (cfg->cbb, ins);
+ }
+
+#ifdef ENABLE_LLVM
+ call->imt_arg_reg = method_reg;
+#endif
#ifdef MONO_ARCH_IMT_REG
- int method_reg = alloc_preg (cfg);
+ mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
+#else
+ /* Need this to keep the IMT arg alive */
+ mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
+#endif
+ return;
+ }
+
+#ifdef MONO_ARCH_IMT_REG
+ method_reg = alloc_preg (cfg);
if (imt_arg) {
MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
MONO_ADD_INS (cfg->cbb, ins);
}
-#ifdef ENABLE_LLVM
- if (COMPILE_LLVM (cfg))
- call->imt_arg_reg = method_reg;
-#endif
mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
#else
mono_arch_emit_imt_argument (cfg, call, imt_arg);
return memcpy_method;
}
-#if HAVE_WRITE_BARRIERS
-
static void
create_write_barrier_bitmap (MonoClass *klass, unsigned *wb_bitmap, int offset)
{
}
}
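+/*
+ * emit_write_barrier:
+ *
+ *   Emit a GC write barrier for the store of VALUE (or the vreg VALUE_REG when VALUE is NULL)
+ * into the location pointed to by PTR: use the arch-specific card table barrier when available,
+ * otherwise mark the card table inline, and fall back to calling the generic write barrier wrapper.
+ */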
+static void
+emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value, int value_reg)
+{
+#ifdef HAVE_SGEN_GC
+ int card_table_shift_bits;
+ gpointer card_table_mask;
+ guint8 *card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
+ MonoInst *dummy_use;
+
+#ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
+ int nursery_shift_bits;
+ size_t nursery_size;
+
+ mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
+
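+	/* Use the arch-specific card table write barrier only when JITting (not AOT) and the nursery parameters are known */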
+ if (!cfg->compile_aot && card_table && nursery_shift_bits > 0) {
+ MonoInst *wbarrier;
+
+ MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
+ wbarrier->sreg1 = ptr->dreg;
+ if (value)
+ wbarrier->sreg2 = value->dreg;
+ else
+ wbarrier->sreg2 = value_reg;
+ MONO_ADD_INS (cfg->cbb, wbarrier);
+ } else
+#endif
+ if (card_table) {
+ int offset_reg = alloc_preg (cfg);
+ int card_reg = alloc_preg (cfg);
+ MonoInst *ins;
+
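+		/* Mark the card covering PTR dirty: card_table [(ptr >> card_table_shift_bits) & card_table_mask] = 1 */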
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
+ if (card_table_mask)
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
+
+		/* We can't use PADD_IMM here, since the card table might end up at a high address
+		 * and amd64 doesn't support immediates larger than 32 bits.
+		 */
+ if (cfg->compile_aot) {
+ MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
+ } else {
+ MONO_INST_NEW (cfg, ins, OP_PCONST);
+ ins->inst_p0 = card_table;
+ ins->dreg = card_reg;
+ MONO_ADD_INS (cfg->cbb, ins);
+ }
+
+ MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
+ MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
+ } else {
+ MonoMethod *write_barrier = mono_gc_get_write_barrier ();
+ mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
+ }
+
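+	/* Add a dummy use so the stored value stays alive until after the barrier */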
+ if (value) {
+ EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
+ } else {
+ MONO_INST_NEW (cfg, dummy_use, OP_DUMMY_USE);
+ dummy_use->sreg1 = value_reg;
+ MONO_ADD_INS (cfg->cbb, dummy_use);
+ }
+#endif
+}
+
static gboolean
mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
{
/* We don't unroll more than 5 stores to avoid code bloat. */
if (size > 5 * SIZEOF_VOID_P) {
- /*FIXME this is a temporary fix while issues with valuetypes are solved.*/
-#if SIZEOF_VOID_P == 8
- return FALSE;
-#endif
		/* This is harmless and simplifies mono_gc_wbarrier_value_copy_bitmap */
size += (SIZEOF_VOID_P - 1);
size &= ~(SIZEOF_VOID_P - 1);
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, tmp_reg, srcreg, offset);
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
- if (need_wb & 0x1) {
- MonoInst *dummy_use;
-
- MonoMethod *write_barrier = mono_gc_get_write_barrier ();
- mono_emit_method_call (cfg, write_barrier, &iargs [0], NULL);
-
- MONO_INST_NEW (cfg, dummy_use, OP_DUMMY_USE);
- dummy_use->sreg1 = dest_ptr_reg;
- MONO_ADD_INS (cfg->cbb, dummy_use);
- }
-
+ if (need_wb & 0x1)
+ emit_write_barrier (cfg, iargs [0], NULL, tmp_reg);
offset += SIZEOF_VOID_P;
size -= SIZEOF_VOID_P;
return TRUE;
}
-#endif
/*
* Emit code to copy a valuetype of type @klass whose address is stored in
else
n = mono_class_value_size (klass, &align);
-#if HAVE_WRITE_BARRIERS
/* if native is true there should be no references in the struct */
- if (klass->has_references && !native) {
+ if (cfg->gen_write_barriers && klass->has_references && !native) {
/* Avoid barriers when storing to the stack */
if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
(dest->opcode == OP_LDADDR))) {
return;
}
}
-#endif
if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
/* FIXME: Optimize the case when src/dest is OP_LDADDR */
if (cfg->generic_sharing_context)
context_used = mono_class_check_context_used (array_class);
- MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
+ MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
if (cfg->opt & MONO_OPT_SHARED) {
int class_reg = alloc_preg (cfg);
/* Decompose later to allow more optimizations */
EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
ins->type = STACK_I4;
+ ins->flags |= MONO_INST_FAULT;
cfg->cbb->has_array_access = TRUE;
cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
type_from_op (ins, NULL, NULL);
return ins;
-#if !defined(MONO_ARCH_EMULATE_MUL_DIV) && !defined(HAVE_MOVING_COLLECTOR)
- } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0) {
+#if !defined(MONO_ARCH_EMULATE_MUL_DIV)
+ } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && !mono_gc_is_moving ()) {
int dreg = alloc_ireg (cfg);
int t1 = alloc_ireg (cfg);
} else if (cmethod->klass == mono_defaults.array_class) {
if (strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
+
+#ifndef MONO_BIG_ARRAYS
+ /*
+ * This is an inline version of GetLength/GetLowerBound(0) used frequently in
+ * Array methods.
+ */
+ if ((strcmp (cmethod->name, "GetLength") == 0 || strcmp (cmethod->name, "GetLowerBound") == 0) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
+ int dreg = alloc_ireg (cfg);
+ int bounds_reg = alloc_ireg (cfg);
+ MonoBasicBlock *end_bb, *szarray_bb;
+ gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
+
+ NEW_BBLOCK (cfg, end_bb);
+ NEW_BBLOCK (cfg, szarray_bb);
+
+ EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
+ args [0]->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
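+			/* A NULL bounds vector means this is a vector (szarray), handled in szarray_bb */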
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
+ MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
+ /* Non-szarray case */
+ if (get_length)
+ EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
+ bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
+ else
+ EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
+ bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
+ MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
+ MONO_START_BB (cfg, szarray_bb);
+ /* Szarray case */
+ if (get_length)
+ EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
+ args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
+ else
+ MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
+ MONO_START_BB (cfg, end_bb);
+
+ EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
+ ins->type = STACK_I4;
+
+ return ins;
+ }
+#endif
+
if (cmethod->name [0] != 'g')
return NULL;
}
} else if (cmethod->klass == mono_defaults.monitor_class) {
#if defined(MONO_ARCH_MONITOR_OBJECT_REG)
- if (strcmp (cmethod->name, "Enter") == 0) {
+ /* The trampolines don't work under SGEN */
+ gboolean is_moving_gc = mono_gc_is_moving ();
+
+ if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1 && !is_moving_gc) {
MonoCallInst *call;
if (COMPILE_LLVM (cfg)) {
}
return (MonoInst*)call;
- } else if (strcmp (cmethod->name, "Exit") == 0) {
+ } else if (strcmp (cmethod->name, "Exit") == 0 && !is_moving_gc) {
MonoCallInst *call;
if (COMPILE_LLVM (cfg)) {
strcmp (cfg->method->name, "FastMonitorExit") == 0))
return NULL;
- if (strcmp (cmethod->name, "Enter") == 0 ||
+ if ((strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) ||
strcmp (cmethod->name, "Exit") == 0)
fast_method = mono_monitor_get_fast_path (cmethod);
if (!fast_method)
g_assert_not_reached ();
}
-#if HAVE_WRITE_BARRIERS
- if (is_ref) {
- MonoInst *dummy_use;
- MonoMethod *write_barrier = mono_gc_get_write_barrier ();
- mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
- EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [1]);
- }
-#endif
+ if (cfg->gen_write_barriers && is_ref)
+ emit_write_barrier (cfg, args [0], args [1], -1);
}
#endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
} else {
/* g_assert_not_reached (); */
}
-#if HAVE_WRITE_BARRIERS
- if (is_ref) {
- MonoInst *dummy_use;
- MonoMethod *write_barrier = mono_gc_get_write_barrier ();
- mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
- EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [1]);
- }
-#endif
+ if (cfg->gen_write_barriers && is_ref)
+ emit_write_barrier (cfg, args [0], args [1], -1);
}
#endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
{
if (method->klass == mono_defaults.string_class) {
/* managed string allocation support */
- if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS)) {
+ if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
MonoInst *iargs [2];
MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
MonoMethod *managed_alloc = NULL;
return 0;
}
+	/* Must verify before creating locals, since creating locals for a broken method can make the JIT assert. */
+ if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
+ mono_metadata_free_mh (cheader);
+ return 0;
+ }
+
/* allocate space to store the return value */
if (!MONO_TYPE_IS_VOID (fsig->ret)) {
rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
dont_verify_stloc = TRUE;
}
- if (!dont_verify && mini_method_verify (cfg, method_definition))
- goto exception_exit;
-
if (mono_debug_using_mono_debugger ())
cfg->keep_cil_nops = TRUE;
cfg->bb_exit = end_bblock;
end_bblock->cil_code = NULL;
end_bblock->cil_length = 0;
+ end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
g_assert (cfg->num_bblocks == 2);
arg_array = cfg->args;
/* FIXME: check the signature matches */
cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
- if (!cmethod)
+ if (!cmethod || mono_loader_get_last_error ())
goto load_error;
if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
cil_method = cmethod;
}
- if (!cmethod)
+ if (!cmethod || mono_loader_get_last_error ())
goto load_error;
if (!dont_verify && !cfg->skip_visibility) {
MonoMethod *target_method = cil_method;
vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
- if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
- MONO_METHOD_IS_FINAL (cmethod)) {
+ /* !marshalbyref is needed to properly handle generic methods + remoting */
+ if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
+ MONO_METHOD_IS_FINAL (cmethod)) &&
+ !cmethod->klass->marshalbyref) {
if (virtual)
check_this = TRUE;
virtual = 0;
/* Conversion to a JIT intrinsic */
if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
+ bblock = cfg->cbb;
if (!MONO_TYPE_IS_VOID (fsig->ret)) {
type_to_eval_stack_type ((cfg), fsig->ret, ins);
*sp = ins;
target = ip + n * sizeof (guint32);
GET_BBLOCK (cfg, default_bblock, target);
+ default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
for (i = 0; i < n; ++i) {
GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
targets [i] = tblock;
+ targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
ip += 4;
}
ins_flag = 0;
MONO_ADD_INS (bblock, ins);
-#if HAVE_WRITE_BARRIERS
- if (*ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0))) {
- MonoInst *dummy_use;
- /* insert call to write barrier */
- MonoMethod *write_barrier = mono_gc_get_write_barrier ();
- mono_emit_method_call (cfg, write_barrier, sp, NULL);
- EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
- }
-#endif
+ if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
+ emit_write_barrier (cfg, sp [0], sp [1], -1);
inline_costs += 1;
++ip;
store->flags |= ins_flag;
MONO_ADD_INS (cfg->cbb, store);
-#if HAVE_WRITE_BARRIERS
- if (cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER) {
- MonoInst *dummy_use;
- MonoMethod *write_barrier = mono_gc_get_write_barrier ();
- mono_emit_method_call (cfg, write_barrier, sp, NULL);
- EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
- }
-#endif
+ if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
+ emit_write_barrier (cfg, sp [0], sp [1], -1);
} else {
mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
}
CHECK_OPSIZE (5);
token = read32 (ip + 1);
cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
- if (!cmethod)
+ if (!cmethod || mono_loader_get_last_error ())
goto load_error;
fsig = mono_method_get_signature (cmethod, image, token);
if (!fsig)
ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
}
+ if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
+ emit_generic_class_init (cfg, cmethod->klass);
+ CHECK_TYPELOAD (cmethod->klass);
+ }
+
if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
if (generic_class_is_reference_type (cfg, klass)) {
/* CASTCLASS FIXME kill this huge slice of duplicated code*/
- if (context_used) {
- MonoInst *iargs [2];
-
- /* obj */
- iargs [0] = *sp;
- /* klass */
- iargs [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
- ins = mono_emit_jit_icall (cfg, mono_object_castclass, iargs);
- *sp ++ = ins;
- ip += 5;
- inline_costs += 2;
- } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
+ if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
MonoMethod *mono_castclass;
MonoInst *iargs [1];
int costs;
*sp++ = iargs [0];
inline_costs += costs;
} else {
- ins = handle_castclass (cfg, klass, *sp, 0);
+ ins = handle_castclass (cfg, klass, *sp, context_used);
CHECK_CFG_EXCEPTION;
bblock = cfg->cbb;
*sp ++ = ins;
if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
UNVERIFIED;
/* frequent check in generic code: box (struct), brtrue */
+
+ // FIXME: LLVM can't handle the inconsistent bb linking
if (!mono_class_is_nullable (klass) &&
- ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) && (ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S)) {
- /*printf ("box-brtrue opt at 0x%04x in %s\n", real_offset, method->name);*/
+ ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
+ (ip [5] == CEE_BRTRUE ||
+ ip [5] == CEE_BRTRUE_S ||
+ ip [5] == CEE_BRFALSE ||
+ ip [5] == CEE_BRFALSE_S)) {
+ gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
+ int dreg;
+ MonoBasicBlock *true_bb, *false_bb;
+
ip += 5;
- MONO_INST_NEW (cfg, ins, OP_BR);
- if (*ip == CEE_BRTRUE_S) {
+
+ if (cfg->verbose_level > 3) {
+ printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
+ printf ("<box+brtrue opt>\n");
+ }
+
+ switch (*ip) {
+ case CEE_BRTRUE_S:
+ case CEE_BRFALSE_S:
CHECK_OPSIZE (2);
ip++;
target = ip + 1 + (signed char)(*ip);
ip++;
- } else {
+ break;
+ case CEE_BRTRUE:
+ case CEE_BRFALSE:
CHECK_OPSIZE (5);
ip++;
target = ip + 4 + (gint)(read32 (ip));
ip += 4;
+ break;
+ default:
+ g_assert_not_reached ();
}
- GET_BBLOCK (cfg, tblock, target);
- link_bblock (cfg, bblock, tblock);
- ins->inst_target_bb = tblock;
- GET_BBLOCK (cfg, tblock, ip);
+
/*
- * This leads to some inconsistency, since the two bblocks are
- * not really connected, but it is needed for handling stack
+	 * We need to link both bblocks, since that is required for handling the stack
* arguments correctly (See test_0_box_brtrue_opt_regress_81102).
- * FIXME: This should only be needed if sp != stack_start, but that
- * doesn't work for some reason (test failure in mcs/tests on x86).
+	 * Branching to only one of them would lead to inconsistencies, so we
+	 * generate an ICONST+BRTRUE; the branch opts will remove them later.
*/
- link_bblock (cfg, bblock, tblock);
+ GET_BBLOCK (cfg, true_bb, target);
+ GET_BBLOCK (cfg, false_bb, ip);
+
+ mono_link_bblock (cfg, cfg->cbb, true_bb);
+ mono_link_bblock (cfg, cfg->cbb, false_bb);
+
if (sp != stack_start) {
handle_stack_args (cfg, stack_start, sp - stack_start);
sp = stack_start;
CHECK_UNVERIFIABLE (cfg);
}
- MONO_ADD_INS (bblock, ins);
+
+ if (COMPILE_LLVM (cfg)) {
+ dreg = alloc_ireg (cfg);
+ MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
+
+ MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
+ } else {
+		/* The JIT can't eliminate the iconst+compare, so emit the branch directly */
+ MONO_INST_NEW (cfg, ins, OP_BR);
+ ins->inst_target_bb = is_true ? true_bb : false_bb;
+ MONO_ADD_INS (cfg->cbb, ins);
+ }
+
start_new_bblock = 1;
break;
}
if (sp [0]->opcode != OP_LDADDR)
store->flags |= MONO_INST_FAULT;
-#if HAVE_WRITE_BARRIERS
- if (mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
+ if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
/* insert call to write barrier */
- MonoMethod *write_barrier = mono_gc_get_write_barrier ();
- MonoInst *iargs [2], *dummy_use;
+ MonoInst *ptr;
int dreg;
dreg = alloc_preg (cfg);
- EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
- iargs [1] = sp [1];
- mono_emit_method_call (cfg, write_barrier, iargs, NULL);
-
- EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
+ EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
+ emit_write_barrier (cfg, ptr, sp [1], -1);
}
-#endif
store->flags |= ins_flag;
}
sp [0] = ins;
}
- MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
-
if (*ip == CEE_LDFLDA) {
+ if (sp [0]->type == STACK_OBJ) {
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
+ MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
+ }
+
dreg = alloc_preg (cfg);
EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
} else {
MonoInst *load;
+ MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
+
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
load->flags |= ins_flag;
if (sp [0]->opcode != OP_LDADDR)
case MONO_TYPE_U:
case MONO_TYPE_PTR:
case MONO_TYPE_FNPTR:
-#ifndef HAVE_MOVING_COLLECTOR
+ EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
+ type_to_eval_stack_type ((cfg), field->type, *sp);
+ sp++;
+ break;
case MONO_TYPE_STRING:
case MONO_TYPE_OBJECT:
case MONO_TYPE_CLASS:
case MONO_TYPE_SZARRAY:
case MONO_TYPE_ARRAY:
-#endif
- EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
- type_to_eval_stack_type ((cfg), field->type, *sp);
- sp++;
+ if (!mono_gc_is_moving ()) {
+ EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
+ type_to_eval_stack_type ((cfg), field->type, *sp);
+ sp++;
+ } else {
+ is_const = FALSE;
+ }
break;
case MONO_TYPE_I8:
case MONO_TYPE_U8:
CHECK_TYPELOAD (klass);
/* FIXME: should check item at sp [1] is compatible with the type of the store. */
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
-#if HAVE_WRITE_BARRIERS
- if (cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
+ if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
generic_class_is_reference_type (cfg, klass)) {
- MonoInst *dummy_use;
/* insert call to write barrier */
- MonoMethod *write_barrier = mono_gc_get_write_barrier ();
- mono_emit_method_call (cfg, write_barrier, sp, NULL);
- EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
+ emit_write_barrier (cfg, sp [0], sp [1], -1);
}
-#endif
ins_flag = 0;
ip += 5;
inline_costs += 1;
ins->dreg = alloc_preg (cfg);
ins->sreg1 = sp [0]->dreg;
ins->type = STACK_I4;
+ /* This flag will be inherited by the decomposition */
+ ins->flags |= MONO_INST_FAULT;
MONO_ADD_INS (cfg->cbb, ins);
cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
cfg->cbb->has_array_access = TRUE;
CHECK_OPSIZE (6);
n = read32 (ip + 2);
cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
- if (!cmethod)
+ if (!cmethod || mono_loader_get_last_error ())
goto load_error;
mono_class_init (cmethod->klass);
if (cfg->generic_sharing_context)
invoke_context_used = mono_method_check_context_used (invoke);
-#if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE) && !defined(HAVE_WRITE_BARRIERS)
+#if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
/* FIXME: SGEN support */
- if (invoke_context_used == 0) {
+ if (!cfg->gen_write_barriers && invoke_context_used == 0) {
MonoInst *target_ins;
ip += 6;
CHECK_OPSIZE (6);
n = read32 (ip + 2);
cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
- if (!cmethod)
+ if (!cmethod || mono_loader_get_last_error ())
goto load_error;
mono_class_init (cmethod->klass);
#endif
#ifdef TARGET_AMD64
- switch (opcode) {
- case OP_ICOMPARE:
- if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
+ if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
+ switch (opcode) {
+ case OP_ICOMPARE:
return OP_AMD64_ICOMPARE_REG_MEMBASE;
- break;
- case OP_COMPARE:
- case OP_LCOMPARE:
- if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
- return OP_AMD64_COMPARE_REG_MEMBASE;
- break;
- case OP_IADD:
- if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
+ case OP_IADD:
return OP_X86_ADD_REG_MEMBASE;
- case OP_ISUB:
- if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
+ case OP_ISUB:
return OP_X86_SUB_REG_MEMBASE;
- case OP_IAND:
- if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
+ case OP_IAND:
return OP_X86_AND_REG_MEMBASE;
- case OP_IOR:
- if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
+ case OP_IOR:
return OP_X86_OR_REG_MEMBASE;
- case OP_IXOR:
- if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
+ case OP_IXOR:
return OP_X86_XOR_REG_MEMBASE;
- case OP_LADD:
- if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
+ }
+ } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
+ switch (opcode) {
+ case OP_COMPARE:
+ case OP_LCOMPARE:
+ return OP_AMD64_COMPARE_REG_MEMBASE;
+ case OP_LADD:
return OP_AMD64_ADD_REG_MEMBASE;
- case OP_LSUB:
- if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
+ case OP_LSUB:
return OP_AMD64_SUB_REG_MEMBASE;
- case OP_LAND:
- if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
+ case OP_LAND:
return OP_AMD64_AND_REG_MEMBASE;
- case OP_LOR:
- if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
+ case OP_LOR:
return OP_AMD64_OR_REG_MEMBASE;
- case OP_LXOR:
- if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
+ case OP_LXOR:
return OP_AMD64_XOR_REG_MEMBASE;
+ }
}
#endif