/* helper methods signatures */
static MonoMethodSignature *helper_sig_domain_get;
static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
-static MonoMethodSignature *helper_sig_llvmonly_imt_thunk;
-
+static MonoMethodSignature *helper_sig_llvmonly_imt_trampoline;
/* type loading helpers */
static GENERATE_GET_CLASS_WITH_CACHE (runtime_helpers, System.Runtime.CompilerServices, RuntimeHelpers)
{
helper_sig_domain_get = mono_create_icall_signature ("ptr");
helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
- helper_sig_llvmonly_imt_thunk = mono_create_icall_signature ("ptr ptr ptr");
+ helper_sig_llvmonly_imt_trampoline = mono_create_icall_signature ("ptr ptr ptr");
}
static MONO_NEVER_INLINE void
return FALSE;
}
+/*
+ * method_needs_stack_walk:
+ *
+ *   Return TRUE if calling CMETHOD requires the caller to remain visible on
+ * the native stack — currently only System.Type:GetType (), which presumably
+ * walks the stack to find its caller (see the llvm-only tail-call workaround
+ * where this is used). CFG is currently unused. NOTE(review): confirm no
+ * other stack-walking intrinsics need to be listed here.
+ */
+static gboolean
+method_needs_stack_walk (MonoCompile *cfg, MonoMethod *cmethod)
+{
+ if (cmethod->klass == mono_defaults.systemtype_class) {
+ if (!strcmp (cmethod->name, "GetType"))
+ return TRUE;
+ }
+ return FALSE;
+}
+
#define is_complex_isinst(klass) ((klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
static MonoInst*
int vtable_reg = alloc_preg (cfg);
MonoInst *klass_inst = NULL;
- if (src->opcode == OP_PCONST && src->inst_p0 == 0)
+ if (MONO_INS_IS_PCONST_NULL (src))
return src;
if (context_used) {
/* Set target field */
/* Optimize away setting of NULL target */
- if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
+ if (!MONO_INS_IS_PCONST_NULL (target)) {
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
if (cfg->gen_write_barriers) {
dreg = alloc_preg (cfg);
if (is_set) {
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
- if (mini_type_is_reference (fsig->params [2]))
+ if (mini_type_is_reference (&eklass->byval_arg))
emit_write_barrier (cfg, addr, load);
} else {
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
{
if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
- !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
+ !(MONO_INS_IS_PCONST_NULL (sp [2]))) {
MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
MonoInst *iargs [3];
return NULL;
} else if (cmethod->klass == mono_defaults.monitor_class) {
gboolean is_enter = FALSE;
+ gboolean is_v4 = FALSE;
- if (!strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1)
+ if (!strcmp (cmethod->name, "Enter") && fsig->param_count == 2 && fsig->params [1]->byref) {
+ is_enter = TRUE;
+ is_v4 = TRUE;
+ }
+ if (!strcmp (cmethod->name, "Enter") && fsig->param_count == 1)
is_enter = TRUE;
if (is_enter) {
NEW_BBLOCK (cfg, end_bb);
- ins = mono_emit_jit_icall (cfg, (gpointer)mono_monitor_enter_fast, args);
+ ins = mono_emit_jit_icall (cfg, is_v4 ? (gpointer)mono_monitor_enter_v4_fast : (gpointer)mono_monitor_enter_fast, args);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, ins->dreg, 0);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, end_bb);
- ins = mono_emit_jit_icall (cfg, (gpointer)mono_monitor_enter, args);
+ ins = mono_emit_jit_icall (cfg, is_v4 ? (gpointer)mono_monitor_enter_v4 : (gpointer)mono_monitor_enter, args);
MONO_START_BB (cfg, end_bb);
return ins;
}
/*
* inline_method:
*
- * Return the cost of inlining CMETHOD.
+ * Return the cost of inlining CMETHOD, or zero if it should not be inlined.
*/
static int
inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
- guchar *ip, guint real_offset, gboolean inline_always)
+ guchar *ip, guint real_offset, gboolean inline_always)
{
MonoError error;
MonoInst *ins, *rvar = NULL;
GHashTable *prev_cbb_hash;
MonoBasicBlock **prev_cil_offset_to_bb;
MonoBasicBlock *prev_cbb;
- unsigned char* prev_cil_start;
+ const unsigned char *prev_ip;
+ unsigned char *prev_cil_start;
guint32 prev_cil_offset_to_bb_len;
MonoMethod *prev_current_method;
MonoGenericContext *prev_generic_context;
prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
prev_cil_start = cfg->cil_start;
+ prev_ip = cfg->ip;
prev_cbb = cfg->cbb;
prev_current_method = cfg->current_method;
prev_generic_context = cfg->generic_context;
cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
cfg->cil_start = prev_cil_start;
+ cfg->ip = prev_ip;
cfg->locals = prev_locals;
cfg->args = prev_args;
cfg->arg_types = prev_arg_types;
icall_args [0] = thunk_arg_ins;
icall_args [1] = emit_get_rgctx_method (cfg, context_used,
cmethod, MONO_RGCTX_INFO_METHOD);
- ftndesc_ins = mono_emit_calli (cfg, helper_sig_llvmonly_imt_thunk, icall_args, thunk_addr_ins, NULL, NULL);
+ ftndesc_ins = mono_emit_calli (cfg, helper_sig_llvmonly_imt_trampoline, icall_args, thunk_addr_ins, NULL, NULL);
return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
}
icall_args [0] = thunk_arg_ins;
icall_args [1] = emit_get_rgctx_method (cfg, context_used,
cmethod, MONO_RGCTX_INFO_METHOD);
- ftndesc_ins = mono_emit_calli (cfg, helper_sig_llvmonly_imt_thunk, icall_args, thunk_addr_ins, NULL, NULL);
+ ftndesc_ins = mono_emit_calli (cfg, helper_sig_llvmonly_imt_trampoline, icall_args, thunk_addr_ins, NULL, NULL);
ftndesc_ins->dreg = ftndesc_reg;
/*
* Unlike normal iface calls, these imt thunks can return NULL, i.e. when they are passed an instantiation
return FALSE;
}
- attrs = mono_custom_attrs_from_assembly_checked (ass, &error);
+ attrs = mono_custom_attrs_from_assembly_checked (ass, FALSE, &error);
mono_error_cleanup (&error); /* FIXME don't swallow the error */
if (attrs) {
for (i = 0; i < attrs->num_attrs; ++i) {
/*
* mono_method_to_ir:
*
- * Translate the .net IL into linear IR.
+ * Translate the .net IL into linear IR.
+ *
+ * @start_bblock: if not NULL, the starting basic block, used during inlining.
+ * @end_bblock: if not NULL, the ending basic block, used during inlining.
+ * @return_var: if not NULL, the place where the return value is stored, used during inlining.
+ * @inline_args: if not NULL, contains the arguments to the inline call
+ * @inline_offset: when inlining, the real IL offset of the call site in the caller; zero otherwise.
+ * @is_virtual_call: whether this method is being called as a result of a call to callvirt
+ *
+ * This method is used to turn ECMA IL into Mono's internal Linear IR
+ * representation. It is used both for entire methods, as well as
+ * inlining existing methods. In the former case, the @start_bblock,
+ * @end_bblock, @return_var, @inline_args are all set to NULL, and the
+ * inline_offset is set to zero.
+ *
+ * Returns: the inline cost, or -1 if there was an error processing this method.
*/
int
mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
if (!header) {
mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
goto exception_exit;
+ } else {
+ cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
}
+
generic_container = mono_method_get_generic_container (method);
sig = mono_method_signature (method);
num_args = sig->hasthis + sig->param_count;
/* we use a separate basic block for the initialization code */
NEW_BBLOCK (cfg, init_localsbb);
- cfg->bb_init = init_localsbb;
+ if (cfg->method == method)
+ cfg->bb_init = init_localsbb;
init_localsbb->real_offset = cfg->real_offset;
start_bblock->next_bb = init_localsbb;
init_localsbb->next_bb = cfg->cbb;
if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
GSHAREDVT_FAILURE (*ip);
- if (cfg->backend->have_generalized_imt_thunk && cfg->backend->gshared_supported && cmethod->wrapper_type == MONO_WRAPPER_NONE) {
+ if (cfg->backend->have_generalized_imt_trampoline && cfg->backend->gshared_supported && cmethod->wrapper_type == MONO_WRAPPER_NONE) {
g_assert (!imt_arg);
if (!context_used)
g_assert (cmethod->is_inflated);
}
if (!has_vtargs) {
+ if (need_seq_point) {
+ emit_seq_point (cfg, method, ip, FALSE, TRUE);
+ need_seq_point = FALSE;
+ }
for (i = 0; i < n; ++i)
EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
MONO_INST_NEW (cfg, ins, OP_BR);
inline_costs += 10 * num_calls++;
+ /*
+ * Synchronized wrappers.
+ * It's hard to determine where to replace a method with its synchronized
+ * wrapper without causing an infinite recursion. The current solution is
+ * to add the synchronized wrapper in the trampolines, and to
+ * change the called method to a dummy wrapper, and resolve that wrapper
+ * to the real method in mono_jit_compile_method ().
+ */
+ if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
+ MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
+ if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
+ cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
+ }
+
/*
* Making generic calls out of gsharedvt methods.
* This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
- if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
+ if (cfg->gen_write_barriers && val->type == STACK_OBJ && !MONO_INS_IS_PCONST_NULL (val))
emit_write_barrier (cfg, addr, val);
if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cmethod->klass))
GSHAREDVT_FAILURE (*ip);
}
}
- /*
- * Synchronized wrappers.
- * Its hard to determine where to replace a method with its synchronized
- * wrapper without causing an infinite recursion. The current solution is
- * to add the synchronized wrapper in the trampolines, and to
- * change the called method to a dummy wrapper, and resolve that wrapper
- * to the real method in mono_jit_compile_method ().
- */
- if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
- MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
- if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
- cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
- }
-
/*
* Virtual calls in llvm-only mode.
*/
EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
}
+ if (cfg->llvm_only && cmethod && method_needs_stack_walk (cfg, cmethod)) {
+ /*
+ * Clang can convert these calls to tail calls which screw up the stack
+ * walk. This happens even when the -fno-optimize-sibling-calls
+ * option is passed to clang.
+ * Work around this by emitting a dummy call.
+ */
+ mono_emit_jit_icall (cfg, mono_dummy_jit_icall, NULL);
+ }
+
CHECK_CFG_EXCEPTION;
ip += 5;
MONO_ADD_INS (cfg->cbb, ins);
- if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
+ if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !MONO_INS_IS_PCONST_NULL (sp [1]))
emit_write_barrier (cfg, sp [0], sp [1]);
inline_costs += 1;
res = handle_unbox_gsharedvt (cfg, klass, *sp);
inline_costs += 2;
} else if (generic_class_is_reference_type (cfg, klass)) {
- MONO_INST_NEW (cfg, res, OP_CASTCLASS);
- res->dreg = alloc_preg (cfg);
- res->sreg1 = (*sp)->dreg;
- res->klass = klass;
- res->type = STACK_OBJ;
- MONO_ADD_INS (cfg->cbb, res);
- cfg->flags |= MONO_CFG_HAS_TYPE_CHECK;
+ if (MONO_INS_IS_PCONST_NULL (*sp)) {
+ EMIT_NEW_PCONST (cfg, res, NULL);
+ res->type = STACK_OBJ;
+ } else {
+ MONO_INST_NEW (cfg, res, OP_CASTCLASS);
+ res->dreg = alloc_preg (cfg);
+ res->sreg1 = (*sp)->dreg;
+ res->klass = klass;
+ res->type = STACK_OBJ;
+ MONO_ADD_INS (cfg->cbb, res);
+ cfg->flags |= MONO_CFG_HAS_TYPE_CHECK;
+ }
} else if (mono_class_is_nullable (klass)) {
res = handle_unbox_nullable (cfg, *sp, klass, context_used);
} else {
MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
+ if (ins_flag & MONO_INST_VOLATILE) {
+ /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
+ emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
+ }
+
if (mini_is_gsharedvt_klass (klass)) {
MonoInst *offset_ins;
if (sp [0]->opcode != OP_LDADDR)
store->flags |= MONO_INST_FAULT;
- if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
+ if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !MONO_INS_IS_PCONST_NULL (sp [1])) {
if (mini_is_gsharedvt_klass (klass)) {
g_assert (wbarrier_ptr_ins);
emit_write_barrier (cfg, wbarrier_ptr_ins, sp [1]);
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
ins->flags |= ins_flag;
if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
- generic_class_is_reference_type (cfg, klass)) {
+ generic_class_is_reference_type (cfg, klass) && !MONO_INS_IS_PCONST_NULL (sp [1])) {
/* insert call to write barrier */
emit_write_barrier (cfg, sp [0], sp [1]);
}
}
case CEE_THROW:
CHECK_STACK (1);
+ if (sp [-1]->type != STACK_OBJ)
+ UNVERIFIED;
+
MONO_INST_NEW (cfg, ins, OP_THROW);
--sp;
ins->sreg1 = sp [0]->dreg;
case CEE_MONO_LDDOMAIN:
CHECK_STACK_OVF (1);
EMIT_NEW_PCONST (cfg, ins, cfg->compile_aot ? NULL : cfg->domain);
+ ip += 2;
+ *sp++ = ins;
+ break;
+ case CEE_MONO_GET_LAST_ERROR:
+ CHECK_OPSIZE (2);
+ CHECK_STACK_OVF (1);
+
+ MONO_INST_NEW (cfg, ins, OP_GET_LAST_ERROR);
+ ins->dreg = alloc_dreg (cfg, STACK_I4);
+ ins->type = STACK_I4;
+ MONO_ADD_INS (cfg->cbb, ins);
+
ip += 2;
*sp++ = ins;
break;
if (cfg->method == method) {
MonoBasicBlock *bb;
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
- bb->region = mono_find_block_region (cfg, bb->real_offset);
+ if (bb == cfg->bb_init)
+ bb->region = -1;
+ else
+ bb->region = mono_find_block_region (cfg, bb->real_offset);
if (cfg->spvars)
mono_create_spvar_for_region (cfg, bb->region);
if (cfg->verbose_level > 2)
g_slist_free (class_inits);
mono_basic_block_free (original_bb);
cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
- cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
if (cfg->exception_type)
return -1;
else