#include <mono/metadata/monitor.h>
#include <mono/metadata/profiler-private.h>
#include <mono/metadata/profiler.h>
+#include <mono/metadata/debug-mono-symfile.h>
#include <mono/utils/mono-compiler.h>
#include <mono/utils/mono-memory-model.h>
#include <mono/metadata/mono-basic-block.h>
#define BRANCH_COST 10
#define INLINE_LENGTH_LIMIT 20
-#define INLINE_FAILURE do {\
- if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
- goto inline_failure;\
+#define INLINE_FAILURE(msg) do { \
+ if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE)) { \
+ if (cfg->verbose_level >= 2) \
+ printf ("inline failed: %s\n", msg); \
+ goto inline_failure; \
+ } \
} while (0)
#define CHECK_CFG_EXCEPTION do {\
if (cfg->exception_type != MONO_EXCEPTION_NONE)\
#define LOAD_ERROR do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto load_error; } while (0)
+#define TYPE_LOAD_ERROR(klass) do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else { cfg->exception_ptr = klass; goto load_error; } } while (0)
+
#define GET_BBLOCK(cfg,tblock,ip) do { \
(tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
if (!(tblock)) { \
int stypes_reg = alloc_preg (cfg);
int stype = alloc_preg (cfg);
+ mono_class_setup_supertypes (klass);
+
if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
int stypes_reg = alloc_preg (cfg);
int stype = alloc_preg (cfg);
+ mono_class_setup_supertypes (klass);
+
if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
inline static MonoCallInst *
mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
- MonoInst **args, int calli, int virtual, int tail, int rgctx)
+ MonoInst **args, int calli, int virtual, int tail, int rgctx, int unbox_trampoline)
{
MonoCallInst *call;
#ifdef MONO_ARCH_SOFT_FLOAT
}
#endif
+ call->need_unbox_trampoline = unbox_trampoline;
+
#ifdef ENABLE_LLVM
if (COMPILE_LLVM (cfg))
mono_llvm_emit_call (cfg, call);
MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
}
- call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE);
+ call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
call->inst.sreg1 = addr->dreg;
int context_used;
MonoCallInst *call;
int rgctx_reg = 0;
+ gboolean need_unbox_trampoline;
if (rgctx_arg) {
rgctx_reg = mono_alloc_preg (cfg);
return mono_emit_calli (cfg, sig, args, addr, NULL);
}
- call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE, rgctx_arg ? TRUE : FALSE);
+ need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
+
+ call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
if (might_be_remote)
call->method = mono_marshal_get_remoting_invoke_with_check (method);
g_assert (sig);
- call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE);
+ call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
call->fptr = func;
MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
#endif
}
+/*
+ * emit_seq_point:
+ *
+ *   Append a debugger sequence point at IL offset (ip - header code start)
+ * to the current basic block. Points are only generated when the compile
+ * was asked for them (cfg->gen_seq_points) and only for the outermost
+ * method being compiled (cfg->method == method excludes inlined callees).
+ * INTR_LOC marks the point as an interrupt/single-step location.
+ */
+static void
+emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc)
+{
+ MonoInst *ins;
+
+ if (cfg->gen_seq_points && cfg->method == method) {
+ NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
+ MONO_ADD_INS (cfg->cbb, ins);
+ }
+}
+
static void
save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
{
* in mono_delegate_trampoline (), we allocate a per-domain memory slot to
* store it, and we fill it after the method has been compiled.
*/
- if (!cfg->compile_aot && !method->dynamic) {
+ if (!cfg->compile_aot && !method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
MonoInst *code_slot_ins;
if (context_used) {
if (cfg->compile_aot) {
EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
} else {
- trampoline = mono_create_delegate_trampoline (klass);
+ trampoline = mono_create_delegate_trampoline (cfg->domain, klass);
EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
}
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
inline_limit = INLINE_LENGTH_LIMIT;
inline_limit_inited = TRUE;
}
- if (header.code_size >= inline_limit)
+ if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
return FALSE;
/*
int realidx1_reg = alloc_preg (cfg);
int realidx2_reg = alloc_preg (cfg);
int sum_reg = alloc_preg (cfg);
- int index1, index2;
+ int index1, index2, tmpreg;
MonoInst *ins;
guint32 size;
index1 = index_ins1->dreg;
index2 = index_ins2->dreg;
+#if SIZEOF_REGISTER == 8
+ /* The array reg is 64 bits but the index reg is only 32 */
+ if (COMPILE_LLVM (cfg)) {
+ /* Not needed */
+ } else {
+ tmpreg = alloc_preg (cfg);
+ MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
+ index1 = tmpreg;
+ tmpreg = alloc_preg (cfg);
+ MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
+ index2 = tmpreg;
+ }
+#else
+ // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
+ tmpreg = -1;
+#endif
+
/* range checking */
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
if (is_set) {
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
+ if (mini_type_is_reference (cfg, fsig->params [2]))
+ emit_write_barrier (cfg, addr, load, -1);
} else {
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
return store;
}
+
+/*
+ * generic_class_is_reference_type:
+ *
+ *   Return TRUE if KLASS is treated as a reference type by the JIT,
+ * delegating to mini_type_is_reference () on the class' by-value type
+ * (so generic sharing rules are taken into account).
+ */
+static gboolean
+generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
+{
+ return mini_type_is_reference (cfg, &klass->byval_arg);
+}
+
+/*
+ * emit_array_store:
+ *
+ *   Emit IR for storing SP [2] into the array SP [0] at index SP [1],
+ * whose element class is KLASS. When SAFETY_CHECKS is set, reference
+ * element stores go through the virtual stelemref helper (which performs
+ * the array covariance type check); storing a NULL constant skips that
+ * path since no type check is needed. Value-type / unchecked stores are
+ * emitted inline, with a bounds check only when SAFETY_CHECKS is set.
+ * Returns the emitted instruction, or NULL if the stack types are not
+ * object references where the helper path requires them.
+ */
+static MonoInst*
+emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
+{
+ if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
+ !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
+ MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
+ MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
+ MonoInst *iargs [3];
+
+ /* The helper's vtable slot must be assigned before it can be called virtually */
+ if (!helper->slot)
+ mono_class_setup_vtable (obj_array);
+ g_assert (helper->slot);
+
+ if (sp [0]->type != STACK_OBJ)
+ return NULL;
+ if (sp [2]->type != STACK_OBJ)
+ return NULL;
+
+ iargs [2] = sp [2];
+ iargs [1] = sp [1];
+ iargs [0] = sp [0];
+
+ return mono_emit_method_call (cfg, helper, iargs, sp [0]);
+ } else {
+ MonoInst *ins;
+ if (sp [1]->opcode == OP_ICONST) {
+ /* Constant index: fold the element offset at compile time */
+ int array_reg = sp [0]->dreg;
+ int index_reg = sp [1]->dreg;
+ int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
+
+ if (safety_checks)
+ MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
+ EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
+ } else {
+ /* Variable index: compute the element address (with bounds check if requested) */
+ MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
+ EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
+ /* Reference stores need a GC write barrier */
+ if (generic_class_is_reference_type (cfg, klass))
+ emit_write_barrier (cfg, addr, sp [2], -1);
+ }
+ return ins;
+ }
+}
+
+/*
+ * emit_array_unsafe_access:
+ *
+ *   Emit IR for the Array.UnsafeStore/UnsafeLoad intrinsics: an array
+ * element store (IS_SET) or load without bounds checks or covariance
+ * checks. The element class is taken from the third parameter type for
+ * stores and from the return type for loads. Returns the emitted
+ * load/store instruction.
+ */
+static MonoInst*
+emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
+{
+ MonoClass *eklass;
+
+ if (is_set)
+ eklass = mono_class_from_mono_type (fsig->params [2]);
+ else
+ eklass = mono_class_from_mono_type (fsig->ret);
+
+
+ if (is_set) {
+ /* safety_checks == FALSE: this is the "unsafe" fast path */
+ return emit_array_store (cfg, eklass, args, FALSE);
+ } else {
+ MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
+ EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
+ return ins;
+ }
+}
+
static MonoInst*
mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
return ins;
}
+/*
+ * llvm_emit_inst_for_method:
+ *
+ *   Emit intrinsic IR for CMETHOD when compiling with the LLVM backend,
+ * which supports a few System.Math intrinsics the regular backends may
+ * not: Sin/Cos/Sqrt/Abs (double only) and, when CMOV optimization is
+ * enabled, integer Min/Max in signed/unsigned 32/64-bit variants.
+ * Returns the emitted instruction, or NULL if CMETHOD is not handled
+ * (the caller then falls back to mono_arch_emit_inst_for_method ()).
+ */
+static MonoInst*
+llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
+{
+ MonoInst *ins = NULL;
+ int opcode = 0;
+
+ /* The LLVM backend supports these intrinsics */
+ if (cmethod->klass == mono_defaults.math_class) {
+ if (strcmp (cmethod->name, "Sin") == 0) {
+ opcode = OP_SIN;
+ } else if (strcmp (cmethod->name, "Cos") == 0) {
+ opcode = OP_COS;
+ } else if (strcmp (cmethod->name, "Sqrt") == 0) {
+ opcode = OP_SQRT;
+ } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
+ /* Only the double overload of Abs maps to OP_ABS */
+ opcode = OP_ABS;
+ }
+
+ if (opcode) {
+ MONO_INST_NEW (cfg, ins, opcode);
+ ins->type = STACK_R8;
+ ins->dreg = mono_alloc_freg (cfg);
+ ins->sreg1 = args [0]->dreg;
+ MONO_ADD_INS (cfg->cbb, ins);
+ }
+
+ opcode = 0;
+ if (cfg->opt & MONO_OPT_CMOV) {
+ if (strcmp (cmethod->name, "Min") == 0) {
+ if (fsig->params [0]->type == MONO_TYPE_I4)
+ opcode = OP_IMIN;
+ else if (fsig->params [0]->type == MONO_TYPE_U4)
+ opcode = OP_IMIN_UN;
+ else if (fsig->params [0]->type == MONO_TYPE_I8)
+ opcode = OP_LMIN;
+ else if (fsig->params [0]->type == MONO_TYPE_U8)
+ opcode = OP_LMIN_UN;
+ } else if (strcmp (cmethod->name, "Max") == 0) {
+ if (fsig->params [0]->type == MONO_TYPE_I4)
+ opcode = OP_IMAX;
+ else if (fsig->params [0]->type == MONO_TYPE_U4)
+ opcode = OP_IMAX_UN;
+ else if (fsig->params [0]->type == MONO_TYPE_I8)
+ opcode = OP_LMAX;
+ else if (fsig->params [0]->type == MONO_TYPE_U8)
+ opcode = OP_LMAX_UN;
+ }
+ }
+
+ if (opcode) {
+ MONO_INST_NEW (cfg, ins, opcode);
+ /* U4 uses a 32 bit opcode (OP_IMIN_UN/OP_IMAX_UN), so its stack type
+ * must be STACK_I4 as well, not just for I4 */
+ ins->type = (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_U4) ? STACK_I4 : STACK_I8;
+ ins->dreg = mono_alloc_ireg (cfg);
+ ins->sreg1 = args [0]->dreg;
+ ins->sreg2 = args [1]->dreg;
+ MONO_ADD_INS (cfg->cbb, ins);
+ }
+ }
+
+ return ins;
+}
+
+/*
+ * mini_emit_inst_for_sharable_method:
+ *
+ *   Emit intrinsic IR for CMETHOD that is valid under generic sharing:
+ * currently only the System.Array UnsafeStore/UnsafeLoad helpers.
+ * Returns the emitted instruction, or NULL if CMETHOD is not handled.
+ */
+static MonoInst*
+mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
+{
+ if (cmethod->klass == mono_defaults.array_class) {
+ if (strcmp (cmethod->name, "UnsafeStore") == 0)
+ return emit_array_unsafe_access (cfg, fsig, args, TRUE);
+ if (strcmp (cmethod->name, "UnsafeLoad") == 0)
+ return emit_array_unsafe_access (cfg, fsig, args, FALSE);
+ }
+
+ return NULL;
+}
+
static MonoInst*
mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
}
#endif
+ if (COMPILE_LLVM (cfg)) {
+ ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
+ if (ins)
+ return ins;
+ }
+
return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
}
return costs + 1;
} else {
if (cfg->verbose_level > 2)
- printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
+ printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
cfg->exception_type = MONO_EXCEPTION_NONE;
mono_loader_clear_error ();
cfg->exception_ptr = exception;
}
-static gboolean
-generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
-{
- return mini_type_is_reference (cfg, &klass->byval_arg);
-}
-
static void
emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
{
/* Debugging support */
#if 0
if (supported_tail_call) {
- static int count = 0;
- count ++;
- if (getenv ("COUNT")) {
- if (count == atoi (getenv ("COUNT")))
- printf ("LAST: %s\n", mono_method_full_name (cmethod, TRUE));
- if (count > atoi (getenv ("COUNT")))
- supported_tail_call = FALSE;
- }
+ if (!mono_debug_count ())
+ supported_tail_call = FALSE;
}
#endif
gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
int context_used;
gboolean init_locals, seq_points, skip_dead_blocks;
- gboolean disable_inline;
+ gboolean disable_inline, sym_seq_points = FALSE;
MonoInst *cached_tls_addr = NULL;
+ MonoDebugMethodInfo *minfo;
+ MonoBitSet *seq_point_locs = NULL;
disable_inline = is_jit_optimizer_disabled (method);
seq_points = cfg->gen_seq_points && cfg->method == method;
+ if (cfg->gen_seq_points && cfg->method == method) {
+ minfo = mono_debug_lookup_method (method);
+ if (minfo) {
+ int i, n_il_offsets;
+ int *il_offsets;
+ int *line_numbers;
+
+ mono_debug_symfile_get_line_numbers_full (minfo, NULL, NULL, &n_il_offsets, &il_offsets, &line_numbers, NULL, NULL);
+ seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
+ sym_seq_points = TRUE;
+ for (i = 0; i < n_il_offsets; ++i) {
+ if (il_offsets [i] < header->code_size)
+ mono_bitset_set_fast (seq_point_locs, il_offsets [i]);
+ }
+ }
+ }
+
/*
* Methods without init_locals set could cause asserts in various passes
* (#497220).
* Currently, we generate these automatically at points where the IL
* stack is empty.
*/
- if (seq_points && sp == stack_start) {
+ if (seq_points && ((sp == stack_start) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
/*
* Make methods interruptable at the beginning, and at the targets of
* backward branches.
- * Also, do this at the start of every bblock too,
+ * Also, do this at the start of every bblock in methods with clauses too,
* to be able to handle instructions with inprecise control flow like
* throw/endfinally.
- * FIXME: Avoid this somehow.
+ * Backward branches are handled at the end of method-to-ir ().
*/
- gboolean intr_loc = ip == header->code || cfg->cbb->in_count > 1 || !cfg->cbb->last_ins;
+ gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
/* Avoid sequence points on empty IL like .volatile */
// FIXME: Enable this
switch (*ip) {
case CEE_NOP:
+ if (seq_points && !sym_seq_points && sp != stack_start) {
+ /*
+ * The C# compiler uses these nops to notify the JIT that it should
+ * insert seq points.
+ */
+ NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
+ MONO_ADD_INS (cfg->cbb, ins);
+ }
if (cfg->keep_cil_nops)
MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
else
case CEE_JMP: {
MonoCallInst *call;
- INLINE_FAILURE;
+ INLINE_FAILURE ("jmp");
CHECK_OPSIZE (5);
if (stack_start != sp)
MonoInst *vtable_arg = NULL;
gboolean check_this = FALSE;
gboolean supported_tail_call = FALSE;
+ gboolean need_seq_point = FALSE;
CHECK_OPSIZE (5);
token = read32 (ip + 1);
cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
cil_method = cmethod;
}
-
+
if (!cmethod || mono_loader_get_last_error ())
LOAD_ERROR;
if (!dont_verify && !cfg->skip_visibility) {
if (!cmethod->klass->inited)
if (!mono_class_init (cmethod->klass))
- LOAD_ERROR;
+ TYPE_LOAD_ERROR (cmethod->klass);
if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
mini_class_is_system_array (cmethod->klass)) {
mono_save_token_info (cfg, image, token, cil_method);
+ if (!MONO_TYPE_IS_VOID (fsig->ret) && !sym_seq_points) {
+ /*
+ * Need to emit an implicit seq point after every non-void call so single stepping through nested calls like
+ * foo (bar (), baz ())
+ * works correctly. MS does this also:
+ * http://stackoverflow.com/questions/6937198/making-your-net-language-step-correctly-in-the-debugger
+ * The problem with this approach is that the debugger will stop after all calls returning a value,
+ * even for simple cases, like:
+ * int i = foo ();
+ */
+ /* Special case a few common successor opcodes */
+ if (!(ip + 5 < end && ip [5] == CEE_POP))
+ need_seq_point = TRUE;
+ }
+
n = fsig->param_count + fsig->hasthis;
if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
if (check_linkdemand (cfg, method, cmethod))
- INLINE_FAILURE;
+ INLINE_FAILURE ("linkdemand");
CHECK_CFG_EXCEPTION;
}
/*
* We have the `constrained.' prefix opcode.
*/
- if (constrained_call->valuetype && !cmethod->klass->valuetype) {
+ if (constrained_call->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
/*
* The type parameter is instantiated as a valuetype,
* but that type doesn't override the method we're
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
ins->type = STACK_OBJ;
sp [0] = ins;
- } else if (cmethod->klass->valuetype)
+ } else {
+ if (cmethod->klass->valuetype) {
+ /* Own method */
+ } else {
+ /* Interface method */
+ int ioffset, slot;
+
+ mono_class_setup_vtable (constrained_call);
+ CHECK_TYPELOAD (constrained_call);
+ ioffset = mono_class_interface_offset (constrained_call, cmethod->klass);
+ if (ioffset == -1)
+ TYPE_LOAD_ERROR (constrained_call);
+ slot = mono_method_get_vtable_slot (cmethod);
+ if (slot == -1)
+ TYPE_LOAD_ERROR (cmethod->klass);
+ cmethod = constrained_call->vtable [ioffset + slot];
+
+ if (cmethod->klass == mono_defaults.enum_class) {
+ /* Enum implements some interfaces, so treat this as the first case */
+ EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
+ ins->klass = constrained_call;
+ sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call));
+ CHECK_CFG_EXCEPTION;
+ }
+ }
virtual = 0;
+ }
constrained_call = NULL;
}
if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
UNVERIFIED;
+ if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
+ bblock = cfg->cbb;
+ if (!MONO_TYPE_IS_VOID (fsig->ret)) {
+ type_to_eval_stack_type ((cfg), fsig->ret, ins);
+ *sp = ins;
+ sp++;
+ }
+
+ CHECK_CFG_EXCEPTION;
+
+ ip += 5;
+ ins_flag = 0;
+ if (need_seq_point)
+ emit_seq_point (cfg, method, ip, FALSE);
+ break;
+ }
+
/*
* If the callee is a shared method, then its static cctor
* might not get called after the call was patched.
g_assert (mono_method_signature (cmethod)->is_inflated);
/* Prevent inlining of methods that contain indirect calls */
- INLINE_FAILURE;
+ INLINE_FAILURE ("virtual generic call");
#if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt) {
ip += 5;
ins_flag = 0;
+ if (need_seq_point)
+ emit_seq_point (cfg, method, ip, FALSE);
break;
}
ip += 5;
ins_flag = 0;
+ if (need_seq_point)
+ emit_seq_point (cfg, method, ip, FALSE);
break;
}
if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
(cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
/* Prevent inlining of methods that call wrappers */
- INLINE_FAILURE;
+ INLINE_FAILURE ("wrapper call");
cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
always = TRUE;
}
cfg->real_offset += 5;
bblock = cfg->cbb;
- if (!MONO_TYPE_IS_VOID (fsig->ret))
+ if (!MONO_TYPE_IS_VOID (fsig->ret)) {
/* *sp is already set by inline_method */
sp++;
+ }
inline_costs += costs;
ins_flag = 0;
+ if (need_seq_point)
+ emit_seq_point (cfg, method, ip, FALSE);
break;
}
}
int i;
/* Prevent inlining of methods with tail calls (the call stack would be altered) */
- INLINE_FAILURE;
+ INLINE_FAILURE ("tail call");
/* keep it simple */
for (i = fsig->param_count - 1; i >= 0; i--) {
!mono_class_generic_sharing_enabled (cmethod->klass)) &&
(!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
- INLINE_FAILURE;
+ INLINE_FAILURE ("gshared");
g_assert (cfg->generic_sharing_context && cmethod);
g_assert (!addr);
!(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
/* Prevent inlining of methods with indirect calls */
- INLINE_FAILURE;
+ INLINE_FAILURE ("indirect call");
if (vtable_arg) {
ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, vtable_arg);
ip += 5;
ins_flag = 0;
+ if (need_seq_point)
+ emit_seq_point (cfg, method, ip, FALSE);
break;
}
ip += 5;
ins_flag = 0;
+ emit_seq_point (cfg, method, ip, FALSE);
break;
}
ip += 5;
ins_flag = 0;
+ if (need_seq_point)
+ emit_seq_point (cfg, method, ip, FALSE);
break;
}
MonoCallInst *call;
/* Prevent inlining of methods with tail calls (the call stack would be altered) */
- INLINE_FAILURE;
+ INLINE_FAILURE ("tail call");
//printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
#ifdef MONO_ARCH_USE_OP_TAIL_CALL
/* Handle tail calls similarly to calls */
- call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE, FALSE);
+ call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE, FALSE, FALSE);
#else
MONO_INST_NEW_CALL (cfg, call, OP_JMP);
call->tail_call = TRUE;
break;
}
+ /*
+ * Synchronized wrappers.
+ * Its hard to determine where to replace a method with its synchronized
+ * wrapper without causing an infinite recursion. The current solution is
+ * to add the synchronized wrapper in the trampolines, and to
+ * change the called method to a dummy wrapper, and resolve that wrapper
+ * to the real method in mono_jit_compile_method ().
+ */
+ if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED && mono_marshal_method_from_wrapper (cfg->method) == cmethod) {
+ cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
+ }
+
/* Common call */
- INLINE_FAILURE;
+ INLINE_FAILURE ("call");
ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
imt_arg, vtable_arg);
ip += 5;
ins_flag = 0;
+ if (need_seq_point)
+ emit_seq_point (cfg, method, ip, FALSE);
break;
}
case CEE_RET:
* (test case: test_0_inline_throw ()).
*/
if (return_var && cfg->cbb->in_count) {
+ MonoType *ret_type = mono_method_signature (method)->ret;
+
MonoInst *store;
CHECK_STACK (1);
--sp;
+
+ if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
+ UNVERIFIED;
+
//g_assert (returnvar != -1);
EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
cfg->ret_var_set = TRUE;
if (cfg->ret) {
MonoType *ret_type = mono_method_signature (method)->ret;
- if (seq_points) {
+ if (seq_points && !sym_seq_points) {
/*
* Place a seq point here too even through the IL stack is not
* empty, so a step over on
/* Use the immediate opcodes if possible */
if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
- int imm_opcode = mono_op_to_op_imm (ins->opcode);
+ int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
if (imm_opcode != -1) {
ins->opcode = imm_opcode;
ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
mono_save_token_info (cfg, image, token, cmethod);
if (!mono_class_init (cmethod->klass))
- LOAD_ERROR;
+ TYPE_LOAD_ERROR (cmethod->klass);
if (cfg->generic_sharing_context)
context_used = mono_method_check_context_used (cmethod);
if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
if (check_linkdemand (cfg, method, cmethod))
- INLINE_FAILURE;
+ INLINE_FAILURE ("linkdemand");
CHECK_CFG_EXCEPTION;
} else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
inline_costs += costs - 5;
} else {
- INLINE_FAILURE;
+ INLINE_FAILURE ("inline failure");
mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL, NULL);
}
} else if (context_used &&
mono_emit_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
} else {
- INLINE_FAILURE;
+ INLINE_FAILURE ("ctor call");
ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp,
callvirt_this_arg, NULL, vtable_arg);
}
ftype = mono_field_get_type (field);
- g_assert (!(ftype->attrs & FIELD_ATTRIBUTE_LITERAL));
+ if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
+ UNVERIFIED;
/* The special_static_fields field is init'd in mono_class_vtable, so it needs
* to be called here.
/* .cctors: too many apps depend on them */
/* running with a specific order... */
if (! vtable->initialized)
- INLINE_FAILURE;
+ INLINE_FAILURE ("class init");
ex = mono_runtime_class_init_full (vtable, FALSE);
if (ex) {
set_exception_object (cfg, ex);
ins->sreg1 = sp [0]->dreg;
ins->inst_newa_class = klass;
ins->type = STACK_OBJ;
- ins->klass = klass;
+ ins->klass = array_type;
MONO_ADD_INS (cfg->cbb, ins);
cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
cfg->cbb->has_array_access = TRUE;
case CEE_STELEM_R8:
case CEE_STELEM_REF:
case CEE_STELEM: {
- MonoInst *addr;
-
CHECK_STACK (3);
sp -= 3;
if (sp [0]->type != STACK_OBJ)
UNVERIFIED;
- /* storing a NULL doesn't need any of the complex checks in stelemref */
- if (generic_class_is_reference_type (cfg, klass) &&
- !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
- MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
- MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
- MonoInst *iargs [3];
-
- if (!helper->slot)
- mono_class_setup_vtable (obj_array);
- g_assert (helper->slot);
-
- if (sp [0]->type != STACK_OBJ)
- UNVERIFIED;
- if (sp [2]->type != STACK_OBJ)
- UNVERIFIED;
-
- iargs [2] = sp [2];
- iargs [1] = sp [1];
- iargs [0] = sp [0];
-
- mono_emit_method_call (cfg, helper, iargs, sp [0]);
- } else {
- if (sp [1]->opcode == OP_ICONST) {
- int array_reg = sp [0]->dreg;
- int index_reg = sp [1]->dreg;
- int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
-
- MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
- EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
- } else {
- addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
- EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
- }
- }
+ emit_array_store (cfg, klass, sp, TRUE);
if (*ip == CEE_STELEM)
ip += 5;
start_new_bblock = 1;
break;
case CEE_ENDFINALLY:
+ /* mono_save_seq_point_info () depends on this */
+ if (sp != stack_start)
+ emit_seq_point (cfg, method, ip, FALSE);
MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
MONO_ADD_INS (bblock, ins);
ip++;
token = read32 (ip + 2);
func = mono_method_get_wrapper_data (method, token);
info = mono_find_jit_icall_by_addr (func);
+ if (!info)
+ g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
g_assert (info);
CHECK_STACK (info->sig->param_count);
ip += 5;
break;
}
+ case CEE_MONO_JIT_ATTACH: {
+ MonoInst *args [16];
+ MonoInst *ad_ins, *lmf_ins;
+ MonoBasicBlock *next_bb = NULL;
+
+ cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
+
+ EMIT_NEW_PCONST (cfg, ins, NULL);
+ MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
+
+#if TARGET_WIN32
+ ad_ins = NULL;
+ lmf_ins = NULL;
+#else
+ ad_ins = mono_get_domain_intrinsic (cfg);
+ lmf_ins = mono_get_lmf_intrinsic (cfg);
+#endif
+
+#ifdef MONO_ARCH_HAVE_TLS_GET
+ if (MONO_ARCH_HAVE_TLS_GET && ad_ins && lmf_ins) {
+ NEW_BBLOCK (cfg, next_bb);
+
+ MONO_ADD_INS (cfg->cbb, ad_ins);
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ad_ins->dreg, 0);
+ MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, next_bb);
+
+ MONO_ADD_INS (cfg->cbb, lmf_ins);
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, lmf_ins->dreg, 0);
+ MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, next_bb);
+ }
+#endif
+
+ if (cfg->compile_aot) {
+ /* AOT code is only used in the root domain */
+ EMIT_NEW_PCONST (cfg, args [0], NULL);
+ } else {
+ EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
+ }
+ ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
+ MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
+
+ if (next_bb) {
+ MONO_START_BB (cfg, next_bb);
+ bblock = cfg->cbb;
+ }
+ ip += 2;
+ break;
+ }
+ case CEE_MONO_JIT_DETACH: {
+ MonoInst *args [16];
+
+ /* Restore the original domain */
+ dreg = alloc_ireg (cfg);
+ EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
+ mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
+ ip += 2;
+ break;
+ }
default:
g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
break;
if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
if (check_linkdemand (cfg, method, cmethod))
- INLINE_FAILURE;
+ INLINE_FAILURE ("linkdemand");
CHECK_CFG_EXCEPTION;
} else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
if (check_linkdemand (cfg, method, cmethod))
- INLINE_FAILURE;
+ INLINE_FAILURE ("linkdemand");
CHECK_CFG_EXCEPTION;
} else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
break;
}
case CEE_SIZEOF: {
- guint32 align;
+ guint32 val;
int ialign;
CHECK_STACK_OVF (1);
token = read32 (ip + 2);
if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic && !generic_context) {
MonoType *type = mono_type_create_from_typespec (image, token);
- token = mono_type_size (type, &ialign);
+ val = mono_type_size (type, &ialign);
} else {
MonoClass *klass = mono_class_get_full (image, token, generic_context);
CHECK_TYPELOAD (klass);
mono_class_init (klass);
- token = mono_class_value_size (klass, &align);
+ val = mono_type_size (&klass->byval_arg, &ialign);
}
- EMIT_NEW_ICONST (cfg, ins, token);
+ EMIT_NEW_ICONST (cfg, ins, val);
*sp++= ins;
ip += 6;
break;
}
}
+ if (seq_points) {
+ MonoBasicBlock *bb;
+
+ /*
+ * Make seq points at backward branch targets interruptable.
+ */
+ for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
+ if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
+ bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
+ }
+
/* Add a sequence point for method entry/exit events */
if (seq_points) {
NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
case OP_IREM:
case OP_IREM_UN:
return -1;
+#endif
+#if defined(MONO_ARCH_EMULATE_MUL_DIV)
+ case OP_IMUL:
+ return -1;
#endif
default:
return mono_op_to_op_imm (opcode);