#include <mono/metadata/monitor.h>
#include <mono/metadata/profiler-private.h>
#include <mono/metadata/profiler.h>
+#include <mono/metadata/debug-mono-symfile.h>
#include <mono/utils/mono-compiler.h>
#include <mono/utils/mono-memory-model.h>
#include <mono/metadata/mono-basic-block.h>
#define BRANCH_COST 10
#define INLINE_LENGTH_LIMIT 20
-#define INLINE_FAILURE do {\
- if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
- goto inline_failure;\
+/*
+ * INLINE_FAILURE:
+ * Abort the current inline attempt (jump to inline_failure) when we are
+ * compiling an inlined callee (cfg->method != method) that is not a wrapper.
+ * The new variant takes MSG, printed at verbose_level >= 2, so failed
+ * inlines can be diagnosed; callers were updated from INLINE_FAILURE; to
+ * INLINE_FAILURE ("reason");.
+ */
+#define INLINE_FAILURE(msg) do { \
+ if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE)) { \
+ if (cfg->verbose_level >= 2) \
+ printf ("inline failed: %s\n", msg); \
+ goto inline_failure; \
+ } \
} while (0)
#define CHECK_CFG_EXCEPTION do {\
if (cfg->exception_type != MONO_EXCEPTION_NONE)\
goto exception_exit; \
} \
} while (0)
+/*
+ * GSHAREDVT_FAILURE:
+ * Give up compiling the current method in gsharedvt (generic sharing with
+ * valuetypes) mode when OPCODE cannot be handled: record a descriptive
+ * message (including method name and source location) in
+ * cfg->exception_message, set MONO_EXCEPTION_GENERIC_SHARING_FAILED and
+ * jump to exception_exit so the runtime falls back to a normal
+ * instantiation. No-op when cfg->gsharedvt is not set.
+ */
+#define GSHAREDVT_FAILURE(opcode) do { \
+ if (cfg->gsharedvt) { \
+ cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __FILE__, __LINE__); \
+ if (cfg->verbose_level >= 2) \
+ printf ("%s\n", cfg->exception_message); \
+ mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
+ goto exception_exit; \
+ } \
+ } while (0)
#define OUT_OF_MEMORY_FAILURE do { \
mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
goto exception_exit; \
int mono_op_to_op_imm_noemul (int opcode);
MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
-void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
-void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
/* helper methods signatures */
static MonoMethodSignature *helper_sig_class_init_trampoline = NULL;
case MONO_TYPE_VAR:
case MONO_TYPE_MVAR:
g_assert (cfg->generic_sharing_context);
- return OP_MOVE;
+ if (mini_type_var_is_vt (cfg, type))
+ return OP_VMOVE;
+ else
+ return OP_MOVE;
default:
g_error ("unknown type 0x%02x in type_to_regstore", type->type);
}
*/
#ifndef DISABLE_JIT
-#define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
+/*
+ * When using gsharedvt, some instantiations might be verifiable, and some might not be. i.e.
+ * foo<T> (int i) { ldarg.0; box T; }
+ * So under gsharedvt a verification failure is not treated as an error:
+ * MONO_EXCEPTION_GENERIC_SHARING_FAILED is raised instead, which makes the
+ * runtime fall back to compiling a concrete instantiation.
+ */
+#define UNVERIFIED do { \
+ if (cfg->gsharedvt) { \
+ if (cfg->verbose_level > 2) \
+ printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
+ mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
+ goto exception_exit; \
+ } \
+ if (mini_get_debug_options ()->break_on_unverified) \
+ G_BREAKPOINT (); \
+ else \
+ goto unverified; \
+} while (0)
#define LOAD_ERROR do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto load_error; } while (0)
+/* Like LOAD_ERROR, but also records KLASS in cfg->exception_ptr so the resulting type-load error can name the offending class. */
+#define TYPE_LOAD_ERROR(klass) do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else { cfg->exception_ptr = klass; goto load_error; } } while (0)
+
#define GET_BBLOCK(cfg,tblock,ip) do { \
(tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
if (!(tblock)) { \
case MONO_TYPE_GENERICINST:
type = &type->data.generic_class->container_class->byval_arg;
goto handle_enum;
- case MONO_TYPE_VAR :
- case MONO_TYPE_MVAR :
- /* FIXME: all the arguments must be references for now,
- * later look inside cfg and see if the arg num is
- * really a reference
- */
+ case MONO_TYPE_VAR:
+ case MONO_TYPE_MVAR:
g_assert (cfg->generic_sharing_context);
- inst->type = STACK_OBJ;
+ if (mini_is_gsharedvt_type (cfg, type)) {
+ g_assert (cfg->gsharedvt);
+ inst->type = STACK_VTYPE;
+ } else {
+ inst->type = STACK_OBJ;
+ }
return;
default:
g_error ("unknown type 0x%02x in eval stack type", type->type);
}
case MONO_TYPE_VAR:
case MONO_TYPE_MVAR:
- /* FIXME: all the arguments must be references for now,
- * later look inside cfg and see if the arg num is
- * really a reference
- */
g_assert (cfg->generic_sharing_context);
- if (arg->type != STACK_OBJ)
- return 1;
+ if (mini_type_var_is_vt (cfg, simple_type)) {
+ if (arg->type != STACK_VTYPE)
+ return 1;
+ } else {
+ if (arg->type != STACK_OBJ)
+ return 1;
+ }
return 0;
default:
g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
#ifdef MONO_ARCH_HAVE_IMT
static void
-emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
+emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
{
int method_reg;
if (imt_arg) {
MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
} else if (cfg->compile_aot) {
- MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
+ MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
} else {
MonoInst *ins;
MONO_INST_NEW (cfg, ins, OP_PCONST);
- ins->inst_p0 = call->method;
+ ins->inst_p0 = method;
ins->dreg = method_reg;
MONO_ADD_INS (cfg->cbb, ins);
}
if (imt_arg) {
MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
} else if (cfg->compile_aot) {
- MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
+ MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
} else {
MonoInst *ins;
MONO_INST_NEW (cfg, ins, OP_PCONST);
- ins->inst_p0 = call->method;
+ ins->inst_p0 = method;
ins->dreg = method_reg;
MONO_ADD_INS (cfg->cbb, ins);
}
inline static MonoCallInst *
mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
- MonoInst **args, int calli, int virtual, int tail, int rgctx)
+ MonoInst **args, int calli, int virtual, int tail, int rgctx, int unbox_trampoline)
{
MonoCallInst *call;
#ifdef MONO_ARCH_SOFT_FLOAT
type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
if (tail) {
- if (MONO_TYPE_ISSTRUCT (sig->ret)) {
+ if (mini_type_is_vtype (cfg, sig->ret)) {
call->vret_var = cfg->vret_addr;
//g_assert_not_reached ();
}
- } else if (MONO_TYPE_ISSTRUCT (sig->ret)) {
+ } else if (mini_type_is_vtype (cfg, sig->ret)) {
MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
MonoInst *loada;
}
#endif
+ call->need_unbox_trampoline = unbox_trampoline;
+
#ifdef ENABLE_LLVM
if (COMPILE_LLVM (cfg))
mono_llvm_emit_call (cfg, call);
MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
}
- call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE);
+ call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
call->inst.sreg1 = addr->dreg;
return (MonoInst*)call;
}
+/*
+ * emit_gsharedvt_call:
+ * This is like calli (an indirect call through ADDR), but we pass
+ * rgctx/imt arguments as well: when RGCTX_ARG is given its value is moved
+ * into a freshly allocated preg and attached to the call via
+ * set_rgctx_arg; when IMT_ARG is given it is passed through
+ * emit_imt_argument for METHOD. Used for making generic calls out of
+ * gsharedvt methods. Returns the emitted call instruction.
+ */
+static MonoInst*
+emit_gsharedvt_call (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoMethod *method, MonoInst *imt_arg, MonoInst *rgctx_arg)
+{
+ MonoCallInst *call;
+ int rgctx_reg = -1;
+
+ if (rgctx_arg) {
+ rgctx_reg = mono_alloc_preg (cfg);
+ MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
+ }
+
+ call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
+
+ call->inst.sreg1 = addr->dreg;
+
+ if (imt_arg)
+ emit_imt_argument (cfg, call, method, imt_arg);
+
+ MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
+
+ if (rgctx_arg)
+ set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
+
+ return (MonoInst*)call;
+}
+
static MonoInst*
emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
static MonoInst*
int context_used;
MonoCallInst *call;
int rgctx_reg = 0;
+ gboolean need_unbox_trampoline;
if (rgctx_arg) {
rgctx_reg = mono_alloc_preg (cfg);
return mono_emit_calli (cfg, sig, args, addr, NULL);
}
- call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE, rgctx_arg ? TRUE : FALSE);
+ need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
+
+ call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
if (might_be_remote)
call->method = mono_marshal_get_remoting_invoke_with_check (method);
#ifdef MONO_ARCH_HAVE_IMT
if (mono_use_imt) {
guint32 imt_slot = mono_method_get_imt_slot (method);
- emit_imt_argument (cfg, call, imt_arg);
+ emit_imt_argument (cfg, call, call->method, imt_arg);
slot_reg = vtable_reg;
call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
}
#ifdef MONO_ARCH_HAVE_IMT
if (imt_arg) {
g_assert (mono_method_signature (method)->generic_param_count);
- emit_imt_argument (cfg, call, imt_arg);
+ emit_imt_argument (cfg, call, call->method, imt_arg);
}
#endif
}
g_assert (sig);
- call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE);
+ call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
call->fptr = func;
MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
{
MonoInst *iargs [4];
- int n;
+ int context_used, n;
guint32 align = 0;
MonoMethod *memcpy_method;
+ MonoInst *size_ins = NULL;
g_assert (klass);
/*
* g_assert (klass && klass == src->klass && klass == dest->klass);
*/
+ if (mini_is_gsharedvt_klass (cfg, klass)) {
+ g_assert (!native);
+ context_used = mono_class_check_context_used (klass);
+ size_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_VALUE_SIZE);
+ }
+
if (native)
n = mono_class_native_size (klass, &align);
else
context_used = mono_class_check_context_used (klass);
/* It's ok to intrinsify under gsharing since shared code types are layout stable. */
- if ((cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
+ if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
return;
} else if (context_used) {
iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
}
}
- if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
+ if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
/* FIXME: Optimize the case when src/dest is OP_LDADDR */
mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
} else {
iargs [0] = dest;
iargs [1] = src;
- EMIT_NEW_ICONST (cfg, iargs [2], n);
+ if (size_ins)
+ iargs [2] = size_ins;
+ else
+ EMIT_NEW_ICONST (cfg, iargs [2], n);
memcpy_method = get_memcpy_method ();
mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
{
MonoInst *iargs [3];
- int n;
+ int n, context_used;
guint32 align;
MonoMethod *memset_method;
+ MonoInst *size_ins = NULL;
/* FIXME: Optimize this for the case when dest is an LDADDR */
mono_class_init (klass);
- n = mono_class_value_size (klass, &align);
+ if (mini_is_gsharedvt_klass (cfg, klass)) {
+ context_used = mono_class_check_context_used (klass);
+ size_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_VALUE_SIZE);
+ n = -1;
+ } else {
+ n = mono_class_value_size (klass, &align);
+ }
- if (n <= sizeof (gpointer) * 5) {
+ if (!size_ins && n <= sizeof (gpointer) * 5) {
mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
}
else {
memset_method = get_memset_method ();
iargs [0] = dest;
EMIT_NEW_ICONST (cfg, iargs [1], 0);
- EMIT_NEW_ICONST (cfg, iargs [2], n);
+ if (size_ins)
+ iargs [2] = size_ins;
+ else
+ EMIT_NEW_ICONST (cfg, iargs [2], n);
mono_emit_method_call (cfg, memset_method, iargs, NULL);
}
}
#endif
}
+/*
+ * emit_seq_point:
+ * Emit a sequence point at IL offset (ip - cfg->header->code) into the
+ * current bblock, but only when sequence-point generation is enabled and
+ * METHOD is the method actually being compiled (i.e. not an inlined
+ * callee). INTR_LOC is forwarded to NEW_SEQ_POINT — presumably it marks
+ * the point as an interruptible location for the debugger; confirm
+ * against the NEW_SEQ_POINT definition.
+ */
+static void
+emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc)
+{
+ MonoInst *ins;
+
+ if (cfg->gen_seq_points && cfg->method == method) {
+ NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
+ MONO_ADD_INS (cfg->cbb, ins);
+ }
+}
+
static void
save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
{
if (!alloc)
return NULL;
- EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
+ if (mini_is_gsharedvt_klass (cfg, klass)) {
+ EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
+ ins->opcode = OP_STOREV_MEMBASE;
+ } else {
+ EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
+ }
return alloc;
}
return;
MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
+ getaddr->cil_code = cfg->header->code;
getaddr->dreg = cfg->got_var->dreg;
/* Add it to the start of the first bblock */
MonoInst *ins;
guint32 size;
int mult_reg, add_reg, array_reg, index_reg, index2_reg;
+ int context_used;
- mono_class_init (klass);
- size = mono_class_array_element_size (klass);
+ if (mini_is_gsharedvt_klass (cfg, klass)) {
+ size = -1;
+ } else {
+ mono_class_init (klass);
+ size = mono_class_array_element_size (klass);
+ }
mult_reg = alloc_preg (cfg);
array_reg = arr->dreg;
add_reg = alloc_ireg_mp (cfg);
- MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
+ if (size == -1) {
+ MonoInst *rgctx_ins;
+
+ /* gsharedvt */
+ g_assert (cfg->generic_sharing_context);
+ context_used = mono_class_check_context_used (klass);
+ g_assert (context_used);
+ rgctx_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
+ MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
+ } else {
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
+ }
MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
ins->klass = mono_class_get_element_class (klass);
int realidx1_reg = alloc_preg (cfg);
int realidx2_reg = alloc_preg (cfg);
int sum_reg = alloc_preg (cfg);
- int index1, index2;
+ int index1, index2, tmpreg;
MonoInst *ins;
guint32 size;
index1 = index_ins1->dreg;
index2 = index_ins2->dreg;
+#if SIZEOF_REGISTER == 8
+ /* The array reg is 64 bits but the index reg is only 32 */
+ if (COMPILE_LLVM (cfg)) {
+ /* Not needed */
+ } else {
+ tmpreg = alloc_preg (cfg);
+ MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
+ index1 = tmpreg;
+ tmpreg = alloc_preg (cfg);
+ MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
+ index2 = tmpreg;
+ }
+#else
+ // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
+ tmpreg = -1;
+#endif
+
/* range checking */
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
if (is_set) {
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
+ if (mini_type_is_reference (cfg, fsig->params [2]))
+ emit_write_barrier (cfg, addr, load, -1);
} else {
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
return store;
}
+
+/* Whether KLASS is a reference type under the current (possibly generic-shared) compile; thin wrapper over mini_type_is_reference on the class's byval type. */
+static gboolean
+generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
+{
+ return mini_type_is_reference (cfg, &klass->byval_arg);
+}
+
+/*
+ * emit_array_store:
+ * Emit IR for a stelem-style array store. SP holds the operands:
+ * sp [0] = array, sp [1] = index, sp [2] = value.
+ * When SAFETY_CHECKS is set, the element type is a reference type, and the
+ * value is not a known null constant, the store goes through the virtual
+ * stelemref marshal helper (which carries the array covariance check);
+ * in that path NULL is returned if the operand stack types are not the
+ * expected STACK_OBJ. Otherwise the store is emitted inline: gsharedvt
+ * element classes use an OP_STOREV_MEMBASE through a computed element
+ * address, constant indexes store at a precomputed offset (with an
+ * optional bounds check), and the general case computes the element
+ * address and adds a write barrier for reference elements.
+ */
+static MonoInst*
+emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
+{
+ if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
+ !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
+ MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
+ MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
+ MonoInst *iargs [3];
+
+ if (!helper->slot)
+ mono_class_setup_vtable (obj_array);
+ g_assert (helper->slot);
+
+ if (sp [0]->type != STACK_OBJ)
+ return NULL;
+ if (sp [2]->type != STACK_OBJ)
+ return NULL;
+
+ iargs [2] = sp [2];
+ iargs [1] = sp [1];
+ iargs [0] = sp [0];
+
+ return mono_emit_method_call (cfg, helper, iargs, sp [0]);
+ } else {
+ MonoInst *ins;
+
+ if (mini_is_gsharedvt_klass (cfg, klass)) {
+ MonoInst *addr;
+
+ // FIXME-VT: OP_ICONST optimization
+ addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
+ EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
+ ins->opcode = OP_STOREV_MEMBASE;
+ } else if (sp [1]->opcode == OP_ICONST) {
+ int array_reg = sp [0]->dreg;
+ int index_reg = sp [1]->dreg;
+ int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
+
+ if (safety_checks)
+ MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
+ EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
+ } else {
+ MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
+ EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
+ if (generic_class_is_reference_type (cfg, klass))
+ emit_write_barrier (cfg, addr, sp [2], -1);
+ }
+ return ins;
+ }
+}
+
+/*
+ * emit_array_unsafe_access:
+ * Intrinsic body for Array.UnsafeStore/UnsafeLoad: array element access
+ * with safety checks disabled (FALSE passed for the bounds/safety
+ * arguments). The element class comes from the signature — params [2]
+ * for a store, the return type for a load. IS_SET selects store vs load.
+ */
+static MonoInst*
+emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
+{
+ MonoClass *eklass;
+
+ if (is_set)
+ eklass = mono_class_from_mono_type (fsig->params [2]);
+ else
+ eklass = mono_class_from_mono_type (fsig->ret);
+
+
+ if (is_set) {
+ return emit_array_store (cfg, eklass, args, FALSE);
+ } else {
+ MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
+ EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
+ return ins;
+ }
+}
+
static MonoInst*
mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
return ins;
}
+/*
+ * llvm_emit_inst_for_method:
+ * Intrinsics supported by the LLVM backend for System.Math methods:
+ * Sin/Cos/Sqrt map to OP_SIN/OP_COS/OP_SQRT, Abs to OP_ABS (R8 argument
+ * only), and — when the CMOV optimization is enabled — Min/Max map to the
+ * signed/unsigned IMIN/IMAX/LMIN/LMAX opcodes. Returns the emitted
+ * instruction, or NULL when no intrinsic applies.
+ */
+static MonoInst*
+llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
+{
+ MonoInst *ins = NULL;
+ int opcode = 0;
+
+ /* The LLVM backend supports these intrinsics */
+ if (cmethod->klass == mono_defaults.math_class) {
+ if (strcmp (cmethod->name, "Sin") == 0) {
+ opcode = OP_SIN;
+ } else if (strcmp (cmethod->name, "Cos") == 0) {
+ opcode = OP_COS;
+ } else if (strcmp (cmethod->name, "Sqrt") == 0) {
+ opcode = OP_SQRT;
+ } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
+ opcode = OP_ABS;
+ }
+
+ if (opcode) {
+ MONO_INST_NEW (cfg, ins, opcode);
+ ins->type = STACK_R8;
+ ins->dreg = mono_alloc_freg (cfg);
+ ins->sreg1 = args [0]->dreg;
+ MONO_ADD_INS (cfg->cbb, ins);
+ }
+
+ opcode = 0;
+ if (cfg->opt & MONO_OPT_CMOV) {
+ /* NOTE(review): the U4 checks below use a plain `if` (not `else if`)
+ after the I4 case; harmless as written since the branches are
+ mutually exclusive on fsig->params [0]->type, but confirm intent. */
+ if (strcmp (cmethod->name, "Min") == 0) {
+ if (fsig->params [0]->type == MONO_TYPE_I4)
+ opcode = OP_IMIN;
+ if (fsig->params [0]->type == MONO_TYPE_U4)
+ opcode = OP_IMIN_UN;
+ else if (fsig->params [0]->type == MONO_TYPE_I8)
+ opcode = OP_LMIN;
+ else if (fsig->params [0]->type == MONO_TYPE_U8)
+ opcode = OP_LMIN_UN;
+ } else if (strcmp (cmethod->name, "Max") == 0) {
+ if (fsig->params [0]->type == MONO_TYPE_I4)
+ opcode = OP_IMAX;
+ if (fsig->params [0]->type == MONO_TYPE_U4)
+ opcode = OP_IMAX_UN;
+ else if (fsig->params [0]->type == MONO_TYPE_I8)
+ opcode = OP_LMAX;
+ else if (fsig->params [0]->type == MONO_TYPE_U8)
+ opcode = OP_LMAX_UN;
+ }
+ }
+
+ if (opcode) {
+ MONO_INST_NEW (cfg, ins, opcode);
+ /* NOTE(review): only I4 maps to STACK_I4 here, so a U4 argument
+ would get STACK_I8 — looks like a latent mismatch; confirm the
+ expected eval-stack type for U4 Min/Max. */
+ ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
+ ins->dreg = mono_alloc_ireg (cfg);
+ ins->sreg1 = args [0]->dreg;
+ ins->sreg2 = args [1]->dreg;
+ MONO_ADD_INS (cfg->cbb, ins);
+ }
+ }
+
+ return ins;
+}
+
+/*
+ * mini_emit_inst_for_sharable_method:
+ * Intrinsics that are safe to apply in generic-shared code. Currently
+ * handles only System.Array's UnsafeStore/UnsafeLoad, delegating to
+ * emit_array_unsafe_access. Returns NULL when CMETHOD is not handled.
+ */
+static MonoInst*
+mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
+{
+ if (cmethod->klass == mono_defaults.array_class) {
+ if (strcmp (cmethod->name, "UnsafeStore") == 0)
+ return emit_array_unsafe_access (cfg, fsig, args, TRUE);
+ if (strcmp (cmethod->name, "UnsafeLoad") == 0)
+ return emit_array_unsafe_access (cfg, fsig, args, FALSE);
+ }
+
+ return NULL;
+}
+
static MonoInst*
mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
}
#endif
+ if (COMPILE_LLVM (cfg)) {
+ ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
+ if (ins)
+ return ins;
+ }
+
return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
}
return costs + 1;
} else {
if (cfg->verbose_level > 2)
- printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
+ printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
cfg->exception_type = MONO_EXCEPTION_NONE;
mono_loader_clear_error ();
}
return 0;
unverified:
+exception_exit:
*pos = ip;
return 1;
}
cfg->exception_ptr = exception;
}
-static gboolean
-generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
-{
- return mini_type_is_reference (cfg, &klass->byval_arg);
-}
-
static void
emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
{
/* Debugging support */
#if 0
if (supported_tail_call) {
- static int count = 0;
- count ++;
- if (getenv ("COUNT")) {
- if (count == atoi (getenv ("COUNT")))
- printf ("LAST: %s\n", mono_method_full_name (cmethod, TRUE));
- if (count > atoi (getenv ("COUNT")))
- supported_tail_call = FALSE;
- }
+ if (!mono_debug_count ())
+ supported_tail_call = FALSE;
}
#endif
gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
int context_used;
gboolean init_locals, seq_points, skip_dead_blocks;
- gboolean disable_inline;
+ gboolean disable_inline, sym_seq_points = FALSE;
MonoInst *cached_tls_addr = NULL;
+ MonoDebugMethodInfo *minfo;
+ MonoBitSet *seq_point_locs = NULL;
disable_inline = is_jit_optimizer_disabled (method);
init_locals = header->init_locals;
seq_points = cfg->gen_seq_points && cfg->method == method;
+#ifdef PLATFORM_ANDROID
+ seq_points &= cfg->method->wrapper_type == MONO_WRAPPER_NONE;
+#endif
+
+ if (cfg->gen_seq_points && cfg->method == method) {
+ minfo = mono_debug_lookup_method (method);
+ if (minfo) {
+ int i, n_il_offsets;
+ int *il_offsets;
+ int *line_numbers;
+
+ mono_debug_symfile_get_line_numbers_full (minfo, NULL, NULL, &n_il_offsets, &il_offsets, &line_numbers, NULL, NULL);
+ seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
+ sym_seq_points = TRUE;
+ for (i = 0; i < n_il_offsets; ++i) {
+ if (il_offsets [i] < header->code_size)
+ mono_bitset_set_fast (seq_point_locs, il_offsets [i]);
+ }
+ }
+ }
/*
* Methods without init_locals set could cause asserts in various passes
* Currently, we generate these automatically at points where the IL
* stack is empty.
*/
- if (seq_points && sp == stack_start) {
+ if (seq_points && ((sp == stack_start) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
/*
* Make methods interruptable at the beginning, and at the targets of
* backward branches.
switch (*ip) {
case CEE_NOP:
- if (seq_points && sp != stack_start) {
+ if (seq_points && !sym_seq_points && sp != stack_start) {
/*
* The C# compiler uses these nops to notify the JIT that it should
* insert seq points.
case CEE_JMP: {
MonoCallInst *call;
- INLINE_FAILURE;
+ INLINE_FAILURE ("jmp");
+ GSHAREDVT_FAILURE (*ip);
CHECK_OPSIZE (5);
if (stack_start != sp)
MonoInst *vtable_arg = NULL;
gboolean check_this = FALSE;
gboolean supported_tail_call = FALSE;
+ gboolean need_seq_point = FALSE;
+ guint32 call_opcode = *ip;
+ gboolean emit_widen = TRUE;
+ gboolean push_res = TRUE;
+ gboolean skip_ret = FALSE;
CHECK_OPSIZE (5);
token = read32 (ip + 1);
+ ins = NULL;
+
if (calli) {
+ GSHAREDVT_FAILURE (*ip);
cmethod = NULL;
CHECK_STACK (1);
--sp;
if (cfg->verbose_level > 2)
printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_call));
+ GSHAREDVT_FAILURE (*ip);
+
if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
/*
* This is needed since get_method_constrained can't find
cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
cil_method = cmethod;
}
-
+
if (!cmethod || mono_loader_get_last_error ())
LOAD_ERROR;
if (!dont_verify && !cfg->skip_visibility) {
if (!cmethod->klass->inited)
if (!mono_class_init (cmethod->klass))
- LOAD_ERROR;
+ TYPE_LOAD_ERROR (cmethod->klass);
if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
mini_class_is_system_array (cmethod->klass)) {
mono_save_token_info (cfg, image, token, cil_method);
+ if (!MONO_TYPE_IS_VOID (fsig->ret) && !sym_seq_points) {
+ /*
+ * Need to emit an implicit seq point after every non-void call so single stepping through nested calls like
+ * foo (bar (), baz ())
+ * works correctly. MS does this also:
+ * http://stackoverflow.com/questions/6937198/making-your-net-language-step-correctly-in-the-debugger
+ * The problem with this approach is that the debugger will stop after all calls returning a value,
+ * even for simple cases, like:
+ * int i = foo ();
+ */
+ /* Special case a few common successor opcodes */
+ if (!(ip + 5 < end && ip [5] == CEE_POP))
+ need_seq_point = TRUE;
+ }
+
n = fsig->param_count + fsig->hasthis;
+ /* Don't support calls made using type arguments for now */
+ /*
+ if (cfg->gsharedvt) {
+ if (mini_is_gsharedvt_signature (cfg, fsig))
+ GSHAREDVT_FAILURE (*ip);
+ }
+ */
+
if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
if (check_linkdemand (cfg, method, cmethod))
- INLINE_FAILURE;
+ INLINE_FAILURE ("linkdemand");
CHECK_CFG_EXCEPTION;
}
/*
* We have the `constrained.' prefix opcode.
*/
- if (constrained_call->valuetype && !cmethod->klass->valuetype) {
+ if (constrained_call->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
/*
* The type parameter is instantiated as a valuetype,
* but that type doesn't override the method we're
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
ins->type = STACK_OBJ;
sp [0] = ins;
- } else if (cmethod->klass->valuetype)
+ } else {
+ if (cmethod->klass->valuetype) {
+ /* Own method */
+ } else {
+ /* Interface method */
+ int ioffset, slot;
+
+ mono_class_setup_vtable (constrained_call);
+ CHECK_TYPELOAD (constrained_call);
+ ioffset = mono_class_interface_offset (constrained_call, cmethod->klass);
+ if (ioffset == -1)
+ TYPE_LOAD_ERROR (constrained_call);
+ slot = mono_method_get_vtable_slot (cmethod);
+ if (slot == -1)
+ TYPE_LOAD_ERROR (cmethod->klass);
+ cmethod = constrained_call->vtable [ioffset + slot];
+
+ if (cmethod->klass == mono_defaults.enum_class) {
+ /* Enum implements some interfaces, so treat this as the first case */
+ EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
+ ins->klass = constrained_call;
+ sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call));
+ CHECK_CFG_EXCEPTION;
+ }
+ }
virtual = 0;
+ }
constrained_call = NULL;
}
- if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
+ if (!calli && check_call_signature (cfg, fsig, sp))
UNVERIFIED;
+ if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
+ bblock = cfg->cbb;
+ if (!MONO_TYPE_IS_VOID (fsig->ret)) {
+ type_to_eval_stack_type ((cfg), fsig->ret, ins);
+ emit_widen = FALSE;
+ }
+
+ goto call_end;
+ }
+
/*
* If the callee is a shared method, then its static cctor
* might not get called after the call was patched.
if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
(cmethod->klass->generic_class || cmethod->klass->generic_container)) {
- gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
- MonoGenericContext *context = mini_class_get_context (cmethod->klass);
- gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
+ gboolean sharable = FALSE;
+
+ if (mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
+ sharable = TRUE;
+ } else {
+ gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
+ MonoGenericContext *context = mini_class_get_context (cmethod->klass);
+ gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
+
+ sharable = sharing_enabled && context_sharable;
+ }
/*
* Pass vtable iff target method might
* context is sharable (and it's not a
* generic method).
*/
- if (sharing_enabled && context_sharable &&
- !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
+ if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
pass_vtable = TRUE;
}
if (cmethod && mini_method_get_context (cmethod) &&
mini_method_get_context (cmethod)->method_inst) {
- gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
- MonoGenericContext *context = mini_method_get_context (cmethod);
- gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
-
g_assert (!pass_vtable);
- if (sharing_enabled && context_sharable)
- pass_mrgctx = TRUE;
+ if (mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
+ pass_mrgctx = TRUE;
+ } else {
+ gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
+ MonoGenericContext *context = mini_method_get_context (cmethod);
+ gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
+
+ if (sharing_enabled && context_sharable)
+ pass_mrgctx = TRUE;
+ if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, mono_method_signature (cmethod)))
+ pass_mrgctx = TRUE;
+ }
}
if (cfg->generic_sharing_context && cmethod) {
g_assert (mono_method_signature (cmethod)->is_inflated);
/* Prevent inlining of methods that contain indirect calls */
- INLINE_FAILURE;
+ INLINE_FAILURE ("virtual generic call");
+
+ if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
+ GSHAREDVT_FAILURE (*ip);
#if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt) {
ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL);
}
- if (!MONO_TYPE_IS_VOID (fsig->ret))
- *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
-
- CHECK_CFG_EXCEPTION;
-
- ip += 5;
- ins_flag = 0;
- break;
+ goto call_end;
}
/*
bblock = cfg->cbb;
if (!MONO_TYPE_IS_VOID (fsig->ret)) {
type_to_eval_stack_type ((cfg), fsig->ret, ins);
- *sp = ins;
- sp++;
+ emit_widen = FALSE;
}
-
- CHECK_CFG_EXCEPTION;
-
- ip += 5;
- ins_flag = 0;
- break;
+ goto call_end;
}
/* Inlining */
if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
(cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
/* Prevent inlining of methods that call wrappers */
- INLINE_FAILURE;
+ INLINE_FAILURE ("wrapper call");
cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
always = TRUE;
}
- if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, always))) {
- ip += 5;
+ costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, always);
+ if (costs) {
cfg->real_offset += 5;
bblock = cfg->cbb;
- if (!MONO_TYPE_IS_VOID (fsig->ret))
+ if (!MONO_TYPE_IS_VOID (fsig->ret)) {
/* *sp is already set by inline_method */
sp++;
+ push_res = FALSE;
+ }
inline_costs += costs;
- ins_flag = 0;
- break;
+
+ goto call_end;
}
}
+
+ /*
+ * Making generic calls out of gsharedvt methods.
+ */
+ if (cmethod && cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
+ MonoInst *addr;
+
+ if (virtual) {
+ //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
+ //GSHAREDVT_FAILURE (*ip);
+ // disable for possible remoting calls
+ if (fsig->hasthis && (method->klass->marshalbyref || method->klass == mono_defaults.object_class))
+ GSHAREDVT_FAILURE (*ip);
+ // virtual generic calls were disabled earlier
+ }
+
+ if (cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY)
+ /* test_0_multi_dim_arrays () in gshared.cs */
+ GSHAREDVT_FAILURE (*ip);
+
+ if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
+ addr = emit_get_rgctx_method (cfg, context_used,
+ cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT);
+ else
+ addr = emit_get_rgctx_method (cfg, context_used,
+ cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
+ ins = emit_gsharedvt_call (cfg, fsig, sp, addr, cmethod, imt_arg, vtable_arg);
+
+ goto call_end;
+ }
+
+ if (virtual && cmethod && cfg->gsharedvt && cmethod->slot == -1) {
+ mono_class_setup_vtable (cmethod->klass);
+ if (cmethod->slot == -1)
+ // FIXME: How can this happen ?
+ GSHAREDVT_FAILURE (*ip);
+ }
inline_costs += 10 * num_calls++;
/* Tail recursion elimination */
- if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
+ if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
gboolean has_vtargs = FALSE;
int i;
/* Prevent inlining of methods with tail calls (the call stack would be altered) */
- INLINE_FAILURE;
+ INLINE_FAILURE ("tail call");
/* keep it simple */
for (i = fsig->param_count - 1; i >= 0; i--) {
/* skip the CEE_RET, too */
if (ip_in_bb (cfg, bblock, ip + 5))
- ip += 6;
- else
- ip += 5;
-
- ins_flag = 0;
- break;
+ skip_ret = TRUE;
+ push_res = FALSE;
+ goto call_end;
}
}
!mono_class_generic_sharing_enabled (cmethod->klass)) &&
(!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
- INLINE_FAILURE;
+ INLINE_FAILURE ("gshared");
g_assert (cfg->generic_sharing_context && cmethod);
g_assert (!addr);
if (addr) {
g_assert (!imt_arg);
- if (*ip == CEE_CALL)
+ if (call_opcode == CEE_CALL)
g_assert (context_used);
- else if (*ip == CEE_CALLI)
+ else if (call_opcode == CEE_CALLI)
g_assert (!vtable_arg);
else
/* FIXME: what the hell is this??? */
!(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
/* Prevent inlining of methods with indirect calls */
- INLINE_FAILURE;
+ INLINE_FAILURE ("indirect call");
if (vtable_arg) {
ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, vtable_arg);
ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL);
}
}
- if (!MONO_TYPE_IS_VOID (fsig->ret))
- *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
-
- CHECK_CFG_EXCEPTION;
- ip += 5;
- ins_flag = 0;
- break;
+ goto call_end;
}
/* Array methods */
addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
-
- *sp++ = ins;
} else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
if (!cmethod->klass->element_class->valuetype && !readonly)
mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
readonly = FALSE;
addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
- *sp++ = addr;
+ ins = addr;
} else {
g_assert_not_reached ();
}
- CHECK_CFG_EXCEPTION;
-
- ip += 5;
- ins_flag = 0;
- break;
+ emit_widen = FALSE;
+ goto call_end;
}
ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
- if (ins) {
- if (!MONO_TYPE_IS_VOID (fsig->ret))
- *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
-
- CHECK_CFG_EXCEPTION;
-
- ip += 5;
- ins_flag = 0;
- break;
- }
+ if (ins)
+ goto call_end;
/* Tail prefix / tail call optimization */
/* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
/* FIXME: runtime generic context pointer for jumps? */
/* FIXME: handle this for generic sharing eventually */
- supported_tail_call = cmethod &&
- ((((ins_flag & MONO_INST_TAILCALL) && (*ip == CEE_CALL))
+ if (cmethod &&
+ ((((ins_flag & MONO_INST_TAILCALL) && (call_opcode == CEE_CALL))
))//|| ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && ip [5] == CEE_RET))
- && !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig);
-
+ && !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig))
+ supported_tail_call = TRUE;
if (supported_tail_call) {
MonoCallInst *call;
/* Prevent inlining of methods with tail calls (the call stack would be altered) */
- INLINE_FAILURE;
+ INLINE_FAILURE ("tail call");
//printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
#ifdef MONO_ARCH_USE_OP_TAIL_CALL
/* Handle tail calls similarly to calls */
- call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE, FALSE);
+ call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE, FALSE, FALSE);
#else
MONO_INST_NEW_CALL (cfg, call, OP_JMP);
call->tail_call = TRUE;
link_bblock (cfg, bblock, end_bblock);
start_new_bblock = 1;
- CHECK_CFG_EXCEPTION;
-
- ip += 5;
- ins_flag = 0;
-
// FIXME: Eliminate unreachable epilogs
/*
* OP_TAILCALL has no return value, so skip the CEE_RET if it is
* only reachable from this call.
*/
- GET_BBLOCK (cfg, tblock, ip);
+ GET_BBLOCK (cfg, tblock, ip + 5);
if (tblock == bblock || tblock->in_count == 0)
- ip += 1;
- break;
+ skip_ret = TRUE;
+ push_res = TRUE;
+
+ goto call_end;
}
+ /*
+ * Synchronized wrappers.
+	 * It's hard to determine where to replace a method with its synchronized
+ * wrapper without causing an infinite recursion. The current solution is
+ * to add the synchronized wrapper in the trampolines, and to
+ * change the called method to a dummy wrapper, and resolve that wrapper
+ * to the real method in mono_jit_compile_method ().
+ */
+ if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED && mono_marshal_method_from_wrapper (cfg->method) == cmethod)
+ cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
+
/* Common call */
- INLINE_FAILURE;
+ INLINE_FAILURE ("call");
ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
imt_arg, vtable_arg);
- if (!MONO_TYPE_IS_VOID (fsig->ret))
- *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
+ call_end:
+
+ /* End of call, INS should contain the result of the call, if any */
+
+ if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
+ g_assert (ins);
+ if (emit_widen)
+ *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
+ else
+ *sp++ = ins;
+ }
CHECK_CFG_EXCEPTION;
ip += 5;
+ if (skip_ret) {
+ g_assert (*ip == CEE_RET);
+ ip += 1;
+ }
ins_flag = 0;
+ if (need_seq_point)
+ emit_seq_point (cfg, method, ip, FALSE);
break;
}
case CEE_RET:
* (test case: test_0_inline_throw ()).
*/
if (return_var && cfg->cbb->in_count) {
+ MonoType *ret_type = mono_method_signature (method)->ret;
+
MonoInst *store;
CHECK_STACK (1);
--sp;
+
+ if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
+ UNVERIFIED;
+
//g_assert (returnvar != -1);
EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
cfg->ret_var_set = TRUE;
if (cfg->ret) {
MonoType *ret_type = mono_method_signature (method)->ret;
- if (seq_points) {
+ if (seq_points && !sym_seq_points) {
/*
* Place a seq point here too even through the IL stack is not
* empty, so a step over on
/* Use the immediate opcodes if possible */
if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
- int imm_opcode = mono_op_to_op_imm (ins->opcode);
+ int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
if (imm_opcode != -1) {
ins->opcode = imm_opcode;
ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
ip++;
break;
case CEE_CPOBJ:
+ GSHAREDVT_FAILURE (*ip);
CHECK_OPSIZE (5);
CHECK_STACK (2);
token = read32 (ip + 1);
mono_save_token_info (cfg, image, token, cmethod);
if (!mono_class_init (cmethod->klass))
- LOAD_ERROR;
+ TYPE_LOAD_ERROR (cmethod->klass);
if (cfg->generic_sharing_context)
context_used = mono_method_check_context_used (cmethod);
if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
if (check_linkdemand (cfg, method, cmethod))
- INLINE_FAILURE;
+ INLINE_FAILURE ("linkdemand");
CHECK_CFG_EXCEPTION;
} else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
CHECK_TYPELOAD (cmethod->klass);
}
+ if (cmethod->klass->valuetype)
+ GSHAREDVT_FAILURE (*ip);
+
+ /*
+ if (cfg->gsharedvt) {
+ if (mini_is_gsharedvt_variable_signature (sig))
+ GSHAREDVT_FAILURE (*ip);
+ }
+ */
+
if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
inline_costs += costs - 5;
} else {
- INLINE_FAILURE;
+ INLINE_FAILURE ("inline failure");
+ // FIXME-VT: Clean this up
+ if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
+ GSHAREDVT_FAILURE(*ip);
mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL, NULL);
}
+ } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
+ MonoInst *addr;
+
+ addr = emit_get_rgctx_method (cfg, context_used,
+ cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
+ mono_emit_calli (cfg, fsig, sp, addr, vtable_arg);
} else if (context_used &&
(!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
!mono_class_generic_sharing_enabled (cmethod->klass))) {
mono_emit_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
} else {
- INLINE_FAILURE;
+ INLINE_FAILURE ("ctor call");
ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp,
callvirt_this_arg, NULL, vtable_arg);
}
if (cfg->generic_sharing_context)
context_used = mono_class_check_context_used (klass);
+ if (mini_is_gsharedvt_klass (cfg, klass))
+ /* Need to check for nullable types at runtime */
+ GSHAREDVT_FAILURE (*ip);
+
if (generic_class_is_reference_type (cfg, klass)) {
/* CASTCLASS FIXME kill this huge slice of duplicated code*/
if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
is_instance = FALSE;
}
+ if (cfg->generic_sharing_context)
+ context_used = mono_class_check_context_used (klass);
+
/* INSTANCE CASE */
foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
MonoInst *iargs [5];
+ GSHAREDVT_FAILURE (op);
+
iargs [0] = sp [0];
EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
- EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
+ if (mini_is_gsharedvt_klass (cfg, klass)) {
+ MonoInst *offset_ins;
+
+ if (cfg->generic_sharing_context)
+ context_used = mono_class_check_context_used (klass);
+
+ offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
+ dreg = alloc_ireg_mp (cfg);
+ EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
+ EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
+ // FIXME-VT: wbarriers ?
+ } else {
+ EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
+ }
if (sp [0]->opcode != OP_LDADDR)
store->flags |= MONO_INST_FAULT;
MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
MonoInst *iargs [4];
+ GSHAREDVT_FAILURE (op);
+
iargs [0] = sp [0];
EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
if (op == CEE_LDFLDA) {
if (is_magic_tls_access (field)) {
+ GSHAREDVT_FAILURE (*ip);
ins = sp [0];
*sp++ = create_magic_tls_access (cfg, field, &cached_tls_addr, ins);
} else {
dreg = alloc_ireg_mp (cfg);
- EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
+ if (mini_is_gsharedvt_klass (cfg, klass)) {
+ MonoInst *offset_ins;
+
+ offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
+ EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
+ } else {
+ EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
+ }
ins->klass = mono_class_from_mono_type (field->type);
ins->type = STACK_MP;
*sp++ = ins;
MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
- EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
+ if (mini_is_gsharedvt_klass (cfg, klass)) {
+ MonoInst *offset_ins;
+
+ offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
+ dreg = alloc_ireg_mp (cfg);
+ EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
+ EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
+ } else {
+ EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
+ }
load->flags |= ins_flag;
if (sp [0]->opcode != OP_LDADDR)
load->flags |= MONO_INST_FAULT;
ftype = mono_field_get_type (field);
- g_assert (!(ftype->attrs & FIELD_ATTRIBUTE_LITERAL));
+ if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
+ UNVERIFIED;
/* The special_static_fields field is init'd in mono_class_vtable, so it needs
* to be called here.
int idx, static_data_reg, array_reg, dreg;
MonoInst *thread_ins;
+ GSHAREDVT_FAILURE (op);
+
// offset &= 0x7fffffff;
// idx = (offset >> 24) - 1;
// return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
static_data = emit_get_rgctx_klass (cfg, context_used,
klass, MONO_RGCTX_INFO_STATIC_DATA);
- if (field->offset == 0) {
+ if (mini_is_gsharedvt_klass (cfg, klass)) {
+ MonoInst *offset_ins;
+
+ offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
+ dreg = alloc_ireg_mp (cfg);
+ EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
+ } else if (field->offset == 0) {
ins = static_data;
} else {
int addr_reg = mono_alloc_preg (cfg);
/* .cctors: too many apps depend on them */
/* running with a specific order... */
if (! vtable->initialized)
- INLINE_FAILURE;
+ INLINE_FAILURE ("class init");
ex = mono_runtime_class_init_full (vtable, FALSE);
if (ex) {
set_exception_object (cfg, ex);
if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
}
+
+ GSHAREDVT_FAILURE (op);
+
/* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
is_const = TRUE;
switch (ro_type) {
ins->sreg1 = sp [0]->dreg;
ins->inst_newa_class = klass;
ins->type = STACK_OBJ;
- ins->klass = klass;
+ ins->klass = array_type;
MONO_ADD_INS (cfg->cbb, ins);
cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
cfg->cbb->has_array_access = TRUE;
cfg->flags |= MONO_CFG_HAS_LDELEMA;
- if (sp [1]->opcode == OP_ICONST) {
+ if (mini_is_gsharedvt_klass (cfg, klass)) {
+ // FIXME-VT: OP_ICONST optimization
+ addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
+ EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
+ ins->opcode = OP_LOADV_MEMBASE;
+ } else if (sp [1]->opcode == OP_ICONST) {
int array_reg = sp [0]->dreg;
int index_reg = sp [1]->dreg;
int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
case CEE_STELEM_R8:
case CEE_STELEM_REF:
case CEE_STELEM: {
- MonoInst *addr;
-
CHECK_STACK (3);
sp -= 3;
if (sp [0]->type != STACK_OBJ)
UNVERIFIED;
- /* storing a NULL doesn't need any of the complex checks in stelemref */
- if (generic_class_is_reference_type (cfg, klass) &&
- !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
- MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
- MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
- MonoInst *iargs [3];
-
- if (!helper->slot)
- mono_class_setup_vtable (obj_array);
- g_assert (helper->slot);
-
- if (sp [0]->type != STACK_OBJ)
- UNVERIFIED;
- if (sp [2]->type != STACK_OBJ)
- UNVERIFIED;
-
- iargs [2] = sp [2];
- iargs [1] = sp [1];
- iargs [0] = sp [0];
-
- mono_emit_method_call (cfg, helper, iargs, sp [0]);
- } else {
- if (sp [1]->opcode == OP_ICONST) {
- int array_reg = sp [0]->dreg;
- int index_reg = sp [1]->dreg;
- int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
-
- MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
- EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
- } else {
- addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
- EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
- }
- }
+ emit_array_store (cfg, klass, sp, TRUE);
if (*ip == CEE_STELEM)
ip += 5;
int klass_reg = alloc_preg (cfg);
int dreg = alloc_preg (cfg);
+ GSHAREDVT_FAILURE (*ip);
+
CHECK_STACK (1);
MONO_INST_NEW (cfg, ins, *ip);
--sp;
case CEE_MKREFANY: {
MonoInst *loc, *addr;
+ GSHAREDVT_FAILURE (*ip);
+
CHECK_STACK (1);
MONO_INST_NEW (cfg, ins, *ip);
--sp;
start_new_bblock = 1;
break;
case CEE_ENDFINALLY:
+ /* mono_save_seq_point_info () depends on this */
+ if (sp != stack_start)
+ emit_seq_point (cfg, method, ip, FALSE);
MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
MONO_ADD_INS (bblock, ins);
ip++;
token = read32 (ip + 2);
func = mono_method_get_wrapper_data (method, token);
info = mono_find_jit_icall_by_addr (func);
+ if (!info)
+ g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
g_assert (info);
CHECK_STACK (info->sig->param_count);
ip += 5;
break;
}
+ case CEE_MONO_JIT_ATTACH: {
+ MonoInst *args [16];
+ MonoInst *ad_ins, *lmf_ins;
+ MonoBasicBlock *next_bb = NULL;
+
+ cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
+
+ EMIT_NEW_PCONST (cfg, ins, NULL);
+ MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
+
+#if TARGET_WIN32
+ ad_ins = NULL;
+ lmf_ins = NULL;
+#else
+ ad_ins = mono_get_domain_intrinsic (cfg);
+ lmf_ins = mono_get_lmf_intrinsic (cfg);
+#endif
+
+#ifdef MONO_ARCH_HAVE_TLS_GET
+ if (MONO_ARCH_HAVE_TLS_GET && ad_ins && lmf_ins) {
+ NEW_BBLOCK (cfg, next_bb);
+
+ MONO_ADD_INS (cfg->cbb, ad_ins);
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ad_ins->dreg, 0);
+ MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, next_bb);
+
+ MONO_ADD_INS (cfg->cbb, lmf_ins);
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, lmf_ins->dreg, 0);
+ MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, next_bb);
+ }
+#endif
+
+ if (cfg->compile_aot) {
+ /* AOT code is only used in the root domain */
+ EMIT_NEW_PCONST (cfg, args [0], NULL);
+ } else {
+ EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
+ }
+ ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
+ MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
+
+ if (next_bb) {
+ MONO_START_BB (cfg, next_bb);
+ bblock = cfg->cbb;
+ }
+ ip += 2;
+ break;
+ }
+ case CEE_MONO_JIT_DETACH: {
+ MonoInst *args [16];
+
+ /* Restore the original domain */
+ dreg = alloc_ireg (cfg);
+ EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
+ mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
+ ip += 2;
+ break;
+ }
default:
g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
break;
if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
if (check_linkdemand (cfg, method, cmethod))
- INLINE_FAILURE;
+ INLINE_FAILURE ("linkdemand");
CHECK_CFG_EXCEPTION;
} else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
case CEE_LDVIRTFTN: {
MonoInst *args [2];
+ GSHAREDVT_FAILURE (*ip);
+
CHECK_STACK (1);
CHECK_OPSIZE (6);
n = read32 (ip + 2);
if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
if (check_linkdemand (cfg, method, cmethod))
- INLINE_FAILURE;
+ INLINE_FAILURE ("linkdemand");
CHECK_CFG_EXCEPTION;
} else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
break;
}
case CEE_SIZEOF: {
- guint32 align;
+ guint32 val;
int ialign;
+ GSHAREDVT_FAILURE (*ip);
+
CHECK_STACK_OVF (1);
CHECK_OPSIZE (6);
token = read32 (ip + 2);
if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic && !generic_context) {
MonoType *type = mono_type_create_from_typespec (image, token);
- token = mono_type_size (type, &ialign);
+ val = mono_type_size (type, &ialign);
} else {
MonoClass *klass = mono_class_get_full (image, token, generic_context);
CHECK_TYPELOAD (klass);
mono_class_init (klass);
- token = mono_class_value_size (klass, &align);
+ val = mono_type_size (&klass->byval_arg, &ialign);
}
- EMIT_NEW_ICONST (cfg, ins, token);
+ EMIT_NEW_ICONST (cfg, ins, val);
*sp++= ins;
ip += 6;
break;
case CEE_REFANYTYPE: {
MonoInst *src_var, *src;
+ GSHAREDVT_FAILURE (*ip);
+
CHECK_STACK (1);
--sp;
} else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
+ } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, ptype)) {
+ MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
} else {
MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
}
case OP_IREM:
case OP_IREM_UN:
return -1;
+#endif
+#if defined(MONO_ARCH_EMULATE_MUL_DIV)
+ case OP_IMUL:
+ return -1;
#endif
default:
return mono_op_to_op_imm (opcode);