#define FREG 'f'
#define VREG 'v'
#define XREG 'x'
-#if SIZEOF_REGISTER == 8
+#if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
#define LREG IREG
#else
#define LREG 'l'
case MONO_TYPE_GENERICINST:
type = &type->data.generic_class->container_class->byval_arg;
goto handle_enum;
+ case MONO_TYPE_VAR:
+ case MONO_TYPE_MVAR:
+ /* gsharedvt */
+ return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
default:
g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
}
case MONO_TYPE_GENERICINST:
simple_type = &simple_type->data.generic_class->container_class->byval_arg;
goto handle_enum;
-
+ case MONO_TYPE_VAR:
+ case MONO_TYPE_MVAR:
+ /* gsharedvt */
+ if (args [i]->type != STACK_VTYPE)
+ return 1;
+ continue;
default:
g_error ("unknown type 0x%02x in check_call_signature",
simple_type->type);
return 0;
}
+/*
+ * check_method_sharing:
+ *
+ * Check whether the vtable or an mrgctx needs to be passed when calling CMETHOD.
+ * The results are returned through OUT_PASS_VTABLE and OUT_PASS_MRGCTX; either
+ * pointer may be NULL if the caller does not need that result. At most one of
+ * the two flags is set (enforced by the g_assert below).
+ */
+static void
+check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
+{
+ gboolean pass_vtable = FALSE;
+ gboolean pass_mrgctx = FALSE;
+
+ /* Static methods and valuetype methods of generic classes may need the vtable. */
+ if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
+ (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
+ gboolean sharable = FALSE;
+
+ if (mono_method_is_generic_sharable (cmethod, TRUE)) {
+ sharable = TRUE;
+ } else {
+ /* Not directly sharable: fall back to checking the class's own context. */
+ gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
+ MonoGenericContext *context = mini_class_get_context (cmethod->klass);
+ gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
+
+ sharable = sharing_enabled && context_sharable;
+ }
+
+ /*
+ * Pass vtable iff target method might
+ * be shared, which means that sharing
+ * is enabled for its class and its
+ * context is sharable (and it's not a
+ * generic method).
+ */
+ if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
+ pass_vtable = TRUE;
+ }
+
+ /* Generic methods (method_inst != NULL) get an mrgctx instead of a vtable. */
+ if (mini_method_get_context (cmethod) &&
+ mini_method_get_context (cmethod)->method_inst) {
+ g_assert (!pass_vtable);
+
+ if (mono_method_is_generic_sharable (cmethod, TRUE)) {
+ pass_mrgctx = TRUE;
+ } else {
+ gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
+ MonoGenericContext *context = mini_method_get_context (cmethod);
+ gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
+
+ if (sharing_enabled && context_sharable)
+ pass_mrgctx = TRUE;
+ /* gsharedvt signatures always need the mrgctx when compiling gsharedvt code */
+ if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, mono_method_signature (cmethod)))
+ pass_mrgctx = TRUE;
+ }
+ }
+
+ if (out_pass_vtable)
+ *out_pass_vtable = pass_vtable;
+ if (out_pass_mrgctx)
+ *out_pass_mrgctx = pass_mrgctx;
+}
+
inline static MonoCallInst *
mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
MonoInst **args, int calli, int virtual, int tail, int rgctx, int unbox_trampoline)
{
MonoCallInst *call;
-#ifdef MONO_ARCH_SOFT_FLOAT
+#ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
int i;
#endif
} else if (!MONO_TYPE_IS_VOID (sig->ret))
call->inst.dreg = alloc_dreg (cfg, call->inst.type);
-#ifdef MONO_ARCH_SOFT_FLOAT
+#ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
if (COMPILE_SOFT_FLOAT (cfg)) {
/*
* If the call has a float argument, we would need to do an r8->r4 conversion using
return (MonoInst*)call;
}
+static MonoInst*
+emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
+
static MonoInst*
emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
static MonoInst*
emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
static MonoInst*
-mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
+mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *rgctx_arg)
{
- gboolean might_be_remote;
+#ifndef DISABLE_REMOTING
+ gboolean might_be_remote = FALSE;
+#endif
gboolean virtual = this != NULL;
gboolean enable_for_aot = TRUE;
int context_used;
int rgctx_reg = 0;
gboolean need_unbox_trampoline;
+ if (!sig)
+ sig = mono_method_signature (method);
+
if (rgctx_arg) {
rgctx_reg = mono_alloc_preg (cfg);
MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
context_used = mini_method_check_context_used (cfg, method);
+#ifndef DISABLE_REMOTING
might_be_remote = this && sig->hasthis &&
(mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this) || context_used);
return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
}
+#endif
need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
- call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
+ call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
+#ifndef DISABLE_REMOTING
if (might_be_remote)
call->method = mono_marshal_get_remoting_invoke_with_check (method);
else
+#endif
call->method = method;
call->inst.flags |= MONO_INST_HAS_METHOD;
call->inst.inst_left = this;
+ call->tail_call = tail;
if (virtual) {
int vtable_reg, slot_reg, this_reg;
+ int offset;
this_reg = this->dreg;
-#ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
- if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
+ if (ARCH_HAVE_DELEGATE_TRAMPOLINES && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
MonoInst *dummy_use;
MONO_EMIT_NULL_CHECK (cfg, this_reg);
return (MonoInst*)call;
}
-#endif
if ((!cfg->compile_aot || enable_for_aot) &&
(!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
* the method is not virtual, we just need to ensure this is not null
* and then we can call the method directly.
*/
+#ifndef DISABLE_REMOTING
if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
/*
* The check above ensures method is not gshared, this is needed since
*/
method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
}
+#endif
if (!method->string_ctor)
MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
guint32 imt_slot = mono_method_get_imt_slot (method);
emit_imt_argument (cfg, call, call->method, imt_arg);
slot_reg = vtable_reg;
- call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
+ offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
}
#endif
if (slot_reg == -1) {
slot_reg = alloc_preg (cfg);
mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
- call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
+ offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
}
} else {
slot_reg = vtable_reg;
- call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
+ offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
#ifdef MONO_ARCH_HAVE_IMT
if (imt_arg) {
}
call->inst.sreg1 = slot_reg;
+ call->inst.inst_offset = offset;
call->virtual = TRUE;
}
}
MonoInst*
mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
{
- return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL, NULL);
+ return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this, NULL, NULL);
}
MonoInst*
}
static void
-emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value, int value_reg)
+emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
{
int card_table_shift_bits;
gpointer card_table_mask;
MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
wbarrier->sreg1 = ptr->dreg;
- if (value)
- wbarrier->sreg2 = value->dreg;
- else
- wbarrier->sreg2 = value_reg;
+ wbarrier->sreg2 = value->dreg;
MONO_ADD_INS (cfg->cbb, wbarrier);
} else if (card_table) {
int offset_reg = alloc_preg (cfg);
mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
}
- if (value) {
- EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
- } else {
- MONO_INST_NEW (cfg, dummy_use, OP_DUMMY_USE);
- dummy_use->sreg1 = value_reg;
- MONO_ADD_INS (cfg->cbb, dummy_use);
- }
+ EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
}
static gboolean
EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
while (size >= SIZEOF_VOID_P) {
- MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, tmp_reg, srcreg, offset);
+ MonoInst *load_inst;
+ MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
+ load_inst->dreg = tmp_reg;
+ load_inst->inst_basereg = srcreg;
+ load_inst->inst_offset = offset;
+ MONO_ADD_INS (cfg->cbb, load_inst);
+
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
if (need_wb & 0x1)
- emit_write_barrier (cfg, iargs [0], NULL, tmp_reg);
+ emit_write_barrier (cfg, iargs [0], load_inst);
offset += SIZEOF_VOID_P;
size -= SIZEOF_VOID_P;
guint32 align = 0;
MonoMethod *memcpy_method;
MonoInst *size_ins = NULL;
+ MonoInst *memcpy_ins = NULL;
g_assert (klass);
/*
if (mini_is_gsharedvt_klass (cfg, klass)) {
g_assert (!native);
context_used = mini_class_check_context_used (cfg, klass);
- size_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_VALUE_SIZE);
+ size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
+ memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
}
if (native)
n = mono_class_value_size (klass, &align);
/* if native is true there should be no references in the struct */
- if (cfg->gen_write_barriers && klass->has_references && !native) {
+ if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
/* Avoid barriers when storing to the stack */
if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
(dest->opcode == OP_LDADDR))) {
}
}
- mono_emit_jit_icall (cfg, mono_value_copy, iargs);
+ if (size_ins)
+ mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
+ else
+ mono_emit_jit_icall (cfg, mono_value_copy, iargs);
return;
}
}
EMIT_NEW_ICONST (cfg, iargs [2], n);
memcpy_method = get_memcpy_method ();
- mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
+ if (memcpy_ins)
+ mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
+ else
+ mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
}
}
guint32 align;
MonoMethod *memset_method;
MonoInst *size_ins = NULL;
+ MonoInst *bzero_ins = NULL;
+ static MonoMethod *bzero_method;
/* FIXME: Optimize this for the case when dest is an LDADDR */
mono_class_init (klass);
if (mini_is_gsharedvt_klass (cfg, klass)) {
context_used = mini_class_check_context_used (cfg, klass);
- size_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_VALUE_SIZE);
- n = -1;
- } else {
- n = mono_class_value_size (klass, &align);
+ size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
+ bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
+ if (!bzero_method)
+ bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
+ g_assert (bzero_method);
+ iargs [0] = dest;
+ iargs [1] = size_ins;
+ mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
+ return;
}
- if (!size_ins && n <= sizeof (gpointer) * 5) {
+ n = mono_class_value_size (klass, &align);
+
+ if (n <= sizeof (gpointer) * 5) {
mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
}
else {
memset_method = get_memset_method ();
iargs [0] = dest;
EMIT_NEW_ICONST (cfg, iargs [1], 0);
- if (size_ins)
- iargs [2] = size_ins;
- else
- EMIT_NEW_ICONST (cfg, iargs [2], n);
+ EMIT_NEW_ICONST (cfg, iargs [2], n);
mono_emit_method_call (cfg, memset_method, iargs, NULL);
}
}
return emit_rgctx_fetch (cfg, rgctx, entry);
}
+
+/*
+ * emit_get_rgctx_gsharedvt_method:
+ *
+ * Emit IR to fetch the gsharedvt info (MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO)
+ * described by INFO for CMETHOD from the runtime generic context.
+ */
+static MonoInst*
+emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
+ MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
+{
+ MonoJumpInfoRgctxEntry *entry;
+ MonoInst *rgctx;
+
+ entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
+ rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
+
+ return emit_rgctx_fetch (cfg, rgctx, entry);
+}
+
/*
* emit_get_rgctx_method:
*
return emit_rgctx_fetch (cfg, rgctx, entry);
}
+/*
+ * get_gsharedvt_info_slot:
+ *
+ * Return the index of the entry identified by (DATA, RGCTX_TYPE) in the current
+ * method's gsharedvt info template (cfg->gsharedvt_info), adding a new entry if
+ * it is not already present. MONO_RGCTX_INFO_LOCAL_OFFSET entries are never
+ * deduplicated: each request for one allocates a fresh slot.
+ */
+static int
+get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
+{
+ MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
+ MonoRuntimeGenericContextInfoTemplate *template;
+ int i, idx;
+
+ g_assert (info);
+
+ /* Reuse an existing slot when possible (except for LOCAL_OFFSET entries). */
+ for (i = 0; i < info->entries->len; ++i) {
+ MonoRuntimeGenericContextInfoTemplate *otemplate = g_ptr_array_index (info->entries, i);
+
+ if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
+ return i;
+ }
+
+ template = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate));
+ template->info_type = rgctx_type;
+ template->data = data;
+
+ idx = info->entries->len;
+
+ g_ptr_array_add (info->entries, template);
+
+ return idx;
+}
+
+/*
+ * emit_get_gsharedvt_info:
+ *
+ * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
+ * The slot for (DATA, RGCTX_TYPE) is allocated lazily in cfg->gsharedvt_info
+ * via get_gsharedvt_info_slot ().
+ */
+static MonoInst*
+emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
+{
+ MonoInst *ins;
+ int idx, dreg;
+
+ idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
+ /* Load info->entries [idx] */
+ dreg = alloc_preg (cfg);
+ EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, G_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
+
+ return ins;
+}
+
+/* Convenience wrapper: fetch gsharedvt info keyed by KLASS's byval type. */
+static MonoInst*
+emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
+{
+ return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
+}
+
/*
* On return the caller must check @klass for load errors.
*/
return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
} else {
- return mono_emit_method_call (cfg, method, &val, NULL);
+ gboolean pass_vtable, pass_mrgctx;
+ MonoInst *rgctx_arg = NULL;
+
+ check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
+ g_assert (!pass_mrgctx);
+
+ if (pass_vtable) {
+ MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
+
+ g_assert (vtable);
+ EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
+ }
+
+ return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
}
}
return add;
}
+/*
+ * handle_unbox_gsharedvt:
+ *
+ * Emit IR to unbox OBJ as the gshared type KLASS, whose concrete runtime type
+ * is not known at JIT time. A castclass check is emitted first, then control
+ * flow branches on the runtime box type of KLASS (1 == reference type,
+ * 2 == nullable, per the branch targets below; otherwise a plain vtype).
+ * Returns the loaded value; *OUT_CBB is set to the basic block that is current
+ * after the generated control flow.
+ */
+static MonoInst*
+handle_unbox_gsharedvt (MonoCompile *cfg, int context_used, MonoClass *klass, MonoInst *obj, MonoBasicBlock **out_cbb)
+{
+ MonoInst *addr, *klass_inst, *is_ref, *args[16];
+ MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
+ MonoInst *ins;
+ int dreg, addr_reg;
+
+ klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
+
+ /* obj */
+ args [0] = obj;
+
+ /* klass */
+ args [1] = klass_inst;
+
+ /* CASTCLASS */
+ obj = mono_emit_jit_icall (cfg, mono_object_castclass, args);
+
+ NEW_BBLOCK (cfg, is_ref_bb);
+ NEW_BBLOCK (cfg, is_nullable_bb);
+ NEW_BBLOCK (cfg, end_bb);
+ /* Dispatch on the runtime box type of KLASS. */
+ is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
+ MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
+
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
+ MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
+
+ /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
+ addr_reg = alloc_dreg (cfg, STACK_MP);
+
+ /* Non-ref case */
+ /* UNBOX */
+ NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
+ MONO_ADD_INS (cfg->cbb, addr);
+
+ MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
+
+ /* Ref case */
+ MONO_START_BB (cfg, is_ref_bb);
+
+ /* Save the ref to a temporary */
+ dreg = alloc_ireg (cfg);
+ EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
+ addr->dreg = addr_reg;
+ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
+ MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
+
+ /* Nullable case */
+ MONO_START_BB (cfg, is_nullable_bb);
+
+ {
+ /*
+ * Call Nullable<T>.Unbox through an indirect call with a hand-built
+ * gsharedvt-compatible signature; the result is stored into a local
+ * so its address can flow into addr_reg like the other cases.
+ */
+ MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
+ MonoInst *unbox_call;
+ MonoMethodSignature *unbox_sig;
+ MonoInst *var;
+
+ var = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
+
+ unbox_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
+ unbox_sig->ret = &klass->byval_arg;
+ unbox_sig->param_count = 1;
+ unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
+ unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
+
+ EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
+ addr->dreg = addr_reg;
+ }
+
+ MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
+
+ /* End */
+ MONO_START_BB (cfg, end_bb);
+
+ /* LDOBJ */
+ EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
+
+ *out_cbb = cfg->cbb;
+
+ return ins;
+}
+
/*
* Returns NULL and set the cfg exception on error.
*/
int rgctx_info;
MonoInst *iargs [2];
- /*
- FIXME: we cannot get managed_alloc here because we can't get
- the class's vtable (because it's not a closed class)
-
- MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
- MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
- */
+ MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box);
if (cfg->opt & MONO_OPT_SHARED)
rgctx_info = MONO_RGCTX_INFO_KLASS;
alloc_ftn = mono_object_new_specific;
}
+ if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED))
+ return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
+
return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
}
}
#ifndef MONO_CROSS_COMPILE
- managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
+ managed_alloc = mono_gc_get_managed_allocator (klass, for_box);
#endif
if (managed_alloc) {
* Returns NULL and set the cfg exception on error.
*/
static MonoInst*
-handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
+handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoBasicBlock **out_cbb)
{
MonoInst *alloc, *ins;
+ *out_cbb = cfg->cbb;
+
if (mono_class_is_nullable (klass)) {
MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
} else {
- return mono_emit_method_call (cfg, method, &val, NULL);
+ gboolean pass_vtable, pass_mrgctx;
+ MonoInst *rgctx_arg = NULL;
+
+ check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
+ g_assert (!pass_mrgctx);
+
+ if (pass_vtable) {
+ MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
+
+ g_assert (vtable);
+ EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
+ }
+
+ return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
}
}
if (mini_is_gsharedvt_klass (cfg, klass)) {
- MonoBasicBlock *is_ref_bb, *end_bb;
+ MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
MonoInst *res, *is_ref, *src_var, *addr;
int addr_reg, dreg;
dreg = alloc_ireg (cfg);
NEW_BBLOCK (cfg, is_ref_bb);
+ NEW_BBLOCK (cfg, is_nullable_bb);
NEW_BBLOCK (cfg, end_bb);
- is_ref = emit_get_rgctx_klass (cfg, context_used, klass,
- MONO_RGCTX_INFO_CLASS_IS_REF);
+ is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
+ MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
+
/* Non-ref case */
alloc = handle_alloc (cfg, klass, TRUE, context_used);
if (!alloc)
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
+ /* Nullable case */
+ MONO_START_BB (cfg, is_nullable_bb);
+
+ {
+ MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
+ MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
+ MonoInst *box_call;
+ MonoMethodSignature *box_sig;
+
+ /*
+ * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
+ * construct that method at JIT time, so have to do things by hand.
+ */
+ box_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
+ box_sig->ret = &mono_defaults.object_class->byval_arg;
+ box_sig->param_count = 1;
+ box_sig->params [0] = &klass->byval_arg;
+ box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
+ EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
+ res->type = STACK_OBJ;
+ res->klass = klass;
+ }
+
+ MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
+
MONO_START_BB (cfg, end_bb);
+ *out_cbb = cfg->cbb;
+
return res;
} else {
alloc = handle_alloc (cfg, klass, TRUE, context_used);
if (context_used) {
MonoInst *args [3];
- if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
+ if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
MonoInst *cache_ins;
}
klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
-
- if (is_complex_isinst (klass)) {
- /* Complex case, handle by an icall */
-
- /* obj */
- args [0] = src;
-
- /* klass */
- args [1] = klass_inst;
-
- return mono_emit_jit_icall (cfg, mono_object_castclass, args);
- } else {
- /* Simple case, handled by the code below */
- }
}
NEW_BBLOCK (cfg, is_null_bb);
if (context_used) {
MonoInst *args [3];
- if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
+ if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
MonoInst *cache_ins;
}
klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
-
- if (is_complex_isinst (klass)) {
- /* Complex case, handle by an icall */
-
- /* obj */
- args [0] = src;
-
- /* klass */
- args [1] = klass_inst;
-
- return mono_emit_jit_icall (cfg, mono_object_isinst, args);
- } else {
- /* Simple case, the code below can handle it */
- }
}
NEW_BBLOCK (cfg, is_null_bb);
2) if the object is a proxy whose type cannot be determined */
MonoInst *ins;
+#ifndef DISABLE_REMOTING
MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
+#else
+ MonoBasicBlock *true_bb, *false_bb, *end_bb;
+#endif
int obj_reg = src->dreg;
int dreg = alloc_ireg (cfg);
int tmp_reg;
+#ifndef DISABLE_REMOTING
int klass_reg = alloc_preg (cfg);
+#endif
NEW_BBLOCK (cfg, true_bb);
NEW_BBLOCK (cfg, false_bb);
- NEW_BBLOCK (cfg, false2_bb);
NEW_BBLOCK (cfg, end_bb);
+#ifndef DISABLE_REMOTING
+ NEW_BBLOCK (cfg, false2_bb);
NEW_BBLOCK (cfg, no_proxy_bb);
+#endif
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
+#ifndef DISABLE_REMOTING
NEW_BBLOCK (cfg, interface_fail_bb);
+#endif
tmp_reg = alloc_preg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
+#ifndef DISABLE_REMOTING
mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
MONO_START_BB (cfg, interface_fail_bb);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
+#else
+ mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
+#endif
} else {
+#ifndef DISABLE_REMOTING
tmp_reg = alloc_preg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
MONO_START_BB (cfg, no_proxy_bb);
mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
+#else
+ g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
+#endif
}
MONO_START_BB (cfg, false_bb);
MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
+#ifndef DISABLE_REMOTING
MONO_START_BB (cfg, false2_bb);
MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
+#endif
MONO_START_BB (cfg, true_bb);
an InvalidCastException exception is thrown otherwhise*/
MonoInst *ins;
+#ifndef DISABLE_REMOTING
MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
+#else
+ MonoBasicBlock *ok_result_bb;
+#endif
int obj_reg = src->dreg;
int dreg = alloc_ireg (cfg);
int tmp_reg = alloc_preg (cfg);
- int klass_reg = alloc_preg (cfg);
+#ifndef DISABLE_REMOTING
+ int klass_reg = alloc_preg (cfg);
NEW_BBLOCK (cfg, end_bb);
+#endif
+
NEW_BBLOCK (cfg, ok_result_bb);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
save_cast_details (cfg, klass, obj_reg);
if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
+#ifndef DISABLE_REMOTING
NEW_BBLOCK (cfg, interface_fail_bb);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
-
+#else
+ MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
+ mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
+ MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
+#endif
} else {
+#ifndef DISABLE_REMOTING
NEW_BBLOCK (cfg, no_proxy_bb);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
MONO_START_BB (cfg, no_proxy_bb);
mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
+#else
+ g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
+#endif
}
MONO_START_BB (cfg, ok_result_bb);
MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
+#ifndef DISABLE_REMOTING
MONO_START_BB (cfg, end_bb);
+#endif
/* FIXME: */
MONO_INST_NEW (cfg, ins, OP_ICONST);
if (cfg->gen_write_barriers) {
dreg = alloc_preg (cfg);
EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target));
- emit_write_barrier (cfg, ptr, target, 0);
+ emit_write_barrier (cfg, ptr, target);
}
}
if (cfg->gen_write_barriers) {
dreg = alloc_preg (cfg);
EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method));
- emit_write_barrier (cfg, ptr, method_ins, 0);
+ emit_write_barrier (cfg, ptr, method_ins);
}
/*
* To avoid looking up the compiled code belonging to the target method
{
MonoMethodHeaderSummary header;
MonoVTable *vtable;
-#ifdef MONO_ARCH_SOFT_FLOAT
+#ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
MonoMethodSignature *sig = mono_method_signature (method);
int i;
#endif
* CAS - do not inline methods with declarative security
* Note: this has to be before any possible return TRUE;
*/
- if (mono_method_has_declsec (method))
+ if (mono_security_method_has_declsec (method))
return FALSE;
-#ifdef MONO_ARCH_SOFT_FLOAT
- /* FIXME: */
- if (sig->ret && sig->ret->type == MONO_TYPE_R4)
- return FALSE;
- for (i = 0; i < sig->param_count; ++i)
- if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
+#ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
+ if (mono_arch_is_soft_float ()) {
+ /* FIXME: */
+ if (sig->ret && sig->ret->type == MONO_TYPE_R4)
return FALSE;
+ for (i = 0; i < sig->param_count; ++i)
+ if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
+ return FALSE;
+ }
#endif
return TRUE;
g_assert (cfg->generic_sharing_context);
context_used = mini_class_check_context_used (cfg, klass);
g_assert (context_used);
- rgctx_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
+ rgctx_ins = emit_get_gsharedvt_info (cfg, &klass->byval_arg, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
} else {
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
if (mini_type_is_reference (cfg, fsig->params [2]))
- emit_write_barrier (cfg, addr, load, -1);
+ emit_write_barrier (cfg, addr, load);
} else {
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
if (generic_class_is_reference_type (cfg, klass))
- emit_write_barrier (cfg, addr, sp [2], -1);
+ emit_write_barrier (cfg, addr, sp [2]);
}
return ins;
}
} else
return NULL;
} else if (cmethod->klass == mono_defaults.array_class) {
- if (strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
+ if (!cfg->gsharedvt && strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
#ifndef MONO_BIG_ARRAYS
}
if (cfg->gen_write_barriers && is_ref)
- emit_write_barrier (cfg, args [0], args [1], -1);
+ emit_write_barrier (cfg, args [0], args [1]);
}
#endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
/* g_assert_not_reached (); */
}
if (cfg->gen_write_barriers && is_ref)
- emit_write_barrier (cfg, args [0], args [1], -1);
+ emit_write_barrier (cfg, args [0], args [1]);
}
#endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
+ if (strcmp (cmethod->name, "MemoryBarrier") == 0)
+ ins = emit_memory_barrier (cfg, FullBarrier);
+
if (ins)
return ins;
} else if (cmethod->klass->image == mono_defaults.corlib) {
g_assert (vtable); /*Should not fail since it System.String*/
#ifndef MONO_CROSS_COMPILE
- managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
+ managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE);
#endif
if (!managed_alloc)
return NULL;
{
MonoClass *klass;
- if (method->wrapper_type != MONO_WRAPPER_NONE)
+ if (method->wrapper_type != MONO_WRAPPER_NONE) {
klass = mono_method_get_wrapper_data (method, token);
- else
+ if (context)
+ klass = mono_class_inflate_generic_class (klass, context);
+ } else {
klass = mono_class_get_full (method->klass->image, token, context);
+ }
if (klass)
mono_class_init (klass);
return klass;
{
guint32 result;
- if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
+ if ((cfg->method != caller) && mono_security_method_has_declsec (callee)) {
return TRUE;
}
MonoInst *cached_tls_addr = NULL;
MonoDebugMethodInfo *minfo;
MonoBitSet *seq_point_locs = NULL;
+ MonoBitSet *seq_point_set_locs = NULL;
disable_inline = is_jit_optimizer_disabled (method);
dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
- dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
+ dont_verify |= mono_security_smcs_hack_enabled ();
/* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
mono_debug_symfile_get_line_numbers_full (minfo, NULL, NULL, &n_il_offsets, &il_offsets, &line_numbers, NULL, NULL);
seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
+ seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
sym_seq_points = TRUE;
for (i = 0; i < n_il_offsets; ++i) {
if (il_offsets [i] < header->code_size)
}
}
- if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
+ if (mono_security_cas_enabled ())
secman = mono_security_manager_get_methods ();
- security = (secman && mono_method_has_declsec (method));
+ security = (secman && mono_security_method_has_declsec (method));
/* at this point having security doesn't mean we have any code to generate */
if (security && (cfg->method == method)) {
/* Only Demand, NonCasDemand and DemandChoice requires code generation.
link_bblock (cfg, start_bblock, bblock);
}
+ if (cfg->gsharedvt && cfg->method == method) {
+ MonoGSharedVtMethodInfo *info;
+ MonoInst *var, *locals_var;
+ int dreg;
+
+ info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
+ info->method = cfg->method;
+ // FIXME: Free this
+ info->entries = g_ptr_array_new ();
+ cfg->gsharedvt_info = info;
+
+ var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
+ /* prevent it from being register allocated */
+ //var->flags |= MONO_INST_INDIRECT;
+ cfg->gsharedvt_info_var = var;
+
+ ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
+ MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
+
+ /* Allocate locals */
+ locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
+ /* prevent it from being register allocated */
+ //locals_var->flags |= MONO_INST_INDIRECT;
+ cfg->gsharedvt_locals_var = locals_var;
+
+ dreg = alloc_ireg (cfg);
+ MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, G_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
+
+ MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
+ ins->dreg = locals_var->dreg;
+ ins->sreg1 = dreg;
+ MONO_ADD_INS (cfg->cbb, ins);
+ cfg->gsharedvt_locals_var_ins = ins;
+
+ cfg->flags |= MONO_CFG_HAS_ALLOCA;
+ /*
+ if (init_locals)
+ ins->flags |= MONO_INST_INIT;
+ */
+ }
+
/* at this point we know, if security is TRUE, that some code needs to be generated */
if (security && (cfg->method == method)) {
MonoInst *args [2];
mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
}
- if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
+ if (mono_security_core_clr_enabled ()) {
/* check if this is native code, e.g. an icall or a p/invoke */
if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
//if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
MONO_ADD_INS (cfg->cbb, ins);
+
+ if (sym_seq_points)
+ mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
}
bblock->real_offset = cfg->real_offset;
if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
GENERIC_SHARING_FAILURE (CEE_JMP);
- if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
+ if (mono_security_cas_enabled ())
CHECK_CFG_EXCEPTION;
#ifdef MONO_ARCH_USE_OP_TAIL_CALL
int calli = *ip == CEE_CALLI;
gboolean pass_imt_from_rgctx = FALSE;
MonoInst *imt_arg = NULL;
+ MonoInst *keep_this_alive = NULL;
gboolean pass_vtable = FALSE;
gboolean pass_mrgctx = FALSE;
MonoInst *vtable_arg = NULL;
gboolean check_this = FALSE;
gboolean supported_tail_call = FALSE;
+ gboolean tail_call = FALSE;
gboolean need_seq_point = FALSE;
guint32 call_opcode = *ip;
gboolean emit_widen = TRUE;
gboolean push_res = TRUE;
gboolean skip_ret = FALSE;
+ gboolean delegate_invoke = FALSE;
CHECK_OPSIZE (5);
token = read32 (ip + 1);
METHOD_ACCESS_FAILURE;
}
- if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
+ if (mono_security_core_clr_enabled ())
ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
}
*/
- if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
+ if (mono_security_cas_enabled ()) {
if (check_linkdemand (cfg, method, cmethod))
INLINE_FAILURE ("linkdemand");
CHECK_CFG_EXCEPTION;
/*
 * Constrained calls need to behave differently at runtime depending on whether the receiver is instantiated as a ref type or as a vtype.
*/
- /* Special case Object:ToString () as its easy to implement */
- if (cmethod->klass == mono_defaults.object_class && !strcmp (cmethod->name, "ToString")) {
- MonoInst *args [3];
+ /* Special case Object methods as they are easy to implement */
+ if (cmethod->klass == mono_defaults.object_class) {
+ MonoInst *args [16];
args [0] = sp [0];
EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_call), constrained_call, MONO_RGCTX_INFO_KLASS);
- ins = mono_emit_jit_icall (cfg, mono_object_tostring_gsharedvt, args);
+
+ if (!strcmp (cmethod->name, "ToString")) {
+ ins = mono_emit_jit_icall (cfg, mono_object_tostring_gsharedvt, args);
+ } else if (!strcmp (cmethod->name, "Equals")) {
+ args [3] = sp [1];
+ ins = mono_emit_jit_icall (cfg, mono_object_equals_gsharedvt, args);
+ } else if (!strcmp (cmethod->name, "GetHashCode")) {
+ ins = mono_emit_jit_icall (cfg, mono_object_gethashcode_gsharedvt, args);
+ } else {
+ GSHAREDVT_FAILURE (*ip);
+ }
goto call_end;
- } else if (cmethod->klass == mono_defaults.object_class && !strcmp (cmethod->name, "GetHashCode")) {
- MonoInst *args [3];
+ } else if (constrained_call->valuetype && cmethod->klass->valuetype) {
+ /* The 'Own method' case below */
+ } else if ((cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && MONO_TYPE_IS_VOID (fsig->ret) && (fsig->param_count == 0 || (fsig->param_count == 1 && MONO_TYPE_IS_REFERENCE (fsig->params [0])))) {
+ MonoInst *args [16];
args [0] = sp [0];
EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_call), constrained_call, MONO_RGCTX_INFO_KLASS);
- ins = mono_emit_jit_icall (cfg, mono_object_gethashcode_gsharedvt, args);
+
+ if (fsig->param_count) {
+ /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
+ MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
+ ins->dreg = alloc_preg (cfg);
+ ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
+ MONO_ADD_INS (cfg->cbb, ins);
+ args [3] = ins;
+
+ EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [3]->dreg, 0, sp [1]->dreg);
+ } else {
+ EMIT_NEW_ICONST (cfg, args [3], 0);
+ }
+
+ ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
goto call_end;
- } else if (constrained_call->valuetype && cmethod->klass->valuetype) {
- /* The 'Own method' case below */
} else {
GSHAREDVT_FAILURE (*ip);
}
*/
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
ins->klass = constrained_call;
- sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call));
- bblock = cfg->cbb;
+ sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call), &bblock);
CHECK_CFG_EXCEPTION;
} else if (!constrained_call->valuetype) {
int dreg = alloc_ireg_ref (cfg);
/* Enum implements some interfaces, so treat this as the first case */
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
ins->klass = constrained_call;
- sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call));
- bblock = cfg->cbb;
+ sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call), &bblock);
CHECK_CFG_EXCEPTION;
}
}
if (!calli && check_call_signature (cfg, fsig, sp))
UNVERIFIED;
+#ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
+ if (cmethod && (cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
+ delegate_invoke = TRUE;
+#endif
+
if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
bblock = cfg->cbb;
if (!MONO_TYPE_IS_VOID (fsig->ret)) {
* If the callee is a shared method, then its static cctor
* might not get called after the call was patched.
*/
- if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
+ if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
emit_generic_class_init (cfg, cmethod->klass);
CHECK_TYPELOAD (cmethod->klass);
}
- if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
- (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
- gboolean sharable = FALSE;
-
- if (mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
- sharable = TRUE;
- } else {
- gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
- MonoGenericContext *context = mini_class_get_context (cmethod->klass);
- gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
-
- sharable = sharing_enabled && context_sharable;
- }
-
- /*
- * Pass vtable iff target method might
- * be shared, which means that sharing
- * is enabled for its class and its
- * context is sharable (and it's not a
- * generic method).
- */
- if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
- pass_vtable = TRUE;
- }
-
- if (cmethod && mini_method_get_context (cmethod) &&
- mini_method_get_context (cmethod)->method_inst) {
- g_assert (!pass_vtable);
-
- if (mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
- pass_mrgctx = TRUE;
- } else {
- gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
- MonoGenericContext *context = mini_method_get_context (cmethod);
- gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
-
- if (sharing_enabled && context_sharable)
- pass_mrgctx = TRUE;
- if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
- pass_mrgctx = TRUE;
- }
- }
+ if (cmethod)
+ check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
if (cfg->generic_sharing_context && cmethod) {
MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
g_assert (cmethod->is_inflated);
imt_arg = emit_get_rgctx_method (cfg, context_used,
cmethod, MONO_RGCTX_INFO_METHOD);
- ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg, NULL);
+ ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
} else {
this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
/* test_0_multi_dim_arrays () in gshared.cs */
GSHAREDVT_FAILURE (*ip);
+ if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
+ keep_this_alive = sp [0];
+
if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
else
/* Generic sharing */
/* FIXME: only do this for generic methods if
they are not shared! */
- if (context_used && !imt_arg && !array_rank &&
- (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
+ if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
+ (!mono_method_is_generic_sharable (cmethod, TRUE) ||
!mono_class_generic_sharing_enabled (cmethod->klass)) &&
(!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
* the method in the rgctx and do an
* indirect call.
*/
+ if (fsig->hasthis)
+ MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
+
addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
goto call_end;
/* Prevent inlining of methods with indirect calls */
INLINE_FAILURE ("indirect call");
- if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
+ if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
+ int info_type;
+ gpointer info_data;
+
/*
* Instead of emitting an indirect call, emit a direct call
* with the contents of the aotconst as the patch info.
*/
- ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
- NULLIFY_INS (addr);
- } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
- ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
- NULLIFY_INS (addr);
- } else {
- ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
+ if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
+ info_type = addr->inst_c1;
+ info_data = addr->inst_p0;
+ } else {
+ info_type = addr->inst_right->inst_c1;
+ info_data = addr->inst_right->inst_left;
+ }
+
+ if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
+ ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
+ NULLIFY_INS (addr);
+ goto call_end;
+ }
}
-
+ ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
goto call_end;
}
addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
- emit_write_barrier (cfg, addr, val, 0);
+ emit_write_barrier (cfg, addr, val);
} else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
/* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
/* FIXME: runtime generic context pointer for jumps? */
/* FIXME: handle this for generic sharing eventually */
- if (cmethod &&
- ((((ins_flag & MONO_INST_TAILCALL) && (call_opcode == CEE_CALL))
- ))//|| ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && ip [5] == CEE_RET))
- && !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig))
+ if (cmethod && (ins_flag & MONO_INST_TAILCALL) &&
+ !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig))
supported_tail_call = TRUE;
+ if (supported_tail_call) {
+ if (call_opcode != CEE_CALL)
+ supported_tail_call = FALSE;
+ }
+
if (supported_tail_call) {
MonoCallInst *call;
//printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
-#ifdef MONO_ARCH_USE_OP_TAIL_CALL
- /* Handle tail calls similarly to calls */
- call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE, FALSE, FALSE);
-#else
- MONO_INST_NEW_CALL (cfg, call, OP_JMP);
- call->tail_call = TRUE;
- call->method = cmethod;
- call->signature = mono_method_signature (cmethod);
-
- /*
- * We implement tail calls by storing the actual arguments into the
- * argument variables, then emitting a CEE_JMP.
- */
- for (i = 0; i < n; ++i) {
- /* Prevent argument from being register allocated */
- arg_array [i]->flags |= MONO_INST_VOLATILE;
- EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
- }
-#endif
+ if (ARCH_USE_OP_TAIL_CALL) {
+ /* Handle tail calls similarly to normal calls */
+ tail_call = TRUE;
+ } else {
+ MONO_INST_NEW_CALL (cfg, call, OP_JMP);
+ call->tail_call = TRUE;
+ call->method = cmethod;
+ call->signature = mono_method_signature (cmethod);
- ins = (MonoInst*)call;
- ins->inst_p0 = cmethod;
- ins->inst_p1 = arg_array [0];
- MONO_ADD_INS (bblock, ins);
- link_bblock (cfg, bblock, end_bblock);
- start_new_bblock = 1;
+ /*
+ * We implement tail calls by storing the actual arguments into the
+ * argument variables, then emitting a CEE_JMP.
+ */
+ for (i = 0; i < n; ++i) {
+ /* Prevent argument from being register allocated */
+ arg_array [i]->flags |= MONO_INST_VOLATILE;
+ EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
+ }
+ ins = (MonoInst*)call;
+ ins->inst_p0 = cmethod;
+ ins->inst_p1 = arg_array [0];
+ MONO_ADD_INS (bblock, ins);
+ link_bblock (cfg, bblock, end_bblock);
+ start_new_bblock = 1;
- // FIXME: Eliminate unreachable epilogs
+ // FIXME: Eliminate unreachable epilogs
- /*
- * OP_TAILCALL has no return value, so skip the CEE_RET if it is
- * only reachable from this call.
- */
- GET_BBLOCK (cfg, tblock, ip + 5);
- if (tblock == bblock || tblock->in_count == 0)
- skip_ret = TRUE;
- push_res = FALSE;
+ /*
+ * OP_TAILCALL has no return value, so skip the CEE_RET if it is
+ * only reachable from this call.
+ */
+ GET_BBLOCK (cfg, tblock, ip + 5);
+ if (tblock == bblock || tblock->in_count == 0)
+ skip_ret = TRUE;
+ push_res = FALSE;
- goto call_end;
+ goto call_end;
+ }
}
/*
* change the called method to a dummy wrapper, and resolve that wrapper
* to the real method in mono_jit_compile_method ().
*/
- if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED && mono_marshal_method_from_wrapper (cfg->method) == cmethod)
- cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
+ if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
+ MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
+ if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
+ cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
+ }
/* Common call */
INLINE_FAILURE ("call");
- ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
+ ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual ? sp [0] : NULL,
imt_arg, vtable_arg);
+ if (tail_call) {
+ link_bblock (cfg, bblock, end_bblock);
+ start_new_bblock = 1;
+
+ // FIXME: Eliminate unreachable epilogs
+
+ /*
+ * OP_TAILCALL has no return value, so skip the CEE_RET if it is
+ * only reachable from this call.
+ */
+ GET_BBLOCK (cfg, tblock, ip + 5);
+ if (tblock == bblock || tblock->in_count == 0)
+ skip_ret = TRUE;
+ push_res = FALSE;
+ }
+
call_end:
/* End of call, INS should contain the result of the call, if any */
*sp++ = ins;
}
+ if (keep_this_alive) {
+ MonoInst *dummy_use;
+
+ /* See mono_emit_method_call_full () */
+ EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
+ }
+
CHECK_CFG_EXCEPTION;
ip += 5;
ins->klass = mono_class_from_mono_type (ret_type);
}
} else {
-#ifdef MONO_ARCH_SOFT_FLOAT
+#ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
MonoInst *iargs [1];
MonoInst *conv;
MONO_ADD_INS (bblock, ins);
if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
- emit_write_barrier (cfg, sp [0], sp [1], -1);
+ emit_write_barrier (cfg, sp [0], sp [1]);
inline_costs += 1;
++ip;
int imm_opcode;
imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
+#if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
+ /* Keep emulated opcodes which are optimized away later */
+ if ((ins->opcode == OP_IREM_UN || ins->opcode == OP_IDIV_UN_IMM) && (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP)) && sp [1]->opcode == OP_ICONST && mono_is_power_of_two (sp [1]->inst_c0) >= 0) {
+ imm_opcode = mono_op_to_op_imm (ins->opcode);
+ }
+#endif
if (imm_opcode != -1) {
ins->opcode = imm_opcode;
if (sp [1]->opcode == OP_I8CONST) {
MONO_ADD_INS (cfg->cbb, store);
if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
- emit_write_barrier (cfg, sp [0], sp [1], -1);
+ emit_write_barrier (cfg, sp [0], sp [1]);
} else {
mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
}
context_used = mini_method_check_context_used (cfg, cmethod);
- if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
+ if (mono_security_cas_enabled ()) {
if (check_linkdemand (cfg, method, cmethod))
INLINE_FAILURE ("linkdemand");
CHECK_CFG_EXCEPTION;
- } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
+ } else if (mono_security_core_clr_enabled ()) {
ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
}
- if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
+ if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
emit_generic_class_init (cfg, cmethod->klass);
CHECK_TYPELOAD (cmethod->klass);
}
*/
if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
- mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
+ mono_method_is_generic_sharable (cmethod, TRUE)) {
if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
mono_class_vtable (cfg->domain, cmethod->klass);
CHECK_TYPELOAD (cmethod->klass);
/* we simply pass a null pointer */
EMIT_NEW_PCONST (cfg, *sp, NULL);
/* now call the string ctor */
- alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL, NULL);
+ alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
} else {
MonoInst* callvirt_this_arg = NULL;
// FIXME-VT: Clean this up
if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
GSHAREDVT_FAILURE(*ip);
- mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL, NULL);
+ mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
}
} else if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
MonoInst *addr;
addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
} else if (context_used &&
- (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
+ (!mono_method_is_generic_sharable (cmethod, TRUE) ||
!mono_class_generic_sharing_enabled (cmethod->klass))) {
MonoInst *cmethod_addr;
mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
} else {
INLINE_FAILURE ("ctor call");
- ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp,
+ ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
callvirt_this_arg, NULL, vtable_arg);
}
}
context_used = mini_class_check_context_used (cfg, klass);
if (mini_is_gsharedvt_klass (cfg, klass)) {
- MonoInst *obj, *addr, *klass_inst, *args[16];
- int dreg;
-
- /* Need to check for nullable types at runtime, but those are disabled in mini_is_gsharedvt_sharable_method*/
- if (mono_class_is_nullable (klass))
- GSHAREDVT_FAILURE (*ip);
-
- obj = *sp;
-
- klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
-
- /* obj */
- args [0] = obj;
-
- /* klass */
- args [1] = klass_inst;
-
- /* CASTCLASS */
- obj = mono_emit_jit_icall (cfg, mono_object_castclass, args);
-
- /* UNBOX */
- dreg = alloc_dreg (cfg, STACK_MP);
- NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, dreg, obj->dreg, sizeof (MonoObject));
- MONO_ADD_INS (cfg->cbb, addr);
-
- /* LDOBJ */
- EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
- *sp++ = ins;
+ *sp = handle_unbox_gsharedvt (cfg, context_used, klass, *sp, &bblock);
+ sp ++;
ip += 5;
inline_costs += 2;
break;
}
- *sp++ = handle_box (cfg, val, klass, context_used);
- bblock = cfg->cbb;
+ *sp++ = handle_box (cfg, val, klass, context_used, &bblock);
CHECK_CFG_EXCEPTION;
ip += 5;
case CEE_LDSFLDA:
case CEE_STSFLD: {
MonoClassField *field;
+#ifndef DISABLE_REMOTING
int costs;
+#endif
guint foffset;
gboolean is_instance;
int op;
UNVERIFIED;
/* if the class is Critical then transparent code cannot access it's fields */
- if (!is_instance && mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
+ if (!is_instance && mono_security_core_clr_enabled ())
ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
/* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
- if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
+ if (mono_security_core_clr_enabled ())
ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
*/
if (op == CEE_STFLD) {
if (target_type_is_incompatible (cfg, field->type, sp [1]))
UNVERIFIED;
+#ifndef DISABLE_REMOTING
if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
MonoInst *iargs [5];
} else {
mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
}
- } else {
+ } else
+#endif
+ {
MonoInst *store;
MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
context_used = mini_class_check_context_used (cfg, klass);
- offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
+ offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
dreg = alloc_ireg_mp (cfg);
EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
+ /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
- // FIXME-VT: wbarriers ?
} else {
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
}
dreg = alloc_ireg_mp (cfg);
EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
- emit_write_barrier (cfg, ptr, sp [1], -1);
+ emit_write_barrier (cfg, ptr, sp [1]);
}
store->flags |= ins_flag;
break;
}
+#ifndef DISABLE_REMOTING
if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
MonoInst *iargs [4];
ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
*sp++ = ins;
}
- } else if (is_instance) {
+ } else
+#endif
+ if (is_instance) {
if (sp [0]->type == STACK_VTYPE) {
MonoInst *var;
if (mini_is_gsharedvt_klass (cfg, klass)) {
MonoInst *offset_ins;
- offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
+ offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
} else {
EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
if (mini_is_gsharedvt_klass (cfg, klass)) {
MonoInst *offset_ins;
- offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
+ offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
dreg = alloc_ireg_mp (cfg);
EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
generic_class_is_reference_type (cfg, klass)) {
/* insert call to write barrier */
- emit_write_barrier (cfg, sp [0], sp [1], -1);
+ emit_write_barrier (cfg, sp [0], sp [1]);
}
ins_flag = 0;
ip += 5;
if (context_used) {
MonoInst *args [3];
MonoClass *array_class = mono_array_class_get (klass, 1);
- /* FIXME: we cannot get a managed
- allocator because we can't get the
- open generic class's vtable. We
- have the same problem in
- handle_alloc(). This
- needs to be solved so that we can
- have managed allocs of shared
- generic classes. */
- /*
- MonoVTable *array_class_vtable = mono_class_vtable (cfg->domain, array_class);
- MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class_vtable, 1);
- */
- MonoMethod *managed_alloc = NULL;
+ MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
- /* FIXME: Decompose later to help abcrem */
+ /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
/* vtable */
args [0] = emit_get_rgctx_klass (cfg, context_used,
g_assert_not_reached ();
}
} else if (cfg->compile_aot) {
- EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
+ EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
} else {
EMIT_NEW_PCONST (cfg, ins, handle);
}
token = read32 (ip + 2);
ptr = mono_method_get_wrapper_data (method, token);
- if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
- MonoJitICallInfo *callinfo;
- const char *icall_name;
-
- icall_name = method->name + strlen ("__icall_wrapper_");
- g_assert (icall_name);
- callinfo = mono_find_jit_icall_by_name (icall_name);
- g_assert (callinfo);
-
- if (ptr == callinfo->func) {
- /* Will be transformed into an AOTCONST later */
- EMIT_NEW_PCONST (cfg, ins, ptr);
- *sp++ = ins;
- ip += 6;
- break;
- }
- }
/* FIXME: Generalize this */
if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
cfg->disable_aot = 1;
break;
}
+ case CEE_MONO_JIT_ICALL_ADDR: {
+ MonoJitICallInfo *callinfo;
+ gpointer ptr;
+
+ CHECK_STACK_OVF (1);
+ CHECK_OPSIZE (6);
+ token = read32 (ip + 2);
+
+ ptr = mono_method_get_wrapper_data (method, token);
+ callinfo = mono_find_jit_icall_by_addr (ptr);
+ g_assert (callinfo);
+ EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
+ *sp++ = ins;
+ ip += 6;
+ inline_costs += 10 * num_calls++;
+ break;
+ }
case CEE_MONO_ICALL_ADDR: {
MonoMethod *cmethod;
gpointer ptr;
lmf_ins = mono_get_lmf_intrinsic (cfg);
#endif
-#ifdef MONO_ARCH_HAVE_TLS_GET
if (MONO_ARCH_HAVE_TLS_GET && ad_ins && lmf_ins) {
NEW_BBLOCK (cfg, next_bb);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, lmf_ins->dreg, 0);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, next_bb);
}
-#endif
if (cfg->compile_aot) {
/* AOT code is only used in the root domain */
if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
METHOD_ACCESS_FAILURE;
- if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
+ if (mono_security_cas_enabled ()) {
if (check_linkdemand (cfg, method, cmethod))
INLINE_FAILURE ("linkdemand");
CHECK_CFG_EXCEPTION;
- } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
+ } else if (mono_security_core_clr_enabled ()) {
ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
- }
+ }
/*
* Optimize the common case of ldftn+delegate creation
target_ins = sp [-1];
- if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
+ if (mono_security_core_clr_enabled ())
ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
context_used = mini_method_check_context_used (cfg, cmethod);
- if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
+ if (mono_security_cas_enabled ()) {
if (check_linkdemand (cfg, method, cmethod))
INLINE_FAILURE ("linkdemand");
CHECK_CFG_EXCEPTION;
- } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
+ } else if (mono_security_core_clr_enabled ()) {
ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
}
MONO_ADD_INS (cfg->bb_exit, ins);
}
+ /*
+ * Add seq points for IL offsets which have line number info, but for which no seq point was generated
+ * during JITting because the code they refer to was dead (#11880).
+ */
+ if (sym_seq_points) {
+ for (i = 0; i < header->code_size; ++i) {
+ if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
+ MonoInst *ins;
+
+ NEW_SEQ_POINT (cfg, ins, i, FALSE);
+ mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
+ }
+ }
+ }
+
cfg->ip = NULL;
if (cfg->method == method) {
#if SIZEOF_REGISTER == 8
case STACK_I8:
#endif
-#if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
+#if !defined(TARGET_X86)
/* Enabling this screws up the fp stack on x86 */
case STACK_R8:
#endif
+ if (mono_arch_is_soft_float ())
+ break;
+
/* Arguments are implicitly global */
/* Putting R4 vars into registers doesn't work currently */
- if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
+ /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
+ if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var) {
/*
* Make that the variable's liveness interval doesn't contain a call, since
* that would cause the lvreg to be spilled, making the whole optimization
guint32 stacktypes [128];
MonoInst **live_range_start, **live_range_end;
MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
+ int *gsharedvt_vreg_to_idx = NULL;
*need_local_opts = FALSE;
ins->flags |= MONO_INST_GC_TRACK;
}
}
+
+ if (cfg->gsharedvt) {
+ gsharedvt_vreg_to_idx = g_new0 (int, cfg->next_vreg);
+
+ for (i = 0; i < cfg->num_varinfo; ++i) {
+ MonoInst *ins = cfg->varinfo [i];
+ int idx;
+
+ if (mini_is_gsharedvt_variable_type (cfg, ins->inst_vtype)) {
+ if (i >= cfg->locals_start) {
+ /* Local */
+ idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
+ gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
+ ins->opcode = OP_GSHAREDVT_LOCAL;
+ ins->inst_imm = idx;
+ } else {
+ /* Arg */
+ gsharedvt_vreg_to_idx [ins->dreg] = -1;
+ ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
+ }
+ }
+ }
+ }
/* FIXME: widening and truncation */
ins->inst_offset = vtaddr->inst_offset;
} else
NOT_IMPLEMENTED;
+ } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg] < 0) {
+ /* gsharedvt arg passed by ref */
+ g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
+
+ ins->opcode = OP_LOAD_MEMBASE;
+ ins->inst_basereg = var->inst_basereg;
+ ins->inst_offset = var->inst_offset;
+ } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg]) {
+ MonoInst *load, *load2, *load3;
+ int idx = gsharedvt_vreg_to_idx [var->dreg] - 1;
+ int reg1, reg2, reg3;
+ MonoInst *info_var = cfg->gsharedvt_info_var;
+ MonoInst *locals_var = cfg->gsharedvt_locals_var;
+
+ /*
+ * gsharedvt local.
+ * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
+ */
+
+ g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
+
+ g_assert (info_var);
+ g_assert (locals_var);
+
+ /* Mark the instruction used to compute the locals var as used */
+ cfg->gsharedvt_locals_var_ins = NULL;
+
+ /* Load the offset */
+ if (info_var->opcode == OP_REGOFFSET) {
+ reg1 = alloc_ireg (cfg);
+ NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
+ } else if (info_var->opcode == OP_REGVAR) {
+ load = NULL;
+ reg1 = info_var->dreg;
+ } else {
+ g_assert_not_reached ();
+ }
+ reg2 = alloc_ireg (cfg);
+ NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, G_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
+ /* Load the locals area address */
+ reg3 = alloc_ireg (cfg);
+ if (locals_var->opcode == OP_REGOFFSET) {
+ NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
+ } else if (locals_var->opcode == OP_REGVAR) {
+ NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
+ } else {
+ g_assert_not_reached ();
+ }
+ /* Compute the address */
+ ins->opcode = OP_PADD;
+ ins->sreg1 = reg3;
+ ins->sreg2 = reg2;
+
+ mono_bblock_insert_before_ins (bb, ins, load3);
+ mono_bblock_insert_before_ins (bb, load3, load2);
+ if (load)
+ mono_bblock_insert_before_ins (bb, load2, load);
} else {
g_assert (var->opcode == OP_REGOFFSET);
ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
+#if SIZEOF_REGISTER != 8
if (regtype == 'l') {
NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
mono_bblock_insert_after_ins (bb, ins, store_ins);
mono_bblock_insert_after_ins (bb, ins, store_ins);
def_ins = store_ins;
}
- else {
+ else
+#endif
+ {
g_assert (store_opcode != OP_STOREV_MEMBASE);
/* Try to fuse the store into the instruction itself */
sregs [srcindex] = sreg;
//mono_inst_set_src_registers (ins, sregs);
+#if SIZEOF_REGISTER != 8
if (regtype == 'l') {
NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
mono_bblock_insert_before_ins (bb, ins, load_ins);
mono_bblock_insert_before_ins (bb, ins, load_ins);
use_ins = load_ins;
}
- else {
+ else
+#endif
+ {
#if SIZEOF_REGISTER == 4
g_assert (load_opcode != OP_LOADI8_MEMBASE);
#endif
}
#endif
+ if (cfg->gsharedvt_locals_var_ins) {
+ /* Nullify if unused */
+ cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
+ cfg->gsharedvt_locals_var_ins->inst_imm = 0;
+ }
+
g_free (live_range_start);
g_free (live_range_end);
g_free (live_range_start_bb);