extern MonoMethodSignature *helper_sig_class_init_trampoline;
extern MonoMethodSignature *helper_sig_domain_get;
extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
+extern MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm;
extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm;
static int
mono_find_block_region (MonoCompile *cfg, int offset)
{
- MonoMethod *method = cfg->method;
- MonoMethodHeader *header = mono_method_get_header (method);
+ MonoMethodHeader *header = cfg->header;
MonoExceptionClause *clause;
int i;
static GList*
mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
{
- MonoMethod *method = cfg->method;
- MonoMethodHeader *header = mono_method_get_header (method);
+ MonoMethodHeader *header = cfg->header;
MonoExceptionClause *clause;
- MonoBasicBlock *handler;
int i;
GList *res = NULL;
clause = &header->clauses [i];
if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
(!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
- if (clause->flags == type) {
- handler = cfg->cil_offset_to_bb [clause->handler_offset];
- g_assert (handler);
- res = g_list_append (res, handler);
- }
+ if (clause->flags == type)
+ res = g_list_append (res, clause);
}
}
return res;
int pos, vnum;
/* inlining can result in deeper stacks */
- if (slot >= mono_method_get_header (cfg->method)->max_stack)
+ if (slot >= cfg->header->max_stack)
return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
pos = ins->type - 1 + slot * STACK_MAX;
}
}
-/*
- * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
- * stored in "klass_reg" implements the interface "klass".
- */
static void
-mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
+mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
{
int ibitmap_reg = alloc_preg (cfg);
+#ifdef COMPRESSED_INTERFACE_BITMAP
+ MonoInst *args [2];
+ MonoInst *res, *ins;
+ NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
+ MONO_ADD_INS (cfg->cbb, ins);
+ args [0] = ins;
+ if (cfg->compile_aot)
+ EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
+ else
+ EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
+ res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
+ MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
+#else
int ibitmap_byte_reg = alloc_preg (cfg);
- MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap));
+ MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
if (cfg->compile_aot) {
int iid_reg = alloc_preg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
}
+#endif
+}
+
+/*
+ * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
+ * stored in "klass_reg" implements the interface "klass".
+ */
+static void
+mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
+{
+ mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
}
/*
static void
mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
{
- int ibitmap_reg = alloc_preg (cfg);
- int ibitmap_byte_reg = alloc_preg (cfg);
-
- MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap));
-
- if (cfg->compile_aot) {
- int iid_reg = alloc_preg (cfg);
- int shifted_iid_reg = alloc_preg (cfg);
- int ibitmap_byte_address_reg = alloc_preg (cfg);
- int masked_iid_reg = alloc_preg (cfg);
- int iid_one_bit_reg = alloc_preg (cfg);
- int iid_bit_reg = alloc_preg (cfg);
- MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
- MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, shifted_iid_reg, iid_reg, 3);
- MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
- MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
- MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, masked_iid_reg, iid_reg, 7);
- MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
- MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
- MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
- } else {
- MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
- MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
- }
+ mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
}
/*
}
}
-#endif /* DISABLE_JIT */
-
void
mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
{
}
}
-#ifndef DISABLE_JIT
-
static int
ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
{
static void
emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
{
+ int method_reg;
+
+ if (COMPILE_LLVM (cfg)) {
+ method_reg = alloc_preg (cfg);
+
+ if (imt_arg) {
+ MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
+ } else if (cfg->compile_aot) {
+ MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
+ } else {
+ MonoInst *ins;
+ MONO_INST_NEW (cfg, ins, OP_PCONST);
+ ins->inst_p0 = call->method;
+ ins->dreg = method_reg;
+ MONO_ADD_INS (cfg->cbb, ins);
+ }
+
+#ifdef ENABLE_LLVM
+ call->imt_arg_reg = method_reg;
+#endif
+#ifdef MONO_ARCH_IMT_REG
+ mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
+#else
+ /* Need this to keep the IMT arg alive */
+ mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
+#endif
+ return;
+ }
+
#ifdef MONO_ARCH_IMT_REG
- int method_reg = alloc_preg (cfg);
+ method_reg = alloc_preg (cfg);
if (imt_arg) {
MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
return (MonoInst*)call;
}
+static void
+set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
+{
+#ifdef MONO_ARCH_RGCTX_REG
+ mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
+ cfg->uses_rgctx_reg = TRUE;
+ call->rgctx_reg = TRUE;
+#ifdef ENABLE_LLVM
+ call->rgctx_arg_reg = rgctx_reg;
+#endif
+#else
+ NOT_IMPLEMENTED;
+#endif
+}
+
inline static MonoInst*
mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
{
-#ifdef MONO_ARCH_RGCTX_REG
MonoCallInst *call;
int rgctx_reg = -1;
MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
}
call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
- if (rgctx_arg) {
- mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
- cfg->uses_rgctx_reg = TRUE;
- call->rgctx_reg = TRUE;
- }
+ if (rgctx_arg)
+ set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
return (MonoInst*)call;
-#else
- g_assert_not_reached ();
- return NULL;
-#endif
}
static MonoInst*
mono_emit_rgctx_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *vtable_arg)
{
-#ifdef MONO_ARCH_RGCTX_REG
int rgctx_reg = 0;
-#endif
MonoInst *ins;
MonoCallInst *call;
if (vtable_arg) {
-#ifdef MONO_ARCH_RGCTX_REG
rgctx_reg = mono_alloc_preg (cfg);
MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
-#else
- NOT_IMPLEMENTED;
-#endif
}
ins = mono_emit_method_call_full (cfg, method, sig, args, this, imt_arg);
call = (MonoCallInst*)ins;
- if (vtable_arg) {
-#ifdef MONO_ARCH_RGCTX_REG
- mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
- cfg->uses_rgctx_reg = TRUE;
- call->rgctx_reg = TRUE;
-#else
- NOT_IMPLEMENTED;
-#endif
- }
+ if (vtable_arg)
+ set_rgctx_arg (cfg, call, rgctx_reg, vtable_arg);
return ins;
}
return memcpy_method;
}
+static void
+create_write_barrier_bitmap (MonoClass *klass, unsigned *wb_bitmap, int offset)
+{
+ MonoClassField *field;
+ gpointer iter = NULL;
+
+ while ((field = mono_class_get_fields (klass, &iter))) {
+ int foffset;
+
+ if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
+ continue;
+ foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
+ if (mono_type_is_reference (field->type)) {
+ g_assert ((foffset % SIZEOF_VOID_P) == 0);
+ *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
+ } else {
+ /*FIXME support nested value types so this works for: struct X { Y y; int z;} struct Y { object a,b; }*/
+ MonoClass *field_class = mono_class_from_mono_type (field->type);
+ if (field_class->has_references)
+ create_write_barrier_bitmap (field_class, wb_bitmap, offset + foffset);
+ }
+ }
+}
+
+static gboolean
+mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
+{
+ int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
+ unsigned need_wb = 0;
+
+ if (align == 0)
+ align = 4;
+
+ /* Types with references can't have an alignment smaller than sizeof(void*). */
+ if (align < SIZEOF_VOID_P)
+ return FALSE;
+
+ /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
+ if (size > 32 * SIZEOF_VOID_P)
+ return FALSE;
+
+ create_write_barrier_bitmap (klass, &need_wb, 0);
+
+ /* We don't unroll more than 5 stores to avoid code bloat. */
+ if (size > 5 * SIZEOF_VOID_P) {
+ /* This is harmless and simplifies mono_gc_wbarrier_value_copy_bitmap. */
+ size += (SIZEOF_VOID_P - 1);
+ size &= ~(SIZEOF_VOID_P - 1);
+
+ EMIT_NEW_ICONST (cfg, iargs [2], size);
+ EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
+ mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
+ return TRUE;
+ }
+
+ destreg = iargs [0]->dreg;
+ srcreg = iargs [1]->dreg;
+ offset = 0;
+
+ dest_ptr_reg = alloc_preg (cfg);
+ tmp_reg = alloc_preg (cfg);
+
+ /* dest_ptr = destreg */
+ EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
+
+ while (size >= SIZEOF_VOID_P) {
+ MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, tmp_reg, srcreg, offset);
+ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
+
+ if (need_wb & 0x1) {
+ MonoInst *dummy_use;
+
+ MonoMethod *write_barrier = mono_gc_get_write_barrier ();
+ mono_emit_method_call (cfg, write_barrier, &iargs [0], NULL);
+
+ MONO_INST_NEW (cfg, dummy_use, OP_DUMMY_USE);
+ dummy_use->sreg1 = dest_ptr_reg;
+ MONO_ADD_INS (cfg->cbb, dummy_use);
+ }
+
+
+ offset += SIZEOF_VOID_P;
+ size -= SIZEOF_VOID_P;
+ need_wb >>= 1;
+
+ /* dest_ptr += sizeof (void*) */
+ if (size >= SIZEOF_VOID_P) {
+ NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
+ MONO_ADD_INS (cfg->cbb, iargs [0]);
+ }
+ }
+
+ /* These cannot be references since size < sizeof (void*). */
+ while (size >= 4) {
+ MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
+ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
+ offset += 4;
+ size -= 4;
+ }
+
+ while (size >= 2) {
+ MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
+ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
+ offset += 2;
+ size -= 2;
+ }
+
+ while (size >= 1) {
+ MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
+ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
+ offset += 1;
+ size -= 1;
+ }
+
+ return TRUE;
+}
+
/*
* Emit code to copy a valuetype of type @klass whose address is stored in
* @src->dreg to memory whose address is stored at @dest->dreg.
void
mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
{
- MonoInst *iargs [3];
+ MonoInst *iargs [4];
int n;
guint32 align = 0;
MonoMethod *memcpy_method;
else
n = mono_class_value_size (klass, &align);
-#if HAVE_WRITE_BARRIERS
/* if native is true there should be no references in the struct */
- if (klass->has_references && !native) {
+ if (cfg->gen_write_barriers && klass->has_references && !native) {
/* Avoid barriers when storing to the stack */
if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
(dest->opcode == OP_LDADDR))) {
if (cfg->generic_sharing_context)
context_used = mono_class_check_context_used (klass);
- if (context_used) {
+
+ /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
+ if ((cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
+ return;
+ } else if (context_used) {
iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
- } else {
+ } else {
if (cfg->compile_aot) {
EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
} else {
}
mono_emit_jit_icall (cfg, mono_value_copy, iargs);
+ return;
}
}
-#endif
if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
/* FIXME: Optimize the case when src/dest is OP_LDADDR */
EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
}
- call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
+ if (COMPILE_LLVM (cfg))
+ call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
+ else
+ call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
#ifdef MONO_ARCH_VTABLE_REG
mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
cfg->uses_vtable_reg = TRUE;
* Returns NULL and set the cfg exception on error.
*/
static MonoInst*
-handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box)
+handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
{
MonoInst *iargs [2];
void *alloc_ftn;
+ if (context_used) {
+ MonoInst *data;
+ int rgctx_info;
+ MonoInst *iargs [2];
+
+ /*
+ FIXME: we cannot get managed_alloc here because we can't get
+ the class's vtable (because it's not a closed class)
+
+ MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
+ MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
+ */
+
+ if (cfg->opt & MONO_OPT_SHARED)
+ rgctx_info = MONO_RGCTX_INFO_KLASS;
+ else
+ rgctx_info = MONO_RGCTX_INFO_VTABLE;
+ data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
+
+ if (cfg->opt & MONO_OPT_SHARED) {
+ EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
+ iargs [1] = data;
+ alloc_ftn = mono_object_new;
+ } else {
+ iargs [0] = data;
+ alloc_ftn = mono_object_new_specific;
+ }
+
+ return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
+ }
+
if (cfg->opt & MONO_OPT_SHARED) {
EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
}
-
-static MonoInst*
-handle_alloc_from_inst (MonoCompile *cfg, MonoClass *klass, MonoInst *data_inst,
- gboolean for_box)
-{
- MonoInst *iargs [2];
- MonoMethod *managed_alloc = NULL;
- void *alloc_ftn;
-
- /*
- FIXME: we cannot get managed_alloc here because we can't get
- the class's vtable (because it's not a closed class)
-
- MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
- MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
- */
-
- if (cfg->opt & MONO_OPT_SHARED) {
- EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
- iargs [1] = data_inst;
- alloc_ftn = mono_object_new;
- } else {
- if (managed_alloc) {
- iargs [0] = data_inst;
- return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
- }
-
- iargs [0] = data_inst;
- alloc_ftn = mono_object_new_specific;
- }
-
- return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
-}
/*
* Returns NULL and set the cfg exception on error.
*/
static MonoInst*
-handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass)
+handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
{
MonoInst *alloc, *ins;
if (mono_class_is_nullable (klass)) {
MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
- return mono_emit_method_call (cfg, method, &val, NULL);
+
+ if (context_used) {
+ /* FIXME: What if the class is shared? We might not
+ have to get the method address from the RGCTX. */
+ MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
+ MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
+ MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
+
+ return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
+ } else {
+ return mono_emit_method_call (cfg, method, &val, NULL);
+ }
}
- alloc = handle_alloc (cfg, klass, TRUE);
+ alloc = handle_alloc (cfg, klass, TRUE, context_used);
if (!alloc)
return NULL;
return alloc;
}
-static MonoInst *
-handle_box_from_inst (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoInst *data_inst)
-{
- MonoInst *alloc, *ins;
-
- if (mono_class_is_nullable (klass)) {
- MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
- /* FIXME: What if the class is shared? We might not
- have to get the method address from the RGCTX. */
- MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
- MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
- MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
-
- return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
- } else {
- alloc = handle_alloc_from_inst (cfg, klass, data_inst, TRUE);
-
- EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
-
- return alloc;
- }
-}
+// FIXME: This doesn't work yet (class libs tests fail?)
+#define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || klass->marshalbyref || (klass->flags & TYPE_ATTRIBUTE_SEALED) || mono_class_has_variant_generic_params (klass) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
/*
* Returns NULL and set the cfg exception on error.
int vtable_reg = alloc_preg (cfg);
MonoInst *klass_inst = NULL;
- NEW_BBLOCK (cfg, is_null_bb);
-
- MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
- MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
-
- save_cast_details (cfg, klass, obj_reg);
-
if (context_used) {
MonoInst *args [2];
klass_inst = emit_get_rgctx_klass (cfg, context_used,
klass, MONO_RGCTX_INFO_KLASS);
- // FIXME: This doesn't work yet (mcs/tests/gtest-304.cs fails)
- if (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || klass->marshalbyref || (klass->flags & TYPE_ATTRIBUTE_SEALED) || mono_class_has_variant_generic_params (klass)) {
+ if (is_complex_isinst (klass)) {
/* Complex case, handle by an icall */
/* obj */
}
}
+ NEW_BBLOCK (cfg, is_null_bb);
+
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
+ MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
+
+ save_cast_details (cfg, klass, obj_reg);
+
if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
if (context_used) {
klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
- // FIXME: This doesn't work yet (mcs/tests/gtest-304.cs fails)
- if (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || klass->marshalbyref || (klass->flags & TYPE_ATTRIBUTE_SEALED) || mono_class_has_variant_generic_params (klass)) {
+ if (is_complex_isinst (klass)) {
MonoInst *args [2];
/* Complex case, handle by an icall */
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
+ save_cast_details (cfg, klass, obj_reg);
+
if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
NEW_BBLOCK (cfg, interface_fail_bb);
MonoDomain *domain;
guint8 **code_slot;
- obj = handle_alloc (cfg, klass, FALSE);
+ obj = handle_alloc (cfg, klass, FALSE, 0);
if (!obj)
return NULL;
static gboolean
mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
{
- MonoMethodHeader *header;
+ MonoMethodHeaderSummary header;
MonoVTable *vtable;
#ifdef MONO_ARCH_SOFT_FLOAT
MonoMethodSignature *sig = mono_method_signature (method);
return TRUE;
#endif
- if (method->is_inflated)
- /* Avoid inflating the header */
- header = mono_method_get_header (((MonoMethodInflated*)method)->declaring);
- else
- header = mono_method_get_header (method);
- if ((method->iflags & METHOD_IMPL_ATTRIBUTE_RUNTIME) ||
- (method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
- (method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
+ if (!mono_method_get_header_summary (method, &header))
+ return FALSE;
+
+ /*runtime, icall and pinvoke are checked by summary call*/
+ if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
(method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
- (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) ||
(method->klass->marshalbyref) ||
- !header || header->num_clauses)
+ header.has_clauses)
return FALSE;
/* also consider num_locals? */
inline_limit = INLINE_LENGTH_LIMIT;
inline_limit_inited = TRUE;
}
- if (header->code_size >= inline_limit)
+ if (header.code_size >= inline_limit)
return FALSE;
/*
}
static MonoInst*
-mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index)
+mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
{
MonoInst *ins;
guint32 size;
}
#endif
- MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
+ if (bcheck)
+ MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
#if defined(TARGET_X86) || defined(TARGET_AMD64)
if (size == 1 || size == 2 || size == 4 || size == 8) {
rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
if (rank == 1)
- return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1]);
+ return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
#ifndef MONO_ARCH_EMULATE_MUL_DIV
/* emit_ldelema_2 depends on OP_LMUL */
}
}
+/* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
+static MonoInst*
+emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
+{
+ MonoInst *addr, *store, *load;
+ MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
+
+ /* the bounds check is already done by the callers */
+ addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
+ if (is_set) {
+ EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
+ EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
+ } else {
+ EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
+ EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
+ }
+ return store;
+}
+
+static MonoInst*
+mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
+{
+ MonoInst *ins = NULL;
+#ifdef MONO_ARCH_SIMD_INTRINSICS
+ if (cfg->opt & MONO_OPT_SIMD) {
+ ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
+ if (ins)
+ return ins;
+ }
+#endif
+
+ return ins;
+}
+
static MonoInst*
mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
+ return cfg->cbb->last_ins;
} else
return NULL;
} else if (cmethod->klass == mono_defaults.object_class) {
} else
return NULL;
} else if (cmethod->klass == mono_defaults.array_class) {
+ if (strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
+ return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
if (cmethod->name [0] != 'g')
return NULL;
return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
#endif
- } else if (mini_class_is_system_array (cmethod->klass) &&
- strcmp (cmethod->name, "GetGenericValueImpl") == 0) {
- MonoInst *addr, *store, *load;
- MonoClass *eklass = mono_class_from_mono_type (fsig->params [1]);
-
- addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1]);
- EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
- EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
- return store;
} else if (cmethod->klass->image == mono_defaults.corlib &&
(strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
(strcmp (cmethod->klass->name, "Interlocked") == 0)) {
g_assert_not_reached ();
}
-#if HAVE_WRITE_BARRIERS
- if (is_ref) {
+ if (cfg->gen_write_barriers && is_ref) {
+ MonoInst *dummy_use;
MonoMethod *write_barrier = mono_gc_get_write_barrier ();
mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
+ EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [1]);
}
-#endif
}
#endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
size = 4;
else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
size = sizeof (gpointer);
- else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I4)
+ else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
size = 8;
if (size == 4) {
MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
} else {
/* g_assert_not_reached (); */
}
-#if HAVE_WRITE_BARRIERS
- if (is_ref) {
+ if (cfg->gen_write_barriers && is_ref) {
+ MonoInst *dummy_use;
MonoMethod *write_barrier = mono_gc_get_write_barrier ();
mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
+ EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [1]);
}
-#endif
}
#endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
{
if (method->klass == mono_defaults.string_class) {
/* managed string allocation support */
- if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_STRING_ALLOC)) {
+ if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS)) {
MonoInst *iargs [2];
MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
MonoMethod *managed_alloc = NULL;
mono_jit_stats.inlineable_methods++;
cmethod->inline_info = 1;
}
+
+ /* allocate local variables */
+ cheader = mono_method_get_header (cmethod);
+
+ if (cheader == NULL || mono_loader_get_last_error ()) {
+ if (cheader)
+ mono_metadata_free_mh (cheader);
+ mono_loader_clear_error ();
+ return 0;
+ }
+
/* allocate space to store the return value */
if (!MONO_TYPE_IS_VOID (fsig->ret)) {
rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
}
- /* allocate local variables */
- cheader = mono_method_get_header (cmethod);
+
prev_locals = cfg->locals;
cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
for (i = 0; i < cheader->num_locals; ++i)
EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
*sp++ = ins;
}
+ cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
return costs + 1;
} else {
if (cfg->verbose_level > 2)
/* This gets rid of the newly added bblocks */
cfg->cbb = prev_cbb;
}
+ cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
return 0;
}
ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
MonoBasicBlock *bblock, unsigned char *ip)
{
- /* there's no restriction to access Transparent or SafeCritical fields, so we only check calls to Critical methods */
- if (mono_security_core_clr_class_level (mono_field_get_parent (field)) != MONO_SECURITY_CORE_CLR_CRITICAL)
- return;
-
/* we can't get the coreclr security level on wrappers since they don't have the attributes */
- caller = get_original_method (caller);
- if (!caller)
- return;
-
- /* caller is Critical! only SafeCritical and Critical callers can access the field, so we throw if caller is Transparent */
- if (mono_security_core_clr_method_level (caller, TRUE) == MONO_SECURITY_CORE_CLR_TRANSPARENT)
- emit_throw_exception (cfg, mono_get_exception_field_access ());
+ MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
+ if (ex)
+ emit_throw_exception (cfg, ex);
}
static void
ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
MonoBasicBlock *bblock, unsigned char *ip)
{
- /* there's no restriction to call Transparent or SafeCritical code, so we only check calls to Critical methods */
- if (mono_security_core_clr_method_level (callee, TRUE) != MONO_SECURITY_CORE_CLR_CRITICAL)
- return;
-
/* we can't get the coreclr security level on wrappers since they don't have the attributes */
- caller = get_original_method (caller);
- if (!caller)
- return;
-
- /* caller is Critical! only SafeCritical and Critical callers can call it, so we throw if the caller is Transparent */
- if (mono_security_core_clr_method_level (caller, TRUE) == MONO_SECURITY_CORE_CLR_TRANSPARENT)
- emit_throw_exception (cfg, mono_get_exception_method_access ());
+ MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
+ if (ex)
+ emit_throw_exception (cfg, ex);
}
/*
{
char *method_fname = mono_method_full_name (method, TRUE);
char *method_code;
+ MonoMethodHeader *header = mono_method_get_header (method);
- if (mono_method_get_header (method)->code_size == 0)
+ if (header->code_size == 0)
method_code = g_strdup ("method body is empty.");
else
method_code = mono_disasm_code_one (NULL, method, ip, NULL);
cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
g_free (method_fname);
g_free (method_code);
+ cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
}
static void
MonoError error;
MonoInst *ins, **sp, **stack_start;
MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
- MonoSimpleBasicBlock *bb = NULL;
+ MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
MonoMethod *cmethod, *method_definition;
MonoInst **arg_array;
MonoMethodHeader *header;
image = method->klass->image;
header = mono_method_get_header (method);
+ if (!header) {
+ MonoLoaderError *error;
+
+ if ((error = mono_loader_get_last_error ())) {
+ cfg->exception_type = error->exception_type;
+ } else {
+ cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
+ cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
+ }
+ goto exception_exit;
+ }
generic_container = mono_method_get_generic_container (method);
sig = mono_method_signature (method);
num_args = sig->hasthis + sig->param_count;
skip_dead_blocks = !dont_verify;
if (skip_dead_blocks) {
- bb = mono_basic_block_split (method, &error);
+ original_bb = bb = mono_basic_block_split (method, &error);
if (!mono_error_ok (&error)) {
mono_error_cleanup (&error);
UNVERIFIED;
/* MS.NET seems to silently convert this to a callvirt */
virtual = 1;
+ {
+ /*
+ * MS.NET accepts non-virtual calls to virtual final methods of transparent proxy classes and
+ * converts to a callvirt.
+ *
+ * tests/bug-515884.il is an example of this behavior
+ */
+ const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
+ const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
+ if (!virtual && cmethod->klass->marshalbyref && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
+ virtual = 1;
+ }
+
if (!cmethod->klass->inited)
if (!mono_class_init (cmethod->klass))
goto load_error;
array_rank = cmethod->klass->rank;
fsig = mono_method_signature (cmethod);
} else {
- if (mono_method_signature (cmethod)->pinvoke) {
+ fsig = mono_method_signature (cmethod);
+
+ if (!fsig)
+ goto load_error;
+
+ if (fsig->pinvoke) {
MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
check_for_pending_exc, FALSE);
fsig = mono_method_signature (wrapper);
*/
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
ins->klass = constrained_call;
- sp [0] = handle_box (cfg, ins, constrained_call);
+ sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call));
CHECK_CFG_EXCEPTION;
} else if (!constrained_call->valuetype) {
int dreg = alloc_preg (cfg);
* emit_get_rgctx_method () calls mono_class_vtable () so check
* for type load errors before.
*/
- mono_class_vtable (cfg->domain, cmethod->klass);
+ mono_class_setup_vtable (cmethod->klass);
CHECK_TYPELOAD (cmethod->klass);
}
/* Prevent inlining of methods that contain indirect calls */
INLINE_FAILURE;
-#if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK
- /* The llvm vcall trampolines doesn't support generic virtual calls yet */
- if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt && !mono_use_llvm) {
+#if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
+ if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt) {
g_assert (!imt_arg);
if (!context_used)
g_assert (cmethod->is_inflated);
if (!MONO_TYPE_IS_VOID (fsig->ret))
*sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
+ CHECK_CFG_EXCEPTION;
+
ip += 5;
ins_flag = 0;
break;
MONO_ADD_INS (bblock, ins);
link_bblock (cfg, bblock, end_bblock);
start_new_bblock = 1;
+
+ CHECK_CFG_EXCEPTION;
+
/* skip CEE_RET as well */
ip += 6;
ins_flag = 0;
sp++;
}
+ CHECK_CFG_EXCEPTION;
+
ip += 5;
ins_flag = 0;
break;
INLINE_FAILURE;
if (vtable_arg) {
-#ifdef MONO_ARCH_RGCTX_REG
MonoCallInst *call;
int rgctx_reg = mono_alloc_preg (cfg);
MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
call = (MonoCallInst*)ins;
- mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
- cfg->uses_rgctx_reg = TRUE;
- call->rgctx_reg = TRUE;
-#else
- NOT_IMPLEMENTED;
-#endif
+ set_rgctx_arg (cfg, call, rgctx_reg, vtable_arg);
} else {
if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
/*
if (!MONO_TYPE_IS_VOID (fsig->ret))
*sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
+ CHECK_CFG_EXCEPTION;
+
ip += 5;
ins_flag = 0;
break;
g_assert_not_reached ();
}
+ CHECK_CFG_EXCEPTION;
+
ip += 5;
ins_flag = 0;
break;
if (!MONO_TYPE_IS_VOID (fsig->ret))
*sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
+ CHECK_CFG_EXCEPTION;
+
ip += 5;
ins_flag = 0;
break;
if (!MONO_TYPE_IS_VOID (fsig->ret))
*sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
+ CHECK_CFG_EXCEPTION;
+
ip += 5;
ins_flag = 0;
break;
ins_flag = 0;
MONO_ADD_INS (bblock, ins);
-#if HAVE_WRITE_BARRIERS
- if (*ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0))) {
+ if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0))) {
+ MonoInst *dummy_use;
/* insert call to write barrier */
MonoMethod *write_barrier = mono_gc_get_write_barrier ();
mono_emit_method_call (cfg, write_barrier, sp, NULL);
+ EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
}
-#endif
inline_costs += 1;
++ip;
case CEE_CONV_U:
CHECK_STACK (1);
ADD_UNOP (*ip);
+ CHECK_CFG_EXCEPTION;
ip++;
break;
case CEE_ADD_OVF:
NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
store->flags |= ins_flag;
MONO_ADD_INS (cfg->cbb, store);
+
+ if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER) {
+ MonoInst *dummy_use;
+ MonoMethod *write_barrier = mono_gc_get_write_barrier ();
+ mono_emit_method_call (cfg, write_barrier, sp, NULL);
+ EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
+ }
} else {
mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
}
* will be transformed into a normal call there.
*/
} else if (context_used) {
- MonoInst *data;
- int rgctx_info;
-
- if (cfg->opt & MONO_OPT_SHARED)
- rgctx_info = MONO_RGCTX_INFO_KLASS;
- else
- rgctx_info = MONO_RGCTX_INFO_VTABLE;
- data = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, rgctx_info);
-
- alloc = handle_alloc_from_inst (cfg, cmethod->klass, data, FALSE);
+ alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
*sp = alloc;
} else {
MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
class_inits = g_slist_prepend (class_inits, vtable);
}
- alloc = handle_alloc (cfg, cmethod->klass, FALSE);
+ alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
*sp = alloc;
}
CHECK_CFG_EXCEPTION; /*for handle_alloc*/
if (cmethod->klass->marshalbyref)
callvirt_this_arg = sp [0];
+
+ if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
+ if (!MONO_TYPE_IS_VOID (fsig->ret)) {
+ type_to_eval_stack_type ((cfg), fsig->ret, ins);
+ *sp = ins;
+ sp++;
+ }
+
+ CHECK_CFG_EXCEPTION;
+ } else
+
+
+
if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
mono_method_check_inlining (cfg, cmethod) &&
!mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
break;
}
- if (context_used) {
- MonoInst *data;
- int rgctx_info;
-
- if (cfg->opt & MONO_OPT_SHARED)
- rgctx_info = MONO_RGCTX_INFO_KLASS;
- else
- rgctx_info = MONO_RGCTX_INFO_VTABLE;
- data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
- *sp++ = handle_box_from_inst (cfg, val, klass, context_used, data);
- } else {
- *sp++ = handle_box (cfg, val, klass);
- }
+ *sp++ = handle_box (cfg, val, klass, context_used);
CHECK_CFG_EXCEPTION;
ip += 5;
MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
+ if (sp [0]->opcode != OP_LDADDR)
+ store->flags |= MONO_INST_FAULT;
-#if HAVE_WRITE_BARRIERS
- if (mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
+ if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
/* insert call to write barrier */
MonoMethod *write_barrier = mono_gc_get_write_barrier ();
- MonoInst *iargs [2];
+ MonoInst *iargs [2], *dummy_use;
int dreg;
dreg = alloc_preg (cfg);
EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
iargs [1] = sp [1];
mono_emit_method_call (cfg, write_barrier, iargs, NULL);
+
+ EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
}
-#endif
store->flags |= ins_flag;
}
sp [0] = ins;
}
- MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
+ MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
if (*ip == CEE_LDFLDA) {
dreg = alloc_preg (cfg);
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
load->flags |= ins_flag;
- load->flags |= MONO_INST_FAULT;
+ if (sp [0]->opcode != OP_LDADDR)
+ load->flags |= MONO_INST_FAULT;
*sp++ = load;
}
}
is_special_static = mono_class_field_is_special_static (field);
/* Generate IR to compute the field address */
+ if (is_special_static && ((gsize)addr & 0x80000000) == 0 && mono_get_thread_intrinsic (cfg) && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
+ /*
+ * Fast access to TLS data
+ * Inline version of get_thread_static_data () in
+ * threads.c.
+ */
+ guint32 offset;
+ int idx, static_data_reg, array_reg, dreg;
+ MonoInst *thread_ins;
- if ((cfg->opt & MONO_OPT_SHARED) ||
+ // offset &= 0x7fffffff;
+ // idx = (offset >> 24) - 1;
+ // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
+
+ thread_ins = mono_get_thread_intrinsic (cfg);
+ MONO_ADD_INS (cfg->cbb, thread_ins);
+ static_data_reg = alloc_ireg (cfg);
+ MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
+
+ if (cfg->compile_aot) {
+ int offset_reg, offset2_reg, idx_reg;
+
+ /* For TLS variables, this will return the TLS offset */
+ EMIT_NEW_SFLDACONST (cfg, ins, field);
+ offset_reg = ins->dreg;
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
+ idx_reg = alloc_ireg (cfg);
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
+ MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
+ array_reg = alloc_ireg (cfg);
+ MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
+ offset2_reg = alloc_ireg (cfg);
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
+ dreg = alloc_ireg (cfg);
+ EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
+ } else {
+ offset = (gsize)addr & 0x7fffffff;
+ idx = (offset >> 24) - 1;
+
+ array_reg = alloc_ireg (cfg);
+ MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
+ dreg = alloc_ireg (cfg);
+ EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
+ }
+ } else if ((cfg->opt & MONO_OPT_SHARED) ||
(cfg->compile_aot && is_special_static) ||
(context_used && is_special_static)) {
MonoInst *iargs [2];
depth, field->offset);
*/
- if (mono_class_needs_cctor_run (klass, method)) {
- MonoCallInst *call;
- MonoInst *vtable;
-
- vtable = emit_get_rgctx_klass (cfg, context_used,
- klass, MONO_RGCTX_INFO_VTABLE);
-
- // FIXME: This doesn't work since it tries to pass the argument
- // in the normal way, instead of using MONO_ARCH_VTABLE_REG
- /*
- * The vtable pointer is always passed in a register regardless of
- * the calling convention, so assign it manually, and make a call
- * using a signature without parameters.
- */
- call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable);
-#ifdef MONO_ARCH_VTABLE_REG
- mono_call_inst_add_outarg_reg (cfg, call, vtable->dreg, MONO_ARCH_VTABLE_REG, FALSE);
- cfg->uses_vtable_reg = TRUE;
-#else
- NOT_IMPLEMENTED;
-#endif
- }
+ if (mono_class_needs_cctor_run (klass, method))
+ emit_generic_class_init (cfg, klass);
/*
* The pointer we're computing here is
else
EMIT_NEW_PCONST (cfg, ins, addr);
} else {
- /*
- * insert call to mono_threads_get_static_data (GPOINTER_TO_UINT (addr))
- * This could be later optimized to do just a couple of
- * memory dereferences with constant offsets.
- */
MonoInst *iargs [1];
EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
sp++;
break;
-#ifndef HAVE_MOVING_COLLECTOR
case MONO_TYPE_I:
case MONO_TYPE_U:
+ case MONO_TYPE_PTR:
+ case MONO_TYPE_FNPTR:
+#ifndef HAVE_MOVING_COLLECTOR
case MONO_TYPE_STRING:
case MONO_TYPE_OBJECT:
case MONO_TYPE_CLASS:
case MONO_TYPE_SZARRAY:
- case MONO_TYPE_PTR:
- case MONO_TYPE_FNPTR:
case MONO_TYPE_ARRAY:
+#endif
EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
type_to_eval_stack_type ((cfg), field->type, *sp);
sp++;
break;
-#endif
case MONO_TYPE_I8:
case MONO_TYPE_U8:
EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
CHECK_TYPELOAD (klass);
/* FIXME: should check item at sp [1] is compatible with the type of the store. */
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
+ if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
+ generic_class_is_reference_type (cfg, klass)) {
+ MonoInst *dummy_use;
+ /* insert call to write barrier */
+ MonoMethod *write_barrier = mono_gc_get_write_barrier ();
+ mono_emit_method_call (cfg, write_barrier, sp, NULL);
+ EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
+ }
ins_flag = 0;
ip += 5;
inline_costs += 1;
allocator because we can't get the
open generic class's vtable. We
have the same problem in
- handle_alloc_from_inst(). This
+ handle_alloc(). This
needs to be solved so that we can
have managed allocs of shared
generic classes. */
}
readonly = FALSE;
- ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
+ ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
*sp++ = ins;
ip += 5;
break;
MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
} else {
- addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
+ addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
}
*sp++ = ins;
MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
} else {
- addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
+ addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
}
}
if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
GList *tmp;
+ MonoExceptionClause *clause;
+
for (tmp = handlers; tmp; tmp = tmp->next) {
- tblock = tmp->data;
+ clause = tmp->data;
+ tblock = cfg->cil_offset_to_bb [clause->handler_offset];
+ g_assert (tblock);
link_bblock (cfg, bblock, tblock);
MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
ins->inst_target_bb = tblock;
+ ins->inst_eh_block = clause;
MONO_ADD_INS (bblock, ins);
bblock->has_call_handler = 1;
if (COMPILE_LLVM (cfg)) {
/*
* Optimize the common case of ldftn+delegate creation
*/
-#if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE) && !defined(HAVE_WRITE_BARRIERS)
- /* FIXME: SGEN support */
if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
- MonoInst *target_ins;
MonoMethod *invoke;
int invoke_context_used = 0;
if (cfg->generic_sharing_context)
invoke_context_used = mono_method_check_context_used (invoke);
- if (invoke_context_used == 0) {
+#if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
+ /* FIXME: SGEN support */
+ if (!cfg->gen_write_barriers && invoke_context_used == 0) {
+ MonoInst *target_ins;
+
ip += 6;
if (cfg->verbose_level > 3)
g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
sp ++;
break;
}
+#endif
}
}
-#endif
argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
MONO_INST_NEW (cfg, ins, OP_RETHROW);
ins->sreg1 = load->dreg;
MONO_ADD_INS (bblock, ins);
+
+ MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
+ MONO_ADD_INS (bblock, ins);
+
sp = stack_start;
link_bblock (cfg, bblock, end_bblock);
start_new_bblock = 1;
CHECK_STACK_OVF (1);
CHECK_OPSIZE (6);
token = read32 (ip + 2);
- if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC) {
+ if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic) {
MonoType *type = mono_type_create_from_typespec (image, token);
token = mono_type_size (type, &ialign);
} else {
MONO_ADD_INS (cfg->cbb, store);
}
-#ifdef TARGET_POWERPC
+#if defined(TARGET_POWERPC) || defined(TARGET_X86)
if (cfg->compile_aot)
/* FIXME: The plt slots require a GOT var even if the method doesn't use it */
mono_get_got_var (cfg);
cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
g_free (mname);
+ cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
+ mono_basic_block_free (original_bb);
return -1;
}
if ((cfg->verbose_level > 2) && (cfg->method == method))
mono_print_code (cfg, "AFTER METHOD-TO-IR");
+ cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
+ mono_basic_block_free (original_bb);
return inline_costs;
exception_exit:
g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
- g_slist_free (class_inits);
- mono_basic_block_free (bb);
- dont_inline = g_list_remove (dont_inline, method);
- return -1;
+ goto cleanup;
inline_failure:
- g_slist_free (class_inits);
- mono_basic_block_free (bb);
- dont_inline = g_list_remove (dont_inline, method);
- return -1;
+ goto cleanup;
load_error:
- g_slist_free (class_inits);
- mono_basic_block_free (bb);
- dont_inline = g_list_remove (dont_inline, method);
cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
- return -1;
+ goto cleanup;
unverified:
+ set_exception_type_from_invalid_il (cfg, method, ip);
+ goto cleanup;
+
+ cleanup:
g_slist_free (class_inits);
- mono_basic_block_free (bb);
+ mono_basic_block_free (original_bb);
dont_inline = g_list_remove (dont_inline, method);
- set_exception_type_from_invalid_il (cfg, method, ip);
+ cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
return -1;
}
case OP_LSHR:
case OP_LSHL:
case OP_LSHR_UN:
+ return -1;
#endif
#if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
case OP_IDIV:
case OP_IDIV_UN:
case OP_IREM:
case OP_IREM_UN:
-#endif
return -1;
+#endif
default:
return mono_op_to_op_imm (opcode);
}