-/*
- * method-to-ir.c: Convert CIL to the JIT internal representation
+/**
+ * \file
+ * Convert CIL to the JIT internal representation
*
* Author:
* Paolo Molaro (lupus@ximian.com)
#include <config.h>
#include <mono/utils/mono-compiler.h>
+#include "mini.h"
#ifndef DISABLE_JIT
#endif
#include <mono/utils/memcheck.h>
-#include "mini.h"
#include <mono/metadata/abi-details.h>
#include <mono/metadata/assembly.h>
#include <mono/metadata/attrdefs.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/marshal.h>
#include <mono/metadata/debug-helpers.h>
-#include <mono/metadata/mono-debug.h>
-#include <mono/metadata/mono-debug-debugger.h>
+#include <mono/metadata/debug-internals.h>
#include <mono/metadata/gc-internals.h>
#include <mono/metadata/security-manager.h>
#include <mono/metadata/threads-types.h>
#include <mono/metadata/profiler-private.h>
#include <mono/metadata/profiler.h>
#include <mono/metadata/monitor.h>
-#include <mono/metadata/debug-mono-symfile.h>
-#include <mono/utils/mono-compiler.h>
#include <mono/utils/mono-memory-model.h>
#include <mono/utils/mono-error-internals.h>
#include <mono/metadata/mono-basic-block.h>
int mono_op_to_op_imm (int opcode);
int mono_op_to_op_imm_noemul (int opcode);
-MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
-
static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
guchar *ip, guint real_offset, gboolean inline_always);
static MonoInst*
static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
static MonoMethodSignature *helper_sig_llvmonly_imt_trampoline;
static MonoMethodSignature *helper_sig_jit_thread_attach;
+static MonoMethodSignature *helper_sig_get_tls_tramp;
+static MonoMethodSignature *helper_sig_set_tls_tramp;
/* type loading helpers */
-static GENERATE_GET_CLASS_WITH_CACHE (runtime_helpers, System.Runtime.CompilerServices, RuntimeHelpers)
-static GENERATE_TRY_GET_CLASS_WITH_CACHE (debuggable_attribute, System.Diagnostics, DebuggableAttribute)
+static GENERATE_GET_CLASS_WITH_CACHE (runtime_helpers, "System.Runtime.CompilerServices", "RuntimeHelpers")
+static GENERATE_TRY_GET_CLASS_WITH_CACHE (debuggable_attribute, "System.Diagnostics", "DebuggableAttribute")
/*
* Instruction metadata
#undef MINI_OP
#undef MINI_OP3
-#define MONO_INIT_VARINFO(vi,id) do { \
- (vi)->range.first_use.pos.bid = 0xffff; \
- (vi)->reg = -1; \
- (vi)->idx = (id); \
-} while (0)
-
guint32
mono_alloc_ireg (MonoCompile *cfg)
{
{
int i;
MonoInst *tree;
+ GString *str = g_string_new ("");
- printf ("\n%s %d: [IN: ", msg, bb->block_num);
+ g_string_append_printf (str, "%s %d: [IN: ", msg, bb->block_num);
for (i = 0; i < bb->in_count; ++i)
- printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
- printf (", OUT: ");
+ g_string_append_printf (str, " BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
+ g_string_append_printf (str, ", OUT: ");
for (i = 0; i < bb->out_count; ++i)
- printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
- printf (" ]\n");
+ g_string_append_printf (str, " BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
+ g_string_append_printf (str, " ]\n");
+
+ g_print ("%s", str->str);
+ g_string_free (str, TRUE);
+
for (tree = bb->code; tree; tree = tree->next)
mono_print_ins_index (-1, tree);
}
helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
helper_sig_llvmonly_imt_trampoline = mono_create_icall_signature ("ptr ptr ptr");
helper_sig_jit_thread_attach = mono_create_icall_signature ("ptr ptr");
+ helper_sig_get_tls_tramp = mono_create_icall_signature ("ptr");
+ helper_sig_set_tls_tramp = mono_create_icall_signature ("void ptr");
}
static MONO_NEVER_INLINE void
break;
case STACK_PTR:
case STACK_MP:
+ case STACK_OBJ:
#if SIZEOF_VOID_P == 8
ins->opcode = OP_LCONV_TO_U;
#else
ins->klass = mono_defaults.object_class;
}
-static const char
-ldind_type [] = {
- STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
-};
+static MonoClass*
+ldind_to_type (int op)
+{
+ switch (op) {
+ case CEE_LDIND_I1: return mono_defaults.sbyte_class;
+ case CEE_LDIND_U1: return mono_defaults.byte_class;
+ case CEE_LDIND_I2: return mono_defaults.int16_class;
+ case CEE_LDIND_U2: return mono_defaults.uint16_class;
+ case CEE_LDIND_I4: return mono_defaults.int32_class;
+ case CEE_LDIND_U4: return mono_defaults.uint32_class;
+ case CEE_LDIND_I8: return mono_defaults.int64_class;
+ case CEE_LDIND_I: return mono_defaults.int_class;
+ case CEE_LDIND_R4: return mono_defaults.single_class;
+ case CEE_LDIND_R8: return mono_defaults.double_class;
+ case CEE_LDIND_REF:return mono_defaults.object_class; //FIXME we should try to return a more specific type
+ default: g_error ("Unknown ldind type %d", op);
+ }
+}
#if 0
MonoInst *
mono_get_got_var (MonoCompile *cfg)
{
- if (!cfg->compile_aot || !cfg->backend->need_got_var)
+ if (!cfg->compile_aot || !cfg->backend->need_got_var || cfg->llvm_only)
return NULL;
if (!cfg->got_var) {
cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
return cfg->got_var;
}
-static MonoInst *
-mono_get_vtable_var (MonoCompile *cfg)
+static void
+mono_create_rgctx_var (MonoCompile *cfg)
{
- g_assert (cfg->gshared);
-
if (!cfg->rgctx_var) {
cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* force the var to be stack allocated */
cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
}
+}
+
+static MonoInst *
+mono_get_vtable_var (MonoCompile *cfg)
+{
+ g_assert (cfg->gshared);
+
+ mono_create_rgctx_var (cfg);
return cfg->rgctx_var;
}
}
}
-static MonoInst*
-emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data)
+MonoInst*
+mini_emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data)
{
MonoInst *ins;
return ins;
}
-static void
-mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
-{
- int ibitmap_reg = alloc_preg (cfg);
-#ifdef COMPRESSED_INTERFACE_BITMAP
- MonoInst *args [2];
- MonoInst *res, *ins;
- NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
- MONO_ADD_INS (cfg->cbb, ins);
- args [0] = ins;
- args [1] = emit_runtime_constant (cfg, MONO_PATCH_INFO_IID, klass);
- res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
- MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
-#else
- int ibitmap_byte_reg = alloc_preg (cfg);
-
- MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
-
- if (cfg->compile_aot) {
- int iid_reg = alloc_preg (cfg);
- int shifted_iid_reg = alloc_preg (cfg);
- int ibitmap_byte_address_reg = alloc_preg (cfg);
- int masked_iid_reg = alloc_preg (cfg);
- int iid_one_bit_reg = alloc_preg (cfg);
- int iid_bit_reg = alloc_preg (cfg);
- MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
- MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
- MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
- MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
- MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
- MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
- MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
- MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
- } else {
- MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
- MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
- }
-#endif
-}
-
-/*
- * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
- * stored in "klass_reg" implements the interface "klass".
- */
-static void
-mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
+static MonoInst*
+mono_create_fast_tls_getter (MonoCompile *cfg, MonoTlsKey key)
{
- mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
-}
+ int tls_offset = mono_tls_get_tls_offset (key);
-/*
- * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
- * stored in "vtable_reg" implements the interface "klass".
- */
-static void
-mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
-{
- mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
-}
+ if (cfg->compile_aot)
+ return NULL;
-/*
- * Emit code which checks whenever the interface id of @klass is smaller than
- * than the value given by max_iid_reg.
-*/
-static void
-mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
- MonoBasicBlock *false_target)
-{
- if (cfg->compile_aot) {
- int iid_reg = alloc_preg (cfg);
- MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
- MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
+ if (tls_offset != -1 && mono_arch_have_fast_tls ()) {
+ MonoInst *ins;
+ MONO_INST_NEW (cfg, ins, OP_TLS_GET);
+ ins->dreg = mono_alloc_preg (cfg);
+ ins->inst_offset = tls_offset;
+ return ins;
}
- else
- MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
- if (false_target)
- MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
- else
- MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
-}
-
-/* Same as above, but obtains max_iid from a vtable */
-static void
-mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
- MonoBasicBlock *false_target)
-{
- int max_iid_reg = alloc_preg (cfg);
-
- MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU4_MEMBASE, max_iid_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, max_interface_id));
- mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
+ return NULL;
}
-/* Same as above, but obtains max_iid from a klass */
-static void
-mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
- MonoBasicBlock *false_target)
+static MonoInst*
+mono_create_fast_tls_setter (MonoCompile *cfg, MonoInst* value, MonoTlsKey key)
{
- int max_iid_reg = alloc_preg (cfg);
+ int tls_offset = mono_tls_get_tls_offset (key);
- MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU4_MEMBASE, max_iid_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, max_interface_id));
- mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
-}
+ if (cfg->compile_aot)
+ return NULL;
-static void
-mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
-{
- int idepth_reg = alloc_preg (cfg);
- int stypes_reg = alloc_preg (cfg);
- int stype = alloc_preg (cfg);
-
- mono_class_setup_supertypes (klass);
-
- if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
- MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
- MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
- MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
- }
- MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
- MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
- if (klass_ins) {
- MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
- } else if (cfg->compile_aot) {
- int const_reg = alloc_preg (cfg);
- MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
- MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
- } else {
- MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
+ if (tls_offset != -1 && mono_arch_have_fast_tls ()) {
+ MonoInst *ins;
+ MONO_INST_NEW (cfg, ins, OP_TLS_SET);
+ ins->sreg1 = value->dreg;
+ ins->inst_offset = tls_offset;
+ return ins;
}
- MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
-}
-
-static void
-mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
-{
- mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
+ return NULL;
}
-static void
-mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
-{
- int intf_reg = alloc_preg (cfg);
-
- mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
- mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
- MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
- if (true_target)
- MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
- else
- MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
-}
-/*
- * Variant of the above that takes a register to the class, not the vtable.
- */
-static void
-mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
+MonoInst*
+mono_create_tls_get (MonoCompile *cfg, MonoTlsKey key)
{
- int intf_bit_reg = alloc_preg (cfg);
+ MonoInst *fast_tls = NULL;
- mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
- mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
- MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
- if (true_target)
- MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
- else
- MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
-}
+ if (!mini_get_debug_options ()->use_fallback_tls)
+ fast_tls = mono_create_fast_tls_getter (cfg, key);
-static inline void
-mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
-{
- if (klass_inst) {
- MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
- } else {
- MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, klass);
- MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, ins->dreg);
+ if (fast_tls) {
+ MONO_ADD_INS (cfg->cbb, fast_tls);
+ return fast_tls;
}
- MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
-}
-static inline void
-mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
-{
- mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
-}
-
-static inline void
-mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
-{
if (cfg->compile_aot) {
- int const_reg = alloc_preg (cfg);
- MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
- MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
- } else {
- MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
- }
- MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
-}
-
-static void
-mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
-
-static void
-mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
-{
- if (klass->rank) {
- int rank_reg = alloc_preg (cfg);
- int eclass_reg = alloc_preg (cfg);
-
- g_assert (!klass_inst);
- MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, rank));
- MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
- MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
- // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
- MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
- if (klass->cast_class == mono_defaults.object_class) {
- int parent_reg = alloc_preg (cfg);
- MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
- mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
- mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
- } else if (klass->cast_class == mono_defaults.enum_class->parent) {
- mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
- mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
- } else if (klass->cast_class == mono_defaults.enum_class) {
- mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
- } else if (mono_class_is_interface (klass->cast_class)) {
- mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
- } else {
- // Pass -1 as obj_reg to skip the check below for arrays of arrays
- mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
- }
-
- if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
- /* Check that the object is a vector too */
- int bounds_reg = alloc_preg (cfg);
- MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
- MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
- MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
- }
+ MonoInst *addr;
+ /*
+ * tls getters are critical pieces of code and we don't want to resolve them
+ * through the standard plt/tramp mechanism since we might expose ourselves
+ * to crashes and infinite recursions.
+ */
+ EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_GET_TLS_TRAMP, (void*)key);
+ return mini_emit_calli (cfg, helper_sig_get_tls_tramp, NULL, addr, NULL, NULL);
} else {
- int idepth_reg = alloc_preg (cfg);
- int stypes_reg = alloc_preg (cfg);
- int stype = alloc_preg (cfg);
-
- mono_class_setup_supertypes (klass);
-
- if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
- MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
- MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
- MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
- }
- MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
- MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
- mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
- }
-}
-
-static void
-mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
-{
- mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
-}
-
-static void
-mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
-{
- int val_reg;
-
- g_assert (val == 0);
-
- if (align == 0)
- align = 4;
-
- if ((size <= SIZEOF_REGISTER) && (size <= align)) {
- switch (size) {
- case 1:
- MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
- return;
- case 2:
- MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
- return;
- case 4:
- MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
- return;
-#if SIZEOF_REGISTER == 8
- case 8:
- MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
- return;
-#endif
- }
- }
-
- val_reg = alloc_preg (cfg);
-
- if (SIZEOF_REGISTER == 8)
- MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
- else
- MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
-
- if (align < 4) {
- /* This could be optimized further if neccesary */
- while (size >= 1) {
- MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
- offset += 1;
- size -= 1;
- }
- return;
- }
-
- if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
- if (offset % 8) {
- MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
- offset += 4;
- size -= 4;
- }
- while (size >= 8) {
- MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
- offset += 8;
- size -= 8;
- }
- }
-
- while (size >= 4) {
- MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
- offset += 4;
- size -= 4;
- }
- while (size >= 2) {
- MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
- offset += 2;
- size -= 2;
- }
- while (size >= 1) {
- MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
- offset += 1;
- size -= 1;
+ gpointer getter = mono_tls_get_tls_getter (key, FALSE);
+ return mono_emit_jit_icall (cfg, getter, NULL);
}
}
-void
-mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
+static MonoInst*
+mono_create_tls_set (MonoCompile *cfg, MonoInst *value, MonoTlsKey key)
{
- int cur_reg;
-
- if (align == 0)
- align = 4;
-
- /*FIXME arbitrary hack to avoid unbound code expansion.*/
- g_assert (size < 10000);
+ MonoInst *fast_tls = NULL;
- if (align < 4) {
- /* This could be optimized further if neccesary */
- while (size >= 1) {
- cur_reg = alloc_preg (cfg);
- MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
- MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
- doffset += 1;
- soffset += 1;
- size -= 1;
- }
- }
-
- if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
- while (size >= 8) {
- cur_reg = alloc_preg (cfg);
- MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
- MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
- doffset += 8;
- soffset += 8;
- size -= 8;
- }
- }
+ if (!mini_get_debug_options ()->use_fallback_tls)
+ fast_tls = mono_create_fast_tls_setter (cfg, value, key);
- while (size >= 4) {
- cur_reg = alloc_preg (cfg);
- MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
- MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
- doffset += 4;
- soffset += 4;
- size -= 4;
+ if (fast_tls) {
+ MONO_ADD_INS (cfg->cbb, fast_tls);
+ return fast_tls;
}
- while (size >= 2) {
- cur_reg = alloc_preg (cfg);
- MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
- MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
- doffset += 2;
- soffset += 2;
- size -= 2;
- }
- while (size >= 1) {
- cur_reg = alloc_preg (cfg);
- MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
- MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
- doffset += 1;
- soffset += 1;
- size -= 1;
- }
-}
-
-static void
-emit_tls_set (MonoCompile *cfg, int sreg1, MonoTlsKey tls_key)
-{
- MonoInst *ins, *c;
if (cfg->compile_aot) {
- EMIT_NEW_TLS_OFFSETCONST (cfg, c, tls_key);
- MONO_INST_NEW (cfg, ins, OP_TLS_SET_REG);
- ins->sreg1 = sreg1;
- ins->sreg2 = c->dreg;
- MONO_ADD_INS (cfg->cbb, ins);
+ MonoInst *addr;
+ EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_SET_TLS_TRAMP, (void*)key);
+ return mini_emit_calli (cfg, helper_sig_set_tls_tramp, &value, addr, NULL, NULL);
} else {
- MONO_INST_NEW (cfg, ins, OP_TLS_SET);
- ins->sreg1 = sreg1;
- ins->inst_offset = mini_get_tls_offset (tls_key);
- MONO_ADD_INS (cfg->cbb, ins);
+ gpointer setter = mono_tls_get_tls_setter (key, FALSE);
+ return mono_emit_jit_icall (cfg, setter, &value);
}
}
* lmf->prev_lmf = *lmf_addr
* *lmf_addr = lmf
*/
- int lmf_reg, prev_lmf_reg;
MonoInst *ins, *lmf_ins;
if (!cfg->lmf_ir)
return;
- if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
- /* Load current lmf */
- lmf_ins = mono_get_lmf_intrinsic (cfg);
- g_assert (lmf_ins);
- MONO_ADD_INS (cfg->cbb, lmf_ins);
- EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
- lmf_reg = ins->dreg;
- /* Save previous_lmf */
- EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
- /* Set new LMF */
- emit_tls_set (cfg, lmf_reg, TLS_KEY_LMF);
- } else {
- /*
- * Store lmf_addr in a variable, so it can be allocated to a global register.
- */
- if (!cfg->lmf_addr_var)
- cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
+ int lmf_reg, prev_lmf_reg;
+ /*
+ * Store lmf_addr in a variable, so it can be allocated to a global register.
+ */
+ if (!cfg->lmf_addr_var)
+ cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
#ifdef HOST_WIN32
- ins = mono_get_jit_tls_intrinsic (cfg);
- if (ins) {
- int jit_tls_dreg = ins->dreg;
-
- MONO_ADD_INS (cfg->cbb, ins);
- lmf_reg = alloc_preg (cfg);
- EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
- } else {
- lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
- }
-#else
- lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
- if (lmf_ins) {
- MONO_ADD_INS (cfg->cbb, lmf_ins);
- } else {
-#ifdef TARGET_IOS
- MonoInst *args [16], *jit_tls_ins, *ins;
-
- /* Inline mono_get_lmf_addr () */
- /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
+ ins = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
+ g_assert (ins);
+ int jit_tls_dreg = ins->dreg;
- /* Load mono_jit_tls_id */
- if (cfg->compile_aot)
- EMIT_NEW_AOTCONST (cfg, args [0], MONO_PATCH_INFO_JIT_TLS_ID, NULL);
- else
- EMIT_NEW_ICONST (cfg, args [0], mono_jit_tls_id);
- /* call pthread_getspecific () */
- jit_tls_ins = mono_emit_jit_icall (cfg, pthread_getspecific, args);
- /* lmf_addr = &jit_tls->lmf */
- EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, cfg->lmf_addr_var->dreg, jit_tls_ins->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
- lmf_ins = ins;
+ lmf_reg = alloc_preg (cfg);
+ EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
#else
- lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
-#endif
- }
+ lmf_ins = mono_create_tls_get (cfg, TLS_KEY_LMF_ADDR);
+ g_assert (lmf_ins);
#endif
- lmf_ins->dreg = cfg->lmf_addr_var->dreg;
+ lmf_ins->dreg = cfg->lmf_addr_var->dreg;
- EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
- lmf_reg = ins->dreg;
+ EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
+ lmf_reg = ins->dreg;
- prev_lmf_reg = alloc_preg (cfg);
- /* Save previous_lmf */
- EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
- EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
- /* Set new lmf */
- EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
- }
+ prev_lmf_reg = alloc_preg (cfg);
+ /* Save previous_lmf */
+ EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
+ EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
+ /* Set new lmf */
+ EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
}
/*
static void
emit_pop_lmf (MonoCompile *cfg)
{
- int lmf_reg, lmf_addr_reg, prev_lmf_reg;
+ int lmf_reg, lmf_addr_reg;
MonoInst *ins;
if (!cfg->lmf_ir)
EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
lmf_reg = ins->dreg;
- if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
- /* Load previous_lmf */
- prev_lmf_reg = alloc_preg (cfg);
- EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
- /* Set new LMF */
- emit_tls_set (cfg, prev_lmf_reg, TLS_KEY_LMF);
- } else {
- /*
- * Emit IR to pop the LMF:
- * *(lmf->lmf_addr) = lmf->prev_lmf
- */
- /* This could be called before emit_push_lmf () */
- if (!cfg->lmf_addr_var)
- cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
- lmf_addr_reg = cfg->lmf_addr_var->dreg;
-
- prev_lmf_reg = alloc_preg (cfg);
- EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
- EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
- }
-}
-
-static void
-emit_instrumentation_call (MonoCompile *cfg, void *func)
-{
- MonoInst *iargs [1];
-
+ int prev_lmf_reg;
/*
- * Avoid instrumenting inlined methods since it can
- * distort profiling results.
+ * Emit IR to pop the LMF:
+ * *(lmf->lmf_addr) = lmf->prev_lmf
*/
- if (cfg->method != cfg->current_method)
- return;
+ /* This could be called before emit_push_lmf () */
+ if (!cfg->lmf_addr_var)
+ cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
+ lmf_addr_reg = cfg->lmf_addr_var->dreg;
- if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
- EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method);
- mono_emit_jit_icall (cfg, func, iargs);
- }
+ prev_lmf_reg = alloc_preg (cfg);
+ EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
+ EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
}
static int
method_reg = alloc_preg (cfg);
MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
} else {
- MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
+ MonoInst *ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
method_reg = ins->dreg;
}
method_reg = alloc_preg (cfg);
MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
} else {
- MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
+ MonoInst *ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
method_reg = ins->dreg;
}
return ji;
}
-static int
+int
mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
{
if (cfg->gshared)
inline static MonoCallInst *
mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
- MonoInst **args, int calli, int virtual_, int tail, int rgctx, int unbox_trampoline)
+ MonoInst **args, int calli, int virtual_, int tail, int rgctx, int unbox_trampoline, MonoMethod *target)
{
MonoType *sig_ret;
MonoCallInst *call;
tail = FALSE;
if (tail) {
- emit_instrumentation_call (cfg, mono_profiler_method_leave);
+ mini_profiler_emit_tail_call (cfg, target);
MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
} else
#endif
}
-inline static MonoInst*
-mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
+MonoInst*
+mini_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
{
MonoCallInst *call;
MonoInst *ins;
MONO_ADD_INS (cfg->cbb, ins);
}
- call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
+ call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE, NULL);
call->inst.sreg1 = addr->dreg;
return (MonoInst*)call;
}
-static MonoInst*
-emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
-
static MonoInst*
emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
-static MonoInst*
-emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
static MonoInst*
mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
- return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
+ return mini_emit_calli (cfg, sig, args, addr, NULL, NULL);
}
#endif
need_unbox_trampoline = method->klass == mono_defaults.object_class || mono_class_is_interface (method->klass);
- call = mono_emit_call_args (cfg, sig, args, FALSE, virtual_, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
+ call = mono_emit_call_args (cfg, sig, args, FALSE, virtual_, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline, method);
#ifndef DISABLE_REMOTING
if (might_be_remote)
g_assert (sig);
- call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
+ call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE, NULL);
call->fptr = func;
MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
tmp_reg = alloc_preg (cfg);
EMIT_NEW_UNALU (cfg, args [pindex], OP_MOVE, tmp_reg, arg_reg);
csig = sig_to_rgctx_sig (fsig);
- return mono_emit_calli (cfg, csig, args, call_target, NULL, NULL);
+ return mini_emit_calli (cfg, csig, args, call_target, NULL, NULL);
}
/* Emit an indirect call to the function descriptor ADDR */
static gboolean
direct_icalls_enabled (MonoCompile *cfg)
{
+ return FALSE;
+
/* LLVM on amd64 can't handle calls to non-32 bit addresses */
#ifdef TARGET_AMD64
if (cfg->compile_llvm && !cfg->llvm_only)
static void
-emit_method_access_failure (MonoCompile *cfg, MonoMethod *method, MonoMethod *cil_method)
+emit_method_access_failure (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
{
MonoInst *args [16];
- args [0] = emit_get_rgctx_method (cfg, mono_method_check_context_used (method), method, MONO_RGCTX_INFO_METHOD);
- args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cil_method), cil_method, MONO_RGCTX_INFO_METHOD);
+ args [0] = emit_get_rgctx_method (cfg, mono_method_check_context_used (caller), caller, MONO_RGCTX_INFO_METHOD);
+ args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (callee), callee, MONO_RGCTX_INFO_METHOD);
mono_emit_jit_icall (cfg, mono_throw_method_access, args);
}
-static MonoMethod*
-get_memcpy_method (void)
+MonoMethod*
+mini_get_memcpy_method (void)
{
static MonoMethod *memcpy_method = NULL;
if (!memcpy_method) {
return memcpy_method;
}
-static void
-create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
-{
- MonoClassField *field;
- gpointer iter = NULL;
-
- while ((field = mono_class_get_fields (klass, &iter))) {
- int foffset;
-
- if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
- continue;
- foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
- if (mini_type_is_reference (mono_field_get_type (field))) {
- g_assert ((foffset % SIZEOF_VOID_P) == 0);
- *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
- } else {
- MonoClass *field_class = mono_class_from_mono_type (field->type);
- if (field_class->has_references)
- create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
- }
- }
-}
-
-static void
-emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
+void
+mini_emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
{
int card_table_shift_bits;
gpointer card_table_mask;
if (!cfg->gen_write_barriers)
return;
+ //method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !MONO_INS_IS_PCONST_NULL (sp [1])
+
card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
wbarrier->sreg1 = ptr->dreg;
wbarrier->sreg2 = value->dreg;
MONO_ADD_INS (cfg->cbb, wbarrier);
- } else if (card_table && !cfg->compile_aot && !mono_gc_card_table_nursery_check ()) {
+ } else if (card_table) {
int offset_reg = alloc_preg (cfg);
int card_reg;
MonoInst *ins;
+ /*
+ * We emit a fast, lightweight write barrier that always marks cards, as in the concurrent
+ * collector case; for the serial collector this might slightly slow down nursery
+ * collections. We also assume the host system and the target system have the same card
+ * table configuration, which is the case if they have the same pointer size.
+ */
+
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
if (card_table_mask)
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
/*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
* IMM's larger than 32bits.
*/
- ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
+ ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
card_reg = ins->dreg;
MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
}
-static gboolean
-mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
+MonoMethod*
+mini_get_memset_method (void)
{
- int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
- unsigned need_wb = 0;
-
- if (align == 0)
- align = 4;
-
- /*types with references can't have alignment smaller than sizeof(void*) */
- if (align < SIZEOF_VOID_P)
- return FALSE;
-
- /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
- if (size > 32 * SIZEOF_VOID_P)
- return FALSE;
-
- create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
-
- /* We don't unroll more than 5 stores to avoid code bloat. */
- if (size > 5 * SIZEOF_VOID_P) {
- /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
- size += (SIZEOF_VOID_P - 1);
- size &= ~(SIZEOF_VOID_P - 1);
-
- EMIT_NEW_ICONST (cfg, iargs [2], size);
- EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
- mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
- return TRUE;
+ static MonoMethod *memset_method = NULL;
+ if (!memset_method) {
+ memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
+ if (!memset_method)
+ g_error ("Old corlib found. Install a new one");
}
-
- destreg = iargs [0]->dreg;
- srcreg = iargs [1]->dreg;
- offset = 0;
-
- dest_ptr_reg = alloc_preg (cfg);
- tmp_reg = alloc_preg (cfg);
-
- /*tmp = dreg*/
- EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
-
- while (size >= SIZEOF_VOID_P) {
- MonoInst *load_inst;
- MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
- load_inst->dreg = tmp_reg;
- load_inst->inst_basereg = srcreg;
- load_inst->inst_offset = offset;
- MONO_ADD_INS (cfg->cbb, load_inst);
-
- MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
-
- if (need_wb & 0x1)
- emit_write_barrier (cfg, iargs [0], load_inst);
-
- offset += SIZEOF_VOID_P;
- size -= SIZEOF_VOID_P;
- need_wb >>= 1;
-
- /*tmp += sizeof (void*)*/
- if (size >= SIZEOF_VOID_P) {
- NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
- MONO_ADD_INS (cfg->cbb, iargs [0]);
- }
- }
-
- /* Those cannot be references since size < sizeof (void*) */
- while (size >= 4) {
- MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
- MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
- offset += 4;
- size -= 4;
- }
-
- while (size >= 2) {
- MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
- MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
- offset += 2;
- size -= 2;
- }
-
- while (size >= 1) {
- MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
- MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
- offset += 1;
- size -= 1;
- }
-
- return TRUE;
-}
-
-/*
- * Emit code to copy a valuetype of type @klass whose address is stored in
- * @src->dreg to memory whose address is stored at @dest->dreg.
- */
-void
-mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
-{
- MonoInst *iargs [4];
- int n;
- guint32 align = 0;
- MonoMethod *memcpy_method;
- MonoInst *size_ins = NULL;
- MonoInst *memcpy_ins = NULL;
-
- g_assert (klass);
- if (cfg->gshared)
- klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
-
- /*
- * This check breaks with spilled vars... need to handle it during verification anyway.
- * g_assert (klass && klass == src->klass && klass == dest->klass);
- */
-
- if (mini_is_gsharedvt_klass (klass)) {
- g_assert (!native);
- size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
- memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
- }
-
- if (native)
- n = mono_class_native_size (klass, &align);
- else
- n = mono_class_value_size (klass, &align);
-
- /* if native is true there should be no references in the struct */
- if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
- /* Avoid barriers when storing to the stack */
- if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
- (dest->opcode == OP_LDADDR))) {
- int context_used;
-
- iargs [0] = dest;
- iargs [1] = src;
-
- context_used = mini_class_check_context_used (cfg, klass);
-
- /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
- if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
- return;
- } else if (context_used) {
- iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
- } else {
- iargs [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, klass);
- if (!cfg->compile_aot)
- mono_class_compute_gc_descriptor (klass);
- }
-
- if (size_ins)
- mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
- else
- mono_emit_jit_icall (cfg, mono_value_copy, iargs);
- return;
- }
- }
-
- if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 8) {
- /* FIXME: Optimize the case when src/dest is OP_LDADDR */
- mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
- } else {
- iargs [0] = dest;
- iargs [1] = src;
- if (size_ins)
- iargs [2] = size_ins;
- else
- EMIT_NEW_ICONST (cfg, iargs [2], n);
-
- memcpy_method = get_memcpy_method ();
- if (memcpy_ins)
- mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
- else
- mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
- }
-}
-
-static MonoMethod*
-get_memset_method (void)
-{
- static MonoMethod *memset_method = NULL;
- if (!memset_method) {
- memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
- if (!memset_method)
- g_error ("Old corlib found. Install a new one");
- }
- return memset_method;
-}
+ return memset_method;
+}
void
mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
/* FIXME: Optimize this for the case when dest is an LDADDR */
mono_class_init (klass);
if (mini_is_gsharedvt_klass (klass)) {
- size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
- bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
+ size_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
+ bzero_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
if (!bzero_method)
bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
g_assert (bzero_method);
iargs [0] = dest;
iargs [1] = size_ins;
- mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
+ mini_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
return;
}
mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
}
else {
- memset_method = get_memset_method ();
+ memset_method = mini_get_memset_method ();
iargs [0] = dest;
EMIT_NEW_ICONST (cfg, iargs [1], 0);
EMIT_NEW_ICONST (cfg, iargs [2], n);
if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
!(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
- !method->klass->valuetype)
- EMIT_NEW_ARGLOAD (cfg, this_ins, 0);
+ !method->klass->valuetype)
+ EMIT_NEW_VARLOAD (cfg, this_ins, cfg->this_arg, &mono_defaults.object_class->byval_arg);
if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
MonoInst *mrgctx_loc, *mrgctx_var;
mrgctx_loc = mono_get_vtable_var (cfg);
EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
+ return mrgctx_var;
+ } else if (MONO_CLASS_IS_INTERFACE (cfg->method->klass)) {
+ MonoInst *mrgctx_loc, *mrgctx_var;
+
+ /* Default interface methods need an mrgctx since the vtable at runtime points at an implementing class */
+ mrgctx_loc = mono_get_vtable_var (cfg);
+ EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
+
+ g_assert (mono_method_needs_static_rgctx_invoke (cfg->method, TRUE));
+
return mrgctx_var;
} else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
MonoInst *vtable_loc, *vtable_var;
return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
}
-static MonoInst*
-emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
+MonoInst*
+mini_emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
MonoClass *klass, MonoRgctxInfoType rgctx_type)
{
- MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
- MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
+ MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
+ MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
return emit_rgctx_fetch (cfg, rgctx, entry);
}
emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
{
- MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
- MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
+ MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
+ MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
return emit_rgctx_fetch (cfg, rgctx, entry);
}
call_info->sig = sig;
call_info->method = cmethod;
- entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
- rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
+ entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
+ rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
return emit_rgctx_fetch (cfg, rgctx, entry);
}
info->klass = klass;
info->method = virt_method;
- entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_VIRT_METHOD, info, rgctx_type);
- rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
+ entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_VIRT_METHOD, info, rgctx_type);
+ rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
return emit_rgctx_fetch (cfg, rgctx, entry);
}
MonoJumpInfoRgctxEntry *entry;
MonoInst *rgctx;
- entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
- rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
+ entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
+ rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
return emit_rgctx_fetch (cfg, rgctx, entry);
}
g_assert_not_reached ();
}
} else {
- MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
- MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
+ MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
+ MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
return emit_rgctx_fetch (cfg, rgctx, entry);
}
emit_get_rgctx_field (MonoCompile *cfg, int context_used,
MonoClassField *field, MonoRgctxInfoType rgctx_type)
{
- MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
- MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
+ MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
+ MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
return emit_rgctx_fetch (cfg, rgctx, entry);
}
return ins;
}
-static MonoInst*
-emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
+MonoInst*
+mini_emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
{
return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
}
context_used = mini_class_check_context_used (cfg, klass);
if (context_used) {
- vtable_arg = emit_get_rgctx_klass (cfg, context_used,
+ vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used,
klass, MONO_RGCTX_INFO_VTABLE);
} else {
MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
ins->sreg1 = vtable_arg->dreg;
MONO_ADD_INS (cfg->cbb, ins);
} else {
- static int byte_offset = -1;
- static guint8 bitmask;
- int bits_reg, inited_reg;
+ int inited_reg;
MonoBasicBlock *inited_bb;
MonoInst *args [16];
- if (byte_offset < 0)
- mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask);
-
- bits_reg = alloc_ireg (cfg);
inited_reg = alloc_ireg (cfg);
- MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, bits_reg, vtable_arg->dreg, byte_offset);
- MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, inited_reg, bits_reg, bitmask);
+ MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, inited_reg, vtable_arg->dreg, MONO_STRUCT_OFFSET (MonoVTable, initialized));
NEW_BBLOCK (cfg, inited_bb);
}
}
-static void
-save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check)
+void
+mini_save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check)
{
if (mini_get_debug_options ()->better_cast_details) {
int vtable_reg = alloc_preg (cfg);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
}
- tls_get = mono_get_jit_tls_intrinsic (cfg);
+ tls_get = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
if (!tls_get) {
fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
exit (1);
}
- MONO_ADD_INS (cfg->cbb, tls_get);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
if (context_used) {
MonoInst *class_ins;
- class_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
+ class_ins = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
to_klass_reg = class_ins->dreg;
} else {
to_klass_reg = alloc_preg (cfg);
}
}
-static void
-reset_cast_details (MonoCompile *cfg)
+void
+mini_reset_cast_details (MonoCompile *cfg)
{
/* Reset the variables holding the cast details */
if (mini_get_debug_options ()->better_cast_details) {
- MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
-
- MONO_ADD_INS (cfg->cbb, tls_get);
+ MonoInst *tls_get = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
/* It is enough to reset the from field */
MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
}
context_used = mini_class_check_context_used (cfg, array_class);
- save_cast_details (cfg, array_class, obj->dreg, FALSE);
+ mini_save_cast_details (cfg, array_class, obj->dreg, FALSE);
MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
MonoInst *ins;
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
- ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, array_class);
+ ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, array_class);
MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, ins->dreg);
} else if (context_used) {
MonoInst *vtable_ins;
- vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
+ vtable_ins = mini_emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
} else {
if (cfg->compile_aot) {
MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
- reset_cast_details (cfg);
+ mini_reset_cast_details (cfg);
}
/**
cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, mono_method_signature (method));
return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
} else {
- rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
+ rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
- return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
+ return mini_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
}
} else {
gboolean pass_vtable, pass_mrgctx;
/* This assertion is from the unboxcast insn */
g_assert (klass->rank == 0);
- element_class = emit_get_rgctx_klass (cfg, context_used,
+ element_class = mini_emit_get_rgctx_klass (cfg, context_used,
klass, MONO_RGCTX_INFO_ELEMENT_KLASS);
MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
} else {
- save_cast_details (cfg, klass->element_class, obj_reg, FALSE);
+ mini_save_cast_details (cfg, klass->element_class, obj_reg, FALSE);
mini_emit_class_check (cfg, eclass_reg, klass->element_class);
- reset_cast_details (cfg);
+ mini_reset_cast_details (cfg);
}
NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
MonoInst *ins;
int dreg, addr_reg;
- klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
+ klass_inst = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
/* obj */
args [0] = obj;
NEW_BBLOCK (cfg, is_ref_bb);
NEW_BBLOCK (cfg, is_nullable_bb);
NEW_BBLOCK (cfg, end_bb);
- is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
+ is_ref = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
MONO_START_BB (cfg, is_nullable_bb);
{
- MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
+ MonoInst *addr = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
MonoInst *unbox_call;
MonoMethodSignature *unbox_sig;
if (cfg->llvm_only)
unbox_call = emit_llvmonly_calli (cfg, unbox_sig, &obj, addr);
else
- unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
+ unbox_call = mini_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
addr->dreg = addr_reg;
rgctx_info = MONO_RGCTX_INFO_KLASS;
else
rgctx_info = MONO_RGCTX_INFO_VTABLE;
- data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
+ data = mini_emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
if (cfg->opt & MONO_OPT_SHARED) {
EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
/* This happens often in argument checking code, eg. throw new FooException... */
/* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
- return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
+ alloc_ftn = mono_helper_newobj_mscorlib;
} else {
MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
MonoMethod *managed_alloc = NULL;
- gboolean pass_lw;
if (!vtable) {
mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
EMIT_NEW_ICONST (cfg, iargs [1], size);
return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
}
- alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
- if (pass_lw) {
- guint32 lw = vtable->klass->instance_size;
- lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
- EMIT_NEW_ICONST (cfg, iargs [0], lw);
- EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
- }
- else {
- EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
- }
+ alloc_ftn = ves_icall_object_new_specific;
+ EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
}
return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
have to get the method address from the RGCTX. */
MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
- MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
+ MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
- return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
+ return mini_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
}
} else {
gboolean pass_vtable, pass_mrgctx;
NEW_BBLOCK (cfg, is_ref_bb);
NEW_BBLOCK (cfg, is_nullable_bb);
NEW_BBLOCK (cfg, end_bb);
- is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
+ is_ref = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
MONO_START_BB (cfg, is_nullable_bb);
{
- MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
+ MonoInst *addr = mini_emit_get_gsharedvt_info_klass (cfg, klass,
MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
MonoInst *box_call;
MonoMethodSignature *box_sig;
if (cfg->llvm_only)
box_call = emit_llvmonly_calli (cfg, box_sig, &val, addr);
else
- box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
+ box_call = mini_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
res->type = STACK_OBJ;
res->klass = klass;
}
}
-static gboolean
-mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
-{
- int i;
- MonoGenericContainer *container;
- MonoGenericInst *ginst;
-
- if (mono_class_is_ginst (klass)) {
- container = mono_class_get_generic_container (mono_class_get_generic_class (klass)->container_class);
- ginst = mono_class_get_generic_class (klass)->context.class_inst;
- } else if (mono_class_is_gtd (klass) && context_used) {
- container = mono_class_get_generic_container (klass);
- ginst = container->context.class_inst;
- } else {
- return FALSE;
- }
-
- for (i = 0; i < container->type_argc; ++i) {
- MonoType *type;
- if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
- continue;
- type = ginst->type_argv [i];
- if (mini_type_is_reference (type))
- return TRUE;
- }
- return FALSE;
-}
-
static GHashTable* direct_icall_type_hash;
static gboolean
return FALSE;
}
-#define is_complex_isinst(klass) (mono_class_is_interface (klass) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || mono_class_is_sealed (klass) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
-
-static MonoInst*
-emit_isinst_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args)
-{
- MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
- return mono_emit_method_call (cfg, mono_isinst, args, NULL);
-}
-
-static MonoInst*
-emit_castclass_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args)
-{
- MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
- MonoInst *res;
-
- save_cast_details (cfg, klass, args [0]->dreg, TRUE);
- res = mono_emit_method_call (cfg, mono_castclass, args, NULL);
- reset_cast_details (cfg);
-
- return res;
-}
-
-static int
-get_castclass_cache_idx (MonoCompile *cfg)
-{
- /* Each CASTCLASS_CACHE patch needs a unique index which identifies the call site */
- cfg->castclass_cache_index ++;
- return (cfg->method_index << 16) | cfg->castclass_cache_index;
-}
-
-
-static MonoInst*
-emit_isinst_with_cache_nonshared (MonoCompile *cfg, MonoInst *obj, MonoClass *klass)
-{
- MonoInst *args [3];
- int idx;
-
- args [0] = obj; /* obj */
- EMIT_NEW_CLASSCONST (cfg, args [1], klass); /* klass */
-
- idx = get_castclass_cache_idx (cfg); /* inline cache*/
- args [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
-
- return emit_isinst_with_cache (cfg, klass, args);
-}
-
-static MonoInst*
-emit_castclass_with_cache_nonshared (MonoCompile *cfg, MonoInst *obj, MonoClass *klass)
-{
- MonoInst *args [3];
- int idx;
-
- /* obj */
- args [0] = obj;
-
- /* klass */
- EMIT_NEW_CLASSCONST (cfg, args [1], klass);
-
- /* inline cache*/
- idx = get_castclass_cache_idx (cfg);
- args [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
-
- /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
- return emit_castclass_with_cache (cfg, klass, args);
-}
-
-/*
- * Returns NULL and set the cfg exception on error.
- */
-static MonoInst*
-handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
-{
- MonoBasicBlock *is_null_bb;
- int obj_reg = src->dreg;
- int vtable_reg = alloc_preg (cfg);
- MonoInst *klass_inst = NULL;
-
- if (MONO_INS_IS_PCONST_NULL (src))
- return src;
-
- if (context_used) {
- MonoInst *args [3];
-
- if (mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
- MonoInst *cache_ins;
-
- cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
-
- /* obj */
- args [0] = src;
-
- /* klass - it's the second element of the cache entry*/
- EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
-
- /* cache */
- args [2] = cache_ins;
-
- return emit_castclass_with_cache (cfg, klass, args);
- }
-
- klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
- }
-
- NEW_BBLOCK (cfg, is_null_bb);
-
- MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
- MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
-
- save_cast_details (cfg, klass, obj_reg, FALSE);
-
- if (mono_class_is_interface (klass)) {
- MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
- mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
- } else {
- int klass_reg = alloc_preg (cfg);
-
- MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
-
- if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && mono_class_is_sealed (klass)) {
- /* the remoting code is broken, access the class for now */
- if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
- MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
- if (!vt) {
- mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
- cfg->exception_ptr = klass;
- return NULL;
- }
- MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
- } else {
- MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
- MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
- }
- MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
- } else {
- MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
- mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
- }
- }
-
- MONO_START_BB (cfg, is_null_bb);
-
- reset_cast_details (cfg);
-
- return src;
-}
-
-/*
- * Returns NULL and set the cfg exception on error.
- */
-static MonoInst*
-handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
-{
- MonoInst *ins;
- MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
- int obj_reg = src->dreg;
- int vtable_reg = alloc_preg (cfg);
- int res_reg = alloc_ireg_ref (cfg);
- MonoInst *klass_inst = NULL;
-
- if (context_used) {
- MonoInst *args [3];
-
- if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
- MonoInst *cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
-
- args [0] = src; /* obj */
-
- /* klass - it's the second element of the cache entry*/
- EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
-
- args [2] = cache_ins; /* cache */
- return emit_isinst_with_cache (cfg, klass, args);
- }
-
- klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
- }
-
- NEW_BBLOCK (cfg, is_null_bb);
- NEW_BBLOCK (cfg, false_bb);
- NEW_BBLOCK (cfg, end_bb);
-
- /* Do the assignment at the beginning, so the other assignment can be if converted */
- EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
- ins->type = STACK_OBJ;
- ins->klass = klass;
-
- MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
- MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
-
- MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
-
- if (mono_class_is_interface (klass)) {
- g_assert (!context_used);
- /* the is_null_bb target simply copies the input register to the output */
- mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
- } else {
- int klass_reg = alloc_preg (cfg);
-
- if (klass->rank) {
- int rank_reg = alloc_preg (cfg);
- int eclass_reg = alloc_preg (cfg);
-
- g_assert (!context_used);
- MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
- MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
- MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
- MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
- MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
- if (klass->cast_class == mono_defaults.object_class) {
- int parent_reg = alloc_preg (cfg);
- MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
- mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
- mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
- MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
- } else if (klass->cast_class == mono_defaults.enum_class->parent) {
- mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
- mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
- MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
- } else if (klass->cast_class == mono_defaults.enum_class) {
- mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
- MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
- } else if (mono_class_is_interface (klass->cast_class)) {
- mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
- } else {
- if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
- /* Check that the object is a vector too */
- int bounds_reg = alloc_preg (cfg);
- MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
- MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
- MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
- }
-
- /* the is_null_bb target simply copies the input register to the output */
- mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
- }
- } else if (mono_class_is_nullable (klass)) {
- g_assert (!context_used);
- MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
- /* the is_null_bb target simply copies the input register to the output */
- mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
- } else {
- if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && mono_class_is_sealed (klass)) {
- g_assert (!context_used);
- /* the remoting code is broken, access the class for now */
- if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
- MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
- if (!vt) {
- mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
- cfg->exception_ptr = klass;
- return NULL;
- }
- MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
- } else {
- MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
- MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
- }
- MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
- MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
- } else {
- MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
- /* the is_null_bb target simply copies the input register to the output */
- mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
- }
- }
- }
-
- MONO_START_BB (cfg, false_bb);
-
- MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
- MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
-
- MONO_START_BB (cfg, is_null_bb);
-
- MONO_START_BB (cfg, end_bb);
-
- return ins;
-}
-
-static MonoInst*
-handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
-{
- /* This opcode takes as input an object reference and a class, and returns:
- 0) if the object is an instance of the class,
- 1) if the object is not instance of the class,
- 2) if the object is a proxy whose type cannot be determined */
-
- MonoInst *ins;
-#ifndef DISABLE_REMOTING
- MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
-#else
- MonoBasicBlock *true_bb, *false_bb, *end_bb;
-#endif
- int obj_reg = src->dreg;
- int dreg = alloc_ireg (cfg);
- int tmp_reg;
-#ifndef DISABLE_REMOTING
- int klass_reg = alloc_preg (cfg);
-#endif
-
- NEW_BBLOCK (cfg, true_bb);
- NEW_BBLOCK (cfg, false_bb);
- NEW_BBLOCK (cfg, end_bb);
-#ifndef DISABLE_REMOTING
- NEW_BBLOCK (cfg, false2_bb);
- NEW_BBLOCK (cfg, no_proxy_bb);
-#endif
-
- MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
- MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
-
- if (mono_class_is_interface (klass)) {
-#ifndef DISABLE_REMOTING
- NEW_BBLOCK (cfg, interface_fail_bb);
-#endif
-
- tmp_reg = alloc_preg (cfg);
- MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
-#ifndef DISABLE_REMOTING
- mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
- MONO_START_BB (cfg, interface_fail_bb);
- MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
-
- mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
-
- tmp_reg = alloc_preg (cfg);
- MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
- MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
- MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
-#else
- mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
-#endif
- } else {
-#ifndef DISABLE_REMOTING
- tmp_reg = alloc_preg (cfg);
- MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
- MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
-
- mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
- tmp_reg = alloc_preg (cfg);
- MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
- MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
-
- tmp_reg = alloc_preg (cfg);
- MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
- MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
- MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
-
- mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
- MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
-
- MONO_START_BB (cfg, no_proxy_bb);
-
- mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
-#else
- g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
-#endif
- }
-
- MONO_START_BB (cfg, false_bb);
-
- MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
- MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
-
-#ifndef DISABLE_REMOTING
- MONO_START_BB (cfg, false2_bb);
-
- MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
- MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
-#endif
-
- MONO_START_BB (cfg, true_bb);
-
- MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
-
- MONO_START_BB (cfg, end_bb);
-
- /* FIXME: */
- MONO_INST_NEW (cfg, ins, OP_ICONST);
- ins->dreg = dreg;
- ins->type = STACK_I4;
-
- return ins;
-}
-
-static MonoInst*
-handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
-{
- /* This opcode takes as input an object reference and a class, and returns:
- 0) if the object is an instance of the class,
- 1) if the object is a proxy whose type cannot be determined
- an InvalidCastException exception is thrown otherwhise*/
-
- MonoInst *ins;
-#ifndef DISABLE_REMOTING
- MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
-#else
- MonoBasicBlock *ok_result_bb;
-#endif
- int obj_reg = src->dreg;
- int dreg = alloc_ireg (cfg);
- int tmp_reg = alloc_preg (cfg);
-
-#ifndef DISABLE_REMOTING
- int klass_reg = alloc_preg (cfg);
- NEW_BBLOCK (cfg, end_bb);
-#endif
-
- NEW_BBLOCK (cfg, ok_result_bb);
-
- MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
- MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
-
- save_cast_details (cfg, klass, obj_reg, FALSE);
-
- if (mono_class_is_interface (klass)) {
-#ifndef DISABLE_REMOTING
- NEW_BBLOCK (cfg, interface_fail_bb);
-
- MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
- mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
- MONO_START_BB (cfg, interface_fail_bb);
- MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
-
- mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
-
- tmp_reg = alloc_preg (cfg);
- MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
- MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
- MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
-
- MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
- MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
-#else
- MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
- mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
- MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
-#endif
- } else {
-#ifndef DISABLE_REMOTING
- NEW_BBLOCK (cfg, no_proxy_bb);
-
- MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
- MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
- mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
-
- tmp_reg = alloc_preg (cfg);
- MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
- MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
-
- tmp_reg = alloc_preg (cfg);
- MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
- MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
- MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
-
- NEW_BBLOCK (cfg, fail_1_bb);
-
- mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
-
- MONO_START_BB (cfg, fail_1_bb);
-
- MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
- MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
-
- MONO_START_BB (cfg, no_proxy_bb);
-
- mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
-#else
- g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
-#endif
- }
-
- MONO_START_BB (cfg, ok_result_bb);
-
- MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
-
-#ifndef DISABLE_REMOTING
- MONO_START_BB (cfg, end_bb);
-#endif
-
- /* FIXME: */
- MONO_INST_NEW (cfg, ins, OP_ICONST);
- ins->dreg = dreg;
- ins->type = STACK_I4;
-
- return ins;
-}
-
static G_GNUC_UNUSED MonoInst*
handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, MonoInst *enum_flag)
{
/* Set target field */
/* Optimize away setting of NULL target */
if (!MONO_INS_IS_PCONST_NULL (target)) {
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target->dreg, 0);
+ MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
if (cfg->gen_write_barriers) {
dreg = alloc_preg (cfg);
EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
- emit_write_barrier (cfg, ptr, target);
+ mini_emit_write_barrier (cfg, ptr, target);
}
}
}
mono_domain_unlock (domain);
- code_slot_ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
+ code_slot_ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
}
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
}
* pack the arguments into an array, and do the rest of the work in in an icall.
*/
if (((cmethod->klass == mono_defaults.object_class) || mono_class_is_interface (cmethod->klass) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
- (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mini_is_gsharedvt_type (fsig->ret)) &&
+ (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mono_class_is_enum (mono_class_from_mono_type (fsig->ret)) || mini_is_gsharedvt_type (fsig->ret)) &&
(fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || fsig->params [0]->byref || mini_is_gsharedvt_type (fsig->params [0]))))) {
MonoInst *args [16];
args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
else
EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
- args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS);
+ args [2] = mini_emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS);
/* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
if (fsig->hasthis && fsig->param_count) {
if (mini_is_gsharedvt_type (fsig->params [0])) {
int addr_reg, deref_arg_reg;
- ins = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
+ ins = mini_emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
deref_arg_reg = alloc_preg (cfg);
/* deref_arg = BOX_TYPE != MONO_GSHAREDVT_BOX_TYPE_VTYPE */
EMIT_NEW_BIALU_IMM (cfg, args [3], OP_ISUB_IMM, deref_arg_reg, ins->dreg, 1);
if (mini_is_gsharedvt_type (fsig->ret)) {
ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins);
- } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret)) {
+ } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mono_class_is_enum (mono_class_from_mono_type (fsig->ret))) {
MonoInst *add;
/* Unbox */
if (cfg->disable_inline)
return FALSE;
- if (cfg->gshared)
+ if (cfg->gsharedvt)
return FALSE;
if (cfg->inline_depth > 10)
/* also consider num_locals? */
/* Do the size check early to avoid creating vtables */
if (!inline_limit_inited) {
- if (g_getenv ("MONO_INLINELIMIT"))
- inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"));
- else
+ char *inlinelimit;
+ if ((inlinelimit = g_getenv ("MONO_INLINELIMIT"))) {
+ inline_limit = atoi (inlinelimit);
+ g_free (inlinelimit);
+ } else
inline_limit = INLINE_LENGTH_LIMIT;
inline_limit_inited = TRUE;
}
* since it would mean inserting a call to mono_runtime_class_init()
* inside the inlined code
*/
+ if (cfg->gshared && method->klass->has_cctor && mini_class_check_context_used (cfg, method->klass))
+ return FALSE;
+
if (!(cfg->opt & MONO_OPT_SHARED)) {
/* The AggressiveInlining hint is a good excuse to force that cctor to run. */
if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
- vtable = mono_class_vtable (cfg->domain, method->klass);
- if (!vtable)
- return FALSE;
- if (!cfg->compile_aot) {
- MonoError error;
- if (!mono_runtime_class_init_full (vtable, &error)) {
- mono_error_cleanup (&error);
+ if (method->klass->has_cctor) {
+ vtable = mono_class_vtable (cfg->domain, method->klass);
+ if (!vtable)
return FALSE;
+ if (!cfg->compile_aot) {
+ MonoError error;
+ if (!mono_runtime_class_init_full (vtable, &error)) {
+ mono_error_cleanup (&error);
+ return FALSE;
+ }
}
}
} else if (mono_class_is_before_field_init (method->klass)) {
if (g_list_find (cfg->dont_inline, method))
return FALSE;
+ if (mono_profiler_get_call_instrumentation_flags (method))
+ return FALSE;
+
return TRUE;
}
#if SIZEOF_REGISTER == 8
/* The array reg is 64 bits but the index reg is only 32 */
if (COMPILE_LLVM (cfg)) {
- /* Not needed */
+ /*
+ * abcrem can't handle the OP_SEXT_I4, so add this after abcrem,
+ * during OP_BOUNDS_CHECK decomposition, and in the implementation
+ * of OP_X86_LEA for llvm.
+ */
index2_reg = index_reg;
} else {
index2_reg = alloc_preg (cfg);
g_assert (cfg->gshared);
context_used = mini_class_check_context_used (cfg, klass);
g_assert (context_used);
- rgctx_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
+ rgctx_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
} else {
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
return addr;
}
-static MonoBreakPolicy
-always_insert_breakpoint (MonoMethod *method)
-{
- return MONO_BREAK_POLICY_ALWAYS;
-}
-
-static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
-
-/**
- * mono_set_break_policy:
- * policy_callback: the new callback function
- *
- * Allow embedders to decide wherther to actually obey breakpoint instructions
- * (both break IL instructions and Debugger.Break () method calls), for example
- * to not allow an app to be aborted by a perfectly valid IL opcode when executing
- * untrusted or semi-trusted code.
- *
- * @policy_callback will be called every time a break point instruction needs to
- * be inserted with the method argument being the method that calls Debugger.Break()
- * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
- * if it wants the breakpoint to not be effective in the given method.
- * #MONO_BREAK_POLICY_ALWAYS is the default.
- */
-void
-mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
-{
- if (policy_callback)
- break_policy_func = policy_callback;
- else
- break_policy_func = always_insert_breakpoint;
-}
-
-static gboolean
-should_insert_brekpoint (MonoMethod *method) {
- switch (break_policy_func (method)) {
- case MONO_BREAK_POLICY_ALWAYS:
- return TRUE;
- case MONO_BREAK_POLICY_NEVER:
- return FALSE;
- case MONO_BREAK_POLICY_ON_DBG:
- g_warning ("mdb no longer supported");
- return FALSE;
- default:
- g_warning ("Incorrect value returned from break policy callback");
- return FALSE;
- }
-}
-
/* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
static MonoInst*
emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
if (mini_type_is_reference (&eklass->byval_arg))
- emit_write_barrier (cfg, addr, load);
+ mini_emit_write_barrier (cfg, addr, load);
} else {
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
if (generic_class_is_reference_type (cfg, klass))
- emit_write_barrier (cfg, addr, sp [2]);
+ mini_emit_write_barrier (cfg, addr, sp [2]);
}
return ins;
}
return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
}
-static MonoInst*
-emit_memory_barrier (MonoCompile *cfg, int kind)
+MonoInst*
+mini_emit_memory_barrier (MonoCompile *cfg, int kind)
{
MonoInst *ins = NULL;
MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
return NULL;
}
+
+static gboolean
+mono_type_is_native_blittable (MonoType *t)
+{
+ if (MONO_TYPE_IS_REFERENCE (t))
+ return FALSE;
+
+ if (MONO_TYPE_IS_PRIMITIVE_SCALAR (t))
+ return TRUE;
+
+ MonoClass *klass = mono_class_from_mono_type (t);
+
+	//MonoClass::blittable depends on mono_class_setup_fields being done.
+ mono_class_setup_fields (klass);
+ if (!klass->blittable)
+ return FALSE;
+
+ // If the native marshal size is different we can't convert PtrToStructure to a type load
+ if (mono_class_native_size (klass, NULL) != mono_class_value_size (klass, NULL))
+ return FALSE;
+
+ return TRUE;
+}
+
+
static MonoInst*
mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
MonoInst *ins = NULL;
-
- MonoClass *runtime_helpers_class = mono_class_get_runtime_helpers_class ();
+ MonoClass *runtime_helpers_class = mono_class_get_runtime_helpers_class ();
if (cmethod->klass == mono_defaults.string_class) {
if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count + fsig->hasthis == 2) {
} else if (cmethod->klass == runtime_helpers_class) {
if (strcmp (cmethod->name, "get_OffsetToStringData") == 0 && fsig->param_count == 0) {
EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
+ return ins;
+ } else if (strcmp (cmethod->name, "IsReferenceOrContainsReferences") == 0 && fsig->param_count == 0) {
+ MonoGenericContext *ctx = mono_method_get_context (cmethod);
+ g_assert (ctx);
+ g_assert (ctx->method_inst);
+ g_assert (ctx->method_inst->type_argc == 1);
+ MonoType *arg_type = ctx->method_inst->type_argv [0];
+ MonoType *t;
+ MonoClass *klass;
+
+ ins = NULL;
+
+		/* Resolve the argument class as much as possible so we can handle common cases fast */
+ t = mini_get_underlying_type (arg_type);
+ klass = mono_class_from_mono_type (t);
+ mono_class_init (klass);
+ if (MONO_TYPE_IS_REFERENCE (t))
+ EMIT_NEW_ICONST (cfg, ins, 1);
+ else if (MONO_TYPE_IS_PRIMITIVE (t))
+ EMIT_NEW_ICONST (cfg, ins, 0);
+ else if (cfg->gshared && (t->type == MONO_TYPE_VAR || t->type == MONO_TYPE_MVAR) && !mini_type_var_is_vt (t))
+ EMIT_NEW_ICONST (cfg, ins, 1);
+ else if (!cfg->gshared || !mini_class_check_context_used (cfg, klass))
+ EMIT_NEW_ICONST (cfg, ins, klass->has_references ? 1 : 0);
+ else {
+ g_assert (cfg->gshared);
+
+ /* Have to use the original argument class here */
+ MonoClass *arg_class = mono_class_from_mono_type (arg_type);
+ int context_used = mini_class_check_context_used (cfg, arg_class);
+
+ /* This returns 1 or 2 */
+ MonoInst *info = mini_emit_get_rgctx_klass (cfg, context_used, arg_class, MONO_RGCTX_INFO_CLASS_IS_REF_OR_CONTAINS_REFS);
+ int dreg = alloc_ireg (cfg);
+ EMIT_NEW_BIALU_IMM (cfg, ins, OP_ISUB_IMM, dreg, info->dreg, 1);
+ }
+
return ins;
} else
return NULL;
ins = mono_emit_jit_icall (cfg, is_v4 ? (gpointer)mono_monitor_enter_v4_fast : (gpointer)mono_monitor_enter_fast, args);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, ins->dreg, 0);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, end_bb);
- ins = mono_emit_jit_icall (cfg, is_v4 ? (gpointer)mono_monitor_enter_v4 : (gpointer)mono_monitor_enter, args);
+ ins = mono_emit_jit_icall (cfg, is_v4 ? (gpointer)mono_monitor_enter_v4_internal : (gpointer)mono_monitor_enter_internal, args);
MONO_START_BB (cfg, end_bb);
return ins;
}
MONO_ADD_INS (cfg->cbb, ins);
return ins;
} else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) {
- return emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
+ return mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
} else if (!strcmp (cmethod->name, "VolatileRead") && fsig->param_count == 1) {
guint32 opcode = 0;
gboolean is_ref = mini_type_is_reference (fsig->params [0]);
if (opcode == OP_LOADI8_MEMBASE)
ins = mono_decompose_opcode (cfg, ins);
- emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
+ mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
return ins;
}
opcode = OP_STORE_MEMBASE_REG;
if (opcode) {
- emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
+ mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
MONO_INST_NEW (cfg, ins, opcode);
ins->sreg1 = args [1]->dreg;
} else {
MonoInst *load_ins;
- emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
+ mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
/* 64 bit reads are already atomic */
MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
load_ins->type = STACK_I8;
MONO_ADD_INS (cfg->cbb, load_ins);
- emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
+ mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
ins = load_ins;
}
}
if (cfg->gen_write_barriers && is_ref)
- emit_write_barrier (cfg, args [0], args [1]);
+ mini_emit_write_barrier (cfg, args [0], args [1]);
}
else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 3) {
MonoInst *f2i_new = NULL, *f2i_cmp = NULL, *i2f;
}
if (cfg->gen_write_barriers && is_ref)
- emit_write_barrier (cfg, args [0], args [1]);
+ mini_emit_write_barrier (cfg, args [0], args [1]);
}
else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 4 &&
fsig->params [1]->type == MONO_TYPE_I4) {
cfg->has_atomic_cas_i4 = TRUE;
}
else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0)
- ins = emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
+ ins = mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
if (ins)
return ins;
MONO_ADD_INS (cfg->cbb, ins);
if (cfg->gen_write_barriers && is_ref)
- emit_write_barrier (cfg, args [0], args [1]);
+ mini_emit_write_barrier (cfg, args [0], args [1]);
}
}
(strcmp (cmethod->klass->name_space, "System.Diagnostics") == 0) &&
(strcmp (cmethod->klass->name, "Debugger") == 0)) {
if (!strcmp (cmethod->name, "Break") && fsig->param_count == 0) {
- if (should_insert_brekpoint (cfg->method)) {
+ if (mini_should_insert_breakpoint (cfg->method)) {
ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
} else {
MONO_INST_NEW (cfg, ins, OP_NOP);
MONO_ADD_INS (cfg->cbb, ins);
return ins;
}
+ } else if (cmethod->klass->image == mono_defaults.corlib &&
+ (strcmp (cmethod->klass->name_space, "System.Runtime.InteropServices") == 0) &&
+ (strcmp (cmethod->klass->name, "Marshal") == 0)) {
+ //Convert Marshal.PtrToStructure<T> of blittable T to direct loads
+ if (strcmp (cmethod->name, "PtrToStructure") == 0 &&
+ cmethod->is_inflated &&
+ fsig->param_count == 1 &&
+ !mini_method_check_context_used (cfg, cmethod)) {
+
+ MonoGenericContext *method_context = mono_method_get_context (cmethod);
+ MonoType *arg0 = method_context->method_inst->type_argv [0];
+ if (mono_type_is_native_blittable (arg0))
+ return mini_emit_memory_load (cfg, arg0, args [0], 0, 0);
+ }
}
#ifdef MONO_ARCH_SIMD_INTRINSICS
{
if (method->klass == mono_defaults.string_class) {
/* managed string allocation support */
- if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
+ if (strcmp (method->name, "InternalAllocateStr") == 0 && !(cfg->opt & MONO_OPT_SHARED)) {
MonoInst *iargs [2];
MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
MonoMethod *managed_alloc = NULL;
}
}
+/*
+ * mini_inline_method:
+ *
+ *   Cross-file entry point for the file-static inline_method () below.
+ * All arguments and the return value are passed through unchanged.
+ */
+int
+mini_inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, guchar *ip, guint real_offset, gboolean inline_always)
+{
+	return inline_method (cfg, cmethod, fsig, sp, ip, real_offset, inline_always);
+}
+
/*
* inline_method:
*
prev_args = cfg->args;
prev_arg_types = cfg->arg_types;
prev_inlined_method = cfg->inlined_method;
- cfg->inlined_method = cmethod;
- cfg->ret_var_set = FALSE;
- cfg->inline_depth ++;
+ prev_ret_var_set = cfg->ret_var_set;
prev_real_offset = cfg->real_offset;
prev_cbb_hash = cfg->cbb_hash;
prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
prev_cbb = cfg->cbb;
prev_current_method = cfg->current_method;
prev_generic_context = cfg->generic_context;
- prev_ret_var_set = cfg->ret_var_set;
prev_disable_inline = cfg->disable_inline;
+ cfg->inlined_method = cmethod;
+ cfg->ret_var_set = FALSE;
+ cfg->inline_depth ++;
+
if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
virtual_ = TRUE;
if ((costs >= 0 && costs < 60) || inline_always || (costs >= 0 && (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))) {
if (cfg->verbose_level > 2)
printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
-
+
cfg->stat_inlined_methods++;
/* always add some code to avoid block split failures */
* Get rid of the begin and end bblocks if possible to aid local
* optimizations.
*/
- mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
+ if (prev_cbb->out_count == 1)
+ mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
{
MonoMethod *method;
- mono_error_init (error);
+ error_init (error);
if (m->wrapper_type != MONO_WRAPPER_NONE) {
method = (MonoMethod *)mono_method_get_wrapper_data (m, token);
return method;
}
-static inline MonoClass*
-mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
-{
- MonoError error;
- MonoClass *klass;
-
- if (method->wrapper_type != MONO_WRAPPER_NONE) {
- klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
- if (context) {
- klass = mono_class_inflate_generic_class_checked (klass, context, &error);
- mono_error_cleanup (&error); /* FIXME don't swallow the error */
- }
- } else {
- klass = mono_class_get_and_inflate_typespec_checked (method->klass->image, token, context, &error);
- mono_error_cleanup (&error); /* FIXME don't swallow the error */
- }
- if (klass)
- mono_class_init (klass);
- return klass;
-}
-
static inline MonoMethodSignature*
mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context, MonoError *error)
{
MonoMethodSignature *fsig;
- mono_error_init (error);
+ error_init (error);
if (method->wrapper_type != MONO_WRAPPER_NONE) {
fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
} else {
return NULL;
if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
return NULL;
- switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
- case MONO_TYPE_BOOLEAN:
+ switch (mini_get_underlying_type (&klass->byval_arg)->type) {
case MONO_TYPE_I1:
case MONO_TYPE_U1:
size = 1; break;
/* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
#if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
- case MONO_TYPE_CHAR:
case MONO_TYPE_I2:
case MONO_TYPE_U2:
size = 2; break;
}
return data_ptr;
}
- return NULL;
-}
-
-static void
-set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
-{
- MonoError error;
- char *method_fname = mono_method_full_name (method, TRUE);
- char *method_code;
- MonoMethodHeader *header = mono_method_get_header_checked (method, &error);
-
- if (!header) {
- method_code = g_strdup_printf ("could not parse method body due to %s", mono_error_get_message (&error));
- mono_error_cleanup (&error);
- } else if (header->code_size == 0)
- method_code = g_strdup ("method body is empty.");
- else
- method_code = mono_disasm_code_one (NULL, method, ip, NULL);
- mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code));
- g_free (method_fname);
- g_free (method_code);
- cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
+ return NULL;
+}
+
+/*
+ * set_exception_type_from_invalid_il:
+ *
+ *   Mark CFG as failed with an "Invalid IL code" exception whose message
+ * names METHOD and, when possible, disassembles the offending instruction
+ * at IP. Falls back to an explanatory message when the method body cannot
+ * be parsed or is empty.
+ */
+static void
+set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
+{
+	MonoError error;
+	char *method_fname = mono_method_full_name (method, TRUE);
+	char *method_code;
+	MonoMethodHeader *header = mono_method_get_header_checked (method, &error);
+
+	if (!header) {
+		method_code = g_strdup_printf ("could not parse method body due to %s", mono_error_get_message (&error));
+		mono_error_cleanup (&error);
+	} else if (header->code_size == 0)
+		method_code = g_strdup ("method body is empty.");
+	else
+		/* Disassemble just the single instruction at IP */
+		method_code = mono_disasm_code_one (NULL, method, ip, NULL);
+	mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code));
+	g_free (method_fname);
+	g_free (method_code);
+	/* Defer freeing HEADER (may be NULL here) until compilation teardown */
+	cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
+}
+
+/*
+ * mono_type_to_stloc_coerce:
+ *
+ *   Return the narrowing conversion opcode that must be applied to a stack
+ * value before it is stored into a local or argument slot of type TYPE
+ * (used by emit_stloc_ir () and emit_starg_ir ()), or 0 when no coercion
+ * is needed.
+ */
+static guint32
+mono_type_to_stloc_coerce (MonoType *type)
+{
+	/* Managed pointers are stored as-is, no coercion */
+	if (type->byref)
+		return 0;
+
+	type = mini_get_underlying_type (type);
+handle_enum:
+	switch (type->type) {
+	/* Small integer slots: the evaluation-stack value is at least I4 wide,
+	 * so it has to be truncated/sign-extended on store */
+	case MONO_TYPE_I1:
+		return OP_ICONV_TO_I1;
+	case MONO_TYPE_U1:
+		return OP_ICONV_TO_U1;
+	case MONO_TYPE_I2:
+		return OP_ICONV_TO_I2;
+	case MONO_TYPE_U2:
+		return OP_ICONV_TO_U2;
+	/* Everything at least register-sized is stored unchanged */
+	case MONO_TYPE_I4:
+	case MONO_TYPE_U4:
+	case MONO_TYPE_I:
+	case MONO_TYPE_U:
+	case MONO_TYPE_PTR:
+	case MONO_TYPE_FNPTR:
+	case MONO_TYPE_CLASS:
+	case MONO_TYPE_STRING:
+	case MONO_TYPE_OBJECT:
+	case MONO_TYPE_SZARRAY:
+	case MONO_TYPE_ARRAY:
+	case MONO_TYPE_I8:
+	case MONO_TYPE_U8:
+	case MONO_TYPE_R4:
+	case MONO_TYPE_R8:
+	case MONO_TYPE_TYPEDBYREF:
+	case MONO_TYPE_GENERICINST:
+		return 0;
+	case MONO_TYPE_VALUETYPE:
+		if (type->data.klass->enumtype) {
+			/* Coerce according to the enum's underlying integral type */
+			type = mono_class_enum_basetype (type->data.klass);
+			goto handle_enum;
+		}
+		return 0;
+	case MONO_TYPE_VAR:
+	case MONO_TYPE_MVAR: /* TODO: gsharedvt presumably needs no handling here since there will be no match and, e.g., u1 is not covariant to u32 -- confirm */
+		return 0;
+	default:
+		g_error ("unknown type 0x%02x in mono_type_to_stloc_coerce", type->type);
+	}
+	return -1; /* not reached: g_error () aborts */
}
static void
emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
{
MonoInst *ins;
+ guint32 coerce_op = mono_type_to_stloc_coerce (header->locals [n]);
+
+ if (coerce_op) {
+ if (cfg->cbb->last_ins == sp [0] && sp [0]->opcode == coerce_op) {
+ if (cfg->verbose_level > 2)
+ printf ("Found existing coercing is enough for stloc\n");
+ } else {
+ MONO_INST_NEW (cfg, ins, coerce_op);
+ ins->dreg = alloc_ireg (cfg);
+ ins->sreg1 = sp [0]->dreg;
+ ins->type = STACK_I4;
+ ins->klass = mono_class_from_mono_type (header->locals [n]);
+ MONO_ADD_INS (cfg->cbb, ins);
+ *sp = mono_decompose_opcode (cfg, ins);
+ }
+ }
+
+
guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
}
}
+/*
+ * emit_starg_ir:
+ *
+ *   Store the stack value *SP into argument N of the current method,
+ * first emitting the narrowing coercion required by the argument's
+ * declared type, if any (see mono_type_to_stloc_coerce ()).
+ */
+static void
+emit_starg_ir (MonoCompile *cfg, MonoInst **sp, int n)
+{
+	MonoInst *ins;
+	guint32 coerce_op = mono_type_to_stloc_coerce (cfg->arg_types [n]);
+
+	if (coerce_op) {
+		if (cfg->cbb->last_ins == sp [0] && sp [0]->opcode == coerce_op) {
+			/* The value was just produced by the same conversion opcode, reuse it */
+			if (cfg->verbose_level > 2)
+				printf ("Found existing coercing is enough for starg\n");
+		} else {
+			MONO_INST_NEW (cfg, ins, coerce_op);
+			ins->dreg = alloc_ireg (cfg);
+			ins->sreg1 = sp [0]->dreg;
+			ins->type = STACK_I4;
+			ins->klass = mono_class_from_mono_type (cfg->arg_types [n]);
+			MONO_ADD_INS (cfg->cbb, ins);
+			/* Replace the stack top with the (possibly decomposed) coerced value */
+			*sp = mono_decompose_opcode (cfg, ins);
+		}
+	}
+
+	EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
+}
+
/*
* ldloca inhibits many optimizations so try to get rid of it in common
* cases.
gboolean variant_iface = FALSE;
guint32 slot;
int offset;
+ gboolean special_array_interface = cmethod->klass->is_array_special_interface;
/*
* In llvm-only mode, vtables contain function descriptors instead of
return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
}
- if (!fsig->generic_param_count && is_iface && !variant_iface && !is_gsharedvt) {
+ if (!fsig->generic_param_count && is_iface && !variant_iface && !is_gsharedvt && !special_array_interface) {
/*
* A simple interface call
*
icall_args [0] = thunk_arg_ins;
icall_args [1] = emit_get_rgctx_method (cfg, context_used,
cmethod, MONO_RGCTX_INFO_METHOD);
- ftndesc_ins = mono_emit_calli (cfg, helper_sig_llvmonly_imt_trampoline, icall_args, thunk_addr_ins, NULL, NULL);
+ ftndesc_ins = mini_emit_calli (cfg, helper_sig_llvmonly_imt_trampoline, icall_args, thunk_addr_ins, NULL, NULL);
return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
}
- if ((fsig->generic_param_count || variant_iface) && !is_gsharedvt) {
+ if ((fsig->generic_param_count || variant_iface || special_array_interface) && !is_gsharedvt) {
/*
* This is similar to the interface case, the vtable slot points to an imt thunk which is
* dynamically extended as more instantiations are discovered.
icall_args [0] = thunk_arg_ins;
icall_args [1] = emit_get_rgctx_method (cfg, context_used,
cmethod, MONO_RGCTX_INFO_METHOD);
- ftndesc_ins = mono_emit_calli (cfg, helper_sig_llvmonly_imt_trampoline, icall_args, thunk_addr_ins, NULL, NULL);
+ ftndesc_ins = mini_emit_calli (cfg, helper_sig_llvmonly_imt_trampoline, icall_args, thunk_addr_ins, NULL, NULL);
ftndesc_ins->dreg = ftndesc_reg;
/*
* Unlike normal iface calls, these imt thunks can return NULL, i.e. when they are passed an instantiation
cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
} else {
if (context_used) {
- vtable_arg = emit_get_rgctx_klass (cfg, context_used,
+ vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used,
cmethod->klass, MONO_RGCTX_INFO_VTABLE);
} else {
MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
// FIXME: Avoid initializing vtable_arg
emit_llvmonly_calli (cfg, fsig, sp, addr);
} else {
- mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
+ mini_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
}
} else if (context_used &&
((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
cmethod_addr = emit_get_rgctx_method (cfg, context_used,
cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
- mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
+ mini_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
}
} else {
INLINE_FAILURE ("ctor call");
seq_points = FALSE;
}
- if (cfg->gen_sdb_seq_points && cfg->method == method) {
+ if (cfg->method == method)
+ cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
+
+ if ((cfg->gen_sdb_seq_points && cfg->method == method) || cfg->coverage_info) {
minfo = mono_debug_lookup_method (method);
if (minfo) {
MonoSymSeqPoint *sps;
mono_bitset_set_fast (seq_point_locs, sps [i].il_offset);
}
g_free (sps);
+
+ MonoDebugMethodAsyncInfo* asyncMethod = mono_debug_lookup_method_async_debug_info (method);
+ if (asyncMethod) {
+ for (i = 0; asyncMethod != NULL && i < asyncMethod->num_awaits; i++)
+ {
+ mono_bitset_set_fast (seq_point_locs, asyncMethod->resume_offsets[i]);
+ mono_bitset_set_fast (seq_point_locs, asyncMethod->yield_offsets[i]);
+ }
+ mono_debug_free_method_async_debug_info (asyncMethod);
+ }
} else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (method->klass->image)) {
/* Methods without line number info like auto-generated property accessors */
seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
if (cfg->method == method) {
-
- if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
- cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
-
/* ENTRY BLOCK */
NEW_BBLOCK (cfg, start_bblock);
cfg->bb_entry = start_bblock;
tblock->real_offset = clause->handler_offset;
tblock->flags |= BB_EXCEPTION_HANDLER;
+ if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
+ mono_create_exvar_for_offset (cfg, clause->handler_offset);
/*
* Linking the try block with the EH block hinders inlining as we won't be able to
* merge the bblocks from inlining and produce an artificial hole for no good reason.
if (sym_seq_points)
mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
- }
- cfg->cbb->real_offset = cfg->real_offset;
+ if ((cfg->method == method) && cfg->coverage_info) {
+ guint32 cil_offset = ip - header->code;
+ gpointer counter = &cfg->coverage_info->data [cil_offset].count;
+ cfg->coverage_info->data [cil_offset].cil_code = ip;
- if ((cfg->method == method) && cfg->coverage_info) {
- guint32 cil_offset = ip - header->code;
- cfg->coverage_info->data [cil_offset].cil_code = ip;
+ if (mono_arch_opcode_supported (OP_ATOMIC_ADD_I4)) {
+ MonoInst *one_ins, *load_ins;
- /* TODO: Use an increment here */
-#if defined(TARGET_X86)
- MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
- ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
- ins->inst_imm = 1;
- MONO_ADD_INS (cfg->cbb, ins);
-#else
- EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
- MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
-#endif
+ EMIT_NEW_PCONST (cfg, load_ins, counter);
+ EMIT_NEW_ICONST (cfg, one_ins, 1);
+ MONO_INST_NEW (cfg, ins, OP_ATOMIC_ADD_I4);
+ ins->dreg = mono_alloc_ireg (cfg);
+ ins->inst_basereg = load_ins->dreg;
+ ins->inst_offset = 0;
+ ins->sreg2 = one_ins->dreg;
+ ins->type = STACK_I4;
+ MONO_ADD_INS (cfg->cbb, ins);
+ } else {
+ EMIT_NEW_PCONST (cfg, ins, counter);
+ MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
+ }
+ }
}
+ cfg->cbb->real_offset = cfg->real_offset;
+
if (cfg->verbose_level > 3)
printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
MONO_ADD_INS (cfg->cbb, ins);
break;
case CEE_BREAK:
- if (should_insert_brekpoint (cfg->method)) {
+ if (mini_should_insert_breakpoint (cfg->method)) {
ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
} else {
MONO_INST_NEW (cfg, ins, OP_NOP);
CHECK_ARG (n);
if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
UNVERIFIED;
- EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
+ emit_starg_ir (cfg, sp, n);
ip += 2;
break;
case CEE_LDLOC_S:
CHECK_STACK_OVF (1);
n = ip [1];
CHECK_LOCAL (n);
- EMIT_NEW_LOCLOAD (cfg, ins, n);
+ if ((ip [2] == CEE_LDFLD) && ip_in_bb (cfg, cfg->cbb, ip + 2) && MONO_TYPE_ISSTRUCT (header->locals [n])) {
+ /* Avoid loading a struct just to load one of its fields */
+ EMIT_NEW_LOCLOADA (cfg, ins, n);
+ } else {
+ EMIT_NEW_LOCLOAD (cfg, ins, n);
+ }
*sp++ = ins;
ip += 2;
break;
if (cfg->gshared && mono_method_check_context_used (cmethod))
GENERIC_SHARING_FAILURE (CEE_JMP);
- emit_instrumentation_call (cfg, mono_profiler_method_leave);
+ mini_profiler_emit_tail_call (cfg, cmethod);
fsig = mono_method_signature (cmethod);
n = fsig->param_count + fsig->hasthis;
addr = emit_get_rgctx_sig (cfg, context_used,
fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
- ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
+ ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, callee);
goto calli_end;
}
goto calli_end;
}
}
- ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
+ ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
calli_end:
CHECK_CFG_ERROR;
}
}
+
+ if (constrained_class->enumtype && !strcmp (cmethod->name, "GetHashCode")) {
+ /* Use the corresponding method from the base type to avoid boxing */
+ MonoType *base_type = mono_class_enum_basetype (constrained_class);
+ g_assert (base_type);
+ constrained_class = mono_class_from_mono_type (base_type);
+ cmethod = mono_class_get_method_from_name (constrained_class, cmethod->name, 0);
+ g_assert (cmethod);
+ }
}
if (!dont_verify && !cfg->skip_visibility) {
sp -= n;
+ if (cmethod && cmethod->klass->image == mono_defaults.corlib && !strcmp (cmethod->klass->name, "ThrowHelper"))
+ cfg->cbb->out_of_line = TRUE;
+
/*
* We have the `constrained.' prefix opcode.
*/
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
/* Non-ref case */
- nonbox_call = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
+ nonbox_call = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
ins->klass = constrained_class;
sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
- ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
+ ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
} else {
g_assert (mono_class_is_interface (cmethod->klass));
addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
- ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
+ ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
goto call_end;
}
} else if (constrained_class->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
* request a generic sharing context.
*/
if (context_used &&
- ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
+ ((cfg->method->flags & METHOD_ATTRIBUTE_STATIC) || cfg->method->klass->valuetype))
mono_get_vtable_var (cfg);
}
if (pass_vtable) {
if (context_used) {
- vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
+ vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
} else {
MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
- ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
+ ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
}
goto call_end;
}
for (i = 0; i < n; ++i)
EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
+
+ mini_profiler_emit_tail_call (cfg, cmethod);
+
MONO_INST_NEW (cfg, ins, OP_BR);
MONO_ADD_INS (cfg->cbb, ins);
tblock = start_bblock->out_bb [0];
// FIXME: Avoid initializing vtable_arg
ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
} else {
- ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
+ ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
}
goto call_end;
}
ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
} else {
addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
- ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
+ ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
}
goto call_end;
}
addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
if (cfg->gen_write_barriers && val->type == STACK_OBJ && !MONO_INS_IS_PCONST_NULL (val))
- emit_write_barrier (cfg, addr, val);
+ mini_emit_write_barrier (cfg, addr, val);
if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cmethod->klass))
GSHAREDVT_FAILURE (*ip);
} else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
/* Handle tail calls similarly to normal calls */
tail_call = TRUE;
} else {
- emit_instrumentation_call (cfg, mono_profiler_method_leave);
+ mini_profiler_emit_tail_call (cfg, cmethod);
MONO_INST_NEW_CALL (cfg, call, OP_JMP);
call->tail_call = TRUE;
}
/* Common call */
- if (!(cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
+ if (!(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) && !(cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
INLINE_FAILURE ("call");
ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual_ ? sp [0] : NULL,
imt_arg, vtable_arg);
break;
}
case CEE_RET:
+ mini_profiler_emit_leave (cfg, sig->ret->type != MONO_TYPE_VOID ? sp [-1] : NULL);
+
if (cfg->method != method) {
/* return from inlined method */
/*
cfg->ret_var_set = TRUE;
}
} else {
- emit_instrumentation_call (cfg, mono_profiler_method_leave);
-
if (cfg->lmf_var && cfg->cbb->in_count && !cfg->llvm_only)
emit_pop_lmf (cfg);
CHECK_STACK (1);
--sp;
- switch (*ip) {
- case CEE_LDIND_R4:
- case CEE_LDIND_R8:
- dreg = alloc_freg (cfg);
- break;
- case CEE_LDIND_I8:
- dreg = alloc_lreg (cfg);
- break;
- case CEE_LDIND_REF:
- dreg = alloc_ireg_ref (cfg);
- break;
- default:
- dreg = alloc_preg (cfg);
- }
-
- NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
- ins->type = ldind_type [*ip - CEE_LDIND_I1];
- if (*ip == CEE_LDIND_R4)
- ins->type = cfg->r4_stack_type;
- ins->flags |= ins_flag;
- MONO_ADD_INS (cfg->cbb, ins);
+ ins = mini_emit_memory_load (cfg, &ldind_to_type (*ip)->byval_arg, sp [0], 0, ins_flag);
*sp++ = ins;
- if (ins_flag & MONO_INST_VOLATILE) {
- /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
- emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
- }
ins_flag = 0;
++ip;
break;
if (ins_flag & MONO_INST_VOLATILE) {
/* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
- emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
+ mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
}
NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
MONO_ADD_INS (cfg->cbb, ins);
if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !MONO_INS_IS_PCONST_NULL (sp [1]))
- emit_write_barrier (cfg, sp [0], sp [1]);
+ mini_emit_write_barrier (cfg, sp [0], sp [1]);
inline_costs += 1;
++ip;
klass = mini_get_class (method, token, generic_context);
CHECK_TYPELOAD (klass);
sp -= 2;
- if (generic_class_is_reference_type (cfg, klass)) {
- MonoInst *store, *load;
- int dreg = alloc_ireg_ref (cfg);
-
- NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
- load->flags |= ins_flag;
- MONO_ADD_INS (cfg->cbb, load);
-
- NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
- store->flags |= ins_flag;
- MONO_ADD_INS (cfg->cbb, store);
-
- if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
- emit_write_barrier (cfg, sp [0], sp [1]);
- } else {
- mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
- }
+ mini_emit_memory_copy (cfg, sp [0], sp [1], klass, FALSE, ins_flag);
ins_flag = 0;
ip += 5;
break;
ip += stloc_len;
if (ins_flag & MONO_INST_VOLATILE) {
/* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
- emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
+ mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
}
ins_flag = 0;
break;
}
/* Optimize the ldobj+stobj combination */
- /* The reference case ends up being a load+store anyway */
- /* Skip this if the operation is volatile. */
- if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
+ if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 5) && read32 (ip + 6) == token)) {
CHECK_STACK (1);
sp --;
- mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
+ mini_emit_memory_copy (cfg, sp [0], sp [1], klass, FALSE, ins_flag);
ip += 5 + 5;
ins_flag = 0;
break;
}
- EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
- ins->flags |= ins_flag;
+ ins = mini_emit_memory_load (cfg, &klass->byval_arg, sp [0], 0, ins_flag);
*sp++ = ins;
- if (ins_flag & MONO_INST_VOLATILE) {
- /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
- emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
- }
-
ip += 5;
ins_flag = 0;
inline_costs += 1;
TYPE_LOAD_ERROR (cmethod->klass);
context_used = mini_method_check_context_used (cfg, cmethod);
+
+ if (!dont_verify && !cfg->skip_visibility) {
+ MonoMethod *cil_method = cmethod;
+ MonoMethod *target_method = cil_method;
+
+ if (method->is_inflated) {
+ target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context), &cfg->error);
+ CHECK_CFG_ERROR;
+ }
+
+ if (!mono_method_can_access_method (method_definition, target_method) &&
+ !mono_method_can_access_method (method, cil_method))
+ emit_method_access_failure (cfg, method, cil_method);
+ }
if (mono_security_core_clr_enabled ())
ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
if (ins_flag & MONO_INST_VOLATILE) {
/* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
- emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
+ mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
}
if (mini_is_gsharedvt_klass (klass)) {
dreg = alloc_ireg_mp (cfg);
EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
wbarrier_ptr_ins = ins;
- /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
+ /* The decomposition will call mini_emit_memory_copy () which will emit a wbarrier if needed */
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
} else {
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !MONO_INS_IS_PCONST_NULL (sp [1])) {
if (mini_is_gsharedvt_klass (klass)) {
g_assert (wbarrier_ptr_ins);
- emit_write_barrier (cfg, wbarrier_ptr_ins, sp [1]);
+ mini_emit_write_barrier (cfg, wbarrier_ptr_ins, sp [1]);
} else {
/* insert call to write barrier */
MonoInst *ptr;
dreg = alloc_ireg_mp (cfg);
EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
- emit_write_barrier (cfg, ptr, sp [1]);
+ mini_emit_write_barrier (cfg, ptr, sp [1]);
}
}
}
}
+ MonoInst *field_add_inst = sp [0];
if (mini_is_gsharedvt_klass (klass)) {
MonoInst *offset_ins;
offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
/* The value is offset by 1 */
EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
- dreg = alloc_ireg_mp (cfg);
- EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
- EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
- } else {
- EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
+ EMIT_NEW_BIALU (cfg, field_add_inst, OP_PADD, alloc_ireg_mp (cfg), sp [0]->dreg, offset_ins->dreg);
+ foffset = 0;
}
- load->flags |= ins_flag;
+
+ load = mini_emit_memory_load (cfg, field->type, field_add_inst, foffset, ins_flag);
+
if (sp [0]->opcode != OP_LDADDR)
load->flags |= MONO_INST_FAULT;
*sp++ = load;
is_special_static = mono_class_field_is_special_static (field);
if (is_special_static && ((gsize)addr & 0x80000000) == 0)
- thread_ins = mono_get_thread_intrinsic (cfg);
+ thread_ins = mono_create_tls_get (cfg, TLS_KEY_THREAD);
else
thread_ins = NULL;
guint32 offset;
int idx, static_data_reg, array_reg, dreg;
- GSHAREDVT_FAILURE (op);
+ if (context_used && cfg->gsharedvt && mini_is_gsharedvt_klass (klass))
+ GSHAREDVT_FAILURE (op);
- MONO_ADD_INS (cfg->cbb, thread_ins);
static_data_reg = alloc_ireg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
*
* super_info.static_data + field->offset
*/
- static_data = emit_get_rgctx_klass (cfg, context_used,
+ static_data = mini_emit_get_rgctx_klass (cfg, context_used,
klass, MONO_RGCTX_INFO_STATIC_DATA);
if (mini_is_gsharedvt_klass (klass)) {
int addr_reg = mono_alloc_preg (cfg);
EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
}
- } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
+ } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
MonoInst *iargs [2];
g_assert (field->parent);
if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
/* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
- emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
+ mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
}
if (op == CEE_LDSFLDA) {
if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
/* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
- emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
+ mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
}
ins_flag = 0;
token = read32 (ip + 1);
klass = mini_get_class (method, token, generic_context);
CHECK_TYPELOAD (klass);
- if (ins_flag & MONO_INST_VOLATILE) {
- /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
- emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
- }
+
/* FIXME: should check item at sp [1] is compatible with the type of the store. */
- EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
- ins->flags |= ins_flag;
- if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
- generic_class_is_reference_type (cfg, klass) && !MONO_INS_IS_PCONST_NULL (sp [1])) {
- /* insert call to write barrier */
- emit_write_barrier (cfg, sp [0], sp [1]);
- }
+ mini_emit_memory_store (cfg, &klass->byval_arg, sp [0], sp [1], ins_flag);
ins_flag = 0;
ip += 5;
inline_costs += 1;
klass = mini_get_class (method, token, generic_context);
CHECK_TYPELOAD (klass);
+ if (klass->byval_arg.type == MONO_TYPE_VOID)
+ UNVERIFIED;
context_used = mini_class_check_context_used (cfg, klass);
/* FIXME: Use OP_NEWARR and decompose later to help abcrem */
/* vtable */
- args [0] = emit_get_rgctx_klass (cfg, context_used,
+ args [0] = mini_emit_get_rgctx_klass (cfg, context_used,
array_class, MONO_RGCTX_INFO_VTABLE);
/* array len */
args [1] = sp [0];
* ensure the rva field is big enough
*/
if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, cfg->cbb, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
- MonoMethod *memcpy_method = get_memcpy_method ();
+ MonoMethod *memcpy_method = mini_get_memcpy_method ();
MonoInst *iargs [3];
int add_reg = alloc_ireg_mp (cfg);
if (context_used) {
MonoInst *klass_ins;
- klass_ins = emit_get_rgctx_klass (cfg, context_used,
+ klass_ins = mini_emit_get_rgctx_klass (cfg, context_used,
klass, MONO_RGCTX_INFO_KLASS);
// FIXME:
MonoInst *const_ins;
int type_reg = alloc_preg (cfg);
- const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
+ const_ins = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
mono_class_init (tclass);
if (context_used) {
- ins = emit_get_rgctx_klass (cfg, context_used,
+ ins = mini_emit_get_rgctx_klass (cfg, context_used,
tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
} else if (cfg->compile_aot) {
if (method->wrapper_type) {
- mono_error_init (&error); //got to do it since there are multiple conditionals below
+ error_init (&error); // we have to do this since there are multiple conditionals below
if (mono_class_get_checked (tclass->image, tclass->type_token, &error) == tclass && !generic_context) {
/* Special case for static synchronized wrappers */
EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
if (context_used) {
if (handle_class == mono_defaults.typehandle_class) {
- ins = emit_get_rgctx_klass (cfg, context_used,
+ ins = mini_emit_get_rgctx_klass (cfg, context_used,
mono_class_from_mono_type ((MonoType *)handle),
MONO_RGCTX_INFO_TYPE);
} else if (handle_class == mono_defaults.methodhandle_class) {
if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
GList *tmp;
- MonoExceptionClause *clause;
for (tmp = handlers; tmp; tmp = tmp->next) {
- clause = (MonoExceptionClause *)tmp->data;
+ MonoExceptionClause *clause = (MonoExceptionClause *)tmp->data;
+ MonoInst *abort_exc = (MonoInst *)mono_find_exvar_for_offset (cfg, clause->handler_offset);
+ MonoBasicBlock *dont_throw;
+
tblock = cfg->cil_offset_to_bb [clause->handler_offset];
g_assert (tblock);
link_bblock (cfg, cfg->cbb, tblock);
+
+ MONO_EMIT_NEW_PCONST (cfg, abort_exc->dreg, 0);
+
MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
ins->inst_target_bb = tblock;
ins->inst_eh_block = clause;
MONO_ADD_INS (cfg->cbb, ins);
cfg->cbb->has_call_handler = 1;
+
+ /* Throw exception if exvar is set */
+ /* FIXME Do we need this for calls from catch/filter ? */
+ NEW_BBLOCK (cfg, dont_throw);
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, abort_exc->dreg, 0);
+ MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
+ mono_emit_jit_icall (cfg, mono_thread_self_abort, NULL);
+ cfg->cbb->clause_hole = clause;
+
+ MONO_START_BB (cfg, dont_throw);
+ cfg->cbb->clause_hole = clause;
+
if (COMPILE_LLVM (cfg)) {
MonoBasicBlock *target_bb;
CHECK_STACK (info->sig->param_count);
sp -= info->sig->param_count;
- ins = mono_emit_jit_icall (cfg, info->func, sp);
+ if (cfg->compile_aot && !strcmp (info->name, "mono_threads_attach_coop")) {
+ MonoInst *addr;
+
+ /*
+ * This is called on unattached threads, so it cannot go through the trampoline
+ * infrastructure. Use an indirect call through a GOT slot initialized at load time
+ * instead.
+ */
+ EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_JIT_ICALL_ADDR_NOCALL, (char*)info->name);
+ ins = mini_emit_calli (cfg, info->sig, sp, addr, NULL, NULL);
+ } else {
+ ins = mono_emit_jit_icall (cfg, info->func, sp);
+ }
+
if (!MONO_TYPE_IS_VOID (info->sig->ret))
*sp++ = ins;
case CEE_MONO_LDPTR_CARD_TABLE:
case CEE_MONO_LDPTR_NURSERY_START:
case CEE_MONO_LDPTR_NURSERY_BITS:
- case CEE_MONO_LDPTR_INT_REQ_FLAG: {
+ case CEE_MONO_LDPTR_INT_REQ_FLAG:
+ case CEE_MONO_LDPTR_PROFILER_ALLOCATION_COUNT: {
CHECK_STACK_OVF (1);
switch (ip [1]) {
- case CEE_MONO_LDPTR_CARD_TABLE:
- ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
- break;
- case CEE_MONO_LDPTR_NURSERY_START:
- ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_START, NULL);
- break;
- case CEE_MONO_LDPTR_NURSERY_BITS:
- ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_BITS, NULL);
- break;
- case CEE_MONO_LDPTR_INT_REQ_FLAG:
- ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
- break;
+ case CEE_MONO_LDPTR_CARD_TABLE:
+ ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
+ break;
+ case CEE_MONO_LDPTR_NURSERY_START:
+ ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_START, NULL);
+ break;
+ case CEE_MONO_LDPTR_NURSERY_BITS:
+ ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_BITS, NULL);
+ break;
+ case CEE_MONO_LDPTR_INT_REQ_FLAG:
+ ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
+ break;
+ case CEE_MONO_LDPTR_PROFILER_ALLOCATION_COUNT:
+ ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_PROFILER_ALLOCATION_COUNT, NULL);
+ break;
+ default:
+ g_assert_not_reached ();
+ break;
}
*sp++ = ins;
temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
temp->backend.is_pinvoke = 1;
EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
- mini_emit_stobj (cfg, dest, src, klass, TRUE);
+ mini_emit_memory_copy (cfg, dest, src, klass, TRUE, 0);
EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
dest->type = STACK_VTYPE;
} else {
EMIT_NEW_RETLOADA (cfg, ins);
}
- mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
+ mini_emit_memory_copy (cfg, ins, sp [0], klass, TRUE, 0);
if (sp != stack_start)
UNVERIFIED;
+ mini_profiler_emit_leave (cfg, sp [0]);
+
MONO_INST_NEW (cfg, ins, OP_BR);
ins->inst_target_bb = end_bblock;
MONO_ADD_INS (cfg->cbb, ins);
ip += 6;
break;
}
- case CEE_MONO_CISINST:
- case CEE_MONO_CCASTCLASS: {
- int token;
- CHECK_STACK (1);
- --sp;
- CHECK_OPSIZE (6);
- token = read32 (ip + 2);
- klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
- if (ip [1] == CEE_MONO_CISINST)
- ins = handle_cisinst (cfg, klass, sp [0]);
- else
- ins = handle_ccastclass (cfg, klass, sp [0]);
- *sp++ = ins;
- ip += 6;
- break;
- }
case CEE_MONO_SAVE_LMF:
case CEE_MONO_RESTORE_LMF:
ip += 2;
g_assert (key < TLS_KEY_NUM);
ins = mono_create_tls_get (cfg, key);
- if (!ins) {
- if (cfg->compile_aot) {
- DISABLE_AOT (cfg);
- MONO_INST_NEW (cfg, ins, OP_TLS_GET);
- ins->dreg = alloc_preg (cfg);
- ins->type = STACK_PTR;
- } else {
- g_assert_not_reached ();
- }
- }
+ g_assert (ins);
ins->type = STACK_PTR;
- MONO_ADD_INS (cfg->cbb, ins);
*sp++ = ins;
ip += 6;
break;
cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
}
- /* Has to use a call inst since it local regalloc expects it */
+ /* Has to use a call inst since local regalloc expects it */
MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
ins = (MonoInst*)call;
sp -= 2;
MONO_ADD_INS (cfg->cbb, ins);
cfg->param_area = MAX (cfg->param_area, cfg->backend->dyn_call_param_area);
+ /* OP_DYN_CALL might need to allocate a dynamically sized param area */
+ cfg->flags |= MONO_CFG_HAS_ALLOCA;
ip += 2;
inline_costs += 10 * num_calls++;
}
case CEE_MONO_MEMORY_BARRIER: {
CHECK_OPSIZE (6);
- emit_memory_barrier (cfg, (int)read32 (ip + 2));
+ mini_emit_memory_barrier (cfg, (int)read32 (ip + 2));
ip += 6;
break;
}
MonoInst *ad_ins, *jit_tls_ins;
MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
- g_assert (!mono_threads_is_coop_enabled ());
+ g_assert (!mono_threads_is_blocking_transition_enabled ());
cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
EMIT_NEW_PCONST (cfg, ins, NULL);
MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
- ad_ins = mono_get_domain_intrinsic (cfg);
- jit_tls_ins = mono_get_jit_tls_intrinsic (cfg);
+ ad_ins = mono_create_tls_get (cfg, TLS_KEY_DOMAIN);
+ jit_tls_ins = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
- if (cfg->backend->have_tls_get && ad_ins && jit_tls_ins) {
+ if (ad_ins && jit_tls_ins) {
NEW_BBLOCK (cfg, next_bb);
NEW_BBLOCK (cfg, call_bb);
} else {
EMIT_NEW_PCONST (cfg, domain_ins, cfg->domain);
}
- MONO_ADD_INS (cfg->cbb, ad_ins);
MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, ad_ins->dreg, domain_ins->dreg);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, call_bb);
- MONO_ADD_INS (cfg->cbb, jit_tls_ins);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, jit_tls_ins->dreg, 0);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, call_bb);
* instead.
*/
EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_JIT_THREAD_ATTACH, NULL);
- ins = mono_emit_calli (cfg, helper_sig_jit_thread_attach, args, addr, NULL, NULL);
+ ins = mini_emit_calli (cfg, helper_sig_jit_thread_attach, args, addr, NULL, NULL);
} else {
ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
}
addr = emit_get_rgctx_sig (cfg, context_used,
fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
- ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
+ ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, callee);
} else {
- ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
+ ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
}
}
ins->type = STACK_I4;
MONO_ADD_INS (cfg->cbb, ins);
+ ip += 2;
+ *sp++ = ins;
+ break;
+ case CEE_MONO_GET_RGCTX_ARG:
+ CHECK_OPSIZE (2);
+ CHECK_STACK_OVF (1);
+
+ mono_create_rgctx_var (cfg);
+
+ MONO_INST_NEW (cfg, ins, OP_MOVE);
+ ins->dreg = alloc_dreg (cfg, STACK_PTR);
+ ins->sreg1 = cfg->rgctx_var->dreg;
+ ins->type = STACK_PTR;
+ MONO_ADD_INS (cfg->cbb, ins);
+
ip += 2;
*sp++ = ins;
break;
CHECK_ARG (n);
if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
UNVERIFIED;
- EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
+ emit_starg_ir (cfg, sp, n);
ip += 4;
break;
case CEE_LDLOC:
CHECK_OPSIZE (4);
n = read16 (ip + 2);
CHECK_LOCAL (n);
- EMIT_NEW_LOCLOAD (cfg, ins, n);
+ if ((ip [4] == CEE_LDFLD) && ip_in_bb (cfg, cfg->cbb, ip + 4) && header->locals [n]->type == MONO_TYPE_VALUETYPE) {
+ /* Avoid loading a struct just to load one of its fields */
+ EMIT_NEW_LOCLOADA (cfg, ins, n);
+ } else {
+ EMIT_NEW_LOCLOAD (cfg, ins, n);
+ }
*sp++ = ins;
ip += 4;
break;
ip += 6;
break;
case CEE_CPBLK:
- case CEE_INITBLK: {
- MonoInst *iargs [3];
CHECK_STACK (3);
sp -= 3;
-
- /* Skip optimized paths for volatile operations. */
- if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
- mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
- } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
- /* emit_memset only works when val == 0 */
- mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
- } else {
- MonoInst *call;
- iargs [0] = sp [0];
- iargs [1] = sp [1];
- iargs [2] = sp [2];
- if (ip [1] == CEE_CPBLK) {
- /*
- * FIXME: It's unclear whether we should be emitting both the acquire
- * and release barriers for cpblk. It is technically both a load and
- * store operation, so it seems like that's the sensible thing to do.
- *
- * FIXME: We emit full barriers on both sides of the operation for
- * simplicity. We should have a separate atomic memcpy method instead.
- */
- MonoMethod *memcpy_method = get_memcpy_method ();
-
- if (ins_flag & MONO_INST_VOLATILE)
- emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
-
- call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
- call->flags |= ins_flag;
-
- if (ins_flag & MONO_INST_VOLATILE)
- emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
- } else {
- MonoMethod *memset_method = get_memset_method ();
- if (ins_flag & MONO_INST_VOLATILE) {
- /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
- emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
- }
- call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
- call->flags |= ins_flag;
- }
- }
+ mini_emit_memory_copy_bytes (cfg, sp [0], sp [1], sp [2], ins_flag);
+ ip += 2;
+ ins_flag = 0;
+ inline_costs += 1;
+ break;
+ case CEE_INITBLK:
+ CHECK_STACK (3);
+ sp -= 3;
+ mini_emit_memory_init_bytes (cfg, sp [0], sp [1], sp [2], ins_flag);
ip += 2;
ins_flag = 0;
inline_costs += 1;
break;
- }
case CEE_NO_:
CHECK_OPSIZE (3);
if (ip [2] & 0x1)
cfg->cbb = init_localsbb;
- if ((get_domain = mono_get_domain_intrinsic (cfg))) {
- MONO_ADD_INS (cfg->cbb, get_domain);
- } else {
- get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
- }
+ get_domain = mono_create_tls_get (cfg, TLS_KEY_DOMAIN);
NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
MONO_ADD_INS (cfg->cbb, store);
}
}
cfg->cbb = init_localsbb;
- emit_instrumentation_call (cfg, mono_profiler_method_enter);
+ mini_profiler_emit_enter (cfg);
if (seq_points) {
MonoBasicBlock *bb;
int orig_next_vreg;
guint32 *vreg_to_lvreg;
guint32 *lvregs;
- guint32 i, lvregs_len;
+ guint32 i, lvregs_len, lvregs_size;
gboolean dest_has_lvreg = FALSE;
MonoStackType stacktypes [128];
MonoInst **live_range_start, **live_range_end;
*/
orig_next_vreg = cfg->next_vreg;
vreg_to_lvreg = (guint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
- lvregs = (guint32 *)mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
+ lvregs_size = 1024;
+ lvregs = (guint32 *)mono_mempool_alloc (cfg->mempool, sizeof (guint32) * lvregs_size);
lvregs_len = 0;
/*
}
g_assert (sreg != -1);
vreg_to_lvreg [var->dreg] = sreg;
- g_assert (lvregs_len < 1024);
+ if (lvregs_len >= lvregs_size) {
+ guint32 *new_lvregs = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * lvregs_size * 2);
+ memcpy (new_lvregs, lvregs, sizeof (guint32) * lvregs_size);
+ lvregs = new_lvregs;
+ lvregs_size *= 2;
+ }
lvregs [lvregs_len ++] = var->dreg;
}
}
if (dest_has_lvreg) {
g_assert (ins->dreg != -1);
vreg_to_lvreg [prev_dreg] = ins->dreg;
- g_assert (lvregs_len < 1024);
+ if (lvregs_len >= lvregs_size) {
+ guint32 *new_lvregs = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * lvregs_size * 2);
+ memcpy (new_lvregs, lvregs, sizeof (guint32) * lvregs_size);
+ lvregs = new_lvregs;
+ lvregs_size *= 2;
+ }
lvregs [lvregs_len ++] = prev_dreg;
dest_has_lvreg = FALSE;
}
g_free (live_range_end_bb);
}
-static void
-mono_decompose_typecheck (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins)
-{
- MonoInst *ret, *move, *source;
- MonoClass *klass = ins->klass;
- int context_used = mini_class_check_context_used (cfg, klass);
- int is_isinst = ins->opcode == OP_ISINST;
- g_assert (is_isinst || ins->opcode == OP_CASTCLASS);
- source = get_vreg_to_inst (cfg, ins->sreg1);
- if (!source || source == (MonoInst *) -1)
- source = mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, ins->sreg1);
- g_assert (source && source != (MonoInst *) -1);
-
- MonoBasicBlock *first_bb;
- NEW_BBLOCK (cfg, first_bb);
- cfg->cbb = first_bb;
-
- if (!context_used && (mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || klass->is_array_special_interface)) {
- if (is_isinst)
- ret = emit_isinst_with_cache_nonshared (cfg, source, klass);
- else
- ret = emit_castclass_with_cache_nonshared (cfg, source, klass);
- } else if (!context_used && (mono_class_is_marshalbyref (klass) || mono_class_is_interface (klass))) {
- MonoInst *iargs [1];
- int costs;
-
- iargs [0] = source;
- if (is_isinst) {
- MonoMethod *wrapper = mono_marshal_get_isinst (klass);
- costs = inline_method (cfg, wrapper, mono_method_signature (wrapper), iargs, 0, 0, TRUE);
- } else {
- MonoMethod *wrapper = mono_marshal_get_castclass (klass);
- save_cast_details (cfg, klass, source->dreg, TRUE);
- costs = inline_method (cfg, wrapper, mono_method_signature (wrapper), iargs, 0, 0, TRUE);
- reset_cast_details (cfg);
- }
- g_assert (costs > 0);
- ret = iargs [0];
- } else {
- if (is_isinst)
- ret = handle_isinst (cfg, klass, source, context_used);
- else
- ret = handle_castclass (cfg, klass, source, context_used);
- }
- EMIT_NEW_UNALU (cfg, move, OP_MOVE, ins->dreg, ret->dreg);
-
- g_assert (cfg->cbb->code || first_bb->code);
- MonoInst *prev = ins->prev;
- mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
-}
-
-void
-mono_decompose_typechecks (MonoCompile *cfg)
-{
- for (MonoBasicBlock *bb = cfg->bb_entry; bb; bb = bb->next_bb) {
- MonoInst *ins;
- MONO_BB_FOR_EACH_INS (bb, ins) {
- switch (ins->opcode) {
- case OP_ISINST:
- case OP_CASTCLASS:
- mono_decompose_typecheck (cfg, bb, ins);
- break;
- }
- }
- }
-}
-
/**
* FIXME:
* - create a helper function for allocating a stack slot, taking into account
* MONO_CFG_HAS_SPILLUP.
* - merge r68207.
- * - merge the ia64 switch changes.
* - optimize mono_regstate2_alloc_int/float.
* - fix the pessimistic handling of variables accessed in exception handler blocks.
* - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
the values on the stack before emitting the last instruction of the bb.
*/
-#else /* !DISABLE_JIT */
-
-MONO_EMPTY_SOURCE_FILE (method_to_ir);
-
#endif /* !DISABLE_JIT */