#include <mono/metadata/marshal.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/mono-debug.h>
+#include <mono/metadata/mono-debug-debugger.h>
#include <mono/metadata/gc-internal.h>
#include <mono/metadata/security-manager.h>
#include <mono/metadata/threads-types.h>
/* helper methods signatures */
static MonoMethodSignature *helper_sig_class_init_trampoline;
static MonoMethodSignature *helper_sig_domain_get;
-static MonoMethodSignature *helper_sig_generic_class_init_trampoline;
-static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm;
static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm;
{
helper_sig_domain_get = mono_create_icall_signature ("ptr");
helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
- helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
- helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
{
if (cfg->verbose_level > 2) \
- printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), __LINE__);
+ printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), line);
mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
}
else
return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
}
+ }
+ for (i = 0; i < header->num_clauses; ++i) {
+ clause = &header->clauses [i];
if (MONO_OFFSET_IN_CLAUSE (clause, offset))
return ((i + 1) << 8) | clause->flags;
}
}
-/* Emit code which loads interface_offsets [klass->interface_id]
- * The array is stored in memory before vtable.
-*/
-static void
-mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
-{
- if (cfg->compile_aot) {
- int ioffset_reg = alloc_preg (cfg);
- int iid_reg = alloc_preg (cfg);
-
- MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
- MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
- MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
- }
- else {
- MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
- }
-}
-
static void
mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
{
if (arg->type != STACK_VTYPE)
return 1;
klass = mono_class_from_mono_type (simple_type);
- if (klass != arg->klass)
+ /* The second case is needed when doing partial sharing */
+ if (klass != arg->klass && mono_class_from_mono_type (target) != arg->klass)
return 1;
return 0;
} else {
#ifdef ENABLE_LLVM
call->imt_arg_reg = method_reg;
#endif
-#ifdef MONO_ARCH_IMT_REG
mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
-#else
- /* Need this to keep the IMT arg alive */
- mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
-#endif
return;
}
-#ifdef MONO_ARCH_IMT_REG
method_reg = alloc_preg (cfg);
if (imt_arg) {
}
mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
-#else
- mono_arch_emit_imt_argument (cfg, call, imt_arg);
-#endif
}
static MonoJumpInfo *
this_reg = this->dreg;
- if (ARCH_HAVE_DELEGATE_TRAMPOLINES && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
+ if ((method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
MonoInst *dummy_use;
MONO_EMIT_NULL_CHECK (cfg, this_reg);
vtable_reg = alloc_preg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
- slot_reg = -1;
- if (mono_use_imt) {
- guint32 imt_slot = mono_method_get_imt_slot (method);
- emit_imt_argument (cfg, call, call->method, imt_arg);
- slot_reg = vtable_reg;
- offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
- }
- if (slot_reg == -1) {
- slot_reg = alloc_preg (cfg);
- mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
- offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
- }
+ guint32 imt_slot = mono_method_get_imt_slot (method);
+ emit_imt_argument (cfg, call, call->method, imt_arg);
+ slot_reg = vtable_reg;
+ offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
} else {
slot_reg = vtable_reg;
offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
if (cfg->compile_llvm)
return FALSE;
#endif
- if (cfg->gen_seq_points_debug_data || cfg->disable_direct_icalls)
+ if (cfg->gen_sdb_seq_points || cfg->disable_direct_icalls)
return FALSE;
return TRUE;
}
return emit_rgctx_fetch (cfg, rgctx, entry);
}
+/*
+ * emit_get_rgctx_virt_method:
+ *
+ * Return data for method VIRT_METHOD for a receiver of type KLASS.
+ * The concrete method is not known at compile time (KLASS may be a
+ * shared generic instantiation), so a runtime RGCTX slot lookup is
+ * emitted; what the slot holds is selected by RGCTX_TYPE.
+ */
+static MonoInst*
+emit_get_rgctx_virt_method (MonoCompile *cfg, int context_used,
+ MonoClass *klass, MonoMethod *virt_method, MonoRgctxInfoType rgctx_type)
+{
+ MonoJumpInfoVirtMethod *info;
+ MonoJumpInfoRgctxEntry *entry;
+ MonoInst *rgctx;
+
+ /* Pack the (klass, virt_method) pair into a patch-info payload; allocated from the cfg mempool so it lives as long as the compile. */
+ info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoVirtMethod));
+ info->klass = klass;
+ info->method = virt_method;
+
+ /* Create an RGCTX entry describing the lookup, then emit IR which fetches the rgctx and reads the slot at runtime. */
+ entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_VIRT_METHOD, info, rgctx_type);
+ rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
+
+ return emit_rgctx_fetch (cfg, rgctx, entry);
+}
static MonoInst*
emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
* On return the caller must check @klass for load errors.
*/
static void
-emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
+emit_generic_class_init (MonoCompile *cfg, MonoClass *klass, MonoBasicBlock **out_bblock)
{
MonoInst *vtable_arg;
- MonoCallInst *call;
int context_used;
+ /* Default: the current bblock; the inline-check path below replaces this with the merge bblock it creates. */
+ *out_bblock = cfg->cbb;
+
context_used = mini_class_check_context_used (cfg, klass);
if (context_used) {
EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
}
- if (COMPILE_LLVM (cfg))
- call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
- else
- call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
-#ifdef MONO_ARCH_VTABLE_REG
- mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
- cfg->uses_vtable_reg = TRUE;
+#ifdef MONO_ARCH_HAVE_OP_GENERIC_CLASS_INIT
+ MonoInst *ins;
+
+ /*
+ * Using an opcode instead of emitting IR here allows the hiding of the call inside the opcode,
+ * so this doesn't have to clobber any regs and it doesn't break basic blocks.
+ */
+ /*
+ * For LLVM, this requires that the code in the generic trampoline obtain the vtable argument according to
+ * the normal calling convention of the platform.
+ */
+ MONO_INST_NEW (cfg, ins, OP_GENERIC_CLASS_INIT);
+ ins->sreg1 = vtable_arg->dreg;
+ MONO_ADD_INS (cfg->cbb, ins);
#else
- NOT_IMPLEMENTED;
+ static int byte_offset = -1;
+ static guint8 bitmask;
+ int bits_reg, inited_reg;
+ MonoBasicBlock *inited_bb;
+ MonoInst *args [16];
+
+ /* Locate the MonoVTable.initialized bitfield once and cache it in the statics above. */
+ if (byte_offset < 0)
+ mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask);
+
+ bits_reg = alloc_ireg (cfg);
+ inited_reg = alloc_ireg (cfg);
+
+ /* Load the byte holding the 'initialized' bit and isolate it. */
+ MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, bits_reg, vtable_arg->dreg, byte_offset);
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, inited_reg, bits_reg, bitmask);
+
+ NEW_BBLOCK (cfg, inited_bb);
+
+ /* If the bit is already set, skip the slow-path icall. */
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, inited_reg, 0);
+ MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, inited_bb);
+
+ args [0] = vtable_arg;
+ mono_emit_jit_icall (cfg, mono_generic_class_init, args);
+
+ /* This path breaks the basic block; report the merge bblock to the caller. */
+ MONO_START_BB (cfg, inited_bb);
+ *out_bblock = inited_bb;
#endif
}
+
+/*
+ * emit_class_init:
+ *
+ * Emit a call to the class init trampoline for KLASS.
+ * On return, *OUT_BBLOCK is set to the current basic block (this path
+ * does not break the bblock).
+ */
+static void
+emit_class_init (MonoCompile *cfg, MonoClass *klass, MonoBasicBlock **out_bblock)
+{
+ /* This could be used as a fallback if needed */
+ //emit_generic_class_init (cfg, klass, out_bblock);
+
+ *out_bblock = cfg->cbb;
+
+ mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, klass, helper_sig_class_init_trampoline, NULL);
+}
+
static void
emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
{
if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
if (known_instance_size) {
int size = mono_class_instance_size (klass);
+ if (size < sizeof (MonoObject))
+ g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (size));
}
if (managed_alloc) {
int size = mono_class_instance_size (klass);
+ if (size < sizeof (MonoObject))
+ g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (size));
MonoInst *obj, *method_ins, *tramp_ins;
MonoDomain *domain;
guint8 **code_slot;
-
- // FIXME reenable optimisation for virtual case
- if (virtual)
- return NULL;
if (virtual) {
MonoMethod *invoke = mono_get_delegate_invoke (klass);
return FALSE;
}
- /*
- * CAS - do not inline methods with declarative security
- * Note: this has to be before any possible return TRUE;
- */
- if (mono_security_method_has_declsec (method))
- return FALSE;
-
#ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
if (mono_arch_is_soft_float ()) {
/* FIXME: */
mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
MonoInst *ins = NULL;
-
+
static MonoClass *runtime_helpers_class = NULL;
if (! runtime_helpers_class)
runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
"System.Runtime.CompilerServices", "RuntimeHelpers");
if (cmethod->klass == mono_defaults.string_class) {
- if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count == 2) {
+ if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count + fsig->hasthis == 2) {
int dreg = alloc_ireg (cfg);
int index_reg = alloc_preg (cfg);
int add_reg = alloc_preg (cfg);
#endif
type_from_op (cfg, ins, NULL, NULL);
return ins;
- } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count == 1) {
+ } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
int dreg = alloc_ireg (cfg);
/* Decompose later to allow more optimizations */
EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
return ins;
- } else if (strcmp (cmethod->name, "InternalSetChar") == 0 && fsig->param_count == 3) {
- int mult_reg = alloc_preg (cfg);
- int add_reg = alloc_preg (cfg);
-
- /* The corlib functions check for oob already. */
- MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
- MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
- MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, MONO_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
- return cfg->cbb->last_ins;
} else
return NULL;
} else if (cmethod->klass == mono_defaults.object_class) {
- if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count == 1) {
+ if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count + fsig->hasthis == 1) {
int dreg = alloc_ireg_ref (cfg);
int vt_reg = alloc_preg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
} else
return NULL;
} else if (cmethod->klass == mono_defaults.array_class) {
- if (strcmp (cmethod->name, "GetGenericValueImpl") == 0 && fsig->param_count == 3 && !cfg->gsharedvt)
+ if (strcmp (cmethod->name, "GetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
return emit_array_generic_access (cfg, fsig, args, FALSE);
- else if (strcmp (cmethod->name, "SetGenericValueImpl") == 0 && fsig->param_count == 3 && !cfg->gsharedvt)
+ else if (strcmp (cmethod->name, "SetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
return emit_array_generic_access (cfg, fsig, args, TRUE);
#ifndef MONO_BIG_ARRAYS
* This is an inline version of GetLength/GetLowerBound(0) used frequently in
* Array methods.
*/
- else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count == 2) ||
- (strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count == 2)) &&
+ else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count + fsig->hasthis == 2) ||
+ (strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count + fsig->hasthis == 2)) &&
args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
int dreg = alloc_ireg (cfg);
int bounds_reg = alloc_ireg_mp (cfg);
if (cmethod->name [0] != 'g')
return NULL;
- if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count == 1) {
+ if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count + fsig->hasthis == 1) {
int dreg = alloc_ireg (cfg);
int vtable_reg = alloc_preg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
type_from_op (cfg, ins, NULL, NULL);
return ins;
- } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count == 1) {
+ } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
int dreg = alloc_ireg (cfg);
EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
* all inputs:
* http://everything2.com/?node_id=1051618
*/
- } else if ((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") ||
+ } else if (((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") ||
!strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) &&
- !strcmp (cmethod->klass->name_space, "XamCore.ObjCRuntime") &&
- !strcmp (cmethod->klass->name, "Selector")) {
+ !strcmp (cmethod->klass->name_space, "XamCore.ObjCRuntime") &&
+ !strcmp (cmethod->klass->name, "Selector")) ||
+ (!strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.iOS") &&
+ !strcmp (cmethod->klass->name_space, "ObjCRuntime") &&
+ !strcmp (cmethod->klass->name, "Selector"))
+ ) {
#ifdef MONO_ARCH_HAVE_OBJC_GET_SELECTOR
- if (!strcmp (cmethod->klass->name, "GetHandle") && fsig->param_count == 1 &&
+ if (!strcmp (cmethod->name, "GetHandle") && fsig->param_count == 1 &&
(args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
cfg->compile_aot) {
MonoInst *pi;
return fsig;
}
-/*
- * Returns TRUE if the JIT should abort inlining because "callee"
- * is influenced by security attributes.
- */
-static
-gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
-{
- guint32 result;
-
- if ((cfg->method != caller) && mono_security_method_has_declsec (callee)) {
- return TRUE;
- }
-
- result = mono_declsec_linkdemand (cfg->domain, caller, callee);
- if (result == MONO_JIT_SECURITY_OK)
- return FALSE;
-
- if (result == MONO_JIT_LINKDEMAND_ECMA) {
- /* Generate code to throw a SecurityException before the actual call/link */
- MonoSecurityManager *secman = mono_security_manager_get_methods ();
- MonoInst *args [2];
-
- NEW_ICONST (cfg, args [0], 4);
- NEW_METHODCONST (cfg, args [1], caller);
- mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
- } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
- /* don't hide previous results */
- mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
- cfg->exception_data = result;
- return TRUE;
- }
-
- return FALSE;
-}
-
static MonoMethod*
throw_exception (void)
{
return supported_tail_call;
}
-/* the JIT intercepts ldflda instructions to the tlsdata field in ThreadLocal<T> and redirects
- * it to the thread local value based on the tls_offset field. Every other kind of access to
- * the field causes an assert.
- */
-static gboolean
-is_magic_tls_access (MonoClassField *field)
-{
- if (strcmp (field->name, "tlsdata"))
- return FALSE;
- if (strcmp (field->parent->name, "ThreadLocal`1"))
- return FALSE;
- return field->parent->image == mono_defaults.corlib;
-}
-
/* emits the code needed to access a managed tls var (like ThreadStatic)
* with the value of the tls offset in offset_reg. thread_ins represents the MonoInternalThread
* pointer for the current thread.
MonoInst *addr;
int static_data_reg, array_reg, dreg;
int offset2_reg, idx_reg;
- // inlined access to the tls data
- // idx = (offset >> 24) - 1;
- // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
+ // inlined access to the tls data (see threads.c)
static_data_reg = alloc_ireg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
idx_reg = alloc_ireg (cfg);
- MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
- MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
array_reg = alloc_ireg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
offset2_reg = alloc_ireg (cfg);
- MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6);
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff);
dreg = alloc_ireg (cfg);
EMIT_NEW_BIALU (cfg, addr, OP_PADD, dreg, array_reg, offset2_reg);
return addr;
}
-/*
- * redirect access to the tlsdata field to the tls var given by the tls_offset field.
- * this address is cached per-method in cached_tls_addr.
- */
-static MonoInst*
-create_magic_tls_access (MonoCompile *cfg, MonoClassField *tls_field, MonoInst **cached_tls_addr, MonoInst *thread_local)
-{
- MonoInst *load, *addr, *temp, *store, *thread_ins;
- MonoClassField *offset_field;
-
- if (*cached_tls_addr) {
- EMIT_NEW_TEMPLOAD (cfg, addr, (*cached_tls_addr)->inst_c0);
- return addr;
- }
- thread_ins = mono_get_thread_intrinsic (cfg);
- offset_field = mono_class_get_field_from_name (tls_field->parent, "tls_offset");
-
- EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, offset_field->type, thread_local->dreg, offset_field->offset);
- if (thread_ins) {
- MONO_ADD_INS (cfg->cbb, thread_ins);
- } else {
- MonoMethod *thread_method;
- thread_method = mono_class_get_method_from_name (mono_get_thread_class(), "CurrentInternalThread_internal", 0);
- thread_ins = mono_emit_method_call (cfg, thread_method, NULL, NULL);
- }
- addr = emit_managed_static_data_access (cfg, thread_ins, load->dreg);
- addr->klass = mono_class_from_mono_type (tls_field->type);
- addr->type = STACK_MP;
- *cached_tls_addr = temp = mono_compile_create_var (cfg, type_from_stack_type (addr), OP_LOCAL);
- EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, addr);
-
- EMIT_NEW_TEMPLOAD (cfg, addr, temp->inst_c0);
- return addr;
-}
-
/*
* handle_ctor_call:
*
int num_calls = 0, inline_costs = 0;
int breakpoint_id = 0;
guint num_args;
- MonoBoolean security, pinvoke;
- MonoSecurityManager* secman = NULL;
- MonoDeclSecurityActions actions;
GSList *class_inits = NULL;
gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
int context_used;
gboolean init_locals, seq_points, skip_dead_blocks;
gboolean sym_seq_points = FALSE;
- MonoInst *cached_tls_addr = NULL;
MonoDebugMethodInfo *minfo;
MonoBitSet *seq_point_locs = NULL;
MonoBitSet *seq_point_set_locs = NULL;
dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
- dont_verify |= mono_security_smcs_hack_enabled ();
-
/* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
cfg->stat_cil_code_size += header->code_size;
seq_points = cfg->gen_seq_points && cfg->method == method;
-#ifdef PLATFORM_ANDROID
- seq_points &= cfg->method->wrapper_type == MONO_WRAPPER_NONE;
-#endif
if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
/* We could hit a seq point before attaching to the JIT (#8338) */
seq_points = FALSE;
}
- if (cfg->gen_seq_points_debug_data && cfg->method == method) {
+ if (cfg->gen_sdb_seq_points && cfg->method == method) {
minfo = mono_debug_lookup_method (method);
if (minfo) {
+ MonoSymSeqPoint *sps;
int i, n_il_offsets;
- int *il_offsets;
- int *line_numbers;
- mono_debug_symfile_get_line_numbers_full (minfo, NULL, NULL, &n_il_offsets, &il_offsets, &line_numbers, NULL, NULL, NULL, NULL);
+ mono_debug_get_seq_points (minfo, NULL, NULL, NULL, &sps, &n_il_offsets);
seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
sym_seq_points = TRUE;
for (i = 0; i < n_il_offsets; ++i) {
- if (il_offsets [i] < header->code_size)
- mono_bitset_set_fast (seq_point_locs, il_offsets [i]);
+ if (sps [i].il_offset < header->code_size)
+ mono_bitset_set_fast (seq_point_locs, sps [i].il_offset);
}
- g_free (il_offsets);
- g_free (line_numbers);
+ g_free (sps);
} else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (method->klass->image)) {
/* Methods without line number info like auto-generated property accessors */
seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
cfg->bb_entry = start_bblock;
start_bblock->cil_code = NULL;
start_bblock->cil_length = 0;
-#if defined(__native_client_codegen__)
- MONO_INST_NEW (cfg, ins, OP_NACL_GC_SAFE_POINT);
- ins->dreg = alloc_dreg (cfg, STACK_I4);
- MONO_ADD_INS (start_bblock, ins);
-#endif
/* EXIT BLOCK */
NEW_BBLOCK (cfg, end_bblock);
tblock->flags |= BB_EXCEPTION_UNSAFE;
}
-
/*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
while (p < end) {
printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
/* catch and filter blocks get the exception object on the stack */
if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
- MonoInst *dummy_use;
/* mostly like handle_stack_args (), but just sets the input args */
/* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
+ cfg->cbb = tblock;
+
+#ifdef MONO_CONTEXT_SET_LLVM_EXC_REG
+ /* The EH code passes in the exception in a register to both JITted and LLVM compiled code */
+ if (!cfg->compile_llvm) {
+ MONO_INST_NEW (cfg, ins, OP_GET_EX_OBJ);
+ ins->dreg = tblock->in_stack [0]->dreg;
+ MONO_ADD_INS (tblock, ins);
+ }
+#else
+ MonoInst *dummy_use;
+
/*
* Add a dummy use for the exvar so its liveness info will be
* correct.
*/
- cfg->cbb = tblock;
EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
+#endif
if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
}
}
- if (mono_security_cas_enabled ())
- secman = mono_security_manager_get_methods ();
-
- security = (secman && mono_security_method_has_declsec (method));
- /* at this point having security doesn't mean we have any code to generate */
- if (security && (cfg->method == method)) {
- /* Only Demand, NonCasDemand and DemandChoice requires code generation.
- * And we do not want to enter the next section (with allocation) if we
- * have nothing to generate */
- security = mono_declsec_get_demands (method, &actions);
- }
-
- /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
- pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
- if (pinvoke) {
- MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
- if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
- MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
-
- /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
- if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
- pinvoke = FALSE;
- }
- if (custom)
- mono_custom_attrs_free (custom);
-
- if (pinvoke) {
- custom = mono_custom_attrs_from_class (wrapped->klass);
- if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
- pinvoke = FALSE;
- }
- if (custom)
- mono_custom_attrs_free (custom);
- }
- } else {
- /* not a P/Invoke after all */
- pinvoke = FALSE;
- }
- }
-
/* we use a separate basic block for the initialization code */
NEW_BBLOCK (cfg, init_localsbb);
cfg->bb_init = init_localsbb;
*/
}
- /* at this point we know, if security is TRUE, that some code needs to be generated */
- if (security && (cfg->method == method)) {
- MonoInst *args [2];
-
- cfg->stat_cas_demand_generation++;
-
- if (actions.demand.blob) {
- /* Add code for SecurityAction.Demand */
- EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
- EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
- /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
- mono_emit_method_call (cfg, secman->demand, args, NULL);
- }
- if (actions.noncasdemand.blob) {
- /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
- /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
- EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
- EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
- /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
- mono_emit_method_call (cfg, secman->demand, args, NULL);
- }
- if (actions.demandchoice.blob) {
- /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
- EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
- EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
- /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
- mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
- }
- }
-
- /* we must Demand SecurityPermission.Unmanaged before p/invoking */
- if (pinvoke) {
- mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
- }
-
if (mono_security_core_clr_enabled ()) {
/* check if this is native code, e.g. an icall or a p/invoke */
if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
GENERIC_SHARING_FAILURE (CEE_JMP);
- if (mono_security_cas_enabled ())
- CHECK_CFG_EXCEPTION;
-
emit_instrumentation_call (cfg, mono_profiler_method_leave);
if (ARCH_HAVE_OP_TAIL_CALL) {
gboolean skip_ret = FALSE;
gboolean delegate_invoke = FALSE;
gboolean direct_icall = FALSE;
+ gboolean constrained_partial_call = FALSE;
MonoMethod *cil_method;
CHECK_OPSIZE (5);
if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
if (!mini_is_gsharedvt_klass (cfg, constrained_class)) {
g_assert (!cmethod->klass->valuetype);
- if (!mini_type_is_reference (cfg, &constrained_class->byval_arg)) {
- /* FIXME: gshared type constrained to a primitive type */
- GENERIC_SHARING_FAILURE (CEE_CALL);
- }
+ if (!mini_type_is_reference (cfg, &constrained_class->byval_arg))
+ constrained_partial_call = TRUE;
}
}
}
*/
- if (mono_security_cas_enabled ()) {
- if (check_linkdemand (cfg, method, cmethod))
- INLINE_FAILURE ("linkdemand");
- CHECK_CFG_EXCEPTION;
- }
-
if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
g_assert_not_reached ();
/*
* We have the `constrained.' prefix opcode.
*/
- if (constrained_class->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
+ if (constrained_partial_call) {
+ gboolean need_box = TRUE;
+
+ /*
+ * The receiver is a valuetype, but the exact type is not known at compile time. This means the
+ * called method is not known at compile time either. The called method could end up being
+ * one of the methods on the parent classes (object/valuetype/enum), in which case we need
+ * to box the receiver.
+ * A simple solution would be to box always and make a normal virtual call, but that would
+ * be bad performance wise.
+ */
+ if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE && cmethod->klass->generic_class) {
+ /*
+ * The parent classes implement no generic interfaces, so the called method will be a vtype method, so no boxing necessary.
+ */
+ need_box = FALSE;
+ }
+
+ if (need_box) {
+ MonoInst *box_type;
+ MonoBasicBlock *is_ref_bb, *end_bb;
+ MonoInst *nonbox_call;
+
+ /*
+ * Determine at runtime whether the called method is defined on object/valuetype/enum, and emit a boxing call
+ * if needed.
+ * FIXME: It is possible to inline the called method in a lot of cases, i.e. for T_INT,
+ * the no-box case goes to a method in Int32, while the box case goes to a method in Enum.
+ */
+ addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
+
+ NEW_BBLOCK (cfg, is_ref_bb);
+ NEW_BBLOCK (cfg, end_bb);
+
+ box_type = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE);
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, box_type->dreg, 1);
+ MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
+
+ /* Non-ref case */
+ nonbox_call = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
+
+ MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
+
+ /* Ref case */
+ MONO_START_BB (cfg, is_ref_bb);
+ EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
+ ins->klass = constrained_class;
+ sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class), &bblock);
+ ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
+
+ MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
+
+ MONO_START_BB (cfg, end_bb);
+ bblock = end_bb;
+
+ nonbox_call->dreg = ins->dreg;
+ } else {
+ g_assert (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
+ addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
+ ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
+ }
+ goto call_end;
+ } else if (constrained_class->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
/*
* The type parameter is instantiated as a valuetype,
* but that type doesn't override the method we're
if (check_call_signature (cfg, fsig, sp))
UNVERIFIED;
-#ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
delegate_invoke = TRUE;
-#endif
if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
bblock = cfg->cbb;
* might not get called after the call was patched.
*/
if (cfg->generic_sharing_context && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
- emit_generic_class_init (cfg, cmethod->klass);
+ emit_generic_class_init (cfg, cmethod->klass, &bblock);
CHECK_TYPELOAD (cmethod->klass);
}
GSHAREDVT_FAILURE (*ip);
#if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
- if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt)
+ if (cmethod->wrapper_type == MONO_WRAPPER_NONE)
use_imt = TRUE;
#endif
GSHAREDVT_FAILURE (*ip);
if (fsig->generic_param_count) {
/* virtual generic call */
- g_assert (mono_use_imt);
g_assert (!imt_arg);
/* Same as the virtual generic case above */
imt_arg = emit_get_rgctx_method (cfg, context_used,
context_used = mini_method_check_context_used (cfg, cmethod);
- if (mono_security_cas_enabled ()) {
- if (check_linkdemand (cfg, method, cmethod))
- INLINE_FAILURE ("linkdemand");
- CHECK_CFG_EXCEPTION;
- } else if (mono_security_core_clr_enabled ()) {
+ if (mono_security_core_clr_enabled ())
ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
- }
if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
- emit_generic_class_init (cfg, cmethod->klass);
+ emit_generic_class_init (cfg, cmethod->klass, &bblock);
CHECK_TYPELOAD (cmethod->klass);
}
* As a workaround, we call class cctors before allocating objects.
*/
if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
- mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, cmethod->klass, helper_sig_class_init_trampoline, NULL);
+ emit_class_init (cfg, cmethod->klass, &bblock);
if (cfg->verbose_level > 2)
printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
class_inits = g_slist_prepend (class_inits, cmethod->klass);
FIELD_ACCESS_FAILURE (method, field);
mono_class_init (klass);
- if (is_instance && *ip != CEE_LDFLDA && is_magic_tls_access (field))
- UNVERIFIED;
-
/* if the class is Critical then transparent code cannot access it's fields */
if (!is_instance && mono_security_core_clr_enabled ())
ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
}
if (op == CEE_LDFLDA) {
- if (is_magic_tls_access (field)) {
- GSHAREDVT_FAILURE (*ip);
- ins = sp [0];
- *sp++ = create_magic_tls_access (cfg, field, &cached_tls_addr, ins);
- } else {
- if (sp [0]->type == STACK_OBJ) {
- MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
- MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
- }
+ if (sp [0]->type == STACK_OBJ) {
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
+ MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
+ }
- dreg = alloc_ireg_mp (cfg);
+ dreg = alloc_ireg_mp (cfg);
- if (mini_is_gsharedvt_klass (cfg, klass)) {
- MonoInst *offset_ins;
+ if (mini_is_gsharedvt_klass (cfg, klass)) {
+ MonoInst *offset_ins;
- offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
- EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
- } else {
- EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
- }
- ins->klass = mono_class_from_mono_type (field->type);
- ins->type = STACK_MP;
- *sp++ = ins;
+ offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
+ EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
+ } else {
+ EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
}
+ ins->klass = mono_class_from_mono_type (field->type);
+ ins->type = STACK_MP;
+ *sp++ = ins;
} else {
MonoInst *load;
}
/* STATIC CASE */
-
- /*
- * We can only support shared generic static
- * field access on architectures where the
- * trampoline code has been extended to handle
- * the generic class init.
- */
-#ifndef MONO_ARCH_VTABLE_REG
- GENERIC_SHARING_FAILURE (op);
-#endif
-
context_used = mini_class_check_context_used (cfg, klass);
ftype = mono_field_get_type (field);
GSHAREDVT_FAILURE (op);
- // offset &= 0x7fffffff;
- // idx = (offset >> 24) - 1;
- // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
MONO_ADD_INS (cfg->cbb, thread_ins);
static_data_reg = alloc_ireg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
offset_reg = ins->dreg;
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
idx_reg = alloc_ireg (cfg);
- MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
- MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
array_reg = alloc_ireg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
offset2_reg = alloc_ireg (cfg);
- MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6);
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff);
dreg = alloc_ireg (cfg);
EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
} else {
offset = (gsize)addr & 0x7fffffff;
- idx = (offset >> 24) - 1;
+ idx = offset & 0x3f;
array_reg = alloc_ireg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
dreg = alloc_ireg (cfg);
- EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
+ EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, ((offset >> 6) & 0x1ffffff));
}
} else if ((cfg->opt & MONO_OPT_SHARED) ||
(cfg->compile_aot && is_special_static) ||
*/
if (mono_class_needs_cctor_run (klass, method))
- emit_generic_class_init (cfg, klass);
+ emit_generic_class_init (cfg, klass, &bblock);
/*
* The pointer we're computing here is
if (!addr) {
if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
if (!(g_slist_find (class_inits, klass))) {
- mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, klass, helper_sig_class_init_trampoline, NULL);
+ emit_class_init (cfg, klass, &bblock);
if (cfg->verbose_level > 2)
printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
class_inits = g_slist_prepend (class_inits, klass);
if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
METHOD_ACCESS_FAILURE (method, cil_method);
- if (mono_security_cas_enabled ()) {
- if (check_linkdemand (cfg, method, cmethod))
- INLINE_FAILURE ("linkdemand");
- CHECK_CFG_EXCEPTION;
- } else if (mono_security_core_clr_enabled ()) {
+ if (mono_security_core_clr_enabled ())
ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
- }
/*
* Optimize the common case of ldftn+delegate creation
}
}
-#if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
/* FIXME: SGEN support */
if (invoke_context_used == 0) {
ip += 6;
}
ip -= 6;
}
-#endif
}
}
context_used = mini_method_check_context_used (cfg, cmethod);
- if (mono_security_cas_enabled ()) {
- if (check_linkdemand (cfg, method, cmethod))
- INLINE_FAILURE ("linkdemand");
- CHECK_CFG_EXCEPTION;
- } else if (mono_security_core_clr_enabled ()) {
+ if (mono_security_core_clr_enabled ())
ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
- }
/*
* Optimize the common case of ldvirtftn+delegate creation
MonoInst *target_ins, *handle_ins;
MonoMethod *invoke;
int invoke_context_used;
+ gboolean is_virtual = cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL;
invoke = mono_get_delegate_invoke (ctor_method->klass);
if (!invoke || !mono_method_signature (invoke))
if (mono_security_core_clr_enabled ())
ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
-#if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
/* FIXME: SGEN support */
if (invoke_context_used == 0) {
ip += 6;
if (cfg->verbose_level > 3)
g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
- if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, TRUE))) {
+ if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, is_virtual))) {
sp -= 2;
*sp = handle_ins;
CHECK_CFG_EXCEPTION;
}
ip -= 6;
}
-#endif
}
}
}
/* Add a sequence point for method entry/exit events */
- if (cfg->gen_seq_points_debug_data) {
+ if (seq_points && cfg->gen_sdb_seq_points) {
NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
MONO_ADD_INS (init_localsbb, ins);
NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);