* (C) 2002 Ximian, Inc.
* Copyright 2003-2010 Novell, Inc (http://www.novell.com)
* Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
+ * Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#include <config.h>
#include <mono/metadata/security-core-clr.h>
#include <mono/metadata/profiler-private.h>
#include <mono/metadata/profiler.h>
+#include <mono/metadata/monitor.h>
#include <mono/metadata/debug-mono-symfile.h>
#include <mono/utils/mono-compiler.h>
#include <mono/utils/mono-memory-model.h>
+#include <mono/utils/mono-error-internals.h>
#include <mono/metadata/mono-basic-block.h>
+#include <mono/metadata/reflection-internals.h>
+#include <mono/utils/mono-threads-coop.h>
#include "trace.h"
if (cfg->exception_type != MONO_EXCEPTION_NONE) \
goto exception_exit; \
} while (0)
-#define METHOD_ACCESS_FAILURE(method, cmethod) do { \
- method_access_failure ((cfg), (method), (cmethod)); \
- goto exception_exit; \
- } while (0)
#define FIELD_ACCESS_FAILURE(method, field) do { \
field_access_failure ((cfg), (method), (field)); \
goto exception_exit; \
} \
} while (0)
#define OUT_OF_MEMORY_FAILURE do { \
- mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
+ mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
+ mono_error_set_out_of_memory (&cfg->error, ""); \
goto exception_exit; \
} while (0)
#define DISABLE_AOT(cfg) do { \
/* helper methods signatures */
static MonoMethodSignature *helper_sig_domain_get;
static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
-static MonoMethodSignature *helper_sig_llvmonly_imt_thunk;
+static MonoMethodSignature *helper_sig_llvmonly_imt_trampoline;
+
+/* type loading helpers */
+static GENERATE_GET_CLASS_WITH_CACHE (runtime_helpers, System.Runtime.CompilerServices, RuntimeHelpers)
+static GENERATE_TRY_GET_CLASS_WITH_CACHE (debuggable_attribute, System.Diagnostics, DebuggableAttribute)
/*
* Instruction metadata
{
helper_sig_domain_get = mono_create_icall_signature ("ptr");
helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
- helper_sig_llvmonly_imt_thunk = mono_create_icall_signature ("ptr ptr ptr");
+ helper_sig_llvmonly_imt_trampoline = mono_create_icall_signature ("ptr ptr ptr");
}
static MONO_NEVER_INLINE void
G_BREAKPOINT ();
}
-static MONO_NEVER_INLINE void
-method_access_failure (MonoCompile *cfg, MonoMethod *method, MonoMethod *cil_method)
-{
- char *method_fname = mono_method_full_name (method, TRUE);
- char *cil_method_fname = mono_method_full_name (cil_method, TRUE);
- mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS);
- cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname);
- g_free (method_fname);
- g_free (cil_method_fname);
-}
-
+/* Record a field-access verification failure on @cfg: @method attempted to
+ * access the inaccessible @field.  The failure is stored as a MonoError-backed
+ * FieldAccessException instead of the legacy exception_message string. */
static MONO_NEVER_INLINE void
field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
{
	char *method_fname = mono_method_full_name (method, TRUE);
	char *field_fname = mono_field_full_name (field);
-	mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS);
-	cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
+	mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
+	mono_error_set_generic_error (&cfg->error, "System", "FieldAccessException", "Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
	g_free (method_fname);
	g_free (field_fname);
}
return -1;
}
+/*
+ * ip_in_finally_clause:
+ *
+ *   Return TRUE if the IL offset OFFSET falls inside the handler block of a
+ * finally or fault clause of the method being compiled (cfg->header).
+ */
+static gboolean
+ip_in_finally_clause (MonoCompile *cfg, int offset)
+{
+	MonoMethodHeader *header = cfg->header;
+	MonoExceptionClause *clause;
+	int i;
+
+	for (i = 0; i < header->num_clauses; ++i) {
+		clause = &header->clauses [i];
+		/* Only finally/fault handlers are of interest; skip catch/filter clauses */
+		if (clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FAULT)
+			continue;
+
+		if (MONO_OFFSET_IN_HANDLER (clause, offset))
+			return TRUE;
+	}
+	return FALSE;
+}
+
static GList*
mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
{
} else {
MonoJumpInfo ji;
gpointer target;
+ MonoError error;
ji.type = patch_type;
ji.data.target = data;
- target = mono_resolve_patch_target (NULL, cfg->domain, NULL, &ji, FALSE);
+ target = mono_resolve_patch_target (NULL, cfg->domain, NULL, &ji, FALSE, &error);
+ mono_error_assert_ok (&error);
EMIT_NEW_PCONST (cfg, ins, target);
}
mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
} else if (klass->cast_class == mono_defaults.enum_class) {
mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
- } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
+ } else if (mono_class_is_interface (klass->cast_class)) {
mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
} else {
// Pass -1 as obj_reg to skip the check below for arrays of arrays
return -1;
}
+//XXX this ignores whether t is byref
+#define MONO_TYPE_IS_PRIMITIVE_SCALAR(t) ((((((t)->type >= MONO_TYPE_BOOLEAN && (t)->type <= MONO_TYPE_U8) || ((t)->type >= MONO_TYPE_I && (t)->type <= MONO_TYPE_U)))))
+
/*
* target_type_is_incompatible:
* @cfg: MonoCompile context
if (target->byref) {
/* FIXME: check that the pointed to types match */
if (arg->type == STACK_MP) {
- MonoClass *base_class = mono_class_from_mono_type (target);
- /* This is needed to handle gshared types + ldaddr */
- simple_type = mini_get_underlying_type (&base_class->byval_arg);
- return target->type != MONO_TYPE_I && arg->klass != base_class && arg->klass != mono_class_from_mono_type (simple_type);
+ /* This is needed to handle gshared types + ldaddr. We lower the types so we can handle enums and other typedef-like types. */
+ MonoClass *target_class_lowered = mono_class_from_mono_type (mini_get_underlying_type (&mono_class_from_mono_type (target)->byval_arg));
+ MonoClass *source_class_lowered = mono_class_from_mono_type (mini_get_underlying_type (&arg->klass->byval_arg));
+
+ /* if the target is native int& or same type */
+ if (target->type == MONO_TYPE_I || target_class_lowered == source_class_lowered)
+ return 0;
+
+ /* Both are primitive type byrefs and the source points to a larger type that the destination */
+ if (MONO_TYPE_IS_PRIMITIVE_SCALAR (&target_class_lowered->byval_arg) && MONO_TYPE_IS_PRIMITIVE_SCALAR (&source_class_lowered->byval_arg) &&
+ mono_class_instance_size (target_class_lowered) <= mono_class_instance_size (source_class_lowered))
+ return 0;
+ return 1;
}
if (arg->type == STACK_PTR)
return 0;
gboolean pass_mrgctx = FALSE;
if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
- (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
+ (mono_class_is_ginst (cmethod->klass) || mono_class_is_gtd (cmethod->klass))) {
gboolean sharable = FALSE;
if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE))
if (!sig)
sig = mono_method_signature (method);
- if (cfg->llvm_only && (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE))
+ if (cfg->llvm_only && (mono_class_is_interface (method->klass)))
g_assert_not_reached ();
if (rgctx_arg) {
if (cfg->llvm_only && !call_target && virtual_ && (method->flags & METHOD_ATTRIBUTE_VIRTUAL))
return emit_llvmonly_virtual_call (cfg, method, sig, 0, args);
- need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
+ need_unbox_trampoline = method->klass == mono_defaults.object_class || mono_class_is_interface (method->klass);
call = mono_emit_call_args (cfg, sig, args, FALSE, virtual_, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
} else {
vtable_reg = alloc_preg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
- if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
+ if (mono_class_is_interface (method->klass)) {
guint32 imt_slot = mono_method_get_imt_slot (method);
emit_imt_argument (cfg, call, call->method, imt_arg);
slot_reg = vtable_reg;
{
/* LLVM on amd64 can't handle calls to non-32 bit addresses */
#ifdef TARGET_AMD64
- if (cfg->compile_llvm)
+ if (cfg->compile_llvm && !cfg->llvm_only)
return FALSE;
#endif
if (cfg->gen_sdb_seq_points || cfg->disable_direct_icalls)
}
MonoInst*
-mono_emit_jit_icall_by_info (MonoCompile *cfg, MonoJitICallInfo *info, MonoInst **args)
+mono_emit_jit_icall_by_info (MonoCompile *cfg, int il_offset, MonoJitICallInfo *info, MonoInst **args)
{
/*
* Call the jit icall without a wrapper if possible.
* an exception check.
*/
costs = inline_method (cfg, info->wrapper_method, NULL,
- args, NULL, cfg->real_offset, TRUE);
+ args, NULL, il_offset, TRUE);
g_assert (costs > 0);
g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));
return ins;
}
+
+/*
+ * emit_method_access_failure:
+ *
+ *   Emit IR which raises a method-access error at run time, signalling that
+ * @method is not allowed to call @cil_method (see the mono_throw_method_access
+ * icall).  Both methods are resolved through the RGCTX machinery so the code
+ * also works under generic sharing.
+ */
+static void
+emit_method_access_failure (MonoCompile *cfg, MonoMethod *method, MonoMethod *cil_method)
+{
+	MonoInst *args [16];
+
+	args [0] = emit_get_rgctx_method (cfg, mono_method_check_context_used (method), method, MONO_RGCTX_INFO_METHOD);
+	args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cil_method), cil_method, MONO_RGCTX_INFO_METHOD);
+
+	mono_emit_jit_icall (cfg, mono_throw_method_access, args);
+}
+
static MonoMethod*
get_memcpy_method (void)
{
RGCTX. */
addr = emit_get_rgctx_method (cfg, context_used, method,
MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
- if (cfg->llvm_only && cfg->gsharedvt) {
+ if (cfg->llvm_only) {
+ cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, mono_method_signature (method));
return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
} else {
rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
if (cfg->opt & MONO_OPT_SHARED) {
EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
iargs [1] = data;
- alloc_ftn = mono_object_new;
+ alloc_ftn = ves_icall_object_new;
} else {
iargs [0] = data;
- alloc_ftn = mono_object_new_specific;
+ alloc_ftn = ves_icall_object_new_specific;
}
if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
if (size < sizeof (MonoObject))
g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
- EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (size));
+ EMIT_NEW_ICONST (cfg, iargs [1], size);
}
return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
}
EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
- alloc_ftn = mono_object_new;
- } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
+ alloc_ftn = ves_icall_object_new;
+ } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !mono_class_is_ginst (klass)) {
/* This happens often in argument checking code, eg. throw new FooException... */
/* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
- EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (size));
+ EMIT_NEW_ICONST (cfg, iargs [1], size);
return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
}
alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
MonoGenericContainer *container;
MonoGenericInst *ginst;
- if (klass->generic_class) {
- container = klass->generic_class->container_class->generic_container;
- ginst = klass->generic_class->context.class_inst;
- } else if (klass->generic_container && context_used) {
- container = klass->generic_container;
+ if (mono_class_is_ginst (klass)) {
+ container = mono_class_get_generic_container (mono_class_get_generic_class (klass)->container_class);
+ ginst = mono_class_get_generic_class (klass)->context.class_inst;
+ } else if (mono_class_is_gtd (klass) && context_used) {
+ container = mono_class_get_generic_container (klass);
ginst = container->context.class_inst;
} else {
return FALSE;
return FALSE;
}
-#define is_complex_isinst(klass) ((klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
+/*
+ * method_needs_stack_walk:
+ *
+ *   Return TRUE if CMETHOD observes its caller by walking the stack.
+ * Currently this only matches System.Type:GetType () (any overload named
+ * "GetType" on the System.Type class; the signature is not checked).
+ * NOTE(review): presumably callers use this to avoid optimizations (e.g.
+ * inlining) that would hide the caller frame — confirm at the call sites.
+ */
+static gboolean
+method_needs_stack_walk (MonoCompile *cfg, MonoMethod *cmethod)
+{
+	if (cmethod->klass == mono_defaults.systemtype_class) {
+		if (!strcmp (cmethod->name, "GetType"))
+			return TRUE;
+	}
+	return FALSE;
+}
+
+#define is_complex_isinst(klass) (mono_class_is_interface (klass) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || mono_class_is_sealed (klass) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
+
+/*
+ * emit_isinst_with_cache:
+ *
+ *   Emit a call to the managed isinst-with-cache wrapper.  ARGS is the
+ * (obj, klass, cache) triple expected by the wrapper.  KLASS is currently
+ * unused: mono_marshal_get_isinst_with_cache () returns a single wrapper
+ * shared by all classes.
+ */
+static MonoInst*
+emit_isinst_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args)
+{
+	MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
+	return mono_emit_method_call (cfg, mono_isinst, args, NULL);
+}
static MonoInst*
emit_castclass_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args)
{
- MonoMethod *mono_castclass;
+ MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
MonoInst *res;
- mono_castclass = mono_marshal_get_castclass_with_cache ();
-
save_cast_details (cfg, klass, args [0]->dreg, TRUE);
res = mono_emit_method_call (cfg, mono_castclass, args, NULL);
reset_cast_details (cfg);
return (cfg->method_index << 16) | cfg->castclass_cache_index;
}
+
+/*
+ * emit_isinst_with_cache_nonshared:
+ *
+ *   Non-generic-shared variant of the isinst-with-cache call: build the
+ * (obj, klass, cache) argument triple using a compile-time class constant
+ * and a per-call-site inline cache slot, then call the shared wrapper.
+ */
+static MonoInst*
+emit_isinst_with_cache_nonshared (MonoCompile *cfg, MonoInst *obj, MonoClass *klass)
+{
+	MonoInst *args [3];
+	int idx;
+
+	args [0] = obj; /* obj */
+	EMIT_NEW_CLASSCONST (cfg, args [1], klass); /* klass */
+
+	idx = get_castclass_cache_idx (cfg); /* inline cache*/
+	args [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
+
+	return emit_isinst_with_cache (cfg, klass, args);
+}
+
static MonoInst*
emit_castclass_with_cache_nonshared (MonoCompile *cfg, MonoInst *obj, MonoClass *klass)
{
* Returns NULL and set the cfg exception on error.
*/
static MonoInst*
-handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, guint8 *ip, int *inline_costs)
+handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
{
MonoBasicBlock *is_null_bb;
int obj_reg = src->dreg;
int vtable_reg = alloc_preg (cfg);
- int context_used;
- MonoInst *klass_inst = NULL, *res;
-
- context_used = mini_class_check_context_used (cfg, klass);
-
- if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
- res = emit_castclass_with_cache_nonshared (cfg, src, klass);
- (*inline_costs) += 2;
- return res;
- } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
- MonoMethod *mono_castclass;
- MonoInst *iargs [1];
- int costs;
-
- mono_castclass = mono_marshal_get_castclass (klass);
- iargs [0] = src;
-
- save_cast_details (cfg, klass, src->dreg, TRUE);
- costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
- iargs, ip, cfg->real_offset, TRUE);
- reset_cast_details (cfg);
- CHECK_CFG_EXCEPTION;
- g_assert (costs > 0);
-
- cfg->real_offset += 5;
-
- (*inline_costs) += costs;
+ MonoInst *klass_inst = NULL;
+ if (MONO_INS_IS_PCONST_NULL (src))
return src;
- }
if (context_used) {
MonoInst *args [3];
- if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
+ if (mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
MonoInst *cache_ins;
cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
save_cast_details (cfg, klass, obj_reg, FALSE);
- if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
+ if (mono_class_is_interface (klass)) {
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
} else {
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
- if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
+ if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && mono_class_is_sealed (klass)) {
/* the remoting code is broken, access the class for now */
if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
reset_cast_details (cfg);
return src;
-
-exception_exit:
- return NULL;
}
/*
MonoInst *args [3];
if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
- MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
- MonoInst *cache_ins;
+ MonoInst *cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
- cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
-
- /* obj */
- args [0] = src;
+ args [0] = src; /* obj */
/* klass - it's the second element of the cache entry*/
EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
- /* cache */
- args [2] = cache_ins;
-
- return mono_emit_method_call (cfg, mono_isinst, args, NULL);
+ args [2] = cache_ins; /* cache */
+ return emit_isinst_with_cache (cfg, klass, args);
}
klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
- if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
+ if (mono_class_is_interface (klass)) {
g_assert (!context_used);
/* the is_null_bb target simply copies the input register to the output */
mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
} else if (klass->cast_class == mono_defaults.enum_class) {
mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
- } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
+ } else if (mono_class_is_interface (klass->cast_class)) {
mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
} else {
if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
/* the is_null_bb target simply copies the input register to the output */
mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
} else {
- if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
+ if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && mono_class_is_sealed (klass)) {
g_assert (!context_used);
/* the remoting code is broken, access the class for now */
if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
- if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
+ if (mono_class_is_interface (klass)) {
#ifndef DISABLE_REMOTING
NEW_BBLOCK (cfg, interface_fail_bb);
#endif
save_cast_details (cfg, klass, obj_reg, FALSE);
- if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
+ if (mono_class_is_interface (klass)) {
#ifndef DISABLE_REMOTING
NEW_BBLOCK (cfg, interface_fail_bb);
if (!obj)
return NULL;
- if (cfg->llvm_only) {
- MonoInst *args [16];
-
- /*
- * If the method to be called needs an rgctx, we can't fall back to mono_delegate_ctor (), since it might receive
- * the address of a gshared method. So use a JIT icall.
- * FIXME: Optimize this.
- */
- args [0] = obj;
- args [1] = target;
- args [2] = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
- mono_emit_jit_icall (cfg, virtual_ ? mono_init_delegate_virtual : mono_init_delegate, args);
-
- return obj;
- }
-
/* Inline the contents of mono_delegate_ctor */
/* Set target field */
/* Optimize away setting of NULL target */
- if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
+ if (!MONO_INS_IS_PCONST_NULL (target)) {
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
if (cfg->gen_write_barriers) {
dreg = alloc_preg (cfg);
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
}
+ if (cfg->llvm_only) {
+ MonoInst *args [16];
+
+ if (virtual_) {
+ args [0] = obj;
+ args [1] = target;
+ args [2] = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
+ mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate_virtual, args);
+ } else {
+ args [0] = obj;
+ mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate, args);
+ }
+
+ return obj;
+ }
+
if (cfg->compile_aot) {
MonoDelegateClassMethodPair *del_tramp;
cfg->flags |= MONO_CFG_HAS_VARARGS;
/* mono_array_new_va () needs a vararg calling convention */
+ cfg->exception_message = g_strdup ("array-new");
cfg->disable_llvm = TRUE;
/* FIXME: This uses info->sig, but it should use the signature of the wrapper */
* This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
* pack the arguments into an array, and do the rest of the work in in an icall.
*/
- if (((cmethod->klass == mono_defaults.object_class) || (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
+ if (((cmethod->klass == mono_defaults.object_class) || mono_class_is_interface (cmethod->klass) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
(MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mini_is_gsharedvt_type (fsig->ret)) &&
(fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || fsig->params [0]->byref || mini_is_gsharedvt_type (fsig->params [0]))))) {
MonoInst *args [16];
vtable = mono_class_vtable (cfg->domain, method->klass);
if (!vtable)
return FALSE;
- if (!cfg->compile_aot)
- mono_runtime_class_init (vtable);
- } else if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
+ if (!cfg->compile_aot) {
+ MonoError error;
+ if (!mono_runtime_class_init_full (vtable, &error)) {
+ mono_error_cleanup (&error);
+ return FALSE;
+ }
+ }
+ } else if (mono_class_is_before_field_init (method->klass)) {
if (cfg->run_cctors && method->klass->has_cctor) {
/*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
if (!method->klass->runtime_info)
/* running with a specific order... */
if (! vtable->initialized)
return FALSE;
- mono_runtime_class_init (vtable);
+ MonoError error;
+ if (!mono_runtime_class_init_full (vtable, &error)) {
+ mono_error_cleanup (&error);
+ return FALSE;
+ }
}
} else if (mono_class_needs_cctor_run (method->klass, NULL)) {
if (!method->klass->runtime_info)
* the cctor will need to be run at aot method load time, for example,
* or at the end of the compilation of the inlining method.
*/
- if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
+ if (mono_class_needs_cctor_run (method->klass, NULL) && !mono_class_is_before_field_init (method->klass))
return FALSE;
}
return FALSE;
}
- if (klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
+ if (mono_class_is_before_field_init (klass)) {
if (cfg->method == method)
return FALSE;
}
if (is_set) {
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
- if (mini_type_is_reference (fsig->params [2]))
+ if (mini_type_is_reference (&eklass->byval_arg))
emit_write_barrier (cfg, addr, load);
} else {
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
{
if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
- !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
+ !(MONO_INS_IS_PCONST_NULL (sp [2]))) {
MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
MonoInst *iargs [3];
if (opcode && fsig->param_count == 1) {
MONO_INST_NEW (cfg, ins, opcode);
ins->type = STACK_R8;
- ins->dreg = mono_alloc_freg (cfg);
+ ins->dreg = mono_alloc_dreg (cfg, ins->type);
ins->sreg1 = args [0]->dreg;
MONO_ADD_INS (cfg->cbb, ins);
}
if (opcode && fsig->param_count == 2) {
MONO_INST_NEW (cfg, ins, opcode);
ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
- ins->dreg = mono_alloc_ireg (cfg);
+ ins->dreg = mono_alloc_dreg (cfg, ins->type);
ins->sreg1 = args [0]->dreg;
ins->sreg2 = args [1]->dreg;
MONO_ADD_INS (cfg->cbb, ins);
{
MonoInst *ins = NULL;
- static MonoClass *runtime_helpers_class = NULL;
- if (! runtime_helpers_class)
- runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
- "System.Runtime.CompilerServices", "RuntimeHelpers");
+ MonoClass *runtime_helpers_class = mono_class_get_runtime_helpers_class ();
if (cmethod->klass == mono_defaults.string_class) {
if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count + fsig->hasthis == 2) {
} else
return NULL;
} else if (cmethod->klass == mono_defaults.object_class) {
-
if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count + fsig->hasthis == 1) {
int dreg = alloc_ireg_ref (cfg);
int vt_reg = alloc_preg (cfg);
} else
return NULL;
} else if (cmethod->klass == runtime_helpers_class) {
-
if (strcmp (cmethod->name, "get_OffsetToStringData") == 0 && fsig->param_count == 0) {
EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
return ins;
} else
return NULL;
+ } else if (cmethod->klass == mono_defaults.monitor_class) {
+ gboolean is_enter = FALSE;
+ gboolean is_v4 = FALSE;
+
+ if (!strcmp (cmethod->name, "Enter") && fsig->param_count == 2 && fsig->params [1]->byref) {
+ is_enter = TRUE;
+ is_v4 = TRUE;
+ }
+ if (!strcmp (cmethod->name, "Enter") && fsig->param_count == 1)
+ is_enter = TRUE;
+
+ if (is_enter) {
+ /*
+ * To make async stack traces work, icalls which can block should have a wrapper.
+ * For Monitor.Enter, emit two calls: a fastpath which doesn't have a wrapper, and a slowpath, which does.
+ */
+ MonoBasicBlock *end_bb;
+
+ NEW_BBLOCK (cfg, end_bb);
+
+ ins = mono_emit_jit_icall (cfg, is_v4 ? (gpointer)mono_monitor_enter_v4_fast : (gpointer)mono_monitor_enter_fast, args);
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, ins->dreg, 0);
+ MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, end_bb);
+ ins = mono_emit_jit_icall (cfg, is_v4 ? (gpointer)mono_monitor_enter_v4 : (gpointer)mono_monitor_enter, args);
+ MONO_START_BB (cfg, end_bb);
+ return ins;
+ }
} else if (cmethod->klass == mono_defaults.thread_class) {
if (strcmp (cmethod->name, "SpinWait_nop") == 0 && fsig->param_count == 0) {
MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
if (opcode == OP_LOADI8_MEMBASE)
ins = mono_decompose_opcode (cfg, ins);
- emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
+ emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
return ins;
}
opcode = OP_STORE_MEMBASE_REG;
if (opcode) {
- emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
+ emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
MONO_INST_NEW (cfg, ins, opcode);
ins->sreg1 = args [1]->dreg;
if (!cfg->llvm_only && !strcmp (cmethod->name, "Read") && fsig->param_count == 1) {
guint32 opcode = 0;
- gboolean is_ref = mini_type_is_reference (fsig->params [0]);
- gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
-
- if (fsig->params [0]->type == MONO_TYPE_I1)
+ MonoType *t = fsig->params [0];
+ gboolean is_ref;
+ gboolean is_float = t->type == MONO_TYPE_R4 || t->type == MONO_TYPE_R8;
+
+ g_assert (t->byref);
+ /* t is a byref type, so the reference check is more complicated */
+ is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
+ if (t->type == MONO_TYPE_I1)
opcode = OP_ATOMIC_LOAD_I1;
- else if (fsig->params [0]->type == MONO_TYPE_U1 || fsig->params [0]->type == MONO_TYPE_BOOLEAN)
+ else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
opcode = OP_ATOMIC_LOAD_U1;
- else if (fsig->params [0]->type == MONO_TYPE_I2)
+ else if (t->type == MONO_TYPE_I2)
opcode = OP_ATOMIC_LOAD_I2;
- else if (fsig->params [0]->type == MONO_TYPE_U2)
+ else if (t->type == MONO_TYPE_U2)
opcode = OP_ATOMIC_LOAD_U2;
- else if (fsig->params [0]->type == MONO_TYPE_I4)
+ else if (t->type == MONO_TYPE_I4)
opcode = OP_ATOMIC_LOAD_I4;
- else if (fsig->params [0]->type == MONO_TYPE_U4)
+ else if (t->type == MONO_TYPE_U4)
opcode = OP_ATOMIC_LOAD_U4;
- else if (fsig->params [0]->type == MONO_TYPE_R4)
+ else if (t->type == MONO_TYPE_R4)
opcode = OP_ATOMIC_LOAD_R4;
- else if (fsig->params [0]->type == MONO_TYPE_R8)
+ else if (t->type == MONO_TYPE_R8)
opcode = OP_ATOMIC_LOAD_R8;
#if SIZEOF_REGISTER == 8
- else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_I)
+ else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
opcode = OP_ATOMIC_LOAD_I8;
- else if (is_ref || fsig->params [0]->type == MONO_TYPE_U8 || fsig->params [0]->type == MONO_TYPE_U)
+ else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
opcode = OP_ATOMIC_LOAD_U8;
#else
- else if (fsig->params [0]->type == MONO_TYPE_I)
+ else if (t->type == MONO_TYPE_I)
opcode = OP_ATOMIC_LOAD_I4;
- else if (is_ref || fsig->params [0]->type == MONO_TYPE_U)
+ else if (is_ref || t->type == MONO_TYPE_U)
opcode = OP_ATOMIC_LOAD_U4;
#endif
ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ;
MONO_ADD_INS (cfg->cbb, ins);
- switch (fsig->params [0]->type) {
+ switch (t->type) {
case MONO_TYPE_BOOLEAN:
case MONO_TYPE_I1:
case MONO_TYPE_U1:
ins->type = STACK_R8;
break;
default:
- g_assert (mini_type_is_reference (fsig->params [0]));
+ g_assert (is_ref);
ins->type = STACK_OBJ;
break;
}
if (!cfg->llvm_only && !strcmp (cmethod->name, "Write") && fsig->param_count == 2) {
guint32 opcode = 0;
- gboolean is_ref = mini_type_is_reference (fsig->params [0]);
+ MonoType *t = fsig->params [0];
+ gboolean is_ref;
- if (fsig->params [0]->type == MONO_TYPE_I1)
+ g_assert (t->byref);
+ is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
+ if (t->type == MONO_TYPE_I1)
opcode = OP_ATOMIC_STORE_I1;
- else if (fsig->params [0]->type == MONO_TYPE_U1 || fsig->params [0]->type == MONO_TYPE_BOOLEAN)
+ else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
opcode = OP_ATOMIC_STORE_U1;
- else if (fsig->params [0]->type == MONO_TYPE_I2)
+ else if (t->type == MONO_TYPE_I2)
opcode = OP_ATOMIC_STORE_I2;
- else if (fsig->params [0]->type == MONO_TYPE_U2)
+ else if (t->type == MONO_TYPE_U2)
opcode = OP_ATOMIC_STORE_U2;
- else if (fsig->params [0]->type == MONO_TYPE_I4)
+ else if (t->type == MONO_TYPE_I4)
opcode = OP_ATOMIC_STORE_I4;
- else if (fsig->params [0]->type == MONO_TYPE_U4)
+ else if (t->type == MONO_TYPE_U4)
opcode = OP_ATOMIC_STORE_U4;
- else if (fsig->params [0]->type == MONO_TYPE_R4)
+ else if (t->type == MONO_TYPE_R4)
opcode = OP_ATOMIC_STORE_R4;
- else if (fsig->params [0]->type == MONO_TYPE_R8)
+ else if (t->type == MONO_TYPE_R8)
opcode = OP_ATOMIC_STORE_R8;
#if SIZEOF_REGISTER == 8
- else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_I)
+ else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
opcode = OP_ATOMIC_STORE_I8;
- else if (is_ref || fsig->params [0]->type == MONO_TYPE_U8 || fsig->params [0]->type == MONO_TYPE_U)
+ else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
opcode = OP_ATOMIC_STORE_U8;
#else
- else if (fsig->params [0]->type == MONO_TYPE_I)
+ else if (t->type == MONO_TYPE_I)
opcode = OP_ATOMIC_STORE_I4;
- else if (is_ref || fsig->params [0]->type == MONO_TYPE_U)
+ else if (is_ref || t->type == MONO_TYPE_U)
opcode = OP_ATOMIC_STORE_U4;
#endif
(strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
(strcmp (cmethod->klass->name, "Assembly") == 0)) {
if (cfg->llvm_only && !strcmp (cmethod->name, "GetExecutingAssembly")) {
- /* No stack walks are current available, so implement this as an intrinsic */
+ /* No stack walks are currently available, so implement this as an intrinsic */
MonoInst *assembly_ins;
EMIT_NEW_AOTCONST (cfg, assembly_ins, MONO_PATCH_INFO_IMAGE, cfg->method->klass->image);
ins = mono_emit_jit_icall (cfg, mono_get_assembly_object, &assembly_ins);
return ins;
}
+ } else if (cmethod->klass->image == mono_defaults.corlib &&
+ (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
+ (strcmp (cmethod->klass->name, "MethodBase") == 0)) {
+ if (cfg->llvm_only && !strcmp (cmethod->name, "GetCurrentMethod")) {
+ /* No stack walks are currently available, so implement this as an intrinsic */
+ MonoInst *method_ins;
+ MonoMethod *declaring = cfg->method;
+
+ /* This returns the declaring generic method */
+ if (declaring->is_inflated)
+ declaring = ((MonoMethodInflated*)cfg->method)->declaring;
+ EMIT_NEW_AOTCONST (cfg, method_ins, MONO_PATCH_INFO_METHODCONST, declaring);
+ ins = mono_emit_jit_icall (cfg, mono_get_method_object, &method_ins);
+ cfg->no_inline = TRUE;
+ if (cfg->method != cfg->current_method)
+ inline_failure (cfg, "MethodBase:GetCurrentMethod ()");
+ return ins;
+ }
} else if (cmethod->klass == mono_defaults.math_class) {
/*
* There is general branchless code for Min/Max, but it does not work for
!strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) &&
!strcmp (cmethod->klass->name_space, "XamCore.ObjCRuntime") &&
!strcmp (cmethod->klass->name, "Selector")) ||
- (!strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.iOS") &&
+ ((!strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.iOS") ||
+ !strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.Mac")) &&
!strcmp (cmethod->klass->name_space, "ObjCRuntime") &&
!strcmp (cmethod->klass->name, "Selector"))
) {
- if (cfg->backend->have_objc_get_selector &&
+ if ((cfg->backend->have_objc_get_selector || cfg->compile_llvm) &&
!strcmp (cmethod->name, "GetHandle") && fsig->param_count == 1 &&
(args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
cfg->compile_aot) {
MonoInst *pi;
MonoJumpInfoToken *ji;
- MonoString *s;
-
- cfg->disable_llvm = TRUE;
+ char *s;
if (args [0]->opcode == OP_GOT_ENTRY) {
pi = (MonoInst *)args [0]->inst_p1;
NULLIFY_INS (args [0]);
- // FIXME: Ugly
- s = mono_ldstr (cfg->domain, ji->image, mono_metadata_token_index (ji->token));
+ s = mono_ldstr_utf8 (ji->image, mono_metadata_token_index (ji->token), &cfg->error);
+ return_val_if_nok (&cfg->error, NULL);
+
MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
ins->dreg = mono_alloc_ireg (cfg);
// FIXME: Leaks
- ins->inst_p0 = mono_string_to_utf8 (s);
+ ins->inst_p0 = s;
MONO_ADD_INS (cfg->cbb, ins);
return ins;
}
/*
* inline_method:
*
- * Return the cost of inlining CMETHOD.
+ * Return the cost of inlining CMETHOD, or zero if it should not be inlined.
*/
static int
inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
- guchar *ip, guint real_offset, gboolean inline_always)
+ guchar *ip, guint real_offset, gboolean inline_always)
{
+ MonoError error;
MonoInst *ins, *rvar = NULL;
MonoMethodHeader *cheader;
MonoBasicBlock *ebblock, *sbblock;
GHashTable *prev_cbb_hash;
MonoBasicBlock **prev_cil_offset_to_bb;
MonoBasicBlock *prev_cbb;
- unsigned char* prev_cil_start;
+ const unsigned char *prev_ip;
+ unsigned char *prev_cil_start;
guint32 prev_cil_offset_to_bb_len;
MonoMethod *prev_current_method;
MonoGenericContext *prev_generic_context;
}
/* allocate local variables */
- cheader = mono_method_get_header (cmethod);
-
- if (cheader == NULL || mono_loader_get_last_error ()) {
- MonoLoaderError *error = mono_loader_get_last_error ();
-
- if (cheader)
- mono_metadata_free_mh (cheader);
- if (inline_always && error)
- mono_cfg_set_exception (cfg, error->exception_type);
-
- mono_loader_clear_error ();
+ cheader = mono_method_get_header_checked (cmethod, &error);
+ if (!cheader) {
+ if (inline_always) {
+ mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
+ mono_error_move (&cfg->error, &error);
+ } else {
+ mono_error_cleanup (&error);
+ }
return 0;
}
prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
prev_cil_start = cfg->cil_start;
+ prev_ip = cfg->ip;
prev_cbb = cfg->cbb;
prev_current_method = cfg->current_method;
prev_generic_context = cfg->generic_context;
cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
cfg->cil_start = prev_cil_start;
+ cfg->ip = prev_ip;
cfg->locals = prev_locals;
cfg->args = prev_args;
cfg->arg_types = prev_arg_types;
if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
MonoBasicBlock *prev = ebblock->in_bb [0];
- mono_merge_basic_blocks (cfg, prev, ebblock);
- cfg->cbb = prev;
- if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
- mono_merge_basic_blocks (cfg, prev_cbb, prev);
- cfg->cbb = prev_cbb;
+
+ if (prev->next_bb == ebblock) {
+ mono_merge_basic_blocks (cfg, prev, ebblock);
+ cfg->cbb = prev;
+ if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
+ mono_merge_basic_blocks (cfg, prev_cbb, prev);
+ cfg->cbb = prev_cbb;
+ }
+ } else {
+ /* There could be a bblock after 'prev', and making 'prev' the current bb could cause problems */
+ cfg->cbb = ebblock;
}
} else {
/*
if (cfg->verbose_level > 2)
printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
cfg->exception_type = MONO_EXCEPTION_NONE;
- mono_loader_clear_error ();
/* This gets rid of the newly added bblocks */
cfg->cbb = prev_cbb;
#define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
#define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
#define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
-#define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) TYPE_LOAD_ERROR ((klass))
+#define CHECK_TYPELOAD(klass) if (!(klass) || mono_class_has_failure (klass)) TYPE_LOAD_ERROR ((klass))
/* offset from br.s -> br like opcodes */
#define BIG_BRANCH_OFFSET 13
}
static inline MonoMethod *
-mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
+mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context, MonoError *error)
{
MonoMethod *method;
+ mono_error_init (error);
+
if (m->wrapper_type != MONO_WRAPPER_NONE) {
method = (MonoMethod *)mono_method_get_wrapper_data (m, token);
if (context) {
- MonoError error;
- method = mono_class_inflate_generic_method_checked (method, context, &error);
- g_assert (mono_error_ok (&error)); /* FIXME don't swallow the error */
+ method = mono_class_inflate_generic_method_checked (method, context, error);
}
} else {
- method = mono_get_method_full (m->klass->image, token, klass, context);
+ method = mono_get_method_checked (m->klass->image, token, klass, context, error);
}
return method;
static inline MonoMethod *
mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
{
- MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
+ MonoError error;
+ MonoMethod *method = mini_get_method_allow_open (m, token, klass, context, cfg ? &cfg->error : &error);
- if (method && cfg && !cfg->gshared && mono_class_is_open_constructed_type (&method->klass->byval_arg))
- return NULL;
+ if (method && cfg && !cfg->gshared && mono_class_is_open_constructed_type (&method->klass->byval_arg)) {
+ mono_error_set_bad_image (&cfg->error, cfg->method->klass->image, "Method with open type while not compiling gshared");
+ method = NULL;
+ }
+
+ if (!method && !cfg)
+ mono_error_cleanup (&error); /* FIXME don't swallow the error */
return method;
}
if (method->wrapper_type != MONO_WRAPPER_NONE) {
klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
- if (context)
- klass = mono_class_inflate_generic_class (klass, context);
+ if (context) {
+ klass = mono_class_inflate_generic_class_checked (klass, context, &error);
+ mono_error_cleanup (&error); /* FIXME don't swallow the error */
+ }
} else {
klass = mono_class_get_and_inflate_typespec_checked (method->klass->image, token, context, &error);
mono_error_cleanup (&error); /* FIXME don't swallow the error */
}
static inline MonoMethodSignature*
-mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context)
+mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context, MonoError *error)
{
MonoMethodSignature *fsig;
+ mono_error_init (error);
if (method->wrapper_type != MONO_WRAPPER_NONE) {
fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
} else {
- fsig = mono_metadata_parse_signature (method->klass->image, token);
+ fsig = mono_metadata_parse_signature_checked (method->klass->image, token, error);
+ return_val_if_nok (error, NULL);
}
if (context) {
- MonoError error;
- fsig = mono_inflate_generic_signature(fsig, context, &error);
- // FIXME:
- g_assert(mono_error_ok(&error));
+ fsig = mono_inflate_generic_signature(fsig, context, error);
}
return fsig;
}
static void
set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
{
+ MonoError error;
char *method_fname = mono_method_full_name (method, TRUE);
char *method_code;
- MonoMethodHeader *header = mono_method_get_header (method);
+ MonoMethodHeader *header = mono_method_get_header_checked (method, &error);
- if (header->code_size == 0)
+ if (!header) {
+ method_code = g_strdup_printf ("could not parse method body due to %s", mono_error_get_message (&error));
+ mono_error_cleanup (&error);
+ } else if (header->code_size == 0)
method_code = g_strdup ("method body is empty.");
else
method_code = mono_disasm_code_one (NULL, method, ip, NULL);
- mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
- cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
+ mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code));
g_free (method_fname);
g_free (method_code);
cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
}
-static void
-set_exception_object (MonoCompile *cfg, MonoException *exception)
-{
- mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
- MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr, MONO_ROOT_SOURCE_JIT, "jit exception");
- cfg->exception_ptr = exception;
-}
-
static void
emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
{
MonoInst *icall_args [16];
MonoInst *call_target, *ins, *vtable_ins;
int arg_reg, this_reg, vtable_reg;
- gboolean is_iface = cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE;
+ gboolean is_iface = mono_class_is_interface (cmethod->klass);
gboolean is_gsharedvt = cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig);
+ gboolean variant_iface = FALSE;
guint32 slot;
int offset;
this_reg = sp [0]->dreg;
+ if (is_iface && mono_class_has_variant_generic_params (cmethod->klass))
+ variant_iface = TRUE;
+
if (!fsig->generic_param_count && !is_iface && !is_gsharedvt) {
/*
* The simplest case, a normal virtual call.
return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
}
- if (!fsig->generic_param_count && is_iface && !is_gsharedvt) {
+ if (!fsig->generic_param_count && is_iface && !variant_iface && !is_gsharedvt) {
/*
* A simple interface call
*
icall_args [0] = thunk_arg_ins;
icall_args [1] = emit_get_rgctx_method (cfg, context_used,
cmethod, MONO_RGCTX_INFO_METHOD);
- ftndesc_ins = mono_emit_calli (cfg, helper_sig_llvmonly_imt_thunk, icall_args, thunk_addr_ins, NULL, NULL);
+ ftndesc_ins = mono_emit_calli (cfg, helper_sig_llvmonly_imt_trampoline, icall_args, thunk_addr_ins, NULL, NULL);
return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
}
- if (fsig->generic_param_count && !is_gsharedvt) {
+ if ((fsig->generic_param_count || variant_iface) && !is_gsharedvt) {
/*
* This is similar to the interface case, the vtable slot points to an imt thunk which is
* dynamically extended as more instantiations are discovered.
icall_args [0] = thunk_arg_ins;
icall_args [1] = emit_get_rgctx_method (cfg, context_used,
cmethod, MONO_RGCTX_INFO_METHOD);
- ftndesc_ins = mono_emit_calli (cfg, helper_sig_llvmonly_imt_thunk, icall_args, thunk_addr_ins, NULL, NULL);
+ ftndesc_ins = mono_emit_calli (cfg, helper_sig_llvmonly_imt_trampoline, icall_args, thunk_addr_ins, NULL, NULL);
ftndesc_ins->dreg = ftndesc_reg;
/*
* Unlike normal iface calls, these imt thunks can return NULL, i.e. when they are passed an instantiation
static gboolean
is_jit_optimizer_disabled (MonoMethod *m)
{
+ MonoError error;
MonoAssembly *ass = m->klass->image->assembly;
MonoCustomAttrInfo* attrs;
- static MonoClass *klass;
+ MonoClass *klass;
int i;
gboolean val = FALSE;
if (ass->jit_optimizer_disabled_inited)
return ass->jit_optimizer_disabled;
- if (!klass)
- klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
+ klass = mono_class_try_get_debuggable_attribute_class ();
+
if (!klass) {
/* Linked away */
ass->jit_optimizer_disabled = FALSE;
return FALSE;
}
- attrs = mono_custom_attrs_from_assembly (ass);
+ attrs = mono_custom_attrs_from_assembly_checked (ass, FALSE, &error);
+ mono_error_cleanup (&error); /* FIXME don't swallow the error */
if (attrs) {
for (i = 0; i < attrs->num_attrs; ++i) {
MonoCustomAttrEntry *attr = &attrs->attrs [i];
/*
* mono_method_to_ir:
*
- * Translate the .net IL into linear IR.
+ * Translate the .net IL into linear IR.
+ *
+ * @start_bblock: if not NULL, the starting basic block, used during inlining.
+ * @end_bblock: if not NULL, the ending basic block, used during inlining.
+ * @return_var: if not NULL, the place where the return value is stored, used during inlining.
+ * @inline_args: if not NULL, contains the arguments to the inline call
+ * @inline_offset: if not zero, the real offset from the inline call, or zero otherwise.
+ * @is_virtual_call: whether this method is being called as a result of a call to callvirt
+ *
+ * This method is used to turn ECMA IL into Mono's internal Linear IR
+ * representation. It is used both for entire methods and for
+ * inlining existing methods. In the former case, the @start_bblock,
+ * @end_bblock, @return_var, @inline_args are all set to NULL, and the
+ * inline_offset is set to zero.
+ *
+ * Returns: the inline cost, or -1 if there was an error processing this method.
*/
int
mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
image = method->klass->image;
- header = mono_method_get_header (method);
+ header = mono_method_get_header_checked (method, &cfg->error);
if (!header) {
- MonoLoaderError *error;
-
- if ((error = mono_loader_get_last_error ())) {
- mono_cfg_set_exception (cfg, error->exception_type);
- } else {
- mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
- cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
- }
+ mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
goto exception_exit;
+ } else {
+ cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
}
+
generic_container = mono_method_get_generic_container (method);
sig = mono_method_signature (method);
num_args = sig->hasthis + sig->param_count;
/* we use a separate basic block for the initialization code */
NEW_BBLOCK (cfg, init_localsbb);
- cfg->bb_init = init_localsbb;
+ if (cfg->method == method)
+ cfg->bb_init = init_localsbb;
init_localsbb->real_offset = cfg->real_offset;
start_bblock->next_bb = init_localsbb;
init_localsbb->next_bb = cfg->cbb;
skip_dead_blocks = !dont_verify;
if (skip_dead_blocks) {
- original_bb = bb = mono_basic_block_split (method, &cfg->error);
+ original_bb = bb = mono_basic_block_split (method, &cfg->error, header);
CHECK_CFG_ERROR;
g_assert (bb);
}
token = read32 (ip + 1);
/* FIXME: check the signature matches */
cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
-
- if (!cmethod || mono_loader_get_last_error ())
- LOAD_ERROR;
+ CHECK_CFG_ERROR;
if (cfg->gshared && mono_method_check_context_used (cmethod))
GENERIC_SHARING_FAILURE (CEE_JMP);
for (i = 0; i < n; ++i)
EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
+ if (mini_type_is_vtype (mini_get_underlying_type (call->signature->ret)))
+ call->vret_var = cfg->vret_addr;
+
mono_arch_emit_call (cfg, call);
cfg->param_area = MAX(cfg->param_area, call->stack_usage);
MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
CHECK_STACK (1);
--sp;
addr = *sp;
- fsig = mini_get_signature (method, token, generic_context);
+ fsig = mini_get_signature (method, token, generic_context, &cfg->error);
+ CHECK_CFG_ERROR;
if (method->dynamic && fsig->pinvoke) {
MonoInst *args [3];
info_data = addr->inst_right->inst_left;
}
- if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
+ if (info_type == MONO_PATCH_INFO_ICALL_ADDR) {
+ ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR_CALL, info_data, fsig, sp);
+ NULLIFY_INS (addr);
+ goto calli_end;
+ } else if (info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
NULLIFY_INS (addr);
goto calli_end;
ins = NULL;
cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
+ CHECK_CFG_ERROR;
+
cil_method = cmethod;
if (constrained_class) {
}
}
- if (!cmethod || mono_loader_get_last_error ())
- LOAD_ERROR;
if (!dont_verify && !cfg->skip_visibility) {
MonoMethod *target_method = cil_method;
if (method->is_inflated) {
- target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
+ target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context), &cfg->error);
+ CHECK_CFG_ERROR;
}
if (!mono_method_can_access_method (method_definition, target_method) &&
!mono_method_can_access_method (method, cil_method))
- METHOD_ACCESS_FAILURE (method, cil_method);
+ emit_method_access_failure (cfg, method, cil_method);
}
if (mono_security_core_clr_enabled ())
CHECK_CFG_ERROR;
}
- if (cfg->llvm_only && !cfg->method->wrapper_type)
+ if (cfg->llvm_only && !cfg->method->wrapper_type && (!cmethod || cmethod->is_inflated))
cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
/* See code below */
n = fsig->param_count + fsig->hasthis;
- if (!cfg->gshared && cmethod->klass->generic_container)
+ if (!cfg->gshared && mono_class_is_gtd (cmethod->klass))
UNVERIFIED;
if (!cfg->gshared)
if (mini_is_gsharedvt_klass (constrained_class)) {
if ((cmethod->klass != mono_defaults.object_class) && constrained_class->valuetype && cmethod->klass->valuetype) {
/* The 'Own method' case below */
- } else if (cmethod->klass->image != mono_defaults.corlib && !(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !cmethod->klass->valuetype) {
+ } else if (cmethod->klass->image != mono_defaults.corlib && !mono_class_is_interface (cmethod->klass) && !cmethod->klass->valuetype) {
/* 'The type parameter is instantiated as a reference type' case below. */
} else {
ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, &emit_widen);
* A simple solution would be to box always and make a normal virtual call, but that would
* be bad performance wise.
*/
- if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE && cmethod->klass->generic_class) {
+ if (mono_class_is_interface (cmethod->klass) && mono_class_is_ginst (cmethod->klass)) {
/*
* The parent classes implement no generic interfaces, so the called method will be a vtype method, so no boxing neccessary.
*/
nonbox_call->dreg = ins->dreg;
goto call_end;
} else {
- g_assert (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
+ g_assert (mono_class_is_interface (cmethod->klass));
addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
goto call_end;
* If the callee is a shared method, then its static cctor
* might not get called after the call was patched.
*/
- if (cfg->gshared && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
+ if (cfg->gshared && cmethod->klass != method->klass && mono_class_is_ginst (cmethod->klass) && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
emit_class_init (cfg, cmethod->klass);
CHECK_TYPELOAD (cmethod->klass);
}
context_used = mini_method_check_context_used (cfg, cmethod);
- if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
+ if (context_used && mono_class_is_interface (cmethod->klass)) {
/* Generic method interface
calls are resolved via a
helper function and don't
if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
GSHAREDVT_FAILURE (*ip);
- if (cfg->backend->have_generalized_imt_thunk && cfg->backend->gshared_supported && cmethod->wrapper_type == MONO_WRAPPER_NONE) {
+ if (cfg->backend->have_generalized_imt_trampoline && cfg->backend->gshared_supported && cmethod->wrapper_type == MONO_WRAPPER_NONE) {
g_assert (!imt_arg);
if (!context_used)
g_assert (cmethod->is_inflated);
}
goto call_end;
}
-
+ CHECK_CFG_ERROR;
+
/* Inlining */
if ((cfg->opt & MONO_OPT_INLINE) &&
(!virtual_ || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
}
if (!has_vtargs) {
+ if (need_seq_point) {
+ emit_seq_point (cfg, method, ip, FALSE, TRUE);
+ need_seq_point = FALSE;
+ }
for (i = 0; i < n; ++i)
EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
MONO_INST_NEW (cfg, ins, OP_BR);
inline_costs += 10 * num_calls++;
+ /*
+ * Synchronized wrappers.
+ * It's hard to determine where to replace a method with its synchronized
+ * wrapper without causing an infinite recursion. The current solution is
+ * to add the synchronized wrapper in the trampolines, and to
+ * change the called method to a dummy wrapper, and resolve that wrapper
+ * to the real method in mono_jit_compile_method ().
+ */
+ if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
+ MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
+ if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
+ cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
+ }
+
/*
* Making generic calls out of gsharedvt methods.
* This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
* patching gshared method addresses into a gsharedvt method.
*/
- if (cfg->gsharedvt && (mini_is_gsharedvt_signature (fsig) || cmethod->is_inflated || cmethod->klass->generic_class) &&
+ if (cfg->gsharedvt && (mini_is_gsharedvt_signature (fsig) || cmethod->is_inflated || mono_class_is_ginst (cmethod->klass)) &&
!(cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY) &&
- (!(cfg->llvm_only && virtual_))) {
+ (!(cfg->llvm_only && virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)))) {
MonoRgctxInfoType info_type;
if (virtual_) {
- //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
+ //if (mono_class_is_interface (cmethod->klass))
//GSHAREDVT_FAILURE (*ip);
// disable for possible remoting calls
if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
cmethod, MONO_RGCTX_INFO_METHOD);
/* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
vtable_arg = NULL;
- } else if ((cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !imt_arg) {
+ } else if (mono_class_is_interface (cmethod->klass) && !imt_arg) {
/* This can happen when we call a fully instantiated iface method */
imt_arg = emit_get_rgctx_method (cfg, context_used,
cmethod, MONO_RGCTX_INFO_METHOD);
if (fsig->hasthis)
MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
- addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
if (cfg->llvm_only) {
+ if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig))
+ addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GSHAREDVT_OUT_WRAPPER);
+ else
+ addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
// FIXME: Avoid initializing imt_arg/vtable_arg
ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
} else {
+ addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
}
goto call_end;
addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
- if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
+ if (cfg->gen_write_barriers && val->type == STACK_OBJ && !MONO_INS_IS_PCONST_NULL (val))
emit_write_barrier (cfg, addr, val);
if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cmethod->klass))
GSHAREDVT_FAILURE (*ip);
}
}
- /*
- * Synchronized wrappers.
- * Its hard to determine where to replace a method with its synchronized
- * wrapper without causing an infinite recursion. The current solution is
- * to add the synchronized wrapper in the trampolines, and to
- * change the called method to a dummy wrapper, and resolve that wrapper
- * to the real method in mono_jit_compile_method ().
- */
- if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
- MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
- if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
- cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
- }
-
/*
* Virtual calls in llvm-only mode.
*/
EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
}
+ if (cfg->llvm_only && cmethod && method_needs_stack_walk (cfg, cmethod)) {
+ /*
+ * Clang can convert these calls to tail calls which screw up the stack
+ * walk. This happens even when the -fno-optimize-sibling-calls
+ * option is passed to clang.
+ * Work around this by emitting a dummy call.
+ */
+ mono_emit_jit_icall (cfg, mono_dummy_jit_icall, NULL);
+ }
+
CHECK_CFG_EXCEPTION;
ip += 5;
MONO_ADD_INS (cfg->cbb, ins);
- if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
+ if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !MONO_INS_IS_PCONST_NULL (sp [1]))
emit_write_barrier (cfg, sp [0], sp [1]);
inline_costs += 1;
/* Use the immediate opcodes if possible */
if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
- int imm_opcode;
-
- imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
-#if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
- /* Keep emulated opcodes which are optimized away later */
- if ((ins->opcode == OP_IREM_UN || ins->opcode == OP_IDIV_UN_IMM) && (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP)) && sp [1]->opcode == OP_ICONST && mono_is_power_of_two (sp [1]->inst_c0) >= 0) {
- imm_opcode = mono_op_to_op_imm (ins->opcode);
- }
-#endif
+ int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
if (imm_opcode != -1) {
ins->opcode = imm_opcode;
if (sp [1]->opcode == OP_I8CONST) {
EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
- *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
- mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
+ *sp = mono_emit_jit_icall (cfg, ves_icall_mono_ldstr, iargs);
+ mono_ldstr_checked (cfg->domain, image, mono_metadata_token_index (n), &cfg->error);
+ CHECK_CFG_ERROR;
} else {
if (cfg->cbb->out_of_line) {
MonoInst *iargs [2];
else {
NEW_PCONST (cfg, ins, NULL);
ins->type = STACK_OBJ;
- ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
+ ins->inst_p0 = mono_ldstr_checked (cfg->domain, image, mono_metadata_token_index (n), &cfg->error);
+ CHECK_CFG_ERROR;
+
if (!ins->inst_p0)
OUT_OF_MEMORY_FAILURE;
CHECK_OPSIZE (5);
token = read32 (ip + 1);
cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
- if (!cmethod || mono_loader_get_last_error ())
- LOAD_ERROR;
+ CHECK_CFG_ERROR;
+
fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
CHECK_CFG_ERROR;
if (mono_security_core_clr_enabled ())
ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
- if (cfg->gshared && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
+ if (cfg->gshared && cmethod && cmethod->klass != method->klass && mono_class_is_ginst (cmethod->klass) && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
emit_class_init (cfg, cmethod->klass);
CHECK_TYPELOAD (cmethod->klass);
}
break;
}
case CEE_CASTCLASS:
- CHECK_STACK (1);
- --sp;
- CHECK_OPSIZE (5);
- token = read32 (ip + 1);
- klass = mini_get_class (method, token, generic_context);
- CHECK_TYPELOAD (klass);
- if (sp [0]->type != STACK_OBJ)
- UNVERIFIED;
-
- ins = handle_castclass (cfg, klass, *sp, ip, &inline_costs);
- CHECK_CFG_EXCEPTION;
-
- *sp ++ = ins;
- ip += 5;
- break;
case CEE_ISINST: {
CHECK_STACK (1);
--sp;
CHECK_TYPELOAD (klass);
if (sp [0]->type != STACK_OBJ)
UNVERIFIED;
-
- context_used = mini_class_check_context_used (cfg, klass);
-
- if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
- MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
- MonoInst *args [3];
- int idx;
-
- /* obj */
- args [0] = *sp;
- /* klass */
- EMIT_NEW_CLASSCONST (cfg, args [1], klass);
-
- /* inline cache*/
- idx = get_castclass_cache_idx (cfg);
- args [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
-
- *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
- ip += 5;
- inline_costs += 2;
- } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
- MonoMethod *mono_isinst;
- MonoInst *iargs [1];
- int costs;
-
- mono_isinst = mono_marshal_get_isinst (klass);
- iargs [0] = sp [0];
-
- costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
- iargs, ip, cfg->real_offset, TRUE);
- CHECK_CFG_EXCEPTION;
- g_assert (costs > 0);
-
- ip += 5;
- cfg->real_offset += 5;
+ MONO_INST_NEW (cfg, ins, *ip == CEE_ISINST ? OP_ISINST : OP_CASTCLASS);
+ ins->dreg = alloc_preg (cfg);
+ ins->sreg1 = (*sp)->dreg;
+ ins->klass = klass;
+ ins->type = STACK_OBJ;
+ MONO_ADD_INS (cfg->cbb, ins);
- *sp++= iargs [0];
+ CHECK_CFG_EXCEPTION;
+ *sp++ = ins;
+ ip += 5;
- inline_costs += costs;
- }
- else {
- ins = handle_isinst (cfg, klass, *sp, context_used);
- CHECK_CFG_EXCEPTION;
- *sp ++ = ins;
- ip += 5;
- }
+ cfg->flags |= MONO_CFG_HAS_TYPE_CHECK;
break;
}
case CEE_UNBOX_ANY: {
res = handle_unbox_gsharedvt (cfg, klass, *sp);
inline_costs += 2;
} else if (generic_class_is_reference_type (cfg, klass)) {
- res = handle_castclass (cfg, klass, *sp, ip, &inline_costs);
- CHECK_CFG_EXCEPTION;
+ if (MONO_INS_IS_PCONST_NULL (*sp)) {
+ EMIT_NEW_PCONST (cfg, res, NULL);
+ res->type = STACK_OBJ;
+ } else {
+ MONO_INST_NEW (cfg, res, OP_CASTCLASS);
+ res->dreg = alloc_preg (cfg);
+ res->sreg1 = (*sp)->dreg;
+ res->klass = klass;
+ res->type = STACK_OBJ;
+ MONO_ADD_INS (cfg->cbb, res);
+ cfg->flags |= MONO_CFG_HAS_TYPE_CHECK;
+ }
} else if (mono_class_is_nullable (klass)) {
res = handle_unbox_nullable (cfg, *sp, klass, context_used);
} else {
} else
#endif
{
- MonoInst *store;
+ MonoInst *store, *wbarrier_ptr_ins = NULL;
MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
+ if (ins_flag & MONO_INST_VOLATILE) {
+ /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
+ emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
+ }
+
if (mini_is_gsharedvt_klass (klass)) {
MonoInst *offset_ins;
EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
dreg = alloc_ireg_mp (cfg);
EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
+ wbarrier_ptr_ins = ins;
/* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
} else {
if (sp [0]->opcode != OP_LDADDR)
store->flags |= MONO_INST_FAULT;
- if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
- /* insert call to write barrier */
- MonoInst *ptr;
- int dreg;
+ if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !MONO_INS_IS_PCONST_NULL (sp [1])) {
+ if (mini_is_gsharedvt_klass (klass)) {
+ g_assert (wbarrier_ptr_ins);
+ emit_write_barrier (cfg, wbarrier_ptr_ins, sp [1]);
+ } else {
+ /* insert call to write barrier */
+ MonoInst *ptr;
+ int dreg;
- dreg = alloc_ireg_mp (cfg);
- EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
- emit_write_barrier (cfg, ptr, sp [1]);
- }
+ dreg = alloc_ireg_mp (cfg);
+ EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
+ emit_write_barrier (cfg, ptr, sp [1]);
+ }
+ }
store->flags |= ins_flag;
}
/* STATIC CASE */
context_used = mini_class_check_context_used (cfg, klass);
- if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
- UNVERIFIED;
+ if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL) {
+ mono_error_set_field_load (&cfg->error, field->parent, field->name, "Using static instructions with literal field");
+ CHECK_CFG_ERROR;
+ }
/* The special_static_fields field is init'd in mono_class_vtable, so it needs
* to be called here.
}
} else {
if (cfg->run_cctors) {
- MonoException *ex;
/* This makes so that inline cannot trigger */
/* .cctors: too many apps depend on them */
/* running with a specific order... */
g_assert (vtable);
if (! vtable->initialized)
INLINE_FAILURE ("class init");
- ex = mono_runtime_class_init_full (vtable, FALSE);
- if (ex) {
- set_exception_object (cfg, ex);
+ if (!mono_runtime_class_init_full (vtable, &cfg->error)) {
+ mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
goto exception_exit;
}
}
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
ins->flags |= ins_flag;
if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
- generic_class_is_reference_type (cfg, klass)) {
+ generic_class_is_reference_type (cfg, klass) && !MONO_INS_IS_PCONST_NULL (sp [1])) {
/* insert call to write barrier */
emit_write_barrier (cfg, sp [0], sp [1]);
}
if (managed_alloc)
ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
else
- ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
+ ins = mono_emit_jit_icall (cfg, ves_icall_array_new_specific, args);
} else {
if (cfg->opt & MONO_OPT_SHARED) {
/* Decompose now to avoid problems with references to the domainvar */
EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
iargs [2] = sp [0];
- ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
+ ins = mono_emit_jit_icall (cfg, ves_icall_array_new, iargs);
} else {
/* Decompose later since it is needed by abcrem */
MonoClass *array_type = mono_array_class_get (klass, 1);
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
- } else if (cfg->compile_aot) {
+ } else {
int const_reg = alloc_preg (cfg);
int type_reg = alloc_preg (cfg);
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
- } else {
- MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
- MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), klass);
}
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
}
} else {
- EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, (MonoType *)handle));
+ MonoReflectionType *rt = mono_type_get_object_checked (cfg->domain, (MonoType *)handle, &cfg->error);
+ CHECK_CFG_ERROR;
+ EMIT_NEW_PCONST (cfg, ins, rt);
}
ins->type = STACK_OBJ;
ins->klass = cmethod->klass;
}
case CEE_THROW:
CHECK_STACK (1);
+ if (sp [-1]->type != STACK_OBJ)
+ UNVERIFIED;
+
MONO_INST_NEW (cfg, ins, OP_THROW);
--sp;
ins->sreg1 = sp [0]->dreg;
INLINE_FAILURE ("throw");
break;
case CEE_ENDFINALLY:
+ if (!ip_in_finally_clause (cfg, ip - header->code))
+ UNVERIFIED;
/* mono_save_seq_point_info () depends on this */
if (sp != stack_start)
emit_seq_point (cfg, method, ip, FALSE, FALSE);
MONO_ADD_INS (cfg->cbb, iargs [0]);
NEW_CLASSCONST (cfg, iargs [1], klass);
MONO_ADD_INS (cfg->cbb, iargs [1]);
- *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
+ *sp++ = mono_emit_jit_icall (cfg, ves_icall_object_new, iargs);
ip += 6;
inline_costs += 10 * num_calls++;
break;
ip += 6;
break;
}
+ case CEE_MONO_ATOMIC_STORE_I4: {
+ g_assert (mono_arch_opcode_supported (OP_ATOMIC_STORE_I4));
+
+ CHECK_OPSIZE (6);
+ CHECK_STACK (2);
+ sp -= 2;
+
+ MONO_INST_NEW (cfg, ins, OP_ATOMIC_STORE_I4);
+ ins->dreg = sp [0]->dreg;
+ ins->sreg1 = sp [1]->dreg;
+ ins->backend.memory_barrier_kind = (int) read32 (ip + 2);
+ MONO_ADD_INS (cfg->cbb, ins);
+
+ ip += 6;
+ break;
+ }
case CEE_MONO_JIT_ATTACH: {
MonoInst *args [16], *domain_ins;
MonoInst *ad_ins, *jit_tls_ins;
MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
+ g_assert (!mono_threads_is_coop_enabled ());
+
cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
EMIT_NEW_PCONST (cfg, ins, NULL);
MONO_START_BB (cfg, call_bb);
}
- if (cfg->compile_aot) {
- /* AOT code is only used in the root domain */
- EMIT_NEW_PCONST (cfg, args [0], NULL);
- } else {
- EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
- }
+ /* AOT code is only used in the root domain */
+ EMIT_NEW_PCONST (cfg, args [0], cfg->compile_aot ? NULL : cfg->domain);
ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
if (next_bb)
MONO_START_BB (cfg, next_bb);
+
+
ip += 2;
break;
}
ip += 2;
break;
}
+ case CEE_MONO_CALLI_EXTRA_ARG: {
+ MonoInst *addr;
+ MonoMethodSignature *fsig;
+ MonoInst *arg;
+
+ /*
+ * This is the same as CEE_CALLI, but passes an additional argument
+ * to the called method in llvmonly mode.
+ * This is only used by delegate invoke wrappers to call the
+ * actual delegate method.
+ */
+ g_assert (method->wrapper_type == MONO_WRAPPER_DELEGATE_INVOKE);
+
+ CHECK_OPSIZE (6);
+ token = read32 (ip + 2);
+
+ ins = NULL;
+
+ cmethod = NULL;
+ CHECK_STACK (1);
+ --sp;
+ addr = *sp;
+ fsig = mini_get_signature (method, token, generic_context, &cfg->error);
+ CHECK_CFG_ERROR;
+
+ if (cfg->llvm_only)
+ cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
+
+ n = fsig->param_count + fsig->hasthis + 1;
+
+ CHECK_STACK (n);
+
+ sp -= n;
+ arg = sp [n - 1];
+
+ if (cfg->llvm_only) {
+ /*
+ * The lowest bit of 'arg' determines whether the callee uses the gsharedvt
+ * cconv. This is set by mono_init_delegate ().
+ */
+ if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig)) {
+ MonoInst *callee = addr;
+ MonoInst *call, *localloc_ins;
+ MonoBasicBlock *is_gsharedvt_bb, *end_bb;
+ int low_bit_reg = alloc_preg (cfg);
+
+ NEW_BBLOCK (cfg, is_gsharedvt_bb);
+ NEW_BBLOCK (cfg, end_bb);
+
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
+ MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
+
+ /* Normal case: callee uses a normal cconv, have to add an out wrapper */
+ addr = emit_get_rgctx_sig (cfg, context_used,
+ fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
+ /*
+ * ADDR points to a gsharedvt-out wrapper, have to pass <callee, arg> as an extra arg.
+ */
+ MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
+ ins->dreg = alloc_preg (cfg);
+ ins->inst_imm = 2 * SIZEOF_VOID_P;
+ MONO_ADD_INS (cfg->cbb, ins);
+ localloc_ins = ins;
+ cfg->flags |= MONO_CFG_HAS_ALLOCA;
+ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
+ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
+
+ call = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
+ MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
+
+ /* Gsharedvt case: callee uses a gsharedvt cconv, no conversion is needed */
+ MONO_START_BB (cfg, is_gsharedvt_bb);
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
+ ins = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
+ ins->dreg = call->dreg;
+
+ MONO_START_BB (cfg, end_bb);
+ } else {
+ /* Caller uses a normal calling conv */
+
+ MonoInst *callee = addr;
+ MonoInst *call, *localloc_ins;
+ MonoBasicBlock *is_gsharedvt_bb, *end_bb;
+ int low_bit_reg = alloc_preg (cfg);
+
+ NEW_BBLOCK (cfg, is_gsharedvt_bb);
+ NEW_BBLOCK (cfg, end_bb);
+
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
+ MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
+
+ /* Normal case: callee uses a normal cconv, no conversion is needed */
+ call = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
+ MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
+ /* Gsharedvt case: callee uses a gsharedvt cconv, have to add an in wrapper */
+ MONO_START_BB (cfg, is_gsharedvt_bb);
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
+ NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_GSHAREDVT_IN_WRAPPER, fsig);
+ MONO_ADD_INS (cfg->cbb, addr);
+ /*
+ * ADDR points to a gsharedvt-in wrapper, have to pass <callee, arg> as an extra arg.
+ */
+ MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
+ ins->dreg = alloc_preg (cfg);
+ ins->inst_imm = 2 * SIZEOF_VOID_P;
+ MONO_ADD_INS (cfg->cbb, ins);
+ localloc_ins = ins;
+ cfg->flags |= MONO_CFG_HAS_ALLOCA;
+ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
+ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
+
+ ins = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
+ ins->dreg = call->dreg;
+ MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
+
+ MONO_START_BB (cfg, end_bb);
+ }
+ } else {
+ /* Same as CEE_CALLI */
+ if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
+ /*
+ * We pass the address to the gsharedvt trampoline in the rgctx reg
+ */
+ MonoInst *callee = addr;
+
+ addr = emit_get_rgctx_sig (cfg, context_used,
+ fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
+ ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
+ } else {
+ ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
+ }
+ }
+
+ if (!MONO_TYPE_IS_VOID (fsig->ret))
+ *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
+
+ CHECK_CFG_EXCEPTION;
+
+ ip += 6;
+ ins_flag = 0;
+ constrained_class = NULL;
+ break;
+ }
+ case CEE_MONO_LDDOMAIN:
+ CHECK_STACK_OVF (1);
+ EMIT_NEW_PCONST (cfg, ins, cfg->compile_aot ? NULL : cfg->domain);
+ ip += 2;
+ *sp++ = ins;
+ break;
+ case CEE_MONO_GET_LAST_ERROR:
+ CHECK_OPSIZE (2);
+ CHECK_STACK_OVF (1);
+
+ MONO_INST_NEW (cfg, ins, OP_GET_LAST_ERROR);
+ ins->dreg = alloc_dreg (cfg, STACK_I4);
+ ins->type = STACK_I4;
+ MONO_ADD_INS (cfg->cbb, ins);
+
+ ip += 2;
+ *sp++ = ins;
+ break;
default:
g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
break;
CHECK_OPSIZE (6);
n = read32 (ip + 2);
cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
- if (!cmethod || mono_loader_get_last_error ())
- LOAD_ERROR;
+ CHECK_CFG_ERROR;
+
mono_class_init (cmethod->klass);
mono_save_token_info (cfg, image, n, cmethod);
cil_method = cmethod;
if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
- METHOD_ACCESS_FAILURE (method, cil_method);
+ emit_method_access_failure (cfg, method, cil_method);
if (mono_security_core_clr_enabled ())
ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
CHECK_OPSIZE (6);
n = read32 (ip + 2);
cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
- if (!cmethod || mono_loader_get_last_error ())
- LOAD_ERROR;
+ CHECK_CFG_ERROR;
+
mono_class_init (cmethod->klass);
context_used = mini_method_check_context_used (cfg, cmethod);
ip += 4;
inline_costs += 1;
break;
- case CEE_LOCALLOC:
+ case CEE_LOCALLOC: {
CHECK_STACK (1);
+ MonoBasicBlock *non_zero_bb, *end_bb;
+ int alloc_ptr = alloc_preg (cfg);
--sp;
if (sp != stack_start)
UNVERIFIED;
*/
INLINE_FAILURE("localloc");
+ NEW_BBLOCK (cfg, non_zero_bb);
+ NEW_BBLOCK (cfg, end_bb);
+
+ /* if size != zero */
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
+ MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_zero_bb);
+
+ // size is zero, so the result is NULL
+ MONO_EMIT_NEW_PCONST (cfg, alloc_ptr, NULL);
+ MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
+
+ MONO_START_BB (cfg, non_zero_bb);
MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
- ins->dreg = alloc_preg (cfg);
+ ins->dreg = alloc_ptr;
ins->sreg1 = sp [0]->dreg;
ins->type = STACK_PTR;
MONO_ADD_INS (cfg->cbb, ins);
if (init_locals)
ins->flags |= MONO_INST_INIT;
+ MONO_START_BB (cfg, end_bb);
+ EMIT_NEW_UNALU (cfg, ins, OP_MOVE, alloc_preg (cfg), alloc_ptr);
+ ins->type = STACK_PTR;
+
*sp++ = ins;
ip += 2;
break;
+ }
case CEE_ENDFILTER: {
MonoExceptionClause *clause, *nearest;
int cc;
if (cfg->method == method) {
MonoBasicBlock *bb;
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
- bb->region = mono_find_block_region (cfg, bb->real_offset);
+ if (bb == cfg->bb_init)
+ bb->region = -1;
+ else
+ bb->region = mono_find_block_region (cfg, bb->real_offset);
if (cfg->spvars)
mono_create_spvar_for_region (cfg, bb->region);
if (cfg->verbose_level > 2)
printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
}
+ } else {
+ MonoBasicBlock *bb;
+ /* get_most_deep_clause () in mini-llvm.c depends on this for inlined bblocks */
+ for (bb = start_bblock; bb != end_bblock; bb = bb->next_bb) {
+ bb->real_offset = inline_offset;
+ }
}
if (inline_costs < 0) {
/* Method is too large */
mname = mono_method_full_name (method, TRUE);
- mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
- cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
+ mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Method %s is too complex.", mname));
g_free (mname);
}
g_slist_free (class_inits);
mono_basic_block_free (original_bb);
cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
- cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
if (cfg->exception_type)
return -1;
else
* Make the component vregs volatile since the optimizations can
* get confused otherwise.
*/
- get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
- get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
+ get_vreg_to_inst (cfg, MONO_LVREG_LS (vreg))->flags |= MONO_INST_VOLATILE;
+ get_vreg_to_inst (cfg, MONO_LVREG_MS (vreg))->flags |= MONO_INST_VOLATILE;
}
#endif
/* Modify the two component vars too */
MonoInst *var1;
- var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
+ var1 = get_vreg_to_inst (cfg, MONO_LVREG_LS (cfg->varinfo [pos]->dreg));
var1->inst_c0 = pos;
- var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
+ var1 = get_vreg_to_inst (cfg, MONO_LVREG_MS (cfg->varinfo [pos]->dreg));
var1->inst_c0 = pos;
}
#endif
g_assert (ins->opcode == OP_REGOFFSET);
- tree = get_vreg_to_inst (cfg, ins->dreg + 1);
+ tree = get_vreg_to_inst (cfg, MONO_LVREG_LS (ins->dreg));
g_assert (tree);
tree->opcode = OP_REGOFFSET;
tree->inst_basereg = ins->inst_basereg;
tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
- tree = get_vreg_to_inst (cfg, ins->dreg + 2);
+ tree = get_vreg_to_inst (cfg, MONO_LVREG_MS (ins->dreg));
g_assert (tree);
tree->opcode = OP_REGOFFSET;
tree->inst_basereg = ins->inst_basereg;
#if SIZEOF_REGISTER != 8
if (regtype == 'l') {
- NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
+ NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, MONO_LVREG_LS (ins->dreg));
mono_bblock_insert_after_ins (bb, ins, store_ins);
- NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
+ NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, MONO_LVREG_MS (ins->dreg));
mono_bblock_insert_after_ins (bb, ins, store_ins);
def_ins = store_ins;
}
#if SIZEOF_REGISTER != 8
if (regtype == 'l') {
- NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
+ NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_MS (sreg), var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
mono_bblock_insert_before_ins (bb, ins, load_ins);
- NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
+ NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_LS (sreg), var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
mono_bblock_insert_before_ins (bb, ins, load_ins);
use_ins = load_ins;
}
g_free (live_range_end_bb);
}
+/*
+ * mono_decompose_typecheck:
+ *
+ *   Lower a single OP_ISINST/OP_CASTCLASS instruction INS in BB into the IR
+ * sequence implementing the type check, then splice the newly emitted basic
+ * blocks into BB in place of INS using mono_replace_ins ().
+ */
+static void
+mono_decompose_typecheck (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins)
+{
+ MonoInst *ret, *move, *source;
+ MonoClass *klass = ins->klass;
+ int context_used = mini_class_check_context_used (cfg, klass);
+ int is_isinst = ins->opcode == OP_ISINST;
+ g_assert (is_isinst || ins->opcode == OP_CASTCLASS);
+ /* The helpers below need a MonoInst* variable for the checked object;
+ * materialize one for sreg1 if no variable is associated with it yet. */
+ source = get_vreg_to_inst (cfg, ins->sreg1);
+ if (!source || source == (MonoInst *) -1)
+ source = mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, ins->sreg1);
+ g_assert (source && source != (MonoInst *) -1);
+
+ /* Emit the replacement code into a fresh bblock chain starting at first_bb. */
+ MonoBasicBlock *first_bb;
+ NEW_BBLOCK (cfg, first_bb);
+ cfg->cbb = first_bb;
+
+ if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
+ /* Variant generic interfaces/delegates: use the inline-cache based helpers. */
+ if (is_isinst)
+ ret = emit_isinst_with_cache_nonshared (cfg, source, klass);
+ else
+ ret = emit_castclass_with_cache_nonshared (cfg, source, klass);
+ } else if (!context_used && (mono_class_is_marshalbyref (klass) || mono_class_is_interface (klass))) {
+ /* MBR/interface checks: inline the corresponding marshal wrapper method. */
+ MonoInst *iargs [1];
+ int costs;
+
+ iargs [0] = source;
+ if (is_isinst) {
+ MonoMethod *wrapper = mono_marshal_get_isinst (klass);
+ costs = inline_method (cfg, wrapper, mono_method_signature (wrapper), iargs, 0, 0, TRUE);
+ } else {
+ MonoMethod *wrapper = mono_marshal_get_castclass (klass);
+ /* Record cast details around the inlined check so a failing cast
+ * can produce a descriptive InvalidCastException. */
+ save_cast_details (cfg, klass, source->dreg, TRUE);
+ costs = inline_method (cfg, wrapper, mono_method_signature (wrapper), iargs, 0, 0, TRUE);
+ reset_cast_details (cfg);
+ }
+ g_assert (costs > 0);
+ ret = iargs [0];
+ } else {
+ /* Generic/shared or ordinary classes: emit the standard check sequence. */
+ if (is_isinst)
+ ret = handle_isinst (cfg, klass, source, context_used);
+ else
+ ret = handle_castclass (cfg, klass, source, context_used);
+ }
+ /* Forward the result into the dreg the original instruction defined. */
+ EMIT_NEW_UNALU (cfg, move, OP_MOVE, ins->dreg, ret->dreg);
+
+ g_assert (cfg->cbb->code || first_bb->code);
+ MonoInst *prev = ins->prev;
+ mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
+}
+
+/*
+ * mono_decompose_typechecks:
+ *
+ *   Walk every basic block of CFG and decompose each OP_ISINST/OP_CASTCLASS
+ * instruction via mono_decompose_typecheck ().
+ */
+void
+mono_decompose_typechecks (MonoCompile *cfg)
+{
+ for (MonoBasicBlock *bb = cfg->bb_entry; bb; bb = bb->next_bb) {
+ MonoInst *ins;
+ MONO_BB_FOR_EACH_INS (bb, ins) {
+ switch (ins->opcode) {
+ case OP_ISINST:
+ case OP_CASTCLASS:
+ mono_decompose_typecheck (cfg, bb, ins);
+ break;
+ }
+ }
+ }
+}
+
+
/**
* FIXME:
* - use 'iadd' instead of 'int_add'