#include <mono/metadata/marshal.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/mono-debug.h>
+#include <mono/metadata/mono-debug-debugger.h>
#include <mono/metadata/gc-internal.h>
#include <mono/metadata/security-manager.h>
#include <mono/metadata/threads-types.h>
MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
- guchar *ip, guint real_offset, gboolean inline_always, MonoBasicBlock **out_cbb);
+ guchar *ip, guint real_offset, gboolean inline_always);
/* helper methods signatures */
static MonoMethodSignature *helper_sig_class_init_trampoline;
static MonoMethodSignature *helper_sig_domain_get;
-static MonoMethodSignature *helper_sig_generic_class_init_trampoline;
-static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm;
static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm;
{
helper_sig_domain_get = mono_create_icall_signature ("ptr");
helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
- helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
- helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
add_widen_op (cfg, ins, &sp [0], &sp [1]); \
ins->dreg = alloc_dreg ((cfg), (ins)->type); \
MONO_ADD_INS ((cfg)->cbb, (ins)); \
- *sp++ = mono_decompose_opcode ((cfg), (ins), &bblock); \
+ *sp++ = mono_decompose_opcode ((cfg), (ins)); \
} while (0)
#define ADD_UNOP(op) do { \
CHECK_TYPE (ins); \
(ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
MONO_ADD_INS ((cfg)->cbb, (ins)); \
- *sp++ = mono_decompose_opcode (cfg, ins, &bblock); \
+ *sp++ = mono_decompose_opcode (cfg, ins); \
} while (0)
#define ADD_BINCOND(next_block) do { \
type_from_op (cfg, ins, sp [0], sp [1]); \
ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
GET_BBLOCK (cfg, tblock, target); \
- link_bblock (cfg, bblock, tblock); \
+ link_bblock (cfg, cfg->cbb, tblock); \
ins->inst_true_bb = tblock; \
if ((next_block)) { \
- link_bblock (cfg, bblock, (next_block)); \
+ link_bblock (cfg, cfg->cbb, (next_block)); \
ins->inst_false_bb = (next_block); \
start_new_bblock = 1; \
} else { \
GET_BBLOCK (cfg, tblock, ip); \
- link_bblock (cfg, bblock, tblock); \
+ link_bblock (cfg, cfg->cbb, tblock); \
ins->inst_false_bb = tblock; \
start_new_bblock = 2; \
} \
handle_stack_args (cfg, stack_start, sp - stack_start); \
CHECK_UNVERIFIABLE (cfg); \
} \
- MONO_ADD_INS (bblock, cmp); \
- MONO_ADD_INS (bblock, ins); \
+ MONO_ADD_INS (cfg->cbb, cmp); \
+ MONO_ADD_INS (cfg->cbb, ins); \
} while (0)
/* *
}
}
-/* Emit code which loads interface_offsets [klass->interface_id]
- * The array is stored in memory before vtable.
-*/
-static void
-mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
-{
- if (cfg->compile_aot) {
- int ioffset_reg = alloc_preg (cfg);
- int iid_reg = alloc_preg (cfg);
-
- MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
- MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
- MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
- }
- else {
- MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
- }
-}
-
static void
mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
{
if (arg->type != STACK_VTYPE)
return 1;
klass = mono_class_from_mono_type (simple_type);
- if (klass != arg->klass)
+ /* The second case is needed when doing partial sharing */
+ if (klass != arg->klass && mono_class_from_mono_type (target) != arg->klass)
return 1;
return 0;
} else {
#ifdef ENABLE_LLVM
call->imt_arg_reg = method_reg;
#endif
-#ifdef MONO_ARCH_IMT_REG
mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
-#else
- /* Need this to keep the IMT arg alive */
- mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
-#endif
return;
}
-#ifdef MONO_ARCH_IMT_REG
method_reg = alloc_preg (cfg);
if (imt_arg) {
}
mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
-#else
- mono_arch_emit_imt_argument (cfg, call, imt_arg);
-#endif
}
static MonoJumpInfo *
(cmethod->klass->generic_class || cmethod->klass->generic_container)) {
gboolean sharable = FALSE;
- if (mono_method_is_generic_sharable (cmethod, TRUE)) {
+ if (mono_method_is_generic_sharable (cmethod, TRUE))
sharable = TRUE;
- } else {
- gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
- MonoGenericContext *context = mini_class_get_context (cmethod->klass);
- gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
-
- sharable = sharing_enabled && context_sharable;
- }
/*
* Pass vtable iff target method might
if (mono_method_is_generic_sharable (cmethod, TRUE)) {
pass_mrgctx = TRUE;
} else {
- gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
- MonoGenericContext *context = mini_method_get_context (cmethod);
- gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
-
- if (sharing_enabled && context_sharable)
- pass_mrgctx = TRUE;
if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, mono_method_signature (cmethod)))
pass_mrgctx = TRUE;
}
this_reg = this->dreg;
- if (ARCH_HAVE_DELEGATE_TRAMPOLINES && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
+ if ((method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
MonoInst *dummy_use;
MONO_EMIT_NULL_CHECK (cfg, this_reg);
}
MonoInst*
-mono_emit_jit_icall_by_info (MonoCompile *cfg, MonoJitICallInfo *info, MonoInst **args, MonoBasicBlock **out_cbb)
+mono_emit_jit_icall_by_info (MonoCompile *cfg, MonoJitICallInfo *info, MonoInst **args)
{
/*
* Call the jit icall without a wrapper if possible.
* an exception check.
*/
costs = inline_method (cfg, info->wrapper_method, NULL,
- args, NULL, cfg->real_offset, TRUE, out_cbb);
+ args, NULL, cfg->real_offset, TRUE);
g_assert (costs > 0);
g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));
}
}
+/*
+ * emit_get_rgctx:
+ *
+ * Emit IR to return either the this pointer for instance method,
+ * or the mrgctx for static methods.
+ */
static MonoInst*
emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
{
return res;
}
+/*
+ * emit_rgctx_fetch:
+ *
+ * Emit IR to load the value of the rgctx entry ENTRY from the rgctx
+ * given by RGCTX.
+ */
static inline MonoInst*
emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
{
+ /* Inline version, not currently used */
+ // FIXME: This can be called from mono_decompose_vtype_opts (), which can't create new bblocks
+#if 0
+ int i, slot, depth, index, rgctx_reg, val_reg, res_reg;
+ gboolean mrgctx;
+ MonoBasicBlock *is_null_bb, *end_bb;
+ MonoInst *res, *ins, *call;
+ MonoInst *args[16];
+
+ slot = mini_get_rgctx_entry_slot (entry);
+
+ mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
+ index = MONO_RGCTX_SLOT_INDEX (slot);
+ if (mrgctx)
+ index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
+ /* Walk the rgctx array chain: each level holds (size - 1) slots plus a link */
+ for (depth = 0; ; ++depth) {
+ int size = mono_class_rgctx_get_array_size (depth, mrgctx);
+
+ if (index < size - 1)
+ break;
+ index -= size - 1;
+ }
+
+ NEW_BBLOCK (cfg, end_bb);
+ NEW_BBLOCK (cfg, is_null_bb);
+
+ if (mrgctx) {
+ rgctx_reg = rgctx->dreg;
+ } else {
+ rgctx_reg = alloc_preg (cfg);
+
+ MONO_EMIT_NEW_LOAD_MEMBASE (cfg, rgctx_reg, rgctx->dreg, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context));
+ // FIXME: Avoid this check by allocating the table when the vtable is created etc.
+ /* NOTE(review): is_null_bb is allocated a second time here, overwriting the
+  * block created above; the earlier NEW_BBLOCK result becomes unreachable.
+  * Harmless while this path is under #if 0, but should be fixed if enabled. */
+ NEW_BBLOCK (cfg, is_null_bb);
+
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
+ MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
+ }
+
+ for (i = 0; i < depth; ++i) {
+ int array_reg = alloc_preg (cfg);
+
+ /* load ptr to next array */
+ if (mrgctx && i == 0)
+ MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT);
+ else
+ MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, 0);
+ rgctx_reg = array_reg;
+ /* is the ptr null? */
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
+ /* if yes, jump to actual trampoline */
+ MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
+ }
+
+ /* fetch slot */
+ val_reg = alloc_preg (cfg);
+ MONO_EMIT_NEW_LOAD_MEMBASE (cfg, val_reg, rgctx_reg, (index + 1) * sizeof (gpointer));
+ /* is the slot null? */
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, val_reg, 0);
+ /* if yes, jump to actual trampoline */
+ MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
+
+ /* Fastpath */
+ res_reg = alloc_preg (cfg);
+ MONO_INST_NEW (cfg, ins, OP_MOVE);
+ ins->dreg = res_reg;
+ ins->sreg1 = val_reg;
+ MONO_ADD_INS (cfg->cbb, ins);
+ res = ins;
+ MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
+
+ /* Slowpath */
+ MONO_START_BB (cfg, is_null_bb);
+ args [0] = rgctx;
+ EMIT_NEW_ICONST (cfg, args [1], index);
+ if (mrgctx)
+ call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
+ else
+ call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
+ MONO_INST_NEW (cfg, ins, OP_MOVE);
+ ins->dreg = res_reg;
+ ins->sreg1 = call->dreg;
+ MONO_ADD_INS (cfg->cbb, ins);
+ MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
+
+ MONO_START_BB (cfg, end_bb);
+
+ return res;
+#else
+ /* Active path: fetch the entry through the rgctx lazy-fetch trampoline,
+  * which doesn't require creating new basic blocks here. */
return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
+#endif
}
+/*
+ * emit_generic_class_init:
+ *
+ * Emit IR which makes sure the class represented by the vtable in VTABLE_ARG
+ * is initialized at runtime. Either emits the OP_GENERIC_CLASS_INIT opcode
+ * (when the arch supports it and we are not compiling with LLVM), or an
+ * inline check of the MonoVTable initialized bit with an icall fallback.
+ *
+ * NOTE(review): declared as returning MonoInst* but the added body below ends
+ * without a return statement — confirm the return type against the full file.
+ */
static MonoInst*
emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
{
MonoInst *vtable_arg;
- MonoCallInst *call;
int context_used;
+ gboolean use_op_generic_class_init = FALSE;
context_used = mini_class_check_context_used (cfg, klass);
EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
}
- if (COMPILE_LLVM (cfg))
- call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
- else
- call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
-#ifdef MONO_ARCH_VTABLE_REG
- mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
- cfg->uses_vtable_reg = TRUE;
-#else
- NOT_IMPLEMENTED;
+#ifdef MONO_ARCH_HAVE_OP_GENERIC_CLASS_INIT
+ if (!COMPILE_LLVM (cfg))
+ use_op_generic_class_init = TRUE;
#endif
+
+ if (use_op_generic_class_init) {
+ MonoInst *ins;
+
+ /*
+ * Using an opcode instead of emitting IR here allows the hiding of the call inside the opcode,
+ * so this doesn't have to clobber any regs and it doesn't break basic blocks.
+ */
+ MONO_INST_NEW (cfg, ins, OP_GENERIC_CLASS_INIT);
+ ins->sreg1 = vtable_arg->dreg;
+ MONO_ADD_INS (cfg->cbb, ins);
+ } else {
+ /* Inline test of the MonoVTable 'initialized' bitfield; if the bit is
+  * already set, skip over the mono_generic_class_init icall. */
+ static int byte_offset = -1;
+ static guint8 bitmask;
+ int bits_reg, inited_reg;
+ MonoBasicBlock *inited_bb;
+ MonoInst *args [16];
+
+ /* Locate the bitfield once; cached in the static byte_offset/bitmask */
+ if (byte_offset < 0)
+ mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask);
+
+ bits_reg = alloc_ireg (cfg);
+ inited_reg = alloc_ireg (cfg);
+
+ MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, bits_reg, vtable_arg->dreg, byte_offset);
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, inited_reg, bits_reg, bitmask);
+
+ NEW_BBLOCK (cfg, inited_bb);
+
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, inited_reg, 0);
+ MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, inited_bb);
+
+ /* Slow path: call the runtime to perform the initialization */
+ args [0] = vtable_arg;
+ mono_emit_jit_icall (cfg, mono_generic_class_init, args);
+
+ MONO_START_BB (cfg, inited_bb);
+ }
+}
+
+
+/*
+ * emit_class_init:
+ *
+ * Emit code which runs the class initialization for KLASS.
+ * Under AOT this uses the inline generic-class-init sequence (PLT entry
+ * overhead makes it comparable); otherwise it emits an abs call to the
+ * class init trampoline for KLASS.
+ */
+static void
+emit_class_init (MonoCompile *cfg, MonoClass *klass)
+{
+ /* This could be used as a fallback if needed */
+ if (cfg->compile_aot) {
+ /* With the overhead of plt entries, the inline version is comparable in size/speed */
+ emit_generic_class_init (cfg, klass);
+ return;
+ }
+
+ mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, klass, helper_sig_class_init_trampoline, NULL);
+}
static void
}
static void
-save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check, MonoBasicBlock **out_bblock)
+save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check)
{
if (mini_get_debug_options ()->better_cast_details) {
int vtable_reg = alloc_preg (cfg);
}
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
- if (null_check) {
+ if (null_check)
MONO_START_BB (cfg, is_null_bb);
- if (out_bblock)
- *out_bblock = cfg->cbb;
- }
}
}
context_used = mini_class_check_context_used (cfg, array_class);
- save_cast_details (cfg, array_class, obj->dreg, FALSE, NULL);
+ save_cast_details (cfg, array_class, obj->dreg, FALSE);
MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
} else {
- save_cast_details (cfg, klass->element_class, obj_reg, FALSE, NULL);
+ save_cast_details (cfg, klass->element_class, obj_reg, FALSE);
mini_emit_class_check (cfg, eclass_reg, klass->element_class);
reset_cast_details (cfg);
}
}
static MonoInst*
-handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj, MonoBasicBlock **out_cbb)
+handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj)
{
MonoInst *addr, *klass_inst, *is_ref, *args[16];
MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
/* LDOBJ */
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
- *out_cbb = cfg->cbb;
-
return ins;
}
if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
if (known_instance_size) {
int size = mono_class_instance_size (klass);
+ if (size < sizeof (MonoObject))
+ g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (size));
}
if (managed_alloc) {
int size = mono_class_instance_size (klass);
+ if (size < sizeof (MonoObject))
+ g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (size));
* Returns NULL and set the cfg exception on error.
*/
static MonoInst*
-handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoBasicBlock **out_cbb)
+handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
{
MonoInst *alloc, *ins;
- *out_cbb = cfg->cbb;
-
if (mono_class_is_nullable (klass)) {
MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
MONO_START_BB (cfg, end_bb);
- *out_cbb = cfg->cbb;
-
return res;
} else {
alloc = handle_alloc (cfg, klass, TRUE, context_used);
#define is_complex_isinst(klass) ((klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
static MonoInst*
-emit_castclass_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args, MonoBasicBlock **out_bblock)
+emit_castclass_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args)
{
MonoMethod *mono_castclass;
MonoInst *res;
mono_castclass = mono_marshal_get_castclass_with_cache ();
- save_cast_details (cfg, klass, args [0]->dreg, TRUE, out_bblock);
+ save_cast_details (cfg, klass, args [0]->dreg, TRUE);
res = mono_emit_method_call (cfg, mono_castclass, args, NULL);
reset_cast_details (cfg);
- *out_bblock = cfg->cbb;
return res;
}
}
static MonoInst*
-emit_castclass_with_cache_nonshared (MonoCompile *cfg, MonoInst *obj, MonoClass *klass, MonoBasicBlock **out_bblock)
+emit_castclass_with_cache_nonshared (MonoCompile *cfg, MonoInst *obj, MonoClass *klass)
{
MonoInst *args [3];
int idx;
}
/* The wrapper doesn't inline well, so the bloat of inlining doesn't pay off. */
-
- return emit_castclass_with_cache (cfg, klass, args, out_bblock);
+ return emit_castclass_with_cache (cfg, klass, args);
}
/*
* Returns NULL and set the cfg exception on error.
*/
static MonoInst*
-handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, guint8 *ip, MonoBasicBlock **out_bb, int *inline_costs)
+handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, guint8 *ip, int *inline_costs)
{
MonoBasicBlock *is_null_bb;
int obj_reg = src->dreg;
int vtable_reg = alloc_preg (cfg);
int context_used;
MonoInst *klass_inst = NULL, *res;
- MonoBasicBlock *bblock;
-
- *out_bb = cfg->cbb;
context_used = mini_class_check_context_used (cfg, klass);
if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
- res = emit_castclass_with_cache_nonshared (cfg, src, klass, &bblock);
+ res = emit_castclass_with_cache_nonshared (cfg, src, klass);
(*inline_costs) += 2;
- *out_bb = cfg->cbb;
return res;
} else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
MonoMethod *mono_castclass;
mono_castclass = mono_marshal_get_castclass (klass);
iargs [0] = src;
- save_cast_details (cfg, klass, src->dreg, TRUE, &bblock);
+ save_cast_details (cfg, klass, src->dreg, TRUE);
costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
- iargs, ip, cfg->real_offset, TRUE, &bblock);
+ iargs, ip, cfg->real_offset, TRUE);
reset_cast_details (cfg);
CHECK_CFG_EXCEPTION;
g_assert (costs > 0);
(*inline_costs) += costs;
- *out_bb = cfg->cbb;
return src;
}
/* cache */
args [2] = cache_ins;
- return emit_castclass_with_cache (cfg, klass, args, out_bb);
+ return emit_castclass_with_cache (cfg, klass, args);
}
klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
- save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
+ save_cast_details (cfg, klass, obj_reg, FALSE);
if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
reset_cast_details (cfg);
- *out_bb = cfg->cbb;
-
return src;
exception_exit:
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
- save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
+ save_cast_details (cfg, klass, obj_reg, FALSE);
if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
#ifndef DISABLE_REMOTING
ceq->type = STACK_I4;
if (!is_i4) {
- load = mono_decompose_opcode (cfg, load, NULL);
- and = mono_decompose_opcode (cfg, and, NULL);
- cmp = mono_decompose_opcode (cfg, cmp, NULL);
- ceq = mono_decompose_opcode (cfg, ceq, NULL);
+ load = mono_decompose_opcode (cfg, load);
+ and = mono_decompose_opcode (cfg, and);
+ cmp = mono_decompose_opcode (cfg, cmp);
+ ceq = mono_decompose_opcode (cfg, ceq);
}
return ceq;
MonoInst *obj, *method_ins, *tramp_ins;
MonoDomain *domain;
guint8 **code_slot;
-
- // FIXME reenable optimisation for virtual case
- if (virtual)
- return NULL;
if (virtual) {
MonoMethod *invoke = mono_get_delegate_invoke (klass);
*/
static MonoInst*
handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_class,
- gboolean *ref_emit_widen, MonoBasicBlock **ref_bblock)
+ gboolean *ref_emit_widen)
{
MonoInst *ins = NULL;
- MonoBasicBlock *bblock = *ref_bblock;
gboolean emit_widen = *ref_emit_widen;
/*
emit_widen = FALSE;
if (mini_is_gsharedvt_type (cfg, fsig->ret)) {
- ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins, &bblock);
+ ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins);
} else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret)) {
MonoInst *add;
}
*ref_emit_widen = emit_widen;
- *ref_bblock = bblock;
return ins;
return FALSE;
}
- /*
- * CAS - do not inline methods with declarative security
- * Note: this has to be before any possible return TRUE;
- */
- if (mono_security_method_has_declsec (method))
- return FALSE;
-
#ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
if (mono_arch_is_soft_float ()) {
/* FIXME: */
g_assert (cfg->generic_sharing_context);
context_used = mini_class_check_context_used (cfg, klass);
g_assert (context_used);
- rgctx_ins = emit_get_gsharedvt_info (cfg, &klass->byval_arg, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
+ rgctx_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
} else {
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
}
static gboolean
-is_unsafe_mov_compatible (MonoClass *param_klass, MonoClass *return_klass)
+is_unsafe_mov_compatible (MonoCompile *cfg, MonoClass *param_klass, MonoClass *return_klass)
{
uint32_t align;
+ param_klass = mono_class_from_mono_type (mini_get_underlying_type (cfg, ¶m_klass->byval_arg));
+
//Only allow for valuetypes
if (!param_klass->valuetype || !return_klass->valuetype)
return FALSE;
MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
//Valuetypes that are semantically equivalent
- if (is_unsafe_mov_compatible (param_klass, return_klass))
+ if (is_unsafe_mov_compatible (cfg, param_klass, return_klass))
return args [0];
//Arrays of valuetypes that are semantically equivalent
- if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (param_klass->element_class, return_klass->element_class))
+ if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (cfg, param_klass->element_class, return_klass->element_class))
return args [0];
return NULL;
mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
MonoInst *ins = NULL;
-
+
static MonoClass *runtime_helpers_class = NULL;
if (! runtime_helpers_class)
runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
"System.Runtime.CompilerServices", "RuntimeHelpers");
if (cmethod->klass == mono_defaults.string_class) {
- if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count == 2) {
+ if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count + fsig->hasthis == 2) {
int dreg = alloc_ireg (cfg);
int index_reg = alloc_preg (cfg);
int add_reg = alloc_preg (cfg);
#endif
type_from_op (cfg, ins, NULL, NULL);
return ins;
- } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count == 1) {
+ } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
int dreg = alloc_ireg (cfg);
/* Decompose later to allow more optimizations */
EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
return ins;
- } else if (strcmp (cmethod->name, "InternalSetChar") == 0 && fsig->param_count == 3) {
- int mult_reg = alloc_preg (cfg);
- int add_reg = alloc_preg (cfg);
-
- /* The corlib functions check for oob already. */
- MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
- MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
- MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, MONO_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
- return cfg->cbb->last_ins;
} else
return NULL;
} else if (cmethod->klass == mono_defaults.object_class) {
- if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count == 1) {
+ if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count + fsig->hasthis == 1) {
int dreg = alloc_ireg_ref (cfg);
int vt_reg = alloc_preg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
} else
return NULL;
} else if (cmethod->klass == mono_defaults.array_class) {
- if (strcmp (cmethod->name, "GetGenericValueImpl") == 0 && fsig->param_count == 3 && !cfg->gsharedvt)
+ if (strcmp (cmethod->name, "GetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
return emit_array_generic_access (cfg, fsig, args, FALSE);
- else if (strcmp (cmethod->name, "SetGenericValueImpl") == 0 && fsig->param_count == 3 && !cfg->gsharedvt)
+ else if (strcmp (cmethod->name, "SetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
return emit_array_generic_access (cfg, fsig, args, TRUE);
#ifndef MONO_BIG_ARRAYS
* This is an inline version of GetLength/GetLowerBound(0) used frequently in
* Array methods.
*/
- else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count == 2) ||
- (strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count == 2)) &&
+ else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count + fsig->hasthis == 2) ||
+ (strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count + fsig->hasthis == 2)) &&
args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
int dreg = alloc_ireg (cfg);
int bounds_reg = alloc_ireg_mp (cfg);
if (cmethod->name [0] != 'g')
return NULL;
- if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count == 1) {
+ if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count + fsig->hasthis == 1) {
int dreg = alloc_ireg (cfg);
int vtable_reg = alloc_preg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
type_from_op (cfg, ins, NULL, NULL);
return ins;
- } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count == 1) {
+ } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
int dreg = alloc_ireg (cfg);
EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
}
if (opcode == OP_LOADI8_MEMBASE)
- ins = mono_decompose_opcode (cfg, ins, NULL);
+ ins = mono_decompose_opcode (cfg, ins);
emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
MONO_ADD_INS (cfg->cbb, ins);
if (opcode == OP_STOREI8_MEMBASE_REG)
- ins = mono_decompose_opcode (cfg, ins, NULL);
+ ins = mono_decompose_opcode (cfg, ins);
return ins;
}
* all inputs:
* http://everything2.com/?node_id=1051618
*/
- } else if ((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") ||
+ } else if (((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") ||
!strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) &&
- !strcmp (cmethod->klass->name_space, "XamCore.ObjCRuntime") &&
- !strcmp (cmethod->klass->name, "Selector")) {
+ !strcmp (cmethod->klass->name_space, "XamCore.ObjCRuntime") &&
+ !strcmp (cmethod->klass->name, "Selector")) ||
+ (!strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.iOS") &&
+ !strcmp (cmethod->klass->name_space, "ObjCRuntime") &&
+ !strcmp (cmethod->klass->name, "Selector"))
+ ) {
#ifdef MONO_ARCH_HAVE_OBJC_GET_SELECTOR
- if (!strcmp (cmethod->klass->name, "GetHandle") && fsig->param_count == 1 &&
+ if (!strcmp (cmethod->name, "GetHandle") && fsig->param_count == 1 &&
(args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
cfg->compile_aot) {
MonoInst *pi;
*/
static int
inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
- guchar *ip, guint real_offset, gboolean inline_always, MonoBasicBlock **out_cbb)
+ guchar *ip, guint real_offset, gboolean inline_always)
{
MonoInst *ins, *rvar = NULL;
MonoMethodHeader *cheader;
cfg->cbb = ebblock;
}
- if (out_cbb)
- *out_cbb = cfg->cbb;
-
if (rvar) {
/*
* If the inlined method contains only a throw, then the ret var is not
return fsig;
}
-/*
- * Returns TRUE if the JIT should abort inlining because "callee"
- * is influenced by security attributes.
- */
-static
-gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
-{
- guint32 result;
-
- if ((cfg->method != caller) && mono_security_method_has_declsec (callee)) {
- return TRUE;
- }
-
- result = mono_declsec_linkdemand (cfg->domain, caller, callee);
- if (result == MONO_JIT_SECURITY_OK)
- return FALSE;
-
- if (result == MONO_JIT_LINKDEMAND_ECMA) {
- /* Generate code to throw a SecurityException before the actual call/link */
- MonoSecurityManager *secman = mono_security_manager_get_methods ();
- MonoInst *args [2];
-
- NEW_ICONST (cfg, args [0], 4);
- NEW_METHODCONST (cfg, args [1], caller);
- mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
- } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
- /* don't hide previous results */
- mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
- cfg->exception_data = result;
- return TRUE;
- }
-
- return FALSE;
-}
-
static MonoMethod*
throw_exception (void)
{
}
static void
-ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
- MonoBasicBlock *bblock, unsigned char *ip)
+ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field)
{
/* we can't get the coreclr security level on wrappers since they don't have the attributes */
MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
}
static void
-ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
- MonoBasicBlock *bblock, unsigned char *ip)
+ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
{
/* we can't get the coreclr security level on wrappers since they don't have the attributes */
MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
return supported_tail_call;
}
-/* the JIT intercepts ldflda instructions to the tlsdata field in ThreadLocal<T> and redirects
- * it to the thread local value based on the tls_offset field. Every other kind of access to
- * the field causes an assert.
- */
-static gboolean
-is_magic_tls_access (MonoClassField *field)
-{
- if (strcmp (field->name, "tlsdata"))
- return FALSE;
- if (strcmp (field->parent->name, "ThreadLocal`1"))
- return FALSE;
- return field->parent->image == mono_defaults.corlib;
-}
-
/* emits the code needed to access a managed tls var (like ThreadStatic)
* with the value of the tls offset in offset_reg. thread_ins represents the MonoInternalThread
* pointer for the current thread.
MonoInst *addr;
int static_data_reg, array_reg, dreg;
int offset2_reg, idx_reg;
- // inlined access to the tls data
- // idx = (offset >> 24) - 1;
- // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
+ // inlined access to the tls data (see threads.c)
static_data_reg = alloc_ireg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
idx_reg = alloc_ireg (cfg);
- MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
- MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
array_reg = alloc_ireg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
offset2_reg = alloc_ireg (cfg);
- MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6);
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff);
dreg = alloc_ireg (cfg);
EMIT_NEW_BIALU (cfg, addr, OP_PADD, dreg, array_reg, offset2_reg);
return addr;
}
-/*
- * redirect access to the tlsdata field to the tls var given by the tls_offset field.
- * this address is cached per-method in cached_tls_addr.
- */
-static MonoInst*
-create_magic_tls_access (MonoCompile *cfg, MonoClassField *tls_field, MonoInst **cached_tls_addr, MonoInst *thread_local)
-{
- MonoInst *load, *addr, *temp, *store, *thread_ins;
- MonoClassField *offset_field;
-
- if (*cached_tls_addr) {
- EMIT_NEW_TEMPLOAD (cfg, addr, (*cached_tls_addr)->inst_c0);
- return addr;
- }
- thread_ins = mono_get_thread_intrinsic (cfg);
- offset_field = mono_class_get_field_from_name (tls_field->parent, "tls_offset");
-
- EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, offset_field->type, thread_local->dreg, offset_field->offset);
- if (thread_ins) {
- MONO_ADD_INS (cfg->cbb, thread_ins);
- } else {
- MonoMethod *thread_method;
- thread_method = mono_class_get_method_from_name (mono_get_thread_class(), "CurrentInternalThread_internal", 0);
- thread_ins = mono_emit_method_call (cfg, thread_method, NULL, NULL);
- }
- addr = emit_managed_static_data_access (cfg, thread_ins, load->dreg);
- addr->klass = mono_class_from_mono_type (tls_field->type);
- addr->type = STACK_MP;
- *cached_tls_addr = temp = mono_compile_create_var (cfg, type_from_stack_type (addr), OP_LOCAL);
- EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, addr);
-
- EMIT_NEW_TEMPLOAD (cfg, addr, temp->inst_c0);
- return addr;
-}
-
/*
* handle_ctor_call:
*
* Handle calls made to ctors from NEWOBJ opcodes.
- *
- * REF_BBLOCK will point to the current bblock after the call.
*/
static void
handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
- MonoInst **sp, guint8 *ip, MonoBasicBlock **ref_bblock, int *inline_costs)
+ MonoInst **sp, guint8 *ip, int *inline_costs)
{
MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
- MonoBasicBlock *bblock = *ref_bblock;
if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
mono_method_is_generic_sharable (cmethod, TRUE)) {
!mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
int costs;
- if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE, &bblock))) {
+ if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE))) {
cfg->real_offset += 5;
*inline_costs += costs - 5;
- *ref_bblock = bblock;
} else {
INLINE_FAILURE ("inline failure");
// FIXME-VT: Clean this up
{
MonoError error;
MonoInst *ins, **sp, **stack_start;
- MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
+ MonoBasicBlock *tblock = NULL, *init_localsbb = NULL;
MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
MonoMethod *cmethod, *method_definition;
MonoInst **arg_array;
int num_calls = 0, inline_costs = 0;
int breakpoint_id = 0;
guint num_args;
- MonoBoolean security, pinvoke;
- MonoSecurityManager* secman = NULL;
- MonoDeclSecurityActions actions;
GSList *class_inits = NULL;
gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
int context_used;
gboolean init_locals, seq_points, skip_dead_blocks;
gboolean sym_seq_points = FALSE;
- MonoInst *cached_tls_addr = NULL;
MonoDebugMethodInfo *minfo;
MonoBitSet *seq_point_locs = NULL;
MonoBitSet *seq_point_set_locs = NULL;
dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
- dont_verify |= mono_security_smcs_hack_enabled ();
-
/* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
if (cfg->gen_sdb_seq_points && cfg->method == method) {
minfo = mono_debug_lookup_method (method);
if (minfo) {
+ MonoSymSeqPoint *sps;
int i, n_il_offsets;
- int *il_offsets;
- int *line_numbers;
- mono_debug_symfile_get_line_numbers_full (minfo, NULL, NULL, &n_il_offsets, &il_offsets, &line_numbers, NULL, NULL, NULL, NULL);
+ mono_debug_get_seq_points (minfo, NULL, NULL, NULL, &sps, &n_il_offsets);
seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
sym_seq_points = TRUE;
for (i = 0; i < n_il_offsets; ++i) {
- if (il_offsets [i] < header->code_size)
- mono_bitset_set_fast (seq_point_locs, il_offsets [i]);
+ if (sps [i].il_offset < header->code_size)
+ mono_bitset_set_fast (seq_point_locs, sps [i].il_offset);
}
- g_free (il_offsets);
- g_free (line_numbers);
+ g_free (sps);
} else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (method->klass->image)) {
/* Methods without line number info like auto-generated property accessors */
seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
cfg->bb_entry = start_bblock;
start_bblock->cil_code = NULL;
start_bblock->cil_length = 0;
-#if defined(__native_client_codegen__)
- MONO_INST_NEW (cfg, ins, OP_NACL_GC_SAFE_POINT);
- ins->dreg = alloc_dreg (cfg, STACK_I4);
- MONO_ADD_INS (start_bblock, ins);
-#endif
/* EXIT BLOCK */
NEW_BBLOCK (cfg, end_bblock);
tblock->flags |= BB_EXCEPTION_UNSAFE;
}
-
/*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
while (p < end) {
printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
/* catch and filter blocks get the exception object on the stack */
if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
- MonoInst *dummy_use;
/* mostly like handle_stack_args (), but just sets the input args */
/* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
+ cfg->cbb = tblock;
+
+#ifdef MONO_CONTEXT_SET_LLVM_EXC_REG
+ /* The EH code passes in the exception in a register to both JITted and LLVM compiled code */
+ if (!cfg->compile_llvm) {
+ MONO_INST_NEW (cfg, ins, OP_GET_EX_OBJ);
+ ins->dreg = tblock->in_stack [0]->dreg;
+ MONO_ADD_INS (tblock, ins);
+ }
+#else
+ MonoInst *dummy_use;
+
/*
* Add a dummy use for the exvar so its liveness info will be
* correct.
*/
- cfg->cbb = tblock;
EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
+#endif
if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
}
/* FIRST CODE BLOCK */
- NEW_BBLOCK (cfg, bblock);
- bblock->cil_code = ip;
- cfg->cbb = bblock;
+ NEW_BBLOCK (cfg, tblock);
+ tblock->cil_code = ip;
+ cfg->cbb = tblock;
cfg->ip = ip;
- ADD_BBLOCK (cfg, bblock);
+ ADD_BBLOCK (cfg, tblock);
if (cfg->method == method) {
breakpoint_id = mono_debugger_method_has_breakpoint (method);
if (breakpoint_id) {
MONO_INST_NEW (cfg, ins, OP_BREAK);
- MONO_ADD_INS (bblock, ins);
+ MONO_ADD_INS (cfg->cbb, ins);
}
}
- if (mono_security_cas_enabled ())
- secman = mono_security_manager_get_methods ();
-
- security = (secman && mono_security_method_has_declsec (method));
- /* at this point having security doesn't mean we have any code to generate */
- if (security && (cfg->method == method)) {
- /* Only Demand, NonCasDemand and DemandChoice requires code generation.
- * And we do not want to enter the next section (with allocation) if we
- * have nothing to generate */
- security = mono_declsec_get_demands (method, &actions);
- }
-
- /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
- pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
- if (pinvoke) {
- MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
- if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
- MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
-
- /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
- if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
- pinvoke = FALSE;
- }
- if (custom)
- mono_custom_attrs_free (custom);
-
- if (pinvoke) {
- custom = mono_custom_attrs_from_class (wrapped->klass);
- if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
- pinvoke = FALSE;
- }
- if (custom)
- mono_custom_attrs_free (custom);
- }
- } else {
- /* not a P/Invoke after all */
- pinvoke = FALSE;
- }
- }
-
/* we use a separate basic block for the initialization code */
NEW_BBLOCK (cfg, init_localsbb);
cfg->bb_init = init_localsbb;
init_localsbb->real_offset = cfg->real_offset;
start_bblock->next_bb = init_localsbb;
- init_localsbb->next_bb = bblock;
+ init_localsbb->next_bb = cfg->cbb;
link_bblock (cfg, start_bblock, init_localsbb);
- link_bblock (cfg, init_localsbb, bblock);
+ link_bblock (cfg, init_localsbb, cfg->cbb);
cfg->cbb = init_localsbb;
*/
}
- /* at this point we know, if security is TRUE, that some code needs to be generated */
- if (security && (cfg->method == method)) {
- MonoInst *args [2];
-
- cfg->stat_cas_demand_generation++;
-
- if (actions.demand.blob) {
- /* Add code for SecurityAction.Demand */
- EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
- EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
- /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
- mono_emit_method_call (cfg, secman->demand, args, NULL);
- }
- if (actions.noncasdemand.blob) {
- /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
- /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
- EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
- EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
- /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
- mono_emit_method_call (cfg, secman->demand, args, NULL);
- }
- if (actions.demandchoice.blob) {
- /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
- EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
- EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
- /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
- mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
- }
- }
-
- /* we must Demand SecurityPermission.Unmanaged before p/invoking */
- if (pinvoke) {
- mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
- }
-
if (mono_security_core_clr_enabled ()) {
/* check if this is native code, e.g. an icall or a p/invoke */
if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
}
if (cfg->method == method)
- mono_debug_init_method (cfg, bblock, breakpoint_id);
+ mono_debug_init_method (cfg, cfg->cbb, breakpoint_id);
for (n = 0; n < header->num_locals; ++n) {
if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
ins_flag = 0;
start_new_bblock = 0;
- cfg->cbb = bblock;
while (ip < end) {
if (cfg->method == method)
cfg->real_offset = ip - header->code;
cfg->ip = ip;
context_used = 0;
-
+
if (start_new_bblock) {
- bblock->cil_length = ip - bblock->cil_code;
+ cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
if (start_new_bblock == 2) {
g_assert (ip == tblock->cil_code);
} else {
GET_BBLOCK (cfg, tblock, ip);
}
- bblock->next_bb = tblock;
- bblock = tblock;
- cfg->cbb = bblock;
+ cfg->cbb->next_bb = tblock;
+ cfg->cbb = tblock;
start_new_bblock = 0;
- for (i = 0; i < bblock->in_scount; ++i) {
+ for (i = 0; i < cfg->cbb->in_scount; ++i) {
if (cfg->verbose_level > 3)
- printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
- EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
+ printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
+ EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
*sp++ = ins;
}
if (class_inits)
g_slist_free (class_inits);
class_inits = NULL;
} else {
- if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
- link_bblock (cfg, bblock, tblock);
+ if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != cfg->cbb)) {
+ link_bblock (cfg, cfg->cbb, tblock);
if (sp != stack_start) {
handle_stack_args (cfg, stack_start, sp - stack_start);
sp = stack_start;
CHECK_UNVERIFIABLE (cfg);
}
- bblock->next_bb = tblock;
- bblock = tblock;
- cfg->cbb = bblock;
- for (i = 0; i < bblock->in_scount; ++i) {
+ cfg->cbb->next_bb = tblock;
+ cfg->cbb = tblock;
+ for (i = 0; i < cfg->cbb->in_scount; ++i) {
if (cfg->verbose_level > 3)
- printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
- EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
+ printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
+ EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
*sp++ = ins;
}
g_slist_free (class_inits);
if (ip_offset + op_size == bb->end) {
MONO_INST_NEW (cfg, ins, OP_NOP);
- MONO_ADD_INS (bblock, ins);
+ MONO_ADD_INS (cfg->cbb, ins);
start_new_bblock = 1;
}
* Backward branches are handled at the end of method-to-ir ().
*/
gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
+ gboolean sym_seq_point = sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code);
/* Avoid sequence points on empty IL like .volatile */
// FIXME: Enable this
//if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
- if (sp != stack_start)
+ if ((sp != stack_start) && !sym_seq_point)
ins->flags |= MONO_INST_NONEMPTY_STACK;
MONO_ADD_INS (cfg->cbb, ins);
mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
}
- bblock->real_offset = cfg->real_offset;
+ cfg->cbb->real_offset = cfg->real_offset;
if ((cfg->method == method) && cfg->coverage_info) {
guint32 cil_offset = ip - header->code;
}
if (cfg->verbose_level > 3)
- printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
+ printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
switch (*ip) {
case CEE_NOP:
else
MONO_INST_NEW (cfg, ins, OP_NOP);
ip++;
- MONO_ADD_INS (bblock, ins);
+ MONO_ADD_INS (cfg->cbb, ins);
break;
case CEE_BREAK:
if (should_insert_brekpoint (cfg->method)) {
MONO_INST_NEW (cfg, ins, OP_NOP);
}
ip++;
- MONO_ADD_INS (bblock, ins);
+ MONO_ADD_INS (cfg->cbb, ins);
break;
case CEE_LDARG_0:
case CEE_LDARG_1:
ins->dreg = alloc_dreg (cfg, STACK_I8);
++ip;
ins->inst_l = (gint64)read64 (ip);
- MONO_ADD_INS (bblock, ins);
+ MONO_ADD_INS (cfg->cbb, ins);
ip += 8;
*sp++ = ins;
break;
ins->type = cfg->r4_stack_type;
ins->dreg = alloc_dreg (cfg, STACK_R8);
ins->inst_p0 = f;
- MONO_ADD_INS (bblock, ins);
+ MONO_ADD_INS (cfg->cbb, ins);
}
++ip;
readr4 (ip, f);
ins->type = STACK_R8;
ins->dreg = alloc_dreg (cfg, STACK_R8);
ins->inst_p0 = d;
- MONO_ADD_INS (bblock, ins);
+ MONO_ADD_INS (cfg->cbb, ins);
}
++ip;
readr8 (ip, d);
if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
GENERIC_SHARING_FAILURE (CEE_JMP);
- if (mono_security_cas_enabled ())
- CHECK_CFG_EXCEPTION;
-
emit_instrumentation_call (cfg, mono_profiler_method_leave);
if (ARCH_HAVE_OP_TAIL_CALL) {
mono_arch_emit_call (cfg, call);
cfg->param_area = MAX(cfg->param_area, call->stack_usage);
- MONO_ADD_INS (bblock, (MonoInst*)call);
+ MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
} else {
for (i = 0; i < num_args; ++i)
/* Prevent arguments from being optimized away */
MONO_INST_NEW_CALL (cfg, call, OP_JMP);
ins = (MonoInst*)call;
ins->inst_p0 = cmethod;
- MONO_ADD_INS (bblock, ins);
+ MONO_ADD_INS (cfg->cbb, ins);
}
ip += 5;
}
if (mono_security_core_clr_enabled ())
- ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
+ ensure_method_is_allowed_to_call_method (cfg, method, cil_method);
if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
/* MS.NET seems to silently convert this to a callvirt */
}
*/
- if (mono_security_cas_enabled ()) {
- if (check_linkdemand (cfg, method, cmethod))
- INLINE_FAILURE ("linkdemand");
- CHECK_CFG_EXCEPTION;
- }
-
if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
g_assert_not_reached ();
} else if (cmethod->klass->image != mono_defaults.corlib && !(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !cmethod->klass->valuetype) {
/* 'The type parameter is instantiated as a reference type' case below. */
} else {
- ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, &emit_widen, &bblock);
+ ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, &emit_widen);
CHECK_CFG_EXCEPTION;
g_assert (ins);
goto call_end;
MONO_START_BB (cfg, is_ref_bb);
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
ins->klass = constrained_class;
- sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class), &bblock);
+ sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
MONO_START_BB (cfg, end_bb);
- bblock = end_bb;
+ cfg->cbb = end_bb;
nonbox_call->dreg = ins->dreg;
} else {
*/
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
ins->klass = constrained_class;
- sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class), &bblock);
+ sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
CHECK_CFG_EXCEPTION;
} else if (!constrained_class->valuetype) {
int dreg = alloc_ireg_ref (cfg);
/* Enum implements some interfaces, so treat this as the first case */
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
ins->klass = constrained_class;
- sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class), &bblock);
+ sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
CHECK_CFG_EXCEPTION;
}
}
if (check_call_signature (cfg, fsig, sp))
UNVERIFIED;
-#ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
delegate_invoke = TRUE;
-#endif
if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
- bblock = cfg->cbb;
if (!MONO_TYPE_IS_VOID (fsig->ret)) {
type_to_eval_stack_type ((cfg), fsig->ret, ins);
emit_widen = FALSE;
} else {
this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
- MONO_ADD_INS (bblock, store);
+ MONO_ADD_INS (cfg->cbb, store);
/* FIXME: This should be a managed pointer */
this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* Conversion to a JIT intrinsic */
if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
- bblock = cfg->cbb;
if (!MONO_TYPE_IS_VOID (fsig->ret)) {
type_to_eval_stack_type ((cfg), fsig->ret, ins);
emit_widen = FALSE;
always = TRUE;
}
- costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always, &bblock);
+ costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always);
if (costs) {
cfg->real_offset += 5;
for (i = 0; i < n; ++i)
EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
MONO_INST_NEW (cfg, ins, OP_BR);
- MONO_ADD_INS (bblock, ins);
+ MONO_ADD_INS (cfg->cbb, ins);
tblock = start_bblock->out_bb [0];
- link_bblock (cfg, bblock, tblock);
+ link_bblock (cfg, cfg->cbb, tblock);
ins->inst_target_bb = tblock;
start_new_bblock = 1;
/* skip the CEE_RET, too */
- if (ip_in_bb (cfg, bblock, ip + 5))
+ if (ip_in_bb (cfg, cfg->cbb, ip + 5))
skip_ret = TRUE;
push_res = FALSE;
goto call_end;
/* Inline the wrapper */
wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
- costs = inline_method (cfg, wrapper, fsig, sp, ip, cfg->real_offset, TRUE, &bblock);
+ costs = inline_method (cfg, wrapper, fsig, sp, ip, cfg->real_offset, TRUE);
g_assert (costs > 0);
cfg->real_offset += 5;
ins = (MonoInst*)call;
ins->inst_p0 = cmethod;
ins->inst_p1 = arg_array [0];
- MONO_ADD_INS (bblock, ins);
- link_bblock (cfg, bblock, end_bblock);
+ MONO_ADD_INS (cfg->cbb, ins);
+ link_bblock (cfg, cfg->cbb, end_bblock);
start_new_bblock = 1;
// FIXME: Eliminate unreachable epilogs
* only reachable from this call.
*/
GET_BBLOCK (cfg, tblock, ip + 5);
- if (tblock == bblock || tblock->in_count == 0)
+ if (tblock == cfg->cbb || tblock->in_count == 0)
skip_ret = TRUE;
push_res = FALSE;
imt_arg, vtable_arg);
if (tail_call) {
- link_bblock (cfg, bblock, end_bblock);
+ link_bblock (cfg, cfg->cbb, end_bblock);
start_new_bblock = 1;
// FIXME: Eliminate unreachable epilogs
* only reachable from this call.
*/
GET_BBLOCK (cfg, tblock, ip + 5);
- if (tblock == bblock || tblock->in_count == 0)
+ if (tblock == cfg->cbb || tblock->in_count == 0)
skip_ret = TRUE;
push_res = FALSE;
}
MONO_INST_NEW (cfg, ins, OP_BR);
ip++;
ins->inst_target_bb = end_bblock;
- MONO_ADD_INS (bblock, ins);
- link_bblock (cfg, bblock, end_bblock);
+ MONO_ADD_INS (cfg->cbb, ins);
+ link_bblock (cfg, cfg->cbb, end_bblock);
start_new_bblock = 1;
break;
case CEE_BR_S:
target = ip + 1 + (signed char)(*ip);
++ip;
GET_BBLOCK (cfg, tblock, target);
- link_bblock (cfg, bblock, tblock);
+ link_bblock (cfg, cfg->cbb, tblock);
ins->inst_target_bb = tblock;
if (sp != stack_start) {
handle_stack_args (cfg, stack_start, sp - stack_start);
sp = stack_start;
CHECK_UNVERIFIABLE (cfg);
}
- MONO_ADD_INS (bblock, ins);
+ MONO_ADD_INS (cfg->cbb, ins);
start_new_bblock = 1;
inline_costs += BRANCH_COST;
break;
target = ip + 4 + (gint32)read32(ip);
ip += 4;
GET_BBLOCK (cfg, tblock, target);
- link_bblock (cfg, bblock, tblock);
+ link_bblock (cfg, cfg->cbb, tblock);
ins->inst_target_bb = tblock;
if (sp != stack_start) {
handle_stack_args (cfg, stack_start, sp - stack_start);
CHECK_UNVERIFIABLE (cfg);
}
- MONO_ADD_INS (bblock, ins);
+ MONO_ADD_INS (cfg->cbb, ins);
start_new_bblock = 1;
inline_costs += BRANCH_COST;
sp--;
GET_BBLOCK (cfg, tblock, target);
- link_bblock (cfg, bblock, tblock);
+ link_bblock (cfg, cfg->cbb, tblock);
GET_BBLOCK (cfg, tblock, ip);
- link_bblock (cfg, bblock, tblock);
+ link_bblock (cfg, cfg->cbb, tblock);
if (sp != stack_start) {
handle_stack_args (cfg, stack_start, sp - stack_start);
ins->type = STACK_I8;
ins->dreg = alloc_dreg (cfg, STACK_I8);
ins->inst_l = 0;
- MONO_ADD_INS (bblock, ins);
+ MONO_ADD_INS (cfg->cbb, ins);
cmp->opcode = OP_LCOMPARE;
cmp->sreg2 = ins->dreg;
}
#endif
- MONO_ADD_INS (bblock, cmp);
+ MONO_ADD_INS (cfg->cbb, cmp);
MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
type_from_op (cfg, ins, sp [0], NULL);
- MONO_ADD_INS (bblock, ins);
+ MONO_ADD_INS (cfg->cbb, ins);
ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
GET_BBLOCK (cfg, tblock, target);
ins->inst_true_bb = tblock;
* Link the current bb with the targets as well, so handle_stack_args
* will set their in_stack correctly.
*/
- link_bblock (cfg, bblock, default_bblock);
+ link_bblock (cfg, cfg->cbb, default_bblock);
for (i = 0; i < n; ++i)
- link_bblock (cfg, bblock, targets [i]);
+ link_bblock (cfg, cfg->cbb, targets [i]);
handle_stack_args (cfg, stack_start, sp - stack_start);
sp = stack_start;
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
- bblock = cfg->cbb;
for (i = 0; i < n; ++i)
- link_bblock (cfg, bblock, targets [i]);
+ link_bblock (cfg, cfg->cbb, targets [i]);
table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
table->table = targets;
if (*ip == CEE_LDIND_R4)
ins->type = cfg->r4_stack_type;
ins->flags |= ins_flag;
- MONO_ADD_INS (bblock, ins);
+ MONO_ADD_INS (cfg->cbb, ins);
*sp++ = ins;
if (ins_flag & MONO_INST_VOLATILE) {
/* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
ins->flags |= ins_flag;
ins_flag = 0;
- MONO_ADD_INS (bblock, ins);
+ MONO_ADD_INS (cfg->cbb, ins);
if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
emit_write_barrier (cfg, sp [0], sp [1]);
MONO_ADD_INS ((cfg)->cbb, (ins));
- *sp++ = mono_decompose_opcode (cfg, ins, &bblock);
+ *sp++ = mono_decompose_opcode (cfg, ins);
ip++;
break;
case CEE_ADD:
}
MONO_ADD_INS ((cfg)->cbb, (ins));
- *sp++ = mono_decompose_opcode (cfg, ins, &bblock);
+ *sp++ = mono_decompose_opcode (cfg, ins);
ip++;
break;
case CEE_NEG:
break;
}
- if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
+ if ((loc_index != -1) && ip_in_bb (cfg, cfg->cbb, ip + 5)) {
CHECK_LOCAL (loc_index);
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
/* Optimize the ldobj+stobj combination */
/* The reference case ends up being a load+store anyway */
/* Skip this if the operation is volatile. */
- if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
+ if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
CHECK_STACK (1);
sp --;
*sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
} else {
- if (bblock->out_of_line) {
+ if (cfg->cbb->out_of_line) {
MonoInst *iargs [2];
if (image == mono_defaults.corlib) {
if (cfg->compile_aot) {
NEW_LDSTRCONST (cfg, ins, image, n);
*sp = ins;
- MONO_ADD_INS (bblock, ins);
+ MONO_ADD_INS (cfg->cbb, ins);
}
else {
NEW_PCONST (cfg, ins, NULL);
OUT_OF_MEMORY_FAILURE;
*sp = ins;
- MONO_ADD_INS (bblock, ins);
+ MONO_ADD_INS (cfg->cbb, ins);
}
}
}
context_used = mini_method_check_context_used (cfg, cmethod);
- if (mono_security_cas_enabled ()) {
- if (check_linkdemand (cfg, method, cmethod))
- INLINE_FAILURE ("linkdemand");
- CHECK_CFG_EXCEPTION;
- } else if (mono_security_core_clr_enabled ()) {
- ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
- }
+ if (mono_security_core_clr_enabled ())
+ ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
emit_generic_class_init (cfg, cmethod->klass);
* Generate smaller code for the common newobj <exception> instruction in
* argument checking code.
*/
- if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
+ if (cfg->cbb->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
is_exception_class (cmethod->klass) && n <= 2 &&
((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
* As a workaround, we call class cctors before allocating objects.
*/
if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
- mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, cmethod->klass, helper_sig_class_init_trampoline, NULL);
+ emit_class_init (cfg, cmethod->klass);
if (cfg->verbose_level > 2)
printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
class_inits = g_slist_prepend (class_inits, cmethod->klass);
MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
/* Now call the actual ctor */
- handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &bblock, &inline_costs);
+ handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &inline_costs);
CHECK_CFG_EXCEPTION;
}
if (sp [0]->type != STACK_OBJ)
UNVERIFIED;
- ins = handle_castclass (cfg, klass, *sp, ip, &bblock, &inline_costs);
+ ins = handle_castclass (cfg, klass, *sp, ip, &inline_costs);
CHECK_CFG_EXCEPTION;
*sp ++ = ins;
iargs [0] = sp [0];
costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
- iargs, ip, cfg->real_offset, TRUE, &bblock);
+ iargs, ip, cfg->real_offset, TRUE);
CHECK_CFG_EXCEPTION;
g_assert (costs > 0);
else {
ins = handle_isinst (cfg, klass, *sp, context_used);
CHECK_CFG_EXCEPTION;
- bblock = cfg->cbb;
*sp ++ = ins;
ip += 5;
}
context_used = mini_class_check_context_used (cfg, klass);
if (mini_is_gsharedvt_klass (cfg, klass)) {
- res = handle_unbox_gsharedvt (cfg, klass, *sp, &bblock);
+ res = handle_unbox_gsharedvt (cfg, klass, *sp);
inline_costs += 2;
} else if (generic_class_is_reference_type (cfg, klass)) {
- res = handle_castclass (cfg, klass, *sp, ip, &bblock, &inline_costs);
+ res = handle_castclass (cfg, klass, *sp, ip, &inline_costs);
CHECK_CFG_EXCEPTION;
} else if (mono_class_is_nullable (klass)) {
res = handle_unbox_nullable (cfg, *sp, klass, context_used);
ip [5] == CEE_PREFIX1 &&
ip [6] == CEE_CONSTRAINED_ &&
ip [11] == CEE_CALLVIRT &&
- ip_in_bb (cfg, bblock, ip + 5 + 6 + 5) &&
+ ip_in_bb (cfg, cfg->cbb, ip + 5 + 6 + 5) &&
mono_class_is_enum (klass) &&
(enum_class = mini_get_class (method, read32 (ip + 7), generic_context)) &&
(has_flag = mini_get_method (cfg, method, read32 (ip + 12), NULL, generic_context)) &&
// FIXME: LLVM can't handle the inconsistent bb linking
if (!mono_class_is_nullable (klass) &&
- ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
+ ip + 5 < end && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
(ip [5] == CEE_BRTRUE ||
ip [5] == CEE_BRTRUE_S ||
ip [5] == CEE_BRFALSE ||
ip += 5;
if (cfg->verbose_level > 3) {
- printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
+ printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
printf ("<box+brtrue opt>\n");
}
break;
}
- *sp++ = handle_box (cfg, val, klass, context_used, &bblock);
+ *sp++ = handle_box (cfg, val, klass, context_used);
CHECK_CFG_EXCEPTION;
ip += 5;
FIELD_ACCESS_FAILURE (method, field);
mono_class_init (klass);
- if (is_instance && *ip != CEE_LDFLDA && is_magic_tls_access (field))
- UNVERIFIED;
-
/* if the class is Critical then transparent code cannot access it's fields */
if (!is_instance && mono_security_core_clr_enabled ())
- ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
+ ensure_method_is_allowed_to_access_field (cfg, method, field);
/* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
if (mono_security_core_clr_enabled ())
- ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
+ ensure_method_is_allowed_to_access_field (cfg, method, field);
*/
/*
if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
- iargs, ip, cfg->real_offset, TRUE, &bblock);
+ iargs, ip, cfg->real_offset, TRUE);
CHECK_CFG_EXCEPTION;
g_assert (costs > 0);
EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
- iargs, ip, cfg->real_offset, TRUE, &bblock);
+ iargs, ip, cfg->real_offset, TRUE);
CHECK_CFG_EXCEPTION;
g_assert (costs > 0);
}
if (op == CEE_LDFLDA) {
- if (is_magic_tls_access (field)) {
- GSHAREDVT_FAILURE (*ip);
- ins = sp [0];
- *sp++ = create_magic_tls_access (cfg, field, &cached_tls_addr, ins);
- } else {
- if (sp [0]->type == STACK_OBJ) {
- MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
- MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
- }
+ if (sp [0]->type == STACK_OBJ) {
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
+ MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
+ }
- dreg = alloc_ireg_mp (cfg);
+ dreg = alloc_ireg_mp (cfg);
- if (mini_is_gsharedvt_klass (cfg, klass)) {
- MonoInst *offset_ins;
+ if (mini_is_gsharedvt_klass (cfg, klass)) {
+ MonoInst *offset_ins;
- offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
- EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
- } else {
- EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
- }
- ins->klass = mono_class_from_mono_type (field->type);
- ins->type = STACK_MP;
- *sp++ = ins;
+ offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
+ EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
+ } else {
+ EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
}
+ ins->klass = mono_class_from_mono_type (field->type);
+ ins->type = STACK_MP;
+ *sp++ = ins;
} else {
MonoInst *load;
}
/* STATIC CASE */
-
- /*
- * We can only support shared generic static
- * field access on architectures where the
- * trampoline code has been extended to handle
- * the generic class init.
- */
-#ifndef MONO_ARCH_VTABLE_REG
- GENERIC_SHARING_FAILURE (op);
-#endif
-
context_used = mini_class_check_context_used (cfg, klass);
ftype = mono_field_get_type (field);
GSHAREDVT_FAILURE (op);
- // offset &= 0x7fffffff;
- // idx = (offset >> 24) - 1;
- // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
MONO_ADD_INS (cfg->cbb, thread_ins);
static_data_reg = alloc_ireg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
offset_reg = ins->dreg;
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
idx_reg = alloc_ireg (cfg);
- MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
- MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
array_reg = alloc_ireg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
offset2_reg = alloc_ireg (cfg);
- MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6);
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff);
dreg = alloc_ireg (cfg);
EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
} else {
offset = (gsize)addr & 0x7fffffff;
- idx = (offset >> 24) - 1;
+ idx = offset & 0x3f;
array_reg = alloc_ireg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
dreg = alloc_ireg (cfg);
- EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
+ EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, ((offset >> 6) & 0x1ffffff));
}
} else if ((cfg->opt & MONO_OPT_SHARED) ||
(cfg->compile_aot && is_special_static) ||
if (!addr) {
if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
if (!(g_slist_find (class_inits, klass))) {
- mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, klass, helper_sig_class_init_trampoline, NULL);
+ emit_class_init (cfg, klass);
if (cfg->verbose_level > 2)
printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
class_inits = g_slist_prepend (class_inits, klass);
ins->type = STACK_I4;
ins->dreg = alloc_ireg (cfg);
MONO_ADD_INS (cfg->cbb, ins);
- *sp = mono_decompose_opcode (cfg, ins, &bblock);
+ *sp = mono_decompose_opcode (cfg, ins);
}
if (context_used) {
* for small sizes open code the memcpy
* ensure the rva field is big enough
*/
- if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
+ if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, cfg->cbb, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
MonoMethod *memcpy_method = get_memcpy_method ();
MonoInst *iargs [3];
int add_reg = alloc_ireg_mp (cfg);
ins->sreg1 = sp [0]->dreg;
ins->dreg = alloc_freg (cfg);
ins->type = STACK_R8;
- MONO_ADD_INS (bblock, ins);
+ MONO_ADD_INS (cfg->cbb, ins);
- *sp++ = mono_decompose_opcode (cfg, ins, &bblock);
+ *sp++ = mono_decompose_opcode (cfg, ins);
++ip;
break;
}
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
ins->type = STACK_MP;
+ ins->klass = klass;
*sp++ = ins;
ip += 5;
break;
EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
} else {
- if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
+ if ((ip + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
(cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
(cmethod->klass == mono_defaults.systemtype_class) &&
--sp;
ins->sreg1 = sp [0]->dreg;
ip++;
- bblock->out_of_line = TRUE;
- MONO_ADD_INS (bblock, ins);
+ cfg->cbb->out_of_line = TRUE;
+ MONO_ADD_INS (cfg->cbb, ins);
MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
- MONO_ADD_INS (bblock, ins);
+ MONO_ADD_INS (cfg->cbb, ins);
sp = stack_start;
- link_bblock (cfg, bblock, end_bblock);
+ link_bblock (cfg, cfg->cbb, end_bblock);
start_new_bblock = 1;
break;
case CEE_ENDFINALLY:
if (sp != stack_start)
emit_seq_point (cfg, method, ip, FALSE, FALSE);
MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
- MONO_ADD_INS (bblock, ins);
+ MONO_ADD_INS (cfg->cbb, ins);
ip++;
start_new_bblock = 1;
MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
MONO_START_BB (cfg, dont_throw);
- bblock = cfg->cbb;
}
}
clause = tmp->data;
tblock = cfg->cil_offset_to_bb [clause->handler_offset];
g_assert (tblock);
- link_bblock (cfg, bblock, tblock);
+ link_bblock (cfg, cfg->cbb, tblock);
MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
ins->inst_target_bb = tblock;
ins->inst_eh_block = clause;
- MONO_ADD_INS (bblock, ins);
- bblock->has_call_handler = 1;
+ MONO_ADD_INS (cfg->cbb, ins);
+ cfg->cbb->has_call_handler = 1;
if (COMPILE_LLVM (cfg)) {
MonoBasicBlock *target_bb;
}
MONO_INST_NEW (cfg, ins, OP_BR);
- MONO_ADD_INS (bblock, ins);
+ MONO_ADD_INS (cfg->cbb, ins);
GET_BBLOCK (cfg, tblock, target);
- link_bblock (cfg, bblock, tblock);
+ link_bblock (cfg, cfg->cbb, tblock);
ins->inst_target_bb = tblock;
start_new_bblock = 1;
MONO_INST_NEW (cfg, ins, OP_BR);
ins->inst_target_bb = end_bblock;
- MONO_ADD_INS (bblock, ins);
- link_bblock (cfg, bblock, end_bblock);
+ MONO_ADD_INS (cfg->cbb, ins);
+ link_bblock (cfg, cfg->cbb, end_bblock);
start_new_bblock = 1;
ip += 6;
break;
ins = handle_cisinst (cfg, klass, sp [0]);
else
ins = handle_ccastclass (cfg, klass, sp [0]);
- bblock = cfg->cbb;
*sp++ = ins;
ip += 6;
break;
case CEE_MONO_RESTORE_LMF:
#ifdef MONO_ARCH_HAVE_LMF_OPS
MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
- MONO_ADD_INS (bblock, ins);
+ MONO_ADD_INS (cfg->cbb, ins);
cfg->need_lmf_area = TRUE;
#endif
ip += 2;
inline_costs += 10 * num_calls++;
break;
case CEE_MONO_NOT_TAKEN:
- bblock->out_of_line = TRUE;
+ cfg->cbb->out_of_line = TRUE;
ip += 2;
break;
case CEE_MONO_TLS: {
}
}
ins->type = STACK_PTR;
- MONO_ADD_INS (bblock, ins);
+ MONO_ADD_INS (cfg->cbb, ins);
*sp++ = ins;
ip += 6;
break;
sp -= 2;
ins->sreg1 = sp [0]->dreg;
ins->sreg2 = sp [1]->dreg;
- MONO_ADD_INS (bblock, ins);
+ MONO_ADD_INS (cfg->cbb, ins);
cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
- if (next_bb) {
+ if (next_bb)
MONO_START_BB (cfg, next_bb);
- bblock = cfg->cbb;
- }
ip += 2;
break;
}
cmp->opcode = OP_FCOMPARE;
else
cmp->opcode = OP_ICOMPARE;
- MONO_ADD_INS (bblock, cmp);
+ MONO_ADD_INS (cfg->cbb, cmp);
ins->type = STACK_I4;
ins->dreg = alloc_dreg (cfg, ins->type);
type_from_op (cfg, ins, arg1, arg2);
ins->sreg2 = cmp->sreg2;
NULLIFY_INS (cmp);
}
- MONO_ADD_INS (bblock, ins);
+ MONO_ADD_INS (cfg->cbb, ins);
*sp++ = ins;
ip += 2;
break;
if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
METHOD_ACCESS_FAILURE (method, cil_method);
- if (mono_security_cas_enabled ()) {
- if (check_linkdemand (cfg, method, cmethod))
- INLINE_FAILURE ("linkdemand");
- CHECK_CFG_EXCEPTION;
- } else if (mono_security_core_clr_enabled ()) {
- ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
- }
+ if (mono_security_core_clr_enabled ())
+ ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
/*
* Optimize the common case of ldftn+delegate creation
*/
- if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
+ if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
MonoInst *target_ins, *handle_ins;
target_ins = sp [-1];
if (mono_security_core_clr_enabled ())
- ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
+ ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
/*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
}
}
-#if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
/* FIXME: SGEN support */
if (invoke_context_used == 0) {
ip += 6;
if (cfg->verbose_level > 3)
- g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
+ g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, FALSE))) {
sp --;
*sp = handle_ins;
}
ip -= 6;
}
-#endif
}
}
context_used = mini_method_check_context_used (cfg, cmethod);
- if (mono_security_cas_enabled ()) {
- if (check_linkdemand (cfg, method, cmethod))
- INLINE_FAILURE ("linkdemand");
- CHECK_CFG_EXCEPTION;
- } else if (mono_security_core_clr_enabled ()) {
- ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
- }
+ if (mono_security_core_clr_enabled ())
+ ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
/*
* Optimize the common case of ldvirtftn+delegate creation
*/
- if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
+ if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
MonoInst *target_ins, *handle_ins;
MonoMethod *invoke;
int invoke_context_used;
+ gboolean is_virtual = cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL;
invoke = mono_get_delegate_invoke (ctor_method->klass);
if (!invoke || !mono_method_signature (invoke))
target_ins = sp [-1];
if (mono_security_core_clr_enabled ())
- ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
+ ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
-#if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
/* FIXME: SGEN support */
if (invoke_context_used == 0) {
ip += 6;
if (cfg->verbose_level > 3)
- g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
- if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, TRUE))) {
+ g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
+ if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, is_virtual))) {
sp -= 2;
*sp = handle_ins;
CHECK_CFG_EXCEPTION;
}
ip -= 6;
}
-#endif
}
}
UNVERIFIED;
MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
ins->sreg1 = (*sp)->dreg;
- MONO_ADD_INS (bblock, ins);
+ MONO_ADD_INS (cfg->cbb, ins);
start_new_bblock = 1;
ip += 2;
}
}
- bblock->flags |= BB_EXCEPTION_UNSAFE;
+ cfg->cbb->flags |= BB_EXCEPTION_UNSAFE;
if (handler_offset == -1)
UNVERIFIED;
EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
MONO_INST_NEW (cfg, ins, OP_RETHROW);
ins->sreg1 = load->dreg;
- MONO_ADD_INS (bblock, ins);
+ MONO_ADD_INS (cfg->cbb, ins);
MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
- MONO_ADD_INS (bblock, ins);
+ MONO_ADD_INS (cfg->cbb, ins);
sp = stack_start;
- link_bblock (cfg, bblock, end_bblock);
+ link_bblock (cfg, cfg->cbb, end_bblock);
start_new_bblock = 1;
ip += 2;
break;
if (start_new_bblock != 1)
UNVERIFIED;
- bblock->cil_length = ip - bblock->cil_code;
- if (bblock->next_bb) {
+ cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
+ if (cfg->cbb->next_bb) {
/* This could already be set because of inlining, #693905 */
- MonoBasicBlock *bb = bblock;
+ MonoBasicBlock *bb = cfg->cbb;
while (bb->next_bb)
bb = bb->next_bb;
bb->next_bb = end_bblock;
} else {
- bblock->next_bb = end_bblock;
+ cfg->cbb->next_bb = end_bblock;
}
if (cfg->method == method && cfg->domainvar) {