* Dietmar Maurer (dietmar@ximian.com)
*
* (C) 2002 Ximian, Inc.
+ * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
+ * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
*/
#include <config.h>
#include <mono/metadata/monitor.h>
#include <mono/metadata/profiler-private.h>
#include <mono/metadata/profiler.h>
+#include <mono/metadata/debug-mono-symfile.h>
#include <mono/utils/mono-compiler.h>
+#include <mono/utils/mono-memory-model.h>
#include <mono/metadata/mono-basic-block.h>
#include "mini.h"
#define LOAD_ERROR do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto load_error; } while (0)
+#define TYPE_LOAD_ERROR(klass) do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else { cfg->exception_ptr = klass; goto load_error; } } while (0)
+
#define GET_BBLOCK(cfg,tblock,ip) do { \
(tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
if (!(tblock)) { \
int stypes_reg = alloc_preg (cfg);
int stype = alloc_preg (cfg);
+ mono_class_setup_supertypes (klass);
+
if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
int stypes_reg = alloc_preg (cfg);
int stype = alloc_preg (cfg);
+ mono_class_setup_supertypes (klass);
+
if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
}
static MonoInst*
-emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, int rgctx_type);
+emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
static MonoInst*
-emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, int rgctx_type);
+emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
static MonoInst*
mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
}
static MonoJumpInfoRgctxEntry *
-mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
+mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
{
MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
res->method = method;
static MonoInst*
emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
- MonoClass *klass, int rgctx_type)
+ MonoClass *klass, MonoRgctxInfoType rgctx_type)
{
MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
*/
static MonoInst*
emit_get_rgctx_method (MonoCompile *cfg, int context_used,
- MonoMethod *cmethod, int rgctx_type)
+ MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
{
if (!context_used) {
MonoInst *ins;
static MonoInst*
emit_get_rgctx_field (MonoCompile *cfg, int context_used,
- MonoClassField *field, int rgctx_type)
+ MonoClassField *field, MonoRgctxInfoType rgctx_type)
{
MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
#endif
}
+/*
+ * emit_seq_point:
+ *
+ *   Emit an OP_SEQ_POINT instruction at IL offset IP - cfg->header->code into the
+ * current bblock. Only done when sequence point generation is enabled and METHOD is
+ * the outermost method being compiled (cfg->method), i.e. never for inlined callees.
+ * INTR_LOC is passed through to NEW_SEQ_POINT; judging by the callers in this file
+ * it marks the sequence point as an interruptable/single-step location — confirm
+ * against the NEW_SEQ_POINT definition.
+ */
+static void
+emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc)
+{
+ MonoInst *ins;
+
+ if (cfg->gen_seq_points && cfg->method == method) {
+ NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
+ MONO_ADD_INS (cfg->cbb, ins);
+ }
+}
+
static void
save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
{
* in mono_delegate_trampoline (), we allocate a per-domain memory slot to
* store it, and we fill it after the method has been compiled.
*/
- if (!cfg->compile_aot && !method->dynamic) {
+ if (!cfg->compile_aot && !method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
MonoInst *code_slot_ins;
if (context_used) {
if (cfg->compile_aot) {
EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
} else {
- trampoline = mono_create_delegate_trampoline (klass);
+ trampoline = mono_create_delegate_trampoline (cfg->domain, klass);
EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
}
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
inline_limit = INLINE_LENGTH_LIMIT;
inline_limit_inited = TRUE;
}
- if (header.code_size >= inline_limit)
+ if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
return FALSE;
/*
return ins;
}
+/*
+ * emit_memory_barrier:
+ *
+ *   Append an OP_MEMORY_BARRIER instruction to the current bblock and return it.
+ * KIND (e.g. FullBarrier at the call sites in this file) is stored in
+ * backend.memory_barrier_kind for the arch backend to consume when lowering
+ * the opcode. Centralizes the previously hand-rolled MONO_INST_NEW/MONO_ADD_INS
+ * barrier sequences.
+ */
+static MonoInst*
+emit_memory_barrier (MonoCompile *cfg, int kind)
+{
+ MonoInst *ins = NULL;
+ MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
+ MONO_ADD_INS (cfg->cbb, ins);
+ /* Set the kind after MONO_ADD_INS; the backend reads it at lowering time. */
+ ins->backend.memory_barrier_kind = kind;
+
+ return ins;
+}
+
static MonoInst*
mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
MONO_ADD_INS (cfg->cbb, ins);
return ins;
} else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
- MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
- MONO_ADD_INS (cfg->cbb, ins);
- return ins;
+ return emit_memory_barrier (cfg, FullBarrier);
}
} else if (cmethod->klass == mono_defaults.monitor_class) {
return (MonoInst*)call;
}
#elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
+ {
MonoMethod *fast_method = NULL;
/* Avoid infinite recursion */
strcmp (cfg->method->name, "FastMonitorExit") == 0))
return NULL;
- if ((strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) ||
+ if ((strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) ||
strcmp (cmethod->name, "Exit") == 0)
fast_method = mono_monitor_get_fast_path (cmethod);
if (!fast_method)
return NULL;
return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
+ }
#endif
} else if (cmethod->klass->image == mono_defaults.corlib &&
(strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
} else if (cmethod->klass->image == mono_defaults.corlib) {
if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
&& strcmp (cmethod->klass->name, "Debugger") == 0) {
- if (should_insert_brekpoint (cfg->method))
- MONO_INST_NEW (cfg, ins, OP_BREAK);
- else
+ if (should_insert_brekpoint (cfg->method)) {
+ ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
+ } else {
MONO_INST_NEW (cfg, ins, OP_NOP);
- MONO_ADD_INS (cfg->cbb, ins);
+ MONO_ADD_INS (cfg->cbb, ins);
+ }
return ins;
}
if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
}
#endif
+/*
+ * emit_init_rvar:
+ *
+ *   Initialize RVAR to a zero/dummy value appropriate for its evaluation-stack
+ * type. Used for the return variable of an inlined method on paths where the
+ * callee never sets it (e.g. a callee that only throws — see the inline_method
+ * caller). RTYPE is the managed return type; it is only consulted for the
+ * STACK_VTYPE case to obtain the value type class for OP_VZERO.
+ */
+static void
+emit_init_rvar (MonoCompile *cfg, MonoInst *rvar, MonoType *rtype)
+{
+ /* Static storage: OP_R8CONST's inst_p0 points here after this function returns. */
+ static double r8_0 = 0.0;
+ MonoInst *ins;
+
+ switch (rvar->type) {
+ case STACK_I4:
+ MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
+ break;
+ case STACK_I8:
+ MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
+ break;
+ case STACK_PTR:
+ case STACK_MP:
+ case STACK_OBJ:
+ MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
+ break;
+ case STACK_R8:
+ /* No MONO_EMIT_NEW_* helper for r8 constants; build the inst by hand. */
+ MONO_INST_NEW (cfg, ins, OP_R8CONST);
+ ins->type = STACK_R8;
+ ins->inst_p0 = (void*)&r8_0;
+ ins->dreg = rvar->dreg;
+ MONO_ADD_INS (cfg->cbb, ins);
+ break;
+ case STACK_VTYPE:
+ MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (rtype));
+ break;
+ default:
+ /* Every stack type a return value can have is handled above. */
+ g_assert_not_reached ();
+ }
+}
+
static int
inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_always)
cfg->cbb = prev_cbb;
}
} else {
+ /*
+ * It's possible that the rvar is set in some prev bblock, but not in others.
+ * (#1835).
+ */
+ if (rvar) {
+ MonoBasicBlock *bb;
+
+ for (i = 0; i < ebblock->in_count; ++i) {
+ bb = ebblock->in_bb [i];
+
+ if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
+ cfg->cbb = bb;
+
+ emit_init_rvar (cfg, rvar, fsig->ret);
+ }
+ }
+ }
+
cfg->cbb = ebblock;
}
* If the inlined method contains only a throw, then the ret var is not
* set, so set it to a dummy value.
*/
- if (!ret_var_set) {
- static double r8_0 = 0.0;
-
- switch (rvar->type) {
- case STACK_I4:
- MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
- break;
- case STACK_I8:
- MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
- break;
- case STACK_PTR:
- case STACK_MP:
- case STACK_OBJ:
- MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
- break;
- case STACK_R8:
- MONO_INST_NEW (cfg, ins, OP_R8CONST);
- ins->type = STACK_R8;
- ins->inst_p0 = (void*)&r8_0;
- ins->dreg = rvar->dreg;
- MONO_ADD_INS (cfg->cbb, ins);
- break;
- case STACK_VTYPE:
- MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
- break;
- default:
- g_assert_not_reached ();
- }
- }
+ if (!ret_var_set)
+ emit_init_rvar (cfg, rvar, fsig->ret);
EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
*sp++ = ins;
/* Debugging support */
#if 0
if (supported_tail_call) {
- static int count = 0;
- count ++;
- if (getenv ("COUNT")) {
- if (count == atoi (getenv ("COUNT")))
- printf ("LAST: %s\n", mono_method_full_name (cmethod, TRUE));
- if (count > atoi (getenv ("COUNT")))
- supported_tail_call = FALSE;
- }
+ if (!mono_debug_count ())
+ supported_tail_call = FALSE;
}
#endif
gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
int context_used;
gboolean init_locals, seq_points, skip_dead_blocks;
- gboolean disable_inline;
+ gboolean disable_inline, sym_seq_points = FALSE;
MonoInst *cached_tls_addr = NULL;
+ MonoDebugMethodInfo *minfo;
+ MonoBitSet *seq_point_locs = NULL;
disable_inline = is_jit_optimizer_disabled (method);
seq_points = cfg->gen_seq_points && cfg->method == method;
+ if (cfg->gen_seq_points && cfg->method == method) {
+ minfo = mono_debug_lookup_method (method);
+ if (minfo) {
+ int i, n_il_offsets;
+ int *il_offsets;
+ int *line_numbers;
+
+ mono_debug_symfile_get_line_numbers_full (minfo, NULL, NULL, &n_il_offsets, &il_offsets, &line_numbers, NULL, NULL);
+ seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
+ sym_seq_points = TRUE;
+ for (i = 0; i < n_il_offsets; ++i) {
+ if (il_offsets [i] < header->code_size)
+ mono_bitset_set_fast (seq_point_locs, il_offsets [i]);
+ }
+ }
+ }
+
/*
* Methods without init_locals set could cause asserts in various passes
* (#497220).
* Currently, we generate these automatically at points where the IL
* stack is empty.
*/
- if (seq_points && sp == stack_start) {
- NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
+ if (seq_points && ((sp == stack_start) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
+ /*
+ * Make methods interruptable at the beginning, and at the targets of
+ * backward branches.
+ * Also, do this at the start of every bblock in methods with clauses too,
+ * to be able to handle instructions with inprecise control flow like
+ * throw/endfinally.
+ * Backward branches are handled at the end of method-to-ir ().
+ */
+ gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
+
+ /* Avoid sequence points on empty IL like .volatile */
+ // FIXME: Enable this
+ //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
+ NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
MONO_ADD_INS (cfg->cbb, ins);
}
switch (*ip) {
case CEE_NOP:
+ if (seq_points && !sym_seq_points && sp != stack_start) {
+ /*
+ * The C# compiler uses these nops to notify the JIT that it should
+ * insert seq points.
+ */
+ NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
+ MONO_ADD_INS (cfg->cbb, ins);
+ }
if (cfg->keep_cil_nops)
MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
else
MONO_ADD_INS (bblock, ins);
break;
case CEE_BREAK:
- if (should_insert_brekpoint (cfg->method))
- MONO_INST_NEW (cfg, ins, OP_BREAK);
- else
+ if (should_insert_brekpoint (cfg->method)) {
+ ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
+ } else {
MONO_INST_NEW (cfg, ins, OP_NOP);
+ }
ip++;
MONO_ADD_INS (bblock, ins);
break;
MonoInst *vtable_arg = NULL;
gboolean check_this = FALSE;
gboolean supported_tail_call = FALSE;
+ gboolean need_seq_point = FALSE;
CHECK_OPSIZE (5);
token = read32 (ip + 1);
cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
cil_method = cmethod;
}
-
+
if (!cmethod || mono_loader_get_last_error ())
LOAD_ERROR;
if (!dont_verify && !cfg->skip_visibility) {
if (!cmethod->klass->inited)
if (!mono_class_init (cmethod->klass))
- LOAD_ERROR;
+ TYPE_LOAD_ERROR (cmethod->klass);
if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
mini_class_is_system_array (cmethod->klass)) {
mono_save_token_info (cfg, image, token, cil_method);
+ if (!MONO_TYPE_IS_VOID (fsig->ret) && !sym_seq_points) {
+ /*
+ * Need to emit an implicit seq point after every non-void call so single stepping through nested calls like
+ * foo (bar (), baz ())
+ * works correctly. MS does this also:
+ * http://stackoverflow.com/questions/6937198/making-your-net-language-step-correctly-in-the-debugger
+ * The problem with this approach is that the debugger will stop after all calls returning a value,
+ * even for simple cases, like:
+ * int i = foo ();
+ */
+ /* Special case a few common successor opcodes */
+ if (!(ip + 5 < end && ip [5] == CEE_POP))
+ need_seq_point = TRUE;
+ }
+
n = fsig->param_count + fsig->hasthis;
if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
ip += 5;
ins_flag = 0;
+ if (need_seq_point)
+ emit_seq_point (cfg, method, ip, FALSE);
break;
}
ip += 5;
ins_flag = 0;
+ if (need_seq_point)
+ emit_seq_point (cfg, method, ip, FALSE);
break;
}
cfg->real_offset += 5;
bblock = cfg->cbb;
- if (!MONO_TYPE_IS_VOID (fsig->ret))
+ if (!MONO_TYPE_IS_VOID (fsig->ret)) {
/* *sp is already set by inline_method */
sp++;
+ }
inline_costs += costs;
ins_flag = 0;
+ if (need_seq_point)
+ emit_seq_point (cfg, method, ip, FALSE);
break;
}
}
ip += 5;
ins_flag = 0;
+ if (need_seq_point)
+ emit_seq_point (cfg, method, ip, FALSE);
break;
}
ip += 5;
ins_flag = 0;
+ emit_seq_point (cfg, method, ip, FALSE);
break;
}
ip += 5;
ins_flag = 0;
+ if (need_seq_point)
+ emit_seq_point (cfg, method, ip, FALSE);
break;
}
break;
}
+ /*
+ * Synchronized wrappers.
+ * It's hard to determine where to replace a method with its synchronized
+ * wrapper without causing an infinite recursion. The current solution is
+ * to add the synchronized wrapper in the trampolines, and to
+ * change the called method to a dummy wrapper, and resolve that wrapper
+ * to the real method in mono_jit_compile_method ().
+ */
+ if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED && mono_marshal_method_from_wrapper (cfg->method) == cmethod) {
+ cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
+ }
+
/* Common call */
INLINE_FAILURE;
ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
ip += 5;
ins_flag = 0;
+ if (need_seq_point)
+ emit_seq_point (cfg, method, ip, FALSE);
break;
}
case CEE_RET:
* (test case: test_0_inline_throw ()).
*/
if (return_var && cfg->cbb->in_count) {
+ MonoType *ret_type = mono_method_signature (method)->ret;
+
MonoInst *store;
CHECK_STACK (1);
--sp;
+
+ if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
+ UNVERIFIED;
+
//g_assert (returnvar != -1);
EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
cfg->ret_var_set = TRUE;
if (cfg->ret) {
MonoType *ret_type = mono_method_signature (method)->ret;
- if (seq_points) {
+ if (seq_points && !sym_seq_points) {
/*
* Place a seq point here too even through the IL stack is not
* empty, so a step over on
MONO_ADD_INS (bblock, ins);
*sp++ = ins;
if (ins->flags & MONO_INST_VOLATILE) {
- MonoInst *barrier;
-
/* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
- MONO_INST_NEW (cfg, barrier, OP_MEMORY_BARRIER);
- MONO_ADD_INS (cfg->cbb, barrier);
+ /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
+ emit_memory_barrier (cfg, FullBarrier);
}
++ip;
break;
ins_flag = 0;
if (ins->flags & MONO_INST_VOLATILE) {
- MonoInst *barrier;
-
/* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
- MONO_INST_NEW (cfg, barrier, OP_MEMORY_BARRIER);
- MONO_ADD_INS (cfg->cbb, barrier);
+ /* FIXME it's questionable if release semantics require a full barrier or just StoreStore */
+ emit_memory_barrier (cfg, FullBarrier);
}
MONO_ADD_INS (bblock, ins);
mono_save_token_info (cfg, image, token, cmethod);
if (!mono_class_init (cmethod->klass))
- LOAD_ERROR;
+ TYPE_LOAD_ERROR (cmethod->klass);
if (cfg->generic_sharing_context)
context_used = mono_method_check_context_used (cmethod);
}
case CEE_LDFLD:
case CEE_LDFLDA:
- case CEE_STFLD: {
+ case CEE_STFLD:
+ case CEE_LDSFLD:
+ case CEE_LDSFLDA:
+ case CEE_STSFLD: {
MonoClassField *field;
int costs;
guint foffset;
-
- if (*ip == CEE_STFLD) {
- CHECK_STACK (2);
- sp -= 2;
+ gboolean is_instance;
+ int op;
+ gpointer addr = NULL;
+ gboolean is_special_static;
+ MonoType *ftype;
+ MonoInst *store_val = NULL;
+
+ op = *ip;
+ is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
+ if (is_instance) {
+ if (op == CEE_STFLD) {
+ CHECK_STACK (2);
+ sp -= 2;
+ store_val = sp [1];
+ } else {
+ CHECK_STACK (1);
+ --sp;
+ }
+ if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
+ UNVERIFIED;
+ if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
+ UNVERIFIED;
} else {
- CHECK_STACK (1);
- --sp;
+ if (op == CEE_STSFLD) {
+ CHECK_STACK (1);
+ sp--;
+ store_val = sp [0];
+ }
}
- if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
- UNVERIFIED;
- if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
- UNVERIFIED;
+
CHECK_OPSIZE (5);
token = read32 (ip + 1);
if (method->wrapper_type != MONO_WRAPPER_NONE) {
FIELD_ACCESS_FAILURE;
mono_class_init (klass);
- if (*ip != CEE_LDFLDA && is_magic_tls_access (field))
+ if (is_instance && *ip != CEE_LDFLDA && is_magic_tls_access (field))
UNVERIFIED;
+
+ /* if the class is Critical then transparent code cannot access it's fields */
+ if (!is_instance && mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
+ ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
+
/* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
*/
+ /*
+ * LDFLD etc. is usable on static fields as well, so convert those cases to
+ * the static case.
+ */
+ if (is_instance && field->type->attrs & FIELD_ATTRIBUTE_STATIC) {
+ switch (op) {
+ case CEE_LDFLD:
+ op = CEE_LDSFLD;
+ break;
+ case CEE_STFLD:
+ op = CEE_STSFLD;
+ break;
+ case CEE_LDFLDA:
+ op = CEE_LDSFLDA;
+ break;
+ default:
+ g_assert_not_reached ();
+ }
+ is_instance = FALSE;
+ }
+
+ /* INSTANCE CASE */
+
foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
- if (*ip == CEE_STFLD) {
+ if (op == CEE_STFLD) {
if (target_type_is_incompatible (cfg, field->type, sp [1]))
UNVERIFIED;
if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
break;
}
- if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
- MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
+ if (is_instance && ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class)) {
+ MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
MonoInst *iargs [4];
iargs [0] = sp [0];
ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
*sp++ = ins;
}
- } else {
+ } else if (is_instance) {
if (sp [0]->type == STACK_VTYPE) {
MonoInst *var;
sp [0] = ins;
}
- if (*ip == CEE_LDFLDA) {
+ if (op == CEE_LDFLDA) {
if (is_magic_tls_access (field)) {
ins = sp [0];
*sp++ = create_magic_tls_access (cfg, field, &cached_tls_addr, ins);
*sp++ = load;
}
}
- ins_flag = 0;
- ip += 5;
- break;
- }
- case CEE_LDSFLD:
- case CEE_LDSFLDA:
- case CEE_STSFLD: {
- MonoClassField *field;
- gpointer addr = NULL;
- gboolean is_special_static;
- MonoType *ftype;
-
- CHECK_OPSIZE (5);
- token = read32 (ip + 1);
- if (method->wrapper_type != MONO_WRAPPER_NONE) {
- field = mono_method_get_wrapper_data (method, token);
- klass = field->parent;
+ if (is_instance) {
+ ins_flag = 0;
+ ip += 5;
+ break;
}
- else
- field = mono_field_from_token (image, token, &klass, generic_context);
- if (!field)
- LOAD_ERROR;
- mono_class_init (klass);
- if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
- FIELD_ACCESS_FAILURE;
- /* if the class is Critical then transparent code cannot access it's fields */
- if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
- ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
+ /* STATIC CASE */
/*
* We can only support shared generic static
* the generic class init.
*/
#ifndef MONO_ARCH_VTABLE_REG
- GENERIC_SHARING_FAILURE (*ip);
+ GENERIC_SHARING_FAILURE (op);
#endif
if (cfg->generic_sharing_context)
ftype = mono_field_get_type (field);
- g_assert (!(ftype->attrs & FIELD_ATTRIBUTE_LITERAL));
+ if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
+ UNVERIFIED;
/* The special_static_fields field is init'd in mono_class_vtable, so it needs
* to be called here.
}
}
}
- addr = (char*)vtable->data + field->offset;
+ addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
if (cfg->compile_aot)
EMIT_NEW_SFLDACONST (cfg, ins, field);
/* Generate IR to do the actual load/store operation */
- if (*ip == CEE_LDSFLDA) {
+ if (op == CEE_LDSFLDA) {
ins->klass = mono_class_from_mono_type (ftype);
ins->type = STACK_PTR;
*sp++ = ins;
- } else if (*ip == CEE_STSFLD) {
+ } else if (op == CEE_STSFLD) {
MonoInst *store;
- CHECK_STACK (1);
- sp--;
- EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, sp [0]->dreg);
+ EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
store->flags |= ins_flag;
} else {
gboolean is_const = FALSE;
MonoVTable *vtable = NULL;
+ gpointer addr = NULL;
if (!context_used) {
vtable = mono_class_vtable (cfg->domain, klass);
CHECK_TYPELOAD (klass);
}
- if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
- vtable->initialized && (ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
- gpointer addr = (char*)vtable->data + field->offset;
+ if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
+ (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
int ro_type = ftype->type;
+ if (!addr)
+ addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
}
ins->sreg1 = sp [0]->dreg;
ins->inst_newa_class = klass;
ins->type = STACK_OBJ;
- ins->klass = klass;
+ ins->klass = array_type;
MONO_ADD_INS (cfg->cbb, ins);
cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
cfg->cbb->has_array_access = TRUE;
break;
}
+ case CEE_MONO_MEMORY_BARRIER: {
+ CHECK_OPSIZE (5);
+ emit_memory_barrier (cfg, (int)read32 (ip + 1));
+ ip += 5;
+ break;
+ }
+ case CEE_MONO_JIT_ATTACH: {
+ MonoInst *args [16];
+ MonoInst *ad_ins, *lmf_ins;
+ MonoBasicBlock *next_bb = NULL;
+
+ cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
+
+ EMIT_NEW_PCONST (cfg, ins, NULL);
+ MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
+
+#if TARGET_WIN32
+ ad_ins = NULL;
+ lmf_ins = NULL;
+#else
+ ad_ins = mono_get_domain_intrinsic (cfg);
+ lmf_ins = mono_get_lmf_intrinsic (cfg);
+#endif
+
+#ifdef MONO_ARCH_HAVE_TLS_GET
+ if (MONO_ARCH_HAVE_TLS_GET && ad_ins && lmf_ins) {
+ NEW_BBLOCK (cfg, next_bb);
+
+ MONO_ADD_INS (cfg->cbb, ad_ins);
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ad_ins->dreg, 0);
+ MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, next_bb);
+
+ MONO_ADD_INS (cfg->cbb, lmf_ins);
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, lmf_ins->dreg, 0);
+ MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, next_bb);
+ }
+#endif
+
+ if (cfg->compile_aot) {
+ /* AOT code is only used in the root domain */
+ EMIT_NEW_PCONST (cfg, args [0], NULL);
+ } else {
+ EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
+ }
+ ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
+ MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
+
+ if (next_bb) {
+ MONO_START_BB (cfg, next_bb);
+ bblock = cfg->cbb;
+ }
+ ip += 2;
+ break;
+ }
+ case CEE_MONO_JIT_DETACH: {
+ MonoInst *args [16];
+
+ /* Restore the original domain */
+ dreg = alloc_ireg (cfg);
+ EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
+ mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
+ ip += 2;
+ break;
+ }
default:
g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
break;
}
}
+ if (seq_points) {
+ MonoBasicBlock *bb;
+
+ /*
+ * Make seq points at backward branch targets interruptable.
+ */
+ for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
+ if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
+ bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
+ }
+
/* Add a sequence point for method entry/exit events */
if (seq_points) {
NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);