mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
goto exception_exit; \
} while (0)
+/* Mark the method being compiled as not AOT-able; at verbose level >= 2, log the source location that disabled AOT */
+#define DISABLE_AOT(cfg) do { \
+ if ((cfg)->verbose_level >= 2) \
+ printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
+ (cfg)->disable_aot = TRUE; \
+ } while (0)
+
/* Determine whenever 'ins' represents a load of the 'this' argument */
#define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
return alloc_ireg (cfg);
}
+/* Allocate a virtual register for a long (64 bit) value; thin public wrapper around alloc_lreg () */
+guint32
+mono_alloc_lreg (MonoCompile *cfg)
+{
+ return alloc_lreg (cfg);
+}
+
guint32
mono_alloc_freg (MonoCompile *cfg)
{
if (type->byref)
return OP_MOVE;
+ type = mini_replace_type (type);
handle_enum:
switch (type->type) {
case MONO_TYPE_I1:
{
MonoClass *klass;
+ type = mini_replace_type (type);
inst->klass = klass = mono_class_from_mono_type (type);
if (type->byref) {
inst->type = STACK_MP;
if (align == 0)
align = 4;
- if ((size <= 4) && (size <= align)) {
+ if ((size <= SIZEOF_REGISTER) && (size <= align)) {
switch (size) {
case 1:
MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
* lmf->prev_lmf = *lmf_addr
* *lmf_addr = lmf
*/
- int lmf_reg, lmf_addr_reg, prev_lmf_reg;
+ int lmf_reg, prev_lmf_reg;
MonoInst *ins, *lmf_ins;
if (!cfg->lmf_ir)
return;
- if (cfg->lmf_ir_mono_lmf) {
+ if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
/* Load current lmf */
lmf_ins = mono_get_lmf_intrinsic (cfg);
g_assert (lmf_ins);
/* Save previous_lmf */
EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
/* Set new LMF */
- emit_tls_set (cfg, lmf_reg, OP_TLS_SET_REG);
+ emit_tls_set (cfg, lmf_reg, TLS_KEY_LMF);
} else {
+ /*
+ * Store lmf_addr in a variable, so it can be allocated to a global register.
+ */
+ if (!cfg->lmf_addr_var)
+ cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
+
+#ifdef HOST_WIN32
+ ins = mono_get_jit_tls_intrinsic (cfg);
+ if (ins) {
+ int jit_tls_dreg = ins->dreg;
+
+ MONO_ADD_INS (cfg->cbb, ins);
+ lmf_reg = alloc_preg (cfg);
+ EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, G_STRUCT_OFFSET (MonoJitTlsData, lmf));
+ } else {
+ lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
+ }
+#else
lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
- if (lmf_ins)
+ if (lmf_ins) {
MONO_ADD_INS (cfg->cbb, lmf_ins);
- else
+ } else {
+#ifdef TARGET_IOS
+ MonoInst *args [16], *jit_tls_ins, *ins;
+
+ /* Inline mono_get_lmf_addr () */
+ /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
+
+ /* Load mono_jit_tls_id */
+ EMIT_NEW_AOTCONST (cfg, args [0], MONO_PATCH_INFO_JIT_TLS_ID, NULL);
+ /* call pthread_getspecific () */
+ jit_tls_ins = mono_emit_jit_icall (cfg, pthread_getspecific, args);
+ /* lmf_addr = &jit_tls->lmf */
+ EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, cfg->lmf_addr_var->dreg, jit_tls_ins->dreg, G_STRUCT_OFFSET (MonoJitTlsData, lmf));
+ lmf_ins = ins;
+#else
lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
- lmf_addr_reg = lmf_ins->dreg;
+#endif
+ }
+#endif
+ lmf_ins->dreg = cfg->lmf_addr_var->dreg;
EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
lmf_reg = ins->dreg;
- /* Save lmf_addr */
- if (!cfg->lmf_addr_var)
- cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
- EMIT_NEW_UNALU (cfg, ins, OP_MOVE, cfg->lmf_addr_var->dreg, lmf_ins->dreg);
+
prev_lmf_reg = alloc_preg (cfg);
/* Save previous_lmf */
- EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_addr_reg, 0);
+ EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
/* Set new lmf */
- EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, lmf_reg);
+ EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
}
}
EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
lmf_reg = ins->dreg;
- if (cfg->lmf_ir_mono_lmf) {
+ if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
/* Load previous_lmf */
prev_lmf_reg = alloc_preg (cfg);
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
/* Set new LMF */
- emit_tls_set (cfg, prev_lmf_reg, OP_TLS_SET_REG);
+ emit_tls_set (cfg, prev_lmf_reg, TLS_KEY_LMF);
} else {
/*
* Emit IR to pop the LMF:
if (!cfg->lmf_addr_var)
cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
lmf_addr_reg = cfg->lmf_addr_var->dreg;
+
prev_lmf_reg = alloc_preg (cfg);
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
handle_enum:
type = mini_get_basic_type_from_generic (gsctx, type);
+ type = mini_replace_type (type);
switch (type->type) {
case MONO_TYPE_VOID:
return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
MonoType *simple_type;
MonoClass *klass;
+ target = mini_replace_type (target);
if (target->byref) {
/* FIXME: check that the pointed to types match */
if (arg->type == STACK_MP)
call->args = args;
call->signature = sig;
call->rgctx_reg = rgctx;
- sig_ret = sig->ret;
+ sig_ret = mini_replace_type (sig->ret);
type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
{
MonoCallInst *call;
+ MonoInst *ins;
int rgctx_reg = -1;
+ gboolean check_sp = FALSE;
+
+ if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
+ WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
+
+ if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
+ check_sp = TRUE;
+ }
if (rgctx_arg) {
rgctx_reg = mono_alloc_preg (cfg);
MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
}
+ if (check_sp) {
+ if (!cfg->stack_inbalance_var)
+ cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
+
+ MONO_INST_NEW (cfg, ins, OP_GET_SP);
+ ins->dreg = cfg->stack_inbalance_var->dreg;
+ MONO_ADD_INS (cfg->cbb, ins);
+ }
+
call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
call->inst.sreg1 = addr->dreg;
MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
+ if (check_sp) {
+ int sp_reg;
+
+ sp_reg = mono_alloc_preg (cfg);
+
+ MONO_INST_NEW (cfg, ins, OP_GET_SP);
+ ins->dreg = sp_reg;
+ MONO_ADD_INS (cfg->cbb, ins);
+
+ /* Restore the stack so we don't crash when throwing the exception */
+ MONO_INST_NEW (cfg, ins, OP_SET_SP);
+ ins->sreg1 = cfg->stack_inbalance_var->dreg;
+ MONO_ADD_INS (cfg->cbb, ins);
+
+ MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
+ MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
+ }
+
if (rgctx_arg)
set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
g_assert (info);
- for (i = 0; i < info->entries->len; ++i) {
- MonoRuntimeGenericContextInfoTemplate *otemplate = g_ptr_array_index (info->entries, i);
+ for (i = 0; i < info->num_entries; ++i) {
+ MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
return i;
}
- template = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate));
+ if (info->num_entries == info->count_entries) {
+ MonoRuntimeGenericContextInfoTemplate *new_entries;
+ int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
+
+ new_entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
+
+ memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
+ info->entries = new_entries;
+ info->count_entries = new_count_entries;
+ }
+
+ idx = info->num_entries;
+ template = &info->entries [idx];
template->info_type = rgctx_type;
template->data = data;
- idx = info->entries->len;
-
- g_ptr_array_add (info->entries, template);
+ info->num_entries ++;
return idx;
}
// FIXME: This doesn't work yet (class libs tests fail?)
#define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
+/*
+ * emit_castclass_with_cache:
+ *
+ *   Emit a call to the castclass-with-cache marshalling wrapper for ARGS,
+ * bracketing the call with save_cast_details ()/reset_cast_details () so a
+ * failing cast can report details. OUT_BBLOCK may be updated by
+ * save_cast_details (). Returns the call instruction.
+ */
+static MonoInst*
+emit_castclass_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args, MonoBasicBlock **out_bblock)
+{
+ MonoMethod *mono_castclass;
+ MonoInst *res;
+
+ mono_castclass = mono_marshal_get_castclass_with_cache ();
+
+ save_cast_details (cfg, klass, args [0]->dreg, TRUE, out_bblock);
+ res = mono_emit_method_call (cfg, mono_castclass, args, NULL);
+ reset_cast_details (cfg);
+
+ return res;
+}
+
/*
* Returns NULL and set the cfg exception on error.
*/
MonoInst *args [3];
if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
- MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
MonoInst *cache_ins;
cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
/* cache */
args [2] = cache_ins;
- return mono_emit_method_call (cfg, mono_castclass, args, NULL);
+ return emit_castclass_with_cache (cfg, klass, args, NULL);
}
klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
* in mono_delegate_trampoline (), we allocate a per-domain memory slot to
* store it, and we fill it after the method has been compiled.
*/
- if (!cfg->compile_aot && !method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
+ if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
MonoInst *code_slot_ins;
if (context_used) {
}
mono_domain_unlock (domain);
- EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
+ if (cfg->compile_aot)
+ EMIT_NEW_AOTCONST (cfg, code_slot_ins, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
+ else
+ EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
}
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
}
/* Set invoke_impl field */
if (cfg->compile_aot) {
- EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
+ MonoClassMethodPair *del_tramp;
+
+ del_tramp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoClassMethodPair));
+ del_tramp->klass = klass;
+ del_tramp->method = context_used ? NULL : method;
+ EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
} else {
- trampoline = mono_create_delegate_trampoline (cfg->domain, klass);
+ trampoline = mono_create_delegate_trampoline_with_method (cfg->domain, klass, context_used ? NULL : method);
EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
}
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
vtable = mono_class_vtable (cfg->domain, method->klass);
if (!vtable)
return FALSE;
- mono_runtime_class_init (vtable);
+ if (!cfg->compile_aot)
+ mono_runtime_class_init (vtable);
} else if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
if (cfg->run_cctors && method->klass->has_cctor) {
/*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
return FALSE;
}
- if (klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
- return FALSE;
+ if (klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
+ if (cfg->method == method)
+ return FALSE;
+ }
if (!mono_class_needs_cctor_run (klass, method))
return FALSE;
int mult_reg, add_reg, array_reg, index_reg, index2_reg;
int context_used;
- if (mini_is_gsharedvt_klass (cfg, klass)) {
+ if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
size = -1;
} else {
mono_class_init (klass);
case MONO_BREAK_POLICY_NEVER:
return FALSE;
case MONO_BREAK_POLICY_ON_DBG:
- return mono_debug_using_mono_debugger ();
+ g_warning ("mdb no longer supported");
+ return FALSE;
default:
g_warning ("Incorrect value returned from break policy callback");
return FALSE;
} else {
MonoInst *ins;
- if (mini_is_gsharedvt_klass (cfg, klass)) {
+ if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
MonoInst *addr;
// FIXME-VT: OP_ICONST optimization
else
eklass = mono_class_from_mono_type (fsig->ret);
-
if (is_set) {
return emit_array_store (cfg, eklass, args, FALSE);
} else {
}
}
+/*
+ * is_unsafe_mov_compatible:
+ *
+ *   Return TRUE if a value of type PARAM_KLASS may be reinterpreted as
+ * RETURN_KLASS without any conversion: both must be blittable valuetypes
+ * of the same size, both structs or both primitives/enums, and neither
+ * may be a floating point type.
+ */
+static gboolean
+is_unsafe_mov_compatible (MonoClass *param_klass, MonoClass *return_klass)
+{
+ uint32_t align;
+
+ //Only allow for valuetypes
+ if (!param_klass->valuetype || !return_klass->valuetype)
+ return FALSE;
+
+ //That are blitable
+ if (param_klass->has_references || return_klass->has_references)
+ return FALSE;
+
+ /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
+ /* FIXED: '&param_klass' had been mangled to the HTML entity text '¶m_klass' (&param -> &para;), which does not compile */
+ if ((MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
+ (!MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)))
+ return FALSE;
+
+ if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
+ return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8)
+ return FALSE;
+
+ //And have the same size
+ if (mono_class_value_size (param_klass, &align) != mono_class_value_size (return_klass, &align))
+ return FALSE;
+ return TRUE;
+}
+
+/*
+ * emit_array_unsafe_mov:
+ *
+ *   Implement Array.UnsafeMov as a no-op reinterpretation: return the
+ * argument unchanged when the parameter and return types (or their element
+ * classes, for rank-1 arrays) are layout compatible according to
+ * is_unsafe_mov_compatible (). Return NULL to fall back to a normal call.
+ */
+static MonoInst*
+emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
+{
+ MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
+ MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
+
+ //Valuetypes that are semantically equivalent
+ if (is_unsafe_mov_compatible (param_klass, return_klass))
+ return args [0];
+
+ //Arrays of valuetypes that are semantically equivalent
+ if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (param_klass->element_class, return_klass->element_class))
+ return args [0];
+
+ return NULL;
+}
+
static MonoInst*
mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
- MonoInst *ins = NULL;
#ifdef MONO_ARCH_SIMD_INTRINSICS
+ MonoInst *ins = NULL;
+
if (cfg->opt & MONO_OPT_SIMD) {
ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
if (ins)
}
#endif
- return ins;
+ return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
}
static MonoInst*
if (cmethod->klass == mono_defaults.array_class) {
if (strcmp (cmethod->name, "UnsafeStore") == 0)
return emit_array_unsafe_access (cfg, fsig, args, TRUE);
- if (strcmp (cmethod->name, "UnsafeLoad") == 0)
+ else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
return emit_array_unsafe_access (cfg, fsig, args, FALSE);
+ else if (strcmp (cmethod->name, "UnsafeMov") == 0)
+ return emit_array_unsafe_mov (cfg, fsig, args);
}
return NULL;
MonoInst *ins_iconst;
guint32 opcode = 0;
- if (fsig->params [0]->type == MONO_TYPE_I4)
+ if (fsig->params [0]->type == MONO_TYPE_I4) {
opcode = OP_ATOMIC_ADD_NEW_I4;
+ cfg->has_atomic_add_new_i4 = TRUE;
+ }
#if SIZEOF_REGISTER == 8
else if (fsig->params [0]->type == MONO_TYPE_I8)
opcode = OP_ATOMIC_ADD_NEW_I8;
#endif
if (opcode) {
+ if (!mono_arch_opcode_supported (opcode))
+ return NULL;
MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
ins_iconst->inst_c0 = 1;
ins_iconst->dreg = mono_alloc_ireg (cfg);
MonoInst *ins_iconst;
guint32 opcode = 0;
- if (fsig->params [0]->type == MONO_TYPE_I4)
+ if (fsig->params [0]->type == MONO_TYPE_I4) {
opcode = OP_ATOMIC_ADD_NEW_I4;
+ cfg->has_atomic_add_new_i4 = TRUE;
+ }
#if SIZEOF_REGISTER == 8
else if (fsig->params [0]->type == MONO_TYPE_I8)
opcode = OP_ATOMIC_ADD_NEW_I8;
#endif
if (opcode) {
+ if (!mono_arch_opcode_supported (opcode))
+ return NULL;
MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
ins_iconst->inst_c0 = -1;
ins_iconst->dreg = mono_alloc_ireg (cfg);
} else if (strcmp (cmethod->name, "Add") == 0) {
guint32 opcode = 0;
- if (fsig->params [0]->type == MONO_TYPE_I4)
+ if (fsig->params [0]->type == MONO_TYPE_I4) {
opcode = OP_ATOMIC_ADD_NEW_I4;
+ cfg->has_atomic_add_new_i4 = TRUE;
+ }
#if SIZEOF_REGISTER == 8
else if (fsig->params [0]->type == MONO_TYPE_I8)
opcode = OP_ATOMIC_ADD_NEW_I8;
#endif
-
if (opcode) {
+ if (!mono_arch_opcode_supported (opcode))
+ return NULL;
MONO_INST_NEW (cfg, ins, opcode);
ins->dreg = mono_alloc_ireg (cfg);
ins->inst_basereg = args [0]->dreg;
guint32 opcode;
gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
- if (fsig->params [0]->type == MONO_TYPE_I4)
+ if (fsig->params [0]->type == MONO_TYPE_I4) {
opcode = OP_ATOMIC_EXCHANGE_I4;
+ cfg->has_atomic_exchange_i4 = TRUE;
+ }
#if SIZEOF_REGISTER == 8
else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
(fsig->params [0]->type == MONO_TYPE_I))
opcode = OP_ATOMIC_EXCHANGE_I8;
#else
- else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
+ else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I)) {
opcode = OP_ATOMIC_EXCHANGE_I4;
+ cfg->has_atomic_exchange_i4 = TRUE;
+ }
#endif
else
return NULL;
+ if (!mono_arch_opcode_supported (opcode))
+ return NULL;
+
MONO_INST_NEW (cfg, ins, opcode);
ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
ins->inst_basereg = args [0]->dreg;
else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
size = 8;
if (size == 4) {
+ if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
+ return NULL;
MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
ins->sreg1 = args [0]->dreg;
ins->sreg3 = args [2]->dreg;
ins->type = STACK_I4;
MONO_ADD_INS (cfg->cbb, ins);
+ cfg->has_atomic_cas_i4 = TRUE;
} else if (size == 8) {
+ if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I8))
+ return NULL;
MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
ins->sreg1 = args [0]->dreg;
if (args [0]->opcode == OP_GOT_ENTRY) {
pi = args [0]->inst_p1;
g_assert (pi->opcode == OP_PATCH_INFO);
- g_assert ((int)pi->inst_p1 == MONO_PATCH_INFO_LDSTR);
+ g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
ji = pi->inst_p0;
} else {
- g_assert ((int)args [0]->inst_p1 == MONO_PATCH_INFO_LDSTR);
+ g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
ji = args [0]->inst_p0;
}
}
#endif
+ ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
+ if (ins)
+ return ins;
+
if (COMPILE_LLVM (cfg)) {
ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
if (ins)
{
static double r8_0 = 0.0;
MonoInst *ins;
+ int t;
- int t = rtype->type;
+ rtype = mini_replace_type (rtype);
+ t = rtype->type;
- if (t == MONO_TYPE_VALUETYPE && rtype->data.klass->enumtype)
- t = mono_class_enum_basetype (rtype->data.klass)->type;
if (rtype->byref) {
MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
} else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
}
static void
-emit_init_local (MonoCompile *cfg, int local, MonoType *type)
+emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
+{
+ /*
+  * Emit OP_DUMMY_* initialization opcodes for DREG instead of real ones:
+  * they keep the IR valid for later passes but generate no code.
+  * Types with no dummy opcode fall back to a real emit_init_rvar ().
+  */
+ int t;
+
+ rtype = mini_replace_type (rtype);
+ t = rtype->type;
+
+ if (rtype->byref) {
+ MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
+ } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
+ MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
+ } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
+ MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
+ } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
+ MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
+ } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
+ ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
+ MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
+ } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
+ MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
+ } else {
+ emit_init_rvar (cfg, dreg, rtype);
+ }
+}
+
+/* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
+static void
+emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
{
MonoInst *var = cfg->locals [local];
if (COMPILE_SOFT_FLOAT (cfg)) {
emit_init_rvar (cfg, reg, type);
EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
} else {
- emit_init_rvar (cfg, var->dreg, type);
+ if (init)
+ emit_init_rvar (cfg, var->dreg, type);
+ else
+ emit_dummy_init_rvar (cfg, var->dreg, type);
}
}
token = read32 (ip + 2);
klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
CHECK_TYPELOAD (klass);
- type = &klass->byval_arg;
- emit_init_local (cfg, local, type);
+ type = mini_replace_type (&klass->byval_arg);
+ emit_init_local (cfg, local, type, TRUE);
return ip + 6;
}
load_error:
int i;
#ifdef MONO_ARCH_HAVE_OP_TAIL_CALL
- supported_tail_call = mono_arch_tail_call_supported (mono_method_signature (method), mono_method_signature (cmethod));
+ supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
#else
supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
#endif
cfg->cil_start = ip;
end = ip + header->code_size;
cfg->stat_cil_code_size += header->code_size;
- init_locals = header->init_locals;
seq_points = cfg->gen_seq_points && cfg->method == method;
#ifdef PLATFORM_ANDROID
/*
* Methods without init_locals set could cause asserts in various passes
- * (#497220).
+ * (#497220). To work around this, we emit dummy initialization opcodes
+ * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
+ * on some platforms.
*/
- init_locals = TRUE;
+ if ((cfg->opt & MONO_OPT_UNSAFE) && ARCH_HAVE_DUMMY_INIT)
+ init_locals = header->init_locals;
+ else
+ init_locals = TRUE;
method_definition = method;
while (method_definition->is_inflated) {
dont_verify_stloc = TRUE;
}
- if (mono_debug_using_mono_debugger ())
- cfg->keep_cil_nops = TRUE;
-
if (sig->is_inflated)
generic_context = mono_method_get_context (method);
else if (generic_container)
if (cfg->method == method) {
breakpoint_id = mono_debugger_method_has_breakpoint (method);
- if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
+ if (breakpoint_id) {
MONO_INST_NEW (cfg, ins, OP_BREAK);
MONO_ADD_INS (bblock, ins);
}
}
}
- if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
- /* we use a separate basic block for the initialization code */
- NEW_BBLOCK (cfg, init_localsbb);
- cfg->bb_init = init_localsbb;
- init_localsbb->real_offset = cfg->real_offset;
- start_bblock->next_bb = init_localsbb;
- init_localsbb->next_bb = bblock;
- link_bblock (cfg, start_bblock, init_localsbb);
- link_bblock (cfg, init_localsbb, bblock);
+ /* we use a separate basic block for the initialization code */
+ NEW_BBLOCK (cfg, init_localsbb);
+ cfg->bb_init = init_localsbb;
+ init_localsbb->real_offset = cfg->real_offset;
+ start_bblock->next_bb = init_localsbb;
+ init_localsbb->next_bb = bblock;
+ link_bblock (cfg, start_bblock, init_localsbb);
+ link_bblock (cfg, init_localsbb, bblock);
- cfg->cbb = init_localsbb;
- } else {
- start_bblock->next_bb = bblock;
- link_bblock (cfg, start_bblock, bblock);
- }
+ cfg->cbb = init_localsbb;
if (cfg->gsharedvt && cfg->method == method) {
MonoGSharedVtMethodInfo *info;
info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
info->method = cfg->method;
- // FIXME: Free this
- info->entries = g_ptr_array_new ();
+ info->count_entries = 16;
+ info->entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
cfg->gsharedvt_info = info;
var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* Handle tail calls similarly to calls */
n = fsig->param_count + fsig->hasthis;
+ DISABLE_AOT (cfg);
+
MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
call->method = cmethod;
call->tail_call = TRUE;
emit_pop_lmf (cfg);
if (cfg->ret) {
- MonoType *ret_type = mono_method_signature (method)->ret;
+ MonoType *ret_type = mini_replace_type (mono_method_signature (method)->ret);
if (seq_points && !sym_seq_points) {
/*
NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
ins->type = ldind_type [*ip - CEE_LDIND_I1];
ins->flags |= ins_flag;
- ins_flag = 0;
MONO_ADD_INS (bblock, ins);
*sp++ = ins;
- if (ins->flags & MONO_INST_VOLATILE) {
+ if (ins_flag & MONO_INST_VOLATILE) {
/* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
/* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
emit_memory_barrier (cfg, FullBarrier);
}
+ ins_flag = 0;
++ip;
break;
case CEE_STIND_REF:
CHECK_STACK (2);
sp -= 2;
- NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
- ins->flags |= ins_flag;
- ins_flag = 0;
-
- if (ins->flags & MONO_INST_VOLATILE) {
+ if (ins_flag & MONO_INST_VOLATILE) {
/* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
/* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
emit_memory_barrier (cfg, FullBarrier);
}
+ NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
+ ins->flags |= ins_flag;
+ ins_flag = 0;
+
MONO_ADD_INS (bblock, ins);
if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
ins->dreg = cfg->locals [loc_index]->dreg;
+ ins->flags |= ins_flag;
ip += 5;
ip += stloc_len;
+ if (ins_flag & MONO_INST_VOLATILE) {
+ /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
+ /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
+ emit_memory_barrier (cfg, FullBarrier);
+ }
+ ins_flag = 0;
break;
}
/* Optimize the ldobj+stobj combination */
/* The reference case ends up being a load+store anyway */
- if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
+ /* Skip this if the operation is volatile. */
+ if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
CHECK_STACK (1);
sp --;
}
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
+ ins->flags |= ins_flag;
*sp++ = ins;
+ if (ins_flag & MONO_INST_VOLATILE) {
+ /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
+ /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
+ emit_memory_barrier (cfg, FullBarrier);
+ }
+
ip += 5;
ins_flag = 0;
inline_costs += 1;
addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
} else if (context_used &&
- (!mono_method_is_generic_sharable (cmethod, TRUE) ||
- !mono_class_generic_sharing_enabled (cmethod->klass))) {
+ ((!mono_method_is_generic_sharable (cmethod, TRUE) ||
+ !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
MonoInst *cmethod_addr;
+ /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
+
cmethod_addr = emit_get_rgctx_method (cfg, context_used,
cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
context_used = mini_class_check_context_used (cfg, klass);
if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
- MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
MonoInst *args [3];
/* obj */
/*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
- save_cast_details (cfg, klass, sp [0]->dreg, TRUE, &bblock);
- *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
- reset_cast_details (cfg);
+ *sp++ = emit_castclass_with_cache (cfg, klass, args, &bblock);
ip += 5;
inline_costs += 2;
} else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
if (generic_class_is_reference_type (cfg, klass)) {
/* CASTCLASS FIXME kill this huge slice of duplicated code*/
if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
- MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
MonoInst *args [3];
/* obj */
else
EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
- /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
- *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
+ /* The wrapper doesn't inline well so the bloat of inlining doesn't pay off. */
+ *sp++ = emit_castclass_with_cache (cfg, klass, args, &bblock);
ip += 5;
inline_costs += 2;
} else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
gboolean is_special_static;
MonoType *ftype;
MonoInst *store_val = NULL;
+ MonoInst *thread_ins;
op = *ip;
is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
is_special_static = mono_class_field_is_special_static (field);
+ if (is_special_static && ((gsize)addr & 0x80000000) == 0)
+ thread_ins = mono_get_thread_intrinsic (cfg);
+ else
+ thread_ins = NULL;
+
/* Generate IR to compute the field address */
- if (is_special_static && ((gsize)addr & 0x80000000) == 0 && mono_get_thread_intrinsic (cfg) && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
+ if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
/*
* Fast access to TLS data
* Inline version of get_thread_static_data () in
*/
guint32 offset;
int idx, static_data_reg, array_reg, dreg;
- MonoInst *thread_ins;
GSHAREDVT_FAILURE (op);
// offset &= 0x7fffffff;
// idx = (offset >> 24) - 1;
// return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
-
- thread_ins = mono_get_thread_intrinsic (cfg);
MONO_ADD_INS (cfg->cbb, thread_ins);
static_data_reg = alloc_ireg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
else {
g_assert (vtable);
addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
+ g_assert (addr);
EMIT_NEW_PCONST (cfg, ins, addr);
}
} else {
/* Generate IR to do the actual load/store operation */
+ if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
+ /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
+ /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
+ emit_memory_barrier (cfg, FullBarrier);
+ }
+
if (op == CEE_LDSFLDA) {
ins->klass = mono_class_from_mono_type (ftype);
ins->type = STACK_PTR;
*sp++ = load;
}
}
+
+ if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
+ /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
+ /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
+ emit_memory_barrier (cfg, FullBarrier);
+ }
+
ins_flag = 0;
ip += 5;
break;
token = read32 (ip + 1);
klass = mini_get_class (method, token, generic_context);
CHECK_TYPELOAD (klass);
+ if (ins_flag & MONO_INST_VOLATILE) {
+ /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
+ /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
+ emit_memory_barrier (cfg, FullBarrier);
+ }
/* FIXME: should check item at sp [1] is compatible with the type of the store. */
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
+ ins->flags |= ins_flag;
if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
generic_class_is_reference_type (cfg, klass)) {
/* insert call to write barrier */
cfg->flags |= MONO_CFG_HAS_LDELEMA;
- if (mini_is_gsharedvt_klass (cfg, klass)) {
+ if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
// FIXME-VT: OP_ICONST optimization
addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
} else {
/* FIXME: n is not a normal token */
- cfg->disable_aot = TRUE;
+ DISABLE_AOT (cfg);
EMIT_NEW_PCONST (cfg, ins, NULL);
}
} else {
ip += 6;
inline_costs += 10 * num_calls++;
/* Can't embed random pointers into AOT code */
- cfg->disable_aot = 1;
+ DISABLE_AOT (cfg);
break;
}
case CEE_MONO_JIT_ICALL_ADDR: {
ins = mono_create_tls_get (cfg, key);
if (!ins) {
if (cfg->compile_aot) {
- cfg->disable_aot = TRUE;
+ DISABLE_AOT (cfg);
MONO_INST_NEW (cfg, ins, OP_TLS_GET);
ins->dreg = alloc_preg (cfg);
ins->type = STACK_PTR;
CHECK_STACK (3);
sp -= 3;
- if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
+ /* Skip optimized paths for volatile operations. */
+ if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
- } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
+ } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
/* emit_memset only works when val == 0 */
mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
} else {
+ MonoInst *call;
iargs [0] = sp [0];
iargs [1] = sp [1];
iargs [2] = sp [2];
if (ip [1] == CEE_CPBLK) {
+ /*
+ * FIXME: It's unclear whether we should be emitting both the acquire
+ * and release barriers for cpblk. It is technically both a load and
+ * store operation, so it seems like that's the sensible thing to do.
+ */
MonoMethod *memcpy_method = get_memcpy_method ();
- mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
+ if (ins_flag & MONO_INST_VOLATILE) {
+ /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
+ /* FIXME it's questionable if release semantics require a full barrier or just a StoreStore barrier */
+ emit_memory_barrier (cfg, FullBarrier);
+ }
+ call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
+ call->flags |= ins_flag;
+ if (ins_flag & MONO_INST_VOLATILE) {
+ /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
+ /* FIXME it's questionable if acquire semantics require a full barrier or just a LoadLoad barrier */
+ emit_memory_barrier (cfg, FullBarrier);
+ }
} else {
MonoMethod *memset_method = get_memset_method ();
- mono_emit_method_call (cfg, memset_method, iargs, NULL);
+ if (ins_flag & MONO_INST_VOLATILE) {
+ /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
+ /* FIXME it's questionable if release semantics require a full barrier or just a StoreStore barrier */
+ emit_memory_barrier (cfg, FullBarrier);
+ }
+ call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
+ call->flags |= ins_flag;
}
}
ip += 2;
+ ins_flag = 0;
inline_costs += 1;
break;
}
bblock->next_bb = end_bblock;
}
- if (cfg->lmf_var) {
- cfg->cbb = init_localsbb;
- emit_push_lmf (cfg);
- }
-
if (cfg->method == method && cfg->domainvar) {
MonoInst *store;
MonoInst *get_domain;
cfg->cbb = init_localsbb;
- if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
+ if ((get_domain = mono_get_domain_intrinsic (cfg))) {
+ MONO_ADD_INS (cfg->cbb, get_domain);
+ } else {
get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
}
- else {
- get_domain->dreg = alloc_preg (cfg);
- MONO_ADD_INS (cfg->cbb, get_domain);
- }
NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
MONO_ADD_INS (cfg->cbb, store);
}
if (cfg->method == method && cfg->got_var)
mono_emit_load_got_addr (cfg);
- if (init_locals) {
+ if (init_localsbb) {
cfg->cbb = init_localsbb;
cfg->ip = NULL;
for (i = 0; i < header->num_locals; ++i) {
- emit_init_local (cfg, i, header->locals [i]);
+ emit_init_local (cfg, i, header->locals [i], init_locals);
}
}
}
}
+ if (cfg->lmf_var && cfg->method == method) {
+ cfg->cbb = init_localsbb;
+ emit_push_lmf (cfg);
+ }
+
if (seq_points) {
MonoBasicBlock *bb;
/* Arguments are implicitly global */
/* Putting R4 vars into registers doesn't work currently */
/* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
- if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var) {
+ if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
/*
* Make that the variable's liveness interval doesn't contain a call, since
* that would cause the lvreg to be spilled, making the whole optimization
}
if (cfg->gsharedvt) {
- gsharedvt_vreg_to_idx = g_new0 (int, cfg->next_vreg);
+ gsharedvt_vreg_to_idx = mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
for (i = 0; i < cfg->num_varinfo; ++i) {
MonoInst *ins = cfg->varinfo [i];