var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* prevent it from being register allocated */
- var->flags |= MONO_INST_INDIRECT;
+ var->flags |= MONO_INST_VOLATILE;
g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
}
var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
/* prevent it from being register allocated */
- var->flags |= MONO_INST_INDIRECT;
+ var->flags |= MONO_INST_VOLATILE;
g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
if (!cfg->rgctx_var) {
cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* force the var to be stack allocated */
- cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
+ cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
}
return cfg->rgctx_var;
}
}
+/*
+ * emit_tls_set:
+ *
+ *   Emit IR to store the vreg SREG1 into the TLS slot identified by TLS_KEY.
+ */
+static void
+emit_tls_set (MonoCompile *cfg, int sreg1, int tls_key)
+{
+ MonoInst *ins, *c;
+
+ if (cfg->compile_aot) {
+ /* Under AOT the TLS offset is only known at load time, so materialize it into a vreg and use the _REG variant */
+ EMIT_NEW_TLS_OFFSETCONST (cfg, c, tls_key);
+ MONO_INST_NEW (cfg, ins, OP_TLS_SET_REG);
+ ins->sreg1 = sreg1;
+ ins->sreg2 = c->dreg;
+ MONO_ADD_INS (cfg->cbb, ins);
+ } else {
+ /* JIT: the offset can be resolved right now and encoded as an immediate */
+ MONO_INST_NEW (cfg, ins, OP_TLS_SET);
+ ins->sreg1 = sreg1;
+ ins->inst_offset = mini_get_tls_offset (tls_key);
+ MONO_ADD_INS (cfg->cbb, ins);
+ }
+}
+
+/*
+ * emit_push_lmf:
+ *
+ * Emit IR to push the current LMF onto the LMF stack.
+ * This is a no-op unless the method uses IR-based LMF handling (cfg->lmf_ir).
+ */
+static void
+emit_push_lmf (MonoCompile *cfg)
+{
+ /*
+ * Emit IR to push the LMF:
+ * lmf_addr = <lmf_addr from tls>
+ * lmf->lmf_addr = lmf_addr
+ * lmf->prev_lmf = *lmf_addr
+ * *lmf_addr = lmf
+ */
+ int lmf_reg, prev_lmf_reg;
+ MonoInst *ins, *lmf_ins;
+
+ if (!cfg->lmf_ir)
+ return;
+
+ if (cfg->lmf_ir_mono_lmf) {
+ /* The arch can read/write the LMF TLS slot directly: link through it */
+ /* Load current lmf */
+ lmf_ins = mono_get_lmf_intrinsic (cfg);
+ g_assert (lmf_ins);
+ MONO_ADD_INS (cfg->cbb, lmf_ins);
+ /* Address of this frame's MonoLMF local */
+ EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
+ lmf_reg = ins->dreg;
+ /* Save previous_lmf */
+ EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
+ /* Set new LMF */
+ emit_tls_set (cfg, lmf_reg, TLS_KEY_LMF);
+ } else {
+ /*
+ * Store lmf_addr in a variable, so it can be allocated to a global register.
+ */
+ if (!cfg->lmf_addr_var)
+ cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
+
+ /* Prefer the arch intrinsic; fall back to the mono_get_lmf_addr icall */
+ lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
+ if (lmf_ins)
+ MONO_ADD_INS (cfg->cbb, lmf_ins);
+ else
+ lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
+ lmf_ins->dreg = cfg->lmf_addr_var->dreg;
+
+ EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
+ lmf_reg = ins->dreg;
+
+ prev_lmf_reg = alloc_preg (cfg);
+ /* Save previous_lmf */
+ EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
+ EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
+ /* Set new lmf */
+ EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
+ }
+}
+
+/*
+ * emit_pop_lmf:
+ *
+ * Emit IR to pop the current LMF from the LMF stack.
+ * Mirror of emit_push_lmf (); a no-op unless cfg->lmf_ir is set.
+ */
+static void
+emit_pop_lmf (MonoCompile *cfg)
+{
+ int lmf_reg, lmf_addr_reg, prev_lmf_reg;
+ MonoInst *ins;
+
+ if (!cfg->lmf_ir)
+ return;
+
+ /* Address of this frame's MonoLMF local */
+ EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
+ lmf_reg = ins->dreg;
+
+ if (cfg->lmf_ir_mono_lmf) {
+ /* Restore the saved link directly into the LMF TLS slot */
+ /* Load previous_lmf */
+ prev_lmf_reg = alloc_preg (cfg);
+ EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
+ /* Set new LMF */
+ emit_tls_set (cfg, prev_lmf_reg, TLS_KEY_LMF);
+ } else {
+ /*
+ * Emit IR to pop the LMF:
+ * *(lmf->lmf_addr) = lmf->prev_lmf
+ */
+ /* This could be called before emit_push_lmf () */
+ if (!cfg->lmf_addr_var)
+ cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
+ lmf_addr_reg = cfg->lmf_addr_var->dreg;
+
+ prev_lmf_reg = alloc_preg (cfg);
+ EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
+ EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
+ }
+}
+
static int
ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
{
if (type->byref)
- return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
+ return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
handle_enum:
type = mini_get_basic_type_from_generic (gsctx, type);
switch (type->type) {
case MONO_TYPE_VOID:
- return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
+ return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
case MONO_TYPE_I1:
case MONO_TYPE_U1:
case MONO_TYPE_BOOLEAN:
case MONO_TYPE_CHAR:
case MONO_TYPE_I4:
case MONO_TYPE_U4:
- return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
+ return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_PTR:
case MONO_TYPE_FNPTR:
- return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
+ return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
case MONO_TYPE_CLASS:
case MONO_TYPE_STRING:
case MONO_TYPE_OBJECT:
case MONO_TYPE_SZARRAY:
case MONO_TYPE_ARRAY:
- return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
+ return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
case MONO_TYPE_I8:
case MONO_TYPE_U8:
- return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
+ return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
case MONO_TYPE_R4:
case MONO_TYPE_R8:
- return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
+ return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
case MONO_TYPE_VALUETYPE:
if (type->data.klass->enumtype) {
type = mono_class_enum_basetype (type->data.klass);
goto handle_enum;
} else
- return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
+ return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
case MONO_TYPE_TYPEDBYREF:
- return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
+ return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
case MONO_TYPE_GENERICINST:
type = &type->data.generic_class->container_class->byval_arg;
goto handle_enum;
case MONO_TYPE_VAR:
case MONO_TYPE_MVAR:
/* gsharedvt */
- return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
+ return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
default:
g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
}
callvirt_to_call (int opcode)
{
switch (opcode) {
- case OP_CALLVIRT:
+ case OP_CALL_MEMBASE:
return OP_CALL;
- case OP_VOIDCALLVIRT:
+ case OP_VOIDCALL_MEMBASE:
return OP_VOIDCALL;
- case OP_FCALLVIRT:
+ case OP_FCALL_MEMBASE:
return OP_FCALL;
- case OP_VCALLVIRT:
+ case OP_VCALL_MEMBASE:
return OP_VCALL;
- case OP_LCALLVIRT:
+ case OP_LCALL_MEMBASE:
return OP_LCALL;
default:
g_assert_not_reached ();
return -1;
}
-static int
-callvirt_to_call_membase (int opcode)
-{
- switch (opcode) {
- case OP_CALLVIRT:
- return OP_CALL_MEMBASE;
- case OP_VOIDCALLVIRT:
- return OP_VOIDCALL_MEMBASE;
- case OP_FCALLVIRT:
- return OP_FCALL_MEMBASE;
- case OP_LCALLVIRT:
- return OP_LCALL_MEMBASE;
- case OP_VCALLVIRT:
- return OP_VCALL_MEMBASE;
- default:
- g_assert_not_reached ();
- }
-
- return -1;
-}
-
#ifdef MONO_ARCH_HAVE_IMT
/* Either METHOD or IMT_ARG needs to be set */
static void
mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
MonoInst **args, int calli, int virtual, int tail, int rgctx, int unbox_trampoline)
{
+ MonoType *sig_ret;
MonoCallInst *call;
#ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
int i;
call->args = args;
call->signature = sig;
call->rgctx_reg = rgctx;
+ sig_ret = sig->ret;
- type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
+ type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
if (tail) {
- if (mini_type_is_vtype (cfg, sig->ret)) {
+ if (mini_type_is_vtype (cfg, sig_ret)) {
call->vret_var = cfg->vret_addr;
//g_assert_not_reached ();
}
- } else if (mini_type_is_vtype (cfg, sig->ret)) {
- MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
+ } else if (mini_type_is_vtype (cfg, sig_ret)) {
+ MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
MonoInst *loada;
temp->backend.is_pinvoke = sig->pinvoke;
call->inst.dreg = temp->dreg;
call->vret_var = loada;
- } else if (!MONO_TYPE_IS_VOID (sig->ret))
+ } else if (!MONO_TYPE_IS_VOID (sig_ret))
call->inst.dreg = alloc_dreg (cfg, call->inst.type);
#ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
MONO_EMIT_NULL_CHECK (cfg, this_reg);
/* Make a call to delegate->invoke_impl */
- call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
call->inst.inst_basereg = this_reg;
call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
call->inst.opcode = callvirt_to_call (call->inst.opcode);
} else {
- call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
-
vtable_reg = alloc_preg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
has_card_table_wb = TRUE;
#endif
- if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0) {
+ if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
MonoInst *wbarrier;
MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
* inside the inlined code
*/
if (!(cfg->opt & MONO_OPT_SHARED)) {
- if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
+ /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
+ if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
+ vtable = mono_class_vtable (cfg->domain, method->klass);
+ if (!vtable)
+ return FALSE;
+ mono_runtime_class_init (vtable);
+ } else if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
if (cfg->run_cctors && method->klass->has_cctor) {
/*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
if (!method->klass->runtime_info)
}
static gboolean
-mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
+mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
{
- if (vtable->initialized && !cfg->compile_aot)
- return FALSE;
+ if (!cfg->compile_aot) {
+ g_assert (vtable);
+ if (vtable->initialized)
+ return FALSE;
+ }
- if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
+ if (klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
return FALSE;
- if (!mono_class_needs_cctor_run (vtable->klass, method))
+ if (!mono_class_needs_cctor_run (klass, method))
return FALSE;
- if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
+ if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
/* The initialization is already done before the method is called */
return FALSE;
case MONO_BREAK_POLICY_NEVER:
return FALSE;
case MONO_BREAK_POLICY_ON_DBG:
- return mono_debug_using_mono_debugger ();
+ g_warning ("mdb no longer supported");
+ return FALSE;
default:
g_warning ("Incorrect value returned from break policy callback");
return FALSE;
#endif
+/*
+ * emit_init_rvar:
+ *
+ *   Emit IR to initialize the vreg DREG to the zero/default value of RTYPE.
+ * Works from the MonoType rather than a stack type, so it covers byrefs,
+ * enums, gsharedvt type variables and generic instances as well.
+ */
static void
-emit_init_rvar (MonoCompile *cfg, MonoInst *rvar, MonoType *rtype)
+emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
{
static double r8_0 = 0.0;
MonoInst *ins;
- switch (rvar->type) {
- case STACK_I4:
- MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
- break;
- case STACK_I8:
- MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
- break;
- case STACK_PTR:
- case STACK_MP:
- case STACK_OBJ:
- MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
- break;
- case STACK_R8:
+ int t = rtype->type;
+
+ /* An enum's default value is the zero of its underlying integral type */
+ if (t == MONO_TYPE_VALUETYPE && rtype->data.klass->enumtype)
+ t = mono_class_enum_basetype (rtype->data.klass)->type;
+ if (rtype->byref) {
+ MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
+ } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
+ MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
+ } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
+ MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
+ } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
+ /* Floating point zero is loaded from a static constant */
MONO_INST_NEW (cfg, ins, OP_R8CONST);
ins->type = STACK_R8;
ins->inst_p0 = (void*)&r8_0;
- ins->dreg = rvar->dreg;
+ ins->dreg = dreg;
MONO_ADD_INS (cfg->cbb, ins);
- break;
- case STACK_VTYPE:
- MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (rtype));
- break;
- default:
- g_assert_not_reached ();
+ /* Value types (incl. typedbyref, vtype generic insts and gsharedvt type vars) are zeroed wholesale */
+ } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
+ ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
+ MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
+ } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
+ MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
+ } else {
+ /* Everything else is a reference/pointer: null it */
+ MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
+ }
+}
+
+/*
+ * emit_init_local:
+ *
+ *   Emit IR to initialize the local variable LOCAL (an index into cfg->locals)
+ * to the default value of TYPE.
+ */
+static void
+emit_init_local (MonoCompile *cfg, int local, MonoType *type)
+{
+ MonoInst *var = cfg->locals [local];
+ if (COMPILE_SOFT_FLOAT (cfg)) {
+ MonoInst *store;
+ /*
+ * NOTE(review): under soft-float the zero is materialized into a fresh vreg
+ * and then stored back through an explicit LOCSTORE — presumably so the
+ * soft-float decomposition pass sees the store; confirm.
+ */
+ int reg = alloc_dreg (cfg, var->type);
+ emit_init_rvar (cfg, reg, type);
+ EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
+ } else {
+ emit_init_rvar (cfg, var->dreg, type);
}
}
if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
cfg->cbb = bb;
- emit_init_rvar (cfg, rvar, fsig->ret);
+ emit_init_rvar (cfg, rvar->dreg, fsig->ret);
}
}
}
* set, so set it to a dummy value.
*/
if (!ret_var_set)
- emit_init_rvar (cfg, rvar, fsig->ret);
+ emit_init_rvar (cfg, rvar->dreg, fsig->ret);
EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
*sp++ = ins;
case MONO_TYPE_R4:
size = 4; break;
case MONO_TYPE_R8:
-#ifdef ARM_FPU_FPA
- return NULL; /* stupid ARM FP swapped format */
-#endif
case MONO_TYPE_I8:
case MONO_TYPE_U8:
size = 8; break;
{
int local, token;
MonoClass *klass;
+ MonoType *type;
if (size == 1) {
local = ip [1];
}
if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
- gboolean skip = FALSE;
-
/* From the INITOBJ case */
token = read32 (ip + 2);
klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
CHECK_TYPELOAD (klass);
- if (mini_type_is_reference (cfg, &klass->byval_arg)) {
- MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
- } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
- MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
- } else {
- skip = TRUE;
- }
-
- if (!skip)
- return ip + 6;
+ type = &klass->byval_arg;
+ emit_init_local (cfg, local, type);
+ return ip + 6;
}
load_error:
return NULL;
}
static gboolean
-is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig)
+is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
{
gboolean supported_tail_call;
int i;
-#ifdef MONO_ARCH_USE_OP_TAIL_CALL
- supported_tail_call = MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
+#ifdef MONO_ARCH_HAVE_OP_TAIL_CALL
+ supported_tail_call = mono_arch_tail_call_supported (mono_method_signature (method), mono_method_signature (cmethod));
#else
supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
#endif
supported_tail_call = FALSE;
if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
supported_tail_call = FALSE;
+ if (call_opcode != CEE_CALL)
+ supported_tail_call = FALSE;
/* Debugging support */
#if 0
MonoClass *klass;
MonoClass *constrained_call = NULL;
unsigned char *ip, *end, *target, *err_pos;
- static double r8_0 = 0.0;
MonoMethodSignature *sig;
MonoGenericContext *generic_context = NULL;
MonoGenericContainer *generic_container = NULL;
dont_verify_stloc = TRUE;
}
- if (mono_debug_using_mono_debugger ())
- cfg->keep_cil_nops = TRUE;
-
if (sig->is_inflated)
generic_context = mono_method_get_context (method);
else if (generic_container)
tblock->real_offset = clause->handler_offset;
tblock->flags |= BB_EXCEPTION_HANDLER;
- link_bblock (cfg, try_bb, tblock);
+ /*
+ * Linking the try block with the EH block hinders inlining as we won't be able to
+ * merge the bblocks from inlining and produce an artificial hole for no good reason.
+ */
+ if (COMPILE_LLVM (cfg))
+ link_bblock (cfg, try_bb, tblock);
if (*(ip + clause->handler_offset) == CEE_POP)
tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
if (cfg->method == method) {
breakpoint_id = mono_debugger_method_has_breakpoint (method);
- if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
+ if (breakpoint_id) {
MONO_INST_NEW (cfg, ins, OP_BREAK);
MONO_ADD_INS (bblock, ins);
}
var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* prevent it from being register allocated */
- //var->flags |= MONO_INST_INDIRECT;
+ //var->flags |= MONO_INST_VOLATILE;
cfg->gsharedvt_info_var = var;
ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
/* Allocate locals */
locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* prevent it from being register allocated */
- //locals_var->flags |= MONO_INST_INDIRECT;
+ //locals_var->flags |= MONO_INST_VOLATILE;
cfg->gsharedvt_locals_var = locals_var;
dreg = alloc_ireg (cfg);
/* FIXME: Is there a better way to do this?
We need the variable live for the duration
of the whole method. */
- cfg->args [0]->flags |= MONO_INST_INDIRECT;
+ cfg->args [0]->flags |= MONO_INST_VOLATILE;
}
}
if (mono_security_cas_enabled ())
CHECK_CFG_EXCEPTION;
-#ifdef MONO_ARCH_USE_OP_TAIL_CALL
- {
+ if (ARCH_HAVE_OP_TAIL_CALL) {
MonoMethodSignature *fsig = mono_method_signature (cmethod);
int i, n;
mono_arch_emit_call (cfg, call);
MONO_ADD_INS (bblock, (MonoInst*)call);
- }
-#else
- for (i = 0; i < num_args; ++i)
- /* Prevent arguments from being optimized away */
- arg_array [i]->flags |= MONO_INST_VOLATILE;
+ } else {
+ for (i = 0; i < num_args; ++i)
+ /* Prevent arguments from being optimized away */
+ arg_array [i]->flags |= MONO_INST_VOLATILE;
- MONO_INST_NEW_CALL (cfg, call, OP_JMP);
- ins = (MonoInst*)call;
- ins->inst_p0 = cmethod;
- MONO_ADD_INS (bblock, ins);
-#endif
+ MONO_INST_NEW_CALL (cfg, call, OP_JMP);
+ ins = (MonoInst*)call;
+ ins->inst_p0 = cmethod;
+ MONO_ADD_INS (bblock, ins);
+ }
ip += 5;
start_new_bblock = 1;
/*
* Making generic calls out of gsharedvt methods.
*/
- if (cmethod && cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
+ if (cmethod && cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig)) {
MonoRgctxInfoType info_type;
if (virtual) {
/* FIXME: runtime generic context pointer for jumps? */
/* FIXME: handle this for generic sharing eventually */
if (cmethod && (ins_flag & MONO_INST_TAILCALL) &&
- !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig))
+ !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
supported_tail_call = TRUE;
- if (supported_tail_call) {
- if (call_opcode != CEE_CALL)
- supported_tail_call = FALSE;
- }
if (supported_tail_call) {
MonoCallInst *call;
//printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
- if (ARCH_USE_OP_TAIL_CALL) {
+ if (ARCH_HAVE_OP_TAIL_CALL) {
/* Handle tail calls similarly to normal calls */
tail_call = TRUE;
} else {
cfg->ret_var_set = TRUE;
}
} else {
+ if (cfg->lmf_var && cfg->cbb->in_count)
+ emit_pop_lmf (cfg);
+
if (cfg->ret) {
MonoType *ret_type = mono_method_signature (method)->ret;
if (cmethod->klass->valuetype) {
iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
- MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
+ emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
alloc = NULL;
alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
*sp = alloc;
} else {
- MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
+ MonoVTable *vtable = NULL;
+ if (!cfg->compile_aot)
+ vtable = mono_class_vtable (cfg->domain, cmethod->klass);
CHECK_TYPELOAD (cmethod->klass);
/*
* call in mono_jit_runtime_invoke () can abort the finalizer thread.
* As a workaround, we call class cctors before allocating objects.
*/
- if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
- mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
+ if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
+ mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, cmethod->klass, helper_sig_class_init_trampoline, NULL);
if (cfg->verbose_level > 2)
printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
- class_inits = g_slist_prepend (class_inits, vtable);
+ class_inits = g_slist_prepend (class_inits, cmethod->klass);
}
alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
gboolean is_special_static;
MonoType *ftype;
MonoInst *store_val = NULL;
+ MonoInst *thread_ins;
op = *ip;
is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
is_special_static = mono_class_field_is_special_static (field);
+ if (is_special_static && ((gsize)addr & 0x80000000) == 0)
+ thread_ins = mono_get_thread_intrinsic (cfg);
+ else
+ thread_ins = NULL;
+
/* Generate IR to compute the field address */
- if (is_special_static && ((gsize)addr & 0x80000000) == 0 && mono_get_thread_intrinsic (cfg) && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
+ if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
/*
* Fast access to TLS data
* Inline version of get_thread_static_data () in
*/
guint32 offset;
int idx, static_data_reg, array_reg, dreg;
- MonoInst *thread_ins;
GSHAREDVT_FAILURE (op);
// offset &= 0x7fffffff;
// idx = (offset >> 24) - 1;
// return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
-
- thread_ins = mono_get_thread_intrinsic (cfg);
MONO_ADD_INS (cfg->cbb, thread_ins);
static_data_reg = alloc_ireg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
} else {
- MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
+ MonoVTable *vtable = NULL;
+ if (!cfg->compile_aot)
+ vtable = mono_class_vtable (cfg->domain, klass);
CHECK_TYPELOAD (klass);
+
if (!addr) {
- if (mini_field_access_needs_cctor_run (cfg, method, vtable)) {
- if (!(g_slist_find (class_inits, vtable))) {
- mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
+ if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
+ if (!(g_slist_find (class_inits, klass))) {
+ mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, klass, helper_sig_class_init_trampoline, NULL);
if (cfg->verbose_level > 2)
printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
- class_inits = g_slist_prepend (class_inits, vtable);
+ class_inits = g_slist_prepend (class_inits, klass);
}
} else {
if (cfg->run_cctors) {
/* This makes so that inline cannot trigger */
/* .cctors: too many apps depend on them */
/* running with a specific order... */
+ g_assert (vtable);
if (! vtable->initialized)
INLINE_FAILURE ("class init");
ex = mono_runtime_class_init_full (vtable, FALSE);
}
}
}
- addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
-
if (cfg->compile_aot)
EMIT_NEW_SFLDACONST (cfg, ins, field);
- else
+ else {
+ g_assert (vtable);
+ addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
EMIT_NEW_PCONST (cfg, ins, addr);
+ }
} else {
MonoInst *iargs [1];
EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
bblock->out_of_line = TRUE;
ip += 2;
break;
- case CEE_MONO_TLS:
+ case CEE_MONO_TLS: {
+ int key;
+
CHECK_STACK_OVF (1);
CHECK_OPSIZE (6);
- MONO_INST_NEW (cfg, ins, OP_TLS_GET);
- ins->dreg = alloc_preg (cfg);
- ins->inst_offset = (gint32)read32 (ip + 2);
+ key = (gint32)read32 (ip + 2);
+ g_assert (key < TLS_KEY_NUM);
+
+ ins = mono_create_tls_get (cfg, key);
+ if (!ins) {
+ if (cfg->compile_aot) {
+ cfg->disable_aot = TRUE;
+ MONO_INST_NEW (cfg, ins, OP_TLS_GET);
+ ins->dreg = alloc_preg (cfg);
+ ins->type = STACK_PTR;
+ } else {
+ g_assert_not_reached ();
+ }
+ }
ins->type = STACK_PTR;
MONO_ADD_INS (bblock, ins);
*sp++ = ins;
ip += 6;
break;
+ }
case CEE_MONO_DYN_CALL: {
MonoCallInst *call;
if (!cfg->dyn_call_var) {
cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* prevent it from being register allocated */
- cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
+ cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
}
/* Has to use a call inst since it local regalloc expects it */
ins->sreg2 = sp [1]->dreg;
MONO_ADD_INS (bblock, ins);
-#ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
-#endif
ip += 2;
inline_costs += 10 * num_calls++;
cfg->cbb = init_localsbb;
- if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
+ if ((get_domain = mono_get_domain_intrinsic (cfg))) {
+ MONO_ADD_INS (cfg->cbb, get_domain);
+ } else {
get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
}
- else {
- get_domain->dreg = alloc_preg (cfg);
- MONO_ADD_INS (cfg->cbb, get_domain);
- }
NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
MONO_ADD_INS (cfg->cbb, store);
}
mono_emit_load_got_addr (cfg);
if (init_locals) {
- MonoInst *store;
-
cfg->cbb = init_localsbb;
cfg->ip = NULL;
for (i = 0; i < header->num_locals; ++i) {
- MonoType *ptype = header->locals [i];
- int t = ptype->type;
- dreg = cfg->locals [i]->dreg;
-
- if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
- t = mono_class_enum_basetype (ptype->data.klass)->type;
- if (ptype->byref) {
- MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
- } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
- MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
- } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
- MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
- } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
- MONO_INST_NEW (cfg, ins, OP_R8CONST);
- ins->type = STACK_R8;
- ins->inst_p0 = (void*)&r8_0;
- ins->dreg = alloc_dreg (cfg, STACK_R8);
- MONO_ADD_INS (init_localsbb, ins);
- EMIT_NEW_LOCSTORE (cfg, store, i, ins);
- } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
- ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
- MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
- } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, ptype)) {
- MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
- } else {
- MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
- }
+ emit_init_local (cfg, i, header->locals [i]);
}
}
}
}
+ if (cfg->lmf_var && cfg->method == method) {
+ cfg->cbb = init_localsbb;
+ emit_push_lmf (cfg);
+ }
+
if (seq_points) {
MonoBasicBlock *bb;
/* Arguments are implicitly global */
/* Putting R4 vars into registers doesn't work currently */
/* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
- if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var) {
+ if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
/*
* Make that the variable's liveness interval doesn't contain a call, since
* that would cause the lvreg to be spilled, making the whole optimization