}
}
+/*
+ * emit_tls_set:
+ *
+ *   Emit IR to store SREG1 into the TLS slot identified by TLS_KEY.
+ */
+static void
+emit_tls_set (MonoCompile *cfg, int sreg1, int tls_key)
+{
+ MonoInst *ins, *c;
+
+ if (cfg->compile_aot) {
+ /* Under AOT the TLS offset is only known at runtime: load it as a
+ * per-key constant and pass it to OP_TLS_SET_REG in a register. */
+ EMIT_NEW_TLS_OFFSETCONST (cfg, c, tls_key);
+ MONO_INST_NEW (cfg, ins, OP_TLS_SET_REG);
+ ins->sreg1 = sreg1;
+ ins->sreg2 = c->dreg;
+ MONO_ADD_INS (cfg->cbb, ins);
+ } else {
+ /* JIT case: the offset can be resolved now and encoded as an immediate. */
+ MONO_INST_NEW (cfg, ins, OP_TLS_SET);
+ ins->sreg1 = sreg1;
+ ins->inst_offset = mini_get_tls_offset (tls_key);
+ MONO_ADD_INS (cfg->cbb, ins);
+ }
+}
+
+/*
+ * emit_push_lmf:
+ *
+ *   Emit IR to push the current LMF onto the LMF stack.
+ * No-op unless the method is compiled with LMF IR (cfg->lmf_ir).
+ */
+static void
+emit_push_lmf (MonoCompile *cfg)
+{
+ /*
+ * Emit IR to push the LMF:
+ * lmf_addr = <lmf_addr from tls>
+ * lmf->lmf_addr = lmf_addr
+ * lmf->prev_lmf = *lmf_addr
+ * *lmf_addr = lmf
+ */
+ int lmf_reg, prev_lmf_reg;
+ MonoInst *ins, *lmf_ins;
+
+ if (!cfg->lmf_ir)
+ return;
+
+ if (cfg->lmf_ir_mono_lmf) {
+ /*
+ * The TLS slot holds the MonoLMF* itself (no separate lmf_addr
+ * indirection): link the current TLS value as previous_lmf and
+ * install this method's lmf_var as the new head.
+ */
+ /* Load current lmf */
+ lmf_ins = mono_get_lmf_intrinsic (cfg);
+ g_assert (lmf_ins);
+ MONO_ADD_INS (cfg->cbb, lmf_ins);
+ EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
+ lmf_reg = ins->dreg;
+ /* Save previous_lmf */
+ EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
+ /* Set new LMF */
+ emit_tls_set (cfg, lmf_reg, TLS_KEY_LMF);
+ } else {
+ /*
+ * Store lmf_addr in a variable, so it can be allocated to a global register.
+ */
+ if (!cfg->lmf_addr_var)
+ cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
+
+ /* Prefer the arch intrinsic; fall back to an icall if unavailable. */
+ lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
+ if (lmf_ins)
+ MONO_ADD_INS (cfg->cbb, lmf_ins);
+ else
+ lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
+ lmf_ins->dreg = cfg->lmf_addr_var->dreg;
+
+ EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
+ lmf_reg = ins->dreg;
+
+ prev_lmf_reg = alloc_preg (cfg);
+ /* Save previous_lmf */
+ EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
+ EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
+ /* Set new lmf */
+ EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
+ }
+}
+
+/*
+ * emit_pop_lmf:
+ *
+ *   Emit IR to pop the current LMF from the LMF stack.
+ * Mirrors emit_push_lmf (); no-op unless cfg->lmf_ir is set.
+ */
+static void
+emit_pop_lmf (MonoCompile *cfg)
+{
+ int lmf_reg, lmf_addr_reg, prev_lmf_reg;
+ MonoInst *ins;
+
+ if (!cfg->lmf_ir)
+ return;
+
+ EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
+ lmf_reg = ins->dreg;
+
+ if (cfg->lmf_ir_mono_lmf) {
+ /* TLS slot holds the MonoLMF* directly: restore the saved link. */
+ /* Load previous_lmf */
+ prev_lmf_reg = alloc_preg (cfg);
+ EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
+ /* Set new LMF */
+ emit_tls_set (cfg, prev_lmf_reg, TLS_KEY_LMF);
+ } else {
+ /*
+ * Emit IR to pop the LMF:
+ * *(lmf->lmf_addr) = lmf->prev_lmf
+ */
+ /* This could be called before emit_push_lmf () */
+ if (!cfg->lmf_addr_var)
+ cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
+ lmf_addr_reg = cfg->lmf_addr_var->dreg;
+
+ prev_lmf_reg = alloc_preg (cfg);
+ EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
+ EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
+ }
+}
+
static int
ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
{
if (type->byref)
- return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
+ return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
handle_enum:
type = mini_get_basic_type_from_generic (gsctx, type);
switch (type->type) {
case MONO_TYPE_VOID:
- return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
+ return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
case MONO_TYPE_I1:
case MONO_TYPE_U1:
case MONO_TYPE_BOOLEAN:
case MONO_TYPE_CHAR:
case MONO_TYPE_I4:
case MONO_TYPE_U4:
- return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
+ return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_PTR:
case MONO_TYPE_FNPTR:
- return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
+ return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
case MONO_TYPE_CLASS:
case MONO_TYPE_STRING:
case MONO_TYPE_OBJECT:
case MONO_TYPE_SZARRAY:
case MONO_TYPE_ARRAY:
- return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
+ return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
case MONO_TYPE_I8:
case MONO_TYPE_U8:
- return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
+ return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
case MONO_TYPE_R4:
case MONO_TYPE_R8:
- return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
+ return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
case MONO_TYPE_VALUETYPE:
if (type->data.klass->enumtype) {
type = mono_class_enum_basetype (type->data.klass);
goto handle_enum;
} else
- return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
+ return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
case MONO_TYPE_TYPEDBYREF:
- return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
+ return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
case MONO_TYPE_GENERICINST:
type = &type->data.generic_class->container_class->byval_arg;
goto handle_enum;
case MONO_TYPE_VAR:
case MONO_TYPE_MVAR:
/* gsharedvt */
- return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
+ return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
default:
g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
}
callvirt_to_call (int opcode)
{
switch (opcode) {
- case OP_CALLVIRT:
+ case OP_CALL_MEMBASE:
return OP_CALL;
- case OP_VOIDCALLVIRT:
+ case OP_VOIDCALL_MEMBASE:
return OP_VOIDCALL;
- case OP_FCALLVIRT:
+ case OP_FCALL_MEMBASE:
return OP_FCALL;
- case OP_VCALLVIRT:
+ case OP_VCALL_MEMBASE:
return OP_VCALL;
- case OP_LCALLVIRT:
+ case OP_LCALL_MEMBASE:
return OP_LCALL;
default:
g_assert_not_reached ();
return -1;
}
-static int
-callvirt_to_call_membase (int opcode)
-{
- switch (opcode) {
- case OP_CALLVIRT:
- return OP_CALL_MEMBASE;
- case OP_VOIDCALLVIRT:
- return OP_VOIDCALL_MEMBASE;
- case OP_FCALLVIRT:
- return OP_FCALL_MEMBASE;
- case OP_LCALLVIRT:
- return OP_LCALL_MEMBASE;
- case OP_VCALLVIRT:
- return OP_VCALL_MEMBASE;
- default:
- g_assert_not_reached ();
- }
-
- return -1;
-}
-
#ifdef MONO_ARCH_HAVE_IMT
/* Either METHOD or IMT_ARG needs to be set */
static void
MONO_EMIT_NULL_CHECK (cfg, this_reg);
/* Make a call to delegate->invoke_impl */
- call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
call->inst.inst_basereg = this_reg;
call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
call->inst.opcode = callvirt_to_call (call->inst.opcode);
} else {
- call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
-
vtable_reg = alloc_preg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
has_card_table_wb = TRUE;
#endif
- if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0) {
+ if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
MonoInst *wbarrier;
MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
if (!vtable)
return FALSE;
mono_runtime_class_init (vtable);
- } if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
+ } else if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
if (cfg->run_cctors && method->klass->has_cctor) {
/*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
if (!method->klass->runtime_info)
case MONO_BREAK_POLICY_NEVER:
return FALSE;
case MONO_BREAK_POLICY_ON_DBG:
- return mono_debug_using_mono_debugger ();
+ g_warning ("mdb no longer supported");
+ return FALSE;
default:
g_warning ("Incorrect value returned from break policy callback");
return FALSE;
}
}
+/*
+ * emit_init_local:
+ *
+ *   Emit IR to initialize local variable LOCAL to the zero value of TYPE.
+ * Factored out so both the method-prologue local init and the
+ * ldloca+initobj fast path share one implementation.
+ */
+static void
+emit_init_local (MonoCompile *cfg, int local, MonoType *type)
+{
+ MonoInst *var = cfg->locals [local];
+ if (COMPILE_SOFT_FLOAT (cfg)) {
+ /*
+ * Soft-float: materialize the zero value in a fresh vreg, then store
+ * the result of the last emitted instruction into the local via
+ * LOCSTORE (a direct dreg init would bypass the soft-float lowering).
+ */
+ MonoInst *store;
+ int reg = alloc_dreg (cfg, var->type);
+ emit_init_rvar (cfg, reg, type);
+ EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
+ } else {
+ /* Fast path: zero the local's vreg directly. */
+ emit_init_rvar (cfg, var->dreg, type);
+ }
+}
+
static int
inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_always)
case MONO_TYPE_R4:
size = 4; break;
case MONO_TYPE_R8:
-#ifdef ARM_FPU_FPA
- return NULL; /* stupid ARM FP swapped format */
-#endif
case MONO_TYPE_I8:
case MONO_TYPE_U8:
size = 8; break;
{
int local, token;
MonoClass *klass;
+ MonoType *type;
if (size == 1) {
local = ip [1];
}
if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
- gboolean skip = FALSE;
-
/* From the INITOBJ case */
token = read32 (ip + 2);
klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
CHECK_TYPELOAD (klass);
- if (mini_type_is_reference (cfg, &klass->byval_arg)) {
- MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
- } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
- MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
- } else {
- skip = TRUE;
- }
-
- if (!skip)
- return ip + 6;
+ type = &klass->byval_arg;
+ emit_init_local (cfg, local, type);
+ return ip + 6;
}
load_error:
return NULL;
}
static gboolean
-is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig)
+is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
{
gboolean supported_tail_call;
int i;
-#ifdef MONO_ARCH_USE_OP_TAIL_CALL
- supported_tail_call = MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
+#ifdef MONO_ARCH_HAVE_OP_TAIL_CALL
+ supported_tail_call = mono_arch_tail_call_supported (mono_method_signature (method), mono_method_signature (cmethod));
#else
supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
#endif
supported_tail_call = FALSE;
if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
supported_tail_call = FALSE;
+ if (call_opcode != CEE_CALL)
+ supported_tail_call = FALSE;
/* Debugging support */
#if 0
dont_verify_stloc = TRUE;
}
- if (mono_debug_using_mono_debugger ())
- cfg->keep_cil_nops = TRUE;
-
if (sig->is_inflated)
generic_context = mono_method_get_context (method);
else if (generic_container)
tblock->real_offset = clause->handler_offset;
tblock->flags |= BB_EXCEPTION_HANDLER;
- link_bblock (cfg, try_bb, tblock);
+ /*
+ * Linking the try block with the EH block hinders inlining as we won't be able to
+ * merge the bblocks from inlining and produce an artificial hole for no good reason.
+ */
+ if (COMPILE_LLVM (cfg))
+ link_bblock (cfg, try_bb, tblock);
if (*(ip + clause->handler_offset) == CEE_POP)
tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
if (cfg->method == method) {
breakpoint_id = mono_debugger_method_has_breakpoint (method);
- if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
+ if (breakpoint_id) {
MONO_INST_NEW (cfg, ins, OP_BREAK);
MONO_ADD_INS (bblock, ins);
}
if (mono_security_cas_enabled ())
CHECK_CFG_EXCEPTION;
-#ifdef MONO_ARCH_USE_OP_TAIL_CALL
- {
+ if (ARCH_HAVE_OP_TAIL_CALL) {
MonoMethodSignature *fsig = mono_method_signature (cmethod);
int i, n;
mono_arch_emit_call (cfg, call);
MONO_ADD_INS (bblock, (MonoInst*)call);
- }
-#else
- for (i = 0; i < num_args; ++i)
- /* Prevent arguments from being optimized away */
- arg_array [i]->flags |= MONO_INST_VOLATILE;
+ } else {
+ for (i = 0; i < num_args; ++i)
+ /* Prevent arguments from being optimized away */
+ arg_array [i]->flags |= MONO_INST_VOLATILE;
- MONO_INST_NEW_CALL (cfg, call, OP_JMP);
- ins = (MonoInst*)call;
- ins->inst_p0 = cmethod;
- MONO_ADD_INS (bblock, ins);
-#endif
+ MONO_INST_NEW_CALL (cfg, call, OP_JMP);
+ ins = (MonoInst*)call;
+ ins->inst_p0 = cmethod;
+ MONO_ADD_INS (bblock, ins);
+ }
ip += 5;
start_new_bblock = 1;
/*
* Making generic calls out of gsharedvt methods.
*/
- if (cmethod && cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
+ if (cmethod && cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig)) {
MonoRgctxInfoType info_type;
if (virtual) {
/* FIXME: runtime generic context pointer for jumps? */
/* FIXME: handle this for generic sharing eventually */
if (cmethod && (ins_flag & MONO_INST_TAILCALL) &&
- !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig))
+ !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
supported_tail_call = TRUE;
- if (supported_tail_call) {
- if (call_opcode != CEE_CALL)
- supported_tail_call = FALSE;
- }
if (supported_tail_call) {
MonoCallInst *call;
//printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
- if (ARCH_USE_OP_TAIL_CALL) {
+ if (ARCH_HAVE_OP_TAIL_CALL) {
/* Handle tail calls similarly to normal calls */
tail_call = TRUE;
} else {
cfg->ret_var_set = TRUE;
}
} else {
+ if (cfg->lmf_var && cfg->cbb->in_count)
+ emit_pop_lmf (cfg);
+
if (cfg->ret) {
MonoType *ret_type = mono_method_signature (method)->ret;
gboolean is_special_static;
MonoType *ftype;
MonoInst *store_val = NULL;
+ MonoInst *thread_ins;
op = *ip;
is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
is_special_static = mono_class_field_is_special_static (field);
+ if (is_special_static && ((gsize)addr & 0x80000000) == 0)
+ thread_ins = mono_get_thread_intrinsic (cfg);
+ else
+ thread_ins = NULL;
+
/* Generate IR to compute the field address */
- if (is_special_static && ((gsize)addr & 0x80000000) == 0 && mono_get_thread_intrinsic (cfg) && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
+ if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
/*
* Fast access to TLS data
* Inline version of get_thread_static_data () in
*/
guint32 offset;
int idx, static_data_reg, array_reg, dreg;
- MonoInst *thread_ins;
GSHAREDVT_FAILURE (op);
// offset &= 0x7fffffff;
// idx = (offset >> 24) - 1;
// return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
-
- thread_ins = mono_get_thread_intrinsic (cfg);
MONO_ADD_INS (cfg->cbb, thread_ins);
static_data_reg = alloc_ireg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
ins->sreg2 = sp [1]->dreg;
MONO_ADD_INS (bblock, ins);
-#ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
-#endif
ip += 2;
inline_costs += 10 * num_calls++;
cfg->cbb = init_localsbb;
- if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
+ if ((get_domain = mono_get_domain_intrinsic (cfg))) {
+ MONO_ADD_INS (cfg->cbb, get_domain);
+ } else {
get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
}
- else {
- get_domain->dreg = alloc_preg (cfg);
- MONO_ADD_INS (cfg->cbb, get_domain);
- }
NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
MONO_ADD_INS (cfg->cbb, store);
}
cfg->cbb = init_localsbb;
cfg->ip = NULL;
for (i = 0; i < header->num_locals; ++i) {
- MonoInst *var = cfg->locals [i];
- if (COMPILE_SOFT_FLOAT (cfg)) {
- MonoInst *store;
- int reg = alloc_dreg (cfg, var->type);
- emit_init_rvar (cfg, reg, header->locals [i]);
- EMIT_NEW_LOCSTORE (cfg, store, i, cfg->cbb->last_ins);
- } else {
- emit_init_rvar (cfg, var->dreg, header->locals [i]);
- }
+ emit_init_local (cfg, i, header->locals [i]);
}
}
}
}
+ if (cfg->lmf_var && cfg->method == method) {
+ cfg->cbb = init_localsbb;
+ emit_push_lmf (cfg);
+ }
+
if (seq_points) {
MonoBasicBlock *bb;
/* Arguments are implicitly global */
/* Putting R4 vars into registers doesn't work currently */
/* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
- if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var) {
+ if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
/*
* Make that the variable's liveness interval doesn't contain a call, since
* that would cause the lvreg to be spilled, making the whole optimization