-/*
- * method-to-ir.c: Convert CIL to the JIT internal representation
+/**
+ * \file
+ * Convert CIL to the JIT internal representation
*
* Author:
* Paolo Molaro (lupus@ximian.com)
static MonoInst*
emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp);
-inline static MonoInst*
-mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg);
-
/* helper methods signatures */
static MonoMethodSignature *helper_sig_domain_get;
static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
break;
case STACK_PTR:
case STACK_MP:
+ case STACK_OBJ:
#if SIZEOF_VOID_P == 8
ins->opcode = OP_LCONV_TO_U;
#else
ins->klass = mono_defaults.object_class;
}
-static const char
-ldind_type [] = {
- STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
-};
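+/* Map a CEE_LDIND_* opcode to the MonoClass of the value it loads (replaces the old ldind_type [] stack-type table). */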
+static MonoClass*
+ldind_to_type (int op)
+{
+ switch (op) {
+ case CEE_LDIND_I1: return mono_defaults.sbyte_class;
+ case CEE_LDIND_U1: return mono_defaults.byte_class;
+ case CEE_LDIND_I2: return mono_defaults.int16_class;
+ case CEE_LDIND_U2: return mono_defaults.uint16_class;
+ case CEE_LDIND_I4: return mono_defaults.int32_class;
+ case CEE_LDIND_U4: return mono_defaults.uint32_class;
+ case CEE_LDIND_I8: return mono_defaults.int64_class;
+ case CEE_LDIND_I: return mono_defaults.int_class;
+ case CEE_LDIND_R4: return mono_defaults.single_class;
+ case CEE_LDIND_R8: return mono_defaults.double_class;
+	case CEE_LDIND_REF: return mono_defaults.object_class; //FIXME we should try to return a more specific type
+ default: g_error ("Unknown ldind type %d", op);
+ }
+}
#if 0
MonoInst *
mono_get_got_var (MonoCompile *cfg)
{
- if (!cfg->compile_aot || !cfg->backend->need_got_var)
+ if (!cfg->compile_aot || !cfg->backend->need_got_var || cfg->llvm_only)
return NULL;
if (!cfg->got_var) {
cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
return cfg->got_var;
}
-static MonoInst *
-mono_get_vtable_var (MonoCompile *cfg)
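+/* Lazily create cfg->rgctx_var, the stack-allocated variable holding the runtime generic context. */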
+static void
+mono_create_rgctx_var (MonoCompile *cfg)
{
- g_assert (cfg->gshared);
-
if (!cfg->rgctx_var) {
cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* force the var to be stack allocated */
cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
}
+}
+
+static MonoInst *
+mono_get_vtable_var (MonoCompile *cfg)
+{
+ g_assert (cfg->gshared);
+
+ mono_create_rgctx_var (cfg);
return cfg->rgctx_var;
}
}
}
-static MonoInst*
-emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data)
+MonoInst*
+mini_emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data)
{
MonoInst *ins;
return ins;
}
-MonoInst*
-mini_emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data)
-{
- return emit_runtime_constant (cfg, patch_type, data);
-}
-
-static void
-mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
-{
- int val_reg;
-
- g_assert (val == 0);
-
- if (align == 0)
- align = 4;
-
- if ((size <= SIZEOF_REGISTER) && (size <= align)) {
- switch (size) {
- case 1:
- MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
- return;
- case 2:
- MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
- return;
- case 4:
- MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
- return;
-#if SIZEOF_REGISTER == 8
- case 8:
- MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
- return;
-#endif
- }
- }
-
- val_reg = alloc_preg (cfg);
-
- if (SIZEOF_REGISTER == 8)
- MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
- else
- MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
-
- if (align < 4) {
- /* This could be optimized further if neccesary */
- while (size >= 1) {
- MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
- offset += 1;
- size -= 1;
- }
- return;
- }
-
- if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
- if (offset % 8) {
- MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
- offset += 4;
- size -= 4;
- }
- while (size >= 8) {
- MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
- offset += 8;
- size -= 8;
- }
- }
-
- while (size >= 4) {
- MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
- offset += 4;
- size -= 4;
- }
- while (size >= 2) {
- MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
- offset += 2;
- size -= 2;
- }
- while (size >= 1) {
- MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
- offset += 1;
- size -= 1;
- }
-}
-
-void
-mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
-{
- int cur_reg;
-
- if (align == 0)
- align = 4;
-
- /*FIXME arbitrary hack to avoid unbound code expansion.*/
- g_assert (size < 10000);
-
- if (align < 4) {
- /* This could be optimized further if neccesary */
- while (size >= 1) {
- cur_reg = alloc_preg (cfg);
- MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
- MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
- doffset += 1;
- soffset += 1;
- size -= 1;
- }
- }
-
- if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
- while (size >= 8) {
- cur_reg = alloc_preg (cfg);
- MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
- MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
- doffset += 8;
- soffset += 8;
- size -= 8;
- }
- }
-
- while (size >= 4) {
- cur_reg = alloc_preg (cfg);
- MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
- MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
- doffset += 4;
- soffset += 4;
- size -= 4;
- }
- while (size >= 2) {
- cur_reg = alloc_preg (cfg);
- MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
- MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
- doffset += 2;
- soffset += 2;
- size -= 2;
- }
- while (size >= 1) {
- cur_reg = alloc_preg (cfg);
- MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
- MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
- doffset += 1;
- soffset += 1;
- size -= 1;
- }
-}
-
static MonoInst*
mono_create_fast_tls_getter (MonoCompile *cfg, MonoTlsKey key)
{
* to crashes and infinite recursions.
*/
EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_GET_TLS_TRAMP, (void*)key);
- return mono_emit_calli (cfg, helper_sig_get_tls_tramp, NULL, addr, NULL, NULL);
+ return mini_emit_calli (cfg, helper_sig_get_tls_tramp, NULL, addr, NULL, NULL);
} else {
gpointer getter = mono_tls_get_tls_getter (key, FALSE);
return mono_emit_jit_icall (cfg, getter, NULL);
if (cfg->compile_aot) {
MonoInst *addr;
EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_SET_TLS_TRAMP, (void*)key);
- return mono_emit_calli (cfg, helper_sig_set_tls_tramp, &value, addr, NULL, NULL);
+ return mini_emit_calli (cfg, helper_sig_set_tls_tramp, &value, addr, NULL, NULL);
} else {
gpointer setter = mono_tls_get_tls_setter (key, FALSE);
return mono_emit_jit_icall (cfg, setter, &value);
if (!cfg->lmf_ir)
return;
- if (cfg->lmf_ir_mono_lmf) {
- MonoInst *lmf_vara_ins, *lmf_ins;
- /* Load current lmf */
- lmf_ins = mono_create_tls_get (cfg, TLS_KEY_LMF);
- g_assert (lmf_ins);
- EMIT_NEW_VARLOADA (cfg, lmf_vara_ins, cfg->lmf_var, NULL);
- /* Save previous_lmf */
- EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_vara_ins->dreg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
- /* Set new LMF */
- mono_create_tls_set (cfg, lmf_vara_ins, TLS_KEY_LMF);
- } else {
- int lmf_reg, prev_lmf_reg;
- /*
- * Store lmf_addr in a variable, so it can be allocated to a global register.
- */
- if (!cfg->lmf_addr_var)
- cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
+ int lmf_reg, prev_lmf_reg;
+ /*
+ * Store lmf_addr in a variable, so it can be allocated to a global register.
+ */
+ if (!cfg->lmf_addr_var)
+ cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
#ifdef HOST_WIN32
- ins = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
- g_assert (ins);
- int jit_tls_dreg = ins->dreg;
+ ins = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
+ g_assert (ins);
+ int jit_tls_dreg = ins->dreg;
- lmf_reg = alloc_preg (cfg);
- EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
+ lmf_reg = alloc_preg (cfg);
+ EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
#else
- lmf_ins = mono_create_tls_get (cfg, TLS_KEY_LMF_ADDR);
- g_assert (lmf_ins);
+ lmf_ins = mono_create_tls_get (cfg, TLS_KEY_LMF_ADDR);
+ g_assert (lmf_ins);
#endif
- lmf_ins->dreg = cfg->lmf_addr_var->dreg;
+ lmf_ins->dreg = cfg->lmf_addr_var->dreg;
- EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
- lmf_reg = ins->dreg;
+ EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
+ lmf_reg = ins->dreg;
- prev_lmf_reg = alloc_preg (cfg);
- /* Save previous_lmf */
- EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
- EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
- /* Set new lmf */
- EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
- }
+ prev_lmf_reg = alloc_preg (cfg);
+ /* Save previous_lmf */
+ EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
+ EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
+ /* Set new lmf */
+ EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
}
/*
EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
lmf_reg = ins->dreg;
- if (cfg->lmf_ir_mono_lmf) {
- /* Load previous_lmf */
- EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, alloc_preg (cfg), lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
- /* Set new LMF */
- mono_create_tls_set (cfg, ins, TLS_KEY_LMF);
- } else {
- int prev_lmf_reg;
- /*
- * Emit IR to pop the LMF:
- * *(lmf->lmf_addr) = lmf->prev_lmf
- */
- /* This could be called before emit_push_lmf () */
- if (!cfg->lmf_addr_var)
- cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
- lmf_addr_reg = cfg->lmf_addr_var->dreg;
-
- prev_lmf_reg = alloc_preg (cfg);
- EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
- EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
- }
-}
-
-static void
-emit_instrumentation_call (MonoCompile *cfg, void *func)
-{
- MonoInst *iargs [1];
-
+ int prev_lmf_reg;
/*
- * Avoid instrumenting inlined methods since it can
- * distort profiling results.
+ * Emit IR to pop the LMF:
+ * *(lmf->lmf_addr) = lmf->prev_lmf
*/
- if (cfg->method != cfg->current_method)
- return;
+ /* This could be called before emit_push_lmf () */
+ if (!cfg->lmf_addr_var)
+ cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
+ lmf_addr_reg = cfg->lmf_addr_var->dreg;
- if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
- EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method);
- mono_emit_jit_icall (cfg, func, iargs);
- }
+ prev_lmf_reg = alloc_preg (cfg);
+ EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
+ EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
}
static int
method_reg = alloc_preg (cfg);
MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
} else {
- MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
+ MonoInst *ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
method_reg = ins->dreg;
}
method_reg = alloc_preg (cfg);
MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
} else {
- MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
+ MonoInst *ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
method_reg = ins->dreg;
}
return ji;
}
-static int
+int
mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
{
if (cfg->gshared)
inline static MonoCallInst *
mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
- MonoInst **args, int calli, int virtual_, int tail, int rgctx, int unbox_trampoline)
+ MonoInst **args, int calli, int virtual_, int tail, int rgctx, int unbox_trampoline, MonoMethod *target)
{
MonoType *sig_ret;
MonoCallInst *call;
tail = FALSE;
if (tail) {
- emit_instrumentation_call (cfg, mono_profiler_method_leave);
+ mini_profiler_emit_tail_call (cfg, target);
MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
} else
#endif
}
-inline static MonoInst*
-mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
+MonoInst*
+mini_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
{
MonoCallInst *call;
MonoInst *ins;
MONO_ADD_INS (cfg->cbb, ins);
}
- call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
+ call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE, NULL);
call->inst.sreg1 = addr->dreg;
return (MonoInst*)call;
}
-static MonoInst*
-emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
-
static MonoInst*
emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
- return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
+ return mini_emit_calli (cfg, sig, args, addr, NULL, NULL);
}
#endif
need_unbox_trampoline = method->klass == mono_defaults.object_class || mono_class_is_interface (method->klass);
- call = mono_emit_call_args (cfg, sig, args, FALSE, virtual_, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
+ call = mono_emit_call_args (cfg, sig, args, FALSE, virtual_, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline, method);
#ifndef DISABLE_REMOTING
if (might_be_remote)
g_assert (sig);
- call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
+ call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE, NULL);
call->fptr = func;
MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
tmp_reg = alloc_preg (cfg);
EMIT_NEW_UNALU (cfg, args [pindex], OP_MOVE, tmp_reg, arg_reg);
csig = sig_to_rgctx_sig (fsig);
- return mono_emit_calli (cfg, csig, args, call_target, NULL, NULL);
+ return mini_emit_calli (cfg, csig, args, call_target, NULL, NULL);
}
/* Emit an indirect call to the function descriptor ADDR */
static gboolean
direct_icalls_enabled (MonoCompile *cfg)
{
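+	/* Disabled for now: the unconditional return below makes the checks that follow unreachable. */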
+ return FALSE;
+
/* LLVM on amd64 can't handle calls to non-32 bit addresses */
#ifdef TARGET_AMD64
if (cfg->compile_llvm && !cfg->llvm_only)
mono_emit_jit_icall (cfg, mono_throw_method_access, args);
}
-static MonoMethod*
-get_memcpy_method (void)
+MonoMethod*
+mini_get_memcpy_method (void)
{
static MonoMethod *memcpy_method = NULL;
if (!memcpy_method) {
return memcpy_method;
}
-static void
-create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
-{
- MonoClassField *field;
- gpointer iter = NULL;
-
- while ((field = mono_class_get_fields (klass, &iter))) {
- int foffset;
-
- if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
- continue;
- foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
- if (mini_type_is_reference (mono_field_get_type (field))) {
- g_assert ((foffset % SIZEOF_VOID_P) == 0);
- *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
- } else {
- MonoClass *field_class = mono_class_from_mono_type (field->type);
- if (field_class->has_references)
- create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
- }
- }
-}
-
-static void
-emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
+void
+mini_emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
{
int card_table_shift_bits;
gpointer card_table_mask;
if (!cfg->gen_write_barriers)
return;
+ //method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !MONO_INS_IS_PCONST_NULL (sp [1])
+
card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
wbarrier->sreg1 = ptr->dreg;
wbarrier->sreg2 = value->dreg;
MONO_ADD_INS (cfg->cbb, wbarrier);
- } else if (card_table && !cfg->compile_aot && !mono_gc_card_table_nursery_check ()) {
+ } else if (card_table) {
int offset_reg = alloc_preg (cfg);
int card_reg;
MonoInst *ins;
+	/*
+	 * We emit a fast, lightweight write barrier. It always marks cards, as in the concurrent
+	 * collector case, so for the serial collector it might slightly slow down nursery
+	 * collections. We also expect the host and target systems to have the same card table
+	 * configuration, which is the case if they have the same pointer size.
+	 */
+
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
if (card_table_mask)
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
/*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
* IMM's larger than 32bits.
*/
- ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
+ ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
card_reg = ins->dreg;
MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
}
-static gboolean
-mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
-{
- int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
- unsigned need_wb = 0;
-
- if (align == 0)
- align = 4;
-
- /*types with references can't have alignment smaller than sizeof(void*) */
- if (align < SIZEOF_VOID_P)
- return FALSE;
-
- /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
- if (size > 32 * SIZEOF_VOID_P)
- return FALSE;
-
- create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
-
- /* We don't unroll more than 5 stores to avoid code bloat. */
- if (size > 5 * SIZEOF_VOID_P) {
- /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
- size += (SIZEOF_VOID_P - 1);
- size &= ~(SIZEOF_VOID_P - 1);
-
- EMIT_NEW_ICONST (cfg, iargs [2], size);
- EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
- mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
- return TRUE;
- }
-
- destreg = iargs [0]->dreg;
- srcreg = iargs [1]->dreg;
- offset = 0;
-
- dest_ptr_reg = alloc_preg (cfg);
- tmp_reg = alloc_preg (cfg);
-
- /*tmp = dreg*/
- EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
-
- while (size >= SIZEOF_VOID_P) {
- MonoInst *load_inst;
- MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
- load_inst->dreg = tmp_reg;
- load_inst->inst_basereg = srcreg;
- load_inst->inst_offset = offset;
- MONO_ADD_INS (cfg->cbb, load_inst);
-
- MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
-
- if (need_wb & 0x1)
- emit_write_barrier (cfg, iargs [0], load_inst);
-
- offset += SIZEOF_VOID_P;
- size -= SIZEOF_VOID_P;
- need_wb >>= 1;
-
- /*tmp += sizeof (void*)*/
- if (size >= SIZEOF_VOID_P) {
- NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
- MONO_ADD_INS (cfg->cbb, iargs [0]);
- }
- }
-
- /* Those cannot be references since size < sizeof (void*) */
- while (size >= 4) {
- MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
- MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
- offset += 4;
- size -= 4;
- }
-
- while (size >= 2) {
- MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
- MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
- offset += 2;
- size -= 2;
- }
-
- while (size >= 1) {
- MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
- MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
- offset += 1;
- size -= 1;
- }
-
- return TRUE;
-}
-
-/*
- * Emit code to copy a valuetype of type @klass whose address is stored in
- * @src->dreg to memory whose address is stored at @dest->dreg.
- */
-void
-mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
-{
- MonoInst *iargs [4];
- int n;
- guint32 align = 0;
- MonoMethod *memcpy_method;
- MonoInst *size_ins = NULL;
- MonoInst *memcpy_ins = NULL;
-
- g_assert (klass);
- if (cfg->gshared)
- klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
-
- /*
- * This check breaks with spilled vars... need to handle it during verification anyway.
- * g_assert (klass && klass == src->klass && klass == dest->klass);
- */
-
- if (mini_is_gsharedvt_klass (klass)) {
- g_assert (!native);
- size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
- memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
- }
-
- if (native)
- n = mono_class_native_size (klass, &align);
- else
- n = mono_class_value_size (klass, &align);
-
- /* if native is true there should be no references in the struct */
- if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
- /* Avoid barriers when storing to the stack */
- if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
- (dest->opcode == OP_LDADDR))) {
- int context_used;
-
- iargs [0] = dest;
- iargs [1] = src;
-
- context_used = mini_class_check_context_used (cfg, klass);
-
- /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
- if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
- return;
- } else if (context_used) {
- iargs [2] = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
- } else {
- iargs [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, klass);
- if (!cfg->compile_aot)
- mono_class_compute_gc_descriptor (klass);
- }
-
- if (size_ins)
- mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
- else
- mono_emit_jit_icall (cfg, mono_value_copy, iargs);
- return;
- }
- }
-
- if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 8) {
- /* FIXME: Optimize the case when src/dest is OP_LDADDR */
- mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
- } else {
- iargs [0] = dest;
- iargs [1] = src;
- if (size_ins)
- iargs [2] = size_ins;
- else
- EMIT_NEW_ICONST (cfg, iargs [2], n);
-
- memcpy_method = get_memcpy_method ();
- if (memcpy_ins)
- mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
- else
- mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
- }
-}
-
-static MonoMethod*
-get_memset_method (void)
+MonoMethod*
+mini_get_memset_method (void)
{
static MonoMethod *memset_method = NULL;
if (!memset_method) {
/* FIXME: Optimize this for the case when dest is an LDADDR */
mono_class_init (klass);
if (mini_is_gsharedvt_klass (klass)) {
- size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
- bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
+ size_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
+ bzero_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
if (!bzero_method)
bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
g_assert (bzero_method);
iargs [0] = dest;
iargs [1] = size_ins;
- mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
+ mini_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
return;
}
mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
}
else {
- memset_method = get_memset_method ();
+ memset_method = mini_get_memset_method ();
iargs [0] = dest;
EMIT_NEW_ICONST (cfg, iargs [1], 0);
EMIT_NEW_ICONST (cfg, iargs [2], n);
mrgctx_loc = mono_get_vtable_var (cfg);
EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
+ return mrgctx_var;
+ } else if (MONO_CLASS_IS_INTERFACE (cfg->method->klass)) {
+ MonoInst *mrgctx_loc, *mrgctx_var;
+
+	/* Default interface methods need an mrgctx since the vtable at runtime points at an implementing class */
+ mrgctx_loc = mono_get_vtable_var (cfg);
+ EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
+
+ g_assert (mono_method_needs_static_rgctx_invoke (cfg->method, TRUE));
+
return mrgctx_var;
} else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
MonoInst *vtable_loc, *vtable_var;
return ins;
}
-static MonoInst*
-emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
+MonoInst*
+mini_emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
{
return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
}
MonoInst *ins;
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
- ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, array_class);
+ ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, array_class);
MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, ins->dreg);
} else if (context_used) {
MonoInst *vtable_ins;
} else {
rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
- return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
+ return mini_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
}
} else {
gboolean pass_vtable, pass_mrgctx;
MonoInst *ins;
int dreg, addr_reg;
- klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
+ klass_inst = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
/* obj */
args [0] = obj;
NEW_BBLOCK (cfg, is_ref_bb);
NEW_BBLOCK (cfg, is_nullable_bb);
NEW_BBLOCK (cfg, end_bb);
- is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
+ is_ref = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
MONO_START_BB (cfg, is_nullable_bb);
{
- MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
+ MonoInst *addr = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
MonoInst *unbox_call;
MonoMethodSignature *unbox_sig;
if (cfg->llvm_only)
unbox_call = emit_llvmonly_calli (cfg, unbox_sig, &obj, addr);
else
- unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
+ unbox_call = mini_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
addr->dreg = addr_reg;
/* This happens often in argument checking code, eg. throw new FooException... */
/* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
- return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
+ alloc_ftn = mono_helper_newobj_mscorlib;
} else {
MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
MonoMethod *managed_alloc = NULL;
- gboolean pass_lw;
if (!vtable) {
mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
EMIT_NEW_ICONST (cfg, iargs [1], size);
return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
}
- alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
- if (pass_lw) {
- guint32 lw = vtable->klass->instance_size;
- lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
- EMIT_NEW_ICONST (cfg, iargs [0], lw);
- EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
- }
- else {
- EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
- }
+ alloc_ftn = ves_icall_object_new_specific;
+ EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
}
return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
- return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
+ return mini_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
}
} else {
gboolean pass_vtable, pass_mrgctx;
NEW_BBLOCK (cfg, is_ref_bb);
NEW_BBLOCK (cfg, is_nullable_bb);
NEW_BBLOCK (cfg, end_bb);
- is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
+ is_ref = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
MONO_START_BB (cfg, is_nullable_bb);
{
- MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
+ MonoInst *addr = mini_emit_get_gsharedvt_info_klass (cfg, klass,
MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
MonoInst *box_call;
MonoMethodSignature *box_sig;
if (cfg->llvm_only)
box_call = emit_llvmonly_calli (cfg, box_sig, &val, addr);
else
- box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
+ box_call = mini_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
res->type = STACK_OBJ;
res->klass = klass;
/* Set target field */
/* Optimize away setting of NULL target */
if (!MONO_INS_IS_PCONST_NULL (target)) {
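+		/* The target is only known at run time; if it turns out to be null, throw a NullReferenceException. */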
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target->dreg, 0);
+ MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
if (cfg->gen_write_barriers) {
dreg = alloc_preg (cfg);
EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
- emit_write_barrier (cfg, ptr, target);
+ mini_emit_write_barrier (cfg, ptr, target);
}
}
}
mono_domain_unlock (domain);
- code_slot_ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
+ code_slot_ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
}
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
}
if (mini_is_gsharedvt_type (fsig->params [0])) {
int addr_reg, deref_arg_reg;
- ins = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
+ ins = mini_emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
deref_arg_reg = alloc_preg (cfg);
/* deref_arg = BOX_TYPE != MONO_GSHAREDVT_BOX_TYPE_VTYPE */
EMIT_NEW_BIALU_IMM (cfg, args [3], OP_ISUB_IMM, deref_arg_reg, ins->dreg, 1);
/* also consider num_locals? */
/* Do the size check early to avoid creating vtables */
if (!inline_limit_inited) {
- if (g_getenv ("MONO_INLINELIMIT"))
- inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"));
- else
+ char *inlinelimit;
+ if ((inlinelimit = g_getenv ("MONO_INLINELIMIT"))) {
+ inline_limit = atoi (inlinelimit);
+ g_free (inlinelimit);
+ } else
inline_limit = INLINE_LENGTH_LIMIT;
inline_limit_inited = TRUE;
}
if (g_list_find (cfg->dont_inline, method))
return FALSE;
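+	/* Do not inline methods the profiler wants to instrument, so their call events are still reported. */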
+ if (mono_profiler_get_call_instrumentation_flags (method))
+ return FALSE;
+
return TRUE;
}
#if SIZEOF_REGISTER == 8
/* The array reg is 64 bits but the index reg is only 32 */
if (COMPILE_LLVM (cfg)) {
- /* Not needed */
+		/*
+		 * abcrem can't handle OP_SEXT_I4, so it is added after abcrem runs,
+		 * during OP_BOUNDS_CHECK decomposition, and in the implementation
+		 * of OP_X86_LEA for llvm.
+		 */
index2_reg = index_reg;
} else {
index2_reg = alloc_preg (cfg);
g_assert (cfg->gshared);
context_used = mini_class_check_context_used (cfg, klass);
g_assert (context_used);
- rgctx_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
+ rgctx_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
} else {
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
return addr;
}
-static MonoBreakPolicy
-always_insert_breakpoint (MonoMethod *method)
-{
- return MONO_BREAK_POLICY_ALWAYS;
-}
-
-static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
-
-/**
- * mono_set_break_policy:
- * policy_callback: the new callback function
- *
- * Allow embedders to decide wherther to actually obey breakpoint instructions
- * (both break IL instructions and Debugger.Break () method calls), for example
- * to not allow an app to be aborted by a perfectly valid IL opcode when executing
- * untrusted or semi-trusted code.
- *
- * @policy_callback will be called every time a break point instruction needs to
- * be inserted with the method argument being the method that calls Debugger.Break()
- * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
- * if it wants the breakpoint to not be effective in the given method.
- * #MONO_BREAK_POLICY_ALWAYS is the default.
- */
-void
-mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
-{
- if (policy_callback)
- break_policy_func = policy_callback;
- else
- break_policy_func = always_insert_breakpoint;
-}
-
-static gboolean
-should_insert_brekpoint (MonoMethod *method) {
- switch (break_policy_func (method)) {
- case MONO_BREAK_POLICY_ALWAYS:
- return TRUE;
- case MONO_BREAK_POLICY_NEVER:
- return FALSE;
- case MONO_BREAK_POLICY_ON_DBG:
- g_warning ("mdb no longer supported");
- return FALSE;
- default:
- g_warning ("Incorrect value returned from break policy callback");
- return FALSE;
- }
-}
-
/* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
static MonoInst*
emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
if (mini_type_is_reference (&eklass->byval_arg))
- emit_write_barrier (cfg, addr, load);
+ mini_emit_write_barrier (cfg, addr, load);
} else {
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
if (generic_class_is_reference_type (cfg, klass))
- emit_write_barrier (cfg, addr, sp [2]);
+ mini_emit_write_barrier (cfg, addr, sp [2]);
}
return ins;
}
return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
}
-static MonoInst*
-emit_memory_barrier (MonoCompile *cfg, int kind)
+MonoInst*
+mini_emit_memory_barrier (MonoCompile *cfg, int kind)
{
MonoInst *ins = NULL;
MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
return NULL;
}
+
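+/*
+ * Return whether T is blittable for marshalling purposes, i.e. it contains no references
+ * and its managed and native layouts have the same size.
+ */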
+static gboolean
+mono_type_is_native_blittable (MonoType *t)
+{
+ if (MONO_TYPE_IS_REFERENCE (t))
+ return FALSE;
+
+ if (MONO_TYPE_IS_PRIMITIVE_SCALAR (t))
+ return TRUE;
+
+ MonoClass *klass = mono_class_from_mono_type (t);
+
+	//MonoClass::blittable depends on mono_class_setup_fields () having been called.
+ mono_class_setup_fields (klass);
+ if (!klass->blittable)
+ return FALSE;
+
+ // If the native marshal size is different we can't convert PtrToStructure to a type load
+ if (mono_class_native_size (klass, NULL) != mono_class_value_size (klass, NULL))
+ return FALSE;
+
+ return TRUE;
+}
+
static MonoInst*
mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
MonoInst *ins = NULL;
-
- MonoClass *runtime_helpers_class = mono_class_get_runtime_helpers_class ();
+ MonoClass *runtime_helpers_class = mono_class_get_runtime_helpers_class ();
if (cmethod->klass == mono_defaults.string_class) {
if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count + fsig->hasthis == 2) {
} else if (cmethod->klass == runtime_helpers_class) {
if (strcmp (cmethod->name, "get_OffsetToStringData") == 0 && fsig->param_count == 0) {
EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
+ return ins;
+ } else if (strcmp (cmethod->name, "IsReferenceOrContainsReferences") == 0 && fsig->param_count == 0) {
+ MonoGenericContext *ctx = mono_method_get_context (cmethod);
+ g_assert (ctx);
+ g_assert (ctx->method_inst);
+ g_assert (ctx->method_inst->type_argc == 1);
+ MonoType *arg_type = ctx->method_inst->type_argv [0];
+ MonoType *t;
+ MonoClass *klass;
+
+ ins = NULL;
+
+		/* Resolve the argument class as far as possible so we can handle the common cases quickly */
+ t = mini_get_underlying_type (arg_type);
+ klass = mono_class_from_mono_type (t);
+ mono_class_init (klass);
+ if (MONO_TYPE_IS_REFERENCE (t))
+ EMIT_NEW_ICONST (cfg, ins, 1);
+ else if (MONO_TYPE_IS_PRIMITIVE (t))
+ EMIT_NEW_ICONST (cfg, ins, 0);
+ else if (cfg->gshared && (t->type == MONO_TYPE_VAR || t->type == MONO_TYPE_MVAR) && !mini_type_var_is_vt (t))
+ EMIT_NEW_ICONST (cfg, ins, 1);
+ else if (!cfg->gshared || !mini_class_check_context_used (cfg, klass))
+ EMIT_NEW_ICONST (cfg, ins, klass->has_references ? 1 : 0);
+ else {
+ g_assert (cfg->gshared);
+
+ /* Have to use the original argument class here */
+ MonoClass *arg_class = mono_class_from_mono_type (arg_type);
+ int context_used = mini_class_check_context_used (cfg, arg_class);
+
+			/* The rgctx slot returns 1 or 2; subtracting 1 below turns it into a 0/1 result */
+ MonoInst *info = mini_emit_get_rgctx_klass (cfg, context_used, arg_class, MONO_RGCTX_INFO_CLASS_IS_REF_OR_CONTAINS_REFS);
+ int dreg = alloc_ireg (cfg);
+ EMIT_NEW_BIALU_IMM (cfg, ins, OP_ISUB_IMM, dreg, info->dreg, 1);
+ }
+
return ins;
} else
return NULL;
MONO_ADD_INS (cfg->cbb, ins);
return ins;
} else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) {
- return emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
+ return mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
} else if (!strcmp (cmethod->name, "VolatileRead") && fsig->param_count == 1) {
guint32 opcode = 0;
gboolean is_ref = mini_type_is_reference (fsig->params [0]);
if (opcode == OP_LOADI8_MEMBASE)
ins = mono_decompose_opcode (cfg, ins);
- emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
+ mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
return ins;
}
opcode = OP_STORE_MEMBASE_REG;
if (opcode) {
- emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
+ mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
MONO_INST_NEW (cfg, ins, opcode);
ins->sreg1 = args [1]->dreg;
} else {
MonoInst *load_ins;
- emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
+ mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
/* 64 bit reads are already atomic */
MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
load_ins->type = STACK_I8;
MONO_ADD_INS (cfg->cbb, load_ins);
- emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
+ mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
ins = load_ins;
}
}
if (cfg->gen_write_barriers && is_ref)
- emit_write_barrier (cfg, args [0], args [1]);
+ mini_emit_write_barrier (cfg, args [0], args [1]);
}
else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 3) {
MonoInst *f2i_new = NULL, *f2i_cmp = NULL, *i2f;
}
if (cfg->gen_write_barriers && is_ref)
- emit_write_barrier (cfg, args [0], args [1]);
+ mini_emit_write_barrier (cfg, args [0], args [1]);
}
else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 4 &&
fsig->params [1]->type == MONO_TYPE_I4) {
cfg->has_atomic_cas_i4 = TRUE;
}
else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0)
- ins = emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
+ ins = mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
if (ins)
return ins;
MONO_ADD_INS (cfg->cbb, ins);
if (cfg->gen_write_barriers && is_ref)
- emit_write_barrier (cfg, args [0], args [1]);
+ mini_emit_write_barrier (cfg, args [0], args [1]);
}
}
(strcmp (cmethod->klass->name_space, "System.Diagnostics") == 0) &&
(strcmp (cmethod->klass->name, "Debugger") == 0)) {
if (!strcmp (cmethod->name, "Break") && fsig->param_count == 0) {
- if (should_insert_brekpoint (cfg->method)) {
+ if (mini_should_insert_breakpoint (cfg->method)) {
ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
} else {
MONO_INST_NEW (cfg, ins, OP_NOP);
MONO_ADD_INS (cfg->cbb, ins);
return ins;
}
+ } else if (cmethod->klass->image == mono_defaults.corlib &&
+ (strcmp (cmethod->klass->name_space, "System.Runtime.InteropServices") == 0) &&
+ (strcmp (cmethod->klass->name, "Marshal") == 0)) {
+ //Convert Marshal.PtrToStructure<T> of blittable T to direct loads
+ if (strcmp (cmethod->name, "PtrToStructure") == 0 &&
+ cmethod->is_inflated &&
+ fsig->param_count == 1 &&
+ !mini_method_check_context_used (cfg, cmethod)) {
+
+ MonoGenericContext *method_context = mono_method_get_context (cmethod);
+ MonoType *arg0 = method_context->method_inst->type_argv [0];
+ if (mono_type_is_native_blittable (arg0))
+ return mini_emit_memory_load (cfg, arg0, args [0], 0, 0);
+ }
}
#ifdef MONO_ARCH_SIMD_INTRINSICS
{
if (method->klass == mono_defaults.string_class) {
/* managed string allocation support */
- if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
+ if (strcmp (method->name, "InternalAllocateStr") == 0 && !(cfg->opt & MONO_OPT_SHARED)) {
MonoInst *iargs [2];
MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
MonoMethod *managed_alloc = NULL;
prev_args = cfg->args;
prev_arg_types = cfg->arg_types;
prev_inlined_method = cfg->inlined_method;
- cfg->inlined_method = cmethod;
- cfg->ret_var_set = FALSE;
- cfg->inline_depth ++;
+ prev_ret_var_set = cfg->ret_var_set;
prev_real_offset = cfg->real_offset;
prev_cbb_hash = cfg->cbb_hash;
prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
prev_cbb = cfg->cbb;
prev_current_method = cfg->current_method;
prev_generic_context = cfg->generic_context;
- prev_ret_var_set = cfg->ret_var_set;
prev_disable_inline = cfg->disable_inline;
+ cfg->inlined_method = cmethod;
+ cfg->ret_var_set = FALSE;
+ cfg->inline_depth ++;
+
if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
virtual_ = TRUE;
return method;
}
-static inline MonoClass*
-mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
-{
- MonoError error;
- MonoClass *klass;
-
- if (method->wrapper_type != MONO_WRAPPER_NONE) {
- klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
- if (context) {
- klass = mono_class_inflate_generic_class_checked (klass, context, &error);
- mono_error_cleanup (&error); /* FIXME don't swallow the error */
- }
- } else {
- klass = mono_class_get_and_inflate_typespec_checked (method->klass->image, token, context, &error);
- mono_error_cleanup (&error); /* FIXME don't swallow the error */
- }
- if (klass)
- mono_class_init (klass);
- return klass;
-}
-
static inline MonoMethodSignature*
mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context, MonoError *error)
{
return NULL;
if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
return NULL;
- switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
- case MONO_TYPE_BOOLEAN:
+ switch (mini_get_underlying_type (&klass->byval_arg)->type) {
case MONO_TYPE_I1:
case MONO_TYPE_U1:
size = 1; break;
/* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
#if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
- case MONO_TYPE_CHAR:
case MONO_TYPE_I2:
case MONO_TYPE_U2:
size = 2; break;
cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
}
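+/*
+ * Return the conversion opcode needed to coerce a value before storing it into a local or
+ * argument of type TYPE (only the small integer types need one), or 0 if no coercion is needed.
+ */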
+static guint32
+mono_type_to_stloc_coerce (MonoType *type)
+{
+ if (type->byref)
+ return 0;
+
+ type = mini_get_underlying_type (type);
+handle_enum:
+ switch (type->type) {
+ case MONO_TYPE_I1:
+ return OP_ICONV_TO_I1;
+ case MONO_TYPE_U1:
+ return OP_ICONV_TO_U1;
+ case MONO_TYPE_I2:
+ return OP_ICONV_TO_I2;
+ case MONO_TYPE_U2:
+ return OP_ICONV_TO_U2;
+ case MONO_TYPE_I4:
+ case MONO_TYPE_U4:
+ case MONO_TYPE_I:
+ case MONO_TYPE_U:
+ case MONO_TYPE_PTR:
+ case MONO_TYPE_FNPTR:
+ case MONO_TYPE_CLASS:
+ case MONO_TYPE_STRING:
+ case MONO_TYPE_OBJECT:
+ case MONO_TYPE_SZARRAY:
+ case MONO_TYPE_ARRAY:
+ case MONO_TYPE_I8:
+ case MONO_TYPE_U8:
+ case MONO_TYPE_R4:
+ case MONO_TYPE_R8:
+ case MONO_TYPE_TYPEDBYREF:
+ case MONO_TYPE_GENERICINST:
+ return 0;
+ case MONO_TYPE_VALUETYPE:
+ if (type->data.klass->enumtype) {
+ type = mono_class_enum_basetype (type->data.klass);
+ goto handle_enum;
+ }
+ return 0;
+ case MONO_TYPE_VAR:
+	case MONO_TYPE_MVAR: //TODO I believe we don't need to handle gsharedvt as there won't be a match and, for example, u1 is not covariant to u32
+ return 0;
+ default:
+ g_error ("unknown type 0x%02x in mono_type_to_stloc_coerce", type->type);
+ }
+ return -1;
+}
+
static void
emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
{
MonoInst *ins;
+ guint32 coerce_op = mono_type_to_stloc_coerce (header->locals [n]);
+
+ if (coerce_op) {
+ if (cfg->cbb->last_ins == sp [0] && sp [0]->opcode == coerce_op) {
+ if (cfg->verbose_level > 2)
+ printf ("Found existing coercing is enough for stloc\n");
+ } else {
+ MONO_INST_NEW (cfg, ins, coerce_op);
+ ins->dreg = alloc_ireg (cfg);
+ ins->sreg1 = sp [0]->dreg;
+ ins->type = STACK_I4;
+ ins->klass = mono_class_from_mono_type (header->locals [n]);
+ MONO_ADD_INS (cfg->cbb, ins);
+ *sp = mono_decompose_opcode (cfg, ins);
+ }
+ }
+
guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
}
}
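+/* Store *sp into argument N, applying the same small-integer coercion as emit_stloc_ir. */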
+static void
+emit_starg_ir (MonoCompile *cfg, MonoInst **sp, int n)
+{
+ MonoInst *ins;
+ guint32 coerce_op = mono_type_to_stloc_coerce (cfg->arg_types [n]);
+
+ if (coerce_op) {
+ if (cfg->cbb->last_ins == sp [0] && sp [0]->opcode == coerce_op) {
+ if (cfg->verbose_level > 2)
+ printf ("Found existing coercing is enough for starg\n");
+ } else {
+ MONO_INST_NEW (cfg, ins, coerce_op);
+ ins->dreg = alloc_ireg (cfg);
+ ins->sreg1 = sp [0]->dreg;
+ ins->type = STACK_I4;
+ ins->klass = mono_class_from_mono_type (cfg->arg_types [n]);
+ MONO_ADD_INS (cfg->cbb, ins);
+ *sp = mono_decompose_opcode (cfg, ins);
+ }
+ }
+
+ EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
+}
+
/*
* ldloca inhibits many optimizations so try to get rid of it in common
* cases.
icall_args [0] = thunk_arg_ins;
icall_args [1] = emit_get_rgctx_method (cfg, context_used,
cmethod, MONO_RGCTX_INFO_METHOD);
- ftndesc_ins = mono_emit_calli (cfg, helper_sig_llvmonly_imt_trampoline, icall_args, thunk_addr_ins, NULL, NULL);
+ ftndesc_ins = mini_emit_calli (cfg, helper_sig_llvmonly_imt_trampoline, icall_args, thunk_addr_ins, NULL, NULL);
return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
}
icall_args [0] = thunk_arg_ins;
icall_args [1] = emit_get_rgctx_method (cfg, context_used,
cmethod, MONO_RGCTX_INFO_METHOD);
- ftndesc_ins = mono_emit_calli (cfg, helper_sig_llvmonly_imt_trampoline, icall_args, thunk_addr_ins, NULL, NULL);
+ ftndesc_ins = mini_emit_calli (cfg, helper_sig_llvmonly_imt_trampoline, icall_args, thunk_addr_ins, NULL, NULL);
ftndesc_ins->dreg = ftndesc_reg;
/*
* Unlike normal iface calls, these imt thunks can return NULL, i.e. when they are passed an instantiation
// FIXME: Avoid initializing vtable_arg
emit_llvmonly_calli (cfg, fsig, sp, addr);
} else {
- mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
+ mini_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
}
} else if (context_used &&
((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
cmethod_addr = emit_get_rgctx_method (cfg, context_used,
cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
- mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
+ mini_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
}
} else {
INLINE_FAILURE ("ctor call");
cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
if (cfg->method == method) {
- if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
- cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
+ cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
/* ENTRY BLOCK */
NEW_BBLOCK (cfg, start_bblock);
tblock->real_offset = clause->handler_offset;
tblock->flags |= BB_EXCEPTION_HANDLER;
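+			/* Make sure an exception variable exists for finally clauses as well. */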
+ if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
+ mono_create_exvar_for_offset (cfg, clause->handler_offset);
/*
* Linking the try block with the EH block hinders inlining as we won't be able to
* merge the bblocks from inlining and produce an artificial hole for no good reason.
if ((cfg->method == method) && cfg->coverage_info) {
guint32 cil_offset = ip - header->code;
+ gpointer counter = &cfg->coverage_info->data [cil_offset].count;
cfg->coverage_info->data [cil_offset].cil_code = ip;
- /* TODO: Use an increment here */
-#if defined(TARGET_X86)
- MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
- ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
- ins->inst_imm = 1;
- MONO_ADD_INS (cfg->cbb, ins);
-#else
- EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
- MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
-#endif
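+			/* Increment the coverage counter: use an atomic add when the architecture supports it, otherwise fall back to a plain store of 1. */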
+ if (mono_arch_opcode_supported (OP_ATOMIC_ADD_I4)) {
+ MonoInst *one_ins, *load_ins;
+
+ EMIT_NEW_PCONST (cfg, load_ins, counter);
+ EMIT_NEW_ICONST (cfg, one_ins, 1);
+ MONO_INST_NEW (cfg, ins, OP_ATOMIC_ADD_I4);
+ ins->dreg = mono_alloc_ireg (cfg);
+ ins->inst_basereg = load_ins->dreg;
+ ins->inst_offset = 0;
+ ins->sreg2 = one_ins->dreg;
+ ins->type = STACK_I4;
+ MONO_ADD_INS (cfg->cbb, ins);
+ } else {
+ EMIT_NEW_PCONST (cfg, ins, counter);
+ MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
+ }
}
if (cfg->verbose_level > 3)
MONO_ADD_INS (cfg->cbb, ins);
break;
case CEE_BREAK:
- if (should_insert_brekpoint (cfg->method)) {
+ if (mini_should_insert_breakpoint (cfg->method)) {
ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
} else {
MONO_INST_NEW (cfg, ins, OP_NOP);
CHECK_ARG (n);
if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
UNVERIFIED;
- EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
+ emit_starg_ir (cfg, sp, n);
ip += 2;
break;
case CEE_LDLOC_S:
CHECK_STACK_OVF (1);
n = ip [1];
CHECK_LOCAL (n);
- EMIT_NEW_LOCLOAD (cfg, ins, n);
+ if ((ip [2] == CEE_LDFLD) && ip_in_bb (cfg, cfg->cbb, ip + 2) && MONO_TYPE_ISSTRUCT (header->locals [n])) {
+ /* Avoid loading a struct just to load one of its fields */
+ EMIT_NEW_LOCLOADA (cfg, ins, n);
+ } else {
+ EMIT_NEW_LOCLOAD (cfg, ins, n);
+ }
*sp++ = ins;
ip += 2;
break;
if (cfg->gshared && mono_method_check_context_used (cmethod))
GENERIC_SHARING_FAILURE (CEE_JMP);
- emit_instrumentation_call (cfg, mono_profiler_method_leave);
+ mini_profiler_emit_tail_call (cfg, cmethod);
fsig = mono_method_signature (cmethod);
n = fsig->param_count + fsig->hasthis;
addr = emit_get_rgctx_sig (cfg, context_used,
fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
- ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
+ ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, callee);
goto calli_end;
}
goto calli_end;
}
}
- ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
+ ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
calli_end:
sp -= n;
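+		/* Calls into corlib's ThrowHelper are assumed to be cold, so mark this block as out of line. */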
+ if (cmethod && cmethod->klass->image == mono_defaults.corlib && !strcmp (cmethod->klass->name, "ThrowHelper"))
+ cfg->cbb->out_of_line = TRUE;
+
/*
* We have the `constrained.' prefix opcode.
*/
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
/* Non-ref case */
- nonbox_call = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
+ nonbox_call = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
ins->klass = constrained_class;
sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
- ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
+ ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
} else {
g_assert (mono_class_is_interface (cmethod->klass));
addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
- ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
+ ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
goto call_end;
}
} else if (constrained_class->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
- ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
+ ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
}
goto call_end;
}
for (i = 0; i < n; ++i)
EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
+
+ mini_profiler_emit_tail_call (cfg, cmethod);
+
MONO_INST_NEW (cfg, ins, OP_BR);
MONO_ADD_INS (cfg->cbb, ins);
tblock = start_bblock->out_bb [0];
// FIXME: Avoid initializing vtable_arg
ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
} else {
- ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
+ ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
}
goto call_end;
}
ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
} else {
addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
- ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
+ ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
}
goto call_end;
}
addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
if (cfg->gen_write_barriers && val->type == STACK_OBJ && !MONO_INS_IS_PCONST_NULL (val))
- emit_write_barrier (cfg, addr, val);
+ mini_emit_write_barrier (cfg, addr, val);
if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cmethod->klass))
GSHAREDVT_FAILURE (*ip);
} else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
/* Handle tail calls similarly to normal calls */
tail_call = TRUE;
} else {
- emit_instrumentation_call (cfg, mono_profiler_method_leave);
+ mini_profiler_emit_tail_call (cfg, cmethod);
MONO_INST_NEW_CALL (cfg, call, OP_JMP);
call->tail_call = TRUE;
}
/* Common call */
- if (!(cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
+ if (!(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) && !(cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
INLINE_FAILURE ("call");
ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual_ ? sp [0] : NULL,
imt_arg, vtable_arg);
break;
}
case CEE_RET:
+ mini_profiler_emit_leave (cfg, sig->ret->type != MONO_TYPE_VOID ? sp [-1] : NULL);
+
if (cfg->method != method) {
/* return from inlined method */
/*
cfg->ret_var_set = TRUE;
}
} else {
- emit_instrumentation_call (cfg, mono_profiler_method_leave);
-
if (cfg->lmf_var && cfg->cbb->in_count && !cfg->llvm_only)
emit_pop_lmf (cfg);
CHECK_STACK (1);
--sp;
- switch (*ip) {
- case CEE_LDIND_R4:
- case CEE_LDIND_R8:
- dreg = alloc_freg (cfg);
- break;
- case CEE_LDIND_I8:
- dreg = alloc_lreg (cfg);
- break;
- case CEE_LDIND_REF:
- dreg = alloc_ireg_ref (cfg);
- break;
- default:
- dreg = alloc_preg (cfg);
- }
-
- NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
- ins->type = ldind_type [*ip - CEE_LDIND_I1];
- if (*ip == CEE_LDIND_R4)
- ins->type = cfg->r4_stack_type;
- ins->flags |= ins_flag;
- MONO_ADD_INS (cfg->cbb, ins);
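+	/* The memory-load helper selects the destination register, sets the stack type and emits the acquire barrier for volatile loads */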
+ ins = mini_emit_memory_load (cfg, &ldind_to_type (*ip)->byval_arg, sp [0], 0, ins_flag);
*sp++ = ins;
- if (ins_flag & MONO_INST_VOLATILE) {
- /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
- emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
- }
ins_flag = 0;
++ip;
break;
if (ins_flag & MONO_INST_VOLATILE) {
/* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
- emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
+ mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
}
NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
MONO_ADD_INS (cfg->cbb, ins);
if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !MONO_INS_IS_PCONST_NULL (sp [1]))
- emit_write_barrier (cfg, sp [0], sp [1]);
+ mini_emit_write_barrier (cfg, sp [0], sp [1]);
inline_costs += 1;
++ip;
klass = mini_get_class (method, token, generic_context);
CHECK_TYPELOAD (klass);
sp -= 2;
- if (generic_class_is_reference_type (cfg, klass)) {
- MonoInst *store, *load;
- int dreg = alloc_ireg_ref (cfg);
-
- NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
- load->flags |= ins_flag;
- MONO_ADD_INS (cfg->cbb, load);
-
- NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
- store->flags |= ins_flag;
- MONO_ADD_INS (cfg->cbb, store);
-
- if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
- emit_write_barrier (cfg, sp [0], sp [1]);
- } else {
- mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
- }
+ mini_emit_memory_copy (cfg, sp [0], sp [1], klass, FALSE, ins_flag);
ins_flag = 0;
ip += 5;
break;
ip += stloc_len;
if (ins_flag & MONO_INST_VOLATILE) {
/* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
- emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
+ mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
}
ins_flag = 0;
break;
}
/* Optimize the ldobj+stobj combination */
- /* The reference case ends up being a load+store anyway */
- /* Skip this if the operation is volatile. */
- if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
+ if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 5) && read32 (ip + 6) == token)) {
CHECK_STACK (1);
sp --;
- mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
+ mini_emit_memory_copy (cfg, sp [0], sp [1], klass, FALSE, ins_flag);
ip += 5 + 5;
ins_flag = 0;
break;
}
- EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
- ins->flags |= ins_flag;
+ ins = mini_emit_memory_load (cfg, &klass->byval_arg, sp [0], 0, ins_flag);
*sp++ = ins;
- if (ins_flag & MONO_INST_VOLATILE) {
- /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
- emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
- }
-
ip += 5;
ins_flag = 0;
inline_costs += 1;
TYPE_LOAD_ERROR (cmethod->klass);
context_used = mini_method_check_context_used (cfg, cmethod);
+
+ if (!dont_verify && !cfg->skip_visibility) {
+ MonoMethod *cil_method = cmethod;
+ MonoMethod *target_method = cil_method;
+
+ if (method->is_inflated) {
+ target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context), &cfg->error);
+ CHECK_CFG_ERROR;
+ }
+
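+	/* Fail only if the callee is inaccessible both from the method definition and from the method itself */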
+ if (!mono_method_can_access_method (method_definition, target_method) &&
+ !mono_method_can_access_method (method, cil_method))
+ emit_method_access_failure (cfg, method, cil_method);
+ }
if (mono_security_core_clr_enabled ())
ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
if (ins_flag & MONO_INST_VOLATILE) {
/* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
- emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
+ mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
}
if (mini_is_gsharedvt_klass (klass)) {
dreg = alloc_ireg_mp (cfg);
EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
wbarrier_ptr_ins = ins;
- /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
+ /* The decomposition will call mini_emit_memory_copy () which will emit a wbarrier if needed */
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
} else {
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !MONO_INS_IS_PCONST_NULL (sp [1])) {
if (mini_is_gsharedvt_klass (klass)) {
g_assert (wbarrier_ptr_ins);
- emit_write_barrier (cfg, wbarrier_ptr_ins, sp [1]);
+ mini_emit_write_barrier (cfg, wbarrier_ptr_ins, sp [1]);
} else {
/* insert call to write barrier */
MonoInst *ptr;
dreg = alloc_ireg_mp (cfg);
EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
- emit_write_barrier (cfg, ptr, sp [1]);
+ mini_emit_write_barrier (cfg, ptr, sp [1]);
}
}
}
}
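+	/* field_add_inst normally points at the object; for gsharedvt it is rewritten below to object + runtime field offset, with foffset reset to 0 */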
+ MonoInst *field_add_inst = sp [0];
if (mini_is_gsharedvt_klass (klass)) {
MonoInst *offset_ins;
offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
/* The value is offset by 1 */
EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
- dreg = alloc_ireg_mp (cfg);
- EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
- EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
- } else {
- EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
+ EMIT_NEW_BIALU (cfg, field_add_inst, OP_PADD, alloc_ireg_mp (cfg), sp [0]->dreg, offset_ins->dreg);
+ foffset = 0;
}
- load->flags |= ins_flag;
+
+ load = mini_emit_memory_load (cfg, field->type, field_add_inst, foffset, ins_flag);
+
if (sp [0]->opcode != OP_LDADDR)
load->flags |= MONO_INST_FAULT;
*sp++ = load;
int addr_reg = mono_alloc_preg (cfg);
EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
}
- } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
+ } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
MonoInst *iargs [2];
g_assert (field->parent);
if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
/* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
- emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
+ mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
}
if (op == CEE_LDSFLDA) {
if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
/* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
- emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
+ mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
}
ins_flag = 0;
token = read32 (ip + 1);
klass = mini_get_class (method, token, generic_context);
CHECK_TYPELOAD (klass);
- if (ins_flag & MONO_INST_VOLATILE) {
- /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
- emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
- }
+
/* FIXME: should check item at sp [1] is compatible with the type of the store. */
- EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
- ins->flags |= ins_flag;
- if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
- generic_class_is_reference_type (cfg, klass) && !MONO_INS_IS_PCONST_NULL (sp [1])) {
- /* insert call to write barrier */
- emit_write_barrier (cfg, sp [0], sp [1]);
- }
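+	/* mini_emit_memory_store also emits the release barrier for volatile stores and the GC write barrier for reference stores */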
+ mini_emit_memory_store (cfg, &klass->byval_arg, sp [0], sp [1], ins_flag);
ins_flag = 0;
ip += 5;
inline_costs += 1;
klass = mini_get_class (method, token, generic_context);
CHECK_TYPELOAD (klass);
+ if (klass->byval_arg.type == MONO_TYPE_VOID)
+ UNVERIFIED;
context_used = mini_class_check_context_used (cfg, klass);
* ensure the rva field is big enough
*/
if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, cfg->cbb, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
- MonoMethod *memcpy_method = get_memcpy_method ();
+ MonoMethod *memcpy_method = mini_get_memcpy_method ();
MonoInst *iargs [3];
int add_reg = alloc_ireg_mp (cfg);
if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
GList *tmp;
- MonoExceptionClause *clause;
for (tmp = handlers; tmp; tmp = tmp->next) {
- clause = (MonoExceptionClause *)tmp->data;
+ MonoExceptionClause *clause = (MonoExceptionClause *)tmp->data;
+ MonoInst *abort_exc = (MonoInst *)mono_find_exvar_for_offset (cfg, clause->handler_offset);
+ MonoBasicBlock *dont_throw;
+
tblock = cfg->cil_offset_to_bb [clause->handler_offset];
g_assert (tblock);
link_bblock (cfg, cfg->cbb, tblock);
+
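+	/* Clear the abort exception variable before calling the handler so we can tell whether the handler set it */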
+ MONO_EMIT_NEW_PCONST (cfg, abort_exc->dreg, 0);
+
MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
ins->inst_target_bb = tblock;
ins->inst_eh_block = clause;
MONO_ADD_INS (cfg->cbb, ins);
cfg->cbb->has_call_handler = 1;
+
+ /* Throw exception if exvar is set */
+	/* FIXME: Do we need this for calls from catch/filter? */
+ NEW_BBLOCK (cfg, dont_throw);
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, abort_exc->dreg, 0);
+ MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
+ mono_emit_jit_icall (cfg, mono_thread_self_abort, NULL);
+ cfg->cbb->clause_hole = clause;
+
+ MONO_START_BB (cfg, dont_throw);
+ cfg->cbb->clause_hole = clause;
+
if (COMPILE_LLVM (cfg)) {
MonoBasicBlock *target_bb;
CHECK_STACK (info->sig->param_count);
sp -= info->sig->param_count;
- ins = mono_emit_jit_icall (cfg, info->func, sp);
+ if (cfg->compile_aot && !strcmp (info->name, "mono_threads_attach_coop")) {
+ MonoInst *addr;
+
+ /*
+ * This is called on unattached threads, so it cannot go through the trampoline
+ * infrastructure. Use an indirect call through a got slot initialized at load time
+ * instead.
+ */
+ EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_JIT_ICALL_ADDR_NOCALL, (char*)info->name);
+ ins = mini_emit_calli (cfg, info->sig, sp, addr, NULL, NULL);
+ } else {
+ ins = mono_emit_jit_icall (cfg, info->func, sp);
+ }
+
if (!MONO_TYPE_IS_VOID (info->sig->ret))
*sp++ = ins;
case CEE_MONO_LDPTR_CARD_TABLE:
case CEE_MONO_LDPTR_NURSERY_START:
case CEE_MONO_LDPTR_NURSERY_BITS:
- case CEE_MONO_LDPTR_INT_REQ_FLAG: {
+ case CEE_MONO_LDPTR_INT_REQ_FLAG:
+ case CEE_MONO_LDPTR_PROFILER_ALLOCATION_COUNT: {
CHECK_STACK_OVF (1);
switch (ip [1]) {
- case CEE_MONO_LDPTR_CARD_TABLE:
- ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
- break;
- case CEE_MONO_LDPTR_NURSERY_START:
- ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_START, NULL);
- break;
- case CEE_MONO_LDPTR_NURSERY_BITS:
- ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_BITS, NULL);
- break;
- case CEE_MONO_LDPTR_INT_REQ_FLAG:
- ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
- break;
+ case CEE_MONO_LDPTR_CARD_TABLE:
+ ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
+ break;
+ case CEE_MONO_LDPTR_NURSERY_START:
+ ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_START, NULL);
+ break;
+ case CEE_MONO_LDPTR_NURSERY_BITS:
+ ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_BITS, NULL);
+ break;
+ case CEE_MONO_LDPTR_INT_REQ_FLAG:
+ ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
+ break;
+ case CEE_MONO_LDPTR_PROFILER_ALLOCATION_COUNT:
+ ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_PROFILER_ALLOCATION_COUNT, NULL);
+ break;
+ default:
+ g_assert_not_reached ();
+ break;
}
*sp++ = ins;
temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
temp->backend.is_pinvoke = 1;
EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
- mini_emit_stobj (cfg, dest, src, klass, TRUE);
+ mini_emit_memory_copy (cfg, dest, src, klass, TRUE, 0);
EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
dest->type = STACK_VTYPE;
} else {
EMIT_NEW_RETLOADA (cfg, ins);
}
- mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
+ mini_emit_memory_copy (cfg, ins, sp [0], klass, TRUE, 0);
if (sp != stack_start)
UNVERIFIED;
+ mini_profiler_emit_leave (cfg, sp [0]);
+
MONO_INST_NEW (cfg, ins, OP_BR);
ins->inst_target_bb = end_bblock;
MONO_ADD_INS (cfg->cbb, ins);
cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
}
- /* Has to use a call inst since it local regalloc expects it */
+ /* Has to use a call inst since local regalloc expects it */
MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
ins = (MonoInst*)call;
sp -= 2;
MONO_ADD_INS (cfg->cbb, ins);
cfg->param_area = MAX (cfg->param_area, cfg->backend->dyn_call_param_area);
+ /* OP_DYN_CALL might need to allocate a dynamically sized param area */
+ cfg->flags |= MONO_CFG_HAS_ALLOCA;
ip += 2;
inline_costs += 10 * num_calls++;
}
case CEE_MONO_MEMORY_BARRIER: {
CHECK_OPSIZE (6);
- emit_memory_barrier (cfg, (int)read32 (ip + 2));
+ mini_emit_memory_barrier (cfg, (int)read32 (ip + 2));
ip += 6;
break;
}
MonoInst *ad_ins, *jit_tls_ins;
MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
- g_assert (!mono_threads_is_coop_enabled ());
+ g_assert (!mono_threads_is_blocking_transition_enabled ());
cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
* instead.
*/
EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_JIT_THREAD_ATTACH, NULL);
- ins = mono_emit_calli (cfg, helper_sig_jit_thread_attach, args, addr, NULL, NULL);
+ ins = mini_emit_calli (cfg, helper_sig_jit_thread_attach, args, addr, NULL, NULL);
} else {
ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
}
addr = emit_get_rgctx_sig (cfg, context_used,
fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
- ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
+ ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, callee);
} else {
- ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
+ ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
}
}
ins->type = STACK_I4;
MONO_ADD_INS (cfg->cbb, ins);
+ ip += 2;
+ *sp++ = ins;
+ break;
+ case CEE_MONO_GET_RGCTX_ARG:
+ CHECK_OPSIZE (2);
+ CHECK_STACK_OVF (1);
+
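+	/* Push a copy of the runtime generic context argument; make sure the rgctx var has been created first */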
+ mono_create_rgctx_var (cfg);
+
+ MONO_INST_NEW (cfg, ins, OP_MOVE);
+ ins->dreg = alloc_dreg (cfg, STACK_PTR);
+ ins->sreg1 = cfg->rgctx_var->dreg;
+ ins->type = STACK_PTR;
+ MONO_ADD_INS (cfg->cbb, ins);
+
ip += 2;
*sp++ = ins;
break;
CHECK_ARG (n);
if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
UNVERIFIED;
- EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
+ emit_starg_ir (cfg, sp, n);
ip += 4;
break;
case CEE_LDLOC:
CHECK_OPSIZE (4);
n = read16 (ip + 2);
CHECK_LOCAL (n);
- EMIT_NEW_LOCLOAD (cfg, ins, n);
+ if ((ip [4] == CEE_LDFLD) && ip_in_bb (cfg, cfg->cbb, ip + 4) && header->locals [n]->type == MONO_TYPE_VALUETYPE) {
+ /* Avoid loading a struct just to load one of its fields */
+ EMIT_NEW_LOCLOADA (cfg, ins, n);
+ } else {
+ EMIT_NEW_LOCLOAD (cfg, ins, n);
+ }
*sp++ = ins;
ip += 4;
break;
ip += 6;
break;
case CEE_CPBLK:
- case CEE_INITBLK: {
- MonoInst *iargs [3];
CHECK_STACK (3);
sp -= 3;
-
- /* Skip optimized paths for volatile operations. */
- if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
- mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
- } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
- /* emit_memset only works when val == 0 */
- mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
- } else {
- MonoInst *call;
- iargs [0] = sp [0];
- iargs [1] = sp [1];
- iargs [2] = sp [2];
- if (ip [1] == CEE_CPBLK) {
- /*
- * FIXME: It's unclear whether we should be emitting both the acquire
- * and release barriers for cpblk. It is technically both a load and
- * store operation, so it seems like that's the sensible thing to do.
- *
- * FIXME: We emit full barriers on both sides of the operation for
- * simplicity. We should have a separate atomic memcpy method instead.
- */
- MonoMethod *memcpy_method = get_memcpy_method ();
-
- if (ins_flag & MONO_INST_VOLATILE)
- emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
-
- call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
- call->flags |= ins_flag;
-
- if (ins_flag & MONO_INST_VOLATILE)
- emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
- } else {
- MonoMethod *memset_method = get_memset_method ();
- if (ins_flag & MONO_INST_VOLATILE) {
- /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
- emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
- }
- call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
- call->flags |= ins_flag;
- }
- }
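+	/* The copy helper is expected to cover the small-constant-size fast path and the volatile barriers for cpblk */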
+ mini_emit_memory_copy_bytes (cfg, sp [0], sp [1], sp [2], ins_flag);
+ ip += 2;
+ ins_flag = 0;
+ inline_costs += 1;
+ break;
+ case CEE_INITBLK:
+ CHECK_STACK (3);
+ sp -= 3;
+ mini_emit_memory_init_bytes (cfg, sp [0], sp [1], sp [2], ins_flag);
ip += 2;
ins_flag = 0;
inline_costs += 1;
break;
- }
case CEE_NO_:
CHECK_OPSIZE (3);
if (ip [2] & 0x1)
}
cfg->cbb = init_localsbb;
- emit_instrumentation_call (cfg, mono_profiler_method_enter);
+ mini_profiler_emit_enter (cfg);
if (seq_points) {
MonoBasicBlock *bb;
* - create a helper function for allocating a stack slot, taking into account
* MONO_CFG_HAS_SPILLUP.
* - merge r68207.
- * - merge the ia64 switch changes.
* - optimize mono_regstate2_alloc_int/float.
* - fix the pessimistic handling of variables accessed in exception handler blocks.
* - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
the values on the stack before emitting the last instruction of the bb.
*/
-#else /* !DISABLE_JIT */
-
-void
-mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
-{
-}
-
#endif /* !DISABLE_JIT */