-/*
- * method-to-ir.c: Convert CIL to the JIT internal representation
+/**
+ * \file
+ * Convert CIL to the JIT internal representation
*
* Author:
* Paolo Molaro (lupus@ximian.com)
return cfg->got_var;
}
-static MonoInst *
-mono_get_vtable_var (MonoCompile *cfg)
+static void
+mono_create_rgctx_var (MonoCompile *cfg)
{
- g_assert (cfg->gshared);
-
if (!cfg->rgctx_var) {
cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* force the var to be stack allocated */
cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
}
+}
+
+static MonoInst *
+mono_get_vtable_var (MonoCompile *cfg)
+{
+ g_assert (cfg->gshared);
+
+ mono_create_rgctx_var (cfg);
return cfg->rgctx_var;
}
if (!cfg->lmf_ir)
return;
- if (cfg->lmf_ir_mono_lmf) {
- MonoInst *lmf_vara_ins, *lmf_ins;
- /* Load current lmf */
- lmf_ins = mono_create_tls_get (cfg, TLS_KEY_LMF);
- g_assert (lmf_ins);
- EMIT_NEW_VARLOADA (cfg, lmf_vara_ins, cfg->lmf_var, NULL);
- /* Save previous_lmf */
- EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_vara_ins->dreg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
- /* Set new LMF */
- mono_create_tls_set (cfg, lmf_vara_ins, TLS_KEY_LMF);
- } else {
- int lmf_reg, prev_lmf_reg;
- /*
- * Store lmf_addr in a variable, so it can be allocated to a global register.
- */
- if (!cfg->lmf_addr_var)
- cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
+ int lmf_reg, prev_lmf_reg;
+ /*
+ * Store lmf_addr in a variable, so it can be allocated to a global register.
+ */
+ if (!cfg->lmf_addr_var)
+ cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
#ifdef HOST_WIN32
- ins = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
- g_assert (ins);
- int jit_tls_dreg = ins->dreg;
+ ins = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
+ g_assert (ins);
+ int jit_tls_dreg = ins->dreg;
- lmf_reg = alloc_preg (cfg);
- EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
+ lmf_reg = alloc_preg (cfg);
+ EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
#else
- lmf_ins = mono_create_tls_get (cfg, TLS_KEY_LMF_ADDR);
- g_assert (lmf_ins);
+ lmf_ins = mono_create_tls_get (cfg, TLS_KEY_LMF_ADDR);
+ g_assert (lmf_ins);
#endif
- lmf_ins->dreg = cfg->lmf_addr_var->dreg;
+ lmf_ins->dreg = cfg->lmf_addr_var->dreg;
- EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
- lmf_reg = ins->dreg;
+ EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
+ lmf_reg = ins->dreg;
- prev_lmf_reg = alloc_preg (cfg);
- /* Save previous_lmf */
- EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
- EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
- /* Set new lmf */
- EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
- }
+ prev_lmf_reg = alloc_preg (cfg);
+ /* Save previous_lmf */
+ EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
+ EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
+ /* Set new lmf */
+ EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
}
/*
EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
lmf_reg = ins->dreg;
- if (cfg->lmf_ir_mono_lmf) {
- /* Load previous_lmf */
- EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, alloc_preg (cfg), lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
- /* Set new LMF */
- mono_create_tls_set (cfg, ins, TLS_KEY_LMF);
- } else {
- int prev_lmf_reg;
- /*
- * Emit IR to pop the LMF:
- * *(lmf->lmf_addr) = lmf->prev_lmf
- */
- /* This could be called before emit_push_lmf () */
- if (!cfg->lmf_addr_var)
- cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
- lmf_addr_reg = cfg->lmf_addr_var->dreg;
+ int prev_lmf_reg;
+ /*
+ * Emit IR to pop the LMF:
+ * *(lmf->lmf_addr) = lmf->prev_lmf
+ */
+ /* This could be called before emit_push_lmf () */
+ if (!cfg->lmf_addr_var)
+ cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
+ lmf_addr_reg = cfg->lmf_addr_var->dreg;
- prev_lmf_reg = alloc_preg (cfg);
- EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
- EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
- }
+ prev_lmf_reg = alloc_preg (cfg);
+ EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
+ EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
}
static void
static gboolean
direct_icalls_enabled (MonoCompile *cfg)
{
+ return FALSE;
+
/* LLVM on amd64 can't handle calls to non-32 bit addresses */
#ifdef TARGET_AMD64
if (cfg->compile_llvm && !cfg->llvm_only)
wbarrier->sreg1 = ptr->dreg;
wbarrier->sreg2 = value->dreg;
MONO_ADD_INS (cfg->cbb, wbarrier);
- } else if (card_table && !cfg->compile_aot && !mono_gc_card_table_nursery_check ()) {
+ } else if (card_table) {
int offset_reg = alloc_preg (cfg);
int card_reg;
MonoInst *ins;
+ /*
+	 * We emit a fast lightweight write barrier. This always marks cards, as in the concurrent
+ * collector case, so, for the serial collector, it might slightly slow down nursery
+ * collections. We also expect that the host system and the target system have the same card
+ * table configuration, which is the case if they have the same pointer size.
+ */
+
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
if (card_table_mask)
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
if (align < SIZEOF_VOID_P)
return FALSE;
- /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
- if (size > 32 * SIZEOF_VOID_P)
+ if (size > 5 * SIZEOF_VOID_P)
return FALSE;
create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
- /* We don't unroll more than 5 stores to avoid code bloat. */
- if (size > 5 * SIZEOF_VOID_P) {
- /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
- size += (SIZEOF_VOID_P - 1);
- size &= ~(SIZEOF_VOID_P - 1);
-
- EMIT_NEW_ICONST (cfg, iargs [2], size);
- EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
- mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
- return TRUE;
- }
-
destreg = iargs [0]->dreg;
srcreg = iargs [1]->dreg;
offset = 0;
else
n = mono_class_value_size (klass, &align);
+ if (!align)
+ align = SIZEOF_VOID_P;
/* if native is true there should be no references in the struct */
if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
/* Avoid barriers when storing to the stack */
/* It's ok to intrinsify under gsharing since shared code types are layout stable. */
if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
return;
- } else if (context_used) {
- iargs [2] = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
- } else {
- iargs [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, klass);
- if (!cfg->compile_aot)
- mono_class_compute_gc_descriptor (klass);
- }
+ } else if (size_ins || align < SIZEOF_VOID_P) {
+ if (context_used) {
+ iargs [2] = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
+ } else {
+ iargs [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, klass);
+ if (!cfg->compile_aot)
+ mono_class_compute_gc_descriptor (klass);
+ }
+ if (size_ins)
+ mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
+ else
+ mono_emit_jit_icall (cfg, mono_value_copy, iargs);
+ } else {
+ /* We don't unroll more than 5 stores to avoid code bloat. */
+	/* This is harmless and simplifies mono_gc_get_range_copy_func */
+ n += (SIZEOF_VOID_P - 1);
+ n &= ~(SIZEOF_VOID_P - 1);
- if (size_ins)
- mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
- else
- mono_emit_jit_icall (cfg, mono_value_copy, iargs);
- return;
+ EMIT_NEW_ICONST (cfg, iargs [2], n);
+ mono_emit_jit_icall (cfg, mono_gc_get_range_copy_func (), iargs);
+ }
}
}
mrgctx_loc = mono_get_vtable_var (cfg);
EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
+ return mrgctx_var;
+ } else if (MONO_CLASS_IS_INTERFACE (cfg->method->klass)) {
+ MonoInst *mrgctx_loc, *mrgctx_var;
+
+	/* Default interface methods need an mrgctx since the vtable at runtime points at an implementing class */
+ mrgctx_loc = mono_get_vtable_var (cfg);
+ EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
+
+ g_assert (mono_method_needs_static_rgctx_invoke (cfg->method, TRUE));
+
return mrgctx_var;
} else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
MonoInst *vtable_loc, *vtable_var;
#if SIZEOF_REGISTER == 8
/* The array reg is 64 bits but the index reg is only 32 */
if (COMPILE_LLVM (cfg)) {
- /* Not needed */
+ /*
+ * abcrem can't handle the OP_SEXT_I4, so add this after abcrem,
+ * during OP_BOUNDS_CHECK decomposition, and in the implementation
+ * of OP_X86_LEA for llvm.
+ */
index2_reg = index_reg;
} else {
index2_reg = alloc_preg (cfg);
/**
* mono_set_break_policy:
- * policy_callback: the new callback function
+ * \param policy_callback the new callback function
*
* Allow embedders to decide wherther to actually obey breakpoint instructions
- * (both break IL instructions and Debugger.Break () method calls), for example
+ * (both break IL instructions and \c Debugger.Break method calls), for example
* to not allow an app to be aborted by a perfectly valid IL opcode when executing
* untrusted or semi-trusted code.
*
- * @policy_callback will be called every time a break point instruction needs to
- * be inserted with the method argument being the method that calls Debugger.Break()
- * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
+ * \p policy_callback will be called every time a break point instruction needs to
+ * be inserted with the method argument being the method that calls \c Debugger.Break
+ * or has the IL \c break instruction. The callback should return \c MONO_BREAK_POLICY_NEVER
* if it wants the breakpoint to not be effective in the given method.
- * #MONO_BREAK_POLICY_ALWAYS is the default.
+ * \c MONO_BREAK_POLICY_ALWAYS is the default.
*/
void
mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
return NULL;
if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
return NULL;
- switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
- case MONO_TYPE_BOOLEAN:
+ switch (mini_get_underlying_type (&klass->byval_arg)->type) {
case MONO_TYPE_I1:
case MONO_TYPE_U1:
size = 1; break;
/* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
#if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
- case MONO_TYPE_CHAR:
case MONO_TYPE_I2:
case MONO_TYPE_U2:
size = 2; break;
klass = mini_get_class (method, token, generic_context);
CHECK_TYPELOAD (klass);
+ if (klass->byval_arg.type == MONO_TYPE_VOID)
+ UNVERIFIED;
context_used = mini_class_check_context_used (cfg, klass);
ins->type = STACK_I4;
MONO_ADD_INS (cfg->cbb, ins);
+ ip += 2;
+ *sp++ = ins;
+ break;
+ case CEE_MONO_GET_RGCTX_ARG:
+ CHECK_OPSIZE (2);
+ CHECK_STACK_OVF (1);
+
+ mono_create_rgctx_var (cfg);
+
+ MONO_INST_NEW (cfg, ins, OP_MOVE);
+ ins->dreg = alloc_dreg (cfg, STACK_PTR);
+ ins->sreg1 = cfg->rgctx_var->dreg;
+ ins->type = STACK_PTR;
+ MONO_ADD_INS (cfg->cbb, ins);
+
ip += 2;
*sp++ = ins;
break;