switch (type->type) {
case MONO_TYPE_I1:
case MONO_TYPE_U1:
- case MONO_TYPE_BOOLEAN:
return OP_MOVE;
case MONO_TYPE_I2:
case MONO_TYPE_U2:
- case MONO_TYPE_CHAR:
return OP_MOVE;
case MONO_TYPE_I4:
case MONO_TYPE_U4:
gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
{
if (cfg->verbose_level > 2) \
- printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), __LINE__);
+ printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), line);
mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
}
else
return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
}
+ }
+ for (i = 0; i < header->num_clauses; ++i) {
+ clause = &header->clauses [i];
if (MONO_OFFSET_IN_CLAUSE (clause, offset))
return ((i + 1) << 8) | clause->flags;
return;
case MONO_TYPE_I1:
case MONO_TYPE_U1:
- case MONO_TYPE_BOOLEAN:
case MONO_TYPE_I2:
case MONO_TYPE_U2:
- case MONO_TYPE_CHAR:
case MONO_TYPE_I4:
case MONO_TYPE_U4:
inst->type = STACK_I4;
switch (t->type) {
case MONO_TYPE_I1:
case MONO_TYPE_U1:
- case MONO_TYPE_BOOLEAN:
case MONO_TYPE_I2:
case MONO_TYPE_U2:
- case MONO_TYPE_CHAR:
case MONO_TYPE_I4:
case MONO_TYPE_U4:
return STACK_I4;
return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
case MONO_TYPE_I1:
case MONO_TYPE_U1:
- case MONO_TYPE_BOOLEAN:
case MONO_TYPE_I2:
case MONO_TYPE_U2:
- case MONO_TYPE_CHAR:
case MONO_TYPE_I4:
case MONO_TYPE_U4:
return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
return 1;
case MONO_TYPE_I1:
case MONO_TYPE_U1:
- case MONO_TYPE_BOOLEAN:
case MONO_TYPE_I2:
case MONO_TYPE_U2:
- case MONO_TYPE_CHAR:
case MONO_TYPE_I4:
case MONO_TYPE_U4:
if (arg->type != STACK_I4 && arg->type != STACK_PTR)
continue;
case MONO_TYPE_I1:
case MONO_TYPE_U1:
- case MONO_TYPE_BOOLEAN:
case MONO_TYPE_I2:
case MONO_TYPE_U2:
- case MONO_TYPE_CHAR:
case MONO_TYPE_I4:
case MONO_TYPE_U4:
if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
vtable_reg = alloc_preg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
- slot_reg = -1;
- if (mono_use_imt) {
- guint32 imt_slot = mono_method_get_imt_slot (method);
- emit_imt_argument (cfg, call, call->method, imt_arg);
- slot_reg = vtable_reg;
- offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
- }
- if (slot_reg == -1) {
- slot_reg = alloc_preg (cfg);
- mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
- offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
- }
+ guint32 imt_slot = mono_method_get_imt_slot (method);
+ emit_imt_argument (cfg, call, call->method, imt_arg);
+ slot_reg = vtable_reg;
+ offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
} else {
slot_reg = vtable_reg;
offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
return ins;
}
+/*
+ * direct_icalls_enabled:
+ *
+ *   Return TRUE if icalls can be called directly, i.e. without going
+ * through a wrapper. Callers use this to decide between emitting a
+ * direct call and a wrapped call.
+ */
+static gboolean
+direct_icalls_enabled (MonoCompile *cfg)
+{
+	/* LLVM on amd64 can't handle calls to non-32 bit addresses */
+#ifdef TARGET_AMD64
+	if (cfg->compile_llvm)
+		return FALSE;
+#endif
+	/* Wrappers are needed for stack walks when sdb sequence points are on,
+	 * and direct icalls can be disabled explicitly on the compile. */
+	if (cfg->gen_sdb_seq_points || cfg->disable_direct_icalls)
+		return FALSE;
+	return TRUE;
+}
+
MonoInst*
mono_emit_jit_icall_by_info (MonoCompile *cfg, MonoJitICallInfo *info, MonoInst **args, MonoBasicBlock **out_cbb)
{
- gboolean no_wrapper = FALSE;
-
/*
* Call the jit icall without a wrapper if possible.
* The wrapper is needed for the following reasons:
* - to be able to do stack walks for asynchronously suspended
* threads when debugging.
*/
- if (info->no_raise) {
- if (cfg->compile_aot) {
- // FIXME: This might be loaded into a runtime during debugging
- // even if it is not compiled using 'soft-debug'.
- } else {
- no_wrapper = TRUE;
- /* LLVM on amd64 can't handle calls to non-32 bit addresses */
- if ((cfg->compile_llvm && SIZEOF_VOID_P == 8) || cfg->gen_seq_points_debug_data)
- no_wrapper = FALSE;
- }
- }
-
- if (no_wrapper) {
+ if (info->no_raise && direct_icalls_enabled (cfg)) {
char *name;
int costs;
wbarrier->sreg1 = ptr->dreg;
wbarrier->sreg2 = value->dreg;
MONO_ADD_INS (cfg->cbb, wbarrier);
- } else if (card_table) {
+ } else if (card_table && !cfg->compile_aot && !mono_gc_card_table_nursery_check ()) {
int offset_reg = alloc_preg (cfg);
int card_reg = alloc_preg (cfg);
MonoInst *ins;
mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
{
MonoInst *iargs [4];
- int context_used, n;
+ int n;
guint32 align = 0;
MonoMethod *memcpy_method;
MonoInst *size_ins = NULL;
MonoInst *memcpy_ins = NULL;
g_assert (klass);
+ if (cfg->generic_sharing_context)
+ klass = mono_class_from_mono_type (mini_get_underlying_type (cfg, &klass->byval_arg));
+
/*
* This check breaks with spilled vars... need to handle it during verification anyway.
* g_assert (klass && klass == src->klass && klass == dest->klass);
if (mini_is_gsharedvt_klass (cfg, klass)) {
g_assert (!native);
- context_used = mini_class_check_context_used (cfg, klass);
size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
}
mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
{
MonoInst *iargs [3];
- int n, context_used;
+ int n;
guint32 align;
MonoMethod *memset_method;
MonoInst *size_ins = NULL;
static MonoMethod *bzero_method;
/* FIXME: Optimize this for the case when dest is an LDADDR */
-
mono_class_init (klass);
if (mini_is_gsharedvt_klass (cfg, klass)) {
- context_used = mini_class_check_context_used (cfg, klass);
size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
if (!bzero_method)
return emit_rgctx_fetch (cfg, rgctx, entry);
}
+/*
+ * emit_get_rgctx_virt_method:
+ *
+ * Return data for method VIRT_METHOD for a receiver of type KLASS.
+ *
+ * CONTEXT_USED describes which generic context(s) the info depends on;
+ * RGCTX_TYPE selects what kind of data (e.g. code address or box type)
+ * the runtime generic context fetch should return.
+ */
+static MonoInst*
+emit_get_rgctx_virt_method (MonoCompile *cfg, int context_used,
+							MonoClass *klass, MonoMethod *virt_method, MonoRgctxInfoType rgctx_type)
+{
+	MonoJumpInfoVirtMethod *info;
+	MonoJumpInfoRgctxEntry *entry;
+	MonoInst *rgctx;
+
+	/* The (klass, virt_method) pair identifies the virtual call site;
+	 * allocated from the cfg mempool so it lives as long as the compile. */
+	info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoVirtMethod));
+	info->klass = klass;
+	info->method = virt_method;
+
+	entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_VIRT_METHOD, info, rgctx_type);
+	rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
+
+	return emit_rgctx_fetch (cfg, rgctx, entry);
+}
static MonoInst*
emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
g_assert (klass->rank == 0);
element_class = emit_get_rgctx_klass (cfg, context_used,
- klass->element_class, MONO_RGCTX_INFO_KLASS);
+ klass, MONO_RGCTX_INFO_ELEMENT_KLASS);
MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
MonoInst *unbox_call;
MonoMethodSignature *unbox_sig;
- MonoInst *var;
-
- var = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
unbox_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
unbox_sig->ret = &klass->byval_arg;
}
if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
- if (known_instance_size)
- EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (klass->instance_size));
+ if (known_instance_size) {
+ int size = mono_class_instance_size (klass);
+
+ EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (size));
+ }
return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
}
#endif
if (managed_alloc) {
+ int size = mono_class_instance_size (klass);
+
EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
- EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (klass->instance_size));
+ EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (size));
return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
}
alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
if (mini_is_gsharedvt_klass (cfg, klass)) {
MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
MonoInst *res, *is_ref, *src_var, *addr;
- int addr_reg, dreg;
+ int dreg;
dreg = alloc_ireg (cfg);
/* Ref case */
MONO_START_BB (cfg, is_ref_bb);
- addr_reg = alloc_ireg (cfg);
/* val is a vtype, so has to load the value manually */
src_var = get_vreg_to_inst (cfg, val->dreg);
}
}
-
static gboolean
mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
{
return FALSE;
}
+/* Lazily-built whitelist of corlib type names whose icalls are known not
+ * to raise; keys are static strings, values are a dummy non-NULL marker. */
+static GHashTable* direct_icall_type_hash;
+
+/*
+ * icall_is_direct_callable:
+ *
+ *   Return TRUE if the internal call CMETHOD can be invoked directly,
+ * i.e. without its managed wrapper. Only icalls which never call
+ * mono_raise_exception () are safe to call this way.
+ */
+static gboolean
+icall_is_direct_callable (MonoCompile *cfg, MonoMethod *cmethod)
+{
+	/* LLVM on amd64 can't handle calls to non-32 bit addresses */
+	if (!direct_icalls_enabled (cfg))
+		return FALSE;
+
+	/*
+	 * An icall is directly callable if it doesn't directly or indirectly call mono_raise_exception ().
+	 * Whitelist a few icalls for now.
+	 */
+	if (!direct_icall_type_hash) {
+		GHashTable *h = g_hash_table_new (g_str_hash, g_str_equal);
+
+		g_hash_table_insert (h, (char*)"Decimal", GUINT_TO_POINTER (1));
+		g_hash_table_insert (h, (char*)"Number", GUINT_TO_POINTER (1));
+		g_hash_table_insert (h, (char*)"Buffer", GUINT_TO_POINTER (1));
+		/* Publish the fully-initialized table before the global pointer
+		 * becomes visible to other threads. */
+		mono_memory_barrier ();
+		direct_icall_type_hash = h;
+	}
+
+	if (cmethod->klass == mono_defaults.math_class)
+		return TRUE;
+	/* No locking needed */
+	if (cmethod->klass->image == mono_defaults.corlib && g_hash_table_lookup (direct_icall_type_hash, cmethod->klass->name))
+		return TRUE;
+	return FALSE;
+}
+
#define is_complex_isinst(klass) ((klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
static MonoInst*
return res;
}
+/*
+ * get_castclass_cache_idx:
+ *
+ *   Return a process-unique index for a CASTCLASS_CACHE patch: the method
+ * index in the high 16 bits combined with a per-method counter, so every
+ * call site gets its own cache slot.
+ */
+static int
+get_castclass_cache_idx (MonoCompile *cfg)
+{
+	/* Each CASTCLASS_CACHE patch needs a unique index which identifies the call site */
+	cfg->castclass_cache_index ++;
+	return (cfg->method_index << 16) | cfg->castclass_cache_index;
+}
+
static MonoInst*
emit_castclass_with_cache_nonshared (MonoCompile *cfg, MonoInst *obj, MonoClass *klass, MonoBasicBlock **out_bblock)
{
/* inline cache*/
if (cfg->compile_aot) {
- /* Each CASTCLASS_CACHE patch needs a unique index which identifies the call site */
- cfg->castclass_cache_index ++;
- idx = (cfg->method_index << 16) | cfg->castclass_cache_index;
+ idx = get_castclass_cache_idx (cfg);
EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
} else {
EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
{
MonoType *enum_type = mono_type_get_underlying_type (&klass->byval_arg);
guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
- gboolean is_i4 = TRUE;
+ gboolean is_i4;
switch (enum_type->type) {
case MONO_TYPE_I8:
#endif
is_i4 = FALSE;
break;
+ default:
+ is_i4 = TRUE;
+ break;
}
{
if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count == 2) {
int dreg = alloc_ireg (cfg);
int index_reg = alloc_preg (cfg);
- int mult_reg = alloc_preg (cfg);
int add_reg = alloc_preg (cfg);
#if SIZEOF_REGISTER == 8
#if defined(TARGET_X86) || defined(TARGET_AMD64)
EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
add_reg = ins->dreg;
- /* Avoid a warning */
- mult_reg = 0;
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
add_reg, 0);
#else
+ int mult_reg = alloc_preg (cfg);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
for (i = 0; i < attrs->num_attrs; ++i) {
MonoCustomAttrEntry *attr = &attrs->attrs [i];
const gchar *p;
- int len;
MonoMethodSignature *sig;
if (!attr->ctor || attr->ctor->klass != klass)
continue;
/* Decode the attribute. See reflection.c */
- len = attr->data_size;
p = (const char*)attr->data;
g_assert (read16 (p) == 0x0001);
p += 2;
cfg->stat_cil_code_size += header->code_size;
seq_points = cfg->gen_seq_points && cfg->method == method;
-#ifdef PLATFORM_ANDROID
- seq_points &= cfg->method->wrapper_type == MONO_WRAPPER_NONE;
-#endif
if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
/* We could hit a seq point before attaching to the JIT (#8338) */
seq_points = FALSE;
}
- if (cfg->gen_seq_points_debug_data && cfg->method == method) {
+ if (cfg->gen_sdb_seq_points && cfg->method == method) {
minfo = mono_debug_lookup_method (method);
if (minfo) {
int i, n_il_offsets;
start_new_bblock = 1;
break;
}
- case CEE_CALLI:
+ case CEE_CALLI: {
+ MonoInst *addr;
+ MonoMethodSignature *fsig;
+
+ CHECK_OPSIZE (5);
+ token = read32 (ip + 1);
+
+ ins = NULL;
+
+ //GSHAREDVT_FAILURE (*ip);
+ cmethod = NULL;
+ CHECK_STACK (1);
+ --sp;
+ addr = *sp;
+ fsig = mini_get_signature (method, token, generic_context);
+
+ if (method->dynamic && fsig->pinvoke) {
+ MonoInst *args [3];
+
+ /*
+ * This is a call through a function pointer using a pinvoke
+ * signature. Have to create a wrapper and call that instead.
+ * FIXME: This is very slow, need to create a wrapper at JIT time
+ * instead based on the signature.
+ */
+ EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
+ EMIT_NEW_PCONST (cfg, args [1], fsig);
+ args [2] = addr;
+ addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
+ }
+
+ n = fsig->param_count + fsig->hasthis;
+
+ CHECK_STACK (n);
+
+ //g_assert (!virtual || fsig->hasthis);
+
+ sp -= n;
+
+ inline_costs += 10 * num_calls++;
+
+ /*
+ * Making generic calls out of gsharedvt methods.
+ * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
+ * patching gshared method addresses into a gsharedvt method.
+ */
+ if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
+ /*
+ * We pass the address to the gsharedvt trampoline in the rgctx reg
+ */
+ MonoInst *callee = addr;
+
+ if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
+ /* Not tested */
+ GSHAREDVT_FAILURE (*ip);
+
+ addr = emit_get_rgctx_sig (cfg, context_used,
+ fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
+ ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
+ goto calli_end;
+ }
+
+ /* Prevent inlining of methods with indirect calls */
+ INLINE_FAILURE ("indirect call");
+
+ if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
+ int info_type;
+ gpointer info_data;
+
+ /*
+ * Instead of emitting an indirect call, emit a direct call
+ * with the contents of the aotconst as the patch info.
+ */
+ if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
+ info_type = addr->inst_c1;
+ info_data = addr->inst_p0;
+ } else {
+ info_type = addr->inst_right->inst_c1;
+ info_data = addr->inst_right->inst_left;
+ }
+
+ if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
+ ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
+ NULLIFY_INS (addr);
+ goto calli_end;
+ }
+ }
+ ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
+
+ calli_end:
+
+ /* End of call, INS should contain the result of the call, if any */
+
+ if (!MONO_TYPE_IS_VOID (fsig->ret)) {
+ g_assert (ins);
+ *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
+ }
+
+ CHECK_CFG_EXCEPTION;
+
+ ip += 5;
+ ins_flag = 0;
+ constrained_class = NULL;
+ break;
+ }
case CEE_CALL:
case CEE_CALLVIRT: {
MonoInst *addr = NULL;
MonoMethodSignature *fsig = NULL;
int array_rank = 0;
int virtual = *ip == CEE_CALLVIRT;
- int calli = *ip == CEE_CALLI;
gboolean pass_imt_from_rgctx = FALSE;
MonoInst *imt_arg = NULL;
MonoInst *keep_this_alive = NULL;
gboolean push_res = TRUE;
gboolean skip_ret = FALSE;
gboolean delegate_invoke = FALSE;
+ gboolean direct_icall = FALSE;
+ gboolean constrained_partial_call = FALSE;
+ MonoMethod *cil_method;
CHECK_OPSIZE (5);
token = read32 (ip + 1);
ins = NULL;
- if (calli) {
- //GSHAREDVT_FAILURE (*ip);
- cmethod = NULL;
- CHECK_STACK (1);
- --sp;
- addr = *sp;
- fsig = mini_get_signature (method, token, generic_context);
- n = fsig->param_count + fsig->hasthis;
-
- if (method->dynamic && fsig->pinvoke) {
- MonoInst *args [3];
-
- /*
- * This is a call through a function pointer using a pinvoke
- * signature. Have to create a wrapper and call that instead.
- * FIXME: This is very slow, need to create a wrapper at JIT time
- * instead based on the signature.
- */
- EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
- EMIT_NEW_PCONST (cfg, args [1], fsig);
- args [2] = addr;
- addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
+ cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
+ cil_method = cmethod;
+
+ if (constrained_class) {
+ if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
+ if (!mini_is_gsharedvt_klass (cfg, constrained_class)) {
+ g_assert (!cmethod->klass->valuetype);
+ if (!mini_type_is_reference (cfg, &constrained_class->byval_arg))
+ constrained_partial_call = TRUE;
+ }
}
- } else {
- MonoMethod *cil_method;
- cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
- cil_method = cmethod;
-
- if (constrained_class) {
- if (method->wrapper_type != MONO_WRAPPER_NONE) {
- if (cfg->verbose_level > 2)
- printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class));
- if (!((constrained_class->byval_arg.type == MONO_TYPE_VAR ||
- constrained_class->byval_arg.type == MONO_TYPE_MVAR) &&
- cfg->generic_sharing_context)) {
- cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_class, generic_context, &cfg->error);
- CHECK_CFG_ERROR;
- }
- } else {
- if (cfg->verbose_level > 2)
- printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class));
+ if (method->wrapper_type != MONO_WRAPPER_NONE) {
+ if (cfg->verbose_level > 2)
+ printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class));
+ if (!((constrained_class->byval_arg.type == MONO_TYPE_VAR ||
+ constrained_class->byval_arg.type == MONO_TYPE_MVAR) &&
+ cfg->generic_sharing_context)) {
+ cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_class, generic_context, &cfg->error);
+ CHECK_CFG_ERROR;
+ }
+ } else {
+ if (cfg->verbose_level > 2)
+ printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class));
- if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
- /*
- * This is needed since get_method_constrained can't find
- * the method in klass representing a type var.
- * The type var is guaranteed to be a reference type in this
- * case.
- */
- if (!mini_is_gsharedvt_klass (cfg, constrained_class))
- g_assert (!cmethod->klass->valuetype);
- } else {
- cmethod = mono_get_method_constrained_checked (image, token, constrained_class, generic_context, &cil_method, &cfg->error);
- CHECK_CFG_ERROR;
- }
+ if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
+ /*
+ * This is needed since get_method_constrained can't find
+ * the method in klass representing a type var.
+ * The type var is guaranteed to be a reference type in this
+ * case.
+ */
+ if (!mini_is_gsharedvt_klass (cfg, constrained_class))
+ g_assert (!cmethod->klass->valuetype);
+ } else {
+ cmethod = mono_get_method_constrained_checked (image, token, constrained_class, generic_context, &cil_method, &cfg->error);
+ CHECK_CFG_ERROR;
}
}
+ }
- if (!cmethod || mono_loader_get_last_error ())
- LOAD_ERROR;
- if (!dont_verify && !cfg->skip_visibility) {
- MonoMethod *target_method = cil_method;
- if (method->is_inflated) {
- target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
- }
- if (!mono_method_can_access_method (method_definition, target_method) &&
- !mono_method_can_access_method (method, cil_method))
- METHOD_ACCESS_FAILURE (method, cil_method);
+ if (!cmethod || mono_loader_get_last_error ())
+ LOAD_ERROR;
+ if (!dont_verify && !cfg->skip_visibility) {
+ MonoMethod *target_method = cil_method;
+ if (method->is_inflated) {
+ target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
}
+ if (!mono_method_can_access_method (method_definition, target_method) &&
+ !mono_method_can_access_method (method, cil_method))
+ METHOD_ACCESS_FAILURE (method, cil_method);
+ }
- if (mono_security_core_clr_enabled ())
- ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
-
- if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
- /* MS.NET seems to silently convert this to a callvirt */
- virtual = 1;
-
- {
- /*
- * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
- * converts to a callvirt.
- *
- * tests/bug-515884.il is an example of this behavior
- */
- const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
- const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
- if (!virtual && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
- virtual = 1;
- }
+ if (mono_security_core_clr_enabled ())
+ ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
- if (!cmethod->klass->inited)
- if (!mono_class_init (cmethod->klass))
- TYPE_LOAD_ERROR (cmethod->klass);
+ if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
+ /* MS.NET seems to silently convert this to a callvirt */
+ virtual = 1;
- if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
- mini_class_is_system_array (cmethod->klass)) {
- array_rank = cmethod->klass->rank;
- fsig = mono_method_signature (cmethod);
- } else {
- fsig = mono_method_signature (cmethod);
+ {
+ /*
+ * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
+ * converts to a callvirt.
+ *
+ * tests/bug-515884.il is an example of this behavior
+ */
+ const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
+ const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
+ if (!virtual && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
+ virtual = 1;
+ }
- if (!fsig)
- LOAD_ERROR;
+ if (!cmethod->klass->inited)
+ if (!mono_class_init (cmethod->klass))
+ TYPE_LOAD_ERROR (cmethod->klass);
- if (fsig->pinvoke) {
- MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
- check_for_pending_exc, cfg->compile_aot);
- fsig = mono_method_signature (wrapper);
- } else if (constrained_class) {
- fsig = mono_method_signature (cmethod);
- } else {
- fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
- CHECK_CFG_ERROR;
- }
- }
+ fsig = mono_method_signature (cmethod);
+ if (!fsig)
+ LOAD_ERROR;
+ if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
+ mini_class_is_system_array (cmethod->klass)) {
+ array_rank = cmethod->klass->rank;
+ } else if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && icall_is_direct_callable (cfg, cmethod)) {
+ direct_icall = TRUE;
+ } else if (fsig->pinvoke) {
+ MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
+ check_for_pending_exc, cfg->compile_aot);
+ fsig = mono_method_signature (wrapper);
+ } else if (constrained_class) {
+ } else {
+ fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
+ CHECK_CFG_ERROR;
+ }
- mono_save_token_info (cfg, image, token, cil_method);
+ mono_save_token_info (cfg, image, token, cil_method);
- if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
- need_seq_point = TRUE;
+ if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
+ need_seq_point = TRUE;
- n = fsig->param_count + fsig->hasthis;
+ /* Don't support calls made using type arguments for now */
+ /*
+ if (cfg->gsharedvt) {
+ if (mini_is_gsharedvt_signature (cfg, fsig))
+ GSHAREDVT_FAILURE (*ip);
+ }
+ */
- /* Don't support calls made using type arguments for now */
- /*
- if (cfg->gsharedvt) {
- if (mini_is_gsharedvt_signature (cfg, fsig))
- GSHAREDVT_FAILURE (*ip);
- }
- */
+ if (mono_security_cas_enabled ()) {
+ if (check_linkdemand (cfg, method, cmethod))
+ INLINE_FAILURE ("linkdemand");
+ CHECK_CFG_EXCEPTION;
+ }
- if (mono_security_cas_enabled ()) {
- if (check_linkdemand (cfg, method, cmethod))
- INLINE_FAILURE ("linkdemand");
- CHECK_CFG_EXCEPTION;
- }
+ if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
+ g_assert_not_reached ();
- if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
- g_assert_not_reached ();
- }
+ n = fsig->param_count + fsig->hasthis;
- if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
+ if (!cfg->generic_sharing_context && cmethod->klass->generic_container)
UNVERIFIED;
- if (!cfg->generic_sharing_context && cmethod)
+ if (!cfg->generic_sharing_context)
g_assert (!mono_method_check_context_used (cmethod));
CHECK_STACK (n);
/*
* We have the `constrained.' prefix opcode.
*/
- if (constrained_class->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
+ if (constrained_partial_call) {
+ gboolean need_box = TRUE;
+
+ /*
+ * The receiver is a valuetype, but the exact type is not known at compile time. This means the
+ * called method is not known at compile time either. The called method could end up being
+ * one of the methods on the parent classes (object/valuetype/enum), in which case we need
+ * to box the receiver.
+ * A simple solution would be to box always and make a normal virtual call, but that would
+ * be bad performance wise.
+ */
+ if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE && cmethod->klass->generic_class) {
+ /*
+ * The parent classes implement no generic interfaces, so the called method will be a vtype method, so no boxing neccessary.
+ */
+ need_box = FALSE;
+ }
+
+ if (need_box) {
+ MonoInst *box_type;
+ MonoBasicBlock *is_ref_bb, *end_bb;
+ MonoInst *nonbox_call;
+
+ /*
+ * Determine at runtime whenever the called method is defined on object/valuetype/enum, and emit a boxing call
+ * if needed.
+ * FIXME: It is possible to inline the called method in a lot of cases, i.e. for T_INT,
+ * the no-box case goes to a method in Int32, while the box case goes to a method in Enum.
+ */
+ addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
+
+ NEW_BBLOCK (cfg, is_ref_bb);
+ NEW_BBLOCK (cfg, end_bb);
+
+ box_type = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE);
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, box_type->dreg, 1);
+ MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
+
+ /* Non-ref case */
+ nonbox_call = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
+
+ MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
+
+ /* Ref case */
+ MONO_START_BB (cfg, is_ref_bb);
+ EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
+ ins->klass = constrained_class;
+ sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class), &bblock);
+ ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
+
+ MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
+
+ MONO_START_BB (cfg, end_bb);
+ bblock = end_bb;
+
+ nonbox_call->dreg = ins->dreg;
+ } else {
+ g_assert (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
+ addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
+ ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
+ }
+ goto call_end;
+ } else if (constrained_class->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
/*
* The type parameter is instantiated as a valuetype,
* but that type doesn't override the method we're
constrained_class = NULL;
}
- if (!calli && check_call_signature (cfg, fsig, sp))
+ if (check_call_signature (cfg, fsig, sp))
UNVERIFIED;
#ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
- if (cmethod && (cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
+ if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
delegate_invoke = TRUE;
#endif
- if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
+ if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
bblock = cfg->cbb;
if (!MONO_TYPE_IS_VOID (fsig->ret)) {
type_to_eval_stack_type ((cfg), fsig->ret, ins);
* If the callee is a shared method, then its static cctor
* might not get called after the call was patched.
*/
- if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
+ if (cfg->generic_sharing_context && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
emit_generic_class_init (cfg, cmethod->klass);
CHECK_TYPELOAD (cmethod->klass);
}
- if (cmethod)
- check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
+ check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
- if (cfg->generic_sharing_context && cmethod) {
+ if (cfg->generic_sharing_context) {
MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
context_used = mini_method_check_context_used (cfg, cmethod);
if (pass_imt_from_rgctx) {
g_assert (!pass_vtable);
- g_assert (cmethod);
imt_arg = emit_get_rgctx_method (cfg, context_used,
cmethod, MONO_RGCTX_INFO_METHOD);
MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
/* Calling virtual generic methods */
- if (cmethod && virtual &&
- (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
+ if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
!(MONO_METHOD_IS_FINAL (cmethod) &&
cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
fsig->generic_param_count &&
GSHAREDVT_FAILURE (*ip);
#if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
- if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt)
+ if (cmethod->wrapper_type == MONO_WRAPPER_NONE)
use_imt = TRUE;
#endif
* To work around this, we extend such try blocks to include the last x bytes
* of the Monitor.Enter () call.
*/
- if (cmethod && cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
+ if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
MonoBasicBlock *tbb;
GET_BBLOCK (cfg, tbb, ip + 5);
}
/* Conversion to a JIT intrinsic */
- if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
+ if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
bblock = cfg->cbb;
if (!MONO_TYPE_IS_VOID (fsig->ret)) {
type_to_eval_stack_type ((cfg), fsig->ret, ins);
}
/* Inlining */
- if (cmethod && (cfg->opt & MONO_OPT_INLINE) &&
+ if ((cfg->opt & MONO_OPT_INLINE) &&
(!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
mono_method_check_inlining (cfg, cmethod)) {
int costs;
* This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
* patching gshared method addresses into a gsharedvt method.
*/
- if (cmethod && cfg->gsharedvt && (mini_is_gsharedvt_signature (cfg, fsig) || cmethod->is_inflated || cmethod->klass->generic_class) &&
+ if (cfg->gsharedvt && (mini_is_gsharedvt_signature (cfg, fsig) || cmethod->is_inflated || cmethod->klass->generic_class) &&
!(cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY)) {
MonoRgctxInfoType info_type;
GSHAREDVT_FAILURE (*ip);
if (fsig->generic_param_count) {
/* virtual generic call */
- g_assert (mono_use_imt);
g_assert (!imt_arg);
/* Same as the virtual generic case above */
imt_arg = emit_get_rgctx_method (cfg, context_used,
ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
goto call_end;
- } else if (calli && cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
- /*
- * We pass the address to the gsharedvt trampoline in the rgctx reg
- */
- MonoInst *callee = addr;
-
- if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
- /* Not tested */
- GSHAREDVT_FAILURE (*ip);
-
- addr = emit_get_rgctx_sig (cfg, context_used,
- fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
- ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
- goto call_end;
}
/* Generic sharing */
goto call_end;
}
- /* Indirect calls */
- if (addr) {
- if (call_opcode == CEE_CALL)
- g_assert (context_used);
- else if (call_opcode == CEE_CALLI)
- g_assert (!vtable_arg);
- else
- /* FIXME: what the hell is this??? */
- g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
- !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
+ /* Direct calls to icalls */
+ if (direct_icall) {
+ MonoMethod *wrapper;
+ int costs;
- /* Prevent inlining of methods with indirect calls */
- INLINE_FAILURE ("indirect call");
+ /* Inline the wrapper */
+ wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
- if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
- int info_type;
- gpointer info_data;
+ costs = inline_method (cfg, wrapper, fsig, sp, ip, cfg->real_offset, TRUE, &bblock);
+ g_assert (costs > 0);
+ cfg->real_offset += 5;
- /*
- * Instead of emitting an indirect call, emit a direct call
- * with the contents of the aotconst as the patch info.
- */
- if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
- info_type = addr->inst_c1;
- info_data = addr->inst_p0;
- } else {
- info_type = addr->inst_right->inst_c1;
- info_data = addr->inst_right->inst_left;
- }
-
- if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
- ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
- NULLIFY_INS (addr);
- goto call_end;
- }
+ if (!MONO_TYPE_IS_VOID (fsig->ret)) {
+ /* *sp is already set by inline_method */
+ sp++;
+ push_res = FALSE;
}
- ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
+
+ inline_costs += costs;
+
goto call_end;
}
/* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
/* FIXME: runtime generic context pointer for jumps? */
/* FIXME: handle this for generic sharing eventually */
- if (cmethod && (ins_flag & MONO_INST_TAILCALL) &&
+ if ((ins_flag & MONO_INST_TAILCALL) &&
!vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
supported_tail_call = TRUE;
if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
MonoInst *args [3];
+ int idx;
/* obj */
args [0] = *sp;
EMIT_NEW_CLASSCONST (cfg, args [1], klass);
/* inline cache*/
- if (cfg->compile_aot)
- EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
- else
+ if (cfg->compile_aot) {
+ idx = get_castclass_cache_idx (cfg);
+ EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
+ } else {
EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
+ }
*sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
ip += 5;
token = read32 (ip + 1);
klass = mini_get_class (method, token, generic_context);
CHECK_TYPELOAD (klass);
-
+
mono_save_token_info (cfg, image, token, klass);
context_used = mini_class_check_context_used (cfg, klass);
break;
}
+ case CEE_MONO_LDPTR_CARD_TABLE: {
+ int shift_bits;
+ gpointer card_mask;
+ CHECK_STACK_OVF (1);
+
+ if (cfg->compile_aot)
+ EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
+ else
+ EMIT_NEW_PCONST (cfg, ins, mono_gc_get_card_table (&shift_bits, &card_mask));
+
+ *sp++ = ins;
+ ip += 2;
+ inline_costs += 10 * num_calls++;
+ break;
+ }
+ case CEE_MONO_LDPTR_NURSERY_START: {
+ int shift_bits;
+ size_t size;
+ CHECK_STACK_OVF (1);
+
+ if (cfg->compile_aot)
+ EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_GC_NURSERY_START, NULL);
+ else
+ EMIT_NEW_PCONST (cfg, ins, mono_gc_get_nursery (&shift_bits, &size));
+
+ *sp++ = ins;
+ ip += 2;
+ inline_costs += 10 * num_calls++;
+ break;
+ }
+ case CEE_MONO_LDPTR_INT_REQ_FLAG: {
+ CHECK_STACK_OVF (1);
+
+ if (cfg->compile_aot)
+ EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
+ else
+ EMIT_NEW_PCONST (cfg, ins, mono_thread_interruption_request_flag ());
+
+ *sp++ = ins;
+ ip += 2;
+ inline_costs += 10 * num_calls++;
+ break;
+ }
case CEE_MONO_LDPTR: {
gpointer ptr;
token = read32 (ip + 2);
ptr = mono_method_get_wrapper_data (method, token);
- /* FIXME: Generalize this */
- if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
- EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
- *sp++ = ins;
- ip += 6;
- break;
- }
EMIT_NEW_PCONST (cfg, ins, ptr);
*sp++ = ins;
ip += 6;
break;
case CEE_ENDFILTER: {
MonoExceptionClause *clause, *nearest;
- int cc, nearest_num;
+ int cc;
CHECK_STACK (1);
--sp;
ip += 2;
nearest = NULL;
- nearest_num = 0;
for (cc = 0; cc < header->num_clauses; ++cc) {
clause = &header->clauses [cc];
if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
- (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
+ (!nearest || (clause->data.filter_offset < nearest->data.filter_offset)))
nearest = clause;
- nearest_num = cc;
- }
}
g_assert (nearest);
if ((ip - header->code) != nearest->handler_offset)
}
/* Add a sequence point for method entry/exit events */
- if (cfg->gen_seq_points_debug_data) {
+ if (seq_points && cfg->gen_sdb_seq_points) {
NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
MONO_ADD_INS (init_localsbb, ins);
NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);