X-Git-Url: http://wien.tomnetworks.com/gitweb/?a=blobdiff_plain;f=mono%2Fmini%2Fmethod-to-ir.c;h=ce326f3c2d3443d3461073d826215663523955e4;hb=30cddad5fb4c3d290906a6e6c33ecd8b07d8b48c;hp=0a34f582c637abdd9d565e7ee08a6305f752c2f4;hpb=20ab5bfe7ae2cf84cd9e6fdad9758ded9f687ca2;p=mono.git diff --git a/mono/mini/method-to-ir.c b/mono/mini/method-to-ir.c index 0a34f582c63..ce326f3c2d3 100644 --- a/mono/mini/method-to-ir.c +++ b/mono/mini/method-to-ir.c @@ -1264,7 +1264,7 @@ mono_get_domainvar (MonoCompile *cfg) MonoInst * mono_get_got_var (MonoCompile *cfg) { - if (!cfg->compile_aot || !cfg->backend->need_got_var) + if (!cfg->compile_aot || !cfg->backend->need_got_var || cfg->llvm_only) return NULL; if (!cfg->got_var) { cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL); @@ -1764,24 +1764,6 @@ emit_pop_lmf (MonoCompile *cfg) EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg); } -static void -emit_instrumentation_call (MonoCompile *cfg, void *func) -{ - MonoInst *iargs [1]; - - /* - * Avoid instrumenting inlined methods since it can - * distort profiling results. - */ - if (cfg->method != cfg->current_method) - return; - - if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) { - EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method); - mono_emit_jit_icall (cfg, func, iargs); - } -} - static int ret_type_to_call_opcode (MonoCompile *cfg, MonoType *type, int calli, int virt) { @@ -2235,7 +2217,7 @@ check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_ inline static MonoCallInst * mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig, - MonoInst **args, int calli, int virtual_, int tail, int rgctx, int unbox_trampoline) + MonoInst **args, int calli, int virtual_, int tail, int rgctx, int unbox_trampoline, MonoMethod *target) { MonoType *sig_ret; MonoCallInst *call; @@ -2247,7 +2229,7 @@ mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig, tail = FALSE; if (tail) { - emit_instrumentation_call (cfg, mono_profiler_method_leave); + mini_profiler_emit_tail_call (cfg, target); MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL); } else @@ -2380,7 +2362,7 @@ mini_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, Mo MONO_ADD_INS (cfg->cbb, ins); } - call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE); + call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE, NULL); call->inst.sreg1 = addr->dreg; @@ -2474,7 +2456,7 @@ mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSign need_unbox_trampoline = method->klass == mono_defaults.object_class || mono_class_is_interface (method->klass); - call = mono_emit_call_args (cfg, sig, args, FALSE, virtual_, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline); + call = mono_emit_call_args (cfg, sig, args, FALSE, virtual_, tail, rgctx_arg ? 
TRUE : FALSE, need_unbox_trampoline, method); #ifndef DISABLE_REMOTING if (might_be_remote) @@ -2604,7 +2586,7 @@ mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature g_assert (sig); - call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE); + call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE, NULL); call->fptr = func; MONO_ADD_INS (cfg->cbb, (MonoInst*)call); @@ -2827,29 +2809,6 @@ mini_get_memcpy_method (void) return memcpy_method; } -static void -create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset) -{ - MonoClassField *field; - gpointer iter = NULL; - - while ((field = mono_class_get_fields (klass, &iter))) { - int foffset; - - if (field->type->attrs & FIELD_ATTRIBUTE_STATIC) - continue; - foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset; - if (mini_type_is_reference (mono_field_get_type (field))) { - g_assert ((foffset % SIZEOF_VOID_P) == 0); - *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P); - } else { - MonoClass *field_class = mono_class_from_mono_type (field->type); - if (field_class->has_references) - create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset); - } - } -} - void mini_emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value) { @@ -2863,6 +2822,8 @@ mini_emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value) if (!cfg->gen_write_barriers) return; + //method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !MONO_INS_IS_PCONST_NULL (sp [1]) + card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask); mono_gc_get_nursery (&nursery_shift_bits, &nursery_size); @@ -2906,177 +2867,6 @@ mini_emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value) EMIT_NEW_DUMMY_USE (cfg, dummy_use, value); } -gboolean -mini_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align) -{ - int dest_ptr_reg, tmp_reg, destreg, srcreg, offset; - unsigned need_wb = 0; - - if (align == 0) - align = 4; - - /*types with references can't have alignment smaller than sizeof(void*) */ - if (align < SIZEOF_VOID_P) - return FALSE; - - if (size > 5 * SIZEOF_VOID_P) - return FALSE; - - create_write_barrier_bitmap (cfg, klass, &need_wb, 0); - - destreg = iargs [0]->dreg; - srcreg = iargs [1]->dreg; - offset = 0; - - dest_ptr_reg = alloc_preg (cfg); - tmp_reg = alloc_preg (cfg); - - /*tmp = dreg*/ - EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg); - - while (size >= SIZEOF_VOID_P) { - MonoInst *load_inst; - MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE); - load_inst->dreg = tmp_reg; - load_inst->inst_basereg = srcreg; - load_inst->inst_offset = offset; - MONO_ADD_INS (cfg->cbb, load_inst); - - MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg); - - if (need_wb & 0x1) - mini_emit_write_barrier (cfg, iargs [0], load_inst); - - offset += SIZEOF_VOID_P; - size -= SIZEOF_VOID_P; - need_wb >>= 1; - - /*tmp += sizeof (void*)*/ - if (size >= SIZEOF_VOID_P) { - NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P); - MONO_ADD_INS (cfg->cbb, iargs [0]); - } - } - - /* Those cannot be references since size < sizeof (void*) */ - while (size >= 4) { - MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset); - MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg); - offset += 4; - size -= 4; - } - - while (size >= 2) 
{ - MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset); - MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg); - offset += 2; - size -= 2; - } - - while (size >= 1) { - MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset); - MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg); - offset += 1; - size -= 1; - } - - return TRUE; -} - -/* - * Emit code to copy a valuetype of type @klass whose address is stored in - * @src->dreg to memory whose address is stored at @dest->dreg. - */ -void -mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native) -{ - MonoInst *iargs [4]; - int n; - guint32 align = 0; - MonoMethod *memcpy_method; - MonoInst *size_ins = NULL; - MonoInst *memcpy_ins = NULL; - - g_assert (klass); - if (cfg->gshared) - klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg)); - - /* - * This check breaks with spilled vars... need to handle it during verification anyway. - * g_assert (klass && klass == src->klass && klass == dest->klass); - */ - - if (mini_is_gsharedvt_klass (klass)) { - g_assert (!native); - size_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE); - memcpy_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY); - } - - if (native) - n = mono_class_native_size (klass, &align); - else - n = mono_class_value_size (klass, &align); - - if (!align) - align = SIZEOF_VOID_P; - /* if native is true there should be no references in the struct */ - if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) { - /* Avoid barriers when storing to the stack */ - if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) || - (dest->opcode == OP_LDADDR))) { - int context_used; - - iargs [0] = dest; - iargs [1] = src; - - context_used = mini_class_check_context_used (cfg, klass); - - /* It's ok to intrinsify under gsharing since shared code types are layout stable. */ - if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mini_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) { - return; - } else if (size_ins || align < SIZEOF_VOID_P) { - if (context_used) { - iargs [2] = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS); - } else { - iargs [2] = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, klass); - if (!cfg->compile_aot) - mono_class_compute_gc_descriptor (klass); - } - if (size_ins) - mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs); - else - mono_emit_jit_icall (cfg, mono_value_copy, iargs); - } else { - /* We don't unroll more than 5 stores to avoid code bloat. 
*/ - /*This is harmless and simplify mono_gc_get_range_copy_func */ - n += (SIZEOF_VOID_P - 1); - n &= ~(SIZEOF_VOID_P - 1); - - EMIT_NEW_ICONST (cfg, iargs [2], n); - mono_emit_jit_icall (cfg, mono_gc_get_range_copy_func (), iargs); - } - } - } - - if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 8) { - /* FIXME: Optimize the case when src/dest is OP_LDADDR */ - mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align); - } else { - iargs [0] = dest; - iargs [1] = src; - if (size_ins) - iargs [2] = size_ins; - else - EMIT_NEW_ICONST (cfg, iargs [2], n); - - memcpy_method = mini_get_memcpy_method (); - if (memcpy_ins) - mini_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL); - else - mono_emit_method_call (cfg, memcpy_method, iargs, NULL); - } -} - MonoMethod* mini_get_memset_method (void) { @@ -3911,11 +3701,10 @@ handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_ /* This happens often in argument checking code, eg. throw new FooException... */ /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */ EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token)); - return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs); + alloc_ftn = mono_helper_newobj_mscorlib; } else { MonoVTable *vtable = mono_class_vtable (cfg->domain, klass); MonoMethod *managed_alloc = NULL; - gboolean pass_lw; if (!vtable) { mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); @@ -3934,16 +3723,8 @@ handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_ EMIT_NEW_ICONST (cfg, iargs [1], size); return mono_emit_method_call (cfg, managed_alloc, iargs, NULL); } - alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw); - if (pass_lw) { - guint32 lw = vtable->klass->instance_size; - lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer); - EMIT_NEW_ICONST (cfg, iargs [0], lw); - EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable); - } - else { - EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable); - } + alloc_ftn = ves_icall_object_new_specific; + EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable); } return mono_emit_jit_icall (cfg, alloc_ftn, iargs); @@ -4191,6 +3972,8 @@ handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, Mono /* Set target field */ /* Optimize away setting of NULL target */ if (!MONO_INS_IS_PCONST_NULL (target)) { + MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target->dreg, 0); + MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException"); MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg); if (cfg->gen_write_barriers) { dreg = alloc_preg (cfg); @@ -4554,6 +4337,9 @@ mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method) if (g_list_find (cfg->dont_inline, method)) return FALSE; + if (mono_profiler_get_call_instrumentation_flags (method)) + return FALSE; + return TRUE; } @@ -4763,54 +4549,6 @@ mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, uns return addr; } -static MonoBreakPolicy -always_insert_breakpoint (MonoMethod *method) -{ - return MONO_BREAK_POLICY_ALWAYS; -} - -static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint; - -/** - * mono_set_break_policy: - * \param policy_callback the new callback function - * - * Allow embedders to decide wherther to actually obey breakpoint instructions - * (both break IL instructions 
and \c Debugger.Break method calls), for example - * to not allow an app to be aborted by a perfectly valid IL opcode when executing - * untrusted or semi-trusted code. - * - * \p policy_callback will be called every time a break point instruction needs to - * be inserted with the method argument being the method that calls \c Debugger.Break - * or has the IL \c break instruction. The callback should return \c MONO_BREAK_POLICY_NEVER - * if it wants the breakpoint to not be effective in the given method. - * \c MONO_BREAK_POLICY_ALWAYS is the default. - */ -void -mono_set_break_policy (MonoBreakPolicyFunc policy_callback) -{ - if (policy_callback) - break_policy_func = policy_callback; - else - break_policy_func = always_insert_breakpoint; -} - -static gboolean -should_insert_brekpoint (MonoMethod *method) { - switch (break_policy_func (method)) { - case MONO_BREAK_POLICY_ALWAYS: - return TRUE; - case MONO_BREAK_POLICY_NEVER: - return FALSE; - case MONO_BREAK_POLICY_ON_DBG: - g_warning ("mdb no longer supported"); - return FALSE; - default: - g_warning ("Incorrect value returned from break policy callback"); - return FALSE; - } -} - /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */ static MonoInst* emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set) @@ -5114,6 +4852,31 @@ mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoM return NULL; } + +static gboolean +mono_type_is_native_blittable (MonoType *t) +{ + if (MONO_TYPE_IS_REFERENCE (t)) + return FALSE; + + if (MONO_TYPE_IS_PRIMITIVE_SCALAR (t)) + return TRUE; + + MonoClass *klass = mono_class_from_mono_type (t); + + //MonoClass::blitable depends on mono_class_setup_fields being done. + mono_class_setup_fields (klass); + if (!klass->blittable) + return FALSE; + + // If the native marshal size is different we can't convert PtrToStructure to a type load + if (mono_class_native_size (klass, NULL) != mono_class_value_size (klass, NULL)) + return FALSE; + + return TRUE; +} + + static MonoInst* mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args) { @@ -5270,11 +5033,15 @@ mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSign g_assert (ctx); g_assert (ctx->method_inst); g_assert (ctx->method_inst->type_argc == 1); - MonoType *t = mini_get_underlying_type (ctx->method_inst->type_argv [0]); - MonoClass *klass = mono_class_from_mono_type (t); + MonoType *arg_type = ctx->method_inst->type_argv [0]; + MonoType *t; + MonoClass *klass; ins = NULL; + /* Resolve the argument class as possible so we can handle common cases fast */ + t = mini_get_underlying_type (arg_type); + klass = mono_class_from_mono_type (t); mono_class_init (klass); if (MONO_TYPE_IS_REFERENCE (t)) EMIT_NEW_ICONST (cfg, ins, 1); @@ -5287,10 +5054,12 @@ mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSign else { g_assert (cfg->gshared); - int context_used = mini_class_check_context_used (cfg, klass); + /* Have to use the original argument class here */ + MonoClass *arg_class = mono_class_from_mono_type (arg_type); + int context_used = mini_class_check_context_used (cfg, arg_class); /* This returns 1 or 2 */ - MonoInst *info = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CLASS_IS_REF_OR_CONTAINS_REFS); + MonoInst *info = mini_emit_get_rgctx_klass (cfg, context_used, arg_class, MONO_RGCTX_INFO_CLASS_IS_REF_OR_CONTAINS_REFS); int dreg = alloc_ireg 
(cfg); EMIT_NEW_BIALU_IMM (cfg, ins, OP_ISUB_IMM, dreg, info->dreg, 1); } @@ -5930,7 +5699,7 @@ mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSign (strcmp (cmethod->klass->name_space, "System.Diagnostics") == 0) && (strcmp (cmethod->klass->name, "Debugger") == 0)) { if (!strcmp (cmethod->name, "Break") && fsig->param_count == 0) { - if (should_insert_brekpoint (cfg->method)) { + if (mini_should_insert_breakpoint (cfg->method)) { ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL); } else { MONO_INST_NEW (cfg, ins, OP_NOP); @@ -6029,6 +5798,20 @@ mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSign MONO_ADD_INS (cfg->cbb, ins); return ins; } + } else if (cmethod->klass->image == mono_defaults.corlib && + (strcmp (cmethod->klass->name_space, "System.Runtime.InteropServices") == 0) && + (strcmp (cmethod->klass->name, "Marshal") == 0)) { + //Convert Marshal.PtrToStructure of blittable T to direct loads + if (strcmp (cmethod->name, "PtrToStructure") == 0 && + cmethod->is_inflated && + fsig->param_count == 1 && + !mini_method_check_context_used (cfg, cmethod)) { + + MonoGenericContext *method_context = mono_method_get_context (cmethod); + MonoType *arg0 = method_context->method_inst->type_argv [0]; + if (mono_type_is_native_blittable (arg0)) + return mini_emit_memory_load (cfg, arg0, args [0], 0, 0); + } } #ifdef MONO_ARCH_SIMD_INTRINSICS @@ -6062,7 +5845,7 @@ mini_redirect_call (MonoCompile *cfg, MonoMethod *method, { if (method->klass == mono_defaults.string_class) { /* managed string allocation support */ - if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) { + if (strcmp (method->name, "InternalAllocateStr") == 0 && !(cfg->opt & MONO_OPT_SHARED)) { MonoInst *iargs [2]; MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass); MonoMethod *managed_alloc = NULL; @@ -6351,9 +6134,7 @@ inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, prev_args = cfg->args; prev_arg_types = cfg->arg_types; prev_inlined_method = cfg->inlined_method; - cfg->inlined_method = cmethod; - cfg->ret_var_set = FALSE; - cfg->inline_depth ++; + prev_ret_var_set = cfg->ret_var_set; prev_real_offset = cfg->real_offset; prev_cbb_hash = cfg->cbb_hash; prev_cil_offset_to_bb = cfg->cil_offset_to_bb; @@ -6363,9 +6144,12 @@ inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, prev_cbb = cfg->cbb; prev_current_method = cfg->current_method; prev_generic_context = cfg->generic_context; - prev_ret_var_set = cfg->ret_var_set; prev_disable_inline = cfg->disable_inline; + cfg->inlined_method = cmethod; + cfg->ret_var_set = FALSE; + cfg->inline_depth ++; + if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) virtual_ = TRUE; @@ -6646,27 +6430,6 @@ mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klas return method; } -MonoClass* -mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context) -{ - MonoError error; - MonoClass *klass; - - if (method->wrapper_type != MONO_WRAPPER_NONE) { - klass = (MonoClass *)mono_method_get_wrapper_data (method, token); - if (context) { - klass = mono_class_inflate_generic_class_checked (klass, context, &error); - mono_error_cleanup (&error); /* FIXME don't swallow the error */ - } - } else { - klass = mono_class_get_and_inflate_typespec_checked (method->klass->image, token, context, &error); - 
mono_error_cleanup (&error); /* FIXME don't swallow the error */ - } - if (klass) - mono_class_init (klass); - return klass; -} - static inline MonoMethodSignature* mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context, MonoError *error) { @@ -6847,10 +6610,78 @@ set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsign cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header); } +static guint32 +mono_type_to_stloc_coerce (MonoType *type) +{ + if (type->byref) + return 0; + + type = mini_get_underlying_type (type); +handle_enum: + switch (type->type) { + case MONO_TYPE_I1: + return OP_ICONV_TO_I1; + case MONO_TYPE_U1: + return OP_ICONV_TO_U1; + case MONO_TYPE_I2: + return OP_ICONV_TO_I2; + case MONO_TYPE_U2: + return OP_ICONV_TO_U2; + case MONO_TYPE_I4: + case MONO_TYPE_U4: + case MONO_TYPE_I: + case MONO_TYPE_U: + case MONO_TYPE_PTR: + case MONO_TYPE_FNPTR: + case MONO_TYPE_CLASS: + case MONO_TYPE_STRING: + case MONO_TYPE_OBJECT: + case MONO_TYPE_SZARRAY: + case MONO_TYPE_ARRAY: + case MONO_TYPE_I8: + case MONO_TYPE_U8: + case MONO_TYPE_R4: + case MONO_TYPE_R8: + case MONO_TYPE_TYPEDBYREF: + case MONO_TYPE_GENERICINST: + return 0; + case MONO_TYPE_VALUETYPE: + if (type->data.klass->enumtype) { + type = mono_class_enum_basetype (type->data.klass); + goto handle_enum; + } + return 0; + case MONO_TYPE_VAR: + case MONO_TYPE_MVAR: //TODO I believe we don't need to handle gsharedvt as there won't be match and, for example, u1 is not covariant to u32 + return 0; + default: + g_error ("unknown type 0x%02x in mono_type_to_stloc_coerce", type->type); + } + return -1; +} + static void emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n) { MonoInst *ins; + guint32 coerce_op = mono_type_to_stloc_coerce (header->locals [n]); + + if (coerce_op) { + if (cfg->cbb->last_ins == sp [0] && sp [0]->opcode == coerce_op) { + if (cfg->verbose_level > 2) + printf ("Found existing coercing is enough for stloc\n"); + } else { + MONO_INST_NEW (cfg, ins, coerce_op); + ins->dreg = alloc_ireg (cfg); + ins->sreg1 = sp [0]->dreg; + ins->type = STACK_I4; + ins->klass = mono_class_from_mono_type (header->locals [n]); + MONO_ADD_INS (cfg->cbb, ins); + *sp = mono_decompose_opcode (cfg, ins); + } + } + + guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]); if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] && ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) { @@ -6865,6 +6696,30 @@ emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n) } } +static void +emit_starg_ir (MonoCompile *cfg, MonoInst **sp, int n) +{ + MonoInst *ins; + guint32 coerce_op = mono_type_to_stloc_coerce (cfg->arg_types [n]); + + if (coerce_op) { + if (cfg->cbb->last_ins == sp [0] && sp [0]->opcode == coerce_op) { + if (cfg->verbose_level > 2) + printf ("Found existing coercing is enough for starg\n"); + } else { + MONO_INST_NEW (cfg, ins, coerce_op); + ins->dreg = alloc_ireg (cfg); + ins->sreg1 = sp [0]->dreg; + ins->type = STACK_I4; + ins->klass = mono_class_from_mono_type (cfg->arg_types [n]); + MONO_ADD_INS (cfg->cbb, ins); + *sp = mono_decompose_opcode (cfg, ins); + } + } + + EMIT_NEW_ARGSTORE (cfg, ins, n, *sp); +} + /* * ldloca inhibits many optimizations so try to get rid of it in common * cases. 
@@ -7526,8 +7381,7 @@ mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_b cfg->dont_inline = g_list_prepend (cfg->dont_inline, method); if (cfg->method == method) { - if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE) - cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size); + cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size); /* ENTRY BLOCK */ NEW_BBLOCK (cfg, start_bblock); @@ -7562,6 +7416,8 @@ mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_b tblock->real_offset = clause->handler_offset; tblock->flags |= BB_EXCEPTION_HANDLER; + if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY) + mono_create_exvar_for_offset (cfg, clause->handler_offset); /* * Linking the try block with the EH block hinders inlining as we won't be able to * merge the bblocks from inlining and produce an artificial hole for no good reason. @@ -7923,18 +7779,25 @@ mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_b if ((cfg->method == method) && cfg->coverage_info) { guint32 cil_offset = ip - header->code; + gpointer counter = &cfg->coverage_info->data [cil_offset].count; cfg->coverage_info->data [cil_offset].cil_code = ip; - /* TODO: Use an increment here */ -#if defined(TARGET_X86) - MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM); - ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count); - ins->inst_imm = 1; - MONO_ADD_INS (cfg->cbb, ins); -#else - EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count)); - MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1); -#endif + if (mono_arch_opcode_supported (OP_ATOMIC_ADD_I4)) { + MonoInst *one_ins, *load_ins; + + EMIT_NEW_PCONST (cfg, load_ins, counter); + EMIT_NEW_ICONST (cfg, one_ins, 1); + MONO_INST_NEW (cfg, ins, OP_ATOMIC_ADD_I4); + ins->dreg = mono_alloc_ireg (cfg); + ins->inst_basereg = load_ins->dreg; + ins->inst_offset = 0; + ins->sreg2 = one_ins->dreg; + ins->type = STACK_I4; + MONO_ADD_INS (cfg->cbb, ins); + } else { + EMIT_NEW_PCONST (cfg, ins, counter); + MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1); + } } if (cfg->verbose_level > 3) @@ -7958,7 +7821,7 @@ mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_b MONO_ADD_INS (cfg->cbb, ins); break; case CEE_BREAK: - if (should_insert_brekpoint (cfg->method)) { + if (mini_should_insert_breakpoint (cfg->method)) { ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL); } else { MONO_INST_NEW (cfg, ins, OP_NOP); @@ -8030,7 +7893,7 @@ mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_b CHECK_ARG (n); if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp)) UNVERIFIED; - EMIT_NEW_ARGSTORE (cfg, ins, n, *sp); + emit_starg_ir (cfg, sp, n); ip += 2; break; case CEE_LDLOC_S: @@ -8038,7 +7901,12 @@ mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_b CHECK_STACK_OVF (1); n = ip [1]; CHECK_LOCAL (n); - EMIT_NEW_LOCLOAD (cfg, ins, n); + if ((ip [2] == CEE_LDFLD) && ip_in_bb (cfg, cfg->cbb, ip + 2) && MONO_TYPE_ISSTRUCT (header->locals [n])) { + /* Avoid loading a struct just to load one of its fields */ + EMIT_NEW_LOCLOADA (cfg, ins, n); + } else { + EMIT_NEW_LOCLOAD (cfg, ins, n); + } *sp++ = ins; ip += 2; break; @@ -8248,7 +8116,7 @@ mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_b if (cfg->gshared && mono_method_check_context_used (cmethod)) 
GENERIC_SHARING_FAILURE (CEE_JMP); - emit_instrumentation_call (cfg, mono_profiler_method_leave); + mini_profiler_emit_tail_call (cfg, cmethod); fsig = mono_method_signature (cmethod); n = fsig->param_count + fsig->hasthis; @@ -8978,6 +8846,9 @@ mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_b } for (i = 0; i < n; ++i) EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]); + + mini_profiler_emit_tail_call (cfg, cmethod); + MONO_INST_NEW (cfg, ins, OP_BR); MONO_ADD_INS (cfg->cbb, ins); tblock = start_bblock->out_bb [0]; @@ -9190,7 +9061,7 @@ mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_b /* Handle tail calls similarly to normal calls */ tail_call = TRUE; } else { - emit_instrumentation_call (cfg, mono_profiler_method_leave); + mini_profiler_emit_tail_call (cfg, cmethod); MONO_INST_NEW_CALL (cfg, call, OP_JMP); call->tail_call = TRUE; @@ -9301,6 +9172,8 @@ mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_b break; } case CEE_RET: + mini_profiler_emit_leave (cfg, sig->ret->type != MONO_TYPE_VOID ? sp [-1] : NULL); + if (cfg->method != method) { /* return from inlined method */ /* @@ -9324,8 +9197,6 @@ mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_b cfg->ret_var_set = TRUE; } } else { - emit_instrumentation_call (cfg, mono_profiler_method_leave); - if (cfg->lmf_var && cfg->cbb->in_count && !cfg->llvm_only) emit_pop_lmf (cfg); @@ -9853,23 +9724,7 @@ mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_b klass = mini_get_class (method, token, generic_context); CHECK_TYPELOAD (klass); sp -= 2; - if (generic_class_is_reference_type (cfg, klass)) { - MonoInst *store, *load; - int dreg = alloc_ireg_ref (cfg); - - NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0); - load->flags |= ins_flag; - MONO_ADD_INS (cfg->cbb, load); - - NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg); - store->flags |= ins_flag; - MONO_ADD_INS (cfg->cbb, store); - - if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER) - mini_emit_write_barrier (cfg, sp [0], sp [1]); - } else { - mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE); - } + mini_emit_memory_copy (cfg, sp [0], sp [1], klass, FALSE, ins_flag); ins_flag = 0; ip += 5; break; @@ -9918,14 +9773,12 @@ mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_b } /* Optimize the ldobj+stobj combination */ - /* The reference case ends up being a load+store anyway */ - /* Skip this if the operation is volatile. 
*/ - if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) { + if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 5) && read32 (ip + 6) == token)) { CHECK_STACK (1); sp --; - mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE); + mini_emit_memory_copy (cfg, sp [0], sp [1], klass, FALSE, ins_flag); ip += 5 + 5; ins_flag = 0; @@ -10596,7 +10449,7 @@ mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_b dreg = alloc_ireg_mp (cfg); EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg); wbarrier_ptr_ins = ins; - /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */ + /* The decomposition will call mini_emit_memory_copy () which will emit a wbarrier if needed */ EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg); } else { EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg); @@ -11631,18 +11484,35 @@ mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_b if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) { GList *tmp; - MonoExceptionClause *clause; for (tmp = handlers; tmp; tmp = tmp->next) { - clause = (MonoExceptionClause *)tmp->data; + MonoExceptionClause *clause = (MonoExceptionClause *)tmp->data; + MonoInst *abort_exc = (MonoInst *)mono_find_exvar_for_offset (cfg, clause->handler_offset); + MonoBasicBlock *dont_throw; + tblock = cfg->cil_offset_to_bb [clause->handler_offset]; g_assert (tblock); link_bblock (cfg, cfg->cbb, tblock); + + MONO_EMIT_NEW_PCONST (cfg, abort_exc->dreg, 0); + MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER); ins->inst_target_bb = tblock; ins->inst_eh_block = clause; MONO_ADD_INS (cfg->cbb, ins); cfg->cbb->has_call_handler = 1; + + /* Throw exception if exvar is set */ + /* FIXME Do we need this for calls from catch/filter ? */ + NEW_BBLOCK (cfg, dont_throw); + MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, abort_exc->dreg, 0); + MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw); + mono_emit_jit_icall (cfg, mono_thread_self_abort, NULL); + cfg->cbb->clause_hole = clause; + + MONO_START_BB (cfg, dont_throw); + cfg->cbb->clause_hole = clause; + if (COMPILE_LLVM (cfg)) { MonoBasicBlock *target_bb; @@ -11697,7 +11567,20 @@ mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_b CHECK_STACK (info->sig->param_count); sp -= info->sig->param_count; - ins = mono_emit_jit_icall (cfg, info->func, sp); + if (cfg->compile_aot && !strcmp (info->name, "mono_threads_attach_coop")) { + MonoInst *addr; + + /* + * This is called on unattached threads, so it cannot go through the trampoline + * infrastructure. Use an indirect call through a got slot initialized at load time + * instead. 
+ */ + EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_JIT_ICALL_ADDR_NOCALL, (char*)info->name); + ins = mini_emit_calli (cfg, info->sig, sp, addr, NULL, NULL); + } else { + ins = mono_emit_jit_icall (cfg, info->func, sp); + } + if (!MONO_TYPE_IS_VOID (info->sig->ret)) *sp++ = ins; @@ -11709,22 +11592,29 @@ mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_b case CEE_MONO_LDPTR_CARD_TABLE: case CEE_MONO_LDPTR_NURSERY_START: case CEE_MONO_LDPTR_NURSERY_BITS: - case CEE_MONO_LDPTR_INT_REQ_FLAG: { + case CEE_MONO_LDPTR_INT_REQ_FLAG: + case CEE_MONO_LDPTR_PROFILER_ALLOCATION_COUNT: { CHECK_STACK_OVF (1); switch (ip [1]) { - case CEE_MONO_LDPTR_CARD_TABLE: - ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL); - break; - case CEE_MONO_LDPTR_NURSERY_START: - ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_START, NULL); - break; - case CEE_MONO_LDPTR_NURSERY_BITS: - ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_BITS, NULL); - break; - case CEE_MONO_LDPTR_INT_REQ_FLAG: - ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL); - break; + case CEE_MONO_LDPTR_CARD_TABLE: + ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL); + break; + case CEE_MONO_LDPTR_NURSERY_START: + ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_START, NULL); + break; + case CEE_MONO_LDPTR_NURSERY_BITS: + ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_BITS, NULL); + break; + case CEE_MONO_LDPTR_INT_REQ_FLAG: + ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL); + break; + case CEE_MONO_LDPTR_PROFILER_ALLOCATION_COUNT: + ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_PROFILER_ALLOCATION_COUNT, NULL); + break; + default: + g_assert_not_reached (); + break; } *sp++ = ins; @@ -11857,7 +11747,7 @@ mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_b temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL); temp->backend.is_pinvoke = 1; EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0); - mini_emit_stobj (cfg, dest, src, klass, TRUE); + mini_emit_memory_copy (cfg, dest, src, klass, TRUE, 0); EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0); dest->type = STACK_VTYPE; @@ -11888,11 +11778,13 @@ mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_b } else { EMIT_NEW_RETLOADA (cfg, ins); } - mini_emit_stobj (cfg, ins, sp [0], klass, TRUE); + mini_emit_memory_copy (cfg, ins, sp [0], klass, TRUE, 0); if (sp != stack_start) UNVERIFIED; + mini_profiler_emit_leave (cfg, sp [0]); + MONO_INST_NEW (cfg, ins, OP_BR); ins->inst_target_bb = end_bblock; MONO_ADD_INS (cfg->cbb, ins); @@ -11947,7 +11839,7 @@ mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_b cfg->dyn_call_var->flags |= MONO_INST_VOLATILE; } - /* Has to use a call inst since it local regalloc expects it */ + /* Has to use a call inst since local regalloc expects it */ MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL); ins = (MonoInst*)call; sp -= 2; @@ -11956,6 +11848,8 @@ mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_b MONO_ADD_INS (cfg->cbb, ins); cfg->param_area = MAX (cfg->param_area, cfg->backend->dyn_call_param_area); + /* OP_DYN_CALL might need to allocate a dynamically sized param area */ + cfg->flags |= MONO_CFG_HAS_ALLOCA; ip += 2; inline_costs += 10 * num_calls++; @@ -11989,7 +11883,7 @@ mono_method_to_ir (MonoCompile *cfg, 
MonoMethod *method, MonoBasicBlock *start_b MonoInst *ad_ins, *jit_tls_ins; MonoBasicBlock *next_bb = NULL, *call_bb = NULL; - g_assert (!mono_threads_is_coop_enabled ()); + g_assert (!mono_threads_is_blocking_transition_enabled ()); cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL); @@ -12487,7 +12381,7 @@ mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_b CHECK_ARG (n); if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp)) UNVERIFIED; - EMIT_NEW_ARGSTORE (cfg, ins, n, *sp); + emit_starg_ir (cfg, sp, n); ip += 4; break; case CEE_LDLOC: @@ -12495,7 +12389,12 @@ mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_b CHECK_OPSIZE (4); n = read16 (ip + 2); CHECK_LOCAL (n); - EMIT_NEW_LOCLOAD (cfg, ins, n); + if ((ip [4] == CEE_LDFLD) && ip_in_bb (cfg, cfg->cbb, ip + 4) && header->locals [n]->type == MONO_TYPE_VALUETYPE) { + /* Avoid loading a struct just to load one of its fields */ + EMIT_NEW_LOCLOADA (cfg, ins, n); + } else { + EMIT_NEW_LOCLOAD (cfg, ins, n); + } *sp++ = ins; ip += 4; break; @@ -12829,7 +12728,7 @@ mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_b } cfg->cbb = init_localsbb; - emit_instrumentation_call (cfg, mono_profiler_method_enter); + mini_profiler_emit_enter (cfg); if (seq_points) { MonoBasicBlock *bb; @@ -14406,11 +14305,4 @@ NOTES the values on the stack before emitting the last instruction of the bb. */ -#else /* !DISABLE_JIT */ - -void -mono_set_break_policy (MonoBreakPolicyFunc policy_callback) -{ -} - #endif /* !DISABLE_JIT */
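
Note on the coverage-instrumentation hunk: the patch replaces the old "store constant 1 into the counter slot" sequence with an OP_ATOMIC_ADD_I4 when the architecture supports it, so per-IL-offset counters become real execution counts rather than hit flags, and stay correct under concurrent execution. A minimal standalone C11 sketch of that idea, using hypothetical counter names rather than Mono's actual cfg->coverage_info layout:

#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical per-IL-offset coverage slots; in the patch these live in
 * cfg->coverage_info->data [cil_offset].count. */
static _Atomic int coverage_count [4];

/* Old scheme: a plain store of 1 only records "was hit at least once". */
static void
record_hit_flag (int cil_offset)
{
	atomic_store (&coverage_count [cil_offset], 1);
}

/* New scheme (when OP_ATOMIC_ADD_I4 is available): an atomic increment
 * records how many times the offset executed, race-free across threads. */
static void
record_hit_count (int cil_offset)
{
	atomic_fetch_add (&coverage_count [cil_offset], 1);
}

int
main (void)
{
	for (int i = 0; i < 3; i++)
		record_hit_count (0);
	record_hit_flag (1);
	printf ("offset 0 executed %d times, offset 1 flag = %d\n",
		atomic_load (&coverage_count [0]), atomic_load (&coverage_count [1]));
	return 0;
}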
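
Note on the new mono_type_to_stloc_coerce / emit_stloc_ir / emit_starg_ir path: when the evaluation-stack value is a full int32 but the destination local or argument has a narrow integer type (i1/u1/i2/u2), the patch emits an explicit conversion opcode (e.g. OP_ICONV_TO_U1) so the stored value is properly truncated or sign-extended at the store. A rough C analogue of why the narrowing must happen at stloc/starg time, with illustrative helper names that are not part of the patch:

#include <stdint.h>
#include <stdio.h>

/* The CLI evaluation stack holds a full int32; an i1/u1/i2/u2 local must
 * observe only its narrowed value after stloc/starg. */
static uint8_t
store_u1 (int32_t stack_value)
{
	/* The cast plays the role of the emitted OP_ICONV_TO_U1: without it
	 * the local could keep stale high bits around in a register. */
	uint8_t local = (uint8_t) stack_value;
	return local;
}

static int16_t
store_i2 (int32_t stack_value)
{
	/* OP_ICONV_TO_I2 equivalent: truncate, then sign-extend on reload. */
	int16_t local = (int16_t) stack_value;
	return local;
}

int
main (void)
{
	printf ("%u\n", store_u1 (0x1FF));   /* prints 255, not 511 */
	printf ("%d\n", store_i2 (0x18000)); /* prints -32768, not 98304 */
	return 0;
}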
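
Note on the Marshal.PtrToStructure hunk: for an inflated PtrToStructure<T> where T is native-blittable (no references, blittable layout, identical native and managed size), the patch bypasses the marshalling icall and emits a direct memory load via mini_emit_memory_load. A self-contained C sketch of what that fast path amounts to, with a hypothetical example type:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* A blittable layout: only primitive scalars, so the managed and native
 * representations are identical and no marshalling work is needed. */
typedef struct {
	int32_t x;
	int32_t y;
	double  weight;
} Point;	/* hypothetical example type, not from the patch */

/* With a blittable T, PtrToStructure degenerates into a plain load/copy
 * from the unmanaged pointer. */
static Point
ptr_to_structure (const void *unmanaged)
{
	Point p;
	memcpy (&p, unmanaged, sizeof (p));
	return p;
}

int
main (void)
{
	unsigned char buffer [sizeof (Point)];
	Point src = { 1, 2, 3.5 };
	memcpy (buffer, &src, sizeof (src));

	Point dst = ptr_to_structure (buffer);
	printf ("%d %d %g\n", dst.x, dst.y, dst.weight);
	return 0;
}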