diff --git a/mono/mini/mini-amd64.c b/mono/mini/mini-amd64.c
index e1b304ebc8f..f4945b9459f 100644
--- a/mono/mini/mini-amd64.c
+++ b/mono/mini/mini-amd64.c
@@ -10,6 +10,8 @@
  * Zoltan Varga (vargaz@gmail.com)
  *
  * (C) 2003 Ximian, Inc.
+ * Copyright 2003-2011 Novell, Inc (http://www.novell.com)
+ * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
  */
 #include "mini.h"
 #include <string.h>
@@ -26,6 +28,8 @@
 #include <mono/utils/mono-math.h>
 #include <mono/utils/mono-mmap.h>
+#include <mono/utils/mono-memory-model.h>
+#include <mono/utils/mono-tls.h>
 
 #include "trace.h"
 #include "ir-emit.h"
@@ -65,6 +69,13 @@ static CRITICAL_SECTION mini_arch_mutex;
 MonoBreakpointInfo
 mono_breakpoint_info [MONO_BREAKPOINT_ARRAY_SIZE];
 
+/* Structure used by the sequence points in AOTed code */
+typedef struct {
+	gpointer ss_trigger_page;
+	gpointer bp_trigger_page;
+	gpointer bp_addrs [MONO_ZERO_LEN_ARRAY];
+} SeqPointInfo;
+
 /*
  * The code generated for sequence points reads from this location, which is
  * made read-only when single stepping is enabled.
@@ -160,36 +171,11 @@ mono_arch_xregname (int reg)
 	return "unknown";
 }
 
-G_GNUC_UNUSED static void
-break_count (void)
-{
-}
-
-G_GNUC_UNUSED static gboolean
-debug_count (void)
-{
-	static int count = 0;
-	count ++;
-
-	if (!getenv ("COUNT"))
-		return TRUE;
-
-	if (count == atoi (getenv ("COUNT"))) {
-		break_count ();
-	}
-
-	if (count > atoi (getenv ("COUNT"))) {
-		return FALSE;
-	}
-
-	return TRUE;
-}
-
 static gboolean
 debug_omit_fp (void)
 {
 #if 0
-	return debug_count ();
+	return mono_debug_count ();
 #else
 	return TRUE;
 #endif
@@ -212,49 +198,49 @@ amd64_is_near_call (guint8 *code)
 /* amd64_call_reg_internal, which uses amd64_alu_* macros, etc. */
 /* We only want to force bundle alignment for the top level instruction, */
 /* so NaCl pseudo-instructions can be implemented with sub instructions.
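    For example (an illustrative sketch, not code from this patch; the
    locals 'start' and 'code' are hypothetical), a compound emitter
    brackets the macros it expands into, so that only the outermost
    level forces bundle alignment:

        guint8 *start = code;
        amd64_nacl_instruction_pre ();
        ... emit the sub-instructions of the pseudo-instruction ...
        amd64_nacl_instruction_post (&start, &code);

    Nested pre/post pairs from inner macros only adjust the depth counter.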
*/ -static guint32 nacl_instruction_depth; +static MonoNativeTlsKey nacl_instruction_depth; -static guint32 nacl_rex_tag; -static guint32 nacl_legacy_prefix_tag; +static MonoNativeTlsKey nacl_rex_tag; +static MonoNativeTlsKey nacl_legacy_prefix_tag; void amd64_nacl_clear_legacy_prefix_tag () { - TlsSetValue (nacl_legacy_prefix_tag, NULL); + mono_native_tls_set_value (nacl_legacy_prefix_tag, NULL); } void amd64_nacl_tag_legacy_prefix (guint8* code) { - if (TlsGetValue (nacl_legacy_prefix_tag) == NULL) - TlsSetValue (nacl_legacy_prefix_tag, code); + if (mono_native_tls_get_value (nacl_legacy_prefix_tag) == NULL) + mono_native_tls_set_value (nacl_legacy_prefix_tag, code); } void amd64_nacl_tag_rex (guint8* code) { - TlsSetValue (nacl_rex_tag, code); + mono_native_tls_set_value (nacl_rex_tag, code); } guint8* amd64_nacl_get_legacy_prefix_tag () { - return (guint8*)TlsGetValue (nacl_legacy_prefix_tag); + return (guint8*)mono_native_tls_get_value (nacl_legacy_prefix_tag); } guint8* amd64_nacl_get_rex_tag () { - return (guint8*)TlsGetValue (nacl_rex_tag); + return (guint8*)mono_native_tls_get_value (nacl_rex_tag); } /* Increment the instruction "depth" described above */ void amd64_nacl_instruction_pre () { - intptr_t depth = (intptr_t) TlsGetValue (nacl_instruction_depth); + intptr_t depth = (intptr_t) mono_native_tls_get_value (nacl_instruction_depth); depth++; - TlsSetValue (nacl_instruction_depth, (gpointer)depth); + mono_native_tls_set_value (nacl_instruction_depth, (gpointer)depth); } /* amd64_nacl_instruction_post: Decrement instruction "depth", force bundle */ @@ -265,9 +251,9 @@ amd64_nacl_instruction_pre () void amd64_nacl_instruction_post (guint8 **start, guint8 **end) { - intptr_t depth = (intptr_t) TlsGetValue(nacl_instruction_depth); + intptr_t depth = (intptr_t) mono_native_tls_get_value (nacl_instruction_depth); depth--; - TlsSetValue (nacl_instruction_depth, (void*)depth); + mono_native_tls_set_value (nacl_instruction_depth, (void*)depth); g_assert ( depth >= 0 ); if (depth == 0) { @@ -1191,7 +1177,7 @@ get_call_info (MonoGenericSharingContext *gsctx, MonoMemPool *mp, MonoMethodSign * Returns the size of the argument area on the stack. 
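 *
 * For example (a hypothetical caller, not code from this patch), the
 * usual pattern is to size arg_info for param_count + 1 entries and
 * pass a NULL sharing context:
 *
 *     MonoJitArgumentInfo *arg_info = g_newa (MonoJitArgumentInfo, csig->param_count + 1);
 *     int stack_area = mono_arch_get_argument_info (NULL, csig, csig->param_count, arg_info);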
*/ int -mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info) +mono_arch_get_argument_info (MonoGenericSharingContext *gsctx, MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info) { int k; CallInfo *cinfo = get_call_info (NULL, NULL, csig); @@ -1286,10 +1272,10 @@ mono_arch_init (void) InitializeCriticalSection (&mini_arch_mutex); #if defined(__native_client_codegen__) - nacl_instruction_depth = TlsAlloc (); - TlsSetValue (nacl_instruction_depth, (gpointer)0); - nacl_rex_tag = TlsAlloc (); - nacl_legacy_prefix_tag = TlsAlloc (); + mono_native_tls_alloc (&nacl_instruction_depth, NULL); + mono_native_tls_set_value (nacl_instruction_depth, (gpointer)0); + mono_native_tls_alloc (&nacl_rex_tag, NULL); + mono_native_tls_alloc (&nacl_legacy_prefix_tag, NULL); #endif #ifdef MONO_ARCH_NOMAP32BIT @@ -1297,16 +1283,16 @@ mono_arch_init (void) /* amd64_mov_reg_imm () + amd64_mov_reg_membase () */ breakpoint_size = 13; breakpoint_fault_size = 3; - /* amd64_alu_membase_imm_size (code, X86_CMP, AMD64_R11, 0, 0, 4); */ - single_step_fault_size = 5; #else flags = MONO_MMAP_READ|MONO_MMAP_32BIT; /* amd64_mov_reg_mem () */ breakpoint_size = 8; breakpoint_fault_size = 8; - single_step_fault_size = 8; #endif + /* amd64_alu_membase_imm_size (code, X86_CMP, AMD64_R11, 0, 0, 4); */ + single_step_fault_size = 4; + ss_trigger_page = mono_valloc (NULL, mono_pagesize (), flags); bp_trigger_page = mono_valloc (NULL, mono_pagesize (), flags); mono_mprotect (bp_trigger_page, mono_pagesize (), 0); @@ -1324,9 +1310,9 @@ mono_arch_cleanup (void) { DeleteCriticalSection (&mini_arch_mutex); #if defined(__native_client_codegen__) - TlsFree (nacl_instruction_depth); - TlsFree (nacl_rex_tag); - TlsFree (nacl_legacy_prefix_tag); + mono_native_tls_free (nacl_instruction_depth); + mono_native_tls_free (nacl_rex_tag); + mono_native_tls_free (nacl_legacy_prefix_tag); #endif } @@ -1334,7 +1320,7 @@ mono_arch_cleanup (void) * This function returns the optimizations supported on this cpu. */ guint32 -mono_arch_cpu_optimizazions (guint32 *exclude_mask) +mono_arch_cpu_optimizations (guint32 *exclude_mask) { int eax, ebx, ecx, edx; guint32 opts = 0; @@ -1773,7 +1759,7 @@ mono_arch_allocate_vars (MonoCompile *cfg) } else { if (cfg->arch.omit_fp) cfg->arch.reg_save_area_offset = offset; - /* Reserve space for caller saved registers */ + /* Reserve space for callee saved registers */ for (i = 0; i < AMD64_NREG; ++i) if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) { offset += sizeof(mgreg_t); @@ -1817,9 +1803,9 @@ mono_arch_allocate_vars (MonoCompile *cfg) cfg->ret->inst_basereg = cfg->frame_reg; if (cfg->arch.omit_fp) { cfg->ret->inst_offset = offset; - offset += 16; + offset += cinfo->ret.pair_storage [1] == ArgNone ? 8 : 16; } else { - offset += 16; + offset += cinfo->ret.pair_storage [1] == ArgNone ? 
8 : 16; cfg->ret->inst_offset = - offset; } break; @@ -2026,6 +2012,12 @@ mono_arch_create_vars (MonoCompile *cfg) if (cfg->gen_seq_points) { MonoInst *ins; + if (cfg->compile_aot) { + MonoInst *ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL); + ins->flags |= MONO_INST_VOLATILE; + cfg->arch.seq_point_info_var = ins; + } + ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL); ins->flags |= MONO_INST_VOLATILE; cfg->arch.ss_trigger_page_var = ins; @@ -2050,6 +2042,10 @@ mono_arch_create_vars (MonoCompile *cfg) lmf_var->flags |= MONO_INST_LMF; cfg->arch.lmf_var = lmf_var; } + +#ifndef MONO_AMD64_NO_PUSHES + cfg->arch_eh_jit_info = 1; +#endif } static void @@ -2113,14 +2109,11 @@ emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo) { MonoInst *arg; MonoMethodSignature *tmp_sig; - MonoInst *sig_arg; + int sig_reg; if (call->tail_call) NOT_IMPLEMENTED; - /* FIXME: Add support for signature tokens to AOT */ - cfg->disable_aot = TRUE; - g_assert (cinfo->sig_cookie.storage == ArgOnStack); /* @@ -2134,16 +2127,14 @@ emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo) tmp_sig->sentinelpos = 0; memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*)); - MONO_INST_NEW (cfg, sig_arg, OP_ICONST); - sig_arg->dreg = mono_alloc_ireg (cfg); - sig_arg->inst_p0 = tmp_sig; - MONO_ADD_INS (cfg->cbb, sig_arg); + sig_reg = mono_alloc_ireg (cfg); + MONO_EMIT_NEW_SIGNATURECONST (cfg, sig_reg, tmp_sig); if (cfg->arch.no_pushes) { - MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, AMD64_RSP, cinfo->sig_cookie.offset, sig_arg->dreg); + MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, AMD64_RSP, cinfo->sig_cookie.offset, sig_reg); } else { MONO_INST_NEW (cfg, arg, OP_X86_PUSH); - arg->sreg1 = sig_arg->dreg; + arg->sreg1 = sig_reg; MONO_ADD_INS (cfg->cbb, arg); } } @@ -3597,11 +3588,12 @@ mono_amd64_have_tls_get (void) #ifdef __APPLE__ static gboolean have_tls_get = FALSE; static gboolean inited = FALSE; + guint8 *ins; if (inited) return have_tls_get; - guint8 *ins = (guint8*)pthread_getspecific; + ins = (guint8*)pthread_getspecific; /* * We're looking for these two instructions: @@ -4268,9 +4260,6 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb) case OP_SEQ_POINT: { int i; - if (cfg->compile_aot) - NOT_IMPLEMENTED; - /* * Read from the single stepping trigger page. This will cause a * SIGSEGV when single stepping is enabled. @@ -4278,29 +4267,41 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb) * a breakpoint is hit will step to the next IL offset. */ if (ins->flags & MONO_INST_SINGLE_STEP_LOC) { - if (((guint64)ss_trigger_page >> 32) == 0) - amd64_mov_reg_mem (code, AMD64_R11, (guint64)ss_trigger_page, 4); - else { - MonoInst *var = cfg->arch.ss_trigger_page_var; + MonoInst *var = cfg->arch.ss_trigger_page_var; - amd64_mov_reg_membase (code, AMD64_R11, var->inst_basereg, var->inst_offset, 8); - amd64_alu_membase_imm_size (code, X86_CMP, AMD64_R11, 0, 0, 4); - } + amd64_mov_reg_membase (code, AMD64_R11, var->inst_basereg, var->inst_offset, 8); + amd64_alu_membase_imm_size (code, X86_CMP, AMD64_R11, 0, 0, 4); } /* * This is the address which is saved in seq points, - * get_ip_for_single_step () / get_ip_for_breakpoint () needs to compute this - * from the address of the instruction causing the fault. 
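	 * (With the AOT support added in this hunk no back-computation is
	 * needed: the emitted code loads info->bp_addrs [native_offset] and
	 * dereferences it, so a debugger arms or disarms the breakpoint with
	 * a single store. An illustrative sketch, mirroring
	 * mono_arch_set_breakpoint () and mono_arch_clear_breakpoint () below:
	 *
	 *     info->bp_addrs [native_offset] = info->bp_trigger_page;  arm
	 *     info->bp_addrs [native_offset] = info;                   disarm
	 *
	 * where 'info' is the method's SeqPointInfo.)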
*/ mono_add_seq_point (cfg, bb, ins, code - cfg->native_code); - /* - * A placeholder for a possible breakpoint inserted by - * mono_arch_set_breakpoint (). + if (cfg->compile_aot) { + guint32 offset = code - cfg->native_code; + guint32 val; + MonoInst *info_var = cfg->arch.seq_point_info_var; + + /* Load info var */ + amd64_mov_reg_membase (code, AMD64_R11, info_var->inst_basereg, info_var->inst_offset, 8); + val = ((offset) * sizeof (guint8*)) + G_STRUCT_OFFSET (SeqPointInfo, bp_addrs); + /* Load the info->bp_addrs [offset], which is either a valid address or the address of a trigger page */ + amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, val, 8); + amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, 8); + } else { + /* + * A placeholder for a possible breakpoint inserted by + * mono_arch_set_breakpoint (). + */ + for (i = 0; i < breakpoint_size; ++i) + x86_nop (code); + } + /* + * Add an additional nop so skipping the bp doesn't cause the ip to point + * to another IL offset. */ - for (i = 0; i < breakpoint_size; ++i) - x86_nop (code); + x86_nop (code); break; } case OP_ADDCC: @@ -4749,7 +4750,7 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb) /* FIXME: no tracing support... */ if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) - code = mono_arch_instrument_epilog_full (cfg, mono_profiler_method_leave, code, FALSE, FALSE); + code = mono_arch_instrument_epilog_full (cfg, mono_profiler_method_leave, code, FALSE, TRUE); g_assert (!cfg->method->save_lmf); @@ -5568,9 +5569,14 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb) break; } case OP_MEMORY_BARRIER: { - /* http://blogs.sun.com/dave/resource/NHM-Pipeline-Blog-V2.txt */ - x86_prefix (code, X86_LOCK_PREFIX); - amd64_alu_membase_imm (code, X86_ADD, AMD64_RSP, 0, 0); + switch (ins->backend.memory_barrier_kind) { + case StoreLoadBarrier: + case FullBarrier: + /* http://blogs.sun.com/dave/resource/NHM-Pipeline-Blog-V2.txt */ + x86_prefix (code, X86_LOCK_PREFIX); + amd64_alu_membase_imm (code, X86_ADD, AMD64_RSP, 0, 0); + break; + } break; } case OP_ATOMIC_ADD_I4: @@ -5705,6 +5711,7 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb) gpointer card_table = mono_gc_get_card_table (&card_table_shift, &card_table_mask); guint64 nursery_start = (guint64)mono_gc_get_nursery (&nursery_shift, &nursery_size); + guint64 shifted_nursery_start = nursery_start >> nursery_shift; /*If either point to the stack we can simply avoid the WB. This happens due to * optimizations revealing a stack store that was not visible when op_cardtable was emited. @@ -5737,7 +5744,19 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb) if (value != AMD64_RDX) amd64_mov_reg_reg (code, AMD64_RDX, value, 8); amd64_shift_reg_imm (code, X86_SHR, AMD64_RDX, nursery_shift); - amd64_alu_reg_imm (code, X86_CMP, AMD64_RDX, nursery_start >> nursery_shift); + if (shifted_nursery_start >> 31) { + /* + * The value we need to compare against is 64 bits, so we need + * another spare register. We use RBX, which we save and + * restore. 
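+			 * (The RSP-8 slot used below is safe without adjusting RSP on
+			 * SysV targets, since it falls within the 128-byte red zone
+			 * that signal handlers are guaranteed not to clobber.)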
+ */ + amd64_mov_membase_reg (code, AMD64_RSP, -8, AMD64_RBX, 8); + amd64_mov_reg_imm (code, AMD64_RBX, shifted_nursery_start); + amd64_alu_reg_reg (code, X86_CMP, AMD64_RDX, AMD64_RBX); + amd64_mov_reg_membase (code, AMD64_RBX, AMD64_RSP, -8, 8); + } else { + amd64_alu_reg_imm (code, X86_CMP, AMD64_RDX, shifted_nursery_start); + } br = code; x86_branch8 (code, X86_CC_NE, -1, FALSE); amd64_mov_reg_reg (code, AMD64_RDX, ptr, 8); amd64_shift_reg_imm (code, X86_SHR, AMD64_RDX, card_table_shift); @@ -6775,6 +6794,9 @@ mono_arch_emit_prolog (MonoCompile *cfg) (cfg->rgctx_var->inst_basereg == AMD64_RBP || cfg->rgctx_var->inst_basereg == AMD64_RSP)); amd64_mov_membase_reg (code, cfg->rgctx_var->inst_basereg, cfg->rgctx_var->inst_offset, MONO_ARCH_RGCTX_REG, sizeof(gpointer)); + + mono_add_var_location (cfg, cfg->rgctx_var, TRUE, MONO_ARCH_RGCTX_REG, 0, 0, code - cfg->native_code); + mono_add_var_location (cfg, cfg->rgctx_var, FALSE, cfg->rgctx_var->inst_basereg, cfg->rgctx_var->inst_offset, code - cfg->native_code, 0); } /* compute max_length in order to use short forward jumps */ @@ -6896,6 +6918,16 @@ mono_arch_emit_prolog (MonoCompile *cfg) size = 8; */ amd64_mov_membase_reg (code, ins->inst_basereg, ins->inst_offset, ainfo->reg, size); + + /* + * Save the original location of 'this', + * get_generic_info_from_stack_frame () needs this to properly look up + * the argument value during the handling of async exceptions. + */ + if (ins == cfg->args [0]) { + mono_add_var_location (cfg, ins, TRUE, ainfo->reg, 0, 0, code - cfg->native_code); + mono_add_var_location (cfg, ins, FALSE, ins->inst_basereg, ins->inst_offset, code - cfg->native_code, 0); + } break; } case ArgInFloatSSEReg: @@ -6942,61 +6974,11 @@ mono_arch_emit_prolog (MonoCompile *cfg) default: g_assert_not_reached (); } - } - } - - /* Might need to attach the thread to the JIT or change the domain for the callback */ - if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) { - guint64 domain = (guint64)cfg->domain; - - args_clobbered = TRUE; - - /* - * The call might clobber argument registers, but they are already - * saved to the stack/global regs. 
- */ - if (appdomain_tls_offset != -1 && lmf_tls_offset != -1) { - guint8 *buf, *no_domain_branch; - code = mono_amd64_emit_tls_get (code, AMD64_RAX, appdomain_tls_offset); - if (cfg->compile_aot) { - /* AOT code is only used in the root domain */ - amd64_mov_reg_imm (code, AMD64_ARG_REG1, 0); - } else { - if ((domain >> 32) == 0) - amd64_mov_reg_imm_size (code, AMD64_ARG_REG1, domain, 4); - else - amd64_mov_reg_imm_size (code, AMD64_ARG_REG1, domain, 8); - } - amd64_alu_reg_reg (code, X86_CMP, AMD64_RAX, AMD64_ARG_REG1); - no_domain_branch = code; - x86_branch8 (code, X86_CC_NE, 0, 0); - code = mono_amd64_emit_tls_get ( code, AMD64_RAX, lmf_addr_tls_offset); - amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX); - buf = code; - x86_branch8 (code, X86_CC_NE, 0, 0); - amd64_patch (no_domain_branch, code); - code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD, - (gpointer)"mono_jit_thread_attach", TRUE); - amd64_patch (buf, code); -#ifdef HOST_WIN32 - /* The TLS key actually contains a pointer to the MonoJitTlsData structure */ - /* FIXME: Add a separate key for LMF to avoid this */ - amd64_alu_reg_imm (code, X86_ADD, AMD64_RAX, G_STRUCT_OFFSET (MonoJitTlsData, lmf)); -#endif - } else { - g_assert (!cfg->compile_aot); - if (cfg->compile_aot) { - /* AOT code is only used in the root domain */ - amd64_mov_reg_imm (code, AMD64_ARG_REG1, 0); - } else { - if ((domain >> 32) == 0) - amd64_mov_reg_imm_size (code, AMD64_ARG_REG1, domain, 4); - else - amd64_mov_reg_imm_size (code, AMD64_ARG_REG1, domain, 8); + if (ins == cfg->args [0]) { + mono_add_var_location (cfg, ins, TRUE, ainfo->reg, 0, 0, code - cfg->native_code); + mono_add_var_location (cfg, ins, TRUE, ins->dreg, 0, code - cfg->native_code, 0); } - code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD, - (gpointer)"mono_jit_thread_attach", TRUE); } } @@ -7079,15 +7061,31 @@ mono_arch_emit_prolog (MonoCompile *cfg) } } - /* Initialize ss_trigger_page_var */ - if (cfg->arch.ss_trigger_page_var) { - MonoInst *var = cfg->arch.ss_trigger_page_var; + if (cfg->gen_seq_points) { + MonoInst *info_var = cfg->arch.seq_point_info_var; - g_assert (!cfg->compile_aot); - g_assert (var->opcode == OP_REGOFFSET); + /* Initialize seq_point_info_var */ + if (cfg->compile_aot) { + /* Initialize the variable from a GOT slot */ + /* Same as OP_AOTCONST */ + mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_SEQ_POINT_INFO, cfg->method); + amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, sizeof(gpointer)); + g_assert (info_var->opcode == OP_REGOFFSET); + amd64_mov_membase_reg (code, info_var->inst_basereg, info_var->inst_offset, AMD64_R11, 8); + } + + /* Initialize ss_trigger_page_var */ + ins = cfg->arch.ss_trigger_page_var; - amd64_mov_reg_imm (code, AMD64_R11, (guint64)ss_trigger_page); - amd64_mov_membase_reg (code, var->inst_basereg, var->inst_offset, AMD64_R11, 8); + g_assert (ins->opcode == OP_REGOFFSET); + + if (cfg->compile_aot) { + amd64_mov_reg_membase (code, AMD64_R11, info_var->inst_basereg, info_var->inst_offset, 8); + amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, G_STRUCT_OFFSET (SeqPointInfo, ss_trigger_page), 8); + } else { + amd64_mov_reg_imm (code, AMD64_R11, (guint64)ss_trigger_page); + } + amd64_mov_membase_reg (code, ins->inst_basereg, ins->inst_offset, AMD64_R11, 8); } cfg->code_len = code - cfg->native_code; @@ -7532,6 +7530,7 @@ mono_arch_instrument_epilog_full (MonoCompile *cfg, void *func, void *p, gboolea int save_mode = SAVE_NONE; MonoMethod *method = cfg->method; MonoType *ret_type = 
mini_type_get_underlying_type (NULL, mono_method_signature (method)->ret); + int i; switch (ret_type->type) { case MONO_TYPE_VOID: @@ -7600,8 +7599,8 @@ mono_arch_instrument_epilog_full (MonoCompile *cfg, void *func, void *p, gboolea amd64_mov_reg_imm (code, AMD64_RAX, 0); if (preserve_argument_registers) { - amd64_push_reg (code, MONO_AMD64_ARG_REG1); - amd64_push_reg (code, MONO_AMD64_ARG_REG2); + for (i = 0; i < PARAM_REGS; ++i) + amd64_push_reg (code, param_regs [i]); } mono_add_patch_info (cfg, code-cfg->native_code, MONO_PATCH_INFO_METHODCONST, method); @@ -7609,8 +7608,8 @@ mono_arch_instrument_epilog_full (MonoCompile *cfg, void *func, void *p, gboolea code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, (gpointer)func, TRUE); if (preserve_argument_registers) { - amd64_pop_reg (code, MONO_AMD64_ARG_REG2); - amd64_pop_reg (code, MONO_AMD64_ARG_REG1); + for (i = PARAM_REGS - 1; i >= 0; --i) + amd64_pop_reg (code, param_regs [i]); } /* Restore result */ @@ -7972,42 +7971,33 @@ mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_targe return start; } - -/* - * Support for fast access to the thread-local lmf structure using the GS - * segment register on NPTL + kernel 2.6.x. - */ - -static gboolean tls_offset_inited = FALSE; - void -mono_arch_setup_jit_tls_data (MonoJitTlsData *tls) +mono_arch_finish_init (void) { - if (!tls_offset_inited) { #ifdef HOST_WIN32 - /* - * We need to init this multiple times, since when we are first called, the key might not - * be initialized yet. - */ - appdomain_tls_offset = mono_domain_get_tls_key (); - lmf_tls_offset = mono_get_jit_tls_key (); - lmf_addr_tls_offset = mono_get_jit_tls_key (); - - /* Only 64 tls entries can be accessed using inline code */ - if (appdomain_tls_offset >= 64) - appdomain_tls_offset = -1; - if (lmf_tls_offset >= 64) - lmf_tls_offset = -1; + /* + * We need to init this multiple times, since when we are first called, the key might not + * be initialized yet. + */ + appdomain_tls_offset = mono_domain_get_tls_key (); + lmf_tls_offset = mono_get_jit_tls_key (); + lmf_addr_tls_offset = mono_get_jit_tls_key (); + + /* Only 64 tls entries can be accessed using inline code */ + if (appdomain_tls_offset >= 64) + appdomain_tls_offset = -1; + if (lmf_tls_offset >= 64) + lmf_tls_offset = -1; + if (lmf_addr_tls_offset >= 64) + lmf_addr_tls_offset = -1; #else - tls_offset_inited = TRUE; #ifdef MONO_XEN_OPT - optimize_for_xen = access ("/proc/xen", F_OK) == 0; + optimize_for_xen = access ("/proc/xen", F_OK) == 0; #endif - appdomain_tls_offset = mono_domain_get_tls_offset (); - lmf_tls_offset = mono_get_lmf_tls_offset (); - lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset (); + appdomain_tls_offset = mono_domain_get_tls_offset (); + lmf_tls_offset = mono_get_lmf_tls_offset (); + lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset (); #endif - } } void @@ -8477,20 +8467,28 @@ mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip) guint8 *code = ip; guint8 *orig_code = code; - /* - * In production, we will use int3 (has to fix the size in the md - * file). But that could confuse gdb, so during development, we emit a SIGSEGV - * instead. 
- */ - g_assert (code [0] == 0x90); - if (breakpoint_size == 8) { - amd64_mov_reg_mem (code, AMD64_R11, (guint64)bp_trigger_page, 4); + if (ji->from_aot) { + guint32 native_offset = ip - (guint8*)ji->code_start; + SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start); + + g_assert (info->bp_addrs [native_offset] == 0); + info->bp_addrs [native_offset] = bp_trigger_page; } else { - amd64_mov_reg_imm_size (code, AMD64_R11, (guint64)bp_trigger_page, 8); - amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, 4); - } + /* + * In production, we will use int3 (has to fix the size in the md + * file). But that could confuse gdb, so during development, we emit a SIGSEGV + * instead. + */ + g_assert (code [0] == 0x90); + if (breakpoint_size == 8) { + amd64_mov_reg_mem (code, AMD64_R11, (guint64)bp_trigger_page, 4); + } else { + amd64_mov_reg_imm_size (code, AMD64_R11, (guint64)bp_trigger_page, 8); + amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, 4); + } - g_assert (code - orig_code == breakpoint_size); + g_assert (code - orig_code == breakpoint_size); + } } /* @@ -8504,8 +8502,16 @@ mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip) guint8 *code = ip; int i; - for (i = 0; i < breakpoint_size; ++i) - x86_nop (code); + if (ji->from_aot) { + guint32 native_offset = ip - (guint8*)ji->code_start; + SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start); + + g_assert (info->bp_addrs [native_offset] == 0); + info->bp_addrs [native_offset] = info; + } else { + for (i = 0; i < breakpoint_size; ++i) + x86_nop (code); + } } gboolean @@ -8524,22 +8530,6 @@ mono_arch_is_breakpoint_event (void *info, void *sigctx) #endif } -/* - * mono_arch_get_ip_for_breakpoint: - * - * Convert the ip in CTX to the address where a breakpoint was placed. - */ -guint8* -mono_arch_get_ip_for_breakpoint (MonoJitInfo *ji, MonoContext *ctx) -{ - guint8 *ip = MONO_CONTEXT_GET_IP (ctx); - - /* ip points to the instruction causing the fault */ - ip -= (breakpoint_size - breakpoint_fault_size); - - return ip; -} - /* * mono_arch_skip_breakpoint: * @@ -8547,9 +8537,14 @@ mono_arch_get_ip_for_breakpoint (MonoJitInfo *ji, MonoContext *ctx) * we resume, the instruction is not executed again. */ void -mono_arch_skip_breakpoint (MonoContext *ctx) +mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji) { - MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + breakpoint_fault_size); + if (ji->from_aot) { + /* amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, 8) */ + MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 3); + } else { + MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + breakpoint_fault_size); + } } /* @@ -8596,21 +8591,6 @@ mono_arch_is_single_step_event (void *info, void *sigctx) #endif } -/* - * mono_arch_get_ip_for_single_step: - * - * Convert the ip in CTX to the address stored in seq_points. 
- */ -guint8* -mono_arch_get_ip_for_single_step (MonoJitInfo *ji, MonoContext *ctx) -{ - guint8 *ip = MONO_CONTEXT_GET_IP (ctx); - - ip += single_step_fault_size; - - return ip; -} - /* * mono_arch_skip_single_step: * @@ -8632,8 +8612,37 @@ mono_arch_skip_single_step (MonoContext *ctx) gpointer mono_arch_get_seq_point_info (MonoDomain *domain, guint8 *code) { - NOT_IMPLEMENTED; - return NULL; + SeqPointInfo *info; + MonoJitInfo *ji; + int i; + + // FIXME: Add a free function + + mono_domain_lock (domain); + info = g_hash_table_lookup (domain_jit_info (domain)->arch_seq_points, + code); + mono_domain_unlock (domain); + + if (!info) { + ji = mono_jit_info_table_find (domain, (char*)code); + g_assert (ji); + + // FIXME: Optimize the size + info = g_malloc0 (sizeof (SeqPointInfo) + (ji->code_size * sizeof (gpointer))); + + info->ss_trigger_page = ss_trigger_page; + info->bp_trigger_page = bp_trigger_page; + /* Initialize to a valid address */ + for (i = 0; i < ji->code_size; ++i) + info->bp_addrs [i] = info; + + mono_domain_lock (domain); + g_hash_table_insert (domain_jit_info (domain)->arch_seq_points, + code, info); + mono_domain_unlock (domain); + } + + return info; } #endif
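
Taken together, the sequence-point changes above give AOTed code a
data-driven breakpoint mechanism. A minimal sketch of the debugger-side
flow (illustrative only, not code from this patch; 'domain', 'code_start'
and 'native_offset' stand in for values obtained from the JIT info of the
method being debugged):

    /* Look up (or lazily create) the per-method SeqPointInfo. */
    SeqPointInfo *info = mono_arch_get_seq_point_info (domain, code_start);

    /*
     * Every OP_SEQ_POINT in the AOTed method loads its slot in
     * info->bp_addrs and dereferences it (three movs, see the
     * OP_SEQ_POINT case above), so toggling a breakpoint is one store:
     */
    info->bp_addrs [native_offset] = info->bp_trigger_page; /* arm: next execution faults */
    info->bp_addrs [native_offset] = info;                  /* disarm: any readable address */

Because mono_arch_get_seq_point_info () initializes every slot to 'info'
itself, sequence points that carry no breakpoint dereference a valid
address and fall through, at the cost of a few extra loads compared with
the nop-padded JIT case.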