X-Git-Url: http://wien.tomnetworks.com/gitweb/?a=blobdiff_plain;f=mono%2Fmini%2Fexceptions-amd64.c;h=6490dec007395920c7088fc01da0dcd64c8eb692;hb=fb037f28a274fc2ab522ad6b6b1bb864ec0c530b;hp=b15470e759fd9c2c582e5a3f556a1bc434bc01c0;hpb=c39d7ce9985a7067c1cbf44188007c9433901940;p=mono.git diff --git a/mono/mini/exceptions-amd64.c b/mono/mini/exceptions-amd64.c index b15470e759f..6490dec0073 100644 --- a/mono/mini/exceptions-amd64.c +++ b/mono/mini/exceptions-amd64.c @@ -11,16 +11,20 @@ #include #include #include +#ifndef PLATFORM_WIN32 #include +#endif #include #include #include #include +#include #include #include #include #include +#include #include "mini.h" #include "mini-amd64.h" @@ -55,15 +59,19 @@ LONG CALLBACK seh_handler(EXCEPTION_POINTERS* ep) sctx = g_malloc(sizeof(MonoContext)); /* Copy Win32 context to UNIX style context */ - sctx->eax = ctx->Eax; - sctx->ebx = ctx->Ebx; - sctx->ecx = ctx->Ecx; - sctx->edx = ctx->Edx; - sctx->ebp = ctx->Ebp; - sctx->esp = ctx->Esp; - sctx->esi = ctx->Esi; - sctx->edi = ctx->Edi; - sctx->eip = ctx->Eip; + sctx->rax = ctx->Rax; + sctx->rbx = ctx->Rbx; + sctx->rcx = ctx->Rcx; + sctx->rdx = ctx->Rdx; + sctx->rbp = ctx->Rbp; + sctx->rsp = ctx->Rsp; + sctx->rsi = ctx->Rsi; + sctx->rdi = ctx->Rdi; + sctx->rip = ctx->Rip; + sctx->r12 = ctx->R12; + sctx->r13 = ctx->R13; + sctx->r14 = ctx->R14; + sctx->r15 = ctx->R15; switch (er->ExceptionCode) { case EXCEPTION_ACCESS_VIOLATION: @@ -85,15 +93,17 @@ LONG CALLBACK seh_handler(EXCEPTION_POINTERS* ep) } /* Copy context back */ - ctx->Eax = sctx->eax; - ctx->Ebx = sctx->ebx; - ctx->Ecx = sctx->ecx; - ctx->Edx = sctx->edx; - ctx->Ebp = sctx->ebp; - ctx->Esp = sctx->esp; - ctx->Esi = sctx->esi; - ctx->Edi = sctx->edi; - ctx->Eip = sctx->eip; + ctx->Rax = sctx->rax; + ctx->Rbx = sctx->rbx; + ctx->Rcx = sctx->rcx; + ctx->Rdx = sctx->rdx; + ctx->Rbp = sctx->rbp; + ctx->Rsp = sctx->rsp; + ctx->Rsi = sctx->rsi; + ctx->Rdi = sctx->rdi; + ctx->Rip = sctx->rip; + + g_free (sctx); return res; } @@ -133,36 +143,46 @@ void win32_seh_set_handler(int type, MonoW32ExceptionHandler handler) * Returns a pointer to a method which restores a previously saved sigcontext. 
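 *
 * Editor's sketch of how the returned trampoline is typically consumed; it
 * mirrors altstack_handle_and_restore () later in this file, and anything
 * beyond that pattern (e.g. the resume_ip variable) is an assumption:
 *
 *   void (*restore_context) (MonoContext *) = mono_get_restore_context ();
 *   MonoContext ctx;
 *
 *   mono_arch_sigctx_to_monoctx (sigctx, &ctx);
 *   ctx.rip = (guint64)resume_ip;   /* hypothetical resume address */
 *   restore_context (&ctx);         /* does not return */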
*/ gpointer -mono_arch_get_restore_context (void) +mono_arch_get_restore_context_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot) { - static guint8 *start = NULL; - static gboolean inited = FALSE; + guint8 *start = NULL; guint8 *code; - if (inited) - return start; - /* restore_contect (MonoContext *ctx) */ + *ji = NULL; + start = code = mono_global_codeman_reserve (256); - /* get return address */ - amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, rip), 8); + amd64_mov_reg_reg (code, AMD64_R11, AMD64_ARG_REG1, 8); + + /* Restore all registers except %rip and %r11 */ + amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rax), 8); + amd64_mov_reg_membase (code, AMD64_RCX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rcx), 8); + amd64_mov_reg_membase (code, AMD64_RDX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rdx), 8); + amd64_mov_reg_membase (code, AMD64_RBX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rbx), 8); + amd64_mov_reg_membase (code, AMD64_RBP, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rbp), 8); + amd64_mov_reg_membase (code, AMD64_RSI, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rsi), 8); + amd64_mov_reg_membase (code, AMD64_RDI, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rdi), 8); + //amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r8), 8); + //amd64_mov_reg_membase (code, AMD64_R9, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r9), 8); + //amd64_mov_reg_membase (code, AMD64_R10, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r10), 8); + amd64_mov_reg_membase (code, AMD64_R12, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r12), 8); + amd64_mov_reg_membase (code, AMD64_R13, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r13), 8); + amd64_mov_reg_membase (code, AMD64_R14, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r14), 8); + amd64_mov_reg_membase (code, AMD64_R15, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r15), 8); + + amd64_mov_reg_membase (code, AMD64_RSP, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rsp), 8); - /* Restore registers */ - amd64_mov_reg_membase (code, AMD64_RBP, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, rbp), 8); - amd64_mov_reg_membase (code, AMD64_RBX, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, rbx), 8); - amd64_mov_reg_membase (code, AMD64_R12, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, r12), 8); - amd64_mov_reg_membase (code, AMD64_R13, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, r13), 8); - amd64_mov_reg_membase (code, AMD64_R14, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, r14), 8); - amd64_mov_reg_membase (code, AMD64_R15, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, r15), 8); - - amd64_mov_reg_membase (code, AMD64_RSP, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, rsp), 8); + /* get return address */ + amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rip), 8); /* jump to the saved IP */ - amd64_jump_reg (code, AMD64_RAX); + amd64_jump_reg (code, AMD64_R11); - inited = TRUE; + mono_arch_flush_icache (start, code - start); + + *code_size = code - start; return start; } @@ -175,18 +195,16 @@ mono_arch_get_restore_context (void) * @exc object in this case). 
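 *
 * Editor's sketch of the call shape. The real caller lives in the generic
 * exception-handling code; the accessor name (by analogy with
 * mono_get_restore_context ()) and the clause variables are illustrative:
 *
 *   int (*call_filter) (MonoContext *, unsigned long) = mono_get_call_filter ();
 *
 *   /- filter clause: a non-zero result selects the associated handler -/
 *   filtered = call_filter (ctx, filter_clause_start);
 *   /- finally clause: called with no exception object, result ignored -/
 *   call_filter (ctx, finally_clause_start);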
*/ gpointer -mono_arch_get_call_filter (void) +mono_arch_get_call_filter_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot) { - static guint8 *start; - static gboolean inited = FALSE; + guint8 *start; int i; guint8 *code; guint32 pos; - if (inited) - return start; + *ji = NULL; - start = code = mono_global_codeman_reserve (64); + start = code = mono_global_codeman_reserve (128); /* call_filter (MonoContext *ctx, unsigned long eip) */ code = start; @@ -212,16 +230,20 @@ mono_arch_get_call_filter (void) amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8); /* set new EBP */ - amd64_mov_reg_membase (code, AMD64_RBP, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, rbp), 8); + amd64_mov_reg_membase (code, AMD64_RBP, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rbp), 8); /* load callee saved regs */ - amd64_mov_reg_membase (code, AMD64_RBX, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, rbx), 8); - amd64_mov_reg_membase (code, AMD64_R12, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, r12), 8); - amd64_mov_reg_membase (code, AMD64_R13, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, r13), 8); - amd64_mov_reg_membase (code, AMD64_R14, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, r14), 8); - amd64_mov_reg_membase (code, AMD64_R15, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, r15), 8); + amd64_mov_reg_membase (code, AMD64_RBX, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rbx), 8); + amd64_mov_reg_membase (code, AMD64_R12, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r12), 8); + amd64_mov_reg_membase (code, AMD64_R13, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r13), 8); + amd64_mov_reg_membase (code, AMD64_R14, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r14), 8); + amd64_mov_reg_membase (code, AMD64_R15, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r15), 8); +#ifdef PLATFORM_WIN32 + amd64_mov_reg_membase (code, AMD64_RDI, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rdi), 8); + amd64_mov_reg_membase (code, AMD64_RSI, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rsi), 8); +#endif /* call the handler */ - amd64_call_reg (code, AMD64_RSI); + amd64_call_reg (code, AMD64_ARG_REG2); if (! (pos & 8)) amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8); @@ -237,23 +259,33 @@ mono_arch_get_call_filter (void) amd64_leave (code); amd64_ret (code); - g_assert ((code - start) < 64); + g_assert ((code - start) < 128); - inited = TRUE; + mono_arch_flush_icache (start, code - start); + + *code_size = code - start; return start; } -static void -throw_exception (MonoObject *exc, guint64 rip, guint64 rsp, - guint64 rbx, guint64 rbp, guint64 r12, guint64 r13, - guint64 r14, guint64 r15, guint64 rethrow) +/* + * The first few arguments are dummy, to force the other arguments to be passed on + * the stack, this avoids overwriting the argument registers in the throw trampoline. 
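+ *
+ * Editor's note: on SysV AMD64 the first six integer arguments are passed in
+ * %rdi, %rsi, %rdx, %rcx, %r8 and %r9, and everything after them goes on the
+ * stack. The six dummies therefore soak up exactly the register slots, so the
+ * throw trampoline can build the real argument list with pushes alone and
+ * never disturbs a register that still holds caller state. Schematically:
+ *
+ *   mono_amd64_throw_exception (d1, ..., d6,          // the six arg registers
+ *                               exc, rip, rsp, ...);  // everything else: stack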
+ */ +void +mono_amd64_throw_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4, + guint64 dummy5, guint64 dummy6, + MonoObject *exc, guint64 rip, guint64 rsp, + guint64 rbx, guint64 rbp, guint64 r12, guint64 r13, + guint64 r14, guint64 r15, guint64 rdi, guint64 rsi, + guint64 rax, guint64 rcx, guint64 rdx, + guint64 rethrow) { static void (*restore_context) (MonoContext *); MonoContext ctx; if (!restore_context) - restore_context = mono_arch_get_restore_context (); + restore_context = mono_get_restore_context (); ctx.rsp = rsp; ctx.rip = rip; @@ -263,6 +295,11 @@ throw_exception (MonoObject *exc, guint64 rip, guint64 rsp, ctx.r13 = r13; ctx.r14 = r14; ctx.r15 = r15; + ctx.rdi = rdi; + ctx.rsi = rsi; + ctx.rax = rax; + ctx.rcx = rcx; + ctx.rdx = rdx; if (!rethrow && mono_debugger_throw_exception ((gpointer)(rip - 8), (gpointer)rsp, exc)) { /* @@ -302,7 +339,7 @@ throw_exception (MonoObject *exc, guint64 rip, guint64 rsp, } static gpointer -get_throw_trampoline (gboolean rethrow) +get_throw_trampoline (gboolean rethrow, guint32 *code_size, MonoJumpInfo **ji, gboolean aot) { guint8* start; guint8 *code; @@ -311,30 +348,59 @@ get_throw_trampoline (gboolean rethrow) code = start; - /* Exception */ - amd64_mov_reg_reg (code, AMD64_RDI, AMD64_RDI, 8); - /* IP */ - amd64_mov_reg_membase (code, AMD64_RSI, AMD64_RSP, 0, 8); - /* SP */ - amd64_lea_membase (code, AMD64_RDX, AMD64_RSP, 8); - /* Callee saved regs */ - amd64_mov_reg_reg (code, AMD64_RCX, AMD64_RBX, 8); - amd64_mov_reg_reg (code, AMD64_R8, AMD64_RBP, 8); - amd64_mov_reg_reg (code, AMD64_R9, AMD64_R12, 8); - /* align stack */ - amd64_push_imm (code, 0); + *ji = NULL; + + amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, 8); + /* reverse order */ amd64_push_imm (code, rethrow); + amd64_push_reg (code, AMD64_RDX); + amd64_push_reg (code, AMD64_RCX); + amd64_push_reg (code, AMD64_RAX); + amd64_push_reg (code, AMD64_RSI); + amd64_push_reg (code, AMD64_RDI); amd64_push_reg (code, AMD64_R15); amd64_push_reg (code, AMD64_R14); amd64_push_reg (code, AMD64_R13); + amd64_push_reg (code, AMD64_R12); + amd64_push_reg (code, AMD64_RBP); + amd64_push_reg (code, AMD64_RBX); + + /* SP */ + amd64_lea_membase (code, AMD64_RAX, AMD64_R11, 8); + amd64_push_reg (code, AMD64_RAX); + + /* IP */ + amd64_push_membase (code, AMD64_R11, 0); + + /* Exception */ + amd64_push_reg (code, AMD64_ARG_REG1); + +#ifdef PLATFORM_WIN32 + /* align stack */ + amd64_push_imm (code, 0); + amd64_push_imm (code, 0); + amd64_push_imm (code, 0); + amd64_push_imm (code, 0); + amd64_push_imm (code, 0); + amd64_push_imm (code, 0); +#endif - amd64_mov_reg_imm (code, AMD64_R11, throw_exception); + if (aot) { + *ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_throw_exception"); + amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8); + } else { + amd64_mov_reg_imm (code, AMD64_R11, mono_amd64_throw_exception); + } amd64_call_reg (code, AMD64_R11); amd64_breakpoint (code); + mono_arch_flush_icache (start, code - start); + g_assert ((code - start) < 64); + *code_size = code - start; + return start; } @@ -347,52 +413,34 @@ get_throw_trampoline (gboolean rethrow) * */ gpointer -mono_arch_get_throw_exception (void) +mono_arch_get_throw_exception_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot) { - static guint8* start; - static gboolean inited = FALSE; - - if (inited) - return start; - - start = get_throw_trampoline (FALSE); - - inited = TRUE; - - return start; + return get_throw_trampoline (FALSE, code_size, ji, 
aot); } gpointer -mono_arch_get_rethrow_exception (void) +mono_arch_get_rethrow_exception_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot) { - static guint8* start; - static gboolean inited = FALSE; - - if (inited) - return start; - - start = get_throw_trampoline (TRUE); - - inited = TRUE; - - return start; + return get_throw_trampoline (TRUE, code_size, ji, aot); } gpointer -mono_arch_get_throw_exception_by_name (void) +mono_arch_get_throw_exception_by_name_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot) { - static guint8* start; - static gboolean inited = FALSE; + guint8* start; guint8 *code; - if (inited) - return start; - start = code = mono_global_codeman_reserve (64); + *ji = NULL; + /* Not used on amd64 */ amd64_breakpoint (code); + mono_arch_flush_icache (start, code - start); + + *code_size = code - start; + return start; } @@ -407,47 +455,65 @@ mono_arch_get_throw_exception_by_name (void) * needs no relocations in the caller. */ gpointer -mono_arch_get_throw_corlib_exception (void) +mono_arch_get_throw_corlib_exception_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot) { static guint8* start; - static gboolean inited = FALSE; guint8 *code; guint64 throw_ex; - if (inited) - return start; - start = code = mono_global_codeman_reserve (64); + *ji = NULL; + /* Push throw_ip */ - amd64_push_reg (code, AMD64_RSI); + amd64_push_reg (code, AMD64_ARG_REG2); /* Call exception_from_token */ - amd64_mov_reg_reg (code, AMD64_RSI, AMD64_RDI, 8); - amd64_mov_reg_imm (code, AMD64_RDI, mono_defaults.exception_class->image); - amd64_mov_reg_imm (code, AMD64_R11, mono_exception_from_token); + amd64_mov_reg_reg (code, AMD64_ARG_REG2, AMD64_ARG_REG1, 8); + if (aot) { + *ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_IMAGE, mono_defaults.exception_class->image); + amd64_mov_reg_membase (code, AMD64_ARG_REG1, AMD64_RIP, 0, 8); + *ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_exception_from_token"); + amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8); + } else { + amd64_mov_reg_imm (code, AMD64_ARG_REG1, mono_defaults.exception_class->image); + amd64_mov_reg_imm (code, AMD64_R11, mono_exception_from_token); + } +#ifdef PLATFORM_WIN32 + amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 32); +#endif amd64_call_reg (code, AMD64_R11); +#ifdef PLATFORM_WIN32 + amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 32); +#endif /* Compute throw_ip */ - amd64_pop_reg (code, AMD64_RSI); + amd64_pop_reg (code, AMD64_ARG_REG2); /* return addr */ - amd64_pop_reg (code, AMD64_RDX); - amd64_alu_reg_reg (code, X86_SUB, AMD64_RDX, AMD64_RSI); + amd64_pop_reg (code, AMD64_ARG_REG3); + amd64_alu_reg_reg (code, X86_SUB, AMD64_ARG_REG3, AMD64_ARG_REG2); /* Put the throw_ip at the top of the misaligned stack */ - amd64_push_reg (code, AMD64_RDX); + amd64_push_reg (code, AMD64_ARG_REG3); - throw_ex = (guint64)mono_arch_get_throw_exception (); + throw_ex = (guint64)mono_get_throw_exception (); /* Call throw_exception */ - amd64_mov_reg_reg (code, AMD64_RDI, AMD64_RAX, 8); - amd64_mov_reg_imm (code, AMD64_R11, throw_ex); + amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_RAX, 8); + if (aot) { + *ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_throw_exception"); + amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8); + } else { + amd64_mov_reg_imm (code, AMD64_R11, throw_ex); + } /* The original IP is on the stack */ amd64_jump_reg (code, AMD64_R11); g_assert ((code - start) < 64); - inited = 
TRUE; + mono_arch_flush_icache (start, code - start); + + *code_size = code - start; return start; } @@ -479,35 +545,38 @@ mono_arch_find_jit_info (MonoDomain *domain, MonoJitTlsData *jit_tls, MonoJitInf if (managed) *managed = FALSE; + *new_ctx = *ctx; + if (ji != NULL) { int offset; gboolean omit_fp = (ji->used_regs & (1 << 31)) > 0; - *new_ctx = *ctx; - if (managed) if (!ji->method->wrapper_type) *managed = TRUE; /* - * Some managed methods like pinvoke wrappers might have save_lmf set. - * In this case, register save/restore code is not generated by the - * JIT, so we have to restore callee saved registers from the lmf. + * If a method has save_lmf set, then register save/restore code is not generated + * by the JIT, so we have to restore callee saved registers from the lmf. */ if (ji->method->save_lmf) { + MonoLMF *lmf_addr; + /* - * We only need to do this if the exception was raised in managed - * code, since otherwise the lmf was already popped of the stack. + * *lmf might not point to the LMF pushed by this method, so compute the LMF + * address instead. */ - if (*lmf && ((*lmf) != jit_tls->first_lmf) && (MONO_CONTEXT_GET_SP (ctx) >= (gpointer)(*lmf)->rsp)) { - new_ctx->rbp = (*lmf)->ebp; - new_ctx->rbx = (*lmf)->rbx; - new_ctx->rsp = (*lmf)->rsp; - new_ctx->r12 = (*lmf)->r12; - new_ctx->r13 = (*lmf)->r13; - new_ctx->r14 = (*lmf)->r14; - new_ctx->r15 = (*lmf)->r15; - } + if (omit_fp) + lmf_addr = (MonoLMF*)ctx->rsp; + else + lmf_addr = (MonoLMF*)(ctx->rbp - sizeof (MonoLMF)); + + new_ctx->rbp = lmf_addr->rbp; + new_ctx->rbx = lmf_addr->rbx; + new_ctx->r12 = lmf_addr->r12; + new_ctx->r13 = lmf_addr->r13; + new_ctx->r14 = lmf_addr->r14; + new_ctx->r15 = lmf_addr->r15; } else { offset = omit_fp ? 0 : -1; @@ -552,7 +621,7 @@ mono_arch_find_jit_info (MonoDomain *domain, MonoJitTlsData *jit_tls, MonoJitInf if (*lmf && ((*lmf) != jit_tls->first_lmf) && (MONO_CONTEXT_GET_SP (ctx) >= (gpointer)(*lmf)->rsp)) { /* remove any unused lmf */ - *lmf = (*lmf)->previous_lmf; + *lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~1); } if (omit_fp) { @@ -580,20 +649,34 @@ mono_arch_find_jit_info (MonoDomain *domain, MonoJitTlsData *jit_tls, MonoJitInf return ji; } else if (*lmf) { - - *new_ctx = *ctx; + guint64 rip; - if (!(*lmf)->method) + if (((guint64)(*lmf)->previous_lmf) & 1) { + /* This LMF has the rip field set */ + rip = (*lmf)->rip; + } else if ((*lmf)->rsp == 0) { + /* Top LMF entry */ return (gpointer)-1; - - if ((ji = mono_jit_info_table_find (domain, (gpointer)(*lmf)->rip))) { } else { + /* + * The rsp field is set just before the call which transitioned to native + * code. Obtain the rip from the stack. + */ + rip = *(guint64*)((*lmf)->rsp - sizeof (gpointer)); + } + + ji = mono_jit_info_table_find (domain, (gpointer)rip); + if (!ji) { + if (!(*lmf)->method) + /* Top LMF entry */ + return (gpointer)-1; + /* Trampoline lmf frame */ memset (res, 0, sizeof (MonoJitInfo)); res->method = (*lmf)->method; } - new_ctx->rip = (*lmf)->rip; - new_ctx->rbp = (*lmf)->ebp; + new_ctx->rip = rip; + new_ctx->rbp = (*lmf)->rbp; new_ctx->rsp = (*lmf)->rsp; new_ctx->rbx = (*lmf)->rbx; @@ -602,7 +685,7 @@ mono_arch_find_jit_info (MonoDomain *domain, MonoJitTlsData *jit_tls, MonoJitInf new_ctx->r14 = (*lmf)->r14; new_ctx->r15 = (*lmf)->r15; - *lmf = (*lmf)->previous_lmf; + *lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~1); return ji ? 
ji : res; } @@ -630,6 +713,7 @@ mono_arch_handle_exception (void *sigctx, gpointer obj, gboolean test_only) return TRUE; } +#ifdef MONO_ARCH_USE_SIGACTION static inline guint64* gregs_from_ucontext (ucontext_t *ctx) { @@ -641,10 +725,11 @@ gregs_from_ucontext (ucontext_t *ctx) return gregs; } - +#endif void mono_arch_sigctx_to_monoctx (void *sigctx, MonoContext *mctx) { +#ifdef MONO_ARCH_USE_SIGACTION ucontext_t *ctx = (ucontext_t*)sigctx; guint64 *gregs = gregs_from_ucontext (ctx); @@ -662,11 +747,29 @@ mono_arch_sigctx_to_monoctx (void *sigctx, MonoContext *mctx) mctx->r13 = gregs [REG_R13]; mctx->r14 = gregs [REG_R14]; mctx->r15 = gregs [REG_R15]; +#else + MonoContext *ctx = (MonoContext *)sigctx; + + mctx->rax = ctx->rax; + mctx->rbx = ctx->rbx; + mctx->rcx = ctx->rcx; + mctx->rdx = ctx->rdx; + mctx->rbp = ctx->rbp; + mctx->rsp = ctx->rsp; + mctx->rsi = ctx->rsi; + mctx->rdi = ctx->rdi; + mctx->rip = ctx->rip; + mctx->r12 = ctx->r12; + mctx->r13 = ctx->r13; + mctx->r14 = ctx->r14; + mctx->r15 = ctx->r15; +#endif } void mono_arch_monoctx_to_sigctx (MonoContext *mctx, void *sigctx) { +#ifdef MONO_ARCH_USE_SIGACTION ucontext_t *ctx = (ucontext_t*)sigctx; guint64 *gregs = gregs_from_ucontext (ctx); @@ -684,15 +787,476 @@ mono_arch_monoctx_to_sigctx (MonoContext *mctx, void *sigctx) gregs [REG_R13] = mctx->r13; gregs [REG_R14] = mctx->r14; gregs [REG_R15] = mctx->r15; +#else + MonoContext *ctx = (MonoContext *)sigctx; + + ctx->rax = mctx->rax; + ctx->rbx = mctx->rbx; + ctx->rcx = mctx->rcx; + ctx->rdx = mctx->rdx; + ctx->rbp = mctx->rbp; + ctx->rsp = mctx->rsp; + ctx->rsi = mctx->rsi; + ctx->rdi = mctx->rdi; + ctx->rip = mctx->rip; + ctx->r12 = mctx->r12; + ctx->r13 = mctx->r13; + ctx->r14 = mctx->r14; + ctx->r15 = mctx->r15; +#endif } gpointer mono_arch_ip_from_context (void *sigctx) { + +#ifdef MONO_ARCH_USE_SIGACTION + ucontext_t *ctx = (ucontext_t*)sigctx; guint64 *gregs = gregs_from_ucontext (ctx); return (gpointer)gregs [REG_RIP]; +#else + MonoContext *ctx = sigctx; + return (gpointer)ctx->rip; +#endif +} + +static void +restore_soft_guard_pages (void) +{ + MonoJitTlsData *jit_tls = TlsGetValue (mono_jit_tls_id); + if (jit_tls->stack_ovf_guard_base) + mono_mprotect (jit_tls->stack_ovf_guard_base, jit_tls->stack_ovf_guard_size, MONO_MMAP_NONE); } +/* + * this function modifies mctx so that when it is restored, it + * won't execcute starting at mctx.eip, but in a function that + * will restore the protection on the soft-guard pages and return back to + * continue at mctx.eip. 
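+ *
+ * Editor's sketch of the rewritten context (taken from the code below):
+ *
+ *   new rsp -> [ original mctx.rip ]     // pushed as a fake return address
+ *   new rip  = restore_soft_guard_pages
+ *
+ * so restore_soft_guard_pages () runs first, re-protects the guard pages, and
+ * its 'ret' then resumes execution at the original mctx.rip.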
+ */ +static void +prepare_for_guard_pages (MonoContext *mctx) +{ + gpointer *sp; + sp = (gpointer)(mctx->rsp); + sp -= 1; + /* the return addr */ + sp [0] = (gpointer)(mctx->rip); + mctx->rip = (guint64)restore_soft_guard_pages; + mctx->rsp = (guint64)sp; +} + +static void +altstack_handle_and_restore (void *sigctx, gpointer obj, gboolean stack_ovf) +{ + void (*restore_context) (MonoContext *); + MonoContext mctx; + + restore_context = mono_get_restore_context (); + mono_arch_sigctx_to_monoctx (sigctx, &mctx); + mono_handle_exception (&mctx, obj, MONO_CONTEXT_GET_IP (&mctx), FALSE); + if (stack_ovf) + prepare_for_guard_pages (&mctx); + restore_context (&mctx); +} + +void +mono_arch_handle_altstack_exception (void *sigctx, gpointer fault_addr, gboolean stack_ovf) +{ +#ifdef MONO_ARCH_USE_SIGACTION + MonoException *exc = NULL; + ucontext_t *ctx = (ucontext_t*)sigctx; + guint64 *gregs = gregs_from_ucontext (ctx); + MonoJitInfo *ji = mono_jit_info_table_find (mono_domain_get (), (gpointer)gregs [REG_RIP]); + gpointer *sp; + int frame_size; + + if (stack_ovf) + exc = mono_domain_get ()->stack_overflow_ex; + if (!ji) + mono_handle_native_sigsegv (SIGSEGV, sigctx); + + /* setup a call frame on the real stack so that control is returned there + * and exception handling can continue. + * The frame looks like: + * ucontext struct + * ... + * return ip + * 128 is the size of the red zone + */ + frame_size = sizeof (ucontext_t) + sizeof (gpointer) * 4 + 128; + frame_size += 15; + frame_size &= ~15; + sp = (gpointer)(gregs [REG_RSP] & ~15); + sp = (gpointer)((char*)sp - frame_size); + /* the arguments must be aligned */ + sp [-1] = (gpointer)gregs [REG_RIP]; + /* may need to adjust pointers in the new struct copy, depending on the OS */ + memcpy (sp + 4, ctx, sizeof (ucontext_t)); + /* at the return form the signal handler execution starts in altstack_handle_and_restore() */ + gregs [REG_RIP] = (unsigned long)altstack_handle_and_restore; + gregs [REG_RSP] = (unsigned long)(sp - 1); + gregs [REG_RDI] = (unsigned long)(sp + 4); + gregs [REG_RSI] = (guint64)exc; + gregs [REG_RDX] = stack_ovf; +#endif +} + +static guint64 +get_original_ip (void) +{ + MonoLMF *lmf = mono_get_lmf (); + + g_assert (lmf); + + /* Reset the change to previous_lmf */ + lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf & ~1); + + return lmf->rip; +} + +static gpointer +get_throw_pending_exception (void) +{ + static guint8* start; + static gboolean inited = FALSE; + guint8 *code; + guint8 *br[1]; + gpointer throw_trampoline; + + if (inited) + return start; + + start = code = mono_global_codeman_reserve (128); + + /* We are in the frame of a managed method after a call */ + /* + * We would like to throw the pending exception in such a way that it looks to + * be thrown from the managed method. 
+ */ + + /* Save registers which might contain the return value of the call */ + amd64_push_reg (code, AMD64_RAX); + amd64_push_reg (code, AMD64_RDX); + + amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8); + amd64_movsd_membase_reg (code, AMD64_RSP, 0, AMD64_XMM0); + + /* Align stack */ + amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8); + + /* Obtain the pending exception */ + amd64_mov_reg_imm (code, AMD64_R11, mono_thread_get_and_clear_pending_exception); + amd64_call_reg (code, AMD64_R11); + + /* Check if it is NULL, and branch */ + amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0); + br[0] = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE); + + /* exc != NULL branch */ + + /* Save the exc on the stack */ + amd64_push_reg (code, AMD64_RAX); + /* Align stack */ + amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8); + + /* Obtain the original ip and clear the flag in previous_lmf */ + amd64_mov_reg_imm (code, AMD64_R11, get_original_ip); + amd64_call_reg (code, AMD64_R11); + + /* Load exc */ + amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, 8, 8); + + /* Pop saved stuff from the stack */ + amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 6 * 8); + + /* Setup arguments for the throw trampoline */ + /* Exception */ + amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_R11, 8); + /* The trampoline expects the caller ip to be pushed on the stack */ + amd64_push_reg (code, AMD64_RAX); + + /* Call the throw trampoline */ + throw_trampoline = mono_get_throw_exception (); + amd64_mov_reg_imm (code, AMD64_R11, throw_trampoline); + /* We use a jump instead of a call so we can push the original ip on the stack */ + amd64_jump_reg (code, AMD64_R11); + + /* ex == NULL branch */ + mono_amd64_patch (br [0], code); + + /* Obtain the original ip and clear the flag in previous_lmf */ + amd64_mov_reg_imm (code, AMD64_R11, get_original_ip); + amd64_call_reg (code, AMD64_R11); + amd64_mov_reg_reg (code, AMD64_R11, AMD64_RAX, 8); + + /* Restore registers */ + amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8); + amd64_movsd_reg_membase (code, AMD64_XMM0, AMD64_RSP, 0); + amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8); + amd64_pop_reg (code, AMD64_RDX); + amd64_pop_reg (code, AMD64_RAX); + + /* Return to original code */ + amd64_jump_reg (code, AMD64_R11); + + g_assert ((code - start) < 128); + + inited = TRUE; + + return start; +} + +/* + * Called when a thread receives an async exception while executing unmanaged code. + * Instead of checking for this exception in the managed-to-native wrapper, we hijack + * the return address on the stack to point to a helper routine which throws the + * exception. + */ +void +mono_arch_notify_pending_exc (void) +{ + MonoLMF *lmf = mono_get_lmf (); + + if (lmf->rsp == 0) + /* Initial LMF */ + return; + + if ((guint64)lmf->previous_lmf & 1) + /* Already hijacked or trampoline LMF entry */ + return; + + /* lmf->rsp is set just before making the call which transitions to unmanaged code */ + lmf->rip = *(guint64*)(lmf->rsp - 8); + /* Signal that lmf->rip is set */ + lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf | 1); + + *(gpointer*)(lmf->rsp - 8) = get_throw_pending_exception (); +} + +#ifdef PLATFORM_WIN32 + +/* + * The mono_arch_unwindinfo* methods are used to build and add + * function table info for each emitted method from mono. On Winx64 + * the seh handler will not be called if the mono methods are not + * added to the function table. + * + * We should not need to add non-volatile register info to the + * table since mono stores that info elsewhere. 
(Except for the register + * used for the fp.) + */ + +#define MONO_MAX_UNWIND_CODES 22 + +typedef union _UNWIND_CODE { + struct { + guchar CodeOffset; + guchar UnwindOp : 4; + guchar OpInfo : 4; + }; + gushort FrameOffset; +} UNWIND_CODE, *PUNWIND_CODE; + +typedef struct _UNWIND_INFO { + guchar Version : 3; + guchar Flags : 5; + guchar SizeOfProlog; + guchar CountOfCodes; + guchar FrameRegister : 4; + guchar FrameOffset : 4; + /* custom size for mono allowing for mono allowing for*/ + /*UWOP_PUSH_NONVOL ebp offset = 21*/ + /*UWOP_ALLOC_LARGE : requires 2 or 3 offset = 20*/ + /*UWOP_SET_FPREG : requires 2 offset = 17*/ + /*UWOP_PUSH_NONVOL offset = 15-0*/ + UNWIND_CODE UnwindCode[MONO_MAX_UNWIND_CODES]; + +/* UNWIND_CODE MoreUnwindCode[((CountOfCodes + 1) & ~1) - 1]; + * union { + * OPTIONAL ULONG ExceptionHandler; + * OPTIONAL ULONG FunctionEntry; + * }; + * OPTIONAL ULONG ExceptionData[]; */ +} UNWIND_INFO, *PUNWIND_INFO; + +typedef struct +{ + RUNTIME_FUNCTION runtimeFunction; + UNWIND_INFO unwindInfo; +} MonoUnwindInfo, *PMonoUnwindInfo; + +static void +mono_arch_unwindinfo_create (gpointer* monoui) +{ + PMonoUnwindInfo newunwindinfo; + *monoui = newunwindinfo = g_new0 (MonoUnwindInfo, 1); + newunwindinfo->unwindInfo.Version = 1; +} + +void +mono_arch_unwindinfo_add_push_nonvol (gpointer* monoui, gpointer codebegin, gpointer nextip, guchar reg ) +{ + PMonoUnwindInfo unwindinfo; + PUNWIND_CODE unwindcode; + guchar codeindex; + if (!*monoui) + mono_arch_unwindinfo_create (monoui); + + unwindinfo = (MonoUnwindInfo*)*monoui; + + if (unwindinfo->unwindInfo.CountOfCodes >= MONO_MAX_UNWIND_CODES) + g_error ("Larger allocation needed for the unwind information."); + + codeindex = MONO_MAX_UNWIND_CODES - (++unwindinfo->unwindInfo.CountOfCodes); + unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex]; + unwindcode->UnwindOp = 0; /*UWOP_PUSH_NONVOL*/ + unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin)); + unwindcode->OpInfo = reg; + + if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset) + g_error ("Adding unwind info in wrong order."); + + unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset; +} + +void +mono_arch_unwindinfo_add_set_fpreg (gpointer* monoui, gpointer codebegin, gpointer nextip, guchar reg ) +{ + PMonoUnwindInfo unwindinfo; + PUNWIND_CODE unwindcode; + guchar codeindex; + if (!*monoui) + mono_arch_unwindinfo_create (monoui); + + unwindinfo = (MonoUnwindInfo*)*monoui; + + if (unwindinfo->unwindInfo.CountOfCodes + 1 >= MONO_MAX_UNWIND_CODES) + g_error ("Larger allocation needed for the unwind information."); + + codeindex = MONO_MAX_UNWIND_CODES - (unwindinfo->unwindInfo.CountOfCodes += 2); + unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex]; + unwindcode->FrameOffset = 0; /*Assuming no frame pointer offset for mono*/ + unwindcode++; + unwindcode->UnwindOp = 3; /*UWOP_SET_FPREG*/ + unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin)); + unwindcode->OpInfo = reg; + + unwindinfo->unwindInfo.FrameRegister = reg; + + if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset) + g_error ("Adding unwind info in wrong order."); + + unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset; +} + +void +mono_arch_unwindinfo_add_alloc_stack (gpointer* monoui, gpointer codebegin, gpointer nextip, guint size ) +{ + PMonoUnwindInfo unwindinfo; + PUNWIND_CODE unwindcode; + guchar codeindex; + guchar codesneeded; + if (!*monoui) + mono_arch_unwindinfo_create (monoui); + + unwindinfo = (MonoUnwindInfo*)*monoui; + + if (size < 
0x8) + g_error ("Stack allocation must be equal to or greater than 0x8."); + + if (size <= 0x80) + codesneeded = 1; + else if (size <= 0x7FFF8) + codesneeded = 2; + else + codesneeded = 3; + + if (unwindinfo->unwindInfo.CountOfCodes + codesneeded > MONO_MAX_UNWIND_CODES) + g_error ("Larger allocation needed for the unwind information."); + + codeindex = MONO_MAX_UNWIND_CODES - (unwindinfo->unwindInfo.CountOfCodes += codesneeded); + unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex]; + + if (codesneeded == 1) { + /*The size of the allocation is + (the number in the OpInfo member) times 8 plus 8*/ + unwindcode->OpInfo = (size - 8)/8; + unwindcode->UnwindOp = 2; /*UWOP_ALLOC_SMALL*/ + } + else { + if (codesneeded == 3) { + /*the unscaled size of the allocation is recorded + in the next two slots in little-endian format*/ + *((unsigned int*)(&unwindcode->FrameOffset)) = size; + unwindcode += 2; + unwindcode->OpInfo = 1; + } + else { + /*the size of the allocation divided by 8 + is recorded in the next slot*/ + unwindcode->FrameOffset = size/8; + unwindcode++; + unwindcode->OpInfo = 0; + + } + unwindcode->UnwindOp = 1; /*UWOP_ALLOC_LARGE*/ + } + + unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin)); + + if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset) + g_error ("Adding unwind info in wrong order."); + + unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset; +} + +guint +mono_arch_unwindinfo_get_size (gpointer monoui) +{ + PMonoUnwindInfo unwindinfo; + if (!monoui) + return 0; + + unwindinfo = (MonoUnwindInfo*)monoui; + return (8 + sizeof (MonoUnwindInfo)) - + (sizeof (UNWIND_CODE) * (MONO_MAX_UNWIND_CODES - unwindinfo->unwindInfo.CountOfCodes)); +} + +void +mono_arch_unwindinfo_install_unwind_info (gpointer* monoui, gpointer code, guint code_size) +{ + PMonoUnwindInfo unwindinfo, targetinfo; + guchar codecount; + guint64 targetlocation; + if (!*monoui) + return; + + unwindinfo = (MonoUnwindInfo*)*monoui; + targetlocation = (guint64)&(((guchar*)code)[code_size]); + targetinfo = (PMonoUnwindInfo) ALIGN_TO(targetlocation, 8); + + unwindinfo->runtimeFunction.EndAddress = code_size; + unwindinfo->runtimeFunction.UnwindData = ((guchar*)&targetinfo->unwindInfo) - ((guchar*)code); + + memcpy (targetinfo, unwindinfo, sizeof (MonoUnwindInfo) - (sizeof (UNWIND_CODE) * MONO_MAX_UNWIND_CODES)); + + codecount = unwindinfo->unwindInfo.CountOfCodes; + if (codecount) { + memcpy (&targetinfo->unwindInfo.UnwindCode[0], &unwindinfo->unwindInfo.UnwindCode[MONO_MAX_UNWIND_CODES-codecount], + sizeof (UNWIND_CODE) * unwindinfo->unwindInfo.CountOfCodes); + } + + g_free (unwindinfo); + *monoui = 0; + + RtlAddFunctionTable (&targetinfo->runtimeFunction, 1, (DWORD64)code); +} + +#endif + + +
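Editor's appendix: a hedged sketch of how a Win64 prolog emitter might drive the
mono_arch_unwindinfo_* helpers added above. The helper signatures and the
amd64_* emitter macros are the ones used in this file; emit_prolog_sketch
itself, and the choice of a 0x80-byte frame, are hypothetical.

#ifdef PLATFORM_WIN32
static guint8*
emit_prolog_sketch (guint8 *code)
{
	guint8 *start = code;
	gpointer unwindinfo = NULL;	/* allocated lazily by the helpers */

	/* push %rbp, and record it so the OS unwinder can undo the push */
	amd64_push_reg (code, AMD64_RBP);
	mono_arch_unwindinfo_add_push_nonvol (&unwindinfo, start, code, AMD64_RBP);

	/* mov %rsp -> %rbp, and declare %rbp as the frame pointer */
	amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, 8);
	mono_arch_unwindinfo_add_set_fpreg (&unwindinfo, start, code, AMD64_RBP);

	/* sub $0x80, %rsp; 0x80 still fits the one-slot UWOP_ALLOC_SMALL case */
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 0x80);
	mono_arch_unwindinfo_add_alloc_stack (&unwindinfo, start, code, 0x80);

	/* ... method body and epilogue are emitted here ... */

	/*
	 * Once the method is finished, publish the function table entry. The
	 * caller is assumed to have reserved mono_arch_unwindinfo_get_size ()
	 * bytes after the code, since the UNWIND_INFO block is copied there.
	 */
	mono_arch_unwindinfo_install_unwind_info (&unwindinfo, start, code - start);

	return start;
}
#endif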