X-Git-Url: http://wien.tomnetworks.com/gitweb/?a=blobdiff_plain;f=mono%2Fmini%2Fexceptions-amd64.c;h=525ff9867045a5d2febeda428ddbd1eeed250126;hb=3dd824a7c5306569fbbe08e0f3e54f36619822d6;hp=31c7a7a212e683593b7d00d0194da820aeb01a73;hpb=ff228e1c801bda9666b6edab3ee962e05edcf480;p=mono.git

diff --git a/mono/mini/exceptions-amd64.c b/mono/mini/exceptions-amd64.c
index 31c7a7a212e..525ff986704 100644
--- a/mono/mini/exceptions-amd64.c
+++ b/mono/mini/exceptions-amd64.c
@@ -11,19 +11,25 @@
 #include 
 #include 
 #include 
-#include <ucontext.h>
+#ifdef HAVE_UCONTEXT_H
+#include <ucontext.h>
+#endif
 #include 
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
 #include 
+#include 
 #include "mini.h"
 #include "mini-amd64.h"
+#include "tasklets.h"
+#include "debug-mini.h"
 
 #define ALIGN_TO(val,align) (((val) + ((align) - 1)) & ~((align) - 1))
 
@@ -55,15 +61,19 @@ LONG CALLBACK seh_handler(EXCEPTION_POINTERS* ep)
 	sctx = g_malloc(sizeof(MonoContext));
 
 	/* Copy Win32 context to UNIX style context */
-	sctx->eax = ctx->Eax;
-	sctx->ebx = ctx->Ebx;
-	sctx->ecx = ctx->Ecx;
-	sctx->edx = ctx->Edx;
-	sctx->ebp = ctx->Ebp;
-	sctx->esp = ctx->Esp;
-	sctx->esi = ctx->Esi;
-	sctx->edi = ctx->Edi;
-	sctx->eip = ctx->Eip;
+	sctx->rax = ctx->Rax;
+	sctx->rbx = ctx->Rbx;
+	sctx->rcx = ctx->Rcx;
+	sctx->rdx = ctx->Rdx;
+	sctx->rbp = ctx->Rbp;
+	sctx->rsp = ctx->Rsp;
+	sctx->rsi = ctx->Rsi;
+	sctx->rdi = ctx->Rdi;
+	sctx->rip = ctx->Rip;
+	sctx->r12 = ctx->R12;
+	sctx->r13 = ctx->R13;
+	sctx->r14 = ctx->R14;
+	sctx->r15 = ctx->R15;
 
 	switch (er->ExceptionCode) {
 	case EXCEPTION_ACCESS_VIOLATION:
@@ -85,15 +95,24 @@ LONG CALLBACK seh_handler(EXCEPTION_POINTERS* ep)
 	}
 
 	/* Copy context back */
-	ctx->Eax = sctx->eax;
-	ctx->Ebx = sctx->ebx;
-	ctx->Ecx = sctx->ecx;
-	ctx->Edx = sctx->edx;
-	ctx->Ebp = sctx->ebp;
-	ctx->Esp = sctx->esp;
-	ctx->Esi = sctx->esi;
-	ctx->Edi = sctx->edi;
-	ctx->Eip = sctx->eip;
+	/* Nonvolatile */
+	ctx->Rsp = sctx->rsp;
+	ctx->Rdi = sctx->rdi;
+	ctx->Rsi = sctx->rsi;
+	ctx->Rbx = sctx->rbx;
+	ctx->Rbp = sctx->rbp;
+	ctx->R12 = sctx->r12;
+	ctx->R13 = sctx->r13;
+	ctx->R14 = sctx->r14;
+	ctx->R15 = sctx->r15;
+	ctx->Rip = sctx->rip;
+
+	/* Volatile, but should not matter? */
+	ctx->Rax = sctx->rax;
+	ctx->Rcx = sctx->rcx;
+	ctx->Rdx = sctx->rdx;
+
+	g_free (sctx);
 
 	return res;
 }
@@ -133,36 +152,52 @@ void win32_seh_set_handler(int type, MonoW32ExceptionHandler handler)
 * Returns a pointer to a method which restores a previously saved sigcontext.
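 *
 * Editorial note (not part of the commit): the returned stub is used as a
 * noreturn function pointer, roughly
 *
 *   void (*restore_context) (MonoContext *) = mono_get_restore_context ();
 *   restore_context (&ctx);   // reloads every register, then jumps to ctx.rip
 *
 * so the call never returns; control resumes at whatever ctx.rip points to.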
*/ gpointer -mono_arch_get_restore_context (void) +mono_arch_get_restore_context_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot) { - static guint8 *start = NULL; - static gboolean inited = FALSE; + guint8 *start = NULL; guint8 *code; - if (inited) - return start; - /* restore_contect (MonoContext *ctx) */ - start = code = mono_global_codeman_reserve (256); - - /* get return address */ - amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, rip), 8); + *ji = NULL; - /* Restore registers */ - amd64_mov_reg_membase (code, AMD64_RBP, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, rbp), 8); - amd64_mov_reg_membase (code, AMD64_RBX, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, rbx), 8); - amd64_mov_reg_membase (code, AMD64_R12, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, r12), 8); - amd64_mov_reg_membase (code, AMD64_R13, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, r13), 8); - amd64_mov_reg_membase (code, AMD64_R14, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, r14), 8); - amd64_mov_reg_membase (code, AMD64_R15, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, r15), 8); + start = code = mono_global_codeman_reserve (256); - amd64_mov_reg_membase (code, AMD64_RSP, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, rsp), 8); + amd64_mov_reg_reg (code, AMD64_R11, AMD64_ARG_REG1, 8); + + /* Restore all registers except %rip and %r11 */ + amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rax), 8); + amd64_mov_reg_membase (code, AMD64_RCX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rcx), 8); + amd64_mov_reg_membase (code, AMD64_RDX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rdx), 8); + amd64_mov_reg_membase (code, AMD64_RBX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rbx), 8); + amd64_mov_reg_membase (code, AMD64_RBP, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rbp), 8); + amd64_mov_reg_membase (code, AMD64_RSI, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rsi), 8); + amd64_mov_reg_membase (code, AMD64_RDI, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rdi), 8); + //amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r8), 8); + //amd64_mov_reg_membase (code, AMD64_R9, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r9), 8); + //amd64_mov_reg_membase (code, AMD64_R10, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r10), 8); + amd64_mov_reg_membase (code, AMD64_R12, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r12), 8); + amd64_mov_reg_membase (code, AMD64_R13, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r13), 8); + amd64_mov_reg_membase (code, AMD64_R14, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r14), 8); + amd64_mov_reg_membase (code, AMD64_R15, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r15), 8); + + if (mono_running_on_valgrind ()) { + /* Prevent 'Address 0x... is just below the stack ptr.' errors */ + amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rsp), 8); + amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rip), 8); + amd64_mov_reg_reg (code, AMD64_RSP, AMD64_R8, 8); + } else { + amd64_mov_reg_membase (code, AMD64_RSP, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rsp), 8); + /* get return address */ + amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rip), 8); + } /* jump to the saved IP */ - amd64_jump_reg (code, AMD64_RAX); + amd64_jump_reg (code, AMD64_R11); + + mono_arch_flush_icache (start, code - start); - inited = TRUE; + *code_size = code - start; return start; } @@ -175,18 +210,16 @@ mono_arch_get_restore_context (void) * @exc object in this case). 
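 *
 * Editorial note (illustrative, simplified; the exact return type lives in
 * mini.h): the generated stub is invoked as
 *
 *   res = call_filter (&ctx, handler_start);
 *
 * it installs the callee-saved registers from @ctx, calls the code at the
 * given ip, then restores the caller's own registers and hands back the
 * filter's result.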
*/ gpointer -mono_arch_get_call_filter (void) +mono_arch_get_call_filter_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot) { - static guint8 *start; - static gboolean inited = FALSE; + guint8 *start; int i; guint8 *code; guint32 pos; - if (inited) - return start; + *ji = NULL; - start = code = mono_global_codeman_reserve (64); + start = code = mono_global_codeman_reserve (128); /* call_filter (MonoContext *ctx, unsigned long eip) */ code = start; @@ -212,16 +245,20 @@ mono_arch_get_call_filter (void) amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8); /* set new EBP */ - amd64_mov_reg_membase (code, AMD64_RBP, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, rbp), 8); + amd64_mov_reg_membase (code, AMD64_RBP, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rbp), 8); /* load callee saved regs */ - amd64_mov_reg_membase (code, AMD64_RBX, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, rbx), 8); - amd64_mov_reg_membase (code, AMD64_R12, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, r12), 8); - amd64_mov_reg_membase (code, AMD64_R13, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, r13), 8); - amd64_mov_reg_membase (code, AMD64_R14, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, r14), 8); - amd64_mov_reg_membase (code, AMD64_R15, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, r15), 8); + amd64_mov_reg_membase (code, AMD64_RBX, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rbx), 8); + amd64_mov_reg_membase (code, AMD64_R12, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r12), 8); + amd64_mov_reg_membase (code, AMD64_R13, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r13), 8); + amd64_mov_reg_membase (code, AMD64_R14, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r14), 8); + amd64_mov_reg_membase (code, AMD64_R15, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r15), 8); +#ifdef PLATFORM_WIN32 + amd64_mov_reg_membase (code, AMD64_RDI, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rdi), 8); + amd64_mov_reg_membase (code, AMD64_RSI, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rsi), 8); +#endif /* call the handler */ - amd64_call_reg (code, AMD64_RSI); + amd64_call_reg (code, AMD64_ARG_REG2); if (! (pos & 8)) amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8); @@ -237,26 +274,33 @@ mono_arch_get_call_filter (void) amd64_leave (code); amd64_ret (code); - g_assert ((code - start) < 64); + g_assert ((code - start) < 128); + + mono_arch_flush_icache (start, code - start); - inited = TRUE; + *code_size = code - start; return start; } -static void -throw_exception (MonoObject *exc, guint64 rip, guint64 rsp, - guint64 rbx, guint64 rbp, guint64 r12, guint64 r13, - guint64 r14, guint64 r15, guint64 rethrow) +/* + * The first few arguments are dummy, to force the other arguments to be passed on + * the stack, this avoids overwriting the argument registers in the throw trampoline. 
+ */
+void
+mono_amd64_throw_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
+							guint64 dummy5, guint64 dummy6,
+							MonoObject *exc, guint64 rip, guint64 rsp,
+							guint64 rbx, guint64 rbp, guint64 r12, guint64 r13,
+							guint64 r14, guint64 r15, guint64 rdi, guint64 rsi,
+							guint64 rax, guint64 rcx, guint64 rdx,
+							guint64 rethrow)
 {
 	static void (*restore_context) (MonoContext *);
 	MonoContext ctx;
 
 	if (!restore_context)
-		restore_context = mono_arch_get_restore_context ();
-
-	/* adjust eip so that it point into the call instruction */
-	rip -= 1;
+		restore_context = mono_get_restore_context ();
 
 	ctx.rsp = rsp;
 	ctx.rip = rip;
@@ -266,20 +310,46 @@ throw_exception (MonoObject *exc, guint64 rip, guint64 rsp,
 	ctx.r13 = r13;
 	ctx.r14 = r14;
 	ctx.r15 = r15;
+	ctx.rdi = rdi;
+	ctx.rsi = rsi;
+	ctx.rax = rax;
+	ctx.rcx = rcx;
+	ctx.rdx = rdx;
 
 	if (mono_object_isinst (exc, mono_defaults.exception_class)) {
 		MonoException *mono_ex = (MonoException*)exc;
 		if (!rethrow)
 			mono_ex->stack_trace = NULL;
 	}
-	mono_handle_exception (&ctx, exc, (gpointer)(rip + 1), FALSE);
+
+	if (mono_debug_using_mono_debugger ()) {
+		guint8 buf [16], *code;
+
+		mono_breakpoint_clean_code (NULL, (gpointer)rip, 8, buf, sizeof (buf));
+		code = buf + 8;
+
+		if (buf [3] == 0xe8) {
+			MonoContext ctx_cp = ctx;
+			ctx_cp.rip = rip - 5;
+
+			if (mono_debugger_handle_exception (&ctx_cp, exc)) {
+				restore_context (&ctx_cp);
+				g_assert_not_reached ();
+			}
+		}
+	}
+
+	/* adjust eip so that it points into the call instruction */
+	ctx.rip -= 1;
+
+	mono_handle_exception (&ctx, exc, (gpointer)rip, FALSE);
 	restore_context (&ctx);
 
 	g_assert_not_reached ();
 }
 
 static gpointer
-get_throw_trampoline (gboolean rethrow)
+get_throw_trampoline (gboolean rethrow, guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
 {
 	guint8* start;
 	guint8 *code;
@@ -288,30 +358,59 @@ get_throw_trampoline (gboolean rethrow)
 
 	code = start;
 
-	/* Exception */
-	amd64_mov_reg_reg (code, AMD64_RDI, AMD64_RDI, 8);
-	/* IP */
-	amd64_mov_reg_membase (code, AMD64_RSI, AMD64_RSP, 0, 8);
-	/* SP */
-	amd64_lea_membase (code, AMD64_RDX, AMD64_RSP, 8);
-	/* Callee saved regs */
-	amd64_mov_reg_reg (code, AMD64_RCX, AMD64_RBX, 8);
-	amd64_mov_reg_reg (code, AMD64_R8, AMD64_RBP, 8);
-	amd64_mov_reg_reg (code, AMD64_R9, AMD64_R12, 8);
-	/* align stack */
-	amd64_push_imm (code, 0);
+	*ji = NULL;
+
+	amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, 8);
+
+	/* reverse order */
 	amd64_push_imm (code, rethrow);
+	amd64_push_reg (code, AMD64_RDX);
+	amd64_push_reg (code, AMD64_RCX);
+	amd64_push_reg (code, AMD64_RAX);
+	amd64_push_reg (code, AMD64_RSI);
+	amd64_push_reg (code, AMD64_RDI);
 	amd64_push_reg (code, AMD64_R15);
 	amd64_push_reg (code, AMD64_R14);
 	amd64_push_reg (code, AMD64_R13);
+	amd64_push_reg (code, AMD64_R12);
+	amd64_push_reg (code, AMD64_RBP);
+	amd64_push_reg (code, AMD64_RBX);
+
+	/* SP */
+	amd64_lea_membase (code, AMD64_RAX, AMD64_R11, 8);
+	amd64_push_reg (code, AMD64_RAX);
+
+	/* IP */
+	amd64_push_membase (code, AMD64_R11, 0);
 
-	amd64_mov_reg_imm (code, AMD64_R11, throw_exception);
+	/* Exception */
+	amd64_push_reg (code, AMD64_ARG_REG1);
+
+#ifdef PLATFORM_WIN32
+	/* align stack */
+	amd64_push_imm (code, 0);
+	amd64_push_imm (code, 0);
+	amd64_push_imm (code, 0);
+	amd64_push_imm (code, 0);
+	amd64_push_imm (code, 0);
+	amd64_push_imm (code, 0);
+#endif
+
+	if (aot) {
+		*ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_throw_exception");
+		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
+	} else {
+		
amd64_mov_reg_imm (code, AMD64_R11, mono_amd64_throw_exception); + } amd64_call_reg (code, AMD64_R11); amd64_breakpoint (code); + mono_arch_flush_icache (start, code - start); + g_assert ((code - start) < 64); + *code_size = code - start; + return start; } @@ -324,52 +423,34 @@ get_throw_trampoline (gboolean rethrow) * */ gpointer -mono_arch_get_throw_exception (void) +mono_arch_get_throw_exception_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot) { - static guint8* start; - static gboolean inited = FALSE; - - if (inited) - return start; - - start = get_throw_trampoline (FALSE); - - inited = TRUE; - - return start; + return get_throw_trampoline (FALSE, code_size, ji, aot); } gpointer -mono_arch_get_rethrow_exception (void) +mono_arch_get_rethrow_exception_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot) { - static guint8* start; - static gboolean inited = FALSE; - - if (inited) - return start; - - start = get_throw_trampoline (TRUE); - - inited = TRUE; - - return start; + return get_throw_trampoline (TRUE, code_size, ji, aot); } gpointer -mono_arch_get_throw_exception_by_name (void) +mono_arch_get_throw_exception_by_name_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot) { - static guint8* start; - static gboolean inited = FALSE; + guint8* start; guint8 *code; - if (inited) - return start; - start = code = mono_global_codeman_reserve (64); + *ji = NULL; + /* Not used on amd64 */ amd64_breakpoint (code); + mono_arch_flush_icache (start, code - start); + + *code_size = code - start; + return start; } @@ -384,47 +465,65 @@ mono_arch_get_throw_exception_by_name (void) * needs no relocations in the caller. */ gpointer -mono_arch_get_throw_corlib_exception (void) +mono_arch_get_throw_corlib_exception_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot) { static guint8* start; - static gboolean inited = FALSE; guint8 *code; guint64 throw_ex; - if (inited) - return start; - start = code = mono_global_codeman_reserve (64); + *ji = NULL; + /* Push throw_ip */ - amd64_push_reg (code, AMD64_RSI); + amd64_push_reg (code, AMD64_ARG_REG2); /* Call exception_from_token */ - amd64_mov_reg_reg (code, AMD64_RSI, AMD64_RDI, 8); - amd64_mov_reg_imm (code, AMD64_RDI, mono_defaults.exception_class->image); - amd64_mov_reg_imm (code, AMD64_R11, mono_exception_from_token); + amd64_mov_reg_reg (code, AMD64_ARG_REG2, AMD64_ARG_REG1, 8); + if (aot) { + *ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_IMAGE, mono_defaults.exception_class->image); + amd64_mov_reg_membase (code, AMD64_ARG_REG1, AMD64_RIP, 0, 8); + *ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_exception_from_token"); + amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8); + } else { + amd64_mov_reg_imm (code, AMD64_ARG_REG1, mono_defaults.exception_class->image); + amd64_mov_reg_imm (code, AMD64_R11, mono_exception_from_token); + } +#ifdef PLATFORM_WIN32 + amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 32); +#endif amd64_call_reg (code, AMD64_R11); +#ifdef PLATFORM_WIN32 + amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 32); +#endif /* Compute throw_ip */ - amd64_pop_reg (code, AMD64_RSI); + amd64_pop_reg (code, AMD64_ARG_REG2); /* return addr */ - amd64_pop_reg (code, AMD64_RDX); - amd64_alu_reg_reg (code, X86_SUB, AMD64_RDX, AMD64_RSI); + amd64_pop_reg (code, AMD64_ARG_REG3); + amd64_alu_reg_reg (code, X86_SUB, AMD64_ARG_REG3, AMD64_ARG_REG2); /* Put the throw_ip at the top of the misaligned stack */ - amd64_push_reg (code, AMD64_RDX); + amd64_push_reg 
(code, AMD64_ARG_REG3); - throw_ex = (guint64)mono_arch_get_throw_exception (); + throw_ex = (guint64)mono_get_throw_exception (); /* Call throw_exception */ - amd64_mov_reg_reg (code, AMD64_RDI, AMD64_RAX, 8); - amd64_mov_reg_imm (code, AMD64_R11, throw_ex); + amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_RAX, 8); + if (aot) { + *ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_throw_exception"); + amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8); + } else { + amd64_mov_reg_imm (code, AMD64_R11, throw_ex); + } /* The original IP is on the stack */ amd64_jump_reg (code, AMD64_R11); g_assert ((code - start) < 64); - inited = TRUE; + mono_arch_flush_icache (start, code - start); + + *code_size = code - start; return start; } @@ -440,11 +539,9 @@ mono_arch_get_throw_corlib_exception (void) */ MonoJitInfo * mono_arch_find_jit_info (MonoDomain *domain, MonoJitTlsData *jit_tls, MonoJitInfo *res, MonoJitInfo *prev_ji, MonoContext *ctx, - MonoContext *new_ctx, char **trace, MonoLMF **lmf, int *native_offset, - gboolean *managed) + MonoContext *new_ctx, MonoLMF **lmf, gboolean *managed) { MonoJitInfo *ji; - int i; gpointer ip = MONO_CONTEXT_GET_IP (ctx); /* Avoid costly table lookup during stack overflow */ @@ -456,109 +553,115 @@ mono_arch_find_jit_info (MonoDomain *domain, MonoJitTlsData *jit_tls, MonoJitInf if (managed) *managed = FALSE; - if (ji != NULL) { - int offset; + *new_ctx = *ctx; - *new_ctx = *ctx; + if (ji != NULL) { + gssize regs [MONO_MAX_IREGS + 1]; + guint8 *cfa; + guint32 unwind_info_len; + guint8 *unwind_info; if (managed) if (!ji->method->wrapper_type) *managed = TRUE; - /* - * Some managed methods like pinvoke wrappers might have save_lmf set. - * In this case, register save/restore code is not generated by the - * JIT, so we have to restore callee saved registers from the lmf. - */ - if (ji->method->save_lmf) { - /* - * We only need to do this if the exception was raised in managed - * code, since otherwise the lmf was already popped of the stack. 
- */ - if (*lmf && (MONO_CONTEXT_GET_BP (ctx) >= (gpointer)(*lmf)->ebp)) { - new_ctx->rbx = (*lmf)->rbx; - new_ctx->r12 = (*lmf)->r12; - new_ctx->r13 = (*lmf)->r13; - new_ctx->r14 = (*lmf)->r14; - new_ctx->r15 = (*lmf)->r15; - } - } - else { - offset = -1; - /* restore caller saved registers */ - for (i = 0; i < AMD64_NREG; i ++) - if (AMD64_IS_CALLEE_SAVED_REG (i) && (ji->used_regs & (1 << i))) { - guint64 reg = *((guint64 *)ctx->SC_EBP + offset); - offset --; - switch (i) { - case AMD64_RBX: - new_ctx->rbx = reg; - break; - case AMD64_R12: - new_ctx->r12 = reg; - break; - case AMD64_R13: - new_ctx->r13 = reg; - break; - case AMD64_R14: - new_ctx->r14 = reg; - break; - case AMD64_R15: - new_ctx->r15 = reg; - break; - default: - g_assert_not_reached (); - } - } - } - - if (*lmf && (MONO_CONTEXT_GET_BP (ctx) >= (gpointer)(*lmf)->ebp)) { + if (ji->from_aot) + unwind_info = mono_aot_get_unwind_info (ji, &unwind_info_len); + else + unwind_info = mono_get_cached_unwind_info (ji->used_regs, &unwind_info_len); + + regs [AMD64_RAX] = new_ctx->rax; + regs [AMD64_RBX] = new_ctx->rbx; + regs [AMD64_RCX] = new_ctx->rcx; + regs [AMD64_RDX] = new_ctx->rdx; + regs [AMD64_RBP] = new_ctx->rbp; + regs [AMD64_RSP] = new_ctx->rsp; + regs [AMD64_RSI] = new_ctx->rsi; + regs [AMD64_RDI] = new_ctx->rdi; + regs [AMD64_RIP] = new_ctx->rip; + regs [AMD64_R12] = new_ctx->r12; + regs [AMD64_R13] = new_ctx->r13; + regs [AMD64_R14] = new_ctx->r14; + regs [AMD64_R15] = new_ctx->r15; + + mono_unwind_frame (unwind_info, unwind_info_len, ji->code_start, + (guint8*)ji->code_start + ji->code_size, + ip, regs, MONO_MAX_IREGS + 1, &cfa); + + new_ctx->rax = regs [AMD64_RAX]; + new_ctx->rbx = regs [AMD64_RBX]; + new_ctx->rcx = regs [AMD64_RCX]; + new_ctx->rdx = regs [AMD64_RDX]; + new_ctx->rbp = regs [AMD64_RBP]; + new_ctx->rsp = regs [AMD64_RSP]; + new_ctx->rsi = regs [AMD64_RSI]; + new_ctx->rdi = regs [AMD64_RDI]; + new_ctx->rip = regs [AMD64_RIP]; + new_ctx->r12 = regs [AMD64_R12]; + new_ctx->r13 = regs [AMD64_R13]; + new_ctx->r14 = regs [AMD64_R14]; + new_ctx->r15 = regs [AMD64_R15]; + + /* The CFA becomes the new SP value */ + new_ctx->rsp = (gssize)cfa; + + /* Adjust IP */ + new_ctx->rip --; + + if (*lmf && ((*lmf) != jit_tls->first_lmf) && (MONO_CONTEXT_GET_SP (ctx) >= (gpointer)(*lmf)->rsp)) { /* remove any unused lmf */ - *lmf = (*lmf)->previous_lmf; + *lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~1); } - /* Pop EBP and the return address */ - new_ctx->SC_ESP = ctx->SC_EBP + (2 * sizeof (gpointer)); - /* we substract 1, so that the IP points into the call instruction */ - new_ctx->SC_EIP = *((guint64 *)ctx->SC_EBP + 1) - 1; - new_ctx->SC_EBP = *((guint64 *)ctx->SC_EBP); - +#ifndef MONO_AMD64_NO_PUSHES /* Pop arguments off the stack */ { MonoJitArgumentInfo *arg_info = g_newa (MonoJitArgumentInfo, mono_method_signature (ji->method)->param_count + 1); guint32 stack_to_pop = mono_arch_get_argument_info (mono_method_signature (ji->method), mono_method_signature (ji->method)->param_count, arg_info); - new_ctx->SC_ESP += stack_to_pop; + new_ctx->rsp += stack_to_pop; } +#endif return ji; } else if (*lmf) { - - *new_ctx = *ctx; + guint64 rip; - if (!(*lmf)->method) + if (((guint64)(*lmf)->previous_lmf) & 1) { + /* This LMF has the rip field set */ + rip = (*lmf)->rip; + } else if ((*lmf)->rsp == 0) { + /* Top LMF entry */ return (gpointer)-1; - - if ((ji = mono_jit_info_table_find (domain, (gpointer)(*lmf)->rip))) { } else { - memset (res, 0, sizeof (MonoJitInfo)); - res->method = (*lmf)->method; + /* + * The rsp field is 
set just before the call which transitioned to native + * code. Obtain the rip from the stack. + */ + rip = *(guint64*)((*lmf)->rsp - sizeof (gpointer)); } - new_ctx->SC_RIP = (*lmf)->rip; - new_ctx->SC_RBP = (*lmf)->ebp; + ji = mono_jit_info_table_find (domain, (gpointer)rip); + if (!ji) { + // FIXME: This can happen with multiple appdomains (bug #444383) + return (gpointer)-1; + } - new_ctx->SC_RBX = (*lmf)->rbx; - new_ctx->SC_R12 = (*lmf)->r12; - new_ctx->SC_R13 = (*lmf)->r13; - new_ctx->SC_R14 = (*lmf)->r14; - new_ctx->SC_R15 = (*lmf)->r15; + new_ctx->rip = rip; + new_ctx->rbp = (*lmf)->rbp; + new_ctx->rsp = (*lmf)->rsp; - /* the lmf is always stored on the stack, so the following - * expression points to a stack location which can be used as ESP */ - new_ctx->SC_ESP = ALIGN_TO ((guint64)&((*lmf)->rip), 16); + new_ctx->rbx = (*lmf)->rbx; + new_ctx->r12 = (*lmf)->r12; + new_ctx->r13 = (*lmf)->r13; + new_ctx->r14 = (*lmf)->r14; + new_ctx->r15 = (*lmf)->r15; +#ifdef PLATFORM_WIN32 + new_ctx->rdi = (*lmf)->rdi; + new_ctx->rsi = (*lmf)->rsi; +#endif - *lmf = (*lmf)->previous_lmf; + *lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~1); return ji ? ji : res; } @@ -575,46 +678,678 @@ mono_arch_find_jit_info (MonoDomain *domain, MonoJitTlsData *jit_tls, MonoJitInf gboolean mono_arch_handle_exception (void *sigctx, gpointer obj, gboolean test_only) { - ucontext_t *ctx = (ucontext_t*)sigctx; MonoContext mctx; - mctx.rax = ctx->uc_mcontext.gregs [REG_RAX]; - mctx.rbx = ctx->uc_mcontext.gregs [REG_RBX]; - mctx.rcx = ctx->uc_mcontext.gregs [REG_RCX]; - mctx.rdx = ctx->uc_mcontext.gregs [REG_RDX]; - mctx.rbp = ctx->uc_mcontext.gregs [REG_RBP]; - mctx.rsp = ctx->uc_mcontext.gregs [REG_RSP]; - mctx.rsi = ctx->uc_mcontext.gregs [REG_RSI]; - mctx.rdi = ctx->uc_mcontext.gregs [REG_RDI]; - mctx.rip = ctx->uc_mcontext.gregs [REG_RIP]; - mctx.r12 = ctx->uc_mcontext.gregs [REG_R12]; - mctx.r13 = ctx->uc_mcontext.gregs [REG_R13]; - mctx.r14 = ctx->uc_mcontext.gregs [REG_R14]; - mctx.r15 = ctx->uc_mcontext.gregs [REG_R15]; - - mono_handle_exception (&mctx, obj, (gpointer)mctx.rip, test_only); - - ctx->uc_mcontext.gregs [REG_RAX] = mctx.rax; - ctx->uc_mcontext.gregs [REG_RBX] = mctx.rbx; - ctx->uc_mcontext.gregs [REG_RCX] = mctx.rcx; - ctx->uc_mcontext.gregs [REG_RDX] = mctx.rdx; - ctx->uc_mcontext.gregs [REG_RBP] = mctx.rbp; - ctx->uc_mcontext.gregs [REG_RSP] = mctx.rsp; - ctx->uc_mcontext.gregs [REG_RSI] = mctx.rsi; - ctx->uc_mcontext.gregs [REG_RDI] = mctx.rdi; - ctx->uc_mcontext.gregs [REG_RIP] = mctx.rip; - ctx->uc_mcontext.gregs [REG_R12] = mctx.r12; - ctx->uc_mcontext.gregs [REG_R13] = mctx.r13; - ctx->uc_mcontext.gregs [REG_R14] = mctx.r14; - ctx->uc_mcontext.gregs [REG_R15] = mctx.r15; + mono_arch_sigctx_to_monoctx (sigctx, &mctx); + + if (mono_debugger_handle_exception (&mctx, (MonoObject *)obj)) + return TRUE; + + mono_handle_exception (&mctx, obj, MONO_CONTEXT_GET_IP (&mctx), test_only); + + mono_arch_monoctx_to_sigctx (&mctx, sigctx); return TRUE; } +#ifdef MONO_ARCH_USE_SIGACTION +static inline guint64* +gregs_from_ucontext (ucontext_t *ctx) +{ +#ifdef __FreeBSD__ + guint64 *gregs = (guint64 *) &ctx->uc_mcontext; +#else + guint64 *gregs = (guint64 *) &ctx->uc_mcontext.gregs; +#endif + + return gregs; +} +#endif +void +mono_arch_sigctx_to_monoctx (void *sigctx, MonoContext *mctx) +{ +#ifdef MONO_ARCH_USE_SIGACTION + ucontext_t *ctx = (ucontext_t*)sigctx; + + guint64 *gregs = gregs_from_ucontext (ctx); + + mctx->rax = gregs [REG_RAX]; + mctx->rbx = gregs [REG_RBX]; + mctx->rcx = gregs [REG_RCX]; + 
mctx->rdx = gregs [REG_RDX];
+	mctx->rbp = gregs [REG_RBP];
+	mctx->rsp = gregs [REG_RSP];
+	mctx->rsi = gregs [REG_RSI];
+	mctx->rdi = gregs [REG_RDI];
+	mctx->rip = gregs [REG_RIP];
+	mctx->r12 = gregs [REG_R12];
+	mctx->r13 = gregs [REG_R13];
+	mctx->r14 = gregs [REG_R14];
+	mctx->r15 = gregs [REG_R15];
+#else
+	MonoContext *ctx = (MonoContext *)sigctx;
+
+	mctx->rax = ctx->rax;
+	mctx->rbx = ctx->rbx;
+	mctx->rcx = ctx->rcx;
+	mctx->rdx = ctx->rdx;
+	mctx->rbp = ctx->rbp;
+	mctx->rsp = ctx->rsp;
+	mctx->rsi = ctx->rsi;
+	mctx->rdi = ctx->rdi;
+	mctx->rip = ctx->rip;
+	mctx->r12 = ctx->r12;
+	mctx->r13 = ctx->r13;
+	mctx->r14 = ctx->r14;
+	mctx->r15 = ctx->r15;
+#endif
+}
+
+void
+mono_arch_monoctx_to_sigctx (MonoContext *mctx, void *sigctx)
+{
+#ifdef MONO_ARCH_USE_SIGACTION
+	ucontext_t *ctx = (ucontext_t*)sigctx;
+
+	guint64 *gregs = gregs_from_ucontext (ctx);
+
+	gregs [REG_RAX] = mctx->rax;
+	gregs [REG_RBX] = mctx->rbx;
+	gregs [REG_RCX] = mctx->rcx;
+	gregs [REG_RDX] = mctx->rdx;
+	gregs [REG_RBP] = mctx->rbp;
+	gregs [REG_RSP] = mctx->rsp;
+	gregs [REG_RSI] = mctx->rsi;
+	gregs [REG_RDI] = mctx->rdi;
+	gregs [REG_RIP] = mctx->rip;
+	gregs [REG_R12] = mctx->r12;
+	gregs [REG_R13] = mctx->r13;
+	gregs [REG_R14] = mctx->r14;
+	gregs [REG_R15] = mctx->r15;
+#else
+	MonoContext *ctx = (MonoContext *)sigctx;
+
+	ctx->rax = mctx->rax;
+	ctx->rbx = mctx->rbx;
+	ctx->rcx = mctx->rcx;
+	ctx->rdx = mctx->rdx;
+	ctx->rbp = mctx->rbp;
+	ctx->rsp = mctx->rsp;
+	ctx->rsi = mctx->rsi;
+	ctx->rdi = mctx->rdi;
+	ctx->rip = mctx->rip;
+	ctx->r12 = mctx->r12;
+	ctx->r13 = mctx->r13;
+	ctx->r14 = mctx->r14;
+	ctx->r15 = mctx->r15;
+#endif
+}
+
 gpointer
 mono_arch_ip_from_context (void *sigctx)
 {
+
+#ifdef MONO_ARCH_USE_SIGACTION
+
 	ucontext_t *ctx = (ucontext_t*)sigctx;
-	return (gpointer)ctx->uc_mcontext.gregs [REG_RIP];
+
+	guint64 *gregs = gregs_from_ucontext (ctx);
+
+	return (gpointer)gregs [REG_RIP];
+#else
+	MonoContext *ctx = sigctx;
+	return (gpointer)ctx->rip;
+#endif
+}
+
+static void
+restore_soft_guard_pages (void)
+{
+	MonoJitTlsData *jit_tls = TlsGetValue (mono_jit_tls_id);
+	if (jit_tls->stack_ovf_guard_base)
+		mono_mprotect (jit_tls->stack_ovf_guard_base, jit_tls->stack_ovf_guard_size, MONO_MMAP_NONE);
+}
+
+/*
+ * this function modifies mctx so that when it is restored, it
+ * won't execute starting at mctx.eip, but in a function that
+ * will restore the protection on the soft-guard pages and return
+ * to continue at mctx.eip.
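+ *
+ * Editorial note: after the rewrite the target stack looks like
+ *
+ *   mctx.rsp -> [ original mctx.rip ]   // fake return address
+ *
+ * with mctx.rip pointing at restore_soft_guard_pages(), so the final ret
+ * in that helper resumes exactly at the interrupted instruction once the
+ * guard pages have been re-protected.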
+ */
+static void
+prepare_for_guard_pages (MonoContext *mctx)
+{
+	gpointer *sp;
+	sp = (gpointer)(mctx->rsp);
+	sp -= 1;
+	/* the return addr */
+	sp [0] = (gpointer)(mctx->rip);
+	mctx->rip = (guint64)restore_soft_guard_pages;
+	mctx->rsp = (guint64)sp;
+}
+
+static void
+altstack_handle_and_restore (void *sigctx, gpointer obj, gboolean stack_ovf)
+{
+	void (*restore_context) (MonoContext *);
+	MonoContext mctx;
+
+	restore_context = mono_get_restore_context ();
+	mono_arch_sigctx_to_monoctx (sigctx, &mctx);
+
+	if (mono_debugger_handle_exception (&mctx, (MonoObject *)obj)) {
+		if (stack_ovf)
+			prepare_for_guard_pages (&mctx);
+		restore_context (&mctx);
+	}
+
+	mono_handle_exception (&mctx, obj, MONO_CONTEXT_GET_IP (&mctx), FALSE);
+	if (stack_ovf)
+		prepare_for_guard_pages (&mctx);
+	restore_context (&mctx);
+}
+
+void
+mono_arch_handle_altstack_exception (void *sigctx, gpointer fault_addr, gboolean stack_ovf)
+{
+#ifdef MONO_ARCH_USE_SIGACTION
+	MonoException *exc = NULL;
+	ucontext_t *ctx = (ucontext_t*)sigctx;
+	guint64 *gregs = gregs_from_ucontext (ctx);
+	MonoJitInfo *ji = mono_jit_info_table_find (mono_domain_get (), (gpointer)gregs [REG_RIP]);
+	gpointer *sp;
+	int frame_size;
+
+	if (stack_ovf)
+		exc = mono_domain_get ()->stack_overflow_ex;
+	if (!ji)
+		mono_handle_native_sigsegv (SIGSEGV, sigctx);
+
+	/* setup a call frame on the real stack so that control is returned there
+	 * and exception handling can continue.
+	 * The frame looks like:
+	 *   ucontext struct
+	 *   ...
+	 *   return ip
+	 * 128 is the size of the red zone
+	 */
+	frame_size = sizeof (ucontext_t) + sizeof (gpointer) * 4 + 128;
+	frame_size += 15;
+	frame_size &= ~15;
+	sp = (gpointer)(gregs [REG_RSP] & ~15);
+	sp = (gpointer)((char*)sp - frame_size);
+	/* the arguments must be aligned */
+	sp [-1] = (gpointer)gregs [REG_RIP];
+	/* may need to adjust pointers in the new struct copy, depending on the OS */
+	memcpy (sp + 4, ctx, sizeof (ucontext_t));
+	/* at the return from the signal handler, execution starts in altstack_handle_and_restore() */
+	gregs [REG_RIP] = (unsigned long)altstack_handle_and_restore;
+	gregs [REG_RSP] = (unsigned long)(sp - 1);
+	gregs [REG_RDI] = (unsigned long)(sp + 4);
+	gregs [REG_RSI] = (guint64)exc;
+	gregs [REG_RDX] = stack_ovf;
+#endif
+}
+
+guint64
+mono_amd64_get_original_ip (void)
+{
+	MonoLMF *lmf = mono_get_lmf ();
+
+	g_assert (lmf);
+
+	/* Reset the change to previous_lmf */
+	lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf & ~1);
+
+	return lmf->rip;
+}
+
+gpointer
+mono_arch_get_throw_pending_exception_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
+{
+	guint8 *code, *start;
+	guint8 *br[1];
+	gpointer throw_trampoline;
+
+	*ji = NULL;
+
+	start = code = mono_global_codeman_reserve (128);
+
+	/* We are in the frame of a managed method after a call */
+	/*
+	 * We would like to throw the pending exception in such a way that it looks to
+	 * be thrown from the managed method.
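+	 *
+	 * Editorial note: %rax, %rdx and %xmm0 are saved below because, in the
+	 * System V AMD64 ABI, those are exactly the registers that can hold the
+	 * managed call's return value, and they must survive the call that
+	 * fetches the pending exception.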
+ */ + + /* Save registers which might contain the return value of the call */ + amd64_push_reg (code, AMD64_RAX); + amd64_push_reg (code, AMD64_RDX); + + amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8); + amd64_movsd_membase_reg (code, AMD64_RSP, 0, AMD64_XMM0); + + /* Align stack */ + amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8); + + /* Obtain the pending exception */ + if (aot) { + *ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_thread_get_and_clear_pending_exception"); + amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8); + } else { + amd64_mov_reg_imm (code, AMD64_R11, mono_thread_get_and_clear_pending_exception); + } + amd64_call_reg (code, AMD64_R11); + + /* Check if it is NULL, and branch */ + amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0); + br[0] = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE); + + /* exc != NULL branch */ + + /* Save the exc on the stack */ + amd64_push_reg (code, AMD64_RAX); + /* Align stack */ + amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8); + + /* Obtain the original ip and clear the flag in previous_lmf */ + if (aot) { + *ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_get_original_ip"); + amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8); + } else { + amd64_mov_reg_imm (code, AMD64_R11, mono_amd64_get_original_ip); + } + amd64_call_reg (code, AMD64_R11); + + /* Load exc */ + amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, 8, 8); + + /* Pop saved stuff from the stack */ + amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 6 * 8); + + /* Setup arguments for the throw trampoline */ + /* Exception */ + amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_R11, 8); + /* The trampoline expects the caller ip to be pushed on the stack */ + amd64_push_reg (code, AMD64_RAX); + + /* Call the throw trampoline */ + if (aot) { + *ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_throw_exception"); + amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8); + } else { + throw_trampoline = mono_get_throw_exception (); + amd64_mov_reg_imm (code, AMD64_R11, throw_trampoline); + } + /* We use a jump instead of a call so we can push the original ip on the stack */ + amd64_jump_reg (code, AMD64_R11); + + /* ex == NULL branch */ + mono_amd64_patch (br [0], code); + + /* Obtain the original ip and clear the flag in previous_lmf */ + if (aot) { + *ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_get_original_ip"); + amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8); + } else { + amd64_mov_reg_imm (code, AMD64_R11, mono_amd64_get_original_ip); + } + amd64_call_reg (code, AMD64_R11); + amd64_mov_reg_reg (code, AMD64_R11, AMD64_RAX, 8); + + /* Restore registers */ + amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8); + amd64_movsd_reg_membase (code, AMD64_XMM0, AMD64_RSP, 0); + amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8); + amd64_pop_reg (code, AMD64_RDX); + amd64_pop_reg (code, AMD64_RAX); + + /* Return to original code */ + amd64_jump_reg (code, AMD64_R11); + + g_assert ((code - start) < 128); + + *code_size = code - start; + + return start; +} + +static gpointer throw_pending_exception; + +/* + * Called when a thread receives an async exception while executing unmanaged code. + * Instead of checking for this exception in the managed-to-native wrapper, we hijack + * the return address on the stack to point to a helper routine which throws the + * exception. 
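+ *
+ * Editorial sketch (simplified restatement of the code below):
+ *
+ *   lmf->rip = *(guint64 *)(lmf->rsp - 8);   // remember the real return ip
+ *   lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf | 1);
+ *   *(gpointer *)(lmf->rsp - 8) = throw_pending_exception;
+ *
+ * mono_amd64_get_original_ip () later clears the tag bit again and returns
+ * the saved rip, so the helper can rethrow at the original call site.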
+ */
+void
+mono_arch_notify_pending_exc (void)
+{
+	MonoLMF *lmf = mono_get_lmf ();
+
+	if (lmf->rsp == 0)
+		/* Initial LMF */
+		return;
+
+	if ((guint64)lmf->previous_lmf & 1)
+		/* Already hijacked or trampoline LMF entry */
+		return;
+
+	/* lmf->rsp is set just before making the call which transitions to unmanaged code */
+	lmf->rip = *(guint64*)(lmf->rsp - 8);
+	/* Signal that lmf->rip is set */
+	lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf | 1);
+
+	*(gpointer*)(lmf->rsp - 8) = throw_pending_exception;
+}
+
+void
+mono_arch_exceptions_init (void)
+{
+	guint32 code_size;
+	MonoJumpInfo *ji;
+
+	if (mono_aot_only) {
+		throw_pending_exception = mono_aot_get_named_code ("throw_pending_exception");
+	} else {
+		/* Call this to avoid initialization races */
+		throw_pending_exception = mono_arch_get_throw_pending_exception_full (&code_size, &ji, FALSE);
+	}
+}
+
+#ifdef PLATFORM_WIN32
+
+/*
+ * The mono_arch_unwindinfo* methods are used to build and add
+ * function table info for each emitted method from mono. On Winx64
+ * the seh handler will not be called if the mono methods are not
+ * added to the function table.
+ *
+ * We should not need to add non-volatile register info to the
+ * table since mono stores that info elsewhere. (Except for the register
+ * used for the fp.)
+ */
+
+#define MONO_MAX_UNWIND_CODES 22
+
+typedef union _UNWIND_CODE {
+	struct {
+		guchar CodeOffset;
+		guchar UnwindOp : 4;
+		guchar OpInfo   : 4;
+	};
+	gushort FrameOffset;
+} UNWIND_CODE, *PUNWIND_CODE;
+
+typedef struct _UNWIND_INFO {
+	guchar Version       : 3;
+	guchar Flags         : 5;
+	guchar SizeOfProlog;
+	guchar CountOfCodes;
+	guchar FrameRegister : 4;
+	guchar FrameOffset   : 4;
+	/* custom size for mono, allowing for: */
+	/*UWOP_PUSH_NONVOL ebp offset = 21*/
+	/*UWOP_ALLOC_LARGE : requires 2 or 3 offset = 20*/
+	/*UWOP_SET_FPREG : requires 2 offset = 17*/
+	/*UWOP_PUSH_NONVOL offset = 15-0*/
+	UNWIND_CODE UnwindCode[MONO_MAX_UNWIND_CODES];
+
+/* 	UNWIND_CODE MoreUnwindCode[((CountOfCodes + 1) & ~1) - 1];
+ * 	union {
+ * 	    OPTIONAL ULONG ExceptionHandler;
+ * 	    OPTIONAL ULONG FunctionEntry;
+ * 	};
+ * 	OPTIONAL ULONG ExceptionData[]; */
+} UNWIND_INFO, *PUNWIND_INFO;
+
+typedef struct
+{
+	RUNTIME_FUNCTION runtimeFunction;
+	UNWIND_INFO unwindInfo;
+} MonoUnwindInfo, *PMonoUnwindInfo;
+
+static void
+mono_arch_unwindinfo_create (gpointer* monoui)
+{
+	PMonoUnwindInfo newunwindinfo;
+	*monoui = newunwindinfo = g_new0 (MonoUnwindInfo, 1);
+	newunwindinfo->unwindInfo.Version = 1;
+}
+
+void
+mono_arch_unwindinfo_add_push_nonvol (gpointer* monoui, gpointer codebegin, gpointer nextip, guchar reg )
+{
+	PMonoUnwindInfo unwindinfo;
+	PUNWIND_CODE unwindcode;
+	guchar codeindex;
+	if (!*monoui)
+		mono_arch_unwindinfo_create (monoui);
+
+	unwindinfo = (MonoUnwindInfo*)*monoui;
+
+	if (unwindinfo->unwindInfo.CountOfCodes >= MONO_MAX_UNWIND_CODES)
+		g_error ("Larger allocation needed for the unwind information.");
+
+	codeindex = MONO_MAX_UNWIND_CODES - (++unwindinfo->unwindInfo.CountOfCodes);
+	unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];
+	unwindcode->UnwindOp = 0; /*UWOP_PUSH_NONVOL*/
+	unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin));
+	unwindcode->OpInfo = reg;
+
+	if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
+		g_error ("Adding unwind info in wrong order.");
+
+	unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
+}
+
+void
+mono_arch_unwindinfo_add_set_fpreg (gpointer* monoui, gpointer codebegin, gpointer nextip, guchar reg )
+{
+	
PMonoUnwindInfo unwindinfo; + PUNWIND_CODE unwindcode; + guchar codeindex; + if (!*monoui) + mono_arch_unwindinfo_create (monoui); + + unwindinfo = (MonoUnwindInfo*)*monoui; + + if (unwindinfo->unwindInfo.CountOfCodes + 1 >= MONO_MAX_UNWIND_CODES) + g_error ("Larger allocation needed for the unwind information."); + + codeindex = MONO_MAX_UNWIND_CODES - (unwindinfo->unwindInfo.CountOfCodes += 2); + unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex]; + unwindcode->FrameOffset = 0; /*Assuming no frame pointer offset for mono*/ + unwindcode++; + unwindcode->UnwindOp = 3; /*UWOP_SET_FPREG*/ + unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin)); + unwindcode->OpInfo = reg; + + unwindinfo->unwindInfo.FrameRegister = reg; + + if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset) + g_error ("Adding unwind info in wrong order."); + + unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset; +} + +void +mono_arch_unwindinfo_add_alloc_stack (gpointer* monoui, gpointer codebegin, gpointer nextip, guint size ) +{ + PMonoUnwindInfo unwindinfo; + PUNWIND_CODE unwindcode; + guchar codeindex; + guchar codesneeded; + if (!*monoui) + mono_arch_unwindinfo_create (monoui); + + unwindinfo = (MonoUnwindInfo*)*monoui; + + if (size < 0x8) + g_error ("Stack allocation must be equal to or greater than 0x8."); + + if (size <= 0x80) + codesneeded = 1; + else if (size <= 0x7FFF8) + codesneeded = 2; + else + codesneeded = 3; + + if (unwindinfo->unwindInfo.CountOfCodes + codesneeded > MONO_MAX_UNWIND_CODES) + g_error ("Larger allocation needed for the unwind information."); + + codeindex = MONO_MAX_UNWIND_CODES - (unwindinfo->unwindInfo.CountOfCodes += codesneeded); + unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex]; + + if (codesneeded == 1) { + /*The size of the allocation is + (the number in the OpInfo member) times 8 plus 8*/ + unwindcode->OpInfo = (size - 8)/8; + unwindcode->UnwindOp = 2; /*UWOP_ALLOC_SMALL*/ + } + else { + if (codesneeded == 3) { + /*the unscaled size of the allocation is recorded + in the next two slots in little-endian format*/ + *((unsigned int*)(&unwindcode->FrameOffset)) = size; + unwindcode += 2; + unwindcode->OpInfo = 1; + } + else { + /*the size of the allocation divided by 8 + is recorded in the next slot*/ + unwindcode->FrameOffset = size/8; + unwindcode++; + unwindcode->OpInfo = 0; + + } + unwindcode->UnwindOp = 1; /*UWOP_ALLOC_LARGE*/ + } + + unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin)); + + if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset) + g_error ("Adding unwind info in wrong order."); + + unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset; +} + +guint +mono_arch_unwindinfo_get_size (gpointer monoui) +{ + PMonoUnwindInfo unwindinfo; + if (!monoui) + return 0; + + unwindinfo = (MonoUnwindInfo*)monoui; + return (8 + sizeof (MonoUnwindInfo)) - + (sizeof (UNWIND_CODE) * (MONO_MAX_UNWIND_CODES - unwindinfo->unwindInfo.CountOfCodes)); +} + +PRUNTIME_FUNCTION +MONO_GET_RUNTIME_FUNCTION_CALLBACK ( DWORD64 ControlPc, IN PVOID Context ) +{ + MonoJitInfo *ji; + guint64 pos; + PMonoUnwindInfo targetinfo; + MonoDomain *domain = mono_domain_get (); + + ji = mono_jit_info_table_find (domain, (char*)ControlPc); + if (!ji) + return 0; + + pos = (guint64)(((char*)ji->code_start) + ji->code_size); + + targetinfo = (PMonoUnwindInfo)ALIGN_TO (pos, 8); + + targetinfo->runtimeFunction.UnwindData = ((DWORD64)&targetinfo->unwindInfo) - ((DWORD64)Context); + + return &targetinfo->runtimeFunction; +} + +void 
+mono_arch_unwindinfo_install_unwind_info (gpointer* monoui, gpointer code, guint code_size) +{ + PMonoUnwindInfo unwindinfo, targetinfo; + guchar codecount; + guint64 targetlocation; + if (!*monoui) + return; + + unwindinfo = (MonoUnwindInfo*)*monoui; + targetlocation = (guint64)&(((guchar*)code)[code_size]); + targetinfo = (PMonoUnwindInfo) ALIGN_TO(targetlocation, 8); + + unwindinfo->runtimeFunction.EndAddress = code_size; + unwindinfo->runtimeFunction.UnwindData = ((guchar*)&targetinfo->unwindInfo) - ((guchar*)code); + + memcpy (targetinfo, unwindinfo, sizeof (MonoUnwindInfo) - (sizeof (UNWIND_CODE) * MONO_MAX_UNWIND_CODES)); + + codecount = unwindinfo->unwindInfo.CountOfCodes; + if (codecount) { + memcpy (&targetinfo->unwindInfo.UnwindCode[0], &unwindinfo->unwindInfo.UnwindCode[MONO_MAX_UNWIND_CODES-codecount], + sizeof (UNWIND_CODE) * unwindinfo->unwindInfo.CountOfCodes); + } + + g_free (unwindinfo); + *monoui = 0; + + RtlInstallFunctionTableCallback (((DWORD64)code) | 0x3, (DWORD64)code, code_size, MONO_GET_RUNTIME_FUNCTION_CALLBACK, code, NULL); +} + +#endif + +#if MONO_SUPPORT_TASKLETS +MonoContinuationRestore +mono_tasklets_arch_restore (void) +{ + static guint8* saved = NULL; + guint8 *code, *start; + int cont_reg = AMD64_R9; /* register usable on both call conventions */ + + if (saved) + return (MonoContinuationRestore)saved; + code = start = mono_global_codeman_reserve (64); + /* the signature is: restore (MonoContinuation *cont, int state, MonoLMF **lmf_addr) */ + /* cont is in AMD64_ARG_REG1 ($rcx or $rdi) + * state is in AMD64_ARG_REG2 ($rdx or $rsi) + * lmf_addr is in AMD64_ARG_REG3 ($r8 or $rdx) + * We move cont to cont_reg since we need both rcx and rdi for the copy + * state is moved to $rax so it's setup as the return value and we can overwrite $rsi + */ + amd64_mov_reg_reg (code, cont_reg, MONO_AMD64_ARG_REG1, 8); + amd64_mov_reg_reg (code, AMD64_RAX, MONO_AMD64_ARG_REG2, 8); + /* setup the copy of the stack */ + amd64_mov_reg_membase (code, AMD64_RCX, cont_reg, G_STRUCT_OFFSET (MonoContinuation, stack_used_size), sizeof (int)); + amd64_shift_reg_imm (code, X86_SHR, AMD64_RCX, 3); + x86_cld (code); + amd64_mov_reg_membase (code, AMD64_RSI, cont_reg, G_STRUCT_OFFSET (MonoContinuation, saved_stack), sizeof (gpointer)); + amd64_mov_reg_membase (code, AMD64_RDI, cont_reg, G_STRUCT_OFFSET (MonoContinuation, return_sp), sizeof (gpointer)); + amd64_prefix (code, X86_REP_PREFIX); + amd64_movsl (code); + + /* now restore the registers from the LMF */ + amd64_mov_reg_membase (code, AMD64_RCX, cont_reg, G_STRUCT_OFFSET (MonoContinuation, lmf), 8); + amd64_mov_reg_membase (code, AMD64_RBX, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rbx), 8); + amd64_mov_reg_membase (code, AMD64_RBP, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rbp), 8); + amd64_mov_reg_membase (code, AMD64_R12, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, r12), 8); + amd64_mov_reg_membase (code, AMD64_R13, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, r13), 8); + amd64_mov_reg_membase (code, AMD64_R14, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, r14), 8); + amd64_mov_reg_membase (code, AMD64_R15, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, r15), 8); +#ifdef PLATFORM_WIN32 + amd64_mov_reg_membase (code, AMD64_RDI, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rdi), 8); + amd64_mov_reg_membase (code, AMD64_RSI, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rsi), 8); +#endif + amd64_mov_reg_membase (code, AMD64_RSP, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rsp), 8); + + /* restore the lmf chain */ + /*x86_mov_reg_membase (code, X86_ECX, X86_ESP, 12, 4); + x86_mov_membase_reg (code, 
X86_ECX, 0, X86_EDX, 4);*/ + + /* state is already in rax */ + amd64_jump_membase (code, cont_reg, G_STRUCT_OFFSET (MonoContinuation, return_ip)); + g_assert ((code - start) <= 64); + saved = start; + return (MonoContinuationRestore)saved; } +#endif
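
Editorial postscript (not part of the diff): several hunks above encode a flag
in the low bit of MonoLMF's previous_lmf pointer: "| 1" marks that lmf->rip is
valid (or that the return address was hijacked), and "& ~1" strips the mark
before following the link. The trick works because LMF structures are at least
pointer-aligned, so bit 0 of a genuine pointer is always zero. A minimal,
self-contained C sketch of the same technique, using a hypothetical Frame type
rather than Mono's real MonoLMF:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for MonoLMF: only the list link and rip matter. */
typedef struct Frame {
	struct Frame *previous; /* low bit doubles as a "rip is valid" flag */
	uintptr_t rip;
} Frame;

static void
set_rip_flag (Frame *f, uintptr_t rip)
{
	f->rip = rip;
	f->previous = (Frame *)((uintptr_t)f->previous | 1);
}

static Frame *
untagged_previous (const Frame *f)
{
	/* mask the tag bit off before dereferencing the link */
	return (Frame *)((uintptr_t)f->previous & ~(uintptr_t)1);
}

static int
rip_flag_set (const Frame *f)
{
	return (int)((uintptr_t)f->previous & 1);
}

int
main (void)
{
	Frame bottom = { NULL, 0 };
	Frame top = { &bottom, 0 };

	/* frames are pointer-aligned, so bit 0 of a real address is free */
	assert (((uintptr_t)&bottom & 1) == 0);

	set_rip_flag (&top, 0x400123);
	assert (rip_flag_set (&top));
	assert (untagged_previous (&top) == &bottom);

	printf ("rip flag ok, saved rip = %#lx\n", (unsigned long)top.rip);
	return 0;
}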