/*
 * exceptions-amd64.c: exception support for AMD64
 *
 * Authors:
 *   Dietmar Maurer (dietmar@ximian.com)
 *
 * (C) 2001 Ximian, Inc.
 */

#include
#include
#include
#include

#ifndef PLATFORM_WIN32
#include
#endif

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#include "mini.h"
#include "mini-amd64.h"

#define ALIGN_TO(val,align) (((val) + ((align) - 1)) & ~((align) - 1))

#ifdef PLATFORM_WIN32
static MonoW32ExceptionHandler fpe_handler;
static MonoW32ExceptionHandler ill_handler;
static MonoW32ExceptionHandler segv_handler;

static LPTOP_LEVEL_EXCEPTION_FILTER old_handler;

#define W32_SEH_HANDLE_EX(_ex) \
	if (_ex##_handler) _ex##_handler((int)sctx)

/*
 * Unhandled Exception Filter
 * Top-level per-process exception handler.
 */
LONG CALLBACK seh_handler(EXCEPTION_POINTERS* ep)
{
	EXCEPTION_RECORD* er;
	CONTEXT* ctx;
	MonoContext* sctx;
	LONG res;

	res = EXCEPTION_CONTINUE_EXECUTION;

	er = ep->ExceptionRecord;
	ctx = ep->ContextRecord;
	sctx = g_malloc(sizeof(MonoContext));

	/* Copy Win32 context to UNIX style context */
	sctx->rax = ctx->Rax;
	sctx->rbx = ctx->Rbx;
	sctx->rcx = ctx->Rcx;
	sctx->rdx = ctx->Rdx;
	sctx->rbp = ctx->Rbp;
	sctx->rsp = ctx->Rsp;
	sctx->rsi = ctx->Rsi;
	sctx->rdi = ctx->Rdi;
	sctx->rip = ctx->Rip;
	sctx->r12 = ctx->R12;
	sctx->r13 = ctx->R13;
	sctx->r14 = ctx->R14;
	sctx->r15 = ctx->R15;

	switch (er->ExceptionCode) {
	case EXCEPTION_ACCESS_VIOLATION:
		W32_SEH_HANDLE_EX(segv);
		break;
	case EXCEPTION_ILLEGAL_INSTRUCTION:
		W32_SEH_HANDLE_EX(ill);
		break;
	case EXCEPTION_INT_DIVIDE_BY_ZERO:
	case EXCEPTION_INT_OVERFLOW:
	case EXCEPTION_FLT_DIVIDE_BY_ZERO:
	case EXCEPTION_FLT_OVERFLOW:
	case EXCEPTION_FLT_UNDERFLOW:
	case EXCEPTION_FLT_INEXACT_RESULT:
		W32_SEH_HANDLE_EX(fpe);
		break;
	default:
		break;
	}

	/* Copy context back */
	ctx->Rax = sctx->rax;
	ctx->Rbx = sctx->rbx;
	ctx->Rcx = sctx->rcx;
	ctx->Rdx = sctx->rdx;
	ctx->Rbp = sctx->rbp;
	ctx->Rsp = sctx->rsp;
	ctx->Rsi = sctx->rsi;
	ctx->Rdi = sctx->rdi;
	ctx->Rip = sctx->rip;

	g_free (sctx);

	return res;
}

void win32_seh_init()
{
	old_handler = SetUnhandledExceptionFilter(seh_handler);
}

void win32_seh_cleanup()
{
	if (old_handler)
		SetUnhandledExceptionFilter(old_handler);
}

void win32_seh_set_handler(int type, MonoW32ExceptionHandler handler)
{
	switch (type) {
	case SIGFPE:
		fpe_handler = handler;
		break;
	case SIGILL:
		ill_handler = handler;
		break;
	case SIGSEGV:
		segv_handler = handler;
		break;
	default:
		break;
	}
}

#endif /* PLATFORM_WIN32 */

/*
 * mono_arch_get_restore_context:
 *
 * Returns a pointer to a method which restores a previously saved sigcontext.
 */
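/*
 * Minimal usage sketch (throw_exception () and altstack_handle_and_restore ()
 * below use the returned code exactly like this): the generated stub behaves
 * like a C function taking a MonoContext* and does not return; execution
 * resumes at ctx->rip with the saved register state.
 *
 *   static void (*restore_context) (MonoContext *);
 *
 *   restore_context = mono_arch_get_restore_context ();
 *   restore_context (&ctx);
 *   g_assert_not_reached ();
 */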
gpointer
mono_arch_get_restore_context (void)
{
	static guint8 *start = NULL;
	static gboolean inited = FALSE;
	guint8 *code;

	if (inited)
		return start;

	/* restore_context (MonoContext *ctx) */

	start = code = mono_global_codeman_reserve (256);

	amd64_mov_reg_reg (code, AMD64_R11, AMD64_ARG_REG1, 8);

	/* Restore all registers except %rip and %r11 */
	amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rax), 8);
	amd64_mov_reg_membase (code, AMD64_RCX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rcx), 8);
	amd64_mov_reg_membase (code, AMD64_RDX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rdx), 8);
	amd64_mov_reg_membase (code, AMD64_RBX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rbx), 8);
	amd64_mov_reg_membase (code, AMD64_RBP, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rbp), 8);
	amd64_mov_reg_membase (code, AMD64_RSI, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rsi), 8);
	amd64_mov_reg_membase (code, AMD64_RDI, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rdi), 8);
	//amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r8), 8);
	//amd64_mov_reg_membase (code, AMD64_R9, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r9), 8);
	//amd64_mov_reg_membase (code, AMD64_R10, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r10), 8);
	amd64_mov_reg_membase (code, AMD64_R12, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r12), 8);
	amd64_mov_reg_membase (code, AMD64_R13, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r13), 8);
	amd64_mov_reg_membase (code, AMD64_R14, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r14), 8);
	amd64_mov_reg_membase (code, AMD64_R15, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r15), 8);
	amd64_mov_reg_membase (code, AMD64_RSP, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rsp), 8);

	/* get return address */
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rip), 8);

	/* jump to the saved IP */
	amd64_jump_reg (code, AMD64_R11);

	inited = TRUE;

	return start;
}

/*
 * mono_arch_get_call_filter:
 *
 * Returns a pointer to a method which calls an exception filter. We
 * also use this function to call finally handlers (we pass NULL as
 * @exc object in this case).
 */
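/*
 * Rough usage sketch based on the prototype documented above (the real
 * caller lives in the architecture-independent exception handling code, so
 * take the exact types and names here as illustrative only):
 *
 *   int (*call_filter) (MonoContext *ctx, unsigned long eip) =
 *           mono_arch_get_call_filter ();
 *
 *   // run a filter clause and obtain its result
 *   filtered = call_filter (&ctx, (unsigned long)filter_start);
 *   // run a finally clause; the return value is ignored
 *   call_filter (&ctx, (unsigned long)finally_start);
 */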
gpointer
mono_arch_get_call_filter (void)
{
	static guint8 *start;
	static gboolean inited = FALSE;
	int i;
	guint8 *code;
	guint32 pos;

	if (inited)
		return start;

	start = code = mono_global_codeman_reserve (128);

	/* call_filter (MonoContext *ctx, unsigned long eip) */
	code = start;

	/* Alloc new frame */
	amd64_push_reg (code, AMD64_RBP);
	amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, 8);

	/* Save callee saved regs */
	pos = 0;
	for (i = 0; i < AMD64_NREG; ++i)
		if (AMD64_IS_CALLEE_SAVED_REG (i)) {
			amd64_push_reg (code, i);
			pos += 8;
		}

	/* Save EBP */
	pos += 8;
	amd64_push_reg (code, AMD64_RBP);

	/* Make stack misaligned, the call will make it aligned again */
	if (! (pos & 8))
		amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);

	/* set new EBP */
	amd64_mov_reg_membase (code, AMD64_RBP, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rbp), 8);
	/* load callee saved regs */
	amd64_mov_reg_membase (code, AMD64_RBX, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rbx), 8);
	amd64_mov_reg_membase (code, AMD64_R12, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r12), 8);
	amd64_mov_reg_membase (code, AMD64_R13, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r13), 8);
	amd64_mov_reg_membase (code, AMD64_R14, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r14), 8);
	amd64_mov_reg_membase (code, AMD64_R15, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r15), 8);
#ifdef PLATFORM_WIN32
	amd64_mov_reg_membase (code, AMD64_RDI, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rdi), 8);
	amd64_mov_reg_membase (code, AMD64_RSI, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rsi), 8);
#endif

	/* call the handler */
	amd64_call_reg (code, AMD64_ARG_REG2);

	if (! (pos & 8))
		amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);

	/* restore RBP */
	amd64_pop_reg (code, AMD64_RBP);

	/* Restore callee saved regs */
	for (i = AMD64_NREG; i >= 0; --i)
		if (AMD64_IS_CALLEE_SAVED_REG (i))
			amd64_pop_reg (code, i);

	amd64_leave (code);
	amd64_ret (code);

	g_assert ((code - start) < 128);

	inited = TRUE;

	return start;
}

/*
 * The first few arguments are dummy, to force the other arguments to be passed on
 * the stack; this avoids overwriting the argument registers in the throw trampoline.
 */
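/*
 * Background for the dummy arguments above (standard System V AMD64 calling
 * convention, stated here for clarity): the first six integer arguments are
 * passed in %rdi, %rsi, %rdx, %rcx, %r8 and %r9, and only the remaining
 * arguments go on the stack. With six dummies in front, every real argument
 * of throw_exception () ends up on the stack, so the throw trampoline can
 * simply push the values it wants to hand over without clobbering any
 * argument register.
 */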
static void
throw_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
		 guint64 dummy5, guint64 dummy6,
		 MonoObject *exc, guint64 rip, guint64 rsp,
		 guint64 rbx, guint64 rbp, guint64 r12, guint64 r13,
		 guint64 r14, guint64 r15, guint64 rdi, guint64 rsi,
		 guint64 rax, guint64 rcx, guint64 rdx,
		 guint64 rethrow)
{
	static void (*restore_context) (MonoContext *);
	MonoContext ctx;

	if (!restore_context)
		restore_context = mono_arch_get_restore_context ();

	ctx.rsp = rsp;
	ctx.rip = rip;
	ctx.rbx = rbx;
	ctx.rbp = rbp;
	ctx.r12 = r12;
	ctx.r13 = r13;
	ctx.r14 = r14;
	ctx.r15 = r15;
	ctx.rdi = rdi;
	ctx.rsi = rsi;
	ctx.rax = rax;
	ctx.rcx = rcx;
	ctx.rdx = rdx;

	if (!rethrow && mono_debugger_throw_exception ((gpointer)(rip - 8), (gpointer)rsp, exc)) {
		/*
		 * The debugger wants us to stop on the `throw' instruction.
		 * By the time we get here, it already inserted a breakpoint on
		 * eip - 8 (which is the address of the `mov %r15,%rdi ; callq throw').
		 */

		/* FIXME FIXME
		 *
		 * In case of a rethrow, the JIT is emitting code like this:
		 *
		 *    mov    0xffffffffffffffd0(%rbp),%rax'
		 *    mov    %rax,%rdi
		 *    callq  throw
		 *
		 * Here, restore_context() wouldn't restore the %rax register correctly.
		 */
		ctx.rip = rip - 8;
		ctx.rsp = rsp + 8;
		restore_context (&ctx);
		g_assert_not_reached ();
	}

	/* adjust eip so that it points into the call instruction */
	ctx.rip -= 1;

	if (mono_object_isinst (exc, mono_defaults.exception_class)) {
		MonoException *mono_ex = (MonoException*)exc;
		if (!rethrow)
			mono_ex->stack_trace = NULL;
	}
	mono_handle_exception (&ctx, exc, (gpointer)rip, FALSE);
	restore_context (&ctx);

	g_assert_not_reached ();
}

static gpointer
get_throw_trampoline (gboolean rethrow)
{
	guint8* start;
	guint8 *code;

	start = code = mono_global_codeman_reserve (64);

	code = start;

	amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, 8);

	/* reverse order */
	amd64_push_imm (code, rethrow);
	amd64_push_reg (code, AMD64_RDX);
	amd64_push_reg (code, AMD64_RCX);
	amd64_push_reg (code, AMD64_RAX);
	amd64_push_reg (code, AMD64_RSI);
	amd64_push_reg (code, AMD64_RDI);
	amd64_push_reg (code, AMD64_R15);
	amd64_push_reg (code, AMD64_R14);
	amd64_push_reg (code, AMD64_R13);
	amd64_push_reg (code, AMD64_R12);
	amd64_push_reg (code, AMD64_RBP);
	amd64_push_reg (code, AMD64_RBX);

	/* SP */
	amd64_lea_membase (code, AMD64_RAX, AMD64_R11, 8);
	amd64_push_reg (code, AMD64_RAX);

	/* IP */
	amd64_push_membase (code, AMD64_R11, 0);

	/* Exception */
	amd64_push_reg (code, AMD64_ARG_REG1);

#ifdef PLATFORM_WIN32
	/* align stack */
	amd64_push_imm (code, 0);
	amd64_push_imm (code, 0);
	amd64_push_imm (code, 0);
	amd64_push_imm (code, 0);
	amd64_push_imm (code, 0);
	amd64_push_imm (code, 0);
#endif

	amd64_mov_reg_imm (code, AMD64_R11, throw_exception);
	amd64_call_reg (code, AMD64_R11);
	amd64_breakpoint (code);

	g_assert ((code - start) < 64);

	return start;
}

/**
 * mono_arch_get_throw_exception:
 *
 * Returns a function pointer which can be used to raise
 * exceptions. The returned function has the following
 * signature: void (*func) (MonoException *exc);
 *
 */
gpointer
mono_arch_get_throw_exception (void)
{
	static guint8* start;
	static gboolean inited = FALSE;

	if (inited)
		return start;

	start = get_throw_trampoline (FALSE);

	inited = TRUE;

	return start;
}

gpointer
mono_arch_get_rethrow_exception (void)
{
	static guint8* start;
	static gboolean inited = FALSE;

	if (inited)
		return start;

	start = get_throw_trampoline (TRUE);

	inited = TRUE;

	return start;
}

gpointer
mono_arch_get_throw_exception_by_name (void)
{
	static guint8* start;
	static gboolean inited = FALSE;
	guint8 *code;

	if (inited)
		return start;

	start = code = mono_global_codeman_reserve (64);

	/* Not used on amd64 */
	amd64_breakpoint (code);

	return start;
}

/**
 * mono_arch_get_throw_corlib_exception:
 *
 * Returns a function pointer which can be used to raise
 * corlib exceptions. The returned function has the following
 * signature: void (*func) (guint32 ex_token, guint32 offset);
 * Here, offset is the offset which needs to be subtracted from the caller IP
 * to get the IP of the throw. Passing the offset has the advantage that it
 * needs no relocations in the caller.
 */
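/*
 * Worked example of the offset convention (the numbers are made up purely
 * for illustration): if the generated call to this trampoline returns to
 * address 0x401020 and the JIT passed offset = 0x20, the trampoline reports
 * 0x401000 as the throw IP. The subtraction happens below in the trampoline
 * itself, so the caller only has to encode a small constant instead of an
 * absolute address that would need a relocation.
 */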
gpointer
mono_arch_get_throw_corlib_exception (void)
{
	static guint8* start;
	static gboolean inited = FALSE;
	guint8 *code;
	guint64 throw_ex;

	if (inited)
		return start;

	start = code = mono_global_codeman_reserve (64);

	/* Push throw_ip */
	amd64_push_reg (code, AMD64_ARG_REG2);

	/* Call exception_from_token */
	amd64_mov_reg_reg (code, AMD64_ARG_REG2, AMD64_ARG_REG1, 8);
	amd64_mov_reg_imm (code, AMD64_ARG_REG1, mono_defaults.exception_class->image);
	amd64_mov_reg_imm (code, AMD64_R11, mono_exception_from_token);
	amd64_call_reg (code, AMD64_R11);

	/* Compute throw_ip */
	amd64_pop_reg (code, AMD64_ARG_REG2);
	/* return addr */
	amd64_pop_reg (code, AMD64_ARG_REG3);
	amd64_alu_reg_reg (code, X86_SUB, AMD64_ARG_REG3, AMD64_ARG_REG2);

	/* Put the throw_ip at the top of the misaligned stack */
	amd64_push_reg (code, AMD64_ARG_REG3);

	throw_ex = (guint64)mono_arch_get_throw_exception ();

	/* Call throw_exception */
	amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_RAX, 8);
	amd64_mov_reg_imm (code, AMD64_R11, throw_ex);
	/* The original IP is on the stack */
	amd64_jump_reg (code, AMD64_R11);

	g_assert ((code - start) < 64);

	inited = TRUE;

	return start;
}

/* mono_arch_find_jit_info:
 *
 * This function is used to gather information from @ctx. It returns the
 * MonoJitInfo of the corresponding function, unwinds one stack frame and
 * stores the resulting context into @new_ctx. It also stores a string
 * describing the stack location into @trace (if not NULL), and modifies
 * the @lmf if necessary. @native_offset returns the IP offset from the
 * start of the function or -1 if that info is not available.
 */
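/*
 * Simplified sketch of how a stack walk drives this function (the real loop
 * lives in the generic unwinder and also handles traces, native offsets and
 * the managed flag; the field and variable names below are illustrative
 * assumptions, not the exact code):
 *
 *   MonoContext ctx, new_ctx;
 *   MonoJitInfo rji, *ji;
 *
 *   for (;;) {
 *       ji = mono_arch_find_jit_info (domain, jit_tls, &rji, NULL, &ctx,
 *                                     &new_ctx, NULL, &lmf, NULL, NULL);
 *       if (!ji || ji == (MonoJitInfo *)-1)
 *           break;
 *       ctx = new_ctx;
 *   }
 */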
MonoJitInfo *
mono_arch_find_jit_info (MonoDomain *domain, MonoJitTlsData *jit_tls, MonoJitInfo *res, MonoJitInfo *prev_ji, MonoContext *ctx,
			 MonoContext *new_ctx, char **trace, MonoLMF **lmf, int *native_offset,
			 gboolean *managed)
{
	MonoJitInfo *ji;
	int i;
	gpointer ip = MONO_CONTEXT_GET_IP (ctx);

	/* Avoid costly table lookup during stack overflow */
	if (prev_ji && (ip > prev_ji->code_start && ((guint8*)ip < ((guint8*)prev_ji->code_start) + prev_ji->code_size)))
		ji = prev_ji;
	else
		ji = mono_jit_info_table_find (domain, ip);

	if (managed)
		*managed = FALSE;

	*new_ctx = *ctx;

	if (ji != NULL) {
		int offset;
		gboolean omit_fp = (ji->used_regs & (1 << 31)) > 0;

		if (managed)
			if (!ji->method->wrapper_type)
				*managed = TRUE;

		/*
		 * If a method has save_lmf set, then register save/restore code is not generated
		 * by the JIT, so we have to restore callee saved registers from the lmf.
		 */
		if (ji->method->save_lmf) {
			MonoLMF *lmf_addr;

			/*
			 * *lmf might not point to the LMF pushed by this method, so compute the LMF
			 * address instead.
			 */
			if (omit_fp)
				lmf_addr = (MonoLMF*)ctx->rsp;
			else
				lmf_addr = (MonoLMF*)(ctx->rbp - sizeof (MonoLMF));

			new_ctx->rbp = lmf_addr->rbp;
			new_ctx->rbx = lmf_addr->rbx;
			new_ctx->r12 = lmf_addr->r12;
			new_ctx->r13 = lmf_addr->r13;
			new_ctx->r14 = lmf_addr->r14;
			new_ctx->r15 = lmf_addr->r15;
		} else {
			offset = omit_fp ? 0 : -1;
			/* restore caller saved registers */
			for (i = 0; i < AMD64_NREG; i ++)
				if (AMD64_IS_CALLEE_SAVED_REG (i) && (ji->used_regs & (1 << i))) {
					guint64 reg;

					if (omit_fp) {
						reg = *((guint64*)ctx->rsp + offset);
						offset ++;
					} else {
						reg = *((guint64 *)ctx->rbp + offset);
						offset --;
					}

					switch (i) {
					case AMD64_RBX:
						new_ctx->rbx = reg;
						break;
					case AMD64_R12:
						new_ctx->r12 = reg;
						break;
					case AMD64_R13:
						new_ctx->r13 = reg;
						break;
					case AMD64_R14:
						new_ctx->r14 = reg;
						break;
					case AMD64_R15:
						new_ctx->r15 = reg;
						break;
					case AMD64_RBP:
						new_ctx->rbp = reg;
						break;
					default:
						g_assert_not_reached ();
					}
				}
		}

		if (*lmf && ((*lmf) != jit_tls->first_lmf) && (MONO_CONTEXT_GET_SP (ctx) >= (gpointer)(*lmf)->rsp)) {
			/* remove any unused lmf */
			*lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~1);
		}

		if (omit_fp) {
			/* Pop frame */
			new_ctx->rsp += (ji->used_regs >> 16) & (0x7fff);

			new_ctx->rip = *((guint64 *)new_ctx->rsp) - 1;
			/* Pop return address */
			new_ctx->rsp += 8;
		} else {
			/* Pop EBP and the return address */
			new_ctx->rsp = ctx->rbp + (2 * sizeof (gpointer));
			/* we subtract 1, so that the IP points into the call instruction */
			new_ctx->rip = *((guint64 *)ctx->rbp + 1) - 1;
			new_ctx->rbp = *((guint64 *)ctx->rbp);
		}

		/* Pop arguments off the stack */
		{
			MonoJitArgumentInfo *arg_info = g_newa (MonoJitArgumentInfo, mono_method_signature (ji->method)->param_count + 1);

			guint32 stack_to_pop = mono_arch_get_argument_info (mono_method_signature (ji->method), mono_method_signature (ji->method)->param_count, arg_info);
			new_ctx->rsp += stack_to_pop;
		}

		return ji;
	} else if (*lmf) {
		guint64 rip;

		if (((guint64)(*lmf)->previous_lmf) & 1) {
			/* This LMF has the rip field set */
			rip = (*lmf)->rip;
		} else if ((*lmf)->rsp == 0) {
			/* Top LMF entry */
			return (gpointer)-1;
		} else {
			/*
			 * The rsp field is set just before the call which transitioned to native
			 * code. Obtain the rip from the stack.
			 */
			rip = *(guint64*)((*lmf)->rsp - sizeof (gpointer));
		}

		ji = mono_jit_info_table_find (domain, (gpointer)rip);
		if (!ji) {
			if (!(*lmf)->method)
				/* Top LMF entry */
				return (gpointer)-1;
			/* Trampoline lmf frame */
			memset (res, 0, sizeof (MonoJitInfo));
			res->method = (*lmf)->method;
		}

		new_ctx->rip = rip;
		new_ctx->rbp = (*lmf)->rbp;
		new_ctx->rsp = (*lmf)->rsp;

		new_ctx->rbx = (*lmf)->rbx;
		new_ctx->r12 = (*lmf)->r12;
		new_ctx->r13 = (*lmf)->r13;
		new_ctx->r14 = (*lmf)->r14;
		new_ctx->r15 = (*lmf)->r15;

		*lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~1);

		return ji ? ji : res;
	}

	return NULL;
}
/**
 * mono_arch_handle_exception:
 *
 * @ctx: saved processor state
 * @obj: the exception object
 */
gboolean
mono_arch_handle_exception (void *sigctx, gpointer obj, gboolean test_only)
{
	MonoContext mctx;

	mono_arch_sigctx_to_monoctx (sigctx, &mctx);

	mono_handle_exception (&mctx, obj, MONO_CONTEXT_GET_IP (&mctx), test_only);

	mono_arch_monoctx_to_sigctx (&mctx, sigctx);

	return TRUE;
}

#ifdef MONO_ARCH_USE_SIGACTION
static inline guint64*
gregs_from_ucontext (ucontext_t *ctx)
{
#ifdef __FreeBSD__
	guint64 *gregs = (guint64 *) &ctx->uc_mcontext;
#else
	guint64 *gregs = (guint64 *) &ctx->uc_mcontext.gregs;
#endif

	return gregs;
}
#endif

void
mono_arch_sigctx_to_monoctx (void *sigctx, MonoContext *mctx)
{
#ifdef MONO_ARCH_USE_SIGACTION
	ucontext_t *ctx = (ucontext_t*)sigctx;

	guint64 *gregs = gregs_from_ucontext (ctx);

	mctx->rax = gregs [REG_RAX];
	mctx->rbx = gregs [REG_RBX];
	mctx->rcx = gregs [REG_RCX];
	mctx->rdx = gregs [REG_RDX];
	mctx->rbp = gregs [REG_RBP];
	mctx->rsp = gregs [REG_RSP];
	mctx->rsi = gregs [REG_RSI];
	mctx->rdi = gregs [REG_RDI];
	mctx->rip = gregs [REG_RIP];
	mctx->r12 = gregs [REG_R12];
	mctx->r13 = gregs [REG_R13];
	mctx->r14 = gregs [REG_R14];
	mctx->r15 = gregs [REG_R15];
#else
	MonoContext *ctx = (MonoContext *)sigctx;

	mctx->rax = ctx->rax;
	mctx->rbx = ctx->rbx;
	mctx->rcx = ctx->rcx;
	mctx->rdx = ctx->rdx;
	mctx->rbp = ctx->rbp;
	mctx->rsp = ctx->rsp;
	mctx->rsi = ctx->rsi;
	mctx->rdi = ctx->rdi;
	mctx->rip = ctx->rip;
	mctx->r12 = ctx->r12;
	mctx->r13 = ctx->r13;
	mctx->r14 = ctx->r14;
	mctx->r15 = ctx->r15;
#endif
}

void
mono_arch_monoctx_to_sigctx (MonoContext *mctx, void *sigctx)
{
#ifdef MONO_ARCH_USE_SIGACTION
	ucontext_t *ctx = (ucontext_t*)sigctx;

	guint64 *gregs = gregs_from_ucontext (ctx);

	gregs [REG_RAX] = mctx->rax;
	gregs [REG_RBX] = mctx->rbx;
	gregs [REG_RCX] = mctx->rcx;
	gregs [REG_RDX] = mctx->rdx;
	gregs [REG_RBP] = mctx->rbp;
	gregs [REG_RSP] = mctx->rsp;
	gregs [REG_RSI] = mctx->rsi;
	gregs [REG_RDI] = mctx->rdi;
	gregs [REG_RIP] = mctx->rip;
	gregs [REG_R12] = mctx->r12;
	gregs [REG_R13] = mctx->r13;
	gregs [REG_R14] = mctx->r14;
	gregs [REG_R15] = mctx->r15;
#else
	MonoContext *ctx = (MonoContext *)sigctx;

	ctx->rax = mctx->rax;
	ctx->rbx = mctx->rbx;
	ctx->rcx = mctx->rcx;
	ctx->rdx = mctx->rdx;
	ctx->rbp = mctx->rbp;
	ctx->rsp = mctx->rsp;
	ctx->rsi = mctx->rsi;
	ctx->rdi = mctx->rdi;
	ctx->rip = mctx->rip;
	ctx->r12 = mctx->r12;
	ctx->r13 = mctx->r13;
	ctx->r14 = mctx->r14;
	ctx->r15 = mctx->r15;
#endif
}

gpointer
mono_arch_ip_from_context (void *sigctx)
{
#ifdef MONO_ARCH_USE_SIGACTION
	ucontext_t *ctx = (ucontext_t*)sigctx;

	guint64 *gregs = gregs_from_ucontext (ctx);

	return (gpointer)gregs [REG_RIP];
#else
	MonoContext *ctx = sigctx;
	return (gpointer)ctx->rip;
#endif
}

static void
restore_soft_guard_pages (void)
{
	MonoJitTlsData *jit_tls = TlsGetValue (mono_jit_tls_id);
	if (jit_tls->stack_ovf_guard_base)
		mono_mprotect (jit_tls->stack_ovf_guard_base, jit_tls->stack_ovf_guard_size, MONO_MMAP_NONE);
}

/*
 * This function modifies mctx so that when it is restored, execution won't
 * start at mctx.eip, but in a function that restores the protection on the
 * soft-guard pages and then returns to continue at mctx.eip.
 */
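/*
 * Concretely (see the code below): a fake return-address slot holding the
 * original mctx->rip is pushed onto the interrupted thread's stack, and
 * mctx->rip is redirected to restore_soft_guard_pages (). When that helper
 * returns, its `ret' pops the saved address, so execution continues at the
 * original fault point with the guard pages re-protected.
 */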
static void
prepare_for_guard_pages (MonoContext *mctx)
{
	gpointer *sp;
	sp = (gpointer)(mctx->rsp);
	sp -= 1;
	/* the return addr */
	sp [0] = (gpointer)(mctx->rip);
	mctx->rip = (guint64)restore_soft_guard_pages;
	mctx->rsp = (guint64)sp;
}

static void
altstack_handle_and_restore (void *sigctx, gpointer obj, gboolean stack_ovf)
{
	void (*restore_context) (MonoContext *);
	MonoContext mctx;

	restore_context = mono_arch_get_restore_context ();
	mono_arch_sigctx_to_monoctx (sigctx, &mctx);
	mono_handle_exception (&mctx, obj, MONO_CONTEXT_GET_IP (&mctx), FALSE);
	if (stack_ovf)
		prepare_for_guard_pages (&mctx);
	restore_context (&mctx);
}

void
mono_arch_handle_altstack_exception (void *sigctx, gpointer fault_addr, gboolean stack_ovf)
{
#ifdef MONO_ARCH_USE_SIGACTION
	MonoException *exc = NULL;
	ucontext_t *ctx = (ucontext_t*)sigctx;
	guint64 *gregs = gregs_from_ucontext (ctx);
	MonoJitInfo *ji = mono_jit_info_table_find (mono_domain_get (), (gpointer)gregs [REG_RIP]);
	gpointer *sp;
	int frame_size;

	if (stack_ovf)
		exc = mono_domain_get ()->stack_overflow_ex;
	if (!ji)
		mono_handle_native_sigsegv (SIGSEGV, sigctx);

	/* setup a call frame on the real stack so that control is returned there
	 * and exception handling can continue.
	 * The frame looks like:
	 *   ucontext struct
	 *   ...
	 *   return ip
	 * 128 is the size of the red zone
	 */
	frame_size = sizeof (ucontext_t) + sizeof (gpointer) * 4 + 128;
	frame_size += 15;
	frame_size &= ~15;
	sp = (gpointer)(gregs [REG_RSP] & ~15);
	sp = (gpointer)((char*)sp - frame_size);
	/* the arguments must be aligned */
	sp [-1] = (gpointer)gregs [REG_RIP];
	/* may need to adjust pointers in the new struct copy, depending on the OS */
	memcpy (sp + 4, ctx, sizeof (ucontext_t));
	/* when the signal handler returns, execution starts in altstack_handle_and_restore() */
	gregs [REG_RIP] = (unsigned long)altstack_handle_and_restore;
	gregs [REG_RSP] = (unsigned long)(sp - 1);
	gregs [REG_RDI] = (unsigned long)(sp + 4);
	gregs [REG_RSI] = (guint64)exc;
	gregs [REG_RDX] = stack_ovf;
#endif
}

static guint64
get_original_ip (void)
{
	MonoLMF *lmf = mono_get_lmf ();

	g_assert (lmf);

	/* Reset the change to previous_lmf */
	lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf & ~1);

	return lmf->rip;
}

static gpointer
get_throw_pending_exception (void)
{
	static guint8* start;
	static gboolean inited = FALSE;
	guint8 *code;
	guint8 *br[1];
	gpointer throw_trampoline;

	if (inited)
		return start;

	start = code = mono_global_codeman_reserve (128);

	/* We are in the frame of a managed method after a call */
	/*
	 * We would like to throw the pending exception in such a way that it looks to
	 * be thrown from the managed method.
	 */
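	/*
	 * Shape of the generated code (summary of the emission below): save the
	 * registers that may hold the call's return value (%rax, %rdx, %xmm0),
	 * call mono_thread_get_and_clear_pending_exception (), and if it returned
	 * an exception, tail-jump to the throw trampoline with the original caller
	 * IP pushed as the return address; otherwise restore the saved registers
	 * and jump back to the original return address obtained from the LMF.
	 */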
	/* Save registers which might contain the return value of the call */
	amd64_push_reg (code, AMD64_RAX);
	amd64_push_reg (code, AMD64_RDX);

	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
	amd64_movsd_membase_reg (code, AMD64_RSP, 0, AMD64_XMM0);

	/* Align stack */
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);

	/* Obtain the pending exception */
	amd64_mov_reg_imm (code, AMD64_R11, mono_thread_get_and_clear_pending_exception);
	amd64_call_reg (code, AMD64_R11);

	/* Check if it is NULL, and branch */
	amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0);
	br[0] = code;
	x86_branch8 (code, X86_CC_EQ, 0, FALSE);

	/* exc != NULL branch */

	/* Save the exc on the stack */
	amd64_push_reg (code, AMD64_RAX);
	/* Align stack */
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);

	/* Obtain the original ip and clear the flag in previous_lmf */
	amd64_mov_reg_imm (code, AMD64_R11, get_original_ip);
	amd64_call_reg (code, AMD64_R11);

	/* Load exc */
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, 8, 8);

	/* Pop saved stuff from the stack */
	amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 6 * 8);

	/* Setup arguments for the throw trampoline */
	/* Exception */
	amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_R11, 8);
	/* The trampoline expects the caller ip to be pushed on the stack */
	amd64_push_reg (code, AMD64_RAX);

	/* Call the throw trampoline */
	throw_trampoline = mono_arch_get_throw_exception ();
	amd64_mov_reg_imm (code, AMD64_R11, throw_trampoline);
	/* We use a jump instead of a call so we can push the original ip on the stack */
	amd64_jump_reg (code, AMD64_R11);

	/* ex == NULL branch */
	mono_amd64_patch (br [0], code);

	/* Obtain the original ip and clear the flag in previous_lmf */
	amd64_mov_reg_imm (code, AMD64_R11, get_original_ip);
	amd64_call_reg (code, AMD64_R11);
	amd64_mov_reg_reg (code, AMD64_R11, AMD64_RAX, 8);

	/* Restore registers */
	amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
	amd64_movsd_reg_membase (code, AMD64_XMM0, AMD64_RSP, 0);
	amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
	amd64_pop_reg (code, AMD64_RDX);
	amd64_pop_reg (code, AMD64_RAX);

	/* Return to original code */
	amd64_jump_reg (code, AMD64_R11);

	g_assert ((code - start) < 128);

	inited = TRUE;

	return start;
}

/*
 * Called when a thread receives an async exception while executing unmanaged code.
 * Instead of checking for this exception in the managed-to-native wrapper, we hijack
 * the return address on the stack to point to a helper routine which throws the
 * exception.
 */
void
mono_arch_notify_pending_exc (void)
{
	MonoLMF *lmf = mono_get_lmf ();

	if (lmf->rsp == 0)
		/* Initial LMF */
		return;

	if ((guint64)lmf->previous_lmf & 1)
		/* Already hijacked or trampoline LMF entry */
		return;

	/* lmf->rsp is set just before making the call which transitions to unmanaged code */
	lmf->rip = *(guint64*)(lmf->rsp - 8);

	/* Signal that lmf->rip is set */
	lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf | 1);

	*(gpointer*)(lmf->rsp - 8) = get_throw_pending_exception ();
}