2 * exceptions-amd64.c: exception support for AMD64
5 * Dietmar Maurer (dietmar@ximian.com)
7 * (C) 2001 Ximian, Inc.
14 #ifndef PLATFORM_WIN32
15 #include <sys/ucontext.h>
18 #include <mono/arch/amd64/amd64-codegen.h>
19 #include <mono/metadata/appdomain.h>
20 #include <mono/metadata/tabledefs.h>
21 #include <mono/metadata/threads.h>
22 #include <mono/metadata/threads-types.h>
23 #include <mono/metadata/debug-helpers.h>
24 #include <mono/metadata/exception.h>
25 #include <mono/metadata/gc-internal.h>
26 #include <mono/metadata/mono-debug.h>
27 #include <mono/utils/mono-mmap.h>
30 #include "mini-amd64.h"
32 #define ALIGN_TO(val,align) (((val) + ((align) - 1)) & ~((align) - 1))
35 static MonoW32ExceptionHandler fpe_handler;
36 static MonoW32ExceptionHandler ill_handler;
37 static MonoW32ExceptionHandler segv_handler;
39 static LPTOP_LEVEL_EXCEPTION_FILTER old_handler;
41 #define W32_SEH_HANDLE_EX(_ex) \
42 if (_ex##_handler) _ex##_handler((int)sctx)
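/*
 * For illustration only: W32_SEH_HANDLE_EX(segv) expands to roughly
 *
 *     if (segv_handler) segv_handler ((int)sctx);
 *
 * i.e. it dispatches to the per-signal handler installed through
 * win32_seh_set_handler() below, passing the MonoContext that was filled in
 * from the Win32 CONTEXT record.
 */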
45 * Unhandled Exception Filter
46 * Top-level per-process exception handler.
48 LONG CALLBACK seh_handler(EXCEPTION_POINTERS* ep)
55 res = EXCEPTION_CONTINUE_EXECUTION;
57 er = ep->ExceptionRecord;
58 ctx = ep->ContextRecord;
59 sctx = g_malloc(sizeof(MonoContext));
61 /* Copy Win32 context to UNIX style context */
76 switch (er->ExceptionCode) {
77 case EXCEPTION_ACCESS_VIOLATION:
78 W32_SEH_HANDLE_EX(segv);
80 case EXCEPTION_ILLEGAL_INSTRUCTION:
81 W32_SEH_HANDLE_EX(ill);
83 case EXCEPTION_INT_DIVIDE_BY_ZERO:
84 case EXCEPTION_INT_OVERFLOW:
85 case EXCEPTION_FLT_DIVIDE_BY_ZERO:
86 case EXCEPTION_FLT_OVERFLOW:
87 case EXCEPTION_FLT_UNDERFLOW:
88 case EXCEPTION_FLT_INEXACT_RESULT:
89 W32_SEH_HANDLE_EX(fpe);
95 /* Copy context back */
100 ctx->Rbp = sctx->rbp;
101 ctx->Rsp = sctx->rsp;
102 ctx->Rsi = sctx->rsi;
103 ctx->Rdi = sctx->rdi;
104 ctx->Rip = sctx->rip;
111 void win32_seh_init()
113 old_handler = SetUnhandledExceptionFilter(seh_handler);
116 void win32_seh_cleanup()
118 if (old_handler) SetUnhandledExceptionFilter(old_handler);
121 void win32_seh_set_handler(int type, MonoW32ExceptionHandler handler)
125 fpe_handler = handler;
128 ill_handler = handler;
131 segv_handler = handler;
138 #endif /* PLATFORM_WIN32 */
141 * mono_arch_get_restore_context:
143 * Returns a pointer to a method which restores a previously saved sigcontext.
146 mono_arch_get_restore_context (void)
148 static guint8 *start = NULL;
149 static gboolean inited = FALSE;
155 /* restore_context (MonoContext *ctx) */
157 start = code = mono_global_codeman_reserve (256);
159 amd64_mov_reg_reg (code, AMD64_R11, AMD64_ARG_REG1, 8);
161 /* Restore all registers except %rip and %r11 */
162 amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rax), 8);
163 amd64_mov_reg_membase (code, AMD64_RCX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rcx), 8);
164 amd64_mov_reg_membase (code, AMD64_RDX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rdx), 8);
165 amd64_mov_reg_membase (code, AMD64_RBX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rbx), 8);
166 amd64_mov_reg_membase (code, AMD64_RBP, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rbp), 8);
167 amd64_mov_reg_membase (code, AMD64_RSI, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rsi), 8);
168 amd64_mov_reg_membase (code, AMD64_RDI, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rdi), 8);
169 //amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r8), 8);
170 //amd64_mov_reg_membase (code, AMD64_R9, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r9), 8);
171 //amd64_mov_reg_membase (code, AMD64_R10, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r10), 8);
172 amd64_mov_reg_membase (code, AMD64_R12, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r12), 8);
173 amd64_mov_reg_membase (code, AMD64_R13, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r13), 8);
174 amd64_mov_reg_membase (code, AMD64_R14, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r14), 8);
175 amd64_mov_reg_membase (code, AMD64_R15, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r15), 8);
177 amd64_mov_reg_membase (code, AMD64_RSP, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rsp), 8);
179 /* get return address */
180 amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rip), 8);
182 /* jump to the saved IP */
183 amd64_jump_reg (code, AMD64_R11);
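/*
 * A minimal usage sketch (this is how throw_exception() below uses the stub):
 * the generated code takes a MonoContext* and never returns; it reloads the
 * saved registers and jumps to the saved rip.
 *
 *     static void (*restore_context) (MonoContext *);
 *
 *     if (!restore_context)
 *         restore_context = mono_arch_get_restore_context ();
 *     restore_context (&ctx);
 *     g_assert_not_reached ();
 */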
191 * mono_arch_get_call_filter:
193 * Returns a pointer to a method which calls an exception filter. We
194 * also use this function to call finally handlers (we pass NULL as
195 * the @exc object in that case).
198 mono_arch_get_call_filter (void)
200 static guint8 *start;
201 static gboolean inited = FALSE;
209 start = code = mono_global_codeman_reserve (128);
211 /* call_filter (MonoContext *ctx, unsigned long eip) */
214 /* Alloc new frame */
215 amd64_push_reg (code, AMD64_RBP);
216 amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, 8);
218 /* Save callee saved regs */
220 for (i = 0; i < AMD64_NREG; ++i)
221 if (AMD64_IS_CALLEE_SAVED_REG (i)) {
222 amd64_push_reg (code, i);
228 amd64_push_reg (code, AMD64_RBP);
230 /* Make the stack misaligned; the call will make it aligned again */
232 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
235 amd64_mov_reg_membase (code, AMD64_RBP, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rbp), 8);
236 /* load callee saved regs */
237 amd64_mov_reg_membase (code, AMD64_RBX, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rbx), 8);
238 amd64_mov_reg_membase (code, AMD64_R12, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r12), 8);
239 amd64_mov_reg_membase (code, AMD64_R13, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r13), 8);
240 amd64_mov_reg_membase (code, AMD64_R14, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r14), 8);
241 amd64_mov_reg_membase (code, AMD64_R15, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r15), 8);
242 #ifdef PLATFORM_WIN32
243 amd64_mov_reg_membase (code, AMD64_RDI, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rdi), 8);
244 amd64_mov_reg_membase (code, AMD64_RSI, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rsi), 8);
247 /* call the handler */
248 amd64_call_reg (code, AMD64_ARG_REG2);
251 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
254 amd64_pop_reg (code, AMD64_RBP);
256 /* Restore callee saved regs */
257 for (i = AMD64_NREG; i >= 0; --i)
258 if (AMD64_IS_CALLEE_SAVED_REG (i))
259 amd64_pop_reg (code, i);
264 g_assert ((code - start) < 128);
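/*
 * A hedged usage sketch: the stub behaves like a C function with the
 * signature described above, call_filter (MonoContext *ctx, unsigned long eip),
 * where eip is the address of the filter/finally block to run with the
 * register state of ctx (the filter's result comes back in %rax, shown here
 * as an int return value; handler_start is an illustrative name, not a field
 * defined in this file):
 *
 *     int (*call_filter) (MonoContext *, unsigned long) = mono_arch_get_call_filter ();
 *     call_filter (ctx, (unsigned long)handler_start);
 */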
272 * The first few arguments are dummies; they force the remaining arguments to be passed on
273 * the stack, which avoids overwriting the argument registers in the throw trampoline.
276 throw_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
277 guint64 dummy5, guint64 dummy6,
278 MonoObject *exc, guint64 rip, guint64 rsp,
279 guint64 rbx, guint64 rbp, guint64 r12, guint64 r13,
280 guint64 r14, guint64 r15, guint64 rdi, guint64 rsi,
281 guint64 rax, guint64 rcx, guint64 rdx,
284 static void (*restore_context) (MonoContext *);
287 if (!restore_context)
288 restore_context = mono_arch_get_restore_context ();
304 if (!rethrow && mono_debugger_throw_exception ((gpointer)(rip - 8), (gpointer)rsp, exc)) {
306 * The debugger wants us to stop on the `throw' instruction.
307 * By the time we get here, it has already inserted a breakpoint at
308 * rip - 8 (which is the address of the `mov %r15,%rdi ; callq throw').
313 * In case of a rethrow, the JIT is emitting code like this:
315 * mov 0xffffffffffffffd0(%rbp),%rax
319 * Here, restore_context() wouldn't restore the %rax register correctly.
323 restore_context (&ctx);
324 g_assert_not_reached ();
327 /* adjust rip so that it points into the call instruction */
330 if (mono_object_isinst (exc, mono_defaults.exception_class)) {
331 MonoException *mono_ex = (MonoException*)exc;
333 mono_ex->stack_trace = NULL;
335 mono_handle_exception (&ctx, exc, (gpointer)rip, FALSE);
336 restore_context (&ctx);
338 g_assert_not_reached ();
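/*
 * Why the dummy arguments work (a sketch assuming the SysV AMD64 calling
 * convention used on the non-Windows path): the first six integer arguments
 * travel in %rdi, %rsi, %rdx, %rcx, %r8 and %r9, so dummy1-dummy6 soak up all
 * argument registers and everything from 'exc' onwards is read from the
 * stack. The trampoline below can therefore hand over the register state
 * simply by pushing it:
 *
 *     push rethrow, rdx, rcx, rax, rsi, rdi, r15 ... rbx   (stack-passed args)
 *     push <original rsp>, <return address>, <exc>         (rsp, rip, exc)
 *     call throw_exception
 */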
342 get_throw_trampoline (gboolean rethrow)
347 start = code = mono_global_codeman_reserve (64);
351 amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, 8);
353 #ifdef PLATFORM_WIN32
354 // FIXME: Sync with the !WIN32 code below
358 amd64_push_imm (code, 0);
359 amd64_push_imm (code, 0);
360 amd64_push_imm (code, 0);
361 amd64_push_imm (code, 0);
363 /* No need to align stack */
364 //amd64_push_imm (code, 0);
367 amd64_push_imm (code, rethrow);
368 amd64_push_reg (code, AMD64_RDX);
369 amd64_push_reg (code, AMD64_RCX);
370 amd64_push_reg (code, AMD64_RAX);
371 amd64_push_reg (code, AMD64_RSI);
372 amd64_push_reg (code, AMD64_RDI);
373 amd64_push_reg (code, AMD64_R15);
374 amd64_push_reg (code, AMD64_R14);
375 amd64_push_reg (code, AMD64_R13);
376 amd64_push_reg (code, AMD64_R12);
377 amd64_push_reg (code, AMD64_RBP);
378 amd64_push_reg (code, AMD64_RBX);
381 amd64_lea_membase (code, AMD64_RAX, AMD64_R11, 8);
382 amd64_push_reg (code, AMD64_RAX);
385 amd64_push_membase (code, AMD64_R11, 0);
388 amd64_push_reg (code, AMD64_ARG_REG1);
391 amd64_mov_reg_imm (code, AMD64_R11, throw_exception);
392 amd64_call_reg (code, AMD64_R11);
393 amd64_breakpoint (code);
395 g_assert ((code - start) < 64);
401 * mono_arch_get_throw_exception:
403 * Returns a function pointer which can be used to raise
404 * exceptions. The returned function has the following
405 * signature: void (*func) (MonoException *exc);
409 mono_arch_get_throw_exception (void)
411 static guint8* start;
412 static gboolean inited = FALSE;
417 start = get_throw_trampoline (FALSE);
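/*
 * Usage sketch: the returned trampoline can be treated as a C function of
 * type void (*) (MonoException *exc) which does not return, e.g.
 *
 *     void (*throw_fn) (MonoException *) = mono_arch_get_throw_exception ();
 *     throw_fn (exc);    // exc is a MonoException*; control never comes back
 */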
425 mono_arch_get_rethrow_exception (void)
427 static guint8* start;
428 static gboolean inited = FALSE;
433 start = get_throw_trampoline (TRUE);
441 mono_arch_get_throw_exception_by_name (void)
443 static guint8* start;
444 static gboolean inited = FALSE;
450 start = code = mono_global_codeman_reserve (64);
452 /* Not used on amd64 */
453 amd64_breakpoint (code);
459 * mono_arch_get_throw_corlib_exception:
461 * Returns a function pointer which can be used to raise
462 * corlib exceptions. The returned function has the following
463 * signature: void (*func) (guint32 ex_token, guint32 offset);
464 * Here, offset is the offset which needs to be subtracted from the caller IP
465 * to get the IP of the throw. Passing the offset has the advantage that it
466 * needs no relocations in the caller.
469 mono_arch_get_throw_corlib_exception (void)
471 static guint8* start;
472 static gboolean inited = FALSE;
479 start = code = mono_global_codeman_reserve (64);
482 amd64_push_reg (code, AMD64_ARG_REG2);
484 /* Call exception_from_token */
485 amd64_mov_reg_reg (code, AMD64_ARG_REG2, AMD64_ARG_REG1, 8);
486 amd64_mov_reg_imm (code, AMD64_ARG_REG1, mono_defaults.exception_class->image);
487 amd64_mov_reg_imm (code, AMD64_R11, mono_exception_from_token);
488 amd64_call_reg (code, AMD64_R11);
490 /* Compute throw_ip */
491 amd64_pop_reg (code, AMD64_ARG_REG2);
493 amd64_pop_reg (code, AMD64_ARG_REG3);
494 amd64_alu_reg_reg (code, X86_SUB, AMD64_ARG_REG3, AMD64_ARG_REG2);
496 /* Put the throw_ip at the top of the misaligned stack */
497 amd64_push_reg (code, AMD64_ARG_REG3);
499 throw_ex = (guint64)mono_arch_get_throw_exception ();
501 /* Call throw_exception */
502 amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_RAX, 8);
503 amd64_mov_reg_imm (code, AMD64_R11, throw_ex);
504 /* The original IP is on the stack */
505 amd64_jump_reg (code, AMD64_R11);
507 g_assert ((code - start) < 64);
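/*
 * Illustrative note on the offset argument (the real callers are emitted by
 * the JIT): using the return address pushed by the call to this trampoline,
 * the generated code computes
 *
 *     throw_ip = return_address - offset;
 *
 * and hands that to the throw trampoline, so the caller never needs an
 * absolute address (and hence no relocation) for the faulting instruction.
 */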
514 /* mono_arch_find_jit_info:
516 * This function is used to gather information from @ctx. It returns the
517 * MonoJitInfo of the corresponding function, unwinds one stack frame and
518 * stores the resulting context into @new_ctx. It also stores a string
519 * describing the stack location into @trace (if not NULL), and modifies
520 * the @lmf if necessary. @native_offset returns the IP offset from the
521 * start of the function or -1 if that info is not available.
524 mono_arch_find_jit_info (MonoDomain *domain, MonoJitTlsData *jit_tls, MonoJitInfo *res, MonoJitInfo *prev_ji, MonoContext *ctx,
525 MonoContext *new_ctx, char **trace, MonoLMF **lmf, int *native_offset,
530 gpointer ip = MONO_CONTEXT_GET_IP (ctx);
532 /* Avoid costly table lookup during stack overflow */
533 if (prev_ji && (ip > prev_ji->code_start && ((guint8*)ip < ((guint8*)prev_ji->code_start) + prev_ji->code_size)))
536 ji = mono_jit_info_table_find (domain, ip);
545 gboolean omit_fp = (ji->used_regs & (1 << 31)) > 0;
548 if (!ji->method->wrapper_type)
552 * If a method has save_lmf set, then register save/restore code is not generated
553 * by the JIT, so we have to restore callee saved registers from the lmf.
555 if (ji->method->save_lmf) {
559 * *lmf might not point to the LMF pushed by this method, so compute the LMF
563 lmf_addr = (MonoLMF*)ctx->rsp;
565 lmf_addr = (MonoLMF*)(ctx->rbp - sizeof (MonoLMF));
567 new_ctx->rbp = lmf_addr->rbp;
568 new_ctx->rbx = lmf_addr->rbx;
569 new_ctx->r12 = lmf_addr->r12;
570 new_ctx->r13 = lmf_addr->r13;
571 new_ctx->r14 = lmf_addr->r14;
572 new_ctx->r15 = lmf_addr->r15;
575 offset = omit_fp ? 0 : -1;
576 /* restore callee saved registers */
577 for (i = 0; i < AMD64_NREG; i ++)
578 if (AMD64_IS_CALLEE_SAVED_REG (i) && (ji->used_regs & (1 << i))) {
582 reg = *((guint64*)ctx->rsp + offset);
586 reg = *((guint64 *)ctx->rbp + offset);
610 g_assert_not_reached ();
615 if (*lmf && ((*lmf) != jit_tls->first_lmf) && (MONO_CONTEXT_GET_SP (ctx) >= (gpointer)(*lmf)->rsp)) {
616 /* remove any unused lmf */
617 *lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~1);
622 new_ctx->rsp += (ji->used_regs >> 16) & (0x7fff);
623 new_ctx->rip = *((guint64 *)new_ctx->rsp) - 1;
624 /* Pop return address */
628 /* Pop EBP and the return address */
629 new_ctx->rsp = ctx->rbp + (2 * sizeof (gpointer));
630 /* we subtract 1, so that the IP points into the call instruction */
631 new_ctx->rip = *((guint64 *)ctx->rbp + 1) - 1;
632 new_ctx->rbp = *((guint64 *)ctx->rbp);
635 /* Pop arguments off the stack */
637 MonoJitArgumentInfo *arg_info = g_newa (MonoJitArgumentInfo, mono_method_signature (ji->method)->param_count + 1);
639 guint32 stack_to_pop = mono_arch_get_argument_info (mono_method_signature (ji->method), mono_method_signature (ji->method)->param_count, arg_info);
640 new_ctx->rsp += stack_to_pop;
647 if (((guint64)(*lmf)->previous_lmf) & 1) {
648 /* This LMF has the rip field set */
650 } else if ((*lmf)->rsp == 0) {
655 * The rsp field is set just before the call which transitioned to native
656 * code. Obtain the rip from the stack.
658 rip = *(guint64*)((*lmf)->rsp - sizeof (gpointer));
661 ji = mono_jit_info_table_find (domain, (gpointer)rip);
666 /* Trampoline lmf frame */
667 memset (res, 0, sizeof (MonoJitInfo));
668 res->method = (*lmf)->method;
672 new_ctx->rbp = (*lmf)->rbp;
673 new_ctx->rsp = (*lmf)->rsp;
675 new_ctx->rbx = (*lmf)->rbx;
676 new_ctx->r12 = (*lmf)->r12;
677 new_ctx->r13 = (*lmf)->r13;
678 new_ctx->r14 = (*lmf)->r14;
679 new_ctx->r15 = (*lmf)->r15;
681 *lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~1);
683 return ji ? ji : res;
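/*
 * Sketch of how a stack walker might drive this function (simplified; the
 * variable names and termination check are illustrative, and the trailing
 * arguments of the call are elided):
 *
 *     MonoContext ctx = *start_ctx, new_ctx;
 *     MonoJitInfo rji, *ji;
 *
 *     for (;;) {
 *         ji = mono_arch_find_jit_info (domain, jit_tls, &rji, NULL, &ctx,
 *                                       &new_ctx, NULL, &lmf, NULL, ...);
 *         if (!ji)
 *             break;
 *         // inspect ji->method for this frame, then walk to the caller
 *         ctx = new_ctx;
 *     }
 */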
690 * mono_arch_handle_exception:
692 * @ctx: saved processor state
693 * @obj: the exception object
696 mono_arch_handle_exception (void *sigctx, gpointer obj, gboolean test_only)
700 mono_arch_sigctx_to_monoctx (sigctx, &mctx);
702 mono_handle_exception (&mctx, obj, MONO_CONTEXT_GET_IP (&mctx), test_only);
704 mono_arch_monoctx_to_sigctx (&mctx, sigctx);
709 #ifdef MONO_ARCH_USE_SIGACTION
710 static inline guint64*
711 gregs_from_ucontext (ucontext_t *ctx)
714 guint64 *gregs = (guint64 *) &ctx->uc_mcontext;
716 guint64 *gregs = (guint64 *) &ctx->uc_mcontext.gregs;
723 mono_arch_sigctx_to_monoctx (void *sigctx, MonoContext *mctx)
725 #ifdef MONO_ARCH_USE_SIGACTION
726 ucontext_t *ctx = (ucontext_t*)sigctx;
728 guint64 *gregs = gregs_from_ucontext (ctx);
730 mctx->rax = gregs [REG_RAX];
731 mctx->rbx = gregs [REG_RBX];
732 mctx->rcx = gregs [REG_RCX];
733 mctx->rdx = gregs [REG_RDX];
734 mctx->rbp = gregs [REG_RBP];
735 mctx->rsp = gregs [REG_RSP];
736 mctx->rsi = gregs [REG_RSI];
737 mctx->rdi = gregs [REG_RDI];
738 mctx->rip = gregs [REG_RIP];
739 mctx->r12 = gregs [REG_R12];
740 mctx->r13 = gregs [REG_R13];
741 mctx->r14 = gregs [REG_R14];
742 mctx->r15 = gregs [REG_R15];
744 MonoContext *ctx = (MonoContext *)sigctx;
746 mctx->rax = ctx->rax;
747 mctx->rbx = ctx->rbx;
748 mctx->rcx = ctx->rcx;
749 mctx->rdx = ctx->rdx;
750 mctx->rbp = ctx->rbp;
751 mctx->rsp = ctx->rsp;
752 mctx->rsi = ctx->rsi;
753 mctx->rdi = ctx->rdi;
754 mctx->rip = ctx->rip;
755 mctx->r12 = ctx->r12;
756 mctx->r13 = ctx->r13;
757 mctx->r14 = ctx->r14;
758 mctx->r15 = ctx->r15;
763 mono_arch_monoctx_to_sigctx (MonoContext *mctx, void *sigctx)
765 #ifdef MONO_ARCH_USE_SIGACTION
766 ucontext_t *ctx = (ucontext_t*)sigctx;
768 guint64 *gregs = gregs_from_ucontext (ctx);
770 gregs [REG_RAX] = mctx->rax;
771 gregs [REG_RBX] = mctx->rbx;
772 gregs [REG_RCX] = mctx->rcx;
773 gregs [REG_RDX] = mctx->rdx;
774 gregs [REG_RBP] = mctx->rbp;
775 gregs [REG_RSP] = mctx->rsp;
776 gregs [REG_RSI] = mctx->rsi;
777 gregs [REG_RDI] = mctx->rdi;
778 gregs [REG_RIP] = mctx->rip;
779 gregs [REG_R12] = mctx->r12;
780 gregs [REG_R13] = mctx->r13;
781 gregs [REG_R14] = mctx->r14;
782 gregs [REG_R15] = mctx->r15;
784 MonoContext *ctx = (MonoContext *)sigctx;
786 ctx->rax = mctx->rax;
787 ctx->rbx = mctx->rbx;
788 ctx->rcx = mctx->rcx;
789 ctx->rdx = mctx->rdx;
790 ctx->rbp = mctx->rbp;
791 ctx->rsp = mctx->rsp;
792 ctx->rsi = mctx->rsi;
793 ctx->rdi = mctx->rdi;
794 ctx->rip = mctx->rip;
795 ctx->r12 = mctx->r12;
796 ctx->r13 = mctx->r13;
797 ctx->r14 = mctx->r14;
798 ctx->r15 = mctx->r15;
803 mono_arch_ip_from_context (void *sigctx)
806 #ifdef MONO_ARCH_USE_SIGACTION
808 ucontext_t *ctx = (ucontext_t*)sigctx;
810 guint64 *gregs = gregs_from_ucontext (ctx);
812 return (gpointer)gregs [REG_RIP];
814 MonoContext *ctx = sigctx;
815 return (gpointer)ctx->rip;
820 restore_soft_guard_pages (void)
822 MonoJitTlsData *jit_tls = TlsGetValue (mono_jit_tls_id);
823 if (jit_tls->stack_ovf_guard_base)
824 mono_mprotect (jit_tls->stack_ovf_guard_base, jit_tls->stack_ovf_guard_size, MONO_MMAP_NONE);
828 * this function modifies mctx so that when it is restored, it
829 * won't execute starting at mctx.rip, but in a function that
830 * will restore the protection on the soft-guard pages and then return
831 * to continue at mctx.rip.
834 prepare_for_guard_pages (MonoContext *mctx)
837 sp = (gpointer)(mctx->rsp);
839 /* the return addr */
840 sp [0] = (gpointer)(mctx->rip);
841 mctx->rip = (unsigned long)restore_soft_guard_pages;
842 mctx->rsp = (unsigned long)sp;
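/*
 * Conceptually, after prepare_for_guard_pages() the saved context looks like
 * a call into restore_soft_guard_pages() made from the interrupted code:
 *
 *     mctx->rsp -> [ original mctx->rip ]   (acts as the return address)
 *     mctx->rip  = restore_soft_guard_pages
 *
 * so restoring the context re-protects the guard pages and the final 'ret'
 * resumes execution at the original rip.
 */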
846 altstack_handle_and_restore (void *sigctx, gpointer obj, gboolean stack_ovf)
848 void (*restore_context) (MonoContext *);
851 restore_context = mono_arch_get_restore_context ();
852 mono_arch_sigctx_to_monoctx (sigctx, &mctx);
853 mono_handle_exception (&mctx, obj, MONO_CONTEXT_GET_IP (&mctx), FALSE);
855 prepare_for_guard_pages (&mctx);
856 restore_context (&mctx);
860 mono_arch_handle_altstack_exception (void *sigctx, gpointer fault_addr, gboolean stack_ovf)
862 #ifdef MONO_ARCH_USE_SIGACTION
863 MonoException *exc = NULL;
864 ucontext_t *ctx = (ucontext_t*)sigctx;
865 guint64 *gregs = gregs_from_ucontext (ctx);
866 MonoJitInfo *ji = mono_jit_info_table_find (mono_domain_get (), (gpointer)gregs [REG_RIP]);
871 exc = mono_domain_get ()->stack_overflow_ex;
873 mono_handle_native_sigsegv (SIGSEGV, sigctx);
875 /* set up a call frame on the real stack so that control is returned there
876 * and exception handling can continue.
877 * The frame looks like:
881 * 128 is the size of the red zone
883 frame_size = sizeof (ucontext_t) + sizeof (gpointer) * 4 + 128;
886 sp = (gpointer)(gregs [REG_RSP] & ~15);
887 sp = (gpointer)((char*)sp - frame_size);
888 /* the arguments must be aligned */
889 sp [-1] = (gpointer)gregs [REG_RIP];
890 /* may need to adjust pointers in the new struct copy, depending on the OS */
891 memcpy (sp + 4, ctx, sizeof (ucontext_t));
892 /* at the return from the signal handler, execution starts in altstack_handle_and_restore() */
893 gregs [REG_RIP] = (unsigned long)altstack_handle_and_restore;
894 gregs [REG_RSP] = (unsigned long)(sp - 1);
895 gregs [REG_RDI] = (unsigned long)(sp + 4);
896 gregs [REG_RSI] = (guint64)exc;
897 gregs [REG_RDX] = stack_ovf;
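/*
 * A sketch of the fake frame built above (derived from the code; the original
 * layout diagram is omitted in this excerpt). sp is placed below the
 * interrupted code's 128-byte red zone, and on return from the signal handler
 * execution continues on the normal stack as if altstack_handle_and_restore()
 * had been called:
 *
 *     sp[-1]   original rip (fake return address; new REG_RSP = sp - 1)
 *     sp + 4   copy of the ucontext_t, passed in %rdi as sigctx
 *     %rsi     exc (NULL, or the preallocated stack-overflow exception)
 *     %rdx     stack_ovf flag
 */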
902 get_original_ip (void)
904 MonoLMF *lmf = mono_get_lmf ();
908 /* Reset the change to previous_lmf */
909 lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf & ~1);
915 get_throw_pending_exception (void)
917 static guint8* start;
918 static gboolean inited = FALSE;
921 gpointer throw_trampoline;
926 start = code = mono_global_codeman_reserve (128);
928 /* We are in the frame of a managed method after a call */
930 * We would like to throw the pending exception in such a way that it appears
931 * to be thrown from the managed method.
934 /* Save registers which might contain the return value of the call */
935 amd64_push_reg (code, AMD64_RAX);
936 amd64_push_reg (code, AMD64_RDX);
938 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
939 amd64_movsd_membase_reg (code, AMD64_RSP, 0, AMD64_XMM0);
942 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
944 /* Obtain the pending exception */
945 amd64_mov_reg_imm (code, AMD64_R11, mono_thread_get_and_clear_pending_exception);
946 amd64_call_reg (code, AMD64_R11);
948 /* Check if it is NULL, and branch */
949 amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0);
950 br[0] = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE);
952 /* exc != NULL branch */
954 /* Save the exc on the stack */
955 amd64_push_reg (code, AMD64_RAX);
957 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
959 /* Obtain the original ip and clear the flag in previous_lmf */
960 amd64_mov_reg_imm (code, AMD64_R11, get_original_ip);
961 amd64_call_reg (code, AMD64_R11);
964 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, 8, 8);
966 /* Pop saved stuff from the stack */
967 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 6 * 8);
969 /* Set up arguments for the throw trampoline */
971 amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_R11, 8);
972 /* The trampoline expects the caller ip to be pushed on the stack */
973 amd64_push_reg (code, AMD64_RAX);
975 /* Call the throw trampoline */
976 throw_trampoline = mono_arch_get_throw_exception ();
977 amd64_mov_reg_imm (code, AMD64_R11, throw_trampoline);
978 /* We use a jump instead of a call so we can push the original ip on the stack */
979 amd64_jump_reg (code, AMD64_R11);
981 /* ex == NULL branch */
982 mono_amd64_patch (br [0], code);
984 /* Obtain the original ip and clear the flag in previous_lmf */
985 amd64_mov_reg_imm (code, AMD64_R11, get_original_ip);
986 amd64_call_reg (code, AMD64_R11);
987 amd64_mov_reg_reg (code, AMD64_R11, AMD64_RAX, 8);
989 /* Restore registers */
990 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
991 amd64_movsd_reg_membase (code, AMD64_XMM0, AMD64_RSP, 0);
992 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
993 amd64_pop_reg (code, AMD64_RDX);
994 amd64_pop_reg (code, AMD64_RAX);
996 /* Return to original code */
997 amd64_jump_reg (code, AMD64_R11);
999 g_assert ((code - start) < 128);
1007 * Called when a thread receives an async exception while executing unmanaged code.
1008 * Instead of checking for this exception in the managed-to-native wrapper, we hijack
1009 * the return address on the stack to point to a helper routine which throws the
1013 mono_arch_notify_pending_exc (void)
1015 MonoLMF *lmf = mono_get_lmf ();
1021 if ((guint64)lmf->previous_lmf & 1)
1022 /* Already hijacked or trampoline LMF entry */
1025 /* lmf->rsp is set just before making the call which transitions to unmanaged code */
1026 lmf->rip = *(guint64*)(lmf->rsp - 8);
1027 /* Signal that lmf->rip is set */
1028 lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf | 1);
1030 *(gpointer*)(lmf->rsp - 8) = get_throw_pending_exception ();
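/*
 * Sketch of the state after the hijack (based on the code above): when the
 * native call described by the LMF returns, it will 'return' into
 * get_throw_pending_exception() instead of its managed caller:
 *
 *     *(gpointer*)(lmf->rsp - 8) == get_throw_pending_exception ()
 *     lmf->rip                   == the original return address
 *     lmf->previous_lmf          has bit 0 set, marking lmf->rip as valid
 *
 * get_throw_pending_exception() then either throws the pending exception at
 * that original address or, if none is pending, resumes there via the value
 * returned by get_original_ip().
 */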