 * exceptions-amd64.c: exception support for AMD64
 * Dietmar Maurer (dietmar@ximian.com)
 * (C) 2001 Ximian, Inc.

#ifndef PLATFORM_WIN32
#include <sys/ucontext.h>

#include <mono/arch/amd64/amd64-codegen.h>
#include <mono/metadata/appdomain.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/threads.h>
#include <mono/metadata/threads-types.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/exception.h>
#include <mono/metadata/gc-internal.h>
#include <mono/metadata/mono-debug.h>
#include <mono/utils/mono-mmap.h>

#include "mini-amd64.h"

#define ALIGN_TO(val,align) (((val) + ((align) - 1)) & ~((align) - 1))
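
/*
 * Worked example (illustration, not in the original source): ALIGN_TO rounds
 * a value up to the next multiple of a power-of-two alignment.
 *
 *   ALIGN_TO (13, 8)  ==  (13 + 7) & ~7  ==  20 & ~7  ==  16
 *   ALIGN_TO (16, 8)  ==  (16 + 7) & ~7  ==  23 & ~7  ==  16
 */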
static MonoW32ExceptionHandler fpe_handler;
static MonoW32ExceptionHandler ill_handler;
static MonoW32ExceptionHandler segv_handler;

static LPTOP_LEVEL_EXCEPTION_FILTER old_handler;

#define W32_SEH_HANDLE_EX(_ex) \
	if (_ex##_handler) _ex##_handler((int)sctx)
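
/*
 * Expansion example (illustration, not in the original source): the token
 * pasting selects the handler variable that matches the exception kind, e.g.
 *
 *   W32_SEH_HANDLE_EX (segv);
 *     // expands to: if (segv_handler) segv_handler ((int)sctx);
 */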
 * Unhandled Exception Filter
 * Top-level per-process exception handler.
LONG CALLBACK seh_handler(EXCEPTION_POINTERS* ep)
	res = EXCEPTION_CONTINUE_EXECUTION;

	er = ep->ExceptionRecord;
	ctx = ep->ContextRecord;
	sctx = g_malloc(sizeof(MonoContext));

	/* Copy Win32 context to UNIX style context */

	switch (er->ExceptionCode) {
	case EXCEPTION_ACCESS_VIOLATION:
		W32_SEH_HANDLE_EX(segv);
	case EXCEPTION_ILLEGAL_INSTRUCTION:
		W32_SEH_HANDLE_EX(ill);
	case EXCEPTION_INT_DIVIDE_BY_ZERO:
	case EXCEPTION_INT_OVERFLOW:
	case EXCEPTION_FLT_DIVIDE_BY_ZERO:
	case EXCEPTION_FLT_OVERFLOW:
	case EXCEPTION_FLT_UNDERFLOW:
	case EXCEPTION_FLT_INEXACT_RESULT:
		W32_SEH_HANDLE_EX(fpe);

	/* Copy context back */
	ctx->Rbp = sctx->rbp;
	ctx->Rsp = sctx->rsp;
	ctx->Rsi = sctx->rsi;
	ctx->Rdi = sctx->rdi;
	ctx->Rip = sctx->rip;
void win32_seh_init()
	old_handler = SetUnhandledExceptionFilter(seh_handler);

void win32_seh_cleanup()
	if (old_handler) SetUnhandledExceptionFilter(old_handler);

void win32_seh_set_handler(int type, MonoW32ExceptionHandler handler)
		fpe_handler = handler;
		ill_handler = handler;
		segv_handler = handler;

#endif /* PLATFORM_WIN32 */
 * mono_arch_get_restore_context:
 * Returns a pointer to a method which restores a previously saved sigcontext.
mono_arch_get_restore_context (void)
	static guint8 *start = NULL;
	static gboolean inited = FALSE;

	/* restore_context (MonoContext *ctx) */
	start = code = mono_global_codeman_reserve (256);

	amd64_mov_reg_reg (code, AMD64_R11, AMD64_ARG_REG1, 8);

	/* Restore all registers except %rip and %r11 */
	amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rax), 8);
	amd64_mov_reg_membase (code, AMD64_RCX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rcx), 8);
	amd64_mov_reg_membase (code, AMD64_RDX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rdx), 8);
	amd64_mov_reg_membase (code, AMD64_RBX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rbx), 8);
	amd64_mov_reg_membase (code, AMD64_RBP, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rbp), 8);
	amd64_mov_reg_membase (code, AMD64_RSI, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rsi), 8);
	amd64_mov_reg_membase (code, AMD64_RDI, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rdi), 8);
	//amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r8), 8);
	//amd64_mov_reg_membase (code, AMD64_R9, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r9), 8);
	//amd64_mov_reg_membase (code, AMD64_R10, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r10), 8);
	amd64_mov_reg_membase (code, AMD64_R12, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r12), 8);
	amd64_mov_reg_membase (code, AMD64_R13, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r13), 8);
	amd64_mov_reg_membase (code, AMD64_R14, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r14), 8);
	amd64_mov_reg_membase (code, AMD64_R15, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r15), 8);

	amd64_mov_reg_membase (code, AMD64_RSP, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rsp), 8);

	/* get return address */
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rip), 8);

	/* jump to the saved IP */
	amd64_jump_reg (code, AMD64_R11);
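
/*
 * Usage sketch (illustration, not in the original source): callers cache the
 * generated stub in a function pointer and invoke it with a filled-in
 * MonoContext; execution then continues at ctx->rip, so the call never
 * returns.
 *
 *   static void (*restore_context) (MonoContext *);
 *
 *   if (!restore_context)
 *           restore_context = mono_arch_get_restore_context ();
 *   restore_context (&ctx);
 *   g_assert_not_reached ();
 */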
 * mono_arch_get_call_filter:
 * Returns a pointer to a method which calls an exception filter. We
 * also use this function to call finally handlers (we pass NULL as
 * @exc object in this case).
mono_arch_get_call_filter (void)
	static guint8 *start;
	static gboolean inited = FALSE;

	start = code = mono_global_codeman_reserve (128);

	/* call_filter (MonoContext *ctx, unsigned long eip) */

	/* Alloc new frame */
	amd64_push_reg (code, AMD64_RBP);
	amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, 8);

	/* Save callee saved regs */
	for (i = 0; i < AMD64_NREG; ++i)
		if (AMD64_IS_CALLEE_SAVED_REG (i)) {
			amd64_push_reg (code, i);

	amd64_push_reg (code, AMD64_RBP);

	/* Make the stack misaligned; the call will make it aligned again */
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);

	amd64_mov_reg_membase (code, AMD64_RBP, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rbp), 8);
	/* load callee saved regs */
	amd64_mov_reg_membase (code, AMD64_RBX, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rbx), 8);
	amd64_mov_reg_membase (code, AMD64_R12, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r12), 8);
	amd64_mov_reg_membase (code, AMD64_R13, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r13), 8);
	amd64_mov_reg_membase (code, AMD64_R14, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r14), 8);
	amd64_mov_reg_membase (code, AMD64_R15, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r15), 8);
#ifdef PLATFORM_WIN32
	amd64_mov_reg_membase (code, AMD64_RDI, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rdi), 8);
	amd64_mov_reg_membase (code, AMD64_RSI, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rsi), 8);

	/* call the handler */
	amd64_call_reg (code, AMD64_ARG_REG2);

	amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);

	amd64_pop_reg (code, AMD64_RBP);

	/* Restore callee saved regs */
	for (i = AMD64_NREG; i >= 0; --i)
		if (AMD64_IS_CALLEE_SAVED_REG (i))
			amd64_pop_reg (code, i);

	g_assert ((code - start) < 128);
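
/*
 * Usage sketch (illustration, not in the original source): the returned stub
 * behaves like
 *
 *   int (*call_filter) (MonoContext *ctx, gpointer ip);
 *
 * where @ip is the address of a filter or finally block. The stub loads the
 * callee saved registers from @ctx, calls the handler and hands its result
 * back to the caller; finally blocks are invoked the same way, with a NULL
 * exception object supplied by the caller, as noted above.
 */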
 * The first few arguments are dummy, to force the other arguments to be passed on
 * the stack; this avoids overwriting the argument registers in the throw trampoline.
throw_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
		 guint64 dummy5, guint64 dummy6,
		 MonoObject *exc, guint64 rip, guint64 rsp,
		 guint64 rbx, guint64 rbp, guint64 r12, guint64 r13,
		 guint64 r14, guint64 r15, guint64 rdi, guint64 rsi,
		 guint64 rax, guint64 rcx, guint64 rdx,
	static void (*restore_context) (MonoContext *);

	if (!restore_context)
		restore_context = mono_arch_get_restore_context ();

	if (!rethrow && mono_debugger_throw_exception ((gpointer)(rip - 8), (gpointer)rsp, exc)) {
		 * The debugger wants us to stop on the `throw' instruction.
		 * By the time we get here, it already inserted a breakpoint on
		 * rip - 8 (which is the address of the `mov %r15,%rdi ; callq throw').

		 * In case of a rethrow, the JIT is emitting code like this:
		 *    mov    0xffffffffffffffd0(%rbp),%rax
		 * Here, restore_context() wouldn't restore the %rax register correctly.

	restore_context (&ctx);

	g_assert_not_reached ();

	/* adjust rip so that it points into the call instruction */

	if (mono_object_isinst (exc, mono_defaults.exception_class)) {
		MonoException *mono_ex = (MonoException*)exc;
			mono_ex->stack_trace = NULL;

	mono_handle_exception (&ctx, exc, (gpointer)rip, FALSE);
	restore_context (&ctx);

	g_assert_not_reached ();
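
/*
 * Background note (not in the original source): under the System V AMD64
 * calling convention the first six integer arguments travel in %rdi, %rsi,
 * %rdx, %rcx, %r8 and %r9, and only the remaining arguments go on the stack.
 * The six dummy parameters above therefore force every real argument (exc,
 * rip, rsp, the saved registers, ...) onto the stack, so the throw
 * trampoline below can build the call purely with pushes and never has to
 * clobber the argument registers it is trying to preserve.
 */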
get_throw_trampoline (gboolean rethrow)
	start = code = mono_global_codeman_reserve (64);

	amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, 8);

	amd64_push_imm (code, rethrow);
	amd64_push_reg (code, AMD64_RDX);
	amd64_push_reg (code, AMD64_RCX);
	amd64_push_reg (code, AMD64_RAX);
	amd64_push_reg (code, AMD64_RSI);
	amd64_push_reg (code, AMD64_RDI);
	amd64_push_reg (code, AMD64_R15);
	amd64_push_reg (code, AMD64_R14);
	amd64_push_reg (code, AMD64_R13);
	amd64_push_reg (code, AMD64_R12);
	amd64_push_reg (code, AMD64_RBP);
	amd64_push_reg (code, AMD64_RBX);

	amd64_lea_membase (code, AMD64_RAX, AMD64_R11, 8);
	amd64_push_reg (code, AMD64_RAX);

	amd64_push_membase (code, AMD64_R11, 0);

	amd64_push_reg (code, AMD64_ARG_REG1);

#ifdef PLATFORM_WIN32
	amd64_push_imm (code, 0);
	amd64_push_imm (code, 0);
	amd64_push_imm (code, 0);
	amd64_push_imm (code, 0);
	amd64_push_imm (code, 0);
	amd64_push_imm (code, 0);

	amd64_mov_reg_imm (code, AMD64_R11, throw_exception);
	amd64_call_reg (code, AMD64_R11);
	amd64_breakpoint (code);

	g_assert ((code - start) < 64);
 * mono_arch_get_throw_exception:
 * Returns a function pointer which can be used to raise
 * exceptions. The returned function has the following
 * signature: void (*func) (MonoException *exc);
mono_arch_get_throw_exception (void)
	static guint8* start;
	static gboolean inited = FALSE;

	start = get_throw_trampoline (FALSE);
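
/*
 * Usage sketch (illustration, not in the original source): JIT-compiled code
 * and the helpers in this file obtain the trampoline once and call it like
 * an ordinary function taking the exception object.
 *
 *   void (*throw_ex) (MonoException *exc);
 *   MonoException *exc = ...;
 *
 *   throw_ex = mono_arch_get_throw_exception ();
 *   throw_ex (exc);   // unwinds to a handler, does not return here
 */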
mono_arch_get_rethrow_exception (void)
	static guint8* start;
	static gboolean inited = FALSE;

	start = get_throw_trampoline (TRUE);

mono_arch_get_throw_exception_by_name (void)
	static guint8* start;
	static gboolean inited = FALSE;

	start = code = mono_global_codeman_reserve (64);

	/* Not used on amd64 */
	amd64_breakpoint (code);
 * mono_arch_get_throw_corlib_exception:
 * Returns a function pointer which can be used to raise
 * corlib exceptions. The returned function has the following
 * signature: void (*func) (guint32 ex_token, guint32 offset);
 * Here, offset is the offset which needs to be subtracted from the caller IP
 * to get the IP of the throw. Passing the offset has the advantage that it
 * needs no relocations in the caller.
mono_arch_get_throw_corlib_exception (void)
	static guint8* start;
	static gboolean inited = FALSE;

	start = code = mono_global_codeman_reserve (64);

	amd64_push_reg (code, AMD64_ARG_REG2);

	/* Call exception_from_token */
	amd64_mov_reg_reg (code, AMD64_ARG_REG2, AMD64_ARG_REG1, 8);
	amd64_mov_reg_imm (code, AMD64_ARG_REG1, mono_defaults.exception_class->image);
	amd64_mov_reg_imm (code, AMD64_R11, mono_exception_from_token);
	amd64_call_reg (code, AMD64_R11);

	/* Compute throw_ip */
	amd64_pop_reg (code, AMD64_ARG_REG2);
	amd64_pop_reg (code, AMD64_ARG_REG3);
	amd64_alu_reg_reg (code, X86_SUB, AMD64_ARG_REG3, AMD64_ARG_REG2);

	/* Put the throw_ip at the top of the misaligned stack */
	amd64_push_reg (code, AMD64_ARG_REG3);

	throw_ex = (guint64)mono_arch_get_throw_exception ();

	/* Call throw_exception */
	amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_RAX, 8);
	amd64_mov_reg_imm (code, AMD64_R11, throw_ex);
	/* The original IP is on the stack */
	amd64_jump_reg (code, AMD64_R11);

	g_assert ((code - start) < 64);
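
/*
 * Caller-side sketch (illustration, not in the original source): the JIT
 * calls this trampoline with the exception type token and the distance from
 * the throw site to the call instruction, so the stub can recover the throw
 * IP without any relocation in the caller.
 *
 *   void (*throw_corlib) (guint32 ex_token, guint32 offset);
 *
 *   throw_corlib = mono_arch_get_throw_corlib_exception ();
 *   // inside the stub: throw IP = caller IP - offset
 *   throw_corlib (ex_token, offset);
 */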
/* mono_arch_find_jit_info:
 * This function is used to gather information from @ctx. It returns the
 * MonoJitInfo of the corresponding function, unwinds one stack frame and
 * stores the resulting context into @new_ctx. It also stores a string
 * describing the stack location into @trace (if not NULL), and modifies
 * the @lmf if necessary. @native_offset returns the IP offset from the
 * start of the function or -1 if that info is not available.
mono_arch_find_jit_info (MonoDomain *domain, MonoJitTlsData *jit_tls, MonoJitInfo *res, MonoJitInfo *prev_ji, MonoContext *ctx,
			 MonoContext *new_ctx, char **trace, MonoLMF **lmf, int *native_offset,
	gpointer ip = MONO_CONTEXT_GET_IP (ctx);

	/* Avoid costly table lookup during stack overflow */
	if (prev_ji && (ip > prev_ji->code_start && ((guint8*)ip < ((guint8*)prev_ji->code_start) + prev_ji->code_size)))
		ji = mono_jit_info_table_find (domain, ip);

		gboolean omit_fp = (ji->used_regs & (1 << 31)) > 0;

		if (!ji->method->wrapper_type)

		 * If a method has save_lmf set, then register save/restore code is not generated
		 * by the JIT, so we have to restore callee saved registers from the lmf.
		if (ji->method->save_lmf) {
			 * *lmf might not point to the LMF pushed by this method, so compute the LMF
				lmf_addr = (MonoLMF*)ctx->rsp;
				lmf_addr = (MonoLMF*)(ctx->rbp - sizeof (MonoLMF));

			new_ctx->rbp = lmf_addr->rbp;
			new_ctx->rbx = lmf_addr->rbx;
			new_ctx->r12 = lmf_addr->r12;
			new_ctx->r13 = lmf_addr->r13;
			new_ctx->r14 = lmf_addr->r14;
			new_ctx->r15 = lmf_addr->r15;

			offset = omit_fp ? 0 : -1;
			/* restore callee saved registers */
			for (i = 0; i < AMD64_NREG; i ++)
				if (AMD64_IS_CALLEE_SAVED_REG (i) && (ji->used_regs & (1 << i))) {
						reg = *((guint64*)ctx->rsp + offset);
						reg = *((guint64 *)ctx->rbp + offset);

					g_assert_not_reached ();
		if (*lmf && ((*lmf) != jit_tls->first_lmf) && (MONO_CONTEXT_GET_SP (ctx) >= (gpointer)(*lmf)->rsp)) {
			/* remove any unused lmf */
			*lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~1);

			new_ctx->rsp += (ji->used_regs >> 16) & (0x7fff);

			new_ctx->rip = *((guint64 *)new_ctx->rsp) - 1;
			/* Pop return address */

			/* Pop RBP and the return address */
			new_ctx->rsp = ctx->rbp + (2 * sizeof (gpointer));
			/* we subtract 1, so that the IP points into the call instruction */
			new_ctx->rip = *((guint64 *)ctx->rbp + 1) - 1;
			new_ctx->rbp = *((guint64 *)ctx->rbp);

		/* Pop arguments off the stack */
			MonoJitArgumentInfo *arg_info = g_newa (MonoJitArgumentInfo, mono_method_signature (ji->method)->param_count + 1);

			guint32 stack_to_pop = mono_arch_get_argument_info (mono_method_signature (ji->method), mono_method_signature (ji->method)->param_count, arg_info);
			new_ctx->rsp += stack_to_pop;
		if (((guint64)(*lmf)->previous_lmf) & 1) {
			/* This LMF has the rip field set */
		} else if ((*lmf)->rsp == 0) {

			 * The rsp field is set just before the call which transitioned to native
			 * code. Obtain the rip from the stack.
			rip = *(guint64*)((*lmf)->rsp - sizeof (gpointer));

		ji = mono_jit_info_table_find (domain, (gpointer)rip);

			/* Trampoline lmf frame */
			memset (res, 0, sizeof (MonoJitInfo));
			res->method = (*lmf)->method;

		new_ctx->rbp = (*lmf)->rbp;
		new_ctx->rsp = (*lmf)->rsp;

		new_ctx->rbx = (*lmf)->rbx;
		new_ctx->r12 = (*lmf)->r12;
		new_ctx->r13 = (*lmf)->r13;
		new_ctx->r14 = (*lmf)->r14;
		new_ctx->r15 = (*lmf)->r15;

		*lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~1);

		return ji ? ji : res;
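
/*
 * Usage sketch (schematic illustration, not in the original source; the
 * trailing arguments are elided here): the generic stack walker calls this
 * function once per frame, feeding each produced context back in as the next
 * input until the walk terminates.
 *
 *   MonoContext ctx = *start_ctx, new_ctx;
 *   MonoLMF *lmf = mono_get_lmf ();
 *   MonoJitInfo rji, *ji = NULL;
 *
 *   do {
 *           ji = mono_arch_find_jit_info (domain, jit_tls, &rji, ji, &ctx,
 *                                         &new_ctx, NULL, &lmf, NULL, ...);
 *           ctx = new_ctx;
 *   } while (ji);   // termination condition is schematic
 */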
 * mono_arch_handle_exception:
 * @ctx: saved processor state
 * @obj: the exception object
mono_arch_handle_exception (void *sigctx, gpointer obj, gboolean test_only)
	mono_arch_sigctx_to_monoctx (sigctx, &mctx);

	mono_handle_exception (&mctx, obj, MONO_CONTEXT_GET_IP (&mctx), test_only);

	mono_arch_monoctx_to_sigctx (&mctx, sigctx);
#ifdef MONO_ARCH_USE_SIGACTION
static inline guint64*
gregs_from_ucontext (ucontext_t *ctx)
	guint64 *gregs = (guint64 *) &ctx->uc_mcontext;
	guint64 *gregs = (guint64 *) &ctx->uc_mcontext.gregs;
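
/*
 * Note (not in the original source): the two initializations above belong to
 * different platform branches. One appears to treat uc_mcontext itself as
 * the register array (the BSD-style layout), while the other uses the
 * uc_mcontext.gregs array that glibc on Linux provides, indexed by the
 * REG_* constants from <sys/ucontext.h> that the conversion routines below
 * rely on.
 */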
mono_arch_sigctx_to_monoctx (void *sigctx, MonoContext *mctx)
#ifdef MONO_ARCH_USE_SIGACTION
	ucontext_t *ctx = (ucontext_t*)sigctx;

	guint64 *gregs = gregs_from_ucontext (ctx);

	mctx->rax = gregs [REG_RAX];
	mctx->rbx = gregs [REG_RBX];
	mctx->rcx = gregs [REG_RCX];
	mctx->rdx = gregs [REG_RDX];
	mctx->rbp = gregs [REG_RBP];
	mctx->rsp = gregs [REG_RSP];
	mctx->rsi = gregs [REG_RSI];
	mctx->rdi = gregs [REG_RDI];
	mctx->rip = gregs [REG_RIP];
	mctx->r12 = gregs [REG_R12];
	mctx->r13 = gregs [REG_R13];
	mctx->r14 = gregs [REG_R14];
	mctx->r15 = gregs [REG_R15];

	MonoContext *ctx = (MonoContext *)sigctx;

	mctx->rax = ctx->rax;
	mctx->rbx = ctx->rbx;
	mctx->rcx = ctx->rcx;
	mctx->rdx = ctx->rdx;
	mctx->rbp = ctx->rbp;
	mctx->rsp = ctx->rsp;
	mctx->rsi = ctx->rsi;
	mctx->rdi = ctx->rdi;
	mctx->rip = ctx->rip;
	mctx->r12 = ctx->r12;
	mctx->r13 = ctx->r13;
	mctx->r14 = ctx->r14;
	mctx->r15 = ctx->r15;
mono_arch_monoctx_to_sigctx (MonoContext *mctx, void *sigctx)
#ifdef MONO_ARCH_USE_SIGACTION
	ucontext_t *ctx = (ucontext_t*)sigctx;

	guint64 *gregs = gregs_from_ucontext (ctx);

	gregs [REG_RAX] = mctx->rax;
	gregs [REG_RBX] = mctx->rbx;
	gregs [REG_RCX] = mctx->rcx;
	gregs [REG_RDX] = mctx->rdx;
	gregs [REG_RBP] = mctx->rbp;
	gregs [REG_RSP] = mctx->rsp;
	gregs [REG_RSI] = mctx->rsi;
	gregs [REG_RDI] = mctx->rdi;
	gregs [REG_RIP] = mctx->rip;
	gregs [REG_R12] = mctx->r12;
	gregs [REG_R13] = mctx->r13;
	gregs [REG_R14] = mctx->r14;
	gregs [REG_R15] = mctx->r15;

	MonoContext *ctx = (MonoContext *)sigctx;

	ctx->rax = mctx->rax;
	ctx->rbx = mctx->rbx;
	ctx->rcx = mctx->rcx;
	ctx->rdx = mctx->rdx;
	ctx->rbp = mctx->rbp;
	ctx->rsp = mctx->rsp;
	ctx->rsi = mctx->rsi;
	ctx->rdi = mctx->rdi;
	ctx->rip = mctx->rip;
	ctx->r12 = mctx->r12;
	ctx->r13 = mctx->r13;
	ctx->r14 = mctx->r14;
	ctx->r15 = mctx->r15;
mono_arch_ip_from_context (void *sigctx)
#ifdef MONO_ARCH_USE_SIGACTION
	ucontext_t *ctx = (ucontext_t*)sigctx;

	guint64 *gregs = gregs_from_ucontext (ctx);

	return (gpointer)gregs [REG_RIP];

	MonoContext *ctx = sigctx;
	return (gpointer)ctx->rip;
restore_soft_guard_pages (void)
	MonoJitTlsData *jit_tls = TlsGetValue (mono_jit_tls_id);
	if (jit_tls->stack_ovf_guard_base)
		mono_mprotect (jit_tls->stack_ovf_guard_base, jit_tls->stack_ovf_guard_size, MONO_MMAP_NONE);

 * this function modifies mctx so that when it is restored, it
 * won't execute starting at mctx.rip, but in a function that
 * will restore the protection on the soft-guard pages and return back to
 * continue at mctx.rip.
prepare_for_guard_pages (MonoContext *mctx)
	sp = (gpointer)(mctx->rsp);

	/* the return addr */
	sp [0] = (gpointer)(mctx->rip);
	mctx->rip = (guint64)restore_soft_guard_pages;
	mctx->rsp = (guint64)sp;
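
/*
 * Effect sketch (illustration, not in the original source): after this runs,
 * restoring the context behaves as if restore_soft_guard_pages() had just
 * been called from the faulting location. The original rip is parked on the
 * stack as a return address, rip is redirected to
 * restore_soft_guard_pages(), and that function's `ret' resumes execution at
 * the original rip once the guard pages have been re-protected.
 */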
altstack_handle_and_restore (void *sigctx, gpointer obj, gboolean stack_ovf)
	void (*restore_context) (MonoContext *);

	restore_context = mono_arch_get_restore_context ();
	mono_arch_sigctx_to_monoctx (sigctx, &mctx);
	mono_handle_exception (&mctx, obj, MONO_CONTEXT_GET_IP (&mctx), FALSE);
		prepare_for_guard_pages (&mctx);
	restore_context (&mctx);
mono_arch_handle_altstack_exception (void *sigctx, gpointer fault_addr, gboolean stack_ovf)
#ifdef MONO_ARCH_USE_SIGACTION
	MonoException *exc = NULL;
	ucontext_t *ctx = (ucontext_t*)sigctx;
	guint64 *gregs = gregs_from_ucontext (ctx);
	MonoJitInfo *ji = mono_jit_info_table_find (mono_domain_get (), (gpointer)gregs [REG_RIP]);

		exc = mono_domain_get ()->stack_overflow_ex;
		mono_handle_native_sigsegv (SIGSEGV, sigctx);

	/* setup a call frame on the real stack so that control is returned there
	 * and exception handling can continue.
	 * The frame looks like:
	 * 128 is the size of the red zone
	frame_size = sizeof (ucontext_t) + sizeof (gpointer) * 4 + 128;

	sp = (gpointer)(gregs [REG_RSP] & ~15);
	sp = (gpointer)((char*)sp - frame_size);
	/* the arguments must be aligned */
	sp [-1] = (gpointer)gregs [REG_RIP];
	/* may need to adjust pointers in the new struct copy, depending on the OS */
	memcpy (sp + 4, ctx, sizeof (ucontext_t));
	/* at the return from the signal handler execution starts in altstack_handle_and_restore() */
	gregs [REG_RIP] = (unsigned long)altstack_handle_and_restore;
	gregs [REG_RSP] = (unsigned long)(sp - 1);
	gregs [REG_RDI] = (unsigned long)(sp + 4);
	gregs [REG_RSI] = (guint64)exc;
	gregs [REG_RDX] = stack_ovf;
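
/*
 * Frame sketch (illustration derived from the code above, not in the
 * original source): relative to the 16-byte aligned base `sp' carved out
 * below the signal-time stack pointer,
 *
 *   sp[-1]     original rip, which acts as the return address since
 *              REG_RSP is pointed at it
 *   sp + 4     copy of the ucontext_t, passed as the first argument
 *
 * so returning from the signal handler calls altstack_handle_and_restore()
 * on the normal stack (copied context in %rdi, exc in %rsi, stack_ovf in
 * %rdx) instead of resuming at the faulting instruction.
 */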
get_original_ip (void)
	MonoLMF *lmf = mono_get_lmf ();

	/* Reset the change to previous_lmf */
	lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf & ~1);
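
/*
 * Note (not in the original source): bit 0 of lmf->previous_lmf is used as a
 * tag throughout this file. mono_arch_notify_pending_exc() sets it to record
 * that lmf->rip holds a hijacked return address, the unwinder in
 * mono_arch_find_jit_info() tests and strips it, and this helper clears it
 * again once the original ip has been recovered.
 */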
get_throw_pending_exception (void)
	static guint8* start;
	static gboolean inited = FALSE;
	gpointer throw_trampoline;

	start = code = mono_global_codeman_reserve (128);

	/* We are in the frame of a managed method after a call */

	 * We would like to throw the pending exception in such a way that it looks to
	 * be thrown from the managed method.

	/* Save registers which might contain the return value of the call */
	amd64_push_reg (code, AMD64_RAX);
	amd64_push_reg (code, AMD64_RDX);

	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
	amd64_movsd_membase_reg (code, AMD64_RSP, 0, AMD64_XMM0);

	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);

	/* Obtain the pending exception */
	amd64_mov_reg_imm (code, AMD64_R11, mono_thread_get_and_clear_pending_exception);
	amd64_call_reg (code, AMD64_R11);

	/* Check if it is NULL, and branch */
	amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0);
	br[0] = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE);

	/* exc != NULL branch */

	/* Save the exc on the stack */
	amd64_push_reg (code, AMD64_RAX);
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);

	/* Obtain the original ip and clear the flag in previous_lmf */
	amd64_mov_reg_imm (code, AMD64_R11, get_original_ip);
	amd64_call_reg (code, AMD64_R11);

	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, 8, 8);

	/* Pop saved stuff from the stack */
	amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 6 * 8);

	/* Setup arguments for the throw trampoline */
	amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_R11, 8);
	/* The trampoline expects the caller ip to be pushed on the stack */
	amd64_push_reg (code, AMD64_RAX);

	/* Call the throw trampoline */
	throw_trampoline = mono_arch_get_throw_exception ();
	amd64_mov_reg_imm (code, AMD64_R11, throw_trampoline);
	/* We use a jump instead of a call so we can push the original ip on the stack */
	amd64_jump_reg (code, AMD64_R11);

	/* exc == NULL branch */
	mono_amd64_patch (br [0], code);

	/* Obtain the original ip and clear the flag in previous_lmf */
	amd64_mov_reg_imm (code, AMD64_R11, get_original_ip);
	amd64_call_reg (code, AMD64_R11);
	amd64_mov_reg_reg (code, AMD64_R11, AMD64_RAX, 8);

	/* Restore registers */
	amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
	amd64_movsd_reg_membase (code, AMD64_XMM0, AMD64_RSP, 0);
	amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
	amd64_pop_reg (code, AMD64_RDX);
	amd64_pop_reg (code, AMD64_RAX);

	/* Return to original code */
	amd64_jump_reg (code, AMD64_R11);

	g_assert ((code - start) < 128);
 * Called when a thread receives an async exception while executing unmanaged code.
 * Instead of checking for this exception in the managed-to-native wrapper, we hijack
 * the return address on the stack to point to a helper routine which throws the
mono_arch_notify_pending_exc (void)
	MonoLMF *lmf = mono_get_lmf ();

	if ((guint64)lmf->previous_lmf & 1)
		/* Already hijacked or trampoline LMF entry */

	/* lmf->rsp is set just before making the call which transitions to unmanaged code */
	lmf->rip = *(guint64*)(lmf->rsp - 8);
	/* Signal that lmf->rip is set */
	lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf | 1);

	*(gpointer*)(lmf->rsp - 8) = get_throw_pending_exception ();
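
/*
 * Effect sketch (illustration, not in the original source): after the
 * hijack, the native call currently in progress no longer returns straight
 * to its managed caller. Instead it returns into the stub built by
 * get_throw_pending_exception(), which asks
 * mono_thread_get_and_clear_pending_exception() for the pending exception,
 * throws it as if it originated at the saved lmf->rip, and, when no
 * exception is actually pending, restores the scratch registers and jumps
 * back to the original return address.
 */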