2 * exceptions-amd64.c: exception support for AMD64
5 * Dietmar Maurer (dietmar@ximian.com)
7 * (C) 2001 Ximian, Inc.
14 #ifndef PLATFORM_WIN32
15 #include <sys/ucontext.h>
18 #include <mono/arch/amd64/amd64-codegen.h>
19 #include <mono/metadata/appdomain.h>
20 #include <mono/metadata/tabledefs.h>
21 #include <mono/metadata/threads.h>
22 #include <mono/metadata/debug-helpers.h>
23 #include <mono/metadata/exception.h>
24 #include <mono/metadata/gc-internal.h>
25 #include <mono/metadata/mono-debug.h>
26 #include <mono/utils/mono-mmap.h>
29 #include "mini-amd64.h"
31 #define ALIGN_TO(val,align) (((val) + ((align) - 1)) & ~((align) - 1))
/* Per-signal Win32 SEH callbacks, registered via win32_seh_set_handler(). */
34 static MonoW32ExceptionHandler fpe_handler;
35 static MonoW32ExceptionHandler ill_handler;
36 static MonoW32ExceptionHandler segv_handler;
/* Previous top-level unhandled-exception filter, saved by win32_seh_init()
 * so win32_seh_cleanup() can restore it. */
38 static LPTOP_LEVEL_EXCEPTION_FILTER old_handler;
/* Invoke the registered handler (if any) for exception kind _ex, passing the
 * MonoContext 'sctx' that seh_handler() built from the Win32 CONTEXT. */
40 #define W32_SEH_HANDLE_EX(_ex) \
41 if (_ex##_handler) _ex##_handler((int)sctx)
44 * Unhandled Exception Filter
45 * Top-level per-process exception handler.
/*
 * seh_handler:
 * Installed by win32_seh_init() via SetUnhandledExceptionFilter(). Copies the
 * Win32 CONTEXT into a heap-allocated MonoContext, dispatches to the
 * registered per-signal handler based on the exception code, then copies the
 * (possibly modified) register state back into the CONTEXT so execution
 * resumes wherever the handler pointed it.
 * NOTE(review): this listing elides interior lines (local declarations, case
 * 'break's, the default case, most register copies, and sctx's free) — confirm
 * against the full file before relying on details not visible here.
 */
47 LONG CALLBACK seh_handler(EXCEPTION_POINTERS* ep)
54 res = EXCEPTION_CONTINUE_EXECUTION;
56 er = ep->ExceptionRecord;
57 ctx = ep->ContextRecord;
/* sctx is heap-allocated because the registered handlers take it by pointer;
 * presumably freed after the copy-back below — TODO confirm (elided lines). */
58 sctx = g_malloc(sizeof(MonoContext));
60 /* Copy Win32 context to UNIX style context */
/* Route each Win32 exception code to the matching Mono signal handler. */
75 switch (er->ExceptionCode) {
76 case EXCEPTION_ACCESS_VIOLATION:
77 W32_SEH_HANDLE_EX(segv);
79 case EXCEPTION_ILLEGAL_INSTRUCTION:
80 W32_SEH_HANDLE_EX(ill);
/* All arithmetic faults collapse onto the single FPE handler. */
82 case EXCEPTION_INT_DIVIDE_BY_ZERO:
83 case EXCEPTION_INT_OVERFLOW:
84 case EXCEPTION_FLT_DIVIDE_BY_ZERO:
85 case EXCEPTION_FLT_OVERFLOW:
86 case EXCEPTION_FLT_UNDERFLOW:
87 case EXCEPTION_FLT_INEXACT_RESULT:
88 W32_SEH_HANDLE_EX(fpe);
94 /* Copy context back */
/* The handler may have rewritten sp/ip (e.g. to resume in a throw helper). */
100 ctx->Rsp = sctx->rsp;
101 ctx->Rsi = sctx->rsi;
102 ctx->Rdi = sctx->rdi;
103 ctx->Rip = sctx->rip;
/* Install seh_handler as the process-wide unhandled-exception filter,
 * remembering the previous filter in old_handler for win32_seh_cleanup(). */
110 void win32_seh_init()
112 old_handler = SetUnhandledExceptionFilter(seh_handler);
/* Restore the exception filter that was active before win32_seh_init(). */
115 void win32_seh_cleanup()
117 if (old_handler) SetUnhandledExceptionFilter(old_handler);
/*
 * win32_seh_set_handler:
 * Register @handler for the given signal @type; seh_handler() dispatches to
 * these when the corresponding Win32 exception fires.
 * NOTE(review): the switch on @type and its case labels are elided in this
 * listing — only the assignment lines are visible.
 */
120 void win32_seh_set_handler(int type, MonoW32ExceptionHandler handler)
124 fpe_handler = handler;
127 ill_handler = handler;
130 segv_handler = handler;
137 #endif /* PLATFORM_WIN32 */
140 * mono_arch_get_restore_context:
142 * Returns a pointer to a method which restores a previously saved sigcontext.
/* Generated code signature: void restore_context (MonoContext *ctx).
 * Loads the callee-saved registers and rsp from *ctx, then jumps to ctx->rip.
 * The trampoline is emitted once and cached in 'start' ('inited' guards the
 * one-time emission; the guard itself is elided in this listing). */
145 mono_arch_get_restore_context (void)
147 static guint8 *start = NULL;
148 static gboolean inited = FALSE;
154 /* restore_context (MonoContext *ctx) */
156 start = code = mono_global_codeman_reserve (256);
158 /* get return address */
/* Fetch the target ip into RAX first, since the final jump uses it after
 * rsp has been switched to the saved stack. */
159 amd64_mov_reg_membase (code, AMD64_RAX, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rip), 8);
161 /* Restore registers */
162 amd64_mov_reg_membase (code, AMD64_RBP, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rbp), 8);
163 amd64_mov_reg_membase (code, AMD64_RBX, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rbx), 8);
164 amd64_mov_reg_membase (code, AMD64_R12, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r12), 8);
165 amd64_mov_reg_membase (code, AMD64_R13, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r13), 8);
166 amd64_mov_reg_membase (code, AMD64_R14, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r14), 8);
167 amd64_mov_reg_membase (code, AMD64_R15, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r15), 8);
/* rdi/rsi are callee-saved only in the Win64 ABI, so they are restored
 * only on Windows. */
168 #ifdef PLATFORM_WIN32
169 amd64_mov_reg_membase (code, AMD64_RDI, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rdi), 8);
170 amd64_mov_reg_membase (code, AMD64_RSI, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rsi), 8);
/* Switch to the saved stack last; after this, *ctx must not be needed. */
173 amd64_mov_reg_membase (code, AMD64_RSP, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rsp), 8);
175 /* jump to the saved IP */
176 amd64_jump_reg (code, AMD64_RAX);
184 * mono_arch_get_call_filter:
186 * Returns a pointer to a method which calls an exception filter. We
187 * also use this function to call finally handlers (we pass NULL as
188 * @exc object in this case).
/* Generated code signature: int call_filter (MonoContext *ctx, gpointer ip).
 * Sets up a frame, saves the caller's callee-saved registers, loads the
 * filter's register environment from *ctx, calls @ip, then restores
 * everything. Emitted once and cached (one-time guard elided here). */
191 mono_arch_get_call_filter (void)
193 static guint8 *start;
194 static gboolean inited = FALSE;
202 start = code = mono_global_codeman_reserve (128);
204 /* call_filter (MonoContext *ctx, unsigned long eip) */
207 /* Alloc new frame */
208 amd64_push_reg (code, AMD64_RBP);
209 amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, 8);
211 /* Save callee saved regs */
213 for (i = 0; i < AMD64_NREG; ++i)
214 if (AMD64_IS_CALLEE_SAVED_REG (i)) {
215 amd64_push_reg (code, i);
/* Preserve our own rbp separately, since it is clobbered below with the
 * filter's frame pointer from *ctx. */
221 amd64_push_reg (code, AMD64_RBP);
223 /* Make stack misaligned, the call will make it aligned again */
225 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
/* Give the filter the register environment of the faulting frame. */
228 amd64_mov_reg_membase (code, AMD64_RBP, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rbp), 8);
229 /* load callee saved regs */
230 amd64_mov_reg_membase (code, AMD64_RBX, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rbx), 8);
231 amd64_mov_reg_membase (code, AMD64_R12, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r12), 8);
232 amd64_mov_reg_membase (code, AMD64_R13, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r13), 8);
233 amd64_mov_reg_membase (code, AMD64_R14, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r14), 8);
234 amd64_mov_reg_membase (code, AMD64_R15, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r15), 8);
/* rdi/rsi are callee-saved only on Win64. */
235 #ifdef PLATFORM_WIN32
236 amd64_mov_reg_membase (code, AMD64_RDI, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rdi), 8);
237 amd64_mov_reg_membase (code, AMD64_RSI, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rsi), 8);
240 /* call the handler */
241 amd64_call_reg (code, AMD64_ARG_REG2);
/* Undo the alignment adjustment and recover our saved rbp. */
244 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
247 amd64_pop_reg (code, AMD64_RBP);
249 /* Restore callee saved regs */
/* NOTE(review): loop starts at i = AMD64_NREG (one past the last register);
 * harmless only if AMD64_IS_CALLEE_SAVED_REG(AMD64_NREG) is false — verify. */
250 for (i = AMD64_NREG; i >= 0; --i)
251 if (AMD64_IS_CALLEE_SAVED_REG (i))
252 amd64_pop_reg (code, i);
257 g_assert ((code - start) < 128);
/*
 * throw_exception:
 * C helper reached from the throw trampoline (get_throw_trampoline). Builds a
 * MonoContext from the explicit register arguments, lets the exception
 * machinery unwind/locate a handler via mono_handle_exception(), then resumes
 * execution at the handler with restore_context(). Never returns.
 * The Win32 variant takes two extra arguments (rdi/rsi) because those
 * registers are callee-saved in the Win64 ABI.
 * NOTE(review): the context-filling statements and several blocks are elided
 * in this listing.
 */
263 #ifdef PLATFORM_WIN32
265 throw_exception (MonoObject *exc, guint64 rip, guint64 rsp,
266 guint64 rbx, guint64 rbp, guint64 r12, guint64 r13,
267 guint64 r14, guint64 r15, guint64 rdi, guint64 rsi, guint64 rethrow)
270 throw_exception (MonoObject *exc, guint64 rip, guint64 rsp,
271 guint64 rbx, guint64 rbp, guint64 r12, guint64 r13,
272 guint64 r14, guint64 r15, guint64 rethrow)
/* Lazily resolved, cached across throws. */
275 static void (*restore_context) (MonoContext *);
278 if (!restore_context)
279 restore_context = mono_arch_get_restore_context ();
289 #ifdef PLATFORM_WIN32
/* Give an attached debugger first crack at the exception (not on rethrow,
 * where the original throw site is what matters). */
294 if (!rethrow && mono_debugger_throw_exception ((gpointer)(rip - 8), (gpointer)rsp, exc)) {
296 * The debugger wants us to stop on the `throw' instruction.
297 * By the time we get here, it already inserted a breakpoint on
298 * eip - 8 (which is the address of the `mov %r15,%rdi ; callq throw').
303 * In case of a rethrow, the JIT is emitting code like this:
305 * mov 0xffffffffffffffd0(%rbp),%rax'
309 * Here, restore_context() wouldn't restore the %rax register correctly.
313 restore_context (&ctx);
314 g_assert_not_reached ();
317 /* adjust eip so that it points into the call instruction */
/* Reset the managed stack trace so it is rebuilt from this throw site. */
320 if (mono_object_isinst (exc, mono_defaults.exception_class)) {
321 MonoException *mono_ex = (MonoException*)exc;
323 mono_ex->stack_trace = NULL;
325 mono_handle_exception (&ctx, exc, (gpointer)rip, FALSE);
/* mono_handle_exception() rewrote ctx to point at the handler; jump there. */
326 restore_context (&ctx);
328 g_assert_not_reached ();
/*
 * get_throw_trampoline:
 * Emits the trampoline the JIT jumps to in order to raise an exception.
 * Generated signature: void (*func) (MonoException *exc). It marshals the
 * caller's ip, sp and callee-saved registers into the (register + stack)
 * argument list expected by throw_exception(), along with the @rethrow flag.
 * NOTE(review): surrounding declarations and #else/#endif lines are elided
 * in this listing.
 */
332 get_throw_trampoline (gboolean rethrow)
337 start = code = mono_global_codeman_reserve (64);
/* exc is already in the first argument register. */
342 amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_ARG_REG1, 8);
/* The return address on top of the stack is the throw site's ip. */
344 amd64_mov_reg_membase (code, AMD64_ARG_REG2, AMD64_RSP, 0, 8);
/* The caller's sp is just above the return address. */
346 amd64_lea_membase (code, AMD64_ARG_REG3, AMD64_RSP, 8);
/* The two ABIs pass the remaining register values differently: Win64 has
 * four register args then stack; SysV has six register args then stack. */
348 #ifdef PLATFORM_WIN32
349 /* Callee saved regs */
350 amd64_mov_reg_reg (code, AMD64_R9, AMD64_RBX, 8);
/* Stack arguments, pushed right-to-left for throw_exception(). */
352 amd64_push_imm (code, rethrow);
353 amd64_push_reg (code, AMD64_RSI);
354 amd64_push_reg (code, AMD64_RDI);
355 amd64_push_reg (code, AMD64_R15);
356 amd64_push_reg (code, AMD64_R14);
357 amd64_push_reg (code, AMD64_R13);
358 amd64_push_reg (code, AMD64_R12);
359 amd64_push_reg (code, AMD64_RBP);
/* Win64 home space for the four register arguments. */
361 amd64_push_imm (code, 0);
362 amd64_push_imm (code, 0);
363 amd64_push_imm (code, 0);
364 amd64_push_imm (code, 0);
366 /* Callee saved regs */
367 amd64_mov_reg_reg (code, AMD64_RCX, AMD64_RBX, 8);
368 amd64_mov_reg_reg (code, AMD64_R8, AMD64_RBP, 8);
369 amd64_mov_reg_reg (code, AMD64_R9, AMD64_R12, 8);
/* Alignment padding plus the stack-passed arguments (right-to-left). */
371 amd64_push_imm (code, 0);
373 amd64_push_imm (code, rethrow);
374 amd64_push_reg (code, AMD64_R15);
375 amd64_push_reg (code, AMD64_R14);
376 amd64_push_reg (code, AMD64_R13);
379 amd64_mov_reg_imm (code, AMD64_R11, throw_exception);
380 amd64_call_reg (code, AMD64_R11);
/* throw_exception() never returns; trap if it somehow does. */
381 amd64_breakpoint (code);
383 g_assert ((code - start) < 64);
389 * mono_arch_get_throw_exception:
391 * Returns a function pointer which can be used to raise
392 * exceptions. The returned function has the following
393 * signature: void (*func) (MonoException *exc);
/* Thin cached wrapper over get_throw_trampoline(FALSE) — first throw,
 * not a rethrow. One-time init guard elided in this listing. */
397 mono_arch_get_throw_exception (void)
399 static guint8* start;
400 static gboolean inited = FALSE;
405 start = get_throw_trampoline (FALSE);
/* Same as mono_arch_get_throw_exception() but the trampoline passes
 * rethrow=TRUE, so the original stack trace is preserved. */
413 mono_arch_get_rethrow_exception (void)
415 static guint8* start;
416 static gboolean inited = FALSE;
421 start = get_throw_trampoline (TRUE);
/* Required by the arch interface but unused on amd64: the emitted stub is a
 * single breakpoint so any accidental call traps immediately. */
429 mono_arch_get_throw_exception_by_name (void)
431 static guint8* start;
432 static gboolean inited = FALSE;
438 start = code = mono_global_codeman_reserve (64);
440 /* Not used on amd64 */
441 amd64_breakpoint (code);
447 * mono_arch_get_throw_corlib_exception:
449 * Returns a function pointer which can be used to raise
450 * corlib exceptions. The returned function has the following
451 * signature: void (*func) (guint32 ex_token, guint32 offset);
452 * Here, offset is the offset which needs to be subtracted from the caller IP
453 * to get the IP of the throw. Passing the offset has the advantage that it
454 * needs no relocations in the caller.
/* The trampoline materializes the corlib exception object from its token
 * (mono_exception_from_token), computes the true throw ip from the return
 * address minus @offset, then tail-jumps into the regular throw trampoline.
 * NOTE(review): declarations and one-time init guard are elided here. */
457 mono_arch_get_throw_corlib_exception (void)
459 static guint8* start;
460 static gboolean inited = FALSE;
467 start = code = mono_global_codeman_reserve (64);
/* Save the offset argument across the call below. */
470 amd64_push_reg (code, AMD64_ARG_REG2);
472 /* Call exception_from_token */
473 amd64_mov_reg_reg (code, AMD64_ARG_REG2, AMD64_ARG_REG1, 8);
474 amd64_mov_reg_imm (code, AMD64_ARG_REG1, mono_defaults.exception_class->image);
475 amd64_mov_reg_imm (code, AMD64_R11, mono_exception_from_token);
476 amd64_call_reg (code, AMD64_R11);
478 /* Compute throw_ip */
/* Recover the saved offset, then pop the return address and subtract the
 * offset from it to obtain the throw site's ip. */
479 amd64_pop_reg (code, AMD64_ARG_REG2);
481 amd64_pop_reg (code, AMD64_ARG_REG3);
482 amd64_alu_reg_reg (code, X86_SUB, AMD64_ARG_REG3, AMD64_ARG_REG2);
484 /* Put the throw_ip at the top of the misaligned stack */
485 amd64_push_reg (code, AMD64_ARG_REG3);
487 throw_ex = (guint64)mono_arch_get_throw_exception ();
489 /* Call throw_exception */
/* The freshly created exception object is the call's return value in RAX. */
490 amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_RAX, 8);
491 amd64_mov_reg_imm (code, AMD64_R11, throw_ex);
492 /* The original IP is on the stack */
/* Jump (not call): the throw trampoline reads the ip we pushed as if it
 * were its return address. */
493 amd64_jump_reg (code, AMD64_R11);
495 g_assert ((code - start) < 64);
502 /* mono_arch_find_jit_info:
504 * This function is used to gather information from @ctx. It returns the
505 * MonoJitInfo of the corresponding function, unwinds one stack frame and
506 * stores the resulting context into @new_ctx. It also stores a string
507 * describing the stack location into @trace (if not NULL), and modifies
508 * the @lmf if necessary. @native_offset returns the IP offset from the
509 * start of the function or -1 if that info is not available.
/* NOTE(review): many interior lines (branches, declarations, #else arms and
 * the frame-pointer/omit-fp register-restore bodies) are elided in this
 * listing; comments below describe only what the visible lines establish. */
512 mono_arch_find_jit_info (MonoDomain *domain, MonoJitTlsData *jit_tls, MonoJitInfo *res, MonoJitInfo *prev_ji, MonoContext *ctx,
513 MonoContext *new_ctx, char **trace, MonoLMF **lmf, int *native_offset,
518 gpointer ip = MONO_CONTEXT_GET_IP (ctx);
520 /* Avoid costly table lookup during stack overflow */
521 if (prev_ji && (ip > prev_ji->code_start && ((guint8*)ip < ((guint8*)prev_ji->code_start) + prev_ji->code_size)))
524 ji = mono_jit_info_table_find (domain, ip);
/* Bit 31 of used_regs flags a frame compiled without a frame pointer. */
531 gboolean omit_fp = (ji->used_regs & (1 << 31)) > 0;
536 if (!ji->method->wrapper_type)
540 * Some managed methods like pinvoke wrappers might have save_lmf set.
541 * In this case, register save/restore code is not generated by the
542 * JIT, so we have to restore callee saved registers from the lmf.
544 if (ji->method->save_lmf) {
548 * If the exception was raised in unmanaged code, then the LMF was already
549 * popped off the stack. We detect that case by comparing the sp (or bp)
550 * value in the LMF with the one in the context. This works because these
551 * registers are saved to the LMF at the start of the method.
/* Which register to compare depends on omit_fp (branch elided here). */
554 lmf_match = (*lmf) && (*lmf)->rsp == ctx->rsp;
556 lmf_match = (*lmf) && (*lmf)->rbp == ctx->rbp;
558 if (*lmf && lmf_match) {
559 /* Make sure the LMF belongs to this method */
560 g_assert ((*lmf)->rip >= (guint64)ji->code_start && (*lmf)->rip <= ((guint64)((guint8*)ji->code_start + ji->code_size)));
/* Restore the caller's callee-saved registers from the LMF snapshot. */
562 new_ctx->rbp = (*lmf)->rbp;
563 new_ctx->rbx = (*lmf)->rbx;
564 new_ctx->rsp = (*lmf)->rsp;
565 new_ctx->r12 = (*lmf)->r12;
566 new_ctx->r13 = (*lmf)->r13;
567 new_ctx->r14 = (*lmf)->r14;
568 new_ctx->r15 = (*lmf)->r15;
570 *lmf = (*lmf)->previous_lmf;
/* omit-fp frames index saves relative to rsp, fp frames relative to rbp. */
574 offset = omit_fp ? 0 : -1;
575 /* restore caller saved registers */
576 for (i = 0; i < AMD64_NREG; i ++)
577 if (AMD64_IS_CALLEE_SAVED_REG (i) && (ji->used_regs & (1 << i))) {
581 reg = *((guint64*)ctx->rsp + offset);
585 reg = *((guint64 *)ctx->rbp + offset);
609 g_assert_not_reached ();
/* Drop LMF entries belonging to frames we have already unwound past. */
614 if (*lmf && ((*lmf) != jit_tls->first_lmf) && (MONO_CONTEXT_GET_SP (ctx) >= (gpointer)(*lmf)->rsp)) {
615 /* remove any unused lmf */
616 *lmf = (*lmf)->previous_lmf;
/* omit-fp unwind: bits 16..30 of used_regs encode the frame size. */
621 new_ctx->rsp += (ji->used_regs >> 16) & (0x7fff);
622 new_ctx->rip = *((guint64 *)new_ctx->rsp) - 1;
623 /* Pop return address */
627 /* Pop EBP and the return address */
628 new_ctx->rsp = ctx->rbp + (2 * sizeof (gpointer));
629 /* we subtract 1, so that the IP points into the call instruction */
630 new_ctx->rip = *((guint64 *)ctx->rbp + 1) - 1;
631 new_ctx->rbp = *((guint64 *)ctx->rbp);
634 /* Pop arguments off the stack */
/* Callee-pops calling conventions: ask the arch for the argument area size. */
636 MonoJitArgumentInfo *arg_info = g_newa (MonoJitArgumentInfo, mono_method_signature (ji->method)->param_count + 1);
638 guint32 stack_to_pop = mono_arch_get_argument_info (mono_method_signature (ji->method), mono_method_signature (ji->method)->param_count, arg_info);
639 new_ctx->rsp += stack_to_pop;
/* No JIT info for ip: try to unwind through the LMF (native/trampoline
 * frame) instead. */
647 if ((ji = mono_jit_info_table_find (domain, (gpointer)(*lmf)->rip))) {
652 /* Trampoline lmf frame */
653 memset (res, 0, sizeof (MonoJitInfo));
654 res->method = (*lmf)->method;
/* Rebuild the caller's register state entirely from the LMF. */
657 new_ctx->rip = (*lmf)->rip;
658 new_ctx->rbp = (*lmf)->rbp;
659 new_ctx->rsp = (*lmf)->rsp;
661 new_ctx->rbx = (*lmf)->rbx;
662 new_ctx->r12 = (*lmf)->r12;
663 new_ctx->r13 = (*lmf)->r13;
664 new_ctx->r14 = (*lmf)->r14;
665 new_ctx->r15 = (*lmf)->r15;
667 *lmf = (*lmf)->previous_lmf;
669 return ji ? ji : res;
676 * mono_arch_handle_exception:
678 * @ctx: saved processor state
679 * @obj: the exception object
/* Bridge from a raw signal context to the generic exception machinery:
 * convert to MonoContext, run mono_handle_exception() (test_only performs a
 * dry run without unwinding — per the generic handler's contract), then
 * write the possibly-updated registers back into the signal context. */
682 mono_arch_handle_exception (void *sigctx, gpointer obj, gboolean test_only)
686 mono_arch_sigctx_to_monoctx (sigctx, &mctx);
688 mono_handle_exception (&mctx, obj, MONO_CONTEXT_GET_IP (&mctx), test_only);
690 mono_arch_monoctx_to_sigctx (&mctx, sigctx);
695 #ifdef MONO_ARCH_USE_SIGACTION
/* Return the general-register array inside a ucontext_t. The two variants
 * cover platforms where uc_mcontext is itself the register block versus
 * those exposing a .gregs member (selection #ifdef elided in this listing). */
696 static inline guint64*
697 gregs_from_ucontext (ucontext_t *ctx)
700 guint64 *gregs = (guint64 *) &ctx->uc_mcontext;
702 guint64 *gregs = (guint64 *) &ctx->uc_mcontext.gregs;
/*
 * mono_arch_sigctx_to_monoctx:
 * Copy the registers out of a platform signal context into @mctx.
 * With sigaction the source is a ucontext_t (via gregs_from_ucontext);
 * otherwise @sigctx is already a MonoContext and this is a field-for-field
 * copy.
 */
709 mono_arch_sigctx_to_monoctx (void *sigctx, MonoContext *mctx)
711 #ifdef MONO_ARCH_USE_SIGACTION
712 ucontext_t *ctx = (ucontext_t*)sigctx;
714 guint64 *gregs = gregs_from_ucontext (ctx);
716 mctx->rax = gregs [REG_RAX];
717 mctx->rbx = gregs [REG_RBX];
718 mctx->rcx = gregs [REG_RCX];
719 mctx->rdx = gregs [REG_RDX];
720 mctx->rbp = gregs [REG_RBP];
721 mctx->rsp = gregs [REG_RSP];
722 mctx->rsi = gregs [REG_RSI];
723 mctx->rdi = gregs [REG_RDI];
724 mctx->rip = gregs [REG_RIP];
725 mctx->r12 = gregs [REG_R12];
726 mctx->r13 = gregs [REG_R13];
727 mctx->r14 = gregs [REG_R14];
728 mctx->r15 = gregs [REG_R15];
/* Non-sigaction path: sigctx already is a MonoContext. */
730 MonoContext *ctx = (MonoContext *)sigctx;
732 mctx->rax = ctx->rax;
733 mctx->rbx = ctx->rbx;
734 mctx->rcx = ctx->rcx;
735 mctx->rdx = ctx->rdx;
736 mctx->rbp = ctx->rbp;
737 mctx->rsp = ctx->rsp;
738 mctx->rsi = ctx->rsi;
739 mctx->rdi = ctx->rdi;
740 mctx->rip = ctx->rip;
741 mctx->r12 = ctx->r12;
742 mctx->r13 = ctx->r13;
743 mctx->r14 = ctx->r14;
744 mctx->r15 = ctx->r15;
/*
 * mono_arch_monoctx_to_sigctx:
 * Inverse of mono_arch_sigctx_to_monoctx: write the registers in @mctx back
 * into the platform signal context, so returning from the signal handler
 * resumes with the (possibly modified) state.
 */
749 mono_arch_monoctx_to_sigctx (MonoContext *mctx, void *sigctx)
751 #ifdef MONO_ARCH_USE_SIGACTION
752 ucontext_t *ctx = (ucontext_t*)sigctx;
754 guint64 *gregs = gregs_from_ucontext (ctx);
756 gregs [REG_RAX] = mctx->rax;
757 gregs [REG_RBX] = mctx->rbx;
758 gregs [REG_RCX] = mctx->rcx;
759 gregs [REG_RDX] = mctx->rdx;
760 gregs [REG_RBP] = mctx->rbp;
761 gregs [REG_RSP] = mctx->rsp;
762 gregs [REG_RSI] = mctx->rsi;
763 gregs [REG_RDI] = mctx->rdi;
764 gregs [REG_RIP] = mctx->rip;
765 gregs [REG_R12] = mctx->r12;
766 gregs [REG_R13] = mctx->r13;
767 gregs [REG_R14] = mctx->r14;
768 gregs [REG_R15] = mctx->r15;
/* Non-sigaction path: sigctx already is a MonoContext. */
770 MonoContext *ctx = (MonoContext *)sigctx;
772 ctx->rax = mctx->rax;
773 ctx->rbx = mctx->rbx;
774 ctx->rcx = mctx->rcx;
775 ctx->rdx = mctx->rdx;
776 ctx->rbp = mctx->rbp;
777 ctx->rsp = mctx->rsp;
778 ctx->rsi = mctx->rsi;
779 ctx->rdi = mctx->rdi;
780 ctx->rip = mctx->rip;
781 ctx->r12 = mctx->r12;
782 ctx->r13 = mctx->r13;
783 ctx->r14 = mctx->r14;
784 ctx->r15 = mctx->r15;
/* Extract the faulting instruction pointer from a platform signal context
 * (ucontext_t under sigaction, MonoContext otherwise). */
789 mono_arch_ip_from_context (void *sigctx)
792 #ifdef MONO_ARCH_USE_SIGACTION
794 ucontext_t *ctx = (ucontext_t*)sigctx;
796 guint64 *gregs = gregs_from_ucontext (ctx);
798 return (gpointer)gregs [REG_RIP];
800 MonoContext *ctx = sigctx;
801 return (gpointer)ctx->rip;
/* Re-arm the soft stack-overflow guard: re-protect the guard region that was
 * unprotected to let stack-overflow handling run. Executed on the thread's
 * normal stack after prepare_for_guard_pages() redirected control here. */
806 restore_soft_guard_pages (void)
808 MonoJitTlsData *jit_tls = TlsGetValue (mono_jit_tls_id);
809 if (jit_tls->stack_ovf_guard_base)
810 mono_mprotect (jit_tls->stack_ovf_guard_base, jit_tls->stack_ovf_guard_size, MONO_MMAP_NONE);
814 * this function modifies mctx so that when it is restored, it
815 * won't execute starting at mctx.eip, but in a function that
816 * will restore the protection on the soft-guard pages and return back to
817 * continue at mctx.eip.
/* Pushes the original resume ip as a fake return address and points rip at
 * restore_soft_guard_pages(), so that function's 'ret' resumes the original
 * code. (sp adjustment before the store is elided in this listing.) */
820 prepare_for_guard_pages (MonoContext *mctx)
823 sp = (gpointer)(mctx->rsp);
825 /* the return addr */
826 sp [0] = (gpointer)(mctx->rip);
827 mctx->rip = (unsigned long)restore_soft_guard_pages;
828 mctx->rsp = (unsigned long)sp;
/*
 * altstack_handle_and_restore:
 * Continuation invoked on the normal stack after a signal was handled on the
 * alternate signal stack (see mono_arch_handle_altstack_exception). Converts
 * @sigctx, runs the exception machinery on @obj, optionally re-arms the
 * soft guard pages when @stack_ovf is set, and resumes via restore_context().
 */
832 altstack_handle_and_restore (void *sigctx, gpointer obj, gboolean stack_ovf)
834 void (*restore_context) (MonoContext *);
837 restore_context = mono_arch_get_restore_context ();
838 mono_arch_sigctx_to_monoctx (sigctx, &mctx);
839 mono_handle_exception (&mctx, obj, MONO_CONTEXT_GET_IP (&mctx), FALSE);
/* Only re-protect the guard region when this was a stack overflow. */
841 prepare_for_guard_pages (&mctx);
842 restore_context (&mctx);
846 mono_arch_handle_altstack_exception (void *sigctx, gpointer fault_addr, gboolean stack_ovf)
848 #ifdef MONO_ARCH_USE_SIGACTION
849 MonoException *exc = NULL;
850 ucontext_t *ctx = (ucontext_t*)sigctx;
851 guint64 *gregs = gregs_from_ucontext (ctx);
852 MonoJitInfo *ji = mono_jit_info_table_find (mono_domain_get (), (gpointer)gregs [REG_RIP]);
857 exc = mono_domain_get ()->stack_overflow_ex;
859 mono_handle_native_sigsegv (SIGSEGV, sigctx);
861 /* setup a call frame on the real stack so that control is returned there
862 * and exception handling can continue.
863 * The frame looks like:
867 * 128 is the size of the red zone
869 frame_size = sizeof (ucontext_t) + sizeof (gpointer) * 4 + 128;
872 sp = (gpointer)(gregs [REG_RSP] & ~15);
873 sp = (gpointer)((char*)sp - frame_size);
874 /* the arguments must be aligned */
875 sp [-1] = (gpointer)gregs [REG_RIP];
876 /* may need to adjust pointers in the new struct copy, depending on the OS */
877 memcpy (sp + 4, ctx, sizeof (ucontext_t));
878 /* at the return form the signal handler execution starts in altstack_handle_and_restore() */
879 gregs [REG_RIP] = (unsigned long)altstack_handle_and_restore;
880 gregs [REG_RSP] = (unsigned long)(sp - 1);
881 gregs [REG_RDI] = (unsigned long)(sp + 4);
882 gregs [REG_RSI] = (guint64)exc;
883 gregs [REG_RDX] = stack_ovf;