/*
 * exceptions-amd64.c: exception support for AMD64
 *
 * Authors:
 *   Dietmar Maurer (dietmar@ximian.com)
 *
 * (C) 2001 Ximian, Inc.
 */

#ifndef PLATFORM_WIN32
#include <sys/ucontext.h>
#endif

#include <mono/arch/amd64/amd64-codegen.h>
#include <mono/metadata/appdomain.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/threads.h>
#include <mono/metadata/threads-types.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/exception.h>
#include <mono/metadata/gc-internal.h>
#include <mono/metadata/mono-debug.h>
#include <mono/utils/mono-mmap.h>

#include "mini.h"
#include "mini-amd64.h"

#define ALIGN_TO(val,align) (((val) + ((align) - 1)) & ~((align) - 1))
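/* For example, ALIGN_TO (13, 8) == 16 while ALIGN_TO (16, 8) == 16; `align` must be a power of two. */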
#ifdef PLATFORM_WIN32
static MonoW32ExceptionHandler fpe_handler;
static MonoW32ExceptionHandler ill_handler;
static MonoW32ExceptionHandler segv_handler;

static LPTOP_LEVEL_EXCEPTION_FILTER old_handler;

#define W32_SEH_HANDLE_EX(_ex) \
	if (_ex##_handler) _ex##_handler((int)sctx)
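/*
 * The ## token-pasting dispatches on the handler name, e.g.
 * W32_SEH_HANDLE_EX(segv) expands to:
 *
 *   if (segv_handler) segv_handler((int)sctx)
 */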
/*
 * Unhandled Exception Filter
 * Top-level per-process exception handler.
 */
LONG CALLBACK seh_handler(EXCEPTION_POINTERS* ep)
{
	EXCEPTION_RECORD* er;
	CONTEXT* ctx;
	MonoContext* sctx;
	LONG res;

	res = EXCEPTION_CONTINUE_EXECUTION;

	er = ep->ExceptionRecord;
	ctx = ep->ContextRecord;
	sctx = g_malloc(sizeof(MonoContext));

	/* Copy Win32 context to UNIX style context */
	sctx->rbp = ctx->Rbp;
	sctx->rsp = ctx->Rsp;
	sctx->rsi = ctx->Rsi;
	sctx->rdi = ctx->Rdi;
	sctx->rip = ctx->Rip;

	switch (er->ExceptionCode) {
	case EXCEPTION_ACCESS_VIOLATION:
		W32_SEH_HANDLE_EX(segv);
		break;
	case EXCEPTION_ILLEGAL_INSTRUCTION:
		W32_SEH_HANDLE_EX(ill);
		break;
	case EXCEPTION_INT_DIVIDE_BY_ZERO:
	case EXCEPTION_INT_OVERFLOW:
	case EXCEPTION_FLT_DIVIDE_BY_ZERO:
	case EXCEPTION_FLT_OVERFLOW:
	case EXCEPTION_FLT_UNDERFLOW:
	case EXCEPTION_FLT_INEXACT_RESULT:
		W32_SEH_HANDLE_EX(fpe);
		break;
	default:
		break;
	}

	/* Copy context back */
	ctx->Rbp = sctx->rbp;
	ctx->Rsp = sctx->rsp;
	ctx->Rsi = sctx->rsi;
	ctx->Rdi = sctx->rdi;
	ctx->Rip = sctx->rip;

	g_free (sctx);

	return res;
}

void win32_seh_init()
{
	old_handler = SetUnhandledExceptionFilter(seh_handler);
}

void win32_seh_cleanup()
{
	if (old_handler) SetUnhandledExceptionFilter(old_handler);
}

void win32_seh_set_handler(int type, MonoW32ExceptionHandler handler)
{
	switch (type) {
	case SIGFPE:
		fpe_handler = handler;
		break;
	case SIGILL:
		ill_handler = handler;
		break;
	case SIGSEGV:
		segv_handler = handler;
		break;
	default:
		break;
	}
}

#endif /* PLATFORM_WIN32 */
/*
 * mono_arch_get_restore_context:
 *
 * Returns a pointer to a method which restores a previously saved sigcontext.
 */
gpointer
mono_arch_get_restore_context (void)
{
	static guint8 *start = NULL;
	static gboolean inited = FALSE;
	guint8 *code;

	if (inited)
		return start;

	/* restore_context (MonoContext *ctx) */

	start = code = mono_global_codeman_reserve (256);

	amd64_mov_reg_reg (code, AMD64_R11, AMD64_ARG_REG1, 8);

	/* Restore all registers except %rip and %r11 */
	amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rax), 8);
	amd64_mov_reg_membase (code, AMD64_RCX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rcx), 8);
	amd64_mov_reg_membase (code, AMD64_RDX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rdx), 8);
	amd64_mov_reg_membase (code, AMD64_RBX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rbx), 8);
	amd64_mov_reg_membase (code, AMD64_RBP, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rbp), 8);
	amd64_mov_reg_membase (code, AMD64_RSI, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rsi), 8);
	amd64_mov_reg_membase (code, AMD64_RDI, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rdi), 8);
	//amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r8), 8);
	//amd64_mov_reg_membase (code, AMD64_R9, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r9), 8);
	//amd64_mov_reg_membase (code, AMD64_R10, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r10), 8);
	amd64_mov_reg_membase (code, AMD64_R12, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r12), 8);
	amd64_mov_reg_membase (code, AMD64_R13, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r13), 8);
	amd64_mov_reg_membase (code, AMD64_R14, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r14), 8);
	amd64_mov_reg_membase (code, AMD64_R15, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r15), 8);

	amd64_mov_reg_membase (code, AMD64_RSP, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rsp), 8);

	/* get return address */
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rip), 8);

	/* jump to the saved IP */
	amd64_jump_reg (code, AMD64_R11);

	inited = TRUE;

	return start;
}
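/*
 * The generated stub behaves like a C function
 *
 *   void restore_context (MonoContext *ctx);
 *
 * which never returns: it reloads the saved registers and jumps to ctx->rip.
 * throw_exception () below obtains it and calls restore_context (&ctx) as the
 * final step of raising an exception.
 */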
/*
 * mono_arch_get_call_filter:
 *
 * Returns a pointer to a method which calls an exception filter. We
 * also use this function to call finally handlers (we pass NULL as
 * @exc object in this case).
 */
gpointer
mono_arch_get_call_filter (void)
{
	static guint8 *start;
	static gboolean inited = FALSE;
	int i;
	guint8 *code;

	if (inited)
		return start;

	inited = TRUE;

	start = code = mono_global_codeman_reserve (128);

	/* call_filter (MonoContext *ctx, unsigned long eip) */

	/* Alloc new frame */
	amd64_push_reg (code, AMD64_RBP);
	amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, 8);

	/* Save callee saved regs */
	for (i = 0; i < AMD64_NREG; ++i)
		if (AMD64_IS_CALLEE_SAVED_REG (i)) {
			amd64_push_reg (code, i);
		}

	/* Save RBP */
	amd64_push_reg (code, AMD64_RBP);

	/* Make stack misaligned, the call will make it aligned again */
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);

	/* set new RBP */
	amd64_mov_reg_membase (code, AMD64_RBP, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rbp), 8);
	/* load callee saved regs */
	amd64_mov_reg_membase (code, AMD64_RBX, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rbx), 8);
	amd64_mov_reg_membase (code, AMD64_R12, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r12), 8);
	amd64_mov_reg_membase (code, AMD64_R13, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r13), 8);
	amd64_mov_reg_membase (code, AMD64_R14, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r14), 8);
	amd64_mov_reg_membase (code, AMD64_R15, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r15), 8);
#ifdef PLATFORM_WIN32
	amd64_mov_reg_membase (code, AMD64_RDI, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rdi), 8);
	amd64_mov_reg_membase (code, AMD64_RSI, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rsi), 8);
#endif

	/* call the handler */
	amd64_call_reg (code, AMD64_ARG_REG2);

	amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);

	/* restore RBP */
	amd64_pop_reg (code, AMD64_RBP);

	/* Restore callee saved regs */
	for (i = AMD64_NREG; i >= 0; --i)
		if (AMD64_IS_CALLEE_SAVED_REG (i))
			amd64_pop_reg (code, i);

	amd64_leave (code);
	amd64_ret (code);

	g_assert ((code - start) < 128);

	return start;
}
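/*
 * Sketch of the generated stub's behavior (the int return type is an
 * assumption; the result is whatever the handler leaves in %rax):
 *
 *   int call_filter (MonoContext *ctx, gpointer handler_ip);
 *
 * It saves the caller's callee-saved registers, switches %rbp and the
 * callee-saved registers over to the values captured in @ctx, calls the
 * filter/finally block at @handler_ip, then restores the caller's state.
 */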
#ifdef PLATFORM_WIN32
static void
throw_exception (MonoObject *exc, guint64 rip, guint64 rsp,
		 guint64 rbx, guint64 rbp, guint64 r12, guint64 r13,
		 guint64 r14, guint64 r15, guint64 rdi, guint64 rsi, guint64 rethrow)
#else
static void
throw_exception (MonoObject *exc, guint64 rip, guint64 rsp,
		 guint64 rbx, guint64 rbp, guint64 r12, guint64 r13,
		 guint64 r14, guint64 r15, guint64 rethrow)
#endif
{
	static void (*restore_context) (MonoContext *);
	MonoContext ctx;

	if (!restore_context)
		restore_context = mono_arch_get_restore_context ();

	ctx.rsp = rsp;
	ctx.rip = rip;
	ctx.rbx = rbx;
	ctx.rbp = rbp;
	ctx.r12 = r12;
	ctx.r13 = r13;
	ctx.r14 = r14;
	ctx.r15 = r15;
#ifdef PLATFORM_WIN32
	ctx.rdi = rdi;
	ctx.rsi = rsi;
#endif

	if (!rethrow && mono_debugger_throw_exception ((gpointer)(rip - 8), (gpointer)rsp, exc)) {
		/*
		 * The debugger wants us to stop on the `throw' instruction.
		 * By the time we get here, it already inserted a breakpoint on
		 * rip - 8 (which is the address of the `mov %r15,%rdi ; callq throw').
		 */

		/*
		 * In case of a rethrow, the JIT is emitting code like this:
		 *
		 *    mov    0xffffffffffffffd0(%rbp),%rax
		 *    mov    %rax,%rdi
		 *    callq  throw
		 *
		 * Here, restore_context() wouldn't restore the %rax register correctly.
		 */
		ctx.rip = rip - 8;
		restore_context (&ctx);
		g_assert_not_reached ();
	}

	/* adjust rip so that it points into the call instruction */
	ctx.rip -= 1;

	if (mono_object_isinst (exc, mono_defaults.exception_class)) {
		MonoException *mono_ex = (MonoException*)exc;
		if (!rethrow)
			mono_ex->stack_trace = NULL;
	}
	mono_handle_exception (&ctx, exc, (gpointer)rip, FALSE);
	restore_context (&ctx);

	g_assert_not_reached ();
}
static gpointer
get_throw_trampoline (gboolean rethrow)
{
	guint8* start;
	guint8 *code;

	start = code = mono_global_codeman_reserve (64);

	/* Exception */
	amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_ARG_REG1, 8);
	/* Caller ip */
	amd64_mov_reg_membase (code, AMD64_ARG_REG2, AMD64_RSP, 0, 8);
	/* Caller sp */
	amd64_lea_membase (code, AMD64_ARG_REG3, AMD64_RSP, 8);

#ifdef PLATFORM_WIN32
	/* Callee saved regs */
	amd64_mov_reg_reg (code, AMD64_R9, AMD64_RBX, 8);
	/* reverse order */
	amd64_push_imm (code, rethrow);
	amd64_push_reg (code, AMD64_RSI);
	amd64_push_reg (code, AMD64_RDI);
	amd64_push_reg (code, AMD64_R15);
	amd64_push_reg (code, AMD64_R14);
	amd64_push_reg (code, AMD64_R13);
	amd64_push_reg (code, AMD64_R12);
	amd64_push_reg (code, AMD64_RBP);
	/* home space for the 4 register args + alignment */
	amd64_push_imm (code, 0);
	amd64_push_imm (code, 0);
	amd64_push_imm (code, 0);
	amd64_push_imm (code, 0);
#else
	/* Callee saved regs */
	amd64_mov_reg_reg (code, AMD64_RCX, AMD64_RBX, 8);
	amd64_mov_reg_reg (code, AMD64_R8, AMD64_RBP, 8);
	amd64_mov_reg_reg (code, AMD64_R9, AMD64_R12, 8);
	/* align stack */
	amd64_push_imm (code, 0);
	/* reverse order */
	amd64_push_imm (code, rethrow);
	amd64_push_reg (code, AMD64_R15);
	amd64_push_reg (code, AMD64_R14);
	amd64_push_reg (code, AMD64_R13);
#endif

	amd64_mov_reg_imm (code, AMD64_R11, throw_exception);
	amd64_call_reg (code, AMD64_R11);
	amd64_breakpoint (code);

	g_assert ((code - start) < 64);

	return start;
}
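/*
 * How the pieces line up: when managed code calls this trampoline, the
 * return address left on the stack is the throw site. The trampoline turns
 * it into throw_exception ()'s `rip` argument, passes the pre-call stack
 * pointer as `rsp`, and forwards the callee-saved registers so that
 * throw_exception () can rebuild a MonoContext for the throw site.
 */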
/*
 * mono_arch_get_throw_exception:
 *
 * Returns a function pointer which can be used to raise
 * exceptions. The returned function has the following
 * signature: void (*func) (MonoException *exc);
 */
gpointer
mono_arch_get_throw_exception (void)
{
	static guint8* start;
	static gboolean inited = FALSE;

	if (inited)
		return start;

	start = get_throw_trampoline (FALSE);

	inited = TRUE;

	return start;
}

gpointer
mono_arch_get_rethrow_exception (void)
{
	static guint8* start;
	static gboolean inited = FALSE;

	if (inited)
		return start;

	start = get_throw_trampoline (TRUE);

	inited = TRUE;

	return start;
}

gpointer
mono_arch_get_throw_exception_by_name (void)
{
	static guint8* start;
	static gboolean inited = FALSE;
	guint8 *code;

	if (inited)
		return start;

	start = code = mono_global_codeman_reserve (64);

	/* Not used on amd64 */
	amd64_breakpoint (code);

	inited = TRUE;

	return start;
}

/*
 * mono_arch_get_throw_corlib_exception:
 *
 * Returns a function pointer which can be used to raise
 * corlib exceptions. The returned function has the following
 * signature: void (*func) (guint32 ex_token, guint32 offset);
 * Here, offset is the offset which needs to be subtracted from the caller IP
 * to get the IP of the throw. Passing the offset has the advantage that it
 * needs no relocations in the caller.
 */
gpointer
mono_arch_get_throw_corlib_exception (void)
{
	static guint8* start;
	static gboolean inited = FALSE;
	guint8 *code;
	guint64 throw_ex;

	if (inited)
		return start;

	inited = TRUE;

	start = code = mono_global_codeman_reserve (64);

	/* Save the offset (arg 2); this also aligns the stack for the call below */
	amd64_push_reg (code, AMD64_ARG_REG2);

	/* Call exception_from_token */
	amd64_mov_reg_reg (code, AMD64_ARG_REG2, AMD64_ARG_REG1, 8);
	amd64_mov_reg_imm (code, AMD64_ARG_REG1, mono_defaults.exception_class->image);
	amd64_mov_reg_imm (code, AMD64_R11, mono_exception_from_token);
	amd64_call_reg (code, AMD64_R11);

	/* Compute throw_ip */
	amd64_pop_reg (code, AMD64_ARG_REG2);
	/* return addr */
	amd64_pop_reg (code, AMD64_ARG_REG3);
	amd64_alu_reg_reg (code, X86_SUB, AMD64_ARG_REG3, AMD64_ARG_REG2);

	/* Put the throw_ip at the top of the misaligned stack */
	amd64_push_reg (code, AMD64_ARG_REG3);

	throw_ex = (guint64)mono_arch_get_throw_exception ();

	/* Call throw_exception */
	amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_RAX, 8);
	amd64_mov_reg_imm (code, AMD64_R11, throw_ex);
	/* The original IP is on the stack */
	amd64_jump_reg (code, AMD64_R11);

	g_assert ((code - start) < 64);

	return start;
}
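/*
 * Hypothetical JIT-side usage sketch: at a throw site the JIT emits, in
 * effect,
 *
 *   void (*func) (guint32 ex_token, guint32 offset) =
 *       mono_arch_get_throw_corlib_exception ();
 *   func (ex_token, caller_ip - throw_ip);
 *
 * The helper then recovers throw_ip by subtracting `offset` from the return
 * address, so the emitted call needs no relocation.
 */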
/* mono_arch_find_jit_info:
 *
 * This function is used to gather information from @ctx. It returns the
 * MonoJitInfo of the corresponding function, unwinds one stack frame and
 * stores the resulting context into @new_ctx. It also stores a string
 * describing the stack location into @trace (if not NULL), and modifies
 * the @lmf if necessary. @native_offset returns the IP offset from the
 * start of the function or -1 if that info is not available.
 */
MonoJitInfo *
mono_arch_find_jit_info (MonoDomain *domain, MonoJitTlsData *jit_tls, MonoJitInfo *res, MonoJitInfo *prev_ji, MonoContext *ctx,
			 MonoContext *new_ctx, char **trace, MonoLMF **lmf, int *native_offset,
			 gboolean *managed)
{
	MonoJitInfo *ji;
	int i;
	gpointer ip = MONO_CONTEXT_GET_IP (ctx);

	/* Avoid costly table lookup during stack overflow */
	if (prev_ji && (ip > prev_ji->code_start && ((guint8*)ip < ((guint8*)prev_ji->code_start) + prev_ji->code_size)))
		ji = prev_ji;
	else
		ji = mono_jit_info_table_find (domain, ip);

	if (managed)
		*managed = FALSE;

	*new_ctx = *ctx;

	if (ji != NULL) {
		int offset;
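		/*
		 * The JIT packs unwind information into ji->used_regs: bit 31 is
		 * set for frame-pointer-less (omit_fp) methods, bits 16-30 hold
		 * the stack frame size, and the low bits form the callee-saved
		 * register mask tested in the restore loop below.
		 */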
		gboolean omit_fp = (ji->used_regs & (1 << 31)) > 0;

		if (managed)
			if (!ji->method->wrapper_type)
				*managed = TRUE;

		/*
		 * If a method has save_lmf set, then register save/restore code is not generated
		 * by the JIT, so we have to restore callee saved registers from the lmf.
		 */
		if (ji->method->save_lmf) {
			MonoLMF *lmf_addr;

			/*
			 * *lmf might not point to the LMF pushed by this method, so compute the LMF
			 * address from the stack instead.
			 */
			if (omit_fp)
				lmf_addr = (MonoLMF*)ctx->rsp;
			else
				lmf_addr = (MonoLMF*)(ctx->rbp - sizeof (MonoLMF));

			new_ctx->rbp = lmf_addr->rbp;
			new_ctx->rbx = lmf_addr->rbx;
			new_ctx->r12 = lmf_addr->r12;
			new_ctx->r13 = lmf_addr->r13;
			new_ctx->r14 = lmf_addr->r14;
			new_ctx->r15 = lmf_addr->r15;
		}
		else {
			offset = omit_fp ? 0 : -1;
			/* restore callee saved registers */
			for (i = 0; i < AMD64_NREG; i ++)
				if (AMD64_IS_CALLEE_SAVED_REG (i) && (ji->used_regs & (1 << i))) {
					guint64 reg;

					if (omit_fp) {
						reg = *((guint64*)ctx->rsp + offset);
						offset ++;
					} else {
						reg = *((guint64 *)ctx->rbp + offset);
						offset --;
					}

					switch (i) {
					case AMD64_RBX: new_ctx->rbx = reg; break;
					case AMD64_RBP: new_ctx->rbp = reg; break;
					case AMD64_R12: new_ctx->r12 = reg; break;
					case AMD64_R13: new_ctx->r13 = reg; break;
					case AMD64_R14: new_ctx->r14 = reg; break;
					case AMD64_R15: new_ctx->r15 = reg; break;
					default:
						g_assert_not_reached ();
					}
				}
		}

		if (*lmf && ((*lmf) != jit_tls->first_lmf) && (MONO_CONTEXT_GET_SP (ctx) >= (gpointer)(*lmf)->rsp)) {
			/* remove any unused lmf */
			*lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~1);
		}
		if (omit_fp) {
			/* Adjust by the stack frame size */
			new_ctx->rsp += (ji->used_regs >> 16) & (0x7fff);
			new_ctx->rip = *((guint64 *)new_ctx->rsp) - 1;
			/* Pop return address */
			new_ctx->rsp += sizeof (gpointer);
		}
		else {
			/* Pop RBP and the return address */
			new_ctx->rsp = ctx->rbp + (2 * sizeof (gpointer));
			/* we subtract 1, so that the IP points into the call instruction */
			new_ctx->rip = *((guint64 *)ctx->rbp + 1) - 1;
			new_ctx->rbp = *((guint64 *)ctx->rbp);
		}

		/* Pop arguments off the stack */
		{
			MonoJitArgumentInfo *arg_info = g_newa (MonoJitArgumentInfo, mono_method_signature (ji->method)->param_count + 1);

			guint32 stack_to_pop = mono_arch_get_argument_info (mono_method_signature (ji->method), mono_method_signature (ji->method)->param_count, arg_info);
			new_ctx->rsp += stack_to_pop;
		}

		return ji;
	}
	else if (*lmf) {
		guint64 rip;

		if (((guint64)(*lmf)->previous_lmf) & 1) {
			/* This LMF has the rip field set */
			rip = (*lmf)->rip;
		} else if ((*lmf)->rsp == 0) {
			/* Top LMF entry */
			return (gpointer)-1;
		} else {
			/*
			 * The rsp field is set just before the call which transitioned to native
			 * code. Obtain the rip from the stack.
			 */
			rip = *(guint64*)((*lmf)->rsp - sizeof (gpointer));
		}

		ji = mono_jit_info_table_find (domain, (gpointer)rip);
		if (!ji) {
			if (!(*lmf)->method)
				return (gpointer)-1;
			/* Trampoline lmf frame */
			memset (res, 0, sizeof (MonoJitInfo));
			res->method = (*lmf)->method;
		}

		new_ctx->rip = rip;
		new_ctx->rbp = (*lmf)->rbp;
		new_ctx->rsp = (*lmf)->rsp;

		new_ctx->rbx = (*lmf)->rbx;
		new_ctx->r12 = (*lmf)->r12;
		new_ctx->r13 = (*lmf)->r13;
		new_ctx->r14 = (*lmf)->r14;
		new_ctx->r15 = (*lmf)->r15;

		*lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~1);

		return ji ? ji : res;
	}

	return NULL;
}

/**
 * mono_arch_handle_exception:
 *
 * @ctx: saved processor state
 * @obj: the exception object
 */
gboolean
mono_arch_handle_exception (void *sigctx, gpointer obj, gboolean test_only)
{
	MonoContext mctx;

	mono_arch_sigctx_to_monoctx (sigctx, &mctx);

	mono_handle_exception (&mctx, obj, MONO_CONTEXT_GET_IP (&mctx), test_only);

	mono_arch_monoctx_to_sigctx (&mctx, sigctx);

	return TRUE;
}

#ifdef MONO_ARCH_USE_SIGACTION
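/*
 * gregs_from_ucontext () returns a pointer to the general-purpose register
 * array inside a ucontext_t. Where that array lives differs by platform:
 * BSD-style systems keep the registers directly in uc_mcontext, while Linux
 * nests them in uc_mcontext.gregs (the __FreeBSD__ guard below is a
 * representative assumption for the BSD case).
 */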
static inline guint64*
gregs_from_ucontext (ucontext_t *ctx)
{
#if defined(__FreeBSD__)
	/* assumption: BSD-style layout, registers live directly in uc_mcontext */
	guint64 *gregs = (guint64 *) &ctx->uc_mcontext;
#else
	guint64 *gregs = (guint64 *) &ctx->uc_mcontext.gregs;
#endif

	return gregs;
}
#endif /* MONO_ARCH_USE_SIGACTION */

void
mono_arch_sigctx_to_monoctx (void *sigctx, MonoContext *mctx)
{
#ifdef MONO_ARCH_USE_SIGACTION
	ucontext_t *ctx = (ucontext_t*)sigctx;

	guint64 *gregs = gregs_from_ucontext (ctx);

	mctx->rax = gregs [REG_RAX];
	mctx->rbx = gregs [REG_RBX];
	mctx->rcx = gregs [REG_RCX];
	mctx->rdx = gregs [REG_RDX];
	mctx->rbp = gregs [REG_RBP];
	mctx->rsp = gregs [REG_RSP];
	mctx->rsi = gregs [REG_RSI];
	mctx->rdi = gregs [REG_RDI];
	mctx->rip = gregs [REG_RIP];
	mctx->r12 = gregs [REG_R12];
	mctx->r13 = gregs [REG_R13];
	mctx->r14 = gregs [REG_R14];
	mctx->r15 = gregs [REG_R15];
#else
	MonoContext *ctx = (MonoContext *)sigctx;

	mctx->rax = ctx->rax;
	mctx->rbx = ctx->rbx;
	mctx->rcx = ctx->rcx;
	mctx->rdx = ctx->rdx;
	mctx->rbp = ctx->rbp;
	mctx->rsp = ctx->rsp;
	mctx->rsi = ctx->rsi;
	mctx->rdi = ctx->rdi;
	mctx->rip = ctx->rip;
	mctx->r12 = ctx->r12;
	mctx->r13 = ctx->r13;
	mctx->r14 = ctx->r14;
	mctx->r15 = ctx->r15;
#endif
}

void
mono_arch_monoctx_to_sigctx (MonoContext *mctx, void *sigctx)
{
#ifdef MONO_ARCH_USE_SIGACTION
	ucontext_t *ctx = (ucontext_t*)sigctx;

	guint64 *gregs = gregs_from_ucontext (ctx);

	gregs [REG_RAX] = mctx->rax;
	gregs [REG_RBX] = mctx->rbx;
	gregs [REG_RCX] = mctx->rcx;
	gregs [REG_RDX] = mctx->rdx;
	gregs [REG_RBP] = mctx->rbp;
	gregs [REG_RSP] = mctx->rsp;
	gregs [REG_RSI] = mctx->rsi;
	gregs [REG_RDI] = mctx->rdi;
	gregs [REG_RIP] = mctx->rip;
	gregs [REG_R12] = mctx->r12;
	gregs [REG_R13] = mctx->r13;
	gregs [REG_R14] = mctx->r14;
	gregs [REG_R15] = mctx->r15;
#else
	MonoContext *ctx = (MonoContext *)sigctx;

	ctx->rax = mctx->rax;
	ctx->rbx = mctx->rbx;
	ctx->rcx = mctx->rcx;
	ctx->rdx = mctx->rdx;
	ctx->rbp = mctx->rbp;
	ctx->rsp = mctx->rsp;
	ctx->rsi = mctx->rsi;
	ctx->rdi = mctx->rdi;
	ctx->rip = mctx->rip;
	ctx->r12 = mctx->r12;
	ctx->r13 = mctx->r13;
	ctx->r14 = mctx->r14;
	ctx->r15 = mctx->r15;
#endif
}

gpointer
mono_arch_ip_from_context (void *sigctx)
{
#ifdef MONO_ARCH_USE_SIGACTION
	ucontext_t *ctx = (ucontext_t*)sigctx;

	guint64 *gregs = gregs_from_ucontext (ctx);

	return (gpointer)gregs [REG_RIP];
#else
	MonoContext *ctx = sigctx;
	return (gpointer)ctx->rip;
#endif
}

static void
restore_soft_guard_pages (void)
{
	MonoJitTlsData *jit_tls = TlsGetValue (mono_jit_tls_id);
	if (jit_tls->stack_ovf_guard_base)
		mono_mprotect (jit_tls->stack_ovf_guard_base, jit_tls->stack_ovf_guard_size, MONO_MMAP_NONE);
}

/*
 * this function modifies mctx so that when it is restored, it
 * won't execute starting at mctx.rip, but in a function that
 * will restore the protection on the soft-guard pages and return back to
 * continue at mctx.rip.
 */
static void
prepare_for_guard_pages (MonoContext *mctx)
{
	gpointer *sp;

	sp = (gpointer)(mctx->rsp);
	sp -= 1;
	/* the return addr */
	sp [0] = (gpointer)(mctx->rip);
	mctx->rip = (unsigned long)restore_soft_guard_pages;
	mctx->rsp = (unsigned long)sp;
}
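/*
 * After prepare_for_guard_pages () the restored context looks as if
 * restore_soft_guard_pages () had been called from the fault site:
 *
 *   mctx->rsp -> [ original mctx->rip ]   <- fake return address
 *   mctx->rip  = restore_soft_guard_pages
 *
 * so the helper re-protects the guard pages and its `ret` resumes execution
 * at the original IP.
 */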
static void
altstack_handle_and_restore (void *sigctx, gpointer obj, gboolean stack_ovf)
{
	void (*restore_context) (MonoContext *);
	MonoContext mctx;

	restore_context = mono_arch_get_restore_context ();
	mono_arch_sigctx_to_monoctx (sigctx, &mctx);
	mono_handle_exception (&mctx, obj, MONO_CONTEXT_GET_IP (&mctx), FALSE);
	if (stack_ovf)
		prepare_for_guard_pages (&mctx);
	restore_context (&mctx);
}

void
mono_arch_handle_altstack_exception (void *sigctx, gpointer fault_addr, gboolean stack_ovf)
{
#ifdef MONO_ARCH_USE_SIGACTION
	MonoException *exc = NULL;
	ucontext_t *ctx = (ucontext_t*)sigctx;
	guint64 *gregs = gregs_from_ucontext (ctx);
	MonoJitInfo *ji = mono_jit_info_table_find (mono_domain_get (), (gpointer)gregs [REG_RIP]);
	gpointer *sp;
	int frame_size;

	if (stack_ovf)
		exc = mono_domain_get ()->stack_overflow_ex;
	if (!ji)
		mono_handle_native_sigsegv (SIGSEGV, sigctx);

	/* setup a call frame on the real stack so that control is returned there
	 * and exception handling can continue.
	 * The frame looks like:
	 *   ucontext struct
	 *   ...
	 *   return ip
	 * 128 is the size of the red zone
	 */
	frame_size = sizeof (ucontext_t) + sizeof (gpointer) * 4 + 128;
	frame_size = ALIGN_TO (frame_size, 16);
	sp = (gpointer)(gregs [REG_RSP] & ~15);
	sp = (gpointer)((char*)sp - frame_size);
	/* the arguments must be aligned */
	sp [-1] = (gpointer)gregs [REG_RIP];
	/* may need to adjust pointers in the new struct copy, depending on the OS */
	memcpy (sp + 4, ctx, sizeof (ucontext_t));
	/* on return from the signal handler execution starts in altstack_handle_and_restore() */
	gregs [REG_RIP] = (unsigned long)altstack_handle_and_restore;
	gregs [REG_RSP] = (unsigned long)(sp - 1);
	gregs [REG_RDI] = (unsigned long)(sp + 4);
	gregs [REG_RSI] = (guint64)exc;
	gregs [REG_RDX] = stack_ovf;
#endif
}
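/*
 * Note: REG_RDI/REG_RSI/REG_RDX are the first three SysV AMD64 integer
 * argument registers, so when the signal handler returns, execution resumes
 * as if altstack_handle_and_restore (copied_ctx, exc, stack_ovf) had been
 * called on the real stack, with sp [-1] acting as its return address.
 */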
static guint64
get_original_ip (void)
{
	MonoLMF *lmf = mono_get_lmf ();

	g_assert (lmf);

	/* Reset the change to previous_lmf */
	lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf & ~1);

	return lmf->rip;
}
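/*
 * The low bit of lmf->previous_lmf doubles as a flag: when set, lmf->rip
 * holds the hijacked return address (see mono_arch_notify_pending_exc ()
 * below). get_original_ip () clears the flag and hands that address back.
 */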
static gpointer
get_throw_pending_exception (void)
{
	static guint8* start;
	static gboolean inited = FALSE;
	guint8 *code;
	guint8 *br [1];
	gpointer throw_trampoline;

	if (inited)
		return start;

	start = code = mono_global_codeman_reserve (128);

	/* We are in the frame of a managed method after a call */
	/*
	 * We would like to throw the pending exception in such a way that it looks to
	 * be thrown from the managed method.
	 */

	/* Save registers which might contain the return value of the call */
	amd64_push_reg (code, AMD64_RAX);
	amd64_push_reg (code, AMD64_RDX);

	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
	amd64_movsd_membase_reg (code, AMD64_RSP, 0, AMD64_XMM0);

	/* Align stack */
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);

	/* Obtain the pending exception */
	amd64_mov_reg_imm (code, AMD64_R11, mono_thread_get_and_clear_pending_exception);
	amd64_call_reg (code, AMD64_R11);

	/* Check if it is NULL, and branch */
	amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0);
	br[0] = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE);

	/* exc != NULL branch */

	/* Save the exc on the stack */
	amd64_push_reg (code, AMD64_RAX);
	/* Align stack */
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);

	/* Obtain the original ip and clear the flag in previous_lmf */
	amd64_mov_reg_imm (code, AMD64_R11, get_original_ip);
	amd64_call_reg (code, AMD64_R11);

	/* Load the saved exception */
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, 8, 8);

	/* Pop saved stuff from the stack */
	amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 6 * 8);

	/* Setup arguments for the throw trampoline */
	/* Exception */
	amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_R11, 8);
	/* The trampoline expects the caller ip to be pushed on the stack */
	amd64_push_reg (code, AMD64_RAX);

	/* Call the throw trampoline */
	throw_trampoline = mono_arch_get_throw_exception ();
	amd64_mov_reg_imm (code, AMD64_R11, throw_trampoline);
	/* We use a jump instead of a call so we can push the original ip on the stack */
	amd64_jump_reg (code, AMD64_R11);

	/* ex == NULL branch */
	mono_amd64_patch (br [0], code);

	/* Obtain the original ip and clear the flag in previous_lmf */
	amd64_mov_reg_imm (code, AMD64_R11, get_original_ip);
	amd64_call_reg (code, AMD64_R11);
	amd64_mov_reg_reg (code, AMD64_R11, AMD64_RAX, 8);

	/* Restore registers */
	amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
	amd64_movsd_reg_membase (code, AMD64_XMM0, AMD64_RSP, 0);
	amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
	amd64_pop_reg (code, AMD64_RDX);
	amd64_pop_reg (code, AMD64_RAX);

	/* Return to original code */
	amd64_jump_reg (code, AMD64_R11);

	g_assert ((code - start) < 128);

	inited = TRUE;

	return start;
}

/*
 * Called when a thread receives an async exception while executing unmanaged code.
 * Instead of checking for this exception in the managed-to-native wrapper, we hijack
 * the return address on the stack to point to a helper routine which throws the
 * exception.
 */
void
mono_arch_notify_pending_exc (void)
{
	MonoLMF *lmf = mono_get_lmf ();

	if (lmf->rsp == 0)
		/* Top LMF entry, nothing to hijack */
		return;

	if ((guint64)lmf->previous_lmf & 1)
		/* Already hijacked or trampoline LMF entry */
		return;

	/* lmf->rsp is set just before making the call which transitions to unmanaged code */
	lmf->rip = *(guint64*)(lmf->rsp - 8);
	/* Signal that lmf->rip is set */
	lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf | 1);

	*(gpointer*)(lmf->rsp - 8) = get_throw_pending_exception ();
}
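/*
 * From here on, when the hijacked native call returns, it "returns" into
 * get_throw_pending_exception () instead of the original call site: that stub
 * saves the call's return value, asks the runtime for the pending exception,
 * and either throws it with the original IP as the throw site or restores the
 * registers and jumps back to lmf->rip (see get_original_ip () above).
 */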