 * exceptions-amd64.c: exception support for AMD64
 * Dietmar Maurer (dietmar@ximian.com)
 * (C) 2001 Ximian, Inc.
14 #ifndef PLATFORM_WIN32
15 #include <sys/ucontext.h>
18 #include <mono/arch/amd64/amd64-codegen.h>
19 #include <mono/metadata/appdomain.h>
20 #include <mono/metadata/tabledefs.h>
21 #include <mono/metadata/threads.h>
22 #include <mono/metadata/threads-types.h>
23 #include <mono/metadata/debug-helpers.h>
24 #include <mono/metadata/exception.h>
25 #include <mono/metadata/gc-internal.h>
26 #include <mono/metadata/mono-debug.h>
27 #include <mono/utils/mono-mmap.h>
30 #include "mini-amd64.h"
/* Round VAL up to the next multiple of ALIGN (ALIGN must be a power of two). */
32 #define ALIGN_TO(val,align) (((val) + ((align) - 1)) & ~((align) - 1))
/* Per-exception-category handlers, installed by win32_seh_set_handler(). */
35 static MonoW32ExceptionHandler fpe_handler;
36 static MonoW32ExceptionHandler ill_handler;
37 static MonoW32ExceptionHandler segv_handler;
/* The filter that was active before win32_seh_init(); restored on cleanup. */
39 static LPTOP_LEVEL_EXCEPTION_FILTER old_handler;
/* Invoke the registered handler for exception category _ex (fpe/ill/segv),
 * if one is set; sctx is expected in the caller's scope.
 * NOTE(review): sctx (a MonoContext*) is cast to int here, which truncates a
 * pointer on 64-bit — confirm the MonoW32ExceptionHandler signature. */
41 #define W32_SEH_HANDLE_EX(_ex) \
42 if (_ex##_handler) _ex##_handler((int)sctx)
45 * Unhandled Exception Filter
46 * Top-level per-process exception handler.
/* Installed via SetUnhandledExceptionFilter() in win32_seh_init(). It copies
 * the Win32 CONTEXT into a MonoContext (sctx), dispatches to the matching
 * per-category mono handler via W32_SEH_HANDLE_EX, then copies the (possibly
 * modified) MonoContext back into the CONTEXT so execution can resume there.
 * (Several lines of this function are elided in this excerpt, including the
 * declarations, the context copy-in, and the case break statements.) */
48 LONG CALLBACK seh_handler(EXCEPTION_POINTERS* ep)
55 res = EXCEPTION_CONTINUE_EXECUTION;
57 er = ep->ExceptionRecord;
58 ctx = ep->ContextRecord;
/* NOTE(review): no matching g_free for sctx is visible in this excerpt —
 * confirm against the full source whether it is released. */
59 sctx = g_malloc(sizeof(MonoContext));
61 /* Copy Win32 context to UNIX style context */
76 switch (er->ExceptionCode) {
77 case EXCEPTION_ACCESS_VIOLATION:
78 W32_SEH_HANDLE_EX(segv);
80 case EXCEPTION_ILLEGAL_INSTRUCTION:
81 W32_SEH_HANDLE_EX(ill);
83 case EXCEPTION_INT_DIVIDE_BY_ZERO:
84 case EXCEPTION_INT_OVERFLOW:
85 case EXCEPTION_FLT_DIVIDE_BY_ZERO:
86 case EXCEPTION_FLT_OVERFLOW:
87 case EXCEPTION_FLT_UNDERFLOW:
88 case EXCEPTION_FLT_INEXACT_RESULT:
/* All arithmetic faults share the single fpe handler. */
89 W32_SEH_HANDLE_EX(fpe);
95 /* Copy context back */
/* Callee-saved registers plus the instruction pointer. */
100 ctx->Rbx = sctx->rbx;
101 ctx->Rbp = sctx->rbp;
102 ctx->R12 = sctx->r12;
103 ctx->R13 = sctx->r13;
104 ctx->R14 = sctx->r14;
105 ctx->R15 = sctx->r15;
106 ctx->Rip = sctx->rip;
108 /* Volatile But should not matter?*/
109 ctx->Rax = sctx->rax;
110 ctx->Rcx = sctx->rcx;
111 ctx->Rdx = sctx->rdx;
/* Install seh_handler as the process-wide unhandled-exception filter,
 * saving the previously installed filter in old_handler. */
118 void win32_seh_init()
120 old_handler = SetUnhandledExceptionFilter(seh_handler);
/* Undo win32_seh_init(): re-install the previously saved filter, if any. */
123 void win32_seh_cleanup()
125 if (old_handler) SetUnhandledExceptionFilter(old_handler);
/* Register @handler for one exception category; the dispatch on @type
 * (switch/case lines elided in this excerpt) selects fpe, ill or segv. */
128 void win32_seh_set_handler(int type, MonoW32ExceptionHandler handler)
132 fpe_handler = handler;
135 ill_handler = handler;
138 segv_handler = handler;
145 #endif /* PLATFORM_WIN32 */
148 * mono_arch_get_restore_context:
150 * Returns a pointer to a method which restores a previously saved sigcontext.
/* Emits a trampoline with signature restore_context (MonoContext *ctx): it
 * reloads every register saved in *ctx (using %r11 as scratch for the ctx
 * pointer) and jumps to the saved %rip, never returning to the caller. */
153 mono_arch_get_restore_context_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
155 guint8 *start = NULL;
158 /* restore_contect (MonoContext *ctx) */
162 start = code = mono_global_codeman_reserve (256);
/* Keep the MonoContext pointer in the scratch register %r11. */
164 amd64_mov_reg_reg (code, AMD64_R11, AMD64_ARG_REG1, 8);
166 /* Restore all registers except %rip and %r11 */
167 amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rax), 8);
168 amd64_mov_reg_membase (code, AMD64_RCX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rcx), 8);
169 amd64_mov_reg_membase (code, AMD64_RDX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rdx), 8);
170 amd64_mov_reg_membase (code, AMD64_RBX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rbx), 8);
171 amd64_mov_reg_membase (code, AMD64_RBP, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rbp), 8);
172 amd64_mov_reg_membase (code, AMD64_RSI, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rsi), 8);
173 amd64_mov_reg_membase (code, AMD64_RDI, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rdi), 8);
174 //amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r8), 8);
175 //amd64_mov_reg_membase (code, AMD64_R9, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r9), 8);
176 //amd64_mov_reg_membase (code, AMD64_R10, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r10), 8);
177 amd64_mov_reg_membase (code, AMD64_R12, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r12), 8);
178 amd64_mov_reg_membase (code, AMD64_R13, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r13), 8);
179 amd64_mov_reg_membase (code, AMD64_R14, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r14), 8);
180 amd64_mov_reg_membase (code, AMD64_R15, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r15), 8);
/* Under valgrind, load rip into %r11 BEFORE switching %rsp so valgrind does
 * not see a read below the new stack pointer. */
182 if (mono_running_on_valgrind ()) {
183 /* Prevent 'Address 0x... is just below the stack ptr.' errors */
184 amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rsp), 8);
185 amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rip), 8);
186 amd64_mov_reg_reg (code, AMD64_RSP, AMD64_R8, 8);
/* Presumably the non-valgrind else-branch; the brace lines are elided here. */
188 amd64_mov_reg_membase (code, AMD64_RSP, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rsp), 8);
189 /* get return address */
190 amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rip), 8);
193 /* jump to the saved IP */
194 amd64_jump_reg (code, AMD64_R11);
196 mono_arch_flush_icache (start, code - start);
198 *code_size = code - start;
204 * mono_arch_get_call_filter:
206 * Returns a pointer to a method which calls an exception filter. We
207 * also use this function to call finally handlers (we pass NULL as
208 * @exc object in this case).
/* Emits call_filter (MonoContext *ctx, unsigned long eip): saves the caller's
 * callee-saved registers, loads the handler frame's registers from *ctx,
 * calls the filter/finally at @eip, then restores the caller's registers. */
211 mono_arch_get_call_filter_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
220 start = code = mono_global_codeman_reserve (128);
222 /* call_filter (MonoContext *ctx, unsigned long eip) */
225 /* Alloc new frame */
226 amd64_push_reg (code, AMD64_RBP);
227 amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, 8);
229 /* Save callee saved regs */
231 for (i = 0; i < AMD64_NREG; ++i)
232 if (AMD64_IS_CALLEE_SAVED_REG (i)) {
233 amd64_push_reg (code, i);
239 amd64_push_reg (code, AMD64_RBP);
241 /* Make stack misaligned, the call will make it aligned again */
243 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
/* Load the handler frame's %rbp and callee-saved registers from *ctx so the
 * filter body sees the frame it was compiled against. */
246 amd64_mov_reg_membase (code, AMD64_RBP, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rbp), 8);
247 /* load callee saved regs */
248 amd64_mov_reg_membase (code, AMD64_RBX, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rbx), 8);
249 amd64_mov_reg_membase (code, AMD64_R12, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r12), 8);
250 amd64_mov_reg_membase (code, AMD64_R13, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r13), 8);
251 amd64_mov_reg_membase (code, AMD64_R14, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r14), 8);
252 amd64_mov_reg_membase (code, AMD64_R15, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r15), 8);
/* On Win64, %rdi/%rsi are callee-saved too, so restore them as well. */
253 #ifdef PLATFORM_WIN32
254 amd64_mov_reg_membase (code, AMD64_RDI, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rdi), 8);
255 amd64_mov_reg_membase (code, AMD64_RSI, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rsi), 8);
258 /* call the handler */
259 amd64_call_reg (code, AMD64_ARG_REG2);
262 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
265 amd64_pop_reg (code, AMD64_RBP);
267 /* Restore callee saved regs */
/* NOTE(review): loop starts at i = AMD64_NREG, one past the last register
 * index; this is harmless only if AMD64_IS_CALLEE_SAVED_REG(AMD64_NREG) is
 * false — confirm against the register-mask definition. */
268 for (i = AMD64_NREG; i >= 0; --i)
269 if (AMD64_IS_CALLEE_SAVED_REG (i))
270 amd64_pop_reg (code, i);
275 g_assert ((code - start) < 128);
277 mono_arch_flush_icache (start, code - start);
279 *code_size = code - start;
285 * The first few arguments are dummy, to force the other arguments to be passed on
286 * the stack, this avoids overwriting the argument registers in the throw trampoline.
/* C-side worker invoked by the throw trampoline (get_throw_trampoline). The
 * register values pushed by the trampoline arrive here as stack arguments;
 * they are assembled into a MonoContext (assembly elided in this excerpt),
 * handed to mono_handle_exception(), and the resulting context is restored —
 * this function never returns. */
289 mono_amd64_throw_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
290 guint64 dummy5, guint64 dummy6,
291 MonoObject *exc, guint64 rip, guint64 rsp,
292 guint64 rbx, guint64 rbp, guint64 r12, guint64 r13,
293 guint64 r14, guint64 r15, guint64 rdi, guint64 rsi,
294 guint64 rax, guint64 rcx, guint64 rdx,
297 static void (*restore_context) (MonoContext *);
/* Lazily resolve the restore-context trampoline on first use. */
300 if (!restore_context)
301 restore_context = mono_get_restore_context ();
/* Give the debugger a chance to intercept a first-chance (non-rethrow)
 * exception at the original throw site. */
317 if (!rethrow && mono_debugger_throw_exception ((gpointer)(rip - 8), (gpointer)rsp, exc)) {
319 * The debugger wants us to stop on the `throw' instruction.
320 * By the time we get here, it already inserted a breakpoint on
321 * eip - 8 (which is the address of the `mov %r15,%rdi ; callq throw').
326 * In case of a rethrow, the JIT is emitting code like this:
328 * mov 0xffffffffffffffd0(%rbp),%rax'
332 * Here, restore_context() wouldn't restore the %rax register correctly.
336 restore_context (&ctx);
337 g_assert_not_reached ();
340 /* adjust eip so that it point into the call instruction */
/* Reset a managed exception's stack trace so this throw records a fresh one. */
343 if (mono_object_isinst (exc, mono_defaults.exception_class)) {
344 MonoException *mono_ex = (MonoException*)exc;
346 mono_ex->stack_trace = NULL;
348 mono_handle_exception (&ctx, exc, (gpointer)rip, FALSE);
349 restore_context (&ctx);
351 g_assert_not_reached ();
/* Emit the throw trampoline: it pushes the caller's register state plus the
 * @rethrow flag onto the stack (matching the stack-argument layout expected
 * by mono_amd64_throw_exception) and calls that function, which does not
 * return. */
355 get_throw_trampoline (gboolean rethrow, guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
360 start = code = mono_global_codeman_reserve (64);
/* Remember the stack pointer at entry; the return address sits at [r11]. */
366 amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, 8);
/* Push the register state in reverse order of the C argument list of
 * mono_amd64_throw_exception. */
369 amd64_push_imm (code, rethrow);
370 amd64_push_reg (code, AMD64_RDX);
371 amd64_push_reg (code, AMD64_RCX);
372 amd64_push_reg (code, AMD64_RAX);
373 amd64_push_reg (code, AMD64_RSI);
374 amd64_push_reg (code, AMD64_RDI);
375 amd64_push_reg (code, AMD64_R15);
376 amd64_push_reg (code, AMD64_R14);
377 amd64_push_reg (code, AMD64_R13);
378 amd64_push_reg (code, AMD64_R12);
379 amd64_push_reg (code, AMD64_RBP);
380 amd64_push_reg (code, AMD64_RBX);
/* rsp argument: the caller's stack pointer before the call (entry rsp + 8). */
383 amd64_lea_membase (code, AMD64_RAX, AMD64_R11, 8);
384 amd64_push_reg (code, AMD64_RAX);
/* rip argument: the return address of the call into this trampoline. */
387 amd64_push_membase (code, AMD64_R11, 0);
/* exc argument: the exception object passed in the first argument register. */
390 amd64_push_reg (code, AMD64_ARG_REG1);
/* On Win64 the six dummy register arguments become stack slots too. */
392 #ifdef PLATFORM_WIN32
394 amd64_push_imm (code, 0);
395 amd64_push_imm (code, 0);
396 amd64_push_imm (code, 0);
397 amd64_push_imm (code, 0);
398 amd64_push_imm (code, 0);
399 amd64_push_imm (code, 0);
/* AOT path loads the target through a patchable RIP-relative slot;
 * the JIT path embeds the address directly. */
403 *ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_throw_exception");
404 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
406 amd64_mov_reg_imm (code, AMD64_R11, mono_amd64_throw_exception);
408 amd64_call_reg (code, AMD64_R11);
/* The call above never returns; trap if it somehow does. */
409 amd64_breakpoint (code);
411 mono_arch_flush_icache (start, code - start);
413 g_assert ((code - start) < 64);
415 *code_size = code - start;
421 * mono_arch_get_throw_exception:
423 * Returns a function pointer which can be used to raise
424 * exceptions. The returned function has the following
425 * signature: void (*func) (MonoException *exc);
/* Thin wrapper: the first-throw variant of the shared throw trampoline. */
429 mono_arch_get_throw_exception_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
431 return get_throw_trampoline (FALSE, code_size, ji, aot);
/* Same trampoline as mono_arch_get_throw_exception_full, but with the
 * rethrow flag set (preserves the original stack trace / debugger state). */
435 mono_arch_get_rethrow_exception_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
437 return get_throw_trampoline (TRUE, code_size, ji, aot);
/* Stub kept for the cross-architecture interface: the by-name throw helper
 * is not used on amd64, so the emitted code is just a breakpoint trap. */
441 mono_arch_get_throw_exception_by_name_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
446 start = code = mono_global_codeman_reserve (64);
450 /* Not used on amd64 */
451 amd64_breakpoint (code);
453 mono_arch_flush_icache (start, code - start);
455 *code_size = code - start;
461 * mono_arch_get_throw_corlib_exception:
463 * Returns a function pointer which can be used to raise
464 * corlib exceptions. The returned function has the following
465 * signature: void (*func) (guint32 ex_token, guint32 offset);
466 * Here, offset is the offset which needs to be substracted from the caller IP
467 * to get the IP of the throw. Passing the offset has the advantage that it
468 * needs no relocations in the caller.
/* Emits a trampoline that (1) builds the exception object from its metadata
 * token via mono_exception_from_token, (2) computes the original throw IP
 * from the return address minus @offset, and (3) tail-jumps into the generic
 * throw trampoline with that IP pushed on the stack. */
471 mono_arch_get_throw_corlib_exception_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
473 static guint8* start;
477 start = code = mono_global_codeman_reserve (64);
/* Save the offset argument; it is consumed after the call below. */
482 amd64_push_reg (code, AMD64_ARG_REG2);
484 /* Call exception_from_token */
485 amd64_mov_reg_reg (code, AMD64_ARG_REG2, AMD64_ARG_REG1, 8);
/* AOT path: both the image pointer and the icall target come from patchable
 * RIP-relative slots; JIT path embeds them as immediates. */
487 *ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_IMAGE, mono_defaults.exception_class->image);
488 amd64_mov_reg_membase (code, AMD64_ARG_REG1, AMD64_RIP, 0, 8);
489 *ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_exception_from_token");
490 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
492 amd64_mov_reg_imm (code, AMD64_ARG_REG1, mono_defaults.exception_class->image);
493 amd64_mov_reg_imm (code, AMD64_R11, mono_exception_from_token);
/* Win64 calling convention requires 32 bytes of shadow space. */
495 #ifdef PLATFORM_WIN32
496 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 32);
498 amd64_call_reg (code, AMD64_R11);
499 #ifdef PLATFORM_WIN32
500 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 32);
503 /* Compute throw_ip */
/* ARG_REG2 = saved offset, ARG_REG3 = return address; throw_ip = ret - off. */
504 amd64_pop_reg (code, AMD64_ARG_REG2);
506 amd64_pop_reg (code, AMD64_ARG_REG3);
507 amd64_alu_reg_reg (code, X86_SUB, AMD64_ARG_REG3, AMD64_ARG_REG2);
509 /* Put the throw_ip at the top of the misaligned stack */
510 amd64_push_reg (code, AMD64_ARG_REG3);
512 throw_ex = (guint64)mono_get_throw_exception ();
514 /* Call throw_exception */
/* The new exception object (returned in %rax) becomes the first argument. */
515 amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_RAX, 8);
517 *ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_throw_exception");
518 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
520 amd64_mov_reg_imm (code, AMD64_R11, throw_ex);
522 /* The original IP is on the stack */
523 amd64_jump_reg (code, AMD64_R11);
525 g_assert ((code - start) < 64);
527 mono_arch_flush_icache (start, code - start);
529 *code_size = code - start;
534 /* mono_arch_find_jit_info:
536 * This function is used to gather information from @ctx. It return the
537 * MonoJitInfo of the corresponding function, unwinds one stack frame and
538 * stores the resulting context into @new_ctx. It also stores a string
539 * describing the stack location into @trace (if not NULL), and modifies
540 * the @lmf if necessary. @native_offset return the IP offset from the
541 * start of the function or -1 if that info is not available.
/* Single-step stack unwinder. Two main cases are visible in this excerpt:
 * a JIT-compiled frame (unwound via used_regs/frame-pointer bookkeeping) and
 * an LMF (Last Managed Frame) entry for native/trampoline frames. Several
 * connecting lines are elided from this view. */
544 mono_arch_find_jit_info (MonoDomain *domain, MonoJitTlsData *jit_tls, MonoJitInfo *res, MonoJitInfo *prev_ji, MonoContext *ctx,
545 MonoContext *new_ctx, MonoLMF **lmf, gboolean *managed)
549 gpointer ip = MONO_CONTEXT_GET_IP (ctx);
551 /* Avoid costly table lookup during stack overflow */
552 if (prev_ji && (ip > prev_ji->code_start && ((guint8*)ip < ((guint8*)prev_ji->code_start) + prev_ji->code_size)))
555 ji = mono_jit_info_table_find (domain, ip);
/* Bit 31 of used_regs encodes "frame pointer omitted" for this method. */
564 gboolean omit_fp = (ji->used_regs & (1 << 31)) > 0;
567 if (!ji->method->wrapper_type)
571 * If a method has save_lmf set, then register save/restore code is not generated
572 * by the JIT, so we have to restore callee saved registers from the lmf.
574 if (ji->method->save_lmf) {
578 * *lmf might not point to the LMF pushed by this method, so compute the LMF
/* Locate the method's own LMF: at rsp when the frame pointer is omitted,
 * immediately below the saved rbp otherwise. */
582 lmf_addr = (MonoLMF*)ctx->rsp;
584 lmf_addr = (MonoLMF*)(ctx->rbp - sizeof (MonoLMF));
586 new_ctx->rbp = lmf_addr->rbp;
587 new_ctx->rbx = lmf_addr->rbx;
588 new_ctx->r12 = lmf_addr->r12;
589 new_ctx->r13 = lmf_addr->r13;
590 new_ctx->r14 = lmf_addr->r14;
591 new_ctx->r15 = lmf_addr->r15;
594 offset = omit_fp ? 0 : -1;
595 /* restore caller saved registers */
/* Each callee-saved register the method used occupies one save slot,
 * addressed from rsp (omit-fp) or rbp (frame-pointer) respectively. */
596 for (i = 0; i < AMD64_NREG; i ++)
597 if (AMD64_IS_CALLEE_SAVED_REG (i) && (ji->used_regs & (1 << i))) {
601 reg = *((guint64*)ctx->rsp + offset);
605 reg = *((guint64 *)ctx->rbp + offset);
629 g_assert_not_reached ();
/* Pop LMF entries that belong to frames we have already unwound past. */
634 if (*lmf && ((*lmf) != jit_tls->first_lmf) && (MONO_CONTEXT_GET_SP (ctx) >= (gpointer)(*lmf)->rsp)) {
635 /* remove any unused lmf */
/* Bit 0 of previous_lmf is a flag (see mono_arch_notify_pending_exc). */
636 *lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~1);
/* Omit-fp unwind: bits 16..30 of used_regs hold the frame size. */
641 new_ctx->rsp += (ji->used_regs >> 16) & (0x7fff);
642 new_ctx->rip = *((guint64 *)new_ctx->rsp) - 1;
643 /* Pop return address */
647 /* Pop EBP and the return address */
648 new_ctx->rsp = ctx->rbp + (2 * sizeof (gpointer));
649 /* we substract 1, so that the IP points into the call instruction */
650 new_ctx->rip = *((guint64 *)ctx->rbp + 1) - 1;
651 new_ctx->rbp = *((guint64 *)ctx->rbp);
654 /* Pop arguments off the stack */
656 MonoJitArgumentInfo *arg_info = g_newa (MonoJitArgumentInfo, mono_method_signature (ji->method)->param_count + 1);
658 guint32 stack_to_pop = mono_arch_get_argument_info (mono_method_signature (ji->method), mono_method_signature (ji->method)->param_count, arg_info);
659 new_ctx->rsp += stack_to_pop;
/* LMF-based unwind for native / trampoline frames. */
666 if (((guint64)(*lmf)->previous_lmf) & 1) {
667 /* This LMF has the rip field set */
669 } else if ((*lmf)->rsp == 0) {
674 * The rsp field is set just before the call which transitioned to native
675 * code. Obtain the rip from the stack.
677 rip = *(guint64*)((*lmf)->rsp - sizeof (gpointer));
680 ji = mono_jit_info_table_find (domain, (gpointer)rip);
685 /* Trampoline lmf frame */
/* No JIT info found: synthesize a MonoJitInfo in @res for the LMF's method. */
686 memset (res, 0, sizeof (MonoJitInfo));
687 res->method = (*lmf)->method;
691 new_ctx->rbp = (*lmf)->rbp;
692 new_ctx->rsp = (*lmf)->rsp;
694 new_ctx->rbx = (*lmf)->rbx;
695 new_ctx->r12 = (*lmf)->r12;
696 new_ctx->r13 = (*lmf)->r13;
697 new_ctx->r14 = (*lmf)->r14;
698 new_ctx->r15 = (*lmf)->r15;
700 *lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~1);
702 return ji ? ji : res;
709 * mono_arch_handle_exception:
711 * @ctx: saved processor state
712 * @obj: the exception object
/* Arch entry point for exception handling from a signal context: convert the
 * signal context to a MonoContext, run the generic handler, then write the
 * (possibly modified) MonoContext back into the signal context so the kernel
 * resumes execution wherever the handler decided. */
715 mono_arch_handle_exception (void *sigctx, gpointer obj, gboolean test_only)
719 mono_arch_sigctx_to_monoctx (sigctx, &mctx);
721 mono_handle_exception (&mctx, obj, MONO_CONTEXT_GET_IP (&mctx), test_only);
723 mono_arch_monoctx_to_sigctx (&mctx, sigctx);
728 #ifdef MONO_ARCH_USE_SIGACTION
/* Return the general-register array inside a ucontext_t. The two lines below
 * are alternative platform variants (the #ifdef/#else markers between them
 * are elided in this excerpt): some systems expose uc_mcontext directly as
 * the register block, others nest it under a .gregs member. */
729 static inline guint64*
730 gregs_from_ucontext (ucontext_t *ctx)
733 guint64 *gregs = (guint64 *) &ctx->uc_mcontext;
735 guint64 *gregs = (guint64 *) &ctx->uc_mcontext.gregs;
/* Copy the processor state out of a platform signal context into a
 * MonoContext. With sigaction the registers come from the ucontext's gregs
 * array; otherwise sigctx is assumed to already be a MonoContext (the #else
 * marker between the two variants is elided in this excerpt). */
742 mono_arch_sigctx_to_monoctx (void *sigctx, MonoContext *mctx)
744 #ifdef MONO_ARCH_USE_SIGACTION
745 ucontext_t *ctx = (ucontext_t*)sigctx;
747 guint64 *gregs = gregs_from_ucontext (ctx);
749 mctx->rax = gregs [REG_RAX];
750 mctx->rbx = gregs [REG_RBX];
751 mctx->rcx = gregs [REG_RCX];
752 mctx->rdx = gregs [REG_RDX];
753 mctx->rbp = gregs [REG_RBP];
754 mctx->rsp = gregs [REG_RSP];
755 mctx->rsi = gregs [REG_RSI];
756 mctx->rdi = gregs [REG_RDI];
757 mctx->rip = gregs [REG_RIP];
758 mctx->r12 = gregs [REG_R12];
759 mctx->r13 = gregs [REG_R13];
760 mctx->r14 = gregs [REG_R14];
761 mctx->r15 = gregs [REG_R15];
/* Non-sigaction variant: straight MonoContext-to-MonoContext copy. */
763 MonoContext *ctx = (MonoContext *)sigctx;
765 mctx->rax = ctx->rax;
766 mctx->rbx = ctx->rbx;
767 mctx->rcx = ctx->rcx;
768 mctx->rdx = ctx->rdx;
769 mctx->rbp = ctx->rbp;
770 mctx->rsp = ctx->rsp;
771 mctx->rsi = ctx->rsi;
772 mctx->rdi = ctx->rdi;
773 mctx->rip = ctx->rip;
774 mctx->r12 = ctx->r12;
775 mctx->r13 = ctx->r13;
776 mctx->r14 = ctx->r14;
777 mctx->r15 = ctx->r15;
/* Inverse of mono_arch_sigctx_to_monoctx: write a MonoContext back into the
 * platform signal context so execution resumes with this state (the #else
 * marker between the two variants is elided in this excerpt). */
782 mono_arch_monoctx_to_sigctx (MonoContext *mctx, void *sigctx)
784 #ifdef MONO_ARCH_USE_SIGACTION
785 ucontext_t *ctx = (ucontext_t*)sigctx;
787 guint64 *gregs = gregs_from_ucontext (ctx);
789 gregs [REG_RAX] = mctx->rax;
790 gregs [REG_RBX] = mctx->rbx;
791 gregs [REG_RCX] = mctx->rcx;
792 gregs [REG_RDX] = mctx->rdx;
793 gregs [REG_RBP] = mctx->rbp;
794 gregs [REG_RSP] = mctx->rsp;
795 gregs [REG_RSI] = mctx->rsi;
796 gregs [REG_RDI] = mctx->rdi;
797 gregs [REG_RIP] = mctx->rip;
798 gregs [REG_R12] = mctx->r12;
799 gregs [REG_R13] = mctx->r13;
800 gregs [REG_R14] = mctx->r14;
801 gregs [REG_R15] = mctx->r15;
/* Non-sigaction variant: straight MonoContext-to-MonoContext copy. */
803 MonoContext *ctx = (MonoContext *)sigctx;
805 ctx->rax = mctx->rax;
806 ctx->rbx = mctx->rbx;
807 ctx->rcx = mctx->rcx;
808 ctx->rdx = mctx->rdx;
809 ctx->rbp = mctx->rbp;
810 ctx->rsp = mctx->rsp;
811 ctx->rsi = mctx->rsi;
812 ctx->rdi = mctx->rdi;
813 ctx->rip = mctx->rip;
814 ctx->r12 = mctx->r12;
815 ctx->r13 = mctx->r13;
816 ctx->r14 = mctx->r14;
817 ctx->r15 = mctx->r15;
/* Extract the faulting instruction pointer from a platform signal context
 * (ucontext gregs with sigaction, MonoContext field otherwise). */
822 mono_arch_ip_from_context (void *sigctx)
825 #ifdef MONO_ARCH_USE_SIGACTION
827 ucontext_t *ctx = (ucontext_t*)sigctx;
829 guint64 *gregs = gregs_from_ucontext (ctx);
831 return (gpointer)gregs [REG_RIP];
833 MonoContext *ctx = sigctx;
834 return (gpointer)ctx->rip;
/* Re-protect the soft stack-overflow guard pages for the current thread.
 * Execution is redirected here by prepare_for_guard_pages(); afterwards it
 * returns to the interrupted code via the return address pushed there. */
839 restore_soft_guard_pages (void)
841 MonoJitTlsData *jit_tls = TlsGetValue (mono_jit_tls_id);
842 if (jit_tls->stack_ovf_guard_base)
843 mono_mprotect (jit_tls->stack_ovf_guard_base, jit_tls->stack_ovf_guard_size, MONO_MMAP_NONE);
847 * this function modifies mctx so that when it is restored, it
848 * won't execcute starting at mctx.eip, but in a function that
849 * will restore the protection on the soft-guard pages and return back to
850 * continue at mctx.eip.
/* Push the original rip as a return address onto the context's stack, then
 * point the context at restore_soft_guard_pages so it runs first. */
853 prepare_for_guard_pages (MonoContext *mctx)
856 sp = (gpointer)(mctx->rsp);
858 /* the return addr */
859 sp [0] = (gpointer)(mctx->rip);
860 mctx->rip = (guint64)restore_soft_guard_pages;
861 mctx->rsp = (guint64)sp;
/* Runs on the normal stack after mono_arch_handle_altstack_exception has
 * rewritten the signal context: handles the exception, arranges (for stack
 * overflow) that the guard pages get re-protected, and restores the context.
 * Never returns. */
865 altstack_handle_and_restore (void *sigctx, gpointer obj, gboolean stack_ovf)
867 void (*restore_context) (MonoContext *);
870 restore_context = mono_get_restore_context ();
871 mono_arch_sigctx_to_monoctx (sigctx, &mctx);
872 mono_handle_exception (&mctx, obj, MONO_CONTEXT_GET_IP (&mctx), FALSE);
874 prepare_for_guard_pages (&mctx);
875 restore_context (&mctx);
/* SIGSEGV handler running on the alternate signal stack. Since exception
 * handling cannot proceed on the altstack, it builds a call frame on the
 * real thread stack (below the 128-byte red zone), copies the ucontext there,
 * and rewrites the signal context so that on return from the signal handler
 * execution continues in altstack_handle_and_restore() with the copied
 * context, the exception object and the stack_ovf flag as arguments. */
879 mono_arch_handle_altstack_exception (void *sigctx, gpointer fault_addr, gboolean stack_ovf)
881 #ifdef MONO_ARCH_USE_SIGACTION
882 MonoException *exc = NULL;
883 ucontext_t *ctx = (ucontext_t*)sigctx;
884 guint64 *gregs = gregs_from_ucontext (ctx);
885 MonoJitInfo *ji = mono_jit_info_table_find (mono_domain_get (), (gpointer)gregs [REG_RIP]);
890 exc = mono_domain_get ()->stack_overflow_ex;
/* Fault outside managed code: treat as a native crash. */
892 mono_handle_native_sigsegv (SIGSEGV, sigctx);
894 /* setup a call frame on the real stack so that control is returned there
895 * and exception handling can continue.
896 * The frame looks like:
900 * 128 is the size of the red zone
902 frame_size = sizeof (ucontext_t) + sizeof (gpointer) * 4 + 128;
/* 16-byte-align the new stack top before carving out the frame. */
905 sp = (gpointer)(gregs [REG_RSP] & ~15);
906 sp = (gpointer)((char*)sp - frame_size);
907 /* the arguments must be aligned */
/* Fake return address: the faulting rip, for unwinders looking at the frame. */
908 sp [-1] = (gpointer)gregs [REG_RIP];
909 /* may need to adjust pointers in the new struct copy, depending on the OS */
910 memcpy (sp + 4, ctx, sizeof (ucontext_t));
911 /* at the return form the signal handler execution starts in altstack_handle_and_restore() */
912 gregs [REG_RIP] = (unsigned long)altstack_handle_and_restore;
913 gregs [REG_RSP] = (unsigned long)(sp - 1);
/* SysV argument registers: rdi = copied sigctx, rsi = exc, rdx = stack_ovf. */
914 gregs [REG_RDI] = (unsigned long)(sp + 4);
915 gregs [REG_RSI] = (guint64)exc;
916 gregs [REG_RDX] = stack_ovf;
/* Called from the throw-pending-exception helper: return the hijacked
 * original return address saved in the current LMF (see
 * mono_arch_notify_pending_exc) and clear the "rip is set" flag that was
 * OR-ed into previous_lmf. */
921 get_original_ip (void)
923 MonoLMF *lmf = mono_get_lmf ();
927 /* Reset the change to previous_lmf */
928 lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf & ~1);
/* Lazily emit (and cache in 'start') a helper that the hijacked return
 * address in mono_arch_notify_pending_exc jumps to. It preserves the callee's
 * possible return values (%rax, %rdx, %xmm0), fetches the pending exception,
 * and either throws it as if from the original call site or, when none is
 * pending, restores the registers and resumes at the original return IP. */
934 get_throw_pending_exception (void)
936 static guint8* start;
937 static gboolean inited = FALSE;
940 gpointer throw_trampoline;
945 start = code = mono_global_codeman_reserve (128);
947 /* We are in the frame of a managed method after a call */
949 * We would like to throw the pending exception in such a way that it looks to
950 * be thrown from the managed method.
953 /* Save registers which might contain the return value of the call */
954 amd64_push_reg (code, AMD64_RAX);
955 amd64_push_reg (code, AMD64_RDX);
957 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
958 amd64_movsd_membase_reg (code, AMD64_RSP, 0, AMD64_XMM0);
/* Realign the stack for the calls below. */
961 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
963 /* Obtain the pending exception */
964 amd64_mov_reg_imm (code, AMD64_R11, mono_thread_get_and_clear_pending_exception);
965 amd64_call_reg (code, AMD64_R11);
967 /* Check if it is NULL, and branch */
968 amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0);
969 br[0] = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE);
971 /* exc != NULL branch */
973 /* Save the exc on the stack */
974 amd64_push_reg (code, AMD64_RAX);
/* Keep the stack aligned across the next call. */
976 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
978 /* Obtain the original ip and clear the flag in previous_lmf */
979 amd64_mov_reg_imm (code, AMD64_R11, get_original_ip);
980 amd64_call_reg (code, AMD64_R11);
/* Reload the saved exception object from its stack slot. */
983 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, 8, 8);
985 /* Pop saved stuff from the stack */
986 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 6 * 8);
988 /* Setup arguments for the throw trampoline */
990 amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_R11, 8);
991 /* The trampoline expects the caller ip to be pushed on the stack */
992 amd64_push_reg (code, AMD64_RAX);
994 /* Call the throw trampoline */
995 throw_trampoline = mono_get_throw_exception ();
996 amd64_mov_reg_imm (code, AMD64_R11, throw_trampoline);
997 /* We use a jump instead of a call so we can push the original ip on the stack */
998 amd64_jump_reg (code, AMD64_R11);
1000 /* ex == NULL branch */
1001 mono_amd64_patch (br [0], code);
1003 /* Obtain the original ip and clear the flag in previous_lmf */
1004 amd64_mov_reg_imm (code, AMD64_R11, get_original_ip);
1005 amd64_call_reg (code, AMD64_R11);
1006 amd64_mov_reg_reg (code, AMD64_R11, AMD64_RAX, 8);
1008 /* Restore registers */
1009 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
1010 amd64_movsd_reg_membase (code, AMD64_XMM0, AMD64_RSP, 0);
1011 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
1012 amd64_pop_reg (code, AMD64_RDX);
1013 amd64_pop_reg (code, AMD64_RAX);
1015 /* Return to original code */
1016 amd64_jump_reg (code, AMD64_R11);
1018 g_assert ((code - start) < 128);
1026 * Called when a thread receives an async exception while executing unmanaged code.
1027 * Instead of checking for this exception in the managed-to-native wrapper, we hijack
1028 * the return address on the stack to point to a helper routine which throws the
/* Hijack the return address stored at lmf->rsp - 8 so that when the native
 * call returns, control goes through get_throw_pending_exception() instead.
 * The original return address is stashed in lmf->rip, and bit 0 of
 * previous_lmf marks the entry as hijacked (checked below to avoid doing it
 * twice). */
1032 mono_arch_notify_pending_exc (void)
1034 MonoLMF *lmf = mono_get_lmf ();
1040 if ((guint64)lmf->previous_lmf & 1)
1041 /* Already hijacked or trampoline LMF entry */
1044 /* lmf->rsp is set just before making the call which transitions to unmanaged code */
1045 lmf->rip = *(guint64*)(lmf->rsp - 8);
1046 /* Signal that lmf->rip is set */
1047 lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf | 1);
1049 *(gpointer*)(lmf->rsp - 8) = get_throw_pending_exception ();
1052 #ifdef PLATFORM_WIN32
1055 * The mono_arch_unwindinfo* methods are used to build and add
1056 * function table info for each emitted method from mono. On Winx64
1057 * the seh handler will not be called if the mono methods are not
1058 * added to the function table.
1060 * We should not need to add non-volatile register info to the
1061 * table since mono stores that info elsewhere. (Except for the register
/* Capacity of the fixed unwind-code array in UNWIND_INFO below; the
 * worst-case breakdown is documented inside that struct. */
1065 #define MONO_MAX_UNWIND_CODES 22
/* One slot of the Win64 unwind-code array; layout mirrors the UNWIND_CODE
 * union from the Windows x64 exception-handling ABI (the inner struct lines
 * are partially elided in this excerpt). */
1067 typedef union _UNWIND_CODE {
1070 guchar UnwindOp : 4;
1073 gushort FrameOffset;
1074 } UNWIND_CODE, *PUNWIND_CODE;
/* Win64 UNWIND_INFO header followed by a fixed-capacity unwind-code array
 * (the standard layout uses a flexible array; mono sizes it for its
 * worst case, as itemized in the comments below). */
1076 typedef struct _UNWIND_INFO {
1079 guchar SizeOfProlog;
1080 guchar CountOfCodes;
1081 guchar FrameRegister : 4;
1082 guchar FrameOffset : 4;
1083 /* custom size for mono allowing for mono allowing for*/
1084 /*UWOP_PUSH_NONVOL ebp offset = 21*/
1085 /*UWOP_ALLOC_LARGE : requires 2 or 3 offset = 20*/
1086 /*UWOP_SET_FPREG : requires 2 offset = 17*/
1087 /*UWOP_PUSH_NONVOL offset = 15-0*/
1088 UNWIND_CODE UnwindCode[MONO_MAX_UNWIND_CODES];
1090 /* UNWIND_CODE MoreUnwindCode[((CountOfCodes + 1) & ~1) - 1];
1092 * OPTIONAL ULONG ExceptionHandler;
1093 * OPTIONAL ULONG FunctionEntry;
1095 * OPTIONAL ULONG ExceptionData[]; */
1096 } UNWIND_INFO, *PUNWIND_INFO;
/* A RUNTIME_FUNCTION entry plus its UNWIND_INFO, kept together so both can
 * be copied after the emitted method and registered with
 * RtlAddFunctionTable (see mono_arch_unwindinfo_install_unwind_info). */
1100 RUNTIME_FUNCTION runtimeFunction;
1101 UNWIND_INFO unwindInfo;
1102 } MonoUnwindInfo, *PMonoUnwindInfo;
/* Allocate a fresh, zeroed MonoUnwindInfo into *monoui; caller owns it until
 * mono_arch_unwindinfo_install_unwind_info frees it. */
1105 mono_arch_unwindinfo_create (gpointer* monoui)
1107 PMonoUnwindInfo newunwindinfo;
1108 *monoui = newunwindinfo = g_new0 (MonoUnwindInfo, 1);
/* Win64 unwind-info format version is always 1. */
1109 newunwindinfo->unwindInfo.Version = 1;
/* Record a UWOP_PUSH_NONVOL unwind op for a `push reg` at prolog offset
 * nextip - codebegin. Codes are stored from the END of the array backwards
 * so the final (reverse-prolog-order) sequence can be copied out directly. */
1113 mono_arch_unwindinfo_add_push_nonvol (gpointer* monoui, gpointer codebegin, gpointer nextip, guchar reg )
1115 PMonoUnwindInfo unwindinfo;
1116 PUNWIND_CODE unwindcode;
1119 mono_arch_unwindinfo_create (monoui);
1121 unwindinfo = (MonoUnwindInfo*)*monoui;
1123 if (unwindinfo->unwindInfo.CountOfCodes >= MONO_MAX_UNWIND_CODES)
1124 g_error ("Larger allocation needed for the unwind information.");
1126 codeindex = MONO_MAX_UNWIND_CODES - (++unwindinfo->unwindInfo.CountOfCodes);
1127 unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];
1128 unwindcode->UnwindOp = 0; /*UWOP_PUSH_NONVOL*/
1129 unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin));
1130 unwindcode->OpInfo = reg;
/* Ops must be added in prolog order; SizeOfProlog tracks the furthest one. */
1132 if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
1133 g_error ("Adding unwind info in wrong order.");
1135 unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
/* Record a UWOP_SET_FPREG unwind op (establishing @reg as the frame pointer
 * with zero frame offset). This op occupies two code slots, hence the
 * CountOfCodes += 2. */
1139 mono_arch_unwindinfo_add_set_fpreg (gpointer* monoui, gpointer codebegin, gpointer nextip, guchar reg )
1141 PMonoUnwindInfo unwindinfo;
1142 PUNWIND_CODE unwindcode;
1145 mono_arch_unwindinfo_create (monoui);
1147 unwindinfo = (MonoUnwindInfo*)*monoui;
1149 if (unwindinfo->unwindInfo.CountOfCodes + 1 >= MONO_MAX_UNWIND_CODES)
1150 g_error ("Larger allocation needed for the unwind information.");
1152 codeindex = MONO_MAX_UNWIND_CODES - (unwindinfo->unwindInfo.CountOfCodes += 2);
1153 unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];
1154 unwindcode->FrameOffset = 0; /*Assuming no frame pointer offset for mono*/
1156 unwindcode->UnwindOp = 3; /*UWOP_SET_FPREG*/
1157 unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin));
1158 unwindcode->OpInfo = reg;
1160 unwindinfo->unwindInfo.FrameRegister = reg;
/* Ops must be added in prolog order; SizeOfProlog tracks the furthest one. */
1162 if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
1163 g_error ("Adding unwind info in wrong order.");
1165 unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
/* Record a stack-allocation unwind op for `sub rsp, size`. Encoding follows
 * the Win64 ABI: UWOP_ALLOC_SMALL (1 slot) for sizes up to 128 bytes,
 * UWOP_ALLOC_LARGE with a scaled 16-bit operand (2 slots) up to 0x7FFF8,
 * and UWOP_ALLOC_LARGE with an unscaled 32-bit operand (3 slots) beyond.
 * (The codesneeded selection lines are partially elided in this excerpt.) */
1169 mono_arch_unwindinfo_add_alloc_stack (gpointer* monoui, gpointer codebegin, gpointer nextip, guint size )
1171 PMonoUnwindInfo unwindinfo;
1172 PUNWIND_CODE unwindcode;
1176 mono_arch_unwindinfo_create (monoui);
1178 unwindinfo = (MonoUnwindInfo*)*monoui;
1181 g_error ("Stack allocation must be equal to or greater than 0x8.");
1185 else if (size <= 0x7FFF8)
1190 if (unwindinfo->unwindInfo.CountOfCodes + codesneeded > MONO_MAX_UNWIND_CODES)
1191 g_error ("Larger allocation needed for the unwind information.");
1193 codeindex = MONO_MAX_UNWIND_CODES - (unwindinfo->unwindInfo.CountOfCodes += codesneeded);
1194 unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];
1196 if (codesneeded == 1) {
1197 /*The size of the allocation is
1198 (the number in the OpInfo member) times 8 plus 8*/
1199 unwindcode->OpInfo = (size - 8)/8;
1200 unwindcode->UnwindOp = 2; /*UWOP_ALLOC_SMALL*/
1203 if (codesneeded == 3) {
1204 /*the unscaled size of the allocation is recorded
1205 in the next two slots in little-endian format*/
1206 *((unsigned int*)(&unwindcode->FrameOffset)) = size;
1208 unwindcode->OpInfo = 1;
1211 /*the size of the allocation divided by 8
1212 is recorded in the next slot*/
1213 unwindcode->FrameOffset = size/8;
1215 unwindcode->OpInfo = 0;
1218 unwindcode->UnwindOp = 1; /*UWOP_ALLOC_LARGE*/
1221 unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin));
/* Ops must be added in prolog order; SizeOfProlog tracks the furthest one. */
1223 if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
1224 g_error ("Adding unwind info in wrong order.");
1226 unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
/* Bytes of code-buffer space that install_unwind_info will need after the
 * method body: up to 8 bytes of alignment padding plus the MonoUnwindInfo
 * minus the unused tail of the fixed-capacity unwind-code array. */
1230 mono_arch_unwindinfo_get_size (gpointer monoui)
1232 PMonoUnwindInfo unwindinfo;
1236 unwindinfo = (MonoUnwindInfo*)monoui;
1237 return (8 + sizeof (MonoUnwindInfo)) -
1238 (sizeof (UNWIND_CODE) * (MONO_MAX_UNWIND_CODES - unwindinfo->unwindInfo.CountOfCodes));
/* Finalize and register unwind info for an emitted method: copy the
 * MonoUnwindInfo to an 8-aligned location just past the method's code
 * (compacting the backwards-filled code array to the front), free the
 * builder copy, and register the RUNTIME_FUNCTION with the OS so the Win64
 * SEH unwinder can find it. Offsets in RUNTIME_FUNCTION are relative to
 * @code, which is passed as the base address to RtlAddFunctionTable. */
1242 mono_arch_unwindinfo_install_unwind_info (gpointer* monoui, gpointer code, guint code_size)
1244 PMonoUnwindInfo unwindinfo, targetinfo;
1246 guint64 targetlocation;
1250 unwindinfo = (MonoUnwindInfo*)*monoui;
1251 targetlocation = (guint64)&(((guchar*)code)[code_size]);
1252 targetinfo = (PMonoUnwindInfo) ALIGN_TO(targetlocation, 8);
1254 unwindinfo->runtimeFunction.EndAddress = code_size;
1255 unwindinfo->runtimeFunction.UnwindData = ((guchar*)&targetinfo->unwindInfo) - ((guchar*)code);
/* Copy the header without the code array... */
1257 memcpy (targetinfo, unwindinfo, sizeof (MonoUnwindInfo) - (sizeof (UNWIND_CODE) * MONO_MAX_UNWIND_CODES));
1259 codecount = unwindinfo->unwindInfo.CountOfCodes;
/* ...then move the used tail of the code array to the front of the target. */
1261 memcpy (&targetinfo->unwindInfo.UnwindCode[0], &unwindinfo->unwindInfo.UnwindCode[MONO_MAX_UNWIND_CODES-codecount],
1262 sizeof (UNWIND_CODE) * unwindinfo->unwindInfo.CountOfCodes);
1265 g_free (unwindinfo);
1268 RtlAddFunctionTable (&targetinfo->runtimeFunction, 1, (DWORD64)code);