2 * exceptions-amd64.c: exception support for AMD64
5 * Dietmar Maurer (dietmar@ximian.com)
7 * (C) 2001 Ximian, Inc.
14 #ifndef PLATFORM_WIN32
15 #include <sys/ucontext.h>
18 #include <mono/arch/amd64/amd64-codegen.h>
19 #include <mono/metadata/appdomain.h>
20 #include <mono/metadata/tabledefs.h>
21 #include <mono/metadata/threads.h>
22 #include <mono/metadata/threads-types.h>
23 #include <mono/metadata/debug-helpers.h>
24 #include <mono/metadata/exception.h>
25 #include <mono/metadata/gc-internal.h>
26 #include <mono/metadata/mono-debug.h>
27 #include <mono/utils/mono-mmap.h>
30 #include "mini-amd64.h"
32 #define ALIGN_TO(val,align) (((val) + ((align) - 1)) & ~((align) - 1))
/* Win32 SEH support: one handler slot per fault category, installed via
 * win32_seh_set_handler(). */
35 static MonoW32ExceptionHandler fpe_handler;
36 static MonoW32ExceptionHandler ill_handler;
37 static MonoW32ExceptionHandler segv_handler;
/* Previous top-level exception filter, restored by win32_seh_cleanup(). */
39 static LPTOP_LEVEL_EXCEPTION_FILTER old_handler;
/* Invoke the registered handler for fault kind _ex (fpe/ill/segv), if any.
 * NOTE(review): sctx is a MonoContext* cast to int — on 64-bit this truncates
 * the pointer; confirm MonoW32ExceptionHandler's expected argument type. */
41 #define W32_SEH_HANDLE_EX(_ex) \
42 if (_ex##_handler) _ex##_handler((int)sctx)
45 * Unhandled Exception Filter
46 * Top-level per-process exception handler.
/*
 * Top-level per-process Win32 exception filter: copies the Win32 CONTEXT into a
 * freshly allocated MonoContext (sctx), dispatches to the registered mono handler
 * for the exception code, then writes the (possibly updated) register values
 * back into the Win32 CONTEXT so execution can resume.
 */
48 LONG CALLBACK seh_handler(EXCEPTION_POINTERS* ep)
55 res = EXCEPTION_CONTINUE_EXECUTION;
57 er = ep->ExceptionRecord;
58 ctx = ep->ContextRecord;
59 sctx = g_malloc(sizeof(MonoContext));
61 /* Copy Win32 context to UNIX style context */
76 switch (er->ExceptionCode) {
77 case EXCEPTION_ACCESS_VIOLATION:
78 W32_SEH_HANDLE_EX(segv);
80 case EXCEPTION_ILLEGAL_INSTRUCTION:
81 W32_SEH_HANDLE_EX(ill);
/* All integer and floating-point arithmetic faults funnel into the FPE handler. */
83 case EXCEPTION_INT_DIVIDE_BY_ZERO:
84 case EXCEPTION_INT_OVERFLOW:
85 case EXCEPTION_FLT_DIVIDE_BY_ZERO:
86 case EXCEPTION_FLT_OVERFLOW:
87 case EXCEPTION_FLT_UNDERFLOW:
88 case EXCEPTION_FLT_INEXACT_RESULT:
89 W32_SEH_HANDLE_EX(fpe);
95 /* Copy context back */
/* Callee-saved registers plus the resume IP. */
100 ctx->Rbx = sctx->rbx;
101 ctx->Rbp = sctx->rbp;
102 ctx->R12 = sctx->r12;
103 ctx->R13 = sctx->r13;
104 ctx->R14 = sctx->r14;
105 ctx->R15 = sctx->r15;
106 ctx->Rip = sctx->rip;
108 /* Volatile, but should not matter? */
109 ctx->Rax = sctx->rax;
110 ctx->Rcx = sctx->rcx;
111 ctx->Rdx = sctx->rdx;
/* Install seh_handler as the process-wide top-level exception filter,
 * remembering the previously installed filter for win32_seh_cleanup(). */
118 void win32_seh_init()
120 old_handler = SetUnhandledExceptionFilter(seh_handler);
/* Restore the exception filter that was active before win32_seh_init(). */
123 void win32_seh_cleanup()
125 if (old_handler) SetUnhandledExceptionFilter(old_handler);
/* Register @handler for one fault category selected by @type; the visible
 * assignments correspond to the FPE, ILL and SEGV cases of the dispatch. */
128 void win32_seh_set_handler(int type, MonoW32ExceptionHandler handler)
132 fpe_handler = handler;
135 ill_handler = handler;
138 segv_handler = handler;
145 #endif /* PLATFORM_WIN32 */
148 * mono_arch_get_restore_context:
150 * Returns a pointer to a method which restores a previously saved sigcontext.
/*
 * Emits a small trampoline: restore_context (MonoContext *ctx).  The context
 * pointer is stashed in %r11 (which is why %r11 itself is never restored), every
 * other general register is reloaded from the MonoContext, %rsp is switched to
 * the saved stack and control jumps to the saved %rip.
 */
153 mono_arch_get_restore_context_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
155 guint8 *start = NULL;
158 /* restore_context (MonoContext *ctx) */
162 start = code = mono_global_codeman_reserve (256);
/* Keep the ctx pointer in the scratch register %r11 for the duration. */
164 amd64_mov_reg_reg (code, AMD64_R11, AMD64_ARG_REG1, 8);
166 /* Restore all registers except %rip and %r11 */
167 amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rax), 8);
168 amd64_mov_reg_membase (code, AMD64_RCX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rcx), 8);
169 amd64_mov_reg_membase (code, AMD64_RDX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rdx), 8);
170 amd64_mov_reg_membase (code, AMD64_RBX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rbx), 8);
171 amd64_mov_reg_membase (code, AMD64_RBP, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rbp), 8);
172 amd64_mov_reg_membase (code, AMD64_RSI, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rsi), 8);
173 amd64_mov_reg_membase (code, AMD64_RDI, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rdi), 8);
174 //amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r8), 8);
175 //amd64_mov_reg_membase (code, AMD64_R9, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r9), 8);
176 //amd64_mov_reg_membase (code, AMD64_R10, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r10), 8);
177 amd64_mov_reg_membase (code, AMD64_R12, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r12), 8);
178 amd64_mov_reg_membase (code, AMD64_R13, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r13), 8);
179 amd64_mov_reg_membase (code, AMD64_R14, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r14), 8);
180 amd64_mov_reg_membase (code, AMD64_R15, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r15), 8);
/* Under valgrind, load rip before switching rsp so the final indirect jump
 * does not read memory below the new stack pointer. */
182 if (mono_running_on_valgrind ()) {
183 /* Prevent 'Address 0x... is just below the stack ptr.' errors */
184 amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rsp), 8);
185 amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rip), 8);
186 amd64_mov_reg_reg (code, AMD64_RSP, AMD64_R8, 8);
188 amd64_mov_reg_membase (code, AMD64_RSP, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rsp), 8);
189 /* get return address */
190 amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rip), 8);
193 /* jump to the saved IP */
194 amd64_jump_reg (code, AMD64_R11);
196 mono_arch_flush_icache (start, code - start);
198 *code_size = code - start;
204 * mono_arch_get_call_filter:
206 * Returns a pointer to a method which calls an exception filter. We
207 * also use this function to call finally handlers (we pass NULL as
208 * @exc object in this case).
/*
 * Emits call_filter (MonoContext *ctx, unsigned long eip): sets up a frame,
 * saves the caller's callee-saved registers, loads the callee-saved registers
 * from @ctx, then calls the filter/finally handler at @eip and restores
 * everything on the way out.
 */
211 mono_arch_get_call_filter_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
220 start = code = mono_global_codeman_reserve (128);
222 /* call_filter (MonoContext *ctx, unsigned long eip) */
225 /* Alloc new frame */
226 amd64_push_reg (code, AMD64_RBP);
227 amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, 8);
229 /* Save callee saved regs */
231 for (i = 0; i < AMD64_NREG; ++i)
232 if (AMD64_IS_CALLEE_SAVED_REG (i)) {
233 amd64_push_reg (code, i);
239 amd64_push_reg (code, AMD64_RBP);
241 /* Make stack misaligned, the call will make it aligned again */
243 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
/* Load the frame pointer of the frame the filter belongs to ... */
246 amd64_mov_reg_membase (code, AMD64_RBP, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rbp), 8);
247 /* load callee saved regs */
248 amd64_mov_reg_membase (code, AMD64_RBX, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rbx), 8);
249 amd64_mov_reg_membase (code, AMD64_R12, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r12), 8);
250 amd64_mov_reg_membase (code, AMD64_R13, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r13), 8);
251 amd64_mov_reg_membase (code, AMD64_R14, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r14), 8);
252 amd64_mov_reg_membase (code, AMD64_R15, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r15), 8);
/* rdi/rsi are callee-saved only in the Win64 ABI. */
253 #ifdef PLATFORM_WIN32
254 amd64_mov_reg_membase (code, AMD64_RDI, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rdi), 8);
255 amd64_mov_reg_membase (code, AMD64_RSI, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rsi), 8);
258 /* call the handler */
259 amd64_call_reg (code, AMD64_ARG_REG2);
/* Undo the alignment adjustment and tear the frame back down. */
262 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
265 amd64_pop_reg (code, AMD64_RBP);
267 /* Restore callee saved regs */
268 for (i = AMD64_NREG; i >= 0; --i)
269 if (AMD64_IS_CALLEE_SAVED_REG (i))
270 amd64_pop_reg (code, i);
275 g_assert ((code - start) < 128);
277 mono_arch_flush_icache (start, code - start);
279 *code_size = code - start;
285 * The first few arguments are dummy, to force the other arguments to be passed on
286 * the stack, this avoids overwriting the argument registers in the throw trampoline.
/*
 * C-level exception throw entry point, called from the throw trampoline.
 * The first few arguments are dummies that occupy the six integer argument
 * registers, forcing the real arguments (exception object + captured register
 * state) onto the stack so the trampoline need not preserve the arg regs.
 * Builds a MonoContext from the captured state, runs EH, and never returns.
 */
289 mono_amd64_throw_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
290 guint64 dummy5, guint64 dummy6,
291 MonoObject *exc, guint64 rip, guint64 rsp,
292 guint64 rbx, guint64 rbp, guint64 r12, guint64 r13,
293 guint64 r14, guint64 r15, guint64 rdi, guint64 rsi,
294 guint64 rax, guint64 rcx, guint64 rdx,
/* Lazily resolved, cached restore-context trampoline. */
297 static void (*restore_context) (MonoContext *);
300 if (!restore_context)
301 restore_context = mono_get_restore_context ();
317 if (!rethrow && mono_debugger_throw_exception ((gpointer)(rip - 8), (gpointer)rsp, exc)) {
319 * The debugger wants us to stop on the `throw' instruction.
320 * By the time we get here, it already inserted a breakpoint on
321 * eip - 8 (which is the address of the `mov %r15,%rdi ; callq throw').
326 * In case of a rethrow, the JIT is emitting code like this:
328 * mov 0xffffffffffffffd0(%rbp),%rax'
332 * Here, restore_context() wouldn't restore the %rax register correctly.
336 restore_context (&ctx);
337 g_assert_not_reached ();
340 /* adjust eip so that it points into the call instruction */
/* Clear any stale stack trace so the new throw builds a fresh one. */
343 if (mono_object_isinst (exc, mono_defaults.exception_class)) {
344 MonoException *mono_ex = (MonoException*)exc;
346 mono_ex->stack_trace = NULL;
348 mono_handle_exception (&ctx, exc, (gpointer)rip, FALSE);
349 restore_context (&ctx);
351 g_assert_not_reached ();
/*
 * Emits the throw trampoline: captures the live register state by pushing it
 * as stack arguments (in reverse declaration order) for
 * mono_amd64_throw_exception(), then calls it.  @rethrow selects the
 * rethrow variant of that function's behavior.
 */
355 get_throw_trampoline (gboolean rethrow, guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
360 start = code = mono_global_codeman_reserve (64);
/* %r11 = entry rsp, used below to recover the caller's rsp and return rip. */
366 amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, 8);
/* Push the register-state arguments in reverse order (last arg first). */
369 amd64_push_imm (code, rethrow);
370 amd64_push_reg (code, AMD64_RDX);
371 amd64_push_reg (code, AMD64_RCX);
372 amd64_push_reg (code, AMD64_RAX);
373 amd64_push_reg (code, AMD64_RSI);
374 amd64_push_reg (code, AMD64_RDI);
375 amd64_push_reg (code, AMD64_R15);
376 amd64_push_reg (code, AMD64_R14);
377 amd64_push_reg (code, AMD64_R13);
378 amd64_push_reg (code, AMD64_R12);
379 amd64_push_reg (code, AMD64_RBP);
380 amd64_push_reg (code, AMD64_RBX);
/* Caller rsp = entry rsp + 8 (skip the return address pushed by the call). */
383 amd64_lea_membase (code, AMD64_RAX, AMD64_R11, 8);
384 amd64_push_reg (code, AMD64_RAX);
/* rip = return address at [entry rsp]. */
387 amd64_push_membase (code, AMD64_R11, 0);
/* The exception object, passed to the trampoline in ARG_REG1. */
390 amd64_push_reg (code, AMD64_ARG_REG1);
392 #ifdef PLATFORM_WIN32
/* Win64: the dummy arguments also go on the stack (only 4 reg args). */
394 amd64_push_imm (code, 0);
395 amd64_push_imm (code, 0);
396 amd64_push_imm (code, 0);
397 amd64_push_imm (code, 0);
398 amd64_push_imm (code, 0);
399 amd64_push_imm (code, 0);
/* AOT: resolve mono_amd64_throw_exception through a patch; JIT: immediate. */
403 *ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_throw_exception");
404 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
406 amd64_mov_reg_imm (code, AMD64_R11, mono_amd64_throw_exception);
408 amd64_call_reg (code, AMD64_R11);
/* Not reached: the throw function never returns. */
409 amd64_breakpoint (code);
411 mono_arch_flush_icache (start, code - start);
413 g_assert ((code - start) < 64);
415 *code_size = code - start;
421 * mono_arch_get_throw_exception:
423 * Returns a function pointer which can be used to raise
424 * exceptions. The returned function has the following
425 * signature: void (*func) (MonoException *exc);
/* Returns void (*func) (MonoException *exc) — the non-rethrow throw trampoline. */
429 mono_arch_get_throw_exception_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
431 return get_throw_trampoline (FALSE, code_size, ji, aot);
/* Same trampoline as above, but flagged as a rethrow (preserves stack trace). */
435 mono_arch_get_rethrow_exception_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
437 return get_throw_trampoline (TRUE, code_size, ji, aot);
/* Throw-by-name is not used on amd64; emit a stub that just traps if called. */
441 mono_arch_get_throw_exception_by_name_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
446 start = code = mono_global_codeman_reserve (64);
450 /* Not used on amd64 */
451 amd64_breakpoint (code);
453 mono_arch_flush_icache (start, code - start);
455 *code_size = code - start;
461 * mono_arch_get_throw_corlib_exception:
463 * Returns a function pointer which can be used to raise
464 * corlib exceptions. The returned function has the following
465 * signature: void (*func) (guint32 ex_token, guint32 offset);
466 * Here, offset is the offset which needs to be subtracted from the caller IP
467 * to get the IP of the throw. Passing the offset has the advantage that it
468 * needs no relocations in the caller.
/*
 * Emits void (*func) (guint32 ex_token, guint32 offset): creates the corlib
 * exception for @ex_token via mono_exception_from_token, computes the throw IP
 * as caller-IP minus @offset, and tail-jumps into the generic throw trampoline
 * with that IP on the stack.
 */
471 mono_arch_get_throw_corlib_exception_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
473 static guint8* start;
477 start = code = mono_global_codeman_reserve (64);
/* Save the offset argument across the call. */
482 amd64_push_reg (code, AMD64_ARG_REG2);
484 /* Call exception_from_token */
485 amd64_mov_reg_reg (code, AMD64_ARG_REG2, AMD64_ARG_REG1, 8);
/* AOT: load image + icall address via patches; JIT: immediates. */
487 *ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_IMAGE, mono_defaults.exception_class->image);
488 amd64_mov_reg_membase (code, AMD64_ARG_REG1, AMD64_RIP, 0, 8);
489 *ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_exception_from_token");
490 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
492 amd64_mov_reg_imm (code, AMD64_ARG_REG1, mono_defaults.exception_class->image);
493 amd64_mov_reg_imm (code, AMD64_R11, mono_exception_from_token);
/* Win64 ABI requires 32 bytes of shadow space for the callee. */
495 #ifdef PLATFORM_WIN32
496 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 32);
498 amd64_call_reg (code, AMD64_R11);
499 #ifdef PLATFORM_WIN32
500 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 32);
503 /* Compute throw_ip */
504 amd64_pop_reg (code, AMD64_ARG_REG2);
/* ARG_REG3 = caller return address; throw_ip = return address - offset. */
506 amd64_pop_reg (code, AMD64_ARG_REG3);
507 amd64_alu_reg_reg (code, X86_SUB, AMD64_ARG_REG3, AMD64_ARG_REG2);
509 /* Put the throw_ip at the top of the misaligned stack */
510 amd64_push_reg (code, AMD64_ARG_REG3);
512 throw_ex = (guint64)mono_get_throw_exception ();
514 /* Call throw_exception */
515 amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_RAX, 8);
517 *ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_throw_exception");
518 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
520 amd64_mov_reg_imm (code, AMD64_R11, throw_ex);
522 /* The original IP is on the stack */
523 amd64_jump_reg (code, AMD64_R11);
525 g_assert ((code - start) < 64);
527 mono_arch_flush_icache (start, code - start);
529 *code_size = code - start;
534 /* mono_arch_find_jit_info:
536 * This function is used to gather information from @ctx. It return the
537 * MonoJitInfo of the corresponding function, unwinds one stack frame and
538 * stores the resulting context into @new_ctx. It also stores a string
539 * describing the stack location into @trace (if not NULL), and modifies
540 * the @lmf if necessary. @native_offset return the IP offset from the
541 * start of the function or -1 if that info is not available.
/*
 * Unwind one stack frame: find the MonoJitInfo for the IP in @ctx, compute the
 * caller's register state into @new_ctx, and pop LMF entries that the unwind
 * passes.  Returns the JitInfo of the current frame (or @res), or handles the
 * managed-to-native transition through the LMF chain when no JitInfo matches.
 */
544 mono_arch_find_jit_info (MonoDomain *domain, MonoJitTlsData *jit_tls, MonoJitInfo *res, MonoJitInfo *prev_ji, MonoContext *ctx,
545 MonoContext *new_ctx, MonoLMF **lmf, gboolean *managed)
549 gpointer ip = MONO_CONTEXT_GET_IP (ctx);
551 /* Avoid costly table lookup during stack overflow */
552 if (prev_ji && (ip > prev_ji->code_start && ((guint8*)ip < ((guint8*)prev_ji->code_start) + prev_ji->code_size)))
555 ji = mono_jit_info_table_find (domain, ip);
/* Bit 31 of used_regs flags a frame compiled without a frame pointer. */
564 gboolean omit_fp = (ji->used_regs & (1 << 31)) > 0;
567 if (!ji->method->wrapper_type)
571 * If a method has save_lmf set, then register save/restore code is not generated
572 * by the JIT, so we have to restore callee saved registers from the lmf.
574 if (ji->method->save_lmf) {
578 * *lmf might not point to the LMF pushed by this method, so compute the LMF
/* LMF location depends on whether the frame uses a frame pointer. */
582 lmf_addr = (MonoLMF*)ctx->rsp;
584 lmf_addr = (MonoLMF*)(ctx->rbp - sizeof (MonoLMF));
586 new_ctx->rbp = lmf_addr->rbp;
587 new_ctx->rbx = lmf_addr->rbx;
588 new_ctx->r12 = lmf_addr->r12;
589 new_ctx->r13 = lmf_addr->r13;
590 new_ctx->r14 = lmf_addr->r14;
591 new_ctx->r15 = lmf_addr->r15;
594 offset = omit_fp ? 0 : -1;
595 /* restore caller saved registers */
596 for (i = 0; i < AMD64_NREG; i ++)
597 if (AMD64_IS_CALLEE_SAVED_REG (i) && (ji->used_regs & (1 << i))) {
/* Saved regs live above rsp (omit-fp) or below rbp (frame-pointer frames). */
601 reg = *((guint64*)ctx->rsp + offset);
605 reg = *((guint64 *)ctx->rbp + offset);
629 g_assert_not_reached ();
/* Pop LMF entries that the unwound frame has passed (low bit is a flag). */
634 if (*lmf && ((*lmf) != jit_tls->first_lmf) && (MONO_CONTEXT_GET_SP (ctx) >= (gpointer)(*lmf)->rsp)) {
635 /* remove any unused lmf */
636 *lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~1);
/* omit-fp frames encode their stack size in bits 16-30 of used_regs. */
641 new_ctx->rsp += (ji->used_regs >> 16) & (0x7fff);
642 new_ctx->rip = *((guint64 *)new_ctx->rsp) - 1;
643 /* Pop return address */
647 /* Pop EBP and the return address */
648 new_ctx->rsp = ctx->rbp + (2 * sizeof (gpointer));
649 /* we subtract 1, so that the IP points into the call instruction */
650 new_ctx->rip = *((guint64 *)ctx->rbp + 1) - 1;
651 new_ctx->rbp = *((guint64 *)ctx->rbp);
654 /* Pop arguments off the stack */
656 MonoJitArgumentInfo *arg_info = g_newa (MonoJitArgumentInfo, mono_method_signature (ji->method)->param_count + 1);
658 guint32 stack_to_pop = mono_arch_get_argument_info (mono_method_signature (ji->method), mono_method_signature (ji->method)->param_count, arg_info);
659 new_ctx->rsp += stack_to_pop;
/* No JitInfo: we are at a managed-to-native transition; unwind via the LMF. */
666 if (((guint64)(*lmf)->previous_lmf) & 1) {
667 /* This LMF has the rip field set */
669 } else if ((*lmf)->rsp == 0) {
674 * The rsp field is set just before the call which transitioned to native
675 * code. Obtain the rip from the stack.
677 rip = *(guint64*)((*lmf)->rsp - sizeof (gpointer));
680 ji = mono_jit_info_table_find (domain, (gpointer)rip);
682 // FIXME: This can happen with multiple appdomains (bug #444383)
687 new_ctx->rbp = (*lmf)->rbp;
688 new_ctx->rsp = (*lmf)->rsp;
690 new_ctx->rbx = (*lmf)->rbx;
691 new_ctx->r12 = (*lmf)->r12;
692 new_ctx->r13 = (*lmf)->r13;
693 new_ctx->r14 = (*lmf)->r14;
694 new_ctx->r15 = (*lmf)->r15;
696 *lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~1);
698 return ji ? ji : res;
705 * mono_arch_handle_exception:
707 * @ctx: saved processor state
708 * @obj: the exception object
/*
 * Signal-handler entry to EH: convert the platform signal context to a
 * MonoContext, run the exception machinery, and copy any changes back so
 * the signal return resumes at the handler's chosen location.
 */
711 mono_arch_handle_exception (void *sigctx, gpointer obj, gboolean test_only)
715 mono_arch_sigctx_to_monoctx (sigctx, &mctx);
717 mono_handle_exception (&mctx, obj, MONO_CONTEXT_GET_IP (&mctx), test_only);
719 mono_arch_monoctx_to_sigctx (&mctx, sigctx);
724 #ifdef MONO_ARCH_USE_SIGACTION
/* Return the general-purpose register array embedded in a ucontext_t; the
 * mcontext layout differs per OS, hence the two #ifdef arms. */
725 static inline guint64*
726 gregs_from_ucontext (ucontext_t *ctx)
729 guint64 *gregs = (guint64 *) &ctx->uc_mcontext;
731 guint64 *gregs = (guint64 *) &ctx->uc_mcontext.gregs;
/*
 * Copy the register state out of a signal context into @mctx.  With sigaction
 * the source is the ucontext gregs array; otherwise sigctx is already a
 * MonoContext and the fields are copied directly.
 */
738 mono_arch_sigctx_to_monoctx (void *sigctx, MonoContext *mctx)
740 #ifdef MONO_ARCH_USE_SIGACTION
741 ucontext_t *ctx = (ucontext_t*)sigctx;
743 guint64 *gregs = gregs_from_ucontext (ctx);
745 mctx->rax = gregs [REG_RAX];
746 mctx->rbx = gregs [REG_RBX];
747 mctx->rcx = gregs [REG_RCX];
748 mctx->rdx = gregs [REG_RDX];
749 mctx->rbp = gregs [REG_RBP];
750 mctx->rsp = gregs [REG_RSP];
751 mctx->rsi = gregs [REG_RSI];
752 mctx->rdi = gregs [REG_RDI];
753 mctx->rip = gregs [REG_RIP];
754 mctx->r12 = gregs [REG_R12];
755 mctx->r13 = gregs [REG_R13];
756 mctx->r14 = gregs [REG_R14];
757 mctx->r15 = gregs [REG_R15];
759 MonoContext *ctx = (MonoContext *)sigctx;
761 mctx->rax = ctx->rax;
762 mctx->rbx = ctx->rbx;
763 mctx->rcx = ctx->rcx;
764 mctx->rdx = ctx->rdx;
765 mctx->rbp = ctx->rbp;
766 mctx->rsp = ctx->rsp;
767 mctx->rsi = ctx->rsi;
768 mctx->rdi = ctx->rdi;
769 mctx->rip = ctx->rip;
770 mctx->r12 = ctx->r12;
771 mctx->r13 = ctx->r13;
772 mctx->r14 = ctx->r14;
773 mctx->r15 = ctx->r15;
/*
 * Inverse of mono_arch_sigctx_to_monoctx: write the MonoContext register
 * values back into the signal context so the kernel resumes with them.
 */
778 mono_arch_monoctx_to_sigctx (MonoContext *mctx, void *sigctx)
780 #ifdef MONO_ARCH_USE_SIGACTION
781 ucontext_t *ctx = (ucontext_t*)sigctx;
783 guint64 *gregs = gregs_from_ucontext (ctx);
785 gregs [REG_RAX] = mctx->rax;
786 gregs [REG_RBX] = mctx->rbx;
787 gregs [REG_RCX] = mctx->rcx;
788 gregs [REG_RDX] = mctx->rdx;
789 gregs [REG_RBP] = mctx->rbp;
790 gregs [REG_RSP] = mctx->rsp;
791 gregs [REG_RSI] = mctx->rsi;
792 gregs [REG_RDI] = mctx->rdi;
793 gregs [REG_RIP] = mctx->rip;
794 gregs [REG_R12] = mctx->r12;
795 gregs [REG_R13] = mctx->r13;
796 gregs [REG_R14] = mctx->r14;
797 gregs [REG_R15] = mctx->r15;
799 MonoContext *ctx = (MonoContext *)sigctx;
801 ctx->rax = mctx->rax;
802 ctx->rbx = mctx->rbx;
803 ctx->rcx = mctx->rcx;
804 ctx->rdx = mctx->rdx;
805 ctx->rbp = mctx->rbp;
806 ctx->rsp = mctx->rsp;
807 ctx->rsi = mctx->rsi;
808 ctx->rdi = mctx->rdi;
809 ctx->rip = mctx->rip;
810 ctx->r12 = mctx->r12;
811 ctx->r13 = mctx->r13;
812 ctx->r14 = mctx->r14;
813 ctx->r15 = mctx->r15;
/* Extract the instruction pointer from a signal context (either ucontext
 * gregs or a raw MonoContext, matching the conversion helpers above). */
818 mono_arch_ip_from_context (void *sigctx)
821 #ifdef MONO_ARCH_USE_SIGACTION
823 ucontext_t *ctx = (ucontext_t*)sigctx;
825 guint64 *gregs = gregs_from_ucontext (ctx);
827 return (gpointer)gregs [REG_RIP];
829 MonoContext *ctx = sigctx;
830 return (gpointer)ctx->rip;
/* Re-protect the soft stack-overflow guard pages after the handler has
 * finished running on them; invoked via prepare_for_guard_pages(). */
835 restore_soft_guard_pages (void)
837 MonoJitTlsData *jit_tls = TlsGetValue (mono_jit_tls_id);
838 if (jit_tls->stack_ovf_guard_base)
839 mono_mprotect (jit_tls->stack_ovf_guard_base, jit_tls->stack_ovf_guard_size, MONO_MMAP_NONE);
843 * this function modifies mctx so that when it is restored, it
844 * won't execute starting at mctx.eip, but in a function that
845 * will restore the protection on the soft-guard pages and return back to
846 * continue at mctx.eip.
/* Redirect @mctx so that, when restored, control first passes through
 * restore_soft_guard_pages() and then returns to the original rip, which is
 * pushed on the (adjusted) stack as the fake return address. */
849 prepare_for_guard_pages (MonoContext *mctx)
852 sp = (gpointer)(mctx->rsp);
854 /* the return addr */
855 sp [0] = (gpointer)(mctx->rip);
856 mctx->rip = (guint64)restore_soft_guard_pages;
857 mctx->rsp = (guint64)sp;
/* Runs on the regular stack after mono_arch_handle_altstack_exception() built
 * a call frame for it: handles the exception, arranges guard-page restoration
 * when this was a stack overflow, then resumes via restore_context. */
861 altstack_handle_and_restore (void *sigctx, gpointer obj, gboolean stack_ovf)
863 void (*restore_context) (MonoContext *);
866 restore_context = mono_get_restore_context ();
867 mono_arch_sigctx_to_monoctx (sigctx, &mctx);
868 mono_handle_exception (&mctx, obj, MONO_CONTEXT_GET_IP (&mctx), FALSE);
870 prepare_for_guard_pages (&mctx);
871 restore_context (&mctx);
/*
 * SIGSEGV handler running on the signal altstack.  Builds a call frame on the
 * faulting thread's real stack (copying the ucontext there) and rewrites the
 * signal context so that returning from the handler calls
 * altstack_handle_and_restore() on that stack with the right arguments.
 */
875 mono_arch_handle_altstack_exception (void *sigctx, gpointer fault_addr, gboolean stack_ovf)
877 #ifdef MONO_ARCH_USE_SIGACTION
878 MonoException *exc = NULL;
879 ucontext_t *ctx = (ucontext_t*)sigctx;
880 guint64 *gregs = gregs_from_ucontext (ctx);
881 MonoJitInfo *ji = mono_jit_info_table_find (mono_domain_get (), (gpointer)gregs [REG_RIP]);
/* Use the preallocated stack-overflow exception; a fault outside managed
 * code is a native crash. */
886 exc = mono_domain_get ()->stack_overflow_ex;
888 mono_handle_native_sigsegv (SIGSEGV, sigctx);
890 /* setup a call frame on the real stack so that control is returned there
891 * and exception handling can continue.
892 * The frame looks like:
896 * 128 is the size of the red zone
898 frame_size = sizeof (ucontext_t) + sizeof (gpointer) * 4 + 128;
/* 16-byte align the new stack pointer before carving out the frame. */
901 sp = (gpointer)(gregs [REG_RSP] & ~15);
902 sp = (gpointer)((char*)sp - frame_size);
903 /* the arguments must be aligned */
904 sp [-1] = (gpointer)gregs [REG_RIP];
905 /* may need to adjust pointers in the new struct copy, depending on the OS */
906 memcpy (sp + 4, ctx, sizeof (ucontext_t));
907 /* at the return from the signal handler execution starts in altstack_handle_and_restore() */
908 gregs [REG_RIP] = (unsigned long)altstack_handle_and_restore;
909 gregs [REG_RSP] = (unsigned long)(sp - 1);
/* SysV argument registers: rdi = copied ucontext, rsi = exc, rdx = stack_ovf. */
910 gregs [REG_RDI] = (unsigned long)(sp + 4);
911 gregs [REG_RSI] = (guint64)exc;
912 gregs [REG_RDX] = stack_ovf;
/* Called from the throw-pending-exception stub: returns the hijacked original
 * return address stored in the LMF and clears the "rip is set" flag bit that
 * mono_arch_notify_pending_exc() set in previous_lmf. */
917 get_original_ip (void)
919 MonoLMF *lmf = mono_get_lmf ();
923 /* Reset the change to previous_lmf */
924 lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf & ~1);
/*
 * Emits (once, cached in @start) the stub that the hijacked return address
 * points to: it saves the call's potential return values (rax/rdx/xmm0),
 * fetches the pending exception, and either tail-jumps into the throw
 * trampoline with the original IP pushed, or — when no exception is pending —
 * restores the saved registers and jumps back to the original return address.
 */
930 get_throw_pending_exception (void)
932 static guint8* start;
933 static gboolean inited = FALSE;
936 gpointer throw_trampoline;
941 start = code = mono_global_codeman_reserve (128);
943 /* We are in the frame of a managed method after a call */
945 * We would like to throw the pending exception in such a way that it looks to
946 * be thrown from the managed method.
949 /* Save registers which might contain the return value of the call */
950 amd64_push_reg (code, AMD64_RAX);
951 amd64_push_reg (code, AMD64_RDX);
/* Save xmm0 (fp return value) and keep the stack aligned for the call. */
953 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
954 amd64_movsd_membase_reg (code, AMD64_RSP, 0, AMD64_XMM0);
957 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
959 /* Obtain the pending exception */
960 amd64_mov_reg_imm (code, AMD64_R11, mono_thread_get_and_clear_pending_exception);
961 amd64_call_reg (code, AMD64_R11);
963 /* Check if it is NULL, and branch */
964 amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0);
965 br[0] = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE);
967 /* exc != NULL branch */
969 /* Save the exc on the stack */
970 amd64_push_reg (code, AMD64_RAX);
/* Realign the stack for the next call. */
972 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
974 /* Obtain the original ip and clear the flag in previous_lmf */
975 amd64_mov_reg_imm (code, AMD64_R11, get_original_ip);
976 amd64_call_reg (code, AMD64_R11);
/* Reload the saved exception object from the stack into %r11. */
979 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, 8, 8);
981 /* Pop saved stuff from the stack */
982 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 6 * 8);
984 /* Setup arguments for the throw trampoline */
986 amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_R11, 8);
987 /* The trampoline expects the caller ip to be pushed on the stack */
988 amd64_push_reg (code, AMD64_RAX);
990 /* Call the throw trampoline */
991 throw_trampoline = mono_get_throw_exception ();
992 amd64_mov_reg_imm (code, AMD64_R11, throw_trampoline);
993 /* We use a jump instead of a call so we can push the original ip on the stack */
994 amd64_jump_reg (code, AMD64_R11);
996 /* ex == NULL branch */
997 mono_amd64_patch (br [0], code);
999 /* Obtain the original ip and clear the flag in previous_lmf */
1000 amd64_mov_reg_imm (code, AMD64_R11, get_original_ip);
1001 amd64_call_reg (code, AMD64_R11);
1002 amd64_mov_reg_reg (code, AMD64_R11, AMD64_RAX, 8);
1004 /* Restore registers */
1005 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
1006 amd64_movsd_reg_membase (code, AMD64_XMM0, AMD64_RSP, 0);
1007 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
1008 amd64_pop_reg (code, AMD64_RDX);
1009 amd64_pop_reg (code, AMD64_RAX);
1011 /* Return to original code */
1012 amd64_jump_reg (code, AMD64_R11);
1014 g_assert ((code - start) < 128);
1022 * Called when a thread receives an async exception while executing unmanaged code.
1023 * Instead of checking for this exception in the managed-to-native wrapper, we hijack
1024 * the return address on the stack to point to a helper routine which throws the
/*
 * Async-exception delivery while the thread runs unmanaged code: hijack the
 * managed caller's return address (saved in the LMF) so that, on return from
 * native code, execution enters the throw-pending-exception stub instead.
 */
1028 mono_arch_notify_pending_exc (void)
1030 MonoLMF *lmf = mono_get_lmf ();
/* Low bit of previous_lmf doubles as the "rip already captured" flag. */
1036 if ((guint64)lmf->previous_lmf & 1)
1037 /* Already hijacked or trampoline LMF entry */
1040 /* lmf->rsp is set just before making the call which transitions to unmanaged code */
1041 lmf->rip = *(guint64*)(lmf->rsp - 8);
1042 /* Signal that lmf->rip is set */
1043 lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf | 1);
/* Redirect the return address to the throw stub. */
1045 *(gpointer*)(lmf->rsp - 8) = get_throw_pending_exception ();
1048 #ifdef PLATFORM_WIN32
1051 * The mono_arch_unwindinfo* methods are used to build and add
1052 * function table info for each emitted method from mono. On Winx64
1053 * the seh handler will not be called if the mono methods are not
1054 * added to the function table.
1056 * We should not need to add non-volatile register info to the
1057 * table since mono stores that info elsewhere. (Except for the register
/* Hand-rolled subset of the Win64 UNWIND_CODE/UNWIND_INFO structures (see the
 * x64 exception-handling docs); MONO_MAX_UNWIND_CODES bounds the inline code
 * array, sized for the prolog patterns mono emits (see comments below). */
1061 #define MONO_MAX_UNWIND_CODES 22
1063 typedef union _UNWIND_CODE {
1066 guchar UnwindOp : 4;
1069 gushort FrameOffset;
1070 } UNWIND_CODE, *PUNWIND_CODE;
1072 typedef struct _UNWIND_INFO {
1075 guchar SizeOfProlog;
1076 guchar CountOfCodes;
1077 guchar FrameRegister : 4;
1078 guchar FrameOffset : 4;
1079 /* custom size for mono, allowing for: */
1080 /*UWOP_PUSH_NONVOL ebp offset = 21*/
1081 /*UWOP_ALLOC_LARGE : requires 2 or 3 offset = 20*/
1082 /*UWOP_SET_FPREG : requires 2 offset = 17*/
1083 /*UWOP_PUSH_NONVOL offset = 15-0*/
1084 UNWIND_CODE UnwindCode[MONO_MAX_UNWIND_CODES];
1086 /* UNWIND_CODE MoreUnwindCode[((CountOfCodes + 1) & ~1) - 1];
1088 * OPTIONAL ULONG ExceptionHandler;
1089 * OPTIONAL ULONG FunctionEntry;
1091 * OPTIONAL ULONG ExceptionData[]; */
1092 } UNWIND_INFO, *PUNWIND_INFO;
/* A RUNTIME_FUNCTION plus its UNWIND_INFO, stored together past the method's code. */
1096 RUNTIME_FUNCTION runtimeFunction;
1097 UNWIND_INFO unwindInfo;
1098 } MonoUnwindInfo, *PMonoUnwindInfo;
/* Allocate a zeroed MonoUnwindInfo (Version 1, per the Win64 format) and
 * store it in *monoui; caller owns the allocation until install. */
1101 mono_arch_unwindinfo_create (gpointer* monoui)
1103 PMonoUnwindInfo newunwindinfo;
1104 *monoui = newunwindinfo = g_new0 (MonoUnwindInfo, 1);
1105 newunwindinfo->unwindInfo.Version = 1;
/*
 * Record a UWOP_PUSH_NONVOL prolog op for register @reg at prolog offset
 * nextip-codebegin.  Codes are filled from the END of the UnwindCode array
 * backwards, so the most recent op ends up first — the order Win64 expects.
 */
1109 mono_arch_unwindinfo_add_push_nonvol (gpointer* monoui, gpointer codebegin, gpointer nextip, guchar reg )
1111 PMonoUnwindInfo unwindinfo;
1112 PUNWIND_CODE unwindcode;
1115 mono_arch_unwindinfo_create (monoui);
1117 unwindinfo = (MonoUnwindInfo*)*monoui;
1119 if (unwindinfo->unwindInfo.CountOfCodes >= MONO_MAX_UNWIND_CODES)
1120 g_error ("Larger allocation needed for the unwind information.");
1122 codeindex = MONO_MAX_UNWIND_CODES - (++unwindinfo->unwindInfo.CountOfCodes);
1123 unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];
1124 unwindcode->UnwindOp = 0; /*UWOP_PUSH_NONVOL*/
1125 unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin));
1126 unwindcode->OpInfo = reg;
/* Prolog ops must be added in increasing code-offset order. */
1128 if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
1129 g_error ("Adding unwind info in wrong order.");
1131 unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
/*
 * Record a UWOP_SET_FPREG prolog op (establishes @reg as the frame pointer,
 * frame offset 0).  The op occupies two UNWIND_CODE slots, hence the +2 on
 * CountOfCodes; codes fill the array from the end, newest first.
 */
1135 mono_arch_unwindinfo_add_set_fpreg (gpointer* monoui, gpointer codebegin, gpointer nextip, guchar reg )
1137 PMonoUnwindInfo unwindinfo;
1138 PUNWIND_CODE unwindcode;
1141 mono_arch_unwindinfo_create (monoui);
1143 unwindinfo = (MonoUnwindInfo*)*monoui;
1145 if (unwindinfo->unwindInfo.CountOfCodes + 1 >= MONO_MAX_UNWIND_CODES)
1146 g_error ("Larger allocation needed for the unwind information.");
1148 codeindex = MONO_MAX_UNWIND_CODES - (unwindinfo->unwindInfo.CountOfCodes += 2);
1149 unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];
1150 unwindcode->FrameOffset = 0; /*Assuming no frame pointer offset for mono*/
1152 unwindcode->UnwindOp = 3; /*UWOP_SET_FPREG*/
1153 unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin));
1154 unwindcode->OpInfo = reg;
1156 unwindinfo->unwindInfo.FrameRegister = reg;
/* Prolog ops must be added in increasing code-offset order. */
1158 if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
1159 g_error ("Adding unwind info in wrong order.");
1161 unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
/*
 * Record a stack-allocation prolog op of @size bytes.  Picks the encoding by
 * size: UWOP_ALLOC_SMALL (1 slot, size <= 128), UWOP_ALLOC_LARGE with scaled
 * 16-bit operand (2 slots, size <= 0x7FFF8), or UWOP_ALLOC_LARGE with an
 * unscaled 32-bit operand (3 slots).
 */
1165 mono_arch_unwindinfo_add_alloc_stack (gpointer* monoui, gpointer codebegin, gpointer nextip, guint size )
1167 PMonoUnwindInfo unwindinfo;
1168 PUNWIND_CODE unwindcode;
1172 mono_arch_unwindinfo_create (monoui);
1174 unwindinfo = (MonoUnwindInfo*)*monoui;
1177 g_error ("Stack allocation must be equal to or greater than 0x8.");
1181 else if (size <= 0x7FFF8)
1186 if (unwindinfo->unwindInfo.CountOfCodes + codesneeded > MONO_MAX_UNWIND_CODES)
1187 g_error ("Larger allocation needed for the unwind information.");
1189 codeindex = MONO_MAX_UNWIND_CODES - (unwindinfo->unwindInfo.CountOfCodes += codesneeded);
1190 unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];
1192 if (codesneeded == 1) {
1193 /*The size of the allocation is
1194 (the number in the OpInfo member) times 8 plus 8*/
1195 unwindcode->OpInfo = (size - 8)/8;
1196 unwindcode->UnwindOp = 2; /*UWOP_ALLOC_SMALL*/
1199 if (codesneeded == 3) {
1200 /*the unscaled size of the allocation is recorded
1201 in the next two slots in little-endian format*/
1202 *((unsigned int*)(&unwindcode->FrameOffset)) = size;
1204 unwindcode->OpInfo = 1;
1207 /*the size of the allocation divided by 8
1208 is recorded in the next slot*/
1209 unwindcode->FrameOffset = size/8;
1211 unwindcode->OpInfo = 0;
1214 unwindcode->UnwindOp = 1; /*UWOP_ALLOC_LARGE*/
1217 unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin));
/* Prolog ops must be added in increasing code-offset order. */
1219 if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
1220 g_error ("Adding unwind info in wrong order.");
1222 unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
/* Size in bytes needed to store this unwind info after the method's code:
 * 8 alignment bytes + the struct, minus the unused tail of the (end-filled)
 * UnwindCode array. */
1226 mono_arch_unwindinfo_get_size (gpointer monoui)
1228 PMonoUnwindInfo unwindinfo;
1232 unwindinfo = (MonoUnwindInfo*)monoui;
1233 return (8 + sizeof (MonoUnwindInfo)) -
1234 (sizeof (UNWIND_CODE) * (MONO_MAX_UNWIND_CODES - unwindinfo->unwindInfo.CountOfCodes));
/*
 * RtlInstallFunctionTableCallback callback: given a PC inside JITted code,
 * locate the MonoUnwindInfo stored (8-aligned) right after the method body,
 * fix up its UnwindData to be relative to the table's base (@Context), and
 * return its RUNTIME_FUNCTION for the OS unwinder.
 */
1238 MONO_GET_RUNTIME_FUNCTION_CALLBACK ( DWORD64 ControlPc, IN PVOID Context )
1242 PMonoUnwindInfo targetinfo;
1243 MonoDomain *domain = mono_domain_get ();
1245 ji = mono_jit_info_table_find (domain, (char*)ControlPc);
1249 pos = (guint64)(((char*)ji->code_start) + ji->code_size);
1251 targetinfo = (PMonoUnwindInfo)ALIGN_TO (pos, 8);
/* UnwindData must be an RVA relative to the callback's base address. */
1253 targetinfo->runtimeFunction.UnwindData = ((DWORD64)&targetinfo->unwindInfo) - ((DWORD64)Context);
1255 return &targetinfo->runtimeFunction;
/*
 * Copy the accumulated unwind info to its final home just past the method's
 * code (8-aligned), compacting the end-filled UnwindCode array to the front,
 * free the builder copy, and register the region with the OS via
 * RtlInstallFunctionTableCallback so SEH can unwind through JITted code.
 */
1259 mono_arch_unwindinfo_install_unwind_info (gpointer* monoui, gpointer code, guint code_size)
1261 PMonoUnwindInfo unwindinfo, targetinfo;
1263 guint64 targetlocation;
1267 unwindinfo = (MonoUnwindInfo*)*monoui;
1268 targetlocation = (guint64)&(((guchar*)code)[code_size]);
1269 targetinfo = (PMonoUnwindInfo) ALIGN_TO(targetlocation, 8);
1271 unwindinfo->runtimeFunction.EndAddress = code_size;
1272 unwindinfo->runtimeFunction.UnwindData = ((guchar*)&targetinfo->unwindInfo) - ((guchar*)code);
/* Copy header without the code array, then just the used tail of the codes. */
1274 memcpy (targetinfo, unwindinfo, sizeof (MonoUnwindInfo) - (sizeof (UNWIND_CODE) * MONO_MAX_UNWIND_CODES));
1276 codecount = unwindinfo->unwindInfo.CountOfCodes;
1278 memcpy (&targetinfo->unwindInfo.UnwindCode[0], &unwindinfo->unwindInfo.UnwindCode[MONO_MAX_UNWIND_CODES-codecount],
1279 sizeof (UNWIND_CODE) * unwindinfo->unwindInfo.CountOfCodes);
1282 g_free (unwindinfo);
/* The |0x3 identifier marks a dynamic function table (see RtlInstallFunctionTableCallback docs). */
1285 RtlInstallFunctionTableCallback (((DWORD64)code) | 0x3, (DWORD64)code, code_size, MONO_GET_RUNTIME_FUNCTION_CALLBACK, code, NULL);