2 * exceptions-amd64.c: exception support for AMD64
5 * Dietmar Maurer (dietmar@ximian.com)
7 * (C) 2001 Ximian, Inc.
14 #ifndef PLATFORM_WIN32
15 #include <sys/ucontext.h>
18 #include <mono/arch/amd64/amd64-codegen.h>
19 #include <mono/metadata/appdomain.h>
20 #include <mono/metadata/tabledefs.h>
21 #include <mono/metadata/threads.h>
22 #include <mono/metadata/threads-types.h>
23 #include <mono/metadata/debug-helpers.h>
24 #include <mono/metadata/exception.h>
25 #include <mono/metadata/gc-internal.h>
26 #include <mono/metadata/mono-debug.h>
27 #include <mono/utils/mono-mmap.h>
30 #include "mini-amd64.h"
32 #define ALIGN_TO(val,align) (((val) + ((align) - 1)) & ~((align) - 1))
/*
 * Win32 SEH support: per-signal-kind handler slots installed via
 * win32_seh_set_handler(), plus the previously installed top-level filter
 * so it can be restored in win32_seh_cleanup().
 */
35 static MonoW32ExceptionHandler fpe_handler;
36 static MonoW32ExceptionHandler ill_handler;
37 static MonoW32ExceptionHandler segv_handler;
39 static LPTOP_LEVEL_EXCEPTION_FILTER old_handler;
/*
 * Dispatch to the registered handler for exception kind _ex, if any.
 * NOTE: relies on a variable named `sctx` (the MonoContext copy) being in
 * scope at the expansion site — only usable inside seh_handler().
 */
41 #define W32_SEH_HANDLE_EX(_ex) \
42 if (_ex##_handler) _ex##_handler((int)sctx)
45 * Unhandled Exception Filter
46 * Top-level per-process exception handler.
/*
 * seh_handler:
 * Top-level per-process Win32 exception filter. Copies the Win32 CONTEXT
 * into a heap-allocated MonoContext (`sctx`), dispatches to the mono
 * handler registered for the exception kind, then copies the (possibly
 * modified) MonoContext back into the Win32 CONTEXT so execution resumes
 * at whatever state the mono handler established.
 * NOTE(review): this excerpt is fragmentary — the declarations of
 * res/er/ctx/sctx, the `break` statements between cases, the rest of the
 * context copy-in/copy-out, and the g_free/return appear to be elided;
 * confirm against the full file before editing.
 */
48 LONG CALLBACK seh_handler(EXCEPTION_POINTERS* ep)
55 res = EXCEPTION_CONTINUE_EXECUTION;
57 er = ep->ExceptionRecord;
58 ctx = ep->ContextRecord;
59 sctx = g_malloc(sizeof(MonoContext));
61 /* Copy Win32 context to UNIX style context */
76 switch (er->ExceptionCode) {
77 case EXCEPTION_ACCESS_VIOLATION:
78 W32_SEH_HANDLE_EX(segv);
80 case EXCEPTION_ILLEGAL_INSTRUCTION:
81 W32_SEH_HANDLE_EX(ill);
/* All arithmetic faults funnel into the single FPE handler. */
83 case EXCEPTION_INT_DIVIDE_BY_ZERO:
84 case EXCEPTION_INT_OVERFLOW:
85 case EXCEPTION_FLT_DIVIDE_BY_ZERO:
86 case EXCEPTION_FLT_OVERFLOW:
87 case EXCEPTION_FLT_UNDERFLOW:
88 case EXCEPTION_FLT_INEXACT_RESULT:
89 W32_SEH_HANDLE_EX(fpe);
95 /* Copy context back */
100 ctx->Rbp = sctx->rbp;
101 ctx->Rsp = sctx->rsp;
102 ctx->Rsi = sctx->rsi;
103 ctx->Rdi = sctx->rdi;
104 ctx->Rip = sctx->rip;
/*
 * win32_seh_init:
 * Install seh_handler as the process-wide unhandled-exception filter,
 * remembering the previous filter so win32_seh_cleanup() can restore it.
 */
111 void win32_seh_init()
113 old_handler = SetUnhandledExceptionFilter(seh_handler);
/*
 * win32_seh_cleanup:
 * Restore the exception filter that was active before win32_seh_init().
 */
116 void win32_seh_cleanup()
118 if (old_handler) SetUnhandledExceptionFilter(old_handler);
/*
 * win32_seh_set_handler:
 * Register @handler for one exception kind (@type selects FPE, illegal
 * instruction, or SEGV).
 * NOTE(review): the switch/case lines selecting which slot to assign are
 * elided in this excerpt — only the three assignments are visible.
 */
121 void win32_seh_set_handler(int type, MonoW32ExceptionHandler handler)
125 fpe_handler = handler;
128 ill_handler = handler;
131 segv_handler = handler;
138 #endif /* PLATFORM_WIN32 */
141 * mono_arch_get_restore_context:
143 * Returns a pointer to a method which restores a previously saved sigcontext.
/*
 * mono_arch_get_restore_context_full:
 * Emits and returns a native stub: void (*) (MonoContext *ctx), which
 * loads every register saved in *ctx (except %r11, which is used as the
 * scratch pointer to the context) and jumps to the saved %rip.
 * @code_size receives the emitted length; @ji/@aot support AOT patching.
 */
146 mono_arch_get_restore_context_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
148 guint8 *start = NULL;
151 /* restore_context (MonoContext *ctx) */
155 start = code = mono_global_codeman_reserve (256);
/* Keep the ctx pointer in %r11 while clobbering the argument registers. */
157 amd64_mov_reg_reg (code, AMD64_R11, AMD64_ARG_REG1, 8);
159 /* Restore all registers except %rip and %r11 */
160 amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rax), 8);
161 amd64_mov_reg_membase (code, AMD64_RCX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rcx), 8);
162 amd64_mov_reg_membase (code, AMD64_RDX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rdx), 8);
163 amd64_mov_reg_membase (code, AMD64_RBX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rbx), 8);
164 amd64_mov_reg_membase (code, AMD64_RBP, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rbp), 8);
165 amd64_mov_reg_membase (code, AMD64_RSI, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rsi), 8);
166 amd64_mov_reg_membase (code, AMD64_RDI, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rdi), 8);
167 //amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r8), 8);
168 //amd64_mov_reg_membase (code, AMD64_R9, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r9), 8);
169 //amd64_mov_reg_membase (code, AMD64_R10, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r10), 8);
170 amd64_mov_reg_membase (code, AMD64_R12, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r12), 8);
171 amd64_mov_reg_membase (code, AMD64_R13, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r13), 8);
172 amd64_mov_reg_membase (code, AMD64_R14, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r14), 8);
173 amd64_mov_reg_membase (code, AMD64_R15, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r15), 8);
175 if (mono_running_on_valgrind ()) {
176 /* Prevent 'Address 0x... is just below the stack ptr.' errors */
/* Load the target rip BEFORE switching %rsp so valgrind never sees a
 * read below the new stack pointer. */
177 amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rsp), 8);
178 amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rip), 8);
179 amd64_mov_reg_reg (code, AMD64_RSP, AMD64_R8, 8);
/* NOTE(review): the `} else {` joining the valgrind and normal paths is
 * elided in this excerpt. */
181 amd64_mov_reg_membase (code, AMD64_RSP, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rsp), 8);
182 /* get return address */
183 amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rip), 8);
186 /* jump to the saved IP */
187 amd64_jump_reg (code, AMD64_R11);
189 mono_arch_flush_icache (start, code - start);
191 *code_size = code - start;
197 * mono_arch_get_call_filter:
199 * Returns a pointer to a method which calls an exception filter. We
200 * also use this function to call finally handlers (we pass NULL as
201 * @exc object in this case).
/*
 * mono_arch_get_call_filter_full:
 * Emits a stub: int (*) (MonoContext *ctx, gpointer eip), used to invoke
 * exception filters and finally handlers. It saves the callee-saved
 * registers of the current frame, loads the callee-saved registers from
 * *ctx so the handler sees the faulting frame's state, calls @eip, and
 * then restores its own registers.
 */
204 mono_arch_get_call_filter_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
213 start = code = mono_global_codeman_reserve (128);
215 /* call_filter (MonoContext *ctx, unsigned long eip) */
218 /* Alloc new frame */
219 amd64_push_reg (code, AMD64_RBP);
220 amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, 8);
222 /* Save callee saved regs */
224 for (i = 0; i < AMD64_NREG; ++i)
225 if (AMD64_IS_CALLEE_SAVED_REG (i)) {
226 amd64_push_reg (code, i);
/* Extra push of %rbp: it is both the frame register and part of the
 * restored context, so it is saved separately. */
232 amd64_push_reg (code, AMD64_RBP);
234 /* Make stack misaligned, the call will make it aligned again */
236 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
/* Load the faulting frame's %rbp and callee-saved regs from the ctx. */
239 amd64_mov_reg_membase (code, AMD64_RBP, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rbp), 8);
240 /* load callee saved regs */
241 amd64_mov_reg_membase (code, AMD64_RBX, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rbx), 8);
242 amd64_mov_reg_membase (code, AMD64_R12, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r12), 8);
243 amd64_mov_reg_membase (code, AMD64_R13, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r13), 8);
244 amd64_mov_reg_membase (code, AMD64_R14, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r14), 8);
245 amd64_mov_reg_membase (code, AMD64_R15, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r15), 8);
/* On Win64 %rdi/%rsi are callee-saved too, so restore them as well. */
246 #ifdef PLATFORM_WIN32
247 amd64_mov_reg_membase (code, AMD64_RDI, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rdi), 8);
248 amd64_mov_reg_membase (code, AMD64_RSI, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rsi), 8);
251 /* call the handler */
252 amd64_call_reg (code, AMD64_ARG_REG2);
253 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
258 amd64_pop_reg (code, AMD64_RBP);
260 /* Restore callee saved regs */
/* NOTE(review): loop starts at AMD64_NREG (one past the last register
 * index) rather than AMD64_NREG - 1 — confirm AMD64_IS_CALLEE_SAVED_REG
 * rejects that value, otherwise this is an off-by-one. */
261 for (i = AMD64_NREG; i >= 0; --i)
262 if (AMD64_IS_CALLEE_SAVED_REG (i))
263 amd64_pop_reg (code, i);
268 g_assert ((code - start) < 128);
270 mono_arch_flush_icache (start, code - start);
272 *code_size = code - start;
278 * The first few arguments are dummy, to force the other arguments to be passed on
279 * the stack, this avoids overwriting the argument registers in the throw trampoline.
/*
 * mono_amd64_throw_exception:
 * C entry point reached from the throw trampoline. Rebuilds a MonoContext
 * from the register values passed on the stack, runs the exception
 * machinery, and resumes at the handler via restore_context().
 * The first six arguments are dummies: they occupy the integer argument
 * registers so the real values must travel on the stack, which lets the
 * trampoline avoid clobbering the argument registers.
 */
282 mono_amd64_throw_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
283 guint64 dummy5, guint64 dummy6,
284 MonoObject *exc, guint64 rip, guint64 rsp,
285 guint64 rbx, guint64 rbp, guint64 r12, guint64 r13,
286 guint64 r14, guint64 r15, guint64 rdi, guint64 rsi,
287 guint64 rax, guint64 rcx, guint64 rdx,
/* Cached restore-context stub; created lazily on first throw. */
290 static void (*restore_context) (MonoContext *);
293 if (!restore_context)
294 restore_context = mono_get_restore_context ();
/* Give the debugger first chance at a first-throw (not rethrow). */
310 if (!rethrow && mono_debugger_throw_exception ((gpointer)(rip - 8), (gpointer)rsp, exc)) {
312 * The debugger wants us to stop on the `throw' instruction.
313 * By the time we get here, it already inserted a breakpoint on
314 * eip - 8 (which is the address of the `mov %r15,%rdi ; callq throw').
319 * In case of a rethrow, the JIT is emitting code like this:
321 * mov 0xffffffffffffffd0(%rbp),%rax'
325 * Here, restore_context() wouldn't restore the %rax register correctly.
329 restore_context (&ctx);
330 g_assert_not_reached ();
333 /* adjust eip so that it points into the call instruction */
336 if (mono_object_isinst (exc, mono_defaults.exception_class)) {
337 MonoException *mono_ex = (MonoException*)exc;
/* Drop any stale stack trace so a fresh one is captured for this throw. */
339 mono_ex->stack_trace = NULL;
341 mono_handle_exception (&ctx, exc, (gpointer)rip, FALSE);
342 restore_context (&ctx);
/* restore_context() never returns. */
344 g_assert_not_reached ();
/*
 * get_throw_trampoline:
 * Emits the native throw trampoline: void (*) (MonoObject *exc).
 * It pushes the caller's register state and return address onto the stack
 * in the exact order mono_amd64_throw_exception() expects its stack
 * arguments, then calls it. @rethrow selects throw vs. rethrow semantics.
 */
348 get_throw_trampoline (gboolean rethrow, guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
353 start = code = mono_global_codeman_reserve (64);
/* Remember the entry %rsp so the caller's rip/rsp can be recovered. */
359 amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, 8);
/* Push the register state in reverse order of the target's parameters. */
362 amd64_push_imm (code, rethrow);
363 amd64_push_reg (code, AMD64_RDX);
364 amd64_push_reg (code, AMD64_RCX);
365 amd64_push_reg (code, AMD64_RAX);
366 amd64_push_reg (code, AMD64_RSI);
367 amd64_push_reg (code, AMD64_RDI);
368 amd64_push_reg (code, AMD64_R15);
369 amd64_push_reg (code, AMD64_R14);
370 amd64_push_reg (code, AMD64_R13);
371 amd64_push_reg (code, AMD64_R12);
372 amd64_push_reg (code, AMD64_RBP);
373 amd64_push_reg (code, AMD64_RBX);
/* Caller's %rsp = entry %rsp + 8 (skip the return address). */
376 amd64_lea_membase (code, AMD64_RAX, AMD64_R11, 8);
377 amd64_push_reg (code, AMD64_RAX);
/* Caller's %rip = the return address at [entry %rsp]. */
380 amd64_push_membase (code, AMD64_R11, 0);
/* The exception object, passed to us in the first argument register. */
383 amd64_push_reg (code, AMD64_ARG_REG1);
/* Win64 requires home/shadow space for the register parameters. */
385 #ifdef PLATFORM_WIN32
387 amd64_push_imm (code, 0);
388 amd64_push_imm (code, 0);
389 amd64_push_imm (code, 0);
390 amd64_push_imm (code, 0);
391 amd64_push_imm (code, 0);
392 amd64_push_imm (code, 0);
/* AOT: indirect through a patched RIP-relative slot; JIT: immediate. */
396 *ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_throw_exception");
397 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
399 amd64_mov_reg_imm (code, AMD64_R11, mono_amd64_throw_exception);
401 amd64_call_reg (code, AMD64_R11);
/* The call never returns; trap if it somehow does. */
402 amd64_breakpoint (code);
404 mono_arch_flush_icache (start, code - start);
406 g_assert ((code - start) < 64);
408 *code_size = code - start;
414 * mono_arch_get_throw_exception:
416 * Returns a function pointer which can be used to raise
417 * exceptions. The returned function has the following
418 * signature: void (*func) (MonoException *exc);
/*
 * mono_arch_get_throw_exception_full:
 * Returns the throw trampoline: void (*func) (MonoException *exc).
 */
422 mono_arch_get_throw_exception_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
424 return get_throw_trampoline (FALSE, code_size, ji, aot);
/*
 * mono_arch_get_rethrow_exception_full:
 * Same as mono_arch_get_throw_exception_full, but with rethrow semantics
 * (the original stack trace of the exception is preserved).
 */
428 mono_arch_get_rethrow_exception_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
430 return get_throw_trampoline (TRUE, code_size, ji, aot);
/*
 * mono_arch_get_throw_exception_by_name_full:
 * Kept only to satisfy the arch interface; never called on amd64, so the
 * stub is a single breakpoint instruction.
 */
434 mono_arch_get_throw_exception_by_name_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
439 start = code = mono_global_codeman_reserve (64);
443 /* Not used on amd64 */
444 amd64_breakpoint (code);
446 mono_arch_flush_icache (start, code - start);
448 *code_size = code - start;
454 * mono_arch_get_throw_corlib_exception:
456 * Returns a function pointer which can be used to raise
457 * corlib exceptions. The returned function has the following
458 * signature: void (*func) (guint32 ex_token, guint32 offset);
459 * Here, offset is the offset which needs to be subtracted from the caller IP
460 * to get the IP of the throw. Passing the offset has the advantage that it
461 * needs no relocations in the caller.
/*
 * mono_arch_get_throw_corlib_exception_full:
 * Emits a stub: void (*) (guint32 ex_token_index, guint32 offset).
 * It materializes the corlib exception object from the token via
 * mono_exception_from_token(), computes the real throw IP as
 * (caller return address - offset), and tail-jumps into the generic
 * throw trampoline with that IP on top of the stack.
 */
464 mono_arch_get_throw_corlib_exception_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
466 static guint8* start;
470 start = code = mono_global_codeman_reserve (64);
/* Save the pc offset argument across the call below. */
475 amd64_push_reg (code, AMD64_ARG_REG2);
477 /* Call exception_from_token */
478 amd64_mov_reg_reg (code, AMD64_ARG_REG2, AMD64_ARG_REG1, 8);
/* AOT path: load image pointer and icall address from patched slots. */
480 *ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_IMAGE, mono_defaults.exception_class->image);
481 amd64_mov_reg_membase (code, AMD64_ARG_REG1, AMD64_RIP, 0, 8);
482 *ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_exception_from_token");
483 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
/* JIT path: bake the pointers in as immediates. */
485 amd64_mov_reg_imm (code, AMD64_ARG_REG1, mono_defaults.exception_class->image);
486 amd64_mov_reg_imm (code, AMD64_R11, mono_exception_from_token);
/* Win64: allocate the 32-byte shadow space around the call. */
488 #ifdef PLATFORM_WIN32
489 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 32);
491 amd64_call_reg (code, AMD64_R11);
492 #ifdef PLATFORM_WIN32
493 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 32);
496 /* Compute throw_ip */
497 amd64_pop_reg (code, AMD64_ARG_REG2);
/* Pop the return address, then throw_ip = return address - offset. */
499 amd64_pop_reg (code, AMD64_ARG_REG3);
500 amd64_alu_reg_reg (code, X86_SUB, AMD64_ARG_REG3, AMD64_ARG_REG2);
502 /* Put the throw_ip at the top of the misaligned stack */
503 amd64_push_reg (code, AMD64_ARG_REG3);
505 throw_ex = (guint64)mono_get_throw_exception ();
507 /* Call throw_exception */
/* The exception object returned in %rax becomes the first argument. */
508 amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_RAX, 8);
510 *ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_throw_exception");
511 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
513 amd64_mov_reg_imm (code, AMD64_R11, throw_ex);
515 /* The original IP is on the stack */
516 amd64_jump_reg (code, AMD64_R11);
518 g_assert ((code - start) < 64);
520 mono_arch_flush_icache (start, code - start);
522 *code_size = code - start;
527 /* mono_arch_find_jit_info:
529 * This function is used to gather information from @ctx. It return the
530 * MonoJitInfo of the corresponding function, unwinds one stack frame and
531 * stores the resulting context into @new_ctx. It also stores a string
532 * describing the stack location into @trace (if not NULL), and modifies
533 * the @lmf if necessary. @native_offset return the IP offset from the
534 * start of the function or -1 if that info is not available.
/*
 * mono_arch_find_jit_info:
 * Unwinds one stack frame: looks up the MonoJitInfo for the IP in @ctx,
 * computes the caller's register state into @new_ctx, restores the
 * callee-saved registers the callee spilled (or, for save_lmf methods,
 * reloads them from the LMF), and pops LMF entries that are no longer
 * live. For frames with no JIT info, falls back to the LMF chain and
 * synthesizes a trampoline MonoJitInfo in @res.
 * Returns the MonoJitInfo of the current frame (ji or res).
 */
537 mono_arch_find_jit_info (MonoDomain *domain, MonoJitTlsData *jit_tls, MonoJitInfo *res, MonoJitInfo *prev_ji, MonoContext *ctx,
538 MonoContext *new_ctx, MonoLMF **lmf, gboolean *managed)
542 gpointer ip = MONO_CONTEXT_GET_IP (ctx);
544 /* Avoid costly table lookup during stack overflow */
545 if (prev_ji && (ip > prev_ji->code_start && ((guint8*)ip < ((guint8*)prev_ji->code_start) + prev_ji->code_size)))
548 ji = mono_jit_info_table_find (domain, ip);
/* Bit 31 of used_regs encodes whether the method omits the frame ptr. */
557 gboolean omit_fp = (ji->used_regs & (1 << 31)) > 0;
/* Wrapper frames don't count as managed for the caller's purposes. */
560 if (!ji->method->wrapper_type)
564 * If a method has save_lmf set, then register save/restore code is not generated
565 * by the JIT, so we have to restore callee saved registers from the lmf.
567 if (ji->method->save_lmf) {
571 * *lmf might not point to the LMF pushed by this method, so compute the LMF
575 lmf_addr = (MonoLMF*)ctx->rsp;
577 lmf_addr = (MonoLMF*)(ctx->rbp - sizeof (MonoLMF));
579 new_ctx->rbp = lmf_addr->rbp;
580 new_ctx->rbx = lmf_addr->rbx;
581 new_ctx->r12 = lmf_addr->r12;
582 new_ctx->r13 = lmf_addr->r13;
583 new_ctx->r14 = lmf_addr->r14;
584 new_ctx->r15 = lmf_addr->r15;
/* omit_fp frames spill upwards from rsp; fp frames downwards from rbp. */
587 offset = omit_fp ? 0 : -1;
588 /* restore caller saved registers */
589 for (i = 0; i < AMD64_NREG; i ++)
590 if (AMD64_IS_CALLEE_SAVED_REG (i) && (ji->used_regs & (1 << i))) {
594 reg = *((guint64*)ctx->rsp + offset);
598 reg = *((guint64 *)ctx->rbp + offset);
622 g_assert_not_reached ();
/* Pop LMF entries whose frames are below the one we just unwound. */
627 if (*lmf && ((*lmf) != jit_tls->first_lmf) && (MONO_CONTEXT_GET_SP (ctx) >= (gpointer)(*lmf)->rsp)) {
628 /* remove any unused lmf */
/* Bit 0 of previous_lmf is a flag; mask it off to get the pointer. */
629 *lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~1);
/* omit_fp: frame size is stored in bits 16-30 of used_regs. */
634 new_ctx->rsp += (ji->used_regs >> 16) & (0x7fff);
635 new_ctx->rip = *((guint64 *)new_ctx->rsp) - 1;
636 /* Pop return address */
640 /* Pop EBP and the return address */
641 new_ctx->rsp = ctx->rbp + (2 * sizeof (gpointer));
642 /* we subtract 1, so that the IP points into the call instruction */
643 new_ctx->rip = *((guint64 *)ctx->rbp + 1) - 1;
644 new_ctx->rbp = *((guint64 *)ctx->rbp);
647 /* Pop arguments off the stack */
649 MonoJitArgumentInfo *arg_info = g_newa (MonoJitArgumentInfo, mono_method_signature (ji->method)->param_count + 1);
651 guint32 stack_to_pop = mono_arch_get_argument_info (mono_method_signature (ji->method), mono_method_signature (ji->method)->param_count, arg_info);
652 new_ctx->rsp += stack_to_pop;
/* No JIT info: unwind through the LMF chain instead. */
659 if (((guint64)(*lmf)->previous_lmf) & 1) {
660 /* This LMF has the rip field set */
662 } else if ((*lmf)->rsp == 0) {
667 * The rsp field is set just before the call which transitioned to native
668 * code. Obtain the rip from the stack.
670 rip = *(guint64*)((*lmf)->rsp - sizeof (gpointer));
673 ji = mono_jit_info_table_find (domain, (gpointer)rip);
678 /* Trampoline lmf frame */
679 memset (res, 0, sizeof (MonoJitInfo));
680 res->method = (*lmf)->method;
684 new_ctx->rbp = (*lmf)->rbp;
685 new_ctx->rsp = (*lmf)->rsp;
687 new_ctx->rbx = (*lmf)->rbx;
688 new_ctx->r12 = (*lmf)->r12;
689 new_ctx->r13 = (*lmf)->r13;
690 new_ctx->r14 = (*lmf)->r14;
691 new_ctx->r15 = (*lmf)->r15;
693 *lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~1);
695 return ji ? ji : res;
702 * mono_arch_handle_exception:
704 * @ctx: saved processor state
705 * @obj: the exception object
/*
 * mono_arch_handle_exception:
 * @sigctx: saved processor state (platform signal context)
 * @obj: the exception object
 * Converts the signal context to a MonoContext, runs the generic
 * exception machinery, and writes the resulting state back so the signal
 * return resumes at the handler.
 */
708 mono_arch_handle_exception (void *sigctx, gpointer obj, gboolean test_only)
712 mono_arch_sigctx_to_monoctx (sigctx, &mctx);
714 mono_handle_exception (&mctx, obj, MONO_CONTEXT_GET_IP (&mctx), test_only);
716 mono_arch_monoctx_to_sigctx (&mctx, sigctx);
721 #ifdef MONO_ARCH_USE_SIGACTION
/*
 * gregs_from_ucontext:
 * Returns the general-register array of a ucontext_t, papering over the
 * OS difference: some platforms expose uc_mcontext directly as the
 * register block, others nest it under a .gregs member.
 */
722 static inline guint64*
723 gregs_from_ucontext (ucontext_t *ctx)
726 guint64 *gregs = (guint64 *) &ctx->uc_mcontext;
728 guint64 *gregs = (guint64 *) &ctx->uc_mcontext.gregs;
/*
 * mono_arch_sigctx_to_monoctx:
 * Copies the register state out of a platform signal context into a
 * MonoContext. With sigaction the source is a ucontext_t gregs array;
 * otherwise @sigctx is assumed to already be a MonoContext.
 */
735 mono_arch_sigctx_to_monoctx (void *sigctx, MonoContext *mctx)
737 #ifdef MONO_ARCH_USE_SIGACTION
738 ucontext_t *ctx = (ucontext_t*)sigctx;
740 guint64 *gregs = gregs_from_ucontext (ctx);
742 mctx->rax = gregs [REG_RAX];
743 mctx->rbx = gregs [REG_RBX];
744 mctx->rcx = gregs [REG_RCX];
745 mctx->rdx = gregs [REG_RDX];
746 mctx->rbp = gregs [REG_RBP];
747 mctx->rsp = gregs [REG_RSP];
748 mctx->rsi = gregs [REG_RSI];
749 mctx->rdi = gregs [REG_RDI];
750 mctx->rip = gregs [REG_RIP];
751 mctx->r12 = gregs [REG_R12];
752 mctx->r13 = gregs [REG_R13];
753 mctx->r14 = gregs [REG_R14];
754 mctx->r15 = gregs [REG_R15];
/* Non-sigaction platforms pass a MonoContext directly. */
756 MonoContext *ctx = (MonoContext *)sigctx;
758 mctx->rax = ctx->rax;
759 mctx->rbx = ctx->rbx;
760 mctx->rcx = ctx->rcx;
761 mctx->rdx = ctx->rdx;
762 mctx->rbp = ctx->rbp;
763 mctx->rsp = ctx->rsp;
764 mctx->rsi = ctx->rsi;
765 mctx->rdi = ctx->rdi;
766 mctx->rip = ctx->rip;
767 mctx->r12 = ctx->r12;
768 mctx->r13 = ctx->r13;
769 mctx->r14 = ctx->r14;
770 mctx->r15 = ctx->r15;
/*
 * mono_arch_monoctx_to_sigctx:
 * Inverse of mono_arch_sigctx_to_monoctx: writes the MonoContext register
 * state back into the platform signal context, so that returning from the
 * signal handler resumes execution with this state.
 */
775 mono_arch_monoctx_to_sigctx (MonoContext *mctx, void *sigctx)
777 #ifdef MONO_ARCH_USE_SIGACTION
778 ucontext_t *ctx = (ucontext_t*)sigctx;
780 guint64 *gregs = gregs_from_ucontext (ctx);
782 gregs [REG_RAX] = mctx->rax;
783 gregs [REG_RBX] = mctx->rbx;
784 gregs [REG_RCX] = mctx->rcx;
785 gregs [REG_RDX] = mctx->rdx;
786 gregs [REG_RBP] = mctx->rbp;
787 gregs [REG_RSP] = mctx->rsp;
788 gregs [REG_RSI] = mctx->rsi;
789 gregs [REG_RDI] = mctx->rdi;
790 gregs [REG_RIP] = mctx->rip;
791 gregs [REG_R12] = mctx->r12;
792 gregs [REG_R13] = mctx->r13;
793 gregs [REG_R14] = mctx->r14;
794 gregs [REG_R15] = mctx->r15;
/* Non-sigaction platforms pass a MonoContext directly. */
796 MonoContext *ctx = (MonoContext *)sigctx;
798 ctx->rax = mctx->rax;
799 ctx->rbx = mctx->rbx;
800 ctx->rcx = mctx->rcx;
801 ctx->rdx = mctx->rdx;
802 ctx->rbp = mctx->rbp;
803 ctx->rsp = mctx->rsp;
804 ctx->rsi = mctx->rsi;
805 ctx->rdi = mctx->rdi;
806 ctx->rip = mctx->rip;
807 ctx->r12 = mctx->r12;
808 ctx->r13 = mctx->r13;
809 ctx->r14 = mctx->r14;
810 ctx->r15 = mctx->r15;
/*
 * mono_arch_ip_from_context:
 * Extracts the instruction pointer from a platform signal context.
 */
815 mono_arch_ip_from_context (void *sigctx)
818 #ifdef MONO_ARCH_USE_SIGACTION
820 ucontext_t *ctx = (ucontext_t*)sigctx;
822 guint64 *gregs = gregs_from_ucontext (ctx);
824 return (gpointer)gregs [REG_RIP];
/* Non-sigaction platforms pass a MonoContext directly. */
826 MonoContext *ctx = sigctx;
827 return (gpointer)ctx->rip;
/*
 * restore_soft_guard_pages:
 * Re-protects the thread's soft stack-overflow guard area (made
 * inaccessible again with MONO_MMAP_NONE) after a stack-overflow
 * exception has been handled and the stack has unwound past it.
 */
832 restore_soft_guard_pages (void)
834 MonoJitTlsData *jit_tls = TlsGetValue (mono_jit_tls_id);
835 if (jit_tls->stack_ovf_guard_base)
836 mono_mprotect (jit_tls->stack_ovf_guard_base, jit_tls->stack_ovf_guard_size, MONO_MMAP_NONE);
840 * this function modifies mctx so that when it is restored, it
841 * won't execute starting at mctx.eip, but in a function that
842 * will restore the protection on the soft-guard pages and return back to
843 * continue at mctx.eip.
/*
 * prepare_for_guard_pages:
 * Rewrites @mctx so that, when restored, execution first runs
 * restore_soft_guard_pages() and only then returns to the original
 * mctx->rip (which is planted on the stack as the return address).
 */
846 prepare_for_guard_pages (MonoContext *mctx)
849 sp = (gpointer)(mctx->rsp);
851 /* the return addr */
852 sp [0] = (gpointer)(mctx->rip);
853 mctx->rip = (guint64)restore_soft_guard_pages;
854 mctx->rsp = (guint64)sp;
/*
 * altstack_handle_and_restore:
 * Runs on the normal stack after a signal was delivered on the alternate
 * signal stack (see mono_arch_handle_altstack_exception). Handles the
 * exception, arranges for the soft guard pages to be re-armed on a stack
 * overflow, and restores the resulting context. Does not return.
 */
858 altstack_handle_and_restore (void *sigctx, gpointer obj, gboolean stack_ovf)
860 void (*restore_context) (MonoContext *);
863 restore_context = mono_get_restore_context ();
864 mono_arch_sigctx_to_monoctx (sigctx, &mctx);
865 mono_handle_exception (&mctx, obj, MONO_CONTEXT_GET_IP (&mctx), FALSE);
867 prepare_for_guard_pages (&mctx);
868 restore_context (&mctx);
/*
 * mono_arch_handle_altstack_exception:
 * SIGSEGV handler path that runs on the alternate signal stack. If the
 * fault is a stack overflow it builds a call frame on the real stack
 * (below the red zone) containing a copy of the ucontext, then redirects
 * the signal return into altstack_handle_and_restore() with that copy,
 * the exception object and the stack_ovf flag as arguments.
 */
872 mono_arch_handle_altstack_exception (void *sigctx, gpointer fault_addr, gboolean stack_ovf)
874 #ifdef MONO_ARCH_USE_SIGACTION
875 MonoException *exc = NULL;
876 ucontext_t *ctx = (ucontext_t*)sigctx;
877 guint64 *gregs = gregs_from_ucontext (ctx);
878 MonoJitInfo *ji = mono_jit_info_table_find (mono_domain_get (), (gpointer)gregs [REG_RIP]);
/* Use the preallocated stack-overflow exception: no allocation is safe here. */
883 exc = mono_domain_get ()->stack_overflow_ex;
/* Fault outside managed code: treat as a native crash. */
885 mono_handle_native_sigsegv (SIGSEGV, sigctx);
887 /* setup a call frame on the real stack so that control is returned there
888 * and exception handling can continue.
889 * The frame looks like:
893 * 128 is the size of the red zone
895 frame_size = sizeof (ucontext_t) + sizeof (gpointer) * 4 + 128;
/* 16-byte align, then reserve the frame below the faulting rsp. */
898 sp = (gpointer)(gregs [REG_RSP] & ~15);
899 sp = (gpointer)((char*)sp - frame_size);
900 /* the arguments must be aligned */
901 sp [-1] = (gpointer)gregs [REG_RIP];
902 /* may need to adjust pointers in the new struct copy, depending on the OS */
903 memcpy (sp + 4, ctx, sizeof (ucontext_t));
904 /* at the return from the signal handler execution starts in altstack_handle_and_restore() */
905 gregs [REG_RIP] = (unsigned long)altstack_handle_and_restore;
906 gregs [REG_RSP] = (unsigned long)(sp - 1);
/* SysV argument registers: rdi = sigctx copy, rsi = exc, rdx = stack_ovf. */
907 gregs [REG_RDI] = (unsigned long)(sp + 4);
908 gregs [REG_RSI] = (guint64)exc;
909 gregs [REG_RDX] = stack_ovf;
/*
 * get_original_ip:
 * Called from the throw-pending-exception stub. Returns the original
 * return address that mono_arch_notify_pending_exc() saved in lmf->rip,
 * and clears the "rip is set" flag (bit 0) in lmf->previous_lmf.
 * NOTE(review): the `return lmf->rip;` line is elided in this excerpt.
 */
914 get_original_ip (void)
916 MonoLMF *lmf = mono_get_lmf ();
920 /* Reset the change to previous_lmf */
921 lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf & ~1);
/*
 * get_throw_pending_exception:
 * Lazily emits (and caches) a stub that a hijacked return address jumps
 * to. The stub saves the call's potential return-value registers
 * (%rax, %rdx, %xmm0), fetches the thread's pending exception, and either
 * throws it as if from the original call site, or — if the exception was
 * already cleared — restores the registers and resumes at the original ip.
 */
927 get_throw_pending_exception (void)
929 static guint8* start;
930 static gboolean inited = FALSE;
933 gpointer throw_trampoline;
938 start = code = mono_global_codeman_reserve (128);
940 /* We are in the frame of a managed method after a call */
942 * We would like to throw the pending exception in such a way that it looks to
943 * be thrown from the managed method.
946 /* Save registers which might contain the return value of the call */
947 amd64_push_reg (code, AMD64_RAX);
948 amd64_push_reg (code, AMD64_RDX);
/* Spill %xmm0 (fp return value) into a fresh stack slot. */
950 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
951 amd64_movsd_membase_reg (code, AMD64_RSP, 0, AMD64_XMM0);
/* Align the stack for the upcoming calls. */
954 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
956 /* Obtain the pending exception */
957 amd64_mov_reg_imm (code, AMD64_R11, mono_thread_get_and_clear_pending_exception);
958 amd64_call_reg (code, AMD64_R11);
960 /* Check if it is NULL, and branch */
961 amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0);
962 br[0] = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE);
964 /* exc != NULL branch */
966 /* Save the exc on the stack */
967 amd64_push_reg (code, AMD64_RAX);
/* Keep the stack aligned across the next call. */
969 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
971 /* Obtain the original ip and clear the flag in previous_lmf */
972 amd64_mov_reg_imm (code, AMD64_R11, get_original_ip);
973 amd64_call_reg (code, AMD64_R11);
/* Reload the saved exception object from the stack. */
976 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, 8, 8);
978 /* Pop saved stuff from the stack */
979 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 6 * 8);
981 /* Setup arguments for the throw trampoline */
983 amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_R11, 8);
984 /* The trampoline expects the caller ip to be pushed on the stack */
985 amd64_push_reg (code, AMD64_RAX);
987 /* Call the throw trampoline */
988 throw_trampoline = mono_get_throw_exception ();
989 amd64_mov_reg_imm (code, AMD64_R11, throw_trampoline);
990 /* We use a jump instead of a call so we can push the original ip on the stack */
991 amd64_jump_reg (code, AMD64_R11);
993 /* ex == NULL branch */
994 mono_amd64_patch (br [0], code);
996 /* Obtain the original ip and clear the flag in previous_lmf */
997 amd64_mov_reg_imm (code, AMD64_R11, get_original_ip);
998 amd64_call_reg (code, AMD64_R11);
/* Stash the resume address in %r11 before restoring %rax. */
999 amd64_mov_reg_reg (code, AMD64_R11, AMD64_RAX, 8);
1001 /* Restore registers */
1002 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
1003 amd64_movsd_reg_membase (code, AMD64_XMM0, AMD64_RSP, 0);
1004 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
1005 amd64_pop_reg (code, AMD64_RDX);
1006 amd64_pop_reg (code, AMD64_RAX);
1008 /* Return to original code */
1009 amd64_jump_reg (code, AMD64_R11);
1011 g_assert ((code - start) < 128);
1019 * Called when a thread receives an async exception while executing unmanaged code.
1020 * Instead of checking for this exception in the managed-to-native wrapper, we hijack
1021 * the return address on the stack to point to a helper routine which throws the
/*
 * mono_arch_notify_pending_exc:
 * Called when a thread receives an async exception while running
 * unmanaged code. Hijacks the return address that the managed-to-native
 * transition stored on the stack, replacing it with the
 * throw-pending-exception stub, so the exception is raised as soon as
 * control returns to managed code. The original return address is saved
 * in lmf->rip and bit 0 of previous_lmf flags that it is set.
 */
1025 mono_arch_notify_pending_exc (void)
1027 MonoLMF *lmf = mono_get_lmf ();
1033 if ((guint64)lmf->previous_lmf & 1)
1034 /* Already hijacked or trampoline LMF entry */
1037 /* lmf->rsp is set just before making the call which transitions to unmanaged code */
1038 lmf->rip = *(guint64*)(lmf->rsp - 8);
1039 /* Signal that lmf->rip is set */
1040 lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf | 1);
1042 *(gpointer*)(lmf->rsp - 8) = get_throw_pending_exception ();
1045 #ifdef PLATFORM_WIN32
1048 * The mono_arch_unwindinfo* methods are used to build and add
1049 * function table info for each emitted method from mono. On Winx64
1050 * the seh handler will not be called if the mono methods are not
1051 * added to the function table.
1053 * We should not need to add non-volatile register info to the
1054 * table since mono stores that info elsewhere. (Except for the register
/* Maximum number of unwind codes a single method's unwind info may hold. */
1058 #define MONO_MAX_UNWIND_CODES 22
/*
 * Win64 UNWIND_CODE slot, mirroring the layout documented for the x64
 * exception-handling ABI (CodeOffset / UnwindOp:4 / OpInfo:4, overlaid
 * with a 16-bit FrameOffset for multi-slot codes).
 * NOTE(review): some union members (CodeOffset, OpInfo) are elided in
 * this excerpt.
 */
1060 typedef union _UNWIND_CODE {
1063 guchar UnwindOp : 4;
1066 gushort FrameOffset;
1067 } UNWIND_CODE, *PUNWIND_CODE;
1069 typedef struct _UNWIND_INFO {
1072 guchar SizeOfProlog;
1073 guchar CountOfCodes;
1074 guchar FrameRegister : 4;
1075 guchar FrameOffset : 4;
1076 /* Custom-sized for mono, with room for: */
1077 /*UWOP_PUSH_NONVOL ebp offset = 21*/
1078 /*UWOP_ALLOC_LARGE : requires 2 or 3 offset = 20*/
1079 /*UWOP_SET_FPREG : requires 2 offset = 17*/
1080 /*UWOP_PUSH_NONVOL offset = 15-0*/
1081 UNWIND_CODE UnwindCode[MONO_MAX_UNWIND_CODES];
1083 /* UNWIND_CODE MoreUnwindCode[((CountOfCodes + 1) & ~1) - 1];
1085 * OPTIONAL ULONG ExceptionHandler;
1086 * OPTIONAL ULONG FunctionEntry;
1088 * OPTIONAL ULONG ExceptionData[]; */
1089 } UNWIND_INFO, *PUNWIND_INFO;
/* Function-table entry plus its unwind info, kept together so both can be
 * copied after the method's code in one block. */
1093 RUNTIME_FUNCTION runtimeFunction;
1094 UNWIND_INFO unwindInfo;
1095 } MonoUnwindInfo, *PMonoUnwindInfo;
/*
 * mono_arch_unwindinfo_create:
 * Allocates a zeroed MonoUnwindInfo into *@monoui and initializes the
 * unwind-info version (1, per the Win64 unwind ABI). Caller owns the
 * allocation until it is handed to _install_unwind_info, which frees it.
 */
1098 mono_arch_unwindinfo_create (gpointer* monoui)
1100 PMonoUnwindInfo newunwindinfo;
1101 *monoui = newunwindinfo = g_new0 (MonoUnwindInfo, 1);
1102 newunwindinfo->unwindInfo.Version = 1;
/*
 * mono_arch_unwindinfo_add_push_nonvol:
 * Records a UWOP_PUSH_NONVOL unwind code for pushing non-volatile
 * register @reg, at prolog offset (@nextip - @codebegin). Codes are
 * filled from the END of the UnwindCode array backwards, so that when
 * installed they end up in the reverse-prolog order the OS unwinder
 * expects. Must be called in prolog order (offsets strictly increasing).
 */
1106 mono_arch_unwindinfo_add_push_nonvol (gpointer* monoui, gpointer codebegin, gpointer nextip, guchar reg )
1108 PMonoUnwindInfo unwindinfo;
1109 PUNWIND_CODE unwindcode;
1112 mono_arch_unwindinfo_create (monoui);
1114 unwindinfo = (MonoUnwindInfo*)*monoui;
1116 if (unwindinfo->unwindInfo.CountOfCodes >= MONO_MAX_UNWIND_CODES)
1117 g_error ("Larger allocation needed for the unwind information.");
1119 codeindex = MONO_MAX_UNWIND_CODES - (++unwindinfo->unwindInfo.CountOfCodes);
1120 unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];
1121 unwindcode->UnwindOp = 0; /*UWOP_PUSH_NONVOL*/
1122 unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin));
1123 unwindcode->OpInfo = reg;
1125 if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
1126 g_error ("Adding unwind info in wrong order.");
1128 unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
/*
 * mono_arch_unwindinfo_add_set_fpreg:
 * Records a UWOP_SET_FPREG unwind code establishing @reg as the frame
 * pointer (with zero frame offset) at prolog offset (@nextip -
 * @codebegin). Consumes two UNWIND_CODE slots, as required for this
 * opcode by the Win64 unwind ABI.
 */
1132 mono_arch_unwindinfo_add_set_fpreg (gpointer* monoui, gpointer codebegin, gpointer nextip, guchar reg )
1134 PMonoUnwindInfo unwindinfo;
1135 PUNWIND_CODE unwindcode;
1138 mono_arch_unwindinfo_create (monoui);
1140 unwindinfo = (MonoUnwindInfo*)*monoui;
1142 if (unwindinfo->unwindInfo.CountOfCodes + 1 >= MONO_MAX_UNWIND_CODES)
1143 g_error ("Larger allocation needed for the unwind information.");
1145 codeindex = MONO_MAX_UNWIND_CODES - (unwindinfo->unwindInfo.CountOfCodes += 2);
1146 unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];
1147 unwindcode->FrameOffset = 0; /*Assuming no frame pointer offset for mono*/
1149 unwindcode->UnwindOp = 3; /*UWOP_SET_FPREG*/
1150 unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin));
1151 unwindcode->OpInfo = reg;
1153 unwindinfo->unwindInfo.FrameRegister = reg;
1155 if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
1156 g_error ("Adding unwind info in wrong order.");
1158 unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
/*
 * mono_arch_unwindinfo_add_alloc_stack:
 * Records an unwind code for a stack allocation of @size bytes at prolog
 * offset (@nextip - @codebegin). Chooses the encoding by size, per the
 * Win64 unwind ABI: UWOP_ALLOC_SMALL (1 slot, size <= 128),
 * UWOP_ALLOC_LARGE with scaled 16-bit size (2 slots, size <= 0x7FFF8),
 * or UWOP_ALLOC_LARGE with unscaled 32-bit size (3 slots).
 */
1162 mono_arch_unwindinfo_add_alloc_stack (gpointer* monoui, gpointer codebegin, gpointer nextip, guint size )
1164 PMonoUnwindInfo unwindinfo;
1165 PUNWIND_CODE unwindcode;
1169 mono_arch_unwindinfo_create (monoui);
1171 unwindinfo = (MonoUnwindInfo*)*monoui;
1174 g_error ("Stack allocation must be equal to or greater than 0x8.");
1178 else if (size <= 0x7FFF8)
1183 if (unwindinfo->unwindInfo.CountOfCodes + codesneeded > MONO_MAX_UNWIND_CODES)
1184 g_error ("Larger allocation needed for the unwind information.");
1186 codeindex = MONO_MAX_UNWIND_CODES - (unwindinfo->unwindInfo.CountOfCodes += codesneeded);
1187 unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];
1189 if (codesneeded == 1) {
1190 /*The size of the allocation is
1191 (the number in the OpInfo member) times 8 plus 8*/
1192 unwindcode->OpInfo = (size - 8)/8;
1193 unwindcode->UnwindOp = 2; /*UWOP_ALLOC_SMALL*/
1196 if (codesneeded == 3) {
1197 /*the unscaled size of the allocation is recorded
1198 in the next two slots in little-endian format*/
1199 *((unsigned int*)(&unwindcode->FrameOffset)) = size;
1201 unwindcode->OpInfo = 1;
1204 /*the size of the allocation divided by 8
1205 is recorded in the next slot*/
1206 unwindcode->FrameOffset = size/8;
1208 unwindcode->OpInfo = 0;
1211 unwindcode->UnwindOp = 1; /*UWOP_ALLOC_LARGE*/
1214 unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin));
1216 if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
1217 g_error ("Adding unwind info in wrong order.");
1219 unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
/*
 * mono_arch_unwindinfo_get_size:
 * Returns the number of bytes to reserve after the method's code for the
 * installed unwind info: 8 bytes of alignment slack plus the struct size
 * minus the unused tail of the (backwards-filled) UnwindCode array.
 */
1223 mono_arch_unwindinfo_get_size (gpointer monoui)
1225 PMonoUnwindInfo unwindinfo;
1229 unwindinfo = (MonoUnwindInfo*)monoui;
1230 return (8 + sizeof (MonoUnwindInfo)) -
1231 (sizeof (UNWIND_CODE) * (MONO_MAX_UNWIND_CODES - unwindinfo->unwindInfo.CountOfCodes));
1235 mono_arch_unwindinfo_install_unwind_info (gpointer* monoui, gpointer code, guint code_size)
1237 PMonoUnwindInfo unwindinfo, targetinfo;
1239 guint64 targetlocation;
1243 unwindinfo = (MonoUnwindInfo*)*monoui;
1244 targetlocation = (guint64)&(((guchar*)code)[code_size]);
1245 targetinfo = (PMonoUnwindInfo) ALIGN_TO(targetlocation, 8);
1247 unwindinfo->runtimeFunction.EndAddress = code_size;
1248 unwindinfo->runtimeFunction.UnwindData = ((guchar*)&targetinfo->unwindInfo) - ((guchar*)code);
1250 memcpy (targetinfo, unwindinfo, sizeof (MonoUnwindInfo) - (sizeof (UNWIND_CODE) * MONO_MAX_UNWIND_CODES));
1252 codecount = unwindinfo->unwindInfo.CountOfCodes;
1254 memcpy (&targetinfo->unwindInfo.UnwindCode[0], &unwindinfo->unwindInfo.UnwindCode[MONO_MAX_UNWIND_CODES-codecount],
1255 sizeof (UNWIND_CODE) * unwindinfo->unwindInfo.CountOfCodes);
1258 g_free (unwindinfo);
1261 RtlAddFunctionTable (&targetinfo->runtimeFunction, 1, (DWORD64)code);