2 * exceptions-amd64.c: exception support for AMD64
5 * Dietmar Maurer (dietmar@ximian.com)
7 * (C) 2001 Ximian, Inc.
14 #ifndef PLATFORM_WIN32
15 #include <sys/ucontext.h>
18 #include <mono/arch/amd64/amd64-codegen.h>
19 #include <mono/metadata/appdomain.h>
20 #include <mono/metadata/tabledefs.h>
21 #include <mono/metadata/threads.h>
22 #include <mono/metadata/threads-types.h>
23 #include <mono/metadata/debug-helpers.h>
24 #include <mono/metadata/exception.h>
25 #include <mono/metadata/gc-internal.h>
26 #include <mono/metadata/mono-debug.h>
27 #include <mono/utils/mono-mmap.h>
30 #include "mini-amd64.h"
32 #define ALIGN_TO(val,align) (((val) + ((align) - 1)) & ~((align) - 1))
/* Win32 SEH state: per-exception-type Mono handlers, installed through
 * win32_seh_set_handler() and dispatched from seh_handler() below. */
35 static MonoW32ExceptionHandler fpe_handler;
36 static MonoW32ExceptionHandler ill_handler;
37 static MonoW32ExceptionHandler segv_handler;
/* Previous top-level filter, saved so win32_seh_cleanup() can restore it. */
39 static LPTOP_LEVEL_EXCEPTION_FILTER old_handler;
/* Invoke the registered handler (if any) for exception class _ex,
 * passing the UNIX-style context built in seh_handler() ('sctx'). */
41 #define W32_SEH_HANDLE_EX(_ex) \
42 if (_ex##_handler) _ex##_handler((int)sctx)
45 * Unhandled Exception Filter
46 * Top-level per-process exception handler.
/* Bridges Win32 structured exceptions into Mono's signal-style handling:
 * copies the Win32 CONTEXT into a MonoContext, dispatches to the handler
 * registered for the exception code, then copies the (possibly modified)
 * context back so execution resumes where the handler decided. */
48 LONG CALLBACK seh_handler(EXCEPTION_POINTERS* ep)
55 res = EXCEPTION_CONTINUE_EXECUTION;
57 er = ep->ExceptionRecord;
58 ctx = ep->ContextRecord;
/* NOTE(review): heap-allocated per exception; a matching g_free is not
 * visible in this view — confirm it is released after the copy-back. */
59 sctx = g_malloc(sizeof(MonoContext));
61 /* Copy Win32 context to UNIX style context */
76 switch (er->ExceptionCode) {
77 case EXCEPTION_ACCESS_VIOLATION:
78 W32_SEH_HANDLE_EX(segv);
80 case EXCEPTION_ILLEGAL_INSTRUCTION:
81 W32_SEH_HANDLE_EX(ill);
/* All arithmetic faults are funneled into the single 'fpe' handler. */
83 case EXCEPTION_INT_DIVIDE_BY_ZERO:
84 case EXCEPTION_INT_OVERFLOW:
85 case EXCEPTION_FLT_DIVIDE_BY_ZERO:
86 case EXCEPTION_FLT_OVERFLOW:
87 case EXCEPTION_FLT_UNDERFLOW:
88 case EXCEPTION_FLT_INEXACT_RESULT:
89 W32_SEH_HANDLE_EX(fpe);
95 /* Copy context back */
100 ctx->Rbp = sctx->rbp;
101 ctx->Rsp = sctx->rsp;
102 ctx->Rsi = sctx->rsi;
103 ctx->Rdi = sctx->rdi;
104 ctx->Rip = sctx->rip;
/* Install seh_handler as the process-wide unhandled-exception filter,
 * remembering the previous filter for win32_seh_cleanup(). */
111 void win32_seh_init()
113 old_handler = SetUnhandledExceptionFilter(seh_handler);
/* Restore the exception filter that was active before win32_seh_init(). */
116 void win32_seh_cleanup()
118 if (old_handler) SetUnhandledExceptionFilter(old_handler);
/* Register the Mono handler invoked for the given signal type
 * (SIGFPE/SIGILL/SIGSEGV selection happens in the elided switch). */
121 void win32_seh_set_handler(int type, MonoW32ExceptionHandler handler)
125 fpe_handler = handler;
128 ill_handler = handler;
131 segv_handler = handler;
138 #endif /* PLATFORM_WIN32 */
141 * mono_arch_get_restore_context:
143 * Returns a pointer to a method which restores a previously saved sigcontext.
/* Emits a small trampoline: load all GP registers from the MonoContext
 * passed in ARG_REG1 (kept in %r11) and jump to the saved %rip. */
146 mono_arch_get_restore_context_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
148 guint8 *start = NULL;
151 /* restore_context (MonoContext *ctx) */
155 start = code = mono_global_codeman_reserve (256);
/* Keep the ctx pointer in %r11 so the argument register can be clobbered. */
157 amd64_mov_reg_reg (code, AMD64_R11, AMD64_ARG_REG1, 8);
159 /* Restore all registers except %rip and %r11 */
160 amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rax), 8);
161 amd64_mov_reg_membase (code, AMD64_RCX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rcx), 8);
162 amd64_mov_reg_membase (code, AMD64_RDX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rdx), 8);
163 amd64_mov_reg_membase (code, AMD64_RBX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rbx), 8);
164 amd64_mov_reg_membase (code, AMD64_RBP, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rbp), 8);
165 amd64_mov_reg_membase (code, AMD64_RSI, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rsi), 8);
166 amd64_mov_reg_membase (code, AMD64_RDI, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rdi), 8);
167 //amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r8), 8);
168 //amd64_mov_reg_membase (code, AMD64_R9, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r9), 8);
169 //amd64_mov_reg_membase (code, AMD64_R10, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r10), 8);
170 amd64_mov_reg_membase (code, AMD64_R12, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r12), 8);
171 amd64_mov_reg_membase (code, AMD64_R13, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r13), 8);
172 amd64_mov_reg_membase (code, AMD64_R14, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r14), 8);
173 amd64_mov_reg_membase (code, AMD64_R15, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r15), 8);
/* Under valgrind, load %rip before switching stacks so no access happens
 * below the new stack pointer. */
175 if (mono_running_on_valgrind ()) {
176 /* Prevent 'Address 0x... is just below the stack ptr.' errors */
177 amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rsp), 8);
178 amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rip), 8);
179 amd64_mov_reg_reg (code, AMD64_RSP, AMD64_R8, 8);
181 amd64_mov_reg_membase (code, AMD64_RSP, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rsp), 8);
182 /* get return address */
183 amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rip), 8);
186 /* jump to the saved IP */
187 amd64_jump_reg (code, AMD64_R11);
189 mono_arch_flush_icache (start, code - start);
191 *code_size = code - start;
197 * mono_arch_get_call_filter:
199 * Returns a pointer to a method which calls an exception filter. We
200 * also use this function to call finally handlers (we pass NULL as
201 * @exc object in this case).
/* Emits: call_filter (MonoContext *ctx, unsigned long eip) — sets up a
 * frame, saves callee-saved registers, loads the callee-saved registers
 * from @ctx, calls the handler at @eip, then restores everything. */
204 mono_arch_get_call_filter_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
213 start = code = mono_global_codeman_reserve (128);
215 /* call_filter (MonoContext *ctx, unsigned long eip) */
218 /* Alloc new frame */
219 amd64_push_reg (code, AMD64_RBP);
220 amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, 8);
222 /* Save callee saved regs */
224 for (i = 0; i < AMD64_NREG; ++i)
225 if (AMD64_IS_CALLEE_SAVED_REG (i)) {
226 amd64_push_reg (code, i);
232 amd64_push_reg (code, AMD64_RBP);
234 /* Make stack misaligned, the call will make it aligned again */
236 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
/* Load the handler's frame pointer and callee-saved regs from @ctx. */
239 amd64_mov_reg_membase (code, AMD64_RBP, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rbp), 8);
240 /* load callee saved regs */
241 amd64_mov_reg_membase (code, AMD64_RBX, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rbx), 8);
242 amd64_mov_reg_membase (code, AMD64_R12, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r12), 8);
243 amd64_mov_reg_membase (code, AMD64_R13, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r13), 8);
244 amd64_mov_reg_membase (code, AMD64_R14, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r14), 8);
245 amd64_mov_reg_membase (code, AMD64_R15, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r15), 8);
246 #ifdef PLATFORM_WIN32
/* %rdi/%rsi are callee-saved in the Win64 ABI, unlike SysV. */
247 amd64_mov_reg_membase (code, AMD64_RDI, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rdi), 8);
248 amd64_mov_reg_membase (code, AMD64_RSI, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rsi), 8);
251 /* call the handler */
252 amd64_call_reg (code, AMD64_ARG_REG2);
255 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
258 amd64_pop_reg (code, AMD64_RBP);
260 /* Restore callee saved regs */
261 for (i = AMD64_NREG; i >= 0; --i)
262 if (AMD64_IS_CALLEE_SAVED_REG (i))
263 amd64_pop_reg (code, i);
268 g_assert ((code - start) < 128);
270 mono_arch_flush_icache (start, code - start);
272 *code_size = code - start;
278 * The first few arguments are dummy, to force the other arguments to be passed on
279 * the stack, this avoids overwriting the argument registers in the throw trampoline.
/* C entry point of the throw trampoline: rebuilds a MonoContext from the
 * register values the trampoline pushed on the stack, runs EH, and resumes
 * via restore_context(). Never returns. */
282 mono_amd64_throw_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
283 guint64 dummy5, guint64 dummy6,
284 MonoObject *exc, guint64 rip, guint64 rsp,
285 guint64 rbx, guint64 rbp, guint64 r12, guint64 r13,
286 guint64 r14, guint64 r15, guint64 rdi, guint64 rsi,
287 guint64 rax, guint64 rcx, guint64 rdx,
/* Lazily resolved restore-context trampoline, cached across throws. */
290 static void (*restore_context) (MonoContext *);
293 if (!restore_context)
294 restore_context = mono_get_restore_context ();
310 if (!rethrow && mono_debugger_throw_exception ((gpointer)(rip - 8), (gpointer)rsp, exc)) {
312 * The debugger wants us to stop on the `throw' instruction.
313 * By the time we get here, it already inserted a breakpoint on
314 * eip - 8 (which is the address of the `mov %r15,%rdi ; callq throw').
319 * In case of a rethrow, the JIT is emitting code like this:
321 * mov 0xffffffffffffffd0(%rbp),%rax'
325 * Here, restore_context() wouldn't restore the %rax register correctly.
329 restore_context (&ctx);
330 g_assert_not_reached ();
333 /* adjust eip so that it point into the call instruction */
336 if (mono_object_isinst (exc, mono_defaults.exception_class)) {
337 MonoException *mono_ex = (MonoException*)exc;
/* Reset any stale trace from a previous throw of the same object. */
339 mono_ex->stack_trace = NULL;
341 mono_handle_exception (&ctx, exc, (gpointer)rip, FALSE);
342 restore_context (&ctx);
344 g_assert_not_reached ();
/* Emits the throw/rethrow trampoline: push the caller's register state and
 * the @rethrow flag as stack arguments (after 6 dummy register args), then
 * call mono_amd64_throw_exception(). Shared by throw and rethrow via the
 * @rethrow parameter. */
348 get_throw_trampoline (gboolean rethrow, guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
353 start = code = mono_global_codeman_reserve (64);
/* %r11 = incoming %rsp (points at the return address of the caller). */
359 amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, 8);
/* Stack arguments, in reverse order of the C signature. */
362 amd64_push_imm (code, rethrow);
363 amd64_push_reg (code, AMD64_RDX);
364 amd64_push_reg (code, AMD64_RCX);
365 amd64_push_reg (code, AMD64_RAX);
366 amd64_push_reg (code, AMD64_RSI);
367 amd64_push_reg (code, AMD64_RDI);
368 amd64_push_reg (code, AMD64_R15);
369 amd64_push_reg (code, AMD64_R14);
370 amd64_push_reg (code, AMD64_R13);
371 amd64_push_reg (code, AMD64_R12);
372 amd64_push_reg (code, AMD64_RBP);
373 amd64_push_reg (code, AMD64_RBX);
/* Caller %rsp = saved %rsp + 8 (skip the return address). */
376 amd64_lea_membase (code, AMD64_RAX, AMD64_R11, 8);
377 amd64_push_reg (code, AMD64_RAX);
/* Caller %rip = the return address at [saved %rsp]. */
380 amd64_push_membase (code, AMD64_R11, 0);
/* The exception object, passed to us in ARG_REG1. */
383 amd64_push_reg (code, AMD64_ARG_REG1);
385 #ifdef PLATFORM_WIN32
/* Win64 shadow-space / dummy-argument slots. */
387 amd64_push_imm (code, 0);
388 amd64_push_imm (code, 0);
389 amd64_push_imm (code, 0);
390 amd64_push_imm (code, 0);
391 amd64_push_imm (code, 0);
392 amd64_push_imm (code, 0);
/* AOT: load the target through a RIP-relative patch; JIT: immediate. */
396 *ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_throw_exception");
397 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
399 amd64_mov_reg_imm (code, AMD64_R11, mono_amd64_throw_exception);
401 amd64_call_reg (code, AMD64_R11);
/* Not reached: the throw never returns. */
402 amd64_breakpoint (code);
404 mono_arch_flush_icache (start, code - start);
406 g_assert ((code - start) < 64);
408 *code_size = code - start;
414 * mono_arch_get_throw_exception:
416 * Returns a function pointer which can be used to raise
417 * exceptions. The returned function has the following
418 * signature: void (*func) (MonoException *exc);
422 mono_arch_get_throw_exception_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
424 return get_throw_trampoline (FALSE, code_size, ji, aot);
/* Same trampoline as throw, but flags the exception as a rethrow so the
 * original stack trace is preserved. */
428 mono_arch_get_rethrow_exception_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
430 return get_throw_trampoline (TRUE, code_size, ji, aot);
/* Stub only: throw-by-name is not used on amd64, so the emitted code is a
 * single breakpoint that traps if it is ever reached. */
434 mono_arch_get_throw_exception_by_name_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
439 start = code = mono_global_codeman_reserve (64);
443 /* Not used on amd64 */
444 amd64_breakpoint (code);
446 mono_arch_flush_icache (start, code - start);
448 *code_size = code - start;
454 * mono_arch_get_throw_corlib_exception:
456 * Returns a function pointer which can be used to raise
457 * corlib exceptions. The returned function has the following
458 * signature: void (*func) (guint32 ex_token, guint32 offset);
459 * Here, offset is the offset which needs to be subtracted from the caller IP
460 * to get the IP of the throw. Passing the offset has the advantage that it
461 * needs no relocations in the caller.
/* Emits: materialize the exception object from its token via
 * mono_exception_from_token(), compute the throw IP from the return address
 * minus @offset, then tail-jump into the generic throw trampoline. */
464 mono_arch_get_throw_corlib_exception_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
466 static guint8* start;
470 start = code = mono_global_codeman_reserve (64);
/* Preserve @offset (ARG_REG2) across the call below. */
475 amd64_push_reg (code, AMD64_ARG_REG2);
477 /* Call exception_from_token */
478 amd64_mov_reg_reg (code, AMD64_ARG_REG2, AMD64_ARG_REG1, 8);
/* AOT: image pointer and icall address come from RIP-relative patches. */
480 *ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_IMAGE, mono_defaults.exception_class->image);
481 amd64_mov_reg_membase (code, AMD64_ARG_REG1, AMD64_RIP, 0, 8);
482 *ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_exception_from_token");
483 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
485 amd64_mov_reg_imm (code, AMD64_ARG_REG1, mono_defaults.exception_class->image);
486 amd64_mov_reg_imm (code, AMD64_R11, mono_exception_from_token);
488 #ifdef PLATFORM_WIN32
/* Win64 requires 32 bytes of shadow space for the callee. */
489 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 32);
491 amd64_call_reg (code, AMD64_R11);
492 #ifdef PLATFORM_WIN32
493 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 32);
496 /* Compute throw_ip */
497 amd64_pop_reg (code, AMD64_ARG_REG2);
/* throw_ip = return address - offset. */
499 amd64_pop_reg (code, AMD64_ARG_REG3);
500 amd64_alu_reg_reg (code, X86_SUB, AMD64_ARG_REG3, AMD64_ARG_REG2);
502 /* Put the throw_ip at the top of the misaligned stack */
503 amd64_push_reg (code, AMD64_ARG_REG3);
505 throw_ex = (guint64)mono_get_throw_exception ();
507 /* Call throw_exception */
/* The exception object returned in %rax becomes the throw argument. */
508 amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_RAX, 8);
510 *ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_throw_exception");
511 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
513 amd64_mov_reg_imm (code, AMD64_R11, throw_ex);
515 /* The original IP is on the stack */
516 amd64_jump_reg (code, AMD64_R11);
518 g_assert ((code - start) < 64);
520 mono_arch_flush_icache (start, code - start);
522 *code_size = code - start;
527 /* mono_arch_find_jit_info:
529 * This function is used to gather information from @ctx. It return the
530 * MonoJitInfo of the corresponding function, unwinds one stack frame and
531 * stores the resulting context into @new_ctx. It also stores a string
532 * describing the stack location into @trace (if not NULL), and modifies
533 * the @lmf if necessary. @native_offset return the IP offset from the
534 * start of the function or -1 if that info is not available.
537 mono_arch_find_jit_info (MonoDomain *domain, MonoJitTlsData *jit_tls, MonoJitInfo *res, MonoJitInfo *prev_ji, MonoContext *ctx,
538 MonoContext *new_ctx, char **trace, MonoLMF **lmf, int *native_offset,
543 gpointer ip = MONO_CONTEXT_GET_IP (ctx);
545 /* Avoid costly table lookup during stack overflow */
546 if (prev_ji && (ip > prev_ji->code_start && ((guint8*)ip < ((guint8*)prev_ji->code_start) + prev_ji->code_size)))
549 ji = mono_jit_info_table_find (domain, ip);
/* Bit 31 of used_regs encodes whether the frame omits the frame pointer. */
558 gboolean omit_fp = (ji->used_regs & (1 << 31)) > 0;
561 if (!ji->method->wrapper_type)
565 * If a method has save_lmf set, then register save/restore code is not generated
566 * by the JIT, so we have to restore callee saved registers from the lmf.
568 if (ji->method->save_lmf) {
572 * *lmf might not point to the LMF pushed by this method, so compute the LMF
576 lmf_addr = (MonoLMF*)ctx->rsp;
578 lmf_addr = (MonoLMF*)(ctx->rbp - sizeof (MonoLMF));
580 new_ctx->rbp = lmf_addr->rbp;
581 new_ctx->rbx = lmf_addr->rbx;
582 new_ctx->r12 = lmf_addr->r12;
583 new_ctx->r13 = lmf_addr->r13;
584 new_ctx->r14 = lmf_addr->r14;
585 new_ctx->r15 = lmf_addr->r15;
588 offset = omit_fp ? 0 : -1;
589 /* restore caller saved registers */
590 for (i = 0; i < AMD64_NREG; i ++)
591 if (AMD64_IS_CALLEE_SAVED_REG (i) && (ji->used_regs & (1 << i))) {
/* Saved-register area is rsp-relative when the frame pointer is omitted,
 * rbp-relative otherwise. */
595 reg = *((guint64*)ctx->rsp + offset);
599 reg = *((guint64 *)ctx->rbp + offset);
623 g_assert_not_reached ();
/* Pop LMF entries that belong to frames we have already unwound past. */
628 if (*lmf && ((*lmf) != jit_tls->first_lmf) && (MONO_CONTEXT_GET_SP (ctx) >= (gpointer)(*lmf)->rsp)) {
629 /* remove any unused lmf */
630 *lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~1);
/* Bits 16..30 of used_regs hold the frame size for fp-less frames. */
635 new_ctx->rsp += (ji->used_regs >> 16) & (0x7fff);
636 new_ctx->rip = *((guint64 *)new_ctx->rsp) - 1;
637 /* Pop return address */
641 /* Pop EBP and the return address */
642 new_ctx->rsp = ctx->rbp + (2 * sizeof (gpointer));
643 /* we subtract 1, so that the IP points into the call instruction */
644 new_ctx->rip = *((guint64 *)ctx->rbp + 1) - 1;
645 new_ctx->rbp = *((guint64 *)ctx->rbp);
648 /* Pop arguments off the stack */
650 MonoJitArgumentInfo *arg_info = g_newa (MonoJitArgumentInfo, mono_method_signature (ji->method)->param_count + 1);
652 guint32 stack_to_pop = mono_arch_get_argument_info (mono_method_signature (ji->method), mono_method_signature (ji->method)->param_count, arg_info);
653 new_ctx->rsp += stack_to_pop;
/* No JIT info found: try to unwind through an LMF (native) frame. */
660 if (((guint64)(*lmf)->previous_lmf) & 1) {
661 /* This LMF has the rip field set */
663 } else if ((*lmf)->rsp == 0) {
668 * The rsp field is set just before the call which transitioned to native
669 * code. Obtain the rip from the stack.
671 rip = *(guint64*)((*lmf)->rsp - sizeof (gpointer));
674 ji = mono_jit_info_table_find (domain, (gpointer)rip);
679 /* Trampoline lmf frame */
680 memset (res, 0, sizeof (MonoJitInfo));
681 res->method = (*lmf)->method;
685 new_ctx->rbp = (*lmf)->rbp;
686 new_ctx->rsp = (*lmf)->rsp;
688 new_ctx->rbx = (*lmf)->rbx;
689 new_ctx->r12 = (*lmf)->r12;
690 new_ctx->r13 = (*lmf)->r13;
691 new_ctx->r14 = (*lmf)->r14;
692 new_ctx->r15 = (*lmf)->r15;
/* Mask out the 'rip set' flag bit when following previous_lmf. */
694 *lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~1);
696 return ji ? ji : res;
703 * mono_arch_handle_exception:
705 * @ctx: saved processor state
706 * @obj: the exception object
/* Convert the platform signal context to a MonoContext, run the generic
 * handler, then write the (possibly changed) state back into the signal
 * context so the kernel resumes at the handler-chosen location. */
709 mono_arch_handle_exception (void *sigctx, gpointer obj, gboolean test_only)
713 mono_arch_sigctx_to_monoctx (sigctx, &mctx);
715 mono_handle_exception (&mctx, obj, MONO_CONTEXT_GET_IP (&mctx), test_only);
717 mono_arch_monoctx_to_sigctx (&mctx, sigctx);
722 #ifdef MONO_ARCH_USE_SIGACTION
/* Return the general-register array inside a ucontext_t; the mcontext
 * layout differs between platforms, hence the two branches. */
723 static inline guint64*
724 gregs_from_ucontext (ucontext_t *ctx)
727 guint64 *gregs = (guint64 *) &ctx->uc_mcontext;
729 guint64 *gregs = (guint64 *) &ctx->uc_mcontext.gregs;
/* Copy the register state out of a signal context into @mctx. With
 * sigaction the source is a ucontext_t; otherwise sigctx already is a
 * MonoContext and the copy is field-for-field. */
736 mono_arch_sigctx_to_monoctx (void *sigctx, MonoContext *mctx)
738 #ifdef MONO_ARCH_USE_SIGACTION
739 ucontext_t *ctx = (ucontext_t*)sigctx;
741 guint64 *gregs = gregs_from_ucontext (ctx);
743 mctx->rax = gregs [REG_RAX];
744 mctx->rbx = gregs [REG_RBX];
745 mctx->rcx = gregs [REG_RCX];
746 mctx->rdx = gregs [REG_RDX];
747 mctx->rbp = gregs [REG_RBP];
748 mctx->rsp = gregs [REG_RSP];
749 mctx->rsi = gregs [REG_RSI];
750 mctx->rdi = gregs [REG_RDI];
751 mctx->rip = gregs [REG_RIP];
752 mctx->r12 = gregs [REG_R12];
753 mctx->r13 = gregs [REG_R13];
754 mctx->r14 = gregs [REG_R14];
755 mctx->r15 = gregs [REG_R15];
757 MonoContext *ctx = (MonoContext *)sigctx;
759 mctx->rax = ctx->rax;
760 mctx->rbx = ctx->rbx;
761 mctx->rcx = ctx->rcx;
762 mctx->rdx = ctx->rdx;
763 mctx->rbp = ctx->rbp;
764 mctx->rsp = ctx->rsp;
765 mctx->rsi = ctx->rsi;
766 mctx->rdi = ctx->rdi;
767 mctx->rip = ctx->rip;
768 mctx->r12 = ctx->r12;
769 mctx->r13 = ctx->r13;
770 mctx->r14 = ctx->r14;
771 mctx->r15 = ctx->r15;
/* Inverse of mono_arch_sigctx_to_monoctx: write @mctx back into the
 * platform signal context so sigreturn resumes with this state. */
776 mono_arch_monoctx_to_sigctx (MonoContext *mctx, void *sigctx)
778 #ifdef MONO_ARCH_USE_SIGACTION
779 ucontext_t *ctx = (ucontext_t*)sigctx;
781 guint64 *gregs = gregs_from_ucontext (ctx);
783 gregs [REG_RAX] = mctx->rax;
784 gregs [REG_RBX] = mctx->rbx;
785 gregs [REG_RCX] = mctx->rcx;
786 gregs [REG_RDX] = mctx->rdx;
787 gregs [REG_RBP] = mctx->rbp;
788 gregs [REG_RSP] = mctx->rsp;
789 gregs [REG_RSI] = mctx->rsi;
790 gregs [REG_RDI] = mctx->rdi;
791 gregs [REG_RIP] = mctx->rip;
792 gregs [REG_R12] = mctx->r12;
793 gregs [REG_R13] = mctx->r13;
794 gregs [REG_R14] = mctx->r14;
795 gregs [REG_R15] = mctx->r15;
797 MonoContext *ctx = (MonoContext *)sigctx;
799 ctx->rax = mctx->rax;
800 ctx->rbx = mctx->rbx;
801 ctx->rcx = mctx->rcx;
802 ctx->rdx = mctx->rdx;
803 ctx->rbp = mctx->rbp;
804 ctx->rsp = mctx->rsp;
805 ctx->rsi = mctx->rsi;
806 ctx->rdi = mctx->rdi;
807 ctx->rip = mctx->rip;
808 ctx->r12 = mctx->r12;
809 ctx->r13 = mctx->r13;
810 ctx->r14 = mctx->r14;
811 ctx->r15 = mctx->r15;
/* Extract the faulting instruction pointer from a signal context. */
816 mono_arch_ip_from_context (void *sigctx)
819 #ifdef MONO_ARCH_USE_SIGACTION
821 ucontext_t *ctx = (ucontext_t*)sigctx;
823 guint64 *gregs = gregs_from_ucontext (ctx);
825 return (gpointer)gregs [REG_RIP];
827 MonoContext *ctx = sigctx;
828 return (gpointer)ctx->rip;
/* Re-protect the soft stack-overflow guard page(s) after the overflow has
 * been handled; jumped to via prepare_for_guard_pages(). */
833 restore_soft_guard_pages (void)
835 MonoJitTlsData *jit_tls = TlsGetValue (mono_jit_tls_id);
836 if (jit_tls->stack_ovf_guard_base)
837 mono_mprotect (jit_tls->stack_ovf_guard_base, jit_tls->stack_ovf_guard_size, MONO_MMAP_NONE);
841 * this function modifies mctx so that when it is restored, it
842 * won't execute starting at mctx.eip, but in a function that
843 * will restore the protection on the soft-guard pages and return back to
844 * continue at mctx.eip.
847 prepare_for_guard_pages (MonoContext *mctx)
850 sp = (gpointer)(mctx->rsp);
/* Push the original resume address so restore_soft_guard_pages() returns
 * to it, then redirect execution to that helper. */
852 /* the return addr */
853 sp [0] = (gpointer)(mctx->rip);
854 mctx->rip = (guint64)restore_soft_guard_pages;
855 mctx->rsp = (guint64)sp;
/* Runs on the normal stack after an altstack SIGSEGV: perform exception
 * handling, arrange for guard pages to be restored on stack overflow, then
 * resume via the restore-context trampoline. Never returns. */
859 altstack_handle_and_restore (void *sigctx, gpointer obj, gboolean stack_ovf)
861 void (*restore_context) (MonoContext *);
864 restore_context = mono_get_restore_context ();
865 mono_arch_sigctx_to_monoctx (sigctx, &mctx);
866 mono_handle_exception (&mctx, obj, MONO_CONTEXT_GET_IP (&mctx), FALSE);
868 prepare_for_guard_pages (&mctx);
869 restore_context (&mctx);
/* SIGSEGV handler running on the signal alternate stack. Builds a fake call
 * frame on the faulting thread's real stack (below the red zone) holding a
 * copy of the ucontext, then rewrites the signal context so that returning
 * from the handler "calls" altstack_handle_and_restore() on that stack. */
873 mono_arch_handle_altstack_exception (void *sigctx, gpointer fault_addr, gboolean stack_ovf)
875 #ifdef MONO_ARCH_USE_SIGACTION
876 MonoException *exc = NULL;
877 ucontext_t *ctx = (ucontext_t*)sigctx;
878 guint64 *gregs = gregs_from_ucontext (ctx);
879 MonoJitInfo *ji = mono_jit_info_table_find (mono_domain_get (), (gpointer)gregs [REG_RIP]);
884 exc = mono_domain_get ()->stack_overflow_ex;
/* Fault outside managed code: treat it as a native crash. */
886 mono_handle_native_sigsegv (SIGSEGV, sigctx);
888 /* setup a call frame on the real stack so that control is returned there
889 * and exception handling can continue.
890 * The frame looks like:
894 * 128 is the size of the red zone
896 frame_size = sizeof (ucontext_t) + sizeof (gpointer) * 4 + 128;
/* 16-byte align the new stack pointer, as the ABI requires. */
899 sp = (gpointer)(gregs [REG_RSP] & ~15);
900 sp = (gpointer)((char*)sp - frame_size);
901 /* the arguments must be aligned */
902 sp [-1] = (gpointer)gregs [REG_RIP];
903 /* may need to adjust pointers in the new struct copy, depending on the OS */
904 memcpy (sp + 4, ctx, sizeof (ucontext_t));
905 /* at the return from the signal handler execution starts in altstack_handle_and_restore() */
906 gregs [REG_RIP] = (unsigned long)altstack_handle_and_restore;
907 gregs [REG_RSP] = (unsigned long)(sp - 1);
/* SysV argument registers for altstack_handle_and_restore(sigctx, obj, stack_ovf). */
908 gregs [REG_RDI] = (unsigned long)(sp + 4);
909 gregs [REG_RSI] = (guint64)exc;
910 gregs [REG_RDX] = stack_ovf;
/* Return the hijacked caller IP stored in the LMF by
 * mono_arch_notify_pending_exc(), clearing the 'rip set' flag bit. */
915 get_original_ip (void)
917 MonoLMF *lmf = mono_get_lmf ();
921 /* Reset the change to previous_lmf */
922 lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf & ~1);
/* Emits (once, cached) the helper installed as a hijacked return address by
 * mono_arch_notify_pending_exc(). It preserves the call's return value
 * registers, fetches the pending exception, and either throws it at the
 * original call site or resumes at the original IP when none is pending. */
928 get_throw_pending_exception (void)
930 static guint8* start;
931 static gboolean inited = FALSE;
934 gpointer throw_trampoline;
939 start = code = mono_global_codeman_reserve (128);
941 /* We are in the frame of a managed method after a call */
943 * We would like to throw the pending exception in such a way that it looks to
944 * be thrown from the managed method.
947 /* Save registers which might contain the return value of the call */
948 amd64_push_reg (code, AMD64_RAX);
949 amd64_push_reg (code, AMD64_RDX);
951 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
952 amd64_movsd_membase_reg (code, AMD64_RSP, 0, AMD64_XMM0);
/* Keep the stack 16-byte aligned for the calls below. */
955 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
957 /* Obtain the pending exception */
958 amd64_mov_reg_imm (code, AMD64_R11, mono_thread_get_and_clear_pending_exception);
959 amd64_call_reg (code, AMD64_R11);
961 /* Check if it is NULL, and branch */
962 amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0);
963 br[0] = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE);
965 /* exc != NULL branch */
967 /* Save the exc on the stack */
968 amd64_push_reg (code, AMD64_RAX);
970 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
972 /* Obtain the original ip and clear the flag in previous_lmf */
973 amd64_mov_reg_imm (code, AMD64_R11, get_original_ip);
974 amd64_call_reg (code, AMD64_R11);
/* Reload the saved exception object from the stack into %r11. */
977 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, 8, 8);
979 /* Pop saved stuff from the stack */
980 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 6 * 8);
982 /* Setup arguments for the throw trampoline */
984 amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_R11, 8);
985 /* The trampoline expects the caller ip to be pushed on the stack */
986 amd64_push_reg (code, AMD64_RAX);
988 /* Call the throw trampoline */
989 throw_trampoline = mono_get_throw_exception ();
990 amd64_mov_reg_imm (code, AMD64_R11, throw_trampoline);
991 /* We use a jump instead of a call so we can push the original ip on the stack */
992 amd64_jump_reg (code, AMD64_R11);
994 /* ex == NULL branch */
995 mono_amd64_patch (br [0], code);
997 /* Obtain the original ip and clear the flag in previous_lmf */
998 amd64_mov_reg_imm (code, AMD64_R11, get_original_ip);
999 amd64_call_reg (code, AMD64_R11);
1000 amd64_mov_reg_reg (code, AMD64_R11, AMD64_RAX, 8);
1002 /* Restore registers */
1003 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
1004 amd64_movsd_reg_membase (code, AMD64_XMM0, AMD64_RSP, 0);
1005 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
1006 amd64_pop_reg (code, AMD64_RDX);
1007 amd64_pop_reg (code, AMD64_RAX);
1009 /* Return to original code */
1010 amd64_jump_reg (code, AMD64_R11);
1012 g_assert ((code - start) < 128);
1020 * Called when a thread receives an async exception while executing unmanaged code.
1021 * Instead of checking for this exception in the managed-to-native wrapper, we hijack
1022 * the return address on the stack to point to a helper routine which throws the
1026 mono_arch_notify_pending_exc (void)
1028 MonoLMF *lmf = mono_get_lmf ();
/* Bit 0 of previous_lmf doubles as the 'rip already saved' flag. */
1034 if ((guint64)lmf->previous_lmf & 1)
1035 /* Already hijacked or trampoline LMF entry */
1038 /* lmf->rsp is set just before making the call which transitions to unmanaged code */
1039 lmf->rip = *(guint64*)(lmf->rsp - 8);
1040 /* Signal that lmf->rip is set */
1041 lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf | 1);
/* Replace the return address so native code returns into the thrower. */
1043 *(gpointer*)(lmf->rsp - 8) = get_throw_pending_exception ();
1046 #ifdef PLATFORM_WIN32
1049 * The mono_arch_unwindinfo* methods are used to build and add
1050 * function table info for each emitted method from mono. On Winx64
1051 * the seh handler will not be called if the mono methods are not
1052 * added to the function table.
1054 * We should not need to add non-volatile register info to the
1055 * table since mono stores that info elsewhere. (Except for the register
/* Fixed-capacity unwind-code buffer; codes are filled from the end so the
 * final table is naturally in reverse prolog order. */
1059 #define MONO_MAX_UNWIND_CODES 22
/* Mirrors the Win64 UNWIND_CODE slot (ms-docs: x64 exception handling). */
1061 typedef union _UNWIND_CODE {
1064 guchar UnwindOp : 4;
1067 gushort FrameOffset;
1068 } UNWIND_CODE, *PUNWIND_CODE;
/* Mirrors the Win64 UNWIND_INFO header, with a mono-sized code array. */
1070 typedef struct _UNWIND_INFO {
1073 guchar SizeOfProlog;
1074 guchar CountOfCodes;
1075 guchar FrameRegister : 4;
1076 guchar FrameOffset : 4;
1077 /* custom size for mono, allowing for: */
1078 /*UWOP_PUSH_NONVOL ebp offset = 21*/
1079 /*UWOP_ALLOC_LARGE : requires 2 or 3 offset = 20*/
1080 /*UWOP_SET_FPREG : requires 2 offset = 17*/
1081 /*UWOP_PUSH_NONVOL offset = 15-0*/
1082 UNWIND_CODE UnwindCode[MONO_MAX_UNWIND_CODES];
1084 /* UNWIND_CODE MoreUnwindCode[((CountOfCodes + 1) & ~1) - 1];
1086 * OPTIONAL ULONG ExceptionHandler;
1087 * OPTIONAL ULONG FunctionEntry;
1089 * OPTIONAL ULONG ExceptionData[]; */
1090 } UNWIND_INFO, *PUNWIND_INFO;
/* Function-table entry plus its unwind info, kept together so both can be
 * copied after the emitted code by install_unwind_info(). */
1094 RUNTIME_FUNCTION runtimeFunction;
1095 UNWIND_INFO unwindInfo;
1096 } MonoUnwindInfo, *PMonoUnwindInfo;
/* Allocate a zeroed MonoUnwindInfo (version 1) and store it in *monoui.
 * Ownership passes to the caller / install_unwind_info(), which frees it. */
1099 mono_arch_unwindinfo_create (gpointer* monoui)
1101 PMonoUnwindInfo newunwindinfo;
1102 *monoui = newunwindinfo = g_new0 (MonoUnwindInfo, 1);
1103 newunwindinfo->unwindInfo.Version = 1;
/* Record a UWOP_PUSH_NONVOL unwind code for pushing non-volatile register
 * @reg; @nextip is the address just past the push instruction. Codes must
 * be added in prolog order (increasing offsets). */
1107 mono_arch_unwindinfo_add_push_nonvol (gpointer* monoui, gpointer codebegin, gpointer nextip, guchar reg )
1109 PMonoUnwindInfo unwindinfo;
1110 PUNWIND_CODE unwindcode;
1113 mono_arch_unwindinfo_create (monoui);
1115 unwindinfo = (MonoUnwindInfo*)*monoui;
1117 if (unwindinfo->unwindInfo.CountOfCodes >= MONO_MAX_UNWIND_CODES)
1118 g_error ("Larger allocation needed for the unwind information.");
/* Codes are filled from the end of the array toward the front. */
1120 codeindex = MONO_MAX_UNWIND_CODES - (++unwindinfo->unwindInfo.CountOfCodes);
1121 unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];
1122 unwindcode->UnwindOp = 0; /*UWOP_PUSH_NONVOL*/
1123 unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin));
1124 unwindcode->OpInfo = reg;
1126 if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
1127 g_error ("Adding unwind info in wrong order.");
1129 unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
/* Record a UWOP_SET_FPREG unwind code: @reg becomes the frame pointer
 * (with zero frame offset). Consumes two code slots per the Win64 spec. */
1133 mono_arch_unwindinfo_add_set_fpreg (gpointer* monoui, gpointer codebegin, gpointer nextip, guchar reg )
1135 PMonoUnwindInfo unwindinfo;
1136 PUNWIND_CODE unwindcode;
1139 mono_arch_unwindinfo_create (monoui);
1141 unwindinfo = (MonoUnwindInfo*)*monoui;
1143 if (unwindinfo->unwindInfo.CountOfCodes + 1 >= MONO_MAX_UNWIND_CODES)
1144 g_error ("Larger allocation needed for the unwind information.");
1146 codeindex = MONO_MAX_UNWIND_CODES - (unwindinfo->unwindInfo.CountOfCodes += 2);
1147 unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];
1148 unwindcode->FrameOffset = 0; /*Assuming no frame pointer offset for mono*/
1150 unwindcode->UnwindOp = 3; /*UWOP_SET_FPREG*/
1151 unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin));
1152 unwindcode->OpInfo = reg;
1154 unwindinfo->unwindInfo.FrameRegister = reg;
1156 if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
1157 g_error ("Adding unwind info in wrong order.");
1159 unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
/* Record a stack-allocation unwind code for 'sub rsp, size'. Picks
 * UWOP_ALLOC_SMALL (1 slot, size <= 128), UWOP_ALLOC_LARGE with scaled
 * size (2 slots, size <= 0x7FFF8) or UWOP_ALLOC_LARGE with unscaled size
 * (3 slots) per the Win64 unwind-code encoding. */
1163 mono_arch_unwindinfo_add_alloc_stack (gpointer* monoui, gpointer codebegin, gpointer nextip, guint size )
1165 PMonoUnwindInfo unwindinfo;
1166 PUNWIND_CODE unwindcode;
1170 mono_arch_unwindinfo_create (monoui);
1172 unwindinfo = (MonoUnwindInfo*)*monoui;
1175 g_error ("Stack allocation must be equal to or greater than 0x8.");
1179 else if (size <= 0x7FFF8)
1184 if (unwindinfo->unwindInfo.CountOfCodes + codesneeded > MONO_MAX_UNWIND_CODES)
1185 g_error ("Larger allocation needed for the unwind information.");
1187 codeindex = MONO_MAX_UNWIND_CODES - (unwindinfo->unwindInfo.CountOfCodes += codesneeded);
1188 unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];
1190 if (codesneeded == 1) {
1191 /*The size of the allocation is
1192 (the number in the OpInfo member) times 8 plus 8*/
1193 unwindcode->OpInfo = (size - 8)/8;
1194 unwindcode->UnwindOp = 2; /*UWOP_ALLOC_SMALL*/
1197 if (codesneeded == 3) {
1198 /*the unscaled size of the allocation is recorded
1199 in the next two slots in little-endian format*/
1200 *((unsigned int*)(&unwindcode->FrameOffset)) = size;
1202 unwindcode->OpInfo = 1;
1205 /*the size of the allocation divided by 8
1206 is recorded in the next slot*/
1207 unwindcode->FrameOffset = size/8;
1209 unwindcode->OpInfo = 0;
1212 unwindcode->UnwindOp = 1; /*UWOP_ALLOC_LARGE*/
1215 unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin));
1217 if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
1218 g_error ("Adding unwind info in wrong order.");
1220 unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
/* Bytes needed to append this unwind info after the method's code:
 * full struct minus the unused tail of the code array, plus 8 bytes of
 * slack for the 8-byte alignment done at install time. */
1224 mono_arch_unwindinfo_get_size (gpointer monoui)
1226 PMonoUnwindInfo unwindinfo;
1230 unwindinfo = (MonoUnwindInfo*)monoui;
1231 return (8 + sizeof (MonoUnwindInfo)) -
1232 (sizeof (UNWIND_CODE) * (MONO_MAX_UNWIND_CODES - unwindinfo->unwindInfo.CountOfCodes));
/* Copy the accumulated unwind info to an 8-aligned location right after the
 * emitted method code, compact the reverse-filled code array to the front,
 * free the builder, and register the RUNTIME_FUNCTION with the OS so the
 * Win64 unwinder/SEH can find the method. */
1236 mono_arch_unwindinfo_install_unwind_info (gpointer* monoui, gpointer code, guint code_size)
1238 PMonoUnwindInfo unwindinfo, targetinfo;
1240 guint64 targetlocation;
1244 unwindinfo = (MonoUnwindInfo*)*monoui;
1245 targetlocation = (guint64)&(((guchar*)code)[code_size]);
1246 targetinfo = (PMonoUnwindInfo) ALIGN_TO(targetlocation, 8);
/* RUNTIME_FUNCTION addresses are relative to the base passed to
 * RtlAddFunctionTable (here: 'code'). */
1248 unwindinfo->runtimeFunction.EndAddress = code_size;
1249 unwindinfo->runtimeFunction.UnwindData = ((guchar*)&targetinfo->unwindInfo) - ((guchar*)code);
1251 memcpy (targetinfo, unwindinfo, sizeof (MonoUnwindInfo) - (sizeof (UNWIND_CODE) * MONO_MAX_UNWIND_CODES));
1253 codecount = unwindinfo->unwindInfo.CountOfCodes;
/* Move the codes from the tail of the builder array to index 0. */
1255 memcpy (&targetinfo->unwindInfo.UnwindCode[0], &unwindinfo->unwindInfo.UnwindCode[MONO_MAX_UNWIND_CODES-codecount],
1256 sizeof (UNWIND_CODE) * unwindinfo->unwindInfo.CountOfCodes);
1259 g_free (unwindinfo);
1262 RtlAddFunctionTable (&targetinfo->runtimeFunction, 1, (DWORD64)code);