2 * exceptions-amd64.c: exception support for AMD64
5 * Dietmar Maurer (dietmar@ximian.com)
7 * (C) 2001 Ximian, Inc.
14 #ifndef PLATFORM_WIN32
15 #include <sys/ucontext.h>
18 #include <mono/arch/amd64/amd64-codegen.h>
19 #include <mono/metadata/appdomain.h>
20 #include <mono/metadata/tabledefs.h>
21 #include <mono/metadata/threads.h>
22 #include <mono/metadata/threads-types.h>
23 #include <mono/metadata/debug-helpers.h>
24 #include <mono/metadata/exception.h>
25 #include <mono/metadata/gc-internal.h>
26 #include <mono/metadata/mono-debug.h>
27 #include <mono/utils/mono-mmap.h>
30 #include "mini-amd64.h"
32 #define ALIGN_TO(val,align) (((val) + ((align) - 1)) & ~((align) - 1))
/* --- Win32 structured-exception-handling support (PLATFORM_WIN32 only) --- */
/* Per-exception-kind callbacks registered through win32_seh_set_handler (). */
35 static MonoW32ExceptionHandler fpe_handler;
36 static MonoW32ExceptionHandler ill_handler;
37 static MonoW32ExceptionHandler segv_handler;
/* Filter that was installed before ours; restored by win32_seh_cleanup (). */
39 static LPTOP_LEVEL_EXCEPTION_FILTER old_handler;
/* Dispatch to the registered handler for kind _ex (fpe/ill/segv), passing the
 * UNIX-style context built in seh_handler.  NOTE(review): the (int)sctx cast
 * truncates a 64-bit pointer — presumably a historic handler signature;
 * confirm against MonoW32ExceptionHandler's declaration. */
41 #define W32_SEH_HANDLE_EX(_ex) \
42 if (_ex##_handler) _ex##_handler((int)sctx)
45 * Unhandled Exception Filter
46 * Top-level per-process exception handler.
/* Converts the Win32 CONTEXT to a MonoContext, routes the exception code to
 * the matching mono handler, then copies the (possibly modified) register
 * state back into the CONTEXT so execution can resume there. */
48 LONG CALLBACK seh_handler(EXCEPTION_POINTERS* ep)
55 res = EXCEPTION_CONTINUE_EXECUTION;
57 er = ep->ExceptionRecord;
58 ctx = ep->ContextRecord;
/* Scratch context handed to the mono handlers (heap allocated). */
59 sctx = g_malloc(sizeof(MonoContext));
61 /* Copy Win32 context to UNIX style context */
76 switch (er->ExceptionCode) {
77 case EXCEPTION_ACCESS_VIOLATION:
78 W32_SEH_HANDLE_EX(segv);
80 case EXCEPTION_ILLEGAL_INSTRUCTION:
81 W32_SEH_HANDLE_EX(ill);
/* All integer and floating-point arithmetic faults share the fpe handler. */
83 case EXCEPTION_INT_DIVIDE_BY_ZERO:
84 case EXCEPTION_INT_OVERFLOW:
85 case EXCEPTION_FLT_DIVIDE_BY_ZERO:
86 case EXCEPTION_FLT_OVERFLOW:
87 case EXCEPTION_FLT_UNDERFLOW:
88 case EXCEPTION_FLT_INEXACT_RESULT:
89 W32_SEH_HANDLE_EX(fpe);
95 /* Copy context back */
100 ctx->Rbp = sctx->rbp;
101 ctx->Rsp = sctx->rsp;
102 ctx->Rsi = sctx->rsi;
103 ctx->Rdi = sctx->rdi;
104 ctx->Rip = sctx->rip;
/* Install seh_handler as the process-wide unhandled-exception filter. */
111 void win32_seh_init()
113 old_handler = SetUnhandledExceptionFilter(seh_handler);
/* Restore whatever filter was active before win32_seh_init (). */
116 void win32_seh_cleanup()
118 if (old_handler) SetUnhandledExceptionFilter(old_handler);
/* Register @handler for signal @type; presumably SIGFPE/SIGILL/SIGSEGV
 * select the three cases below — confirm against the callers. */
121 void win32_seh_set_handler(int type, MonoW32ExceptionHandler handler)
125 fpe_handler = handler;
128 ill_handler = handler;
131 segv_handler = handler;
138 #endif /* PLATFORM_WIN32 */
141 * mono_arch_get_restore_context:
143 * Returns a pointer to a method which restores a previously saved sigcontext.
/* Emits a trampoline that loads every general-purpose register (except %rip
 * and the %r11 scratch register) from the MonoContext passed in ARG_REG1,
 * switches to the saved stack pointer, and jumps to the saved %rip. */
146 mono_arch_get_restore_context_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
148 guint8 *start = NULL;
151 /* restore_context (MonoContext *ctx) */
155 start = code = mono_global_codeman_reserve (256);
/* Keep the context pointer in %r11 so the argument register can be reloaded. */
157 amd64_mov_reg_reg (code, AMD64_R11, AMD64_ARG_REG1, 8);
159 /* Restore all registers except %rip and %r11 */
160 amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rax), 8);
161 amd64_mov_reg_membase (code, AMD64_RCX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rcx), 8);
162 amd64_mov_reg_membase (code, AMD64_RDX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rdx), 8);
163 amd64_mov_reg_membase (code, AMD64_RBX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rbx), 8);
164 amd64_mov_reg_membase (code, AMD64_RBP, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rbp), 8);
165 amd64_mov_reg_membase (code, AMD64_RSI, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rsi), 8);
166 amd64_mov_reg_membase (code, AMD64_RDI, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rdi), 8);
/* %r8-%r10 are caller-saved argument/scratch registers; intentionally not restored. */
167 //amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r8), 8);
168 //amd64_mov_reg_membase (code, AMD64_R9, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r9), 8);
169 //amd64_mov_reg_membase (code, AMD64_R10, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r10), 8);
170 amd64_mov_reg_membase (code, AMD64_R12, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r12), 8);
171 amd64_mov_reg_membase (code, AMD64_R13, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r13), 8);
172 amd64_mov_reg_membase (code, AMD64_R14, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r14), 8);
173 amd64_mov_reg_membase (code, AMD64_R15, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r15), 8);
/* Switch to the saved stack last; from here on the old stack is gone. */
175 amd64_mov_reg_membase (code, AMD64_RSP, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rsp), 8);
177 /* get return address */
178 amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rip), 8);
180 /* jump to the saved IP */
181 amd64_jump_reg (code, AMD64_R11);
183 mono_arch_flush_icache (start, code - start);
185 *code_size = code - start;
191 * mono_arch_get_call_filter:
193 * Returns a pointer to a method which calls an exception filter. We
194 * also use this function to call finally handlers (we pass NULL as
195 * @exc object in this case).
/* Emits a trampoline that saves the callee-saved registers, loads the
 * filter's register state from the MonoContext in ARG_REG1, calls the
 * handler address in ARG_REG2, and then restores the original registers. */
198 mono_arch_get_call_filter_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
207 start = code = mono_global_codeman_reserve (128);
209 /* call_filter (MonoContext *ctx, unsigned long eip) */
212 /* Alloc new frame */
213 amd64_push_reg (code, AMD64_RBP);
214 amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, 8);
216 /* Save callee saved regs */
218 for (i = 0; i < AMD64_NREG; ++i)
219 if (AMD64_IS_CALLEE_SAVED_REG (i)) {
220 amd64_push_reg (code, i);
/* Save %rbp separately so it can be restored after the handler returns. */
226 amd64_push_reg (code, AMD64_RBP);
228 /* Make stack misaligned, the call will make it aligned again */
230 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
/* Load the frame pointer the filter/finally block expects to run under. */
233 amd64_mov_reg_membase (code, AMD64_RBP, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rbp), 8);
234 /* load callee saved regs */
235 amd64_mov_reg_membase (code, AMD64_RBX, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rbx), 8);
236 amd64_mov_reg_membase (code, AMD64_R12, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r12), 8);
237 amd64_mov_reg_membase (code, AMD64_R13, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r13), 8);
238 amd64_mov_reg_membase (code, AMD64_R14, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r14), 8);
239 amd64_mov_reg_membase (code, AMD64_R15, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r15), 8);
/* On Win64 %rdi/%rsi are callee-saved as well, so restore them too. */
240 #ifdef PLATFORM_WIN32
241 amd64_mov_reg_membase (code, AMD64_RDI, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rdi), 8);
242 amd64_mov_reg_membase (code, AMD64_RSI, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rsi), 8);
245 /* call the handler */
246 amd64_call_reg (code, AMD64_ARG_REG2);
/* Undo the alignment adjustment made before the call. */
249 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
252 amd64_pop_reg (code, AMD64_RBP);
254 /* Restore callee saved regs */
/* Pops in reverse order of the pushes above.  NOTE(review): the loop starts
 * at i == AMD64_NREG (one past the last register) — presumably
 * AMD64_IS_CALLEE_SAVED_REG rejects that value; confirm in the macro. */
255 for (i = AMD64_NREG; i >= 0; --i)
256 if (AMD64_IS_CALLEE_SAVED_REG (i))
257 amd64_pop_reg (code, i);
262 g_assert ((code - start) < 128);
264 mono_arch_flush_icache (start, code - start);
266 *code_size = code - start;
272 * The first few arguments are dummy, to force the other arguments to be passed on
273 * the stack, this avoids overwriting the argument registers in the throw trampoline.
/* C helper called from the throw trampoline: rebuilds a MonoContext from the
 * register values pushed by the trampoline, notifies the debugger, runs the
 * exception machinery and restores the resulting context.  Never returns. */
276 mono_amd64_throw_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
277 guint64 dummy5, guint64 dummy6,
278 MonoObject *exc, guint64 rip, guint64 rsp,
279 guint64 rbx, guint64 rbp, guint64 r12, guint64 r13,
280 guint64 r14, guint64 r15, guint64 rdi, guint64 rsi,
281 guint64 rax, guint64 rcx, guint64 rdx,
/* Lazily-resolved pointer to the restore-context trampoline. */
284 static void (*restore_context) (MonoContext *);
287 if (!restore_context)
288 restore_context = mono_get_restore_context ();
/* Give the debugger a chance to intercept first throws (not rethrows). */
304 if (!rethrow && mono_debugger_throw_exception ((gpointer)(rip - 8), (gpointer)rsp, exc)) {
306 * The debugger wants us to stop on the `throw' instruction.
307 * By the time we get here, it already inserted a breakpoint on
308 * eip - 8 (which is the address of the `mov %r15,%rdi ; callq throw').
313 * In case of a rethrow, the JIT is emitting code like this:
315 * mov 0xffffffffffffffd0(%rbp),%rax'
319 * Here, restore_context() wouldn't restore the %rax register correctly.
323 restore_context (&ctx);
324 g_assert_not_reached ();
327 /* adjust eip so that it points into the call instruction */
/* Reset the stack trace on managed Exception objects so it is rebuilt
 * from the throw site. */
330 if (mono_object_isinst (exc, mono_defaults.exception_class)) {
331 MonoException *mono_ex = (MonoException*)exc;
333 mono_ex->stack_trace = NULL;
335 mono_handle_exception (&ctx, exc, (gpointer)rip, FALSE);
/* Resume execution in the handler chosen by mono_handle_exception (). */
336 restore_context (&ctx);
338 g_assert_not_reached ();
/* Emits the throw (or rethrow, when @rethrow is TRUE) trampoline: pushes the
 * caller's register state and the exception object onto the stack in the
 * order mono_amd64_throw_exception () expects, then calls it. */
342 get_throw_trampoline (gboolean rethrow, guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
347 start = code = mono_global_codeman_reserve (64);
/* %r11 = incoming %rsp, so the caller's rip/rsp can be recovered below. */
353 amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, 8);
/* Push the stack-passed arguments of mono_amd64_throw_exception in reverse
 * declaration order (rethrow, rdx, rcx, rax, rsi, rdi, r15 ... rbx). */
356 amd64_push_imm (code, rethrow);
357 amd64_push_reg (code, AMD64_RDX);
358 amd64_push_reg (code, AMD64_RCX);
359 amd64_push_reg (code, AMD64_RAX);
360 amd64_push_reg (code, AMD64_RSI);
361 amd64_push_reg (code, AMD64_RDI);
362 amd64_push_reg (code, AMD64_R15);
363 amd64_push_reg (code, AMD64_R14);
364 amd64_push_reg (code, AMD64_R13);
365 amd64_push_reg (code, AMD64_R12);
366 amd64_push_reg (code, AMD64_RBP);
367 amd64_push_reg (code, AMD64_RBX);
/* Caller's %rsp: the saved %rsp plus the return-address slot. */
370 amd64_lea_membase (code, AMD64_RAX, AMD64_R11, 8);
371 amd64_push_reg (code, AMD64_RAX);
/* Caller's %rip: the return address at the top of the original stack. */
374 amd64_push_membase (code, AMD64_R11, 0);
/* The exception object (the trampoline's only real argument). */
377 amd64_push_reg (code, AMD64_ARG_REG1);
/* On Win64 the first four args go in registers, so six dummy stack slots
 * are needed to line up with the dummy parameters of the C function. */
379 #ifdef PLATFORM_WIN32
381 amd64_push_imm (code, 0);
382 amd64_push_imm (code, 0);
383 amd64_push_imm (code, 0);
384 amd64_push_imm (code, 0);
385 amd64_push_imm (code, 0);
386 amd64_push_imm (code, 0);
/* AOT path: load the target through a patched RIP-relative slot. */
390 *ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_throw_exception");
391 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
393 amd64_mov_reg_imm (code, AMD64_R11, mono_amd64_throw_exception);
395 amd64_call_reg (code, AMD64_R11);
/* The call never returns; trap if it somehow does. */
396 amd64_breakpoint (code);
398 mono_arch_flush_icache (start, code - start);
400 g_assert ((code - start) < 64);
402 *code_size = code - start;
408 * mono_arch_get_throw_exception:
410 * Returns a function pointer which can be used to raise
411 * exceptions. The returned function has the following
412 * signature: void (*func) (MonoException *exc);
416 mono_arch_get_throw_exception_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
418 return get_throw_trampoline (FALSE, code_size, ji, aot);
/* Same trampoline, but flagged as a rethrow (preserves the stack trace). */
422 mono_arch_get_rethrow_exception_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
424 return get_throw_trampoline (TRUE, code_size, ji, aot);
/* Stub kept only for interface completeness; emits a single breakpoint. */
428 mono_arch_get_throw_exception_by_name_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
433 start = code = mono_global_codeman_reserve (64);
437 /* Not used on amd64 */
438 amd64_breakpoint (code);
440 mono_arch_flush_icache (start, code - start);
442 *code_size = code - start;
448 * mono_arch_get_throw_corlib_exception:
450 * Returns a function pointer which can be used to raise
451 * corlib exceptions. The returned function has the following
452 * signature: void (*func) (guint32 ex_token, guint32 offset);
453 * Here, offset is the offset which needs to be subtracted from the caller IP
454 * to get the IP of the throw. Passing the offset has the advantage that it
455 * needs no relocations in the caller.
458 mono_arch_get_throw_corlib_exception_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
460 static guint8* start;
464 start = code = mono_global_codeman_reserve (64);
/* Spill the offset argument; it is needed after the call below. */
469 amd64_push_reg (code, AMD64_ARG_REG2);
471 /* Call exception_from_token */
472 amd64_mov_reg_reg (code, AMD64_ARG_REG2, AMD64_ARG_REG1, 8);
/* AOT path: both the corlib image and the icall address are loaded via
 * patched RIP-relative slots. */
474 *ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_IMAGE, mono_defaults.exception_class->image);
475 amd64_mov_reg_membase (code, AMD64_ARG_REG1, AMD64_RIP, 0, 8);
476 *ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_exception_from_token");
477 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
479 amd64_mov_reg_imm (code, AMD64_ARG_REG1, mono_defaults.exception_class->image);
480 amd64_mov_reg_imm (code, AMD64_R11, mono_exception_from_token);
/* Win64 ABI requires 32 bytes of shadow space for the callee. */
482 #ifdef PLATFORM_WIN32
483 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 32);
485 amd64_call_reg (code, AMD64_R11);
486 #ifdef PLATFORM_WIN32
487 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 32);
490 /* Compute throw_ip */
/* throw_ip = caller return address - offset argument saved above. */
491 amd64_pop_reg (code, AMD64_ARG_REG2);
493 amd64_pop_reg (code, AMD64_ARG_REG3);
494 amd64_alu_reg_reg (code, X86_SUB, AMD64_ARG_REG3, AMD64_ARG_REG2);
496 /* Put the throw_ip at the top of the misaligned stack */
497 amd64_push_reg (code, AMD64_ARG_REG3);
499 throw_ex = (guint64)mono_get_throw_exception ();
501 /* Call throw_exception */
/* The MonoException returned in %rax becomes the throw argument. */
502 amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_RAX, 8);
504 *ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_throw_exception");
505 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
507 amd64_mov_reg_imm (code, AMD64_R11, throw_ex);
509 /* The original IP is on the stack */
510 amd64_jump_reg (code, AMD64_R11);
512 g_assert ((code - start) < 64);
514 mono_arch_flush_icache (start, code - start);
516 *code_size = code - start;
521 /* mono_arch_find_jit_info:
523 * This function is used to gather information from @ctx. It return the
524 * MonoJitInfo of the corresponding function, unwinds one stack frame and
525 * stores the resulting context into @new_ctx. It also stores a string
526 * describing the stack location into @trace (if not NULL), and modifies
527 * the @lmf if necessary. @native_offset return the IP offset from the
528 * start of the function or -1 if that info is not available.
531 mono_arch_find_jit_info (MonoDomain *domain, MonoJitTlsData *jit_tls, MonoJitInfo *res, MonoJitInfo *prev_ji, MonoContext *ctx,
532 MonoContext *new_ctx, char **trace, MonoLMF **lmf, int *native_offset,
537 gpointer ip = MONO_CONTEXT_GET_IP (ctx);
539 /* Avoid costly table lookup during stack overflow */
540 if (prev_ji && (ip > prev_ji->code_start && ((guint8*)ip < ((guint8*)prev_ji->code_start) + prev_ji->code_size)))
543 ji = mono_jit_info_table_find (domain, ip);
/* --- Managed frame: unwind using the JIT-recorded register info. --- */
/* Bit 31 of used_regs flags a frame compiled without a frame pointer. */
552 gboolean omit_fp = (ji->used_regs & (1 << 31)) > 0;
555 if (!ji->method->wrapper_type)
559 * If a method has save_lmf set, then register save/restore code is not generated
560 * by the JIT, so we have to restore callee saved registers from the lmf.
562 if (ji->method->save_lmf) {
566 * *lmf might not point to the LMF pushed by this method, so compute the LMF
570 lmf_addr = (MonoLMF*)ctx->rsp;
572 lmf_addr = (MonoLMF*)(ctx->rbp - sizeof (MonoLMF));
574 new_ctx->rbp = lmf_addr->rbp;
575 new_ctx->rbx = lmf_addr->rbx;
576 new_ctx->r12 = lmf_addr->r12;
577 new_ctx->r13 = lmf_addr->r13;
578 new_ctx->r14 = lmf_addr->r14;
579 new_ctx->r15 = lmf_addr->r15;
582 offset = omit_fp ? 0 : -1;
583 /* restore caller saved registers */
/* Low bits of used_regs record which callee-saved regs the frame spilled. */
584 for (i = 0; i < AMD64_NREG; i ++)
585 if (AMD64_IS_CALLEE_SAVED_REG (i) && (ji->used_regs & (1 << i))) {
589 reg = *((guint64*)ctx->rsp + offset);
593 reg = *((guint64 *)ctx->rbp + offset);
617 g_assert_not_reached ();
/* Pop any LMF entries belonging to already-unwound frames. */
622 if (*lmf && ((*lmf) != jit_tls->first_lmf) && (MONO_CONTEXT_GET_SP (ctx) >= (gpointer)(*lmf)->rsp)) {
623 /* remove any unused lmf */
/* Bit 0 of previous_lmf is a flag, mask it off to get the real pointer. */
624 *lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~1);
/* Frame-pointer-less frame: bits 16-30 of used_regs hold the frame size. */
629 new_ctx->rsp += (ji->used_regs >> 16) & (0x7fff);
630 new_ctx->rip = *((guint64 *)new_ctx->rsp) - 1;
631 /* Pop return address */
635 /* Pop EBP and the return address */
636 new_ctx->rsp = ctx->rbp + (2 * sizeof (gpointer));
637 /* we subtract 1, so that the IP points into the call instruction */
638 new_ctx->rip = *((guint64 *)ctx->rbp + 1) - 1;
639 new_ctx->rbp = *((guint64 *)ctx->rbp);
642 /* Pop arguments off the stack */
/* Callee-pops convention: remove the stack-passed arguments as well. */
644 MonoJitArgumentInfo *arg_info = g_newa (MonoJitArgumentInfo, mono_method_signature (ji->method)->param_count + 1);
646 guint32 stack_to_pop = mono_arch_get_argument_info (mono_method_signature (ji->method), mono_method_signature (ji->method)->param_count, arg_info);
647 new_ctx->rsp += stack_to_pop;
/* --- Native frame: unwind through the current LMF entry instead. --- */
654 if (((guint64)(*lmf)->previous_lmf) & 1) {
655 /* This LMF has the rip field set */
657 } else if ((*lmf)->rsp == 0) {
662 * The rsp field is set just before the call which transitioned to native
663 * code. Obtain the rip from the stack.
665 rip = *(guint64*)((*lmf)->rsp - sizeof (gpointer));
668 ji = mono_jit_info_table_find (domain, (gpointer)rip);
673 /* Trampoline lmf frame */
/* No JIT info available: synthesize a MonoJitInfo in @res for the frame. */
674 memset (res, 0, sizeof (MonoJitInfo));
675 res->method = (*lmf)->method;
679 new_ctx->rbp = (*lmf)->rbp;
680 new_ctx->rsp = (*lmf)->rsp;
682 new_ctx->rbx = (*lmf)->rbx;
683 new_ctx->r12 = (*lmf)->r12;
684 new_ctx->r13 = (*lmf)->r13;
685 new_ctx->r14 = (*lmf)->r14;
686 new_ctx->r15 = (*lmf)->r15;
688 *lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~1);
690 return ji ? ji : res;
697 * mono_arch_handle_exception:
699 * @ctx: saved processor state
700 * @obj: the exception object
/* Converts the raw signal context to a MonoContext, runs the exception
 * machinery, and writes the (possibly updated) state back so the signal
 * handler resumes at the chosen location. */
703 mono_arch_handle_exception (void *sigctx, gpointer obj, gboolean test_only)
707 mono_arch_sigctx_to_monoctx (sigctx, &mctx);
709 mono_handle_exception (&mctx, obj, MONO_CONTEXT_GET_IP (&mctx), test_only);
711 mono_arch_monoctx_to_sigctx (&mctx, sigctx);
716 #ifdef MONO_ARCH_USE_SIGACTION
/* Returns the general-register array inside a ucontext_t; the layout of
 * uc_mcontext differs between OSes, hence the two variants. */
717 static inline guint64*
718 gregs_from_ucontext (ucontext_t *ctx)
721 guint64 *gregs = (guint64 *) &ctx->uc_mcontext;
723 guint64 *gregs = (guint64 *) &ctx->uc_mcontext.gregs;
/* Copy the machine registers out of a platform signal context (@sigctx)
 * into the portable MonoContext @mctx.  Without sigaction support,
 * @sigctx is assumed to already be a MonoContext and is copied field
 * by field. */
730 mono_arch_sigctx_to_monoctx (void *sigctx, MonoContext *mctx)
732 #ifdef MONO_ARCH_USE_SIGACTION
733 ucontext_t *ctx = (ucontext_t*)sigctx;
735 guint64 *gregs = gregs_from_ucontext (ctx);
737 mctx->rax = gregs [REG_RAX];
738 mctx->rbx = gregs [REG_RBX];
739 mctx->rcx = gregs [REG_RCX];
740 mctx->rdx = gregs [REG_RDX];
741 mctx->rbp = gregs [REG_RBP];
742 mctx->rsp = gregs [REG_RSP];
743 mctx->rsi = gregs [REG_RSI];
744 mctx->rdi = gregs [REG_RDI];
745 mctx->rip = gregs [REG_RIP];
746 mctx->r12 = gregs [REG_R12];
747 mctx->r13 = gregs [REG_R13];
748 mctx->r14 = gregs [REG_R14];
749 mctx->r15 = gregs [REG_R15];
751 MonoContext *ctx = (MonoContext *)sigctx;
753 mctx->rax = ctx->rax;
754 mctx->rbx = ctx->rbx;
755 mctx->rcx = ctx->rcx;
756 mctx->rdx = ctx->rdx;
757 mctx->rbp = ctx->rbp;
758 mctx->rsp = ctx->rsp;
759 mctx->rsi = ctx->rsi;
760 mctx->rdi = ctx->rdi;
761 mctx->rip = ctx->rip;
762 mctx->r12 = ctx->r12;
763 mctx->r13 = ctx->r13;
764 mctx->r14 = ctx->r14;
765 mctx->r15 = ctx->r15;
/* Inverse of mono_arch_sigctx_to_monoctx (): write the registers from
 * @mctx back into the platform signal context @sigctx so that returning
 * from the signal handler resumes with this state. */
770 mono_arch_monoctx_to_sigctx (MonoContext *mctx, void *sigctx)
772 #ifdef MONO_ARCH_USE_SIGACTION
773 ucontext_t *ctx = (ucontext_t*)sigctx;
775 guint64 *gregs = gregs_from_ucontext (ctx);
777 gregs [REG_RAX] = mctx->rax;
778 gregs [REG_RBX] = mctx->rbx;
779 gregs [REG_RCX] = mctx->rcx;
780 gregs [REG_RDX] = mctx->rdx;
781 gregs [REG_RBP] = mctx->rbp;
782 gregs [REG_RSP] = mctx->rsp;
783 gregs [REG_RSI] = mctx->rsi;
784 gregs [REG_RDI] = mctx->rdi;
785 gregs [REG_RIP] = mctx->rip;
786 gregs [REG_R12] = mctx->r12;
787 gregs [REG_R13] = mctx->r13;
788 gregs [REG_R14] = mctx->r14;
789 gregs [REG_R15] = mctx->r15;
791 MonoContext *ctx = (MonoContext *)sigctx;
793 ctx->rax = mctx->rax;
794 ctx->rbx = mctx->rbx;
795 ctx->rcx = mctx->rcx;
796 ctx->rdx = mctx->rdx;
797 ctx->rbp = mctx->rbp;
798 ctx->rsp = mctx->rsp;
799 ctx->rsi = mctx->rsi;
800 ctx->rdi = mctx->rdi;
801 ctx->rip = mctx->rip;
802 ctx->r12 = mctx->r12;
803 ctx->r13 = mctx->r13;
804 ctx->r14 = mctx->r14;
805 ctx->r15 = mctx->r15;
/* Extract the faulting instruction pointer from a signal context. */
810 mono_arch_ip_from_context (void *sigctx)
813 #ifdef MONO_ARCH_USE_SIGACTION
815 ucontext_t *ctx = (ucontext_t*)sigctx;
817 guint64 *gregs = gregs_from_ucontext (ctx);
819 return (gpointer)gregs [REG_RIP];
821 MonoContext *ctx = sigctx;
822 return (gpointer)ctx->rip;
/* Re-arm the soft stack-overflow guard pages after the stack has unwound. */
827 restore_soft_guard_pages (void)
829 MonoJitTlsData *jit_tls = TlsGetValue (mono_jit_tls_id);
830 if (jit_tls->stack_ovf_guard_base)
831 mono_mprotect (jit_tls->stack_ovf_guard_base, jit_tls->stack_ovf_guard_size, MONO_MMAP_NONE);
835 * this function modifies mctx so that when it is restored, it
836 * won't execute starting at mctx.eip, but in a function that
837 * will restore the protection on the soft-guard pages and return back to
838 * continue at mctx.eip.
841 prepare_for_guard_pages (MonoContext *mctx)
844 sp = (gpointer)(mctx->rsp);
846 /* the return addr */
/* Push the original rip as a return address, then divert execution to
 * restore_soft_guard_pages (), which will "return" to it. */
847 sp [0] = (gpointer)(mctx->rip);
848 mctx->rip = (guint64)restore_soft_guard_pages;
849 mctx->rsp = (guint64)sp;
/* Runs on the normal stack after the altstack signal handler returns:
 * handles the exception and, for stack overflow, re-arms the guard pages
 * before restoring the context. */
853 altstack_handle_and_restore (void *sigctx, gpointer obj, gboolean stack_ovf)
855 void (*restore_context) (MonoContext *);
858 restore_context = mono_get_restore_context ();
859 mono_arch_sigctx_to_monoctx (sigctx, &mctx);
860 mono_handle_exception (&mctx, obj, MONO_CONTEXT_GET_IP (&mctx), FALSE);
862 prepare_for_guard_pages (&mctx);
863 restore_context (&mctx);
/* SIGSEGV handler running on the alternate signal stack.  Builds a call
 * frame on the faulting thread's real stack and redirects the context to
 * altstack_handle_and_restore () so handling continues off the altstack. */
867 mono_arch_handle_altstack_exception (void *sigctx, gpointer fault_addr, gboolean stack_ovf)
869 #ifdef MONO_ARCH_USE_SIGACTION
870 MonoException *exc = NULL;
871 ucontext_t *ctx = (ucontext_t*)sigctx;
872 guint64 *gregs = gregs_from_ucontext (ctx);
873 MonoJitInfo *ji = mono_jit_info_table_find (mono_domain_get (), (gpointer)gregs [REG_RIP]);
878 exc = mono_domain_get ()->stack_overflow_ex;
/* Fault outside managed code: report it as a native crash. */
880 mono_handle_native_sigsegv (SIGSEGV, sigctx);
882 /* setup a call frame on the real stack so that control is returned there
883 * and exception handling can continue.
884 * The frame looks like:
888 * 128 is the size of the red zone
890 frame_size = sizeof (ucontext_t) + sizeof (gpointer) * 4 + 128;
/* 16-byte-align the new stack pointer as the ABI requires. */
893 sp = (gpointer)(gregs [REG_RSP] & ~15);
894 sp = (gpointer)((char*)sp - frame_size);
895 /* the arguments must be aligned */
896 sp [-1] = (gpointer)gregs [REG_RIP];
897 /* may need to adjust pointers in the new struct copy, depending on the OS */
898 memcpy (sp + 4, ctx, sizeof (ucontext_t));
899 /* at the return from the signal handler execution starts in altstack_handle_and_restore() */
900 gregs [REG_RIP] = (unsigned long)altstack_handle_and_restore;
901 gregs [REG_RSP] = (unsigned long)(sp - 1);
/* SysV argument registers: rdi = sigctx copy, rsi = exc, rdx = stack_ovf. */
902 gregs [REG_RDI] = (unsigned long)(sp + 4);
903 gregs [REG_RSI] = (guint64)exc;
904 gregs [REG_RDX] = stack_ovf;
/* Returns the managed return address saved in the LMF by
 * mono_arch_notify_pending_exc () and clears the "rip is set" flag
 * (bit 0 of previous_lmf). */
909 get_original_ip (void)
911 MonoLMF *lmf = mono_get_lmf ();
915 /* Reset the change to previous_lmf */
916 lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf & ~1);
/* Emits (once) a helper that the hijacked return address points to: it
 * fetches the pending thread exception, and either throws it as if from
 * the original call site, or restores state and resumes normally. */
922 get_throw_pending_exception (void)
924 static guint8* start;
925 static gboolean inited = FALSE;
928 gpointer throw_trampoline;
933 start = code = mono_global_codeman_reserve (128);
935 /* We are in the frame of a managed method after a call */
937 * We would like to throw the pending exception in such a way that it looks to
938 * be thrown from the managed method.
941 /* Save registers which might contain the return value of the call */
942 amd64_push_reg (code, AMD64_RAX);
943 amd64_push_reg (code, AMD64_RDX);
/* Also preserve a possible floating-point return value in %xmm0. */
945 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
946 amd64_movsd_membase_reg (code, AMD64_RSP, 0, AMD64_XMM0);
/* Align the stack for the upcoming C calls. */
949 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
951 /* Obtain the pending exception */
952 amd64_mov_reg_imm (code, AMD64_R11, mono_thread_get_and_clear_pending_exception);
953 amd64_call_reg (code, AMD64_R11);
955 /* Check if it is NULL, and branch */
956 amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0);
957 br[0] = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE);
959 /* exc != NULL branch */
961 /* Save the exc on the stack */
962 amd64_push_reg (code, AMD64_RAX);
/* Keep the stack aligned across the next call. */
964 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
966 /* Obtain the original ip and clear the flag in previous_lmf */
967 amd64_mov_reg_imm (code, AMD64_R11, get_original_ip);
968 amd64_call_reg (code, AMD64_R11);
/* Reload the saved exception object from the stack. */
971 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, 8, 8);
973 /* Pop saved stuff from the stack */
974 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 6 * 8);
976 /* Setup arguments for the throw trampoline */
978 amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_R11, 8);
979 /* The trampoline expects the caller ip to be pushed on the stack */
980 amd64_push_reg (code, AMD64_RAX);
982 /* Call the throw trampoline */
983 throw_trampoline = mono_get_throw_exception ();
984 amd64_mov_reg_imm (code, AMD64_R11, throw_trampoline);
985 /* We use a jump instead of a call so we can push the original ip on the stack */
986 amd64_jump_reg (code, AMD64_R11);
988 /* ex == NULL branch */
989 mono_amd64_patch (br [0], code);
991 /* Obtain the original ip and clear the flag in previous_lmf */
992 amd64_mov_reg_imm (code, AMD64_R11, get_original_ip);
993 amd64_call_reg (code, AMD64_R11);
994 amd64_mov_reg_reg (code, AMD64_R11, AMD64_RAX, 8);
996 /* Restore registers */
997 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
998 amd64_movsd_reg_membase (code, AMD64_XMM0, AMD64_RSP, 0);
999 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
1000 amd64_pop_reg (code, AMD64_RDX);
1001 amd64_pop_reg (code, AMD64_RAX);
1003 /* Return to original code */
1004 amd64_jump_reg (code, AMD64_R11);
1006 g_assert ((code - start) < 128);
1014 * Called when a thread receives an async exception while executing unmanaged code.
1015 * Instead of checking for this exception in the managed-to-native wrapper, we hijack
1016 * the return address on the stack to point to a helper routine which throws the
1020 mono_arch_notify_pending_exc (void)
1022 MonoLMF *lmf = mono_get_lmf ();
/* Bit 0 of previous_lmf set means lmf->rip is already valid (hijacked)
 * or this is a trampoline LMF entry: nothing to do. */
1028 if ((guint64)lmf->previous_lmf & 1)
1029 /* Already hijacked or trampoline LMF entry */
1032 /* lmf->rsp is set just before making the call which transitions to unmanaged code */
1033 lmf->rip = *(guint64*)(lmf->rsp - 8);
1034 /* Signal that lmf->rip is set */
1035 lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf | 1);
/* Redirect the native call's return address to the throw helper. */
1037 *(gpointer*)(lmf->rsp - 8) = get_throw_pending_exception ();
1040 #ifdef PLATFORM_WIN32
1043 * The mono_arch_unwindinfo* methods are used to build and add
1044 * function table info for each emitted method from mono. On Winx64
1045 * the seh handler will not be called if the mono methods are not
1046 * added to the function table.
1048 * We should not need to add non-volatile register info to the
1049 * table since mono stores that info elsewhere. (Except for the register
/* Fixed capacity of the UnwindCode array; codes are filled from the end
 * (see the MONO_MAX_UNWIND_CODES - count indexing below). */
1053 #define MONO_MAX_UNWIND_CODES 22
/* Mirrors the Win64 UNWIND_CODE slot layout (offset/op/info bitfields). */
1055 typedef union _UNWIND_CODE {
1058 guchar UnwindOp : 4;
1061 gushort FrameOffset;
1062 } UNWIND_CODE, *PUNWIND_CODE;
/* Mirrors the Win64 UNWIND_INFO header, with a mono-sized code array. */
1064 typedef struct _UNWIND_INFO {
1067 guchar SizeOfProlog;
1068 guchar CountOfCodes;
1069 guchar FrameRegister : 4;
1070 guchar FrameOffset : 4;
1071 /* custom size for mono allowing for */
1072 /*UWOP_PUSH_NONVOL ebp offset = 21*/
1073 /*UWOP_ALLOC_LARGE : requires 2 or 3 offset = 20*/
1074 /*UWOP_SET_FPREG : requires 2 offset = 17*/
1075 /*UWOP_PUSH_NONVOL offset = 15-0*/
1076 UNWIND_CODE UnwindCode[MONO_MAX_UNWIND_CODES];
1078 /* UNWIND_CODE MoreUnwindCode[((CountOfCodes + 1) & ~1) - 1];
1080 * OPTIONAL ULONG ExceptionHandler;
1081 * OPTIONAL ULONG FunctionEntry;
1083 * OPTIONAL ULONG ExceptionData[]; */
1084 } UNWIND_INFO, *PUNWIND_INFO;
/* A RUNTIME_FUNCTION table entry plus its unwind info, kept together so
 * both can be copied after the method's code in one piece. */
1088 RUNTIME_FUNCTION runtimeFunction;
1089 UNWIND_INFO unwindInfo;
1090 } MonoUnwindInfo, *PMonoUnwindInfo;
/* Allocate a fresh, zeroed MonoUnwindInfo into *@monoui (unwind format
 * version 1). */
1093 mono_arch_unwindinfo_create (gpointer* monoui)
1095 PMonoUnwindInfo newunwindinfo;
1096 *monoui = newunwindinfo = g_new0 (MonoUnwindInfo, 1);
1097 newunwindinfo->unwindInfo.Version = 1;
/* Record a UWOP_PUSH_NONVOL prolog opcode for @reg at code offset
 * @nextip - @codebegin.  Codes must be added in increasing offset order. */
1101 mono_arch_unwindinfo_add_push_nonvol (gpointer* monoui, gpointer codebegin, gpointer nextip, guchar reg )
1103 PMonoUnwindInfo unwindinfo;
1104 PUNWIND_CODE unwindcode;
1107 mono_arch_unwindinfo_create (monoui);
1109 unwindinfo = (MonoUnwindInfo*)*monoui;
1111 if (unwindinfo->unwindInfo.CountOfCodes >= MONO_MAX_UNWIND_CODES)
1112 g_error ("Larger allocation needed for the unwind information.");
/* The array is filled from the end; the final copy re-packs it from the front. */
1114 codeindex = MONO_MAX_UNWIND_CODES - (++unwindinfo->unwindInfo.CountOfCodes);
1115 unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];
1116 unwindcode->UnwindOp = 0; /*UWOP_PUSH_NONVOL*/
1117 unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin));
1118 unwindcode->OpInfo = reg;
1120 if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
1121 g_error ("Adding unwind info in wrong order.");
1123 unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
/* Record a UWOP_SET_FPREG prolog opcode (establishing @reg as the frame
 * pointer) — this op occupies two UNWIND_CODE slots. */
1127 mono_arch_unwindinfo_add_set_fpreg (gpointer* monoui, gpointer codebegin, gpointer nextip, guchar reg )
1129 PMonoUnwindInfo unwindinfo;
1130 PUNWIND_CODE unwindcode;
1133 mono_arch_unwindinfo_create (monoui);
1135 unwindinfo = (MonoUnwindInfo*)*monoui;
1137 if (unwindinfo->unwindInfo.CountOfCodes + 1 >= MONO_MAX_UNWIND_CODES)
1138 g_error ("Larger allocation needed for the unwind information.");
/* Reserve two slots; the second carries the (zero) frame offset. */
1140 codeindex = MONO_MAX_UNWIND_CODES - (unwindinfo->unwindInfo.CountOfCodes += 2);
1141 unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];
1142 unwindcode->FrameOffset = 0; /*Assuming no frame pointer offset for mono*/
1144 unwindcode->UnwindOp = 3; /*UWOP_SET_FPREG*/
1145 unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin));
1146 unwindcode->OpInfo = reg;
1148 unwindinfo->unwindInfo.FrameRegister = reg;
1150 if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
1151 g_error ("Adding unwind info in wrong order.");
1153 unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
/* Record a stack-allocation prolog opcode for @size bytes: UWOP_ALLOC_SMALL
 * (1 slot, size <= 128), UWOP_ALLOC_LARGE with scaled size (2 slots,
 * size <= 0x7FFF8), or UWOP_ALLOC_LARGE with unscaled size (3 slots). */
1157 mono_arch_unwindinfo_add_alloc_stack (gpointer* monoui, gpointer codebegin, gpointer nextip, guint size )
1159 PMonoUnwindInfo unwindinfo;
1160 PUNWIND_CODE unwindcode;
1164 mono_arch_unwindinfo_create (monoui);
1166 unwindinfo = (MonoUnwindInfo*)*monoui;
1169 g_error ("Stack allocation must be equal to or greater than 0x8.");
1173 else if (size <= 0x7FFF8)
1178 if (unwindinfo->unwindInfo.CountOfCodes + codesneeded > MONO_MAX_UNWIND_CODES)
1179 g_error ("Larger allocation needed for the unwind information.");
1181 codeindex = MONO_MAX_UNWIND_CODES - (unwindinfo->unwindInfo.CountOfCodes += codesneeded);
1182 unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];
1184 if (codesneeded == 1) {
1185 /*The size of the allocation is
1186 (the number in the OpInfo member) times 8 plus 8*/
1187 unwindcode->OpInfo = (size - 8)/8;
1188 unwindcode->UnwindOp = 2; /*UWOP_ALLOC_SMALL*/
1191 if (codesneeded == 3) {
1192 /*the unscaled size of the allocation is recorded
1193 in the next two slots in little-endian format*/
1194 *((unsigned int*)(&unwindcode->FrameOffset)) = size;
1196 unwindcode->OpInfo = 1;
1199 /*the size of the allocation divided by 8
1200 is recorded in the next slot*/
1201 unwindcode->FrameOffset = size/8;
1203 unwindcode->OpInfo = 0;
1206 unwindcode->UnwindOp = 1; /*UWOP_ALLOC_LARGE*/
1209 unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin));
1211 if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
1212 g_error ("Adding unwind info in wrong order.");
1214 unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
/* Bytes needed to store this unwind info after the method code: the struct
 * minus the unused (empty) UnwindCode slots, plus 8 bytes of alignment slack. */
1218 mono_arch_unwindinfo_get_size (gpointer monoui)
1220 PMonoUnwindInfo unwindinfo;
1224 unwindinfo = (MonoUnwindInfo*)monoui;
1225 return (8 + sizeof (MonoUnwindInfo)) -
1226 (sizeof (UNWIND_CODE) * (MONO_MAX_UNWIND_CODES - unwindinfo->unwindInfo.CountOfCodes));
/* Copy the accumulated unwind info to an aligned slot just past the emitted
 * @code (length @code_size), re-packing the end-filled UnwindCode array to
 * the front, then register it with the OS via RtlAddFunctionTable (). */
1230 mono_arch_unwindinfo_install_unwind_info (gpointer* monoui, gpointer code, guint code_size)
1232 PMonoUnwindInfo unwindinfo, targetinfo;
1234 guint64 targetlocation;
1238 unwindinfo = (MonoUnwindInfo*)*monoui;
1239 targetlocation = (guint64)&(((guchar*)code)[code_size]);
1240 targetinfo = (PMonoUnwindInfo) ALIGN_TO(targetlocation, 8);
/* Addresses in RUNTIME_FUNCTION are relative to the @code base. */
1242 unwindinfo->runtimeFunction.EndAddress = code_size;
1243 unwindinfo->runtimeFunction.UnwindData = ((guchar*)&targetinfo->unwindInfo) - ((guchar*)code);
1245 memcpy (targetinfo, unwindinfo, sizeof (MonoUnwindInfo) - (sizeof (UNWIND_CODE) * MONO_MAX_UNWIND_CODES));
1247 codecount = unwindinfo->unwindInfo.CountOfCodes;
/* Move the used codes from the tail of the build array to index 0. */
1249 memcpy (&targetinfo->unwindInfo.UnwindCode[0], &unwindinfo->unwindInfo.UnwindCode[MONO_MAX_UNWIND_CODES-codecount],
1250 sizeof (UNWIND_CODE) * unwindinfo->unwindInfo.CountOfCodes);
1253 g_free (unwindinfo);
1256 RtlAddFunctionTable (&targetinfo->runtimeFunction, 1, (DWORD64)code);