2 * exceptions-amd64.c: exception support for AMD64
5 * Dietmar Maurer (dietmar@ximian.com)
7 * (C) 2001 Ximian, Inc.
14 #ifdef HAVE_UCONTEXT_H
18 #include <mono/arch/amd64/amd64-codegen.h>
19 #include <mono/metadata/appdomain.h>
20 #include <mono/metadata/tabledefs.h>
21 #include <mono/metadata/threads.h>
22 #include <mono/metadata/threads-types.h>
23 #include <mono/metadata/debug-helpers.h>
24 #include <mono/metadata/exception.h>
25 #include <mono/metadata/gc-internal.h>
26 #include <mono/metadata/mono-debug.h>
27 #include <mono/utils/mono-mmap.h>
30 #include "mini-amd64.h"
/* Round VAL up to the next multiple of ALIGN; ALIGN must be a power of two. */
32 #define ALIGN_TO(val,align) (((val) + ((align) - 1)) & ~((align) - 1))
/* Win32 SEH support: one handler slot per hardware exception kind,
 * installed via win32_seh_set_handler(), plus the previously installed
 * top-level filter so win32_seh_cleanup() can restore it. */
35 static MonoW32ExceptionHandler fpe_handler;
36 static MonoW32ExceptionHandler ill_handler;
37 static MonoW32ExceptionHandler segv_handler;
39 static LPTOP_LEVEL_EXCEPTION_FILTER old_handler;
/* Invoke the registered handler for exception kind _ex (fpe/ill/segv) with
 * the converted MonoContext; silently does nothing if no handler is set. */
41 #define W32_SEH_HANDLE_EX(_ex) \
42 if (_ex##_handler) _ex##_handler((int)sctx)
/*
 * seh_handler:
 *
 *   Process-wide Win32 SEH filter. Converts the Win32 CONTEXT into a
 * MonoContext, dispatches to the mono handler registered for the
 * exception code, then copies the (possibly modified) register state
 * back into the CONTEXT so execution resumes wherever the handler
 * pointed the context.
 */
45 * Unhandled Exception Filter
46 * Top-level per-process exception handler.
48 LONG CALLBACK seh_handler(EXCEPTION_POINTERS* ep)
55 res = EXCEPTION_CONTINUE_EXECUTION;
57 er = ep->ExceptionRecord;
58 ctx = ep->ContextRecord;
/* NOTE(review): sctx is heap-allocated here and not freed in the visible
 * code — presumably ownership passes to the handler; confirm no leak. */
59 sctx = g_malloc(sizeof(MonoContext));
61 /* Copy Win32 context to UNIX style context */
/* Route by hardware exception code to the matching mono handler. */
76 switch (er->ExceptionCode) {
77 case EXCEPTION_ACCESS_VIOLATION:
78 W32_SEH_HANDLE_EX(segv);
80 case EXCEPTION_ILLEGAL_INSTRUCTION:
81 W32_SEH_HANDLE_EX(ill);
83 case EXCEPTION_INT_DIVIDE_BY_ZERO:
84 case EXCEPTION_INT_OVERFLOW:
85 case EXCEPTION_FLT_DIVIDE_BY_ZERO:
86 case EXCEPTION_FLT_OVERFLOW:
87 case EXCEPTION_FLT_UNDERFLOW:
88 case EXCEPTION_FLT_INEXACT_RESULT:
89 W32_SEH_HANDLE_EX(fpe);
/* Propagate any register changes made by the handler back to Win32. */
95 /* Copy context back */
100 ctx->Rbx = sctx->rbx;
101 ctx->Rbp = sctx->rbp;
102 ctx->R12 = sctx->r12;
103 ctx->R13 = sctx->r13;
104 ctx->R14 = sctx->r14;
105 ctx->R15 = sctx->r15;
106 ctx->Rip = sctx->rip;
108 /* Volatile But should not matter?*/
109 ctx->Rax = sctx->rax;
110 ctx->Rcx = sctx->rcx;
111 ctx->Rdx = sctx->rdx;
/* Install seh_handler as the process-wide unhandled-exception filter,
 * saving the previous filter so win32_seh_cleanup() can restore it. */
118 void win32_seh_init()
120 old_handler = SetUnhandledExceptionFilter(seh_handler);
/* Restore the exception filter that was active before win32_seh_init(). */
123 void win32_seh_cleanup()
125 if (old_handler) SetUnhandledExceptionFilter(old_handler);
/* Register @handler for one exception category; @type selects which of the
 * fpe/ill/segv slots is written (switch arms not fully visible here). */
128 void win32_seh_set_handler(int type, MonoW32ExceptionHandler handler)
132 fpe_handler = handler;
135 ill_handler = handler;
138 segv_handler = handler;
145 #endif /* PLATFORM_WIN32 */
148 * mono_arch_get_restore_context:
150 * Returns a pointer to a method which restores a previously saved sigcontext.
153 mono_arch_get_restore_context_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
155 guint8 *start = NULL;
158 /* restore_context (MonoContext *ctx) */
162 start = code = mono_global_codeman_reserve (256);
/* Stash the MonoContext pointer in %r11 so the argument register can be
 * clobbered while the other registers are reloaded from the context. */
164 amd64_mov_reg_reg (code, AMD64_R11, AMD64_ARG_REG1, 8);
166 /* Restore all registers except %rip and %r11 */
167 amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rax), 8);
168 amd64_mov_reg_membase (code, AMD64_RCX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rcx), 8);
169 amd64_mov_reg_membase (code, AMD64_RDX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rdx), 8);
170 amd64_mov_reg_membase (code, AMD64_RBX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rbx), 8);
171 amd64_mov_reg_membase (code, AMD64_RBP, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rbp), 8);
172 amd64_mov_reg_membase (code, AMD64_RSI, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rsi), 8);
173 amd64_mov_reg_membase (code, AMD64_RDI, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rdi), 8);
174 //amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r8), 8);
175 //amd64_mov_reg_membase (code, AMD64_R9, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r9), 8);
176 //amd64_mov_reg_membase (code, AMD64_R10, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r10), 8);
177 amd64_mov_reg_membase (code, AMD64_R12, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r12), 8);
178 amd64_mov_reg_membase (code, AMD64_R13, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r13), 8);
179 amd64_mov_reg_membase (code, AMD64_R14, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r14), 8);
180 amd64_mov_reg_membase (code, AMD64_R15, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r15), 8);
/* Under valgrind, load rip before switching rsp so valgrind never sees a
 * read below the new stack pointer. */
182 if (mono_running_on_valgrind ()) {
183 /* Prevent 'Address 0x... is just below the stack ptr.' errors */
184 amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rsp), 8);
185 amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rip), 8);
186 amd64_mov_reg_reg (code, AMD64_RSP, AMD64_R8, 8);
188 amd64_mov_reg_membase (code, AMD64_RSP, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rsp), 8);
189 /* get return address */
190 amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rip), 8);
193 /* jump to the saved IP */
194 amd64_jump_reg (code, AMD64_R11);
196 mono_arch_flush_icache (start, code - start);
198 *code_size = code - start;
204 * mono_arch_get_call_filter:
206 * Returns a pointer to a method which calls an exception filter. We
207 * also use this function to call finally handlers (we pass NULL as
208 * @exc object in this case).
211 mono_arch_get_call_filter_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
220 start = code = mono_global_codeman_reserve (128);
222 /* call_filter (MonoContext *ctx, unsigned long eip) */
225 /* Alloc new frame */
226 amd64_push_reg (code, AMD64_RBP);
227 amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, 8);
/* Spill every callee-saved register so the filter can clobber them and we
 * can still return to our caller with the ABI contract intact. */
229 /* Save callee saved regs */
231 for (i = 0; i < AMD64_NREG; ++i)
232 if (AMD64_IS_CALLEE_SAVED_REG (i)) {
233 amd64_push_reg (code, i);
239 amd64_push_reg (code, AMD64_RBP);
241 /* Make stack misaligned, the call will make it aligned again */
243 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
/* Load the handler's frame pointer and callee-saved registers out of the
 * MonoContext so the filter body executes in its original frame. */
246 amd64_mov_reg_membase (code, AMD64_RBP, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rbp), 8);
247 /* load callee saved regs */
248 amd64_mov_reg_membase (code, AMD64_RBX, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rbx), 8);
249 amd64_mov_reg_membase (code, AMD64_R12, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r12), 8);
250 amd64_mov_reg_membase (code, AMD64_R13, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r13), 8);
251 amd64_mov_reg_membase (code, AMD64_R14, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r14), 8);
252 amd64_mov_reg_membase (code, AMD64_R15, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r15), 8);
253 #ifdef PLATFORM_WIN32
/* rdi/rsi are callee-saved in the Win64 ABI (unlike SysV), so restore them too. */
254 amd64_mov_reg_membase (code, AMD64_RDI, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rdi), 8);
255 amd64_mov_reg_membase (code, AMD64_RSI, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rsi), 8);
258 /* call the handler */
259 amd64_call_reg (code, AMD64_ARG_REG2);
262 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
265 amd64_pop_reg (code, AMD64_RBP);
267 /* Restore callee saved regs */
268 for (i = AMD64_NREG; i >= 0; --i)
269 if (AMD64_IS_CALLEE_SAVED_REG (i))
270 amd64_pop_reg (code, i);
275 g_assert ((code - start) < 128);
277 mono_arch_flush_icache (start, code - start);
279 *code_size = code - start;
/*
 * mono_amd64_throw_exception:
 *
 *   C helper invoked by the throw trampoline. Rebuilds a MonoContext from
 * the register values the trampoline pushed, runs the exception machinery,
 * and restores the resulting context (never returns).
 */
285 * The first few arguments are dummy, to force the other arguments to be passed on
286 * the stack, this avoids overwriting the argument registers in the throw trampoline.
289 mono_amd64_throw_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
290 guint64 dummy5, guint64 dummy6,
291 MonoObject *exc, guint64 rip, guint64 rsp,
292 guint64 rbx, guint64 rbp, guint64 r12, guint64 r13,
293 guint64 r14, guint64 r15, guint64 rdi, guint64 rsi,
294 guint64 rax, guint64 rcx, guint64 rdx,
297 static void (*restore_context) (MonoContext *);
/* Lazily resolve the restore_context trampoline on first throw. */
300 if (!restore_context)
301 restore_context = mono_get_restore_context ();
/* Give the debugger a chance to stop at the throw site first. */
317 if (!rethrow && mono_debugger_throw_exception ((gpointer)(rip - 8), (gpointer)rsp, exc)) {
319 * The debugger wants us to stop on the `throw' instruction.
320 * By the time we get here, it already inserted a breakpoint on
321 * eip - 8 (which is the address of the `mov %r15,%rdi ; callq throw').
326 * In case of a rethrow, the JIT is emitting code like this:
328 * mov 0xffffffffffffffd0(%rbp),%rax'
332 * Here, restore_context() wouldn't restore the %rax register correctly.
336 restore_context (&ctx);
337 g_assert_not_reached ();
340 /* adjust eip so that it point into the call instruction */
/* Reset the stack trace on a first throw so it is rebuilt from here. */
343 if (mono_object_isinst (exc, mono_defaults.exception_class)) {
344 MonoException *mono_ex = (MonoException*)exc;
346 mono_ex->stack_trace = NULL;
348 mono_handle_exception (&ctx, exc, (gpointer)rip, FALSE);
349 restore_context (&ctx);
/* mono_handle_exception() transfers control via restore_context; falling
 * through here would be a bug. */
351 g_assert_not_reached ();
/*
 * get_throw_trampoline:
 *
 *   Emit the native trampoline which captures the caller's register state
 * onto the stack and tail-calls mono_amd64_throw_exception(). The pushes
 * below mirror that function's stack-passed parameter list in reverse.
 * @rethrow selects first-throw vs rethrow semantics.
 */
355 get_throw_trampoline (gboolean rethrow, guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
360 start = code = mono_global_codeman_reserve (64);
/* %r11 = incoming %rsp, used below to recover the caller's rsp and rip. */
366 amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, 8);
369 amd64_push_imm (code, rethrow);
370 amd64_push_reg (code, AMD64_RDX);
371 amd64_push_reg (code, AMD64_RCX);
372 amd64_push_reg (code, AMD64_RAX);
373 amd64_push_reg (code, AMD64_RSI);
374 amd64_push_reg (code, AMD64_RDI);
375 amd64_push_reg (code, AMD64_R15);
376 amd64_push_reg (code, AMD64_R14);
377 amd64_push_reg (code, AMD64_R13);
378 amd64_push_reg (code, AMD64_R12);
379 amd64_push_reg (code, AMD64_RBP);
380 amd64_push_reg (code, AMD64_RBX);
/* Caller rsp = entry rsp + 8 (skip our own return address). */
383 amd64_lea_membase (code, AMD64_RAX, AMD64_R11, 8);
384 amd64_push_reg (code, AMD64_RAX);
/* Caller rip = the return address sitting at [entry rsp]. */
387 amd64_push_membase (code, AMD64_R11, 0);
390 amd64_push_reg (code, AMD64_ARG_REG1);
392 #ifdef PLATFORM_WIN32
/* Win64 passes only 4 args in registers; push dummies so the remaining
 * parameters of mono_amd64_throw_exception land on the stack as expected. */
394 amd64_push_imm (code, 0);
395 amd64_push_imm (code, 0);
396 amd64_push_imm (code, 0);
397 amd64_push_imm (code, 0);
398 amd64_push_imm (code, 0);
399 amd64_push_imm (code, 0);
/* AOT: load the target through a patched RIP-relative slot; JIT: immediate. */
403 *ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_throw_exception");
404 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
406 amd64_mov_reg_imm (code, AMD64_R11, mono_amd64_throw_exception);
408 amd64_call_reg (code, AMD64_R11);
/* The call never returns; a breakpoint catches any fall-through. */
409 amd64_breakpoint (code);
411 mono_arch_flush_icache (start, code - start);
413 g_assert ((code - start) < 64);
415 *code_size = code - start;
421 * mono_arch_get_throw_exception:
423 * Returns a function pointer which can be used to raise
424 * exceptions. The returned function has the following
425 * signature: void (*func) (MonoException *exc);
429 mono_arch_get_throw_exception_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
431 return get_throw_trampoline (FALSE, code_size, ji, aot);
/* Same as mono_arch_get_throw_exception_full, but the generated trampoline
 * rethrows (preserving the original stack trace). */
435 mono_arch_get_rethrow_exception_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
437 return get_throw_trampoline (TRUE, code_size, ji, aot);
/* Stub: throw-by-name is not used on amd64, so the emitted code is a
 * single breakpoint that traps if it is ever reached. */
441 mono_arch_get_throw_exception_by_name_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
446 start = code = mono_global_codeman_reserve (64);
450 /* Not used on amd64 */
451 amd64_breakpoint (code);
453 mono_arch_flush_icache (start, code - start);
455 *code_size = code - start;
461 * mono_arch_get_throw_corlib_exception:
463 * Returns a function pointer which can be used to raise
464 * corlib exceptions. The returned function has the following
465 * signature: void (*func) (guint32 ex_token, guint32 offset);
466 * Here, offset is the offset which needs to be subtracted from the caller IP
467 * to get the IP of the throw. Passing the offset has the advantage that it
468 * needs no relocations in the caller.
471 mono_arch_get_throw_corlib_exception_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
473 static guint8* start;
477 start = code = mono_global_codeman_reserve (64);
/* Save the pc-offset argument; it is popped again after the call below. */
482 amd64_push_reg (code, AMD64_ARG_REG2);
484 /* Call exception_from_token */
485 amd64_mov_reg_reg (code, AMD64_ARG_REG2, AMD64_ARG_REG1, 8);
/* AOT: patched RIP-relative loads; JIT: direct immediates. */
487 *ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_IMAGE, mono_defaults.exception_class->image);
488 amd64_mov_reg_membase (code, AMD64_ARG_REG1, AMD64_RIP, 0, 8);
489 *ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_exception_from_token");
490 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
492 amd64_mov_reg_imm (code, AMD64_ARG_REG1, mono_defaults.exception_class->image);
493 amd64_mov_reg_imm (code, AMD64_R11, mono_exception_from_token);
495 #ifdef PLATFORM_WIN32
/* Win64 ABI requires 32 bytes of shadow space for the callee. */
496 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 32);
498 amd64_call_reg (code, AMD64_R11);
499 #ifdef PLATFORM_WIN32
500 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 32);
/* throw_ip = caller return address - offset argument. */
503 /* Compute throw_ip */
504 amd64_pop_reg (code, AMD64_ARG_REG2);
506 amd64_pop_reg (code, AMD64_ARG_REG3);
507 amd64_alu_reg_reg (code, X86_SUB, AMD64_ARG_REG3, AMD64_ARG_REG2);
509 /* Put the throw_ip at the top of the misaligned stack */
510 amd64_push_reg (code, AMD64_ARG_REG3);
512 throw_ex = (guint64)mono_get_throw_exception ();
514 /* Call throw_exception */
/* %rax holds the MonoException returned by mono_exception_from_token. */
515 amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_RAX, 8);
517 *ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_throw_exception");
518 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
520 amd64_mov_reg_imm (code, AMD64_R11, throw_ex);
522 /* The original IP is on the stack */
/* Jump (not call) so the pushed throw_ip acts as the return address. */
523 amd64_jump_reg (code, AMD64_R11);
525 g_assert ((code - start) < 64);
527 mono_arch_flush_icache (start, code - start);
529 *code_size = code - start;
534 /* mono_arch_find_jit_info:
536 * This function is used to gather information from @ctx. It return the
537 * MonoJitInfo of the corresponding function, unwinds one stack frame and
538 * stores the resulting context into @new_ctx. It also stores a string
539 * describing the stack location into @trace (if not NULL), and modifies
540 * the @lmf if necessary. @native_offset return the IP offset from the
541 * start of the function or -1 if that info is not available.
544 mono_arch_find_jit_info (MonoDomain *domain, MonoJitTlsData *jit_tls, MonoJitInfo *res, MonoJitInfo *prev_ji, MonoContext *ctx,
545 MonoContext *new_ctx, MonoLMF **lmf, gboolean *managed)
548 gpointer ip = MONO_CONTEXT_GET_IP (ctx);
550 /* Avoid costly table lookup during stack overflow */
551 if (prev_ji && (ip > prev_ji->code_start && ((guint8*)ip < ((guint8*)prev_ji->code_start) + prev_ji->code_size)))
554 ji = mono_jit_info_table_find (domain, ip);
/* Managed frame: unwind one frame using DWARF-style cached unwind info.
 * NOTE(review): regs[] is indexed by AMD64_* register numbers including
 * AMD64_RIP — assumes all fit within MONO_MAX_IREGS + 1; confirm. */
562 gssize regs [MONO_MAX_IREGS + 1];
564 guint32 unwind_info_len;
568 if (!ji->method->wrapper_type)
/* AOT methods carry their own unwind blob; JITted ones use the cache. */
572 unwind_info = mono_aot_get_unwind_info (ji, &unwind_info_len);
574 unwind_info = mono_get_cached_unwind_info (ji->used_regs, &unwind_info_len);
576 regs [AMD64_RAX] = new_ctx->rax;
577 regs [AMD64_RBX] = new_ctx->rbx;
578 regs [AMD64_RCX] = new_ctx->rcx;
579 regs [AMD64_RDX] = new_ctx->rdx;
580 regs [AMD64_RBP] = new_ctx->rbp;
581 regs [AMD64_RSP] = new_ctx->rsp;
582 regs [AMD64_RSI] = new_ctx->rsi;
583 regs [AMD64_RDI] = new_ctx->rdi;
584 regs [AMD64_RIP] = new_ctx->rip;
585 regs [AMD64_R12] = new_ctx->r12;
586 regs [AMD64_R13] = new_ctx->r13;
587 regs [AMD64_R14] = new_ctx->r14;
588 regs [AMD64_R15] = new_ctx->r15;
590 mono_unwind_frame (unwind_info, unwind_info_len, ji->code_start,
591 (guint8*)ji->code_start + ji->code_size,
592 ip, regs, MONO_MAX_IREGS + 1, &cfa);
594 new_ctx->rax = regs [AMD64_RAX];
595 new_ctx->rbx = regs [AMD64_RBX];
596 new_ctx->rcx = regs [AMD64_RCX];
597 new_ctx->rdx = regs [AMD64_RDX];
598 new_ctx->rbp = regs [AMD64_RBP];
599 new_ctx->rsp = regs [AMD64_RSP];
600 new_ctx->rsi = regs [AMD64_RSI];
601 new_ctx->rdi = regs [AMD64_RDI];
602 new_ctx->rip = regs [AMD64_RIP];
603 new_ctx->r12 = regs [AMD64_R12];
604 new_ctx->r13 = regs [AMD64_R13];
605 new_ctx->r14 = regs [AMD64_R14];
606 new_ctx->r15 = regs [AMD64_R15];
608 /* The CFA becomes the new SP value */
609 new_ctx->rsp = (gssize)cfa;
/* Discard LMF entries that belong to frames we have already unwound past
 * (their saved rsp is below the current SP). Bit 0 of previous_lmf is a
 * flag, hence the & ~1 when following the chain. */
614 if (*lmf && ((*lmf) != jit_tls->first_lmf) && (MONO_CONTEXT_GET_SP (ctx) >= (gpointer)(*lmf)->rsp)) {
615 /* remove any unused lmf */
616 *lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~1);
619 /* Pop arguments off the stack */
621 MonoJitArgumentInfo *arg_info = g_newa (MonoJitArgumentInfo, mono_method_signature (ji->method)->param_count + 1);
623 guint32 stack_to_pop = mono_arch_get_argument_info (mono_method_signature (ji->method), mono_method_signature (ji->method)->param_count, arg_info);
624 new_ctx->rsp += stack_to_pop;
/* Native frame: reconstruct the context from the LMF instead. */
631 if (((guint64)(*lmf)->previous_lmf) & 1) {
632 /* This LMF has the rip field set */
634 } else if ((*lmf)->rsp == 0) {
639 * The rsp field is set just before the call which transitioned to native
640 * code. Obtain the rip from the stack.
642 rip = *(guint64*)((*lmf)->rsp - sizeof (gpointer));
645 ji = mono_jit_info_table_find (domain, (gpointer)rip);
647 // FIXME: This can happen with multiple appdomains (bug #444383)
652 new_ctx->rbp = (*lmf)->rbp;
653 new_ctx->rsp = (*lmf)->rsp;
655 new_ctx->rbx = (*lmf)->rbx;
656 new_ctx->r12 = (*lmf)->r12;
657 new_ctx->r13 = (*lmf)->r13;
658 new_ctx->r14 = (*lmf)->r14;
659 new_ctx->r15 = (*lmf)->r15;
660 #ifdef PLATFORM_WIN32
/* rdi/rsi are callee-saved on Win64, so the LMF stores them as well. */
661 new_ctx->rdi = (*lmf)->rdi;
662 new_ctx->rsi = (*lmf)->rsi;
665 *lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~1);
667 return ji ? ji : res;
674 * mono_arch_handle_exception:
676 * @ctx: saved processor state
677 * @obj: the exception object
/* Convert the signal context to a MonoContext, run the generic exception
 * machinery, then write the (possibly modified) state back so the signal
 * handler resumes wherever the handler decided. */
680 mono_arch_handle_exception (void *sigctx, gpointer obj, gboolean test_only)
684 mono_arch_sigctx_to_monoctx (sigctx, &mctx);
686 mono_handle_exception (&mctx, obj, MONO_CONTEXT_GET_IP (&mctx), test_only);
688 mono_arch_monoctx_to_sigctx (&mctx, sigctx);
693 #ifdef MONO_ARCH_USE_SIGACTION
/* Return the general-register array inside a ucontext_t; the layout of
 * uc_mcontext differs per OS, hence the two variants. */
694 static inline guint64*
695 gregs_from_ucontext (ucontext_t *ctx)
698 guint64 *gregs = (guint64 *) &ctx->uc_mcontext;
700 guint64 *gregs = (guint64 *) &ctx->uc_mcontext.gregs;
/* Copy the CPU registers out of a signal context into @mctx. With
 * sigaction the source is a ucontext_t; otherwise sigctx is assumed to
 * already be a MonoContext. */
707 mono_arch_sigctx_to_monoctx (void *sigctx, MonoContext *mctx)
709 #ifdef MONO_ARCH_USE_SIGACTION
710 ucontext_t *ctx = (ucontext_t*)sigctx;
712 guint64 *gregs = gregs_from_ucontext (ctx);
714 mctx->rax = gregs [REG_RAX];
715 mctx->rbx = gregs [REG_RBX];
716 mctx->rcx = gregs [REG_RCX];
717 mctx->rdx = gregs [REG_RDX];
718 mctx->rbp = gregs [REG_RBP];
719 mctx->rsp = gregs [REG_RSP];
720 mctx->rsi = gregs [REG_RSI];
721 mctx->rdi = gregs [REG_RDI];
722 mctx->rip = gregs [REG_RIP];
723 mctx->r12 = gregs [REG_R12];
724 mctx->r13 = gregs [REG_R13];
725 mctx->r14 = gregs [REG_R14];
726 mctx->r15 = gregs [REG_R15];
728 MonoContext *ctx = (MonoContext *)sigctx;
730 mctx->rax = ctx->rax;
731 mctx->rbx = ctx->rbx;
732 mctx->rcx = ctx->rcx;
733 mctx->rdx = ctx->rdx;
734 mctx->rbp = ctx->rbp;
735 mctx->rsp = ctx->rsp;
736 mctx->rsi = ctx->rsi;
737 mctx->rdi = ctx->rdi;
738 mctx->rip = ctx->rip;
739 mctx->r12 = ctx->r12;
740 mctx->r13 = ctx->r13;
741 mctx->r14 = ctx->r14;
742 mctx->r15 = ctx->r15;
/* Inverse of mono_arch_sigctx_to_monoctx: write @mctx's registers back
 * into the signal context so sigreturn resumes with the new state. */
747 mono_arch_monoctx_to_sigctx (MonoContext *mctx, void *sigctx)
749 #ifdef MONO_ARCH_USE_SIGACTION
750 ucontext_t *ctx = (ucontext_t*)sigctx;
752 guint64 *gregs = gregs_from_ucontext (ctx);
754 gregs [REG_RAX] = mctx->rax;
755 gregs [REG_RBX] = mctx->rbx;
756 gregs [REG_RCX] = mctx->rcx;
757 gregs [REG_RDX] = mctx->rdx;
758 gregs [REG_RBP] = mctx->rbp;
759 gregs [REG_RSP] = mctx->rsp;
760 gregs [REG_RSI] = mctx->rsi;
761 gregs [REG_RDI] = mctx->rdi;
762 gregs [REG_RIP] = mctx->rip;
763 gregs [REG_R12] = mctx->r12;
764 gregs [REG_R13] = mctx->r13;
765 gregs [REG_R14] = mctx->r14;
766 gregs [REG_R15] = mctx->r15;
768 MonoContext *ctx = (MonoContext *)sigctx;
770 ctx->rax = mctx->rax;
771 ctx->rbx = mctx->rbx;
772 ctx->rcx = mctx->rcx;
773 ctx->rdx = mctx->rdx;
774 ctx->rbp = mctx->rbp;
775 ctx->rsp = mctx->rsp;
776 ctx->rsi = mctx->rsi;
777 ctx->rdi = mctx->rdi;
778 ctx->rip = mctx->rip;
779 ctx->r12 = mctx->r12;
780 ctx->r13 = mctx->r13;
781 ctx->r14 = mctx->r14;
782 ctx->r15 = mctx->r15;
/* Extract the faulting instruction pointer from a signal context. */
787 mono_arch_ip_from_context (void *sigctx)
790 #ifdef MONO_ARCH_USE_SIGACTION
792 ucontext_t *ctx = (ucontext_t*)sigctx;
794 guint64 *gregs = gregs_from_ucontext (ctx);
796 return (gpointer)gregs [REG_RIP];
798 MonoContext *ctx = sigctx;
799 return (gpointer)ctx->rip;
/* Re-protect the soft stack-overflow guard pages for this thread after a
 * stack-overflow exception has been handled. */
804 restore_soft_guard_pages (void)
806 MonoJitTlsData *jit_tls = TlsGetValue (mono_jit_tls_id);
807 if (jit_tls->stack_ovf_guard_base)
808 mono_mprotect (jit_tls->stack_ovf_guard_base, jit_tls->stack_ovf_guard_size, MONO_MMAP_NONE);
812 * this function modifies mctx so that when it is restored, it
813 * won't execute starting at mctx.eip, but in a function that
814 * will restore the protection on the soft-guard pages and return back to
815 * continue at mctx.eip.
818 prepare_for_guard_pages (MonoContext *mctx)
821 sp = (gpointer)(mctx->rsp);
/* Push the original resume address as a fake return address, then point
 * rip at restore_soft_guard_pages so it runs first and "returns" there. */
823 /* the return addr */
824 sp [0] = (gpointer)(mctx->rip);
825 mctx->rip = (guint64)restore_soft_guard_pages;
826 mctx->rsp = (guint64)sp;
/* Entry point reached after mono_arch_handle_altstack_exception() redirects
 * execution off the signal altstack: handle the exception on the normal
 * stack, optionally re-arm the guard pages, and resume. Never returns. */
830 altstack_handle_and_restore (void *sigctx, gpointer obj, gboolean stack_ovf)
832 void (*restore_context) (MonoContext *);
835 restore_context = mono_get_restore_context ();
836 mono_arch_sigctx_to_monoctx (sigctx, &mctx);
837 mono_handle_exception (&mctx, obj, MONO_CONTEXT_GET_IP (&mctx), FALSE);
839 prepare_for_guard_pages (&mctx);
840 restore_context (&mctx);
/*
 * mono_arch_handle_altstack_exception:
 *
 *   Called from the SIGSEGV handler while running on the signal altstack.
 * Builds a call frame on the faulting thread's real stack and rewrites the
 * signal context so that, when the handler returns, execution continues in
 * altstack_handle_and_restore() with a copy of the ucontext as argument.
 */
844 mono_arch_handle_altstack_exception (void *sigctx, gpointer fault_addr, gboolean stack_ovf)
846 #ifdef MONO_ARCH_USE_SIGACTION
847 MonoException *exc = NULL;
848 ucontext_t *ctx = (ucontext_t*)sigctx;
849 guint64 *gregs = gregs_from_ucontext (ctx);
850 MonoJitInfo *ji = mono_jit_info_table_find (mono_domain_get (), (gpointer)gregs [REG_RIP]);
855 exc = mono_domain_get ()->stack_overflow_ex;
/* Fault outside managed code: report a native crash instead. */
857 mono_handle_native_sigsegv (SIGSEGV, sigctx);
859 /* setup a call frame on the real stack so that control is returned there
860 * and exception handling can continue.
861 * The frame looks like:
865 * 128 is the size of the red zone
867 frame_size = sizeof (ucontext_t) + sizeof (gpointer) * 4 + 128;
/* 16-align the base, then reserve room below the faulting frame's red zone. */
870 sp = (gpointer)(gregs [REG_RSP] & ~15);
871 sp = (gpointer)((char*)sp - frame_size);
872 /* the arguments must be aligned */
873 sp [-1] = (gpointer)gregs [REG_RIP];
874 /* may need to adjust pointers in the new struct copy, depending on the OS */
875 memcpy (sp + 4, ctx, sizeof (ucontext_t));
876 /* at the return from the signal handler execution starts in altstack_handle_and_restore() */
877 gregs [REG_RIP] = (unsigned long)altstack_handle_and_restore;
878 gregs [REG_RSP] = (unsigned long)(sp - 1);
/* SysV argument registers for altstack_handle_and_restore(sigctx, obj, stack_ovf). */
879 gregs [REG_RDI] = (unsigned long)(sp + 4);
880 gregs [REG_RSI] = (guint64)exc;
881 gregs [REG_RDX] = stack_ovf;
/* Return the managed return address hijacked by mono_arch_notify_pending_exc()
 * and clear the "rip is set" flag bit in lmf->previous_lmf. */
886 get_original_ip (void)
888 MonoLMF *lmf = mono_get_lmf ();
892 /* Reset the change to previous_lmf */
893 lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf & ~1);
/*
 * mono_arch_get_throw_pending_exception:
 *
 *   Emit the helper that the hijacked return address (see
 * mono_arch_notify_pending_exc) jumps to. It saves the call's return
 * value, fetches the thread's pending exception, and either throws it as
 * if from the original call site or restores state and resumes normally.
 */
899 mono_arch_get_throw_pending_exception_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
901 guint8 *code, *start;
903 gpointer throw_trampoline;
909 start = code = mono_global_codeman_reserve (128);
911 /* We are in the frame of a managed method after a call */
913 * We would like to throw the pending exception in such a way that it looks to
914 * be thrown from the managed method.
917 /* Save registers which might contain the return value of the call */
918 amd64_push_reg (code, AMD64_RAX);
919 amd64_push_reg (code, AMD64_RDX);
/* Save a possible floating-point return value too (XMM0). */
921 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
922 amd64_movsd_membase_reg (code, AMD64_RSP, 0, AMD64_XMM0);
/* Realign the stack for the upcoming calls. */
925 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
927 /* Obtain the pending exception */
928 amd64_mov_reg_imm (code, AMD64_R11, mono_thread_get_and_clear_pending_exception);
929 amd64_call_reg (code, AMD64_R11);
931 /* Check if it is NULL, and branch */
932 amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0);
933 br[0] = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE);
935 /* exc != NULL branch */
937 /* Save the exc on the stack */
938 amd64_push_reg (code, AMD64_RAX);
/* Keep the stack aligned across the next call. */
940 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
942 /* Obtain the original ip and clear the flag in previous_lmf */
943 amd64_mov_reg_imm (code, AMD64_R11, get_original_ip);
944 amd64_call_reg (code, AMD64_R11);
/* Reload the saved exception object from the stack. */
947 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, 8, 8);
949 /* Pop saved stuff from the stack */
950 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 6 * 8);
952 /* Setup arguments for the throw trampoline */
954 amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_R11, 8);
955 /* The trampoline expects the caller ip to be pushed on the stack */
956 amd64_push_reg (code, AMD64_RAX);
958 /* Call the throw trampoline */
959 throw_trampoline = mono_get_throw_exception ();
960 amd64_mov_reg_imm (code, AMD64_R11, throw_trampoline);
961 /* We use a jump instead of a call so we can push the original ip on the stack */
962 amd64_jump_reg (code, AMD64_R11);
964 /* ex == NULL branch */
965 mono_amd64_patch (br [0], code);
967 /* Obtain the original ip and clear the flag in previous_lmf */
968 amd64_mov_reg_imm (code, AMD64_R11, get_original_ip);
969 amd64_call_reg (code, AMD64_R11);
970 amd64_mov_reg_reg (code, AMD64_R11, AMD64_RAX, 8);
972 /* Restore registers */
973 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
974 amd64_movsd_reg_membase (code, AMD64_XMM0, AMD64_RSP, 0);
975 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
976 amd64_pop_reg (code, AMD64_RDX);
977 amd64_pop_reg (code, AMD64_RAX);
979 /* Return to original code */
980 amd64_jump_reg (code, AMD64_R11);
982 g_assert ((code - start) < 128);
984 *code_size = code - start;
/* Trampoline installed by mono_arch_exceptions_init(); jumped to in place of
 * the hijacked return address below. */
989 static gpointer throw_pending_exception;
992 * Called when a thread receives an async exception while executing unmanaged code.
993 * Instead of checking for this exception in the managed-to-native wrapper, we hijack
994 * the return address on the stack to point to a helper routine which throws the
998 mono_arch_notify_pending_exc (void)
1000 MonoLMF *lmf = mono_get_lmf ();
/* Bit 0 of previous_lmf doubles as the "rip already saved" flag. */
1006 if ((guint64)lmf->previous_lmf & 1)
1007 /* Already hijacked or trampoline LMF entry */
1010 /* lmf->rsp is set just before making the call which transitions to unmanaged code */
1011 lmf->rip = *(guint64*)(lmf->rsp - 8);
1012 /* Signal that lmf->rip is set */
1013 lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf | 1);
/* Overwrite the managed caller's return address with our helper. */
1015 *(gpointer*)(lmf->rsp - 8) = throw_pending_exception;
/* One-time arch init: eagerly build the pending-exception trampoline so
 * later lookups are race-free. */
1019 mono_arch_exceptions_init (void)
1024 /* Call this to avoid initialization races */
1025 throw_pending_exception = mono_arch_get_throw_pending_exception_full (&code_size, &ji, FALSE);
1028 #ifdef PLATFORM_WIN32
1031 * The mono_arch_unwindinfo* methods are used to build and add
1032 * function table info for each emitted method from mono. On Winx64
1033 * the seh handler will not be called if the mono methods are not
1034 * added to the function table.
1036 * We should not need to add non-volatile register info to the
1037 * table since mono stores that info elsewhere. (Except for the register
/* Maximum unwind-code slots reserved per method; see the size breakdown in
 * the comments inside UNWIND_INFO below. */
1041 #define MONO_MAX_UNWIND_CODES 22
/* One Win64 unwind operation (layout mirrors winnt.h's UNWIND_CODE). */
1043 typedef union _UNWIND_CODE {
1046 guchar UnwindOp : 4;
1049 gushort FrameOffset;
1050 } UNWIND_CODE, *PUNWIND_CODE;
/* Per-function unwind descriptor (mirrors winnt.h's UNWIND_INFO, with a
 * fixed-size mono-specific code array). */
1052 typedef struct _UNWIND_INFO {
1055 guchar SizeOfProlog;
1056 guchar CountOfCodes;
1057 guchar FrameRegister : 4;
1058 guchar FrameOffset : 4;
1059 /* custom size for mono allowing for */
1060 /*UWOP_PUSH_NONVOL ebp offset = 21*/
1061 /*UWOP_ALLOC_LARGE : requires 2 or 3 offset = 20*/
1062 /*UWOP_SET_FPREG : requires 2 offset = 17*/
1063 /*UWOP_PUSH_NONVOL offset = 15-0*/
1064 UNWIND_CODE UnwindCode[MONO_MAX_UNWIND_CODES];
1066 /* UNWIND_CODE MoreUnwindCode[((CountOfCodes + 1) & ~1) - 1];
1068 * OPTIONAL ULONG ExceptionHandler;
1069 * OPTIONAL ULONG FunctionEntry;
1071 * OPTIONAL ULONG ExceptionData[]; */
1072 } UNWIND_INFO, *PUNWIND_INFO;
/* RUNTIME_FUNCTION + unwind info kept together so both can be copied after
 * the method's code and registered with the OS unwinder. */
1076 RUNTIME_FUNCTION runtimeFunction;
1077 UNWIND_INFO unwindInfo;
1078 } MonoUnwindInfo, *PMonoUnwindInfo;
/* Allocate a zeroed MonoUnwindInfo into *@monoui (caller owns it; it is
 * freed by mono_arch_unwindinfo_install_unwind_info). */
1081 mono_arch_unwindinfo_create (gpointer* monoui)
1083 PMonoUnwindInfo newunwindinfo;
1084 *monoui = newunwindinfo = g_new0 (MonoUnwindInfo, 1);
1085 newunwindinfo->unwindInfo.Version = 1;
/* Record a UWOP_PUSH_NONVOL for @reg at prolog offset @nextip-@codebegin.
 * Codes are written back-to-front in the fixed array; entries must be added
 * in increasing prolog-offset order. */
1089 mono_arch_unwindinfo_add_push_nonvol (gpointer* monoui, gpointer codebegin, gpointer nextip, guchar reg )
1091 PMonoUnwindInfo unwindinfo;
1092 PUNWIND_CODE unwindcode;
1095 mono_arch_unwindinfo_create (monoui);
1097 unwindinfo = (MonoUnwindInfo*)*monoui;
1099 if (unwindinfo->unwindInfo.CountOfCodes >= MONO_MAX_UNWIND_CODES)
1100 g_error ("Larger allocation needed for the unwind information.");
1102 codeindex = MONO_MAX_UNWIND_CODES - (++unwindinfo->unwindInfo.CountOfCodes);
1103 unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];
1104 unwindcode->UnwindOp = 0; /*UWOP_PUSH_NONVOL*/
1105 unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin));
1106 unwindcode->OpInfo = reg;
1108 if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
1109 g_error ("Adding unwind info in wrong order.");
1111 unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
/* Record a UWOP_SET_FPREG establishing @reg as the frame pointer.
 * This op occupies two UNWIND_CODE slots (hence CountOfCodes += 2). */
1115 mono_arch_unwindinfo_add_set_fpreg (gpointer* monoui, gpointer codebegin, gpointer nextip, guchar reg )
1117 PMonoUnwindInfo unwindinfo;
1118 PUNWIND_CODE unwindcode;
1121 mono_arch_unwindinfo_create (monoui);
1123 unwindinfo = (MonoUnwindInfo*)*monoui;
1125 if (unwindinfo->unwindInfo.CountOfCodes + 1 >= MONO_MAX_UNWIND_CODES)
1126 g_error ("Larger allocation needed for the unwind information.");
1128 codeindex = MONO_MAX_UNWIND_CODES - (unwindinfo->unwindInfo.CountOfCodes += 2);
1129 unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];
1130 unwindcode->FrameOffset = 0; /*Assuming no frame pointer offset for mono*/
1132 unwindcode->UnwindOp = 3; /*UWOP_SET_FPREG*/
1133 unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin));
1134 unwindcode->OpInfo = reg;
1136 unwindinfo->unwindInfo.FrameRegister = reg;
1138 if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
1139 g_error ("Adding unwind info in wrong order.");
1141 unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
/* Record a stack allocation of @size bytes in the prolog. Chooses the
 * encoding by size: UWOP_ALLOC_SMALL (1 slot, <= 128 bytes),
 * UWOP_ALLOC_LARGE OpInfo=0 (2 slots, scaled /8, <= 0x7FFF8) or
 * UWOP_ALLOC_LARGE OpInfo=1 (3 slots, unscaled 32-bit size). */
1145 mono_arch_unwindinfo_add_alloc_stack (gpointer* monoui, gpointer codebegin, gpointer nextip, guint size )
1147 PMonoUnwindInfo unwindinfo;
1148 PUNWIND_CODE unwindcode;
1152 mono_arch_unwindinfo_create (monoui);
1154 unwindinfo = (MonoUnwindInfo*)*monoui;
1157 g_error ("Stack allocation must be equal to or greater than 0x8.");
1161 else if (size <= 0x7FFF8)
1166 if (unwindinfo->unwindInfo.CountOfCodes + codesneeded > MONO_MAX_UNWIND_CODES)
1167 g_error ("Larger allocation needed for the unwind information.");
1169 codeindex = MONO_MAX_UNWIND_CODES - (unwindinfo->unwindInfo.CountOfCodes += codesneeded);
1170 unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];
1172 if (codesneeded == 1) {
1173 /*The size of the allocation is
1174 (the number in the OpInfo member) times 8 plus 8*/
1175 unwindcode->OpInfo = (size - 8)/8;
1176 unwindcode->UnwindOp = 2; /*UWOP_ALLOC_SMALL*/
1179 if (codesneeded == 3) {
1180 /*the unscaled size of the allocation is recorded
1181 in the next two slots in little-endian format*/
1182 *((unsigned int*)(&unwindcode->FrameOffset)) = size;
1184 unwindcode->OpInfo = 1;
1187 /*the size of the allocation divided by 8
1188 is recorded in the next slot*/
1189 unwindcode->FrameOffset = size/8;
1191 unwindcode->OpInfo = 0;
1194 unwindcode->UnwindOp = 1; /*UWOP_ALLOC_LARGE*/
1197 unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin));
1199 if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
1200 g_error ("Adding unwind info in wrong order.");
1202 unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
/* Bytes needed after the method body for the trimmed unwind info: the
 * full struct minus the unused tail of the fixed code array, plus 8 for
 * alignment padding. */
1206 mono_arch_unwindinfo_get_size (gpointer monoui)
1208 PMonoUnwindInfo unwindinfo;
1212 unwindinfo = (MonoUnwindInfo*)monoui;
1213 return (8 + sizeof (MonoUnwindInfo)) -
1214 (sizeof (UNWIND_CODE) * (MONO_MAX_UNWIND_CODES - unwindinfo->unwindInfo.CountOfCodes));
/* OS-unwinder callback (see RtlInstallFunctionTableCallback): map a PC to
 * the RUNTIME_FUNCTION stored (8-aligned) just past the JITted method. */
1218 MONO_GET_RUNTIME_FUNCTION_CALLBACK ( DWORD64 ControlPc, IN PVOID Context )
1222 PMonoUnwindInfo targetinfo;
1223 MonoDomain *domain = mono_domain_get ();
1225 ji = mono_jit_info_table_find (domain, (char*)ControlPc);
1229 pos = (guint64)(((char*)ji->code_start) + ji->code_size);
1231 targetinfo = (PMonoUnwindInfo)ALIGN_TO (pos, 8);
/* UnwindData must be relative to the image base passed as Context. */
1233 targetinfo->runtimeFunction.UnwindData = ((DWORD64)&targetinfo->unwindInfo) - ((DWORD64)Context);
1235 return &targetinfo->runtimeFunction;
/*
 * mono_arch_unwindinfo_install_unwind_info:
 *
 *   Copy the accumulated unwind info to its final (8-aligned) location
 * right after the method's code, compacting the back-filled code array to
 * the front, free the builder, and register the region with the OS via a
 * function-table callback. Consumes and frees *@monoui.
 */
1239 mono_arch_unwindinfo_install_unwind_info (gpointer* monoui, gpointer code, guint code_size)
1241 PMonoUnwindInfo unwindinfo, targetinfo;
1243 guint64 targetlocation;
1247 unwindinfo = (MonoUnwindInfo*)*monoui;
1248 targetlocation = (guint64)&(((guchar*)code)[code_size]);
1249 targetinfo = (PMonoUnwindInfo) ALIGN_TO(targetlocation, 8);
1251 unwindinfo->runtimeFunction.EndAddress = code_size;
1252 unwindinfo->runtimeFunction.UnwindData = ((guchar*)&targetinfo->unwindInfo) - ((guchar*)code);
/* Copy the header without the (mostly empty) fixed code array... */
1254 memcpy (targetinfo, unwindinfo, sizeof (MonoUnwindInfo) - (sizeof (UNWIND_CODE) * MONO_MAX_UNWIND_CODES));
1256 codecount = unwindinfo->unwindInfo.CountOfCodes;
/* ...then move the back-filled codes to the start of the target array. */
1258 memcpy (&targetinfo->unwindInfo.UnwindCode[0], &unwindinfo->unwindInfo.UnwindCode[MONO_MAX_UNWIND_CODES-codecount],
1259 sizeof (UNWIND_CODE) * unwindinfo->unwindInfo.CountOfCodes);
1262 g_free (unwindinfo);
/* |0x3 marks the table entry as a callback-based (dynamic) function table. */
1265 RtlInstallFunctionTableCallback (((DWORD64)code) | 0x3, (DWORD64)code, code_size, MONO_GET_RUNTIME_FUNCTION_CALLBACK, code, NULL);