2 * exceptions-amd64.c: exception support for AMD64
5 * Dietmar Maurer (dietmar@ximian.com)
7 * (C) 2001 Ximian, Inc.
14 #ifdef HAVE_UCONTEXT_H
18 #include <mono/arch/amd64/amd64-codegen.h>
19 #include <mono/metadata/appdomain.h>
20 #include <mono/metadata/tabledefs.h>
21 #include <mono/metadata/threads.h>
22 #include <mono/metadata/threads-types.h>
23 #include <mono/metadata/debug-helpers.h>
24 #include <mono/metadata/exception.h>
25 #include <mono/metadata/gc-internal.h>
26 #include <mono/metadata/mono-debug.h>
27 #include <mono/utils/mono-mmap.h>
30 #include "mini-amd64.h"
32 #include "debug-mini.h"
/* Round (val) up to the next multiple of (align); align must be a power of two. */
34 #define ALIGN_TO(val,align) (((val) + ((align) - 1)) & ~((align) - 1))
/* Per-exception-type handlers registered through win32_seh_set_handler (). */
37 static MonoW32ExceptionHandler fpe_handler;
38 static MonoW32ExceptionHandler ill_handler;
39 static MonoW32ExceptionHandler segv_handler;
/* Previously installed top-level filter, restored by win32_seh_cleanup (). */
41 static LPTOP_LEVEL_EXCEPTION_FILTER old_handler;
/* Invoke the handler registered for exception kind _ex (fpe/ill/segv), if any,
 * passing the UNIX-style context built by seh_handler (). */
43 #define W32_SEH_HANDLE_EX(_ex) \
44 if (_ex##_handler) _ex##_handler((int)sctx)
47 * Unhandled Exception Filter
48 * Top-level per-process exception handler.
50 LONG CALLBACK seh_handler(EXCEPTION_POINTERS* ep)
57 res = EXCEPTION_CONTINUE_EXECUTION;
59 er = ep->ExceptionRecord;
60 ctx = ep->ContextRecord;
/* NOTE(review): sctx is heap-allocated per exception; the matching free is not
 * visible in this view -- confirm ownership to rule out a per-exception leak. */
61 sctx = g_malloc(sizeof(MonoContext));
63 /* Copy Win32 context to UNIX style context */
/* Dispatch to the mono-installed handler for this exception code. */
78 switch (er->ExceptionCode) {
79 case EXCEPTION_ACCESS_VIOLATION:
80 W32_SEH_HANDLE_EX(segv);
82 case EXCEPTION_ILLEGAL_INSTRUCTION:
83 W32_SEH_HANDLE_EX(ill);
/* All integer and floating point arithmetic faults share the fpe handler. */
85 case EXCEPTION_INT_DIVIDE_BY_ZERO:
86 case EXCEPTION_INT_OVERFLOW:
87 case EXCEPTION_FLT_DIVIDE_BY_ZERO:
88 case EXCEPTION_FLT_OVERFLOW:
89 case EXCEPTION_FLT_UNDERFLOW:
90 case EXCEPTION_FLT_INEXACT_RESULT:
91 W32_SEH_HANDLE_EX(fpe);
97 /* Copy context back */
100 ctx->Rdi = sctx->rdi;
101 ctx->Rsi = sctx->rsi;
102 ctx->Rbx = sctx->rbx;
103 ctx->Rbp = sctx->rbp;
104 ctx->R12 = sctx->r12;
105 ctx->R13 = sctx->r13;
106 ctx->R14 = sctx->r14;
107 ctx->R15 = sctx->r15;
108 ctx->Rip = sctx->rip;
110 /* Volatile registers -- restoring them should not matter, but copy them back anyway. */
111 ctx->Rax = sctx->rax;
112 ctx->Rcx = sctx->rcx;
113 ctx->Rdx = sctx->rdx;
/* Install seh_handler as the process-wide top-level exception filter,
 * saving the previous filter so win32_seh_cleanup () can restore it. */
120 void win32_seh_init()
122 old_handler = SetUnhandledExceptionFilter(seh_handler);
/* Undo win32_seh_init (): re-install the previously saved filter, if any. */
125 void win32_seh_cleanup()
127 if (old_handler) SetUnhandledExceptionFilter(old_handler);
/* Register @handler for the given exception @type; the matching static slot
 * (fpe/ill/segv) is consulted by seh_handler via W32_SEH_HANDLE_EX. */
130 void win32_seh_set_handler(int type, MonoW32ExceptionHandler handler)
134 fpe_handler = handler;
137 ill_handler = handler;
140 segv_handler = handler;
147 #endif /* PLATFORM_WIN32 */
150 * mono_arch_get_restore_context:
152 * Returns a pointer to a method which restores a previously saved sigcontext.
155 mono_arch_get_restore_context_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
157 guint8 *start = NULL;
160 /* restore_context (MonoContext *ctx) */
164 start = code = mono_global_codeman_reserve (256);
/* Keep the MonoContext pointer in %r11 while the other registers are reloaded. */
166 amd64_mov_reg_reg (code, AMD64_R11, AMD64_ARG_REG1, 8);
168 /* Restore all registers except %rip and %r11 */
169 amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rax), 8);
170 amd64_mov_reg_membase (code, AMD64_RCX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rcx), 8);
171 amd64_mov_reg_membase (code, AMD64_RDX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rdx), 8);
172 amd64_mov_reg_membase (code, AMD64_RBX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rbx), 8);
173 amd64_mov_reg_membase (code, AMD64_RBP, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rbp), 8);
174 amd64_mov_reg_membase (code, AMD64_RSI, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rsi), 8);
175 amd64_mov_reg_membase (code, AMD64_RDI, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rdi), 8);
176 //amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r8), 8);
177 //amd64_mov_reg_membase (code, AMD64_R9, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r9), 8);
178 //amd64_mov_reg_membase (code, AMD64_R10, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r10), 8);
179 amd64_mov_reg_membase (code, AMD64_R12, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r12), 8);
180 amd64_mov_reg_membase (code, AMD64_R13, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r13), 8);
181 amd64_mov_reg_membase (code, AMD64_R14, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r14), 8);
182 amd64_mov_reg_membase (code, AMD64_R15, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r15), 8);
184 if (mono_running_on_valgrind ()) {
185 /* Prevent 'Address 0x... is just below the stack ptr.' errors */
/* Load the new rip into %r11 before switching %rsp, so no context field is
 * read from memory below the new stack pointer. */
186 amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rsp), 8);
187 amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rip), 8);
188 amd64_mov_reg_reg (code, AMD64_RSP, AMD64_R8, 8);
190 amd64_mov_reg_membase (code, AMD64_RSP, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rsp), 8);
191 /* get return address */
192 amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rip), 8);
195 /* jump to the saved IP */
196 amd64_jump_reg (code, AMD64_R11);
198 mono_arch_flush_icache (start, code - start);
200 *code_size = code - start;
206 * mono_arch_get_call_filter:
208 * Returns a pointer to a method which calls an exception filter. We
209 * also use this function to call finally handlers (we pass NULL as
210 * @exc object in this case).
213 mono_arch_get_call_filter_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
222 start = code = mono_global_codeman_reserve (128);
224 /* call_filter (MonoContext *ctx, unsigned long eip) */
227 /* Alloc new frame */
228 amd64_push_reg (code, AMD64_RBP);
229 amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, 8);
231 /* Save callee saved regs */
233 for (i = 0; i < AMD64_NREG; ++i)
234 if (AMD64_IS_CALLEE_SAVED_REG (i)) {
235 amd64_push_reg (code, i);
241 amd64_push_reg (code, AMD64_RBP);
243 /* Make stack misaligned, the call will make it aligned again */
245 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
/* Load the frame pointer and callee-saved registers from @ctx so the filter
 * executes in the register context of the frame that installed it. */
248 amd64_mov_reg_membase (code, AMD64_RBP, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rbp), 8);
249 /* load callee saved regs */
250 amd64_mov_reg_membase (code, AMD64_RBX, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rbx), 8);
251 amd64_mov_reg_membase (code, AMD64_R12, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r12), 8);
252 amd64_mov_reg_membase (code, AMD64_R13, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r13), 8);
253 amd64_mov_reg_membase (code, AMD64_R14, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r14), 8);
254 amd64_mov_reg_membase (code, AMD64_R15, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r15), 8);
255 #ifdef PLATFORM_WIN32
/* On Win64, %rdi and %rsi are callee saved as well, so load them too. */
256 amd64_mov_reg_membase (code, AMD64_RDI, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rdi), 8);
257 amd64_mov_reg_membase (code, AMD64_RSI, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rsi), 8);
260 /* call the handler */
261 amd64_call_reg (code, AMD64_ARG_REG2);
264 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
267 amd64_pop_reg (code, AMD64_RBP);
269 /* Restore callee saved regs */
270 for (i = AMD64_NREG; i >= 0; --i)
271 if (AMD64_IS_CALLEE_SAVED_REG (i))
272 amd64_pop_reg (code, i);
277 g_assert ((code - start) < 128);
279 mono_arch_flush_icache (start, code - start);
281 *code_size = code - start;
287 * The first few arguments are dummy, to force the other arguments to be passed on
288 * the stack, this avoids overwriting the argument registers in the throw trampoline.
291 mono_amd64_throw_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
292 guint64 dummy5, guint64 dummy6,
293 MonoObject *exc, guint64 rip, guint64 rsp,
294 guint64 rbx, guint64 rbp, guint64 r12, guint64 r13,
295 guint64 r14, guint64 r15, guint64 rdi, guint64 rsi,
296 guint64 rax, guint64 rcx, guint64 rdx,
299 static void (*restore_context) (MonoContext *);
/* Cache the restore_context trampoline on first use. */
302 if (!restore_context)
303 restore_context = mono_get_restore_context ();
/* NOTE(review): stack_trace is cleared here, presumably so the throw records a
 * fresh trace -- the guarding condition is not visible in this view; confirm. */
319 if (mono_object_isinst (exc, mono_defaults.exception_class)) {
320 MonoException *mono_ex = (MonoException*)exc;
322 mono_ex->stack_trace = NULL;
/* When running under the mono debugger, give it a chance to handle the
 * exception first, with rip rewound to the start of the call instruction. */
325 if (mono_debug_using_mono_debugger ()) {
326 guint8 buf [16], *code;
328 mono_breakpoint_clean_code (NULL, (gpointer)rip, 8, buf, sizeof (buf));
/* 0xe8 is the opcode of a 5-byte call instruction. */
331 if (buf [3] == 0xe8) {
332 MonoContext ctx_cp = ctx;
333 ctx_cp.rip = rip - 5;
335 if (mono_debugger_handle_exception (&ctx_cp, exc)) {
336 restore_context (&ctx_cp);
337 g_assert_not_reached ();
342 /* adjust eip so that it points into the call instruction */
345 mono_handle_exception (&ctx, exc, (gpointer)rip, FALSE);
346 restore_context (&ctx);
/* restore_context () does not return. */
348 g_assert_not_reached ();
/* Generate the throw/rethrow trampoline: saves the caller's register state
 * onto the stack and calls mono_amd64_throw_exception () with it. */
352 get_throw_trampoline (gboolean rethrow, guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
357 start = code = mono_global_codeman_reserve (64);
/* %r11 = entry %rsp; the caller ip and original sp are derived from it below. */
363 amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, 8);
/* Push the register-state arguments of mono_amd64_throw_exception in reverse order. */
366 amd64_push_imm (code, rethrow);
367 amd64_push_reg (code, AMD64_RDX);
368 amd64_push_reg (code, AMD64_RCX);
369 amd64_push_reg (code, AMD64_RAX);
370 amd64_push_reg (code, AMD64_RSI);
371 amd64_push_reg (code, AMD64_RDI);
372 amd64_push_reg (code, AMD64_R15);
373 amd64_push_reg (code, AMD64_R14);
374 amd64_push_reg (code, AMD64_R13);
375 amd64_push_reg (code, AMD64_R12);
376 amd64_push_reg (code, AMD64_RBP);
377 amd64_push_reg (code, AMD64_RBX);
/* SP at the point of the throw = entry SP plus the return address slot. */
380 amd64_lea_membase (code, AMD64_RAX, AMD64_R11, 8);
381 amd64_push_reg (code, AMD64_RAX);
/* The caller's return address, i.e. the IP the exception is thrown from. */
384 amd64_push_membase (code, AMD64_R11, 0);
/* The exception object. */
387 amd64_push_reg (code, AMD64_ARG_REG1);
389 #ifdef PLATFORM_WIN32
/* NOTE(review): six zero pushes under Win64 -- presumably standing in for the
 * six dummy parameters of mono_amd64_throw_exception; confirm against the ABI. */
391 amd64_push_imm (code, 0);
392 amd64_push_imm (code, 0);
393 amd64_push_imm (code, 0);
394 amd64_push_imm (code, 0);
395 amd64_push_imm (code, 0);
396 amd64_push_imm (code, 0);
/* AOT: load the icall address rip-relative; JIT: embed it as an immediate. */
400 *ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_throw_exception");
401 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
403 amd64_mov_reg_imm (code, AMD64_R11, mono_amd64_throw_exception);
405 amd64_call_reg (code, AMD64_R11);
/* Not reached: mono_amd64_throw_exception does not return. */
406 amd64_breakpoint (code);
408 mono_arch_flush_icache (start, code - start);
410 g_assert ((code - start) < 64);
412 *code_size = code - start;
418 * mono_arch_get_throw_exception:
420 * Returns a function pointer which can be used to raise
421 * exceptions. The returned function has the following
422 * signature: void (*func) (MonoException *exc);
426 mono_arch_get_throw_exception_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
428 return get_throw_trampoline (FALSE, code_size, ji, aot);
/* Like mono_arch_get_throw_exception_full, but passes rethrow == TRUE
 * to the generated trampoline. */
432 mono_arch_get_rethrow_exception_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
434 return get_throw_trampoline (TRUE, code_size, ji, aot);
/* Not used on amd64: returns a stub that simply traps if ever called. */
438 mono_arch_get_throw_exception_by_name_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
443 start = code = mono_global_codeman_reserve (64);
447 /* Not used on amd64 */
448 amd64_breakpoint (code);
450 mono_arch_flush_icache (start, code - start);
452 *code_size = code - start;
458 * mono_arch_get_throw_corlib_exception:
460 * Returns a function pointer which can be used to raise
461 * corlib exceptions. The returned function has the following
462 * signature: void (*func) (guint32 ex_token, guint32 offset);
463 * Here, offset is the offset which needs to be subtracted from the caller IP
464 * to get the IP of the throw. Passing the offset has the advantage that it
465 * needs no relocations in the caller.
468 mono_arch_get_throw_corlib_exception_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
470 static guint8* start;
474 start = code = mono_global_codeman_reserve (64);
/* Save the offset argument across the call to mono_exception_from_token. */
479 amd64_push_reg (code, AMD64_ARG_REG2);
481 /* Call exception_from_token */
482 amd64_mov_reg_reg (code, AMD64_ARG_REG2, AMD64_ARG_REG1, 8);
/* AOT: load the image and icall address rip-relative; JIT: embed immediates. */
484 *ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_IMAGE, mono_defaults.exception_class->image);
485 amd64_mov_reg_membase (code, AMD64_ARG_REG1, AMD64_RIP, 0, 8);
486 *ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_exception_from_token");
487 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
489 amd64_mov_reg_imm (code, AMD64_ARG_REG1, mono_defaults.exception_class->image);
490 amd64_mov_reg_imm (code, AMD64_R11, mono_exception_from_token);
492 #ifdef PLATFORM_WIN32
/* Win64 requires 32 bytes of home (shadow) space for the callee. */
493 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 32);
495 amd64_call_reg (code, AMD64_R11);
496 #ifdef PLATFORM_WIN32
497 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 32);
500 /* Compute throw_ip */
501 amd64_pop_reg (code, AMD64_ARG_REG2);
/* throw_ip = caller ip - offset */
503 amd64_pop_reg (code, AMD64_ARG_REG3);
504 amd64_alu_reg_reg (code, X86_SUB, AMD64_ARG_REG3, AMD64_ARG_REG2);
506 /* Put the throw_ip at the top of the misaligned stack */
507 amd64_push_reg (code, AMD64_ARG_REG3);
509 throw_ex = (guint64)mono_get_throw_exception ();
511 /* Call throw_exception */
/* The exception object returned in %rax becomes the first argument. */
512 amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_RAX, 8);
514 *ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_throw_exception");
515 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
517 amd64_mov_reg_imm (code, AMD64_R11, throw_ex);
519 /* The original IP is on the stack */
520 amd64_jump_reg (code, AMD64_R11);
522 g_assert ((code - start) < 64);
524 mono_arch_flush_icache (start, code - start);
526 *code_size = code - start;
531 /* mono_arch_find_jit_info:
533 * This function is used to gather information from @ctx. It return the
534 * MonoJitInfo of the corresponding function, unwinds one stack frame and
535 * stores the resulting context into @new_ctx. It also stores a string
536 * describing the stack location into @trace (if not NULL), and modifies
537 * the @lmf if necessary. @native_offset return the IP offset from the
538 * start of the function or -1 if that info is not available.
541 mono_arch_find_jit_info (MonoDomain *domain, MonoJitTlsData *jit_tls, MonoJitInfo *res, MonoJitInfo *prev_ji, MonoContext *ctx,
542 MonoContext *new_ctx, MonoLMF **lmf, gboolean *managed)
545 gpointer ip = MONO_CONTEXT_GET_IP (ctx);
547 /* Avoid costly table lookup during stack overflow */
548 if (prev_ji && (ip > prev_ji->code_start && ((guint8*)ip < ((guint8*)prev_ji->code_start) + prev_ji->code_size)))
551 ji = mono_jit_info_table_find (domain, ip);
559 gssize regs [MONO_MAX_IREGS + 1];
561 guint32 unwind_info_len;
565 if (!ji->method->wrapper_type)
/* AOT methods carry their own unwind info; JITted code uses the cached
 * unwind info keyed by ji->used_regs. */
569 unwind_info = mono_aot_get_unwind_info (ji, &unwind_info_len);
571 unwind_info = mono_get_cached_unwind_info (ji->used_regs, &unwind_info_len);
/* Seed the register file from the current context... */
573 regs [AMD64_RAX] = new_ctx->rax;
574 regs [AMD64_RBX] = new_ctx->rbx;
575 regs [AMD64_RCX] = new_ctx->rcx;
576 regs [AMD64_RDX] = new_ctx->rdx;
577 regs [AMD64_RBP] = new_ctx->rbp;
578 regs [AMD64_RSP] = new_ctx->rsp;
579 regs [AMD64_RSI] = new_ctx->rsi;
580 regs [AMD64_RDI] = new_ctx->rdi;
581 regs [AMD64_RIP] = new_ctx->rip;
582 regs [AMD64_R12] = new_ctx->r12;
583 regs [AMD64_R13] = new_ctx->r13;
584 regs [AMD64_R14] = new_ctx->r14;
585 regs [AMD64_R15] = new_ctx->r15;
587 mono_unwind_frame (unwind_info, unwind_info_len, ji->code_start,
588 (guint8*)ji->code_start + ji->code_size,
589 ip, regs, MONO_MAX_IREGS + 1, &cfa);
/* ...and copy the unwound values back into the context. */
591 new_ctx->rax = regs [AMD64_RAX];
592 new_ctx->rbx = regs [AMD64_RBX];
593 new_ctx->rcx = regs [AMD64_RCX];
594 new_ctx->rdx = regs [AMD64_RDX];
595 new_ctx->rbp = regs [AMD64_RBP];
596 new_ctx->rsp = regs [AMD64_RSP];
597 new_ctx->rsi = regs [AMD64_RSI];
598 new_ctx->rdi = regs [AMD64_RDI];
599 new_ctx->rip = regs [AMD64_RIP];
600 new_ctx->r12 = regs [AMD64_R12];
601 new_ctx->r13 = regs [AMD64_R13];
602 new_ctx->r14 = regs [AMD64_R14];
603 new_ctx->r15 = regs [AMD64_R15];
605 /* The CFA becomes the new SP value */
606 new_ctx->rsp = (gssize)cfa;
/* Pop LMF entries that belong to frames at or below the one just unwound. */
611 if (*lmf && ((*lmf) != jit_tls->first_lmf) && (MONO_CONTEXT_GET_SP (ctx) >= (gpointer)(*lmf)->rsp)) {
612 /* remove any unused lmf */
/* The low bit of previous_lmf is a flag; mask it off to get the pointer. */
613 *lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~1);
616 /* Pop arguments off the stack */
618 MonoJitArgumentInfo *arg_info = g_newa (MonoJitArgumentInfo, mono_method_signature (ji->method)->param_count + 1);
620 guint32 stack_to_pop = mono_arch_get_argument_info (mono_method_signature (ji->method), mono_method_signature (ji->method)->param_count, arg_info);
621 new_ctx->rsp += stack_to_pop;
/* No JIT info found for @ip: try to unwind through the LMF (native frames). */
628 if (((guint64)(*lmf)->previous_lmf) & 1) {
629 /* This LMF has the rip field set */
631 } else if ((*lmf)->rsp == 0) {
636 * The rsp field is set just before the call which transitioned to native
637 * code. Obtain the rip from the stack.
639 rip = *(guint64*)((*lmf)->rsp - sizeof (gpointer));
642 ji = mono_jit_info_table_find (domain, (gpointer)rip);
644 // FIXME: This can happen with multiple appdomains (bug #444383)
/* Restore the callee-saved state recorded in the LMF. */
649 new_ctx->rbp = (*lmf)->rbp;
650 new_ctx->rsp = (*lmf)->rsp;
652 new_ctx->rbx = (*lmf)->rbx;
653 new_ctx->r12 = (*lmf)->r12;
654 new_ctx->r13 = (*lmf)->r13;
655 new_ctx->r14 = (*lmf)->r14;
656 new_ctx->r15 = (*lmf)->r15;
657 #ifdef PLATFORM_WIN32
658 new_ctx->rdi = (*lmf)->rdi;
659 new_ctx->rsi = (*lmf)->rsi;
662 *lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~1);
664 return ji ? ji : res;
671 * mono_arch_handle_exception:
673 * @ctx: saved processor state
674 * @obj: the exception object
677 mono_arch_handle_exception (void *sigctx, gpointer obj, gboolean test_only)
/* Convert the signal context to a MonoContext, run exception handling, then
 * convert back so execution resumes wherever the handler set the context to. */
681 mono_arch_sigctx_to_monoctx (sigctx, &mctx);
/* Give an attached mono debugger first chance at the exception. */
683 if (mono_debugger_handle_exception (&mctx, (MonoObject *)obj))
686 mono_handle_exception (&mctx, obj, MONO_CONTEXT_GET_IP (&mctx), test_only);
688 mono_arch_monoctx_to_sigctx (&mctx, sigctx);
693 #ifdef MONO_ARCH_USE_SIGACTION
/* Return the general-purpose register array inside a ucontext_t; the layout
 * of uc_mcontext differs between platforms, hence the two variants. */
694 static inline guint64*
695 gregs_from_ucontext (ucontext_t *ctx)
698 guint64 *gregs = (guint64 *) &ctx->uc_mcontext;
700 guint64 *gregs = (guint64 *) &ctx->uc_mcontext.gregs;
/* Fill @mctx from the signal context @sigctx: a ucontext_t when sigaction is
 * used, otherwise an already-converted MonoContext. */
707 mono_arch_sigctx_to_monoctx (void *sigctx, MonoContext *mctx)
709 #ifdef MONO_ARCH_USE_SIGACTION
710 ucontext_t *ctx = (ucontext_t*)sigctx;
712 guint64 *gregs = gregs_from_ucontext (ctx);
714 mctx->rax = gregs [REG_RAX];
715 mctx->rbx = gregs [REG_RBX];
716 mctx->rcx = gregs [REG_RCX];
717 mctx->rdx = gregs [REG_RDX];
718 mctx->rbp = gregs [REG_RBP];
719 mctx->rsp = gregs [REG_RSP];
720 mctx->rsi = gregs [REG_RSI];
721 mctx->rdi = gregs [REG_RDI];
722 mctx->rip = gregs [REG_RIP];
723 mctx->r12 = gregs [REG_R12];
724 mctx->r13 = gregs [REG_R13];
725 mctx->r14 = gregs [REG_R14];
726 mctx->r15 = gregs [REG_R15];
728 MonoContext *ctx = (MonoContext *)sigctx;
730 mctx->rax = ctx->rax;
731 mctx->rbx = ctx->rbx;
732 mctx->rcx = ctx->rcx;
733 mctx->rdx = ctx->rdx;
734 mctx->rbp = ctx->rbp;
735 mctx->rsp = ctx->rsp;
736 mctx->rsi = ctx->rsi;
737 mctx->rdi = ctx->rdi;
738 mctx->rip = ctx->rip;
739 mctx->r12 = ctx->r12;
740 mctx->r13 = ctx->r13;
741 mctx->r14 = ctx->r14;
742 mctx->r15 = ctx->r15;
/* Write @mctx back into the signal context @sigctx; the inverse of
 * mono_arch_sigctx_to_monoctx (). */
747 mono_arch_monoctx_to_sigctx (MonoContext *mctx, void *sigctx)
749 #ifdef MONO_ARCH_USE_SIGACTION
750 ucontext_t *ctx = (ucontext_t*)sigctx;
752 guint64 *gregs = gregs_from_ucontext (ctx);
754 gregs [REG_RAX] = mctx->rax;
755 gregs [REG_RBX] = mctx->rbx;
756 gregs [REG_RCX] = mctx->rcx;
757 gregs [REG_RDX] = mctx->rdx;
758 gregs [REG_RBP] = mctx->rbp;
759 gregs [REG_RSP] = mctx->rsp;
760 gregs [REG_RSI] = mctx->rsi;
761 gregs [REG_RDI] = mctx->rdi;
762 gregs [REG_RIP] = mctx->rip;
763 gregs [REG_R12] = mctx->r12;
764 gregs [REG_R13] = mctx->r13;
765 gregs [REG_R14] = mctx->r14;
766 gregs [REG_R15] = mctx->r15;
768 MonoContext *ctx = (MonoContext *)sigctx;
770 ctx->rax = mctx->rax;
771 ctx->rbx = mctx->rbx;
772 ctx->rcx = mctx->rcx;
773 ctx->rdx = mctx->rdx;
774 ctx->rbp = mctx->rbp;
775 ctx->rsp = mctx->rsp;
776 ctx->rsi = mctx->rsi;
777 ctx->rdi = mctx->rdi;
778 ctx->rip = mctx->rip;
779 ctx->r12 = mctx->r12;
780 ctx->r13 = mctx->r13;
781 ctx->r14 = mctx->r14;
782 ctx->r15 = mctx->r15;
/* Extract the interrupted instruction pointer from a signal context. */
787 mono_arch_ip_from_context (void *sigctx)
790 #ifdef MONO_ARCH_USE_SIGACTION
792 ucontext_t *ctx = (ucontext_t*)sigctx;
794 guint64 *gregs = gregs_from_ucontext (ctx);
796 return (gpointer)gregs [REG_RIP];
798 MonoContext *ctx = sigctx;
799 return (gpointer)ctx->rip;
/* Re-protect the soft stack guard pages after a stack-overflow exception has
 * been handled; executed via the rip redirection set up in
 * prepare_for_guard_pages (). */
804 restore_soft_guard_pages (void)
806 MonoJitTlsData *jit_tls = TlsGetValue (mono_jit_tls_id);
807 if (jit_tls->stack_ovf_guard_base)
808 mono_mprotect (jit_tls->stack_ovf_guard_base, jit_tls->stack_ovf_guard_size, MONO_MMAP_NONE);
812 * this function modifies mctx so that when it is restored, it
813 * won't execute starting at mctx.eip, but in a function that
814 * will restore the protection on the soft-guard pages and return back to
815 * continue at mctx.eip.
818 prepare_for_guard_pages (MonoContext *mctx)
821 sp = (gpointer)(mctx->rsp);
/* Store the original rip as the return address slot... */
823 /* the return addr */
824 sp [0] = (gpointer)(mctx->rip);
/* ...so restore_soft_guard_pages () "returns" to the interrupted code. */
825 mctx->rip = (guint64)restore_soft_guard_pages;
826 mctx->rsp = (guint64)sp;
/* Entry point on the thread's normal stack after an altstack signal: run
 * exception handling for @obj, re-arm the soft guard pages where needed,
 * then resume via restore_context (). */
830 altstack_handle_and_restore (void *sigctx, gpointer obj, gboolean stack_ovf)
832 void (*restore_context) (MonoContext *);
835 restore_context = mono_get_restore_context ();
836 mono_arch_sigctx_to_monoctx (sigctx, &mctx);
/* Give an attached mono debugger first chance at the exception. */
838 if (mono_debugger_handle_exception (&mctx, (MonoObject *)obj)) {
840 prepare_for_guard_pages (&mctx);
841 restore_context (&mctx);
844 mono_handle_exception (&mctx, obj, MONO_CONTEXT_GET_IP (&mctx), FALSE);
846 prepare_for_guard_pages (&mctx);
847 restore_context (&mctx);
/* SIGSEGV handler running on the signal altstack: decide between a managed
 * StackOverflowException and a native crash, then redirect execution to
 * altstack_handle_and_restore () on the thread's real stack. */
851 mono_arch_handle_altstack_exception (void *sigctx, gpointer fault_addr, gboolean stack_ovf)
853 #ifdef MONO_ARCH_USE_SIGACTION
854 MonoException *exc = NULL;
855 ucontext_t *ctx = (ucontext_t*)sigctx;
856 guint64 *gregs = gregs_from_ucontext (ctx);
857 MonoJitInfo *ji = mono_jit_info_table_find (mono_domain_get (), (gpointer)gregs [REG_RIP]);
/* Use the domain's stack_overflow_ex (presumably preallocated so nothing is
 * allocated while the stack is exhausted -- confirm). */
862 exc = mono_domain_get ()->stack_overflow_ex;
/* Fault outside managed code: report a native SIGSEGV. */
864 mono_handle_native_sigsegv (SIGSEGV, sigctx);
866 /* setup a call frame on the real stack so that control is returned there
867 * and exception handling can continue.
868 * The frame looks like:
872 * 128 is the size of the red zone
874 frame_size = sizeof (ucontext_t) + sizeof (gpointer) * 4 + 128;
/* 16-byte align the faulting frame's SP, then reserve the frame below it. */
877 sp = (gpointer)(gregs [REG_RSP] & ~15);
878 sp = (gpointer)((char*)sp - frame_size);
879 /* the arguments must be aligned */
880 sp [-1] = (gpointer)gregs [REG_RIP];
881 /* may need to adjust pointers in the new struct copy, depending on the OS */
882 memcpy (sp + 4, ctx, sizeof (ucontext_t));
883 /* at the return from the signal handler execution starts in altstack_handle_and_restore() */
884 gregs [REG_RIP] = (unsigned long)altstack_handle_and_restore;
/* %rdi/%rsi/%rdx carry the (sigctx copy, exc, stack_ovf) arguments. */
885 gregs [REG_RSP] = (unsigned long)(sp - 1);
886 gregs [REG_RDI] = (unsigned long)(sp + 4);
887 gregs [REG_RSI] = (guint64)exc;
888 gregs [REG_RDX] = stack_ovf;
/* Helper for the throw_pending_exception trampoline: clear the hijack flag in
 * the LMF (see mono_arch_notify_pending_exc) and yield the original
 * return address recorded there. */
893 mono_amd64_get_original_ip (void)
895 MonoLMF *lmf = mono_get_lmf ();
899 /* Reset the change to previous_lmf */
900 lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf & ~1);
/* Generate the throw_pending_exception helper. It runs via a hijacked return
 * address (see mono_arch_notify_pending_exc): it throws any pending async
 * exception as if from the managed call site, or, when none is pending,
 * returns to the original code with the call's return value intact. */
906 mono_arch_get_throw_pending_exception_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
908 guint8 *code, *start;
910 gpointer throw_trampoline;
914 start = code = mono_global_codeman_reserve (128);
916 /* We are in the frame of a managed method after a call */
918 * We would like to throw the pending exception in such a way that it looks to
919 * be thrown from the managed method.
922 /* Save registers which might contain the return value of the call */
923 amd64_push_reg (code, AMD64_RAX);
924 amd64_push_reg (code, AMD64_RDX);
926 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
927 amd64_movsd_membase_reg (code, AMD64_RSP, 0, AMD64_XMM0);
/* Extra 8-byte adjustment -- presumably to keep the stack aligned for the
 * calls below; confirm against the missing surrounding code. */
930 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
932 /* Obtain the pending exception */
934 *ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_thread_get_and_clear_pending_exception");
935 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
937 amd64_mov_reg_imm (code, AMD64_R11, mono_thread_get_and_clear_pending_exception);
939 amd64_call_reg (code, AMD64_R11);
941 /* Check if it is NULL, and branch */
942 amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0);
943 br[0] = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE);
945 /* exc != NULL branch */
947 /* Save the exc on the stack */
948 amd64_push_reg (code, AMD64_RAX);
950 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
952 /* Obtain the original ip and clear the flag in previous_lmf */
954 *ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_get_original_ip");
955 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
957 amd64_mov_reg_imm (code, AMD64_R11, mono_amd64_get_original_ip);
959 amd64_call_reg (code, AMD64_R11);
/* Reload the exception object saved on the stack above. */
962 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, 8, 8);
964 /* Pop saved stuff from the stack */
965 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 6 * 8);
967 /* Setup arguments for the throw trampoline */
969 amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_R11, 8);
970 /* The trampoline expects the caller ip to be pushed on the stack */
971 amd64_push_reg (code, AMD64_RAX);
973 /* Call the throw trampoline */
/* NOTE(review): the AOT path patches the mono_amd64_throw_exception icall
 * while the JIT path uses the throw trampoline -- confirm intentional. */
975 *ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_throw_exception");
976 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
978 throw_trampoline = mono_get_throw_exception ();
979 amd64_mov_reg_imm (code, AMD64_R11, throw_trampoline);
981 /* We use a jump instead of a call so we can push the original ip on the stack */
982 amd64_jump_reg (code, AMD64_R11);
984 /* ex == NULL branch */
985 mono_amd64_patch (br [0], code);
987 /* Obtain the original ip and clear the flag in previous_lmf */
989 *ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_get_original_ip");
990 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
992 amd64_mov_reg_imm (code, AMD64_R11, mono_amd64_get_original_ip);
994 amd64_call_reg (code, AMD64_R11);
995 amd64_mov_reg_reg (code, AMD64_R11, AMD64_RAX, 8);
997 /* Restore registers */
998 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
999 amd64_movsd_reg_membase (code, AMD64_XMM0, AMD64_RSP, 0);
1000 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
1001 amd64_pop_reg (code, AMD64_RDX);
1002 amd64_pop_reg (code, AMD64_RAX);
1004 /* Return to original code */
1005 amd64_jump_reg (code, AMD64_R11);
1007 g_assert ((code - start) < 128);
1009 *code_size = code - start;
/* Helper trampoline installed by mono_arch_exceptions_init (). */
1014 static gpointer throw_pending_exception;
1017 * Called when a thread receives an async exception while executing unmanaged code.
1018 * Instead of checking for this exception in the managed-to-native wrapper, we hijack
1019 * the return address on the stack to point to a helper routine which throws the
1023 mono_arch_notify_pending_exc (void)
1025 MonoLMF *lmf = mono_get_lmf ();
/* The low bit of previous_lmf marks a hijacked or trampoline LMF entry. */
1031 if ((guint64)lmf->previous_lmf & 1)
1032 /* Already hijacked or trampoline LMF entry */
1035 /* lmf->rsp is set just before making the call which transitions to unmanaged code */
1036 lmf->rip = *(guint64*)(lmf->rsp - 8);
1037 /* Signal that lmf->rip is set */
1038 lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf | 1);
/* Redirect the native frame's return address to the throw helper. */
1040 *(gpointer*)(lmf->rsp - 8) = throw_pending_exception;
/* One-time init: obtain the throw_pending_exception helper, from the AOT
 * image in aot-only mode or by generating it here. */
1044 mono_arch_exceptions_init (void)
1049 if (mono_aot_only) {
1050 throw_pending_exception = mono_aot_get_named_code ("throw_pending_exception");
1052 /* Call this to avoid initialization races */
1053 throw_pending_exception = mono_arch_get_throw_pending_exception_full (&code_size, &ji, FALSE);
1057 #ifdef PLATFORM_WIN32
1060 * The mono_arch_unwindinfo* methods are used to build and add
1061 * function table info for each emitted method from mono. On Winx64
1062 * the seh handler will not be called if the mono methods are not
1063 * added to the function table.
1065 * We should not need to add non-volatile register info to the
1066 * table since mono stores that info elsewhere. (Except for the register
/* Maximum number of UNWIND_CODE slots reserved per method; see the size
 * breakdown in the comments inside UNWIND_INFO below. */
1070 #define MONO_MAX_UNWIND_CODES 22
/* One Win64 unwind operation (bitfield layout dictated by the OS format). */
1072 typedef union _UNWIND_CODE {
1075 guchar UnwindOp : 4;
1078 gushort FrameOffset;
1079 } UNWIND_CODE, *PUNWIND_CODE;
1081 typedef struct _UNWIND_INFO {
1084 guchar SizeOfProlog;
1085 guchar CountOfCodes;
1086 guchar FrameRegister : 4;
1087 guchar FrameOffset : 4;
1088 /* Custom fixed-size unwind-code array for mono, allowing for: */
1089 /*UWOP_PUSH_NONVOL ebp offset = 21*/
1090 /*UWOP_ALLOC_LARGE : requires 2 or 3 offset = 20*/
1091 /*UWOP_SET_FPREG : requires 2 offset = 17*/
1092 /*UWOP_PUSH_NONVOL offset = 15-0*/
1093 UNWIND_CODE UnwindCode[MONO_MAX_UNWIND_CODES];
1095 /* UNWIND_CODE MoreUnwindCode[((CountOfCodes + 1) & ~1) - 1];
1097 * OPTIONAL ULONG ExceptionHandler;
1098 * OPTIONAL ULONG FunctionEntry;
1100 * OPTIONAL ULONG ExceptionData[]; */
1101 } UNWIND_INFO, *PUNWIND_INFO;
/* RUNTIME_FUNCTION plus its UNWIND_INFO, kept together; copied to just past
 * the method's code by mono_arch_unwindinfo_install_unwind_info (). */
1105 RUNTIME_FUNCTION runtimeFunction;
1106 UNWIND_INFO unwindInfo;
1107 } MonoUnwindInfo, *PMonoUnwindInfo;
/* Allocate a zeroed MonoUnwindInfo (format version 1) and store it in *monoui. */
1110 mono_arch_unwindinfo_create (gpointer* monoui)
1112 PMonoUnwindInfo newunwindinfo;
1113 *monoui = newunwindinfo = g_new0 (MonoUnwindInfo, 1);
1114 newunwindinfo->unwindInfo.Version = 1;
/* Record a UWOP_PUSH_NONVOL for @reg at prolog offset @nextip - @codebegin.
 * Codes are filled from the end of UnwindCode[] backwards; the install step
 * later copies the used tail to the front of the final array. */
1118 mono_arch_unwindinfo_add_push_nonvol (gpointer* monoui, gpointer codebegin, gpointer nextip, guchar reg )
1120 PMonoUnwindInfo unwindinfo;
1121 PUNWIND_CODE unwindcode;
1124 mono_arch_unwindinfo_create (monoui);
1126 unwindinfo = (MonoUnwindInfo*)*monoui;
1128 if (unwindinfo->unwindInfo.CountOfCodes >= MONO_MAX_UNWIND_CODES)
1129 g_error ("Larger allocation needed for the unwind information.");
1131 codeindex = MONO_MAX_UNWIND_CODES - (++unwindinfo->unwindInfo.CountOfCodes);
1132 unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];
1133 unwindcode->UnwindOp = 0; /*UWOP_PUSH_NONVOL*/
1134 unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin));
1135 unwindcode->OpInfo = reg;
/* Ops must be added in prolog order, so offsets strictly increase. */
1137 if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
1138 g_error ("Adding unwind info in wrong order.");
1140 unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
/* Record a UWOP_SET_FPREG establishing @reg as the frame pointer; this op
 * occupies two UNWIND_CODE slots. */
1144 mono_arch_unwindinfo_add_set_fpreg (gpointer* monoui, gpointer codebegin, gpointer nextip, guchar reg )
1146 PMonoUnwindInfo unwindinfo;
1147 PUNWIND_CODE unwindcode;
1150 mono_arch_unwindinfo_create (monoui);
1152 unwindinfo = (MonoUnwindInfo*)*monoui;
1154 if (unwindinfo->unwindInfo.CountOfCodes + 1 >= MONO_MAX_UNWIND_CODES)
1155 g_error ("Larger allocation needed for the unwind information.");
1157 codeindex = MONO_MAX_UNWIND_CODES - (unwindinfo->unwindInfo.CountOfCodes += 2);
1158 unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];
1159 unwindcode->FrameOffset = 0; /*Assuming no frame pointer offset for mono*/
1161 unwindcode->UnwindOp = 3; /*UWOP_SET_FPREG*/
1162 unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin));
1163 unwindcode->OpInfo = reg;
1165 unwindinfo->unwindInfo.FrameRegister = reg;
/* Ops must be added in prolog order, so offsets strictly increase. */
1167 if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
1168 g_error ("Adding unwind info in wrong order.");
1170 unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
/* Record a prolog stack allocation of @size bytes: UWOP_ALLOC_SMALL in one
 * slot for small sizes, otherwise UWOP_ALLOC_LARGE using 2 or 3 slots. */
1174 mono_arch_unwindinfo_add_alloc_stack (gpointer* monoui, gpointer codebegin, gpointer nextip, guint size )
1176 PMonoUnwindInfo unwindinfo;
1177 PUNWIND_CODE unwindcode;
1181 mono_arch_unwindinfo_create (monoui);
1183 unwindinfo = (MonoUnwindInfo*)*monoui;
1186 g_error ("Stack allocation must be equal to or greater than 0x8.");
1190 else if (size <= 0x7FFF8)
1195 if (unwindinfo->unwindInfo.CountOfCodes + codesneeded > MONO_MAX_UNWIND_CODES)
1196 g_error ("Larger allocation needed for the unwind information.");
1198 codeindex = MONO_MAX_UNWIND_CODES - (unwindinfo->unwindInfo.CountOfCodes += codesneeded);
1199 unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];
1201 if (codesneeded == 1) {
1202 /*The size of the allocation is
1203 (the number in the OpInfo member) times 8 plus 8*/
1204 unwindcode->OpInfo = (size - 8)/8;
1205 unwindcode->UnwindOp = 2; /*UWOP_ALLOC_SMALL*/
1208 if (codesneeded == 3) {
1209 /*the unscaled size of the allocation is recorded
1210 in the next two slots in little-endian format*/
1211 *((unsigned int*)(&unwindcode->FrameOffset)) = size;
1213 unwindcode->OpInfo = 1;
1216 /*the size of the allocation divided by 8
1217 is recorded in the next slot*/
1218 unwindcode->FrameOffset = size/8;
1220 unwindcode->OpInfo = 0;
1223 unwindcode->UnwindOp = 1; /*UWOP_ALLOC_LARGE*/
1226 unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin));
/* Ops must be added in prolog order, so offsets strictly increase. */
1228 if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
1229 g_error ("Adding unwind info in wrong order.");
1231 unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
/* Bytes needed to store this unwind info after the method's code: the full
 * struct minus the unused tail of the code array, plus 8 for alignment. */
1235 mono_arch_unwindinfo_get_size (gpointer monoui)
1237 PMonoUnwindInfo unwindinfo;
1241 unwindinfo = (MonoUnwindInfo*)monoui;
1242 return (8 + sizeof (MonoUnwindInfo)) -
1243 (sizeof (UNWIND_CODE) * (MONO_MAX_UNWIND_CODES - unwindinfo->unwindInfo.CountOfCodes));
1247 MONO_GET_RUNTIME_FUNCTION_CALLBACK ( DWORD64 ControlPc, IN PVOID Context )
1251 PMonoUnwindInfo targetinfo;
1252 MonoDomain *domain = mono_domain_get ();
1254 ji = mono_jit_info_table_find (domain, (char*)ControlPc);
1258 pos = (guint64)(((char*)ji->code_start) + ji->code_size);
1260 targetinfo = (PMonoUnwindInfo)ALIGN_TO (pos, 8);
1262 targetinfo->runtimeFunction.UnwindData = ((DWORD64)&targetinfo->unwindInfo) - ((DWORD64)Context);
1264 return &targetinfo->runtimeFunction;
1268 mono_arch_unwindinfo_install_unwind_info (gpointer* monoui, gpointer code, guint code_size)
1270 PMonoUnwindInfo unwindinfo, targetinfo;
1272 guint64 targetlocation;
1276 unwindinfo = (MonoUnwindInfo*)*monoui;
1277 targetlocation = (guint64)&(((guchar*)code)[code_size]);
1278 targetinfo = (PMonoUnwindInfo) ALIGN_TO(targetlocation, 8);
1280 unwindinfo->runtimeFunction.EndAddress = code_size;
1281 unwindinfo->runtimeFunction.UnwindData = ((guchar*)&targetinfo->unwindInfo) - ((guchar*)code);
1283 memcpy (targetinfo, unwindinfo, sizeof (MonoUnwindInfo) - (sizeof (UNWIND_CODE) * MONO_MAX_UNWIND_CODES));
1285 codecount = unwindinfo->unwindInfo.CountOfCodes;
1287 memcpy (&targetinfo->unwindInfo.UnwindCode[0], &unwindinfo->unwindInfo.UnwindCode[MONO_MAX_UNWIND_CODES-codecount],
1288 sizeof (UNWIND_CODE) * unwindinfo->unwindInfo.CountOfCodes);
1291 g_free (unwindinfo);
1294 RtlInstallFunctionTableCallback (((DWORD64)code) | 0x3, (DWORD64)code, code_size, MONO_GET_RUNTIME_FUNCTION_CALLBACK, code, NULL);
#if MONO_SUPPORT_TASKLETS
/*
 * mono_tasklets_arch_restore:
 *
 *   Return (generating it lazily on first call, then caching it) a
 * trampoline implementing
 *   restore (MonoContinuation *cont, int state, MonoLMF **lmf_addr):
 * it copies the continuation's saved stack back into place with rep movs,
 * restores the callee-saved registers from the continuation's LMF and
 * jumps to the saved return address with STATE in %rax as the return
 * value.
 */
MonoContinuationRestore
mono_tasklets_arch_restore (void)
{
	static guint8* saved = NULL;
	guint8 *code, *start;
	int cont_reg = AMD64_R9; /* register usable on both call conventions */

	if (saved)
		return (MonoContinuationRestore)saved;
	code = start = mono_global_codeman_reserve (64);
	/* the signature is: restore (MonoContinuation *cont, int state, MonoLMF **lmf_addr) */
	/* cont is in AMD64_ARG_REG1 ($rcx or $rdi)
	 * state is in AMD64_ARG_REG2 ($rdx or $rsi)
	 * lmf_addr is in AMD64_ARG_REG3 ($r8 or $rdx)
	 * We move cont to cont_reg since we need both rcx and rdi for the copy
	 * state is moved to $rax so it's setup as the return value and we can overwrite $rsi
	 */
	amd64_mov_reg_reg (code, cont_reg, MONO_AMD64_ARG_REG1, 8);
	amd64_mov_reg_reg (code, AMD64_RAX, MONO_AMD64_ARG_REG2, 8);
	/* setup the copy of the stack */
	amd64_mov_reg_membase (code, AMD64_RCX, cont_reg, G_STRUCT_OFFSET (MonoContinuation, stack_used_size), sizeof (int));
	/* byte count -> 8-byte word count for rep movs */
	amd64_shift_reg_imm (code, X86_SHR, AMD64_RCX, 3);
	x86_cld (code);
	amd64_mov_reg_membase (code, AMD64_RSI, cont_reg, G_STRUCT_OFFSET (MonoContinuation, saved_stack), sizeof (gpointer));
	amd64_mov_reg_membase (code, AMD64_RDI, cont_reg, G_STRUCT_OFFSET (MonoContinuation, return_sp), sizeof (gpointer));
	amd64_prefix (code, X86_REP_PREFIX);
	amd64_movsl (code);

	/* now restore the registers from the LMF */
	amd64_mov_reg_membase (code, AMD64_RCX, cont_reg, G_STRUCT_OFFSET (MonoContinuation, lmf), 8);
	amd64_mov_reg_membase (code, AMD64_RBX, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rbx), 8);
	amd64_mov_reg_membase (code, AMD64_RBP, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rbp), 8);
	amd64_mov_reg_membase (code, AMD64_R12, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, r12), 8);
	amd64_mov_reg_membase (code, AMD64_R13, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, r13), 8);
	amd64_mov_reg_membase (code, AMD64_R14, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, r14), 8);
	amd64_mov_reg_membase (code, AMD64_R15, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, r15), 8);
#ifdef PLATFORM_WIN32
	/* RDI/RSI are callee-saved in the Windows x64 ABI only. */
	amd64_mov_reg_membase (code, AMD64_RDI, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rdi), 8);
	amd64_mov_reg_membase (code, AMD64_RSI, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rsi), 8);
#endif
	amd64_mov_reg_membase (code, AMD64_RSP, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rsp), 8);

	/* restore the lmf chain */
	/*x86_mov_reg_membase (code, X86_ECX, X86_ESP, 12, 4);
	x86_mov_membase_reg (code, X86_ECX, 0, X86_EDX, 4);*/

	/* state is already in rax */
	amd64_jump_membase (code, cont_reg, G_STRUCT_OFFSET (MonoContinuation, return_ip));
	g_assert ((code - start) <= 64);
	/* cache the trampoline for subsequent calls */
	saved = start;
	return (MonoContinuationRestore)saved;
}
#endif /* MONO_SUPPORT_TASKLETS */