/*
 * exceptions-amd64.c: exception support for AMD64
 *
 * Authors:
 *   Dietmar Maurer (dietmar@ximian.com)
 *
 * (C) 2001 Ximian, Inc.
 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */
#ifdef HAVE_UCONTEXT_H
#include <ucontext.h>
#endif
#include <mono/arch/amd64/amd64-codegen.h>
#include <mono/metadata/abi-details.h>
#include <mono/metadata/appdomain.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/threads.h>
#include <mono/metadata/threads-types.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/exception.h>
#include <mono/metadata/gc-internals.h>
#include <mono/metadata/mono-debug.h>
#include <mono/utils/mono-mmap.h>

#include "mini-amd64.h"
#define ALIGN_TO(val,align) (((val) + ((align) - 1)) & ~((align) - 1))
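/*
 * Worked example, for illustration: ALIGN_TO rounds VAL up to the next
 * multiple of ALIGN, assuming ALIGN is a power of two:
 *   ALIGN_TO (13, 8) == 16,  ALIGN_TO (16, 8) == 16,  ALIGN_TO (1, 16) == 16
 */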
#ifdef TARGET_WIN32
static MonoW32ExceptionHandler fpe_handler;
static MonoW32ExceptionHandler ill_handler;
static MonoW32ExceptionHandler segv_handler;

LPTOP_LEVEL_EXCEPTION_FILTER mono_old_win_toplevel_exception_filter;
void *mono_win_vectored_exception_handle;
#define W32_SEH_HANDLE_EX(_ex) \
	if (_ex##_handler) _ex##_handler(0, ep, ctx)
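/*
 * For reference, W32_SEH_HANDLE_EX(segv) expands to
 *
 *   if (segv_handler) segv_handler(0, ep, ctx);
 *
 * i.e. it dispatches to the handler registered through win32_seh_set_handler (), if any.
 */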
static LONG CALLBACK seh_unhandled_exception_filter(EXCEPTION_POINTERS* ep)
{
#ifndef MONO_CROSS_COMPILE
	if (mono_old_win_toplevel_exception_filter) {
		return (*mono_old_win_toplevel_exception_filter)(ep);
	}
#endif

	mono_handle_native_crash ("SIGSEGV", NULL, NULL);

	return EXCEPTION_CONTINUE_SEARCH;
}
/*
 * Unhandled Exception Filter
 * Top-level per-process exception handler.
 */
static LONG CALLBACK seh_vectored_exception_handler(EXCEPTION_POINTERS* ep)
{
	EXCEPTION_RECORD* er;
	CONTEXT* ctx;
	LONG res;
	MonoJitTlsData *jit_tls = mono_tls_get_jit_tls ();

	/* If the thread is not managed by the runtime, return early */
	if (!jit_tls)
		return EXCEPTION_CONTINUE_SEARCH;

	jit_tls->mono_win_chained_exception_needs_run = FALSE;
	res = EXCEPTION_CONTINUE_EXECUTION;

	er = ep->ExceptionRecord;
	ctx = ep->ContextRecord;

	switch (er->ExceptionCode) {
	case EXCEPTION_ACCESS_VIOLATION:
		W32_SEH_HANDLE_EX(segv);
		break;
	case EXCEPTION_ILLEGAL_INSTRUCTION:
		W32_SEH_HANDLE_EX(ill);
		break;
	case EXCEPTION_INT_DIVIDE_BY_ZERO:
	case EXCEPTION_INT_OVERFLOW:
	case EXCEPTION_FLT_DIVIDE_BY_ZERO:
	case EXCEPTION_FLT_OVERFLOW:
	case EXCEPTION_FLT_UNDERFLOW:
	case EXCEPTION_FLT_INEXACT_RESULT:
		W32_SEH_HANDLE_EX(fpe);
		break;
	default:
		jit_tls->mono_win_chained_exception_needs_run = TRUE;
		break;
	}

	if (jit_tls->mono_win_chained_exception_needs_run) {
		/* Don't copy context back if we chained exception
		 * as the handler may have modified the EXCEPTION_POINTERS
		 * directly. We don't pass sigcontext to chained handlers.
		 * Return continue search so the UnhandledExceptionFilter
		 * can correctly chain the exception.
		 */
		res = EXCEPTION_CONTINUE_SEARCH;
	}

	return res;
}
void win32_seh_init()
{
	mono_old_win_toplevel_exception_filter = SetUnhandledExceptionFilter(seh_unhandled_exception_filter);
	mono_win_vectored_exception_handle = AddVectoredExceptionHandler (1, seh_vectored_exception_handler);
}

void win32_seh_cleanup()
{
	guint32 ret = 0;

	if (mono_old_win_toplevel_exception_filter) SetUnhandledExceptionFilter(mono_old_win_toplevel_exception_filter);

	ret = RemoveVectoredExceptionHandler (mono_win_vectored_exception_handle);
	g_assert (ret);
}
void win32_seh_set_handler(int type, MonoW32ExceptionHandler handler)
{
	switch (type) {
	case SIGFPE:
		fpe_handler = handler;
		break;
	case SIGILL:
		ill_handler = handler;
		break;
	case SIGSEGV:
		segv_handler = handler;
		break;
	default:
		break;
	}
}

#endif /* TARGET_WIN32 */
#ifndef DISABLE_JIT

/*
 * mono_arch_get_restore_context:
 *
 * Returns a pointer to a method which restores a previously saved sigcontext.
 */
gpointer
mono_arch_get_restore_context (MonoTrampInfo **info, gboolean aot)
{
	guint8 *start = NULL;
	guint8 *code;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;
	int i, gregs_offset;

	/* restore_context (MonoContext *ctx) */

	start = code = (guint8 *)mono_global_codeman_reserve (256);

	amd64_mov_reg_reg (code, AMD64_R11, AMD64_ARG_REG1, 8);

	/* Restore all registers except %rip and %r11 */
	gregs_offset = MONO_STRUCT_OFFSET (MonoContext, gregs);
	for (i = 0; i < AMD64_NREG; ++i) {
		if (i != AMD64_RIP && i != AMD64_RSP && i != AMD64_R8 && i != AMD64_R9 && i != AMD64_R10 && i != AMD64_R11)
			amd64_mov_reg_membase (code, i, AMD64_R11, gregs_offset + (i * 8), 8);
	}

	/*
	 * The context resides on the stack, in the stack frame of the
	 * caller of this function. The stack pointer that we need to
	 * restore is potentially many stack frames higher up, so the
	 * distance between them can easily be more than the red zone
	 * size. Hence the stack pointer can be restored only after
	 * we have finished loading everything from the context.
	 */
	amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11, gregs_offset + (AMD64_RSP * 8), 8);
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, gregs_offset + (AMD64_RIP * 8), 8);
	amd64_mov_reg_reg (code, AMD64_RSP, AMD64_R8, 8);

	/* jump to the saved IP */
	amd64_jump_reg (code, AMD64_R11);

	mono_arch_flush_icache (start, code - start);
	mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL);

	if (info)
		*info = mono_tramp_info_create ("restore_context", start, code - start, ji, unwind_ops);

	return start;
}
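/*
 * Illustrative use, a sketch only (the runtime normally reaches this
 * code through mono_restore_context (), not by calling the trampoline
 * directly):
 *
 *   typedef void (*RestoreContextFunc) (MonoContext *ctx);
 *   RestoreContextFunc restore;
 *   restore = (RestoreContextFunc)mono_arch_get_restore_context (&info, FALSE);
 *   restore (&ctx);   // does not return; resumes at ctx->gregs [AMD64_RIP]
 */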
/*
 * mono_arch_get_call_filter:
 *
 * Returns a pointer to a method which calls an exception filter. We
 * also use this function to call finally handlers (we pass NULL as
 * @exc object in this case).
 */
gpointer
mono_arch_get_call_filter (MonoTrampInfo **info, gboolean aot)
{
	guint8 *start;
	int i, gregs_offset;
	guint8 *code;
	guint32 pos;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;
	const guint kMaxCodeSize = 128;

	start = code = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize);

	/* call_filter (MonoContext *ctx, unsigned long eip) */
	code = start;

	/* Alloc new frame */
	amd64_push_reg (code, AMD64_RBP);
	amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, 8);

	/* Save callee saved regs */
	pos = 0;
	for (i = 0; i < AMD64_NREG; ++i)
		if (AMD64_IS_CALLEE_SAVED_REG (i)) {
			amd64_push_reg (code, i);
			pos += 8;
		}

	/* Save EBP */
	pos += 8;
	amd64_push_reg (code, AMD64_RBP);

	/* Make stack misaligned, the call will make it aligned again */
	if (! (pos & 8))
		amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);

	gregs_offset = MONO_STRUCT_OFFSET (MonoContext, gregs);

	/* set new EBP */
	amd64_mov_reg_membase (code, AMD64_RBP, AMD64_ARG_REG1, gregs_offset + (AMD64_RBP * 8), 8);
	/* load callee saved regs */
	for (i = 0; i < AMD64_NREG; ++i) {
		if (AMD64_IS_CALLEE_SAVED_REG (i) && i != AMD64_RBP)
			amd64_mov_reg_membase (code, i, AMD64_ARG_REG1, gregs_offset + (i * 8), 8);
	}
	/* load exc register */
	amd64_mov_reg_membase (code, AMD64_RAX, AMD64_ARG_REG1, gregs_offset + (AMD64_RAX * 8), 8);

	/* call the handler */
	amd64_call_reg (code, AMD64_ARG_REG2);

	if (! (pos & 8))
		amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);

	/* restore RBP */
	amd64_pop_reg (code, AMD64_RBP);

	/* Restore callee saved regs */
	for (i = AMD64_NREG - 1; i >= 0; --i)
		if (AMD64_IS_CALLEE_SAVED_REG (i))
			amd64_pop_reg (code, i);

	amd64_leave (code);
	amd64_ret (code);

	g_assert ((code - start) < kMaxCodeSize);

	mono_arch_flush_icache (start, code - start);
	mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL);

	if (info)
		*info = mono_tramp_info_create ("call_filter", start, code - start, ji, unwind_ops);

	return start;
}
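/*
 * Illustrative use, a sketch only (mini-exceptions.c invokes the
 * returned code through a function pointer along these lines; the
 * filter's result comes back in %rax):
 *
 *   typedef int (*CallFilterFunc) (MonoContext *ctx, unsigned long eip);
 *   CallFilterFunc call_filter;
 *   call_filter = (CallFilterFunc)mono_arch_get_call_filter (&info, FALSE);
 *   int res = call_filter (&ctx, handler_ip);   // handler_ip == filter/finally start
 */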
#endif /* !DISABLE_JIT */
/*
 * The first few arguments are dummy, to force the other arguments to be passed on
 * the stack; this avoids overwriting the argument registers in the throw trampoline.
 */
void
mono_amd64_throw_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
							guint64 dummy5, guint64 dummy6,
							MonoContext *mctx, MonoObject *exc, gboolean rethrow)
{
	MonoError error;
	MonoContext ctx;

	/* mctx is on the caller's stack */
	memcpy (&ctx, mctx, sizeof (MonoContext));

	if (mono_object_isinst_checked (exc, mono_defaults.exception_class, &error)) {
		MonoException *mono_ex = (MonoException*)exc;
		if (!rethrow) {
			mono_ex->stack_trace = NULL;
			mono_ex->trace_ips = NULL;
		}
	}
	mono_error_assert_ok (&error);

	/* adjust eip so that it points into the call instruction */
	ctx.gregs [AMD64_RIP] --;

	mono_handle_exception (&ctx, exc);
	mono_restore_context (&ctx);
	g_assert_not_reached ();
}
void
mono_amd64_throw_corlib_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
								   guint64 dummy5, guint64 dummy6,
								   MonoContext *mctx, guint32 ex_token_index, gint64 pc_offset)
{
	guint32 ex_token = MONO_TOKEN_TYPE_DEF | ex_token_index;
	MonoException *ex;

	ex = mono_exception_from_token (mono_defaults.exception_class->image, ex_token);

	mctx->gregs [AMD64_RIP] -= pc_offset;

	/* Negate the ip adjustment done in mono_amd64_throw_exception () */
	mctx->gregs [AMD64_RIP] += 1;

	mono_amd64_throw_exception (dummy1, dummy2, dummy3, dummy4, dummy5, dummy6, mctx, (MonoObject*)ex, FALSE);
}
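/*
 * Worked example with hypothetical addresses, for illustration only:
 * if the throwing call instruction starts at 0x1000 and its return
 * address is 0x1005, the JIT passes pc_offset == 5. RIP then becomes
 * 0x1005 - 5 + 1 == 0x1001, and the RIP-- in mono_amd64_throw_exception ()
 * brings it to 0x1000, i.e. inside the call instruction, which is what
 * the unwinder expects.
 */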
void
mono_amd64_resume_unwind (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
						  guint64 dummy5, guint64 dummy6,
						  MonoContext *mctx, guint32 dummy7, gint64 dummy8)
{
	/* Only the register parameters are valid */
	MonoContext ctx;

	/* mctx is on the caller's stack */
	memcpy (&ctx, mctx, sizeof (MonoContext));

	mono_resume_unwind (&ctx);
}
#ifndef DISABLE_JIT
/*
 * get_throw_trampoline:
 *
 * Generate a call to mono_amd64_throw_exception/
 * mono_amd64_throw_corlib_exception.
 */
static gpointer
get_throw_trampoline (MonoTrampInfo **info, gboolean rethrow, gboolean corlib, gboolean llvm_abs, gboolean resume_unwind, const char *tramp_name, gboolean aot)
{
	guint8* start;
	guint8 *code;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;
	int i, stack_size, arg_offsets [16], ctx_offset, regs_offset, dummy_stack_space;
	const guint kMaxCodeSize = 256;

#ifdef TARGET_WIN32
	dummy_stack_space = 6 * sizeof(mgreg_t);	/* Windows expects stack space allocated for all 6 dummy args. */
#else
	dummy_stack_space = 0;
#endif

	start = code = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize);

	/* The stack is unaligned on entry */
	stack_size = ALIGN_TO (sizeof (MonoContext) + 64 + dummy_stack_space, MONO_ARCH_FRAME_ALIGNMENT) + 8;

	unwind_ops = mono_arch_get_cie_program ();

	/* Alloc frame */
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, stack_size);
	if (info)
		mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, stack_size + 8);

	/*
	 * To hide linux/windows calling convention differences, we pass all arguments on
	 * the stack by passing 6 dummy values in registers.
	 */

	arg_offsets [0] = dummy_stack_space + 0;
	arg_offsets [1] = dummy_stack_space + sizeof(mgreg_t);
	arg_offsets [2] = dummy_stack_space + sizeof(mgreg_t) * 2;
	ctx_offset = dummy_stack_space + sizeof(mgreg_t) * 4;
	regs_offset = ctx_offset + MONO_STRUCT_OFFSET (MonoContext, gregs);

	/* Save registers */
	for (i = 0; i < AMD64_NREG; ++i)
		if (i != AMD64_RSP)
			amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (i * sizeof(mgreg_t)), i, sizeof(mgreg_t));
	/* Save RSP */
	amd64_lea_membase (code, AMD64_RAX, AMD64_RSP, stack_size + sizeof(mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (AMD64_RSP * sizeof(mgreg_t)), AMD64_RAX, sizeof(mgreg_t));
	/* Save IP */
	amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, stack_size, sizeof(mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (AMD64_RIP * sizeof(mgreg_t)), AMD64_RAX, sizeof(mgreg_t));
	/* Set arg1 == ctx */
	amd64_lea_membase (code, AMD64_RAX, AMD64_RSP, ctx_offset);
	amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [0], AMD64_RAX, sizeof(mgreg_t));
	/* Set arg2 == exc/ex_token_index */
	if (resume_unwind)
		amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [1], 0, sizeof(mgreg_t));
	else
		amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [1], AMD64_ARG_REG1, sizeof(mgreg_t));
	/* Set arg3 == rethrow/pc offset */
	if (resume_unwind) {
		amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], 0, sizeof(mgreg_t));
	} else if (corlib) {
		if (llvm_abs)
			/*
			 * The caller doesn't pass in a pc/pc offset, instead we simply use the
			 * caller ip. Negate the pc adjustment done in mono_amd64_throw_corlib_exception ().
			 */
			amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], 1, sizeof(mgreg_t));
		else
			amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [2], AMD64_ARG_REG2, sizeof(mgreg_t));
	} else {
		amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], rethrow, sizeof(mgreg_t));
	}

	if (aot) {
		const char *icall_name;

		if (resume_unwind)
			icall_name = "mono_amd64_resume_unwind";
		else if (corlib)
			icall_name = "mono_amd64_throw_corlib_exception";
		else
			icall_name = "mono_amd64_throw_exception";
		ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, icall_name);
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, resume_unwind ? ((gpointer)mono_amd64_resume_unwind) : (corlib ? (gpointer)mono_amd64_throw_corlib_exception : (gpointer)mono_amd64_throw_exception));
	}
	amd64_call_reg (code, AMD64_R11);
	amd64_breakpoint (code);

	mono_arch_flush_icache (start, code - start);

	g_assert ((code - start) < kMaxCodeSize);

	mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL);

	if (info)
		*info = mono_tramp_info_create (tramp_name, start, code - start, ji, unwind_ops);

	return start;
}
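/*
 * Resulting frame layout, a sketch derived from the offsets above
 * (dummy_stack_space is 48 on Windows, 0 elsewhere):
 *
 *   [rsp + stack_size]            return address pushed by the caller (saved as ctx RIP)
 *   ...
 *   [rsp + ctx_offset]            MonoContext with all registers saved
 *   [rsp + dummy_stack_space+16]  arg3: rethrow flag / pc offset
 *   [rsp + dummy_stack_space+8]   arg2: exc object / ex_token_index
 *   [rsp + dummy_stack_space+0]   arg1: pointer to the MonoContext
 *   [rsp + 0]                     win64 home space for the 6 dummy register args, if any
 */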
/**
 * mono_arch_get_throw_exception:
 *
 * Returns a function pointer which can be used to raise
 * exceptions. The returned function has the following
 * signature: void (*func) (MonoException *exc);
 */
gpointer
mono_arch_get_throw_exception (MonoTrampInfo **info, gboolean aot)
{
	return get_throw_trampoline (info, FALSE, FALSE, FALSE, FALSE, "throw_exception", aot);
}

gpointer
mono_arch_get_rethrow_exception (MonoTrampInfo **info, gboolean aot)
{
	return get_throw_trampoline (info, TRUE, FALSE, FALSE, FALSE, "rethrow_exception", aot);
}
/**
 * mono_arch_get_throw_corlib_exception:
 *
 * Returns a function pointer which can be used to raise
 * corlib exceptions. The returned function has the following
 * signature: void (*func) (guint32 ex_token, guint32 offset);
 * Here, offset is the offset which needs to be subtracted from the caller IP
 * to get the IP of the throw. Passing the offset has the advantage that it
 * needs no relocations in the caller.
 */
gpointer
mono_arch_get_throw_corlib_exception (MonoTrampInfo **info, gboolean aot)
{
	return get_throw_trampoline (info, FALSE, TRUE, FALSE, FALSE, "throw_corlib_exception", aot);
}
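/*
 * Illustrative use, a sketch only (in practice the JIT emits the call;
 * the values below are hypothetical):
 *
 *   typedef void (*ThrowCorlibExcFunc) (guint32 ex_token, guint32 offset);
 *   ThrowCorlibExcFunc throw_corlib;
 *   throw_corlib = (ThrowCorlibExcFunc)mono_arch_get_throw_corlib_exception (&info, FALSE);
 *   throw_corlib (token_index, 5);   // 5 == return address - address of the throwing call
 */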
#endif /* !DISABLE_JIT */
/*
 * mono_arch_unwind_frame:
 *
 * This function is used to gather information from @ctx, and store it in @frame_info.
 * It unwinds one stack frame, and stores the resulting context into @new_ctx. @lmf
 * is modified if needed.
 * Returns TRUE on success, FALSE otherwise.
 */
gboolean
mono_arch_unwind_frame (MonoDomain *domain, MonoJitTlsData *jit_tls,
			 MonoJitInfo *ji, MonoContext *ctx,
			 MonoContext *new_ctx, MonoLMF **lmf,
			 mgreg_t **save_locations,
			 StackFrameInfo *frame)
{
	gpointer ip = MONO_CONTEXT_GET_IP (ctx);
	int i;

	memset (frame, 0, sizeof (StackFrameInfo));
	frame->ji = ji;

	*new_ctx = *ctx;

	if (ji != NULL) {
		mgreg_t regs [MONO_MAX_IREGS + 1];
		guint8 *cfa;
		guint32 unwind_info_len;
		guint8 *unwind_info;
		guint8 *epilog = NULL;

		if (ji->is_trampoline)
			frame->type = FRAME_TYPE_TRAMPOLINE;
		else
			frame->type = FRAME_TYPE_MANAGED;

		unwind_info = mono_jinfo_get_unwind_info (ji, &unwind_info_len);

		frame->unwind_info = unwind_info;
		frame->unwind_info_len = unwind_info_len;

		/*
		printf ("%s %p %p\n", ji->d.method->name, ji->code_start, ip);
		mono_print_unwind_info (unwind_info, unwind_info_len);
		*/
		/* LLVM compiled code doesn't have this info */
		if (ji->has_arch_eh_info)
			epilog = (guint8*)ji->code_start + ji->code_size - mono_jinfo_get_epilog_size (ji);

		for (i = 0; i < AMD64_NREG; ++i)
			regs [i] = new_ctx->gregs [i];

		mono_unwind_frame (unwind_info, unwind_info_len, (guint8 *)ji->code_start,
						   (guint8*)ji->code_start + ji->code_size,
						   (guint8 *)ip, epilog ? &epilog : NULL, regs, MONO_MAX_IREGS + 1,
						   save_locations, MONO_MAX_IREGS, &cfa);

		for (i = 0; i < AMD64_NREG; ++i)
			new_ctx->gregs [i] = regs [i];

		/* The CFA becomes the new SP value */
		new_ctx->gregs [AMD64_RSP] = (mgreg_t)cfa;

		/* Adjust IP */
		new_ctx->gregs [AMD64_RIP] --;

		return TRUE;
	} else if (*lmf) {
		guint64 rip;

		if (((guint64)(*lmf)->previous_lmf) & 2) {
			/*
			 * This LMF entry is created by the soft debug code to mark transitions to
			 * managed code done during invokes.
			 */
			MonoLMFExt *ext = (MonoLMFExt*)(*lmf);

			g_assert (ext->debugger_invoke);

			memcpy (new_ctx, &ext->ctx, sizeof (MonoContext));

			*lmf = (MonoLMF *)(((guint64)(*lmf)->previous_lmf) & ~7);

			frame->type = FRAME_TYPE_DEBUGGER_INVOKE;

			return TRUE;
		}

		if (((guint64)(*lmf)->previous_lmf) & 4) {
			MonoLMFTramp *ext = (MonoLMFTramp*)(*lmf);

			rip = (guint64)MONO_CONTEXT_GET_IP (ext->ctx);
		} else if (((guint64)(*lmf)->previous_lmf) & 1) {
			/* This LMF has the rip field set */
			rip = (*lmf)->rip;
		} else if ((*lmf)->rsp == 0) {
			/* Top LMF entry */
			return FALSE;
		} else {
			/*
			 * The rsp field is set just before the call which transitioned to native
			 * code. Obtain the rip from the stack.
			 */
			rip = *(guint64*)((*lmf)->rsp - sizeof(mgreg_t));
		}

		ji = mini_jit_info_table_find (domain, (char *)rip, NULL);
		/*
		 * FIXME: ji == NULL can happen when a managed-to-native wrapper is interrupted
		 * in the soft debugger suspend code, since (*lmf)->rsp no longer points to the
		 * original return address.
		 */
		if (!ji)
			return FALSE;

		frame->ji = ji;
		frame->type = FRAME_TYPE_MANAGED_TO_NATIVE;

		if (((guint64)(*lmf)->previous_lmf) & 4) {
			MonoLMFTramp *ext = (MonoLMFTramp*)(*lmf);

			/* Trampoline frame */
			for (i = 0; i < AMD64_NREG; ++i)
				new_ctx->gregs [i] = ext->ctx->gregs [i];
			/* Adjust IP */
			new_ctx->gregs [AMD64_RIP] --;
		} else {
			/*
			 * The registers saved in the LMF will be restored using the normal unwind info,
			 * when the wrapper frame is processed.
			 */
			/* Adjust IP */
			rip --;
			new_ctx->gregs [AMD64_RIP] = rip;
			new_ctx->gregs [AMD64_RSP] = (*lmf)->rsp;
			new_ctx->gregs [AMD64_RBP] = (*lmf)->rbp;
			for (i = 0; i < AMD64_NREG; ++i) {
				if (AMD64_IS_CALLEE_SAVED_REG (i) && i != AMD64_RBP)
					new_ctx->gregs [i] = 0;
			}
		}

		*lmf = (MonoLMF *)(((guint64)(*lmf)->previous_lmf) & ~7);

		return TRUE;
	}

	return FALSE;
}
/*
 * handle_signal_exception:
 *
 *   Called by resuming from a signal handler.
 */
static void
handle_signal_exception (gpointer obj)
{
	MonoJitTlsData *jit_tls = (MonoJitTlsData *)mono_tls_get_jit_tls ();
	MonoContext ctx;

	memcpy (&ctx, &jit_tls->ex_ctx, sizeof (MonoContext));

	mono_handle_exception (&ctx, (MonoObject *)obj);

	mono_restore_context (&ctx);
}
void
mono_arch_setup_async_callback (MonoContext *ctx, void (*async_cb)(void *fun), gpointer user_data)
{
	guint64 sp = ctx->gregs [AMD64_RSP];

	ctx->gregs [AMD64_RDI] = (guint64)user_data;

	/* Allocate a stack frame below the red zone */
	sp -= 128;
	/* The stack should be unaligned */
	if ((sp % 16) == 0)
		sp -= 8;
#ifdef __linux__
	/* Preserve the call chain to prevent crashes in the libgcc unwinder (#15969) */
	*(guint64*)sp = ctx->gregs [AMD64_RIP];
#endif
	ctx->gregs [AMD64_RSP] = sp;
	ctx->gregs [AMD64_RIP] = (guint64)async_cb;
}
/**
 * mono_arch_handle_exception:
 * @ctx: saved processor state
 * @obj: the exception object
 */
gboolean
mono_arch_handle_exception (void *sigctx, gpointer obj)
{
#if defined(MONO_ARCH_USE_SIGACTION)
	MonoContext mctx;
	/*
	 * Handling the exception in the signal handler is problematic, since the original
	 * signal is disabled, and we could run arbitrary code through the debugger. So
	 * resume into the normal stack and do most work there if possible.
	 */
	MonoJitTlsData *jit_tls = (MonoJitTlsData *)mono_tls_get_jit_tls ();

	/* Pass the ctx parameter in TLS */
	mono_sigctx_to_monoctx (sigctx, &jit_tls->ex_ctx);

	mctx = jit_tls->ex_ctx;
	mono_arch_setup_async_callback (&mctx, handle_signal_exception, obj);
	mono_monoctx_to_sigctx (&mctx, sigctx);

	return TRUE;
#else
	MonoContext mctx;

	mono_sigctx_to_monoctx (sigctx, &mctx);

	mono_handle_exception (&mctx, obj);

	mono_monoctx_to_sigctx (&mctx, sigctx);

	return TRUE;
#endif
}
gpointer
mono_arch_ip_from_context (void *sigctx)
{
#if defined(MONO_ARCH_USE_SIGACTION)
	ucontext_t *ctx = (ucontext_t*)sigctx;

	return (gpointer)UCONTEXT_REG_RIP (ctx);
#elif defined(HOST_WIN32)
	return (gpointer)(((CONTEXT*)sigctx)->Rip);
#else
	MonoContext *ctx = sigctx;
	return (gpointer)ctx->gregs [AMD64_RIP];
#endif
}
static void
restore_soft_guard_pages (void)
{
	MonoJitTlsData *jit_tls = (MonoJitTlsData *)mono_tls_get_jit_tls ();
	if (jit_tls->stack_ovf_guard_base)
		mono_mprotect (jit_tls->stack_ovf_guard_base, jit_tls->stack_ovf_guard_size, MONO_MMAP_NONE);
}
/*
 * This function modifies mctx so that when it is restored, it
 * won't execute starting at mctx.eip, but in a function that
 * will restore the protection on the soft-guard pages and return back to
 * continue at mctx.eip.
 */
static void
prepare_for_guard_pages (MonoContext *mctx)
{
	gpointer *sp;
	sp = (gpointer *)(mctx->gregs [AMD64_RSP]);
	sp -= 1;
	/* the return addr */
	sp [0] = (gpointer)(mctx->gregs [AMD64_RIP]);
	mctx->gregs [AMD64_RIP] = (guint64)restore_soft_guard_pages;
	mctx->gregs [AMD64_RSP] = (guint64)sp;
}
static void
altstack_handle_and_restore (MonoContext *ctx, MonoObject *obj, gboolean stack_ovf)
{
	MonoContext mctx;
	MonoJitInfo *ji = mini_jit_info_table_find (mono_domain_get (), MONO_CONTEXT_GET_IP (ctx), NULL);

	if (!ji)
		mono_handle_native_crash ("SIGSEGV", NULL, NULL);

	mctx = *ctx;

	mono_handle_exception (&mctx, obj);
	if (stack_ovf)
		prepare_for_guard_pages (&mctx);
	mono_restore_context (&mctx);
}
void
mono_arch_handle_altstack_exception (void *sigctx, MONO_SIG_HANDLER_INFO_TYPE *siginfo, gpointer fault_addr, gboolean stack_ovf)
{
#if defined(MONO_ARCH_USE_SIGACTION)
	MonoException *exc = NULL;
	gpointer *sp;
	int frame_size;
	MonoContext *copied_ctx;

	if (stack_ovf)
		exc = mono_domain_get ()->stack_overflow_ex;

	/* setup a call frame on the real stack so that control is returned there
	 * and exception handling can continue.
	 * The frame looks like:
	 *   ucontext struct
	 *   ...
	 *   return ip
	 * 128 is the size of the red zone
	 */
	frame_size = sizeof (MonoContext) + sizeof (gpointer) * 4 + 128;
	frame_size += 15;
	frame_size &= ~15;
	sp = (gpointer *)(UCONTEXT_REG_RSP (sigctx) & ~15);
	sp = (gpointer *)((char*)sp - frame_size);
	copied_ctx = (MonoContext*)(sp + 4);
	/* the arguments must be aligned */
	sp [-1] = (gpointer)UCONTEXT_REG_RIP (sigctx);
	mono_sigctx_to_monoctx (sigctx, copied_ctx);
	/* at the return from the signal handler execution starts in altstack_handle_and_restore() */
	UCONTEXT_REG_RIP (sigctx) = (unsigned long)altstack_handle_and_restore;
	UCONTEXT_REG_RSP (sigctx) = (unsigned long)(sp - 1);
	UCONTEXT_REG_RDI (sigctx) = (unsigned long)(copied_ctx);
	UCONTEXT_REG_RSI (sigctx) = (guint64)exc;
	UCONTEXT_REG_RDX (sigctx) = stack_ovf;
#endif
}
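/*
 * Resulting state, for illustration: once the signal handler returns,
 * the kernel resumes the thread as if the following call had been made
 * on the normal stack (SysV argument order RDI, RSI, RDX):
 *
 *   altstack_handle_and_restore (copied_ctx, (MonoObject *)exc, stack_ovf);
 */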
guint64
mono_amd64_get_original_ip (void)
{
	MonoLMF *lmf = mono_get_lmf ();

	g_assert (lmf);

	/* Reset the change to previous_lmf */
	lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf & ~1);

	return lmf->rip;
}
#ifndef DISABLE_JIT
GSList*
mono_amd64_get_exception_trampolines (gboolean aot)
{
	MonoTrampInfo *info;
	GSList *tramps = NULL;

	/* LLVM needs different throw trampolines */
	get_throw_trampoline (&info, FALSE, TRUE, FALSE, FALSE, "llvm_throw_corlib_exception_trampoline", aot);
	tramps = g_slist_prepend (tramps, info);

	get_throw_trampoline (&info, FALSE, TRUE, TRUE, FALSE, "llvm_throw_corlib_exception_abs_trampoline", aot);
	tramps = g_slist_prepend (tramps, info);

	get_throw_trampoline (&info, FALSE, TRUE, TRUE, TRUE, "llvm_resume_unwind_trampoline", aot);
	tramps = g_slist_prepend (tramps, info);

	return tramps;
}
#endif /* !DISABLE_JIT */
void
mono_arch_exceptions_init (void)
{
	guint8 *tramp;
	GSList *tramps, *l;

	if (mono_aot_only) {
		tramp = mono_aot_get_trampoline ("llvm_throw_corlib_exception_trampoline");
		mono_register_jit_icall (tramp, "llvm_throw_corlib_exception_trampoline", NULL, TRUE);
		tramp = mono_aot_get_trampoline ("llvm_throw_corlib_exception_abs_trampoline");
		mono_register_jit_icall (tramp, "llvm_throw_corlib_exception_abs_trampoline", NULL, TRUE);
		tramp = mono_aot_get_trampoline ("llvm_resume_unwind_trampoline");
		mono_register_jit_icall (tramp, "llvm_resume_unwind_trampoline", NULL, TRUE);
	} else {
		/* Call this to avoid initialization races */
		tramps = mono_amd64_get_exception_trampolines (FALSE);
		for (l = tramps; l; l = l->next) {
			MonoTrampInfo *info = (MonoTrampInfo *)l->data;

			mono_register_jit_icall (info->code, g_strdup (info->name), NULL, TRUE);
			mono_tramp_info_register (info, NULL);
		}
		g_slist_free (tramps);
	}
}
#if defined(TARGET_WIN32) && !defined(DISABLE_JIT)

/*
 * The mono_arch_unwindinfo* methods are used to build and add
 * function table info for each emitted method from mono. On Winx64
 * the seh handler will not be called if the mono methods are not
 * added to the function table.
 *
 * We should not need to add non-volatile register info to the
 * table since mono stores that info elsewhere. (Except for the register
 * used for the fp.)
 */
#define MONO_MAX_UNWIND_CODES 22

typedef union _UNWIND_CODE {
	struct {
		guchar CodeOffset;
		guchar UnwindOp : 4;
		guchar OpInfo   : 4;
	};
	gushort FrameOffset;
} UNWIND_CODE, *PUNWIND_CODE;

typedef struct _UNWIND_INFO {
	guchar Version       : 3;
	guchar Flags         : 5;
	guchar SizeOfProlog;
	guchar CountOfCodes;
	guchar FrameRegister : 4;
	guchar FrameOffset   : 4;
	/* custom size for mono, allowing for: */
	/*UWOP_PUSH_NONVOL ebp offset = 21*/
	/*UWOP_ALLOC_LARGE : requires 2 or 3 offset = 20*/
	/*UWOP_SET_FPREG : requires 2 offset = 17*/
	/*UWOP_PUSH_NONVOL offset = 15-0*/
	UNWIND_CODE UnwindCode[MONO_MAX_UNWIND_CODES];

/*	UNWIND_CODE MoreUnwindCode[((CountOfCodes + 1) & ~1) - 1];
 *	union {
 *		OPTIONAL ULONG ExceptionHandler;
 *		OPTIONAL ULONG FunctionEntry;
 *	};
 *	OPTIONAL ULONG ExceptionData[]; */
} UNWIND_INFO, *PUNWIND_INFO;

typedef struct
{
	RUNTIME_FUNCTION runtimeFunction;
	UNWIND_INFO unwindInfo;
} MonoUnwindInfo, *PMonoUnwindInfo;
void
mono_arch_unwindinfo_create (gpointer* monoui)
{
	PMonoUnwindInfo newunwindinfo;
	*monoui = newunwindinfo = g_new0 (MonoUnwindInfo, 1);
	newunwindinfo->unwindInfo.Version = 1;
}
void
mono_arch_unwindinfo_add_push_nonvol (gpointer* monoui, gpointer codebegin, gpointer nextip, guchar reg )
{
	PMonoUnwindInfo unwindinfo;
	PUNWIND_CODE unwindcode;
	guchar codeindex;

	if (!*monoui)
		mono_arch_unwindinfo_create (monoui);

	unwindinfo = (MonoUnwindInfo*)*monoui;

	if (unwindinfo->unwindInfo.CountOfCodes >= MONO_MAX_UNWIND_CODES)
		g_error ("Larger allocation needed for the unwind information.");

	codeindex = MONO_MAX_UNWIND_CODES - (++unwindinfo->unwindInfo.CountOfCodes);
	unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];
	unwindcode->UnwindOp = 0; /*UWOP_PUSH_NONVOL*/
	unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin));
	unwindcode->OpInfo = reg;

	if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
		g_error ("Adding unwind info in wrong order.");

	unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
}
void
mono_arch_unwindinfo_add_set_fpreg (gpointer* monoui, gpointer codebegin, gpointer nextip, guchar reg )
{
	PMonoUnwindInfo unwindinfo;
	PUNWIND_CODE unwindcode;
	guchar codeindex;

	if (!*monoui)
		mono_arch_unwindinfo_create (monoui);

	unwindinfo = (MonoUnwindInfo*)*monoui;

	if (unwindinfo->unwindInfo.CountOfCodes + 1 >= MONO_MAX_UNWIND_CODES)
		g_error ("Larger allocation needed for the unwind information.");

	codeindex = MONO_MAX_UNWIND_CODES - (unwindinfo->unwindInfo.CountOfCodes += 2);
	unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];
	unwindcode->FrameOffset = 0; /*Assuming no frame pointer offset for mono*/
	unwindcode++;
	unwindcode->UnwindOp = 3; /*UWOP_SET_FPREG*/
	unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin));
	unwindcode->OpInfo = reg;

	unwindinfo->unwindInfo.FrameRegister = reg;

	if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
		g_error ("Adding unwind info in wrong order.");

	unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
}
void
mono_arch_unwindinfo_add_alloc_stack (gpointer* monoui, gpointer codebegin, gpointer nextip, guint size )
{
	PMonoUnwindInfo unwindinfo;
	PUNWIND_CODE unwindcode;
	guchar codeindex;
	guchar codesneeded;

	if (!*monoui)
		mono_arch_unwindinfo_create (monoui);

	unwindinfo = (MonoUnwindInfo*)*monoui;

	if (size < 0x8)
		g_error ("Stack allocation must be equal to or greater than 0x8.");

	if (size <= 0x80)
		codesneeded = 1;
	else if (size <= 0x7FFF8)
		codesneeded = 2;
	else
		codesneeded = 3;

	if (unwindinfo->unwindInfo.CountOfCodes + codesneeded > MONO_MAX_UNWIND_CODES)
		g_error ("Larger allocation needed for the unwind information.");

	codeindex = MONO_MAX_UNWIND_CODES - (unwindinfo->unwindInfo.CountOfCodes += codesneeded);
	unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];

	if (codesneeded == 1) {
		/*The size of the allocation is
		  (the number in the OpInfo member) times 8 plus 8*/
		unwindcode->OpInfo = (size - 8)/8;
		unwindcode->UnwindOp = 2; /*UWOP_ALLOC_SMALL*/
	}
	else {
		if (codesneeded == 3) {
			/*the unscaled size of the allocation is recorded
			  in the next two slots in little-endian format*/
			*((unsigned int*)(&unwindcode->FrameOffset)) = size;
			unwindcode += 2;
			unwindcode->OpInfo = 1;
		}
		else {
			/*the size of the allocation divided by 8
			  is recorded in the next slot*/
			unwindcode->FrameOffset = size/8;
			unwindcode++;
			unwindcode->OpInfo = 0;
		}
		unwindcode->UnwindOp = 1; /*UWOP_ALLOC_LARGE*/
	}

	unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin));

	if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
		g_error ("Adding unwind info in wrong order.");

	unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
}
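/*
 * Worked example, for illustration: a prolog doing
 * "push %rbp; mov %rsp,%rbp; sub $0x28,%rsp" would be described with
 *   mono_arch_unwindinfo_add_push_nonvol (..., AMD64_RBP);
 *   mono_arch_unwindinfo_add_set_fpreg   (..., AMD64_RBP);
 *   mono_arch_unwindinfo_add_alloc_stack (..., 0x28);
 * and the last call needs a single slot (codesneeded == 1), encoding
 * UWOP_ALLOC_SMALL with OpInfo == (0x28 - 8) / 8 == 4.
 */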
guint
mono_arch_unwindinfo_get_size (gpointer monoui)
{
	PMonoUnwindInfo unwindinfo;
	if (!monoui)
		return 0;

	unwindinfo = (MonoUnwindInfo*)monoui;
	return (8 + sizeof (MonoUnwindInfo)) -
		(sizeof (UNWIND_CODE) * (MONO_MAX_UNWIND_CODES - unwindinfo->unwindInfo.CountOfCodes));
}
static PRUNTIME_FUNCTION
MONO_GET_RUNTIME_FUNCTION_CALLBACK ( DWORD64 ControlPc, IN PVOID Context )
{
	MonoJitInfo *ji;
	guint64 pos;
	PMonoUnwindInfo targetinfo;
	MonoDomain *domain = mono_domain_get ();

	ji = mini_jit_info_table_find (domain, (char*)ControlPc, NULL);
	if (!ji)
		return 0;

	pos = (guint64)(((char*)ji->code_start) + ji->code_size);

	targetinfo = (PMonoUnwindInfo)ALIGN_TO (pos, 8);

	targetinfo->runtimeFunction.BeginAddress = ((DWORD64)ji->code_start) - ((DWORD64)Context);
	targetinfo->runtimeFunction.EndAddress = pos - ((DWORD64)Context);
	targetinfo->runtimeFunction.UnwindData = ((DWORD64)&targetinfo->unwindInfo) - ((DWORD64)Context);

	return &targetinfo->runtimeFunction;
}
void
mono_arch_unwindinfo_install_unwind_info (gpointer* monoui, gpointer code, guint code_size)
{
	PMonoUnwindInfo unwindinfo, targetinfo;
	guchar codecount;
	guint64 targetlocation;
	if (!*monoui)
		return;

	unwindinfo = (MonoUnwindInfo*)*monoui;
	targetlocation = (guint64)&(((guchar*)code)[code_size]);
	targetinfo = (PMonoUnwindInfo) ALIGN_TO(targetlocation, 8);

	unwindinfo->runtimeFunction.EndAddress = code_size;
	unwindinfo->runtimeFunction.UnwindData = ((guchar*)&targetinfo->unwindInfo) - ((guchar*)code);

	memcpy (targetinfo, unwindinfo, sizeof (MonoUnwindInfo) - (sizeof (UNWIND_CODE) * MONO_MAX_UNWIND_CODES));

	codecount = unwindinfo->unwindInfo.CountOfCodes;
	if (codecount) {
		memcpy (&targetinfo->unwindInfo.UnwindCode[0], &unwindinfo->unwindInfo.UnwindCode[MONO_MAX_UNWIND_CODES-codecount],
			sizeof (UNWIND_CODE) * unwindinfo->unwindInfo.CountOfCodes);
	}

	g_free (unwindinfo);
	*monoui = 0;
}
void
mono_arch_code_chunk_new (void *chunk, int size)
{
	BOOLEAN success = RtlInstallFunctionTableCallback (((DWORD64)chunk) | 0x3, (DWORD64)chunk, size, MONO_GET_RUNTIME_FUNCTION_CALLBACK, chunk, NULL);
	g_assert (success);
}

void mono_arch_code_chunk_destroy (void *chunk)
{
	BOOLEAN success = RtlDeleteFunctionTable ((PRUNTIME_FUNCTION)((DWORD64)chunk | 0x03));
	g_assert (success);
}

#endif /* defined(TARGET_WIN32) && !defined(DISABLE_JIT) */
#if MONO_SUPPORT_TASKLETS && !defined(DISABLE_JIT)
MonoContinuationRestore
mono_tasklets_arch_restore (void)
{
	static guint8* saved = NULL;
	guint8 *code, *start;
	int cont_reg = AMD64_R9; /* register usable on both call conventions */
	const guint kMaxCodeSize = 64;

	if (saved)
		return (MonoContinuationRestore)saved;
	code = start = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize);
	/* the signature is: restore (MonoContinuation *cont, int state, MonoLMF **lmf_addr) */
	/* cont is in AMD64_ARG_REG1 ($rcx or $rdi)
	 * state is in AMD64_ARG_REG2 ($rdx or $rsi)
	 * lmf_addr is in AMD64_ARG_REG3 ($r8 or $rdx)
	 * We move cont to cont_reg since we need both rcx and rdi for the copy
	 * state is moved to $rax so it's setup as the return value and we can overwrite $rsi
	 */
	amd64_mov_reg_reg (code, cont_reg, MONO_AMD64_ARG_REG1, 8);
	amd64_mov_reg_reg (code, AMD64_RAX, MONO_AMD64_ARG_REG2, 8);
	/* setup the copy of the stack */
	amd64_mov_reg_membase (code, AMD64_RCX, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, stack_used_size), sizeof (int));
	amd64_shift_reg_imm (code, X86_SHR, AMD64_RCX, 3);
	x86_cld (code);
	amd64_mov_reg_membase (code, AMD64_RSI, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, saved_stack), sizeof (gpointer));
	amd64_mov_reg_membase (code, AMD64_RDI, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, return_sp), sizeof (gpointer));
	amd64_prefix (code, X86_REP_PREFIX);
	amd64_movsl (code);

	/* now restore the registers from the LMF */
	amd64_mov_reg_membase (code, AMD64_RCX, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, lmf), 8);
	amd64_mov_reg_membase (code, AMD64_RBP, AMD64_RCX, MONO_STRUCT_OFFSET (MonoLMF, rbp), 8);
	amd64_mov_reg_membase (code, AMD64_RSP, AMD64_RCX, MONO_STRUCT_OFFSET (MonoLMF, rsp), 8);

#ifdef TARGET_WIN32
	amd64_mov_reg_reg (code, AMD64_R14, AMD64_ARG_REG3, 8);
#else
	amd64_mov_reg_reg (code, AMD64_R12, AMD64_ARG_REG3, 8);
#endif

	/* state is already in rax */
	amd64_jump_membase (code, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, return_ip));
	g_assert ((code - start) <= kMaxCodeSize);

	mono_arch_flush_icache (start, code - start);
	mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL);

	saved = start;
	return (MonoContinuationRestore)saved;
}
#endif /* MONO_SUPPORT_TASKLETS && !defined(DISABLE_JIT) */
/*
 * mono_arch_setup_resume_sighandler_ctx:
 *
 *   Setup CTX so execution continues at FUNC.
 */
void
mono_arch_setup_resume_sighandler_ctx (MonoContext *ctx, gpointer func)
{
	/*
	 * When resuming from a signal handler, the stack should be misaligned, just like right after
	 * a call.
	 */
	if ((((guint64)MONO_CONTEXT_GET_SP (ctx)) % 16) == 0)
		MONO_CONTEXT_SET_SP (ctx, (guint64)MONO_CONTEXT_GET_SP (ctx) - 8);
	MONO_CONTEXT_SET_IP (ctx, func);
}
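/*
 * Worked example, for illustration: right after a call, RSP % 16 == 8,
 * since the call pushed an 8-byte return address onto a 16-byte aligned
 * stack. So a ctx SP of 0x7fffffffe000 is adjusted to 0x7fffffffdff8
 * here, while an SP of 0x7fffffffdff8 is left as it is.
 */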
#ifdef DISABLE_JIT
gpointer
mono_arch_get_restore_context (MonoTrampInfo **info, gboolean aot)
{
	g_assert_not_reached ();
	return NULL;
}

gpointer
mono_arch_get_call_filter (MonoTrampInfo **info, gboolean aot)
{
	g_assert_not_reached ();
	return NULL;
}

gpointer
mono_arch_get_throw_exception (MonoTrampInfo **info, gboolean aot)
{
	g_assert_not_reached ();
	return NULL;
}

gpointer
mono_arch_get_rethrow_exception (MonoTrampInfo **info, gboolean aot)
{
	g_assert_not_reached ();
	return NULL;
}

gpointer
mono_arch_get_throw_corlib_exception (MonoTrampInfo **info, gboolean aot)
{
	g_assert_not_reached ();
	return NULL;
}

GSList*
mono_amd64_get_exception_trampolines (gboolean aot)
{
	g_assert_not_reached ();
	return NULL;
}
#endif /* DISABLE_JIT */
#if !MONO_SUPPORT_TASKLETS || defined(DISABLE_JIT)
MonoContinuationRestore
mono_tasklets_arch_restore (void)
{
	g_assert_not_reached ();
	return NULL;
}
#endif /* !MONO_SUPPORT_TASKLETS || defined(DISABLE_JIT) */