/*
 * exceptions-amd64.c: exception support for AMD64
 *
 * Dietmar Maurer (dietmar@ximian.com)
 *
 * (C) 2001 Ximian, Inc.
 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */
#ifdef HAVE_UCONTEXT_H
#include <ucontext.h>
#endif
#include <mono/arch/amd64/amd64-codegen.h>
#include <mono/metadata/abi-details.h>
#include <mono/metadata/appdomain.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/threads.h>
#include <mono/metadata/threads-types.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/exception.h>
#include <mono/metadata/gc-internals.h>
#include <mono/metadata/mono-debug.h>
#include <mono/utils/mono-mmap.h>

#include "mini-amd64.h"
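/* Round VAL up to the next multiple of ALIGN; ALIGN must be a power of two. */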
#define ALIGN_TO(val,align) (((val) + ((align) - 1)) & ~((align) - 1))
static MonoW32ExceptionHandler fpe_handler;
static MonoW32ExceptionHandler ill_handler;
static MonoW32ExceptionHandler segv_handler;

LPTOP_LEVEL_EXCEPTION_FILTER mono_old_win_toplevel_exception_filter;
void *mono_win_vectored_exception_handle;
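/* Invoke the handler registered via win32_seh_set_handler () for this kind of fault, if one is installed. */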
#define W32_SEH_HANDLE_EX(_ex) \
	if (_ex##_handler) _ex##_handler(0, ep, ctx)
static LONG CALLBACK seh_unhandled_exception_filter(EXCEPTION_POINTERS* ep)
{
#ifndef MONO_CROSS_COMPILE
	if (mono_old_win_toplevel_exception_filter) {
		return (*mono_old_win_toplevel_exception_filter)(ep);
	}
#endif

	mono_handle_native_sigsegv (SIGSEGV, NULL, NULL);

	return EXCEPTION_CONTINUE_SEARCH;
}

/*
 * Unhandled Exception Filter
 * Top-level per-process exception handler.
 */
static LONG CALLBACK seh_vectored_exception_handler(EXCEPTION_POINTERS* ep)
{
	EXCEPTION_RECORD* er;
	CONTEXT* ctx;
	LONG res;
	MonoJitTlsData *jit_tls = mono_native_tls_get_value (mono_jit_tls_id);

	/* If the thread is not managed by the runtime return early */
	if (!jit_tls)
		return EXCEPTION_CONTINUE_SEARCH;

	jit_tls->mono_win_chained_exception_needs_run = FALSE;
	res = EXCEPTION_CONTINUE_EXECUTION;

	er = ep->ExceptionRecord;
	ctx = ep->ContextRecord;
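	/* Route the Win32 exception code to the handler the runtime installed for the corresponding signal (SIGSEGV, SIGILL or SIGFPE). */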
	switch (er->ExceptionCode) {
	case EXCEPTION_ACCESS_VIOLATION:
		W32_SEH_HANDLE_EX(segv);
		break;
	case EXCEPTION_ILLEGAL_INSTRUCTION:
		W32_SEH_HANDLE_EX(ill);
		break;
	case EXCEPTION_INT_DIVIDE_BY_ZERO:
	case EXCEPTION_INT_OVERFLOW:
	case EXCEPTION_FLT_DIVIDE_BY_ZERO:
	case EXCEPTION_FLT_OVERFLOW:
	case EXCEPTION_FLT_UNDERFLOW:
	case EXCEPTION_FLT_INEXACT_RESULT:
		W32_SEH_HANDLE_EX(fpe);
		break;
	default:
		jit_tls->mono_win_chained_exception_needs_run = TRUE;
		break;
	}

	if (jit_tls->mono_win_chained_exception_needs_run) {
		/* Don't copy context back if we chained exception
		 * as the handler may have modified the EXCEPTION_POINTERS
		 * directly. We don't pass sigcontext to chained handlers.
		 * Return continue search so the UnhandledExceptionFilter
		 * can correctly chain the exception.
		 */
		res = EXCEPTION_CONTINUE_SEARCH;
	}

	return res;
}
void win32_seh_init()
{
	mono_old_win_toplevel_exception_filter = SetUnhandledExceptionFilter(seh_unhandled_exception_filter);
	mono_win_vectored_exception_handle = AddVectoredExceptionHandler (1, seh_vectored_exception_handler);
}

void win32_seh_cleanup()
{
	guint32 ret = 0;

	if (mono_old_win_toplevel_exception_filter) SetUnhandledExceptionFilter(mono_old_win_toplevel_exception_filter);

	ret = RemoveVectoredExceptionHandler (mono_win_vectored_exception_handle);
}

void win32_seh_set_handler(int type, MonoW32ExceptionHandler handler)
{
	switch (type) {
	case SIGFPE:
		fpe_handler = handler;
		break;
	case SIGILL:
		ill_handler = handler;
		break;
	case SIGSEGV:
		segv_handler = handler;
		break;
	}
}

#endif /* TARGET_WIN32 */
/*
 * mono_arch_get_restore_context:
 *
 * Returns a pointer to a method which restores a previously saved sigcontext.
 */
gpointer
mono_arch_get_restore_context (MonoTrampInfo **info, gboolean aot)
{
	guint8 *start = NULL;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;

	/* restore_context (MonoContext *ctx) */

	start = code = (guint8 *)mono_global_codeman_reserve (256);

	amd64_mov_reg_reg (code, AMD64_R11, AMD64_ARG_REG1, 8);
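	/* R11 now holds the MonoContext pointer; every load below goes through it, and it is only clobbered at the very end, when the saved RIP is fetched for the final jump. */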
	/* Restore all registers except %rip and %r11 */
	gregs_offset = MONO_STRUCT_OFFSET (MonoContext, gregs);
	for (i = 0; i < AMD64_NREG; ++i) {
		if (i != AMD64_RIP && i != AMD64_RSP && i != AMD64_R8 && i != AMD64_R9 && i != AMD64_R10 && i != AMD64_R11)
			amd64_mov_reg_membase (code, i, AMD64_R11, gregs_offset + (i * 8), 8);
	}

	/*
	 * The context resides on the stack, in the stack frame of the
	 * caller of this function. The stack pointer that we need to
	 * restore is potentially many stack frames higher up, so the
	 * distance between them can easily be more than the red zone
	 * size. Hence the stack pointer can be restored only after
	 * we have finished loading everything from the context.
	 */
	amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11, gregs_offset + (AMD64_RSP * 8), 8);
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, gregs_offset + (AMD64_RIP * 8), 8);
	amd64_mov_reg_reg (code, AMD64_RSP, AMD64_R8, 8);

	/* jump to the saved IP */
	amd64_jump_reg (code, AMD64_R11);

	mono_arch_flush_icache (start, code - start);
	mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL);

	*info = mono_tramp_info_create ("restore_context", start, code - start, ji, unwind_ops);
/*
 * mono_arch_get_call_filter:
 *
 * Returns a pointer to a method which calls an exception filter. We
 * also use this function to call finally handlers (we pass NULL as
 * @exc object in this case).
 */
gpointer
mono_arch_get_call_filter (MonoTrampInfo **info, gboolean aot)
{
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;
	const guint kMaxCodeSize = 128;

	start = code = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize);
	/* call_filter (MonoContext *ctx, unsigned long eip) */

	/* Alloc new frame */
	amd64_push_reg (code, AMD64_RBP);
	amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, 8);

	/* Save callee saved regs */
	for (i = 0; i < AMD64_NREG; ++i)
		if (AMD64_IS_CALLEE_SAVED_REG (i)) {
			amd64_push_reg (code, i);
		}

	amd64_push_reg (code, AMD64_RBP);

	/* Make stack misaligned, the call will make it aligned again */
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);

	gregs_offset = MONO_STRUCT_OFFSET (MonoContext, gregs);

	amd64_mov_reg_membase (code, AMD64_RBP, AMD64_ARG_REG1, gregs_offset + (AMD64_RBP * 8), 8);
	/* load callee saved regs */
	for (i = 0; i < AMD64_NREG; ++i) {
		if (AMD64_IS_CALLEE_SAVED_REG (i) && i != AMD64_RBP)
			amd64_mov_reg_membase (code, i, AMD64_ARG_REG1, gregs_offset + (i * 8), 8);
	}

	/* load exc register */
	amd64_mov_reg_membase (code, AMD64_RAX, AMD64_ARG_REG1, gregs_offset + (AMD64_RAX * 8), 8);

	/* call the handler */
	amd64_call_reg (code, AMD64_ARG_REG2);
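	/* The handler runs with the protected method's RBP and callee saved registers but on this trampoline's stack; a filter's result is left in RAX. */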
	amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);

	amd64_pop_reg (code, AMD64_RBP);

	/* Restore callee saved regs */
	for (i = AMD64_NREG; i >= 0; --i)
		if (AMD64_IS_CALLEE_SAVED_REG (i))
			amd64_pop_reg (code, i);

	g_assert ((code - start) < kMaxCodeSize);

	mono_arch_flush_icache (start, code - start);
	mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL);

	*info = mono_tramp_info_create ("call_filter", start, code - start, ji, unwind_ops);
/*
 * The first few arguments are dummy, to force the other arguments to be passed on
 * the stack; this avoids overwriting the argument registers in the throw trampoline.
 */
void
mono_amd64_throw_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
			    guint64 dummy5, guint64 dummy6,
			    MonoContext *mctx, MonoObject *exc, gboolean rethrow)
{
	MonoError error;
	MonoContext ctx;

	/* mctx is on the caller's stack */
	memcpy (&ctx, mctx, sizeof (MonoContext));

	if (mono_object_isinst_checked (exc, mono_defaults.exception_class, &error)) {
		MonoException *mono_ex = (MonoException*)exc;
		if (!rethrow) {
			mono_ex->stack_trace = NULL;
			mono_ex->trace_ips = NULL;
		}
	}
	mono_error_assert_ok (&error);

	/* adjust eip so that it points into the call instruction */
	ctx.gregs [AMD64_RIP] --;

	mono_handle_exception (&ctx, exc);
	mono_restore_context (&ctx);
	g_assert_not_reached ();
}
void
mono_amd64_throw_corlib_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
				   guint64 dummy5, guint64 dummy6,
				   MonoContext *mctx, guint32 ex_token_index, gint64 pc_offset)
{
	guint32 ex_token = MONO_TOKEN_TYPE_DEF | ex_token_index;
	MonoException *ex;

	ex = mono_exception_from_token (mono_defaults.exception_class->image, ex_token);

	mctx->gregs [AMD64_RIP] -= pc_offset;

	/* Negate the ip adjustment done in mono_amd64_throw_exception () */
	mctx->gregs [AMD64_RIP] += 1;

	mono_amd64_throw_exception (dummy1, dummy2, dummy3, dummy4, dummy5, dummy6, mctx, (MonoObject*)ex, FALSE);
}
void
mono_amd64_resume_unwind (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
			  guint64 dummy5, guint64 dummy6,
			  MonoContext *mctx, guint32 dummy7, gint64 dummy8)
{
	/* Only the register parameters are valid */
	MonoContext ctx;

	/* mctx is on the caller's stack */
	memcpy (&ctx, mctx, sizeof (MonoContext));

	mono_resume_unwind (&ctx);
}
/*
 * get_throw_trampoline:
 *
 * Generate a call to mono_amd64_throw_exception/
 * mono_amd64_throw_corlib_exception.
 */
static gpointer
get_throw_trampoline (MonoTrampInfo **info, gboolean rethrow, gboolean corlib, gboolean llvm_abs, gboolean resume_unwind, const char *tramp_name, gboolean aot)
{
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;
	int i, stack_size, arg_offsets [16], ctx_offset, regs_offset, dummy_stack_space;
	const guint kMaxCodeSize = 256;

#ifdef TARGET_WIN32
	dummy_stack_space = 6 * sizeof(mgreg_t);	/* Windows expects stack space allocated for all 6 dummy args. */
#else
	dummy_stack_space = 0;
#endif

	start = code = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize);

	/* The stack is unaligned on entry */
	stack_size = ALIGN_TO (sizeof (MonoContext) + 64 + dummy_stack_space, MONO_ARCH_FRAME_ALIGNMENT) + 8;

	unwind_ops = mono_arch_get_cie_program ();

	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, stack_size);
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, stack_size + 8);

	/*
	 * To hide linux/windows calling convention differences, we pass all arguments on
	 * the stack by passing 6 dummy values in registers.
	 */

	arg_offsets [0] = dummy_stack_space + 0;
	arg_offsets [1] = dummy_stack_space + sizeof(mgreg_t);
	arg_offsets [2] = dummy_stack_space + sizeof(mgreg_t) * 2;
	ctx_offset = dummy_stack_space + sizeof(mgreg_t) * 4;
	regs_offset = ctx_offset + MONO_STRUCT_OFFSET (MonoContext, gregs);
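	/*
	 * Layout of the frame being built (offsets relative to RSP after the frame allocation above):
	 * the optional dummy-argument home space, then three stack argument slots
	 * (ctx, exc/token, rethrow/pc-offset), then the MonoContext at ctx_offset, whose
	 * gregs array starts at regs_offset.
	 */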
	/* Save registers */
	for (i = 0; i < AMD64_NREG; ++i)
		if (i != AMD64_RSP)
			amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (i * sizeof(mgreg_t)), i, sizeof(mgreg_t));
	/* Save RSP */
	amd64_lea_membase (code, AMD64_RAX, AMD64_RSP, stack_size + sizeof(mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (AMD64_RSP * sizeof(mgreg_t)), X86_EAX, sizeof(mgreg_t));
	/* Save IP */
	amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, stack_size, sizeof(mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (AMD64_RIP * sizeof(mgreg_t)), AMD64_RAX, sizeof(mgreg_t));
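	/* The return address of the call that entered this trampoline, located at [RSP + stack_size], becomes the saved IP of the context. */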
	/* Set arg1 == ctx */
	amd64_lea_membase (code, AMD64_RAX, AMD64_RSP, ctx_offset);
	amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [0], AMD64_RAX, sizeof(mgreg_t));
	/* Set arg2 == exc/ex_token_index */
	if (resume_unwind)
		amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [1], 0, sizeof(mgreg_t));
	else
		amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [1], AMD64_ARG_REG1, sizeof(mgreg_t));
	/* Set arg3 == rethrow/pc offset */
	if (resume_unwind) {
		amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], 0, sizeof(mgreg_t));
	} else if (corlib) {
		if (llvm_abs)
			/*
			 * The caller doesn't pass in a pc/pc offset, instead we simply use the
			 * caller ip. Negate the pc adjustment done in mono_amd64_throw_corlib_exception ().
			 */
			amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], 1, sizeof(mgreg_t));
		else
			amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [2], AMD64_ARG_REG2, sizeof(mgreg_t));
	} else {
		amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], rethrow, sizeof(mgreg_t));
	}
	if (aot) {
		const char *icall_name;

		if (resume_unwind)
			icall_name = "mono_amd64_resume_unwind";
		else if (corlib)
			icall_name = "mono_amd64_throw_corlib_exception";
		else
			icall_name = "mono_amd64_throw_exception";
		ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, icall_name);
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, resume_unwind ? ((gpointer)mono_amd64_resume_unwind) : (corlib ? (gpointer)mono_amd64_throw_corlib_exception : (gpointer)mono_amd64_throw_exception));
	}
	amd64_call_reg (code, AMD64_R11);
	amd64_breakpoint (code);
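	/* Never reached: all three target icalls transfer control elsewhere instead of returning; the breakpoint only catches bugs. */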
	mono_arch_flush_icache (start, code - start);

	g_assert ((code - start) < kMaxCodeSize);

	mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL);

	*info = mono_tramp_info_create (tramp_name, start, code - start, ji, unwind_ops);

	return start;
}
/*
 * mono_arch_get_throw_exception:
 *
 * Returns a function pointer which can be used to raise
 * exceptions. The returned function has the following
 * signature: void (*func) (MonoException *exc);
 */
gpointer
mono_arch_get_throw_exception (MonoTrampInfo **info, gboolean aot)
{
	return get_throw_trampoline (info, FALSE, FALSE, FALSE, FALSE, "throw_exception", aot);
}

gpointer
mono_arch_get_rethrow_exception (MonoTrampInfo **info, gboolean aot)
{
	return get_throw_trampoline (info, TRUE, FALSE, FALSE, FALSE, "rethrow_exception", aot);
}
/*
 * mono_arch_get_throw_corlib_exception:
 *
 * Returns a function pointer which can be used to raise
 * corlib exceptions. The returned function has the following
 * signature: void (*func) (guint32 ex_token, guint32 offset);
 * Here, offset is the offset which needs to be subtracted from the caller IP
 * to get the IP of the throw. Passing the offset has the advantage that it
 * needs no relocations in the caller.
 */
gpointer
mono_arch_get_throw_corlib_exception (MonoTrampInfo **info, gboolean aot)
{
	return get_throw_trampoline (info, FALSE, TRUE, FALSE, FALSE, "throw_corlib_exception", aot);
}
/*
 * mono_arch_unwind_frame:
 *
 * This function is used to gather information from @ctx, and store it in @frame_info.
 * It unwinds one stack frame, and stores the resulting context into @new_ctx. @lmf
 * is modified if needed.
 * Returns TRUE on success, FALSE otherwise.
 */
gboolean
mono_arch_unwind_frame (MonoDomain *domain, MonoJitTlsData *jit_tls,
			MonoJitInfo *ji, MonoContext *ctx,
			MonoContext *new_ctx, MonoLMF **lmf,
			mgreg_t **save_locations,
			StackFrameInfo *frame)
{
	gpointer ip = MONO_CONTEXT_GET_IP (ctx);
	int i;

	memset (frame, 0, sizeof (StackFrameInfo));

	*new_ctx = *ctx;

	if (ji != NULL) {
		mgreg_t regs [MONO_MAX_IREGS + 1];
		guint8 *unwind_info;
		guint32 unwind_info_len;
		guint8 *cfa;
		guint8 *epilog = NULL;

		if (ji->is_trampoline)
			frame->type = FRAME_TYPE_TRAMPOLINE;
		else
			frame->type = FRAME_TYPE_MANAGED;

		unwind_info = mono_jinfo_get_unwind_info (ji, &unwind_info_len);

		frame->unwind_info = unwind_info;
		frame->unwind_info_len = unwind_info_len;

		/*
		printf ("%s %p %p\n", ji->d.method->name, ji->code_start, ip);
		mono_print_unwind_info (unwind_info, unwind_info_len);
		*/

		/* LLVM compiled code doesn't have this info */
		if (ji->has_arch_eh_info)
			epilog = (guint8*)ji->code_start + ji->code_size - mono_jinfo_get_epilog_size (ji);

		for (i = 0; i < AMD64_NREG; ++i)
			regs [i] = new_ctx->gregs [i];

		mono_unwind_frame (unwind_info, unwind_info_len, (guint8 *)ji->code_start,
				   (guint8*)ji->code_start + ji->code_size,
				   (guint8 *)ip, epilog ? &epilog : NULL, regs, MONO_MAX_IREGS + 1,
				   save_locations, MONO_MAX_IREGS, &cfa);

		for (i = 0; i < AMD64_NREG; ++i)
			new_ctx->gregs [i] = regs [i];

		/* The CFA becomes the new SP value */
		new_ctx->gregs [AMD64_RSP] = (mgreg_t)cfa;
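		/* Point the unwound IP one byte into the call instruction so that IP-to-JIT-info lookups attribute it to the calling method rather than to the return site. */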
		new_ctx->gregs [AMD64_RIP] --;

		return TRUE;
	} else if (*lmf) {
		guint64 rip;

		if (((guint64)(*lmf)->previous_lmf) & 2) {
			/*
			 * This LMF entry is created by the soft debug code to mark transitions to
			 * managed code done during invokes.
			 */
			MonoLMFExt *ext = (MonoLMFExt*)(*lmf);

			g_assert (ext->debugger_invoke);
			memcpy (new_ctx, &ext->ctx, sizeof (MonoContext));
			*lmf = (MonoLMF *)(((guint64)(*lmf)->previous_lmf) & ~7);
			frame->type = FRAME_TYPE_DEBUGGER_INVOKE;

			return TRUE;
		}
		if (((guint64)(*lmf)->previous_lmf) & 4) {
			MonoLMFTramp *ext = (MonoLMFTramp*)(*lmf);

			rip = (guint64)MONO_CONTEXT_GET_IP (ext->ctx);
		} else if (((guint64)(*lmf)->previous_lmf) & 1) {
			/* This LMF has the rip field set */
			rip = (*lmf)->rip;
		} else if ((*lmf)->rsp == 0) {
			/* Top LMF entry */
			return FALSE;
		} else {
			/*
			 * The rsp field is set just before the call which transitioned to native
			 * code. Obtain the rip from the stack.
			 */
			rip = *(guint64*)((*lmf)->rsp - sizeof(mgreg_t));
		}

		ji = mini_jit_info_table_find (domain, (char *)rip, NULL);
		/*
		 * FIXME: ji == NULL can happen when a managed-to-native wrapper is interrupted
		 * in the soft debugger suspend code, since (*lmf)->rsp no longer points to the
		 */

		frame->type = FRAME_TYPE_MANAGED_TO_NATIVE;
		if (((guint64)(*lmf)->previous_lmf) & 4) {
			MonoLMFTramp *ext = (MonoLMFTramp*)(*lmf);

			/* Trampoline frame */
			for (i = 0; i < AMD64_NREG; ++i)
				new_ctx->gregs [i] = ext->ctx->gregs [i];
			/* Adjust IP */
			new_ctx->gregs [AMD64_RIP] --;
		} else {
			/*
			 * The registers saved in the LMF will be restored using the normal unwind info,
			 * when the wrapper frame is processed.
			 */
			new_ctx->gregs [AMD64_RIP] = rip;
			new_ctx->gregs [AMD64_RSP] = (*lmf)->rsp;
			new_ctx->gregs [AMD64_RBP] = (*lmf)->rbp;
			for (i = 0; i < AMD64_NREG; ++i) {
				if (AMD64_IS_CALLEE_SAVED_REG (i) && i != AMD64_RBP)
					new_ctx->gregs [i] = 0;
			}
		}

		*lmf = (MonoLMF *)(((guint64)(*lmf)->previous_lmf) & ~7);

		return TRUE;
	}

	return FALSE;
}
/*
 * Called upon resuming from a signal handler.
 */
static void
handle_signal_exception (gpointer obj)
{
	MonoJitTlsData *jit_tls = (MonoJitTlsData *)mono_native_tls_get_value (mono_jit_tls_id);
	MonoContext ctx;

	memcpy (&ctx, &jit_tls->ex_ctx, sizeof (MonoContext));
	mono_handle_exception (&ctx, (MonoObject *)obj);
	mono_restore_context (&ctx);
}
void
mono_arch_setup_async_callback (MonoContext *ctx, void (*async_cb)(void *fun), gpointer user_data)
{
	guint64 sp = ctx->gregs [AMD64_RSP];

	ctx->gregs [AMD64_RDI] = (guint64)user_data;

	/* Allocate a stack frame below the red zone */
	sp -= 128;
	/* The stack should be unaligned */
	if ((sp % 16) == 0)
		sp -= 8;
	/* Preserve the call chain to prevent crashes in the libgcc unwinder (#15969) */
	*(guint64*)sp = ctx->gregs [AMD64_RIP];
	ctx->gregs [AMD64_RSP] = sp;
	ctx->gregs [AMD64_RIP] = (guint64)async_cb;
}
/*
 * mono_arch_handle_exception:
 *
 * @ctx: saved processor state
 * @obj: the exception object
 */
gboolean
mono_arch_handle_exception (void *sigctx, gpointer obj)
{
#if defined(MONO_ARCH_USE_SIGACTION)
	MonoContext mctx;

	/*
	 * Handling the exception in the signal handler is problematic, since the original
	 * signal is disabled, and we could run arbitrary code through the debugger. So
	 * resume onto the normal stack and do most of the work there if possible.
	 */
	MonoJitTlsData *jit_tls = (MonoJitTlsData *)mono_native_tls_get_value (mono_jit_tls_id);

	/* Pass the ctx parameter in TLS */
	mono_sigctx_to_monoctx (sigctx, &jit_tls->ex_ctx);
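	/* handle_signal_exception () will fetch this context back out of jit_tls->ex_ctx once control leaves the signal handler. */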
	mctx = jit_tls->ex_ctx;
	mono_arch_setup_async_callback (&mctx, handle_signal_exception, obj);
	mono_monoctx_to_sigctx (&mctx, sigctx);

	return TRUE;
#else
	MonoContext mctx;

	mono_sigctx_to_monoctx (sigctx, &mctx);
	mono_handle_exception (&mctx, obj);
	mono_monoctx_to_sigctx (&mctx, sigctx);

	return TRUE;
#endif
}
gpointer
mono_arch_ip_from_context (void *sigctx)
{
#if defined(MONO_ARCH_USE_SIGACTION)
	ucontext_t *ctx = (ucontext_t*)sigctx;

	return (gpointer)UCONTEXT_REG_RIP (ctx);
#elif defined(HOST_WIN32)
	return ((CONTEXT*)sigctx)->Rip;
#else
	MonoContext *ctx = sigctx;
	return (gpointer)ctx->gregs [AMD64_RIP];
#endif
}
static void
restore_soft_guard_pages (void)
{
	MonoJitTlsData *jit_tls = (MonoJitTlsData *)mono_native_tls_get_value (mono_jit_tls_id);

	if (jit_tls->stack_ovf_guard_base)
		mono_mprotect (jit_tls->stack_ovf_guard_base, jit_tls->stack_ovf_guard_size, MONO_MMAP_NONE);
}
/*
 * This function modifies mctx so that when it is restored, it
 * won't execute starting at mctx.eip, but in a function that
 * will restore the protection on the soft-guard pages and return back to
 * continue at mctx.eip.
 */
static void
prepare_for_guard_pages (MonoContext *mctx)
{
	gpointer *sp;

	sp = (gpointer *)(mctx->gregs [AMD64_RSP]);
	sp -= 1;
	/* the return addr */
	sp [0] = (gpointer)(mctx->gregs [AMD64_RIP]);
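	/* When the context is restored, the "return" lands in restore_soft_guard_pages (), which re-protects the guard area and then returns to the original IP pushed above. */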
	mctx->gregs [AMD64_RIP] = (guint64)restore_soft_guard_pages;
	mctx->gregs [AMD64_RSP] = (guint64)sp;
}
static void
altstack_handle_and_restore (MonoContext *ctx, MonoObject *obj, gboolean stack_ovf)
{
	MonoContext mctx = *ctx;

	mono_handle_exception (&mctx, obj);
	if (stack_ovf)
		prepare_for_guard_pages (&mctx);
	mono_restore_context (&mctx);
}
void
mono_arch_handle_altstack_exception (void *sigctx, MONO_SIG_HANDLER_INFO_TYPE *siginfo, gpointer fault_addr, gboolean stack_ovf)
{
#if defined(MONO_ARCH_USE_SIGACTION)
	MonoException *exc = NULL;
	MonoJitInfo *ji = mini_jit_info_table_find (mono_domain_get (), (char *)UCONTEXT_REG_RIP (sigctx), NULL);
	gpointer *sp;
	int frame_size;
	MonoContext *copied_ctx;

	if (stack_ovf)
		exc = mono_domain_get ()->stack_overflow_ex;
	if (!ji)
		mono_handle_native_sigsegv (SIGSEGV, sigctx, siginfo);

	/* setup a call frame on the real stack so that control is returned there
	 * and exception handling can continue.
	 * The frame looks like:
	 * 128 is the size of the red zone
	 */
	frame_size = sizeof (MonoContext) + sizeof (gpointer) * 4 + 128;
	sp = (gpointer *)(UCONTEXT_REG_RSP (sigctx) & ~15);
	sp = (gpointer *)((char*)sp - frame_size);
	copied_ctx = (MonoContext*)(sp + 4);
	/* the arguments must be aligned */
	sp [-1] = (gpointer)UCONTEXT_REG_RIP (sigctx);
	mono_sigctx_to_monoctx (sigctx, copied_ctx);
	/* at the return from the signal handler execution starts in altstack_handle_and_restore() */
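	/* Its three arguments (ctx, exc, stack_ovf) travel in RDI, RSI and RDX, following the SysV AMD64 calling convention. */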
	UCONTEXT_REG_RIP (sigctx) = (unsigned long)altstack_handle_and_restore;
	UCONTEXT_REG_RSP (sigctx) = (unsigned long)(sp - 1);
	UCONTEXT_REG_RDI (sigctx) = (unsigned long)(copied_ctx);
	UCONTEXT_REG_RSI (sigctx) = (guint64)exc;
	UCONTEXT_REG_RDX (sigctx) = stack_ovf;
#endif
}
guint64
mono_amd64_get_original_ip (void)
{
	MonoLMF *lmf = mono_get_lmf ();

	/* Reset the change to previous_lmf */
	lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf & ~1);

	return lmf->rip;
}
GSList*
mono_amd64_get_exception_trampolines (gboolean aot)
{
	MonoTrampInfo *info;
	GSList *tramps = NULL;

	/* LLVM needs different throw trampolines */
	get_throw_trampoline (&info, FALSE, TRUE, FALSE, FALSE, "llvm_throw_corlib_exception_trampoline", aot);
	tramps = g_slist_prepend (tramps, info);

	get_throw_trampoline (&info, FALSE, TRUE, TRUE, FALSE, "llvm_throw_corlib_exception_abs_trampoline", aot);
	tramps = g_slist_prepend (tramps, info);

	get_throw_trampoline (&info, FALSE, TRUE, TRUE, TRUE, "llvm_resume_unwind_trampoline", aot);
	tramps = g_slist_prepend (tramps, info);

	return tramps;
}
void
mono_arch_exceptions_init (void)
{
	gpointer tramp;
	GSList *tramps, *l;

	if (mono_aot_only) {
		tramp = mono_aot_get_trampoline ("llvm_throw_corlib_exception_trampoline");
		mono_register_jit_icall (tramp, "llvm_throw_corlib_exception_trampoline", NULL, TRUE);
		tramp = mono_aot_get_trampoline ("llvm_throw_corlib_exception_abs_trampoline");
		mono_register_jit_icall (tramp, "llvm_throw_corlib_exception_abs_trampoline", NULL, TRUE);
		tramp = mono_aot_get_trampoline ("llvm_resume_unwind_trampoline");
		mono_register_jit_icall (tramp, "llvm_resume_unwind_trampoline", NULL, TRUE);
	} else {
		/* Call this to avoid initialization races */
		tramps = mono_amd64_get_exception_trampolines (FALSE);
		for (l = tramps; l; l = l->next) {
			MonoTrampInfo *info = (MonoTrampInfo *)l->data;

			mono_register_jit_icall (info->code, g_strdup (info->name), NULL, TRUE);
			mono_tramp_info_register (info, NULL);
		}
		g_slist_free (tramps);
	}
}
/*
 * The mono_arch_unwindinfo* methods are used to build and add
 * function table info for each emitted method from mono. On Winx64
 * the seh handler will not be called if the mono methods are not
 * added to the function table.
 *
 * We should not need to add non-volatile register info to the
 * table since mono stores that info elsewhere. (Except for the register
 * used as the frame pointer.)
 */
#define MONO_MAX_UNWIND_CODES 22
typedef union _UNWIND_CODE {
	struct {
		guchar CodeOffset;
		guchar UnwindOp : 4;
		guchar OpInfo   : 4;
	};
	gushort FrameOffset;
} UNWIND_CODE, *PUNWIND_CODE;

typedef struct _UNWIND_INFO {
	guchar Version       : 3;
	guchar Flags         : 5;
	guchar SizeOfProlog;
	guchar CountOfCodes;
	guchar FrameRegister : 4;
	guchar FrameOffset   : 4;
	/* custom size for mono allowing for */
	/* UWOP_PUSH_NONVOL ebp offset = 21 */
	/* UWOP_ALLOC_LARGE : requires 2 or 3 offset = 20 */
	/* UWOP_SET_FPREG : requires 2 offset = 17 */
	/* UWOP_PUSH_NONVOL offset = 15-0 */
	UNWIND_CODE UnwindCode[MONO_MAX_UNWIND_CODES];

/*	UNWIND_CODE MoreUnwindCode[((CountOfCodes + 1) & ~1) - 1];
 *	OPTIONAL ULONG ExceptionHandler;
 *	OPTIONAL ULONG FunctionEntry;
 *	OPTIONAL ULONG ExceptionData[]; */
} UNWIND_INFO, *PUNWIND_INFO;

typedef struct {
	RUNTIME_FUNCTION runtimeFunction;
	UNWIND_INFO unwindInfo;
} MonoUnwindInfo, *PMonoUnwindInfo;
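/*
 * A minimal sketch (not part of the original file) of how a Win64 code emitter is
 * expected to drive the helpers below while emitting a standard prolog; the concrete
 * register choice and allocation size are illustrative assumptions only:
 *
 *	gpointer ui = NULL;
 *	guint8 *buf = code;                                   // start of the emitted method
 *	amd64_push_reg (code, AMD64_RBP);
 *	mono_arch_unwindinfo_add_push_nonvol (&ui, buf, code, AMD64_RBP);
 *	amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, 8);
 *	mono_arch_unwindinfo_add_set_fpreg (&ui, buf, code, AMD64_RBP);
 *	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 0x40);
 *	mono_arch_unwindinfo_add_alloc_stack (&ui, buf, code, 0x40);
 *	... emit the method body ...
 *	mono_arch_unwindinfo_install_unwind_info (&ui, buf, code_size);
 */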
void
mono_arch_unwindinfo_create (gpointer* monoui)
{
	PMonoUnwindInfo newunwindinfo;
	*monoui = newunwindinfo = g_new0 (MonoUnwindInfo, 1);
	newunwindinfo->unwindInfo.Version = 1;
}

void
mono_arch_unwindinfo_add_push_nonvol (gpointer* monoui, gpointer codebegin, gpointer nextip, guchar reg )
{
	PMonoUnwindInfo unwindinfo;
	PUNWIND_CODE unwindcode;
	guchar codeindex;

	if (!*monoui)
		mono_arch_unwindinfo_create (monoui);

	unwindinfo = (MonoUnwindInfo*)*monoui;

	if (unwindinfo->unwindInfo.CountOfCodes >= MONO_MAX_UNWIND_CODES)
		g_error ("Larger allocation needed for the unwind information.");

	codeindex = MONO_MAX_UNWIND_CODES - (++unwindinfo->unwindInfo.CountOfCodes);
	unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];
	unwindcode->UnwindOp = 0; /*UWOP_PUSH_NONVOL*/
	unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin));
	unwindcode->OpInfo = reg;

	if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
		g_error ("Adding unwind info in wrong order.");

	unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
}
void
mono_arch_unwindinfo_add_set_fpreg (gpointer* monoui, gpointer codebegin, gpointer nextip, guchar reg )
{
	PMonoUnwindInfo unwindinfo;
	PUNWIND_CODE unwindcode;
	guchar codeindex;

	if (!*monoui)
		mono_arch_unwindinfo_create (monoui);

	unwindinfo = (MonoUnwindInfo*)*monoui;

	if (unwindinfo->unwindInfo.CountOfCodes + 1 >= MONO_MAX_UNWIND_CODES)
		g_error ("Larger allocation needed for the unwind information.");

	codeindex = MONO_MAX_UNWIND_CODES - (unwindinfo->unwindInfo.CountOfCodes += 2);
	unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];
	unwindcode->FrameOffset = 0; /*Assuming no frame pointer offset for mono*/
	unwindcode++;
	unwindcode->UnwindOp = 3; /*UWOP_SET_FPREG*/
	unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin));
	unwindcode->OpInfo = reg;

	unwindinfo->unwindInfo.FrameRegister = reg;

	if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
		g_error ("Adding unwind info in wrong order.");

	unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
}
void
mono_arch_unwindinfo_add_alloc_stack (gpointer* monoui, gpointer codebegin, gpointer nextip, guint size )
{
	PMonoUnwindInfo unwindinfo;
	PUNWIND_CODE unwindcode;
	guchar codesneeded;
	guchar codeindex;

	if (!*monoui)
		mono_arch_unwindinfo_create (monoui);

	unwindinfo = (MonoUnwindInfo*)*monoui;

	if (size < 0x8)
		g_error ("Stack allocation must be equal to or greater than 0x8.");

	if (size <= 0x80)
		codesneeded = 1;
	else if (size <= 0x7FFF8)
		codesneeded = 2;
	else
		codesneeded = 3;

	if (unwindinfo->unwindInfo.CountOfCodes + codesneeded > MONO_MAX_UNWIND_CODES)
		g_error ("Larger allocation needed for the unwind information.");

	codeindex = MONO_MAX_UNWIND_CODES - (unwindinfo->unwindInfo.CountOfCodes += codesneeded);
	unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];
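	/* UWOP_ALLOC_SMALL packs allocations of 8-128 bytes into OpInfo; anything larger becomes UWOP_ALLOC_LARGE with the size stored in the following slot(s). */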
	if (codesneeded == 1) {
		/* The size of the allocation is
		   (the number in the OpInfo member) times 8 plus 8 */
		unwindcode->OpInfo = (size - 8)/8;
		unwindcode->UnwindOp = 2; /*UWOP_ALLOC_SMALL*/
	}
	else {
		if (codesneeded == 3) {
			/* the unscaled size of the allocation is recorded
			   in the next two slots in little-endian format */
			*((unsigned int*)(&unwindcode->FrameOffset)) = size;
			unwindcode += 2;
			unwindcode->OpInfo = 1;
		}
		else {
			/* the size of the allocation divided by 8
			   is recorded in the next slot */
			unwindcode->FrameOffset = size/8;
			unwindcode++;
			unwindcode->OpInfo = 0;
		}
		unwindcode->UnwindOp = 1; /*UWOP_ALLOC_LARGE*/
	}

	unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin));

	if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
		g_error ("Adding unwind info in wrong order.");

	unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
}
guint
mono_arch_unwindinfo_get_size (gpointer monoui)
{
	PMonoUnwindInfo unwindinfo;
	if (!monoui)
		return 0;

	unwindinfo = (MonoUnwindInfo*)monoui;
	return (8 + sizeof (MonoUnwindInfo)) -
		(sizeof (UNWIND_CODE) * (MONO_MAX_UNWIND_CODES - unwindinfo->unwindInfo.CountOfCodes));
}
static PRUNTIME_FUNCTION
MONO_GET_RUNTIME_FUNCTION_CALLBACK ( DWORD64 ControlPc, IN PVOID Context )
{
	MonoJitInfo *ji;
	guint64 pos;
	PMonoUnwindInfo targetinfo;
	MonoDomain *domain = mono_domain_get ();

	ji = mini_jit_info_table_find (domain, (char*)ControlPc, NULL);
	if (!ji)
		return 0;

	pos = (guint64)(((char*)ji->code_start) + ji->code_size);
	targetinfo = (PMonoUnwindInfo)ALIGN_TO (pos, 8);

	targetinfo->runtimeFunction.UnwindData = ((DWORD64)&targetinfo->unwindInfo) - ((DWORD64)Context);

	return &targetinfo->runtimeFunction;
}
void
mono_arch_unwindinfo_install_unwind_info (gpointer* monoui, gpointer code, guint code_size)
{
	PMonoUnwindInfo unwindinfo, targetinfo;
	guchar codecount;
	guint64 targetlocation;

	if (!*monoui)
		return;

	unwindinfo = (MonoUnwindInfo*)*monoui;
	targetlocation = (guint64)&(((guchar*)code)[code_size]);
	targetinfo = (PMonoUnwindInfo) ALIGN_TO(targetlocation, 8);

	unwindinfo->runtimeFunction.EndAddress = code_size;
	unwindinfo->runtimeFunction.UnwindData = ((guchar*)&targetinfo->unwindInfo) - ((guchar*)code);

	memcpy (targetinfo, unwindinfo, sizeof (MonoUnwindInfo) - (sizeof (UNWIND_CODE) * MONO_MAX_UNWIND_CODES));

	codecount = unwindinfo->unwindInfo.CountOfCodes;
	if (codecount) {
		memcpy (&targetinfo->unwindInfo.UnwindCode[0], &unwindinfo->unwindInfo.UnwindCode[MONO_MAX_UNWIND_CODES-codecount],
			sizeof (UNWIND_CODE) * unwindinfo->unwindInfo.CountOfCodes);
	}

	g_free (unwindinfo);
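	/* The table identifier passed to RtlInstallFunctionTableCallback must have its two low-order bits set, hence the | 0x3 on the code address below. */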
	RtlInstallFunctionTableCallback (((DWORD64)code) | 0x3, (DWORD64)code, code_size, MONO_GET_RUNTIME_FUNCTION_CALLBACK, code, NULL);
}
#if MONO_SUPPORT_TASKLETS
MonoContinuationRestore
mono_tasklets_arch_restore (void)
{
	static guint8* saved = NULL;
	guint8 *code, *start;
	int cont_reg = AMD64_R9; /* register usable on both call conventions */
	const guint kMaxCodeSize = 64;

	if (saved)
		return (MonoContinuationRestore)saved;
	code = start = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize);
	/* the signature is: restore (MonoContinuation *cont, int state, MonoLMF **lmf_addr) */
	/* cont is in AMD64_ARG_REG1 ($rcx or $rdi)
	 * state is in AMD64_ARG_REG2 ($rdx or $rsi)
	 * lmf_addr is in AMD64_ARG_REG3 ($r8 or $rdx)
	 * We move cont to cont_reg since we need both rcx and rdi for the copy
	 * state is moved to $rax so it's set up as the return value and we can overwrite $rsi
	 */
	amd64_mov_reg_reg (code, cont_reg, MONO_AMD64_ARG_REG1, 8);
	amd64_mov_reg_reg (code, AMD64_RAX, MONO_AMD64_ARG_REG2, 8);
	/* setup the copy of the stack */
	amd64_mov_reg_membase (code, AMD64_RCX, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, stack_used_size), sizeof (int));
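	/* stack_used_size is in bytes; the shift below converts it into the 8-byte word count used by the rep movs copy. */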
	amd64_shift_reg_imm (code, X86_SHR, AMD64_RCX, 3);
	amd64_mov_reg_membase (code, AMD64_RSI, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, saved_stack), sizeof (gpointer));
	amd64_mov_reg_membase (code, AMD64_RDI, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, return_sp), sizeof (gpointer));
	amd64_prefix (code, X86_REP_PREFIX);

	/* now restore the registers from the LMF */
	amd64_mov_reg_membase (code, AMD64_RCX, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, lmf), 8);
	amd64_mov_reg_membase (code, AMD64_RBP, AMD64_RCX, MONO_STRUCT_OFFSET (MonoLMF, rbp), 8);
	amd64_mov_reg_membase (code, AMD64_RSP, AMD64_RCX, MONO_STRUCT_OFFSET (MonoLMF, rsp), 8);

	amd64_mov_reg_reg (code, AMD64_R14, AMD64_ARG_REG3, 8);
	amd64_mov_reg_reg (code, AMD64_R12, AMD64_ARG_REG3, 8);

	/* state is already in rax */
	amd64_jump_membase (code, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, return_ip));
	g_assert ((code - start) <= kMaxCodeSize);

	mono_arch_flush_icache (start, code - start);
	mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL);

	saved = start;
	return (MonoContinuationRestore)saved;
}
#endif /* MONO_SUPPORT_TASKLETS */
/*
 * mono_arch_setup_resume_sighandler_ctx:
 *
 * Setup CTX so execution continues at FUNC.
 */
void
mono_arch_setup_resume_sighandler_ctx (MonoContext *ctx, gpointer func)
{
	/*
	 * When resuming from a signal handler, the stack should be misaligned, just like right after
	 * a call.
	 */
	if ((((guint64)MONO_CONTEXT_GET_SP (ctx)) % 16) == 0)
		MONO_CONTEXT_SET_SP (ctx, (guint64)MONO_CONTEXT_GET_SP (ctx) - 8);
	MONO_CONTEXT_SET_IP (ctx, func);
}